from testify import *
import datetime
import pytz

from dmc import (
    Time,
    TimeInterval,
    TimeSpan,
    TimeIterator,
    TimeSpanIterator)


class InitTimeTestCase(TestCase):
    def test_direct(self):
        d = Time(2014, 4, 18, 17, 50, 21)
        assert_equal(d.year, 2014)
        assert_equal(d.month, 4)
        assert_equal(d.day, 18)
        assert_equal(d.hour, 17)
        assert_equal(d.minute, 50)
        assert_equal(d.second, 21)

    def test_direct_tz(self):
        d = Time(2014, 4, 18, 17, 50, 21, tz='US/Pacific')
        assert_equal(d.year, 2014)
        assert_equal(d.month, 4)
        assert_equal(d.day, 19)
        assert_equal(d.hour, 1)
        assert_equal(d.minute, 50)
        assert_equal(d.second, 21)

    def test_direct_local(self):
        d = Time(2014, 4, 18, 17, 50, 21, local=True)
        assert_equal(d.year, 2014)
        assert_equal(d.month, 4)
        # can't really say for sure
        assert d.day in [18, 19]
        assert_equal(d.minute, 50)
        assert_equal(d.second, 21)

    def test_timestamp(self):
        ts = 1398125982.036391
        t = Time.from_timestamp(ts)
        assert_equal(t.year, 2014)
        assert_equal(t.month, 4)
        assert_equal(t.day, 22)
        assert_equal(t.hour, 0)
        assert_equal(t.minute, 19)
        assert_equal(t.second, 42)
        assert_equal(t.microsecond, 36391)

    def test_datetime_naive(self):
        dt = datetime.datetime(2014, 4, 18, 17, 50, 21)
        t = Time.from_datetime(dt)
        assert_equal(t.day, 18)
        assert_equal(t.hour, 17)
        assert_equal(t.minute, 50)
        assert_equal(t.second, 21)

    def test_datetime_tz(self):
        dt = datetime.datetime(2014, 4, 18, 17, 50, 21)
        dt = pytz.timezone('US/Pacific').localize(dt)
        t = Time.from_datetime(dt)
        assert_equal(t.year, 2014)
        assert_equal(t.month, 4)
        assert_equal(t.day, 19)
        assert_equal(t.hour, 0)
        assert_equal(t.minute, 50)
        assert_equal(t.second, 21)

    def test_str(self):
        t = Time.from_str("2014-04-18T17:50:21.036391")
        assert_equal(t.year, 2014)
        assert_equal(t.month, 4)
        assert_equal(t.day, 18)
        assert_equal(t.hour, 17)
        assert_equal(t.minute, 50)
        assert_equal(t.second, 21)

    def test_str_tz(self):
        t = Time.from_str("2014-04-18T17:50:21.036391-07:00")
        assert_equal(t.year, 2014)
        assert_equal(t.month, 4)
        assert_equal(t.day, 19)
        assert_equal(t.hour, 0)
        assert_equal(t.minute, 50)
        assert_equal(t.second, 21)

    def test_str_specify_tz(self):
        t = Time.from_str("2014-04-18T17:50:21.036391", tz='US/Pacific')
        assert_equal(t.year, 2014)
        assert_equal(t.month, 4)
        assert_equal(t.day, 19)
        assert_equal(t.hour, 0)
        assert_equal(t.minute, 50)
        assert_equal(t.second, 21)


class ConvertTimeTestCase(TestCase):
    @setup
    def create_time(self):
        self.t = Time(2014, 4, 18, 17, 50, 21, 36391)

    def test_str(self):
        assert_equal(self.t.to_str(), "2014-04-18T17:50:21.036391+00:00")

    def test_str_tz(self):
        assert_equal(
            self.t.to_str(tz='US/Pacific'),
            "2014-04-18T10:50:21.036391-07:00")

    def test_str_local(self):
        # We don't really know
        assert self.t.to_str(local=True)

    def test_str_format(self):
        assert_equal(
            self.t.to_str(format="%m/%d/%Y %H:%M"),
            "04/18/2014 17:50")

    def test_timestamp(self):
        assert_equal(self.t.to_timestamp(), 1397872221.036391)

    def test_datetime(self):
        dt = self.t.to_datetime()
        assert_equal(dt.year, 2014)
        assert_equal(dt.month, 4)
        assert_equal(dt.day, 18)
        assert_equal(dt.hour, 17)
        assert_equal(dt.minute, 50)
        assert_equal(dt.second, 21)
        assert_equal(dt.tzinfo, pytz.UTC)

    def test_datetime_tz(self):
        dt = self.t.to_datetime(tz='US/Pacific')
        assert_equal(dt.year, 2014)
        assert_equal(dt.month, 4)
        assert_equal(dt.day, 18)
        assert_equal(dt.hour, 10)
        assert_equal(dt.minute, 50)
        assert_equal(dt.second, 21)
        assert_equal(str(dt.tzinfo), 'US/Pacific')

    def test_datetime_local(self):
        dt = self.t.to_datetime(local=True)
        assert_equal(dt.year, 2014)
        assert_equal(dt.month, 4)
        assert_equal(dt.minute, 50)
        assert_equal(dt.second, 21)

    def test_human(self):
        # Just make sure it doesn't crash
        assert self.t.to_human()


class ArithmeticTimeTest(TestCase):
    def test_time_add(self):
        t1 = Time(2014, 4, 18, 17, 50, 21)
        ti = TimeInterval(2.22)
        t2 = t1 + ti
        assert_equal(t2.second, 23)
        assert_equal(t2.microsecond, 220000)

    def test_time_sub(self):
        t1 = Time(2014, 4, 18, 17, 50, 21)
        ti = TimeInterval(2.22)
        t2 = t1 - ti
        assert_equal(t2.second, 18)
        assert_equal(t2.microsecond, 780000)


class InitTimeIntervalTest(TestCase):
    def test_seconds(self):
        i = TimeInterval(21)
        assert_equal(i.seconds, 21)
        assert_equal(i.microseconds, 0)

    def test_float_seconds(self):
        i = TimeInterval(1.2)
        assert_equal(i.seconds, 1)
        assert_equal(i.microseconds, 200000)

    def test_minutes(self):
        i = TimeInterval(minutes=2)
        assert_equal(i.seconds, 120)
        assert_equal(i.microseconds, 0)

    def test_hours(self):
        i = TimeInterval(hours=1)
        assert_equal(i.seconds, 3600)
        assert_equal(i.microseconds, 0)

    def test_microseconds(self):
        i = TimeInterval(microseconds=10)
        assert_equal(i.seconds, 0)
        assert_equal(i.microseconds, 10)

    def test_microsecond_overflow(self):
        i = TimeInterval(seconds=1.9, microseconds=200000)
        assert_equal(i.seconds, 2)
        assert_equal(i.microseconds, 100000)

    def test_timedelta(self):
        td = datetime.timedelta(days=1, seconds=10, microseconds=1000)
        i = TimeInterval.from_timedelta(td)
        assert_equal(i.seconds, round(td.total_seconds()))
        assert_equal(i.microseconds, td.microseconds)
        assert_equal(float(i), td.total_seconds())


class ConvertTimeIntervalTest(TestCase):
    def test_int(self):
        i = TimeInterval(4)
        assert_equal(int(i), 4)

    def test_int_round(self):
        i = TimeInterval(4, microseconds=600000)
        assert_equal(int(i), 5)

    def test_float(self):
        i = TimeInterval(4, microseconds=600000)
        assert_equal(float(i), 4.6)

    def test_str(self):
        i = TimeInterval(hours=1, minutes=45, seconds=21, microseconds=600000)
        assert_equal(str(i), "+01:45:21.6")


class ArithmeticTimeIntervalTest(TestCase):
    def test_add(self):
        i1 = TimeInterval(1)
        i2 = TimeInterval(1)
        i3 = i1 + i2
        assert_equal(i3.seconds, 2)

    def test_add_int(self):
        i1 = TimeInterval(1)
        i2 = 1
        i3 = i1 + i2
        assert_equal(i3.seconds, 2)
        i4 = i2 + i1
        assert_equal(i4.seconds, 2)

    def test_sub(self):
        i1 = TimeInterval(2)
        i2 = TimeInterval(1)
        i3 = i1 - i2
        assert_equal(i3.seconds, 1)

    def test_sub_neg(self):
        i1 = TimeInterval(1)
        i2 = TimeInterval(2)
        i3 = i1 - i2
        assert_equal(i3.seconds, -1)

    def test_sub_int(self):
        i1 = TimeInterval(2)
        i2 = 1
        i3 = i1 - i2
        assert_equal(i3.seconds, 1)
        i4 = i2 - i1
        assert_equal(i4, -1)

    def test_mul(self):
        i1 = TimeInterval(2)
        i2 = i1 * 3
        assert_equal(int(i2), 6)

    def test_div(self):
        i1 = TimeInterval(5)
        i2 = i1 / 2
        assert_equal(float(i2), 2.5)

    def test_div_micro(self):
        i1 = TimeInterval(2, microseconds=22)
        i2 = i1 / 2
        assert_equal(i2.seconds, 1)
        assert_equal(i2.microseconds, 11)

    def test_abs_neg(self):
        i1 = TimeInterval(-2.22)
        i2 = abs(i1)
        assert_equal(float(i2), 2.22)

    def test_abs_pos(self):
        i1 = TimeInterval(2.22)
        i2 = abs(i1)
        assert_equal(float(i2), 2.22)

    def test_cmp(self):
        assert_equal(TimeInterval(2.22), TimeInterval(2, microseconds=220000))
        assert_gt(TimeInterval(2.22), TimeInterval(2.20))
        assert_gt(TimeInterval(3.22), TimeInterval(2.5))
        assert_lt(TimeInterval(0), TimeInterval(microseconds=1))
        assert_lt(TimeInterval(-3), TimeInterval(2.5))


class TimeSpanTest(TestCase):
    def test_iter(self):
        t1 = Time.now()
        t2 = Time.now() + 30
        ts = TimeSpan(t1, t2)
        start_t, end_t = ts
        assert_equal(start_t, t1)
        assert_equal(end_t, t2)

    def test_get(self):
        t1 = Time.now()
        t2 = Time.now() + 30
        ts = TimeSpan(t1, t2)
        assert_equal(ts[0], t1)
        assert_equal(ts[1], t2)


class TimeIteratorTest(TestCase):
    def test(self):
        start_t = Time.now()
        end_t = start_t + 5*60
        times = list(TimeIterator(TimeSpan(start_t, end_t), TimeInterval(60)))
        assert_equal(len(times), 6)
        assert_equal(times[0], start_t)
        assert_equal(times[-1], end_t)


class TimeSpanIteratorTest(TestCase):
    def test(self):
        start_t = Time.now()
        end_t = start_t + 5*60
        times = list(TimeSpanIterator(TimeSpan(start_t, end_t), TimeInterval(60)))
        assert_equal(len(times), 5)
        assert_equal(times[0].start, start_t)
        assert_equal(times[-1].end, end_t)
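# Usage sketch (illustrative, not part of the test suite): exercises the same
# dmc API the tests above assert on -- integer seconds add to a Time, and
# TimeIterator/TimeSpanIterator walk a TimeSpan.  The five-minute window and
# one-minute step are arbitrary example values.
if __name__ == '__main__':
    start = Time.now()
    span = TimeSpan(start, start + 5*60)

    # Six boundary points, one minute apart (cf. TimeIteratorTest).
    for t in TimeIterator(span, TimeInterval(60)):
        print(t.to_str())

    # Five consecutive sub-spans covering the window (cf. TimeSpanIteratorTest).
    for sub_span in TimeSpanIterator(span, TimeInterval(60)):
        print('{} -> {}'.format(sub_span.start.to_str(), sub_span.end.to_str()))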
""" The Saltelli method Code is built upon the code provided by Vinzenze Eck. """ import numpy import numpoly import chaospy class Saltelli(object): """ Buffer class to be able to retrieve Saltelli matrices. The core of the method relies on cross-combining the columns of two random matrices as part of a double expectation. Examples: >>> dist = chaospy.Iid(chaospy.Uniform(), 2) >>> generator = Saltelli(dist, 3, rule="halton") >>> generator[False, False].round(4) array([[0.875 , 0.0625, 0.5625], [0.5556, 0.8889, 0.037 ]]) >>> generator[False, True].round(4) array([[0.875 , 0.0625, 0.5625], [0.4444, 0.7778, 0.2222]]) >>> generator[True, False].round(4) array([[0.125 , 0.625 , 0.375 ], [0.5556, 0.8889, 0.037 ]]) >>> generator[True, True].round(4) array([[0.125 , 0.625 , 0.375 ], [0.4444, 0.7778, 0.2222]]) """ def __init__(self, dist, samples, poly=None, rule="random"): """ Initialize the matrix generator. dist (chaopy.Distribution): distribution to sample from. samples (int): The number of samples to draw for each matrix. poly (numpoly.ndpoly): If provided, evaluated samples through polynomials before returned. rule (str): Scheme for generating random samples. """ self.dist = dist samples_ = chaospy.generate_samples( 2*samples, domain=len(dist), rule=rule) self.samples1 = samples_.T[:samples].T self.samples2 = samples_.T[samples:].T self.poly = poly self.buffer = {} def get_matrix(self, indices): """Retrieve Saltelli matrix.""" new = numpy.empty(self.samples1.shape) for idx in range(len(indices)): if indices[idx]: new[idx] = self.samples1[idx] else: new[idx] = self.samples2[idx] new = self.dist.inv(new) if isinstance(self.poly, numpoly.ndpoly) and self.poly.size: new = self.poly(*new) return new def __getitem__(self, indices): """Shortcut to `get_matrix`.""" assert len(self.dist) == len(indices) # uniquify: key = tuple(bool(idx) for idx in indices) if key in self.buffer: matrix = self.buffer[key] else: matrix = self.get_matrix(indices) self.buffer[key] = matrix return matrix def Sens_m_sample(poly, dist, samples, rule="random"): """ First order sensitivity indices estimated using Saltelli's method. Args: poly (numpoly.ndpoly): If provided, evaluated samples through polynomials before returned. dist (chaopy.Distribution): distribution to sample from. samples (int): The number of samples to draw for each matrix. rule (str): Scheme for generating random samples. Return: (numpy.ndarray): array with `shape == (len(dist), len(poly))` where `sens[dim][pol]` is the first sensitivity index for distribution dimensions `dim` and polynomial index `pol`. Examples: >>> dist = chaospy.Iid(chaospy.Uniform(), 2) >>> poly = chaospy.monomial(2, 3, dimensions=2, reverse=False) >>> poly polynomial([q0**2, q0*q1, q1**2]) >>> Sens_m_sample(poly, dist, 10000, rule="hammersley").round(4) array([[0.008 , 0.0026, 0. ], [0. , 0.6464, 2.1321]]) """ dim = len(dist) generator = Saltelli(dist, samples, poly, rule=rule) zeros = [0]*dim ones = [1]*dim index = [0]*dim variance = numpy.var(generator[zeros], -1) matrix_0 = generator[zeros] matrix_1 = generator[ones] mean = .5*(numpy.mean(matrix_1) + numpy.mean(matrix_0)) matrix_0 -= mean matrix_1 -= mean out = [ numpy.mean(matrix_1*((generator[index]-mean)-matrix_0), -1) / numpy.where(variance, variance, 1) for index in numpy.eye(dim, dtype=bool) ] return numpy.array(out) def Sens_m2_sample(poly, dist, samples, rule="random"): """ Second order sensitivity indices estimated using Saltelli's method. 
Args: poly (numpoly.ndpoly): If provided, evaluated samples through polynomials before returned. dist (chaopy.Distribution): distribution to sample from. samples (int): The number of samples to draw for each matrix. rule (str): Scheme for generating random samples. Return: (numpy.ndarray): array with `shape == (len(dist), len(dist), len(poly))` where `sens[dim1][dim2][pol]` is the correlating sensitivity between dimension `dim1` and `dim2` and polynomial index `pol`. Examples: >>> dist = chaospy.Iid(chaospy.Uniform(), 2) >>> poly = chaospy.monomial(2, 3, dimensions=2, reverse=False) >>> poly polynomial([q0**2, q0*q1, q1**2]) >>> Sens_m2_sample(poly, dist, 10000, rule="halton").round(4) array([[[ 0.008 , 0.0026, 0. ], [-0.0871, 1.1516, 1.2851]], <BLANKLINE> [[-0.0871, 1.1516, 1.2851], [ 0. , 0.7981, 1.38 ]]]) """ dim = len(dist) generator = Saltelli(dist, samples, poly, rule=rule) zeros = [0]*dim ones = [1]*dim index = [0]*dim variance = numpy.var(generator[zeros], -1) matrix_0 = generator[zeros] matrix_1 = generator[ones] mean = .5*(numpy.mean(matrix_1) + numpy.mean(matrix_0)) matrix_0 -= mean matrix_1 -= mean for dim1 in range(dim): index[dim1] = 1 matrix = generator[index]-mean vals = numpy.mean( matrix_1*(matrix-matrix_0), -1, ) / numpy.where(variance, variance, 1) if not dim1: out = numpy.empty((dim, dim)+vals.shape) out[dim1, dim1] = vals for dim2 in range(dim1+1, dim): index[dim2] = 1 matrix = generator[index]-mean out[dim1, dim2] = out[dim2, dim1] = numpy.mean( matrix_1*(matrix-matrix_0), -1, ) / numpy.where(variance, variance, 1) index[dim2] = 0 index[dim1] = 0 return out def Sens_t_sample(poly, dist, samples, rule="random"): """ Total order sensitivity indices estimated using Saltelli's method. Args: poly (numpoly.ndpoly): If provided, evaluated samples through polynomials before returned. dist (chaopy.Distribution): distribution to sample from. samples (int): The number of samples to draw for each matrix. rule (str): Scheme for generating random samples. Return: (numpy.ndarray): array with `shape == (len(dist), len(poly))` where `sens[dim][pol]` is the total order sensitivity index for distribution dimensions `dim` and polynomial index `pol`. Examples: >>> dist = chaospy.Iid(chaospy.Uniform(0, 1), 2) >>> poly = chaospy.monomial(2, 3, dimensions=2, reverse=False) >>> poly polynomial([q0**2, q0*q1, q1**2]) >>> Sens_t_sample(poly, dist, 10000, rule="halton").round(4) array([[ 1. , 0.2 , -0.3807], [ 0.9916, 0.9962, 1. ]]) """ generator = Saltelli(dist, samples, poly, rule=rule) dim = len(dist) zeros = [0]*dim variance = numpy.var(generator[zeros], -1) return numpy.array([ 1-numpy.mean((generator[~index]-generator[zeros])**2, -1,) / (2*numpy.where(variance, variance, 1)) for index in numpy.eye(dim, dtype=bool) ])
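# Illustrative sketch (not part of the module above): the pick-and-freeze
# construction that ``Saltelli.get_matrix`` implements, written out with plain
# NumPy for a toy model.  The helper name ``first_order_pick_and_freeze`` and
# the additive test model are assumptions made for this example; the estimator
# mirrors the one used in ``Sens_m_sample``.
def first_order_pick_and_freeze(model, dim, samples, seed=0):
    """Estimate first order Sobol indices of ``model`` on the unit hypercube."""
    rng = numpy.random.default_rng(seed)
    a_matrix = rng.random((samples, dim))
    b_matrix = rng.random((samples, dim))
    f_a = model(a_matrix)
    f_b = model(b_matrix)
    variance = numpy.var(numpy.concatenate([f_a, f_b]))
    indices = []
    for idx in range(dim):
        # "Freeze" column ``idx`` from A, take every other column from B.
        ab_matrix = b_matrix.copy()
        ab_matrix[:, idx] = a_matrix[:, idx]
        f_ab = model(ab_matrix)
        indices.append(numpy.mean(f_a*(f_ab-f_b))/variance)
    return numpy.array(indices)


if __name__ == "__main__":
    # Additive model: q0 should carry roughly 16/17 of the variance.
    print(first_order_pick_and_freeze(
        lambda q: 4*q[:, 0] + q[:, 1], dim=2, samples=10000))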
#!/usr/bin/env python # -*- python -*- #BEGIN_LEGAL # #Copyright (c) 2020 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # #END_LEGAL import sys import os import re import shlex import collections import math import genutil import enumer import enum_txt_writer import codegen # dict space name -> numerical space id _space_id = {'legacy':0, 'vex':1, 'evex':2, 'xop':3, 'knc':4} _space_id_to_name = {v: k for k,v in _space_id.items()} # list ordered by numerical space id _space_id_sorted = sorted(_space_id.keys(), key=lambda x: _space_id[x]) def _encoding_space_max(): return max(_space_id.values()) def _encoding_space_range(): #Could make this dynamic based on what spaces are enabled return range(0, _encoding_space_max()+1) def vexvalid_to_encoding_space(vv): """Input number, output string""" return _space_id_sorted[vv] def encoding_space_to_vexvalid(space): """Input string, output number""" return _space_id[space] def _die(s): genutil.die(s) def _msgb(b,s=''): genutil.msgerr("[{}] {}".format(b,s)) class map_info_t(object): def __init__(self): self.map_name = None self.space = None # legacy, vex, evex, xop self.legacy_escape = None # N/A or 0f self.legacy_opcode = None # N/A or 38, 3a self.map_id = None # N/A or 0,1,2,3,... 8,9,0xA # "var" means variable, requires a table generated based on defined instructions self.modrm = None # var,yes,no, has modrm self.disp = None # var,yes,no, has disp self.imm = None # var,0,1,2,4 (bytes) var=7 self.opcpos = None # 0,1,2, ... -1 (last) opcode position in pattern self.priority = 10 # search_pattern is the string that we use to identify this # map in the XED decode patterns. The pattern may have spaces # in it. 
(and motivates using shlex to parse the input lines) self.search_pattern = None def is_legacy(self): return self.space == 'legacy' def is_vex(self): return self.space == 'vex' def is_evex(self): return self.space == 'evex' def is_xop(self): return self.space == 'xop' def map_short_name(self): if self.map_name == 'amd_3dnow': return 'AMD' h = hex(self.map_id)[-1] return str(h) def ild_enum(self): s = self.map_short_name() if self.space == 'XOP': s = '_XOP{}'.format(s) return 'XED_ILD_MAP{}'.format(s) def get_legacy_escapes(self): if self.legacy_opcode == 'N/A': return (self.legacy_escape, None) return (self.legacy_escape, self.legacy_opcode) def has_variable_modrm(self): return self.modrm == 'var' def has_regular_modrm(self): return self.modrm == 'yes' def has_variable_disp(self): return self.disp == 'var' def has_variable_imm(self): return self.imm == 'var' def __str__(self): s = [] s.append("name: {}".format(self.map_name)) s.append("space: {}".format(self.space)) s.append("legacyesc: {}".format(self.legacy_escape)) s.append("legacyopc: {}".format(self.legacy_opcode)) s.append("mapid: {}".format(self.map_id)) s.append("modrm: {}".format(self.modrm)) s.append("disp: {}".format(self.disp)) s.append("imm: {}".format(self.imm)) s.append("opcpos: {}".format(self.opcpos)) s.append("priority: {}".format(self.priority)) s.append("search_pattern: {}".format(self.search_pattern)) return " ".join(s) _map_info_fields = ['map_name', 'space', 'legacy_escape', 'legacy_opcode', 'map_id', 'modrm', 'disp', 'imm', 'opcpos', 'search_pattern' ] def _parse_map_line(s): global _map_info_fields # shlex allows for quoted substrings containing spaces as # individual args. t = shlex.split(s.strip()) if len(t) != len(_map_info_fields): _die("Bad map description line: [{}]".format(s)) mi = map_info_t() for i,fld in enumerate(_map_info_fields): setattr(mi,fld,t[i]) # this gets used in function names so must only be legal characters mi.map_name = re.sub('-', '_', mi.map_name) if mi.space == 'legacy': if mi.legacy_escape != 'N/A': mi.legacy_escape_int = int(mi.legacy_escape,16) if mi.legacy_opcode != 'N/A': mi.legacy_opcode_int = int(mi.legacy_opcode,16) else: mi.legacy_opcode_int = None mi.map_id_fixup=False if mi.space not in ['legacy','vex','evex', 'xop','knc']: _die("Bad map description encoding space [{}]".format(s)) if mi.space == 'legacy': if genutil.is_hex(mi.legacy_escape): pass elif mi.legacy_escape != 'N/A': _die("Bad map description legacy escape [{}]".format(s)) if genutil.is_hex(mi.legacy_opcode): pass elif mi.legacy_opcode != 'N/A': _die("Bad map description legacy opcode [{}]".format(s)) if mi.map_id == 'N/A': _die("Bad map description map-id [{}]".format(s)) elif genutil.numeric(mi.map_id): mi.map_id = genutil.make_numeric(mi.map_id) else: mi.map_id_fixup=True else: if mi.legacy_escape != 'N/A': _die("Bad map description legacy escape [{}]".format(s)) if mi.legacy_opcode != 'N/A': _die("Bad map description legacy opcode [{}]".format(s)) if genutil.numeric(mi.map_id): mi.map_id = genutil.make_numeric(mi.map_id) else: _die("Bad map description map id [{}]".format(s)) if mi.disp not in ['var','no']: _die("Bad map description disp specifier [{}]".format(s)) if mi.modrm not in ['var','yes','no']: _die("Bad map description modrm specifier [{}]".format(s)) if mi.imm not in ['var','0','1','2','4']: _die("Bad map description imm specifier [{}]".format(s)) if genutil.numeric(mi.opcpos): mi.opcpos = genutil.make_numeric(mi.opcpos) else: _die("Bad map description opcode position specifier [{}]".format(s)) # we want 
the longer patterns first when we sort the map_info_t. mi.priority = 100-len(mi.search_pattern) return mi def emit_enums(agi): emit_ild_enum_dups(agi) # XED_ILD_* emit_ild_enum_unique(agi) # XED_MAPU_* file_list = emit_map_info_tables(agi) agi.hdr_files.extend(file_list) def emit_map_info_tables(agi): '''variable modrm,disp,imm tables, per encoding space using natural map ids. returns list of files generated''' map_features_cfn = 'xed-map-feature-tables.c' map_features_hfn = 'xed-map-feature-tables.h' private_gendir = os.path.join(agi.common.options.gendir,'include-private') hfe = codegen.xed_file_emitter_t(agi.common.options.xeddir, private_gendir, map_features_hfn) for h in [ 'xed-map-info.h' ]: hfe.add_header(h) hfe.start() sorted_list = sorted(agi.map_info, key=lambda x: x.map_name) spaces = list(set([ mi.space for mi in sorted_list ])) sorted_spaces = sorted(spaces, key=lambda x: encoding_space_to_vexvalid(x)) max_space_id = _encoding_space_max() # legacy,vex,evex,xop,knc #max_space_id = encoding_space_to_vexvalid(sorted_spaces[-1]) max_map_id = max([mi.map_id for mi in agi.map_info]) #0...31 fields = ['modrm', 'disp', 'imm'] cvt_yes_no_var = { 'yes':1, 'no':0, 'var':2 } cvt_imm = { '0':0, '1':1, '2':2, '4':4, 'var':7 } field_to_cvt = { 'modrm': cvt_yes_no_var, 'disp' : cvt_yes_no_var, 'imm' : cvt_imm } bits_per_chunk = 64 # The field width in bits must be a power of 2 for current design, # otherwise the bits of interest can span the 64b chunks we are # using to store the values. field_to_bits = { 'modrm': 2, 'disp' : 2, 'imm' : 4 } def collect_codes(field, space_maps): '''cvt is dict converting strings to integers. the codes are indexed by map id.''' cvt = field_to_cvt[field] codes = { key:0 for key in range(0,max_map_id+1) } for mi in space_maps: codes[mi.map_id] = cvt[getattr(mi,field)] codes_as_list = [ codes[i] for i in range(0,max_map_id+1) ] return codes_as_list def convert_list_to_integer(lst, bits_per_field): '''return an integer or a list of integer if more than 64b''' integers = [] tot = 0 shift = 0 for v in lst: if shift >= 64: integers.append(tot) tot = 0 shift = 0 tot = tot + (v << shift) shift = shift + bits_per_field integers.append(tot) if len(integers) == 1: return integers[0] return integers for space_id in _encoding_space_range(): space = _space_id_to_name[space_id] space_maps = [ mi for mi in sorted_list if mi.space == space ] for field in fields: bits_per_field = field_to_bits[field] total_bits = max_map_id * bits_per_field required_chunks = math.ceil(total_bits / bits_per_chunk) values_per_chunk = bits_per_chunk // bits_per_field ilog2_values_per_chunk = int(math.log2(values_per_chunk)) mask = (1<<bits_per_field)-1 f = codegen.function_object_t('xed_ild_has_{}_{}'.format(field,space), 'xed_bool_t', static=True, inline=True) f.add_arg('xed_uint_t m') if space_maps: codes = collect_codes(field, space_maps) constant = convert_list_to_integer(codes,bits_per_field) else: codes = [0] constant = 0 f.add_code('/* {} */'.format(codes)) if set(codes) == {0}: # all zero values... 
f.add_code_eol('return 0') f.add_code_eol('(void)m') else: if required_chunks <= 1: f.add_code_eol('const xed_uint64_t data_const = 0x{:x}ULL'.format(constant)) f.add_code_eol('return (xed_bool_t)((data_const >> ({}*m)) & {})'.format( bits_per_field, mask)) else: f.add_code('const xed_uint64_t data_const[{}] = {{'.format(required_chunks)) ln = ['0x{:x}ULL'.format(c) for c in constant] f.add_code_eol(' {} }}'.format(", ".join(ln))) f.add_code_eol('const xed_uint64_t chunkno = m >> {}'.format(ilog2_values_per_chunk)) f.add_code_eol('const xed_uint64_t offset = m & ({}-1)'.format(values_per_chunk)) f.add_code_eol('return (xed_bool_t)((data_const[chunkno] >> ({}*offset)) & {})'.format( bits_per_field, mask)) hfe.write(f.emit()) # emit the inline function in the header # emit a function that covers all spaces for field in fields: bits_per_field = field_to_bits[field] total_bits = max_map_id * bits_per_field required_chunks = math.ceil(total_bits / bits_per_chunk) values_per_chunk = bits_per_chunk // bits_per_field ilog2_values_per_chunk = int(math.log2(values_per_chunk)) mask = (1<<bits_per_field)-1 f = codegen.function_object_t('xed_ild_has_{}'.format(field), 'xed_bool_t', static=True, inline=True) f.add_arg('xed_uint_t vv') f.add_arg('xed_uint_t m') if required_chunks <= 1: f.add_code('const xed_uint64_t data_const[{}] = {{'.format(max_space_id+1)) else: f.add_code('const xed_uint64_t data_const[{}][{}] = {{'.format(max_space_id+1, required_chunks)) for space_id in _encoding_space_range(): space = _space_id_to_name[space_id] space_maps = [ mi for mi in sorted_list if mi.space == space ] if space_maps: codes = collect_codes(field, space_maps) constant = convert_list_to_integer(codes,bits_per_field) else: codes = [0]*required_chunks if required_chunks <= 1: constant = 0 else: constant = [0]*required_chunks f.add_code('/* {} {} */'.format(codes,space)) if required_chunks <= 1: f.add_code(' 0x{:x}ULL,'.format(constant)) else: ln = ['0x{:x}ULL'.format(c) for c in constant] f.add_code('{{ {} }},'.format(", ".join(ln))) f.add_code_eol('}') f.add_code_eol('xed_assert(vv < {})'.format(max_space_id+1)) if required_chunks <= 1: f.add_code_eol('return (xed_bool_t)((data_const[vv] >> ({}*m)) & {})'.format(bits_per_field, mask)) else: f.add_code_eol('const xed_uint64_t chunkno = m >> {}'.format(ilog2_values_per_chunk)) f.add_code_eol('const xed_uint64_t offset = m & ({}-1)'.format(values_per_chunk)) f.add_code_eol('return (xed_bool_t)((data_const[vv][chunkno] >> ({}*offset)) & {})'.format( bits_per_field, mask)) hfe.write(f.emit()) # emit the inline function in the header # emit a set of functions for determining the valid maps in each encoding space if max_map_id > 64: genutil.die("Need to make this work with multiple chunks of u64") for space_id in _encoding_space_range(): space = _space_id_to_name[space_id] space_maps = [ mi for mi in sorted_list if mi.space == space ] f = codegen.function_object_t('xed_ild_map_valid_{}'.format(space), 'xed_bool_t', static=True, inline=True) f.add_arg('xed_uint_t m') max_id = _encoding_space_max() #max_id = max( [mi.map_id for mi in space_maps ] ) codes_dict = { key:0 for key in range(0,max_map_id+1) } for mi in space_maps: codes_dict[mi.map_id] = 1 codes = [ codes_dict[i] for i in range(0,max_map_id+1) ] f.add_code('/* {} */'.format(codes)) constant = convert_list_to_integer(codes,1) f.add_code_eol('const xed_uint64_t data_const = 0x{:x}ULL'.format(constant)) # no need for a max-map test since, the upper bits of the # constant will be zero already 
f.add_code_eol('return (xed_bool_t)((data_const >> m) & 1)') hfe.write(f.emit()) # emit the inline function in the header # emit a table filling in "xed_map_info_t xed_legacy_maps[] = { ... }" legacy_maps = [ mi for mi in sorted_list if mi.space == 'legacy' ] legacy_maps = sorted(legacy_maps, key=lambda x: -len(x.search_pattern) * 10 + x.map_id) hfe.add_code('const xed_map_info_t xed_legacy_maps[] = {') for mi in legacy_maps: if mi.map_id == 0: continue has_legacy_opcode = 1 if mi.legacy_opcode != 'N/A' else 0 legacy_opcode = mi.legacy_opcode if mi.legacy_opcode != 'N/A' else 0 legacy_escape = mi.legacy_escape if mi.legacy_escape != 'N/A' else 0 hfe.add_code('{{ {}, {}, {}, {}, {} }},'.format(legacy_escape, has_legacy_opcode, legacy_opcode, mi.map_id, mi.opcpos)) hfe.add_code_eol('}') hfe.close() return [hfe.full_file_name] def emit_ild_enum_unique(agi): """modify map_info_t values to include mapu enum name so that we can build other arrays for the C-code based on that unique enum""" sorted_list = sorted(agi.map_info, key=lambda x: x.map_name) evalues = ['INVALID'] for mi in sorted_list: s = mi.map_name.upper() evalues.append(s) mi.mapu_name = 'XED_MAPU_{}'.format(s) enum = enum_txt_writer.enum_info_t(evalues, agi.common.options.xeddir, agi.common.options.gendir, 'xed-mapu', 'xed_mapu_enum_t', 'XED_MAPU_', cplusplus=False) enum.run_enumer() agi.add_file_name(enum.src_full_file_name) agi.add_file_name(enum.hdr_full_file_name, header=True) agi.all_enums['xed_mapu_enum_t'] = evalues def emit_ild_enum_dups(agi): evalues = [] sorted_list = sorted(agi.map_info, key=lambda x: x.map_name) for mi in sorted_list: val = None if isinstance(mi.map_id,int): val = str(mi.map_id) e = enumer.enumer_value_t(mi.map_name.upper(), val) evalues.append(e) evalues.append('MAP_INVALID') enum = enum_txt_writer.enum_info_t(evalues, agi.common.options.xeddir, agi.common.options.gendir, 'xed-ild', 'xed_ild_map_enum_t', 'XED_ILD_', cplusplus=False) enum.run_enumer() agi.add_file_name(enum.src_full_file_name) agi.add_file_name(enum.hdr_full_file_name, header=True) agi.all_enums['xed_ild_map_enum_t'] = evalues def fix_nonnumeric_maps(maps): d = collections.defaultdict(list) for mi in maps: if not mi.map_id_fixup: d[mi.space].append(mi.map_id) mx = {} # max per key for k in d.keys(): mx[k] = max(d[k]) for mi in maps: if mi.map_id_fixup: maxval = mx[mi.space] + 1 mi.map_id = maxval mx[mi.space] = maxval mi.map_id_fixup = False def read_file(fn): lines = open(fn,'r').readlines() lines = map(genutil.no_comments, lines) lines = list(filter(genutil.blank_line, lines)) maps = [] # list of map_info_t for line in lines: maps.append( _parse_map_line(line) ) fix_nonnumeric_maps(maps) maps.sort(key=lambda x: x.priority) #for m in maps: # _msgb("MAPINFO",m) return maps if __name__ == "__main__": read_file(sys.argv[1]) sys.exit(0)
# This is a set of Python modules that shouldn't be unloaded during hot reload. SAFE_MODULES = { "__future__", "__main__", "_abc", "_asyncio", "_bisect", "_blake2", "_bootlocale", "_bz2", "_cares", "_cares.lib", "_cffi_backend", "_codecs", "_collections", "_collections_abc", "_compat_pickle", "_compression", "_contextvars", "_ctypes", "_cython_0_29_21", "_datetime", "_decimal", "_elementtree", "_frozen_importlib", "_frozen_importlib_external", "_functools", "_hashlib", "_heapq", "_imp", "_io", "_json", "_locale", "_lzma", "_markupbase", "_opcode", "_operator", "_pickle", "_posixsubprocess", "_queue", "_random", "_sha3", "_sha512", "_signal", "_sitebuiltins", "_socket", "_sre", "_ssl", "_stat", "_string", "_struct", "_thread", "_uuid", "_warnings", "_weakref", "_weakrefset", "abc", "aioamqp", "aioamqp.channel", "aioamqp.constants", "aioamqp.envelope", "aioamqp.exceptions", "aioamqp.frame", "aioamqp.properties", "aioamqp.protocol", "aioamqp.version", "aiobotocore", "aiobotocore._endpoint_helpers", "aiobotocore.args", "aiobotocore.client", "aiobotocore.config", "aiobotocore.credentials", "aiobotocore.endpoint", "aiobotocore.eventstream", "aiobotocore.hooks", "aiobotocore.paginate", "aiobotocore.parsers", "aiobotocore.response", "aiobotocore.session", "aiobotocore.signers", "aiobotocore.utils", "aiobotocore.waiter", "aiodns", "aiodns.error", "aiohttp", "aiohttp._frozenlist", "aiohttp._helpers", "aiohttp._http_parser", "aiohttp._http_writer", "aiohttp._websocket", "aiohttp.abc", "aiohttp.base_protocol", "aiohttp.client", "aiohttp.client_exceptions", "aiohttp.client_proto", "aiohttp.client_reqrep", "aiohttp.client_ws", "aiohttp.connector", "aiohttp.cookiejar", "aiohttp.formdata", "aiohttp.frozenlist", "aiohttp.hdrs", "aiohttp.helpers", "aiohttp.http", "aiohttp.http_exceptions", "aiohttp.http_parser", "aiohttp.http_websocket", "aiohttp.http_writer", "aiohttp.locks", "aiohttp.log", "aiohttp.multipart", "aiohttp.payload", "aiohttp.payload_streamer", "aiohttp.resolver", "aiohttp.signals", "aiohttp.streams", "aiohttp.tcp_helpers", "aiohttp.tracing", "aiohttp.typedefs", "aiohttp.web", "aiohttp.web_app", "aiohttp.web_exceptions", "aiohttp.web_fileresponse", "aiohttp.web_log", "aiohttp.web_middlewares", "aiohttp.web_protocol", "aiohttp.web_request", "aiohttp.web_response", "aiohttp.web_routedef", "aiohttp.web_runner", "aiohttp.web_server", "aiohttp.web_urldispatcher", "aiohttp.web_ws", "aioitertools", "aioitertools.__version__", "aioitertools.asyncio", "aioitertools.builtins", "aioitertools.helpers", "aioitertools.itertools", "aioitertools.types", "argparse", "async_timeout", "asyncio", "asyncio.base_events", "asyncio.base_futures", "asyncio.base_subprocess", "asyncio.base_tasks", "asyncio.constants", "asyncio.coroutines", "asyncio.events", "asyncio.exceptions", "asyncio.format_helpers", "asyncio.futures", "asyncio.locks", "asyncio.log", "asyncio.protocols", "asyncio.queues", "asyncio.runners", "asyncio.selector_events", "asyncio.sslproto", "asyncio.staggered", "asyncio.streams", "asyncio.subprocess", "asyncio.tasks", "asyncio.transports", "asyncio.trsock", "asyncio.unix_events", "atexit", "attr", "attr._compat", "attr._config", "attr._funcs", "attr._make", "attr._next_gen", "attr._version_info", "attr.converters", "attr.exceptions", "attr.filters", "attr.setters", "attr.validators", "base64", "binascii", "bisect", "botocore", "botocore.args", "botocore.auth", "botocore.awsrequest", "botocore.client", "botocore.compat", "botocore.config", "botocore.configloader", "botocore.configprovider", 
"botocore.credentials", "botocore.discovery", "botocore.docs", "botocore.docs.bcdoc", "botocore.docs.bcdoc.docstringparser", "botocore.docs.bcdoc.restdoc", "botocore.docs.bcdoc.style", "botocore.docs.client", "botocore.docs.docstring", "botocore.docs.example", "botocore.docs.method", "botocore.docs.paginator", "botocore.docs.params", "botocore.docs.service", "botocore.docs.shape", "botocore.docs.sharedexample", "botocore.docs.utils", "botocore.docs.waiter", "botocore.endpoint", "botocore.errorfactory", "botocore.eventstream", "botocore.exceptions", "botocore.handlers", "botocore.history", "botocore.hooks", "botocore.httpsession", "botocore.loaders", "botocore.model", "botocore.monitoring", "botocore.paginate", "botocore.parsers", "botocore.regions", "botocore.response", "botocore.retries", "botocore.retries.adaptive", "botocore.retries.base", "botocore.retries.bucket", "botocore.retries.quota", "botocore.retries.special", "botocore.retries.standard", "botocore.retries.throttling", "botocore.retryhandler", "botocore.serialize", "botocore.session", "botocore.signers", "botocore.translate", "botocore.utils", "botocore.validate", "botocore.vendored", "botocore.vendored.requests", "botocore.vendored.requests.exceptions", "botocore.vendored.requests.packages", "botocore.vendored.requests.packages.urllib3", "botocore.vendored.requests.packages.urllib3.exceptions", "botocore.vendored.six", "botocore.vendored.six.moves", "botocore.vendored.six.moves.urllib", "botocore.vendored.six.moves.urllib.request", "botocore.vendored.six.moves.urllib_parse", "botocore.waiter", "builtins", "bz2", "calendar", "cchardet", "cchardet._cchardet", "cchardet.version", "certifi", "certifi.core", "cgi", "codecs", "collections", "collections.abc", "colorama", "colorama.ansi", "colorama.ansitowin32", "colorama.initialise", "colorama.win32", "colorama.winterm", "concurrent", "concurrent.futures", "concurrent.futures._base", "contextlib", "contextvars", "copy", "copyreg", "ctypes", "ctypes._endian", "cython_runtime", "datetime", "dateutil", "dateutil._common", "dateutil._version", "dateutil.parser", "dateutil.parser._parser", "dateutil.parser.isoparser", "dateutil.relativedelta", "dateutil.tz", "dateutil.tz._common", "dateutil.tz._factories", "dateutil.tz.tz", "decimal", "dis", "email", "email._encoded_words", "email._parseaddr", "email._policybase", "email.base64mime", "email.charset", "email.encoders", "email.errors", "email.feedparser", "email.header", "email.iterators", "email.message", "email.parser", "email.quoprimime", "email.utils", "encodings", "encodings.aliases", "encodings.latin_1", "encodings.utf_8", "enum", "errno", "fnmatch", "functools", "genericpath", "getopt", "getpass", "gettext", "google", "google.protobuf", "grp", "hashlib", "heapq", "hmac", "html", "html.entities", "html.parser", "http", "http.client", "http.cookies", "http.server", "idna", "idna.core", "idna.idnadata", "idna.intranges", "idna.package_data", "importlib", "importlib._bootstrap", "importlib._bootstrap_external", "importlib.abc", "importlib.machinery", "importlib.resources", "importlib.util", "inspect", "io", "ipaddress", "itertools", "jmespath", "jmespath.ast", "jmespath.compat", "jmespath.exceptions", "jmespath.functions", "jmespath.lexer", "jmespath.parser", "jmespath.visitor", "json", "json.decoder", "json.encoder", "json.scanner", "keyword", "linecache", "locale", "logging", "logging.handlers", "lzma", "marshal", "math", "mimetypes", "multidict", "multidict._abc", "multidict._compat", "multidict._multidict", 
"multidict._multidict_base", "netrc", "ntpath", "numbers", "opcode", "operator", "os", "os.path", "pamqp", "pamqp.body", "pamqp.constants", "pamqp.decode", "pamqp.encode", "pamqp.exceptions", "pamqp.frame", "pamqp.header", "pamqp.heartbeat", "pamqp.specification", "pathlib", "pickle", "platform", "posix", "posixpath", "pwd", "pycares", "pycares._cares", "pycares._version", "pycares.errno", "pycares.utils", "pyexpat", "pyexpat.errors", "pyexpat.model", "pytz", "pytz.exceptions", "pytz.lazy", "pytz.tzfile", "pytz.tzinfo", "queue", "quopri", "random", "re", "reprlib", "select", "selectors", "sentry_sdk", "sentry_sdk._compat", "sentry_sdk._functools", "sentry_sdk._queue", "sentry_sdk._types", "sentry_sdk.api", "sentry_sdk.attachments", "sentry_sdk.client", "sentry_sdk.consts", "sentry_sdk.debug", "sentry_sdk.envelope", "sentry_sdk.hub", "sentry_sdk.integrations", "sentry_sdk.integrations._wsgi_common", "sentry_sdk.integrations.aiohttp", "sentry_sdk.integrations.argv", "sentry_sdk.integrations.atexit", "sentry_sdk.integrations.aws_lambda", "sentry_sdk.integrations.beam", "sentry_sdk.integrations.boto3", "sentry_sdk.integrations.celery", "sentry_sdk.integrations.chalice", "sentry_sdk.integrations.dedupe", "sentry_sdk.integrations.excepthook", "sentry_sdk.integrations.flask", "sentry_sdk.integrations.gcp", "sentry_sdk.integrations.logging", "sentry_sdk.integrations.modules", "sentry_sdk.integrations.pyramid", "sentry_sdk.integrations.redis", "sentry_sdk.integrations.sanic", "sentry_sdk.integrations.serverless", "sentry_sdk.integrations.sqlalchemy", "sentry_sdk.integrations.stdlib", "sentry_sdk.integrations.threading", "sentry_sdk.integrations.tornado", "sentry_sdk.integrations.trytond", "sentry_sdk.integrations.wsgi", "sentry_sdk.scope", "sentry_sdk.serializer", "sentry_sdk.session", "sentry_sdk.sessions", "sentry_sdk.tracing", "sentry_sdk.transport", "sentry_sdk.utils", "sentry_sdk.worker", "shlex", "shutil", "signal", "site", "six", "six.moves", "socket", "socketserver", "sre_compile", "sre_constants", "sre_parse", "ssl", "stat", "string", "struct", "subprocess", "sys", "tempfile", "termios", "threading", "time", "token", "tokenize", "tomodachi", "tomodachi.__main__", "tomodachi.__version__", "tomodachi.cli", "tomodachi.config", "tomodachi.container", "tomodachi.discovery", "tomodachi.discovery.aws_sns_registration", "tomodachi.discovery.dummy_registry", "tomodachi.envelope", "tomodachi.envelope.json_base", "tomodachi.envelope.proto_build", "tomodachi.envelope.proto_build.protobuf", "tomodachi.envelope.proto_build.protobuf.sns_sqs_message_pb2", "tomodachi.envelope.protobuf_base", "tomodachi.helpers", "tomodachi.helpers.aiobotocore_connector", "tomodachi.helpers.crontab", "tomodachi.helpers.dict", "tomodachi.helpers.execution_context", "tomodachi.helpers.logging", "tomodachi.helpers.middleware", "tomodachi.helpers.safe_modules", "tomodachi.importer", "tomodachi.invoker", "tomodachi.invoker.base", "tomodachi.invoker.decorator", "tomodachi.launcher", "tomodachi.protocol", "tomodachi.protocol.json_base", "tomodachi.protocol.proto_build", "tomodachi.protocol.proto_build.protobuf", "tomodachi.protocol.proto_build.protobuf.sns_sqs_message_pb2", "tomodachi.protocol.protobuf_base", "tomodachi.run", "tomodachi.run.__main__", "tomodachi.transport", "tomodachi.transport.amqp", "tomodachi.transport.aws_sns_sqs", "tomodachi.transport.http", "tomodachi.transport.schedule", "tomodachi.watcher", "traceback", "types", "typing", "typing.io", "typing.re", "typing_extensions", "tzlocal", "tzlocal.unix", 
"tzlocal.utils", "unicodedata", "urllib", "urllib.error", "urllib.parse", "urllib.request", "urllib.response", "urllib3", "urllib3._collections", "urllib3._version", "urllib3.connection", "urllib3.connectionpool", "urllib3.contrib", "urllib3.contrib._appengine_environ", "urllib3.exceptions", "urllib3.fields", "urllib3.filepost", "urllib3.packages", "urllib3.packages.six", "urllib3.packages.six.moves", "urllib3.packages.six.moves.http_client", "urllib3.packages.six.moves.urllib", "urllib3.packages.six.moves.urllib.parse", "urllib3.packages.ssl_match_hostname", "urllib3.poolmanager", "urllib3.request", "urllib3.response", "urllib3.util", "urllib3.util.connection", "urllib3.util.queue", "urllib3.util.request", "urllib3.util.response", "urllib3.util.retry", "urllib3.util.ssl_", "urllib3.util.timeout", "urllib3.util.url", "urllib3.util.wait", "uu", "uuid", "warnings", "weakref", "wrapt", "wrapt.decorators", "wrapt.importer", "wrapt.wrappers", "xml", "xml.etree", "xml.etree.ElementPath", "xml.etree.ElementTree", "xml.etree.cElementTree", "yarl", "yarl._quoting", "yarl._quoting_c", "yarl._url", "zipimport", "zlib", }
# encoding: utf-8
"""An object for managing IPython profile directories."""

# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.

import os
import shutil
import errno

from traitlets.config.configurable import LoggingConfigurable
from IPython.paths import get_ipython_package_dir
from IPython.utils.path import expand_path, ensure_dir_exists
from IPython.utils import py3compat
from traitlets import Unicode, Bool, observe

#-----------------------------------------------------------------------------
# Module errors
#-----------------------------------------------------------------------------

class ProfileDirError(Exception):
    pass


#-----------------------------------------------------------------------------
# Class for managing profile directories
#-----------------------------------------------------------------------------

class ProfileDir(LoggingConfigurable):
    """An object to manage the profile directory and its resources.

    The profile directory is used by all IPython applications, to manage
    configuration, logging and security.

    This object knows how to find, create and manage these directories. This
    should be used by any code that wants to handle profiles.
    """

    security_dir_name = Unicode('security')
    log_dir_name = Unicode('log')
    startup_dir_name = Unicode('startup')
    pid_dir_name = Unicode('pid')
    static_dir_name = Unicode('static')
    security_dir = Unicode(u'')
    log_dir = Unicode(u'')
    startup_dir = Unicode(u'')
    pid_dir = Unicode(u'')
    static_dir = Unicode(u'')

    location = Unicode(u'',
        help="""Set the profile location directly. This overrides the logic
        used by the `profile` option.""",
    ).tag(config=True)

    _location_isset = Bool(False)  # flag for detecting multiply set location

    @observe('location')
    def _location_changed(self, change):
        if self._location_isset:
            raise RuntimeError("Cannot set profile location more than once.")
        self._location_isset = True
        new = change['new']
        ensure_dir_exists(new)

        # ensure config files exist:
        self.security_dir = os.path.join(new, self.security_dir_name)
        self.log_dir = os.path.join(new, self.log_dir_name)
        self.startup_dir = os.path.join(new, self.startup_dir_name)
        self.pid_dir = os.path.join(new, self.pid_dir_name)
        self.static_dir = os.path.join(new, self.static_dir_name)
        self.check_dirs()

    def _mkdir(self, path, mode=None):
        """ensure a directory exists at a given path

        This is a version of os.mkdir, with the following differences:

        - returns True if it created the directory, False otherwise
        - ignores EEXIST, protecting against race conditions where
          the dir may have been created in between the check and
          the creation
        - sets permissions if requested and the dir already exists
        """
        if os.path.exists(path):
            if mode and os.stat(path).st_mode != mode:
                try:
                    os.chmod(path, mode)
                except OSError:
                    self.log.warning(
                        "Could not set permissions on %s", path)
            return False
        try:
            if mode:
                os.mkdir(path, mode)
            else:
                os.mkdir(path)
        except OSError as e:
            if e.errno == errno.EEXIST:
                return False
            else:
                raise

        return True

    @observe('log_dir')
    def check_log_dir(self, change=None):
        self._mkdir(self.log_dir)

    @observe('startup_dir')
    def check_startup_dir(self, change=None):
        self._mkdir(self.startup_dir)

        readme = os.path.join(self.startup_dir, 'README')
        src = os.path.join(get_ipython_package_dir(), u'core', u'profile',
                           u'README_STARTUP')

        if not os.path.exists(src):
            self.log.warning(
                "Could not copy README_STARTUP to startup dir. "
                "Source file %s does not exist.", src)

        if os.path.exists(src) and not os.path.exists(readme):
            shutil.copy(src, readme)

    @observe('security_dir')
    def check_security_dir(self, change=None):
        self._mkdir(self.security_dir, 0o40700)

    @observe('pid_dir')
    def check_pid_dir(self, change=None):
        self._mkdir(self.pid_dir, 0o40700)

    def check_dirs(self):
        self.check_security_dir()
        self.check_log_dir()
        self.check_pid_dir()
        self.check_startup_dir()

    def copy_config_file(self, config_file, path=None, overwrite=False):
        """Copy a default config file into the active profile directory.

        Default configuration files are kept in :mod:`IPython.core.profile`.
        This function moves these from that location to the working profile
        directory.
        """
        dst = os.path.join(self.location, config_file)
        if os.path.isfile(dst) and not overwrite:
            return False
        if path is None:
            path = os.path.join(get_ipython_package_dir(), u'core', u'profile',
                                u'default')
        src = os.path.join(path, config_file)
        shutil.copy(src, dst)
        return True

    @classmethod
    def create_profile_dir(cls, profile_dir, config=None):
        """Create a new profile directory given a full path.

        Parameters
        ----------
        profile_dir : str
            The full path to the profile directory.  If it does exist, it will
            be used.  If not, it will be created.
        """
        return cls(location=profile_dir, config=config)

    @classmethod
    def create_profile_dir_by_name(cls, path, name=u'default', config=None):
        """Create a profile dir by profile name and path.

        Parameters
        ----------
        path : unicode
            The path (directory) to put the profile directory in.
        name : unicode
            The name of the profile.  The name of the profile directory will
            be "profile_<profile>".
        """
        if not os.path.isdir(path):
            raise ProfileDirError('Directory not found: %s' % path)
        profile_dir = os.path.join(path, u'profile_' + name)
        return cls(location=profile_dir, config=config)

    @classmethod
    def find_profile_dir_by_name(cls, ipython_dir, name=u'default', config=None):
        """Find an existing profile dir by profile name, return its ProfileDir.

        This searches through a sequence of paths for a profile dir.  If it
        is not found, a :class:`ProfileDirError` exception will be raised.

        The search path algorithm is:
        1. ``py3compat.getcwd()``
        2. ``ipython_dir``

        Parameters
        ----------
        ipython_dir : unicode or str
            The IPython directory to use.
        name : unicode or str
            The name of the profile.  The name of the profile directory will
            be "profile_<profile>".
        """
        dirname = u'profile_' + name
        paths = [py3compat.getcwd(), ipython_dir]
        for p in paths:
            profile_dir = os.path.join(p, dirname)
            if os.path.isdir(profile_dir):
                return cls(location=profile_dir, config=config)
        else:
            raise ProfileDirError(
                'Profile directory not found in paths: %s' % dirname)

    @classmethod
    def find_profile_dir(cls, profile_dir, config=None):
        """Find/create a profile dir and return its ProfileDir.

        This will create the profile directory if it doesn't exist.

        Parameters
        ----------
        profile_dir : unicode or str
            The path of the profile directory.
        """
        profile_dir = expand_path(profile_dir)
        if not os.path.isdir(profile_dir):
            raise ProfileDirError(
                'Profile directory not found: %s' % profile_dir)
        return cls(location=profile_dir, config=config)
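# Usage sketch (illustrative, not part of the original module): create a
# profile layout named "demo" under a scratch directory and seed it with the
# default IPython config file.  The scratch location and profile name are
# assumptions made for the example.
def _example_profile_dir():
    import tempfile

    base_dir = tempfile.mkdtemp()
    profile_dir = ProfileDir.create_profile_dir_by_name(base_dir, name=u'demo')
    # The observers above have already created log/, security/, startup/, pid/.
    print(profile_dir.location)
    print(profile_dir.startup_dir)
    # Copies IPython/core/profile/default/ipython_config.py unless it exists.
    profile_dir.copy_config_file('ipython_config.py', overwrite=False)
    return profile_dir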
#!/usr/bin/env python
#
# Author: Mike McKerns (mmckerns @caltech and @uqfoundation)
# Copyright (c) 1997-2015 California Institute of Technology.
# License: 3-clause BSD.  The full license text is available at:
#  - http://trac.mystic.cacr.caltech.edu/project/mystic/browser/mystic/LICENSE

from mystic.constraints import *
from mystic.penalty import quadratic_equality
from mystic.coupler import inner
from mystic.math import almostEqual
from mystic.tools import random_seed
random_seed(213)


def test_penalize():

    from mystic.math.measures import mean, spread

    def mean_constraint(x, target):
        return mean(x) - target

    def range_constraint(x, target):
        return spread(x) - target

    @quadratic_equality(condition=range_constraint, kwds={'target':5.0})
    @quadratic_equality(condition=mean_constraint, kwds={'target':5.0})
    def penalty(x):
        return 0.0

    def cost(x):
        return abs(sum(x) - 5.0)

    from mystic.solvers import fmin
    from numpy import array
    x = array([1,2,3,4,5])
    y = fmin(cost, x, penalty=penalty, disp=False)

    assert round(mean(y)) == 5.0
    assert round(spread(y)) == 5.0
    assert round(cost(y)) == 4*(5.0)


def test_solve():

    from mystic.math.measures import mean

    def mean_constraint(x, target):
        return mean(x) - target

    def parameter_constraint(x):
        return x[-1] - x[0]

    @quadratic_equality(condition=mean_constraint, kwds={'target':5.0})
    @quadratic_equality(condition=parameter_constraint)
    def penalty(x):
        return 0.0

    x = solve(penalty, guess=[2,3,1])

    assert round(mean_constraint(x, 5.0)) == 0.0
    assert round(parameter_constraint(x)) == 0.0
    assert issolution(penalty, x)


def test_solve_constraint():

    from mystic.math.measures import mean

    @with_mean(1.0)
    def constraint(x):
        x[-1] = x[0]
        return x

    x = solve(constraint, guess=[2,3,1])

    assert almostEqual(mean(x), 1.0, tol=1e-15)
    assert x[-1] == x[0]
    assert issolution(constraint, x)


def test_as_constraint():

    from mystic.math.measures import mean, spread

    def mean_constraint(x, target):
        return mean(x) - target

    def range_constraint(x, target):
        return spread(x) - target

    @quadratic_equality(condition=range_constraint, kwds={'target':5.0})
    @quadratic_equality(condition=mean_constraint, kwds={'target':5.0})
    def penalty(x):
        return 0.0

    ndim = 3
    constraints = as_constraint(penalty, solver='fmin')
    #XXX: this is expensive to evaluate, as there are nested optimizations

    from numpy import arange
    x = arange(ndim)
    _x = constraints(x)

    assert round(mean(_x)) == 5.0
    assert round(spread(_x)) == 5.0
    assert round(penalty(_x)) == 0.0

    def cost(x):
        return abs(sum(x) - 5.0)

    npop = ndim*3
    from mystic.solvers import diffev
    y = diffev(cost, x, npop, constraints=constraints, disp=False, gtol=10)

    assert round(mean(y)) == 5.0
    assert round(spread(y)) == 5.0
    assert round(cost(y)) == 5.0*(ndim-1)


def test_as_penalty():

    from mystic.math.measures import mean, spread

    @with_spread(5.0)
    @with_mean(5.0)
    def constraint(x):
        return x

    penalty = as_penalty(constraint)

    from numpy import array
    x = array([1,2,3,4,5])

    def cost(x):
        return abs(sum(x) - 5.0)

    from mystic.solvers import fmin
    y = fmin(cost, x, penalty=penalty, disp=False)

    assert round(mean(y)) == 5.0
    assert round(spread(y)) == 5.0
    assert round(cost(y)) == 4*(5.0)


def test_with_penalty():

    from mystic.math.measures import mean, spread

    @with_penalty(quadratic_equality, kwds={'target':5.0})
    def penalty(x, target):
        return mean(x) - target

    def cost(x):
        return abs(sum(x) - 5.0)

    from mystic.solvers import fmin
    from numpy import array
    x = array([1,2,3,4,5])
    y = fmin(cost, x, penalty=penalty, disp=False)

    assert round(mean(y)) == 5.0
    assert round(cost(y)) == 4*(5.0)


def test_with_mean():

    from mystic.math.measures import mean, impose_mean

    @with_mean(5.0)
    def mean_of_squared(x):
        return [i**2 for i in x]

    from numpy import array
    x = array([1,2,3,4,5])
    y = impose_mean(5, [i**2 for i in x])
    assert mean(y) == 5.0
    assert mean_of_squared(x) == y


def test_with_mean_spread():

    from mystic.math.measures import mean, spread, impose_mean, impose_spread

    @with_spread(50.0)
    @with_mean(5.0)
    def constrained_squared(x):
        return [i**2 for i in x]

    from numpy import array
    x = array([1,2,3,4,5])
    y = impose_spread(50.0, impose_mean(5.0, [i**2 for i in x]))
    assert almostEqual(mean(y), 5.0, tol=1e-15)
    assert almostEqual(spread(y), 50.0, tol=1e-15)
    assert constrained_squared(x) == y


def test_constrained_solve():

    from mystic.math.measures import mean, spread

    @with_spread(5.0)
    @with_mean(5.0)
    def constraints(x):
        return x

    def cost(x):
        return abs(sum(x) - 5.0)

    from mystic.solvers import fmin_powell
    from numpy import array
    x = array([1,2,3,4,5])
    y = fmin_powell(cost, x, constraints=constraints, disp=False)

    assert almostEqual(mean(y), 5.0, tol=1e-15)
    assert almostEqual(spread(y), 5.0, tol=1e-15)
    assert almostEqual(cost(y), 4*(5.0), tol=1e-6)


def test_with_constraint():

    from mystic.math.measures import mean, impose_mean

    @with_constraint(inner, kwds={'target':5.0})
    def mean_of_squared(x, target):
        return impose_mean(target, [i**2 for i in x])

    from numpy import array
    x = array([1,2,3,4,5])
    y = impose_mean(5, [i**2 for i in x])
    assert mean(y) == 5.0
    assert mean_of_squared(x) == y


def test_discrete():

    @discrete([1.0, 3.5, 5.5, 7.0])
    def discrete_squared(x):
        return x**2

    from numpy import asarray
    assert discrete_squared(5.6) == 5.5**2
    assert all(discrete_squared(asarray([1, 3])) == asarray([1.0, 3.5])**2)
    discrete_squared.samples([1.0, 7.0])
    assert discrete_squared(5.6) == 7.0**2
    discrete_squared.index([0, -1])
    assert all(discrete_squared(asarray([0, 3, 6])) == asarray([1.0, 3.0, 7.0])**2)


if __name__ == '__main__':
    test_penalize()
    test_solve()
    test_solve_constraint()
    test_as_constraint()
    test_as_penalty()
    test_with_penalty()
    test_with_mean()
    test_with_mean_spread()
    test_constrained_solve()
    test_with_constraint()
    test_discrete()


# EOF
# -*- coding: utf-8 -*-
"""The EXT file entry implementation."""

from dfdatetime import posix_time as dfdatetime_posix_time

from dfvfs.lib import definitions
from dfvfs.lib import errors
from dfvfs.path import ext_path_spec
from dfvfs.vfs import attribute
from dfvfs.vfs import ext_attribute
from dfvfs.vfs import ext_directory
from dfvfs.vfs import extent
from dfvfs.vfs import file_entry


class EXTFileEntry(file_entry.FileEntry):
  """File system file entry that uses pyfsext."""

  TYPE_INDICATOR = definitions.TYPE_INDICATOR_EXT

  # Mappings of EXT file types to dfVFS file entry types.
  _ENTRY_TYPES = {
      0x1000: definitions.FILE_ENTRY_TYPE_PIPE,
      0x2000: definitions.FILE_ENTRY_TYPE_DEVICE,
      0x4000: definitions.FILE_ENTRY_TYPE_DIRECTORY,
      0x6000: definitions.FILE_ENTRY_TYPE_DEVICE,
      0x8000: definitions.FILE_ENTRY_TYPE_FILE,
      0xa000: definitions.FILE_ENTRY_TYPE_LINK,
      0xc000: definitions.FILE_ENTRY_TYPE_SOCKET}

  _NANOSECONDS_PER_SECOND = 1000000000

  def __init__(
      self, resolver_context, file_system, path_spec, fsext_file_entry=None,
      is_root=False, is_virtual=False):
    """Initializes a file entry.

    Args:
      resolver_context (Context): resolver context.
      file_system (FileSystem): file system.
      path_spec (PathSpec): path specification.
      fsext_file_entry (Optional[pyfsext.file_entry]): EXT file entry.
      is_root (Optional[bool]): True if the file entry is the root file entry
          of the corresponding file system.
      is_virtual (Optional[bool]): True if the file entry is a virtual file
          entry emulated by the corresponding file system.

    Raises:
      BackEndError: if the pyfsext file entry is missing.
    """
    if not fsext_file_entry:
      fsext_file_entry = file_system.GetEXTFileEntryByPathSpec(path_spec)
      if not fsext_file_entry:
        raise errors.BackEndError('Missing pyfsext file entry.')

    if is_root:
      file_entry_name = ''
    else:
      file_entry_name = fsext_file_entry.name

    # Use the path specification location to determine the file entry name
    # if the file entry was retrieved by inode.
    if file_entry_name is None:
      location = getattr(path_spec, 'location', None)
      if location:
        location_segments = file_system.SplitPath(location)
        if location_segments:
          file_entry_name = location_segments[-1]

    super(EXTFileEntry, self).__init__(
        resolver_context, file_system, path_spec, is_root=is_root,
        is_virtual=is_virtual)
    self._creation_time = fsext_file_entry.get_creation_time_as_integer()
    self._fsext_file_entry = fsext_file_entry
    self._name = file_entry_name

    self.entry_type = self._ENTRY_TYPES.get(
        fsext_file_entry.file_mode & 0xf000, None)

  def _GetAttributes(self):
    """Retrieves the attributes.

    Returns:
      list[Attribute]: attributes.
    """
    if self._attributes is None:
      self._attributes = []

      for fsext_extended_attribute in (
          self._fsext_file_entry.extended_attributes):
        extended_attribute = ext_attribute.EXTExtendedAttribute(
            fsext_extended_attribute)
        self._attributes.append(extended_attribute)

    return self._attributes

  def _GetDirectory(self):
    """Retrieves a directory.

    Returns:
      EXTDirectory: directory or None if not available.
    """
    if self.entry_type != definitions.FILE_ENTRY_TYPE_DIRECTORY:
      return None

    return ext_directory.EXTDirectory(self._file_system, self.path_spec)

  def _GetLink(self):
    """Retrieves the link.

    Returns:
      str: path of the linked file.
    """
    if self._link is None:
      self._link = self._fsext_file_entry.symbolic_link_target
      if self._link and self._link[0] != self._file_system.PATH_SEPARATOR:
        # TODO: make link absolute.
        self._link = '/{0:s}'.format(self._link)

    return self._link

  def _GetStat(self):
    """Retrieves information about the file entry.

    Returns:
      VFSStat: a stat object.
    """
    stat_object = super(EXTFileEntry, self)._GetStat()

    # File data stat information.
    stat_object.size = self._fsext_file_entry.size

    # Ownership and permissions stat information.
    stat_object.mode = self._fsext_file_entry.file_mode & 0x0fff
    stat_object.uid = self._fsext_file_entry.owner_identifier
    stat_object.gid = self._fsext_file_entry.group_identifier

    # File entry type stat information.
    stat_object.type = self.entry_type

    # Other stat information.
    stat_object.ino = self._fsext_file_entry.inode_number
    stat_object.fs_type = 'EXT'
    stat_object.is_allocated = True

    return stat_object

  def _GetStatAttribute(self):
    """Retrieves a stat attribute.

    Returns:
      StatAttribute: a stat attribute or None if not available.
    """
    stat_attribute = attribute.StatAttribute()
    stat_attribute.group_identifier = self._fsext_file_entry.group_identifier
    stat_attribute.inode_number = self._fsext_file_entry.inode_number
    stat_attribute.mode = self._fsext_file_entry.file_mode
    stat_attribute.number_of_links = self._fsext_file_entry.number_of_links
    stat_attribute.owner_identifier = self._fsext_file_entry.owner_identifier
    stat_attribute.size = self._fsext_file_entry.size
    stat_attribute.type = self.entry_type

    return stat_attribute

  def _GetSubFileEntries(self):
    """Retrieves a sub file entries generator.

    Yields:
      EXTFileEntry: a sub file entry.
    """
    if self._directory is None:
      self._directory = self._GetDirectory()

    if self._directory:
      for path_spec in self._directory.entries:
        yield EXTFileEntry(
            self._resolver_context, self._file_system, path_spec)

  @property
  def access_time(self):
    """dfdatetime.DateTimeValues: access time or None if not available."""
    timestamp = self._fsext_file_entry.get_access_time_as_integer()

    # If creation time is not present (None) the timestamp precision is in
    # seconds.
    if self._creation_time is None:
      timestamp, _ = divmod(timestamp, self._NANOSECONDS_PER_SECOND)
      return dfdatetime_posix_time.PosixTime(timestamp=timestamp)

    return dfdatetime_posix_time.PosixTimeInNanoseconds(timestamp=timestamp)

  @property
  def change_time(self):
    """dfdatetime.DateTimeValues: change time or None if not available."""
    timestamp = self._fsext_file_entry.get_inode_change_time_as_integer()

    # If creation time is not present (None) the timestamp precision is in
    # seconds.
    if self._creation_time is None:
      timestamp, _ = divmod(timestamp, self._NANOSECONDS_PER_SECOND)
      return dfdatetime_posix_time.PosixTime(timestamp=timestamp)

    return dfdatetime_posix_time.PosixTimeInNanoseconds(timestamp=timestamp)

  @property
  def creation_time(self):
    """dfdatetime.DateTimeValues: creation time or None if not available."""
    # Creation time can be None if not present and 0 if not set.
    if not self._creation_time:
      return None

    return dfdatetime_posix_time.PosixTimeInNanoseconds(
        timestamp=self._creation_time)

  @property
  def deletion_time(self):
    """dfdatetime.DateTimeValues: deletion time or None if not available."""
    timestamp = self._fsext_file_entry.get_deletion_time_as_integer()

    # Deletion time can be 0 if not set.
    if not timestamp:
      return None

    return dfdatetime_posix_time.PosixTime(timestamp=timestamp)

  @property
  def name(self):
    """str: name of the file entry, which does not include the full path."""
    return self._name

  @property
  def modification_time(self):
    """dfdatetime.DateTimeValues: modification time or None if not available."""
    timestamp = self._fsext_file_entry.get_modification_time_as_integer()

    # If creation time is not present (None) the timestamp precision is in
    # seconds.
    if self._creation_time is None:
      timestamp, _ = divmod(timestamp, self._NANOSECONDS_PER_SECOND)
      return dfdatetime_posix_time.PosixTime(timestamp=timestamp)

    return dfdatetime_posix_time.PosixTimeInNanoseconds(timestamp=timestamp)

  @property
  def size(self):
    """int: size of the file entry in bytes or None if not available."""
    return self._fsext_file_entry.size

  def GetExtents(self):
    """Retrieves the extents.

    Returns:
      list[Extent]: the extents.
    """
    if self.entry_type != definitions.FILE_ENTRY_TYPE_FILE:
      return []

    extents = []
    for extent_index in range(self._fsext_file_entry.number_of_extents):
      extent_offset, extent_size, extent_flags = (
          self._fsext_file_entry.get_extent(extent_index))

      if extent_flags & 0x1:
        extent_type = definitions.EXTENT_TYPE_SPARSE
      else:
        extent_type = definitions.EXTENT_TYPE_DATA

      data_stream_extent = extent.Extent(
          extent_type=extent_type, offset=extent_offset, size=extent_size)
      extents.append(data_stream_extent)

    return extents

  def GetEXTFileEntry(self):
    """Retrieves the EXT file entry.

    Returns:
      pyfsext.file_entry: EXT file entry.
    """
    return self._fsext_file_entry

  def GetLinkedFileEntry(self):
    """Retrieves the linked file entry, e.g. for a symbolic link.

    Returns:
      EXTFileEntry: linked file entry or None if not available.
    """
    link = self._GetLink()
    if not link:
      return None

    parent_path_spec = getattr(self.path_spec, 'parent', None)
    path_spec = ext_path_spec.EXTPathSpec(
        location=link, parent=parent_path_spec)

    is_root = bool(link == self._file_system.LOCATION_ROOT)

    return EXTFileEntry(
        self._resolver_context, self._file_system, path_spec, is_root=is_root)

  def GetParentFileEntry(self):
    """Retrieves the parent file entry.

    Returns:
      EXTFileEntry: parent file entry or None if not available.
    """
    parent_location = None

    location = getattr(self.path_spec, 'location', None)
    if location is not None:
      parent_location = self._file_system.DirnamePath(location)
      if parent_location == '':
        parent_location = self._file_system.PATH_SEPARATOR

    parent_path_spec = getattr(self.path_spec, 'parent', None)
    path_spec = ext_path_spec.EXTPathSpec(
        location=parent_location, parent=parent_path_spec)

    is_root = bool(parent_location == self._file_system.LOCATION_ROOT)

    return EXTFileEntry(
        self._resolver_context, self._file_system, path_spec, is_root=is_root)
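
# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the module above): one way a caller might
# reach an EXTFileEntry through the standard dfVFS path-spec factory and
# resolver. The image path 'image.raw' and the root location '/' are
# assumptions for illustration only.

from dfvfs.lib import definitions as dfvfs_definitions
from dfvfs.path import factory as path_spec_factory
from dfvfs.resolver import resolver


def list_root_entries(image_path='image.raw'):
  """Prints the names of the entries in the root directory of an EXT image."""
  os_spec = path_spec_factory.Factory.NewPathSpec(
      dfvfs_definitions.TYPE_INDICATOR_OS, location=image_path)
  ext_spec = path_spec_factory.Factory.NewPathSpec(
      dfvfs_definitions.TYPE_INDICATOR_EXT, location='/', parent=os_spec)

  # Resolver.OpenFileEntry() returns an EXTFileEntry for this path spec.
  file_entry = resolver.Resolver.OpenFileEntry(ext_spec)
  for sub_file_entry in file_entry.sub_file_entries:
    print(sub_file_entry.name)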
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import tempfile import time import sqlite3 import numpy as np import six from tensorflow.contrib.summary import summary_test_util from tensorflow.core.framework import graph_pb2 from tensorflow.core.framework import node_def_pb2 from tensorflow.core.framework import types_pb2 from tensorflow.python.eager import function from tensorflow.python.eager import test from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import test_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import state_ops from tensorflow.python.ops import summary_ops_v2 as summary_ops from tensorflow.python.platform import gfile from tensorflow.python.training import training_util get_all = summary_test_util.get_all get_one = summary_test_util.get_one _NUMPY_NUMERIC_TYPES = { types_pb2.DT_HALF: np.float16, types_pb2.DT_FLOAT: np.float32, types_pb2.DT_DOUBLE: np.float64, types_pb2.DT_INT8: np.int8, types_pb2.DT_INT16: np.int16, types_pb2.DT_INT32: np.int32, types_pb2.DT_INT64: np.int64, types_pb2.DT_UINT8: np.uint8, types_pb2.DT_UINT16: np.uint16, types_pb2.DT_UINT32: np.uint32, types_pb2.DT_UINT64: np.uint64, types_pb2.DT_COMPLEX64: np.complex64, types_pb2.DT_COMPLEX128: np.complex128, types_pb2.DT_BOOL: np.bool_, } class EagerFileTest(test_util.TensorFlowTestCase): def testShouldRecordSummary(self): self.assertFalse(summary_ops.should_record_summaries()) with summary_ops.always_record_summaries(): self.assertTrue(summary_ops.should_record_summaries()) def testSummaryOps(self): training_util.get_or_create_global_step() logdir = tempfile.mkdtemp() with summary_ops.create_file_writer( logdir, max_queue=0, name='t0').as_default(), summary_ops.always_record_summaries(): summary_ops.generic('tensor', 1, '') summary_ops.scalar('scalar', 2.0) summary_ops.histogram('histogram', [1.0]) summary_ops.image('image', [[[[1.0]]]]) summary_ops.audio('audio', [[1.0]], 1.0, 1) # The working condition of the ops is tested in the C++ test so we just # test here that we're calling them correctly. 
self.assertTrue(gfile.Exists(logdir)) @test_util.assert_no_new_pyobjects_executing_eagerly def testEagerMemory(self): training_util.get_or_create_global_step() logdir = self.get_temp_dir() with summary_ops.create_file_writer( logdir, max_queue=0, name='t0').as_default(), summary_ops.always_record_summaries(): summary_ops.generic('tensor', 1, '') summary_ops.scalar('scalar', 2.0) summary_ops.histogram('histogram', [1.0]) summary_ops.image('image', [[[[1.0]]]]) summary_ops.audio('audio', [[1.0]], 1.0, 1) def testDefunSummarys(self): training_util.get_or_create_global_step() logdir = tempfile.mkdtemp() with summary_ops.create_file_writer( logdir, max_queue=0, name='t1').as_default(), summary_ops.always_record_summaries(): @function.defun def write(): summary_ops.scalar('scalar', 2.0) write() events = summary_test_util.events_from_logdir(logdir) self.assertEqual(len(events), 2) self.assertEqual(events[1].summary.value[0].simple_value, 2.0) def testSummaryName(self): training_util.get_or_create_global_step() logdir = tempfile.mkdtemp() with summary_ops.create_file_writer( logdir, max_queue=0, name='t2').as_default(), summary_ops.always_record_summaries(): summary_ops.scalar('scalar', 2.0) events = summary_test_util.events_from_logdir(logdir) self.assertEqual(len(events), 2) self.assertEqual(events[1].summary.value[0].tag, 'scalar') def testSummaryNameScope(self): training_util.get_or_create_global_step() logdir = tempfile.mkdtemp() with summary_ops.create_file_writer( logdir, max_queue=0, name='t2').as_default(), summary_ops.always_record_summaries(): with ops.name_scope('scope'): summary_ops.scalar('scalar', 2.0) events = summary_test_util.events_from_logdir(logdir) self.assertEqual(len(events), 2) self.assertEqual(events[1].summary.value[0].tag, 'scope/scalar') def testSummaryGlobalStep(self): step = training_util.get_or_create_global_step() logdir = tempfile.mkdtemp() with summary_ops.create_file_writer( logdir, max_queue=0, name='t2').as_default(), summary_ops.always_record_summaries(): summary_ops.scalar('scalar', 2.0, step=step) events = summary_test_util.events_from_logdir(logdir) self.assertEqual(len(events), 2) self.assertEqual(events[1].summary.value[0].tag, 'scalar') def testRecordEveryNGlobalSteps(self): step = training_util.get_or_create_global_step() logdir = tempfile.mkdtemp() def run_step(): summary_ops.scalar('scalar', i, step=step) step.assign_add(1) with summary_ops.create_file_writer( logdir).as_default(), summary_ops.record_summaries_every_n_global_steps( 2, step): for i in range(10): run_step() # And another 10 steps as a graph function. run_step_fn = function.defun(run_step) for i in range(10): run_step_fn() events = summary_test_util.events_from_logdir(logdir) self.assertEqual(len(events), 11) def testMaxQueue(self): logs = tempfile.mkdtemp() with summary_ops.create_file_writer( logs, max_queue=1, flush_millis=999999, name='lol').as_default(), summary_ops.always_record_summaries(): get_total = lambda: len(summary_test_util.events_from_logdir(logs)) # Note: First tf.Event is always file_version. 
self.assertEqual(1, get_total()) summary_ops.scalar('scalar', 2.0, step=1) self.assertEqual(1, get_total()) # Should flush after second summary since max_queue = 1 summary_ops.scalar('scalar', 2.0, step=2) self.assertEqual(3, get_total()) def testFlushFunction(self): logs = tempfile.mkdtemp() writer = summary_ops.create_file_writer( logs, max_queue=999999, flush_millis=999999, name='lol') with writer.as_default(), summary_ops.always_record_summaries(): get_total = lambda: len(summary_test_util.events_from_logdir(logs)) # Note: First tf.Event is always file_version. self.assertEqual(1, get_total()) summary_ops.scalar('scalar', 2.0, step=1) summary_ops.scalar('scalar', 2.0, step=2) self.assertEqual(1, get_total()) summary_ops.flush() self.assertEqual(3, get_total()) # Test "writer" parameter summary_ops.scalar('scalar', 2.0, step=3) summary_ops.flush(writer=writer) self.assertEqual(4, get_total()) summary_ops.scalar('scalar', 2.0, step=4) summary_ops.flush(writer=writer._resource) # pylint:disable=protected-access self.assertEqual(5, get_total()) def testSharedName(self): logdir = self.get_temp_dir() with summary_ops.always_record_summaries(): # Create with default shared name (should match logdir) writer1 = summary_ops.create_file_writer(logdir) with writer1.as_default(): summary_ops.scalar('one', 1.0, step=1) summary_ops.flush() # Create with explicit logdir shared name (should be same resource/file) shared_name = 'logdir:' + logdir writer2 = summary_ops.create_file_writer(logdir, name=shared_name) with writer2.as_default(): summary_ops.scalar('two', 2.0, step=2) summary_ops.flush() # Create with different shared name (should be separate resource/file) time.sleep(1.1) # Ensure filename has a different timestamp writer3 = summary_ops.create_file_writer(logdir, name='other') with writer3.as_default(): summary_ops.scalar('three', 3.0, step=3) summary_ops.flush() event_files = iter(sorted(gfile.Glob(os.path.join(logdir, '*tfevents*')))) # First file has tags "one" and "two" events = iter(summary_test_util.events_from_file(next(event_files))) self.assertEqual('brain.Event:2', next(events).file_version) self.assertEqual('one', next(events).summary.value[0].tag) self.assertEqual('two', next(events).summary.value[0].tag) self.assertRaises(StopIteration, lambda: next(events)) # Second file has tag "three" events = iter(summary_test_util.events_from_file(next(event_files))) self.assertEqual('brain.Event:2', next(events).file_version) self.assertEqual('three', next(events).summary.value[0].tag) self.assertRaises(StopIteration, lambda: next(events)) # No more files self.assertRaises(StopIteration, lambda: next(event_files)) def testWriterInitAndClose(self): logdir = self.get_temp_dir() get_total = lambda: len(summary_test_util.events_from_logdir(logdir)) with summary_ops.always_record_summaries(): writer = summary_ops.create_file_writer( logdir, max_queue=100, flush_millis=1000000) self.assertEqual(1, get_total()) # file_version Event # Calling init() again while writer is open has no effect writer.init() self.assertEqual(1, get_total()) try: # Not using .as_default() to avoid implicit flush when exiting writer.set_as_default() summary_ops.scalar('one', 1.0, step=1) self.assertEqual(1, get_total()) # Calling .close() should do an implicit flush writer.close() self.assertEqual(2, get_total()) # Calling init() on a closed writer should start a new file time.sleep(1.1) # Ensure filename has a different timestamp writer.init() files = sorted(gfile.Glob(os.path.join(logdir, '*tfevents*'))) 
self.assertEqual(2, len(files)) get_total = lambda: len(summary_test_util.events_from_file(files[1])) self.assertEqual(1, get_total()) # file_version Event summary_ops.scalar('two', 2.0, step=2) writer.close() self.assertEqual(2, get_total()) finally: # Clean up by resetting default writer summary_ops.create_file_writer(None).set_as_default() def testWriterFlush(self): logdir = self.get_temp_dir() get_total = lambda: len(summary_test_util.events_from_logdir(logdir)) with summary_ops.always_record_summaries(): writer = summary_ops.create_file_writer( logdir, max_queue=100, flush_millis=1000000) self.assertEqual(1, get_total()) # file_version Event with writer.as_default(): summary_ops.scalar('one', 1.0, step=1) self.assertEqual(1, get_total()) writer.flush() self.assertEqual(2, get_total()) summary_ops.scalar('two', 2.0, step=2) # Exiting the "as_default()" should do an implicit flush of the "two" tag self.assertEqual(3, get_total()) class EagerDbTest(summary_test_util.SummaryDbTest): def testDbURIOpen(self): tmpdb_path = os.path.join(self.get_temp_dir(), 'tmpDbURITest.sqlite') tmpdb_uri = six.moves.urllib_parse.urljoin('file:', tmpdb_path) tmpdb_writer = summary_ops.create_db_writer(tmpdb_uri, 'experimentA', 'run1', 'user1') with summary_ops.always_record_summaries(): with tmpdb_writer.as_default(): summary_ops.scalar('t1', 2.0) tmpdb = sqlite3.connect(tmpdb_path) num = get_one(tmpdb, 'SELECT count(*) FROM Tags WHERE tag_name = "t1"') self.assertEqual(num, 1) tmpdb.close() def testIntegerSummaries(self): step = training_util.create_global_step() writer = self.create_db_writer() def adder(x, y): state_ops.assign_add(step, 1) summary_ops.generic('x', x) summary_ops.generic('y', y) sum_ = x + y summary_ops.generic('sum', sum_) return sum_ with summary_ops.always_record_summaries(): with writer.as_default(): self.assertEqual(5, adder(int64(2), int64(3)).numpy()) six.assertCountEqual( self, [1, 1, 1], get_all(self.db, 'SELECT step FROM Tensors WHERE dtype IS NOT NULL')) six.assertCountEqual(self, ['x', 'y', 'sum'], get_all(self.db, 'SELECT tag_name FROM Tags')) x_id = get_one(self.db, 'SELECT tag_id FROM Tags WHERE tag_name = "x"') y_id = get_one(self.db, 'SELECT tag_id FROM Tags WHERE tag_name = "y"') sum_id = get_one(self.db, 'SELECT tag_id FROM Tags WHERE tag_name = "sum"') with summary_ops.always_record_summaries(): with writer.as_default(): self.assertEqual(9, adder(int64(4), int64(5)).numpy()) six.assertCountEqual( self, [1, 1, 1, 2, 2, 2], get_all(self.db, 'SELECT step FROM Tensors WHERE dtype IS NOT NULL')) six.assertCountEqual(self, [x_id, y_id, sum_id], get_all(self.db, 'SELECT tag_id FROM Tags')) self.assertEqual(2, get_tensor(self.db, x_id, 1)) self.assertEqual(3, get_tensor(self.db, y_id, 1)) self.assertEqual(5, get_tensor(self.db, sum_id, 1)) self.assertEqual(4, get_tensor(self.db, x_id, 2)) self.assertEqual(5, get_tensor(self.db, y_id, 2)) self.assertEqual(9, get_tensor(self.db, sum_id, 2)) six.assertCountEqual( self, ['experiment'], get_all(self.db, 'SELECT experiment_name FROM Experiments')) six.assertCountEqual(self, ['run'], get_all(self.db, 'SELECT run_name FROM Runs')) six.assertCountEqual(self, ['user'], get_all(self.db, 'SELECT user_name FROM Users')) def testBadExperimentName(self): with self.assertRaises(ValueError): self.create_db_writer(experiment_name='\0') def testBadRunName(self): with self.assertRaises(ValueError): self.create_db_writer(run_name='\0') def testBadUserName(self): with self.assertRaises(ValueError): self.create_db_writer(user_name='-hi') with 
self.assertRaises(ValueError): self.create_db_writer(user_name='hi-') with self.assertRaises(ValueError): self.create_db_writer(user_name='@') def testGraphSummary(self): training_util.get_or_create_global_step() name = 'hi' graph = graph_pb2.GraphDef(node=(node_def_pb2.NodeDef(name=name),)) with summary_ops.always_record_summaries(): with self.create_db_writer().as_default(): summary_ops.graph(graph) six.assertCountEqual(self, [name], get_all(self.db, 'SELECT node_name FROM Nodes')) def get_tensor(db, tag_id, step): cursor = db.execute( 'SELECT dtype, shape, data FROM Tensors WHERE series = ? AND step = ?', (tag_id, step)) dtype, shape, data = cursor.fetchone() assert dtype in _NUMPY_NUMERIC_TYPES buf = np.frombuffer(data, dtype=_NUMPY_NUMERIC_TYPES[dtype]) if not shape: return buf[0] return buf.reshape([int(i) for i in shape.split(',')]) def int64(x): return array_ops.constant(x, dtypes.int64) if __name__ == '__main__': test.main()
#########
# Copyright (c) 2015 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#       http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
#  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  * See the License for the specific language governing permissions and
#  * limitations under the License.

from mock import patch

from cloudify_agent.api import utils
from cloudify_agent.shell.main import get_logger
from cloudify_agent.tests.shell.commands import BaseCommandLineTestCase
from cloudify_agent.tests import get_storage_directory


@patch('cloudify_agent.api.utils.internal.get_storage_directory',
       get_storage_directory)
@patch('cloudify_agent.shell.commands.daemons.DaemonFactory.new')
@patch('cloudify_agent.shell.commands.daemons.DaemonFactory.save')
@patch('cloudify_agent.shell.commands.daemons.DaemonFactory.load')
@patch('cloudify_agent.shell.commands.daemons.DaemonFactory.delete')
@patch('cloudify_agent.shell.commands.daemons.DaemonFactory.load_all')
class TestPatchedDaemonCommandLine(BaseCommandLineTestCase):

    PROCESS_MANAGEMENT = 'init.d'

    def test_create(self, *factory_methods):
        self._run('cfy-agent daemons create --name=name '
                  '--process-management=init.d '
                  '--queue=queue --manager-ip=127.0.0.1 --user=user ')

        factory_new = factory_methods[4]
        factory_new.assert_called_once_with(
            name='name', queue='queue', user='user',
            manager_ip='127.0.0.1', process_management='init.d',
            broker_ip=None, workdir=None, broker_url=None, includes=None,
            log_level=None, pid_file=None, log_file=None, max_workers=None,
            min_workers=None, broker_port=None, manager_port=None, host=None,
            deployment_id=None, extra_env_path=None, logger=get_logger(),
        )

        daemon = factory_new.return_value
        daemon.create.assert_called_once_with()

    def test_create_with_custom_options(self, *factory_methods):
        self._run('cfy-agent daemons create --name=name '
                  '--queue=queue --manager-ip=127.0.0.1 --user=user '
                  '--process-management=init.d '
                  '--key=value --complex-key=complex-value')

        factory_new = factory_methods[4]
        factory_new.assert_called_once_with(
            name='name', queue='queue', user='user',
            manager_ip='127.0.0.1', process_management='init.d',
            broker_ip=None, workdir=None, broker_url=None, max_workers=None,
            min_workers=None, broker_port=None, includes=None, host=None,
            deployment_id=None, log_level=None, pid_file=None, log_file=None,
            manager_port=None, extra_env_path=None, logger=get_logger(),
            key='value', complex_key='complex-value'
        )

    def test_configure(self, *factory_methods):
        self._run('cfy-agent daemons configure --name=name ')

        factory_load = factory_methods[2]
        factory_load.assert_called_once_with('name', logger=get_logger())

        daemon = factory_load.return_value
        daemon.configure.assert_called_once_with()

        factory_save = factory_methods[3]
        factory_save.assert_called_once_with(daemon)

    def test_start(self, *factory_methods):
        self._run('cfy-agent daemons start --name=name '
                  '--interval 5 --timeout 20 --no-delete-amqp-queue')

        factory_load = factory_methods[2]
        factory_load.assert_called_once_with('name', logger=get_logger())

        daemon = factory_load.return_value
        daemon.start.assert_called_once_with(
            interval=5,
            timeout=20,
            delete_amqp_queue=False,
        )

    def test_stop(self, *factory_methods):
        self._run('cfy-agent daemons stop --name=name '
                  '--interval 5 --timeout 20')

        factory_load = factory_methods[2]
        factory_load.assert_called_once_with('name', logger=get_logger())

        daemon = factory_load.return_value
        daemon.stop.assert_called_once_with(
            interval=5,
            timeout=20
        )

    def test_delete(self, *factory_methods):
        self._run('cfy-agent daemons delete --name=name')

        factory_load = factory_methods[2]
        factory_load.assert_called_once_with('name', logger=get_logger())

        daemon = factory_load.return_value
        daemon.delete.assert_called_once_with()

    def test_restart(self, *factory_methods):
        self._run('cfy-agent daemons restart --name=name')

        factory_load = factory_methods[2]
        factory_load.assert_called_once_with('name', logger=get_logger())

        daemon = factory_load.return_value
        daemon.restart.assert_called_once_with()

    def test_register(self, *factory_methods):
        self._run('cfy-agent daemons register '
                  '--name=name --plugin=plugin')

        factory_load = factory_methods[2]
        factory_load.assert_called_once_with('name', logger=get_logger())

        daemon = factory_load.return_value
        daemon.register.assert_called_once_with('plugin')

    def test_unregister(self, *factory_methods):
        self._run('cfy-agent daemons unregister '
                  '--name=name --plugin=plugin')

        factory_load = factory_methods[2]
        factory_load.assert_called_once_with('name', logger=get_logger())

        daemon = factory_load.return_value
        daemon.unregister.assert_called_once_with('plugin')

    @patch('cloudify_agent.shell.commands.daemons.api_utils'
           '.internal.daemon_to_dict')
    def test_inspect(self, daemon_to_dict, *factory_methods):
        daemon_to_dict.return_value = {}

        name = utils.internal.generate_agent_name()
        self._run('cfy-agent daemons inspect --name={0}'.format(name))

        factory_load = factory_methods[2]
        factory_load.assert_called_once_with(name, logger=get_logger())

        daemon = factory_load.return_value
        daemon_to_dict.assert_called_once_with(daemon)

    def test_status(self, *factory_methods):
        name = utils.internal.generate_agent_name()
        self._run('cfy-agent daemons status --name={0}'.format(name))

        factory_load = factory_methods[2]
        daemon = factory_load.return_value
        daemon.status.assert_called_once_with()

    def test_required(self, *_):
        self._run('cfy-agent daemons create --manager-ip=manager '
                  '--process-management=init.d',
                  raise_system_exit=True)


@patch('cloudify_agent.api.utils.internal.get_storage_directory',
       get_storage_directory)
class TestDaemonCommandLine(BaseCommandLineTestCase):

    def test_inspect_non_existing_agent(self):
        try:
            self._run('cfy-agent daemons inspect --name=non-existing',
                      raise_system_exit=True)
        except SystemExit as e:
            self.assertEqual(e.code, 203)

    def test_list(self):
        self._run('cfy-agent daemons create '
                  '--process-management=init.d '
                  '--queue=queue --name=test-name --manager-ip=127.0.0.1 '
                  '--user=user ')
        self._run('cfy-agent daemons create '
                  '--process-management=init.d '
                  '--queue=queue --name=test-name2 --manager-ip=127.0.0.1 '
                  '--user=user ')
        self._run('cfy-agent daemons list')
# -*- coding: utf-8 -*- from __future__ import absolute_import from __future__ import print_function from django.conf import settings from django.test import TestCase import os import shutil import ujson from mock import patch, MagicMock from six.moves import range from typing import Any from zerver.lib.actions import ( do_claim_attachments, ) from zerver.lib.export import ( do_export_realm, export_usermessages_batch, ) from zerver.lib.upload import ( claim_attachment, upload_message_image, ) from zerver.lib.utils import ( mkdir_p, query_chunker, ) from zerver.lib.test_classes import ( ZulipTestCase, ) from zerver.lib.test_runner import slow from zerver.models import ( get_user_profile_by_email, Message, Realm, Recipient, UserMessage, ) def rm_tree(path): # type: (str) -> None if os.path.exists(path): shutil.rmtree(path) class QueryUtilTest(ZulipTestCase): def _create_messages(self): # type: () -> None for email in ['cordelia@zulip.com', 'hamlet@zulip.com', 'iago@zulip.com']: for _ in range(5): self.send_message(email, 'othello@zulip.com', Recipient.PERSONAL) @slow('creates lots of data') def test_query_chunker(self): # type: () -> None self._create_messages() cordelia = get_user_profile_by_email('cordelia@zulip.com') hamlet = get_user_profile_by_email('hamlet@zulip.com') def get_queries(): # type: () -> List[Any] queries = [ Message.objects.filter(sender_id=cordelia.id), Message.objects.filter(sender_id=hamlet.id), Message.objects.exclude(sender_id__in=[cordelia.id, hamlet.id]) ] return queries for query in get_queries(): # For our test to be meaningful, we want non-empty queries # at first assert len(list(query)) > 0 queries = get_queries() all_msg_ids = set() # type: Set[int] chunker = query_chunker( queries=queries, id_collector=all_msg_ids, chunk_size=20, ) all_row_ids = [] for chunk in chunker: for row in chunk: all_row_ids.append(row.id) self.assertEqual(all_row_ids, sorted(all_row_ids)) self.assertEqual(len(all_msg_ids), len(Message.objects.all())) # Now just search for cordelia/hamlet. Note that we don't really # need the order_by here, but it should be harmless. queries = [ Message.objects.filter(sender_id=cordelia.id).order_by('id'), Message.objects.filter(sender_id=hamlet.id), ] all_msg_ids = set() chunker = query_chunker( queries=queries, id_collector=all_msg_ids, chunk_size=7, # use a different size ) list(chunker) # exhaust the iterator self.assertEqual( len(all_msg_ids), len(Message.objects.filter(sender_id__in=[cordelia.id, hamlet.id])) ) # Try just a single query to validate chunking. queries = [ Message.objects.exclude(sender_id=cordelia.id), ] all_msg_ids = set() chunker = query_chunker( queries=queries, id_collector=all_msg_ids, chunk_size=11, # use a different size each time ) list(chunker) # exhaust the iterator self.assertEqual( len(all_msg_ids), len(Message.objects.exclude(sender_id=cordelia.id)) ) self.assertTrue(len(all_msg_ids) > 15) # Verify assertions about disjoint-ness. queries = [ Message.objects.exclude(sender_id=cordelia.id), Message.objects.filter(sender_id=hamlet.id), ] all_msg_ids = set() chunker = query_chunker( queries=queries, id_collector=all_msg_ids, chunk_size=13, # use a different size each time ) with self.assertRaises(AssertionError): list(chunker) # exercise the iterator # Try to confuse things with ids part of the query... 
queries = [ Message.objects.filter(id__lte=10), Message.objects.filter(id__gt=10), ] all_msg_ids = set() chunker = query_chunker( queries=queries, id_collector=all_msg_ids, chunk_size=11, # use a different size each time ) self.assertEqual(len(all_msg_ids), 0) # until we actually use the iterator list(chunker) # exhaust the iterator self.assertEqual(len(all_msg_ids), len(Message.objects.all())) # Verify that we can just get the first chunk with a next() call. queries = [ Message.objects.all(), ] all_msg_ids = set() chunker = query_chunker( queries=queries, id_collector=all_msg_ids, chunk_size=10, # use a different size each time ) first_chunk = next(chunker) # type: ignore self.assertEqual(len(first_chunk), 10) self.assertEqual(len(all_msg_ids), 10) expected_msg = Message.objects.all()[0:10][5] actual_msg = first_chunk[5] self.assertEqual(actual_msg.content, expected_msg.content) self.assertEqual(actual_msg.sender_id, expected_msg.sender_id) class ExportTest(TestCase): def setUp(self): # type: () -> None rm_tree(settings.LOCAL_UPLOADS_DIR) def _make_output_dir(self): # type: () -> str output_dir = 'var/test-export' rm_tree(output_dir) mkdir_p(output_dir) return output_dir def _export_realm(self, domain, exportable_user_ids=None): # type: (str, Set[int]) -> Dict[str, Any] output_dir = self._make_output_dir() realm = Realm.objects.get(domain=domain) with patch('logging.info'), patch('zerver.lib.export.create_soft_link'): do_export_realm( realm=realm, output_dir=output_dir, threads=0, exportable_user_ids=exportable_user_ids, ) # TODO: Process the second partial file, which can be created # for certain edge cases. export_usermessages_batch( input_path=os.path.join(output_dir, 'messages-000001.json.partial'), output_path=os.path.join(output_dir, 'message.json') ) def read_file(fn): # type: (str) -> Any full_fn = os.path.join(output_dir, fn) with open(full_fn) as f: return ujson.load(f) result = {} result['realm'] = read_file('realm.json') result['attachment'] = read_file('attachment.json') result['message'] = read_file('message.json') result['uploads_dir'] = os.path.join(output_dir, 'uploads') return result def test_attachment(self): # type: () -> None message = Message.objects.all()[0] user_profile = message.sender url = upload_message_image(u'dummy.txt', u'text/plain', b'zulip!', user_profile) path_id = url.replace('/user_uploads/', '') claim_attachment( user_profile=user_profile, path_id=path_id, message=message, is_message_realm_public=True ) domain = 'zulip.com' full_data = self._export_realm(domain=domain) data = full_data['attachment'] self.assertEqual(len(data['zerver_attachment']), 1) record = data['zerver_attachment'][0] self.assertEqual(record['path_id'], path_id) fn = os.path.join(full_data['uploads_dir'], path_id) with open(fn) as f: self.assertEqual(f.read(), 'zulip!') def test_zulip_realm(self): # type: () -> None domain = 'zulip.com' full_data = self._export_realm(domain=domain) data = full_data['realm'] self.assertEqual(len(data['zerver_userprofile_crossrealm']), 0) self.assertEqual(len(data['zerver_userprofile_mirrordummy']), 0) def get_set(table, field): # type: (str, str) -> Set[str] values = set(r[field] for r in data[table]) # print('set(%s)' % sorted(values)) return values def find_by_id(table, db_id): # type: (str) -> Dict[str, Any] return [ r for r in data[table] if r['id'] == db_id][0] exported_user_emails = get_set('zerver_userprofile', 'email') self.assertIn('cordelia@zulip.com', exported_user_emails) self.assertIn('default-bot@zulip.com', exported_user_emails) 
self.assertIn('emailgateway@zulip.com', exported_user_emails) exported_streams = get_set('zerver_stream', 'name') self.assertEqual( exported_streams, set([u'Denmark', u'Rome', u'Scotland', u'Venice', u'Verona']) ) data = full_data['message'] um = UserMessage.objects.all()[0] exported_um = find_by_id('zerver_usermessage', um.id) self.assertEqual(exported_um['message'], um.message_id) self.assertEqual(exported_um['user_profile'], um.user_profile_id) exported_message = find_by_id('zerver_message', um.message_id) self.assertEqual(exported_message['content'], um.message.content) # TODO, extract get_set/find_by_id, so we can split this test up # Now, restrict users cordelia = get_user_profile_by_email('cordelia@zulip.com') hamlet = get_user_profile_by_email('hamlet@zulip.com') user_ids = set([cordelia.id, hamlet.id]) full_data = self._export_realm( domain=domain, exportable_user_ids=user_ids ) data = full_data['realm'] exported_user_emails = get_set('zerver_userprofile', 'email') self.assertIn('cordelia@zulip.com', exported_user_emails) self.assertIn('hamlet@zulip.com', exported_user_emails) self.assertNotIn('default-bot@zulip.com', exported_user_emails) self.assertNotIn('iago@zulip.com', exported_user_emails) dummy_user_emails = get_set('zerver_userprofile_mirrordummy', 'email') self.assertIn('iago@zulip.com', dummy_user_emails) self.assertNotIn('cordelia@zulip.com', dummy_user_emails)
# -*- coding: utf-8 -*- # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import warnings from typing import Callable, Dict, Optional, Sequence, Tuple from google.api_core import grpc_helpers # type: ignore from google.api_core import operations_v1 # type: ignore from google.api_core import gapic_v1 # type: ignore import google.auth # type: ignore from google.auth import credentials as ga_credentials # type: ignore from google.auth.transport.grpc import SslCredentials # type: ignore import grpc # type: ignore from google.ads.googleads.v8.resources.types import campaign_draft from google.ads.googleads.v8.services.types import campaign_draft_service from google.longrunning import operations_pb2 # type: ignore from .base import CampaignDraftServiceTransport, DEFAULT_CLIENT_INFO class CampaignDraftServiceGrpcTransport(CampaignDraftServiceTransport): """gRPC backend transport for CampaignDraftService. Service to manage campaign drafts. This class defines the same methods as the primary client, so the primary client can load the underlying transport implementation and call it. It sends protocol buffers over the wire using gRPC (which is built on top of HTTP/2); the ``grpcio`` package must be installed. """ def __init__( self, *, host: str = "googleads.googleapis.com", credentials: ga_credentials.Credentials = None, credentials_file: str = None, scopes: Sequence[str] = None, channel: grpc.Channel = None, api_mtls_endpoint: str = None, client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, ssl_channel_credentials: grpc.ChannelCredentials = None, quota_project_id: Optional[str] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: """Instantiate the transport. Args: host (Optional[str]): The hostname to connect to. credentials (Optional[google.auth.credentials.Credentials]): The authorization credentials to attach to requests. These credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. This argument is ignored if ``channel`` is provided. credentials_file (Optional[str]): A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. This argument is ignored if ``channel`` is provided. scopes (Optional(Sequence[str])): A list of scopes. This argument is ignored if ``channel`` is provided. channel (Optional[grpc.Channel]): A ``Channel`` instance through which to make calls. api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. If provided, it overrides the ``host`` argument and tries to create a mutual TLS channel with client SSL credentials from ``client_cert_source`` or applicatin default SSL credentials. client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): Deprecated. A callback to provide client SSL certificate bytes and private key bytes, both in PEM format. It is ignored if ``api_mtls_endpoint`` is None. 
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials for grpc channel. It is ignored if ``channel`` is provided. quota_project_id (Optional[str]): An optional project to use for billing and quota. client_info (google.api_core.gapic_v1.client_info.ClientInfo): The client info used to send a user-agent string along with API requests. If ``None``, then default info will be used. Generally, you only need to set this if you're developing your own client library. Raises: google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport creation failed for any reason. """ self._ssl_channel_credentials = ssl_channel_credentials if channel: # Sanity check: Ensure that channel and credentials are not both # provided. credentials = False # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None elif api_mtls_endpoint: warnings.warn( "api_mtls_endpoint and client_cert_source are deprecated", DeprecationWarning, ) host = ( api_mtls_endpoint if ":" in api_mtls_endpoint else api_mtls_endpoint + ":443" ) if credentials is None: credentials, _ = google.auth.default( scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id ) # Create SSL credentials with client_cert_source or application # default SSL credentials. if client_cert_source: cert, key = client_cert_source() ssl_credentials = grpc.ssl_channel_credentials( certificate_chain=cert, private_key=key ) else: ssl_credentials = SslCredentials().ssl_credentials # create a new channel. The provided one is ignored. self._grpc_channel = type(self).create_channel( host, credentials=credentials, credentials_file=credentials_file, ssl_credentials=ssl_credentials, scopes=scopes or self.AUTH_SCOPES, quota_project_id=quota_project_id, options=[ ("grpc.max_send_message_length", -1), ("grpc.max_receive_message_length", -1), ], ) self._ssl_channel_credentials = ssl_credentials else: host = host if ":" in host else host + ":443" if credentials is None: credentials, _ = google.auth.default(scopes=self.AUTH_SCOPES) # create a new channel. The provided one is ignored. self._grpc_channel = type(self).create_channel( host, credentials=credentials, ssl_credentials=ssl_channel_credentials, scopes=self.AUTH_SCOPES, options=[ ("grpc.max_send_message_length", -1), ("grpc.max_receive_message_length", -1), ], ) self._stubs = {} # type: Dict[str, Callable] # Run the base constructor. super().__init__( host=host, credentials=credentials, client_info=client_info, ) @classmethod def create_channel( cls, host: str = "googleads.googleapis.com", credentials: ga_credentials.Credentials = None, scopes: Optional[Sequence[str]] = None, **kwargs, ) -> grpc.Channel: """Create and return a gRPC channel object. Args: address (Optionsl[str]): The host for the channel to use. credentials (Optional[~.Credentials]): The authorization credentials to attach to requests. These credentials identify this application to the service. If none are specified, the client will attempt to ascertain the credentials from the environment. scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. These are only used when credentials are not specified and are passed to :func:`google.auth.default`. kwargs (Optional[dict]): Keyword arguments, which are passed to the channel creation. Returns: grpc.Channel: A gRPC channel object. 
""" return grpc_helpers.create_channel( host, credentials=credentials, scopes=scopes or cls.AUTH_SCOPES, **kwargs, ) @property def grpc_channel(self) -> grpc.Channel: """Return the channel designed to connect to this service. """ return self._grpc_channel @property def operations_client(self) -> operations_v1.OperationsClient: """Create the client designed to process long-running operations. This property caches on the instance; repeated calls return the same client. """ # Sanity check: Only create a new client if we do not already have one. if "operations_client" not in self.__dict__: self.__dict__["operations_client"] = operations_v1.OperationsClient( self.grpc_channel ) # Return the client from cache. return self.__dict__["operations_client"] @property def get_campaign_draft( self, ) -> Callable[ [campaign_draft_service.GetCampaignDraftRequest], campaign_draft.CampaignDraft, ]: r"""Return a callable for the get campaign draft method over gRPC. Returns the requested campaign draft in full detail. List of thrown errors: `AuthenticationError <>`__ `AuthorizationError <>`__ `HeaderError <>`__ `InternalError <>`__ `QuotaError <>`__ `RequestError <>`__ Returns: Callable[[~.GetCampaignDraftRequest], ~.CampaignDraft]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "get_campaign_draft" not in self._stubs: self._stubs["get_campaign_draft"] = self.grpc_channel.unary_unary( "/google.ads.googleads.v8.services.CampaignDraftService/GetCampaignDraft", request_serializer=campaign_draft_service.GetCampaignDraftRequest.serialize, response_deserializer=campaign_draft.CampaignDraft.deserialize, ) return self._stubs["get_campaign_draft"] @property def mutate_campaign_drafts( self, ) -> Callable[ [campaign_draft_service.MutateCampaignDraftsRequest], campaign_draft_service.MutateCampaignDraftsResponse, ]: r"""Return a callable for the mutate campaign drafts method over gRPC. Creates, updates, or removes campaign drafts. Operation statuses are returned. List of thrown errors: `AuthenticationError <>`__ `AuthorizationError <>`__ `CampaignDraftError <>`__ `DatabaseError <>`__ `FieldError <>`__ `HeaderError <>`__ `InternalError <>`__ `MutateError <>`__ `QuotaError <>`__ `RequestError <>`__ Returns: Callable[[~.MutateCampaignDraftsRequest], ~.MutateCampaignDraftsResponse]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "mutate_campaign_drafts" not in self._stubs: self._stubs[ "mutate_campaign_drafts" ] = self.grpc_channel.unary_unary( "/google.ads.googleads.v8.services.CampaignDraftService/MutateCampaignDrafts", request_serializer=campaign_draft_service.MutateCampaignDraftsRequest.serialize, response_deserializer=campaign_draft_service.MutateCampaignDraftsResponse.deserialize, ) return self._stubs["mutate_campaign_drafts"] @property def promote_campaign_draft( self, ) -> Callable[ [campaign_draft_service.PromoteCampaignDraftRequest], operations_pb2.Operation, ]: r"""Return a callable for the promote campaign draft method over gRPC. Promotes the changes in a draft back to the base campaign. This method returns a Long Running Operation (LRO) indicating if the Promote is done. 
Use [Operations.GetOperation] to poll the LRO until it is done. Only a done status is returned in the response. See the status in the Campaign Draft resource to determine if the promotion was successful. If the LRO failed, use [CampaignDraftService.ListCampaignDraftAsyncErrors][google.ads.googleads.v8.services.CampaignDraftService.ListCampaignDraftAsyncErrors] to view the list of error reasons. List of thrown errors: `AuthenticationError <>`__ `AuthorizationError <>`__ `CampaignDraftError <>`__ `HeaderError <>`__ `InternalError <>`__ `QuotaError <>`__ `RequestError <>`__ Returns: Callable[[~.PromoteCampaignDraftRequest], ~.Operation]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "promote_campaign_draft" not in self._stubs: self._stubs[ "promote_campaign_draft" ] = self.grpc_channel.unary_unary( "/google.ads.googleads.v8.services.CampaignDraftService/PromoteCampaignDraft", request_serializer=campaign_draft_service.PromoteCampaignDraftRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, ) return self._stubs["promote_campaign_draft"] @property def list_campaign_draft_async_errors( self, ) -> Callable[ [campaign_draft_service.ListCampaignDraftAsyncErrorsRequest], campaign_draft_service.ListCampaignDraftAsyncErrorsResponse, ]: r"""Return a callable for the list campaign draft async errors method over gRPC. Returns all errors that occurred during CampaignDraft promote. Throws an error if called before campaign draft is promoted. Supports standard list paging. List of thrown errors: `AuthenticationError <>`__ `AuthorizationError <>`__ `HeaderError <>`__ `InternalError <>`__ `QuotaError <>`__ `RequestError <>`__ Returns: Callable[[~.ListCampaignDraftAsyncErrorsRequest], ~.ListCampaignDraftAsyncErrorsResponse]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "list_campaign_draft_async_errors" not in self._stubs: self._stubs[ "list_campaign_draft_async_errors" ] = self.grpc_channel.unary_unary( "/google.ads.googleads.v8.services.CampaignDraftService/ListCampaignDraftAsyncErrors", request_serializer=campaign_draft_service.ListCampaignDraftAsyncErrorsRequest.serialize, response_deserializer=campaign_draft_service.ListCampaignDraftAsyncErrorsResponse.deserialize, ) return self._stubs["list_campaign_draft_async_errors"] __all__ = ("CampaignDraftServiceGrpcTransport",)
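
# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the generated module above): one way to
# wire the transport to an explicit channel using Application Default
# Credentials. Real Google Ads calls additionally require request headers
# (developer token, login customer id) that the higher-level GoogleAdsClient
# attaches, so this only illustrates the transport plumbing.

import google.auth

credentials, _ = google.auth.default()

# create_channel() builds an authorized gRPC channel scoped for the service.
channel = CampaignDraftServiceGrpcTransport.create_channel(
    credentials=credentials)

# Passing an explicit channel makes the transport reuse it as-is.
transport = CampaignDraftServiceGrpcTransport(channel=channel)

# Each stub property returns a callable bound to that channel, e.g.:
get_draft = transport.get_campaign_draft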
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2012 OpenStack LLC # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import urlparse import uuid import requests from keystoneclient import exceptions from keystoneclient.v3 import roles from tests.v3 import utils class RoleTests(utils.TestCase, utils.CrudTests): def setUp(self): super(RoleTests, self).setUp() self.additionalSetUp() self.key = 'role' self.collection_key = 'roles' self.model = roles.Role self.manager = self.client.roles def new_ref(self, **kwargs): kwargs = super(RoleTests, self).new_ref(**kwargs) kwargs.setdefault('name', uuid.uuid4().hex) return kwargs def test_domain_role_grant(self): user_id = uuid.uuid4().hex domain_id = uuid.uuid4().hex ref = self.new_ref() resp = utils.TestResponse({ "status_code": 201, "text": '', }) method = 'PUT' kwargs = copy.copy(self.TEST_REQUEST_BASE) kwargs['headers'] = self.headers[method] requests.request( method, urlparse.urljoin( self.TEST_URL, 'v3/domains/%s/users/%s/%s/%s' % ( domain_id, user_id, self.collection_key, ref['id'])), **kwargs).AndReturn((resp)) self.mox.ReplayAll() self.manager.grant(role=ref['id'], domain=domain_id, user=user_id) def test_domain_group_role_grant(self): group_id = uuid.uuid4().hex domain_id = uuid.uuid4().hex ref = self.new_ref() resp = utils.TestResponse({ "status_code": 201, "text": '', }) method = 'PUT' kwargs = copy.copy(self.TEST_REQUEST_BASE) kwargs['headers'] = self.headers[method] requests.request( method, urlparse.urljoin( self.TEST_URL, 'v3/domains/%s/groups/%s/%s/%s' % ( domain_id, group_id, self.collection_key, ref['id'])), **kwargs).AndReturn((resp)) self.mox.ReplayAll() self.manager.grant(role=ref['id'], domain=domain_id, group=group_id) def test_domain_role_list(self): user_id = uuid.uuid4().hex domain_id = uuid.uuid4().hex ref_list = [self.new_ref(), self.new_ref()] resp = utils.TestResponse({ "status_code": 200, "text": self.serialize(ref_list), }) method = 'GET' kwargs = copy.copy(self.TEST_REQUEST_BASE) kwargs['headers'] = self.headers[method] requests.request( method, urlparse.urljoin( self.TEST_URL, 'v3/domains/%s/users/%s/%s' % ( domain_id, user_id, self.collection_key)), **kwargs).AndReturn((resp)) self.mox.ReplayAll() self.manager.list(domain=domain_id, user=user_id) def test_domain_group_role_list(self): group_id = uuid.uuid4().hex domain_id = uuid.uuid4().hex ref_list = [self.new_ref(), self.new_ref()] resp = utils.TestResponse({ "status_code": 200, "text": self.serialize(ref_list), }) method = 'GET' kwargs = copy.copy(self.TEST_REQUEST_BASE) kwargs['headers'] = self.headers[method] requests.request( method, urlparse.urljoin( self.TEST_URL, 'v3/domains/%s/groups/%s/%s' % ( domain_id, group_id, self.collection_key)), **kwargs).AndReturn((resp)) self.mox.ReplayAll() self.manager.list(domain=domain_id, group=group_id) def test_domain_role_check(self): user_id = uuid.uuid4().hex domain_id = uuid.uuid4().hex ref = self.new_ref() resp = utils.TestResponse({ "status_code": 204, "text": '', }) method = 'HEAD' kwargs = 
copy.copy(self.TEST_REQUEST_BASE) kwargs['headers'] = self.headers[method] requests.request( method, urlparse.urljoin( self.TEST_URL, 'v3/domains/%s/users/%s/%s/%s' % ( domain_id, user_id, self.collection_key, ref['id'])), **kwargs).AndReturn((resp)) self.mox.ReplayAll() self.manager.check(role=ref['id'], domain=domain_id, user=user_id) def test_domain_group_role_check(self): return group_id = uuid.uuid4().hex domain_id = uuid.uuid4().hex ref = self.new_ref() resp = utils.TestResponse({ "status_code": 204, "text": '', }) method = 'HEAD' kwargs = copy.copy(self.TEST_REQUEST_BASE) kwargs['headers'] = self.headers[method] requests.request( method, urlparse.urljoin( self.TEST_URL, 'v3/domains/%s/groups/%s/%s/%s' % ( domain_id, group_id, self.collection_key, ref['id'])), **kwargs).AndReturn((resp)) self.mox.ReplayAll() self.manager.check(role=ref['id'], domain=domain_id, group=group_id) def test_domain_role_revoke(self): user_id = uuid.uuid4().hex domain_id = uuid.uuid4().hex ref = self.new_ref() resp = utils.TestResponse({ "status_code": 204, "text": '', }) method = 'DELETE' kwargs = copy.copy(self.TEST_REQUEST_BASE) kwargs['headers'] = self.headers[method] requests.request( method, urlparse.urljoin( self.TEST_URL, 'v3/domains/%s/users/%s/%s/%s' % ( domain_id, user_id, self.collection_key, ref['id'])), **kwargs).AndReturn((resp)) self.mox.ReplayAll() self.manager.revoke(role=ref['id'], domain=domain_id, user=user_id) def test_domain_group_role_revoke(self): group_id = uuid.uuid4().hex domain_id = uuid.uuid4().hex ref = self.new_ref() resp = utils.TestResponse({ "status_code": 204, "text": '', }) method = 'DELETE' kwargs = copy.copy(self.TEST_REQUEST_BASE) kwargs['headers'] = self.headers[method] requests.request( method, urlparse.urljoin( self.TEST_URL, 'v3/domains/%s/groups/%s/%s/%s' % ( domain_id, group_id, self.collection_key, ref['id'])), **kwargs).AndReturn((resp)) self.mox.ReplayAll() self.manager.revoke(role=ref['id'], domain=domain_id, group=group_id) def test_project_role_grant(self): user_id = uuid.uuid4().hex project_id = uuid.uuid4().hex ref = self.new_ref() resp = utils.TestResponse({ "status_code": 201, "text": '', }) method = 'PUT' kwargs = copy.copy(self.TEST_REQUEST_BASE) kwargs['headers'] = self.headers[method] requests.request( method, urlparse.urljoin( self.TEST_URL, 'v3/projects/%s/users/%s/%s/%s' % ( project_id, user_id, self.collection_key, ref['id'])), **kwargs).AndReturn((resp)) self.mox.ReplayAll() self.manager.grant(role=ref['id'], project=project_id, user=user_id) def test_project_group_role_grant(self): group_id = uuid.uuid4().hex project_id = uuid.uuid4().hex ref = self.new_ref() resp = utils.TestResponse({ "status_code": 201, "text": '', }) method = 'PUT' kwargs = copy.copy(self.TEST_REQUEST_BASE) kwargs['headers'] = self.headers[method] requests.request( method, urlparse.urljoin( self.TEST_URL, 'v3/projects/%s/groups/%s/%s/%s' % ( project_id, group_id, self.collection_key, ref['id'])), **kwargs).AndReturn((resp)) self.mox.ReplayAll() self.manager.grant(role=ref['id'], project=project_id, group=group_id) def test_project_role_list(self): user_id = uuid.uuid4().hex project_id = uuid.uuid4().hex ref_list = [self.new_ref(), self.new_ref()] resp = utils.TestResponse({ "status_code": 200, "text": self.serialize(ref_list), }) method = 'GET' kwargs = copy.copy(self.TEST_REQUEST_BASE) kwargs['headers'] = self.headers[method] requests.request( method, urlparse.urljoin( self.TEST_URL, 'v3/projects/%s/users/%s/%s' % ( project_id, user_id, self.collection_key)), 
**kwargs).AndReturn((resp)) self.mox.ReplayAll() self.manager.list(project=project_id, user=user_id) def test_project_group_role_list(self): group_id = uuid.uuid4().hex project_id = uuid.uuid4().hex ref_list = [self.new_ref(), self.new_ref()] resp = utils.TestResponse({ "status_code": 200, "text": self.serialize(ref_list), }) method = 'GET' kwargs = copy.copy(self.TEST_REQUEST_BASE) kwargs['headers'] = self.headers[method] requests.request( method, urlparse.urljoin( self.TEST_URL, 'v3/projects/%s/groups/%s/%s' % ( project_id, group_id, self.collection_key)), **kwargs).AndReturn((resp)) self.mox.ReplayAll() self.manager.list(project=project_id, group=group_id) def test_project_role_check(self): user_id = uuid.uuid4().hex project_id = uuid.uuid4().hex ref = self.new_ref() resp = utils.TestResponse({ "status_code": 200, "text": '', }) method = 'HEAD' kwargs = copy.copy(self.TEST_REQUEST_BASE) kwargs['headers'] = self.headers[method] requests.request( method, urlparse.urljoin( self.TEST_URL, 'v3/projects/%s/users/%s/%s/%s' % ( project_id, user_id, self.collection_key, ref['id'])), **kwargs).AndReturn((resp)) self.mox.ReplayAll() self.manager.check(role=ref['id'], project=project_id, user=user_id) def test_project_group_role_check(self): group_id = uuid.uuid4().hex project_id = uuid.uuid4().hex ref = self.new_ref() resp = utils.TestResponse({ "status_code": 200, "text": '', }) method = 'HEAD' kwargs = copy.copy(self.TEST_REQUEST_BASE) kwargs['headers'] = self.headers[method] requests.request( method, urlparse.urljoin( self.TEST_URL, 'v3/projects/%s/groups/%s/%s/%s' % ( project_id, group_id, self.collection_key, ref['id'])), **kwargs).AndReturn((resp)) self.mox.ReplayAll() self.manager.check(role=ref['id'], project=project_id, group=group_id) def test_project_role_revoke(self): user_id = uuid.uuid4().hex project_id = uuid.uuid4().hex ref = self.new_ref() resp = utils.TestResponse({ "status_code": 204, "text": '', }) method = 'DELETE' kwargs = copy.copy(self.TEST_REQUEST_BASE) kwargs['headers'] = self.headers[method] requests.request( method, urlparse.urljoin( self.TEST_URL, 'v3/projects/%s/users/%s/%s/%s' % ( project_id, user_id, self.collection_key, ref['id'])), **kwargs).AndReturn((resp)) self.mox.ReplayAll() self.manager.revoke(role=ref['id'], project=project_id, user=user_id) def test_project_group_role_revoke(self): group_id = uuid.uuid4().hex project_id = uuid.uuid4().hex ref = self.new_ref() resp = utils.TestResponse({ "status_code": 204, "text": '', }) method = 'DELETE' kwargs = copy.copy(self.TEST_REQUEST_BASE) kwargs['headers'] = self.headers[method] requests.request( method, urlparse.urljoin( self.TEST_URL, 'v3/projects/%s/groups/%s/%s/%s' % ( project_id, group_id, self.collection_key, ref['id'])), **kwargs).AndReturn((resp)) self.mox.ReplayAll() self.manager.revoke(role=ref['id'], project=project_id, group=group_id) def test_domain_project_role_grant_fails(self): user_id = uuid.uuid4().hex project_id = uuid.uuid4().hex domain_id = uuid.uuid4().hex ref = self.new_ref() self.assertRaises( exceptions.ValidationError, self.manager.grant, role=ref['id'], domain=domain_id, project=project_id, user=user_id) def test_domain_project_role_list_fails(self): user_id = uuid.uuid4().hex project_id = uuid.uuid4().hex domain_id = uuid.uuid4().hex self.assertRaises( exceptions.ValidationError, self.manager.list, domain=domain_id, project=project_id, user=user_id) def test_domain_project_role_check_fails(self): user_id = uuid.uuid4().hex project_id = uuid.uuid4().hex domain_id = uuid.uuid4().hex ref = 
self.new_ref() self.assertRaises( exceptions.ValidationError, self.manager.check, role=ref['id'], domain=domain_id, project=project_id, user=user_id) def test_domain_project_role_revoke_fails(self): user_id = uuid.uuid4().hex project_id = uuid.uuid4().hex domain_id = uuid.uuid4().hex ref = self.new_ref() self.assertRaises( exceptions.ValidationError, self.manager.revoke, role=ref['id'], domain=domain_id, project=project_id, user=user_id) def test_user_group_role_grant_fails(self): user_id = uuid.uuid4().hex group_id = uuid.uuid4().hex project_id = uuid.uuid4().hex ref = self.new_ref() self.assertRaises( exceptions.ValidationError, self.manager.grant, role=ref['id'], project=project_id, group=group_id, user=user_id) def test_user_group_role_list_fails(self): user_id = uuid.uuid4().hex group_id = uuid.uuid4().hex project_id = uuid.uuid4().hex self.assertRaises( exceptions.ValidationError, self.manager.list, project=project_id, group=group_id, user=user_id) def test_user_group_role_check_fails(self): user_id = uuid.uuid4().hex group_id = uuid.uuid4().hex project_id = uuid.uuid4().hex ref = self.new_ref() self.assertRaises( exceptions.ValidationError, self.manager.check, role=ref['id'], project=project_id, group=group_id, user=user_id) def test_user_group_role_revoke_fails(self): user_id = uuid.uuid4().hex group_id = uuid.uuid4().hex project_id = uuid.uuid4().hex ref = self.new_ref() self.assertRaises( exceptions.ValidationError, self.manager.revoke, role=ref['id'], project=project_id, group=group_id, user=user_id)
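
# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the tests above): the grant/check/revoke
# calls these tests exercise, issued against a live Identity v3 endpoint via
# admin-token auth. The endpoint, token and IDs are placeholders.

from keystoneclient.v3 import client

keystone = client.Client(token='ADMIN_TOKEN',
                         endpoint='http://localhost:35357/v3')

keystone.roles.grant(role='ROLE_ID', domain='DOMAIN_ID', user='USER_ID')
keystone.roles.check(role='ROLE_ID', domain='DOMAIN_ID', user='USER_ID')
keystone.roles.revoke(role='ROLE_ID', domain='DOMAIN_ID', user='USER_ID')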
# Copyright (c) 2015, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt

from __future__ import unicode_literals, absolute_import
import sys
import os
import json
import click
import hashlib
import cProfile
import StringIO
import pstats
import frappe
import frappe.utils
from frappe.utils import cint
from distutils.spawn import find_executable
from functools import wraps

click.disable_unicode_literals_warning = True


def pass_context(f):
	@wraps(f)
	def _func(ctx, *args, **kwargs):
		profile = ctx.obj['profile']
		if profile:
			pr = cProfile.Profile()
			pr.enable()

		ret = f(frappe._dict(ctx.obj), *args, **kwargs)

		if profile:
			pr.disable()
			s = StringIO.StringIO()
			ps = pstats.Stats(pr, stream=s)\
				.sort_stats('cumtime', 'tottime', 'ncalls')
			ps.print_stats()
			print s.getvalue()

		return ret

	return click.pass_context(_func)

def get_single_site(context):
	if not len(context.sites) == 1:
		print 'please select a site'
		sys.exit(1)
	site = context.sites[0]
	return site

def call_command(cmd, context):
	return click.Context(cmd, obj=context).forward(cmd)

@click.command('new-site')
@click.argument('site')
@click.option('--db-name', help='Database name')
@click.option('--mariadb-root-username', default='root', help='Root username for MariaDB')
@click.option('--mariadb-root-password', help='Root password for MariaDB')
@click.option('--admin-password', help='Administrator password for new site', default=None)
@click.option('--verbose', is_flag=True, default=False, help='Verbose')
@click.option('--force', help='Force restore if site/database already exists', is_flag=True, default=False)
@click.option('--source_sql', help='Initiate database with a SQL file')
@click.option('--install-app', multiple=True, help='Install app after installation')
def new_site(site, mariadb_root_username=None, mariadb_root_password=None, admin_password=None,
	verbose=False, install_apps=None, source_sql=None, force=None, install_app=None, db_name=None):
	"Install a new site"
	if not db_name:
		db_name = hashlib.sha1(site).hexdigest()[:10]

	frappe.init(site=site, new_site=True)

	_new_site(db_name, site, mariadb_root_username=mariadb_root_username,
		mariadb_root_password=mariadb_root_password, admin_password=admin_password,
		verbose=verbose, install_apps=install_app, source_sql=source_sql, force=force)

	if len(frappe.utils.get_sites()) == 1:
		use(site)

def _new_site(db_name, site, mariadb_root_username=None, mariadb_root_password=None, admin_password=None,
	verbose=False, install_apps=None, source_sql=None, force=False, reinstall=False):
	"Install a new Frappe site"
	from frappe.installer import install_db, make_site_dirs
	from frappe.installer import install_app as _install_app
	import frappe.utils.scheduler

	frappe.init(site=site)

	try:
		# enable scheduler post install?
		enable_scheduler = _is_scheduler_enabled()
	except:
		enable_scheduler = False

	install_db(root_login=mariadb_root_username, root_password=mariadb_root_password, db_name=db_name,
		admin_password=admin_password, verbose=verbose, source_sql=source_sql, force=force, reinstall=reinstall)
	make_site_dirs()
	_install_app("frappe", verbose=verbose, set_as_patched=not source_sql)

	if frappe.conf.get("install_apps"):
		for app in frappe.conf.install_apps:
			_install_app(app, verbose=verbose, set_as_patched=not source_sql)

	if install_apps:
		for app in install_apps:
			_install_app(app, verbose=verbose, set_as_patched=not source_sql)

	frappe.utils.scheduler.toggle_scheduler(enable_scheduler)
	scheduler_status = "disabled" if frappe.utils.scheduler.is_scheduler_disabled() else "enabled"
	print "*** Scheduler is", scheduler_status, "***"

	frappe.destroy()

def _is_scheduler_enabled():
	enable_scheduler = False
	try:
		frappe.connect()
		enable_scheduler = cint(frappe.db.get_single_value("System Settings", "enable_scheduler")) and True or False
	except:
		pass
	finally:
		frappe.db.close()

	return enable_scheduler

@click.command('restore')
@click.argument('sql-file-path')
@click.option('--mariadb-root-username', default='root', help='Root username for MariaDB')
@click.option('--mariadb-root-password', help='Root password for MariaDB')
@click.option('--db-name', help='Database name for site in case it is a new one')
@click.option('--admin-password', help='Administrator password for new site')
@click.option('--install-app', multiple=True, help='Install app after installation')
@pass_context
def restore(context, sql_file_path, mariadb_root_username=None, mariadb_root_password=None,
	db_name=None, verbose=None, install_app=None, admin_password=None, force=None):
	"Restore site database from an sql file"
	site = get_single_site(context)
	frappe.init(site=site)
	db_name = db_name or frappe.conf.db_name or hashlib.sha1(site).hexdigest()[:10]
	_new_site(db_name, site, mariadb_root_username=mariadb_root_username,
		mariadb_root_password=mariadb_root_password, admin_password=admin_password,
		verbose=context.verbose, install_apps=install_app, source_sql=sql_file_path,
		force=context.force)

@click.command('reinstall')
@pass_context
def reinstall(context):
	"Reinstall site ie. wipe all data and start over"
	site = get_single_site(context)
	try:
		frappe.init(site=site)
		frappe.connect()
		frappe.clear_cache()
		installed = frappe.get_installed_apps()
		frappe.clear_cache()
	except Exception:
		installed = []
	finally:
		if frappe.db:
			frappe.db.close()
		frappe.destroy()

	frappe.init(site=site)
	_new_site(frappe.conf.db_name, site, verbose=context.verbose, force=True,
		reinstall=True, install_apps=installed)

@click.command('install-app')
@click.argument('app')
@pass_context
def install_app(context, app):
	"Install a new app to site"
	from frappe.installer import install_app as _install_app

	for site in context.sites:
		frappe.init(site=site)
		frappe.connect()
		try:
			_install_app(app, verbose=context.verbose)
		finally:
			frappe.destroy()

@click.command('list-apps')
@pass_context
def list_apps(context):
	"List apps installed on site"
	site = get_single_site(context)
	frappe.init(site=site)
	frappe.connect()
	print "\n".join(frappe.get_installed_apps())
	frappe.destroy()

@click.command('add-system-manager')
@click.argument('email')
@click.option('--first-name')
@click.option('--last-name')
@pass_context
def add_system_manager(context, email, first_name, last_name):
	"Add a new system manager to a site"
	import frappe.utils.user
	for site in context.sites:
		frappe.connect(site=site)
		try:
			frappe.utils.user.add_system_manager(email, first_name, last_name)
			frappe.db.commit()
		finally:
			frappe.destroy()

@click.command('migrate')
@click.option('--rebuild-website', help="Rebuild webpages after migration")
@pass_context
def migrate(context, rebuild_website=False):
	"Run patches, sync schema and rebuild files/translations"
	import frappe.modules.patch_handler
	import frappe.model.sync
	from frappe.utils.fixtures import sync_fixtures
	import frappe.translate
	from frappe.desk.notifications import clear_notifications

	for site in context.sites:
		print 'Migrating', site
		frappe.init(site=site)
		frappe.connect()

		try:
			prepare_for_update()

			# run patches
			frappe.modules.patch_handler.run_all()
			# sync
			frappe.model.sync.sync_all(verbose=context.verbose)
			frappe.translate.clear_cache()
			sync_fixtures()

			clear_notifications()
		finally:
			frappe.publish_realtime("version-update")
			frappe.destroy()

	if rebuild_website:
		call_command(build_website, context)
	else:
		call_command(sync_www, context)

def prepare_for_update():
	from frappe.sessions import clear_global_cache
	clear_global_cache()

@click.command('run-patch')
@click.argument('module')
@pass_context
def run_patch(context, module):
	"Run a particular patch"
	import frappe.modules.patch_handler
	for site in context.sites:
		frappe.init(site=site)
		try:
			frappe.connect()
			frappe.modules.patch_handler.run_single(module, force=context.force)
		finally:
			frappe.destroy()

@click.command('reload-doc')
@click.argument('module')
@click.argument('doctype')
@click.argument('docname')
@pass_context
def reload_doc(context, module, doctype, docname):
	"Reload schema for a DocType"
	for site in context.sites:
		try:
			frappe.init(site=site)
			frappe.connect()
			frappe.reload_doc(module, doctype, docname, force=context.force)
			frappe.db.commit()
		finally:
			frappe.destroy()

@click.command('build')
@click.option('--make-copy', is_flag=True, default=False, help='Copy the files instead of symlinking')
@click.option('--verbose', is_flag=True, default=False, help='Verbose')
def build(make_copy=False, verbose=False):
	"Minify + concatenate JS and CSS files, build translations"
	import frappe.build
	import frappe
	frappe.init('')
	frappe.build.bundle(False, make_copy=make_copy, verbose=verbose)

@click.command('watch')
def watch():
	"Watch and concatenate JS and CSS files as and when they change"
	import frappe.build
	frappe.init('')
	frappe.build.watch(True)

@click.command('clear-cache')
@pass_context
def clear_cache(context):
	"Clear cache, doctype cache and defaults"
	import frappe.sessions
	import frappe.website.render
	from frappe.desk.notifications import clear_notifications
	for site in context.sites:
		try:
			frappe.connect(site)
			frappe.clear_cache()
			clear_notifications()
			frappe.website.render.clear_cache()
		finally:
			frappe.destroy()

@click.command('clear-website-cache')
@pass_context
def clear_website_cache(context):
	"Clear website cache"
	import frappe.website.render
	for site in context.sites:
		try:
			frappe.init(site=site)
			frappe.connect()
			frappe.website.render.clear_cache()
		finally:
			frappe.destroy()

@click.command('destroy-all-sessions')
@pass_context def destroy_all_sessions(context): "Clear sessions of all users (logs them out)" import frappe.sessions for site in context.sites: try: frappe.init(site=site) frappe.connect() frappe.sessions.clear_all_sessions() frappe.db.commit() finally: frappe.destroy() @click.command('sync-www') @click.option('--force', help='Rebuild all pages', is_flag=True, default=False) @pass_context def sync_www(context, force=False): "Sync files from static pages from www directory to Web Pages" from frappe.website import statics for site in context.sites: try: frappe.init(site=site) frappe.connect() statics.sync_statics(rebuild=force) frappe.db.commit() finally: frappe.destroy() @click.command('build-website') @pass_context def build_website(context): "Sync statics and clear cache" from frappe.website import render, statics for site in context.sites: try: frappe.init(site=site) frappe.connect() render.clear_cache() statics.sync(verbose=context.verbose).start(True) frappe.db.commit() finally: frappe.destroy() @click.command('make-docs') @pass_context @click.argument('app') @click.argument('docs_version') def make_docs(context, app, docs_version): "Setup docs in target folder of target app" from frappe.utils.setup_docs import setup_docs for site in context.sites: try: frappe.init(site=site) frappe.connect() make = setup_docs(app) make.build(docs_version) finally: frappe.destroy() @click.command('sync-docs') @pass_context @click.argument('app') def sync_docs(context, app): "Sync docs from /docs folder into the database (Web Page)" from frappe.utils.setup_docs import setup_docs for site in context.sites: try: frappe.init(site=site) frappe.connect() make = setup_docs(app) make.sync_docs() finally: frappe.destroy() @click.command('write-docs') @pass_context @click.argument('app') @click.argument('target') @click.option('--local', default=False, is_flag=True, help='Run app locally') def write_docs(context, app, target, local=False): "Setup docs in target folder of target app" from frappe.utils.setup_docs import setup_docs for site in context.sites: try: frappe.init(site=site) frappe.connect() make = setup_docs(app) make.make_docs(target, local) finally: frappe.destroy() @click.command('build-docs') @pass_context @click.argument('app') @click.option('--docs-version', default='current') @click.option('--target', default=None) @click.option('--local', default=False, is_flag=True, help='Run app locally') @click.option('--watch', default=False, is_flag=True, help='Watch for changes and rewrite') def build_docs(context, app, docs_version="current", target=None, local=False, watch=False): "Setup docs in target folder of target app" from frappe.utils import watch as start_watch if not target: target = os.path.abspath(os.path.join("..", "docs", app)) for site in context.sites: _build_docs_once(site, app, docs_version, target, local) if watch: def trigger_make(source_path, event_type): if "/templates/autodoc/" in source_path: _build_docs_once(site, app, docs_version, target, local) elif ("/docs.css" in source_path or "/docs/" in source_path or "docs.py" in source_path): _build_docs_once(site, app, docs_version, target, local, only_content_updated=True) apps_path = frappe.get_app_path("frappe", "..", "..") start_watch(apps_path, handler=trigger_make) def _build_docs_once(site, app, docs_version, target, local, only_content_updated=False): from frappe.utils.setup_docs import setup_docs try: frappe.init(site=site) frappe.connect() make = setup_docs(app) if not only_content_updated: make.build(docs_version) 
make.sync_docs() make.make_docs(target, local) finally: frappe.destroy() @click.command('reset-perms') @pass_context def reset_perms(context): "Reset permissions for all doctypes" from frappe.permissions import reset_perms for site in context.sites: try: frappe.init(site=site) frappe.connect() for d in frappe.db.sql_list("""select name from `tabDocType` where istable=0 and custom=0"""): frappe.clear_cache(doctype=d) reset_perms(d) finally: frappe.destroy() @click.command('execute') @click.argument('method') @click.option('--args') @click.option('--kwargs') @pass_context def execute(context, method, args=None, kwargs=None): "execute a function" for site in context.sites: try: frappe.init(site=site) frappe.connect() if args: args = eval(args) else: args = () if kwargs: kwargs = eval(args) else: kwargs = {} ret = frappe.get_attr(method)(*args, **kwargs) if frappe.db: frappe.db.commit() finally: frappe.destroy() if ret: print json.dumps(ret) @click.command('celery') @click.argument('args') def celery(args): "Run a celery command" python = sys.executable os.execv(python, [python, "-m", "frappe.celery_app"] + args.split()) @click.command('trigger-scheduler-event') @click.argument('event') @pass_context def trigger_scheduler_event(context, event): "Trigger a scheduler event" import frappe.utils.scheduler for site in context.sites: try: frappe.init(site=site) frappe.connect() frappe.utils.scheduler.trigger(site, event, now=context.force) finally: frappe.destroy() @click.command('enable-scheduler') @pass_context def enable_scheduler(context): "Enable scheduler" import frappe.utils.scheduler for site in context.sites: try: frappe.init(site=site) frappe.connect() frappe.utils.scheduler.enable_scheduler() frappe.db.commit() print "Enabled for", site finally: frappe.destroy() @click.command('disable-scheduler') @pass_context def disable_scheduler(context): "Disable scheduler" import frappe.utils.scheduler for site in context.sites: try: frappe.init(site=site) frappe.connect() frappe.utils.scheduler.disable_scheduler() frappe.db.commit() print "Disabled for", site finally: frappe.destroy() @click.command('export-doc') @click.argument('doctype') @click.argument('docname') @pass_context def export_doc(context, doctype, docname): "Export a single document to csv" import frappe.modules for site in context.sites: try: frappe.init(site=site) frappe.connect() frappe.modules.export_doc(doctype, docname) finally: frappe.destroy() @click.command('export-json') @click.argument('doctype') @click.argument('name') @click.argument('path') @pass_context def export_json(context, doctype, name, path): "Export doclist as json to the given path, use '-' as name for Singles." 
from frappe.core.page.data_import_tool import data_import_tool for site in context.sites: try: frappe.init(site=site) frappe.connect() data_import_tool.export_json(doctype, path, name=name) finally: frappe.destroy() @click.command('export-csv') @click.argument('doctype') @click.argument('path') @pass_context def export_csv(context, doctype, path): "Dump DocType as csv" from frappe.core.page.data_import_tool import data_import_tool for site in context.sites: try: frappe.init(site=site) frappe.connect() data_import_tool.export_csv(doctype, path) finally: frappe.destroy() @click.command('export-fixtures') @pass_context def export_fixtures(context): "export fixtures" from frappe.utils.fixtures import export_fixtures for site in context.sites: try: frappe.init(site=site) frappe.connect() export_fixtures() finally: frappe.destroy() @click.command('import-doc') @click.argument('path') @pass_context def import_doc(context, path, force=False): "Import (insert/update) doclist. If the argument is a directory, all files ending with .json are imported" from frappe.core.page.data_import_tool import data_import_tool for site in context.sites: try: frappe.init(site=site) frappe.connect() data_import_tool.import_doc(path, overwrite=context.force) finally: frappe.destroy() @click.command('import-csv') @click.argument('path') @click.option('--only-insert', default=False, is_flag=True, help='Do not overwrite existing records') @click.option('--submit-after-import', default=False, is_flag=True, help='Submit document after importing it') @click.option('--ignore-encoding-errors', default=False, is_flag=True, help='Ignore encoding errors while coverting to unicode') @pass_context def import_csv(context, path, only_insert=False, submit_after_import=False, ignore_encoding_errors=False): "Import CSV using data import tool" from frappe.core.page.data_import_tool import importer from frappe.utils.csvutils import read_csv_content site = get_single_site(context) with open(path, 'r') as csvfile: content = read_csv_content(csvfile.read()) frappe.init(site=site) frappe.connect() try: importer.upload(content, submit_after_import=submit_after_import, ignore_encoding_errors=ignore_encoding_errors, overwrite=not only_insert, via_console=True) frappe.db.commit() except Exception: print frappe.get_traceback() frappe.destroy() @click.command('bulk-rename') @click.argument('doctype') @click.argument('path') @pass_context def _bulk_rename(context, doctype, path): "Rename multiple records via CSV file" from frappe.model.rename_doc import bulk_rename from frappe.utils.csvutils import read_csv_content site = get_single_site(context) with open(path, 'r') as csvfile: rows = read_csv_content(csvfile.read()) frappe.init(site=site) frappe.connect() bulk_rename(doctype, rows, via_console = True) frappe.destroy() # translation @click.command('build-message-files') @pass_context def build_message_files(context): "Build message files for translation" import frappe.translate for site in context.sites: try: frappe.init(site=site) frappe.connect() frappe.translate.rebuild_all_translation_files() finally: frappe.destroy() @click.command('get-untranslated') @click.argument('lang') @click.argument('untranslated_file') @click.option('--all', default=False, is_flag=True, help='Get all message strings') @pass_context def get_untranslated(context, lang, untranslated_file, all=None): "Get untranslated strings for language" import frappe.translate site = get_single_site(context) try: frappe.init(site=site) frappe.connect() 
frappe.translate.get_untranslated(lang, untranslated_file, get_all=all) finally: frappe.destroy() @click.command('update-translations') @click.argument('lang') @click.argument('untranslated_file') @click.argument('translated-file') @pass_context def update_translations(context, lang, untranslated_file, translated_file): "Update translated strings" import frappe.translate site = get_single_site(context) try: frappe.init(site=site) frappe.connect() frappe.translate.update_translations(lang, untranslated_file, translated_file) finally: frappe.destroy() @click.command('set-admin-password') @click.argument('admin-password') @pass_context def set_admin_password(context, admin_password): "Set Administrator password for a site" import getpass for site in context.sites: try: frappe.init(site=site) while not admin_password: admin_password = getpass.getpass("Administrator's password for {0}: ".format(site)) frappe.connect() frappe.db.sql("""update __Auth set `password`=password(%s) where user='Administrator'""", (admin_password,)) frappe.db.commit() admin_password = None finally: frappe.destroy() @click.command('mysql') @pass_context def mysql(context): "Start Mariadb console for a site" site = get_single_site(context) frappe.init(site=site) msq = find_executable('mysql') os.execv(msq, [msq, '-u', frappe.conf.db_name, '-p'+frappe.conf.db_password, frappe.conf.db_name, '-h', frappe.conf.db_host or "localhost", "-A"]) @click.command('console') @pass_context def console(context): "Start ipython console for a site" site = get_single_site(context) frappe.init(site=site) frappe.connect() frappe.local.lang = frappe.db.get_default("lang") import IPython IPython.embed() @click.command('run-tests') @click.option('--app') @click.option('--doctype') @click.option('--test', multiple=True) @click.option('--driver') @click.option('--module') @pass_context def run_tests(context, app=None, module=None, doctype=None, test=(), driver=None): "Run tests" import frappe.test_runner from frappe.utils import sel tests = test site = get_single_site(context) frappe.init(site=site) if frappe.conf.run_selenium_tests and False: sel.start(context.verbose, driver) try: ret = frappe.test_runner.main(app, module, doctype, context.verbose, tests=tests, force=context.force) if len(ret.failures) == 0 and len(ret.errors) == 0: ret = 0 finally: pass if frappe.conf.run_selenium_tests: sel.close() sys.exit(ret) @click.command('serve') @click.option('--port', default=8000) @click.option('--profile', is_flag=True, default=False) @pass_context def serve(context, port=None, profile=False, sites_path='.', site=None): "Start development web server" if not context.sites: site = None else: site = context.sites[0] import frappe.app frappe.app.serve(port=port, profile=profile, site=site, sites_path='.') @click.command('request') @click.argument('args') @pass_context def request(context, args): "Run a request as an admin" import frappe.handler import frappe.api for site in context.sites: try: frappe.init(site=site) frappe.connect() if "?" 
in args: frappe.local.form_dict = frappe._dict([a.split("=") for a in args.split("?")[-1].split("&")]) else: frappe.local.form_dict = frappe._dict() if args.startswith("/api/method"): frappe.local.form_dict.cmd = args.split("?")[0].split("/")[-1] frappe.handler.execute_cmd(frappe.form_dict.cmd) print frappe.response finally: frappe.destroy() @click.command('doctor') def doctor(): "Get diagnostic info about background workers" from frappe.utils.doctor import doctor as _doctor return _doctor() @click.command('celery-doctor') @click.option('--site', help='site name') def celery_doctor(site=None): "Get diagnostic info about background workers" from frappe.utils.doctor import celery_doctor as _celery_doctor frappe.init('') return _celery_doctor(site=site) @click.command('purge-pending-tasks') @click.option('--site', help='site name') @click.option('--event', default=None, help='one of "all", "weekly", "monthly", "hourly", "daily", "weekly_long", "daily_long"') def purge_all_tasks(site=None, event=None): "Purge any pending periodic tasks, if event option is not given, it will purge everything for the site" from frappe.utils.doctor import purge_pending_tasks frappe.init(site or '') count = purge_pending_tasks(event=None, site=None) print "Purged {} tasks".format(count) @click.command('dump-queue-status') def dump_queue_status(): "Dump detailed diagnostic infomation for task queues in JSON format" frappe.init('') from frappe.utils.doctor import dump_queue_status as _dump_queue_status, inspect_queue print json.dumps(_dump_queue_status(), indent=1) print inspect_queue() @click.command('make-app') @click.argument('destination') @click.argument('app_name') def make_app(destination, app_name): from frappe.utils.boilerplate import make_boilerplate make_boilerplate(destination, app_name) @click.command('use') @click.argument('site') def _use(site, sites_path='.'): use(site, sites_path=sites_path) def use(site, sites_path='.'): with open(os.path.join(sites_path, "currentsite.txt"), "w") as sitefile: sitefile.write(site) @click.command('backup') @click.option('--with-files', default=False, is_flag=True, help="Take backup with files") @pass_context def backup(context, with_files=False, backup_path_db=None, backup_path_files=None, backup_path_private_files=None, quiet=False): "Backup" from frappe.utils.backups import scheduled_backup verbose = context.verbose for site in context.sites: frappe.init(site=site) frappe.connect() odb = scheduled_backup(ignore_files=not with_files, backup_path_db=backup_path_db, backup_path_files=backup_path_files, backup_path_private_files=backup_path_private_files, force=True) if verbose: from frappe.utils import now print "database backup taken -", odb.backup_path_db, "- on", now() if with_files: print "files backup taken -", odb.backup_path_files, "- on", now() print "private files backup taken -", odb.backup_path_private_files, "- on", now() frappe.destroy() @click.command('remove-from-installed-apps') @click.argument('app') @pass_context def remove_from_installed_apps(context, app): from frappe.installer import remove_from_installed_apps for site in context.sites: try: frappe.init(site=site) frappe.connect() remove_from_installed_apps(app) finally: frappe.destroy() @click.command('uninstall-app') @click.argument('app') @click.option('--dry-run', help='List all doctypes that will be deleted', is_flag=True, default=False) @pass_context def uninstall(context, app, dry_run=False): from frappe.installer import remove_app for site in context.sites: try: frappe.init(site=site) 
frappe.connect() remove_app(app, dry_run) finally: frappe.destroy() def move(dest_dir, site): import os if not os.path.isdir(dest_dir): raise Exception, "destination is not a directory or does not exist" frappe.init(site) old_path = frappe.utils.get_site_path() new_path = os.path.join(dest_dir, site) # check if site dump of same name already exists site_dump_exists = True count = 0 while site_dump_exists: final_new_path = new_path + (count and str(count) or "") site_dump_exists = os.path.exists(final_new_path) count = int(count or 0) + 1 os.rename(old_path, final_new_path) frappe.destroy() return final_new_path @click.command('set-config') @click.argument('key') @click.argument('value') @pass_context def set_config(context, key, value): from frappe.installer import update_site_config for site in context.sites: frappe.init(site=site) update_site_config(key, value) frappe.destroy() @click.command('drop-site') @click.argument('site') @click.option('--root-login', default='root') @click.option('--root-password') def drop_site(site, root_login='root', root_password=None): from frappe.installer import get_current_host, make_connection from frappe.model.db_schema import DbManager from frappe.utils.backups import scheduled_backup frappe.init(site=site) frappe.connect() scheduled_backup(ignore_files=False, force=True) db_name = frappe.local.conf.db_name frappe.local.db = make_connection(root_login, root_password) dbman = DbManager(frappe.local.db) dbman.delete_user(db_name, get_current_host()) dbman.drop_database(db_name) archived_sites_dir = os.path.join(frappe.get_app_path('frappe'), '..', '..', '..', 'archived_sites') if not os.path.exists(archived_sites_dir): os.mkdir(archived_sites_dir) move(archived_sites_dir, site) @click.command('version') @pass_context def get_version(context): frappe.init(site=context.sites[0]) for m in sorted(frappe.local.app_modules.keys()): module = frappe.get_module(m) if hasattr(module, "__version__"): print "{0} {1}".format(m, module.__version__) # commands = [ # new_site, # restore, # install_app, # run_patch, # migrate, # add_system_manager, # celery # ] commands = [ new_site, restore, reinstall, install_app, list_apps, add_system_manager, migrate, run_patch, reload_doc, build, watch, clear_cache, clear_website_cache, destroy_all_sessions, sync_www, build_website, make_docs, sync_docs, write_docs, build_docs, reset_perms, execute, celery, trigger_scheduler_event, enable_scheduler, disable_scheduler, export_doc, export_json, export_csv, export_fixtures, import_doc, import_csv, _bulk_rename, build_message_files, get_untranslated, update_translations, set_admin_password, mysql, run_tests, serve, request, doctor, celery_doctor, purge_all_tasks, dump_queue_status, console, make_app, _use, backup, remove_from_installed_apps, uninstall, drop_site, set_config, get_version, ]
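
# The module above ends by collecting every click command into `commands`,
# which the caller is expected to register on a click group. The sketch below
# shows one plausible wiring, including the ctx.obj layout that pass_context
# and get_single_site read (sites/profile/verbose/force). The group name
# `cli` and its options are assumptions for illustration, not the actual
# bench entry point.

import click


@click.group()
@click.option('--site', multiple=True, help='Site(s) to operate on')
@click.option('--profile', is_flag=True, default=False,
    help='Profile the command with cProfile')
@click.option('--verbose', is_flag=True, default=False)
@click.option('--force', is_flag=True, default=False)
@click.pass_context
def cli(ctx, site, profile, verbose, force):
    """Hypothetical entry point that builds the ctx.obj consumed by pass_context."""
    ctx.obj = {
        'sites': list(site),
        'profile': profile,
        'verbose': verbose,
        'force': force,
    }


def register_commands(group, command_list):
    """Attach every command in `command_list` to the given click group."""
    for command in command_list:
        group.add_command(command)


# register_commands(cli, commands)
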
#======================================================================= # cpp_helpers.py #======================================================================= from __future__ import print_function from pymtl import * from ...model.signal_lists import PortList from cffi import FFI # Create position independent code #cc_src = "g++ -O3 -fPIC -c -o {in}.cc {in}.h {out.o}" #cc_lib = "g++ -shared -o libCSim.o {out}.so" #----------------------------------------------------------------------- # gen_cppsim #----------------------------------------------------------------------- def gen_cppsim( clib, cdef ): ffi = FFI() ffi.cdef( cdef ) cmodule = ffi.dlopen( clib ) return cmodule, ffi #return wrap( cmodule ) #----------------------------------------------------------------------- # gen_cdef #----------------------------------------------------------------------- # Create the string passed into ffi.cdef def gen_cdef( cycle_params, top_ports ): str_ = '\n' str_ += 'typedef struct {\n' for name, net, type_ in top_ports: str_ += ' {} {}; // {}\n'.format( type_, name[4:], net ) str_ += '} iface_t;\n\n' str_ += 'void cycle({});\n\n'.format('\n'+cycle_params+'\n') str_ += 'unsigned int ncycles;\n' return str_ #----------------------------------------------------------------------- # gen_cheader #----------------------------------------------------------------------- # Create the header for the simulator def gen_cheader( cycle_params, top_ports ): str_ = '\n' str_ += 'extern "C" {\n' str_ += ' typedef struct {\n' for name, net, type_ in top_ports: str_ += ' {} {}; // {}\n'.format( type_, name[4:], net ) str_ += ' } iface_t;\n\n' str_ += ' extern void cycle({} );\n'.format('\n'+cycle_params+'\n') str_ += ' extern unsigned int ncycles;\n' str_ += '};\n' return str_ #----------------------------------------------------------------------- # gen_pywrapper #----------------------------------------------------------------------- # Create the header for the simulator def gen_pywrapper( top_inports, top_outports ): def name_splitter( name ): sig, idx = name.split('_IDX') sig = [sig] + idx.split('_') nonidx, idx = [], [] for s in sig: (idx if s.isdigit() else nonidx).append(s) sig = '_'.join(nonidx) idx = idx[0] return sig, idx #--------------------------------------------------------------------- # CSimWrapper #--------------------------------------------------------------------- # Inner class for generated python wrapper. # TODO: better way? 
class CSimWrapper( object ): def __init__( self, cmodule, ffi ): #self._model = model self._cmodule = cmodule self._ffi = ffi self._top = ffi.new("iface_t *") #----------------------------------------------------------------- # CSimWrapper #----------------------------------------------------------------- # Utilty ListWrapper class for lists of ports class ListWrapper( object ): def __init__( self, top ): #self._top = top self._get = {} self._set = {} def __getitem__( self, key ): return self._get[ key ]( self ) def __setitem__( self, key, value ): self._set[ key ]( self, value ) #----------------------------------------------------------------- # create_fget #----------------------------------------------------------------- # Utilty method for creating fget def create_fget( top, name ): return lambda self: getattr( top[0], name ) #----------------------------------------------------------------- # create_fset #----------------------------------------------------------------- # Utilty method for creating fset def create_fset( top, name ): return lambda self, value : setattr( top[0], name, value ) # Add properties for all cffi exposed toplevel ports for fullname, net, type_ in top_inports[2:] + top_outports: name = fullname[4:] # This signal is a list of ports, use a ListWrapper object ) if '_IDX' in name: sig, idx = name_splitter(name) if not hasattr( self, sig ): setattr( self, sig, ListWrapper( self._top ) ) getattr( self, sig )._get[int(idx)] = create_fget( self._top, name ) getattr( self, sig )._set[int(idx)] = create_fset( self._top, name ) # This signal is a single port, create a property else: fget = create_fget( self._top, name ) fset = create_fset( self._top, name ) setattr(self.__class__, name, property(fget, fset) ) def reset( self ): self.cycle( reset=1 ) self.cycle( reset=1 ) def cycle( self, clk=0, reset=0 ): self._cmodule.cycle( clk, reset, self._top ) @property def ncycles( self ): return self._cmodule.ncycles return CSimWrapper #----------------------------------------------------------------------- # create_cpp_py_wrapper #----------------------------------------------------------------------- def create_cpp_py_wrapper( model, cdef, lib_file, wrapper_filename ): template_dir = os.path.dirname( os.path.abspath( __file__ ) ) template_filename = template_dir + os.path.sep + 'cpp_wrapper.templ.py' # translate pymtl to cpp, cdef # compile cpp to so # create so,cdef w cffi # create pymtl_wrap w pymtl_cppnames port_defs = [] set_inputs = [] set_outputs = [] for x in model.get_ports( preserve_hierarchy=True ): recurse_port_hierarchy( x, port_defs ) for x in model.get_inports(): if x.name in ['clk', 'reset']: continue # TODO: remove me! 
decl = "lambda: setattr( s._top, '{}', s.{}.uint() )" \ .format( x.cpp_name[4:], x.name ) call = "s._cffi_update[ s.{} ] = {}".format( x.name, decl ) set_inputs.append( call ) for x in model.get_outports(): decl = "s.{}.value = s._top.{}".format( x.name, x.cpp_name[4:] ) set_outputs.append( decl ) # pretty printing indent_four = '\n ' indent_six = '\n ' # create source with open( template_filename, 'r' ) as template, \ open( wrapper_filename, 'w' ) as output: py_src = template.read() py_src = py_src.format( model_name = model.class_name, cdef = cdef, lib_file = lib_file, port_defs = indent_four.join( port_defs ), set_inputs = indent_four.join( set_inputs ), set_outputs = indent_six .join( set_outputs ), ) output.write( py_src ) #print( py_src ) #----------------------------------------------------------------------- # recurse_port_hierarchy #----------------------------------------------------------------------- def recurse_port_hierarchy( p, list_ ): if isinstance( p, PortList ): list_.append( "s.{} = [None]*{}".format( p.name, len(p) ) ) #for child in p.get_ports(): for child in p._ports: recurse_port_hierarchy( child, list_ ) elif isinstance( p, PortBundle ): list_.append( "s.{} = BundleProxy()".format( p.name ) ) list_.append( "s.{}._ports = []".format( p.name ) ) for child in p.get_ports(): recurse_port_hierarchy( child, list_ ) list_.append( "s.{}._ports.append( s.{} )".format( p.name, child.name ) ) temp = child.name.split('.')[-1] list_.append( "s.{}.name = '{}'".format( child.name, temp ) ) # TODO: fix msg type elif isinstance( p, InPort ): if isinstance( p.dtype, BitStruct ): msg = p.dtype list_.append( "from {} import {}".format( msg._module, msg._classname ) ) list_.append( "s.{} = InPort( {} )".format( p.name, msg._instantiate ) ) else: list_.append( "s.{} = InPort( {} )".format( p.name, p.nbits ) ) # TODO: fix msg type elif isinstance( p, OutPort ): if isinstance( p.dtype, BitStruct ): msg = p.dtype list_.append( "from {} import {}".format( msg._module, msg._classname ) ) list_.append( "s.{} = OutPort( {} )".format( p.name, msg._instantiate ) ) else: list_.append( "s.{} = OutPort( {} )".format( p.name, p.nbits ) )
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: # Test numpy bindings import numpy as np from .. import (c_types, fff_type, npy_type, copy_vector, pass_matrix, pass_vector, pass_array, pass_vector_via_iterator, sum_via_iterators, copy_via_iterators) from nose.tools import assert_equal from numpy.testing import assert_almost_equal, assert_array_equal MAX_TEST_SIZE = 30 def random_shape(size): """ Output random dimensions in the range (2, MAX_TEST_SIZE) """ aux = np.random.randint(MAX_TEST_SIZE-1, size=size) + 2 if size==1: return aux else: return tuple(aux) # # Test type conversions # def test_type_conversions_to_fff(): # use np.sctypes for testing numpy types, np.typeDict.values # contains a lot of duplicates. There are 140 values in # np.typeDict, but only 21 unique numpy types. But only 11 fff # types in c_types. for type_key in np.sctypes: for npy_t in np.sctypes[type_key]: t, nbytes = fff_type(np.dtype(npy_t)) if not t == 'unknown type': yield assert_equal, nbytes, np.dtype(npy_t).itemsize def test_type_conversions_in_C(): for t in c_types: npy_t, nbytes = npy_type(t) yield assert_equal, npy_t, t # # Test bindings # def _test_copy_vector(x): # use fff y0 = copy_vector(x, 0) # use numpy y1 = copy_vector(x, 1) yield assert_equal, y0, x yield assert_equal, y1, x def test_copy_vector_contiguous(): x = (1000*np.random.rand(1e6)).astype('int32') _test_copy_vector(x) def test_copy_vector_strided(): x0 = (1000*np.random.rand(2e6)).astype('int32') x = x0[::2] _test_copy_vector(x) """ def test_copy_vector_int32(): x = np.random.rand(1e6).astype('int32') print('int32 buffer copy') _test_copy_vector(x) def test_copy_vector_uint8(): x = np.random.rand(1e6).astype('uint8') print('uint8 buffer copy') _test_copy_vector(x) """ def _test_pass_vector(x): y = pass_vector(x) assert_array_equal(y, x) def test_pass_vector(): x = np.random.rand(random_shape(1))-.5 _test_pass_vector(x) def test_pass_vector_int32(): x = (1000*(np.random.rand(random_shape(1))-.5)).astype('int32') _test_pass_vector(x) def test_pass_vector_uint8(): x = (256*(np.random.rand(random_shape(1)))).astype('uint8') _test_pass_vector(x) def _test_pass_matrix(x): y = pass_matrix(x) yield assert_equal, y, x y = pass_matrix(x.T) yield assert_equal, y, x.T def test_pass_matrix(): d0, d1 = random_shape(2) x = np.random.rand(d0, d1)-.5 _test_pass_matrix(x) def test_pass_matrix_int32(): d0, d1 = random_shape(2) x = (1000*(np.random.rand(d0, d1)-.5)).astype('int32') _test_pass_matrix(x) def test_pass_matrix_uint8(): d0, d1 = random_shape(2) x = (256*(np.random.rand(d0, d1))).astype('uint8') _test_pass_matrix(x) def _test_pass_array(x): y = pass_array(x) yield assert_equal, y, x y = pass_array(x.T) yield assert_equal, y, x.T def test_pass_array(): d0, d1, d2, d3 = random_shape(4) x = np.random.rand(d0, d1, d2, d3)-.5 _test_pass_array(x) def test_pass_array_int32(): d0, d1, d2, d3 = random_shape(4) x = (1000*(np.random.rand(d0, d1, d2, d3)-.5)).astype('int32') _test_pass_array(x) def test_pass_array_uint8(): d0, d1, d2, d3 = random_shape(4) x = (256*(np.random.rand(d0, d1, d2, d3))).astype('uint8') _test_pass_array(x) # # Multi-iterator testing # def _test_pass_vector_via_iterator(X, pos=0): """ Assume X.ndim == 2 """ # axis == 0 x = pass_vector_via_iterator(X, axis=0, niters=pos) yield assert_equal, x, X[:, pos] # axis == 1 x = pass_vector_via_iterator(X, axis=1, niters=pos) yield assert_equal, x, X[pos, :] def test_pass_vector_via_iterator(): d0, d1 = random_shape(2) X = 
np.random.rand(d0, d1)-.5 _test_pass_vector_via_iterator(X) def test_pass_vector_via_iterator_int32(): d0, d1 = random_shape(2) X = (1000*(np.random.rand(d0, d1)-.5)).astype('int32') _test_pass_vector_via_iterator(X) def test_pass_vector_via_iterator_uint8(): d0, d1 = random_shape(2) X = (100*(np.random.rand(d0, d1))).astype('uint8') _test_pass_vector_via_iterator(X) def test_pass_vector_via_iterator_shift(): d0, d1 = random_shape(2) X = np.random.rand(d0, d1)-.5 _test_pass_vector_via_iterator(X, pos=1) def test_pass_vector_via_iterator_shift_int32(): d0, d1 = random_shape(2) X = (1000*(np.random.rand(d0, d1)-.5)).astype('int32') _test_pass_vector_via_iterator(X, pos=1) def test_pass_vector_via_iterator_shift_uint8(): d0, d1 = random_shape(2) X = (100*(np.random.rand(d0, d1))).astype('uint8') _test_pass_vector_via_iterator(X, pos=1) def _test_copy_via_iterators(Y): for axis in range(4): Z = copy_via_iterators(Y, axis) yield assert_equal, Z, Y ZT = copy_via_iterators(Y.T, axis) yield assert_equal, ZT, Y.T def test_copy_via_iterators(): d0, d1, d2, d3 = random_shape(4) Y = np.random.rand(d0, d1, d2, d3) _test_copy_via_iterators(Y) def test_copy_via_iterators_int32(): d0, d1, d2, d3 = random_shape(4) Y = (1000*(np.random.rand(d0, d1, d2, d3)-.5)).astype('int32') _test_copy_via_iterators(Y) def test_copy_via_iterators_uint8(): d0, d1, d2, d3 = random_shape(4) Y = (256*(np.random.rand(d0, d1, d2, d3))).astype('uint8') _test_copy_via_iterators(Y) def _test_sum_via_iterators(Y): for axis in range(4): Z = sum_via_iterators(Y, axis) yield assert_almost_equal, Z, Y.sum(axis) ZT = sum_via_iterators(Y.T, axis) yield assert_almost_equal, ZT, Y.T.sum(axis) def test_sum_via_iterators(): d0, d1, d2, d3 = random_shape(4) Y = np.random.rand(d0, d1, d2, d3) _test_sum_via_iterators(Y) def test_sum_via_iterators_int32(): d0, d1, d2, d3 = random_shape(4) Y = (1000*(np.random.rand(d0, d1, d2, d3)-.5)).astype('int32') _test_sum_via_iterators(Y) def test_sum_via_iterators_uint8(): d0, d1, d2, d3 = random_shape(4) Y = (256*(np.random.rand(d0, d1, d2, d3))).astype('uint8') _test_sum_via_iterators(Y) if __name__ == "__main__": import nose nose.run(argv=['', __file__])
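
# The _test_copy_vector helpers above feed both contiguous and strided buffers
# through the C bindings and compare against the original array. The
# pure-numpy sketch below restates the property being checked (a hypothetical
# reference harness, not part of the fff bindings): a copy routine must be
# insensitive to strides.

def reference_copy_check(copy_func):
    """Check a copy routine on a contiguous and a strided int32 vector."""
    x_contig = (1000 * np.random.rand(10**6)).astype('int32')
    x_strided = (1000 * np.random.rand(2 * 10**6)).astype('int32')[::2]
    assert not x_strided.flags['C_CONTIGUOUS']
    for x in (x_contig, x_strided):
        y = copy_func(x)
        assert_array_equal(y, x)
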
# -*- coding: utf-8 -*- """ The MIT License (MIT) Copyright (c) 2017 SML Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ from random import choice from random import shuffle import datetime import discord import string import os import itertools from .utils.dataIO import dataIO from __main__ import send_cmd_help from cogs.utils.chat_formatting import pagify from discord.ext import commands from discord.ext.commands import Context CRDATA_PATH = os.path.join("data", "draftroyale", "clashroyale.json") SETTINGS_PATH = os.path.join("data", "draftroyale", "settings.json") EMOJI_JSON = os.path.join("data", "draftroyale", "emojis.json") HELP_TEXT = """ **Draft Royale: Clash Royale draft system** 1. Start a draft `!draft start` 2. Pick players `!draft players [username...]` 3. Pick cards `!draft pick` """ def grouper(iterable, n, fillvalue=None): """Collect data into fixed-length chunks or blocks. grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx """ args = [iter(iterable)] * n return itertools.zip_longest(*args, fillvalue=fillvalue) class Draft: """Clash Royale drafts.""" def __init__(self, admin: discord.Member=None): """Constructor. Args: admin (discord.Member): administrator of the draft """ self.admin = admin class DraftRoyale: """Clash Royale drafting bot. This cog is written to facilitate drafting in Clash Royale. Types of drafts --------------- - 4 players (10 cards) - 8 players (8 cards) - This system however will allow any number of players (2-8) with number of cards set to card count // players Bans ---- Some drafts have bans. For example, if graveyard is picked as a banned card, then no one can pick it.pick Drafting order -------------- Most drafts are snake drafts. They go from first to last then backwards. The first and last player gets two picks in a row. 1 2 3 4 4 3 2 1 1 2 3 4 etc. 
Required files -------------- - data/clashroyale.json: card data - data/settings.json: technically not needed but good to have a human-readable history log """ def __init__(self, bot): """Constructor.""" self.bot = bot self.crdata_path = CRDATA_PATH self.settings_path = SETTINGS_PATH self.crdata = dataIO.load_json(self.crdata_path) self.settings = dataIO.load_json(self.settings_path) self.emojis = dataIO.load_json(EMOJI_JSON) # init card data self.cards = [] self.cards_abbrev = {} self.min_players = 2 self.max_players = 8 self.prompt_timeout = 60.0 self.init() self.init_card_data() def init_card_data(self): """Initialize card data and popularize acceptable abbreviations.""" for card_key, card_value in self.crdata["Cards"].items(): self.cards.append(card_key) self.cards_abbrev[card_key] = card_key if card_key.find('-'): self.cards_abbrev[card_key.replace('-', '')] = card_key aka_list = card_value["aka"] for aka in aka_list: self.cards_abbrev[aka] = card_key if aka.find('-'): self.cards_abbrev[aka.replace('-', '')] = card_key def init(self): """Abort all operations.""" self.active_draft = None self.admin = None self.players = [] self.time_id = datetime.datetime.utcnow().isoformat() self.picked_cards = {} self.pick_order = [] self.is_snake_draft = True self.pick_player_id = 0 self.pick_direction_is_forward = True @commands.group(pass_context=True, no_pm=True) async def draft(self, ctx: Context): """Clash Royale Draft System. Full help !draft help """ if ctx.invoked_subcommand is None: await send_cmd_help(ctx) @draft.command(name="help", pass_context=True) async def draft_help(self, ctx: Context): """Display help for drating.""" await self.bot.say(HELP_TEXT) @draft.command(name="start", pass_context=True, no_pm=True) async def draft_start(self, ctx: Context): """Initialize a draft. The author who type this command will be designated as the owner / admin of the draft. """ await self.bot.say("Draft Royale") if self.active_draft is not None: await self.bot.say("An active draft is going on. 
" "Please finish the current draft " "before starting another.") return self.init() # server = ctx.message.server self.admin = ctx.message.author await self.bot.say( f"**Draft Admin** set to {self.admin.display_name}.") self.active_draft = { "admin_id": self.admin.id, "players": [] } if "drafts" not in self.settings: self.settings["drafts"] = {} self.settings["drafts"][self.time_id] = self.active_draft self.save_settings() await self.bot.say(HELP_TEXT) # await self.bot.say( # f"{self.admin.mention} Run `!draft players` to set the players.") @draft.command(name="players", pass_context=True, no_pm=True) async def draft_players(self, ctx: Context, *players: discord.Member): """Set the players playing in the draft.""" author = ctx.message.author server = ctx.message.server if author != self.admin: await self.bot.say("Players must be set by the draft admin.") await self.bot.say(f"Draft admin: {self.admin.display_name}") return if players is None: await send_cmd_help(ctx) return # reset players if already set self.players = [] for player in players: if player not in server.members: await self.bot.say( f"{player.display_name} is not on this server.") else: self.players.append(player) await self.list_players() self.save_players_settings() @draft.command(name="random", pass_context=True, no_pm=True) async def draft_random(self, ctx: Context): """Randomize the player order.""" if ctx.message.author != self.admin: msg = ( f"Only the draft admin, {self.admin.display_name}, " f"is allowed to randomize player order.") await self.bot.say(msg) return shuffle(self.players) self.active_draft["players"] = [] await self.list_players() self.save_players_settings() @draft.command(name="snake", pass_context=True, no_pm=True) async def draft_snake(self, ctx: Context, on_off: bool): """Enable / disable snake draft mode. Example: !draft snake 1 !draft snake 0 """ self.is_snake_draft = on_off if on_off: await self.bot.say("Snake draft enabled.") else: await self.bot.say("Snake draft disabled.") @draft.command(name="status", pass_context=True, no_pm=True) async def draft_status(self, ctx: Context): """Display status of the current draft.""" if self.admin is None: await self.bot.say("No active draft.") return data = discord.Embed( title="Clash Royale Drafting System", description="Current Status") data.add_field(name="Admin", value=self.admin.display_name) data.add_field(name="Snake Draft", value=self.is_snake_draft) data.add_field( name="Players", value="\n".join([f"+ {player.display_name}" for player in self.players]), inline=False) data.add_field( name="Available Cards", value=", ".join(self.get_available_card_names()), inline=False) try: await self.bot.say(embed=data) except discord.HTTPException: await self.bot.say("I need the `Embed links` permission " "to send this") @draft.command(name="cards", pass_context=True, no_pm=True) async def draft_cards(self, ctx: Context, sort: str=None): """Display available cards for picking. Optionally set sort order. 
""" out = [] out.append("**Available cards**") card_names = [ # self.card_key_to_name(key) self.card_key_to_emoji(key) for key in self.cards if key not in self.picked_cards] # out.append(", ".join(card_names)) out.append(" ".join(card_names)) for page in pagify("\n".join(out), shorten_by=12): await self.bot.say(page) @draft.command(name="pick", pass_context=True, no_pm=True) async def draft_pick(self, ctx: Context): """Player pick cards.""" player = self.get_next_player_to_pick() await self.bot.say(f"Next player to pick: {player.display_name}") @draft.command(name="abort", pass_context=True, no_pm=True) async def draft_abort(self, ctx: Context): """Abort an active draft.""" self.init() await self.bot.say("Draft Royale aborted.") @draft.command(name="listplayers", pass_context=True, no_pm=True) async def draft_listplayers(self, ctx: Context): """List players in the play order.""" await self.list_players() def get_next_player_to_pick(self): """Return the next player to pick cards.""" player = self.players[self.pick_player_id] next_id = self.pick_player_id if self.pick_direction_is_forward: next_id += 1 else: next_id -= 1 if next_id >= len(self.players): if self.is_snake_draft: next_id = len(self.players) - 1 self.pick_direction_is_forward = False else: next_id = 0 elif next_id < 0: self.pick_direction_is_forward = True next_id = 0 self.pick_player_id = next_id return player async def list_players(self): """List the players in the play order.""" out = [] out.append("Players for this draft:") for player in self.players: out.append(f"+ {player.display_name}") await self.bot.say("\n".join(out)) def save_players_settings(self): """Save players settings.""" self.active_draft["players"] = [] for player in self.players: self.active_draft["players"].append({ "user_id": player.id, "user_name": player.display_name}) self.save_settings() def save_settings(self): """Save settings to disk.""" dataIO.save_json(self.settings_path, self.settings) def card_key_to_name(self, card_key: str): """Return card name from card key.""" return string.capwords(card_key.replace('-', ' ')) def get_available_cards(self): """Return list of available cards that are not picked yet.""" return [ card for card in self.cards if card not in self.picked_cards] def card_key_to_emoji(self, card_key): """Return card emoji from id.""" name = self.crdata["Cards"][card_key]["emoji"] return '<:{}:{}>'.format(name, self.emojis[name]) def get_available_card_names(self): """Return list of available card names that are not picked yet.""" return [ self.card_key_to_name(card) for card in self.get_available_cards()] @commands.group(pass_context=True) async def draftutil(self, ctx): """Draft utilities.""" if ctx.invoked_subcommand is None: await send_cmd_help(ctx) @draftutil.command(name="cards", pass_context=True) async def draftutil_cards(self, ctx): """List available cards by emojis.""" emojis = [v["emoji"] for k, v in self.crdata["Cards"].items()] groups = grouper(emojis, 25) for group in groups: out = [] for emoji in group: if emoji is not None: out.append('<:{}:{}>'.format(emoji, self.emojis[emoji])) await self.bot.say(' '.join(out)) # out = [] # for emoji in emojis: # if emoji is not None: # out.append('<:{}:{}>'.format(emoji, self.emojis[emoji])) # em = discord.Embed() # em.add_field(name="", value="".join(out)) # await self.bot.say(embed=em) @draftutil.command(name="emojis", pass_context=True) async def draftutil_emojis(self, ctx): """Save list of emojis on all servers.""" self.emojis = {} for server in self.bot.servers: for emoji in 
server.emojis: self.emojis[emoji.name] = emoji.id dataIO.save_json(EMOJI_JSON, self.emojis) await self.bot.say("Emojis saved to {}".format(EMOJI_JSON)) def check_folder(): """Check data folders exist. Create if necessary.""" folders = [ os.path.join("data", "draftroyale"), os.path.join("data", "draftroyale", "img"), os.path.join("data", "draftroyale", "img", "cards")] for f in folders: if not os.path.exists(f): os.makedirs(f) def check_files(): """Check required data files exists.""" defaults = {} f = SETTINGS_PATH if not dataIO.is_valid_json(f): dataIO.save_json(f, defaults) f = EMOJI_JSON if not dataIO.is_valid_json(f): dataIO.save_json(f, defaults) def setup(bot): """Add cog to bot.""" check_folder() check_files() n = DraftRoyale(bot) bot.add_cog(n)
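
# The DraftRoyale docstring describes snake drafting as "1 2 3 4 4 3 2 1
# 1 2 3 4 etc.": the order reverses at each end, so the first and last
# players pick twice in a row. The standalone generator below is an
# illustrative restatement of that order, independent of the cog's internal
# pick_player_id bookkeeping.

def snake_pick_order(num_players, num_rounds):
    """Yield player indices for a snake draft, e.g. 0 1 2 3 3 2 1 0 0 1 ..."""
    forward = list(range(num_players))
    for round_number in range(num_rounds):
        order = forward if round_number % 2 == 0 else list(reversed(forward))
        for player_index in order:
            yield player_index


# list(snake_pick_order(4, 3)) == [0, 1, 2, 3, 3, 2, 1, 0, 0, 1, 2, 3]
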
""" Test the base classes for managing datasets and tasks. """ import os import pytest import numpy as np from dbcollection.datasets import ( BaseDataset, BaseTask, BaseField, BaseColumnField ) @pytest.fixture() def test_data(): return { "data_path": '/path/to/data', "cache_path": '/path/to/cache', "extract_data": True, "verbose": True } @pytest.fixture() def mock_dataset_class(test_data): return BaseDataset( data_path=test_data["data_path"], cache_path=test_data["cache_path"], extract_data=test_data["extract_data"], verbose=test_data["verbose"] ) class TestBaseDataset: """Unit tests for the BaseDataset class.""" def test_init_with_all_input_args(self, mocker): data_path = '/path/to/data' cache_path = '/path/to/cache' extract_data=True verbose=True db_manager = BaseDataset( data_path=data_path, cache_path=cache_path, extract_data=extract_data, verbose=verbose ) assert db_manager.data_path == '/path/to/data' assert db_manager.cache_path == '/path/to/cache' assert db_manager.extract_data == True assert db_manager.verbose == True assert db_manager.urls == () assert db_manager.keywords == () assert db_manager.tasks == {} assert db_manager.default_task == '' def test_init_without_optional_input_args(self, mocker): data_path = '/path/to/data' cache_path = '/path/to/cache' db_manager = BaseDataset( data_path=data_path, cache_path=cache_path ) assert db_manager.data_path == '/path/to/data' assert db_manager.cache_path == '/path/to/cache' assert db_manager.extract_data == True assert db_manager.verbose == True assert db_manager.urls == () assert db_manager.keywords == () assert db_manager.tasks == {} assert db_manager.default_task == '' def test_init__raises_error_no_input_args(self, mocker): with pytest.raises(TypeError): BaseDataset() def test_init__raises_error_too_many_input_args(self, mocker): with pytest.raises(TypeError): BaseDataset('/path/to/data', '/path/to/cache', False, False, 'extra_field') def test_download(self, mocker, mock_dataset_class): mock_download_extract = mocker.patch("dbcollection.datasets.download_extract_urls") mock_dataset_class.download() assert mock_download_extract.called_once_with( urls=mock_dataset_class.urls, dir_save=mock_dataset_class.data_path, extract_data=mock_dataset_class.extract_data, verbose=mock_dataset_class.verbose ) def test_process(self, mocker, mock_dataset_class): mock_parse_task = mocker.patch.object(BaseDataset, "parse_task_name", return_value='taskA') mock_process_metadata = mocker.patch.object(BaseDataset, "process_metadata", return_value='/path/to/task/filename.h5') result = mock_dataset_class.process('taskA') assert mock_parse_task.called assert mock_process_metadata.called assert result == {'taskA': {"filename": '/path/to/task/filename.h5', "categories": ()}} def test_parse_task_name_with_valid_task_name(self, mocker, mock_dataset_class): task = 'taskA' result = mock_dataset_class.parse_task_name(task) assert result == 'taskA' def test_parse_task_name_with_empty_task_name(self, mocker, mock_dataset_class): task = '' mock_dataset_class.default_task = 'some_task' result = mock_dataset_class.parse_task_name(task) assert result == 'some_task' def test_parse_task_name_with_default_task_name(self, mocker, mock_dataset_class): task = 'default' mock_dataset_class.default_task = 'some_task' result = mock_dataset_class.parse_task_name(task) assert result == 'some_task' def test_process_metadata(self, mocker, mock_dataset_class): mock_get_constructor = mocker.patch.object(BaseDataset, "get_task_constructor", return_value=mocker.MagicMock()) 
mock_dataset_class.process_metadata('some_task') assert mock_get_constructor.called def test_get_task_constructor(self, mocker, mock_dataset_class): task = 'taskZ' mock_dataset_class.tasks = {task: 'some_data'} result = mock_dataset_class.get_task_constructor(task) assert result == 'some_data' @pytest.fixture() def mock_task_class(test_data): return BaseTask( data_path=test_data["data_path"], cache_path=test_data["cache_path"], verbose=test_data["verbose"] ) class TestBaseTask: """Unit tests for the BaseTask class.""" def test_init_with_all_input_args(self, mocker): mock_get_filename = mocker.patch.object(BaseTask, "get_hdf5_save_filename", return_value='/path/to/hdf5/file.h5') data_path = '/path/to/data' cache_path = '/path/to/cache' verbose = True task_manager = BaseTask(data_path=data_path, cache_path=cache_path, verbose=verbose) assert mock_get_filename.called assert task_manager.data_path == '/path/to/data' assert task_manager.cache_path == '/path/to/cache' assert task_manager.verbose == True assert task_manager.hdf5_filepath == '/path/to/hdf5/file.h5' assert task_manager.filename_h5 == '' assert task_manager.hdf5_manager == None def test_init_without_optional_input_args(self, mocker): mock_get_filename = mocker.patch.object(BaseTask, "get_hdf5_save_filename", return_value='/path/to/hdf5/file.h5') data_path = '/path/to/data' cache_path = '/path/to/cache' task_manager = BaseTask(data_path=data_path, cache_path=cache_path) assert mock_get_filename.called assert task_manager.data_path == '/path/to/data' assert task_manager.cache_path == '/path/to/cache' assert task_manager.verbose == True assert task_manager.hdf5_filepath == '/path/to/hdf5/file.h5' assert task_manager.filename_h5 == '' assert task_manager.hdf5_manager == None def test_init__raises_error_no_input_args(self, mocker): with pytest.raises(TypeError): BaseTask() def test_init__raises_error_too_many_input_args(self, mocker): with pytest.raises(TypeError): BaseTask('/path/to/data', '/path/to/cache', False, 'extra_input') def test_get_hdf5_save_filename(self, mocker, mock_task_class): mock_task_class.filename_h5 = 'classification' filepath = mock_task_class.get_hdf5_save_filename() assert filepath == os.path.join('/path/to/cache', 'classification.h5') def test_run(self, mocker, mock_task_class): mock_setup_manager = mocker.patch.object(BaseTask, "setup_hdf5_manager") mock_load_data = mocker.patch.object(BaseTask, "load_data", return_value={}) mock_process = mocker.patch.object(BaseTask, "process_metadata") mock_teardown_manager = mocker.patch.object(BaseTask, "teardown_hdf5_manager") filename = mock_task_class.run() assert mock_setup_manager.called assert mock_load_data.called assert mock_process.called assert mock_teardown_manager.called assert filename == mock_task_class.hdf5_filepath def test_load_data(self, mocker, mock_task_class): mock_task_class.load_data() def test_process_metadata(self, mocker, mock_task_class): mock_process_metadata = mocker.patch.object(BaseTask, "process_set_metadata") def sample_generator(): yield {'train': ['dummy', 'data']} yield {'test': ['dummy', 'data']} generator = sample_generator() mock_task_class.process_metadata(generator) assert mock_process_metadata.called def test_teardown_hdf5_manager(self, mocker, mock_task_class): mock_add_field = mocker.Mock() mock_task_class.hdf5_manager = mock_add_field mock_task_class.teardown_hdf5_manager() mock_add_field.close.assert_called_once_with() class TestBaseField: """Unit tests for the BaseField class.""" def test_init(self, mocker): args = { "data": 
['some', 'data'], "set_name": 'train', "hdf5_manager": {'dummy': 'object'}, "verbose": True } base_field = BaseField(**args) assert base_field.data == args["data"] assert base_field.set_name == args["set_name"] assert base_field.hdf5_manager == args["hdf5_manager"] assert base_field.verbose == args["verbose"] def test_save_field_to_hdf5(self, mocker): mock_hdf5_manager = mocker.Mock() args = { "data": ['some', 'data'], "set_name": 'train', "hdf5_manager": mock_hdf5_manager, "verbose": True } base_field = BaseField(**args) group = 'train' field = 'fieldA' data = np.array(range(10)) base_field.save_field_to_hdf5(group, field, data) mock_hdf5_manager.add_field_to_group.assert_called_once_with( group='train', field='fieldA', data=data ) def test_save_field_to_hdf5_all_args(self, mocker): mock_hdf5_manager = mocker.Mock() args = { "data": ['some', 'data'], "set_name": 'train', "hdf5_manager": mock_hdf5_manager, "verbose": True } base_field = BaseField(**args) base_field.save_field_to_hdf5( set_name='train', field='fieldA', data= [0, 1, 2, 3, 4, 5], dtype=np.uint8, fillvalue=0, chunks=True, compression="gzip", compression_opts=4) mock_hdf5_manager.add_field_to_group.assert_called_once_with( group='train', field='fieldA', data=[0, 1, 2, 3, 4, 5], dtype=np.uint8, fillvalue=0, chunks=True, compression="gzip", compression_opts=4 ) class TestBaseColumnField: """Unit tests for the BaseColumnField class.""" def test_class_attributes(self, mocker): base_column_field = BaseColumnField() assert base_column_field.fields == [] def test_process(self, mocker): mock_save_field_to_hdf5 = mocker.patch.object(BaseColumnField, 'save_field_to_hdf5') base_column_field = BaseColumnField(set_name='train') base_column_field.fields = ['some', 'fields'] base_column_field.process() assert mock_save_field_to_hdf5.called
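# ---------------------------------------------------------------------------
# Illustrative sketch (not part of dbcollection): the tests above rely on
# pytest-mock's `mocker` fixture to stub methods on a class and then inspect
# the recorded call.  The self-contained test below shows the same pattern on
# a toy class, plus the difference between the loose `mock.called` check and
# `assert_called_once_with()`, which fails if the arguments differ.  The
# `Downloader` class and its methods are invented for illustration only; the
# example runs under pytest with the pytest-mock plugin installed, like the
# tests above.


class Downloader:
    def fetch(self, url, verbose=False):
        raise RuntimeError("no network access expected in unit tests")

    def run(self, url):
        # Production path exercised indirectly by the test.
        return self.fetch(url, verbose=True)


def test_run_delegates_to_fetch(mocker):
    # Replace Downloader.fetch with a MagicMock for the duration of the test.
    mock_fetch = mocker.patch.object(Downloader, "fetch", return_value="data")

    result = Downloader().run("http://example.com/file")

    assert result == "data"
    assert mock_fetch.called  # loose check: was it called at all?
    # Strict check; `self` is not recorded because the patched attribute is a
    # plain MagicMock rather than a descriptor-bound method.
    mock_fetch.assert_called_once_with("http://example.com/file", verbose=True)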
#!/usr/bin/python2.4 # # Copyright (C) 2009 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Unit tests for the robot module.""" import unittest import events import ops import robot import simplejson BLIP_JSON = ('{"wdykLROk*13":' '{"lastModifiedTime":1242079608457,' '"contributors":["someguy@test.com"],' '"waveletId":"test.com!conv+root",' '"waveId":"test.com!wdykLROk*11",' '"parentBlipId":null,' '"version":3,' '"creator":"someguy@test.com",' '"content":"\\nContent!",' '"blipId":"wdykLROk*13","' 'annotations":[{"range":{"start":0,"end":1},' '"name":"user/e/davidbyttow@google.com","value":"David"}],' '"elements":{},' '"childBlipIds":[]}' '}') WAVELET_JSON = ('{"lastModifiedTime":1242079611003,' '"title":"A title",' '"waveletId":"test.com!conv+root",' '"rootBlipId":"wdykLROk*13",' '"dataDocuments":null,' '"creationTime":1242079608457,' '"waveId":"test.com!wdykLROk*11",' '"participants":["someguy@test.com","monty@appspot.com"],' '"creator":"someguy@test.com",' '"version":5}') EVENTS_JSON = ('[{"timestamp":1242079611003,' '"modifiedBy":"someguy@test.com",' '"properties":{"participantsRemoved":[],' '"participantsAdded":["monty@appspot.com"]},' '"type":"WAVELET_PARTICIPANTS_CHANGED"}]') TEST_JSON = '{"blips":%s,"wavelet":%s,"events":%s}' % ( BLIP_JSON, WAVELET_JSON, EVENTS_JSON) NEW_WAVE_JSON = [{"data": {"waveletId": "wavesandbox.com!conv+root", "blipId": "b+LrODcLZkDlu", "waveId": "wavesandbox.com!w+LrODcLZkDlt"}, "id": "op2"}] NEW_WAVE_JSON_OLD = [{'data': [{'data': {'waveletId': 'googlewave.com!conv+root', 'blipId': 'b+VqQXQbZkCP1', 'waveId': 'googlewave.com!w+VqQXQbZkCP0'}, 'id': 'wavelet.create1265055048410'}], 'id': 'op10'}]; class TestRobot(unittest.TestCase): """Tests for testing the basic parsing of json in robots.""" def setUp(self): self.robot = robot.Robot('Testy') def testCreateWave(self): self.robot.submit = lambda x: NEW_WAVE_JSON new_wave = self.robot.new_wave('wavesandbox.com', submit=True) self.assertEqual('wavesandbox.com!w+LrODcLZkDlt', new_wave.wave_id) self.robot.submit = lambda x: NEW_WAVE_JSON_OLD new_wave = self.robot.new_wave('googlewave.com', submit=True) self.assertEqual('googlewave.com!w+VqQXQbZkCP0', new_wave.wave_id) def testEventParsing(self): def check(event, wavelet): # Test some basic properties; the rest should be covered by # ops.CreateContext. 
root = wavelet.root_blip self.assertEqual(1, len(wavelet.blips)) self.assertEqual('wdykLROk*13', root.blip_id) self.assertEqual('test.com!wdykLROk*11', root.wave_id) self.assertEqual('test.com!conv+root', root.wavelet_id) self.assertEqual('WAVELET_PARTICIPANTS_CHANGED', event.type) self.assertEqual({'participantsRemoved': [], 'participantsAdded': ['monty@appspot.com']}, event.properties) self.robot.test_called = True self.robot.test_called = False self.robot.register_handler(events.WaveletParticipantsChanged, check) json = self.robot.process_events(TEST_JSON) self.assertTrue(self.robot.test_called) operations = simplejson.loads(json) # there should be one operation indicating the current version: self.assertEqual(1, len(operations)) def testWrongEventsIgnored(self): self.robot.test_called = True def check(event, wavelet): called = True self.robot.test_called = False self.robot.register_handler(events.BlipSubmitted, check) self.robot.process_events(TEST_JSON) self.assertFalse(self.robot.test_called) def testOperationParsing(self): def check(event, wavelet): wavelet.reply() wavelet.title = 'new title' wavelet.root_blip.append_markup('<b>Hello</b>') self.robot.register_handler(events.WaveletParticipantsChanged, check) json = self.robot.process_events(TEST_JSON) operations = simplejson.loads(json) expected = set([ops.ROBOT_NOTIFY_CAPABILITIES_HASH, ops.WAVELET_APPEND_BLIP, ops.WAVELET_SET_TITLE, ops.DOCUMENT_APPEND_MARKUP]) methods = [operation['method'] for operation in operations] for method in methods: self.assertTrue(method in expected) expected.remove(method) self.assertEquals(0, len(expected)) def testSerializeWavelets(self): wavelet = self.robot.blind_wavelet(TEST_JSON) serialized = wavelet.serialize() unserialized = self.robot.blind_wavelet(serialized) self.assertEquals(wavelet.creator, unserialized.creator) self.assertEquals(wavelet.creation_time, unserialized.creation_time) self.assertEquals(wavelet.last_modified_time, unserialized.last_modified_time) self.assertEquals(wavelet.root_blip.blip_id, unserialized.root_blip.blip_id) self.assertEquals(wavelet.title, unserialized.title) self.assertEquals(wavelet.wave_id, unserialized.wave_id) self.assertEquals(wavelet.wavelet_id, unserialized.wavelet_id) self.assertEquals(wavelet.domain, unserialized.domain) def testProxiedBlindWavelet(self): def handler(event, wavelet): blind_wavelet = self.robot.blind_wavelet(TEST_JSON, 'proxyid') blind_wavelet.reply() blind_wavelet.submit_with(wavelet) self.robot.register_handler(events.WaveletParticipantsChanged, handler) json = self.robot.process_events(TEST_JSON) operations = simplejson.loads(json) self.assertEqual(2, len(operations)) self.assertEquals(ops.ROBOT_NOTIFY_CAPABILITIES_HASH, operations[0]['method']) self.assertEquals(ops.WAVELET_APPEND_BLIP, operations[1]['method']) self.assertEquals('proxyid', operations[1]['params']['proxyingFor']) def testCapabilitiesHashIncludesContextAndFilter(self): robot1 = robot.Robot('Robot1') robot1.register_handler(events.WaveletSelfAdded, lambda: '') robot2 = robot.Robot('Robot2') robot2.register_handler(events.WaveletSelfAdded, lambda: '', context=events.Context.ALL) self.assertNotEqual(robot1.capabilities_hash(), robot2.capabilities_hash()) robot3 = robot.Robot('Robot3') robot2.register_handler(events.WaveletSelfAdded, lambda: '', context=events.Context.ALL, filter="foo") self.assertNotEqual(robot1.capabilities_hash(), robot2.capabilities_hash()) self.assertNotEqual(robot1.capabilities_hash(), robot3.capabilities_hash()) 
self.assertNotEqual(robot2.capabilities_hash(), robot3.capabilities_hash()) class TestGetCapabilitiesXml(unittest.TestCase): def setUp(self): self.robot = robot.Robot('Testy') self.robot.capabilities_hash = lambda: '1' def assertStringsEqual(self, s1, s2): self.assertEqual(s1, s2, 'Strings differ:\n%s--\n%s' % (s1, s2)) def testDefault(self): expected = ( '<?xml version="1.0"?>\n' '<w:robot xmlns:w="http://wave.google.com/extensions/robots/1.0">\n' '<w:version>1</w:version>\n' '<w:protocolversion>%s</w:protocolversion>\n' '<w:capabilities>\n</w:capabilities>\n' '</w:robot>\n') % ops.PROTOCOL_VERSION xml = self.robot.capabilities_xml() self.assertStringsEqual(expected, xml) def testUrls(self): profile_robot = robot.Robot( 'Testy', image_url='http://example.com/image.png', profile_url='http://example.com/profile.xml') profile_robot.capabilities_hash = lambda: '1' expected = ( '<?xml version="1.0"?>\n' '<w:robot xmlns:w="http://wave.google.com/extensions/robots/1.0">\n' '<w:version>1</w:version>\n' '<w:protocolversion>%s</w:protocolversion>\n' '<w:capabilities>\n</w:capabilities>\n' '</w:robot>\n') % ops.PROTOCOL_VERSION xml = profile_robot.capabilities_xml() self.assertStringsEqual(expected, xml) def testConsumerKey(self): # setup_oauth doesn't work during testing, so heavy handed setting of # properties it is: self.robot._consumer_key = 'consumer' expected = ( '<?xml version="1.0"?>\n' '<w:robot xmlns:w="http://wave.google.com/extensions/robots/1.0">\n' '<w:version>1</w:version>\n' '<w:consumer_key>consumer</w:consumer_key>\n' '<w:protocolversion>%s</w:protocolversion>\n' '<w:capabilities>\n</w:capabilities>\n' '</w:robot>\n') % ops.PROTOCOL_VERSION xml = self.robot.capabilities_xml() self.assertStringsEqual(expected, xml) def testCapsAndEvents(self): self.robot.register_handler(events.BlipSubmitted, None, context=[events.Context.SELF, events.Context.ROOT]) expected = ( '<?xml version="1.0"?>\n' '<w:robot xmlns:w="http://wave.google.com/extensions/robots/1.0">\n' '<w:version>1</w:version>\n' '<w:protocolversion>%s</w:protocolversion>\n' '<w:capabilities>\n' ' <w:capability name="%s" context="SELF,ROOT"/>\n' '</w:capabilities>\n' '</w:robot>\n') % (ops.PROTOCOL_VERSION, events.BlipSubmitted.type) xml = self.robot.capabilities_xml() self.assertStringsEqual(expected, xml) if __name__ == '__main__': unittest.main()
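# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the Wave robot API): TestRobot.testCreateWave
# above stubs out `self.robot.submit` with a lambda returning a canned
# operation-result payload and then checks what gets parsed out of it.  The
# self-contained unittest below shows that stub-and-parse pattern on a toy
# client; the class, field names, and payload are invented for illustration,
# and the stdlib json module stands in for simplejson.

import json
import unittest

CANNED_RESULT = json.loads(
    '[{"data": {"waveId": "example.com!w+abc123"}, "id": "op1"}]')


class ToyClient(object):
  def submit(self, payload):
    raise NotImplementedError('would normally make an HTTP request')

  def new_wave(self):
    results = self.submit('{"method": "wavelet.create"}')
    # Pull the created wave id out of the first operation result.
    return results[0]['data']['waveId']


class ToyClientTest(unittest.TestCase):
  def testNewWaveUsesSubmitResult(self):
    client = ToyClient()
    client.submit = lambda payload: CANNED_RESULT  # stub out the network
    self.assertEqual('example.com!w+abc123', client.new_wave())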
#!/usr/bin/env python import os import glob from optparse import OptionParser import numpy as np import matplotlib.pyplot as plt from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas from matplotlib.figure import Figure from rompy import rompy, plot_utils, utils, extract_utils def surface_map(file,img_file=None,varname='salt',clim=None): (data, coords) = rompy.extract(file,varname=varname,extraction_type='surface') # plot_utils.plot_surface(coords['xm'],coords['ym'],data) title = '%s %s %s %s' % ( extract_utils.run_title(file), os.path.basename(file), var_title_map[var], extract_utils.file_time(file).strftime(title_time_fmt) ) plot_utils.plot_map(coords['xm'],coords['ym'],data,filename=img_file, clim=clim, title=title, caxis_label=clabel_map[varname]) def main_basin_curtain(file,img_file,varname,n=4,clim=None): # Main Basin if varname == 'U': main_basin_U_curtain(file,img_file,n,clim) else: x,y = utils.high_res_main_basin_xy(n=n) (data, coords) = rompy.extract(file, varname=varname, extraction_type='profile', x=x, y=y) title = '%s %s Main Basin %s %s' % (extract_utils.run_title(file), os.path.basename(file), var_title_map[var], extract_utils.file_time(file).strftime(title_time_fmt)) plot_utils.plot_parker(coords=coords, data=data, varname=varname, region='Main Basin', filename=img_file, n=n, x_axis_offset=utils.offset_region(coords), clim=clim,cmap='banas_hsv_cm',labeled_contour_gap=2, title=title, caxis_label=clabel_map[varname]) def hood_canal_curtain(file,img_file,varname,n=1,clim=None): # Hood Canal if varname == 'U': hood_canal_U_curtain(file,img_file,n,clim) else: x,y = utils.high_res_hood_canal_xy(n=n) (data, coords) = rompy.extract(file, varname=varname, extraction_type='profile', x=x, y=y) title = '%s %s Hood Canal %s %s' % (extract_utils.run_title(file), os.path.basename(file), var_title_map[var], extract_utils.file_time(file).strftime(title_time_fmt)) plot_utils.plot_parker(coords=coords, data=data, varname=varname, region='Hood Canal', filename=img_file, n=n, x_axis_offset=utils.offset_region(coords), clim=clim, cmap='banas_hsv_cm',labeled_contour_gap=2, title=title, caxis_label=clabel_map[varname]) def hood_canal_U_curtain(file,img_file,n=1,clim=None): # velocity in Hood Canal x,y = utils.high_res_hood_canal_xy(n=n) (u, coords) = rompy.extract(file,varname='u',extraction_type='profile',x=x,y=y) (v, coords) = rompy.extract(file,varname='v',extraction_type='profile',x=x,y=y) data = np.zeros(u.shape) for i in range(u.shape[1]): if i == u.shape[1]-1: x_vec = np.array([x[i] - x[i-1], y[i] - y[i-1]]) else: x_vec = np.array([x[i+1] - x[i], y[i+1] - y[i]]) for j in range(u.shape[0]): u_vec = np.array([u[j,i], v[j,i]]) data[j,i] = np.dot(x_vec,u_vec)/(np.sqrt(np.dot(x_vec,x_vec))) data = np.ma.array(data, mask=np.abs(data) > 100) title = '%s %s Hood Canal %s %s' % (extract_utils.run_title(file), os.path.basename(file), var_title_map['U'], extract_utils.file_time(file).strftime(title_time_fmt)) hood_U_clim = (np.array(clim)/2.0).tolist() plot_utils.plot_parker(coords=coords,data=data,varname='U', region='Hood Canal', filename=img_file, n=n, clim=clim, x_axis_offset=utils.offset_region(coords), cmap='red_blue', title=title, caxis_label=clabel_map['U']) def main_basin_U_curtain(file,img_file,n=1,clim=None): # velocity in Main Basin x,y = utils.high_res_main_basin_xy(n=n) (u, coords) = rompy.extract(file,varname='u',extraction_type='profile',x=x,y=y) (v, coords) = rompy.extract(file,varname='v',extraction_type='profile',x=x,y=y) data = np.zeros(u.shape) for i in 
range(u.shape[1]): if i == u.shape[1]-1: x_vec = np.array([x[i] - x[i-1], y[i] - y[i-1]]) else: x_vec = np.array([x[i+1] - x[i], y[i+1] - y[i]]) for j in range(u.shape[0]): u_vec = np.array([u[j,i], v[j,i]]) data[j,i] = np.dot(x_vec,u_vec)/(np.sqrt(np.dot(x_vec,x_vec))) data = np.ma.array(data, mask=np.abs(data) > 100) title = '%s %s Main Basin %s %s' % (extract_utils.run_title(file), os.path.basename(file), var_title_map['U'], extract_utils.file_time(file).strftime(title_time_fmt)) plot_utils.plot_parker(coords=coords,data=data,varname='U', region=' Main Basin', filename=img_file, n=n, clim=clim, x_axis_offset=utils.offset_region(coords),cmap='red_blue', title=title, caxis_label=clabel_map['U']) def daves_curtain(file,img_file,section,varname,clim=None): if varname == 'U': daves_U_curtain(file,img_file,section,varname,clim) else: x = utils.get_daves_section_var(section=section,var='lon') y = utils.get_daves_section_var(section=section,var='lat') (data, coords) = rompy.extract(file, varname=varname, extraction_type='profile', x=x, y=y) title = '%s %s %s %s %s' % (extract_utils.run_title(file), os.path.basename(file), section, var_title_map[var], extract_utils.file_time(file).strftime(title_time_fmt)) plot_utils.plot_parker( coords=coords, data=data, filename=img_file, title=title, x_axis_offset=utils.offset_region(coords), clim=clim, cmap='banas_hsv_cm', labeled_contour_gap=2, caxis_label=clabel_map[varname], inset=inset_dict[section], ctd_ind=ctd_ind_dict[section], label=utils.get_daves_section_var(section=section,var='label'), label_ind=utils.get_daves_section_var(section=section,var='label_ind') ) return def daves_U_curtain(file,img_file,section,varname,clim): x = utils.get_daves_section_var(section=section,var='lon') y = utils.get_daves_section_var(section=section,var='lat') (u, coords) = rompy.extract(file,varname='u',extraction_type='profile',x=x,y=y) (v, coords) = rompy.extract(file,varname='v',extraction_type='profile',x=x,y=y) data = np.zeros(u.shape) for i in range(u.shape[1]): if i == u.shape[1]-1: x_vec = np.array([x[i] - x[i-1], y[i] - y[i-1]]) else: x_vec = np.array([x[i+1] - x[i], y[i+1] - y[i]]) for j in range(u.shape[0]): u_vec = np.array([u[j,i], v[j,i]]) data[j,i] = np.dot(x_vec,u_vec)/(np.sqrt(np.dot(x_vec,x_vec))) data = np.ma.array(data, mask=np.abs(data) > 100) title = '%s %s %s %s %s' % (extract_utils.run_title(file), os.path.basename(file), section, var_title_map['U'], extract_utils.file_time(file).strftime(title_time_fmt)) plot_utils.plot_parker( coords=coords, data=data, filename=img_file, title=title, x_axis_offset=utils.offset_region(coords), clim=clim, cmap='red_blue', caxis_label=clabel_map[varname], inset=inset_dict[section], ctd_ind=ctd_ind_dict[section], label=utils.get_daves_section_var(section=section,var='label'), label_ind=utils.get_daves_section_var(section=section,var='label_ind') ) return # begin actual code that runs. parser = OptionParser() parser.add_option('-i', '--img_dir', dest='img_dir', default='./image_sequence', help='Location to save images. 
Default is ./image_sequnce') (options, args) = parser.parse_args() if args == []: fl = glob.glob('ocean_his*.nc') print(fl) file_list = [fl[0]] else: file_list = args img_dir = options.img_dir var_list = ['salt','temp','U'] var_title_map = {'salt':'Salinity','temp':'Temperature','U':'Velocity'} title_time_fmt = '%Y-%m-%d %H:%M UTC' clims = {'salt':[0, 21,33, 33], 'temp': [8, 20], 'U':[-2,2]} clabel_map = {'temp': u'\u00B0 C', 'salt': 'psu', 'U': 'm/s'} inset_dict = {'AI_HC':'Puget Sound','AI_WB':'Puget Sound','JdF_SoG':'Strait of Georgia','JdF_SS':'Puget Sound','JdF_PS':'JdF_PS'} ctd_ind_dict = { 'AI_HC':utils.get_daves_section_var(section='AI_HC',var='PRISM_ctd_ind'), 'AI_WB':utils.get_daves_section_var(section='AI_WB',var='PRISM_ctd_ind'), 'JdF_SoG':utils.get_daves_section_var(section='JdF_SoG',var='IOS_ctd_ind'), 'JdF_SS':utils.get_daves_section_var(section='JdF_SS',var='PRISM_ctd_ind'), 'JdF_PS':utils.get_daves_section_var(section='JdF_PS',var='PRISM_ctd_ind') } ctd_ind_dict['JdF_SoG'].extend(utils.get_daves_section_var(section='JdF_SoG',var='JEMS_ctd_ind')) for file in file_list: ncf_index = os.path.basename(file)[:-3] print('%s' %ncf_index) if not os.path.exists(img_dir): os.makedirs(img_dir) for var in var_list: hood_img_file = '%s/%s_hood_%s.png' %(img_dir, ncf_index,var) main_img_file = '%s/%s_main_%s.png' %(img_dir, ncf_index,var) surface_img_file = '%s/%s_surface_%s.png' % (img_dir, ncf_index, var) AI_HC_img_file = '%s/AI_HC_%s_%s.png' %(img_dir,var,ncf_index) AI_WB_img_file = '%s/AI_WB_%s_%s.png' %(img_dir,var,ncf_index) JdF_SoG_img_file = '%s/JdF_SoG_%s_%s.png' %(img_dir,var,ncf_index) JdF_SS_img_file = '%s/JdF_SS_%s_%s.png' %(img_dir,var,ncf_index) JdF_PS_img_file = '%s/JdF_PS_%s_%s.png' %(img_dir,var,ncf_index) print('making hood canal %s' % var) hood_canal_curtain(file, hood_img_file, var, n=8, clim=clims[var]) print('making main basin %s' % var) main_basin_curtain(file, main_img_file, var, n=8, clim=clims[var]) print('making AI_HC %s' % var) daves_curtain(file,AI_HC_img_file,section='AI_HC',varname=var,clim=clims[var]) print('making AI_WB %s' % var) daves_curtain(file,AI_WB_img_file,section='AI_WB',varname=var,clim=clims[var]) print('making JdF_SoG %s' % var) daves_curtain(file,JdF_SoG_img_file,section='JdF_SoG',varname=var,clim=clims[var]) print('making JdF_SS %s' % var) daves_curtain(file,JdF_SS_img_file,section='JdF_SS',varname=var,clim=clims[var]) print('making JdF_PS %s' % var) daves_curtain(file,JdF_PS_img_file,section='JdF_PS',varname=var,clim=clims[var]) if not var == 'U': print('making surface %s' % var) surface_map(file,surface_img_file,var,clim=clims[var])
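# ---------------------------------------------------------------------------
# Illustrative sketch (not part of rompy): the *_U_curtain functions above turn
# (u, v) velocity components into an along-track speed by projecting each
# velocity vector onto the local direction of the section defined by x, y.
# The standalone helper below reproduces that projection on small arrays so the
# dot-product step is easy to follow; the function name is invented here.
import numpy as np


def along_track_velocity(u, v, x, y, mask_threshold=100.0):
    """Project (u, v) onto the unit vector along the (x, y) track.

    u and v are (n_levels, n_points) arrays; x and y are 1-D arrays of length
    n_points giving the track coordinates, as in the curtain extractions above.
    """
    data = np.zeros(u.shape)
    for i in range(u.shape[1]):
        # Forward difference along the track, backward difference at the end.
        if i == u.shape[1] - 1:
            track_vec = np.array([x[i] - x[i - 1], y[i] - y[i - 1]])
        else:
            track_vec = np.array([x[i + 1] - x[i], y[i + 1] - y[i]])
        track_len = np.sqrt(np.dot(track_vec, track_vec))
        for j in range(u.shape[0]):
            data[j, i] = np.dot(track_vec, [u[j, i], v[j, i]]) / track_len
    # Mask values that are clearly fill/garbage, as the script above does.
    return np.ma.array(data, mask=np.abs(data) > mask_threshold)


if __name__ == '__main__':
    # A uniform eastward flow projected onto an eastward track gives +1 m/s.
    x = np.array([0.0, 1.0, 2.0])
    y = np.array([0.0, 0.0, 0.0])
    u = np.ones((2, 3))
    v = np.zeros((2, 3))
    print(along_track_velocity(u, v, x, y))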
""" Support code for building Python extensions on Windows. # NT stuff # 1. Make sure libpython<version>.a exists for gcc. If not, build it. # 2. Force windows to use gcc (we're struggling with MSVC and g77 support) # 3. Force windows to use g77 """ from __future__ import division, absolute_import, print_function import os import sys import subprocess import re # Overwrite certain distutils.ccompiler functions: import numpy.distutils.ccompiler if sys.version_info[0] < 3: from . import log else: from numpy.distutils import log # NT stuff # 1. Make sure libpython<version>.a exists for gcc. If not, build it. # 2. Force windows to use gcc (we're struggling with MSVC and g77 support) # --> this is done in numpy/distutils/ccompiler.py # 3. Force windows to use g77 import distutils.cygwinccompiler from distutils.version import StrictVersion from numpy.distutils.ccompiler import gen_preprocess_options, gen_lib_options from distutils.unixccompiler import UnixCCompiler from distutils.msvccompiler import get_build_version as get_build_msvc_version from distutils.errors import (DistutilsExecError, CompileError, UnknownFileError) from numpy.distutils.misc_util import (msvc_runtime_library, msvc_runtime_version, msvc_runtime_major, get_build_architecture) def get_msvcr_replacement(): """Replacement for outdated version of get_msvcr from cygwinccompiler""" msvcr = msvc_runtime_library() return [] if msvcr is None else [msvcr] # monkey-patch cygwinccompiler with our updated version from misc_util # to avoid getting an exception raised on Python 3.5 distutils.cygwinccompiler.get_msvcr = get_msvcr_replacement # Useful to generate table of symbols from a dll _START = re.compile(r'\[Ordinal/Name Pointer\] Table') _TABLE = re.compile(r'^\s+\[([\s*[0-9]*)\] ([a-zA-Z0-9_]*)') # the same as cygwin plus some additional parameters class Mingw32CCompiler(distutils.cygwinccompiler.CygwinCCompiler): """ A modified MingW32 compiler compatible with an MSVC built Python. """ compiler_type = 'mingw32' def __init__ (self, verbose=0, dry_run=0, force=0): distutils.cygwinccompiler.CygwinCCompiler.__init__ (self, verbose, dry_run, force) # we need to support 3.2 which doesn't match the standard # get_versions methods regex if self.gcc_version is None: import re p = subprocess.Popen(['gcc', '-dumpversion'], shell=True, stdout=subprocess.PIPE) out_string = p.stdout.read() p.stdout.close() result = re.search(r'(\d+\.\d+)', out_string) if result: self.gcc_version = StrictVersion(result.group(1)) # A real mingw32 doesn't need to specify a different entry point, # but cygwin 2.91.57 in no-cygwin-mode needs it. if self.gcc_version <= "2.91.57": entry_point = '--entry _DllMain@12' else: entry_point = '' if self.linker_dll == 'dllwrap': # Commented out '--driver-name g++' part that fixes weird # g++.exe: g++: No such file or directory # error (mingw 1.0 in Enthon24 tree, gcc-3.4.5). # If the --driver-name part is required for some environment # then make the inclusion of this part specific to that # environment. self.linker = 'dllwrap' # --driver-name g++' elif self.linker_dll == 'gcc': self.linker = 'g++' # **changes: eric jones 4/11/01 # 1. Check for import library on Windows. Build if it doesn't exist. build_import_library() # Check for custom msvc runtime library on Windows. Build if it doesn't exist. 
msvcr_success = build_msvcr_library() msvcr_dbg_success = build_msvcr_library(debug=True) if msvcr_success or msvcr_dbg_success: # add preprocessor statement for using customized msvcr lib self.define_macro('NPY_MINGW_USE_CUSTOM_MSVCR') # Define the MSVC version as hint for MinGW msvcr_version = msvc_runtime_version() if msvcr_version: self.define_macro('__MSVCRT_VERSION__', '0x%04i' % msvcr_version) # MS_WIN64 should be defined when building for amd64 on windows, # but python headers define it only for MS compilers, which has all # kind of bad consequences, like using Py_ModuleInit4 instead of # Py_ModuleInit4_64, etc... So we add it here if get_build_architecture() == 'AMD64': if self.gcc_version < "4.0": self.set_executables( compiler='gcc -g -DDEBUG -DMS_WIN64 -mno-cygwin -O0 -Wall', compiler_so='gcc -g -DDEBUG -DMS_WIN64 -mno-cygwin -O0' ' -Wall -Wstrict-prototypes', linker_exe='gcc -g -mno-cygwin', linker_so='gcc -g -mno-cygwin -shared') else: # gcc-4 series releases do not support -mno-cygwin option self.set_executables( compiler='gcc -g -DDEBUG -DMS_WIN64 -O0 -Wall', compiler_so='gcc -g -DDEBUG -DMS_WIN64 -O0 -Wall -Wstrict-prototypes', linker_exe='gcc -g', linker_so='gcc -g -shared') else: if self.gcc_version <= "3.0.0": self.set_executables( compiler='gcc -mno-cygwin -O2 -w', compiler_so='gcc -mno-cygwin -mdll -O2 -w' ' -Wstrict-prototypes', linker_exe='g++ -mno-cygwin', linker_so='%s -mno-cygwin -mdll -static %s' % (self.linker, entry_point)) elif self.gcc_version < "4.0": self.set_executables( compiler='gcc -mno-cygwin -O2 -Wall', compiler_so='gcc -mno-cygwin -O2 -Wall' ' -Wstrict-prototypes', linker_exe='g++ -mno-cygwin', linker_so='g++ -mno-cygwin -shared') else: # gcc-4 series releases do not support -mno-cygwin option self.set_executables(compiler='gcc -O2 -Wall', compiler_so='gcc -O2 -Wall -Wstrict-prototypes', linker_exe='g++ ', linker_so='g++ -shared') # added for python2.3 support # we can't pass it through set_executables because pre 2.2 would fail self.compiler_cxx = ['g++'] # Maybe we should also append -mthreads, but then the finished dlls # need another dll (mingwm10.dll see Mingw32 docs) (-mthreads: Support # thread-safe exception handling on `Mingw32') # no additional libraries needed #self.dll_libraries=[] return # __init__ () def link(self, target_desc, objects, output_filename, output_dir, libraries, library_dirs, runtime_library_dirs, export_symbols = None, debug=0, extra_preargs=None, extra_postargs=None, build_temp=None, target_lang=None): # Include the appropriate MSVC runtime library if Python was built # with MSVC >= 7.0 (MinGW standard is msvcrt) runtime_library = msvc_runtime_library() if runtime_library: if not libraries: libraries = [] libraries.append(runtime_library) args = (self, target_desc, objects, output_filename, output_dir, libraries, library_dirs, runtime_library_dirs, None, #export_symbols, we do this in our def-file debug, extra_preargs, extra_postargs, build_temp, target_lang) if self.gcc_version < "3.0.0": func = distutils.cygwinccompiler.CygwinCCompiler.link else: func = UnixCCompiler.link func(*args[:func.__code__.co_argcount]) return def object_filenames (self, source_filenames, strip_dir=0, output_dir=''): if output_dir is None: output_dir = '' obj_names = [] for src_name in source_filenames: # use normcase to make sure '.rc' is really '.rc' and not '.RC' (base, ext) = os.path.splitext (os.path.normcase(src_name)) # added these lines to strip off windows drive letters # without it, .o files are placed next to .c files # instead of 
the build directory drv, base = os.path.splitdrive(base) if drv: base = base[1:] if ext not in (self.src_extensions + ['.rc', '.res']): raise UnknownFileError( "unknown file type '%s' (from '%s')" % \ (ext, src_name)) if strip_dir: base = os.path.basename (base) if ext == '.res' or ext == '.rc': # these need to be compiled to object files obj_names.append (os.path.join (output_dir, base + ext + self.obj_extension)) else: obj_names.append (os.path.join (output_dir, base + self.obj_extension)) return obj_names # object_filenames () def find_python_dll(): # We can't do much here: # - find it in the virtualenv (sys.prefix) # - find it in python main dir (sys.base_prefix, if in a virtualenv) # - in system32, # - ortherwise (Sxs), I don't know how to get it. stems = [sys.prefix] if sys.base_prefix != sys.prefix: stems.append(sys.base_prefix) sub_dirs = ['', 'lib', 'bin'] # generate possible combinations of directory trees and sub-directories lib_dirs = [] for stem in stems: for folder in sub_dirs: lib_dirs = os.path.join(stem, folder) # add system directory as well if 'SYSTEMROOT' in os.environ: lib_dirs.append(os.path.join(os.environ['SYSTEMROOT'], 'System32')) # search in the file system for possible candidates major_version, minor_version = tuple(sys.version_info[:2]) patterns = ['python%d%d.dll'] for pat in patterns: dllname = pat % (major_version, minor_version) print("Looking for %s" % dllname) for folder in lib_dirs: dll = os.path.join(folder, dllname) if os.path.exists(dll): return dll raise ValueError("%s not found in %s" % (dllname, lib_dirs)) def dump_table(dll): st = subprocess.Popen(["objdump.exe", "-p", dll], stdout=subprocess.PIPE) return st.stdout.readlines() def generate_def(dll, dfile): """Given a dll file location, get all its exported symbols and dump them into the given def file. The .def file will be overwritten""" dump = dump_table(dll) for i in range(len(dump)): if _START.match(dump[i].decode()): break else: raise ValueError("Symbol table not found") syms = [] for j in range(i+1, len(dump)): m = _TABLE.match(dump[j].decode()) if m: syms.append((int(m.group(1).strip()), m.group(2))) else: break if len(syms) == 0: log.warn('No symbols found in %s' % dll) d = open(dfile, 'w') d.write('LIBRARY %s\n' % os.path.basename(dll)) d.write(';CODE PRELOAD MOVEABLE DISCARDABLE\n') d.write(';DATA PRELOAD SINGLE\n') d.write('\nEXPORTS\n') for s in syms: #d.write('@%d %s\n' % (s[0], s[1])) d.write('%s\n' % s[1]) d.close() def find_dll(dll_name): arch = {'AMD64' : 'amd64', 'Intel' : 'x86'}[get_build_architecture()] def _find_dll_in_winsxs(dll_name): # Walk through the WinSxS directory to find the dll. winsxs_path = os.path.join(os.environ['WINDIR'], 'winsxs') if not os.path.exists(winsxs_path): return None for root, dirs, files in os.walk(winsxs_path): if dll_name in files and arch in root: return os.path.join(root, dll_name) return None def _find_dll_in_path(dll_name): # First, look in the Python directory, then scan PATH for # the given dll name. 
for path in [sys.prefix] + os.environ['PATH'].split(';'): filepath = os.path.join(path, dll_name) if os.path.exists(filepath): return os.path.abspath(filepath) return _find_dll_in_winsxs(dll_name) or _find_dll_in_path(dll_name) def build_msvcr_library(debug=False): if os.name != 'nt': return False msvcr_name = msvc_runtime_library() # Skip using a custom library for versions < MSVC 8.0 msvcr_ver = msvc_runtime_major() if msvcr_ver and msvcr_ver < 80: log.debug('Skip building msvcr library:' ' custom functionality not present') return False if debug: msvcr_name += 'd' # Skip if custom library already exists out_name = "lib%s.a" % msvcr_name out_file = os.path.join(sys.prefix, 'libs', out_name) if os.path.isfile(out_file): log.debug('Skip building msvcr library: "%s" exists' % (out_file,)) return True # Find the msvcr dll msvcr_dll_name = msvcr_name + '.dll' dll_file = find_dll(msvcr_dll_name) if not dll_file: log.warn('Cannot build msvcr library: "%s" not found' % msvcr_dll_name) return False def_name = "lib%s.def" % msvcr_name def_file = os.path.join(sys.prefix, 'libs', def_name) log.info('Building msvcr library: "%s" (from %s)' \ % (out_file, dll_file)) # Generate a symbol definition file from the msvcr dll generate_def(dll_file, def_file) # Create a custom mingw library for the given symbol definitions cmd = ['dlltool', '-d', def_file, '-l', out_file] retcode = subprocess.call(cmd) # Clean up symbol definitions os.remove(def_file) return (not retcode) def build_import_library(): if os.name != 'nt': return arch = get_build_architecture() if arch == 'AMD64': return _build_import_library_amd64() elif arch == 'Intel': return _build_import_library_x86() else: raise ValueError("Unhandled arch %s" % arch) def _check_for_import_lib(): """Check if an import library for the Python runtime already exists.""" major_version, minor_version = tuple(sys.version_info[:2]) # patterns for the file name of the library itself patterns = ['libpython%d%d.a', 'libpython%d%d.dll.a', 'libpython%d.%d.dll.a'] # directory trees that may contain the library stems = [sys.prefix] if sys.base_prefix != sys.prefix: stems.append(sys.base_prefix) # possible subdirectories within those trees where it is placed sub_dirs = ['libs', 'lib'] # generate a list of candidate locations candidates = [] for pat in patterns: filename = pat % (major_version, minor_version) for stem_dir in stems: for folder in sub_dirs: candidates.append(os.path.join(stem_dir, folder, filename)) # test the filesystem to see if we can find any of these for fullname in candidates: if os.path.isfile(fullname): # already exists, in location given return (True, fullname) # needs to be built, preferred location given first return (False, candidates[0]) def _build_import_library_amd64(): out_exists, out_file = _check_for_import_lib() if out_exists: log.debug('Skip building import library: "%s" exists', out_file) return # get the runtime dll for which we are building import library dll_file = find_python_dll() log.info('Building import library (arch=AMD64): "%s" (from %s)' % (out_file, dll_file)) # generate symbol list from this library def_name = "python%d%d.def" % tuple(sys.version_info[:2]) def_file = os.path.join(sys.prefix, 'libs', def_name) generate_def(dll_file, def_file) # generate import library from this symbol list cmd = ['dlltool', '-d', def_file, '-l', out_file] subprocess.Popen(cmd) def _build_import_library_x86(): """ Build the import libraries for Mingw32-gcc on Windows """ out_exists, out_file = _check_for_import_lib() if out_exists: 
log.debug('Skip building import library: "%s" exists', out_file) return lib_name = "python%d%d.lib" % tuple(sys.version_info[:2]) lib_file = os.path.join(sys.prefix, 'libs', lib_name) if not os.path.isfile(lib_file): # didn't find library file in virtualenv, try base distribution, too, # and use that instead if found there base_lib = os.path.join(sys.base_prefix, 'libs', lib_name) if os.path.isfile(base_lib): lib_file = base_lib else: log.warn('Cannot build import library: "%s" not found', lib_file) return log.info('Building import library (ARCH=x86): "%s"', out_file) from numpy.distutils import lib2def def_name = "python%d%d.def" % tuple(sys.version_info[:2]) def_file = os.path.join(sys.prefix, 'libs', def_name) nm_cmd = '%s %s' % (lib2def.DEFAULT_NM, lib_file) nm_output = lib2def.getnm(nm_cmd) dlist, flist = lib2def.parse_nm(nm_output) lib2def.output_def(dlist, flist, lib2def.DEF_HEADER, open(def_file, 'w')) dll_name = find_python_dll () args = (dll_name, def_file, out_file) cmd = 'dlltool --dllname "%s" --def "%s" --output-lib "%s"' % args status = os.system(cmd) # for now, fail silently if status: log.warn('Failed to build import library for gcc. Linking will fail.') return #===================================== # Dealing with Visual Studio MANIFESTS #===================================== # Functions to deal with visual studio manifests. Manifest are a mechanism to # enforce strong DLL versioning on windows, and has nothing to do with # distutils MANIFEST. manifests are XML files with version info, and used by # the OS loader; they are necessary when linking against a DLL not in the # system path; in particular, official python 2.6 binary is built against the # MS runtime 9 (the one from VS 2008), which is not available on most windows # systems; python 2.6 installer does install it in the Win SxS (Side by side) # directory, but this requires the manifest for this to work. This is a big # mess, thanks MS for a wonderful system. # XXX: ideally, we should use exactly the same version as used by python. I # submitted a patch to get this version, but it was only included for python # 2.6.1 and above. So for versions below, we use a "best guess". _MSVCRVER_TO_FULLVER = {} if sys.platform == 'win32': try: import msvcrt # I took one version in my SxS directory: no idea if it is the good # one, and we can't retrieve it from python _MSVCRVER_TO_FULLVER['80'] = "8.0.50727.42" _MSVCRVER_TO_FULLVER['90'] = "9.0.21022.8" # Value from msvcrt.CRT_ASSEMBLY_VERSION under Python 3.3.0 # on Windows XP: _MSVCRVER_TO_FULLVER['100'] = "10.0.30319.460" if hasattr(msvcrt, "CRT_ASSEMBLY_VERSION"): major, minor, rest = msvcrt.CRT_ASSEMBLY_VERSION.split(".", 2) _MSVCRVER_TO_FULLVER[major + minor] = msvcrt.CRT_ASSEMBLY_VERSION del major, minor, rest except ImportError: # If we are here, means python was not built with MSVC. Not sure what # to do in that case: manifest building will fail, but it should not be # used in that case anyway log.warn('Cannot import msvcrt: using manifest will not be possible') def msvc_manifest_xml(maj, min): """Given a major and minor version of the MSVCR, returns the corresponding XML file.""" try: fullver = _MSVCRVER_TO_FULLVER[str(maj * 10 + min)] except KeyError: raise ValueError("Version %d,%d of MSVCRT not supported yet" % (maj, min)) # Don't be fooled, it looks like an XML, but it is not. In particular, it # should not have any space before starting, and its size should be # divisible by 4, most likely for alignement constraints when the xml is # embedded in the binary... 
# This template was copied directly from the python 2.6 binary (using # strings.exe from mingw on python.exe). template = """\ <assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0"> <trustInfo xmlns="urn:schemas-microsoft-com:asm.v3"> <security> <requestedPrivileges> <requestedExecutionLevel level="asInvoker" uiAccess="false"></requestedExecutionLevel> </requestedPrivileges> </security> </trustInfo> <dependency> <dependentAssembly> <assemblyIdentity type="win32" name="Microsoft.VC%(maj)d%(min)d.CRT" version="%(fullver)s" processorArchitecture="*" publicKeyToken="1fc8b3b9a1e18e3b"></assemblyIdentity> </dependentAssembly> </dependency> </assembly>""" return template % {'fullver': fullver, 'maj': maj, 'min': min} def manifest_rc(name, type='dll'): """Return the rc file used to generate the res file which will be embedded as manifest for given manifest file name, of given type ('dll' or 'exe'). Parameters ---------- name : str name of the manifest file to embed type : str {'dll', 'exe'} type of the binary which will embed the manifest """ if type == 'dll': rctype = 2 elif type == 'exe': rctype = 1 else: raise ValueError("Type %s not supported" % type) return """\ #include "winuser.h" %d RT_MANIFEST %s""" % (rctype, name) def check_embedded_msvcr_match_linked(msver): """msver is the ms runtime version used for the MANIFEST.""" # check msvcr major version are the same for linking and # embedding maj = msvc_runtime_major() if maj: if not maj == int(msver): raise ValueError( "Discrepancy between linked msvcr " \ "(%d) and the one about to be embedded " \ "(%d)" % (int(msver), maj)) def configtest_name(config): base = os.path.basename(config._gen_temp_sourcefile("yo", [], "c")) return os.path.splitext(base)[0] def manifest_name(config): # Get configest name (including suffix) root = configtest_name(config) exext = config.compiler.exe_extension return root + exext + ".manifest" def rc_name(config): # Get configtest name (including suffix) root = configtest_name(config) return root + ".rc" def generate_manifest(config): msver = get_build_msvc_version() if msver is not None: if msver >= 8: check_embedded_msvcr_match_linked(msver) ma = int(msver) mi = int((msver - ma) * 10) # Write the manifest file manxml = msvc_manifest_xml(ma, mi) man = open(manifest_name(config), "w") config.temp_files.append(manifest_name(config)) man.write(manxml) man.close()
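# ---------------------------------------------------------------------------
# Illustrative sketch (not part of numpy.distutils): generate_def() above scans
# `objdump -p` output for the export table and writes the exported symbol names
# into a .def file that dlltool can turn into an import library.  The
# self-contained example below runs the same two regexes over a canned objdump
# fragment so the parsing and the .def layout are visible without a Windows
# toolchain; the sample text, symbol names, and dll name are made up for
# illustration.
import re

# Same patterns as in the module above.
_START_RE = re.compile(r'\[Ordinal/Name Pointer\] Table')
_TABLE_RE = re.compile(r'^\s+\[([\s*[0-9]*)\] ([a-zA-Z0-9_]*)')

SAMPLE_OBJDUMP_OUTPUT = """\
The Export Tables (interpreted .edata section contents)

[Ordinal/Name Pointer] Table
    [   1] PyErr_Occurred
    [   2] PyLong_FromLong
    [   3] Py_Initialize
"""


def symbols_to_def(dump_text, dll_name):
    """Return .def file contents listing the exported symbols in dump_text."""
    lines = dump_text.splitlines()
    for start, line in enumerate(lines):
        if _START_RE.match(line):
            break
    else:
        raise ValueError("Symbol table not found")
    syms = []
    for line in lines[start + 1:]:
        m = _TABLE_RE.match(line)
        if not m:
            break
        syms.append(m.group(2))
    out = ['LIBRARY %s' % dll_name, '', 'EXPORTS']
    out.extend(syms)
    return '\n'.join(out) + '\n'


print(symbols_to_def(SAMPLE_OBJDUMP_OUTPUT, 'python39.dll'))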
# Copyright 2014-2016 OpenMarket Ltd # Copyright 2019 New Vector Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import itertools import logging from typing import Dict, Iterable, List, Optional, Tuple from signedjson.key import decode_verify_key_bytes from synapse.storage._base import SQLBaseStore from synapse.storage.keys import FetchKeyResult from synapse.storage.types import Cursor from synapse.util.caches.descriptors import cached, cachedList from synapse.util.iterutils import batch_iter logger = logging.getLogger(__name__) db_binary_type = memoryview class KeyStore(SQLBaseStore): """Persistence for signature verification keys""" @cached() def _get_server_verify_key(self, server_name_and_key_id): raise NotImplementedError() @cachedList( cached_method_name="_get_server_verify_key", list_name="server_name_and_key_ids" ) async def get_server_verify_keys( self, server_name_and_key_ids: Iterable[Tuple[str, str]] ) -> Dict[Tuple[str, str], FetchKeyResult]: """ Args: server_name_and_key_ids: iterable of (server_name, key-id) tuples to fetch keys for Returns: A map from (server_name, key_id) -> FetchKeyResult, or None if the key is unknown """ keys = {} def _get_keys(txn: Cursor, batch: Tuple[Tuple[str, str], ...]) -> None: """Processes a batch of keys to fetch, and adds the result to `keys`.""" # batch_iter always returns tuples so it's safe to do len(batch) sql = ( "SELECT server_name, key_id, verify_key, ts_valid_until_ms " "FROM server_signature_keys WHERE 1=0" ) + " OR (server_name=? AND key_id=?)" * len(batch) txn.execute(sql, tuple(itertools.chain.from_iterable(batch))) for row in txn: server_name, key_id, key_bytes, ts_valid_until_ms = row if ts_valid_until_ms is None: # Old keys may be stored with a ts_valid_until_ms of null, # in which case we treat this as if it was set to `0`, i.e. # it won't match key requests that define a minimum # `ts_valid_until_ms`. ts_valid_until_ms = 0 keys[(server_name, key_id)] = FetchKeyResult( verify_key=decode_verify_key_bytes(key_id, bytes(key_bytes)), valid_until_ts=ts_valid_until_ms, ) def _txn(txn: Cursor) -> Dict[Tuple[str, str], FetchKeyResult]: for batch in batch_iter(server_name_and_key_ids, 50): _get_keys(txn, batch) return keys return await self.db_pool.runInteraction("get_server_verify_keys", _txn) async def store_server_verify_keys( self, from_server: str, ts_added_ms: int, verify_keys: Iterable[Tuple[str, str, FetchKeyResult]], ) -> None: """Stores NACL verification keys for remote servers. Args: from_server: Where the verification keys were looked up ts_added_ms: The time to record that the key was added verify_keys: keys to be stored. Each entry is a triplet of (server_name, key_id, key). 
""" key_values = [] value_values = [] invalidations = [] for server_name, key_id, fetch_result in verify_keys: key_values.append((server_name, key_id)) value_values.append( ( from_server, ts_added_ms, fetch_result.valid_until_ts, db_binary_type(fetch_result.verify_key.encode()), ) ) # invalidate takes a tuple corresponding to the params of # _get_server_verify_key. _get_server_verify_key only takes one # param, which is itself the 2-tuple (server_name, key_id). invalidations.append((server_name, key_id)) await self.db_pool.simple_upsert_many( table="server_signature_keys", key_names=("server_name", "key_id"), key_values=key_values, value_names=( "from_server", "ts_added_ms", "ts_valid_until_ms", "verify_key", ), value_values=value_values, desc="store_server_verify_keys", ) invalidate = self._get_server_verify_key.invalidate for i in invalidations: invalidate((i,)) async def store_server_keys_json( self, server_name: str, key_id: str, from_server: str, ts_now_ms: int, ts_expires_ms: int, key_json_bytes: bytes, ) -> None: """Stores the JSON bytes for a set of keys from a server The JSON should be signed by the originating server, the intermediate server, and by this server. Updates the value for the (server_name, key_id, from_server) triplet if one already existed. Args: server_name: The name of the server. key_id: The identifier of the key this JSON is for. from_server: The server this JSON was fetched from. ts_now_ms: The time now in milliseconds. ts_valid_until_ms: The time when this json stops being valid. key_json_bytes: The encoded JSON. """ await self.db_pool.simple_upsert( table="server_keys_json", keyvalues={ "server_name": server_name, "key_id": key_id, "from_server": from_server, }, values={ "server_name": server_name, "key_id": key_id, "from_server": from_server, "ts_added_ms": ts_now_ms, "ts_valid_until_ms": ts_expires_ms, "key_json": db_binary_type(key_json_bytes), }, desc="store_server_keys_json", ) async def get_server_keys_json( self, server_keys: Iterable[Tuple[str, Optional[str], Optional[str]]] ) -> Dict[Tuple[str, Optional[str], Optional[str]], List[dict]]: """Retrieve the key json for a list of server_keys and key ids. If no keys are found for a given server, key_id and source then that server, key_id, and source triplet entry will be an empty list. The JSON is returned as a byte array so that it can be efficiently used in an HTTP response. Args: server_keys (list): List of (server_name, key_id, source) triplets. Returns: A mapping from (server_name, key_id, source) triplets to a list of dicts """ def _get_server_keys_json_txn(txn): results = {} for server_name, key_id, from_server in server_keys: keyvalues = {"server_name": server_name} if key_id is not None: keyvalues["key_id"] = key_id if from_server is not None: keyvalues["from_server"] = from_server rows = self.db_pool.simple_select_list_txn( txn, "server_keys_json", keyvalues=keyvalues, retcols=( "key_id", "from_server", "ts_added_ms", "ts_valid_until_ms", "key_json", ), ) results[(server_name, key_id, from_server)] = rows return results return await self.db_pool.runInteraction( "get_server_keys_json", _get_server_keys_json_txn )
# Copyright 2014: Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime import inspect import os import time from oslo_config import cfg import requests from six.moves import configparser from six.moves.urllib import parse from rally.common.i18n import _ from rally.common import log as logging from rally import db from rally import exceptions from rally import objects from rally import osclients LOG = logging.getLogger(__name__) IMAGE_OPTS = [ cfg.StrOpt("cirros_version", default="0.3.2", help="Version of cirros image"), cfg.StrOpt("cirros_image", default="cirros-0.3.2-x86_64-disk.img", help="Cirros image name"), cfg.StrOpt("cirros_base_url", default="http://download.cirros-cloud.net", help="Cirros image base URL"), ] CONF = cfg.CONF CONF.register_opts(IMAGE_OPTS, "image") class TempestConfigCreationFailure(exceptions.RallyException): msg_fmt = _("Unable create tempest.conf: '%(message)s'") class TempestConf(object): def __init__(self, deployment): self.endpoint = db.deployment_get(deployment)["admin"] self.clients = osclients.Clients(objects.Endpoint(**self.endpoint)) try: self.keystoneclient = self.clients.verified_keystone() except exceptions.InvalidAdminException: msg = (_("Admin permission is required to generate tempest " "configuration file. 
User %s doesn't have admin role.") % self.endpoint["username"]) raise TempestConfigCreationFailure(msg) self.available_services = self.clients.services().values() self.conf = configparser.ConfigParser() self.conf.read(os.path.join(os.path.dirname(__file__), "config.ini")) self.deployment = deployment self.data_path = os.path.join(os.path.expanduser("~"), ".rally", "tempest", "data") if not os.path.exists(self.data_path): os.makedirs(self.data_path) self.img_path = os.path.join(self.data_path, CONF.image.cirros_image) if not os.path.isfile(self.img_path): self._load_img() def _load_img(self): cirros_url = ("%s/%s/%s" % (CONF.image.cirros_base_url, CONF.image.cirros_version, CONF.image.cirros_image)) try: response = requests.get(cirros_url, stream=True) except requests.ConnectionError as err: msg = _("Error on downloading cirros image, possibly" " no connection to Internet with message %s") % str(err) raise TempestConfigCreationFailure(msg) if response.status_code == 200: with open(self.img_path + ".tmp", "wb") as img_file: for chunk in response.iter_content(chunk_size=1024): if chunk: # filter out keep-alive new chunks img_file.write(chunk) img_file.flush() os.rename(self.img_path + ".tmp", self.img_path) else: if response.status_code == 404: msg = _("Error on downloading cirros image, possibly" "invalid cirros_version or cirros_image in rally.conf") else: msg = _("Error on downloading cirros image, " "HTTP error code %s") % response.getcode() raise TempestConfigCreationFailure(msg) def _get_url(self, servicename): services_type2name_map = self.clients.services() for service in self.keystoneclient.auth_ref["serviceCatalog"]: if services_type2name_map.get(service["type"]) == servicename: return service["endpoints"][0]["publicURL"] def _set_default(self): lock_path = os.path.join(self.data_path, "lock_files_%s" % self.deployment) if not os.path.exists(lock_path): os.makedirs(lock_path) self.conf.set("DEFAULT", "lock_path", lock_path) def _set_boto(self, section_name="boto"): self.conf.set(section_name, "ec2_url", self._get_url("ec2")) self.conf.set(section_name, "s3_url", self._get_url("s3")) materials_path = os.path.join(self.data_path, "s3materials") self.conf.set(section_name, "s3_materials_path", materials_path) # TODO(olkonami): find out how can we get ami, ari, aki manifest files def _set_compute_images(self, section_name="compute"): glanceclient = self.clients.glance() image_list = [img for img in glanceclient.images.list() if img.status.lower() == "active" and img.name is not None and "cirros" in img.name] # Upload new images if there are no # necessary images in the cloud (cirros) while len(image_list) < 2: now = (datetime.datetime.fromtimestamp(time.time()). strftime("%Y_%m_%d_%H_%M_%S")) try: image = glanceclient.images.create(name=("cirros_%s" % now), disk_format="qcow2", container_format="bare") image.update(data=open(self.img_path, "rb")) image_list.append(image) except Exception as e: msg = _("There are no desired images (cirros) or only one and " "new image could not be created.\n" "Reason: %s") % getattr(e, "message", "unknown") raise TempestConfigCreationFailure(msg) self.conf.set(section_name, "image_ref", image_list[0].id) self.conf.set(section_name, "image_ref_alt", image_list[1].id) def _set_compute_flavors(self, section_name="compute"): novaclient = self.clients.nova() flavor_list = sorted(novaclient.flavors.list(), key=lambda flv: flv.ram) # Create new flavors if they are missing while len(flavor_list) < 2: now = (datetime.datetime.fromtimestamp(time.time()). 
strftime("%Y_%m_%d_%H_%M_%S")) try: flv = novaclient.flavors.create("m1.tiny_%s" % now, 512, 1, 1) flavor_list.append(flv) except Exception as e: msg = _("There are no desired flavors or only one and " "new flavor could not be created.\n" "Reason: %s") % getattr(e, "message", "unknown") raise TempestConfigCreationFailure(msg) self.conf.set(section_name, "flavor_ref", flavor_list[0].id) self.conf.set(section_name, "flavor_ref_alt", flavor_list[1].id) def _set_compute_ssh_connect_method(self, section_name="compute"): if "neutron" in self.available_services: self.conf.set(section_name, "ssh_connect_method", "floating") else: self.conf.set(section_name, "ssh_connect_method", "fixed") def _set_compute_admin(self, section_name="compute-admin"): self.conf.set(section_name, "username", self.endpoint["username"]) self.conf.set(section_name, "password", self.endpoint["password"]) self.conf.set(section_name, "tenant_name", self.endpoint["tenant_name"]) def _set_identity(self, section_name="identity"): self.conf.set(section_name, "username", self.endpoint["username"]) self.conf.set(section_name, "password", self.endpoint["password"]) self.conf.set(section_name, "tenant_name", self.endpoint["tenant_name"]) self.conf.set(section_name, "alt_username", self.endpoint["username"]) self.conf.set(section_name, "alt_password", self.endpoint["password"]) self.conf.set(section_name, "alt_tenant_name", self.endpoint["tenant_name"]) self.conf.set(section_name, "admin_username", self.endpoint["username"]) self.conf.set(section_name, "admin_password", self.endpoint["password"]) self.conf.set(section_name, "admin_tenant_name", self.endpoint["tenant_name"]) self.conf.set(section_name, "uri", self.endpoint["auth_url"]) self.conf.set(section_name, "uri_v3", self.endpoint["auth_url"].replace("/v2.0", "/v3")) self.conf.set(section_name, "admin_domain_name", self.endpoint["admin_domain_name"]) def _set_network(self, section_name="network"): if "neutron" in self.available_services: neutron = self.clients.neutron() public_net = [net for net in neutron.list_networks()["networks"] if net["status"] == "ACTIVE" and net["router:external"] is True] if public_net: net_id = public_net[0]["id"] self.conf.set(section_name, "public_network_id", net_id) public_router = neutron.list_routers( network_id=net_id)["routers"][0] self.conf.set(section_name, "public_router_id", public_router["id"]) subnets = neutron.list_subnets(network_id=net_id)["subnets"] if subnets: subnet = subnets[0] else: # TODO(akurilin): create public subnet LOG.warn("No public subnet is found.") else: subnets = neutron.list_subnets()["subnets"] if subnets: subnet = subnets[0] else: # TODO(akurilin): create subnet LOG.warn("No subnet is found.") self.conf.set(section_name, "default_network", subnet["cidr"]) else: network = self.clients.nova().networks.list()[0] self.conf.set(section_name, "default_network", network.cidr) def _set_service_available(self, section_name="service_available"): services = ["neutron", "heat", "ceilometer", "swift", "cinder", "nova", "glance"] for service in services: self.conf.set(section_name, service, str(service in self.available_services)) horizon_url = ("http://" + parse.urlparse(self.endpoint["auth_url"]).hostname) try: horizon_req = requests.get(horizon_url) except requests.RequestException as e: LOG.debug("Failed to connect to Horizon: %s" % e) horizon_availability = False else: horizon_availability = (horizon_req.status_code == 200) # convert boolean to string because ConfigParser fails # on attempt to get option with boolean value 
self.conf.set(section_name, "horizon", str(horizon_availability)) def write_config(self, file_name): with open(file_name, "w+") as f: self.conf.write(f) def generate(self, file_name=None): for name, func in inspect.getmembers(self, predicate=inspect.ismethod): if name.startswith("_set_"): func() if file_name: self.write_config(file_name) return self.conf
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # cython: profile=False # cython: overflowcheck=True # cython: language_level=3 """Counters collect the progress of the Worker for reporting to the service. For internal use only; no backwards-compatibility guarantees. """ # pytype: skip-file import threading from collections import namedtuple from typing import TYPE_CHECKING from typing import Dict from apache_beam.transforms import cy_combiners if TYPE_CHECKING: from apache_beam.transforms import core # Information identifying the IO being measured by a counter. # # A CounterName with IOTarget helps identify the IO being measured by a # counter. # # It may represent the consumption of Shuffle IO, or the consumption of # side inputs. The way in which each is represented is explained in the # documentation of the side_input_id, and shuffle_id functions. IOTargetName = namedtuple( 'IOTargetName', ['requesting_step_name', 'input_index']) def side_input_id(step_name, input_index): # type: (str, int) -> IOTargetName """Create an IOTargetName that identifies the reading of a side input. Given a step "s4" that receives two side inputs, then the CounterName that represents the consumption of side input number 2 is: * step_name: s4 <---| * input_index: 2 <---|-- Identifying the side input itself * requesting_step_name: s4 <-- Identifying the step that reads from it. If "s4" emits the whole AsIter of the side input, down to a step, say "s5", then the requesting_step_name of the subsequent consumption will be "s5". """ return IOTargetName(step_name, input_index) def shuffle_id(step_name): # type: (str) -> IOTargetName """Create an IOTargetName that identifies a GBK step. Given a step "s6" that is downstream from a GBK "s5", then "s6" will read from shuffle. 
  The CounterName that quantifies the consumption of data from shuffle has:
  * step_name: s5
  * requesting_step_name: s6

  If "s6" emits the whole iterable down to a step, say "s7", and "s7"
  continues to consume data from the iterable, then a new CounterName will be:
  * step_name: s5              <--- Identifying the GBK
  * requesting_step_name: s7
  """
  return IOTargetName(step_name, None)


_CounterName = namedtuple(
    '_CounterName',
    [
        'name',
        'stage_name',
        'step_name',
        'system_name',
        'namespace',
        'origin',
        'output_index',
        'io_target'
    ])


class CounterName(_CounterName):
  """Naming information for a counter."""
  SYSTEM = object()
  USER = object()

  def __new__(
      cls,
      name,
      stage_name=None,
      step_name=None,
      system_name=None,
      namespace=None,
      origin=None,
      output_index=None,
      io_target=None):
    origin = origin or CounterName.SYSTEM
    return super(CounterName, cls).__new__(
        cls,
        name,
        stage_name,
        step_name,
        system_name,
        namespace,
        origin,
        output_index,
        io_target)

  def __repr__(self):
    return '<CounterName<%s> at %s>' % (self._str_internal(), hex(id(self)))

  def __str__(self):
    return self._str_internal()

  def _str_internal(self):
    if self.origin == CounterName.USER:
      return 'user-%s-%s' % (self.step_name, self.name)
    elif self.origin == CounterName.SYSTEM and self.output_index:
      return '%s-out%s-%s' % (self.step_name, self.output_index, self.name)
    else:
      return '%s-%s-%s' % (self.stage_name, self.step_name, self.name)


class Counter(object):
  """A counter aggregates a series of values.

  The aggregation kind of the Counter is specified when the Counter
  is created.  The values aggregated must be of an appropriate type for
  the aggregation used.  Aggregations supported are listed in the code.

  (The aggregated value will be reported to the Dataflow service.)

  Do not create directly; call CounterFactory.get_counter instead.

  Attributes:
    name: the name of the counter, a string
    combine_fn: the CombineFn to use for aggregation
    accumulator: the accumulator created for the combine_fn
  """

  # Handy references to common counters.
  SUM = cy_combiners.SumInt64Fn()
  MEAN = cy_combiners.MeanInt64Fn()
  BEAM_DISTRIBUTION = cy_combiners.DistributionInt64Fn()

  # Dataflow Distribution Accumulator Fn.
  # TODO(BEAM-4045): Generalize distribution counter if necessary.
  DATAFLOW_DISTRIBUTION = cy_combiners.DataflowDistributionCounterFn()

  def __init__(self, name, combine_fn):
    # type: (CounterName, core.CombineFn) -> None

    """Creates a Counter object.

    Args:
      name: the name of this counter. It may be a string,
        or a CounterName object.
      combine_fn: the CombineFn to use for aggregation
    """
    self.name = name
    self.combine_fn = combine_fn
    self.accumulator = combine_fn.create_accumulator()
    self._add_input = self.combine_fn.add_input

  def update(self, value):
    self.accumulator = self._add_input(self.accumulator, value)

  def reset(self):
    self.accumulator = self.combine_fn.create_accumulator()

  def value(self):
    return self.combine_fn.extract_output(self.accumulator)

  def __str__(self):
    return '<%s>' % self._str_internal()

  def __repr__(self):
    return '<%s at %s>' % (self._str_internal(), hex(id(self)))

  def _str_internal(self):
    return '%s %s %s' % (
        self.name, self.combine_fn.__class__.__name__, self.value())


class AccumulatorCombineFnCounter(Counter):
  """Counter optimized for a mutating accumulator that holds all the logic."""
  def __init__(self, name, combine_fn):
    # type: (CounterName, cy_combiners.AccumulatorCombineFn) -> None
    assert isinstance(combine_fn, cy_combiners.AccumulatorCombineFn)
    super(AccumulatorCombineFnCounter, self).__init__(name, combine_fn)
    self.reset()

  def update(self, value):
    self._fast_add_input(value)

  def reset(self):
    self.accumulator = self.combine_fn.create_accumulator()
    self._fast_add_input = self.accumulator.add_input


class CounterFactory(object):
  """Keeps track of unique counters."""
  def __init__(self):
    self.counters = {}  # type: Dict[CounterName, Counter]
    # Lock to be acquired when accessing the counters map.
    self._lock = threading.Lock()

  def get_counter(self, name, combine_fn):
    # type: (CounterName, core.CombineFn) -> Counter

    """Returns a counter with the requested name.

    Passing in the same name will return the same counter; the combine_fn
    must agree.

    Args:
      name: the name of this counter.  Typically has three parts:
        "step-output-counter".
      combine_fn: the CombineFn to use for aggregation
    Returns:
      A new or existing counter with the requested name.
    """
    with self._lock:
      counter = self.counters.get(name, None)
      if counter:
        assert counter.combine_fn == combine_fn
      else:
        if isinstance(combine_fn, cy_combiners.AccumulatorCombineFn):
          counter = AccumulatorCombineFnCounter(name, combine_fn)
        else:
          counter = Counter(name, combine_fn)
        self.counters[name] = counter
      return counter

  def reset(self):
    # Counters are cached in state sampler states.
    with self._lock:
      for counter in self.counters.values():
        counter.reset()

  def get_counters(self):
    """Returns the current set of counters.

    Returns:
      An iterable that contains the current set of counters. To make sure that
      multiple threads can iterate over the set of counters, we return a new
      iterable here. Note that the actual set of counters may get modified after
      this method returns hence the returned iterable may be stale.
    """
    with self._lock:
      return self.counters.values()  # pylint: disable=dict-values-not-iterating
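# Hedged usage sketch for the counter classes above. It assumes this module is
# importable as apache_beam.utils.counters (module path assumed) with Apache
# Beam installed, and it only exercises APIs defined above: CounterName,
# Counter.SUM, and CounterFactory.get_counter / update / value.
from apache_beam.utils.counters import Counter, CounterFactory, CounterName

factory = CounterFactory()

# A user counter attached to step "s1"; CounterName._str_internal renders it
# as "user-s1-elements_seen".
name = CounterName('elements_seen', step_name='s1', origin=CounterName.USER)
counter = factory.get_counter(name, Counter.SUM)

for value in (1, 2, 3):
  counter.update(value)

assert counter.value() == 6
assert str(counter.name) == 'user-s1-elements_seen'

# Asking for the same name again returns the cached Counter instance.
assert factory.get_counter(name, Counter.SUM) is counter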
"""Support for interface with a Gree climate systems.""" from datetime import timedelta import logging from typing import List from greeclimate.device import ( FanSpeed, HorizontalSwing, Mode, TemperatureUnits, VerticalSwing, ) from greeclimate.exceptions import DeviceTimeoutError from homeassistant.components.climate import ClimateEntity from homeassistant.components.climate.const import ( FAN_AUTO, FAN_HIGH, FAN_LOW, FAN_MEDIUM, HVAC_MODE_AUTO, HVAC_MODE_COOL, HVAC_MODE_DRY, HVAC_MODE_FAN_ONLY, HVAC_MODE_HEAT, HVAC_MODE_OFF, PRESET_AWAY, PRESET_BOOST, PRESET_ECO, PRESET_NONE, PRESET_SLEEP, SUPPORT_FAN_MODE, SUPPORT_PRESET_MODE, SUPPORT_SWING_MODE, SUPPORT_TARGET_TEMPERATURE, SWING_BOTH, SWING_HORIZONTAL, SWING_OFF, SWING_VERTICAL, ) from homeassistant.const import ( ATTR_TEMPERATURE, PRECISION_WHOLE, TEMP_CELSIUS, TEMP_FAHRENHEIT, ) from homeassistant.helpers.device_registry import CONNECTION_NETWORK_MAC from .const import ( DOMAIN, FAN_MEDIUM_HIGH, FAN_MEDIUM_LOW, MAX_ERRORS, MAX_TEMP, MIN_TEMP, TARGET_TEMPERATURE_STEP, ) _LOGGER = logging.getLogger(__name__) SCAN_INTERVAL = timedelta(seconds=60) PARALLEL_UPDATES = 0 HVAC_MODES = { Mode.Auto: HVAC_MODE_AUTO, Mode.Cool: HVAC_MODE_COOL, Mode.Dry: HVAC_MODE_DRY, Mode.Fan: HVAC_MODE_FAN_ONLY, Mode.Heat: HVAC_MODE_HEAT, } HVAC_MODES_REVERSE = {v: k for k, v in HVAC_MODES.items()} PRESET_MODES = [ PRESET_ECO, # Power saving mode PRESET_AWAY, # Steady heat, or 8C mode on gree units PRESET_BOOST, # Turbo mode PRESET_NONE, # Default operating mode PRESET_SLEEP, # Sleep mode ] FAN_MODES = { FanSpeed.Auto: FAN_AUTO, FanSpeed.Low: FAN_LOW, FanSpeed.MediumLow: FAN_MEDIUM_LOW, FanSpeed.Medium: FAN_MEDIUM, FanSpeed.MediumHigh: FAN_MEDIUM_HIGH, FanSpeed.High: FAN_HIGH, } FAN_MODES_REVERSE = {v: k for k, v in FAN_MODES.items()} SWING_MODES = [SWING_OFF, SWING_VERTICAL, SWING_HORIZONTAL, SWING_BOTH] SUPPORTED_FEATURES = ( SUPPORT_TARGET_TEMPERATURE | SUPPORT_FAN_MODE | SUPPORT_PRESET_MODE | SUPPORT_SWING_MODE ) async def async_setup_entry(hass, config_entry, async_add_entities): """Set up the Gree HVAC device from a config entry.""" async_add_entities( GreeClimateEntity(device) for device in hass.data[DOMAIN].pop("pending") ) class GreeClimateEntity(ClimateEntity): """Representation of a Gree HVAC device.""" def __init__(self, device): """Initialize the Gree device.""" self._device = device self._name = device.device_info.name self._mac = device.device_info.mac self._available = False self._error_count = 0 async def async_update(self): """Update the state of the device.""" try: await self._device.update_state() if not self._available and self._error_count: _LOGGER.warning( "Device is available: %s (%s)", self._name, str(self._device.device_info), ) self._available = True self._error_count = 0 except DeviceTimeoutError: self._error_count += 1 # Under normal conditions GREE units timeout every once in a while if self._available and self._error_count >= MAX_ERRORS: self._available = False _LOGGER.warning( "Device is unavailable: %s (%s)", self._name, self._device.device_info, ) except Exception: # pylint: disable=broad-except # Under normal conditions GREE units timeout every once in a while if self._available: self._available = False _LOGGER.exception( "Unknown exception caught during update by gree device: %s (%s)", self._name, self._device.device_info, ) async def _push_state_update(self): """Send state updates to the physical device.""" try: return await self._device.push_state_update() except DeviceTimeoutError: self._error_count += 1 # Under normal 
conditions GREE units timeout every once in a while if self._available and self._error_count >= MAX_ERRORS: self._available = False _LOGGER.warning( "Device timedout while sending state update: %s (%s)", self._name, self._device.device_info, ) except Exception: # pylint: disable=broad-except # Under normal conditions GREE units timeout every once in a while if self._available: self._available = False _LOGGER.exception( "Unknown exception caught while sending state update to: %s (%s)", self._name, self._device.device_info, ) @property def available(self) -> bool: """Return if the device is available.""" return self._available @property def name(self) -> str: """Return the name of the device.""" return self._name @property def unique_id(self) -> str: """Return a unique id for the device.""" return self._mac @property def device_info(self): """Return device specific attributes.""" return { "name": self._name, "identifiers": {(DOMAIN, self._mac)}, "manufacturer": "Gree", "connections": {(CONNECTION_NETWORK_MAC, self._mac)}, } @property def temperature_unit(self) -> str: """Return the temperature units for the device.""" units = self._device.temperature_units return TEMP_CELSIUS if units == TemperatureUnits.C else TEMP_FAHRENHEIT @property def precision(self) -> float: """Return the precision of temperature for the device.""" return PRECISION_WHOLE @property def current_temperature(self) -> float: """Return the target temperature, gree devices don't provide internal temp.""" return self.target_temperature @property def target_temperature(self) -> float: """Return the target temperature for the device.""" return self._device.target_temperature async def async_set_temperature(self, **kwargs): """Set new target temperature.""" if ATTR_TEMPERATURE not in kwargs: raise ValueError(f"Missing parameter {ATTR_TEMPERATURE}") temperature = kwargs[ATTR_TEMPERATURE] _LOGGER.debug( "Setting temperature to %d for %s", temperature, self._name, ) self._device.target_temperature = round(temperature) await self._push_state_update() @property def min_temp(self) -> float: """Return the minimum temperature supported by the device.""" return MIN_TEMP @property def max_temp(self) -> float: """Return the maximum temperature supported by the device.""" return MAX_TEMP @property def target_temperature_step(self) -> float: """Return the target temperature step support by the device.""" return TARGET_TEMPERATURE_STEP @property def hvac_mode(self) -> str: """Return the current HVAC mode for the device.""" if not self._device.power: return HVAC_MODE_OFF return HVAC_MODES.get(self._device.mode) async def async_set_hvac_mode(self, hvac_mode): """Set new target hvac mode.""" if hvac_mode not in self.hvac_modes: raise ValueError(f"Invalid hvac_mode: {hvac_mode}") _LOGGER.debug( "Setting HVAC mode to %s for device %s", hvac_mode, self._name, ) if hvac_mode == HVAC_MODE_OFF: self._device.power = False await self._push_state_update() return if not self._device.power: self._device.power = True self._device.mode = HVAC_MODES_REVERSE.get(hvac_mode) await self._push_state_update() @property def hvac_modes(self) -> List[str]: """Return the HVAC modes support by the device.""" modes = [*HVAC_MODES_REVERSE] modes.append(HVAC_MODE_OFF) return modes @property def preset_mode(self) -> str: """Return the current preset mode for the device.""" if self._device.steady_heat: return PRESET_AWAY if self._device.power_save: return PRESET_ECO if self._device.sleep: return PRESET_SLEEP if self._device.turbo: return PRESET_BOOST return PRESET_NONE 
async def async_set_preset_mode(self, preset_mode): """Set new preset mode.""" if preset_mode not in PRESET_MODES: raise ValueError(f"Invalid preset mode: {preset_mode}") _LOGGER.debug( "Setting preset mode to %s for device %s", preset_mode, self._name, ) self._device.steady_heat = False self._device.power_save = False self._device.turbo = False self._device.sleep = False if preset_mode == PRESET_AWAY: self._device.steady_heat = True elif preset_mode == PRESET_ECO: self._device.power_save = True elif preset_mode == PRESET_BOOST: self._device.turbo = True elif preset_mode == PRESET_SLEEP: self._device.sleep = True await self._push_state_update() @property def preset_modes(self) -> List[str]: """Return the preset modes support by the device.""" return PRESET_MODES @property def fan_mode(self) -> str: """Return the current fan mode for the device.""" speed = self._device.fan_speed return FAN_MODES.get(speed) async def async_set_fan_mode(self, fan_mode): """Set new target fan mode.""" if fan_mode not in FAN_MODES_REVERSE: raise ValueError(f"Invalid fan mode: {fan_mode}") self._device.fan_speed = FAN_MODES_REVERSE.get(fan_mode) await self._push_state_update() @property def fan_modes(self) -> List[str]: """Return the fan modes support by the device.""" return [*FAN_MODES_REVERSE] @property def swing_mode(self) -> str: """Return the current swing mode for the device.""" h_swing = self._device.horizontal_swing == HorizontalSwing.FullSwing v_swing = self._device.vertical_swing == VerticalSwing.FullSwing if h_swing and v_swing: return SWING_BOTH if h_swing: return SWING_HORIZONTAL if v_swing: return SWING_VERTICAL return SWING_OFF async def async_set_swing_mode(self, swing_mode): """Set new target swing operation.""" if swing_mode not in SWING_MODES: raise ValueError(f"Invalid swing mode: {swing_mode}") _LOGGER.debug( "Setting swing mode to %s for device %s", swing_mode, self._name, ) self._device.horizontal_swing = HorizontalSwing.Center self._device.vertical_swing = VerticalSwing.FixedMiddle if swing_mode in (SWING_BOTH, SWING_HORIZONTAL): self._device.horizontal_swing = HorizontalSwing.FullSwing if swing_mode in (SWING_BOTH, SWING_VERTICAL): self._device.vertical_swing = VerticalSwing.FullSwing await self._push_state_update() @property def swing_modes(self) -> List[str]: """Return the swing modes currently supported for this device.""" return SWING_MODES @property def supported_features(self) -> int: """Return the supported features for this device integration.""" return SUPPORTED_FEATURES
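# Hedged sketch: a minimal, self-contained illustration of the forward/reverse
# mapping pattern used above for HVAC_MODES and FAN_MODES, where device enum
# values map to Home Assistant mode strings and the reversed dict translates
# user input back. DemoMode and the mode strings below are stand-ins, not the
# real greeclimate enums or Home Assistant constants.
from enum import Enum


class DemoMode(Enum):
    Auto = 0
    Cool = 1
    Heat = 4


DEMO_HVAC_MODES = {
    DemoMode.Auto: "auto",
    DemoMode.Cool: "cool",
    DemoMode.Heat: "heat",
}
DEMO_HVAC_MODES_REVERSE = {v: k for k, v in DEMO_HVAC_MODES.items()}


def hvac_mode_for(device_mode, power_on):
    """Mirror of the hvac_mode property: 'off' wins when the unit is powered down."""
    if not power_on:
        return "off"
    return DEMO_HVAC_MODES.get(device_mode)


def device_mode_for(hvac_mode):
    """Mirror of the translation step in async_set_hvac_mode (minus power handling)."""
    return DEMO_HVAC_MODES_REVERSE[hvac_mode]


assert hvac_mode_for(DemoMode.Cool, power_on=True) == "cool"
assert hvac_mode_for(DemoMode.Cool, power_on=False) == "off"
assert device_mode_for("heat") is DemoMode.Heat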
# coding: utf-8 # Copyright 2014 Globo.com Player authors. All rights reserved. # Use of this source code is governed by a MIT License # license that can be found in the LICENSE file. import iso8601 import datetime import itertools import re from m3u8 import protocol ''' http://tools.ietf.org/html/draft-pantos-http-live-streaming-08#section-3.2 http://stackoverflow.com/questions/2785755/how-to-split-but-ignore-separators-in-quoted-strings-in-python ''' ATTRIBUTELISTPATTERN = re.compile(r'''((?:[^,"']|"[^"]*"|'[^']*')+)''') def cast_date_time(value): return iso8601.parse_date(value) def format_date_time(value): return value.isoformat() class ParseError(Exception): def __init__(self, lineno, line): self.lineno = lineno self.line = line def __str__(self): return 'Syntax error in manifest on line %d: %s' % (self.lineno, self.line) def parse(content, strict=False): ''' Given a M3U8 playlist content returns a dictionary with all data found ''' data = { 'media_sequence': 0, 'is_variant': False, 'is_endlist': False, 'is_i_frames_only': False, 'is_independent_segments': False, 'playlist_type': None, 'playlists': [], 'segments': [], 'iframe_playlists': [], 'media': [], 'keys': [], } state = { 'expect_segment': False, 'expect_playlist': False, 'current_key': None, } lineno = 0 for line in string_to_lines(content): lineno += 1 line = line.strip() if line.startswith(protocol.ext_x_byterange): _parse_byterange(line, state) state['expect_segment'] = True elif line.startswith(protocol.ext_x_targetduration): _parse_simple_parameter(line, data, float) elif line.startswith(protocol.ext_x_media_sequence): _parse_simple_parameter(line, data, int) elif line.startswith(protocol.ext_x_program_date_time): _, program_date_time = _parse_simple_parameter_raw_value(line, cast_date_time) if not data.get('program_date_time'): data['program_date_time'] = program_date_time state['current_program_date_time'] = program_date_time elif line.startswith(protocol.ext_x_discontinuity): state['discontinuity'] = True elif line.startswith(protocol.ext_x_cue_out): _parse_cueout(line, state) state['cue_out'] = True state['cue_start'] = True elif line.startswith(protocol.ext_x_cue_out_start): _parse_cueout_start(line, state, string_to_lines(content)[lineno - 2]) state['cue_out'] = True state['cue_start'] = True elif line.startswith(protocol.ext_x_cue_span): state['cue_out'] = True state['cue_start'] = True elif line.startswith(protocol.ext_x_version): _parse_simple_parameter(line, data) elif line.startswith(protocol.ext_x_allow_cache): _parse_simple_parameter(line, data) elif line.startswith(protocol.ext_x_key): key = _parse_key(line) state['current_key'] = key if key not in data['keys']: data['keys'].append(key) elif line.startswith(protocol.extinf): _parse_extinf(line, data, state, lineno, strict) state['expect_segment'] = True elif line.startswith(protocol.ext_x_stream_inf): state['expect_playlist'] = True _parse_stream_inf(line, data, state) elif line.startswith(protocol.ext_x_i_frame_stream_inf): _parse_i_frame_stream_inf(line, data) elif line.startswith(protocol.ext_x_media): _parse_media(line, data, state) elif line.startswith(protocol.ext_x_playlist_type): _parse_simple_parameter(line, data) elif line.startswith(protocol.ext_i_frames_only): data['is_i_frames_only'] = True elif line.startswith(protocol.ext_is_independent_segments): data['is_independent_segments'] = True elif line.startswith(protocol.ext_x_endlist): data['is_endlist'] = True elif line.startswith('#'): # comment pass elif line.strip() == '': # blank lines are 
legal pass elif state['expect_segment']: _parse_ts_chunk(line, data, state) state['expect_segment'] = False elif state['expect_playlist']: _parse_variant_playlist(line, data, state) state['expect_playlist'] = False elif strict: raise ParseError(lineno, line) return data def _parse_key(line): params = ATTRIBUTELISTPATTERN.split(line.replace(protocol.ext_x_key + ':', ''))[1::2] key = {} for param in params: name, value = param.split('=', 1) key[normalize_attribute(name)] = remove_quotes(value) if key['method'] == "NONE": if 'uri' not in key: key['uri'] = None return key def _parse_extinf(line, data, state, lineno, strict): chunks = line.replace(protocol.extinf + ':', '').split(',') if len(chunks) == 2: duration, title = chunks elif len(chunks) == 1: if strict: raise ParseError(lineno, line) else: duration = chunks[0] title = '' if 'segment' not in state: state['segment'] = {} state['segment']['duration'] = float(duration) state['segment']['title'] = remove_quotes(title) def _parse_ts_chunk(line, data, state): segment = state.pop('segment') if state.get('current_program_date_time'): segment['program_date_time'] = state['current_program_date_time'] state['current_program_date_time'] += datetime.timedelta(seconds=segment['duration']) segment['uri'] = line segment['cue_out'] = state.pop('cue_out', False) if state.get('current_cue_out_scte35'): segment['scte35'] = state['current_cue_out_scte35'] segment['scte35_duration'] = state['current_cue_out_duration'] segment['discontinuity'] = state.pop('discontinuity', False) if state.get('current_key'): segment['key'] = state['current_key'] else: # For unencrypted segments, the initial key would be None if None not in data['keys']: data['keys'].append(None) data['segments'].append(segment) def _parse_attribute_list(prefix, line, atribute_parser): params = ATTRIBUTELISTPATTERN.split(line.replace(prefix + ':', ''))[1::2] attributes = {} for param in params: name, value = param.split('=', 1) name = normalize_attribute(name) if name in atribute_parser: value = atribute_parser[name](value) attributes[name] = value return attributes def _parse_stream_inf(line, data, state): data['is_variant'] = True data['media_sequence'] = None atribute_parser = remove_quotes_parser('codecs', 'audio', 'video', 'subtitles') atribute_parser["program_id"] = int atribute_parser["bandwidth"] = int atribute_parser["average_bandwidth"] = int state['stream_info'] = _parse_attribute_list(protocol.ext_x_stream_inf, line, atribute_parser) def _parse_i_frame_stream_inf(line, data): atribute_parser = remove_quotes_parser('codecs', 'uri') atribute_parser["program_id"] = int atribute_parser["bandwidth"] = int iframe_stream_info = _parse_attribute_list(protocol.ext_x_i_frame_stream_inf, line, atribute_parser) iframe_playlist = {'uri': iframe_stream_info.pop('uri'), 'iframe_stream_info': iframe_stream_info} data['iframe_playlists'].append(iframe_playlist) def _parse_media(line, data, state): quoted = remove_quotes_parser('uri', 'group_id', 'language', 'name', 'characteristics') media = _parse_attribute_list(protocol.ext_x_media, line, quoted) data['media'].append(media) def _parse_variant_playlist(line, data, state): playlist = {'uri': line, 'stream_info': state.pop('stream_info')} data['playlists'].append(playlist) def _parse_byterange(line, state): if 'segment' not in state: state['segment'] = {} state['segment']['byterange'] = line.replace(protocol.ext_x_byterange + ':', '') def _parse_simple_parameter_raw_value(line, cast_to=str, normalize=False): param, value = line.split(':', 1) param = 
normalize_attribute(param.replace('#EXT-X-', '')) if normalize: value = normalize_attribute(value) return param, cast_to(value) def _parse_and_set_simple_parameter_raw_value(line, data, cast_to=str, normalize=False): param, value = _parse_simple_parameter_raw_value(line, cast_to, normalize) data[param] = value return data[param] def _parse_simple_parameter(line, data, cast_to=str): return _parse_and_set_simple_parameter_raw_value(line, data, cast_to, True) def _parse_cueout(line, state): param, value = line.split(':', 1) res = re.match('.*Duration=(.*),SCTE35=(.*)$', value) if res: state['current_cue_out_duration'] = res.group(1) state['current_cue_out_scte35'] = res.group(2) def _cueout_elemental(line, state, prevline): param, value = line.split(':', 1) res = re.match('.*EXT-OATCLS-SCTE35:(.*)$', prevline) if res: return (res.group(1), value) else: return None def _cueout_envivio(line, state, prevline): param, value = line.split(':', 1) res = re.match('.*DURATION=(.*),.*,CUE="(.*)"', value) if res: return (res.group(2), res.group(1)) else: return None def _parse_cueout_start(line, state, prevline): _cueout_state = _cueout_elemental(line, state, prevline) or _cueout_envivio(line, state, prevline) if _cueout_state: state['current_cue_out_scte35'] = _cueout_state[0] state['current_cue_out_duration'] = _cueout_state[1] def string_to_lines(string): return string.strip().replace('\r\n', '\n').split('\n') def remove_quotes_parser(*attrs): return dict(zip(attrs, itertools.repeat(remove_quotes))) def remove_quotes(string): ''' Remove quotes from string. Ex.: "foo" -> foo 'foo' -> foo 'foo -> 'foo ''' quotes = ('"', "'") if string and string[0] in quotes and string[-1] in quotes: return string[1:-1] return string def normalize_attribute(attribute): return attribute.replace('-', '_').lower().strip() def is_url(uri): return re.match(r'https?://', uri) is not None
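# Hedged usage sketch for the parser above, assuming it is importable as
# m3u8.parser (package layout assumed). It feeds a minimal media playlist
# through parse() and inspects the resulting dictionary whose keys are
# initialised at the top of parse().
from m3u8.parser import parse

PLAYLIST = "\n".join([
    "#EXTM3U",
    "#EXT-X-VERSION:3",
    "#EXT-X-TARGETDURATION:6",
    "#EXT-X-MEDIA-SEQUENCE:0",
    "#EXTINF:5.97,",
    "segment0.ts",
    "#EXTINF:5.97,",
    "segment1.ts",
    "#EXT-X-ENDLIST",
])

data = parse(PLAYLIST)
assert data['targetduration'] == 6.0
assert data['media_sequence'] == 0
assert data['is_endlist'] is True
assert [seg['uri'] for seg in data['segments']] == ['segment0.ts', 'segment1.ts']
assert data['segments'][0]['duration'] == 5.97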
# Copyright 2016 Google LLC All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from functools import total_ordering import unittest import mock def _make_database(name="name"): from google.cloud.spanner_v1.database import Database return mock.create_autospec(Database, instance=True) def _make_session(): from google.cloud.spanner_v1.database import Session return mock.create_autospec(Session, instance=True) class TestAbstractSessionPool(unittest.TestCase): def _getTargetClass(self): from google.cloud.spanner_v1.pool import AbstractSessionPool return AbstractSessionPool def _make_one(self, *args, **kwargs): return self._getTargetClass()(*args, **kwargs) def test_ctor_defaults(self): pool = self._make_one() self.assertIsNone(pool._database) self.assertEqual(pool.labels, {}) def test_ctor_explicit(self): labels = {"foo": "bar"} pool = self._make_one(labels=labels) self.assertIsNone(pool._database) self.assertEqual(pool.labels, labels) def test_bind_abstract(self): pool = self._make_one() database = _make_database("name") with self.assertRaises(NotImplementedError): pool.bind(database) def test_get_abstract(self): pool = self._make_one() with self.assertRaises(NotImplementedError): pool.get() def test_put_abstract(self): pool = self._make_one() session = object() with self.assertRaises(NotImplementedError): pool.put(session) def test_clear_abstract(self): pool = self._make_one() with self.assertRaises(NotImplementedError): pool.clear() def test__new_session_wo_labels(self): pool = self._make_one() database = pool._database = _make_database("name") session = _make_session() database.session.return_value = session new_session = pool._new_session() self.assertIs(new_session, session) database.session.assert_called_once_with() def test__new_session_w_labels(self): labels = {"foo": "bar"} pool = self._make_one(labels=labels) database = pool._database = _make_database("name") session = _make_session() database.session.return_value = session new_session = pool._new_session() self.assertIs(new_session, session) database.session.assert_called_once_with(labels=labels) def test_session_wo_kwargs(self): from google.cloud.spanner_v1.pool import SessionCheckout pool = self._make_one() checkout = pool.session() self.assertIsInstance(checkout, SessionCheckout) self.assertIs(checkout._pool, pool) self.assertIsNone(checkout._session) self.assertEqual(checkout._kwargs, {}) def test_session_w_kwargs(self): from google.cloud.spanner_v1.pool import SessionCheckout pool = self._make_one() checkout = pool.session(foo="bar") self.assertIsInstance(checkout, SessionCheckout) self.assertIs(checkout._pool, pool) self.assertIsNone(checkout._session) self.assertEqual(checkout._kwargs, {"foo": "bar"}) class TestFixedSizePool(unittest.TestCase): def _getTargetClass(self): from google.cloud.spanner_v1.pool import FixedSizePool return FixedSizePool def _make_one(self, *args, **kwargs): return self._getTargetClass()(*args, **kwargs) def test_ctor_defaults(self): pool = self._make_one() self.assertIsNone(pool._database) 
self.assertEqual(pool.size, 10) self.assertEqual(pool.default_timeout, 10) self.assertTrue(pool._sessions.empty()) self.assertEqual(pool.labels, {}) def test_ctor_explicit(self): labels = {"foo": "bar"} pool = self._make_one(size=4, default_timeout=30, labels=labels) self.assertIsNone(pool._database) self.assertEqual(pool.size, 4) self.assertEqual(pool.default_timeout, 30) self.assertTrue(pool._sessions.empty()) self.assertEqual(pool.labels, labels) def test_bind(self): pool = self._make_one() database = _Database("name") SESSIONS = [_Session(database)] * 10 database._sessions.extend(SESSIONS) pool.bind(database) self.assertIs(pool._database, database) self.assertEqual(pool.size, 10) self.assertEqual(pool.default_timeout, 10) self.assertTrue(pool._sessions.full()) api = database.spanner_api self.assertEqual(api.batch_create_sessions.call_count, 5) for session in SESSIONS: session.create.assert_not_called() def test_get_non_expired(self): pool = self._make_one(size=4) database = _Database("name") SESSIONS = sorted([_Session(database) for i in range(0, 4)]) database._sessions.extend(SESSIONS) pool.bind(database) # check if sessions returned in LIFO order for i in (3, 2, 1, 0): session = pool.get() self.assertIs(session, SESSIONS[i]) self.assertTrue(session._exists_checked) self.assertFalse(pool._sessions.full()) def test_get_expired(self): pool = self._make_one(size=4) database = _Database("name") SESSIONS = [_Session(database)] * 5 SESSIONS[0]._exists = False database._sessions.extend(SESSIONS) pool.bind(database) session = pool.get() self.assertIs(session, SESSIONS[4]) session.create.assert_called() self.assertTrue(SESSIONS[0]._exists_checked) self.assertFalse(pool._sessions.full()) def test_get_empty_default_timeout(self): from six.moves.queue import Empty pool = self._make_one(size=1) queue = pool._sessions = _Queue() with self.assertRaises(Empty): pool.get() self.assertEqual(queue._got, {"block": True, "timeout": 10}) def test_get_empty_explicit_timeout(self): from six.moves.queue import Empty pool = self._make_one(size=1, default_timeout=0.1) queue = pool._sessions = _Queue() with self.assertRaises(Empty): pool.get(timeout=1) self.assertEqual(queue._got, {"block": True, "timeout": 1}) def test_put_full(self): from six.moves.queue import Full pool = self._make_one(size=4) database = _Database("name") SESSIONS = [_Session(database)] * 4 database._sessions.extend(SESSIONS) pool.bind(database) with self.assertRaises(Full): pool.put(_Session(database)) self.assertTrue(pool._sessions.full()) def test_put_non_full(self): pool = self._make_one(size=4) database = _Database("name") SESSIONS = [_Session(database)] * 4 database._sessions.extend(SESSIONS) pool.bind(database) pool._sessions.get() pool.put(_Session(database)) self.assertTrue(pool._sessions.full()) def test_clear(self): pool = self._make_one() database = _Database("name") SESSIONS = [_Session(database)] * 10 database._sessions.extend(SESSIONS) pool.bind(database) self.assertTrue(pool._sessions.full()) api = database.spanner_api self.assertEqual(api.batch_create_sessions.call_count, 5) for session in SESSIONS: session.create.assert_not_called() pool.clear() for session in SESSIONS: self.assertTrue(session._deleted) class TestBurstyPool(unittest.TestCase): def _getTargetClass(self): from google.cloud.spanner_v1.pool import BurstyPool return BurstyPool def _make_one(self, *args, **kwargs): return self._getTargetClass()(*args, **kwargs) def test_ctor_defaults(self): pool = self._make_one() self.assertIsNone(pool._database) 
self.assertEqual(pool.target_size, 10) self.assertTrue(pool._sessions.empty()) self.assertEqual(pool.labels, {}) def test_ctor_explicit(self): labels = {"foo": "bar"} pool = self._make_one(target_size=4, labels=labels) self.assertIsNone(pool._database) self.assertEqual(pool.target_size, 4) self.assertTrue(pool._sessions.empty()) self.assertEqual(pool.labels, labels) def test_get_empty(self): pool = self._make_one() database = _Database("name") database._sessions.append(_Session(database)) pool.bind(database) session = pool.get() self.assertIsInstance(session, _Session) self.assertIs(session._database, database) session.create.assert_called() self.assertTrue(pool._sessions.empty()) def test_get_non_empty_session_exists(self): pool = self._make_one() database = _Database("name") previous = _Session(database) pool.bind(database) pool.put(previous) session = pool.get() self.assertIs(session, previous) session.create.assert_not_called() self.assertTrue(session._exists_checked) self.assertTrue(pool._sessions.empty()) def test_get_non_empty_session_expired(self): pool = self._make_one() database = _Database("name") previous = _Session(database, exists=False) newborn = _Session(database) database._sessions.append(newborn) pool.bind(database) pool.put(previous) session = pool.get() self.assertTrue(previous._exists_checked) self.assertIs(session, newborn) session.create.assert_called() self.assertFalse(session._exists_checked) self.assertTrue(pool._sessions.empty()) def test_put_empty(self): pool = self._make_one() database = _Database("name") pool.bind(database) session = _Session(database) pool.put(session) self.assertFalse(pool._sessions.empty()) def test_put_full(self): pool = self._make_one(target_size=1) database = _Database("name") pool.bind(database) older = _Session(database) pool.put(older) self.assertFalse(pool._sessions.empty()) younger = _Session(database) pool.put(younger) # discarded silently self.assertTrue(younger._deleted) self.assertIs(pool.get(), older) def test_put_full_expired(self): pool = self._make_one(target_size=1) database = _Database("name") pool.bind(database) older = _Session(database) pool.put(older) self.assertFalse(pool._sessions.empty()) younger = _Session(database, exists=False) pool.put(younger) # discarded silently self.assertTrue(younger._deleted) self.assertIs(pool.get(), older) def test_clear(self): pool = self._make_one() database = _Database("name") pool.bind(database) previous = _Session(database) pool.put(previous) pool.clear() self.assertTrue(previous._deleted) class TestPingingPool(unittest.TestCase): def _getTargetClass(self): from google.cloud.spanner_v1.pool import PingingPool return PingingPool def _make_one(self, *args, **kwargs): return self._getTargetClass()(*args, **kwargs) def test_ctor_defaults(self): pool = self._make_one() self.assertIsNone(pool._database) self.assertEqual(pool.size, 10) self.assertEqual(pool.default_timeout, 10) self.assertEqual(pool._delta.seconds, 3000) self.assertTrue(pool._sessions.empty()) self.assertEqual(pool.labels, {}) def test_ctor_explicit(self): labels = {"foo": "bar"} pool = self._make_one( size=4, default_timeout=30, ping_interval=1800, labels=labels ) self.assertIsNone(pool._database) self.assertEqual(pool.size, 4) self.assertEqual(pool.default_timeout, 30) self.assertEqual(pool._delta.seconds, 1800) self.assertTrue(pool._sessions.empty()) self.assertEqual(pool.labels, labels) def test_bind(self): pool = self._make_one() database = _Database("name") SESSIONS = [_Session(database)] * 10 
database._sessions.extend(SESSIONS) pool.bind(database) self.assertIs(pool._database, database) self.assertEqual(pool.size, 10) self.assertEqual(pool.default_timeout, 10) self.assertEqual(pool._delta.seconds, 3000) self.assertTrue(pool._sessions.full()) api = database.spanner_api self.assertEqual(api.batch_create_sessions.call_count, 5) for session in SESSIONS: session.create.assert_not_called() def test_get_hit_no_ping(self): pool = self._make_one(size=4) database = _Database("name") SESSIONS = [_Session(database)] * 4 database._sessions.extend(SESSIONS) pool.bind(database) session = pool.get() self.assertIs(session, SESSIONS[0]) self.assertFalse(session._exists_checked) self.assertFalse(pool._sessions.full()) def test_get_hit_w_ping(self): import datetime from google.cloud._testing import _Monkey from google.cloud.spanner_v1 import pool as MUT pool = self._make_one(size=4) database = _Database("name") SESSIONS = [_Session(database)] * 4 database._sessions.extend(SESSIONS) sessions_created = datetime.datetime.utcnow() - datetime.timedelta(seconds=4000) with _Monkey(MUT, _NOW=lambda: sessions_created): pool.bind(database) session = pool.get() self.assertIs(session, SESSIONS[0]) self.assertTrue(session._exists_checked) self.assertFalse(pool._sessions.full()) def test_get_hit_w_ping_expired(self): import datetime from google.cloud._testing import _Monkey from google.cloud.spanner_v1 import pool as MUT pool = self._make_one(size=4) database = _Database("name") SESSIONS = [_Session(database)] * 5 SESSIONS[0]._exists = False database._sessions.extend(SESSIONS) sessions_created = datetime.datetime.utcnow() - datetime.timedelta(seconds=4000) with _Monkey(MUT, _NOW=lambda: sessions_created): pool.bind(database) session = pool.get() self.assertIs(session, SESSIONS[4]) session.create.assert_called() self.assertTrue(SESSIONS[0]._exists_checked) self.assertFalse(pool._sessions.full()) def test_get_empty_default_timeout(self): from six.moves.queue import Empty pool = self._make_one(size=1) queue = pool._sessions = _Queue() with self.assertRaises(Empty): pool.get() self.assertEqual(queue._got, {"block": True, "timeout": 10}) def test_get_empty_explicit_timeout(self): from six.moves.queue import Empty pool = self._make_one(size=1, default_timeout=0.1) queue = pool._sessions = _Queue() with self.assertRaises(Empty): pool.get(timeout=1) self.assertEqual(queue._got, {"block": True, "timeout": 1}) def test_put_full(self): from six.moves.queue import Full pool = self._make_one(size=4) database = _Database("name") SESSIONS = [_Session(database)] * 4 database._sessions.extend(SESSIONS) pool.bind(database) with self.assertRaises(Full): pool.put(_Session(database)) self.assertTrue(pool._sessions.full()) def test_put_non_full(self): import datetime from google.cloud._testing import _Monkey from google.cloud.spanner_v1 import pool as MUT pool = self._make_one(size=1) queue = pool._sessions = _Queue() now = datetime.datetime.utcnow() database = _Database("name") session = _Session(database) with _Monkey(MUT, _NOW=lambda: now): pool.put(session) self.assertEqual(len(queue._items), 1) ping_after, queued = queue._items[0] self.assertEqual(ping_after, now + datetime.timedelta(seconds=3000)) self.assertIs(queued, session) def test_clear(self): pool = self._make_one() database = _Database("name") SESSIONS = [_Session(database)] * 10 database._sessions.extend(SESSIONS) pool.bind(database) self.assertTrue(pool._sessions.full()) api = database.spanner_api self.assertEqual(api.batch_create_sessions.call_count, 5) for session 
in SESSIONS: session.create.assert_not_called() pool.clear() for session in SESSIONS: self.assertTrue(session._deleted) def test_ping_empty(self): pool = self._make_one(size=1) pool.ping() # Does not raise 'Empty' def test_ping_oldest_fresh(self): pool = self._make_one(size=1) database = _Database("name") SESSIONS = [_Session(database)] * 1 database._sessions.extend(SESSIONS) pool.bind(database) pool.ping() self.assertFalse(SESSIONS[0]._exists_checked) def test_ping_oldest_stale_but_exists(self): import datetime from google.cloud._testing import _Monkey from google.cloud.spanner_v1 import pool as MUT pool = self._make_one(size=1) database = _Database("name") SESSIONS = [_Session(database)] * 1 database._sessions.extend(SESSIONS) pool.bind(database) later = datetime.datetime.utcnow() + datetime.timedelta(seconds=4000) with _Monkey(MUT, _NOW=lambda: later): pool.ping() self.assertTrue(SESSIONS[0]._exists_checked) def test_ping_oldest_stale_and_not_exists(self): import datetime from google.cloud._testing import _Monkey from google.cloud.spanner_v1 import pool as MUT pool = self._make_one(size=1) database = _Database("name") SESSIONS = [_Session(database)] * 2 SESSIONS[0]._exists = False database._sessions.extend(SESSIONS) pool.bind(database) later = datetime.datetime.utcnow() + datetime.timedelta(seconds=4000) with _Monkey(MUT, _NOW=lambda: later): pool.ping() self.assertTrue(SESSIONS[0]._exists_checked) SESSIONS[1].create.assert_called() class TestTransactionPingingPool(unittest.TestCase): def _getTargetClass(self): from google.cloud.spanner_v1.pool import TransactionPingingPool return TransactionPingingPool def _make_one(self, *args, **kwargs): return self._getTargetClass()(*args, **kwargs) def test_ctor_defaults(self): pool = self._make_one() self.assertIsNone(pool._database) self.assertEqual(pool.size, 10) self.assertEqual(pool.default_timeout, 10) self.assertEqual(pool._delta.seconds, 3000) self.assertTrue(pool._sessions.empty()) self.assertTrue(pool._pending_sessions.empty()) self.assertEqual(pool.labels, {}) def test_ctor_explicit(self): labels = {"foo": "bar"} pool = self._make_one( size=4, default_timeout=30, ping_interval=1800, labels=labels ) self.assertIsNone(pool._database) self.assertEqual(pool.size, 4) self.assertEqual(pool.default_timeout, 30) self.assertEqual(pool._delta.seconds, 1800) self.assertTrue(pool._sessions.empty()) self.assertTrue(pool._pending_sessions.empty()) self.assertEqual(pool.labels, labels) def test_bind(self): pool = self._make_one() database = _Database("name") SESSIONS = [_Session(database) for _ in range(10)] database._sessions.extend(SESSIONS) pool.bind(database) self.assertIs(pool._database, database) self.assertEqual(pool.size, 10) self.assertEqual(pool.default_timeout, 10) self.assertEqual(pool._delta.seconds, 3000) self.assertTrue(pool._sessions.full()) api = database.spanner_api self.assertEqual(api.batch_create_sessions.call_count, 5) for session in SESSIONS: session.create.assert_not_called() txn = session._transaction txn.begin.assert_called_once_with() self.assertTrue(pool._pending_sessions.empty()) def test_bind_w_timestamp_race(self): import datetime from google.cloud._testing import _Monkey from google.cloud.spanner_v1 import pool as MUT NOW = datetime.datetime.utcnow() pool = self._make_one() database = _Database("name") SESSIONS = [_Session(database) for _ in range(10)] database._sessions.extend(SESSIONS) with _Monkey(MUT, _NOW=lambda: NOW): pool.bind(database) self.assertIs(pool._database, database) self.assertEqual(pool.size, 10) 
self.assertEqual(pool.default_timeout, 10) self.assertEqual(pool._delta.seconds, 3000) self.assertTrue(pool._sessions.full()) api = database.spanner_api self.assertEqual(api.batch_create_sessions.call_count, 5) for session in SESSIONS: session.create.assert_not_called() txn = session._transaction txn.begin.assert_called_once_with() self.assertTrue(pool._pending_sessions.empty()) def test_put_full(self): from six.moves.queue import Full pool = self._make_one(size=4) database = _Database("name") SESSIONS = [_Session(database) for _ in range(4)] database._sessions.extend(SESSIONS) pool.bind(database) with self.assertRaises(Full): pool.put(_Session(database)) self.assertTrue(pool._sessions.full()) def test_put_non_full_w_active_txn(self): pool = self._make_one(size=1) queue = pool._sessions = _Queue() pending = pool._pending_sessions = _Queue() database = _Database("name") session = _Session(database) txn = session.transaction() pool.put(session) self.assertEqual(len(queue._items), 1) _, queued = queue._items[0] self.assertIs(queued, session) self.assertEqual(len(pending._items), 0) txn.begin.assert_not_called() def test_put_non_full_w_committed_txn(self): pool = self._make_one(size=1) queue = pool._sessions = _Queue() pending = pool._pending_sessions = _Queue() database = _Database("name") session = _Session(database) committed = session.transaction() committed.committed = True pool.put(session) self.assertEqual(len(queue._items), 0) self.assertEqual(len(pending._items), 1) self.assertIs(pending._items[0], session) self.assertIsNot(session._transaction, committed) session._transaction.begin.assert_not_called() def test_put_non_full(self): pool = self._make_one(size=1) queue = pool._sessions = _Queue() pending = pool._pending_sessions = _Queue() database = _Database("name") session = _Session(database) pool.put(session) self.assertEqual(len(queue._items), 0) self.assertEqual(len(pending._items), 1) self.assertIs(pending._items[0], session) self.assertFalse(pending.empty()) def test_begin_pending_transactions_empty(self): pool = self._make_one(size=1) pool.begin_pending_transactions() # no raise def test_begin_pending_transactions_non_empty(self): pool = self._make_one(size=1) pool._sessions = _Queue() database = _Database("name") TRANSACTIONS = [_make_transaction(object())] PENDING_SESSIONS = [_Session(database, transaction=txn) for txn in TRANSACTIONS] pending = pool._pending_sessions = _Queue(*PENDING_SESSIONS) self.assertFalse(pending.empty()) pool.begin_pending_transactions() # no raise for txn in TRANSACTIONS: txn.begin.assert_called_once_with() self.assertTrue(pending.empty()) class TestSessionCheckout(unittest.TestCase): def _getTargetClass(self): from google.cloud.spanner_v1.pool import SessionCheckout return SessionCheckout def _make_one(self, *args, **kwargs): return self._getTargetClass()(*args, **kwargs) def test_ctor_wo_kwargs(self): pool = _Pool() checkout = self._make_one(pool) self.assertIs(checkout._pool, pool) self.assertIsNone(checkout._session) self.assertEqual(checkout._kwargs, {}) def test_ctor_w_kwargs(self): pool = _Pool() checkout = self._make_one(pool, foo="bar") self.assertIs(checkout._pool, pool) self.assertIsNone(checkout._session) self.assertEqual(checkout._kwargs, {"foo": "bar"}) def test_context_manager_wo_kwargs(self): session = object() pool = _Pool(session) checkout = self._make_one(pool) self.assertEqual(len(pool._items), 1) self.assertIs(pool._items[0], session) with checkout as borrowed: self.assertIs(borrowed, session) self.assertEqual(len(pool._items), 
0) self.assertEqual(len(pool._items), 1) self.assertIs(pool._items[0], session) self.assertEqual(pool._got, {}) def test_context_manager_w_kwargs(self): session = object() pool = _Pool(session) checkout = self._make_one(pool, foo="bar") self.assertEqual(len(pool._items), 1) self.assertIs(pool._items[0], session) with checkout as borrowed: self.assertIs(borrowed, session) self.assertEqual(len(pool._items), 0) self.assertEqual(len(pool._items), 1) self.assertIs(pool._items[0], session) self.assertEqual(pool._got, {"foo": "bar"}) def _make_transaction(*args, **kw): from google.cloud.spanner_v1.transaction import Transaction txn = mock.create_autospec(Transaction)(*args, **kw) txn.committed = None txn._rolled_back = False return txn @total_ordering class _Session(object): _transaction = None def __init__(self, database, exists=True, transaction=None): self._database = database self._exists = exists self._exists_checked = False self.create = mock.Mock() self._deleted = False self._transaction = transaction def __lt__(self, other): return id(self) < id(other) def exists(self): self._exists_checked = True return self._exists def delete(self): from google.cloud.exceptions import NotFound self._deleted = True if not self._exists: raise NotFound("unknown session") def transaction(self): txn = self._transaction = _make_transaction(self) return txn class _Database(object): def __init__(self, name): self.name = name self._sessions = [] def mock_batch_create_sessions(db, session_count=10, timeout=10, metadata=[]): from google.cloud.spanner_v1.proto import spanner_pb2 response = spanner_pb2.BatchCreateSessionsResponse() if session_count < 2: response.session.add() else: response.session.add() response.session.add() return response from google.cloud.spanner_v1.gapic.spanner_client import SpannerClient self.spanner_api = mock.create_autospec(SpannerClient, instance=True) self.spanner_api.batch_create_sessions.side_effect = mock_batch_create_sessions def session(self): # always return first session in the list # to avoid reversing the order of putting # sessions into pool (important for order tests) return self._sessions.pop(0) class _Queue(object): _size = 1 def __init__(self, *items): self._items = list(items) def empty(self): return len(self._items) == 0 def full(self): return len(self._items) >= self._size def get(self, **kwargs): from six.moves.queue import Empty self._got = kwargs try: return self._items.pop() except IndexError: raise Empty() def put(self, item, **kwargs): self._put = kwargs self._items.append(item) def put_nowait(self, item, **kwargs): self._put_nowait = kwargs self._items.append(item) class _Pool(_Queue): _database = None
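# Hedged usage sketch: how the pools exercised by these tests are meant to be
# used by client code. This assumes the google-cloud-spanner client library,
# real credentials, and an existing instance/database; the ids below are
# placeholders. Constructing the Database binds the pool (pool.bind), and
# pool.session() returns the SessionCheckout context manager tested above.
from google.cloud import spanner
from google.cloud.spanner_v1.pool import FixedSizePool

client = spanner.Client(project="my-project")            # placeholder id
instance = client.instance("my-instance")                 # placeholder id
pool = FixedSizePool(size=4, default_timeout=10)

# bind() pre-creates `size` sessions for the database.
database = instance.database("my-database", pool=pool)    # placeholder id

# Entering the checkout calls pool.get(); leaving it calls pool.put(),
# mirroring test_context_manager_wo_kwargs above.
with pool.session() as session:
    rows = list(session.execute_sql("SELECT 1"))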
from __future__ import absolute_import from builtins import object from proteus import * from proteus.default_p import * from math import * try: from .vortex2D import * except: from vortex2D import * from proteus.mprans import NCLS #import Profiling LevelModelType = NCLS.LevelModel logEvent = Profiling.logEvent name=soname+"_ls" nd=2 ## \page Tests Test Problems # \ref ls_vortex_2d_p.py "Linear advection of a circular level set function in an oscillating vortex velocity field" # ##\ingroup test # \file la_vortex_2d_p.py # @{ # \brief Conservative linear advection of a circle signed distance function # in a oscillating vortex velocity field. # # \f{eqnarray*} # \phi_t + \nabla \cdot (\vec u \phi) &=& 0 \\ # \Omega &=& [0,1] \times [0,1] \\ # u^{x} &=& \cos(\pi t/8)\sin(2\pi y)\sin^2(\pi x) \\ # u^{y} &=& -\cos(\pi t/8)\sin(2\pi x)\sin^{2}(\pi y) \\ # \phi^{0} &=& \left(x-\frac{1}{2}\right)^2 + \left(y-\frac{3}{4}\right)^2 - 0.15^2 # \f} # The solution should return to the initial condition at \f$T=8\f$. # Outflow boundaries are applied on \f$\partial \Omega\f$. # # # \image html save_la_vortex_2d_dgp2_exact.jpg "exact solution, T=8.0" # \image latex save_la_vortex_2d_dgp2_exact.eps "exact solution, T=8.0" # \image html save_la_vortex_2d_dgp2_phi.jpg "RKDG P^2 solution, Cr=0.1, L^2 error= 7.84e-3" # \image latex save_la_vortex_2d_dgp2_phi.eps "RKDG $P^2$ solution, Cr=0.1, $L^2$ error= 7.84e-3" # class OscillatingVortex2D(object): #cek changed to put sphere inside arbitrary box with dimensions in L def __init__(self,L): self.radius = 0.15*L[0] self.xc=0.5*L[0] self.yc=0.75*L[1] def uOfXT(self,x,t): return self.radius - math.sqrt((x[0]-self.xc)**2 + (x[1]-self.yc)**2) class OscillatingVortex2Dcylinder(object): #cek changed to put sphere inside arbitrary box with dimensions in L def __init__(self,L): self.radius = 0.15*L[0] self.xc=0.5*L[0] self.yc=0.75*L[1] def uOfXT(self,x,t): return self.radius - math.sqrt((x[0]-self.xc)**2 + (x[1]-self.yc)**2) analyticalSolution = {0:OscillatingVortex2D(L)} class UnitSquareVortex(NCLS.Coefficients): from proteus.ctransportCoefficients import unitSquareVortexEvaluate from proteus.ctransportCoefficients import unitSquareVortexLevelSetEvaluate def __init__(self,useHJ=False,epsFact=1.5,checkMass=False, RD_model=None, useMetrics=0.0,sc_uref=1.0,sc_beta=1.0): self.waterline_interval=-1 self.epsFact=epsFact self.useHJ = useHJ self.RD_modelIndex=RD_model self.sc_uref=sc_uref self.sc_beta=sc_beta self.useMetrics=useMetrics mass={0:{0:'linear'}} advection={0:{0:'linear'}} diffusion={} potential={} reaction={} if self.useHJ: hamiltonian={0:{0:'linear'}} else: hamiltonian={} NCLS.Coefficients.__init__(self) self.checkMass=checkMass self.useMetrics = 0.0 self.sc_uref=1.0 self.sc_beta=1.0 def attachModels(self,modelList): self.model = modelList[0] self.u_old_dof = numpy.copy(self.model.u[0].dof) self.q_v = numpy.zeros(self.model.q[('dH',0,0)].shape,'d') self.ebqe_v = numpy.zeros(self.model.ebqe[('dH',0,0)].shape,'d') self.unitSquareVortexLevelSetEvaluate(self.model.timeIntegration.tLast, self.model.q['x'], self.model.q[('u',0)],self.model.q[('grad(u)',0)], self.model.q[('m',0)],self.model.q[('dm',0,0)], self.model.q[('dH',0,0)],self.model.q[('dH',0,0)], self.model.q[('H',0)],self.q_v) self.model.q[('velocity',0)]=self.q_v self.model.ebqe[('velocity',0)]=self.ebqe_v if self.RD_modelIndex != None: #print self.RD_modelIndex,len(modelList) self.rdModel = modelList[self.RD_modelIndex] else: self.rdModel = self.model # if self.checkMass: # self.m_pre = 
Norms.scalarSmoothedHeavisideDomainIntegral(self.epsFact, # self.model.mesh.elementDiametersArray, # self.model.q['dV'], # self.model.q[('m',0)], # self.model.mesh.nElements_owned) # logEvent("Attach Models UnitSquareVortex: Phase 0 mass before NCLS step = %12.5e" % (self.m_pre,),level=2) # self.totalFluxGlobal=0.0 # self.lsGlobalMassArray = [self.m_pre] # self.lsGlobalMassErrorArray = [0.0] # self.fluxArray = [0.0] # self.timeArray = [self.model.timeIntegration.t] def preStep(self,t,firstStep=False): self.unitSquareVortexLevelSetEvaluate(t, self.model.q['x'], self.model.q[('u',0)],self.model.q[('grad(u)',0)], self.model.q[('m',0)],self.model.q[('dm',0,0)], self.model.q[('dH',0,0)],self.model.q[('dH',0,0)], self.model.q[('H',0)],self.q_v) # if self.checkMass: # self.m_pre = Norms.scalarSmoothedHeavisideDomainIntegral(self.epsFact, # self.model.mesh.elementDiametersArray, # self.model.q['dV'], # self.model.q[('m',0)], # self.model.mesh.nElements_owned) # logEvent("Phase 0 mass before NCLS step = %12.5e" % (self.m_pre,),level=2) # self.m_last = Norms.scalarSmoothedHeavisideDomainIntegral(self.epsFact, # self.model.mesh.elementDiametersArray, # self.model.q['dV'], # self.model.timeIntegration.m_last[0], # self.model.mesh.nElements_owned) # logEvent("Phase 0 mass before NCLS step (m_last) = %12.5e" % (self.m_last,),level=2) #cek todo why is this here # if self.model.ebq.has_key(('v',1)): # self.model.u[0].getValuesTrace(self.model.ebq[('v',1)],self.model.ebq[('u',0)]) # self.model.u[0].getGradientValuesTrace(self.model.ebq[('grad(v)',1)],self.model.ebq[('grad(u)',0)]) copyInstructions = {} return copyInstructions def postStep(self,t,firstStep=False): self.u_old_dof = numpy.copy(self.model.u[0].dof) # if self.checkMass: # self.m_post = Norms.scalarSmoothedHeavisideDomainIntegral(self.epsFact, # self.model.mesh.elementDiametersArray, # self.model.q['dV'], # self.model.q[('m',0)], # self.model.mesh.nElements_owned) # logEvent("Phase 0 mass after NCLS step = %12.5e" % (self.m_post,),level=2) # #need a flux here not a velocity # self.fluxIntegral = Norms.fluxDomainBoundaryIntegralFromVector(self.model.ebqe['dS'], # self.model.ebqe[('velocity',0)], # self.model.ebqe['n'], # self.model.mesh) # logEvent("Flux integral = %12.5e" % (self.fluxIntegral,),level=2) # logEvent("Phase 0 mass conservation after NCLS step = %12.5e" % (self.m_post - self.m_last + self.model.timeIntegration.dt*self.fluxIntegral,),level=2) # self.lsGlobalMass = self.m_post # self.fluxGlobal = self.fluxIntegral*self.model.timeIntegration.dt # self.totalFluxGlobal += self.fluxGlobal # self.lsGlobalMassArray.append(self.lsGlobalMass) # self.lsGlobalMassErrorArray.append(self.lsGlobalMass - self.lsGlobalMassArray[0] + self.totalFluxGlobal) # self.fluxArray.append(self.fluxIntegral) # self.timeArray.append(self.model.timeIntegration.t) # if self.model.ebq.has_key(('v',1)): # self.model.u[0].getValuesTrace(self.model.ebq[('v',1)],self.model.ebq[('u',0)]) # self.model.u[0].getGradientValuesTrace(self.model.ebq[('grad(v)',1)],self.model.ebq[('grad(u)',0)]) copyInstructions = {} return copyInstructions def evaluate(self,t,c): if self.useHJ: self.unitSquareVortexLevelSetEvaluate(t, c['x'], c[('u',0)],c[('grad(u)',0)], c[('m',0)],c[('dm',0,0)], c[('f',0)],c[('df',0,0)], c[('H',0)],c[('dH',0,0)]) else: self.unitSquareVortexEvaluate(t, c['x'], c[('u',0)], c[('m',0)],c[('dm',0,0)], c[('f',0)],c[('df',0,0)]) c[('velocity',0)]=c[('df',0,0)] if applyRedistancing: RD_model=1 else: RD_model=None coefficients = 
UnitSquareVortex(useHJ=True,epsFact=epsFactHeaviside,checkMass=checkMass,RD_model=RD_model,useMetrics=useMetrics) coefficients.variableNames=['u'] #now define the Dirichlet boundary conditions def getDBC(x,flag): pass #if (x[1] == 0.0): # return lambda x,t: 0.0 #if (x[0] == 0.0 or # x[0] == 1.0 or # x[1] == 0.0 or # x[1] == 1.0): # return lambda x,t: 0.0 def zeroInflow(x): return lambda x,t: 0.0 # if (x[0] == 0.0 and x[1] <= 0.5): # return lambda x,t: 0.0 # if (x[0] == 1.0 and x[1] >= 0.5): # return lambda x,t: 0.0 # if (x[1] == 0.0 and x[0] >= 0.5): # return lambda x,t: 0.0 # if (x[1] == 1.0 and x[0] <= 0.5): # return lambda x,t: 0.0 dirichletConditions = {0:getDBC} initialConditions = {0:analyticalSolution[0]} fluxBoundaryConditions = {0:'outFlow'} def zeroadv(x): return lambda x,t: 0.0 advectiveFluxBoundaryConditions = {} #advectiveFluxBoundaryConditions = {0:zeroadv} diffusiveFluxBoundaryConditions = {0:{}} ## @}
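# A small, self-contained numerical check of the oscillating vortex velocity
# field and the signed-distance initial condition written out in the comments
# above; plain Python math only, no Proteus machinery is assumed here.
import math


def vortex_velocity(x, y, t, period=8.0):
    """u^x = cos(pi t/T) sin(2 pi y) sin^2(pi x), u^y = -cos(pi t/T) sin(2 pi x) sin^2(pi y)."""
    ux = math.cos(math.pi * t / period) * math.sin(2.0 * math.pi * y) * math.sin(math.pi * x) ** 2
    uy = -math.cos(math.pi * t / period) * math.sin(2.0 * math.pi * x) * math.sin(math.pi * y) ** 2
    return ux, uy


def initial_level_set(x, y, xc=0.5, yc=0.75, radius=0.15):
    """Signed distance to the circle of radius 0.15 centred at (0.5, 0.75),
    with the same sign convention as OscillatingVortex2D.uOfXT (positive inside)."""
    return radius - math.sqrt((x - xc) ** 2 + (y - yc) ** 2)


# The velocity vanishes on the domain boundary x = 0 ...
ux, uy = vortex_velocity(0.0, 0.3, 0.0)
assert abs(ux) < 1e-12 and abs(uy) < 1e-12
# ... and is (numerically) zero everywhere at t = T/2 = 4, when the vortex reverses.
assert abs(vortex_velocity(0.3, 0.4, 4.0)[0]) < 1e-12
# The zero level set passes through (0.5, 0.9): exactly 0.15 from the centre.
assert abs(initial_level_set(0.5, 0.9)) < 1e-12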
import random import re from sets import Set from background_hang_reporter_job.categories import categories_p1, categories_p2 # For now we want like deterministic results, to aid debugging random.seed(0) def to_struct_of_arrays(a): if len(a) == 0: raise Exception('Need at least one item in array for this to work.') result = {k:[e[k] for e in a] for k in a[0].keys()} result['length'] = len(a) return result class UniqueKeyedTable(object): def __init__(self, get_default_from_key, key_names=()): self.get_default_from_key = get_default_from_key self.key_to_index_map = {} self.key_names = key_names self.items = [] def key_to_index(self, key): if key in self.key_to_index_map: return self.key_to_index_map[key] index = len(self.items) self.items.append(self.get_default_from_key(key)) self.key_to_index_map[key] = index return index def key_to_item(self, key): return self.items[self.key_to_index(key)] def index_to_item(self, index): return self.items[index] def get_items(self): return self.items def inner_struct_of_arrays(self, items): if len(items) == 0: raise Exception('Need at least one item in array for this to work.') result = {} num_keys = len(self.key_names) for i in xrange(0, num_keys): result[self.key_names[i]] = [x[i] for x in items] result['length'] = len(items) return result def struct_of_arrays(self): return self.inner_struct_of_arrays(self.items) def sorted_struct_of_arrays(self, key): return self.inner_struct_of_arrays(sorted(self.items, key=key)) class GrowToFitList(list): def __setitem__(self, index, value): if index >= len(self): to_grow = index + 1 - len(self) self.extend([None] * to_grow) list.__setitem__(self, index, value) def __getitem__(self, index): if index >= len(self): return None return list.__getitem__(self, index) def hexify(num): return "{0:#0{1}x}".format(num, 8) def get_default_lib(name): return ({ 'name': re.sub(r'\.pdb$', '', name), 'offset': 0, 'path': "", 'debugName': name, 'debugPath': name, 'arch': "", }) def get_default_thread(name, minimal_sample_table): strings_table = UniqueKeyedTable(lambda str: str) libs = UniqueKeyedTable(get_default_lib) func_table = UniqueKeyedTable(lambda key: ( strings_table.key_to_index(key[0]), None if key[1] is None else libs.key_to_index(key[1]) ), ('name', 'lib')) stack_table = UniqueKeyedTable(lambda key: ( key[2], func_table.key_to_index((key[0], key[1])) ), ('prefix', 'func')) if minimal_sample_table: sample_table = UniqueKeyedTable(lambda key: ( key[0], strings_table.key_to_index(key[1]), key[2], strings_table.key_to_index(key[3]), ), ('stack', 'platform')) else: sample_table = UniqueKeyedTable(lambda key: ( key[0], strings_table.key_to_index(key[1]), key[2], strings_table.key_to_index(key[3]), ), ('stack', 'runnable', 'userInteracting', 'platform')) stack_table.key_to_index(('(root)', None, None)) prune_stack_cache = UniqueKeyedTable(lambda key: [0.0]) prune_stack_cache.key_to_index(('(root)', None, None)) return { 'name': name, 'libs': libs, 'funcTable': func_table, 'stackTable': stack_table, 'pruneStackCache': prune_stack_cache, 'sampleTable': sample_table, 'stringArray': strings_table, 'processType': 'tab' if name == 'Gecko_Child' or name == 'Gecko_Child_ForcePaint' else 'default', 'dates': UniqueKeyedTable(lambda date: ({ 'date': date, 'sampleHangMs': GrowToFitList(), 'sampleHangCount': GrowToFitList() }), ('date', 'sampleHangMs', 'sampleHangCount')), } def sample_categorizer(categories, stack_table, func_table, string_array): func_name_to_category_cache = {} def function_name_to_category(name): if name in 
func_name_to_category_cache: return func_name_to_category_cache[name] for matches, pattern, category in categories: if matches(name, pattern): func_name_to_category_cache[name] = category return category func_name_to_category_cache[name] = False return False stack_category_cache = {} def compute_category(stack_index): while True: if stack_index in stack_category_cache: return stack_category_cache[stack_index] else: if not stack_index: stack_category_cache[stack_index] = None return None else: func_index = stack_table['func'][stack_index] name = string_array.index_to_item(func_table['name'][func_index]) category = function_name_to_category(name) if category != False: stack_category_cache[stack_index] = category return category stack_index = stack_table['prefix'][stack_index] return compute_category def reconstruct_stack(string_array, func_table, stack_table, lib_table, stack_index): result = [] while stack_index != 0: func_index = stack_table['func'][stack_index] prefix = stack_table['prefix'][stack_index] func_name = string_array[func_table['name'][func_index]] lib_name = lib_table[func_table['lib'][func_index]]['debugName'] result.append((func_name, lib_name)) stack_index = prefix return result[::-1] def merge_number_dicts(a, b): keys = Set(a.keys() + b.keys()) return {k: a.get(k, 0.) + b.get(k, 0.) for k in keys} class ProfileProcessor(object): def __init__(self, config): self.config = config def default_thread_closure(name): return get_default_thread(name, config['use_minimal_sample_table']) self.thread_table = UniqueKeyedTable(default_thread_closure) self.usage_hours_by_date = {} def debugDump(self, dump_str): if self.config['print_debug_info']: print dump_str def ingest_processed_profile(self, profile): for existing_thread in self.thread_table.get_items(): prune_stack_cache = UniqueKeyedTable(lambda key: [0.0]) prune_stack_cache.key_to_index(('(root)', None, None)) existing_thread['pruneStackCache'] = prune_stack_cache sample_size = self.config['post_sample_size'] threads = profile['threads'] for other in threads: other_samples = other['sampleTable'] other_dates = other['dates'] for date in other_dates: build_date = date['date'] for i in xrange(0, len(date['sampleHangCount'])): stack_index = other_samples['stack'][i] stack = reconstruct_stack(other['stringArray'], other['funcTable'], other['stackTable'], other['libs'], stack_index) self.pre_ingest_row((stack, other['stringArray'][other_samples['runnable'][i]], other['name'], build_date, other_samples['userInteracting'][i], other['stringArray'][other_samples['platform'][i]], date['sampleHangMs'][i], date['sampleHangCount'][i])) for date in other_dates: build_date = date['date'] for i in xrange(0, len(date['sampleHangCount'])): stack_index = other_samples['stack'][i] stack = reconstruct_stack(other['stringArray'], other['funcTable'], other['stackTable'], other['libs'], stack_index) if sample_size == 1.0 or random.random() <= sample_size: self.ingest_row((stack, other['stringArray'][other_samples['runnable'][i]], other['name'], build_date, other_samples['userInteracting'][i], other['stringArray'][other_samples['platform'][i]], date['sampleHangMs'][i], date['sampleHangCount'][i])) self.usage_hours_by_date = merge_number_dicts(self.usage_hours_by_date, profile.get('usageHoursByDate', {})) def pre_ingest_row(self, row): #pylint: disable=unused-variable stack, runnable_name, thread_name, build_date, pending_input, platform, hang_ms, hang_count = row thread = self.thread_table.key_to_item(thread_name) prune_stack_cache = 
thread['pruneStackCache'] root_stack = prune_stack_cache.key_to_item(('(root)', None, None)) root_stack[0] += hang_ms last_stack = 0 for (func_name, lib_name) in stack: last_stack = prune_stack_cache.key_to_index((func_name, lib_name, last_stack)) cache_item = prune_stack_cache.index_to_item(last_stack) cache_item[0] += hang_ms def ingest_row(self, row): #pylint: disable=unused-variable stack, runnable_name, thread_name, build_date, pending_input, platform, hang_ms, hang_count = row thread = self.thread_table.key_to_item(thread_name) stack_table = thread['stackTable'] sample_table = thread['sampleTable'] dates = thread['dates'] prune_stack_cache = thread['pruneStackCache'] root_stack = prune_stack_cache.key_to_item(('(root)', None, None)) last_stack = 0 last_cache_item_index = 0 last_lib_name = None for (func_name, lib_name) in stack: cache_item_index = prune_stack_cache.key_to_index((func_name, lib_name, last_cache_item_index)) cache_item = prune_stack_cache.index_to_item(cache_item_index) parent_cache_item = prune_stack_cache.index_to_item(last_cache_item_index) if cache_item[0] / parent_cache_item[0] > self.config['stack_acceptance_threshold']: last_lib_name = lib_name last_stack = stack_table.key_to_index((func_name, lib_name, last_stack)) last_cache_item_index = cache_item_index else: # If we're below the acceptance threshold, just lump it under (other) below # its parent. last_lib_name = lib_name last_stack = stack_table.key_to_index(('(other)', lib_name, last_stack)) last_cache_item_index = cache_item_index break if self.config['use_minimal_sample_table'] and thread_name == 'Gecko_Child' and not pending_input: return sample_index = sample_table.key_to_index((last_stack, runnable_name, pending_input, platform)) date = dates.key_to_item(build_date) if date['sampleHangCount'][sample_index] is None: date['sampleHangCount'][sample_index] = 0.0 date['sampleHangMs'][sample_index] = 0.0 date['sampleHangCount'][sample_index] += hang_count date['sampleHangMs'][sample_index] += hang_ms def ingest(self, data, usage_hours_by_date): print "{} unfiltered samples in data".format(len(data)) data = [ x for x in data # x[6] should be hang_ms if x[6] > 0.0 ] print "{} filtered samples in data".format(len(data)) print "Preprocessing stacks for prune cache..." for row in data: self.pre_ingest_row(row) print "Processing stacks..." 
for row in data: self.ingest_row(row) self.usage_hours_by_date = merge_number_dicts(self.usage_hours_by_date, usage_hours_by_date) def process_date(self, date): if self.config['use_minimal_sample_table']: return { 'date': date['date'], 'sampleHangCount': date['sampleHangCount'], } return date def process_thread(self, thread): string_array = thread['stringArray'] func_table = thread['funcTable'].struct_of_arrays() stack_table = thread['stackTable'].struct_of_arrays() categorizer_p1 = sample_categorizer(categories_p1, stack_table, func_table, string_array) categorizer_p2 = sample_categorizer(categories_p2, stack_table, func_table, string_array) sample_table = thread['sampleTable'].struct_of_arrays() sample_table['category'] = [] for s in sample_table['stack']: category_string = categorizer_p1(s) if category_string is None: category_string = categorizer_p2(s) if category_string is None: sample_table['category'].append(None) else: sample_table['category'].append(string_array.key_to_index(category_string)) else: sample_table['category'].append(string_array.key_to_index(category_string)) return { 'name': thread['name'], 'processType': thread['processType'], 'libs': thread['libs'].get_items(), 'funcTable': func_table, 'stackTable': stack_table, 'sampleTable': sample_table, 'stringArray': string_array.get_items(), 'dates': [self.process_date(d) for d in thread['dates'].get_items()], } def process_into_split_profile(self): return { 'main_payload': { 'splitFiles': { t['name']: [k for k in t.keys() if k != 'name'] for t in self.thread_table.get_items() }, 'usageHoursByDate': self.usage_hours_by_date, 'uuid': self.config['uuid'], 'isSplit': True, }, 'file_data': [ [ (t['name'] + '_' + k, v) for k, v in self.process_thread(t).iteritems() if k != 'name' ] for t in self.thread_table.get_items() ] } def process_into_profile(self): print "Processing into final format..." if self.config['split_threads_in_out_file']: return [ { 'name': t['name'], 'threads': [self.process_thread(t)], 'usageHoursByDate': self.usage_hours_by_date, 'uuid': self.config['uuid'], } for t in self.thread_table.get_items() ] return { 'threads': [self.process_thread(t) for t in self.thread_table.get_items()], 'usageHoursByDate': self.usage_hours_by_date, 'uuid': self.config['uuid'], }
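# Hedged usage sketch, not part of the module above. The config keys and the row
# layout are the ones consumed by ProfileProcessor; the concrete values, thread
# name, stack frames and dates below are made up for illustration only.
processor = ProfileProcessor({
    'use_minimal_sample_table': False,
    'print_debug_info': False,
    'post_sample_size': 1.0,
    'stack_acceptance_threshold': 0.01,
    'split_threads_in_out_file': False,
    'uuid': 'example-uuid',
})
# Each row is (stack, runnable, thread_name, build_date, pending_input, platform,
# hang_ms, hang_count); a stack is a list of (func_name, lib_name) pairs, root first.
example_rows = [
    ([('nsThread::ProcessNextEvent', 'xul.pdb')],
     'example_runnable', 'Gecko_Child', '20180101', True, 'Windows', 250.0, 1),
]
processor.ingest(example_rows, {'20180101': 12.5})
profile = processor.process_into_profile()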
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for cond_v2.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.core.protobuf import config_pb2 from tensorflow.python.eager import backprop from tensorflow.python.eager import context from tensorflow.python.eager import def_function from tensorflow.python.eager import function from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import test_ops from tensorflow.python.framework import test_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import cond_v2 from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import data_flow_ops from tensorflow.python.ops import gradients_impl from tensorflow.python.ops import math_ops from tensorflow.python.ops import tensor_array_ops from tensorflow.python.ops import variables from tensorflow.python.platform import test from tensorflow.python.training import saver from tensorflow.python.util import compat _OPTIONAL_OPS = frozenset([ "OptionalFromValue", "OptionalNone", "OptionalHasValue", "OptionalGetValue" ]) class CondV2Test(test.TestCase): def _testCond(self, true_fn, false_fn, train_vals, feed_dict=None): if not feed_dict: feed_dict = {} with self.session(graph=ops.get_default_graph()) as sess: pred = array_ops.placeholder(dtypes.bool, name="pred") expected = control_flow_ops.cond( array_ops.squeeze_v2(pred), true_fn, false_fn, name="expected") actual = cond_v2.cond_v2(pred, true_fn, false_fn, name="actual") expected_grad = gradients_impl.gradients(expected, train_vals) actual_grad = gradients_impl.gradients(actual, train_vals) sess_run_args = {pred: True} sess_run_args.update(feed_dict) expected_val, actual_val, expected_grad_val, actual_grad_val = sess.run( (expected, actual, expected_grad, actual_grad), sess_run_args) self.assertEqual(expected_val, actual_val) self.assertEqual(expected_grad_val, actual_grad_val) sess_run_args = {pred: [[True]]} sess_run_args.update(feed_dict) expected_val, actual_val, expected_grad_val, actual_grad_val = sess.run( (expected, actual, expected_grad, actual_grad), sess_run_args) self.assertEqual(expected_val, actual_val) self.assertEqual(expected_grad_val, actual_grad_val) sess_run_args = {pred: False} sess_run_args.update(feed_dict) expected_val, actual_val, expected_grad_val, actual_grad_val = sess.run( (expected, actual, expected_grad, actual_grad), sess_run_args) self.assertEqual(expected_val, actual_val) self.assertEqual(expected_grad_val, actual_grad_val) sess_run_args = {pred: [[False]]} sess_run_args.update(feed_dict) expected_val, actual_val, expected_grad_val, actual_grad_val = sess.run( (expected, actual, expected_grad, actual_grad), sess_run_args) self.assertEqual(expected_val, 
actual_val) self.assertEqual(expected_grad_val, actual_grad_val) @test_util.run_deprecated_v1 def testBasic(self): x = constant_op.constant(1.0, name="x") y = constant_op.constant(2.0, name="y") def true_fn(): return x * 2.0 def false_fn(): return y * 3.0 self._testCond(true_fn, false_fn, [x]) self._testCond(true_fn, false_fn, [x, y]) self._testCond(true_fn, false_fn, [y]) def testReturnsIndexedSlicesAndNones(self): @def_function.function def build_cond_with_indexed_slices(): pred = constant_op.constant(True) def true_fn(): return math_ops._as_indexed_slices(constant_op.constant([1.])), None def false_fn(): return math_ops._as_indexed_slices(constant_op.constant([2.])), None result = cond_v2.cond_v2(pred, true_fn, false_fn) self.assertIsNone(result[1]) return ops.convert_to_tensor(result[0]) output = build_cond_with_indexed_slices() self.assertAllEqual(output, [1.]) def testReturnsNonesAndIndexedSlices(self): @def_function.function def build_cond_with_indexed_slices(): pred = constant_op.constant(True) def true_fn(): return (None, None, None, math_ops._as_indexed_slices(constant_op.constant([1.]))) def false_fn(): return (None, None, None, math_ops._as_indexed_slices(constant_op.constant([2.]))) result = cond_v2.cond_v2(pred, true_fn, false_fn) self.assertIsNone(result[0]) self.assertIsNone(result[1]) self.assertIsNone(result[2]) return ops.convert_to_tensor(result[3]) output = build_cond_with_indexed_slices() self.assertAllEqual(output, [1.]) def testExternalControlDependencies(self): with ops.Graph().as_default(), self.test_session(): v = variables.Variable(1.0) v.initializer.run() op = v.assign_add(1.0) def true_branch(): with ops.control_dependencies([op]): return 1.0 cond_v2.cond_v2(array_ops.placeholder_with_default(False, None), true_branch, lambda: 2.0).eval() self.assertAllEqual(self.evaluate(v), 2.0) @test_util.run_deprecated_v1 def testMultipleOutputs(self): x = constant_op.constant(1.0, name="x") y = constant_op.constant(3.0, name="y") def true_fn(): return x * y, y def false_fn(): return x, y * 3.0 self._testCond(true_fn, false_fn, [x]) self._testCond(true_fn, false_fn, [x, y]) self._testCond(true_fn, false_fn, [y]) @test_util.run_deprecated_v1 def testBasic2(self): x = constant_op.constant(1.0, name="x") y = constant_op.constant(2.0, name="y") def true_fn(): return x * y * 2.0 def false_fn(): return 2.0 self._testCond(true_fn, false_fn, [x]) self._testCond(true_fn, false_fn, [x, y]) self._testCond(true_fn, false_fn, [y]) @test_util.run_deprecated_v1 def testNoInputs(self): with self.cached_session() as sess: pred = array_ops.placeholder(dtypes.bool, name="pred") def true_fn(): return constant_op.constant(1.0) def false_fn(): return constant_op.constant(2.0) out = cond_v2.cond_v2(pred, true_fn, false_fn) self.assertEqual(sess.run(out, {pred: True}), (1.0,)) self.assertEqual(sess.run(out, {pred: False}), (2.0,)) def _createCond(self, name): """Creates a cond_v2 call and returns the output tensor and the cond op.""" pred = constant_op.constant(True, name="pred") x = constant_op.constant(1.0, name="x") def true_fn(): return x def false_fn(): return x + 1 output = cond_v2.cond_v2(pred, true_fn, false_fn, name=name) cond_op = output.op.inputs[0].op self.assertEqual(cond_op.type, "StatelessIf") return output, cond_op def _createNestedCond(self, name): """Like _createCond but creates a nested cond_v2 call as well.""" pred = constant_op.constant(True, name="pred") x = constant_op.constant(1.0, name="x") def true_fn(): return cond_v2.cond_v2(pred, lambda: x, lambda: x + 1) def 
false_fn(): return x + 2 output = cond_v2.cond_v2(pred, true_fn, false_fn, name=name) cond_op = output.op.inputs[0].op self.assertEqual(cond_op.type, "StatelessIf") return output, cond_op def testDefaultName(self): with ops.Graph().as_default(): _, cond_op = self._createCond(None) self.assertEqual(cond_op.name, "cond") self.assertRegexpMatches( cond_op.get_attr("then_branch").name, r"cond_true_\d*") self.assertRegexpMatches( cond_op.get_attr("else_branch").name, r"cond_false_\d*") with ops.Graph().as_default(): with ops.name_scope("foo"): _, cond1_op = self._createCond("") self.assertEqual(cond1_op.name, "foo/cond") self.assertRegexpMatches( cond1_op.get_attr("then_branch").name, r"foo_cond_true_\d*") self.assertRegexpMatches( cond1_op.get_attr("else_branch").name, r"foo_cond_false_\d*") _, cond2_op = self._createCond(None) self.assertEqual(cond2_op.name, "foo/cond_1") self.assertRegexpMatches( cond2_op.get_attr("then_branch").name, r"foo_cond_1_true_\d*") self.assertRegexpMatches( cond2_op.get_attr("else_branch").name, r"foo_cond_1_false_\d*") @test_util.run_v1_only("b/120545219") def testDefunInCond(self): x = constant_op.constant(1.0, name="x") y = constant_op.constant(2.0, name="y") def true_fn(): @function.defun def fn(): return x * y * 2.0 return fn() def false_fn(): return 2.0 self._testCond(true_fn, false_fn, [x]) self._testCond(true_fn, false_fn, [x, y]) self._testCond(true_fn, false_fn, [y]) @test_util.run_deprecated_v1 def testNestedDefunInCond(self): x = constant_op.constant(1.0, name="x") y = constant_op.constant(2.0, name="y") def true_fn(): return 2.0 def false_fn(): @function.defun def fn(): @function.defun def nested_fn(): return x * y * 2.0 return nested_fn() return fn() self._testCond(true_fn, false_fn, [x]) self._testCond(true_fn, false_fn, [x, y]) self._testCond(true_fn, false_fn, [y]) @test_util.run_deprecated_v1 def testDoubleNestedDefunInCond(self): x = constant_op.constant(1.0, name="x") y = constant_op.constant(2.0, name="y") def true_fn(): @function.defun def fn(): @function.defun def nested_fn(): @function.defun def nested_nested_fn(): return x * y * 2.0 return nested_nested_fn() return nested_fn() return fn() def false_fn(): return 2.0 self._testCond(true_fn, false_fn, [x]) self._testCond(true_fn, false_fn, [x, y]) self._testCond(true_fn, false_fn, [y]) def testNestedCond(self): def run_test(pred_value): def build_graph(): pred = array_ops.placeholder(dtypes.bool, name="pred") x = constant_op.constant(1.0, name="x") y = constant_op.constant(2.0, name="y") def true_fn(): return 2.0 def false_fn(): def false_true_fn(): return x * y * 2.0 def false_false_fn(): return x * 5.0 return _cond(pred, false_true_fn, false_false_fn, "inside_false_fn") return x, y, pred, true_fn, false_fn with ops.Graph().as_default(): x, y, pred, true_fn, false_fn = build_graph() self._testCond(true_fn, false_fn, [x, y], {pred: pred_value}) self._testCond(true_fn, false_fn, [x], {pred: pred_value}) self._testCond(true_fn, false_fn, [y], {pred: pred_value}) run_test(True) run_test(False) def testNestedCondBothBranches(self): def run_test(pred_value): def build_graph(): pred = array_ops.placeholder(dtypes.bool, name="pred") x = constant_op.constant(1.0, name="x") y = constant_op.constant(2.0, name="y") def true_fn(): return _cond(pred, lambda: x + y, lambda: x * x, name=None) def false_fn(): return _cond(pred, lambda: x - y, lambda: y * y, name=None) return x, y, pred, true_fn, false_fn with ops.Graph().as_default(): x, y, pred, true_fn, false_fn = build_graph() self._testCond(true_fn, 
false_fn, [x, y], {pred: pred_value}) self._testCond(true_fn, false_fn, [x], {pred: pred_value}) self._testCond(true_fn, false_fn, [y], {pred: pred_value}) run_test(True) run_test(False) def testDoubleNestedCond(self): def run_test(pred1_value, pred2_value): def build_graph(): pred1 = array_ops.placeholder(dtypes.bool, name="pred1") pred2 = array_ops.placeholder(dtypes.bool, name="pred2") x = constant_op.constant(1.0, name="x") y = constant_op.constant(2.0, name="y") def true_fn(): return 2.0 def false_fn(): def false_true_fn(): def false_true_true_fn(): return x * y * 2.0 def false_true_false_fn(): return x * 10.0 return _cond( pred1, false_true_true_fn, false_true_false_fn, name="inside_false_true_fn") def false_false_fn(): return x * 5.0 return _cond( pred2, false_true_fn, false_false_fn, name="inside_false_fn") return x, y, pred1, pred2, true_fn, false_fn with ops.Graph().as_default(): x, y, pred1, pred2, true_fn, false_fn = build_graph() self._testCond(true_fn, false_fn, [x, y], { pred1: pred1_value, pred2: pred2_value }) x, y, pred1, pred2, true_fn, false_fn = build_graph() self._testCond(true_fn, false_fn, [x], { pred1: pred1_value, pred2: pred2_value }) x, y, pred1, pred2, true_fn, false_fn = build_graph() self._testCond(true_fn, false_fn, [y], { pred1: pred1_value, pred2: pred2_value }) run_test(True, True) run_test(True, False) run_test(False, False) run_test(False, True) def testGradientFromInsideDefun(self): def build_graph(): pred_outer = array_ops.placeholder(dtypes.bool, name="pred_outer") pred_inner = array_ops.placeholder(dtypes.bool, name="pred_inner") x = constant_op.constant(1.0, name="x") y = constant_op.constant(2.0, name="y") def true_fn(): return 2.0 def false_fn(): def inner_true_fn(): return x * y * 2.0 def inner_false_fn(): return x * 5.0 return cond_v2.cond_v2( pred_inner, inner_true_fn, inner_false_fn, name="inner_cond") cond_outer = cond_v2.cond_v2( pred_outer, true_fn, false_fn, name="outer_cond") # Compute grads inside a Defun. @function.defun def nesting_fn(): return gradients_impl.gradients(cond_outer, [x, y]) grads = nesting_fn() return grads, pred_outer, pred_inner with ops.Graph().as_default(): grads, pred_outer, pred_inner = build_graph() with self.session(graph=ops.get_default_graph()) as sess: self.assertSequenceEqual( sess.run(grads, { pred_outer: True, pred_inner: True }), [0., 0.]) self.assertSequenceEqual( sess.run(grads, { pred_outer: True, pred_inner: False }), [0., 0.]) self.assertSequenceEqual( sess.run(grads, { pred_outer: False, pred_inner: True }), [4., 2.]) self.assertSequenceEqual( sess.run(grads, { pred_outer: False, pred_inner: False }), [5., 0.]) def testGradientFromInsideNestedDefun(self): def build_graph(): pred_outer = array_ops.placeholder(dtypes.bool, name="pred_outer") pred_inner = array_ops.placeholder(dtypes.bool, name="pred_inner") x = constant_op.constant(1.0, name="x") y = constant_op.constant(2.0, name="y") def true_fn(): return 2.0 def false_fn(): def inner_true_fn(): return x * y * 2.0 def inner_false_fn(): return x * 5.0 return cond_v2.cond_v2( pred_inner, inner_true_fn, inner_false_fn, name="inner_cond") cond_outer = cond_v2.cond_v2( pred_outer, true_fn, false_fn, name="outer_cond") # Compute grads inside a Defun. 
@function.defun def nesting_fn(): @function.defun def inner_nesting_fn(): return gradients_impl.gradients(cond_outer, [x, y]) return inner_nesting_fn() grads = nesting_fn() return grads, pred_outer, pred_inner with ops.Graph().as_default(): grads, pred_outer, pred_inner = build_graph() with self.session(graph=ops.get_default_graph()) as sess: self.assertSequenceEqual( sess.run(grads, { pred_outer: True, pred_inner: True }), [0., 0.]) self.assertSequenceEqual( sess.run(grads, { pred_outer: True, pred_inner: False }), [0., 0.]) self.assertSequenceEqual( sess.run(grads, { pred_outer: False, pred_inner: True }), [4., 2.]) self.assertSequenceEqual( sess.run(grads, { pred_outer: False, pred_inner: False }), [5., 0.]) def testBuildCondAndGradientInsideDefun(self): def build_graph(): pred_outer = array_ops.placeholder(dtypes.bool, name="pred_outer") pred_inner = array_ops.placeholder(dtypes.bool, name="pred_inner") x = constant_op.constant(1.0, name="x") y = constant_op.constant(2.0, name="y") # Build cond and its gradient inside a Defun. @function.defun def fn(): def true_fn(): return 2.0 def false_fn(): def inner_true_fn(): return x * y * 2.0 def inner_false_fn(): return x * 5.0 return cond_v2.cond_v2( pred_inner, inner_true_fn, inner_false_fn, name="inner_cond") cond_outer = cond_v2.cond_v2( pred_outer, true_fn, false_fn, name="outer_cond") return gradients_impl.gradients(cond_outer, [x, y]) grads = fn() return grads, pred_outer, pred_inner with ops.Graph().as_default(), self.session( graph=ops.get_default_graph()) as sess: grads, pred_outer, pred_inner = build_graph() self.assertSequenceEqual( sess.run(grads, { pred_outer: True, pred_inner: True }), [0., 0.]) self.assertSequenceEqual( sess.run(grads, { pred_outer: True, pred_inner: False }), [0., 0.]) self.assertSequenceEqual( sess.run(grads, { pred_outer: False, pred_inner: True }), [4., 2.]) self.assertSequenceEqual( sess.run(grads, { pred_outer: False, pred_inner: False }), [5., 0.]) @test_util.run_deprecated_v1 def testSecondDerivative(self): with self.cached_session() as sess: pred = array_ops.placeholder(dtypes.bool, name="pred") x = constant_op.constant(3.0, name="x") def true_fn(): return math_ops.pow(x, 3) def false_fn(): return x cond = cond_v2.cond_v2(pred, true_fn, false_fn, name="cond") cond_grad = gradients_impl.gradients(cond, [x]) cond_grad_grad = gradients_impl.gradients(cond_grad, [x]) # d[x^3]/dx = 3x^2 true_val = sess.run(cond_grad, {pred: True}) self.assertEqual(true_val, [27.0]) # d[x]/dx = 1 false_val = sess.run(cond_grad, {pred: False}) self.assertEqual(false_val, [1.0]) true_val = sess.run(cond_grad_grad, {pred: True}) # d2[x^3]/dx2 = 6x self.assertEqual(true_val, [18.0]) false_val = sess.run(cond_grad_grad, {pred: False}) # d2[x]/dx2 = 0 self.assertEqual(false_val, [0.0]) def testGradientOfDeserializedCond(self): with ops.Graph().as_default(): pred = array_ops.placeholder(dtypes.bool, name="pred") x = constant_op.constant(3.0, name="x") ops.add_to_collection("x", x) def true_fn(): return math_ops.pow(x, 3) def false_fn(): return x ops.add_to_collection("pred", pred) cond = cond_v2.cond_v2(pred, true_fn, false_fn, name="cond") ops.add_to_collection("cond", cond) meta_graph = saver.export_meta_graph() with ops.Graph().as_default() as g: with self.session(graph=g) as sess: saver.import_meta_graph(meta_graph) x = ops.get_collection("x")[0] pred = ops.get_collection("pred")[0] cond = ops.get_collection("cond") cond_grad = gradients_impl.gradients(cond, [x], name="cond_grad") cond_grad_grad = gradients_impl.gradients( 
cond_grad, [x], name="cond_grad_grad") # d[x^3]/dx = 3x^2 true_val = sess.run(cond_grad, {pred: True}) self.assertEqual(true_val, [27.0]) # d[x]/dx = 1 false_val = sess.run(cond_grad, {pred: False}) self.assertEqual(false_val, [1.0]) true_val = sess.run(cond_grad_grad, {pred: True}) # d2[x^3]/dx2 = 6x self.assertEqual(true_val, [18.0]) false_val = sess.run(cond_grad_grad, {pred: False}) # d2[x]/dx2 = 0 self.assertEqual(false_val, [0.0]) @test_util.run_deprecated_v1 def testFuncCond(self): @def_function.function def fn_with_cond(): cond_v2.cond_v2( constant_op.constant(True), lambda: array_ops.zeros([]), lambda: array_ops.ones([]), name="cond_1") return cond_v2.cond_v2( constant_op.constant(False), lambda: array_ops.zeros([]), lambda: array_ops.ones([]), name="cond_2") concrete_fn = fn_with_cond.get_concrete_function() cond_1 = concrete_fn.graph.get_operation_by_name("cond_1") cond_2 = concrete_fn.graph.get_operation_by_name("cond_2") # Verify that all functional ops are stateless and cond_2 does not have # any control inputs. self.assertEqual(cond_1.type, "StatelessIf") self.assertEqual(cond_2.type, "StatelessIf") self.assertLen(cond_2.control_inputs, 0) fn_output = concrete_fn() self.assertEqual(fn_output.op.type, "PartitionedCall") self.assertAllEqual(fn_output, 1.0) @test_util.run_deprecated_v1 def testFuncCondFunc(self): @def_function.function def fn_with_cond(): cond_v2.cond_v2( constant_op.constant(True), lambda: constant_op.constant(1.), lambda: constant_op.constant(2.), name="cond_1") @def_function.function def true_branch(): return constant_op.constant(3.) return cond_v2.cond_v2( constant_op.constant(True), true_branch, lambda: constant_op.constant(4.), name="cond_2") concrete_fn = fn_with_cond.get_concrete_function() cond_1 = concrete_fn.graph.get_operation_by_name("cond_1") cond_2 = concrete_fn.graph.get_operation_by_name("cond_2") # Verify that all functional ops are stateless and cond_2 does not have # any control inputs. self.assertEqual(cond_1.type, "StatelessIf") self.assertEqual(cond_2.type, "StatelessIf") self.assertLen(cond_2.control_inputs, 0) cond_2_true_graph, _ = cond_v2.get_func_graphs(cond_2) cond_2_true_graph_operations = cond_2_true_graph.get_operations() self.assertEmpty([ op for op in cond_2_true_graph_operations if op.type == "StatefulPartitionedCall" ]) self.assertLen([ op for op in cond_2_true_graph_operations if op.type == "PartitionedCall" ], 1) fn_output = concrete_fn() self.assertEqual(fn_output.op.type, "PartitionedCall") self.assertAllEqual(fn_output, 3.0) @test_util.run_deprecated_v1 def testFuncCondWithVariable(self): v1 = variables.Variable(2.) v2 = variables.Variable(4.) 
self.evaluate(variables.global_variables_initializer()) def update_v1(): v1.assign(v1) return v1 def update_v2(): v2.assign(v2) return v2 @def_function.function def fn_with_cond(): cond_v2.cond_v2( constant_op.constant(True), update_v1, lambda: constant_op.constant(0.), name="cond_1") cond_2 = cond_v2.cond_v2( constant_op.constant(False), lambda: constant_op.constant(0.), update_v1, name="cond_2") cond_v2.cond_v2( constant_op.constant(True), update_v2, lambda: constant_op.constant(0.), name="cond_3") cond_4 = cond_v2.cond_v2( constant_op.constant(False), lambda: constant_op.constant(0.), lambda: v2, name="cond_4") stateless_cond = cond_v2.cond_v2( constant_op.constant(False), lambda: constant_op.constant(5.), lambda: constant_op.constant(6.), name="stateless_cond") return cond_2, cond_4, stateless_cond concrete_fn = fn_with_cond.get_concrete_function() cond_1 = concrete_fn.graph.get_operation_by_name("cond_1") cond_2 = concrete_fn.graph.get_operation_by_name("cond_2") cond_3 = concrete_fn.graph.get_operation_by_name("cond_3") cond_4 = concrete_fn.graph.get_operation_by_name("cond_4") stateless_cond = concrete_fn.graph.get_operation_by_name("stateless_cond") self.assertEqual(cond_1.type, "If") self.assertEqual(cond_2.type, "If") self.assertEqual(cond_3.type, "If") self.assertEqual(cond_4.type, "If") self.assertEqual(stateless_cond.type, "StatelessIf") self.assertEmpty(cond_1.control_inputs) self.assertLen(cond_2.control_inputs, 1) self.assertIs(cond_2.control_inputs[0], cond_1) self.assertEmpty(cond_3.control_inputs) self.assertLen(cond_4.control_inputs, 1) self.assertIs(cond_4.control_inputs[0], cond_3) # Does not touch any variable so should not have any control inputs. self.assertEmpty(stateless_cond.control_inputs) fn_output = concrete_fn() self.assertEqual(fn_output[0].op.type, "StatefulPartitionedCall") self.assertAllEqual(self.evaluate(fn_output), [2.0, 4.0, 6.0]) @test_util.run_deprecated_v1 def testFuncCondFuncWithVariable(self): v1 = variables.Variable(2.) v2 = variables.Variable(4.) 
self.evaluate(variables.global_variables_initializer()) @def_function.function def fn_with_cond(): def update_v1(): v1.assign(v1) return v1 def update_v2(): v2.assign(v2) return v2 cond_v2.cond_v2( constant_op.constant(True), update_v1, lambda: constant_op.constant(0.), name="cond_1") cond_2 = cond_v2.cond_v2( constant_op.constant(False), lambda: constant_op.constant(0.), update_v1, name="cond_2") cond_v2.cond_v2( constant_op.constant(True), update_v2, lambda: constant_op.constant(0.), name="cond_3") @def_function.function def cond_4_false_branch(): v2.assign(v2) return v2 cond_4 = cond_v2.cond_v2( constant_op.constant(False), lambda: constant_op.constant(0.), cond_4_false_branch, name="cond_4") return cond_2, cond_4 concrete_fn = fn_with_cond.get_concrete_function() cond_1 = concrete_fn.graph.get_operation_by_name("cond_1") cond_2 = concrete_fn.graph.get_operation_by_name("cond_2") cond_3 = concrete_fn.graph.get_operation_by_name("cond_3") cond_4 = concrete_fn.graph.get_operation_by_name("cond_4") self.assertEqual(cond_1.type, "If") self.assertEqual(cond_2.type, "If") self.assertEqual(cond_3.type, "If") self.assertEqual(cond_4.type, "If") self.assertEmpty(cond_1.control_inputs) self.assertLen(cond_2.control_inputs, 1) self.assertIs(cond_2.control_inputs[0], cond_1) self.assertEmpty(cond_3.control_inputs) self.assertLen(cond_4.control_inputs, 1) self.assertIs(cond_4.control_inputs[0], cond_3) _, cond_4_false_graph = cond_v2.get_func_graphs(cond_4) cond_4_false_graph_operations = cond_4_false_graph.get_operations() self.assertEmpty([ op for op in cond_4_false_graph_operations if op.type == "PartitionedCall" ]) self.assertLen([ op for op in cond_4_false_graph_operations if op.type == "StatefulPartitionedCall" ], 1) fn_output = concrete_fn() self.assertEqual(fn_output[0].op.type, "StatefulPartitionedCall") self.assertAllEqual(self.evaluate(fn_output), [2.0, 4.0]) def testGradientTapeOfCondWithResourceVariableInFunction(self): with context.eager_mode(): v = variables.Variable(2.) @def_function.function def fn_with_cond(): with backprop.GradientTape() as tape: pred = constant_op.constant(True, dtype=dtypes.bool) def true_fn(): return math_ops.pow(v, 3) def false_fn(): return v cond = cond_v2.cond_v2(pred, true_fn, false_fn, name="cond") return tape.gradient(cond, v) self.assertAllEqual(fn_with_cond(), 12.0) def testLowering(self): with ops.Graph().as_default() as g: with self.session(graph=g) as sess: cond_output, _ = self._createCond("cond") run_options = config_pb2.RunOptions(output_partition_graphs=True) run_metadata = config_pb2.RunMetadata() sess.run(cond_output, options=run_options, run_metadata=run_metadata) # If lowering was enabled, there should be a `Switch` node self.assertTrue( _has_node_with_op(run_metadata, "Switch"), "A `Switch` op should exist if the graph was lowered.") # If lowering was enabled, there should be no `If` node self.assertFalse( _has_node_with_op(run_metadata, "StatelessIf"), "An `If` op was found, but it should be lowered.") @test_util.run_deprecated_v1 def testLoweringDisabledInXLA(self): with self.session(graph=ops.Graph()) as sess: # Build the cond_v2 in an XLA context xla_context = control_flow_ops.XLAControlFlowContext() xla_context.Enter() cond_output, cond_op = self._createCond("cond") xla_context.Exit() # Check lowering attr is not set. with self.assertRaises(ValueError): cond_op.get_attr("_lower_using_switch_merge") # Check the actual graph that is run. 
run_options = config_pb2.RunOptions(output_partition_graphs=True) run_metadata = config_pb2.RunMetadata() sess.run(cond_output, options=run_options, run_metadata=run_metadata) # Lowering disabled in XLA, there should be no `Switch` node self.assertFalse( _has_node_with_op(run_metadata, "Switch"), "A `Switch` op exists, but the graph should not be lowered.") if test_util.is_xla_enabled(): # If XLA is actually enabled then we expect the StatelessIf to have been # put inside an XLA cluster. self.assertFalse( _has_node_with_op(run_metadata, "StatelessIf"), ("A `StatelessIf` op was found, but the node should have been " + "clustered.")) self.assertTrue( _has_node_with_op(run_metadata, "_XlaCompile"), ("An `_XlaCompile` op was not found, but the `StatelessIf` (at " + "least) op should have been clustered.")) self.assertTrue( _has_node_with_op(run_metadata, "_XlaRun"), ("An `_XlaRun` op was not found, but the `StatelessIf` (at " + "least) op should have been clustered.")) else: # Lowering disabled in XLA, there should still be an `If` node self.assertTrue( _has_node_with_op(run_metadata, "StatelessIf"), ("A `StatelessIf` op was not found, but the graph should not be " + "lowered.")) @test_util.run_deprecated_v1 def testNestedLoweringDisabledInXLA(self): # Build the cond_v2 in an XLA context xla_context = control_flow_ops.XLAControlFlowContext() xla_context.Enter() _, cond_op = self._createNestedCond("cond") xla_context.Exit() # Check lowering attr is not set for either If node. with self.assertRaises(ValueError): cond_op.get_attr("_lower_using_switch_merge") nested_if_ops = [] for func in ops.get_default_graph()._functions.values(): nested_if_ops.extend( op for op in func.graph.get_operations() if op.type == "StatelessIf") self.assertEqual(len(nested_if_ops), 1) with self.assertRaises(ValueError): nested_if_ops[0].get_attr("_lower_using_switch_merge") # TODO(skyewm): check the actual graphs that are run once we have a way to # programmatically access those graphs. 
# b/131355614 @test_util.run_deprecated_v1 def testNoOptionalsInXla(self): @def_function.function def func_with_cond(): pred = constant_op.constant(True, name="pred") x = constant_op.constant(1.0, name="x") def true_fn(): intermediate = x + 1 return intermediate * x def false_fn(): return x + 1 output = cond_v2.cond_v2(pred, true_fn, false_fn) grad = gradients_impl.gradients(output, x)[0] forward_if_op = output.op.inputs[0].op gradient_if_op = grad.op.inputs[0].op def verify_no_optional_ops(op, branch_name): branch_function = ops.get_default_graph()._get_function( op.get_attr(branch_name).name) function_def = branch_function.definition for node_def in function_def.node_def: self.assertNotIn(node_def.op, _OPTIONAL_OPS) verify_no_optional_ops(forward_if_op, "then_branch") verify_no_optional_ops(forward_if_op, "else_branch") verify_no_optional_ops(gradient_if_op, "then_branch") verify_no_optional_ops(gradient_if_op, "else_branch") return grad xla_context = control_flow_ops.XLAControlFlowContext() xla_context.Enter() func_with_cond() xla_context.Exit() @test_util.run_deprecated_v1 def testLoweringDisabledWithSingleThreadedExecutorContext(self): with self.session(graph=ops.Graph()) as sess: @function.defun def _add_cond(x): return cond_v2.cond_v2( constant_op.constant(True, name="pred"), lambda: x, lambda: x + 1) x = array_ops.placeholder(shape=None, dtype=dtypes.float32) with context.function_executor_type("SINGLE_THREADED_EXECUTOR"): out_cond = _add_cond(x) # The fact that sess.run() succeeds means lowering is disabled, because # the single threaded executor does not support cond v1 ops. sess.run(out_cond, feed_dict={x: 1.0}) @test_util.enable_control_flow_v2 def testStructuredOutputs(self): x = constant_op.constant(1.0, name="x") y = constant_op.constant(3.0, name="y") def true_fn(): return ((x * y,), y) def false_fn(): return ((x,), y * 3.0) output = control_flow_ops.cond( constant_op.constant(False), true_fn, false_fn) self.assertEqual(self.evaluate(output[0][0]), 1.) self.assertEqual(self.evaluate(output[1]), 9.) 
@test_util.enable_control_flow_v2 @test_util.run_deprecated_v1 def testRaisesOutputStructuresMismatch(self): x = constant_op.constant(1.0, name="x") y = constant_op.constant(3.0, name="y") def true_fn(): return x * y, y def false_fn(): return ((x,), y * 3.0) with self.assertRaisesRegexp( TypeError, "true_fn and false_fn arguments to tf.cond must have the " "same number, type, and overall structure of return values."): control_flow_ops.cond(constant_op.constant(False), true_fn, false_fn) @test_util.enable_control_flow_v2 def testCondAndTensorArray(self): x = math_ops.range(-5, 5) output = tensor_array_ops.TensorArray(dtype=dtypes.int32, size=x.shape[0]) def loop_body(i, output): def if_true(): return output.write(i, x[i]**2) def if_false(): return output.write(i, x[i]) output = control_flow_ops.cond(x[i] > 0, if_true, if_false) return i + 1, output _, output = control_flow_ops.while_loop( lambda i, arr: i < x.shape[0], loop_body, loop_vars=(constant_op.constant(0), output)) output_t = output.stack() self.assertAllEqual( self.evaluate(output_t), [-5, -4, -3, -2, -1, 0, 1, 4, 9, 16]) @test_util.enable_control_flow_v2 def testCondAndTensorArrayInDefun(self): @function.defun def f(): x = math_ops.range(-5, 5) output = tensor_array_ops.TensorArray(dtype=dtypes.int32, size=x.shape[0]) def loop_body(i, output): def if_true(): return output.write(i, x[i]**2) def if_false(): return output.write(i, x[i]) output = control_flow_ops.cond(x[i] > 0, if_true, if_false) return i + 1, output _, output = control_flow_ops.while_loop( lambda i, arr: i < x.shape[0], loop_body, loop_vars=(constant_op.constant(0), output)) return output.stack() output_t = f() self.assertAllEqual(output_t, [-5, -4, -3, -2, -1, 0, 1, 4, 9, 16]) @test_util.run_deprecated_v1 def testForwardPassRewrite(self): x = constant_op.constant(1.0, name="x") y = constant_op.constant(1.0, name="y") def true_fn(): y_plus_one = y + 1. return x * y_plus_one output = cond_v2.cond_v2(constant_op.constant(True), true_fn, lambda: x) if_op = output.op.inputs[0].op self.assertEqual(if_op.type, "StatelessIf") # pylint: disable=g-deprecated-assert self.assertEqual(len(if_op.outputs), 1) gradients_impl.gradients(output, x) # if_op should have been rewritten to output `y_plus_one`. self.assertEqual(len(if_op.outputs), 2) gradients_impl.gradients(output, x) # Computing the gradient again shouldn't rewrite if_op again. self.assertEqual(len(if_op.outputs), 2) # pylint: enable=g-deprecated-assert @test_util.run_deprecated_v1 def testDoNotAccumulateConstants(self): x = constant_op.constant(1.0, name="x") output = cond_v2.cond_v2( constant_op.constant(True), lambda: x * 2.0, lambda: x) if_op = output.op.inputs[0].op self.assertEqual(if_op.type, "StatelessIf") # pylint: disable=g-deprecated-assert self.assertEqual(len(if_op.outputs), 1) gradients_impl.gradients(output, x) # Number of outputs does change because # 1. `x` is a loop input so does not need to be accumulated. # 2. 2.0 is a constant so it is not accumulated. self.assertEqual(len(if_op.outputs), 1) gradients_impl.gradients(output, x) # Computing the gradient again shouldn't rewrite if_op again. 
self.assertEqual(len(if_op.outputs), 1) # pylint: enable=g-deprecated-assert class CondV2CollectionTest(test.TestCase): def testCollectionIntValueAccessInCond(self): """Read values from graph collections inside of cond_v2.""" with ops.Graph().as_default() as g: with self.session(graph=g): x = 2 y = 5 ops.add_to_collection("x", x) ops.add_to_collection("y", y) def fn(): x_const = constant_op.constant(ops.get_collection("x")[0]) y_const = constant_op.constant(ops.get_collection("y")[0]) return math_ops.add(x_const, y_const) cnd = cond_v2.cond_v2(constant_op.constant(True), fn, fn) self.assertEquals(cnd.eval(), 7) def testCollectionTensorValueAccessInCond(self): """Read tensors from collections inside of cond_v2 & use them.""" with ops.Graph().as_default() as g: with self.session(graph=g): x = constant_op.constant(2) y = constant_op.constant(5) ops.add_to_collection("x", x) ops.add_to_collection("y", y) def fn(): x_read = ops.get_collection("x")[0] y_read = ops.get_collection("y")[0] return math_ops.add(x_read, y_read) cnd = cond_v2.cond_v2(math_ops.less(x, y), fn, fn) self.assertEquals(cnd.eval(), 7) def testCollectionIntValueWriteInCond(self): """Make sure Int writes to collections work inside of cond_v2.""" with ops.Graph().as_default() as g: with self.session(graph=g): x = constant_op.constant(2) y = constant_op.constant(5) def true_fn(): z = math_ops.add(x, y) ops.add_to_collection("z", 7) return math_ops.mul(x, z) def false_fn(): z = math_ops.add(x, y) return math_ops.mul(x, z) cnd = cond_v2.cond_v2(constant_op.constant(True), true_fn, false_fn) self.assertEquals(cnd.eval(), 14) read_z_collection = ops.get_collection("z") self.assertEquals(read_z_collection, [7]) class CondV2ContainerTest(test.TestCase): def testContainer(self): """Set containers outside & inside of cond_v2. 
Make sure the containers are set correctly for both variable creation (tested by variables.Variable) and for stateful ops (tested by FIFOQueue) """ self.skipTest("b/113048653") with ops.Graph().as_default() as g: with self.session(graph=g): v0 = variables.Variable([0]) q0 = data_flow_ops.FIFOQueue(1, dtypes.float32) def container(node): return node.op.get_attr("container") self.assertEqual(compat.as_bytes(""), container(v0)) self.assertEqual(compat.as_bytes(""), container(q0.queue_ref)) def true_fn(): # When this branch is created in cond below, # the container should begin with 'l1' v1 = variables.Variable([1]) q1 = data_flow_ops.FIFOQueue(1, dtypes.float32) with ops.container("l2t"): v2 = variables.Variable([2]) q2 = data_flow_ops.FIFOQueue(1, dtypes.float32) v3 = variables.Variable([1]) q3 = data_flow_ops.FIFOQueue(1, dtypes.float32) self.assertEqual(compat.as_bytes("l1"), container(v1)) self.assertEqual(compat.as_bytes("l1"), container(q1.queue_ref)) self.assertEqual(compat.as_bytes("l2t"), container(v2)) self.assertEqual(compat.as_bytes("l2t"), container(q2.queue_ref)) self.assertEqual(compat.as_bytes("l1"), container(v3)) self.assertEqual(compat.as_bytes("l1"), container(q3.queue_ref)) return constant_op.constant(2.0) def false_fn(): # When this branch is created in cond below, # the container should begin with 'l1' v1 = variables.Variable([1]) q1 = data_flow_ops.FIFOQueue(1, dtypes.float32) with ops.container("l2f"): v2 = variables.Variable([2]) q2 = data_flow_ops.FIFOQueue(1, dtypes.float32) v3 = variables.Variable([1]) q3 = data_flow_ops.FIFOQueue(1, dtypes.float32) self.assertEqual(compat.as_bytes("l1"), container(v1)) self.assertEqual(compat.as_bytes("l1"), container(q1.queue_ref)) self.assertEqual(compat.as_bytes("l2f"), container(v2)) self.assertEqual(compat.as_bytes("l2f"), container(q2.queue_ref)) self.assertEqual(compat.as_bytes("l1"), container(v3)) self.assertEqual(compat.as_bytes("l1"), container(q3.queue_ref)) return constant_op.constant(6.0) with ops.container("l1"): cnd_true = cond_v2.cond_v2( constant_op.constant(True), true_fn, false_fn) self.assertEquals(cnd_true.eval(), 2) cnd_false = cond_v2.cond_v2( constant_op.constant(False), true_fn, false_fn) self.assertEquals(cnd_false.eval(), 6) v4 = variables.Variable([3]) q4 = data_flow_ops.FIFOQueue(1, dtypes.float32) v5 = variables.Variable([4]) q5 = data_flow_ops.FIFOQueue(1, dtypes.float32) self.assertEqual(compat.as_bytes("l1"), container(v4)) self.assertEqual(compat.as_bytes("l1"), container(q4.queue_ref)) self.assertEqual(compat.as_bytes(""), container(v5)) self.assertEqual(compat.as_bytes(""), container(q5.queue_ref)) class CondV2ColocationGroupAndDeviceTest(test.TestCase): def testColocateWithBeforeCond(self): with ops.Graph().as_default() as g: with self.session(graph=g): a = constant_op.constant([2.0], name="a") b = constant_op.constant([2.0], name="b") def fn(): c = constant_op.constant(3.0) self.assertEqual([b"loc:@a"], c.op.colocation_groups()) return c with ops.colocate_with(a.op): self.assertEquals( cond_v2.cond_v2(constant_op.constant(True), fn, fn).eval(), 3) def fn2(): c = constant_op.constant(3.0) self.assertEqual([b"loc:@a", b"loc:@b"], c.op.colocation_groups()) return c with ops.colocate_with(a.op): with ops.colocate_with(b.op): self.assertEquals( cond_v2.cond_v2(constant_op.constant(True), fn2, fn2).eval(), 3) def testColocateWithInAndOutOfCond(self): with ops.Graph().as_default() as g: with self.session(graph=g): a = constant_op.constant([2.0], name="a") b = constant_op.constant([2.0], name="b") 
def fn2(): with ops.colocate_with(b.op): c = constant_op.constant(3.0) self.assertEqual([b"loc:@a", b"loc:@b"], c.op.colocation_groups()) return c with ops.colocate_with(a.op): self.assertEquals( cond_v2.cond_v2(constant_op.constant(True), fn2, fn2).eval(), 3) d = constant_op.constant([2.0], name="d") self.assertEqual([b"loc:@a"], d.op.colocation_groups()) def testColocateWithInCondGraphPartitioning(self): with ops.Graph().as_default() as g: with self.session( graph=g, config=config_pb2.ConfigProto(device_count={"CPU": 2}) ) as sess: with ops.device("/device:CPU:0"): a = constant_op.constant([2.0], name="a") with ops.device("/device:CPU:1"): b = constant_op.constant([2.0], name="b") def fn(): with ops.colocate_with(b.op): c = math_ops.add(a, a, name="c") return c out_cond_2 = cond_v2.cond_v2(constant_op.constant(True), fn, fn) run_options = config_pb2.RunOptions(output_partition_graphs=True) run_metadata = config_pb2.RunMetadata() sess.run(out_cond_2, options=run_options, run_metadata=run_metadata) # We expect there to be two partitions because of the # colocate_with. We are only running the cond, which has a data # dependency on `a` but not on `b`. So, without the colocate_with # we would expect execution on just one device. self.assertTrue(len(run_metadata.partition_graphs) >= 2) def testDeviceBeforeCond(self): with ops.Graph().as_default() as g: with self.session(graph=g): def fn(): self.assertEqual("", constant_op.constant(3.0).op.device) return test_ops.device_placement_op() with ops.device("/device:CPU:0"): self.assertIn( compat.as_bytes("CPU:0"), self.evaluate(cond_v2.cond_v2(constant_op.constant(True), fn, fn))) def fn2(): self.assertEqual("", constant_op.constant(3.0).op.device) return test_ops.device_placement_op() if test_util.is_gpu_available(): with ops.device("/device:GPU:0"): self.assertIn( compat.as_bytes("GPU:0"), self.evaluate(cond_v2.cond_v2(constant_op.constant(True), fn2, fn2))) else: self.skipTest("Test requires a GPU to check GPU device placement.") def testDeviceInAndOutOfCond(self): with ops.Graph().as_default() as g: with self.session( graph=g, config=config_pb2.ConfigProto(device_count={"CPU": 2})): def fn2(): with ops.device("/device:CPU:1"): c = constant_op.constant(3.0) self.assertEqual("/device:CPU:1", c.op.device) return c with ops.device("/device:CPU:0"): self.assertEquals( cond_v2.cond_v2(constant_op.constant(True), fn2, fn2).eval(), 3) d = constant_op.constant(4.0) self.assertEqual("/device:CPU:0", d.op.device) def testDeviceInCondGraphPartitioning(self): with ops.Graph().as_default() as g: with self.session( graph=g, config=config_pb2.ConfigProto(device_count={"CPU": 2}) ) as sess: def fn(): with ops.device("/device:CPU:1"): c = math_ops.add(a, a, name="c") return c with ops.device("/device:CPU:0"): a = constant_op.constant([2.0], name="a") out_cond_2 = cond_v2.cond_v2(constant_op.constant(True), fn, fn) run_options = config_pb2.RunOptions(output_partition_graphs=True) run_metadata = config_pb2.RunMetadata() sess.run(out_cond_2, options=run_options, run_metadata=run_metadata) self.assertTrue(len(run_metadata.partition_graphs) >= 2) def _cond(pred, true_fn, false_fn, name): if _is_old_cond(): return control_flow_ops.cond(pred, true_fn, false_fn, name=name) else: return cond_v2.cond_v2(pred, true_fn, false_fn, name=name) def _is_old_cond(): return isinstance(ops.get_default_graph()._get_control_flow_context(), control_flow_ops.CondContext) def _has_node_with_op(run_metadata, op_type): """Whether any node in `run_metadata.partition_graphs` matches 
`op_type`."""
  for graph in run_metadata.partition_graphs:
    for node in graph.node:
      if node.op == op_type:
        return True
  return False


if __name__ == "__main__":
  test.main()
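# A hedged, stand-alone sketch of the API exercised by the tests above; it is not
# part of the test module. The explicit Session import is an assumption (the tests
# use self.session()/self.cached_session() instead), and the expected values are
# illustrative.
from tensorflow.python.client import session as session_lib

with ops.Graph().as_default():
  pred = array_ops.placeholder(dtypes.bool, name="pred")
  x = constant_op.constant(2.0, name="x")
  # cond_v2.cond_v2 mirrors tf.cond: a boolean predicate plus two zero-argument
  # branch callables; external tensors such as x are captured implicitly.
  out = cond_v2.cond_v2(pred, lambda: x * 3.0, lambda: x + 1.0)
  with session_lib.Session() as sess:
    print(sess.run(out, {pred: True}))   # expect 6.0 (true branch: x * 3.0)
    print(sess.run(out, {pred: False}))  # expect 3.0 (false branch: x + 1.0)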
import warnings from distutils.version import LooseVersion from typing import TYPE_CHECKING import requests import six from pyramid.authentication import IAuthenticationPolicy from pyramid.httpexceptions import ( HTTPBadRequest, HTTPException, HTTPForbidden, HTTPOk, HTTPServiceUnavailable, HTTPUnauthorized ) from pyramid_beaker import set_cache_regions_from_settings from requests.exceptions import HTTPError from six.moves.urllib.parse import parse_qsl, urlparse from magpie.__meta__ import __version__ as magpie_version from magpie.adapter.magpieowssecurity import MagpieOWSSecurity from magpie.adapter.magpieservice import MagpieServiceStore from magpie.api.exception import evaluate_call, raise_http, valid_http, verify_param from magpie.api.generic import get_request_info from magpie.api.schemas import SigninAPI from magpie.security import get_auth_config from magpie.utils import ( CONTENT_TYPE_JSON, SingletonMeta, get_cookies, get_json, get_logger, get_magpie_url, get_settings, is_json_body, setup_cache_settings, setup_session_config ) # WARNING: # Twitcher available only when this module is imported from it. # It is installed during tests for evaluation. # Module 'magpie.adapter' should not be imported from 'magpie' package. from twitcher.__version__ import __version__ as twitcher_version # noqa from twitcher.adapter.base import AdapterInterface # noqa from twitcher.owsproxy import owsproxy_defaultconfig # noqa if LooseVersion(twitcher_version) >= LooseVersion("0.6.0"): from twitcher.owsregistry import OWSRegistry # noqa # pylint: disable=E0611 # Twitcher >= 0.6.x if LooseVersion(twitcher_version) >= LooseVersion("0.7.0"): warnings.warn( "Magpie version is not guaranteed to work with newer versions of Twitcher. " "This Magpie version offers compatibility with Twitcher 0.6.x. " "Current package versions are (Twitcher: {}, Magpie: {})".format(twitcher_version, magpie_version), ImportWarning ) if LooseVersion(twitcher_version) < LooseVersion("0.6.0"): warnings.warn( "Magpie version is not guaranteed to work with versions prior to Twitcher 0.6.x. " "It is recommended to either use more recent Twitcher 0.6.x version or revert back " "to older Magpie < 3.18 in order to use Twitcher 0.5.x versions. " "Current package versions are (Twitcher: {}, Magpie: {})".format(twitcher_version, magpie_version), ImportWarning ) if LooseVersion(twitcher_version) == LooseVersion("0.6.0"): warnings.warn( "Twitcher 0.6.0 exact version does not have complete compatibility support for MagpieAdapter. " "It is recommended to either revert to Twitcher 0.5.x and previous Magpie < 3.18 version, " "or use an higher Twitcher 0.6.x version. " "Current package versions are (Twitcher: {}, Magpie: {})".format(twitcher_version, magpie_version), ImportWarning ) if TYPE_CHECKING: from typing import Optional, Union from pyramid.authentication import AuthTktCookieHelper from pyramid.config import Configurator from pyramid.request import Request from magpie.typedefs import JSON, AnyResponseType, AnySettingsContainer, Str from twitcher.store import AccessTokenStoreInterface # noqa # pylint: disable=E0611 # Twitcher <= 0.5.x LOGGER = get_logger("TWITCHER|{}".format(__name__)) def verify_user(request): # type: (Request) -> HTTPException """ Verifies that a valid user authentication on the pointed ``Magpie`` instance (via configuration) also results into a valid user authentication with the current ``Twitcher`` instance to ensure settings match between them. 
:param request: an HTTP request with valid authentication token/cookie credentials. :return: appropriate HTTP success or error response with details about the result. """ magpie_url = get_magpie_url(request) def try_login(): # type: () -> Union[AnyResponseType, HTTPError] try: params = dict(parse_qsl(urlparse(request.url).query)) if is_json_body(request.text) and not params: return requests.post(magpie_url + SigninAPI.path, json=request.json, headers={"Content-Type": CONTENT_TYPE_JSON, "Accept": CONTENT_TYPE_JSON}) return requests.get(magpie_url + SigninAPI.path, data=request.text, params=params) except HTTPError as exc: if getattr(exc, "status_code", 500) >= 500: raise return exc # must generate request metadata manually because Twitcher # won't have Magpie's tween that builds it automatically info = get_request_info(request) resp = evaluate_call(lambda: try_login(), http_error=HTTPServiceUnavailable, metadata=info, content={"magpie_url": magpie_url}, content_type=CONTENT_TYPE_JSON, msg_on_fail="Could not obtain response from Magpie to validate login.") try: verify_param(resp.status_code, is_equal=True, param_compare=HTTPOk.code, param_name="status_code", http_error=HTTPBadRequest, content={"response": get_json(resp)}, content_type=CONTENT_TYPE_JSON, msg_on_fail="Failed Magpie login due to invalid or missing parameters.", metadata=info) authn_policy = request.registry.queryUtility(IAuthenticationPolicy) # noqa authn_cookie = authn_policy.cookie # type: AuthTktCookieHelper cookie_name = authn_cookie.cookie_name req_cookies = get_cookies(request) resp_cookies = get_cookies(resp) verify_param(cookie_name, is_in=True, param_compare=req_cookies, with_param=False, http_error=HTTPUnauthorized, content_type=CONTENT_TYPE_JSON, metadata=info, msg_on_fail="Authentication cookies missing from request to validate against Magpie instance.") verify_param(cookie_name, is_in=True, param_compare=resp_cookies, with_param=False, http_error=HTTPUnauthorized, content_type=CONTENT_TYPE_JSON, metadata=info, msg_on_fail="Authentication cookies missing from response to validate against Magpie instance.") twitcher_identity = authn_cookie.identify(request) verify_param(twitcher_identity, not_none=True, with_param=False, http_error=HTTPUnauthorized, content_type=CONTENT_TYPE_JSON, metadata=info, msg_on_fail="Authentication failed from Twitcher policy.") twitcher_user_id = twitcher_identity["userid"] verify_param(twitcher_user_id, not_none=True, is_type=True, param_compare=int, with_param=False, http_error=HTTPUnauthorized, content_type=CONTENT_TYPE_JSON, metadata=info, msg_on_fail="Authentication failed from Twitcher policy.") cookie_value = resp_cookies[cookie_name] cookie_userid_type = cookie_value.split("!userid_type:")[-1] cookie_decode = authn_cookie.userid_type_decoders[cookie_userid_type] cookie_ip = "0.0.0.0" # nosec: B104 result = authn_cookie.parse_ticket(authn_cookie.secret, cookie_value, cookie_ip, authn_cookie.hashalg) magpie_user_id = cookie_decode(result[1]) verify_param(magpie_user_id, is_equal=True, param_compare=twitcher_user_id, with_param=False, http_error=HTTPForbidden, content_type=CONTENT_TYPE_JSON, metadata=info, msg_on_fail="Twitcher login incompatible with Magpie login.") except HTTPException as resp_err: return resp_err except Exception: return raise_http(HTTPForbidden, content_type=CONTENT_TYPE_JSON, metadata=info, detail="Twitcher login incompatible with Magpie login.", nothrow=True) return valid_http(HTTPOk, detail="Twitcher login verified successfully with Magpie login.") 
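# Hedged illustration, not part of the adapter module: exercising the "/verify"
# route that configurator_factory() (below) wires to verify_user(). Host names and
# credentials are placeholders; 'requests' is already imported at the top of this
# module. The same Magpie auth cookie must be visible to Twitcher for the check to
# succeed.
creds = {"user_name": "demo", "password": "demo"}                        # placeholder
signin = requests.post("https://magpie.example.com/signin", json=creds)  # placeholder Magpie URL
resp = requests.post(
    "https://twitcher.example.com/verify",    # route added by this adapter
    json=creds,
    cookies=signin.cookies,                   # forward the Magpie auth cookie to Twitcher
)
print(resp.status_code)  # 200 when both services resolve the cookie to the same user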
@six.add_metaclass(SingletonMeta) class MagpieAdapter(AdapterInterface): # pylint: disable: W0223,W0612 def __init__(self, container): # type: (AnySettingsContainer) -> None self._servicestore = None self._owssecurity = None super(MagpieAdapter, self).__init__(container) # pylint: disable=E1101,no-member @property def name(self): # type: () -> Str # pylint: disable=E1101,no-member return AdapterInterface.name.fget(self) # noqa def describe_adapter(self): # type: () -> JSON return {"name": self.name, "version": magpie_version} def servicestore_factory(self, request): # type: (Request) -> MagpieServiceStore if self._servicestore is None: self._servicestore = MagpieServiceStore(request) return self._servicestore def tokenstore_factory(self, request): # type: (Request) -> AccessTokenStoreInterface """ Unused token store implementation. .. versionchanged:: 3.18 Available only in ``Twitcher <= 0.5.x``. """ raise NotImplementedError def owsregistry_factory(self, request): # type: (Request) -> OWSRegistry """ Creates the :class:`OWSRegistry` implementation derived from :class:`MagpieServiceStore`. .. versionadded:: 3.18 Available only in ``Twitcher >= 0.6.x``. """ return OWSRegistry(self.servicestore_factory(request)) def owssecurity_factory(self, request=None): # noqa # pylint: disable=W0221 # diff between Twitcher 0.5.x/0.6.x # type: (Optional[AnySettingsContainer]) -> MagpieOWSSecurity """ Creates the :class:`OWSSecurity` implementation derived from :class:`MagpieOWSSecurity`. .. versionchanged:: 3.18 Method :paramref:`request` does not exist starting in ``Twitcher >= 0.6.x``. """ if self._owssecurity is None: self._owssecurity = MagpieOWSSecurity(request or self.settings) return self._owssecurity def owsproxy_config(self, container): # type: (AnySettingsContainer) -> None LOGGER.info("Loading MagpieAdapter owsproxy config") config = self.configurator_factory(container) owsproxy_defaultconfig(config) # let Twitcher configure the rest normally def configurator_factory(self, container): # noqa: R0201 # type: (AnySettingsContainer) -> Configurator LOGGER.debug("Preparing database session.") settings = get_settings(container) setup_cache_settings(settings) # default 'cache=off' if missing since 'pyramid_beaker' enables it otherwise set_cache_regions_from_settings(settings) # parse/convert cache settings into regions understood by beaker # disable rpcinterface which is conflicting with postgres db settings["twitcher.rpcinterface"] = False LOGGER.info("Loading Magpie AuthN/AuthZ configuration for adapter.") config = get_auth_config(container) config.include("pyramid_beaker") setup_session_config(config) # add route to verify user token matching between Magpie/Twitcher config.add_route("verify-user", "/verify", request_method=("GET", "POST")) config.add_view(verify_user, route_name="verify-user") return config
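# Illustrative sketch only: a minimal singleton metaclass, equivalent in spirit to
# magpie.utils.SingletonMeta (the real implementation may differ). It shows why
# @six.add_metaclass(SingletonMeta) makes MagpieAdapter behave as a single shared instance.
class _SingletonSketch(type):
    _instances = {}

    def __call__(cls, *args, **kwargs):
        # The first construction wins; later calls return the cached instance.
        if cls not in cls._instances:
            cls._instances[cls] = super(_SingletonSketch, cls).__call__(*args, **kwargs)
        return cls._instances[cls]


class _AdapterLike(metaclass=_SingletonSketch):  # Python 3 syntax, for brevity
    def __init__(self, settings):
        self.settings = settings


assert _AdapterLike({"a": 1}) is _AdapterLike({"b": 2})  # the same object both times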
"""Transformers This module contains transformers for preprocessing data. Most operate on DataFrames and are named appropriately. """ import numpy as np import pandas as pd from sklearn.base import TransformerMixin from imblearn.over_sampling import RandomOverSampler from imblearn.under_sampling import RandomUnderSampler from sklearn.preprocessing import StandardScaler class DataFrameImputer(TransformerMixin): """ Impute missing values in a dataframe. Columns of dtype object or category (assumed categorical) are imputed with the mode (most frequent value in column). Columns of other types (assumed continuous) are imputed with mean of column. """ def __init__(self, impute=True): self.impute = impute self.object_columns = None self.fill = None def fit(self, X, y=None): # Return if not imputing if self.impute is False: return self # Grab list of object column names before doing imputation self.object_columns = X.select_dtypes(include=['object']).columns.values self.fill = pd.Series([X[c].value_counts().index[0] if X[c].dtype == np.dtype('O') or pd.core.common.is_categorical_dtype(X[c]) else X[c].mean() for c in X], index=X.columns) num_nans = sum(X.select_dtypes(include=[np.number]).isnull().sum()) num_total = sum(X.select_dtypes(include=[np.number]).count()) percentage_imputed = num_nans / num_total * 100 print("Percentage Imputed: {}%".format(percentage_imputed)) # return self for scikit compatibility return self def transform(self, X, y=None): # Return if not imputing if self.impute is False: return X result = X.fillna(self.fill) for i in self.object_columns: if result[i].dtype not in ['object', 'category']: result[i] = result[i].astype('object') return result class DataFrameConvertTargetToBinary(TransformerMixin): # TODO Note that this makes healthcareai only handle N/Y in pred column """ Convert classification model's predicted col to 0/1 (otherwise won't work with GridSearchCV). Passes through data for regression models unchanged. This is to simplify the data pipeline logic. (Though that may be a more appropriate place for the logic...) Note that this makes healthcareai only handle N/Y in pred column """ def __init__(self, model_type, target_column): self.model_type = model_type self.target_column = target_column def fit(self, X, y=None): # return self for scikit compatibility return self def transform(self, X, y=None): # TODO: put try/catch here when type = class and predictor is numeric # TODO this makes healthcareai only handle N/Y in pred column if self.model_type == 'classification': # Turn off warning around replace pd.options.mode.chained_assignment = None # default='warn' # Replace 'Y'/'N' with 1/0 X[self.target_column].replace(['Y', 'N'], [1, 0], inplace=True) return X class DataFrameCreateDummyVariables(TransformerMixin): """Convert all categorical columns into dummy/indicator variables. 
Exclude given columns.""" def __init__(self, excluded_columns=None): self.excluded_columns = excluded_columns def fit(self, X, y=None): # return self for scikit compatibility return self def transform(self, X, y=None): columns_to_dummify = X.select_dtypes(include=[object, 'category']) # remove excluded columns (if they are still in the list) for column in columns_to_dummify: if column in self.excluded_columns: columns_to_dummify.remove(column) # Create dummy variables X = pd.get_dummies(X, columns=columns_to_dummify, drop_first=True, prefix_sep='.') return X class DataFrameConvertColumnToNumeric(TransformerMixin): """Convert a column into numeric variables.""" def __init__(self, column_name): self.column_name = column_name def fit(self, X, y=None): # return self for scikit compatibility return self def transform(self, X, y=None): X[self.column_name] = pd.to_numeric(arg=X[self.column_name], errors='raise') return X class DataFrameUnderSampling(TransformerMixin): """ Performs undersampling on a dataframe. Must be done BEFORE train/test split so that when we split the under/over sampled dataset. Must be done AFTER imputation, since under/over sampling will not work with missing values (imblearn requires target column to be converted to numerical values) """ def __init__(self, predicted_column, random_seed=0): self.random_seed = random_seed self.predicted_column = predicted_column def fit(self, X, y=None): # return self for scikit compatibility return self def transform(self, X, y=None): # TODO how do we validate this happens before train/test split? Or do we need to? Can we implement it in the # TODO simple trainer in the correct order and leave this to advanced users? # Extract predicted column y = np.squeeze(X[[self.predicted_column]]) # Copy the dataframe without the predicted column temp_dataframe = X.drop([self.predicted_column], axis=1) # Initialize and fit the under sampler under_sampler = RandomUnderSampler(random_state=self.random_seed) x_under_sampled, y_under_sampled = under_sampler.fit_sample(temp_dataframe, y) # Build the resulting under sampled dataframe result = pd.DataFrame(x_under_sampled) # Restore the column names result.columns = temp_dataframe.columns # Restore the y values y_under_sampled = pd.Series(y_under_sampled) result[self.predicted_column] = y_under_sampled return result class DataFrameOverSampling(TransformerMixin): """ Performs oversampling on a dataframe. Must be done BEFORE train/test split so that when we split the under/over sampled dataset. Must be done AFTER imputation, since under/over sampling will not work with missing values (imblearn requires target column to be converted to numerical values) """ def __init__(self, predicted_column, random_seed=0): self.random_seed = random_seed self.predicted_column = predicted_column def fit(self, X, y=None): # return self for scikit compatibility return self def transform(self, X, y=None): # TODO how do we validate this happens before train/test split? Or do we need to? Can we implement it in the # TODO simple trainer in the correct order and leave this to advanced users? 
# Extract predicted column y = np.squeeze(X[[self.predicted_column]]) # Copy the dataframe without the predicted column temp_dataframe = X.drop([self.predicted_column], axis=1) # Initialize and fit the under sampler over_sampler = RandomOverSampler(random_state=self.random_seed) x_over_sampled, y_over_sampled = over_sampler.fit_sample(temp_dataframe, y) # Build the resulting under sampled dataframe result = pd.DataFrame(x_over_sampled) # Restore the column names result.columns = temp_dataframe.columns # Restore the y values y_over_sampled = pd.Series(y_over_sampled) result[self.predicted_column] = y_over_sampled return result class DataFrameDropNaN(TransformerMixin): """Remove NaN values. Columns that are NaN or None are removed.""" def __init__(self): pass def fit(self, X, y=None): return self def transform(self, X, y=None): # Uses pandas.DataFrame.dropna function where axis=1 is column action, and # how='all' requires all the values to be NaN or None to be removed. return X.dropna(axis=1, how='all') class DataFrameFeatureScaling(TransformerMixin): """Scales numeric features. Columns that are numerics are scaled, or otherwise specified.""" def __init__(self, columns_to_scale=None, reuse=None): self.columns_to_scale = columns_to_scale self.reuse = reuse def fit(self, X, y=None): return self def transform(self, X, y=None): # Check if it's reuse, if so, then use the reuse's DataFrameFeatureScaling if self.reuse: return self.reuse.fit_transform(X, y) # Check if we know what columns to scale, if not, then get all the numeric columns' names if not self.columns_to_scale: self.columns_to_scale = list(X.select_dtypes(include=[np.number]).columns) X[self.columns_to_scale] = StandardScaler().fit_transform(X[self.columns_to_scale]) return X
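# Hedged usage sketch (illustration only): chaining two of the transformers above in a
# scikit-learn Pipeline. The sample DataFrame and column names are made up; the classes
# used are the ones defined in this module.
def _transformer_pipeline_demo():
    from sklearn.pipeline import Pipeline
    example = pd.DataFrame({
        "age": ["34", "52", None],          # numeric values stored as strings
        "empty": [np.nan, np.nan, np.nan],  # an all-NaN column that should be dropped
    })
    pipeline = Pipeline([
        ("drop_all_nan_columns", DataFrameDropNaN()),
        ("age_to_numeric", DataFrameConvertColumnToNumeric("age")),
    ])
    return pipeline.fit_transform(example)  # DataFrame with a single numeric 'age' column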
# Copyright 2017 The Kubernetes Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Generates a SQLite DB containing test data downloaded from GCS.""" import argparse import logging import json import os import random import re import signal import sys import time import urllib.parse from xml.etree import cElementTree as ET import multiprocessing import multiprocessing.pool import requests import ruamel.yaml as yaml import model def pad_numbers(string): """Modify a string to make its numbers suitable for natural sorting.""" return re.sub(r'\d+', lambda m: m.group(0).rjust(16, '0'), string) WORKER_CLIENT = None # used for multiprocessing class GCSClient: def __init__(self, jobs_dir, metadata=None): self.jobs_dir = jobs_dir self.metadata = metadata or {} self.session = requests.Session() def _request(self, path, params, as_json=True): """GETs a JSON resource from GCS, with retries on failure. Retries are based on guidance from cloud.google.com/storage/docs/gsutil/addlhelp/RetryHandlingStrategy """ url = f'https://www.googleapis.com/storage/v1/b/{path}' for retry in range(23): try: resp = self.session.get(url, params=params, stream=False) if 400 <= resp.status_code < 500 and resp.status_code != 429: return None resp.raise_for_status() if as_json: try: return resp.json() except json.decoder.JSONDecodeError: logging.exception('Failed to decode request for %s', path) return None return resp.text except requests.exceptions.RequestException: logging.exception('request failed %s', url) time.sleep(random.random() * min(60, 2 ** retry)) @staticmethod def _parse_uri(path): if not path.startswith('gs://'): raise ValueError("Bad path: %s, should start with 'gs://'" % path) bucket, prefix = path[5:].split('/', 1) return bucket, prefix def get(self, path, as_json=False): """Get an object from GCS.""" bucket, path = self._parse_uri(path) return self._request(f'{bucket}/o/{urllib.parse.quote(path, "")}', {'alt': 'media'}, as_json=as_json) def ls(self, path, dirs=True, files=True, delim=True, item_field='name', build_limit=sys.maxsize,): """Lists objects under a path on gcs.""" # pylint: disable=invalid-name bucket, path = self._parse_uri(path) params = {'prefix': path, 'fields': 'nextPageToken'} if delim: params['delimiter'] = '/' if dirs: params['fields'] += ',prefixes' if files: params['fields'] += f',items({item_field})' while build_limit > 0: resp = self._request(f'{bucket}/o', params) if resp is None: # nothing under path? 
return for prefix in resp.get('prefixes', []): build_limit -= 1 yield f'gs://{bucket}/{prefix}' for item in resp.get('items', []): if item_field == 'name': build_limit -= 1 yield f'gs://{bucket}/{item["name"]}' else: build_limit -= 1 yield item[item_field] if 'nextPageToken' not in resp: break params['pageToken'] = resp['nextPageToken'] def ls_dirs(self, path): return self.ls(path, dirs=True, files=False) def _ls_junit_paths(self, build_dir): """Lists the paths of JUnit XML files for a build.""" url = f'{build_dir}artifacts/' for path in self.ls(url): if re.match(r'.*/junit.*\.xml$', path): yield path def get_junits_from_build(self, build_dir): """Generates all tests for a build.""" files = {} assert not build_dir.endswith('/') for junit_path in self._ls_junit_paths(build_dir + '/'): files[junit_path] = self.get(junit_path) return files def _get_jobs(self): """Generates all jobs in the bucket.""" for job_path in self.ls_dirs(self.jobs_dir): yield os.path.basename(os.path.dirname(job_path)) def _get_builds(self, job, build_limit=sys.maxsize): '''Returns whether builds are precise (guarantees existence)''' if self.metadata.get('sequential', True): try: latest_build = int(self.get(f'{self.jobs_dir}{job}/latest-build.txt')) except (ValueError, TypeError): pass else: return False, (str(n) for n in range(latest_build, 0, -1)[:build_limit]) # Invalid latest-build or bucket is using timestamps build_paths = self.ls_dirs(f'{self.jobs_dir}{job}/') return True, sorted( (os.path.basename(os.path.dirname(b)) for b in build_paths), key=pad_numbers, reverse=True)[:build_limit] def get_started_finished(self, job, build): if self.metadata.get('pr'): build_dir = self.get(f'{self.jobs_dir}/directory/{job}/{build}.txt').strip() else: build_dir = f'{self.jobs_dir}{job}/{build}' started = self.get(f'{build_dir}/started.json', as_json=True) finished = self.get(f'{build_dir}/finished.json', as_json=True) return build_dir, started, finished def get_builds(self, builds_have, build_limit=sys.maxsize): """Generates all (job, build) pairs ever.""" if self.metadata.get('pr'): files = self.ls(self.jobs_dir + '/directory/', delim=False, build_limit=build_limit) for fname in files: if fname.endswith('.txt') and 'latest-build' not in fname: job, build = fname[:-4].split('/')[-2:] if (job, build) in builds_have: continue yield job, build return for job in self._get_jobs(): if job in self.metadata.get('exclude_jobs', []): continue have = 0 precise, builds = self._get_builds(job, build_limit) for build in builds: if (job, build) in builds_have: have += 1 if have > 40 and not precise: break continue yield job, build def mp_init_worker(jobs_dir, metadata, client_class, use_signal=True): """ Initialize the environment for multiprocessing-based multithreading. """ if use_signal: signal.signal(signal.SIGINT, signal.SIG_IGN) # Multiprocessing doesn't allow local variables for each worker, so we need # to make a GCSClient global variable. 
global WORKER_CLIENT # pylint: disable=global-statement WORKER_CLIENT = client_class(jobs_dir, metadata) def get_started_finished(job_info): (job, build) = job_info try: return WORKER_CLIENT.get_started_finished(job, build) except: # pylint: disable=W0702 logging.exception('failed to get tests for %s/%s', job, build) return None, None, None def get_junits(build_info): (build_id, gcs_path) = build_info try: junits = WORKER_CLIENT.get_junits_from_build(gcs_path) return build_id, gcs_path, junits except: logging.exception('failed to get junits for %s', gcs_path) raise def get_all_builds(db, jobs_dir, metadata, threads, client_class, build_limit): """ Adds information about tests to a dictionary. Args: jobs_dir: the GCS path containing jobs. metadata: a dict of metadata about the jobs_dir. threads: how many threads to use to download build information. client_class: a constructor for a GCSClient (or a subclass). """ gcs = client_class(jobs_dir, metadata) print(f'Loading builds from {jobs_dir}') sys.stdout.flush() builds_have = db.get_existing_builds(jobs_dir) print(f'already have {len(builds_have)} builds') sys.stdout.flush() jobs_and_builds = gcs.get_builds(builds_have, build_limit) pool = None if threads > 1: pool = multiprocessing.Pool(threads, mp_init_worker, (jobs_dir, metadata, client_class)) builds_iterator = pool.imap_unordered( get_started_finished, jobs_and_builds) else: global WORKER_CLIENT # pylint: disable=global-statement WORKER_CLIENT = gcs builds_iterator = ( get_started_finished(job_build) for job_build in jobs_and_builds) try: for n, (build_dir, started, finished) in enumerate(builds_iterator): if not build_dir: continue # skip builds that raised exceptions print(f'inserting build: {build_dir}') if started or finished: db.insert_build(build_dir, started, finished) if n % 200 == 0: db.commit() except KeyboardInterrupt: if pool: pool.terminate() raise else: if pool: pool.close() pool.join() db.commit() def remove_system_out(data): """Strip bloated system-out annotations.""" if 'system-out' in data: try: root = ET.fromstring(data) for parent in root.findall('*//system-out/..'): for child in parent.findall('system-out'): parent.remove(child) return ET.tostring(root, 'unicode') except ET.ParseError: pass return data def download_junit(db, threads, client_class): """Download junit results for builds without them.""" logging.info('Downloading JUnit artifacts.') sys.stdout.flush() builds_to_grab = db.get_builds_missing_junit() pool = None if threads > 1: pool = multiprocessing.pool.ThreadPool( threads, mp_init_worker, ('', {}, client_class, False)) test_iterator = pool.imap_unordered( get_junits, builds_to_grab) else: global WORKER_CLIENT # pylint: disable=global-statement WORKER_CLIENT = client_class('', {}) test_iterator = ( get_junits(build_path) for build_path in builds_to_grab) for n, (build_id, build_path, junits) in enumerate(test_iterator, 1): logging.info('%d/%d %s %d %d', n, len(builds_to_grab), build_path, len(junits), len(''.join(junits.values()))) junits = {k: remove_system_out(v) for k, v in junits.items()} db.insert_build_junits(build_id, junits) if n % 100 == 0: db.commit() db.commit() if pool: pool.close() pool.join() def main(db, jobs_dirs, threads, get_junit, build_limit, client_class=GCSClient): """Collect test info in matching jobs.""" get_all_builds(db, 'gs://kubernetes-jenkins/pr-logs', {'pr': True}, threads, client_class, build_limit) for bucket, metadata in jobs_dirs.items(): if not bucket.endswith('/'): bucket += '/' get_all_builds(db, bucket, metadata, 
                       threads, client_class, build_limit)

    if get_junit:
        download_junit(db, threads, client_class)


def get_options(argv):
    """Process command line arguments."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--buckets',
        help='YAML file with GCS bucket locations',
        required=True,
    )
    parser.add_argument(
        '--threads',
        help='number of concurrent threads to download results with',
        default=32,
        type=int,
    )
    parser.add_argument(
        '--junit',
        action='store_true',
        help='Download JUnit results from each build'
    )
    parser.add_argument(
        '--buildlimit',
        help='maximum number of runs within each job to pull, '
             'all jobs will be collected if unset or 0',
        default=int(os.getenv('BUILD_LIMIT', '0')),
        type=int,
    )
    return parser.parse_args(argv)


if __name__ == '__main__':
    OPTIONS = get_options(sys.argv[1:])
    OPTIONS.buildlimit = OPTIONS.buildlimit or sys.maxsize
    main(
        model.Database(),
        yaml.safe_load(open(OPTIONS.buckets)),
        OPTIONS.threads,
        OPTIONS.junit,
        OPTIONS.buildlimit,
    )
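# Hedged sketch (illustration only, not part of the downloader) of why pad_numbers()
# enables natural ordering of build names: zero-padding each digit run makes plain
# lexicographic order agree with numeric order. The build names below are made up.
def _pad_numbers_demo():
    builds = ['build-9', 'build-10', 'build-100']
    assert sorted(builds) == ['build-10', 'build-100', 'build-9']                   # lexicographic
    assert sorted(builds, key=pad_numbers) == ['build-9', 'build-10', 'build-100']  # natural order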
from Adafruit_I2C import Adafruit_I2C import time import math Oled = Adafruit_I2C(0x3c) Command_Mode=0x80 Data_mode=0x40 grayH= 0xF0 grayL= 0x0F Normal_Display_Cmd=0xA4 BasicFont = [[0 for x in xrange(8)] for x in xrange(10)] BasicFont=[[0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00], [0x00,0x00,0x5F,0x00,0x00,0x00,0x00,0x00], [0x00,0x00,0x07,0x00,0x07,0x00,0x00,0x00], [0x00,0x14,0x7F,0x14,0x7F,0x14,0x00,0x00], [0x00,0x24,0x2A,0x7F,0x2A,0x12,0x00,0x00], [0x00,0x23,0x13,0x08,0x64,0x62,0x00,0x00], [0x00,0x36,0x49,0x55,0x22,0x50,0x00,0x00], [0x00,0x00,0x05,0x03,0x00,0x00,0x00,0x00], [0x00,0x1C,0x22,0x41,0x00,0x00,0x00,0x00], [0x00,0x41,0x22,0x1C,0x00,0x00,0x00,0x00], [0x00,0x08,0x2A,0x1C,0x2A,0x08,0x00,0x00], [0x00,0x08,0x08,0x3E,0x08,0x08,0x00,0x00], [0x00,0xA0,0x60,0x00,0x00,0x00,0x00,0x00], [0x00,0x08,0x08,0x08,0x08,0x08,0x00,0x00], [0x00,0x60,0x60,0x00,0x00,0x00,0x00,0x00], [0x00,0x20,0x10,0x08,0x04,0x02,0x00,0x00], [0x00,0x3E,0x51,0x49,0x45,0x3E,0x00,0x00], [0x00,0x00,0x42,0x7F,0x40,0x00,0x00,0x00], [0x00,0x62,0x51,0x49,0x49,0x46,0x00,0x00], [0x00,0x22,0x41,0x49,0x49,0x36,0x00,0x00], [0x00,0x18,0x14,0x12,0x7F,0x10,0x00,0x00], [0x00,0x27,0x45,0x45,0x45,0x39,0x00,0x00], [0x00,0x3C,0x4A,0x49,0x49,0x30,0x00,0x00], [0x00,0x01,0x71,0x09,0x05,0x03,0x00,0x00], [0x00,0x36,0x49,0x49,0x49,0x36,0x00,0x00], [0x00,0x06,0x49,0x49,0x29,0x1E,0x00,0x00], [0x00,0x00,0x36,0x36,0x00,0x00,0x00,0x00], [0x00,0x00,0xAC,0x6C,0x00,0x00,0x00,0x00], [0x00,0x08,0x14,0x22,0x41,0x00,0x00,0x00], [0x00,0x14,0x14,0x14,0x14,0x14,0x00,0x00], [0x00,0x41,0x22,0x14,0x08,0x00,0x00,0x00], [0x00,0x02,0x01,0x51,0x09,0x06,0x00,0x00], [0x00,0x32,0x49,0x79,0x41,0x3E,0x00,0x00], [0x00,0x7E,0x09,0x09,0x09,0x7E,0x00,0x00], [0x00,0x7F,0x49,0x49,0x49,0x36,0x00,0x00], [0x00,0x3E,0x41,0x41,0x41,0x22,0x00,0x00], [0x00,0x7F,0x41,0x41,0x22,0x1C,0x00,0x00], [0x00,0x7F,0x49,0x49,0x49,0x41,0x00,0x00], [0x00,0x7F,0x09,0x09,0x09,0x01,0x00,0x00], [0x00,0x3E,0x41,0x41,0x51,0x72,0x00,0x00], [0x00,0x7F,0x08,0x08,0x08,0x7F,0x00,0x00], [0x00,0x41,0x7F,0x41,0x00,0x00,0x00,0x00], [0x00,0x20,0x40,0x41,0x3F,0x01,0x00,0x00], [0x00,0x7F,0x08,0x14,0x22,0x41,0x00,0x00], [0x00,0x7F,0x40,0x40,0x40,0x40,0x00,0x00], [0x00,0x7F,0x02,0x0C,0x02,0x7F,0x00,0x00], [0x00,0x7F,0x04,0x08,0x10,0x7F,0x00,0x00], [0x00,0x3E,0x41,0x41,0x41,0x3E,0x00,0x00], [0x00,0x7F,0x09,0x09,0x09,0x06,0x00,0x00], [0x00,0x3E,0x41,0x51,0x21,0x5E,0x00,0x00], [0x00,0x7F,0x09,0x19,0x29,0x46,0x00,0x00], [0x00,0x26,0x49,0x49,0x49,0x32,0x00,0x00], [0x00,0x01,0x01,0x7F,0x01,0x01,0x00,0x00], [0x00,0x3F,0x40,0x40,0x40,0x3F,0x00,0x00], [0x00,0x1F,0x20,0x40,0x20,0x1F,0x00,0x00], [0x00,0x3F,0x40,0x38,0x40,0x3F,0x00,0x00], [0x00,0x63,0x14,0x08,0x14,0x63,0x00,0x00], [0x00,0x03,0x04,0x78,0x04,0x03,0x00,0x00], [0x00,0x61,0x51,0x49,0x45,0x43,0x00,0x00], [0x00,0x7F,0x41,0x41,0x00,0x00,0x00,0x00], [0x00,0x02,0x04,0x08,0x10,0x20,0x00,0x00], [0x00,0x41,0x41,0x7F,0x00,0x00,0x00,0x00], [0x00,0x04,0x02,0x01,0x02,0x04,0x00,0x00], [0x00,0x80,0x80,0x80,0x80,0x80,0x00,0x00], [0x00,0x01,0x02,0x04,0x00,0x00,0x00,0x00], [0x00,0x20,0x54,0x54,0x54,0x78,0x00,0x00], [0x00,0x7F,0x48,0x44,0x44,0x38,0x00,0x00], [0x00,0x38,0x44,0x44,0x28,0x00,0x00,0x00], [0x00,0x38,0x44,0x44,0x48,0x7F,0x00,0x00], [0x00,0x38,0x54,0x54,0x54,0x18,0x00,0x00], [0x00,0x08,0x7E,0x09,0x02,0x00,0x00,0x00], [0x00,0x18,0xA4,0xA4,0xA4,0x7C,0x00,0x00], [0x00,0x7F,0x08,0x04,0x04,0x78,0x00,0x00], [0x00,0x00,0x7D,0x00,0x00,0x00,0x00,0x00], [0x00,0x80,0x84,0x7D,0x00,0x00,0x00,0x00], [0x00,0x7F,0x10,0x28,0x44,0x00,0x00,0x00], [0x00,0x41,0x7F,0x40,0x00,0x00,0x00,0x00], 
[0x00,0x7C,0x04,0x18,0x04,0x78,0x00,0x00], [0x00,0x7C,0x08,0x04,0x7C,0x00,0x00,0x00], [0x00,0x38,0x44,0x44,0x38,0x00,0x00,0x00], [0x00,0xFC,0x24,0x24,0x18,0x00,0x00,0x00], [0x00,0x18,0x24,0x24,0xFC,0x00,0x00,0x00], [0x00,0x00,0x7C,0x08,0x04,0x00,0x00,0x00], [0x00,0x48,0x54,0x54,0x24,0x00,0x00,0x00], [0x00,0x04,0x7F,0x44,0x00,0x00,0x00,0x00], [0x00,0x3C,0x40,0x40,0x7C,0x00,0x00,0x00], [0x00,0x1C,0x20,0x40,0x20,0x1C,0x00,0x00], [0x00,0x3C,0x40,0x30,0x40,0x3C,0x00,0x00], [0x00,0x44,0x28,0x10,0x28,0x44,0x00,0x00], [0x00,0x1C,0xA0,0xA0,0x7C,0x00,0x00,0x00], [0x00,0x44,0x64,0x54,0x4C,0x44,0x00,0x00], [0x00,0x08,0x36,0x41,0x00,0x00,0x00,0x00], [0x00,0x00,0x7F,0x00,0x00,0x00,0x00,0x00], [0x00,0x41,0x36,0x08,0x00,0x00,0x00,0x00], [0x00,0x02,0x01,0x01,0x02,0x01,0x00,0x00], [0x00,0x02,0x05,0x05,0x02,0x00,0x00,0x00]] def oled_init(): sendCommand(0xFD) # Unlock OLED driver IC MCU interface from entering command. i.e: Accept commands sendCommand(0x12) sendCommand(0xAE) # Set display off sendCommand(0xA8) # set multiplex ratio sendCommand(0x5F) # 96 sendCommand(0xA1) # set display start line sendCommand(0x00) sendCommand(0xA2) # set display offset sendCommand(0x60) sendCommand(0xA0) # set remap sendCommand(0x46) sendCommand(0xAB) # set vdd internal sendCommand(0x01) sendCommand(0x81) # set contrasr sendCommand(0x53) # 100 nit sendCommand(0xB1) # Set Phase Length sendCommand(0X51) sendCommand(0xB3) # Set Display Clock Divide Ratio/Oscillator Frequency sendCommand(0x01) sendCommand(0xB9) sendCommand(0xBC) # set pre_charge voltage/VCOMH sendCommand(0x08) # (0x08); sendCommand(0xBE) # set VCOMH sendCommand(0X07) # (0x07); sendCommand(0xB6) # Set second pre-charge period sendCommand(0x01) sendCommand(0xD5) # enable second precharge and enternal vsl sendCommand(0X62) # (0x62); sendCommand(0xA4) # Set Normal Display Mode sendCommand(0x2E) # Deactivate Scroll sendCommand(0xAF) # Switch on display time.sleep(0.1) # delay(100); # Row Address sendCommand(0x75) # Set Row Address sendCommand(0x00) # Start 0 sendCommand(0x5f) # End 95 # Column Address sendCommand(0x15) # Set Column Address sendCommand(0x08) # Start from 8th Column of driver IC. This is 0th Column for OLED sendCommand(0x37) # End at (8 + 47)th column. Each Column has 2 pixels(segments) # Init gray level for text. 
Default:Brightest White grayH= 0xF0 grayL= 0x0F def sendCommand(byte): Oled.write8(Command_Mode,byte) def sendData(byte): Oled.write8(Data_mode,byte) def multi_comm(commands): for c in commands: sendCommand(c) def oled_clearDisplay(): for j in range (0,48): for i in range (0,96): sendData(0x00) def oled_setNormalDisplay(): sendCommand(Normal_Display_Cmd) def oled_setVerticalMode(): sendCommand(0xA0) # remap to sendCommand(0x46) # Vertical mode def oled_setTextXY(Row,Column): sendCommand(0x15) # Set Column Address sendCommand(0x08+(Column*4)) # Start Column: Start from 8 sendCommand(0x37) # End Column # Row Address sendCommand(0x75) # Set Row Address sendCommand(0x00+(Row*8)) # Start Row sendCommand(0x07+(Row*8)) # End Row def oled_putChar(C): C_add=ord(C) if C_add<32 or C_add>127: # Ignore non-printable ASCII characters C=' ' C_add=ord(C) for i in range(0,8,2): for j in range(0,8): c=0x00 bit1=((BasicFont[C_add-32][i])>>j)&0x01 bit2=((BasicFont[C_add-32][i+1])>>j)&0x01 if bit1: c=c|grayH else: c=c|0x00 if bit2: c=c|grayL else: c=c|0x00 sendData(c) def oled_putString(String): for i in range(len(String)): oled_putChar(String[i]) """ if __name__=="__main__": oled_init() oled_clearDisplay() oled_setNormalDisplay() oled_setTextXY(0,0) oled_putString("Reflow Oven") oled_setTextXY(1,0) oled_putString("Heater On") oled_setTextXY(2,0) oled_putString("Temp:"+str(150)) time.sleep(10) #Oled.write8(Command_Mode,0xFD) #sendCommand(0xFD) print 'hello world' """
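# Hedged sketch (pure Python, no I2C required) of how oled_putChar() renders a glyph:
# two adjacent 1-bit font columns are packed into one 4-bit-per-pixel grayscale byte
# per pixel row, left pixel in the high nibble and right pixel in the low nibble.
# 'A' is an arbitrary example character; the gray levels mirror grayH/grayL above.
def _pack_char_demo(char='A', gray_high=0xF0, gray_low=0x0F):
    glyph = BasicFont[ord(char) - 32]
    packed = []
    for i in range(0, 8, 2):              # two font columns per output byte
        for j in range(0, 8):             # one output byte per pixel row
            byte = 0x00
            if (glyph[i] >> j) & 0x01:
                byte |= gray_high         # left pixel -> high nibble
            if (glyph[i + 1] >> j) & 0x01:
                byte |= gray_low          # right pixel -> low nibble
            packed.append(byte)
    return packed                         # 32 bytes, in the order sendData() would emit them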
# Copyright 2016 Stephen Shirley # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import logging import re import socket import sys import time from prometheus_client import ( CollectorRegistry, Gauge, generate_latest, push_to_gateway, ) DEFAULT_PUSH_GATEWAY = "localhost:9091" DEFAULT_JOB_NAME = "rsnapshot" localhost = socket.getfqdn() gauges = {} RSYNC_STATS = { # Metadata "rsync_start_time": "Time rsync started at", "rsync_end_time": "Time rsync finished at", "rsync_duration_seconds": "How long rsync ran for", "rsync_success": "0 if rsync encountered no errors, 1 otherwise.", # Stats directly from rsync "rsync_num_files": "Number of files", "rsync_num_xferred_files": "Number of regular files transferred", "rsync_total_file_bytes": "Total file size", "rsync_total_xferred_file_bytes": "Total transferred file size", "rsync_literal_data_bytes": "Literal data", "rsync_matched_data_bytes": "Matched data", "rsync_file_list_bytes": "File list size", "rsync_file_list_gen_seconds": "File list generation time", "rsync_file_list_xfer_seconds": "File list transfer time", "rsync_total_sent_bytes": "Total bytes sent", "rsync_total_recv_bytes": "Total bytes received", } class Stats: STAT_NAME = {v: k for k, v in RSYNC_STATS.items()} def __init__(self, line): self._metrics = {} self._metrics['rsync_start_time'] = time.time() self._end = 0 self._success = True self.src_host = None self.src_path = None self.dst_host = None self.dst_path = None self._parse_rsync_line(line) def _parse_rsync_line(self, line): parts = line.split() self.src_host, self.src_path = self._get_host_path(parts[-2]) self.dst_host, self.dst_path = self._get_host_path(parts[-1]) def _get_host_path(self, s): remote_rx = re.compile(r'((.*@)?(?P<host>.+):)?(?P<path>.+)$') m = remote_rx.match(s) host = m.group('host') or localhost path = m.group('path') return host, path def parse(self, line): """ Returns None on success, False on error """ parse_rx = re.compile(r'^(?P<desc>[^:]+): (?P<val>\S+)') m = parse_rx.match(line) if not m: return desc = m.group('desc') if desc == "rsync error": self._success = False return False name = self.STAT_NAME.get(m.group('desc')) if not name: # Skip non-matching lines return self._metrics[name] = float(m.group('val')) def publish(self, def_labels): self._metrics['rsync_end_time'] = time.time() self._metrics['rsync_duration_seconds'] = ( self._metrics['rsync_end_time'] - self._metrics['rsync_start_time']) self._metrics['rsync_success'] = 0 if self._success else 1 logging.info("Publishing %s:%s -> %s:%s" % ( self.src_host, self.src_path, self.dst_host, self.dst_path)) labels = { 'src_host': self.src_host, 'src_path': self.src_path, 'dst_host': self.dst_host, 'dst_path': self.dst_path, } labels.update(def_labels) for name, val in self._metrics.items(): metric = gauges[name] metric.labels(**labels).set(val) def main(): parser = argparse.ArgumentParser( prog="rsnap_prom_stats", formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument("--pushgw", default=DEFAULT_PUSH_GATEWAY, help="Address of the 
pushgateway to publish to. If " "set to '-' it will print the metrics to stdout instead.") parser.add_argument("--job", default=DEFAULT_JOB_NAME, help="Pushgateway job name.") parser.add_argument("-v", action="store_true", help="Print some information to stdout.") args = parser.parse_args() level = logging.WARNING if args.v: level = logging.INFO logging.basicConfig( format='[%(asctime)s] %(message)s', level=level) registry = setup_metrics() start = time.time() logging.info("Started") def_labels = {'instance': localhost} process_input(def_labels) end = time.time() logging.info("Finished reading output") gauges["rsnapshot_start_time"].labels(def_labels).set(start) gauges["rsnapshot_end_time"].labels(def_labels).set(end) gauges["rsnapshot_duration_seconds"].labels(def_labels).set(end - start) if args.pushgw == "-": print(generate_latest(registry).decode("utf-8")) else: logging.info("publishing to pushgateway @ %s", args.pushgw) push_to_gateway(args.pushgw, job=args.job, registry=registry) def setup_metrics(): registry = CollectorRegistry() basic_labels = ['instance'] gauges["rsnapshot_start_time"] = Gauge( "rsnapshot_start_time", "Timestamp rsnapshot started at", basic_labels, registry=registry) gauges["rsnapshot_end_time"] = Gauge( "rsnapshot_end_time", "Timestamp rsnapshot finished at", basic_labels, registry=registry) gauges["rsnapshot_duration_seconds"] = Gauge( "rsnapshot_duration_seconds", "How long rsnapshot ran for", basic_labels, registry=registry) rsync_labels = ['src_host', 'src_path', 'dst_host', 'dst_path'] for name, desc in RSYNC_STATS.items(): gauges[name] = Gauge( name, desc, basic_labels + rsync_labels, registry=registry) return registry def process_input(def_labels): rsync_rx = re.compile(r'^[/\w]+/rsync') s = None for line in read_lines(): if not line: # Skip blank lines continue if rsync_rx.match(line): s = Stats(line) continue if not s: # Don't bother parsing lines until we found the start of a stats # block continue if line.startswith('sent ') or s.parse(line) is False: # We've reached the end of the stats block, or an rsync error # was encountered. Either way, publish the stats. s.publish(def_labels) s = None continue def read_lines(): line = "" for l in sys.stdin: line += l.strip() if line.endswith("\\"): line = line.rstrip("\\") continue yield line line = "" if __name__ == '__main__': main()
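# Hedged sketch (illustration only) of the host/path split performed by
# Stats._get_host_path(): remote rsync specs keep their host, local paths fall back
# to this machine's FQDN. The sample specs below are made up.
def _host_path_demo():
    remote_rx = re.compile(r'((.*@)?(?P<host>.+):)?(?P<path>.+)$')
    for spec in ('backup@nas01:/srv/backups', '/local/snapshots'):
        m = remote_rx.match(spec)
        print(m.group('host') or socket.getfqdn(), m.group('path'))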
# This Python module is part of the PyRate software package. # # Copyright 2022 Geoscience Australia # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ This Python module contains tests for the shared.py PyRate module. """ import os import shutil import sys import random import string import tempfile import pytest from pathlib import Path from itertools import product from numpy import isnan, where, nan from os.path import join, basename, exists from stat import S_IRGRP, S_IWGRP, S_IWOTH, S_IROTH, S_IRUSR, S_IWUSR import numpy as np from numpy.testing import assert_array_equal from osgeo import gdal from osgeo.gdal import Open, Dataset, UseExceptions import pyrate.constants as C import tests.common from tests.common import SML_TEST_TIF, SML_TEST_DEM_TIF, TEMPDIR, WORKING_DIR from pyrate.core import shared, ifgconstants as ifc, gamma from pyrate.core.shared import dem_or_ifg from pyrate.core import mpiops from pyrate import prepifg, conv2tif from pyrate.configuration import Configuration, MultiplePaths from pyrate.core.shared import Ifg, DEM, RasterException from pyrate.core.shared import cell_size, _utm_zone, tiles_split from tests import common UseExceptions() if not exists(SML_TEST_TIF): sys.exit("ERROR: Missing small_test data for unit tests\n") class TestIfgTests: """Unit tests for the Ifg/interferogram class.""" def setup_class(cls): cls.ifg = Ifg(join(SML_TEST_TIF, 'geo_060619-061002_unw.tif')) cls.ifg.open() cls.ifg.nodata_value = 0 def test_headers_as_attr(self): for a in ['ncols', 'nrows', 'x_first', 'x_step', 'y_first', 'y_step', 'wavelength', 'first', 'second']: assert getattr(self.ifg, a) is not None def test_convert_to_nans(self): self.ifg.convert_to_nans() assert self.ifg.nan_converted def test_xylast(self): # ensure the X|Y_LAST header element has been created assert self.ifg.x_last == pytest.approx(150.9491667) assert self.ifg.y_last == pytest.approx(-34.23) def test_num_cells(self): # test cell size from header elements data = self.ifg.phase_band.ReadAsArray() ys, xs = data.shape exp_ncells = ys * xs assert exp_ncells == self.ifg.num_cells def test_shape(self): assert self.ifg.shape == self.ifg.phase_data.shape def test_nan_count(self): num_nan = 0 for row in self.ifg.phase_data: for v in row: if isnan(v): num_nan += 1 if self.ifg.nan_converted: assert num_nan == self.ifg.nan_count else: assert num_nan == 0 def test_phase_band(self): data = self.ifg.phase_band.ReadAsArray() assert data.shape == (72, 47) def test_nan_fraction(self): # NB: source data lacks 0 -> NaN conversion data = self.ifg.phase_data data = where(data == 0, nan, data) # fake 0 -> nan for the count below # manually count # nan cells nans = 0 ys, xs = data.shape for y, x in product(range(ys), range(xs)): if isnan(data[y, x]): nans += 1 del data num_cells = float(ys * xs) assert nans > 0 assert nans <= num_cells assert nans / num_cells == self.ifg.nan_fraction def test_xy_size(self): assert ~ (self.ifg.ncols is None) assert ~ (self.ifg.nrows is None) # test with tolerance from base 90m cell # within 2% of cells over small? 
assert self.ifg.y_size > 88.0 assert self.ifg.y_size < 92.0, 'Got %s' % self.ifg.y_size width = 76.9 # from nearby PyRate coords assert self.ifg.x_size > 0.97 * width # ~3% tolerance assert self.ifg.x_size < 1.03 * width def test_centre_latlong(self): lat_exp = self.ifg.y_first + \ (int(self.ifg.nrows / 2) * self.ifg.y_step) long_exp = self.ifg.x_first + \ (int(self.ifg.ncols / 2) * self.ifg.x_step) assert lat_exp == self.ifg.lat_centre assert long_exp == self.ifg.long_centre def test_centre_cell(self): assert self.ifg.x_centre == 23 assert self.ifg.y_centre == 36 def test_time_span(self): assert self.ifg.time_span == pytest.approx(0.287474332649) def test_wavelength(self): assert self.ifg.wavelength == pytest.approx(0.0562356424) class TestIfgIOTests: def setup_method(self): self.ifg = Ifg(join(SML_TEST_TIF, 'geo_070709-070813_unw.tif')) self.header = join(common.ROIPAC_SML_TEST_DIR, 'geo_070709-070813_unw.rsc') def test_open(self): assert self.ifg.dataset is None assert self.ifg.is_open is False self.ifg.open(readonly=True) assert self.ifg.dataset is not None assert self.ifg.is_open is True assert isinstance(self.ifg.dataset, Dataset) # ensure open cannot be called twice with pytest.raises(RasterException): self.ifg.open(True) def test_open_ifg_from_dataset(self): """ Test showing open() can not be used for Ifg created with gdal.Dataset object as Dataset has already been read in """ self.ifg.open() dataset = self.ifg.dataset new_ifg = Ifg(dataset) with pytest.raises(RasterException): new_ifg.open() def test_write(self): base = TEMPDIR src = self.ifg.data_path dest = join(base, basename(self.ifg.data_path)) # shutil.copy needs to copy writeable permission from src os.chmod(src, S_IRGRP | S_IWGRP | S_IWOTH | S_IROTH | S_IRUSR | S_IWUSR) shutil.copy(src, dest) os.chmod(src, S_IRGRP | S_IROTH | S_IRUSR) # revert i = Ifg(dest) i.open() i.phase_data[0, 1:] = nan i.write_modified_phase() del i # reopen to ensure data/nans can be read back out i = Ifg(dest) i.open(readonly=True) assert_array_equal(True, isnan(i.phase_data[0, 1:])) i.close() os.remove(dest) def test_write_fails_on_readonly(self): # check readonly status is same before # and after open() for readonly file assert self.ifg.is_read_only self.ifg.open(readonly=True) assert self.ifg.is_read_only with pytest.raises(IOError): self.ifg.write_modified_phase() def test_phase_band_unopened_ifg(self): try: _ = self.ifg.phase_band self.fail("Should not be able to access band without open dataset") except RasterException: pass def test_nan_fraction_unopened(self): try: # NB: self.assertRaises doesn't work here (as it is a property?) 
_ = self.ifg.nan_fraction self.fail("Shouldn't be able to " "call nan_fraction() with unopened Ifg") except RasterException: pass def test_phase_data_properties(self): # Use raw GDAL to isolate raster reading from Ifg functionality ds = Open(self.ifg.data_path) data = ds.GetRasterBand(1).ReadAsArray() del ds self.ifg.open() # test full array and row by row access assert_array_equal(data, self.ifg.phase_data) for y, row in enumerate(self.ifg.phase_rows): assert_array_equal(data[y], row) # test the data is cached if changed crd = (5, 4) orig = self.ifg.phase_data[crd] self.ifg.phase_data[crd] *= 2 nv = self.ifg.phase_data[crd] # pull new value out again assert nv == 2 * orig # FIXME: # class IncidenceFileTests(): # 'Unit tests to verify operations on GeoTIFF format Incidence rasters' # # def setUp(self): # raise NotImplementedError # self.inc = Incidence(join(INCID_TEST_DIR, '128x2.tif')) # self.inc.open() # # # def test_incidence_data(self): # # check incidences rises while traversing the scene # data = self.inc.incidence_data # diff = data.ptp() # self.assertTrue(diff > 0.5, "Got ptp() diff of %s" % diff) # # # ascending pass, values should increase from W->E across scene # for i in range(2): # d = data[i] # self.assertFalse((d == 0).any()) # ensure no NODATA # self.assertFalse((isnan(d)).any()) # # diff = array([d[i+1] - d[i] for i in range(len(d)-1)]) # res = abs(diff[diff < 0]) # TODO: check if this is normal # self.assertTrue((res < 1e-4).all()) # # # def test_azimuth_data(self): # # ensure azimuth is fairly constant # # az = self.inc.azimuth_data # self.assertFalse((az == 0).all()) # az = az[az != 0] # filter NODATA cells # # # azimuth should be relatively constant # ptp = az.ptp() # self.assertTrue(ptp < 0.1, msg="min -> max diff is %s" % ptp) class TestDEMTests: 'Unit tests to verify operations on GeoTIFF format DEMs' def setup_method(self): self.ras = DEM(SML_TEST_DEM_TIF) def test_create_raster(self): # validate header path assert os.path.exists(self.ras.data_path) def test_headers_as_attr(self): self.ras.open() attrs = ['ncols', 'nrows', 'x_first', 'x_step', 'y_first', 'y_step' ] # TODO: are 'projection' and 'datum' attrs needed? 
for a in attrs: assert getattr(self.ras, a) is not None def test_is_dem(self): self.ras = DEM(join(SML_TEST_TIF, 'geo_060619-061002_unw.tif')) assert ~hasattr(self.ras, 'datum') def test_open(self): assert self.ras.dataset is None self.ras.open() assert self.ras.dataset is not None assert isinstance(self.ras.dataset, Dataset) # ensure open cannot be called twice with pytest.raises(RasterException): self.ras.open() # def test_band_fails_with_unopened_raster(self): # now opening if not open # # test accessing bands with open and unopened datasets # with pytest.raises(RasterException): # self.ras.band def test_band_read_with_open_raster(self): data = self.ras.band.ReadAsArray() assert data.shape == (72, 47) class TestWriteUnw: @classmethod @pytest.fixture(autouse=True) def setup_class(cls, gamma_params): # change the required params shared.mkdir_p(gamma_params[C.OUT_DIR]) from copy import deepcopy cls.params = deepcopy(gamma_params) cls.params[WORKING_DIR] = common.GAMMA_SML_TEST_DIR cls.params[C.PROCESSOR] = 1 # gamma cls.params[C.PARALLEL] = 0 cls.params[C.REF_EST_METHOD] = 1 cls.params[C.DEM_FILE] = common.SML_TEST_DEM_GAMMA cls.params[C.BASE_FILE_LIST] = common.GAMMA_SML_TEST_DIR # base_unw_paths need to be geotiffed and multilooked by run_prepifg cls.base_unw_paths = tests.common.original_ifg_paths(cls.params[C.IFG_FILE_LIST], cls.params[WORKING_DIR]) cls.base_unw_paths.append(common.SML_TEST_DEM_GAMMA) # dest_paths are tifs that have been geotif converted and multilooked conv2tif.main(cls.params) prepifg.main(cls.params) cls.dest_paths = [Path(cls.params[C.INTERFEROGRAM_DIR]).joinpath(Path(c.sampled_path).name).as_posix() for c in gamma_params[C.INTERFEROGRAM_FILES]] cls.dest_paths += [Path(cls.params[C.COHERENCE_DIR]).joinpath(Path(c.sampled_path).name).as_posix() for c in gamma_params[C.COHERENCE_FILE_PATHS]] cls.ifgs = [dem_or_ifg(i) for i in cls.dest_paths] for i in cls.ifgs: i.open() i.nodata_value = 0 @classmethod def teardown_class(cls): """auto cleaning on""" def test_unw_contains_same_data_as_numpy_array(self): from datetime import time temp_unw = tempfile.mktemp(suffix='.unw') temp_tif = tempfile.mktemp(suffix='.tif') # setup some header files for use in write_geotif dem_header_file = common.SML_TEST_DEM_HDR_GAMMA dem_header = gamma.parse_dem_header(dem_header_file) header = gamma.parse_epoch_header( os.path.join(common.GAMMA_SML_TEST_DIR, '20060828_slc.par')) header.update(dem_header) base_header = gamma.parse_baseline_header( os.path.join(common.GAMMA_SML_TEST_DIR, '20060828-20061211_base.par')) header.update(base_header) # insert some dummy data so we are the dem in write_fullres_geotiff is not # not activated and ifg write_fullres_geotiff operation works header[ifc.PYRATE_TIME_SPAN] = 0 header[ifc.SECOND_DATE] = 0 header[ifc.DATA_UNITS] = 'degrees' header[ifc.DATA_TYPE] = ifc.ORIG header[ifc.SECOND_TIME] = time(10) # now create arbitrary data data = np.random.rand(dem_header[ifc.PYRATE_NROWS], dem_header[ifc.PYRATE_NCOLS]) # convert numpy array to .unw shared.write_unw_from_data_or_geotiff(geotif_or_data=data, dest_unw=temp_unw, ifg_proc=1) # convert the .unw to geotif shared.write_fullres_geotiff(header=header, data_path=temp_unw, dest=temp_tif, nodata=np.nan) # now compare geotiff with original numpy array ds = gdal.Open(temp_tif, gdal.GA_ReadOnly) data_lv_theta = ds.ReadAsArray() ds = None np.testing.assert_array_almost_equal(data, data_lv_theta) try: os.remove(temp_tif) except PermissionError: print("File opened by another process.") try: os.remove(temp_unw) except 
PermissionError: print("File opened by another process.") def test_multilooked_tiffs_converted_to_unw_are_same(self): # Get multilooked geotiffs geotiffs = list(set(self.dest_paths)) geotiffs = [g for g in geotiffs if 'dem' not in g] # Convert back to .unw dest_unws = [] for g in set(geotiffs): dest_unw = os.path.join(self.params[C.OUT_DIR], Path(g).stem + '.unw') shared.write_unw_from_data_or_geotiff(geotif_or_data=g, dest_unw=dest_unw, ifg_proc=1) dest_unws.append(dest_unw) dest_unws_ = [] for d in dest_unws: dest_unws_.append(MultiplePaths(d, self.params)) # Convert back to tiff new_geotiffs_ = conv2tif.do_geotiff(dest_unws_, self.params) new_geotiffs = [gt for gt, b in new_geotiffs_] # Ensure original multilooked geotiffs and # unw back to geotiff are the same geotiffs.sort(key=lambda x: Path(x).name) new_geotiffs.sort(key=lambda x: Path(x).name) for g, u in zip(geotiffs, new_geotiffs): g_ds = gdal.Open(g) u_gs = gdal.Open(u) np.testing.assert_array_almost_equal(u_gs.ReadAsArray(), g_ds.ReadAsArray()) u_gs = None g_ds = None def test_roipac_raises(self): geotiffs = [os.path.join( self.params[C.OUT_DIR], os.path.basename(b).split('.')[0] + '_' + os.path.basename(b).split('.')[1] + '.tif') for b in self.base_unw_paths] for g in geotiffs[:1]: dest_unw = os.path.join(self.params[C.OUT_DIR], os.path.splitext(g)[0] + '.unw') with pytest.raises(NotImplementedError): shared.write_unw_from_data_or_geotiff(geotif_or_data=g, dest_unw=dest_unw, ifg_proc=0) class TestGeodesy: def test_utm_zone(self): # test some different zones (collected manually) for lon in [174.0, 176.5, 179.999, 180.0]: assert 60 == _utm_zone(lon) for lon in [144.0, 144.1, 146.3456, 149.9999]: assert 55 == _utm_zone(lon) for lon in [-180.0, -179.275, -176.925]: assert 1 == _utm_zone(lon) for lon in [-72.0, -66.1]: assert 19 == _utm_zone(lon) for lon in [0.0, 0.275, 3.925, 5.999]: assert 31 == _utm_zone(lon) def test_cell_size_polar_region(self): # Can't have polar area zones: see http://www.dmap.co.uk/utmworld.htm for lat in [-80.1, -85.0, -90.0, 84.1, 85.0, 89.9999, 90.0]: with pytest.raises(ValueError): cell_size(lat, 0, 0.1, 0.1) def test_cell_size_calc(self): # test conversion of X|Y_STEP to X|Y_SIZE x_deg = 0.000833333 y_deg = -x_deg approx = 90.0 # x_deg is approx 90m exp_low = approx - (.15 * approx) # assumed tolerance exp_high = approx + (.15 * approx) latlons = [(10.0, 15.0), (-10.0, 15.0), (10.0, -15.0), (-10.0, -15.0), (178.0, 33.0), (-178.0, 33.0), (178.0, -33.0), (-178.0, -33.0) ] for lon, lat in latlons: xs, ys = cell_size(lat, lon, x_deg, y_deg) for s in (xs, ys): assert s > 0, "size=%s" % s assert s > exp_low, "size=%s" % s assert s < exp_high, "size=%s" % s @pytest.fixture(params=[0, 1]) def parallel(request): return request.param def get_random_string(length): letters = string.ascii_lowercase result_str = ''.join(random.choice(letters) for _ in range(length)) return result_str def _data_types(): return { 'str': [get_random_string(np.random.randint(2, 10)) for _ in range(20)], 'int': np.arange(20), 'ndarray': [np.random.randint(low=0, high=10, size=(2, 3), dtype=np.uint8) for _ in range(20)] } data_types = mpiops.run_once(_data_types) # required otherwise differnt arrays are generated in each mpi process @pytest.fixture(params=list(data_types.keys())) def data_type(request): return request.param def test_tiles_split(parallel, data_type): params = { C.TILES: data_types[data_type], C.PARALLEL: parallel, C.LOG_LEVEL: 'INFO', 'multiplier': 2, C.PROCESSES: 4 } def func(tile, params): return tile * 
params['multiplier']

    ret = tiles_split(func, params)
    expected_ret = np.array([item * params['multiplier'] for item in data_types[data_type]], dtype=object)
    np.testing.assert_array_equal(ret, expected_ret)


def test_convert_to_radians():
    import math
    data = np.random.randint(1, 10, (4, 5))
    wavelength = 10.5
    ret = shared.convert_mm_to_radians(data, wavelength)
    expected = data * (4 * math.pi) / wavelength / ifc.MM_PER_METRE
    np.testing.assert_array_almost_equal(ret, expected)


def test_convert_to_radians_ifg(ten_geotiffs):
    for g in ten_geotiffs[:2]:
        ifg = Ifg(g)
        ifg.open()
        md = ifg.dataset.GetMetadata()
        assert ifc.DATA_TYPE in md
        assert md[ifc.DATA_TYPE] == ifc.ORIG
        assert md[ifc.DATA_UNITS] == shared.RADIANS
        rad_data = ifg.phase_data
        ifg.convert_to_mm()
        assert ifg.meta_data[ifc.DATA_UNITS] == shared.MILLIMETRES
        ifg.convert_to_radians()
        assert md[ifc.DATA_UNITS] == shared.RADIANS
        np.testing.assert_array_almost_equal(rad_data, ifg.phase_data, decimal=4)
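# Hedged sketch (illustration only) of the millimetre to radians conversion exercised by
# test_convert_to_radians() above: phase = mm * 4 * pi / (wavelength * MM_PER_METRE),
# with the wavelength in metres. The displacement values below are made up; the
# wavelength matches the value asserted in test_wavelength().
def _mm_to_radians_demo():
    import math
    wavelength = 0.0562356424                      # metres, from the test fixture above
    displacement_mm = np.array([1.0, 10.0, 28.1])  # hypothetical displacements in mm
    return displacement_mm * (4 * math.pi) / wavelength / ifc.MM_PER_METRE  # radians of phase change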
from __future__ import unicode_literals from moto.core.exceptions import RESTError from moto.core.utils import amzn_request_id from moto.core.responses import BaseResponse from .models import elbv2_backends from .exceptions import DuplicateTagKeysError from .exceptions import LoadBalancerNotFoundError from .exceptions import TargetGroupNotFoundError SSL_POLICIES = [ { "name": "ELBSecurityPolicy-2016-08", "ssl_protocols": ["TLSv1", "TLSv1.1", "TLSv1.2"], "ciphers": [ {"name": "ECDHE-ECDSA-AES128-GCM-SHA256", "priority": 1}, {"name": "ECDHE-RSA-AES128-GCM-SHA256", "priority": 2}, {"name": "ECDHE-ECDSA-AES128-SHA256", "priority": 3}, {"name": "ECDHE-RSA-AES128-SHA256", "priority": 4}, {"name": "ECDHE-ECDSA-AES128-SHA", "priority": 5}, {"name": "ECDHE-RSA-AES128-SHA", "priority": 6}, {"name": "ECDHE-ECDSA-AES256-GCM-SHA384", "priority": 7}, {"name": "ECDHE-RSA-AES256-GCM-SHA384", "priority": 8}, {"name": "ECDHE-ECDSA-AES256-SHA384", "priority": 9}, {"name": "ECDHE-RSA-AES256-SHA384", "priority": 10}, {"name": "ECDHE-RSA-AES256-SHA", "priority": 11}, {"name": "ECDHE-ECDSA-AES256-SHA", "priority": 12}, {"name": "AES128-GCM-SHA256", "priority": 13}, {"name": "AES128-SHA256", "priority": 14}, {"name": "AES128-SHA", "priority": 15}, {"name": "AES256-GCM-SHA384", "priority": 16}, {"name": "AES256-SHA256", "priority": 17}, {"name": "AES256-SHA", "priority": 18}, ], }, { "name": "ELBSecurityPolicy-TLS-1-2-2017-01", "ssl_protocols": ["TLSv1.2"], "ciphers": [ {"name": "ECDHE-ECDSA-AES128-GCM-SHA256", "priority": 1}, {"name": "ECDHE-RSA-AES128-GCM-SHA256", "priority": 2}, {"name": "ECDHE-ECDSA-AES128-SHA256", "priority": 3}, {"name": "ECDHE-RSA-AES128-SHA256", "priority": 4}, {"name": "ECDHE-ECDSA-AES256-GCM-SHA384", "priority": 5}, {"name": "ECDHE-RSA-AES256-GCM-SHA384", "priority": 6}, {"name": "ECDHE-ECDSA-AES256-SHA384", "priority": 7}, {"name": "ECDHE-RSA-AES256-SHA384", "priority": 8}, {"name": "AES128-GCM-SHA256", "priority": 9}, {"name": "AES128-SHA256", "priority": 10}, {"name": "AES256-GCM-SHA384", "priority": 11}, {"name": "AES256-SHA256", "priority": 12}, ], }, { "name": "ELBSecurityPolicy-TLS-1-1-2017-01", "ssl_protocols": ["TLSv1.1", "TLSv1.2"], "ciphers": [ {"name": "ECDHE-ECDSA-AES128-GCM-SHA256", "priority": 1}, {"name": "ECDHE-RSA-AES128-GCM-SHA256", "priority": 2}, {"name": "ECDHE-ECDSA-AES128-SHA256", "priority": 3}, {"name": "ECDHE-RSA-AES128-SHA256", "priority": 4}, {"name": "ECDHE-ECDSA-AES128-SHA", "priority": 5}, {"name": "ECDHE-RSA-AES128-SHA", "priority": 6}, {"name": "ECDHE-ECDSA-AES256-GCM-SHA384", "priority": 7}, {"name": "ECDHE-RSA-AES256-GCM-SHA384", "priority": 8}, {"name": "ECDHE-ECDSA-AES256-SHA384", "priority": 9}, {"name": "ECDHE-RSA-AES256-SHA384", "priority": 10}, {"name": "ECDHE-RSA-AES256-SHA", "priority": 11}, {"name": "ECDHE-ECDSA-AES256-SHA", "priority": 12}, {"name": "AES128-GCM-SHA256", "priority": 13}, {"name": "AES128-SHA256", "priority": 14}, {"name": "AES128-SHA", "priority": 15}, {"name": "AES256-GCM-SHA384", "priority": 16}, {"name": "AES256-SHA256", "priority": 17}, {"name": "AES256-SHA", "priority": 18}, ], }, { "name": "ELBSecurityPolicy-2015-05", "ssl_protocols": ["TLSv1", "TLSv1.1", "TLSv1.2"], "ciphers": [ {"name": "ECDHE-ECDSA-AES128-GCM-SHA256", "priority": 1}, {"name": "ECDHE-RSA-AES128-GCM-SHA256", "priority": 2}, {"name": "ECDHE-ECDSA-AES128-SHA256", "priority": 3}, {"name": "ECDHE-RSA-AES128-SHA256", "priority": 4}, {"name": "ECDHE-ECDSA-AES128-SHA", "priority": 5}, {"name": "ECDHE-RSA-AES128-SHA", "priority": 6}, {"name": 
"ECDHE-ECDSA-AES256-GCM-SHA384", "priority": 7}, {"name": "ECDHE-RSA-AES256-GCM-SHA384", "priority": 8}, {"name": "ECDHE-ECDSA-AES256-SHA384", "priority": 9}, {"name": "ECDHE-RSA-AES256-SHA384", "priority": 10}, {"name": "ECDHE-RSA-AES256-SHA", "priority": 11}, {"name": "ECDHE-ECDSA-AES256-SHA", "priority": 12}, {"name": "AES128-GCM-SHA256", "priority": 13}, {"name": "AES128-SHA256", "priority": 14}, {"name": "AES128-SHA", "priority": 15}, {"name": "AES256-GCM-SHA384", "priority": 16}, {"name": "AES256-SHA256", "priority": 17}, {"name": "AES256-SHA", "priority": 18}, ], }, { "name": "ELBSecurityPolicy-TLS-1-0-2015-04", "ssl_protocols": ["TLSv1", "TLSv1.1", "TLSv1.2"], "ciphers": [ {"name": "ECDHE-ECDSA-AES128-GCM-SHA256", "priority": 1}, {"name": "ECDHE-RSA-AES128-GCM-SHA256", "priority": 2}, {"name": "ECDHE-ECDSA-AES128-SHA256", "priority": 3}, {"name": "ECDHE-RSA-AES128-SHA256", "priority": 4}, {"name": "ECDHE-ECDSA-AES128-SHA", "priority": 5}, {"name": "ECDHE-RSA-AES128-SHA", "priority": 6}, {"name": "ECDHE-ECDSA-AES256-GCM-SHA384", "priority": 7}, {"name": "ECDHE-RSA-AES256-GCM-SHA384", "priority": 8}, {"name": "ECDHE-ECDSA-AES256-SHA384", "priority": 9}, {"name": "ECDHE-RSA-AES256-SHA384", "priority": 10}, {"name": "ECDHE-RSA-AES256-SHA", "priority": 11}, {"name": "ECDHE-ECDSA-AES256-SHA", "priority": 12}, {"name": "AES128-GCM-SHA256", "priority": 13}, {"name": "AES128-SHA256", "priority": 14}, {"name": "AES128-SHA", "priority": 15}, {"name": "AES256-GCM-SHA384", "priority": 16}, {"name": "AES256-SHA256", "priority": 17}, {"name": "AES256-SHA", "priority": 18}, {"name": "DES-CBC3-SHA", "priority": 19}, ], }, ] class ELBV2Response(BaseResponse): @property def elbv2_backend(self): return elbv2_backends[self.region] @amzn_request_id def create_load_balancer(self): load_balancer_name = self._get_param("Name") subnet_ids = self._get_multi_param("Subnets.member") security_groups = self._get_multi_param("SecurityGroups.member") scheme = self._get_param("Scheme") load_balancer = self.elbv2_backend.create_load_balancer( name=load_balancer_name, security_groups=security_groups, subnet_ids=subnet_ids, scheme=scheme, ) self._add_tags(load_balancer) template = self.response_template(CREATE_LOAD_BALANCER_TEMPLATE) return template.render(load_balancer=load_balancer) @amzn_request_id def create_rule(self): lister_arn = self._get_param("ListenerArn") _conditions = self._get_list_prefix("Conditions.member") conditions = [] for _condition in _conditions: condition = {} condition["field"] = _condition["field"] values = sorted( [e for e in _condition.items() if "values.member" in e[0]], key=lambda x: x[0], ) condition["values"] = [e[1] for e in values] conditions.append(condition) priority = self._get_int_param("Priority") actions = self._get_list_prefix("Actions.member") rules = self.elbv2_backend.create_rule( listener_arn=lister_arn, conditions=conditions, priority=priority, actions=actions, ) template = self.response_template(CREATE_RULE_TEMPLATE) return template.render(rules=rules) @amzn_request_id def create_target_group(self): name = self._get_param("Name") vpc_id = self._get_param("VpcId") protocol = self._get_param("Protocol") port = self._get_param("Port") healthcheck_protocol = self._get_param("HealthCheckProtocol") healthcheck_port = self._get_param("HealthCheckPort") healthcheck_path = self._get_param("HealthCheckPath") healthcheck_interval_seconds = self._get_param("HealthCheckIntervalSeconds") healthcheck_timeout_seconds = self._get_param("HealthCheckTimeoutSeconds") healthy_threshold_count 
= self._get_param("HealthyThresholdCount") unhealthy_threshold_count = self._get_param("UnhealthyThresholdCount") matcher = self._get_param("Matcher") target_group = self.elbv2_backend.create_target_group( name, vpc_id=vpc_id, protocol=protocol, port=port, healthcheck_protocol=healthcheck_protocol, healthcheck_port=healthcheck_port, healthcheck_path=healthcheck_path, healthcheck_interval_seconds=healthcheck_interval_seconds, healthcheck_timeout_seconds=healthcheck_timeout_seconds, healthy_threshold_count=healthy_threshold_count, unhealthy_threshold_count=unhealthy_threshold_count, matcher=matcher, ) template = self.response_template(CREATE_TARGET_GROUP_TEMPLATE) return template.render(target_group=target_group) @amzn_request_id def create_listener(self): load_balancer_arn = self._get_param("LoadBalancerArn") protocol = self._get_param("Protocol") port = self._get_param("Port") ssl_policy = self._get_param("SslPolicy", "ELBSecurityPolicy-2016-08") certificates = self._get_list_prefix("Certificates.member") if certificates: certificate = certificates[0].get("certificate_arn") else: certificate = None default_actions = self._get_list_prefix("DefaultActions.member") listener = self.elbv2_backend.create_listener( load_balancer_arn=load_balancer_arn, protocol=protocol, port=port, ssl_policy=ssl_policy, certificate=certificate, default_actions=default_actions, ) template = self.response_template(CREATE_LISTENER_TEMPLATE) return template.render(listener=listener) @amzn_request_id def describe_load_balancers(self): arns = self._get_multi_param("LoadBalancerArns.member") names = self._get_multi_param("Names.member") all_load_balancers = list( self.elbv2_backend.describe_load_balancers(arns, names) ) marker = self._get_param("Marker") all_names = [balancer.name for balancer in all_load_balancers] if marker: start = all_names.index(marker) + 1 else: start = 0 page_size = self._get_int_param( "PageSize", 50 ) # the default is 400, but using 50 to make testing easier load_balancers_resp = all_load_balancers[start : start + page_size] next_marker = None if len(all_load_balancers) > start + page_size: next_marker = load_balancers_resp[-1].name template = self.response_template(DESCRIBE_LOAD_BALANCERS_TEMPLATE) return template.render(load_balancers=load_balancers_resp, marker=next_marker) @amzn_request_id def describe_rules(self): listener_arn = self._get_param("ListenerArn") rule_arns = ( self._get_multi_param("RuleArns.member") if any( k for k in list(self.querystring.keys()) if k.startswith("RuleArns.member") ) else None ) all_rules = self.elbv2_backend.describe_rules(listener_arn, rule_arns) all_arns = [rule.arn for rule in all_rules] page_size = self._get_int_param("PageSize", 50) # set 50 for temporary marker = self._get_param("Marker") if marker: start = all_arns.index(marker) + 1 else: start = 0 rules_resp = all_rules[start : start + page_size] next_marker = None if len(all_rules) > start + page_size: next_marker = rules_resp[-1].arn template = self.response_template(DESCRIBE_RULES_TEMPLATE) return template.render(rules=rules_resp, marker=next_marker) @amzn_request_id def describe_target_groups(self): load_balancer_arn = self._get_param("LoadBalancerArn") target_group_arns = self._get_multi_param("TargetGroupArns.member") names = self._get_multi_param("Names.member") target_groups = self.elbv2_backend.describe_target_groups( load_balancer_arn, target_group_arns, names ) template = self.response_template(DESCRIBE_TARGET_GROUPS_TEMPLATE) return template.render(target_groups=target_groups) 
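# Note on paging: describe_load_balancers and describe_rules above implement
# marker-based pagination by hand. The Marker is the name/ARN of the last item
# on the previous page, the next page starts just after it, and NextMarker is
# only rendered while more items remain. A minimal standalone sketch of that
# convention (illustration only, not part of ELBV2Response; the paginate()
# helper below is hypothetical):
def paginate(items, key, marker=None, page_size=50):
    """Return (page, next_marker) using the same marker convention as above."""
    keys = [key(item) for item in items]
    start = keys.index(marker) + 1 if marker else 0
    page = items[start:start + page_size]
    next_marker = key(page[-1]) if len(items) > start + page_size else None
    return page, next_marker


# Walking three pages of fake load balancer names with the returned marker:
names = ["lb-%02d" % i for i in range(12)]
page, marker = paginate(names, key=lambda name: name, page_size=5)
while marker is not None:
    page, marker = paginate(names, key=lambda name: name, marker=marker, page_size=5)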
    @amzn_request_id
    def describe_target_group_attributes(self):
        target_group_arn = self._get_param("TargetGroupArn")
        target_group = self.elbv2_backend.target_groups.get(target_group_arn)
        if not target_group:
            raise TargetGroupNotFoundError()
        template = self.response_template(DESCRIBE_TARGET_GROUP_ATTRIBUTES_TEMPLATE)
        return template.render(attributes=target_group.attributes)

    @amzn_request_id
    def describe_listeners(self):
        load_balancer_arn = self._get_param("LoadBalancerArn")
        listener_arns = self._get_multi_param("ListenerArns.member")
        if not load_balancer_arn and not listener_arns:
            raise LoadBalancerNotFoundError()
        listeners = self.elbv2_backend.describe_listeners(
            load_balancer_arn, listener_arns
        )
        template = self.response_template(DESCRIBE_LISTENERS_TEMPLATE)
        return template.render(listeners=listeners)

    @amzn_request_id
    def delete_load_balancer(self):
        arn = self._get_param("LoadBalancerArn")
        self.elbv2_backend.delete_load_balancer(arn)
        template = self.response_template(DELETE_LOAD_BALANCER_TEMPLATE)
        return template.render()

    @amzn_request_id
    def delete_rule(self):
        arn = self._get_param("RuleArn")
        self.elbv2_backend.delete_rule(arn)
        template = self.response_template(DELETE_RULE_TEMPLATE)
        return template.render()

    @amzn_request_id
    def delete_target_group(self):
        arn = self._get_param("TargetGroupArn")
        self.elbv2_backend.delete_target_group(arn)
        template = self.response_template(DELETE_TARGET_GROUP_TEMPLATE)
        return template.render()

    @amzn_request_id
    def delete_listener(self):
        arn = self._get_param("ListenerArn")
        self.elbv2_backend.delete_listener(arn)
        template = self.response_template(DELETE_LISTENER_TEMPLATE)
        return template.render()

    @amzn_request_id
    def modify_rule(self):
        rule_arn = self._get_param("RuleArn")
        _conditions = self._get_list_prefix("Conditions.member")
        conditions = []
        for _condition in _conditions:
            condition = {}
            condition["field"] = _condition["field"]
            values = sorted(
                [e for e in _condition.items() if "values.member" in e[0]],
                key=lambda x: x[0],
            )
            condition["values"] = [e[1] for e in values]
            conditions.append(condition)
        actions = self._get_list_prefix("Actions.member")
        rules = self.elbv2_backend.modify_rule(
            rule_arn=rule_arn, conditions=conditions, actions=actions
        )
        template = self.response_template(MODIFY_RULE_TEMPLATE)
        return template.render(rules=rules)

    @amzn_request_id
    def modify_target_group_attributes(self):
        target_group_arn = self._get_param("TargetGroupArn")
        target_group = self.elbv2_backend.target_groups.get(target_group_arn)
        # Validate the target group exists before touching its attributes;
        # updating first would raise AttributeError on an unknown ARN.
        if not target_group:
            raise TargetGroupNotFoundError()
        attributes = {
            attr["key"]: attr["value"]
            for attr in self._get_list_prefix("Attributes.member")
        }
        target_group.attributes.update(attributes)
        template = self.response_template(MODIFY_TARGET_GROUP_ATTRIBUTES_TEMPLATE)
        return template.render(attributes=attributes)

    @amzn_request_id
    def register_targets(self):
        target_group_arn = self._get_param("TargetGroupArn")
        targets = self._get_list_prefix("Targets.member")
        self.elbv2_backend.register_targets(target_group_arn, targets)
        template = self.response_template(REGISTER_TARGETS_TEMPLATE)
        return template.render()

    @amzn_request_id
    def deregister_targets(self):
        target_group_arn = self._get_param("TargetGroupArn")
        targets = self._get_list_prefix("Targets.member")
        self.elbv2_backend.deregister_targets(target_group_arn, targets)
        template = self.response_template(DEREGISTER_TARGETS_TEMPLATE)
        return template.render()

    @amzn_request_id
    def describe_target_health(self):
        target_group_arn = self._get_param("TargetGroupArn")
        targets =
self._get_list_prefix("Targets.member") target_health_descriptions = self.elbv2_backend.describe_target_health( target_group_arn, targets ) template = self.response_template(DESCRIBE_TARGET_HEALTH_TEMPLATE) return template.render(target_health_descriptions=target_health_descriptions) @amzn_request_id def set_rule_priorities(self): rule_priorities = self._get_list_prefix("RulePriorities.member") for rule_priority in rule_priorities: rule_priority["priority"] = int(rule_priority["priority"]) rules = self.elbv2_backend.set_rule_priorities(rule_priorities) template = self.response_template(SET_RULE_PRIORITIES_TEMPLATE) return template.render(rules=rules) @amzn_request_id def add_tags(self): resource_arns = self._get_multi_param("ResourceArns.member") for arn in resource_arns: if ":targetgroup" in arn: resource = self.elbv2_backend.target_groups.get(arn) if not resource: raise TargetGroupNotFoundError() elif ":loadbalancer" in arn: resource = self.elbv2_backend.load_balancers.get(arn) if not resource: raise LoadBalancerNotFoundError() else: raise LoadBalancerNotFoundError() self._add_tags(resource) template = self.response_template(ADD_TAGS_TEMPLATE) return template.render() @amzn_request_id def remove_tags(self): resource_arns = self._get_multi_param("ResourceArns.member") tag_keys = self._get_multi_param("TagKeys.member") for arn in resource_arns: if ":targetgroup" in arn: resource = self.elbv2_backend.target_groups.get(arn) if not resource: raise TargetGroupNotFoundError() elif ":loadbalancer" in arn: resource = self.elbv2_backend.load_balancers.get(arn) if not resource: raise LoadBalancerNotFoundError() else: raise LoadBalancerNotFoundError() [resource.remove_tag(key) for key in tag_keys] template = self.response_template(REMOVE_TAGS_TEMPLATE) return template.render() @amzn_request_id def describe_tags(self): resource_arns = self._get_multi_param("ResourceArns.member") resources = [] for arn in resource_arns: if ":targetgroup" in arn: resource = self.elbv2_backend.target_groups.get(arn) if not resource: raise TargetGroupNotFoundError() elif ":loadbalancer" in arn: resource = self.elbv2_backend.load_balancers.get(arn) if not resource: raise LoadBalancerNotFoundError() else: raise LoadBalancerNotFoundError() resources.append(resource) template = self.response_template(DESCRIBE_TAGS_TEMPLATE) return template.render(resources=resources) @amzn_request_id def describe_account_limits(self): # Supports paging but not worth implementing yet # marker = self._get_param('Marker') # page_size = self._get_int_param('PageSize') limits = { "application-load-balancers": 20, "target-groups": 3000, "targets-per-application-load-balancer": 30, "listeners-per-application-load-balancer": 50, "rules-per-application-load-balancer": 100, "network-load-balancers": 20, "targets-per-network-load-balancer": 200, "listeners-per-network-load-balancer": 50, } template = self.response_template(DESCRIBE_LIMITS_TEMPLATE) return template.render(limits=limits) @amzn_request_id def describe_ssl_policies(self): names = self._get_multi_param("Names.member.") # Supports paging but not worth implementing yet # marker = self._get_param('Marker') # page_size = self._get_int_param('PageSize') policies = SSL_POLICIES if names: policies = filter(lambda policy: policy["name"] in names, policies) template = self.response_template(DESCRIBE_SSL_POLICIES_TEMPLATE) return template.render(policies=policies) @amzn_request_id def set_ip_address_type(self): arn = self._get_param("LoadBalancerArn") ip_type = self._get_param("IpAddressType") 
self.elbv2_backend.set_ip_address_type(arn, ip_type) template = self.response_template(SET_IP_ADDRESS_TYPE_TEMPLATE) return template.render(ip_type=ip_type) @amzn_request_id def set_security_groups(self): arn = self._get_param("LoadBalancerArn") sec_groups = self._get_multi_param("SecurityGroups.member.") self.elbv2_backend.set_security_groups(arn, sec_groups) template = self.response_template(SET_SECURITY_GROUPS_TEMPLATE) return template.render(sec_groups=sec_groups) @amzn_request_id def set_subnets(self): arn = self._get_param("LoadBalancerArn") subnets = self._get_multi_param("Subnets.member.") subnet_zone_list = self.elbv2_backend.set_subnets(arn, subnets) template = self.response_template(SET_SUBNETS_TEMPLATE) return template.render(subnets=subnet_zone_list) @amzn_request_id def modify_load_balancer_attributes(self): arn = self._get_param("LoadBalancerArn") attrs = self._get_map_prefix( "Attributes.member", key_end="Key", value_end="Value" ) all_attrs = self.elbv2_backend.modify_load_balancer_attributes(arn, attrs) template = self.response_template(MODIFY_LOADBALANCER_ATTRS_TEMPLATE) return template.render(attrs=all_attrs) @amzn_request_id def describe_load_balancer_attributes(self): arn = self._get_param("LoadBalancerArn") attrs = self.elbv2_backend.describe_load_balancer_attributes(arn) template = self.response_template(DESCRIBE_LOADBALANCER_ATTRS_TEMPLATE) return template.render(attrs=attrs) @amzn_request_id def modify_target_group(self): arn = self._get_param("TargetGroupArn") health_check_proto = self._get_param( "HealthCheckProtocol" ) # 'HTTP' | 'HTTPS' | 'TCP', health_check_port = self._get_param("HealthCheckPort") health_check_path = self._get_param("HealthCheckPath") health_check_interval = self._get_param("HealthCheckIntervalSeconds") health_check_timeout = self._get_param("HealthCheckTimeoutSeconds") healthy_threshold_count = self._get_param("HealthyThresholdCount") unhealthy_threshold_count = self._get_param("UnhealthyThresholdCount") http_codes = self._get_param("Matcher.HttpCode") target_group = self.elbv2_backend.modify_target_group( arn, health_check_proto, health_check_port, health_check_path, health_check_interval, health_check_timeout, healthy_threshold_count, unhealthy_threshold_count, http_codes, ) template = self.response_template(MODIFY_TARGET_GROUP_TEMPLATE) return template.render(target_group=target_group) @amzn_request_id def modify_listener(self): arn = self._get_param("ListenerArn") port = self._get_param("Port") protocol = self._get_param("Protocol") ssl_policy = self._get_param("SslPolicy") certificates = self._get_list_prefix("Certificates.member") default_actions = self._get_list_prefix("DefaultActions.member") # Should really move SSL Policies to models if ssl_policy is not None and ssl_policy not in [ item["name"] for item in SSL_POLICIES ]: raise RESTError( "SSLPolicyNotFound", "Policy {0} not found".format(ssl_policy) ) listener = self.elbv2_backend.modify_listener( arn, port, protocol, ssl_policy, certificates, default_actions ) template = self.response_template(MODIFY_LISTENER_TEMPLATE) return template.render(listener=listener) def _add_tags(self, resource): tag_values = [] tag_keys = [] for t_key, t_val in sorted(self.querystring.items()): if t_key.startswith("Tags.member."): if t_key.split(".")[3] == "Key": tag_keys.extend(t_val) elif t_key.split(".")[3] == "Value": tag_values.extend(t_val) counts = {} for i in tag_keys: counts[i] = tag_keys.count(i) counts = sorted(counts.items(), key=lambda i: i[1], reverse=True) if counts and counts[0][1] > 
1: # We have dupes... raise DuplicateTagKeysError(counts[0]) for tag_key, tag_value in zip(tag_keys, tag_values): resource.add_tag(tag_key, tag_value) ADD_TAGS_TEMPLATE = """<AddTagsResponse xmlns="http://elasticloadbalancing.amazonaws.com/doc/2015-12-01/"> <AddTagsResult/> <ResponseMetadata> <RequestId>{{ request_id }}</RequestId> </ResponseMetadata> </AddTagsResponse>""" REMOVE_TAGS_TEMPLATE = """<RemoveTagsResponse xmlns="http://elasticloadbalancing.amazonaws.com/doc/2015-12-01/"> <RemoveTagsResult/> <ResponseMetadata> <RequestId>{{ request_id }}</RequestId> </ResponseMetadata> </RemoveTagsResponse>""" DESCRIBE_TAGS_TEMPLATE = """<DescribeTagsResponse xmlns="http://elasticloadbalancing.amazonaws.com/doc/2015-12-01/"> <DescribeTagsResult> <TagDescriptions> {% for resource in resources %} <member> <ResourceArn>{{ resource.arn }}</ResourceArn> <Tags> {% for key, value in resource.tags.items() %} <member> <Value>{{ value }}</Value> <Key>{{ key }}</Key> </member> {% endfor %} </Tags> </member> {% endfor %} </TagDescriptions> </DescribeTagsResult> <ResponseMetadata> <RequestId>{{ request_id }}</RequestId> </ResponseMetadata> </DescribeTagsResponse>""" CREATE_LOAD_BALANCER_TEMPLATE = """<CreateLoadBalancerResponse xmlns="http://elasticloadbalancing.amazonaws.com/doc/2015-12-01/"> <CreateLoadBalancerResult> <LoadBalancers> <member> <LoadBalancerArn>{{ load_balancer.arn }}</LoadBalancerArn> <Scheme>{{ load_balancer.scheme }}</Scheme> <LoadBalancerName>{{ load_balancer.name }}</LoadBalancerName> <VpcId>{{ load_balancer.vpc_id }}</VpcId> <CanonicalHostedZoneId>Z2P70J7EXAMPLE</CanonicalHostedZoneId> <CreatedTime>{{ load_balancer.created_time }}</CreatedTime> <AvailabilityZones> {% for subnet in load_balancer.subnets %} <member> <SubnetId>{{ subnet.id }}</SubnetId> <ZoneName>{{ subnet.availability_zone }}</ZoneName> </member> {% endfor %} </AvailabilityZones> <SecurityGroups> {% for security_group in load_balancer.security_groups %} <member>{{ security_group }}</member> {% endfor %} </SecurityGroups> <DNSName>{{ load_balancer.dns_name }}</DNSName> <State> <Code>provisioning</Code> </State> <Type>application</Type> </member> </LoadBalancers> </CreateLoadBalancerResult> <ResponseMetadata> <RequestId>{{ request_id }}</RequestId> </ResponseMetadata> </CreateLoadBalancerResponse>""" CREATE_RULE_TEMPLATE = """<CreateRuleResponse xmlns="http://elasticloadbalancing.amazonaws.com/doc/2015-12-01/"> <CreateRuleResult> <Rules> {% for rule in rules %} <member> <IsDefault>{{ "true" if rule.is_default else "false" }}</IsDefault> <Conditions> {% for condition in rule.conditions %} <member> <Field>{{ condition["field"] }}</Field> <Values> {% for value in condition["values"] %} <member>{{ value }}</member> {% endfor %} </Values> </member> {% endfor %} </Conditions> <Priority>{{ rule.priority }}</Priority> <Actions> {% for action in rule.actions %} <member> <Type>{{ action["type"] }}</Type> {% if action["type"] == "forward" %} <TargetGroupArn>{{ action["target_group_arn"] }}</TargetGroupArn> {% elif action["type"] == "redirect" %} <RedirectConfig>{{ action["redirect_config"] }}</RedirectConfig> {% endif %} </member> {% endfor %} </Actions> <RuleArn>{{ rule.arn }}</RuleArn> </member> {% endfor %} </Rules> </CreateRuleResult> <ResponseMetadata> <RequestId>{{ request_id }}</RequestId> </ResponseMetadata> </CreateRuleResponse>""" CREATE_TARGET_GROUP_TEMPLATE = """<CreateTargetGroupResponse xmlns="http://elasticloadbalancing.amazonaws.com/doc/2015-12-01/"> <CreateTargetGroupResult> <TargetGroups> <member> 
<TargetGroupArn>{{ target_group.arn }}</TargetGroupArn> <TargetGroupName>{{ target_group.name }}</TargetGroupName> <Protocol>{{ target_group.protocol }}</Protocol> <Port>{{ target_group.port }}</Port> <VpcId>{{ target_group.vpc_id }}</VpcId> <HealthCheckProtocol>{{ target_group.health_check_protocol }}</HealthCheckProtocol> <HealthCheckPort>{{ target_group.healthcheck_port }}</HealthCheckPort> <HealthCheckPath>{{ target_group.healthcheck_path }}</HealthCheckPath> <HealthCheckIntervalSeconds>{{ target_group.healthcheck_interval_seconds }}</HealthCheckIntervalSeconds> <HealthCheckTimeoutSeconds>{{ target_group.healthcheck_timeout_seconds }}</HealthCheckTimeoutSeconds> <HealthyThresholdCount>{{ target_group.healthy_threshold_count }}</HealthyThresholdCount> <UnhealthyThresholdCount>{{ target_group.unhealthy_threshold_count }}</UnhealthyThresholdCount> {% if target_group.matcher %} <Matcher> <HttpCode>{{ target_group.matcher['HttpCode'] }}</HttpCode> </Matcher> {% endif %} {% if target_group.target_type %} <TargetType>{{ target_group.target_type }}</TargetType> {% endif %} </member> </TargetGroups> </CreateTargetGroupResult> <ResponseMetadata> <RequestId>{{ request_id }}</RequestId> </ResponseMetadata> </CreateTargetGroupResponse>""" CREATE_LISTENER_TEMPLATE = """<CreateListenerResponse xmlns="http://elasticloadbalancing.amazonaws.com/doc/2015-12-01/"> <CreateListenerResult> <Listeners> <member> <LoadBalancerArn>{{ listener.load_balancer_arn }}</LoadBalancerArn> <Protocol>{{ listener.protocol }}</Protocol> {% if listener.certificates %} <Certificates> {% for cert in listener.certificates %} <member> <CertificateArn>{{ cert }}</CertificateArn> </member> {% endfor %} </Certificates> {% endif %} <Port>{{ listener.port }}</Port> <SslPolicy>{{ listener.ssl_policy }}</SslPolicy> <ListenerArn>{{ listener.arn }}</ListenerArn> <DefaultActions> {% for action in listener.default_actions %} <member> {{ action.to_xml() }} </member> {% endfor %} </DefaultActions> </member> </Listeners> </CreateListenerResult> <ResponseMetadata> <RequestId>{{ request_id }}</RequestId> </ResponseMetadata> </CreateListenerResponse>""" DELETE_LOAD_BALANCER_TEMPLATE = """<DeleteLoadBalancerResponse xmlns="http://elasticloadbalancing.amazonaws.com/doc/2015-12-01/"> <DeleteLoadBalancerResult/> <ResponseMetadata> <RequestId>{{ request_id }}</RequestId> </ResponseMetadata> </DeleteLoadBalancerResponse>""" DELETE_RULE_TEMPLATE = """<DeleteRuleResponse xmlns="http://elasticloadbalancing.amazonaws.com/doc/2015-12-01/"> <DeleteRuleResult/> <ResponseMetadata> <RequestId>{{ request_id }}</RequestId> </ResponseMetadata> </DeleteRuleResponse>""" DELETE_TARGET_GROUP_TEMPLATE = """<DeleteTargetGroupResponse xmlns="http://elasticloadbalancing.amazonaws.com/doc/2015-12-01/"> <DeleteTargetGroupResult/> <ResponseMetadata> <RequestId>{{ request_id }}</RequestId> </ResponseMetadata> </DeleteTargetGroupResponse>""" DELETE_LISTENER_TEMPLATE = """<DeleteListenerResponse xmlns="http://elasticloadbalancing.amazonaws.com/doc/2015-12-01/"> <DeleteListenerResult/> <ResponseMetadata> <RequestId>{{ request_id }}</RequestId> </ResponseMetadata> </DeleteListenerResponse>""" DESCRIBE_LOAD_BALANCERS_TEMPLATE = """<DescribeLoadBalancersResponse xmlns="http://elasticloadbalancing.amazonaws.com/doc/2015-12-01/"> <DescribeLoadBalancersResult> <LoadBalancers> {% for load_balancer in load_balancers %} <member> <LoadBalancerArn>{{ load_balancer.arn }}</LoadBalancerArn> <Scheme>{{ load_balancer.scheme }}</Scheme> <LoadBalancerName>{{ load_balancer.name 
}}</LoadBalancerName> <VpcId>{{ load_balancer.vpc_id }}</VpcId> <CanonicalHostedZoneId>Z2P70J7EXAMPLE</CanonicalHostedZoneId> <CreatedTime>{{ load_balancer.created_time }}</CreatedTime> <AvailabilityZones> {% for subnet in load_balancer.subnets %} <member> <SubnetId>{{ subnet.id }}</SubnetId> <ZoneName>{{ subnet.availability_zone }}</ZoneName> </member> {% endfor %} </AvailabilityZones> <SecurityGroups> {% for security_group in load_balancer.security_groups %} <member>{{ security_group }}</member> {% endfor %} </SecurityGroups> <DNSName>{{ load_balancer.dns_name }}</DNSName> <State> <Code>provisioning</Code> </State> <Type>application</Type> <IpAddressType>ipv4</IpAddressType> </member> {% endfor %} </LoadBalancers> {% if marker %} <NextMarker>{{ marker }}</NextMarker> {% endif %} </DescribeLoadBalancersResult> <ResponseMetadata> <RequestId>{{ request_id }}</RequestId> </ResponseMetadata> </DescribeLoadBalancersResponse>""" DESCRIBE_RULES_TEMPLATE = """<DescribeRulesResponse xmlns="http://elasticloadbalancing.amazonaws.com/doc/2015-12-01/"> <DescribeRulesResult> <Rules> {% for rule in rules %} <member> <IsDefault>{{ "true" if rule.is_default else "false" }}</IsDefault> <Conditions> {% for condition in rule.conditions %} <member> <Field>{{ condition["field"] }}</Field> <Values> {% for value in condition["values"] %} <member>{{ value }}</member> {% endfor %} </Values> </member> {% endfor %} </Conditions> <Priority>{{ rule.priority }}</Priority> <Actions> {% for action in rule.actions %} <member> {{ action.to_xml() }} </member> {% endfor %} </Actions> <RuleArn>{{ rule.arn }}</RuleArn> </member> {% endfor %} </Rules> {% if marker %} <NextMarker>{{ marker }}</NextMarker> {% endif %} </DescribeRulesResult> <ResponseMetadata> <RequestId>{{ request_id }}</RequestId> </ResponseMetadata> </DescribeRulesResponse>""" DESCRIBE_TARGET_GROUPS_TEMPLATE = """<DescribeTargetGroupsResponse xmlns="http://elasticloadbalancing.amazonaws.com/doc/2015-12-01/"> <DescribeTargetGroupsResult> <TargetGroups> {% for target_group in target_groups %} <member> <TargetGroupArn>{{ target_group.arn }}</TargetGroupArn> <TargetGroupName>{{ target_group.name }}</TargetGroupName> <Protocol>{{ target_group.protocol }}</Protocol> <Port>{{ target_group.port }}</Port> <VpcId>{{ target_group.vpc_id }}</VpcId> <HealthCheckProtocol>{{ target_group.healthcheck_protocol }}</HealthCheckProtocol> <HealthCheckPort>{{ target_group.healthcheck_port }}</HealthCheckPort> <HealthCheckPath>{{ target_group.healthcheck_path }}</HealthCheckPath> <HealthCheckIntervalSeconds>{{ target_group.healthcheck_interval_seconds }}</HealthCheckIntervalSeconds> <HealthCheckTimeoutSeconds>{{ target_group.healthcheck_timeout_seconds }}</HealthCheckTimeoutSeconds> <HealthyThresholdCount>{{ target_group.healthy_threshold_count }}</HealthyThresholdCount> <UnhealthyThresholdCount>{{ target_group.unhealthy_threshold_count }}</UnhealthyThresholdCount> {% if target_group.matcher %} <Matcher> <HttpCode>{{ target_group.matcher['HttpCode'] }}</HttpCode> </Matcher> {% endif %} {% if target_group.target_type %} <TargetType>{{ target_group.target_type }}</TargetType> {% endif %} <LoadBalancerArns> {% for load_balancer_arn in target_group.load_balancer_arns %} <member>{{ load_balancer_arn }}</member> {% endfor %} </LoadBalancerArns> </member> {% endfor %} </TargetGroups> </DescribeTargetGroupsResult> <ResponseMetadata> <RequestId>{{ request_id }}</RequestId> </ResponseMetadata> </DescribeTargetGroupsResponse>""" DESCRIBE_TARGET_GROUP_ATTRIBUTES_TEMPLATE = 
"""<DescribeTargetGroupAttributesResponse xmlns="http://elasticloadbalancing.amazonaws.com/doc/2015-12-01/"> <DescribeTargetGroupAttributesResult> <Attributes> {% for key, value in attributes.items() %} <member> <Key>{{ key }}</Key> <Value>{{ value }}</Value> </member> {% endfor %} </Attributes> </DescribeTargetGroupAttributesResult> <ResponseMetadata> <RequestId>{{ request_id }}</RequestId> </ResponseMetadata> </DescribeTargetGroupAttributesResponse>""" DESCRIBE_LISTENERS_TEMPLATE = """<DescribeLoadBalancersResponse xmlns="http://elasticloadbalancing.amazonaws.com/doc/2015-12-01/"> <DescribeListenersResult> <Listeners> {% for listener in listeners %} <member> <LoadBalancerArn>{{ listener.load_balancer_arn }}</LoadBalancerArn> <Protocol>{{ listener.protocol }}</Protocol> {% if listener.certificate %} <Certificates> <member> <CertificateArn>{{ listener.certificate }}</CertificateArn> </member> </Certificates> {% endif %} <Port>{{ listener.port }}</Port> <SslPolicy>{{ listener.ssl_policy }}</SslPolicy> <ListenerArn>{{ listener.arn }}</ListenerArn> <DefaultActions> {% for action in listener.default_actions %} <member> {{ action.to_xml() }} </member> {% endfor %} </DefaultActions> </member> {% endfor %} </Listeners> </DescribeListenersResult> <ResponseMetadata> <RequestId>{{ request_id }}</RequestId> </ResponseMetadata> </DescribeLoadBalancersResponse>""" CONFIGURE_HEALTH_CHECK_TEMPLATE = """<ConfigureHealthCheckResponse xmlns="http://elasticloadbalancing.amazonaws.com/doc/2015-12-01/"> <ConfigureHealthCheckResult> <HealthCheck> <Interval>{{ check.interval }}</Interval> <Target>{{ check.target }}</Target> <HealthyThreshold>{{ check.healthy_threshold }}</HealthyThreshold> <Timeout>{{ check.timeout }}</Timeout> <UnhealthyThreshold>{{ check.unhealthy_threshold }}</UnhealthyThreshold> </HealthCheck> </ConfigureHealthCheckResult> <ResponseMetadata> <RequestId>{{ request_id }}</RequestId> </ResponseMetadata> </ConfigureHealthCheckResponse>""" MODIFY_RULE_TEMPLATE = """<ModifyRuleResponse xmlns="http://elasticloadbalancing.amazonaws.com/doc/2015-12-01/"> <ModifyRuleResult> <Rules> {% for rule in rules %} <member> <IsDefault>{{ "true" if rule.is_default else "false" }}</IsDefault> <Conditions> {% for condition in rule.conditions %} <member> <Field>{{ condition["field"] }}</Field> <Values> {% for value in condition["values"] %} <member>{{ value }}</member> {% endfor %} </Values> </member> {% endfor %} </Conditions> <Priority>{{ rule.priority }}</Priority> <Actions> {% for action in rule.actions %} <member> {{ action.to_xml() }} </member> {% endfor %} </Actions> <RuleArn>{{ rule.arn }}</RuleArn> </member> {% endfor %} </Rules> </ModifyRuleResult> <ResponseMetadata> <RequestId>{{ request_id }}</RequestId> </ResponseMetadata> </ModifyRuleResponse>""" MODIFY_TARGET_GROUP_ATTRIBUTES_TEMPLATE = """<ModifyTargetGroupAttributesResponse xmlns="http://elasticloadbalancing.amazonaws.com/doc/2015-12-01/"> <ModifyTargetGroupAttributesResult> <Attributes> {% for key, value in attributes.items() %} <member> <Key>{{ key }}</Key> <Value>{{ value }}</Value> </member> {% endfor %} </Attributes> </ModifyTargetGroupAttributesResult> <ResponseMetadata> <RequestId>{{ request_id }}</RequestId> </ResponseMetadata> </ModifyTargetGroupAttributesResponse>""" REGISTER_TARGETS_TEMPLATE = """<RegisterTargetsResponse xmlns="http://elasticloadbalancing.amazonaws.com/doc/2015-12-01/"> <RegisterTargetsResult> </RegisterTargetsResult> <ResponseMetadata> <RequestId>{{ request_id }}</RequestId> </ResponseMetadata> 
</RegisterTargetsResponse>""" DEREGISTER_TARGETS_TEMPLATE = """<DeregisterTargetsResponse xmlns="http://elasticloadbalancing.amazonaws.com/doc/2015-12-01/"> <DeregisterTargetsResult> </DeregisterTargetsResult> <ResponseMetadata> <RequestId>{{ request_id }}</RequestId> </ResponseMetadata> </DeregisterTargetsResponse>""" SET_LOAD_BALANCER_SSL_CERTIFICATE = """<SetLoadBalancerListenerSSLCertificateResponse xmlns="http://elasticloadbalan cing.amazonaws.com/doc/2015-12-01/"> <SetLoadBalancerListenerSSLCertificateResult/> <ResponseMetadata> <RequestId>{{ request_id }}</RequestId> </ResponseMetadata> </SetLoadBalancerListenerSSLCertificateResponse>""" DELETE_LOAD_BALANCER_LISTENERS = """<DeleteLoadBalancerListenersResponse xmlns="http://elasticloadbalan cing.amazonaws.com/doc/2015-12-01/"> <DeleteLoadBalancerListenersResult/> <ResponseMetadata> <RequestId>{{ request_id }}</RequestId> </ResponseMetadata> </DeleteLoadBalancerListenersResponse>""" DESCRIBE_ATTRIBUTES_TEMPLATE = """<DescribeLoadBalancerAttributesResponse xmlns="http://elasticloadbalancing.amazonaws.com/doc/2015-12-01/"> <DescribeLoadBalancerAttributesResult> <LoadBalancerAttributes> <AccessLog> <Enabled>{{ attributes.access_log.enabled }}</Enabled> {% if attributes.access_log.enabled %} <S3BucketName>{{ attributes.access_log.s3_bucket_name }}</S3BucketName> <S3BucketPrefix>{{ attributes.access_log.s3_bucket_prefix }}</S3BucketPrefix> <EmitInterval>{{ attributes.access_log.emit_interval }}</EmitInterval> {% endif %} </AccessLog> <ConnectionSettings> <IdleTimeout>{{ attributes.connecting_settings.idle_timeout }}</IdleTimeout> </ConnectionSettings> <CrossZoneLoadBalancing> <Enabled>{{ attributes.cross_zone_load_balancing.enabled }}</Enabled> </CrossZoneLoadBalancing> <ConnectionDraining> {% if attributes.connection_draining.enabled %} <Enabled>true</Enabled> <Timeout>{{ attributes.connection_draining.timeout }}</Timeout> {% else %} <Enabled>false</Enabled> {% endif %} </ConnectionDraining> </LoadBalancerAttributes> </DescribeLoadBalancerAttributesResult> <ResponseMetadata> <RequestId>{{ request_id }}</RequestId> </ResponseMetadata> </DescribeLoadBalancerAttributesResponse> """ MODIFY_ATTRIBUTES_TEMPLATE = """<ModifyLoadBalancerAttributesResponse xmlns="http://elasticloadbalancing.amazonaws.com/doc/2015-12-01/"> <ModifyLoadBalancerAttributesResult> <LoadBalancerName>{{ load_balancer.name }}</LoadBalancerName> <LoadBalancerAttributes> <AccessLog> <Enabled>{{ attributes.access_log.enabled }}</Enabled> {% if attributes.access_log.enabled %} <S3BucketName>{{ attributes.access_log.s3_bucket_name }}</S3BucketName> <S3BucketPrefix>{{ attributes.access_log.s3_bucket_prefix }}</S3BucketPrefix> <EmitInterval>{{ attributes.access_log.emit_interval }}</EmitInterval> {% endif %} </AccessLog> <ConnectionSettings> <IdleTimeout>{{ attributes.connecting_settings.idle_timeout }}</IdleTimeout> </ConnectionSettings> <CrossZoneLoadBalancing> <Enabled>{{ attributes.cross_zone_load_balancing.enabled }}</Enabled> </CrossZoneLoadBalancing> <ConnectionDraining> {% if attributes.connection_draining.enabled %} <Enabled>true</Enabled> <Timeout>{{ attributes.connection_draining.timeout }}</Timeout> {% else %} <Enabled>false</Enabled> {% endif %} </ConnectionDraining> </LoadBalancerAttributes> </ModifyLoadBalancerAttributesResult> <ResponseMetadata> <RequestId>{{ request_id }}</RequestId> </ResponseMetadata> </ModifyLoadBalancerAttributesResponse> """ CREATE_LOAD_BALANCER_POLICY_TEMPLATE = """<CreateLoadBalancerPolicyResponse 
xmlns="http://elasticloadbalancing.amazonaws.com/doc/2015-12-01/"> <CreateLoadBalancerPolicyResult/> <ResponseMetadata> <RequestId>{{ request_id }}</RequestId> </ResponseMetadata> </CreateLoadBalancerPolicyResponse> """ SET_LOAD_BALANCER_POLICIES_OF_LISTENER_TEMPLATE = """<SetLoadBalancerPoliciesOfListenerResponse xmlns="http://elasticloadbalancing.amazonaws.com/doc/2015-12-01/"> <SetLoadBalancerPoliciesOfListenerResult/> <ResponseMetadata> <RequestId>{{ request_id }}</RequestId> </ResponseMetadata> </SetLoadBalancerPoliciesOfListenerResponse> """ SET_LOAD_BALANCER_POLICIES_FOR_BACKEND_SERVER_TEMPLATE = """<SetLoadBalancerPoliciesForBackendServerResponse xmlns="http://elasticloadbalancing.amazonaws.com/doc/2015-12-01/"> <SetLoadBalancerPoliciesForBackendServerResult/> <ResponseMetadata> <RequestId>{{ request_id }}</RequestId> </ResponseMetadata> </SetLoadBalancerPoliciesForBackendServerResponse> """ DESCRIBE_TARGET_HEALTH_TEMPLATE = """<DescribeTargetHealthResponse xmlns="http://elasticloadbalancing.amazonaws.com/doc/2015-12-01/"> <DescribeTargetHealthResult> <TargetHealthDescriptions> {% for target_health in target_health_descriptions %} <member> <HealthCheckPort>{{ target_health.health_port }}</HealthCheckPort> <TargetHealth> <State>{{ target_health.status }}</State> {% if target_health.reason %} <Reason>{{ target_health.reason }}</Reason> {% endif %} {% if target_health.description %} <Description>{{ target_health.description }}</Description> {% endif %} </TargetHealth> <Target> <Port>{{ target_health.port }}</Port> <Id>{{ target_health.instance_id }}</Id> </Target> </member> {% endfor %} </TargetHealthDescriptions> </DescribeTargetHealthResult> <ResponseMetadata> <RequestId>{{ request_id }}</RequestId> </ResponseMetadata> </DescribeTargetHealthResponse>""" SET_RULE_PRIORITIES_TEMPLATE = """<SetRulePrioritiesResponse xmlns="http://elasticloadbalancing.amazonaws.com/doc/2015-12-01/"> <SetRulePrioritiesResult> <Rules> {% for rule in rules %} <member> <IsDefault>{{ "true" if rule.is_default else "false" }}</IsDefault> <Conditions> {% for condition in rule.conditions %} <member> <Field>{{ condition["field"] }}</Field> <Values> {% for value in condition["values"] %} <member>{{ value }}</member> {% endfor %} </Values> </member> {% endfor %} </Conditions> <Priority>{{ rule.priority }}</Priority> <Actions> {% for action in rule.actions %} <member> <Type>{{ action["type"] }}</Type> <TargetGroupArn>{{ action["target_group_arn"] }}</TargetGroupArn> </member> {% endfor %} </Actions> <RuleArn>{{ rule.arn }}</RuleArn> </member> {% endfor %} </Rules> </SetRulePrioritiesResult> <ResponseMetadata> <RequestId>{{ request_id }}</RequestId> </ResponseMetadata> </SetRulePrioritiesResponse>""" DESCRIBE_LIMITS_TEMPLATE = """<DescribeAccountLimitsResponse xmlns="http://elasticloadbalancing.amazonaws.com/doc/2015-12-01/"> <DescribeAccountLimitsResult> <Limits> {% for key, value in limits.items() %} <member> <Name>{{ key }}</Name> <Max>{{ value }}</Max> </member> {% endfor %} </Limits> </DescribeAccountLimitsResult> <ResponseMetadata> <RequestId>{{ request_id }}</RequestId> </ResponseMetadata> </DescribeAccountLimitsResponse>""" DESCRIBE_SSL_POLICIES_TEMPLATE = """<DescribeSSLPoliciesResponse xmlns="http://elasticloadbalancing.amazonaws.com/doc/2015-12-01/"> <DescribeSSLPoliciesResult> <SslPolicies> {% for policy in policies %} <member> <Name>{{ policy['name'] }}</Name> <Ciphers> {% for cipher in policy['ciphers'] %} <member> <Name>{{ cipher['name'] }}</Name> <Priority>{{ cipher['priority'] }}</Priority> 
</member> {% endfor %} </Ciphers> <SslProtocols> {% for proto in policy['ssl_protocols'] %} <member>{{ proto }}</member> {% endfor %} </SslProtocols> </member> {% endfor %} </SslPolicies> </DescribeSSLPoliciesResult> <ResponseMetadata> <RequestId>{{ request_id }}</RequestId> </ResponseMetadata> </DescribeSSLPoliciesResponse>""" SET_IP_ADDRESS_TYPE_TEMPLATE = """<SetIpAddressTypeResponse xmlns="http://elasticloadbalancing.amazonaws.com/doc/2015-12-01/"> <SetIpAddressTypeResult> <IpAddressType>{{ ip_type }}</IpAddressType> </SetIpAddressTypeResult> <ResponseMetadata> <RequestId>{{ request_id }}</RequestId> </ResponseMetadata> </SetIpAddressTypeResponse>""" SET_SECURITY_GROUPS_TEMPLATE = """<SetSecurityGroupsResponse xmlns="http://elasticloadbalancing.amazonaws.com/doc/2015-12-01/"> <SetSecurityGroupsResult> <SecurityGroupIds> {% for group in sec_groups %} <member>{{ group }}</member> {% endfor %} </SecurityGroupIds> </SetSecurityGroupsResult> <ResponseMetadata> <RequestId>{{ request_id }}</RequestId> </ResponseMetadata> </SetSecurityGroupsResponse>""" SET_SUBNETS_TEMPLATE = """<SetSubnetsResponse xmlns="http://elasticloadbalancing.amazonaws.com/doc/2015-12-01/"> <SetSubnetsResult> <AvailabilityZones> {% for zone_id, subnet_id in subnets %} <member> <SubnetId>{{ subnet_id }}</SubnetId> <ZoneName>{{ zone_id }}</ZoneName> </member> {% endfor %} </AvailabilityZones> </SetSubnetsResult> <ResponseMetadata> <RequestId>{{ request_id }}</RequestId> </ResponseMetadata> </SetSubnetsResponse>""" MODIFY_LOADBALANCER_ATTRS_TEMPLATE = """<ModifyLoadBalancerAttributesResponse xmlns="http://elasticloadbalancing.amazonaws.com/doc/2015-12-01/"> <ModifyLoadBalancerAttributesResult> <Attributes> {% for key, value in attrs.items() %} <member> {% if value == None %}<Value />{% else %}<Value>{{ value }}</Value>{% endif %} <Key>{{ key }}</Key> </member> {% endfor %} </Attributes> </ModifyLoadBalancerAttributesResult> <ResponseMetadata> <RequestId>{{ request_id }}</RequestId> </ResponseMetadata> </ModifyLoadBalancerAttributesResponse>""" DESCRIBE_LOADBALANCER_ATTRS_TEMPLATE = """<DescribeLoadBalancerAttributesResponse xmlns="http://elasticloadbalancing.amazonaws.com/doc/2015-12-01/"> <DescribeLoadBalancerAttributesResult> <Attributes> {% for key, value in attrs.items() %} <member> {% if value == None %}<Value />{% else %}<Value>{{ value }}</Value>{% endif %} <Key>{{ key }}</Key> </member> {% endfor %} </Attributes> </DescribeLoadBalancerAttributesResult> <ResponseMetadata> <RequestId>{{ request_id }}</RequestId> </ResponseMetadata> </DescribeLoadBalancerAttributesResponse>""" MODIFY_TARGET_GROUP_TEMPLATE = """<ModifyTargetGroupResponse xmlns="http://elasticloadbalancing.amazonaws.com/doc/2015-12-01/"> <ModifyTargetGroupResult> <TargetGroups> <member> <TargetGroupArn>{{ target_group.arn }}</TargetGroupArn> <TargetGroupName>{{ target_group.name }}</TargetGroupName> <Protocol>{{ target_group.protocol }}</Protocol> <Port>{{ target_group.port }}</Port> <VpcId>{{ target_group.vpc_id }}</VpcId> <HealthCheckProtocol>{{ target_group.healthcheck_protocol }}</HealthCheckProtocol> <HealthCheckPort>{{ target_group.healthcheck_port }}</HealthCheckPort> <HealthCheckPath>{{ target_group.healthcheck_path }}</HealthCheckPath> <HealthCheckIntervalSeconds>{{ target_group.healthcheck_interval_seconds }}</HealthCheckIntervalSeconds> <HealthCheckTimeoutSeconds>{{ target_group.healthcheck_timeout_seconds }}</HealthCheckTimeoutSeconds> <HealthyThresholdCount>{{ target_group.healthy_threshold_count }}</HealthyThresholdCount> 
<UnhealthyThresholdCount>{{ target_group.unhealthy_threshold_count }}</UnhealthyThresholdCount> <Matcher> <HttpCode>{{ target_group.matcher['HttpCode'] }}</HttpCode> </Matcher> <LoadBalancerArns> {% for load_balancer_arn in target_group.load_balancer_arns %} <member>{{ load_balancer_arn }}</member> {% endfor %} </LoadBalancerArns> </member> </TargetGroups> </ModifyTargetGroupResult> <ResponseMetadata> <RequestId>{{ request_id }}</RequestId> </ResponseMetadata> </ModifyTargetGroupResponse>""" MODIFY_LISTENER_TEMPLATE = """<ModifyListenerResponse xmlns="http://elasticloadbalancing.amazonaws.com/doc/2015-12-01/"> <ModifyListenerResult> <Listeners> <member> <LoadBalancerArn>{{ listener.load_balancer_arn }}</LoadBalancerArn> <Protocol>{{ listener.protocol }}</Protocol> {% if listener.certificates %} <Certificates> {% for cert in listener.certificates %} <member> <CertificateArn>{{ cert }}</CertificateArn> </member> {% endfor %} </Certificates> {% endif %} <Port>{{ listener.port }}</Port> <SslPolicy>{{ listener.ssl_policy }}</SslPolicy> <ListenerArn>{{ listener.arn }}</ListenerArn> <DefaultActions> {% for action in listener.default_actions %} <member> {{ action.to_xml() }} </member> {% endfor %} </DefaultActions> </member> </Listeners> </ModifyListenerResult> <ResponseMetadata> <RequestId>{{ request_id }}</RequestId> </ResponseMetadata> </ModifyListenerResponse>"""
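# How the handlers above are typically exercised: moto patches botocore so a
# plain boto3 client is routed to ELBV2Response, and the XML rendered from the
# templates is parsed back into regular boto3 response dicts. A rough sketch,
# assuming a moto release that still ships the mock_ec2/mock_elbv2 decorators
# (the resource names below are made up for the example):
import boto3
from moto import mock_ec2, mock_elbv2


@mock_ec2
@mock_elbv2
def example_create_and_describe():
    ec2 = boto3.client("ec2", region_name="us-east-1")
    elbv2 = boto3.client("elbv2", region_name="us-east-1")
    vpc_id = ec2.create_vpc(CidrBlock="10.0.0.0/16")["Vpc"]["VpcId"]
    subnets = [
        ec2.create_subnet(
            VpcId=vpc_id, CidrBlock=cidr, AvailabilityZone=zone
        )["Subnet"]["SubnetId"]
        for cidr, zone in [("10.0.1.0/24", "us-east-1a"), ("10.0.2.0/24", "us-east-1b")]
    ]
    elbv2.create_load_balancer(Name="example-lb", Subnets=subnets)
    names = [
        lb["LoadBalancerName"]
        for lb in elbv2.describe_load_balancers()["LoadBalancers"]
    ]
    assert names == ["example-lb"]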
"""Support for Vallox ventilation units.""" import ipaddress import logging from vallox_websocket_api import PROFILE as VALLOX_PROFILE, Vallox from vallox_websocket_api.constants import vlxDevConstants from vallox_websocket_api.exceptions import ValloxApiException import voluptuous as vol from homeassistant.const import CONF_HOST, CONF_NAME import homeassistant.helpers.config_validation as cv from homeassistant.helpers.discovery import async_load_platform from homeassistant.helpers.dispatcher import async_dispatcher_send from homeassistant.helpers.event import async_track_time_interval from .const import ( DEFAULT_FAN_SPEED_AWAY, DEFAULT_FAN_SPEED_BOOST, DEFAULT_FAN_SPEED_HOME, DEFAULT_NAME, DOMAIN, METRIC_KEY_PROFILE_FAN_SPEED_AWAY, METRIC_KEY_PROFILE_FAN_SPEED_BOOST, METRIC_KEY_PROFILE_FAN_SPEED_HOME, SIGNAL_VALLOX_STATE_UPDATE, STATE_PROXY_SCAN_INTERVAL, ) _LOGGER = logging.getLogger(__name__) CONFIG_SCHEMA = vol.Schema( { DOMAIN: vol.Schema( { vol.Required(CONF_HOST): vol.All(ipaddress.ip_address, cv.string), vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string, } ) }, extra=vol.ALLOW_EXTRA, ) PROFILE_TO_STR_SETTABLE = { VALLOX_PROFILE.HOME: "Home", VALLOX_PROFILE.AWAY: "Away", VALLOX_PROFILE.BOOST: "Boost", VALLOX_PROFILE.FIREPLACE: "Fireplace", } STR_TO_PROFILE = {v: k for (k, v) in PROFILE_TO_STR_SETTABLE.items()} PROFILE_TO_STR_REPORTABLE = { **{VALLOX_PROFILE.NONE: "None", VALLOX_PROFILE.EXTRA: "Extra"}, **PROFILE_TO_STR_SETTABLE, } ATTR_PROFILE = "profile" ATTR_PROFILE_FAN_SPEED = "fan_speed" SERVICE_SCHEMA_SET_PROFILE = vol.Schema( {vol.Required(ATTR_PROFILE): vol.All(cv.string, vol.In(STR_TO_PROFILE))} ) SERVICE_SCHEMA_SET_PROFILE_FAN_SPEED = vol.Schema( { vol.Required(ATTR_PROFILE_FAN_SPEED): vol.All( vol.Coerce(int), vol.Clamp(min=0, max=100) ) } ) SERVICE_SET_PROFILE = "set_profile" SERVICE_SET_PROFILE_FAN_SPEED_HOME = "set_profile_fan_speed_home" SERVICE_SET_PROFILE_FAN_SPEED_AWAY = "set_profile_fan_speed_away" SERVICE_SET_PROFILE_FAN_SPEED_BOOST = "set_profile_fan_speed_boost" SERVICE_TO_METHOD = { SERVICE_SET_PROFILE: { "method": "async_set_profile", "schema": SERVICE_SCHEMA_SET_PROFILE, }, SERVICE_SET_PROFILE_FAN_SPEED_HOME: { "method": "async_set_profile_fan_speed_home", "schema": SERVICE_SCHEMA_SET_PROFILE_FAN_SPEED, }, SERVICE_SET_PROFILE_FAN_SPEED_AWAY: { "method": "async_set_profile_fan_speed_away", "schema": SERVICE_SCHEMA_SET_PROFILE_FAN_SPEED, }, SERVICE_SET_PROFILE_FAN_SPEED_BOOST: { "method": "async_set_profile_fan_speed_boost", "schema": SERVICE_SCHEMA_SET_PROFILE_FAN_SPEED, }, } async def async_setup(hass, config): """Set up the client and boot the platforms.""" conf = config[DOMAIN] host = conf.get(CONF_HOST) name = conf.get(CONF_NAME) client = Vallox(host) state_proxy = ValloxStateProxy(hass, client) service_handler = ValloxServiceHandler(client, state_proxy) hass.data[DOMAIN] = {"client": client, "state_proxy": state_proxy, "name": name} for vallox_service, method in SERVICE_TO_METHOD.items(): schema = method["schema"] hass.services.async_register( DOMAIN, vallox_service, service_handler.async_handle, schema=schema ) # The vallox hardware expects quite strict timings for websocket # requests. Timings that machines with less processing power, like # Raspberries, cannot live up to during the busy start phase of Home # Asssistant. 
Hence, async_add_entities() for fan and sensor in respective # code will be called with update_before_add=False to intentionally delay # the first request, increasing chance that it is issued only when the # machine is less busy again. hass.async_create_task(async_load_platform(hass, "sensor", DOMAIN, {}, config)) hass.async_create_task(async_load_platform(hass, "fan", DOMAIN, {}, config)) async_track_time_interval(hass, state_proxy.async_update, STATE_PROXY_SCAN_INTERVAL) return True class ValloxStateProxy: """Helper class to reduce websocket API calls.""" def __init__(self, hass, client): """Initialize the proxy.""" self._hass = hass self._client = client self._metric_cache = {} self._profile = None self._valid = False def fetch_metric(self, metric_key): """Return cached state value.""" _LOGGER.debug("Fetching metric key: %s", metric_key) if not self._valid: raise OSError("Device state out of sync.") if metric_key not in vlxDevConstants.__dict__: raise KeyError(f"Unknown metric key: {metric_key}") return self._metric_cache[metric_key] def get_profile(self): """Return cached profile value.""" _LOGGER.debug("Returning profile") if not self._valid: raise OSError("Device state out of sync.") return PROFILE_TO_STR_REPORTABLE[self._profile] async def async_update(self, event_time): """Fetch state update.""" _LOGGER.debug("Updating Vallox state cache") try: self._metric_cache = await self._client.fetch_metrics() self._profile = await self._client.get_profile() except (OSError, ValloxApiException) as err: self._valid = False _LOGGER.error("Error during state cache update: %s", err) return self._valid = True async_dispatcher_send(self._hass, SIGNAL_VALLOX_STATE_UPDATE) class ValloxServiceHandler: """Services implementation.""" def __init__(self, client, state_proxy): """Initialize the proxy.""" self._client = client self._state_proxy = state_proxy async def async_set_profile(self, profile: str = "Home") -> bool: """Set the ventilation profile.""" _LOGGER.debug("Setting ventilation profile to: %s", profile) try: await self._client.set_profile(STR_TO_PROFILE[profile]) return True except (OSError, ValloxApiException) as err: _LOGGER.error("Error setting ventilation profile: %s", err) return False async def async_set_profile_fan_speed_home( self, fan_speed: int = DEFAULT_FAN_SPEED_HOME ) -> bool: """Set the fan speed in percent for the Home profile.""" _LOGGER.debug("Setting Home fan speed to: %d%%", fan_speed) try: await self._client.set_values( {METRIC_KEY_PROFILE_FAN_SPEED_HOME: fan_speed} ) return True except (OSError, ValloxApiException) as err: _LOGGER.error("Error setting fan speed for Home profile: %s", err) return False async def async_set_profile_fan_speed_away( self, fan_speed: int = DEFAULT_FAN_SPEED_AWAY ) -> bool: """Set the fan speed in percent for the Away profile.""" _LOGGER.debug("Setting Away fan speed to: %d%%", fan_speed) try: await self._client.set_values( {METRIC_KEY_PROFILE_FAN_SPEED_AWAY: fan_speed} ) return True except (OSError, ValloxApiException) as err: _LOGGER.error("Error setting fan speed for Away profile: %s", err) return False async def async_set_profile_fan_speed_boost( self, fan_speed: int = DEFAULT_FAN_SPEED_BOOST ) -> bool: """Set the fan speed in percent for the Boost profile.""" _LOGGER.debug("Setting Boost fan speed to: %d%%", fan_speed) try: await self._client.set_values( {METRIC_KEY_PROFILE_FAN_SPEED_BOOST: fan_speed} ) return True except (OSError, ValloxApiException) as err: _LOGGER.error("Error setting fan speed for Boost profile: %s", err) return False 
async def async_handle(self, service): """Dispatch a service call.""" method = SERVICE_TO_METHOD.get(service.service) params = service.data.copy() if not hasattr(self, method["method"]): _LOGGER.error("Service not implemented: %s", method["method"]) return result = await getattr(self, method["method"])(**params) # Force state_proxy to refresh device state, so that updates are # propagated to platforms. if result: await self._state_proxy.async_update(None)
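# Sketch of how a platform set up via async_load_platform above consumes the
# shared state: read the proxy from hass.data, refresh on the dispatcher signal
# sent by ValloxStateProxy.async_update, and drive the services registered in
# async_setup. Illustrative only; example_usage is not part of this integration.
from homeassistant.helpers.dispatcher import async_dispatcher_connect


async def example_usage(hass):
    state_proxy = hass.data[DOMAIN]["state_proxy"]

    def _on_update():
        # get_profile/fetch_metric raise OSError until the first successful
        # async_update has populated the cache.
        _LOGGER.debug("Current profile: %s", state_proxy.get_profile())

    async_dispatcher_connect(hass, SIGNAL_VALLOX_STATE_UPDATE, _on_update)

    # Switch to the Away profile through the service registered above.
    await hass.services.async_call(
        DOMAIN, SERVICE_SET_PROFILE, {ATTR_PROFILE: "Away"}, blocking=True
    )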
# Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved. from __future__ import absolute_import from collections import OrderedDict import os import re import shutil import signal import time import traceback import flask import gevent import gevent.event import gevent.queue from . import utils from .config import config_value from .dataset import DatasetJob from .job import Job from .log import logger from .model import ModelJob from .status import Status from digits.utils import errors """ This constant configures how long to wait before automatically deleting completed non-persistent jobs """ NON_PERSISTENT_JOB_DELETE_TIMEOUT_SECONDS = 3600 class Resource(object): """ Stores information about which tasks are using a resource """ class ResourceAllocation(object): """ Marks that a task is using [part of] a resource """ def __init__(self, task, value): """ Arguments: task -- which task is using the resource value -- how much of the resource is being used """ self.task = task self.value = value def __init__(self, identifier=None, max_value=1): """ Keyword arguments: identifier -- some way to identify this resource max_value -- a numeric representation of the capacity of this resource """ if identifier is None: self.identifier = id(self) else: self.identifier = identifier self.max_value = max_value self.allocations = [] def remaining(self): """ Returns the amount of this resource that is not being used """ return self.max_value - sum(a.value for a in self.allocations) def allocate(self, task, value): """ A task is requesting to use this resource """ if self.remaining() - value < 0: raise RuntimeError('Resource is already maxed out at %s/%s' % ( self.remaining(), self.max_value) ) self.allocations.append(self.ResourceAllocation(task, value)) def deallocate(self, task): """ The task has finished using this resource """ for i, a in enumerate(self.allocations): if id(task) == id(a.task): self.allocations.pop(i) return True return False class Scheduler: """ Coordinates execution of Jobs """ def __init__(self, gpu_list=None, verbose=False): """ Keyword arguments: gpu_list -- a comma-separated string which is a list of GPU id's verbose -- if True, print more errors """ self.jobs = OrderedDict() self.verbose = verbose # Keeps track of resource usage self.resources = { # TODO: break this into CPU cores, memory usage, IO usage, etc. 
'parse_folder_task_pool': [Resource()], 'create_db_task_pool': [Resource(max_value=2)], 'analyze_db_task_pool': [Resource(max_value=4)], 'inference_task_pool': [Resource(max_value=4)], 'gpus': [Resource(identifier=index) for index in gpu_list.split(',')] if gpu_list else [], } self.running = False self.shutdown = gevent.event.Event() def load_past_jobs(self): """ Look in the jobs directory and load all valid jobs """ loaded_jobs = [] failed_jobs = [] for dir_name in sorted(os.listdir(config_value('jobs_dir'))): if os.path.isdir(os.path.join(config_value('jobs_dir'), dir_name)): # Make sure it hasn't already been loaded if dir_name in self.jobs: continue try: job = Job.load(dir_name) # The server might have crashed if job.status.is_running(): job.status = Status.ABORT for task in job.tasks: if task.status.is_running(): task.status = Status.ABORT # We might have changed some attributes here or in __setstate__ job.save() loaded_jobs.append(job) except Exception as e: failed_jobs.append((dir_name, e)) # add DatasetJobs for job in loaded_jobs: if isinstance(job, DatasetJob): self.jobs[job.id()] = job # add ModelJobs for job in loaded_jobs: if isinstance(job, ModelJob): try: # load the DatasetJob job.load_dataset() self.jobs[job.id()] = job except Exception as e: failed_jobs.append((dir_name, e)) logger.info('Loaded %d jobs.' % len(self.jobs)) if len(failed_jobs): logger.warning('Failed to load %d jobs.' % len(failed_jobs)) if self.verbose: for job_id, e in failed_jobs: logger.debug('%s - %s: %s' % (job_id, type(e).__name__, str(e))) def add_job(self, job): """ Add a job to self.jobs """ if not self.running: logger.error('Scheduler not running. Cannot add job.') return False else: self.jobs[job.id()] = job # Need to fix this properly # if True or flask._app_ctx_stack.top is not None: from digits.webapp import app, socketio with app.app_context(): # send message to job_management room that the job is added socketio.emit('job update', { 'update': 'added', 'job_id': job.id(), }, namespace='/jobs', room='job_management', ) if 'DIGITS_MODE_TEST' not in os.environ: # Let the scheduler do a little work before returning time.sleep(utils.wait_time()) return True def get_job(self, job_id): """ Look through self.jobs to try to find the Job Returns None if not found """ if job_id is None: return None return self.jobs.get(job_id, None) def get_related_jobs(self, job): """ Look through self.jobs to try to find the Jobs whose parent contains job """ related_jobs = [] if isinstance(job, ModelJob): datajob = job.dataset related_jobs.append(datajob) elif isinstance(job, DatasetJob): datajob = job else: raise ValueError("Unhandled job type %s" % job.job_type()) for j in self.jobs.values(): # Any model that shares (this/the same) dataset should be added too: if isinstance(j, ModelJob): if datajob == j.train_task().dataset and j.id() != job.id(): related_jobs.append(j) return related_jobs def abort_job(self, job_id): """ Aborts a running Job Returns True if the job was found and aborted """ job = self.get_job(job_id) if job is None or not job.status.is_running(): return False job.abort() logger.info('Job aborted.', job_id=job_id) return True def delete_job(self, job): """ Deletes an entire job folder from disk Returns True if the Job was found and deleted """ if isinstance(job, str) or isinstance(job, unicode): job_id = str(job) elif isinstance(job, Job): job_id = job.id() else: raise ValueError('called delete_job with a %s' % type(job)) dependent_jobs = [] # try to find the job job = self.jobs.get(job_id, None) if 
job: if isinstance(job, DatasetJob): # check for dependencies for j in self.jobs.values(): if isinstance(j, ModelJob) and j.dataset_id == job.id(): logger.error('Cannot delete "%s" (%s) because "%s" (%s) depends on it.' % (job.name(), job.id(), j.name(), j.id())) dependent_jobs.append(j.name()) if len(dependent_jobs)>0: error_message = 'Cannot delete "%s" because %d model%s depend%s on it: %s' % ( job.name(), len(dependent_jobs), ('s' if len(dependent_jobs) != 1 else ''), ('s' if len(dependent_jobs) == 1 else ''), ', '.join(['"%s"' % j for j in dependent_jobs])) raise errors.DeleteError(error_message) self.jobs.pop(job_id, None) job.abort() if os.path.exists(job.dir()): shutil.rmtree(job.dir()) logger.info('Job deleted.', job_id=job_id) from digits.webapp import socketio socketio.emit('job update', { 'update': 'deleted', 'job_id': job.id() }, namespace='/jobs', room='job_management', ) return True # see if the folder exists on disk path = os.path.join(config_value('jobs_dir'), job_id) path = os.path.normpath(path) if os.path.dirname(path) == config_value('jobs_dir') and os.path.exists(path): shutil.rmtree(path) return True return False def running_dataset_jobs(self): """a query utility""" return sorted( [j for j in self.jobs.values() if isinstance(j, DatasetJob) and j.status.is_running()], cmp=lambda x,y: cmp(y.id(), x.id()) ) def completed_dataset_jobs(self): """a query utility""" return sorted( [j for j in self.jobs.values() if isinstance(j, DatasetJob) and not j.status.is_running()], cmp=lambda x,y: cmp(y.id(), x.id()) ) def running_model_jobs(self): """a query utility""" return sorted( [j for j in self.jobs.values() if isinstance(j, ModelJob) and j.status.is_running()], cmp=lambda x,y: cmp(y.id(), x.id()) ) def completed_model_jobs(self): """a query utility""" return sorted( [j for j in self.jobs.values() if isinstance(j, ModelJob) and not j.status.is_running()], cmp=lambda x,y: cmp(y.id(), x.id()) ) def start(self): """ Start the Scheduler Returns True on success """ if self.running: return True gevent.spawn(self.main_thread) self.running = True return True def stop(self): """ Stop the Scheduler Returns True if the shutdown was graceful """ self.shutdown.set() wait_limit = 5 start = time.time() while self.running: if time.time() - start > wait_limit: return False time.sleep(0.1) return True def main_thread(self): """ Monitors the jobs in current_jobs, updates their statuses, and puts their tasks in queues to be processed by other threads """ signal.signal(signal.SIGTERM, self.sigterm_handler) try: last_saved = None while not self.shutdown.is_set(): # Iterate backwards so we can delete jobs for job in self.jobs.values(): if job.status == Status.INIT: def start_this_job(job): if isinstance(job, ModelJob): if job.dataset.status == Status.DONE: job.status = Status.RUN elif job.dataset.status in [Status.ABORT, Status.ERROR]: job.abort() else: job.status = Status.WAIT else: job.status = Status.RUN if 'DIGITS_MODE_TEST' in os.environ: start_this_job(job) else: # Delay start by one second for initial page load gevent.spawn_later(1, start_this_job, job) if job.status == Status.WAIT: if isinstance(job, ModelJob): if job.dataset.status == Status.DONE: job.status = Status.RUN elif job.dataset.status in [Status.ABORT, Status.ERROR]: job.abort() else: job.status = Status.RUN if job.status == Status.RUN: alldone = True for task in job.tasks: if task.status in [Status.INIT, Status.WAIT]: alldone = False # try to start the task if task.ready_to_queue(): requested_resources = 
task.offer_resources(self.resources) if requested_resources is None: task.status = Status.WAIT else: if self.reserve_resources(task, requested_resources): gevent.spawn(self.run_task, task, requested_resources) elif task.status == Status.RUN: # job is not done alldone = False elif task.status in [Status.DONE, Status.ABORT]: # job is done pass elif task.status == Status.ERROR: # propagate error status up to job job.status = Status.ERROR alldone = False break else: logger.warning('Unrecognized task status: "%s"', task.status, job_id=job.id()) if alldone: job.status = Status.DONE logger.info('Job complete.', job_id=job.id()) job.save() # save running jobs every 15 seconds if not last_saved or time.time()-last_saved > 15: for job in self.jobs.values(): if job.status.is_running(): if job.is_persistent(): job.save() elif (not job.is_persistent()) and (time.time() - job.status_history[-1][1] > NON_PERSISTENT_JOB_DELETE_TIMEOUT_SECONDS): # job has been unclaimed for far too long => proceed to garbage collection self.delete_job(job) last_saved = time.time() if 'DIGITS_MODE_TEST' not in os.environ: time.sleep(utils.wait_time()) else: time.sleep(0.05) except KeyboardInterrupt: pass # Shutdown for job in self.jobs.values(): job.abort() job.save() self.running = False def sigterm_handler(self, signal, frame): """ Gunicorn shuts down workers with SIGTERM, not SIGKILL """ self.shutdown.set() def task_error(self, task, error): """ Handle an error while executing a task """ logger.error('%s: %s' % (type(error).__name__, error), job_id=task.job_id) task.exception = error task.traceback = traceback.format_exc() task.status = Status.ERROR def reserve_resources(self, task, resources): """ Reserve resources for a task """ try: # reserve resources for resource_type, requests in resources.iteritems(): for identifier, value in requests: found = False for resource in self.resources[resource_type]: if resource.identifier == identifier: resource.allocate(task, value) self.emit_gpus_available() found = True break if not found: raise RuntimeError('Resource "%s" with identifier="%s" not found' % ( resource_type, identifier)) task.current_resources = resources return True except Exception as e: self.task_error(task, e) self.release_resources(task, resources) return False def release_resources(self, task, resources): """ Release resources previously reserved for a task """ # release resources for resource_type, requests in resources.iteritems(): for identifier, value in requests: for resource in self.resources[resource_type]: if resource.identifier == identifier: resource.deallocate(task) self.emit_gpus_available() task.current_resources = None def run_task(self, task, resources): """ Executes a task Arguments: task -- the task to run resources -- the resources allocated for this task a dict mapping resource_type to lists of (identifier, value) tuples """ try: task.run(resources) except Exception as e: self.task_error(task, e) finally: self.release_resources(task, resources) def emit_gpus_available(self): """ Call socketio.emit gpu availability """ from digits.webapp import scheduler, socketio socketio.emit('server update', { 'update': 'gpus_available', 'total_gpu_count': len(self.resources['gpus']), 'remaining_gpu_count': sum(r.remaining() for r in scheduler.resources['gpus']), }, namespace='/jobs', room='job_management' )
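# --- Illustrative sketch (not part of the scheduler above) -------------------
# reserve_resources()/release_resources() implement a simple counting
# allocation over named resource pools. The _SketchResource class below is a
# hypothetical stand-in whose interface is inferred from the calls made above
# (identifier, max_value, remaining, allocate, deallocate); it is not the real
# Resource implementation, just a minimal sketch of the reserve/release cycle.
class _SketchResource(object):
    """Minimal resource with the interface the scheduler calls above."""

    def __init__(self, identifier=None, max_value=1):
        self.identifier = identifier
        self.max_value = max_value
        self._allocations = {}  # task -> allocated value

    def remaining(self):
        return self.max_value - sum(self._allocations.values())

    def allocate(self, task, value):
        if value > self.remaining():
            raise RuntimeError('Resource exhausted')
        self._allocations[task] = value

    def deallocate(self, task):
        self._allocations.pop(task, None)


def _sketch_reserve_release_cycle():
    # Two single-slot GPUs, mirroring the 'gpus' pool built in __init__ above.
    gpus = [_SketchResource(identifier=str(i)) for i in range(2)]
    task = 'example-task'

    gpus[0].allocate(task, 1)   # what reserve_resources() does per request
    assert gpus[0].remaining() == 0
    gpus[0].deallocate(task)    # what release_resources() does in the finally block
    assert gpus[0].remaining() == 1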
# -*- coding: utf-8 -*- from operator import attrgetter from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType from pyangbind.lib.yangtypes import RestrictedClassType from pyangbind.lib.yangtypes import TypedListType from pyangbind.lib.yangtypes import YANGBool from pyangbind.lib.yangtypes import YANGListType from pyangbind.lib.yangtypes import YANGDynClass from pyangbind.lib.yangtypes import ReferenceType from pyangbind.lib.base import PybindBase from collections import OrderedDict from decimal import Decimal from bitarray import bitarray import six # PY3 support of some PY2 keywords (needs improved) if six.PY3: import builtins as __builtin__ long = int elif six.PY2: import __builtin__ from . import state from . import types_of_service class summary_lsa(PybindBase): """ This class was auto-generated by the PythonClass plugin for PYANG from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa-types/lsa-type/lsas/lsa/summary-lsa. Each member element of the container is represented as a class variable - with a specific YANG type. YANG Description: Contents of the summary LSA """ __slots__ = ("_path_helper", "_extmethods", "__state", "__types_of_service") _yang_name = "summary-lsa" _pybind_generated_by = "container" def __init__(self, *args, **kwargs): self._path_helper = False self._extmethods = False self.__state = YANGDynClass( base=state.state, is_container="container", yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="container", is_config=False, ) self.__types_of_service = YANGDynClass( base=types_of_service.types_of_service, is_container="container", yang_name="types-of-service", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="container", is_config=False, ) load = kwargs.pop("load", None) if args: if len(args) > 1: raise TypeError("cannot create a YANG container with >1 argument") all_attr = True for e in self._pyangbind_elements: if not hasattr(args[0], e): all_attr = False break if not all_attr: raise ValueError("Supplied object did not have the correct attributes") for e in self._pyangbind_elements: nobj = getattr(args[0], e) if nobj._changed() is False: continue setmethod = getattr(self, "_set_%s" % e) if load is None: setmethod(getattr(args[0], e)) else: setmethod(getattr(args[0], e), load=load) def _path(self): if hasattr(self, "_parent"): return self._parent._path() + [self._yang_name] else: return [ "network-instances", "network-instance", "protocols", "protocol", "ospfv2", "areas", "area", "lsdb", "lsa-types", "lsa-type", "lsas", "lsa", "summary-lsa", ] def _get_state(self): """ Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/summary_lsa/state (container) YANG Description: State parameters of the summary LSA """ return self.__state def _set_state(self, v, load=False): """ Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/summary_lsa/state (container) If this variable is read-only (config: false) in 
the source YANG file, then _set_state is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_state() directly. YANG Description: State parameters of the summary LSA """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass( v, base=state.state, is_container="container", yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="container", is_config=False, ) except (TypeError, ValueError): raise ValueError( { "error-string": """state must be of a type compatible with container""", "defined-type": "container", "generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""", } ) self.__state = t if hasattr(self, "_set"): self._set() def _unset_state(self): self.__state = YANGDynClass( base=state.state, is_container="container", yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="container", is_config=False, ) def _get_types_of_service(self): """ Getter method for types_of_service, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/summary_lsa/types_of_service (container) YANG Description: Breakdown of LSA contents specifying multiple TOS values """ return self.__types_of_service def _set_types_of_service(self, v, load=False): """ Setter method for types_of_service, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/summary_lsa/types_of_service (container) If this variable is read-only (config: false) in the source YANG file, then _set_types_of_service is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_types_of_service() directly. 
YANG Description: Breakdown of LSA contents specifying multiple TOS values """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass( v, base=types_of_service.types_of_service, is_container="container", yang_name="types-of-service", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="container", is_config=False, ) except (TypeError, ValueError): raise ValueError( { "error-string": """types_of_service must be of a type compatible with container""", "defined-type": "container", "generated-type": """YANGDynClass(base=types_of_service.types_of_service, is_container='container', yang_name="types-of-service", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""", } ) self.__types_of_service = t if hasattr(self, "_set"): self._set() def _unset_types_of_service(self): self.__types_of_service = YANGDynClass( base=types_of_service.types_of_service, is_container="container", yang_name="types-of-service", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="container", is_config=False, ) state = __builtin__.property(_get_state) types_of_service = __builtin__.property(_get_types_of_service) _pyangbind_elements = OrderedDict( [("state", state), ("types_of_service", types_of_service)] ) from . import state from . import types_of_service class summary_lsa(PybindBase): """ This class was auto-generated by the PythonClass plugin for PYANG from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa-types/lsa-type/lsas/lsa/summary-lsa. Each member element of the container is represented as a class variable - with a specific YANG type. 
YANG Description: Contents of the summary LSA """ __slots__ = ("_path_helper", "_extmethods", "__state", "__types_of_service") _yang_name = "summary-lsa" _pybind_generated_by = "container" def __init__(self, *args, **kwargs): self._path_helper = False self._extmethods = False self.__state = YANGDynClass( base=state.state, is_container="container", yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="container", is_config=False, ) self.__types_of_service = YANGDynClass( base=types_of_service.types_of_service, is_container="container", yang_name="types-of-service", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="container", is_config=False, ) load = kwargs.pop("load", None) if args: if len(args) > 1: raise TypeError("cannot create a YANG container with >1 argument") all_attr = True for e in self._pyangbind_elements: if not hasattr(args[0], e): all_attr = False break if not all_attr: raise ValueError("Supplied object did not have the correct attributes") for e in self._pyangbind_elements: nobj = getattr(args[0], e) if nobj._changed() is False: continue setmethod = getattr(self, "_set_%s" % e) if load is None: setmethod(getattr(args[0], e)) else: setmethod(getattr(args[0], e), load=load) def _path(self): if hasattr(self, "_parent"): return self._parent._path() + [self._yang_name] else: return [ "network-instances", "network-instance", "protocols", "protocol", "ospfv2", "areas", "area", "lsdb", "lsa-types", "lsa-type", "lsas", "lsa", "summary-lsa", ] def _get_state(self): """ Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/summary_lsa/state (container) YANG Description: State parameters of the summary LSA """ return self.__state def _set_state(self, v, load=False): """ Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/summary_lsa/state (container) If this variable is read-only (config: false) in the source YANG file, then _set_state is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_state() directly. 
YANG Description: State parameters of the summary LSA """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass( v, base=state.state, is_container="container", yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="container", is_config=False, ) except (TypeError, ValueError): raise ValueError( { "error-string": """state must be of a type compatible with container""", "defined-type": "container", "generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""", } ) self.__state = t if hasattr(self, "_set"): self._set() def _unset_state(self): self.__state = YANGDynClass( base=state.state, is_container="container", yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="container", is_config=False, ) def _get_types_of_service(self): """ Getter method for types_of_service, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/summary_lsa/types_of_service (container) YANG Description: Breakdown of LSA contents specifying multiple TOS values """ return self.__types_of_service def _set_types_of_service(self, v, load=False): """ Setter method for types_of_service, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/summary_lsa/types_of_service (container) If this variable is read-only (config: false) in the source YANG file, then _set_types_of_service is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_types_of_service() directly. 
YANG Description: Breakdown of LSA contents specifying multiple TOS values """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass( v, base=types_of_service.types_of_service, is_container="container", yang_name="types-of-service", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="container", is_config=False, ) except (TypeError, ValueError): raise ValueError( { "error-string": """types_of_service must be of a type compatible with container""", "defined-type": "container", "generated-type": """YANGDynClass(base=types_of_service.types_of_service, is_container='container', yang_name="types-of-service", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""", } ) self.__types_of_service = t if hasattr(self, "_set"): self._set() def _unset_types_of_service(self): self.__types_of_service = YANGDynClass( base=types_of_service.types_of_service, is_container="container", yang_name="types-of-service", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="container", is_config=False, ) state = __builtin__.property(_get_state) types_of_service = __builtin__.property(_get_types_of_service) _pyangbind_elements = OrderedDict( [("state", state), ("types_of_service", types_of_service)] )
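# --- Illustrative sketch (not generated code) ---------------------------------
# Both summary_lsa classes above expose their `config: false` containers as
# read-only properties: a private attribute set in __init__, a _get_* method,
# and property(_get_*) registered without a setter. The toy class below
# reproduces only that pattern in plain Python; its class and attribute names
# are illustrative and are not part of the generated bindings.
class _ReadOnlyContainerSketch(object):
    def __init__(self):
        # Stand-in for the YANGDynClass container built in __init__ above.
        self._state = {'metric': 10}

    def _get_state(self):
        return self._state

    # No setter is registered, so attribute assignment fails, mirroring the
    # behaviour of state/types_of_service on the generated classes.
    state = property(_get_state)


def _demonstrate_read_only_state():
    sketch = _ReadOnlyContainerSketch()
    assert sketch.state == {'metric': 10}
    try:
        sketch.state = {}
    except AttributeError:
        pass  # read-only, as expected for config: false data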
import pytest import mock import random from framework.auth.core import Auth from api.base.settings.defaults import API_BASE from api.draft_registrations.serializers import DraftRegistrationContributorsCreateSerializer from api_tests.nodes.views.test_node_contributors_list import ( NodeCRUDTestCase, TestNodeContributorList, TestNodeContributorAdd, TestNodeContributorCreateValidation, TestNodeContributorCreateEmail, TestNodeContributorBulkCreate, TestNodeContributorBulkUpdate, TestNodeContributorBulkPartialUpdate, TestNodeContributorBulkDelete, TestNodeContributorFiltering, ) from osf_tests.factories import ( DraftRegistrationFactory, AuthUserFactory, UserFactory, ProjectFactory, ) from osf.utils import permissions from tests.base import capture_signals from website.project.signals import contributor_added @pytest.fixture() def user(): return AuthUserFactory() class DraftRegistrationCRUDTestCase(NodeCRUDTestCase): @pytest.fixture() def project_public(self, user, title, description, category): # Overrides NodeCRUDTestCase - just going to make a "public project" # be a draft branched from a public project. project = ProjectFactory(creator=user, is_public=True) return DraftRegistrationFactory( title=title, description=description, category=category, initiator=user, branched_from=project ) @pytest.fixture() def project_private(self, user, title, description, category): return DraftRegistrationFactory( title=title, description=description, category=category, initiator=user ) class TestDraftRegistrationContributorList(DraftRegistrationCRUDTestCase, TestNodeContributorList): @pytest.fixture() def url_public(self, project_public): return '/{}draft_registrations/{}/contributors/'.format(API_BASE, project_public._id) @pytest.fixture() def url_private(self, project_private): return '/{}draft_registrations/{}/contributors/'.format(API_BASE, project_private._id) # Overrides TestNodeContributorList def test_concatenated_id(self, app, user, project_public, url_public): # Overriding since draft registrations can only be accessed by contribs res = app.get(url_public, auth=user.auth) assert res.status_code == 200 assert res.json['data'][0]['id'].split('-')[0] == project_public._id assert res.json['data'][0]['id'] == '{}-{}'.format( project_public._id, user._id) # Overrides TestNodeContributorList def test_return( self, app, user, user_two, project_public, project_private, url_public, url_private, make_contrib_id): # test_return_public_contributor_list_logged_in # Since permissions are based on the branched from node, this will not pass res = app.get(url_public, auth=user_two.auth, expect_errors=True) assert res.status_code == 403 # test_return_private_contributor_list_logged_out res = app.get(url_private, expect_errors=True) assert res.status_code == 401 assert 'detail' in res.json['errors'][0] # test_return_private_contributor_list_logged_in_non_contributor res = app.get(url_private, auth=user_two.auth, expect_errors=True) assert res.status_code == 403 assert 'detail' in res.json['errors'][0] # Overrides TestNodeContributorList def test_return_public_contributor_list_logged_out( self, app, user, user_two, project_public, url_public, make_contrib_id): project_public.add_contributor(user_two, save=True) res = app.get(url_public, expect_errors=True) assert res.status_code == 401 # Overrides TestNodeContributorList def test_disabled_contributors_contain_names_under_meta( self, app, user, user_two, project_public, url_public, make_contrib_id): project_public.add_contributor(user_two, save=True) 
user_two.is_disabled = True user_two.save() res = app.get(url_public, auth=user.auth) assert res.status_code == 200 assert res.content_type == 'application/vnd.api+json' assert len(res.json['data']) == 2 assert res.json['data'][0]['id'] == make_contrib_id( project_public._id, user._id) assert res.json['data'][1]['id'] == make_contrib_id( project_public._id, user_two._id) assert res.json['data'][1]['embeds']['users']['errors'][0]['meta']['full_name'] == user_two.fullname assert res.json['data'][1]['embeds']['users']['errors'][0]['detail'] == 'The requested user is no longer available.' # Overrides TestNodeContributorList def test_total_bibliographic_contributor_count_returned_in_metadata( self, app, user_two, user, project_public, url_public): non_bibliographic_user = UserFactory() project_public.add_contributor( non_bibliographic_user, visible=False, auth=Auth(project_public.creator)) project_public.save() res = app.get(url_public, auth=user.auth) assert res.status_code == 200 assert res.json['links']['meta']['total_bibliographic'] == len( project_public.visible_contributor_ids) # Overrides TestNodeContributorList def test_contributors_order_is_the_same_over_multiple_requests( self, app, user, project_public, url_public): project_public.add_unregistered_contributor( 'Robert Jackson', 'robert@gmail.com', auth=Auth(user), save=True ) for i in range(0, 10): new_user = AuthUserFactory() if i % 2 == 0: visible = True else: visible = False project_public.add_contributor( new_user, visible=visible, auth=Auth(project_public.creator), save=True ) req_one = app.get( '{}?page=2'.format(url_public), auth=user.auth) req_two = app.get( '{}?page=2'.format(url_public), auth=user.auth) id_one = [item['id'] for item in req_one.json['data']] id_two = [item['id'] for item in req_two.json['data']] for a, b in zip(id_one, id_two): assert a == b def test_permissions_work_with_many_users( self, app, user, project_private, url_private): users = { permissions.ADMIN: [user._id], permissions.WRITE: [] } for i in range(0, 25): perm = random.choice(list(users.keys())) user = AuthUserFactory() project_private.add_contributor(user, permissions=perm) users[perm].append(user._id) res = app.get(url_private, auth=user.auth) data = res.json['data'] for user in data: api_perm = user['attributes']['permission'] user_id = user['id'].split('-')[1] assert user_id in users[api_perm], 'Permissions incorrect for {}. 
Should not have {} permission.'.format( user_id, api_perm) class TestDraftRegistrationContributorAdd(DraftRegistrationCRUDTestCase, TestNodeContributorAdd): @pytest.fixture() def url_public(self, project_public): return '/{}draft_registrations/{}/contributors/?send_email=false'.format(API_BASE, project_public._id) @pytest.fixture() def url_private(self, project_private): return '/{}draft_registrations/{}/contributors/?send_email=false'.format(API_BASE, project_private._id) # Overrides TestNodeContributorAdd def test_adds_contributor_public_project_non_admin_osf_group( self, app, user, user_two, user_three, project_public, data_user_three, url_public): # Draft registrations don't have groups return # Overrides TestNodeContributorAdd def test_adds_contributor_private_project_osf_group_admin_perms( self, app, user, user_two, user_three, project_private, data_user_two, url_private): # Draft registrations don't have groups return class TestDraftRegistrationContributorCreateValidation(DraftRegistrationCRUDTestCase, TestNodeContributorCreateValidation): @pytest.fixture() def create_serializer(self): # Overrides TestNodeContributorCreateValidation return DraftRegistrationContributorsCreateSerializer class TestDraftContributorCreateEmail(DraftRegistrationCRUDTestCase, TestNodeContributorCreateEmail): @pytest.fixture() def url_project_contribs(self, project_public): # Overrides TestNodeContributorCreateEmail return '/{}draft_registrations/{}/contributors/'.format(API_BASE, project_public._id) @mock.patch('framework.auth.views.mails.send_mail') def test_add_contributor_sends_email( self, mock_mail, app, user, user_two, url_project_contribs): # Overrides TestNodeContributorCreateEmail url = '{}?send_email=draft_registration'.format(url_project_contribs) payload = { 'data': { 'type': 'contributors', 'attributes': { }, 'relationships': { 'users': { 'data': { 'type': 'users', 'id': user_two._id } } } } } res = app.post_json_api(url, payload, auth=user.auth) assert res.status_code == 201 assert mock_mail.call_count == 1 # Overrides TestNodeContributorCreateEmail def test_add_contributor_signal_if_default( self, app, user, user_two, url_project_contribs): url = '{}?send_email=default'.format(url_project_contribs) payload = { 'data': { 'type': 'contributors', 'attributes': { }, 'relationships': { 'users': { 'data': { 'type': 'users', 'id': user_two._id } } } } } res = app.post_json_api(url, payload, auth=user.auth, expect_errors=True) assert res.status_code == 400 assert res.json['errors'][0]['detail'] == 'default is not a valid email preference.' 
# Overrides TestNodeContributorCreateEmail @mock.patch('framework.auth.views.mails.send_mail') def test_add_unregistered_contributor_sends_email( self, mock_mail, app, user, url_project_contribs): url = '{}?send_email=draft_registration'.format(url_project_contribs) payload = { 'data': { 'type': 'contributors', 'attributes': { 'full_name': 'Kanye West', 'email': 'kanye@west.com' } } } res = app.post_json_api(url, payload, auth=user.auth) assert res.status_code == 201 assert mock_mail.call_count == 1 # Overrides TestNodeContributorCreateEmail @mock.patch('website.project.signals.unreg_contributor_added.send') def test_add_unregistered_contributor_signal_if_default( self, mock_send, app, user, url_project_contribs): url = '{}?send_email=draft_registration'.format(url_project_contribs) payload = { 'data': { 'type': 'contributors', 'attributes': { 'full_name': 'Kanye West', 'email': 'kanye@west.com' } } } res = app.post_json_api(url, payload, auth=user.auth) args, kwargs = mock_send.call_args assert res.status_code == 201 assert 'draft_registration' == kwargs['email_template'] # Overrides TestNodeContributorCreateEmail @mock.patch('framework.auth.views.mails.send_mail') def test_add_unregistered_contributor_without_email_no_email( self, mock_mail, app, user, url_project_contribs): url = '{}?send_email=draft_registration'.format(url_project_contribs) payload = { 'data': { 'type': 'contributors', 'attributes': { 'full_name': 'Kanye West', } } } with capture_signals() as mock_signal: res = app.post_json_api(url, payload, auth=user.auth) assert contributor_added in mock_signal.signals_sent() assert res.status_code == 201 assert mock_mail.call_count == 0 class TestDraftContributorBulkCreate(DraftRegistrationCRUDTestCase, TestNodeContributorBulkCreate): @pytest.fixture() def url_public(self, project_public): return '/{}draft_registrations/{}/contributors/?send_email=false'.format( API_BASE, project_public._id) @pytest.fixture() def url_private(self, project_private): return '/{}draft_registrations/{}/contributors/?send_email=false'.format( API_BASE, project_private._id) class TestDraftContributorBulkUpdated(DraftRegistrationCRUDTestCase, TestNodeContributorBulkUpdate): @pytest.fixture() def project_public( self, user, user_two, user_three, title, description, category): project_public = DraftRegistrationFactory( initiator=user ) project_public.add_contributor( user_two, permissions=permissions.READ, visible=True, save=True) project_public.add_contributor( user_three, permissions=permissions.READ, visible=True, save=True) return project_public @pytest.fixture() def project_private( self, user, user_two, user_three, title, description, category): project_private = DraftRegistrationFactory( initiator=user ) project_private.add_contributor( user_two, permissions=permissions.READ, visible=True, save=True) project_private.add_contributor( user_three, permissions=permissions.READ, visible=True, save=True) return project_private @pytest.fixture() def url_public(self, project_public): return '/{}draft_registrations/{}/contributors/'.format(API_BASE, project_public._id) @pytest.fixture() def url_private(self, project_private): return '/{}draft_registrations/{}/contributors/'.format( API_BASE, project_private._id) class TestDraftRegistrationContributorBulkPartialUpdate(DraftRegistrationCRUDTestCase, TestNodeContributorBulkPartialUpdate): @pytest.fixture() def project_public( self, user, user_two, user_three, title, description, category): project_public = DraftRegistrationFactory( initiator=user ) 
project_public.add_contributor( user_two, permissions=permissions.READ, visible=True, save=True) project_public.add_contributor( user_three, permissions=permissions.READ, visible=True, save=True) return project_public @pytest.fixture() def project_private( self, user, user_two, user_three, title, description, category): project_private = DraftRegistrationFactory( initiator=user ) project_private.add_contributor( user_two, permissions=permissions.READ, visible=True, save=True) project_private.add_contributor( user_three, permissions=permissions.READ, visible=True, save=True) return project_private @pytest.fixture() def url_public(self, project_public): return '/{}draft_registrations/{}/contributors/'.format(API_BASE, project_public._id) @pytest.fixture() def url_private(self, project_private): return '/{}draft_registrations/{}/contributors/'.format( API_BASE, project_private._id) class TestDraftRegistrationContributorBulkDelete(DraftRegistrationCRUDTestCase, TestNodeContributorBulkDelete): @pytest.fixture() def url_public(self, project_public): return '/{}draft_registrations/{}/contributors/'.format(API_BASE, project_public._id) @pytest.fixture() def url_private(self, project_private): return '/{}draft_registrations/{}/contributors/'.format( API_BASE, project_private._id) @pytest.fixture() def project_public( self, user, user_two, user_three, title, description, category): project_public = DraftRegistrationFactory( initiator=user ) project_public.add_contributor( user_two, permissions=permissions.READ, visible=True, save=True) project_public.add_contributor( user_three, permissions=permissions.READ, visible=True, save=True) return project_public @pytest.fixture() def project_private( self, user, user_two, user_three, title, description, category): project_private = DraftRegistrationFactory( initiator=user ) project_private.add_contributor( user_two, permissions=permissions.READ, visible=True, save=True) project_private.add_contributor( user_three, permissions=permissions.READ, visible=True, save=True) return project_private @pytest.mark.django_db @pytest.mark.enable_quickfiles_creation @pytest.mark.enable_implicit_clean class TestDraftRegistrationContributorFiltering(DraftRegistrationCRUDTestCase, TestNodeContributorFiltering): @pytest.fixture() def project(self, user): return DraftRegistrationFactory(initiator=user) @pytest.fixture() def url(self, project): return '/{}draft_registrations/{}/contributors/'.format( API_BASE, project._id)
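# --- Illustrative helper (not part of the test suite) -------------------------
# The contributor-creation tests above rebuild the same JSON:API payload by
# hand in several places. The helper below captures that payload shape in one
# place; its name and defaults are made up for illustration and it is not used
# by the tests.
def _make_contributor_payload(user_id=None, full_name=None, email=None):
    """Build the JSON:API payload shape posted to .../contributors/."""
    attributes = {}
    if full_name is not None:
        attributes['full_name'] = full_name
    if email is not None:
        attributes['email'] = email
    payload = {'data': {'type': 'contributors', 'attributes': attributes}}
    if user_id is not None:
        payload['data']['relationships'] = {
            'users': {'data': {'type': 'users', 'id': user_id}}
        }
    return payload

# Example: the registered-user payload built inline in
# test_add_contributor_sends_email is equivalent to
# _make_contributor_payload(user_id=user_two._id).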
"""Testing utilities.""" # Copyright (c) 2011, 2012 # Authors: Pietro Berkes, # Andreas Muller # Mathieu Blondel # Olivier Grisel # Arnaud Joly # Denis Engemann # Giorgio Patrini # Thierry Guillemot # License: BSD 3 clause import os import inspect import pkgutil import warnings import sys import functools import scipy as sp import scipy.io from functools import wraps from operator import itemgetter try: # Python 2 from urllib2 import urlopen from urllib2 import HTTPError except ImportError: # Python 3+ from urllib.request import urlopen from urllib.error import HTTPError import tempfile import shutil import os.path as op import atexit import unittest # WindowsError only exist on Windows try: WindowsError except NameError: WindowsError = None import sklearn from sklearn.base import BaseEstimator from sklearn.externals import joblib from sklearn.utils.fixes import signature from sklearn.utils import deprecated, IS_PYPY, _IS_32BIT additional_names_in_all = [] try: from nose.tools import raises as _nose_raises deprecation_message = ( 'sklearn.utils.testing.raises has been deprecated in version 0.20 ' 'and will be removed in 0.22. Please use ' 'sklearn.utils.testing.assert_raises instead.') raises = deprecated(deprecation_message)(_nose_raises) additional_names_in_all.append('raises') except ImportError: pass try: from nose.tools import with_setup as _with_setup deprecation_message = ( 'sklearn.utils.testing.with_setup has been deprecated in version 0.20 ' 'and will be removed in 0.22.' 'If your code relies on with_setup, please use' ' nose.tools.with_setup instead.') with_setup = deprecated(deprecation_message)(_with_setup) additional_names_in_all.append('with_setup') except ImportError: pass from numpy.testing import assert_almost_equal from numpy.testing import assert_array_equal from numpy.testing import assert_array_almost_equal from numpy.testing import assert_array_less from numpy.testing import assert_approx_equal import numpy as np from sklearn.base import (ClassifierMixin, RegressorMixin, TransformerMixin, ClusterMixin) from sklearn.utils._unittest_backport import TestCase __all__ = ["assert_equal", "assert_not_equal", "assert_raises", "assert_raises_regexp", "assert_true", "assert_false", "assert_almost_equal", "assert_array_equal", "assert_array_almost_equal", "assert_array_less", "assert_less", "assert_less_equal", "assert_greater", "assert_greater_equal", "assert_approx_equal", "SkipTest"] __all__.extend(additional_names_in_all) _dummy = TestCase('__init__') assert_equal = _dummy.assertEqual assert_not_equal = _dummy.assertNotEqual assert_true = _dummy.assertTrue assert_false = _dummy.assertFalse assert_raises = _dummy.assertRaises SkipTest = unittest.case.SkipTest assert_dict_equal = _dummy.assertDictEqual assert_in = _dummy.assertIn assert_not_in = _dummy.assertNotIn assert_less = _dummy.assertLess assert_greater = _dummy.assertGreater assert_less_equal = _dummy.assertLessEqual assert_greater_equal = _dummy.assertGreaterEqual assert_raises_regex = _dummy.assertRaisesRegex # assert_raises_regexp is deprecated in Python 3.4 in favor of # assert_raises_regex but lets keep the backward compat in scikit-learn with # the old name for now assert_raises_regexp = assert_raises_regex def assert_warns(warning_class, func, *args, **kw): """Test that a certain warning occurs. Parameters ---------- warning_class : the warning class The class to test for, e.g. UserWarning. func : callable Callable object to trigger warnings. *args : the positional arguments to `func`. 
**kw : the keyword arguments to `func` Returns ------- result : the return value of `func` """ clean_warning_registry() with warnings.catch_warnings(record=True) as w: # Cause all warnings to always be triggered. warnings.simplefilter("always") # Trigger a warning. result = func(*args, **kw) if hasattr(np, 'VisibleDeprecationWarning'): # Filter out numpy-specific warnings in numpy >= 1.9 w = [e for e in w if e.category is not np.VisibleDeprecationWarning] # Verify some things if not len(w) > 0: raise AssertionError("No warning raised when calling %s" % func.__name__) found = any(warning.category is warning_class for warning in w) if not found: raise AssertionError("%s did not give warning: %s( is %s)" % (func.__name__, warning_class, w)) return result def assert_warns_message(warning_class, message, func, *args, **kw): # very important to avoid uncontrolled state propagation """Test that a certain warning occurs and with a certain message. Parameters ---------- warning_class : the warning class The class to test for, e.g. UserWarning. message : str | callable The message or a substring of the message to test for. If callable, it takes a string as the argument and will trigger an AssertionError if the callable returns `False`. func : callable Callable object to trigger warnings. *args : the positional arguments to `func`. **kw : the keyword arguments to `func`. Returns ------- result : the return value of `func` """ clean_warning_registry() with warnings.catch_warnings(record=True) as w: # Cause all warnings to always be triggered. warnings.simplefilter("always") if hasattr(np, 'VisibleDeprecationWarning'): # Let's not catch the numpy internal DeprecationWarnings warnings.simplefilter('ignore', np.VisibleDeprecationWarning) # Trigger a warning. result = func(*args, **kw) # Verify some things if not len(w) > 0: raise AssertionError("No warning raised when calling %s" % func.__name__) found = [issubclass(warning.category, warning_class) for warning in w] if not any(found): raise AssertionError("No warning raised for %s with class " "%s" % (func.__name__, warning_class)) message_found = False # Checks the message of all warnings belong to warning_class for index in [i for i, x in enumerate(found) if x]: # substring will match, the entire message with typo won't msg = w[index].message # For Python 3 compatibility msg = str(msg.args[0] if hasattr(msg, 'args') else msg) if callable(message): # add support for certain tests check_in_message = message else: check_in_message = lambda msg: message in msg if check_in_message(msg): message_found = True break if not message_found: raise AssertionError("Did not receive the message you expected " "('%s') for <%s>, got: '%s'" % (message, func.__name__, msg)) return result def assert_warns_div0(func, *args, **kw): """Assume that numpy's warning for divide by zero is raised Handles the case of platforms that do not support warning on divide by zero Parameters ---------- func *args **kw """ with np.errstate(divide='warn', invalid='warn'): try: assert_warns(RuntimeWarning, np.divide, 1, np.zeros(1)) except AssertionError: # This platform does not report numpy divide by zeros return func(*args, **kw) return assert_warns_message(RuntimeWarning, 'invalid value encountered', func, *args, **kw) # To remove when we support numpy 1.7 def assert_no_warnings(func, *args, **kw): """ Parameters ---------- func *args **kw """ # very important to avoid uncontrolled state propagation clean_warning_registry() with warnings.catch_warnings(record=True) as w: 
warnings.simplefilter('always') result = func(*args, **kw) if hasattr(np, 'VisibleDeprecationWarning'): # Filter out numpy-specific warnings in numpy >= 1.9 w = [e for e in w if e.category is not np.VisibleDeprecationWarning] if len(w) > 0: raise AssertionError("Got warnings when calling %s: [%s]" % (func.__name__, ', '.join(str(warning) for warning in w))) return result def ignore_warnings(obj=None, category=Warning): """Context manager and decorator to ignore warnings. Note: Using this (in both variants) will clear all warnings from all python modules loaded. In case you need to test cross-module-warning-logging, this is not your tool of choice. Parameters ---------- obj : callable or None callable where you want to ignore the warnings. category : warning class, defaults to Warning. The category to filter. If Warning, all categories will be muted. Examples -------- >>> with ignore_warnings(): ... warnings.warn('buhuhuhu') >>> def nasty_warn(): ... warnings.warn('buhuhuhu') ... print(42) >>> ignore_warnings(nasty_warn)() 42 """ if isinstance(obj, type) and issubclass(obj, Warning): # Avoid common pitfall of passing category as the first positional # argument which result in the test not being run warning_name = obj.__name__ raise ValueError( "'obj' should be a callable where you want to ignore warnings. " "You passed a warning class instead: 'obj={warning_name}'. " "If you want to pass a warning class to ignore_warnings, " "you should use 'category={warning_name}'".format( warning_name=warning_name)) elif callable(obj): return _IgnoreWarnings(category=category)(obj) else: return _IgnoreWarnings(category=category) class _IgnoreWarnings(object): """Improved and simplified Python warnings context manager and decorator. This class allows the user to ignore the warnings raised by a function. Copied from Python 2.7.5 and modified as required. Parameters ---------- category : tuple of warning class, default to Warning The category to filter. By default, all the categories will be muted. """ def __init__(self, category): self._record = True self._module = sys.modules['warnings'] self._entered = False self.log = [] self.category = category def __call__(self, fn): """Decorator to catch and hide warnings without visual nesting.""" @wraps(fn) def wrapper(*args, **kwargs): clean_warning_registry() with warnings.catch_warnings(): warnings.simplefilter("ignore", self.category) return fn(*args, **kwargs) return wrapper def __repr__(self): args = [] if self._record: args.append("record=True") if self._module is not sys.modules['warnings']: args.append("module=%r" % self._module) name = type(self).__name__ return "%s(%s)" % (name, ", ".join(args)) def __enter__(self): if self._entered: raise RuntimeError("Cannot enter %r twice" % self) self._entered = True self._filters = self._module.filters self._module.filters = self._filters[:] self._showwarning = self._module.showwarning clean_warning_registry() warnings.simplefilter("ignore", self.category) def __exit__(self, *exc_info): if not self._entered: raise RuntimeError("Cannot exit %r without entering first" % self) self._module.filters = self._filters self._module.showwarning = self._showwarning self.log[:] = [] clean_warning_registry() assert_less = _dummy.assertLess assert_greater = _dummy.assertGreater assert_allclose = np.testing.assert_allclose def assert_raise_message(exceptions, message, function, *args, **kwargs): """Helper function to test the message raised in an exception. 
Given an exception, a callable to raise the exception, and a message string, tests that the correct exception is raised and that the message is a substring of the error thrown. Used to test that the specific message thrown during an exception is correct. Parameters ---------- exceptions : exception or tuple of exception An Exception object. message : str The error message or a substring of the error message. function : callable Callable object to raise error. *args : the positional arguments to `function`. **kwargs : the keyword arguments to `function`. """ try: function(*args, **kwargs) except exceptions as e: error_message = str(e) if message not in error_message: raise AssertionError("Error message does not include the expected" " string: %r. Observed error message: %r" % (message, error_message)) else: # concatenate exception names if isinstance(exceptions, tuple): names = " or ".join(e.__name__ for e in exceptions) else: names = exceptions.__name__ raise AssertionError("%s not raised by %s" % (names, function.__name__)) def assert_allclose_dense_sparse(x, y, rtol=1e-07, atol=1e-9, err_msg=''): """Assert allclose for sparse and dense data. Both x and y need to be either sparse or dense, they can't be mixed. Parameters ---------- x : array-like or sparse matrix First array to compare. y : array-like or sparse matrix Second array to compare. rtol : float, optional relative tolerance; see numpy.allclose atol : float, optional absolute tolerance; see numpy.allclose. Note that the default here is more tolerant than the default for numpy.testing.assert_allclose, where atol=0. err_msg : string, default='' Error message to raise. """ if sp.sparse.issparse(x) and sp.sparse.issparse(y): x = x.tocsr() y = y.tocsr() x.sum_duplicates() y.sum_duplicates() assert_array_equal(x.indices, y.indices, err_msg=err_msg) assert_array_equal(x.indptr, y.indptr, err_msg=err_msg) assert_allclose(x.data, y.data, rtol=rtol, atol=atol, err_msg=err_msg) elif not sp.sparse.issparse(x) and not sp.sparse.issparse(y): # both dense assert_allclose(x, y, rtol=rtol, atol=atol, err_msg=err_msg) else: raise ValueError("Can only compare two sparse matrices," " not a sparse matrix and an array.") @deprecated('deprecated in version 0.20 to be removed in version 0.22') def fake_mldata(columns_dict, dataname, matfile, ordering=None): """Create a fake mldata data set. .. deprecated:: 0.20 Will be removed in version 0.22 Parameters ---------- columns_dict : dict, keys=str, values=ndarray Contains data as columns_dict[column_name] = array of data. dataname : string Name of data set. matfile : string or file object The file name string or the file-like object of the output file. ordering : list, default None List of column_names, determines the ordering in the data set. Notes ----- This function transposes all arrays, while fetch_mldata only transposes 'data', keep that into account in the tests. 
""" datasets = dict(columns_dict) # transpose all variables for name in datasets: datasets[name] = datasets[name].T if ordering is None: ordering = sorted(list(datasets.keys())) # NOTE: setting up this array is tricky, because of the way Matlab # re-packages 1D arrays datasets['mldata_descr_ordering'] = sp.empty((1, len(ordering)), dtype='object') for i, name in enumerate(ordering): datasets['mldata_descr_ordering'][0, i] = name scipy.io.savemat(matfile, datasets, oned_as='column') @deprecated('deprecated in version 0.20 to be removed in version 0.22') class mock_mldata_urlopen(object): """Object that mocks the urlopen function to fake requests to mldata. When requesting a dataset with a name that is in mock_datasets, this object creates a fake dataset in a StringIO object and returns it. Otherwise, it raises an HTTPError. .. deprecated:: 0.20 Will be removed in version 0.22 Parameters ---------- mock_datasets : dict A dictionary of {dataset_name: data_dict}, or {dataset_name: (data_dict, ordering). `data_dict` itself is a dictionary of {column_name: data_array}, and `ordering` is a list of column_names to determine the ordering in the data set (see :func:`fake_mldata` for details). """ def __init__(self, mock_datasets): self.mock_datasets = mock_datasets def __call__(self, urlname): """ Parameters ---------- urlname : string The url """ dataset_name = urlname.split('/')[-1] if dataset_name in self.mock_datasets: resource_name = '_' + dataset_name from io import BytesIO matfile = BytesIO() dataset = self.mock_datasets[dataset_name] ordering = None if isinstance(dataset, tuple): dataset, ordering = dataset fake_mldata(dataset, resource_name, matfile, ordering) matfile.seek(0) return matfile else: raise HTTPError(urlname, 404, dataset_name + " is not available", [], None) def install_mldata_mock(mock_datasets): """ Parameters ---------- mock_datasets : dict A dictionary of {dataset_name: data_dict}, or {dataset_name: (data_dict, ordering). `data_dict` itself is a dictionary of {column_name: data_array}, and `ordering` is a list of column_names to determine the ordering in the data set (see :func:`fake_mldata` for details). """ # Lazy import to avoid mutually recursive imports from sklearn import datasets datasets.mldata.urlopen = mock_mldata_urlopen(mock_datasets) def uninstall_mldata_mock(): # Lazy import to avoid mutually recursive imports from sklearn import datasets datasets.mldata.urlopen = urlopen # Meta estimators need another estimator to be instantiated. 
META_ESTIMATORS = ["OneVsOneClassifier", "MultiOutputEstimator", "MultiOutputRegressor", "MultiOutputClassifier", "OutputCodeClassifier", "OneVsRestClassifier", "RFE", "RFECV", "BaseEnsemble", "ClassifierChain", "RegressorChain"] # estimators that there is no way to default-construct sensibly OTHER = ["Pipeline", "FeatureUnion", "GridSearchCV", "RandomizedSearchCV", "SelectFromModel", "ColumnTransformer"] # some strange ones DONT_TEST = ['SparseCoder', 'DictVectorizer', 'LabelBinarizer', 'LabelEncoder', 'MultiLabelBinarizer', 'TfidfTransformer', 'TfidfVectorizer', 'IsotonicRegression', 'OneHotEncoder', 'RandomTreesEmbedding', 'OrdinalEncoder', 'FeatureHasher', 'DummyClassifier', 'DummyRegressor', 'TruncatedSVD', 'PolynomialFeatures', 'GaussianRandomProjectionHash', 'HashingVectorizer', 'CheckingClassifier', 'PatchExtractor', 'CountVectorizer', # GradientBoosting base estimators, maybe should # exclude them in another way 'ZeroEstimator', 'ScaledLogOddsEstimator', 'QuantileEstimator', 'MeanEstimator', 'LogOddsEstimator', 'PriorProbabilityEstimator', '_SigmoidCalibration', 'VotingClassifier'] def all_estimators(include_meta_estimators=False, include_other=False, type_filter=None, include_dont_test=False): """Get a list of all estimators from sklearn. This function crawls the module and gets all classes that inherit from BaseEstimator. Classes that are defined in test-modules are not included. By default meta_estimators such as GridSearchCV are also not included. Parameters ---------- include_meta_estimators : boolean, default=False Whether to include meta-estimators that can be constructed using an estimator as their first argument. These are currently BaseEnsemble, OneVsOneClassifier, OutputCodeClassifier, OneVsRestClassifier, RFE, RFECV. include_other : boolean, default=False Wether to include meta-estimators that are somehow special and can not be default-constructed sensibly. These are currently Pipeline, FeatureUnion and GridSearchCV type_filter : string, list of string, or None, default=None Which kind of estimators should be returned. If None, no filter is applied and all estimators are returned. Possible values are 'classifier', 'regressor', 'cluster' and 'transformer' to get estimators only of these specific types, or a list of these to get the estimators that fit at least one of the types. include_dont_test : boolean, default=False Whether to include "special" label estimator or test processors. Returns ------- estimators : list of tuples List of (name, class), where ``name`` is the class name as string and ``class`` is the actuall type of the class. """ def is_abstract(c): if not(hasattr(c, '__abstractmethods__')): return False if not len(c.__abstractmethods__): return False return True all_classes = [] # get parent folder path = sklearn.__path__ for importer, modname, ispkg in pkgutil.walk_packages( path=path, prefix='sklearn.', onerror=lambda x: None): if ".tests." 
in modname: continue if IS_PYPY and ('_svmlight_format' in modname or 'feature_extraction._hashing' in modname): continue module = __import__(modname, fromlist="dummy") classes = inspect.getmembers(module, inspect.isclass) all_classes.extend(classes) all_classes = set(all_classes) estimators = [c for c in all_classes if (issubclass(c[1], BaseEstimator) and c[0] != 'BaseEstimator')] # get rid of abstract base classes estimators = [c for c in estimators if not is_abstract(c[1])] if not include_dont_test: estimators = [c for c in estimators if not c[0] in DONT_TEST] if not include_other: estimators = [c for c in estimators if not c[0] in OTHER] # possibly get rid of meta estimators if not include_meta_estimators: estimators = [c for c in estimators if not c[0] in META_ESTIMATORS] if type_filter is not None: if not isinstance(type_filter, list): type_filter = [type_filter] else: type_filter = list(type_filter) # copy filtered_estimators = [] filters = {'classifier': ClassifierMixin, 'regressor': RegressorMixin, 'transformer': TransformerMixin, 'cluster': ClusterMixin} for name, mixin in filters.items(): if name in type_filter: type_filter.remove(name) filtered_estimators.extend([est for est in estimators if issubclass(est[1], mixin)]) estimators = filtered_estimators if type_filter: raise ValueError("Parameter type_filter must be 'classifier', " "'regressor', 'transformer', 'cluster' or " "None, got" " %s." % repr(type_filter)) # drop duplicates, sort for reproducibility # itemgetter is used to ensure the sort does not extend to the 2nd item of # the tuple return sorted(set(estimators), key=itemgetter(0)) def set_random_state(estimator, random_state=0): """Set random state of an estimator if it has the `random_state` param. Parameters ---------- estimator : object The estimator random_state : int, RandomState instance or None, optional, default=0 Pseudo random number generator state. If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. """ if "random_state" in estimator.get_params(): estimator.set_params(random_state=random_state) def if_matplotlib(func): """Test decorator that skips test if matplotlib not installed. Parameters ---------- func """ @wraps(func) def run_test(*args, **kwargs): try: import matplotlib matplotlib.use('Agg', warn=False) # this fails if no $DISPLAY specified import matplotlib.pyplot as plt plt.figure() except ImportError: raise SkipTest('Matplotlib not available.') else: return func(*args, **kwargs) return run_test try: import pytest skip_if_32bit = pytest.mark.skipif(_IS_32BIT, reason='skipped on 32bit platforms') skip_travis = pytest.mark.skipif(os.environ.get('TRAVIS') == 'true', reason='skip on travis') fails_if_pypy = pytest.mark.xfail(IS_PYPY, raises=NotImplementedError, reason='not compatible with PyPy') # Decorator for tests involving both BLAS calls and multiprocessing. # # Under POSIX (e.g. Linux or OSX), using multiprocessing in conjunction # with some implementation of BLAS (or other libraries that manage an # internal posix thread pool) can cause a crash or a freeze of the Python # process. # # In practice all known packaged distributions (from Linux distros or # Anaconda) of BLAS under Linux seems to be safe. So we this problem seems # to only impact OSX users. # # This wrapper makes it possible to skip tests that can possibly cause # this crash under OS X with. 
# # Under Python 3.4+ it is possible to use the `forkserver` start method # for multiprocessing to avoid this issue. However it can cause pickling # errors on interactively defined functions. It therefore not enabled by # default. if_safe_multiprocessing_with_blas = pytest.mark.skipif( sys.platform == 'darwin', reason="Possible multi-process bug with some BLAS") except ImportError: pass def clean_warning_registry(): """Clean Python warning registry for easier testing of warning messages. We may not need to do this any more when getting rid of Python 2, not entirely sure. See https://bugs.python.org/issue4180 and https://bugs.python.org/issue21724 for more details. """ reg = "__warningregistry__" for mod_name, mod in list(sys.modules.items()): if 'six.moves' in mod_name: continue if hasattr(mod, reg): getattr(mod, reg).clear() def check_skip_network(): if int(os.environ.get('SKLEARN_SKIP_NETWORK_TESTS', 0)): raise SkipTest("Text tutorial requires large dataset download") def _delete_folder(folder_path, warn=False): """Utility function to cleanup a temporary folder if still existing. Copy from joblib.pool (for independence). """ try: if os.path.exists(folder_path): # This can fail under windows, # but will succeed when called by atexit shutil.rmtree(folder_path) except WindowsError: if warn: warnings.warn("Could not delete temporary folder %s" % folder_path) class TempMemmap(object): """ Parameters ---------- data mmap_mode """ def __init__(self, data, mmap_mode='r'): self.mmap_mode = mmap_mode self.data = data def __enter__(self): data_read_only, self.temp_folder = create_memmap_backed_data( self.data, mmap_mode=self.mmap_mode, return_folder=True) return data_read_only def __exit__(self, exc_type, exc_val, exc_tb): _delete_folder(self.temp_folder) def create_memmap_backed_data(data, mmap_mode='r', return_folder=False): """ Parameters ---------- data mmap_mode return_folder """ temp_folder = tempfile.mkdtemp(prefix='sklearn_testing_') atexit.register(functools.partial(_delete_folder, temp_folder, warn=True)) filename = op.join(temp_folder, 'data.pkl') joblib.dump(data, filename) memmap_backed_data = joblib.load(filename, mmap_mode=mmap_mode) result = (memmap_backed_data if not return_folder else (memmap_backed_data, temp_folder)) return result # Utils to test docstrings def _get_args(function, varargs=False): """Helper to get function arguments""" try: params = signature(function).parameters except ValueError: # Error on builtin C function return [] args = [key for key, param in params.items() if param.kind not in (param.VAR_POSITIONAL, param.VAR_KEYWORD)] if varargs: varargs = [param.name for param in params.values() if param.kind == param.VAR_POSITIONAL] if len(varargs) == 0: varargs = None return args, varargs else: return args def _get_func_name(func, class_name=None): """Get function full name Parameters ---------- func : callable The function object. class_name : string, optional (default: None) If ``func`` is a class method and the class name is known specify class_name for the error message. Returns ------- name : str The function name. """ parts = [] module = inspect.getmodule(func) if module: parts.append(module.__name__) if class_name is not None: parts.append(class_name) elif hasattr(func, 'im_class'): parts.append(func.im_class.__name__) parts.append(func.__name__) return '.'.join(parts) def check_docstring_parameters(func, doc=None, ignore=None, class_name=None): """Helper to check docstring Parameters ---------- func : callable The function object to test. 
doc : str, optional (default: None) Docstring if it is passed manually to the test. ignore : None | list Parameters to ignore. class_name : string, optional (default: None) If ``func`` is a class method and the class name is known specify class_name for the error message. Returns ------- incorrect : list A list of string describing the incorrect results. """ from numpydoc import docscrape incorrect = [] ignore = [] if ignore is None else ignore func_name = _get_func_name(func, class_name=class_name) if (not func_name.startswith('sklearn.') or func_name.startswith('sklearn.externals')): return incorrect # Don't check docstring for property-functions if inspect.isdatadescriptor(func): return incorrect # Don't check docstring for setup / teardown pytest functions if func_name.split('.')[-1] in ('setup_module', 'teardown_module'): return incorrect # Dont check estimator_checks module if func_name.split('.')[2] == 'estimator_checks': return incorrect args = list(filter(lambda x: x not in ignore, _get_args(func))) # drop self if len(args) > 0 and args[0] == 'self': args.remove('self') if doc is None: with warnings.catch_warnings(record=True) as w: try: doc = docscrape.FunctionDoc(func) except Exception as exp: incorrect += [func_name + ' parsing error: ' + str(exp)] return incorrect if len(w): raise RuntimeError('Error for %s:\n%s' % (func_name, w[0])) param_names = [] for name, type_definition, param_doc in doc['Parameters']: if not type_definition.strip(): if ':' in name and name[:name.index(':')][-1:].strip(): incorrect += [func_name + ' There was no space between the param name and ' 'colon (%r)' % name] elif name.rstrip().endswith(':'): incorrect += [func_name + ' Parameter %r has an empty type spec. ' 'Remove the colon' % (name.lstrip())] if '*' not in name: param_names.append(name.split(':')[0].strip('` ')) param_names = list(filter(lambda x: x not in ignore, param_names)) if len(param_names) != len(args): bad = str(sorted(list(set(param_names) ^ set(args)))) incorrect += [func_name + ' arg mismatch: ' + bad] else: for n1, n2 in zip(param_names, args): if n1 != n2: incorrect += [func_name + ' ' + n1 + ' != ' + n2] return incorrect
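# --- Illustrative usage sketch (not part of the original module) -------------
# A minimal, hedged example of how the memmap helpers above are typically used
# in tests: TempMemmap yields a read-only, memmap-backed copy of an array so a
# test can detect estimators that modify their input in place. The array name
# `X`, the shape, and the seed are illustrative assumptions, not values taken
# from this module.
def _demo_temp_memmap():
    import numpy as np

    X = np.random.RandomState(0).rand(10, 3)
    with TempMemmap(X, mmap_mode='r') as X_readonly:
        # The data comes back as a read-only memmap, so in-place writes raise.
        assert not X_readonly.flags['WRITEABLE']
        # Values round-trip through joblib.dump/load unchanged.
        np.testing.assert_array_equal(X, X_readonly)
    # On exit the temporary folder backing the memmap is cleaned up.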
#!/usr/bin/python # -*- coding: utf-8 -*- # thumbor imaging service # https://github.com/globocom/thumbor/wiki # Licensed under the MIT license: # http://www.opensource.org/licenses/mit-license # Copyright (c) 2011 globo.com timehome@corp.globo.com from os.path import abspath, join, dirname from pymongo import Connection from pyvows import Vows, expect from thumbor.storages.mongo_storage import Storage as MongoStorage from thumbor.context import Context from thumbor.config import Config from fixtures.storage_fixture import IMAGE_URL, IMAGE_BYTES, get_server FIXTURES_FOLDER = join(abspath(dirname(__file__)), 'fixtures') CONNECTION = Connection('localhost', 7777) COLLECTION = CONNECTION['thumbor']['images'] class MongoDBContext(Vows.Context): def teardown(self): CONNECTION.drop_database('thumbor') @Vows.batch class MongoStorageVows(MongoDBContext): class CanStoreImage(Vows.Context): def topic(self): storage = MongoStorage(Context(config=Config(MONGO_STORAGE_SERVER_PORT=7777))) storage.put(IMAGE_URL % 1, IMAGE_BYTES) return COLLECTION.find_one({'path': IMAGE_URL % 1}) def should_be_in_catalog(self, topic): expect(topic).not_to_be_null() expect(topic).not_to_be_an_error() class KnowsIfImageExists(Vows.Context): def topic(self): storage = MongoStorage(Context(config=Config(MONGO_STORAGE_SERVER_PORT=7777))) storage.put(IMAGE_URL % 10000, IMAGE_BYTES) return storage.exists(IMAGE_URL % 10000) def should_exist(self, topic): expect(topic).not_to_be_an_error() expect(topic).to_be_true() class KnowsIfImageDoesNotExist(Vows.Context): def topic(self): storage = MongoStorage(Context(config=Config(MONGO_STORAGE_SERVER_PORT=7777))) return storage.exists(IMAGE_URL % 20000) def should_not_exist(self, topic): expect(topic).not_to_be_an_error() expect(topic).to_be_false() class CanRemoveImage(Vows.Context): def topic(self): storage = MongoStorage(Context(config=Config(MONGO_STORAGE_SERVER_PORT=7777))) storage.put(IMAGE_URL % 9999, IMAGE_BYTES) storage.remove(IMAGE_URL % 9999) return COLLECTION.find_one({'path': IMAGE_URL % 9999}) def should_not_be_in_catalog(self, topic): expect(topic).not_to_be_an_error() expect(topic).to_be_null() class CanReRemoveImage(Vows.Context): def topic(self): storage = MongoStorage(Context(config=Config(MONGO_STORAGE_SERVER_PORT=7777))) storage.remove(IMAGE_URL % 9999) return COLLECTION.find_one({'path': IMAGE_URL % 9999}) def should_not_be_in_catalog(self, topic): expect(topic).not_to_be_an_error() expect(topic).to_be_null() class CanGetImage(Vows.Context): def topic(self): storage = MongoStorage(Context(config=Config(MONGO_STORAGE_SERVER_PORT=7777))) storage.put(IMAGE_URL % 2, IMAGE_BYTES) return storage.get(IMAGE_URL % 2) def should_not_be_null(self, topic): expect(topic).not_to_be_null() expect(topic).not_to_be_an_error() def should_have_proper_bytes(self, topic): expect(topic).to_equal(IMAGE_BYTES) class GettingReturnsNoneWhenImageDoesNotExist(Vows.Context): def topic(self): storage = MongoStorage(Context(config=Config(MONGO_STORAGE_SERVER_PORT=7777))) return storage.get(IMAGE_URL % 99) def should_be_null(self, topic): expect(topic).to_be_null() class StoresCrypto(Vows.Context): class DoesNotStoreWhenConfigIsFalseInPutMethod(Vows.Context): def topic(self): storage = MongoStorage( Context(config=Config(MONGO_STORAGE_SERVER_PORT=7777, STORES_CRYPTO_KEY_FOR_EACH_IMAGE=False))) storage.put(IMAGE_URL % 3, IMAGE_BYTES) return COLLECTION.find_one({'path': IMAGE_URL % 3}) def should_be_in_catalog(self, topic): expect(topic).not_to_be_null() expect(topic).not_to_be_an_error() def 
should_not_have_crypto_key(self, topic): expect(topic).Not.to_include('crypto') class StoringEmptyKeyRaises(Vows.Context): def topic(self): conf = Config(MONGO_STORAGE_SERVER_PORT=7777, STORES_CRYPTO_KEY_FOR_EACH_IMAGE=True) server = get_server() storage = MongoStorage(Context(server=server, config=conf)) storage.put(IMAGE_URL % 4, IMAGE_BYTES) def should_be_an_error(self, topic): expect(topic).to_be_an_error_like(RuntimeError) expect(topic).to_have_an_error_message_of( "STORES_CRYPTO_KEY_FOR_EACH_IMAGE can't be True if no SECURITY_KEY specified" ) class StoringProperKey(Vows.Context): def topic(self): conf = Config(MONGO_STORAGE_SERVER_PORT=7777, STORES_CRYPTO_KEY_FOR_EACH_IMAGE=True) storage = MongoStorage( Context( config=conf, server=get_server('ACME-SEC') ) ) storage.put(IMAGE_URL % 5, IMAGE_BYTES) return COLLECTION.find_one({'path': IMAGE_URL % 5}) def should_be_in_catalog(self, topic): expect(topic).not_to_be_null() expect(topic).not_to_be_an_error() def should_have_crypto_key(self, topic): expect(topic).to_include('crypto') expect(topic['crypto']).to_equal('ACME-SEC') class GetProperKey(Vows.Context): def topic(self): conf = Config(MONGO_STORAGE_SERVER_PORT=7777, STORES_CRYPTO_KEY_FOR_EACH_IMAGE=True) server = get_server('ACME-SEC') storage = MongoStorage(Context(config=conf, server=server)) storage.put(IMAGE_URL % 6, IMAGE_BYTES) return storage.get_crypto(IMAGE_URL % 6) def should_be_in_catalog(self, topic): expect(topic).not_to_be_null() expect(topic).not_to_be_an_error() def should_have_crypto_key(self, topic): expect(topic).to_equal('ACME-SEC') class GetNoKey(Vows.Context): def topic(self): storage = MongoStorage( Context(config=Config( MONGO_STORAGE_SERVER_PORT=7777, STORES_CRYPTO_KEY_FOR_EACH_IMAGE=True, SECURITY_KEY='ACME-SEC') ) ) return storage.get_crypto(IMAGE_URL % 7) def should_not_be_in_catalog(self, topic): expect(topic).to_be_null() class GetProperKeyBeforeExpiration(Vows.Context): def topic(self): conf = Config( MONGO_STORAGE_SERVER_PORT=7777, STORES_CRYPTO_KEY_FOR_EACH_IMAGE=True, STORAGE_EXPIRATION_SECONDS=5000 ) server = get_server('ACME-SEC') storage = MongoStorage(Context(server=server, config=conf)) storage.put(IMAGE_URL % 8, IMAGE_BYTES) return storage.get(IMAGE_URL % 8) def should_be_in_catalog(self, topic): expect(topic).not_to_be_null() expect(topic).not_to_be_an_error() def should_have_crypto_key(self, topic): expect(topic).to_equal(IMAGE_BYTES) class GetNothingAfterExpiration(Vows.Context): def topic(self): config = Config( MONGO_STORAGE_SERVER_PORT=7777, STORES_CRYPTO_KEY_FOR_EACH_IMAGE=True, SECURITY_KEY='ACME-SEC', STORAGE_EXPIRATION_SECONDS=0 ) server = get_server('ACME-SEC') storage = MongoStorage(Context(server=server, config=config)) storage.put(IMAGE_URL % 10, IMAGE_BYTES) item = storage.get(IMAGE_URL % 10) return item is None def should_be_expired(self, topic): expect(topic).to_be_true() class StoresCryptoAfterStoringImage(Vows.Context): def topic(self): conf = Config(MONGO_STORAGE_SERVER_PORT=7777, STORES_CRYPTO_KEY_FOR_EACH_IMAGE=False) server = get_server('ACME-SEC') storage = MongoStorage(Context(config=conf, server=server)) storage.put(IMAGE_URL % 11, IMAGE_BYTES) conf.STORES_CRYPTO_KEY_FOR_EACH_IMAGE = True storage.put_crypto(IMAGE_URL % 11) item = storage.get_crypto(IMAGE_URL % 11) return item def should_be_acme_sec(self, topic): expect(topic).not_to_be_null() expect(topic).to_equal('ACME-SEC') class DoesNotStoreCryptoIfNoNeed(Vows.Context): def topic(self): conf = Config(MONGO_STORAGE_SERVER_PORT=7777, 
STORES_CRYPTO_KEY_FOR_EACH_IMAGE=False, SECURITY_KEY='ACME-SEC') storage = MongoStorage(Context(config=conf)) storage.put(IMAGE_URL % 12, IMAGE_BYTES) storage.put_crypto(IMAGE_URL % 12) item = storage.get_crypto(IMAGE_URL % 12) return item def should_be_null(self, topic): expect(topic).to_be_null() class RaisesIfWrongConfig(Vows.Context): def topic(self): conf = Config(MONGO_STORAGE_SERVER_PORT=7777, STORES_CRYPTO_KEY_FOR_EACH_IMAGE=False) server = get_server('') storage = MongoStorage(Context(config=conf, server=server)) storage.put(IMAGE_URL % 13, IMAGE_BYTES) conf.STORES_CRYPTO_KEY_FOR_EACH_IMAGE = True storage.put_crypto(IMAGE_URL % 13) def should_be_an_error(self, topic): expect(topic).to_be_an_error_like(RuntimeError) expect(topic).to_have_an_error_message_of( "STORES_CRYPTO_KEY_FOR_EACH_IMAGE can't be True if no SECURITY_KEY specified" ) class DetectorData(Vows.Context): def topic(self): conf = Config(MONGO_STORAGE_SERVER_PORT=7777) storage = MongoStorage(Context(config=conf)) storage.put(IMAGE_URL % 14, IMAGE_BYTES) storage.put_detector_data(IMAGE_URL % 14, "some data") return storage.get_detector_data(IMAGE_URL % 14) def should_be_some_data(self, topic): expect(topic).to_equal('some data')
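# --- Illustrative usage sketch (not part of the original vows) ---------------
# A minimal, hedged example of the storage round-trip the contexts above
# exercise, written as a plain function instead of a pyvows context. It assumes
# the same local test MongoDB on port 7777 and reuses the fixtures imported at
# the top of this file; the index 42 is an arbitrary illustrative value.
def _demo_mongo_storage_roundtrip():
    storage = MongoStorage(Context(config=Config(MONGO_STORAGE_SERVER_PORT=7777)))
    storage.put(IMAGE_URL % 42, IMAGE_BYTES)

    # The stored image is visible both through the storage API and directly
    # in the backing collection.
    assert storage.exists(IMAGE_URL % 42)
    assert storage.get(IMAGE_URL % 42) == IMAGE_BYTES
    assert COLLECTION.find_one({'path': IMAGE_URL % 42}) is not None

    # Removing it brings both views back in sync.
    storage.remove(IMAGE_URL % 42)
    assert COLLECTION.find_one({'path': IMAGE_URL % 42}) is None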
#!/usr/bin/env python2 # # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from __future__ import print_function import itertools from argparse import ArgumentParser import os import random import re import sys import subprocess import glob import shutil from collections import namedtuple from sparktestsupport import SPARK_HOME, USER_HOME, ERROR_CODES from sparktestsupport.shellutils import exit_from_command_with_retcode, run_cmd, rm_r, which from sparktestsupport.toposort import toposort_flatten import sparktestsupport.modules as modules # ------------------------------------------------------------------------------------------------- # Functions for traversing module dependency graph # ------------------------------------------------------------------------------------------------- def determine_modules_for_files(filenames): """ Given a list of filenames, return the set of modules that contain those files. If a file is not associated with a more specific submodule, then this method will consider that file to belong to the 'root' module. >>> sorted(x.name for x in determine_modules_for_files(["python/pyspark/a.py", "sql/core/foo"])) ['pyspark-core', 'sql'] >>> [x.name for x in determine_modules_for_files(["file_not_matched_by_any_subproject"])] ['root'] """ changed_modules = set() for filename in filenames: matched_at_least_one_module = False for module in modules.all_modules: if module.contains_file(filename): changed_modules.add(module) matched_at_least_one_module = True if not matched_at_least_one_module: changed_modules.add(modules.root) return changed_modules def identify_changed_files_from_git_commits(patch_sha, target_branch=None, target_ref=None): """ Given a git commit and target ref, use the set of files changed in the diff in order to determine which modules' tests should be run. 
>>> [x.name for x in determine_modules_for_files( \ identify_changed_files_from_git_commits("fc0a1475ef", target_ref="5da21f07"))] ['graphx'] >>> 'root' in [x.name for x in determine_modules_for_files( \ identify_changed_files_from_git_commits("50a0496a43", target_ref="6765ef9"))] True """ if target_branch is None and target_ref is None: raise AttributeError("must specify either target_branch or target_ref") elif target_branch is not None and target_ref is not None: raise AttributeError("must specify either target_branch or target_ref, not both") if target_branch is not None: diff_target = target_branch run_cmd(['git', 'fetch', 'origin', str(target_branch+':'+target_branch)]) else: diff_target = target_ref raw_output = subprocess.check_output(['git', 'diff', '--name-only', patch_sha, diff_target], universal_newlines=True) # Remove any empty strings return [f for f in raw_output.split('\n') if f] def setup_test_environ(environ): print("[info] Setup the following environment variables for tests: ") for (k, v) in environ.items(): print("%s=%s" % (k, v)) os.environ[k] = v def determine_modules_to_test(changed_modules): """ Given a set of modules that have changed, compute the transitive closure of those modules' dependent modules in order to determine the set of modules that should be tested. Returns a topologically-sorted list of modules (ties are broken by sorting on module names). >>> [x.name for x in determine_modules_to_test([modules.root])] ['root'] >>> [x.name for x in determine_modules_to_test([modules.build])] ['root'] >>> [x.name for x in determine_modules_to_test([modules.graphx])] ['graphx', 'examples'] >>> x = [x.name for x in determine_modules_to_test([modules.sql])] >>> x # doctest: +NORMALIZE_WHITESPACE ['sql', 'avro', 'hive', 'mllib', 'sql-kafka-0-10', 'examples', 'hive-thriftserver', 'pyspark-sql', 'repl', 'sparkr', 'pyspark-mllib', 'pyspark-ml'] """ modules_to_test = set() for module in changed_modules: modules_to_test = modules_to_test.union(determine_modules_to_test(module.dependent_modules)) modules_to_test = modules_to_test.union(set(changed_modules)) # If we need to run all of the tests, then we should short-circuit and return 'root' if modules.root in modules_to_test: return [modules.root] return toposort_flatten( {m: set(m.dependencies).intersection(modules_to_test) for m in modules_to_test}, sort=True) def determine_tags_to_exclude(changed_modules): tags = [] for m in modules.all_modules: if m not in changed_modules: tags += m.test_tags return tags # ------------------------------------------------------------------------------------------------- # Functions for working with subprocesses and shell tools # ------------------------------------------------------------------------------------------------- def determine_java_executable(): """Will return the path of the java executable that will be used by Spark's tests or `None`""" # Any changes in the way that Spark's build detects java must be reflected # here. 
Currently the build looks for $JAVA_HOME/bin/java then falls back to # the `java` executable on the path java_home = os.environ.get("JAVA_HOME") # check if there is an executable at $JAVA_HOME/bin/java java_exe = which(os.path.join(java_home, "bin", "java")) if java_home else None # if the java_exe wasn't set, check for a `java` version on the $PATH return java_exe if java_exe else which("java") # ------------------------------------------------------------------------------------------------- # Functions for running the other build and test scripts # ------------------------------------------------------------------------------------------------- def set_title_and_block(title, err_block): os.environ["CURRENT_BLOCK"] = str(ERROR_CODES[err_block]) line_str = '=' * 72 print('') print(line_str) print(title) print(line_str) def run_apache_rat_checks(): set_title_and_block("Running Apache RAT checks", "BLOCK_RAT") run_cmd([os.path.join(SPARK_HOME, "dev", "check-license")]) def run_scala_style_checks(build_profiles): set_title_and_block("Running Scala style checks", "BLOCK_SCALA_STYLE") profiles = " ".join(build_profiles) print("[info] Checking Scala style using SBT with these profiles: ", profiles) run_cmd([os.path.join(SPARK_HOME, "dev", "lint-scala"), profiles]) def run_java_style_checks(build_profiles): set_title_and_block("Running Java style checks", "BLOCK_JAVA_STYLE") # The same profiles used for building are used to run Checkstyle by SBT as well because # the previous build looks reused for Checkstyle and affecting Checkstyle. See SPARK-27130. profiles = " ".join(build_profiles) print("[info] Checking Java style using SBT with these profiles: ", profiles) run_cmd([os.path.join(SPARK_HOME, "dev", "sbt-checkstyle"), profiles]) def run_python_style_checks(): set_title_and_block("Running Python style checks", "BLOCK_PYTHON_STYLE") run_cmd([os.path.join(SPARK_HOME, "dev", "lint-python")]) def run_sparkr_style_checks(): set_title_and_block("Running R style checks", "BLOCK_R_STYLE") if which("R"): # R style check should be executed after `install-dev.sh`. # Since warnings about `no visible global function definition` appear # without the installation. SEE ALSO: SPARK-9121. 
run_cmd([os.path.join(SPARK_HOME, "dev", "lint-r")]) else: print("Ignoring SparkR style check as R was not found in PATH") def build_spark_documentation(): set_title_and_block("Building Spark Documentation", "BLOCK_DOCUMENTATION") os.environ["PRODUCTION"] = "1 jekyll build" os.chdir(os.path.join(SPARK_HOME, "docs")) jekyll_bin = which("jekyll") if not jekyll_bin: print("[error] Cannot find a version of `jekyll` on the system; please", " install one and retry to build documentation.") sys.exit(int(os.environ.get("CURRENT_BLOCK", 255))) else: run_cmd([jekyll_bin, "build"]) os.chdir(SPARK_HOME) def get_zinc_port(): """ Get a randomized port on which to start Zinc """ return random.randrange(3030, 4030) def exec_maven(mvn_args=()): """Will call Maven in the current directory with the list of mvn_args passed in and returns the subprocess for any further processing""" zinc_port = get_zinc_port() os.environ["ZINC_PORT"] = "%s" % zinc_port zinc_flag = "-DzincPort=%s" % zinc_port flags = [os.path.join(SPARK_HOME, "build", "mvn"), zinc_flag] run_cmd(flags + mvn_args) def exec_sbt(sbt_args=()): """Will call SBT in the current directory with the list of mvn_args passed in and returns the subprocess for any further processing""" sbt_cmd = [os.path.join(SPARK_HOME, "build", "sbt")] + sbt_args sbt_output_filter = re.compile(b"^.*[info].*Resolving" + b"|" + b"^.*[warn].*Merging" + b"|" + b"^.*[info].*Including") # NOTE: echo "q" is needed because sbt on encountering a build file # with failure (either resolution or compilation) prompts the user for # input either q, r, etc to quit or retry. This echo is there to make it # not block. echo_proc = subprocess.Popen(["echo", "\"q\n\""], stdout=subprocess.PIPE) sbt_proc = subprocess.Popen(sbt_cmd, stdin=echo_proc.stdout, stdout=subprocess.PIPE) echo_proc.wait() for line in iter(sbt_proc.stdout.readline, b''): if not sbt_output_filter.match(line): print(line, end='') retcode = sbt_proc.wait() if retcode != 0: exit_from_command_with_retcode(sbt_cmd, retcode) def get_hadoop_profiles(hadoop_version): """ For the given Hadoop version tag, return a list of Maven/SBT profile flags for building and testing against that Hadoop version. """ sbt_maven_hadoop_profiles = { "hadoop2.7": ["-Phadoop-2.7"], "hadoop3.2": ["-Phadoop-3.2"], } if hadoop_version in sbt_maven_hadoop_profiles: return sbt_maven_hadoop_profiles[hadoop_version] else: print("[error] Could not find", hadoop_version, "in the list. 
Valid options", " are", sbt_maven_hadoop_profiles.keys()) sys.exit(int(os.environ.get("CURRENT_BLOCK", 255))) def build_spark_maven(hadoop_version): # Enable all of the profiles for the build: build_profiles = get_hadoop_profiles(hadoop_version) + modules.root.build_profile_flags mvn_goals = ["clean", "package", "-DskipTests"] profiles_and_goals = build_profiles + mvn_goals print("[info] Building Spark using Maven with these arguments: ", " ".join(profiles_and_goals)) exec_maven(profiles_and_goals) def build_spark_sbt(hadoop_version): # Enable all of the profiles for the build: build_profiles = get_hadoop_profiles(hadoop_version) + modules.root.build_profile_flags sbt_goals = ["test:package", # Build test jars as some tests depend on them "streaming-kinesis-asl-assembly/assembly"] profiles_and_goals = build_profiles + sbt_goals print("[info] Building Spark using SBT with these arguments: ", " ".join(profiles_and_goals)) exec_sbt(profiles_and_goals) def build_spark_unidoc_sbt(hadoop_version): set_title_and_block("Building Unidoc API Documentation", "BLOCK_DOCUMENTATION") # Enable all of the profiles for the build: build_profiles = get_hadoop_profiles(hadoop_version) + modules.root.build_profile_flags sbt_goals = ["unidoc"] profiles_and_goals = build_profiles + sbt_goals print("[info] Building Spark unidoc using SBT with these arguments: ", " ".join(profiles_and_goals)) exec_sbt(profiles_and_goals) def build_spark_assembly_sbt(hadoop_version, checkstyle=False): # Enable all of the profiles for the build: build_profiles = get_hadoop_profiles(hadoop_version) + modules.root.build_profile_flags sbt_goals = ["assembly/package"] profiles_and_goals = build_profiles + sbt_goals print("[info] Building Spark assembly using SBT with these arguments: ", " ".join(profiles_and_goals)) exec_sbt(profiles_and_goals) if checkstyle: run_java_style_checks(build_profiles) build_spark_unidoc_sbt(hadoop_version) def build_apache_spark(build_tool, hadoop_version): """Will build Spark against Hive v1.2.1 given the passed in build tool (either `sbt` or `maven`). 
Defaults to using `sbt`.""" set_title_and_block("Building Spark", "BLOCK_BUILD") rm_r("lib_managed") if build_tool == "maven": build_spark_maven(hadoop_version) else: build_spark_sbt(hadoop_version) def detect_binary_inop_with_mima(hadoop_version): build_profiles = get_hadoop_profiles(hadoop_version) + modules.root.build_profile_flags set_title_and_block("Detecting binary incompatibilities with MiMa", "BLOCK_MIMA") profiles = " ".join(build_profiles) print("[info] Detecting binary incompatibilities with MiMa using SBT with these profiles: ", profiles) run_cmd([os.path.join(SPARK_HOME, "dev", "mima"), profiles]) def run_scala_tests_maven(test_profiles): mvn_test_goals = ["test", "--fail-at-end"] profiles_and_goals = test_profiles + mvn_test_goals print("[info] Running Spark tests using Maven with these arguments: ", " ".join(profiles_and_goals)) exec_maven(profiles_and_goals) def run_scala_tests_sbt(test_modules, test_profiles): sbt_test_goals = list(itertools.chain.from_iterable(m.sbt_test_goals for m in test_modules)) if not sbt_test_goals: return profiles_and_goals = test_profiles + sbt_test_goals print("[info] Running Spark tests using SBT with these arguments: ", " ".join(profiles_and_goals)) exec_sbt(profiles_and_goals) def run_scala_tests(build_tool, hadoop_version, test_modules, excluded_tags): """Function to properly execute all tests passed in as a set from the `determine_test_suites` function""" set_title_and_block("Running Spark unit tests", "BLOCK_SPARK_UNIT_TESTS") test_modules = set(test_modules) test_profiles = get_hadoop_profiles(hadoop_version) + \ list(set(itertools.chain.from_iterable(m.build_profile_flags for m in test_modules))) if excluded_tags: test_profiles += ['-Dtest.exclude.tags=' + ",".join(excluded_tags)] if build_tool == "maven": run_scala_tests_maven(test_profiles) else: run_scala_tests_sbt(test_modules, test_profiles) def run_python_tests(test_modules, parallelism, with_coverage=False): set_title_and_block("Running PySpark tests", "BLOCK_PYSPARK_UNIT_TESTS") if with_coverage: # Coverage makes the PySpark tests flaky due to heavy parallelism. # When we run PySpark tests with coverage, it uses 4 for now as # workaround. parallelism = 4 script = "run-tests-with-coverage" else: script = "run-tests" command = [os.path.join(SPARK_HOME, "python", script)] if test_modules != [modules.root]: command.append("--modules=%s" % ','.join(m.name for m in test_modules)) command.append("--parallelism=%i" % parallelism) run_cmd(command) if with_coverage: post_python_tests_results() def post_python_tests_results(): if "SPARK_TEST_KEY" not in os.environ: print("[error] 'SPARK_TEST_KEY' environment variable was not set. Unable to post " "PySpark coverage results.") sys.exit(1) spark_test_key = os.environ.get("SPARK_TEST_KEY") # The steps below upload HTMLs to 'github.com/spark-test/pyspark-coverage-site'. # 1. Clone PySpark coverage site. run_cmd([ "git", "clone", "https://spark-test:%s@github.com/spark-test/pyspark-coverage-site.git" % spark_test_key]) # 2. Remove existing HTMLs. run_cmd(["rm", "-fr"] + glob.glob("pyspark-coverage-site/*")) # 3. Copy generated coverage HTMLs. for f in glob.glob("%s/python/test_coverage/htmlcov/*" % SPARK_HOME): shutil.copy(f, "pyspark-coverage-site/") os.chdir("pyspark-coverage-site") try: # 4. Check out to a temporary branch. run_cmd(["git", "symbolic-ref", "HEAD", "refs/heads/latest_branch"]) # 5. Add all the files. run_cmd(["git", "add", "-A"]) # 6. Commit current HTMLs. 
run_cmd([ "git", "commit", "-am", "Coverage report at latest commit in Apache Spark", '--author="Apache Spark Test Account <sparktestacc@gmail.com>"']) # 7. Delete the old branch. run_cmd(["git", "branch", "-D", "gh-pages"]) # 8. Rename the temporary branch to master. run_cmd(["git", "branch", "-m", "gh-pages"]) # 9. Finally, force update to our repository. run_cmd(["git", "push", "-f", "origin", "gh-pages"]) finally: os.chdir("..") def run_python_packaging_tests(): set_title_and_block("Running PySpark packaging tests", "BLOCK_PYSPARK_PIP_TESTS") command = [os.path.join(SPARK_HOME, "dev", "run-pip-tests")] run_cmd(command) def run_build_tests(): set_title_and_block("Running build tests", "BLOCK_BUILD_TESTS") run_cmd([os.path.join(SPARK_HOME, "dev", "test-dependencies.sh")]) def run_sparkr_tests(): set_title_and_block("Running SparkR tests", "BLOCK_SPARKR_UNIT_TESTS") if which("R"): run_cmd([os.path.join(SPARK_HOME, "R", "run-tests.sh")]) else: print("Ignoring SparkR tests as R was not found in PATH") def parse_opts(): parser = ArgumentParser( prog="run-tests" ) parser.add_argument( "-p", "--parallelism", type=int, default=8, help="The number of suites to test in parallel (default %(default)d)" ) args, unknown = parser.parse_known_args() if unknown: parser.error("Unsupported arguments: %s" % ' '.join(unknown)) if args.parallelism < 1: parser.error("Parallelism cannot be less than 1") return args def main(): opts = parse_opts() # Ensure the user home directory (HOME) is valid and is an absolute directory if not USER_HOME or not os.path.isabs(USER_HOME): print("[error] Cannot determine your home directory as an absolute path;", " ensure the $HOME environment variable is set properly.") sys.exit(1) os.chdir(SPARK_HOME) rm_r(os.path.join(SPARK_HOME, "work")) rm_r(os.path.join(USER_HOME, ".ivy2", "local", "org.apache.spark")) rm_r(os.path.join(USER_HOME, ".ivy2", "cache", "org.apache.spark")) os.environ["CURRENT_BLOCK"] = str(ERROR_CODES["BLOCK_GENERAL"]) java_exe = determine_java_executable() if not java_exe: print("[error] Cannot find a version of `java` on the system; please", " install one and retry.") sys.exit(2) # install SparkR if which("R"): run_cmd([os.path.join(SPARK_HOME, "R", "install-dev.sh")]) else: print("Cannot install SparkR as R was not found in PATH") if os.environ.get("AMPLAB_JENKINS"): # if we're on the Amplab Jenkins build servers setup variables # to reflect the environment settings build_tool = os.environ.get("AMPLAB_JENKINS_BUILD_TOOL", "sbt") hadoop_version = os.environ.get("AMPLAB_JENKINS_BUILD_PROFILE", "hadoop2.7") test_env = "amplab_jenkins" # add path for Python3 in Jenkins if we're calling from a Jenkins machine # TODO(sknapp): after all builds are ported to the ubuntu workers, change this to be: # /home/jenkins/anaconda2/envs/py36/bin os.environ["PATH"] = "/home/anaconda/envs/py36/bin:" + os.environ.get("PATH") else: # else we're running locally and can use local settings build_tool = "sbt" hadoop_version = os.environ.get("HADOOP_PROFILE", "hadoop2.7") test_env = "local" print("[info] Using build tool", build_tool, "with Hadoop profile", hadoop_version, "under environment", test_env) changed_modules = None changed_files = None if test_env == "amplab_jenkins" and os.environ.get("AMP_JENKINS_PRB"): target_branch = os.environ["ghprbTargetBranch"] changed_files = identify_changed_files_from_git_commits("HEAD", target_branch=target_branch) changed_modules = determine_modules_for_files(changed_files) excluded_tags = determine_tags_to_exclude(changed_modules) if not 
changed_modules: changed_modules = [modules.root] excluded_tags = [] print("[info] Found the following changed modules:", ", ".join(x.name for x in changed_modules)) # setup environment variables # note - the 'root' module doesn't collect environment variables for all modules. Because the # environment variables should not be set if a module is not changed, even if running the 'root' # module. So here we should use changed_modules rather than test_modules. test_environ = {} for m in changed_modules: test_environ.update(m.environ) setup_test_environ(test_environ) test_modules = determine_modules_to_test(changed_modules) # license checks run_apache_rat_checks() # style checks if not changed_files or any(f.endswith(".scala") or f.endswith("scalastyle-config.xml") for f in changed_files): build_profiles = get_hadoop_profiles(hadoop_version) + modules.root.build_profile_flags run_scala_style_checks(build_profiles) should_run_java_style_checks = False if not changed_files or any(f.endswith(".java") or f.endswith("checkstyle.xml") or f.endswith("checkstyle-suppressions.xml") for f in changed_files): # Run SBT Checkstyle after the build to prevent a side-effect to the build. should_run_java_style_checks = True if not changed_files or any(f.endswith("lint-python") or f.endswith("tox.ini") or f.endswith(".py") for f in changed_files): run_python_style_checks() if not changed_files or any(f.endswith(".R") or f.endswith("lint-r") or f.endswith(".lintr") for f in changed_files): run_sparkr_style_checks() # determine if docs were changed and if we're inside the amplab environment # note - the below commented out until *all* Jenkins workers can get `jekyll` installed # if "DOCS" in changed_modules and test_env == "amplab_jenkins": # build_spark_documentation() if any(m.should_run_build_tests for m in test_modules): run_build_tests() # spark build build_apache_spark(build_tool, hadoop_version) # backwards compatibility checks if build_tool == "sbt": # Note: compatibility tests only supported in sbt for now detect_binary_inop_with_mima(hadoop_version) # Since we did not build assembly/package before running dev/mima, we need to # do it here because the tests still rely on it; see SPARK-13294 for details. build_spark_assembly_sbt(hadoop_version, should_run_java_style_checks) # run the test suites run_scala_tests(build_tool, hadoop_version, test_modules, excluded_tags) modules_with_python_tests = [m for m in test_modules if m.python_test_goals] if modules_with_python_tests: # We only run PySpark tests with coverage report in one specific job with # Spark master with SBT in Jenkins. is_sbt_master_job = "SPARK_MASTER_SBT_HADOOP_2_7" in os.environ run_python_tests( modules_with_python_tests, opts.parallelism, with_coverage=is_sbt_master_job) run_python_packaging_tests() if any(m.should_run_r_tests for m in test_modules): run_sparkr_tests() def _test(): import doctest failure_count = doctest.testmod()[0] if failure_count: sys.exit(-1) if __name__ == "__main__": _test() main()
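# --- Illustrative usage sketch (not part of the original script) -------------
# A minimal, hedged example of how the module-selection helpers above compose
# when deciding what to test for a given change set. The file paths are
# illustrative assumptions; a real run derives them from `git diff` via
# identify_changed_files_from_git_commits().
def _demo_module_selection():
    changed_files = ["python/pyspark/sql/functions.py", "graphx/src/Foo.scala"]
    changed_modules = determine_modules_for_files(changed_files)
    test_modules = determine_modules_to_test(changed_modules)
    excluded_tags = determine_tags_to_exclude(changed_modules)
    print("[demo] changed modules:", ", ".join(m.name for m in changed_modules))
    print("[demo] modules to test:", ", ".join(m.name for m in test_modules))
    print("[demo] excluded test tags:", ", ".join(excluded_tags))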
""" this file does variant calling for DNAseq """ #============= import required packages ================= import os import sys,subprocess sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0) # disable buffer so that in the log file all information is printed in order. from Modules.f00_Message import Message from Modules.f01_list_trim_fq import list_files_human,Trimmomatic from Modules.f02_aligner_command import bwa_vari from Modules.f03_samtools import sam2bam_sort from Modules.f07_picard import markduplicates from Modules.f08_GATK import * from Modules.p01_FileProcess import remove,get_parameters,rg_bams #============= define some parameters =================== """these parameters and read group names are different for different samples, should only change this part for running pipeline """ parFile = sys.argv[1] param = get_parameters(parFile) thread = param['thread'] email = param['email'] startMessage = param['startMessage'] endMessage = param['endMessage'] ref_fa = param['refSequence'] file_path = param['filePath'] bwaDb = param['alignerDb'] trim = param['trim'] phred = param['phred'] picard = param['picard'] trimmomatic = param['trimmomatic'] trimmoAdapter = param['trimmoAdapter'] gold_snp = param['dbSNP'] phaseINDEL= param['phase1INDEL'] gold_indel= param['MillINDEL'] omni = param['omni'] hapmap = param['hapMap'] gatk = param['gatk'] read_group = param['readGroup'] organism = param['organism'] ##***************** Part 0. Build index file for bwa and GATK ****** ##================= Part I. Preprocess ============================ #======== 1. map and dedupping ===================================== #======== (0) enter the directory ======================== os.chdir(file_path) Message(startMessage,email) #======== (1) read files ================================ fastqFiles = list_files_human(file_path) if trim == 'True': fastqFiles = Trimmomatic(trimmomatic,fastqFiles,phred,trimmoAdapter) print 'list file succeed' print 'fastqFiles is: ',fastqFiles #======== (2) define group =============================== #defined above #======== (3) align using bwa ============================ try: map_sam = bwa_vari(read_group,fastqFiles,bwaDb,thread) print 'align succeed' print 'map_sam is: ',map_sam except: print 'align failed' Message('align failed',email) raise #======== (4) Convert sam to sorted bam ================== try: sort_bams = sam2bam_sort(map_sam,thread) print 'sort bam files succeed' print 'sort_bams is: ',sort_bams except: print 'sort bam files failed' Message('sort bam files failed',email) raise #======== (5) Markduplicates using picard ================ try: dedup_files = markduplicates(picard,sort_bams) print 'mark duplicates succeed' print 'dedup_files is: ',dedup_files remove(sort_bams) except: print 'mark duplicates failed' Message('mark duplicates failed',email) raise #======== 2. 
Indel realignment ==================================== #======== (6) Create a target list of intervals=========== try: interval = RealignerTargetCreator(gatk,dedup_files,ref_fa,thread,phaseINDEL,gold_indel) print 'RealignerTarget Creator succeed' print 'interval is: ',interval except: print 'RealignerTarget Creator failed' Message('RealignerTarget Creator failed',email) raise #======== (7) realignment of target intervals ============ try: realign_bams = IndelRealigner(gatk,dedup_files,ref_fa,interval,phaseINDEL,gold_indel) print 'IndexRealigner succeed' print 'realign_bams is: ',realign_bams remove(dedup_files) except: print 'IndelRealigner failed' Message('IndelRealigner failed',email) raise #======== 3. Base quality recalibration ================= roundNum = '1' try: recal_bam_files = BaseRecalibrator(gatk,realign_bams,ref_fa,gold_snp, gold_indel,roundNum,thread) print 'round 1 recalibration succeed' print 'recal_bam_files is: ',recal_bam_files except: print 'round 1 recalibration failed' Message('round 1 recalibration failed',email) raise ##================= Part II. Variant Calling ====================== #======== 1. call raw variant using HaplotypeCaller ===== #======== (1) determine parameters ====================== #======== (2) call variant ============================== #======== !!! merge lanes for the same sample ============ if len(recal_bam_files) !=1: #========= (3) merge samples ========================= try: merged_bams = rg_bams(read_group,recal_bam_files) print 'merged succeed' print 'merged_bams is: ',merged_bams remove(recal_bam_files) except: print 'merged failed' Message('merged failed',email) raise #========= (4) mark duplicates ======================== try: dedup_files = markduplicates(picard,merged_bams) print 'dedup succeed' print 'merged dedup_files is: ',dedup_files remove(merged_bams) except: print 'merged dedup failed' Message('merged dedup failed',email) raise #========= (5) Realignment ============================ try: interval = RealignerTargetCreator(gatk,dedup_files,ref_fa, thread,phaseINDEL,gold_indel) realign_bams = IndelRealigner(gatk,dedup_files,ref_fa, interval,phaseINDEL,gold_indel) print 'merged indelrealigner succeed' print 'merged realign_bams is: ',realign_bams remove(dedup_files) except: print 'merged realign failed' Message('merged realign failed',email) raise #========= (6) call variant ============================== try: raw_gvcf_files = HaplotypeCaller_DNA_gVCF(gatk,realign_bams,ref_fa,thread) print 'merged final call succeed' print 'raw_gvcf_files is:',raw_gvcf_files except: print 'final call failed' Message('final call failed',email) raise #======== (7) Joint Genotyping ========================== try: joint_gvcf_file = JointGenotype(gatk,raw_gvcf_files,ref_fa,organism,thread) print 'final joint succeed' print 'joint_gvcf_file is: ',joint_gvcf_file except: print 'final joint failed' Message('final joint failed',email) raise #======== (8) VQSR ====================================== try: recal_variant = VQSR_human(gatk,joint_gvcf_file,ref_fa,thread,hapmap,omni,phaseINDEL,gold_snp,gold_indel) print 'vcf recalibration succeed' print 'recal_variant is: ',recal_variant except: print 'final vcf recalibration failed' Message('final vcf recalibration failed',email) raise else: # for only one file, just run calling with recalibration bam file #======== Calling variant ================================= try: raw_vcf_file = HaplotypeCaller_DNA_VCF(gatk,recal_bam_files[0],ref_fa,thread) print 'final call succeed' print 'raw_gvcf_files is:',raw_vcf_file 
except: print 'final call failed' Message('final call failed',email) raise #======== Hard filtering ================================== try: final_filtered_files = HardFilter(gatk,raw_vcf_file,ref_fa,thread) print 'final filter succeed' print 'final_filtered_files is: ',final_filtered_files except: print 'final filter failed' Message('final filter failed',email) raise #======== Combine snp and indel =========================== try: combinedVcf = CombineSNPandINDEL(gatk,ref_fa,final_filtered_files,'--assumeIdenticalSamples --genotypemergeoption UNSORTED') print 'combine snp and indel succeed' print 'combineVcf file is: ',combinedVcf except: print 'combine snp and indel failed' raise Message(endMessage,email)
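#============= illustrative parameter sketch (not part of the pipeline) ======
# The pipeline above is driven entirely by the parameter file passed as
# sys.argv[1] and parsed by get_parameters(). The keys below are exactly the
# ones looked up at the top of this script; every value is a placeholder shown
# only to illustrate the expected shape, and the dict name is illustrative.
example_params = {
    'thread': '4', 'email': 'user@example.com',
    'startMessage': 'DNAseq started', 'endMessage': 'DNAseq finished',
    'refSequence': '/path/to/ref.fa', 'filePath': '/path/to/fastq_dir',
    'alignerDb': '/path/to/bwa_index', 'trim': 'True',  # compared to the string 'True' above
    'phred': '33',
    'picard': '/path/to/picard.jar', 'trimmomatic': '/path/to/trimmomatic.jar',
    'trimmoAdapter': '/path/to/adapters.fa', 'dbSNP': '/path/to/dbsnp.vcf',
    'phase1INDEL': '/path/to/1000G_phase1.indels.vcf',
    'MillINDEL': '/path/to/Mills_indels.vcf', 'omni': '/path/to/omni.vcf',
    'hapMap': '/path/to/hapmap.vcf', 'gatk': '/path/to/GenomeAnalysisTK.jar',
    'readGroup': 'sample1_read_group',  # placeholder; exact format is defined by bwa_vari/rg_bams
    'organism': 'human',
}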
# -*- coding: utf-8 -*- ''' Remote package support using ``pkg_add(1)`` .. important:: If you feel that Salt should be using this module to manage packages on a minion, and it is using a different module (or gives an error similar to *'pkg.install' is not available*), see :ref:`here <module-provider-override>`. .. warning:: This module has been completely rewritten. Up to and including version 0.17.0, it supported ``pkg_add(1)``, but checked for the existence of a pkgng local database and, if found, would provide some of pkgng's functionality. The rewrite of this module has removed all pkgng support, and moved it to the :mod:`pkgng <salt.modules.pkgng>` execution module. For versions <= 0.17.0, the documentation here should not be considered accurate. If your Minion is running one of these versions, then the documentation for this module can be viewed using the :mod:`sys.doc <salt.modules.sys.doc>` function: .. code-block:: bash salt bsdminion sys.doc pkg This module acts as the default package provider for FreeBSD 9 and older. If you need to use pkgng on a FreeBSD 9 system, you will need to override the ``pkg`` provider by setting the :conf_minion:`providers` parameter in your Minion config file, in order to use pkgng. .. code-block:: yaml providers: pkg: pkgng More information on pkgng support can be found in the documentation for the :mod:`pkgng <salt.modules.pkgng>` module. This module will respect the ``PACKAGEROOT`` and ``PACKAGESITE`` environment variables, if set, but these values can also be overridden in several ways: 1. :strong:`Salt configuration parameters.` The configuration parameters ``freebsdpkg.PACKAGEROOT`` and ``freebsdpkg.PACKAGESITE`` are recognized. These config parameters are looked up using :mod:`config.get <salt.modules.config.get>` and can thus be specified in the Master config file, Grains, Pillar, or in the Minion config file. Example: .. code-block:: yaml freebsdpkg.PACKAGEROOT: ftp://ftp.freebsd.org/ freebsdpkg.PACKAGESITE: ftp://ftp.freebsd.org/pub/FreeBSD/ports/ia64/packages-9-stable/Latest/ 2. :strong:`CLI arguments.` Both the ``packageroot`` (used interchangeably with ``fromrepo`` for API compatibility) and ``packagesite`` CLI arguments are recognized, and override their config counterparts from section 1 above. .. code-block:: bash salt -G 'os:FreeBSD' pkg.install zsh fromrepo=ftp://ftp2.freebsd.org/ salt -G 'os:FreeBSD' pkg.install zsh packageroot=ftp://ftp2.freebsd.org/ salt -G 'os:FreeBSD' pkg.install zsh packagesite=ftp://ftp2.freebsd.org/pub/FreeBSD/ports/ia64/packages-9-stable/Latest/ .. note:: These arguments can also be passed through in states: .. code-block:: yaml zsh: pkg.installed: - fromrepo: ftp://ftp2.freebsd.org/ ''' from __future__ import absolute_import # Import python libs import copy import logging import re # Import salt libs import salt.utils from salt.exceptions import CommandExecutionError, MinionError import salt.ext.six as six log = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'pkg' def __virtual__(): ''' Load as 'pkg' on FreeBSD versions less than 10. Don't load on FreeBSD 9 when the config option ``providers:pkg`` is set to 'pkgng'. 
''' if __grains__['os'] == 'FreeBSD' and float(__grains__['osrelease']) < 10: providers = {} if 'providers' in __opts__: providers = __opts__['providers'] if providers and 'pkg' in providers and providers['pkg'] == 'pkgng': log.debug('Configuration option \'providers:pkg\' is set to ' '\'pkgng\', won\'t load old provider \'freebsdpkg\'.') return (False, 'The freebsdpkg execution module cannot be loaded: the configuration option \'providers:pkg\' is set to \'pkgng\'') return __virtualname__ return (False, 'The freebsdpkg execution module cannot be loaded: either the os is not FreeBSD or the version of FreeBSD is >= 10.') def _get_repo_options(fromrepo=None, packagesite=None): ''' Return a list of tuples to seed the "env" list, which is used to set environment variables for any pkg_add commands that are spawned. If ``fromrepo`` or ``packagesite`` are None, then their corresponding config parameter will be looked up with config.get. If both ``fromrepo`` and ``packagesite`` are None, and neither freebsdpkg.PACKAGEROOT nor freebsdpkg.PACKAGESITE are specified, then an empty list is returned, and it is assumed that the system defaults (or environment variables) will be used. ''' root = fromrepo if fromrepo is not None \ else __salt__['config.get']('freebsdpkg.PACKAGEROOT', None) site = packagesite if packagesite is not None \ else __salt__['config.get']('freebsdpkg.PACKAGESITE', None) ret = {} if root is not None: ret['PACKAGEROOT'] = root if site is not None: ret['PACKAGESITE'] = site return ret def _match(names): ''' Since pkg_delete requires the full "pkgname-version" string, this function will attempt to match the package name with its version. Returns a list of partial matches and package names that match the "pkgname-version" string required by pkg_delete, and a list of errors encountered. ''' pkgs = list_pkgs(versions_as_list=True) errors = [] # Look for full matches full_pkg_strings = [] out = __salt__['cmd.run_stdout'](['pkg_info'], output_loglevel='trace', python_shell=False) for line in out.splitlines(): try: full_pkg_strings.append(line.split()[0]) except IndexError: continue full_matches = [x for x in names if x in full_pkg_strings] # Look for pkgname-only matches matches = [] ambiguous = [] for name in set(names) - set(full_matches): cver = pkgs.get(name) if cver is not None: if len(cver) == 1: matches.append('{0}-{1}'.format(name, cver[0])) else: ambiguous.append(name) errors.append( 'Ambiguous package \'{0}\'. Full name/version required. ' 'Possible matches: {1}'.format( name, ', '.join(['{0}-{1}'.format(name, x) for x in cver]) ) ) # Find packages that did not match anything not_matched = \ set(names) - set(matches) - set(full_matches) - set(ambiguous) for name in not_matched: errors.append('Package \'{0}\' not found'.format(name)) return matches + full_matches, errors def latest_version(*names, **kwargs): ''' ``pkg_add(1)`` is not capable of querying for remote packages, so this function will always return results as if there is no package available for install or upgrade. CLI Example: .. code-block:: bash salt '*' pkg.latest_version <package name> salt '*' pkg.latest_version <package1> <package2> <package3> ... ''' return '' if len(names) == 1 else dict((x, '') for x in names) # available_version is being deprecated available_version = salt.utils.alias_function(latest_version, 'available_version') def version(*names, **kwargs): ''' Returns a string representing the package version or an empty string if not installed. 
If more than one package name is specified, a dict of name/version pairs is returned. with_origin : False Return a nested dictionary containing both the origin name and version for each specified package. .. versionadded:: 2014.1.0 CLI Example: .. code-block:: bash salt '*' pkg.version <package name> salt '*' pkg.version <package1> <package2> <package3> ... ''' with_origin = kwargs.pop('with_origin', False) ret = __salt__['pkg_resource.version'](*names, **kwargs) if not salt.utils.is_true(with_origin): return ret # Put the return value back into a dict since we're adding a subdict if len(names) == 1: ret = {names[0]: ret} origins = __context__.get('pkg.origin', {}) return dict([ (x, {'origin': origins.get(x, ''), 'version': y}) for x, y in six.iteritems(ret) ]) def refresh_db(): ''' ``pkg_add(1)`` does not use a local database of available packages, so this function simply returns ``True``. it exists merely for API compatibility. CLI Example: .. code-block:: bash salt '*' pkg.refresh_db ''' return True def list_pkgs(versions_as_list=False, with_origin=False, **kwargs): ''' List the packages currently installed as a dict:: {'<package_name>': '<version>'} with_origin : False Return a nested dictionary containing both the origin name and version for each installed package. .. versionadded:: 2014.1.0 CLI Example: .. code-block:: bash salt '*' pkg.list_pkgs ''' versions_as_list = salt.utils.is_true(versions_as_list) # not yet implemented or not applicable if any([salt.utils.is_true(kwargs.get(x)) for x in ('removed', 'purge_desired')]): return {} if 'pkg.list_pkgs' in __context__: ret = copy.deepcopy(__context__['pkg.list_pkgs']) if not versions_as_list: __salt__['pkg_resource.stringify'](ret) if salt.utils.is_true(with_origin): origins = __context__.get('pkg.origin', {}) return dict([ (x, {'origin': origins.get(x, ''), 'version': y}) for x, y in six.iteritems(ret) ]) return ret ret = {} origins = {} out = __salt__['cmd.run_stdout'](['pkg_info', '-ao'], output_loglevel='trace', python_shell=False) pkgs_re = re.compile(r'Information for ([^:]+):\s*Origin:\n([^\n]+)') for pkg, origin in pkgs_re.findall(out): if not pkg: continue try: pkgname, pkgver = pkg.rsplit('-', 1) except ValueError: continue __salt__['pkg_resource.add_pkg'](ret, pkgname, pkgver) origins[pkgname] = origin __salt__['pkg_resource.sort_pkglist'](ret) __context__['pkg.list_pkgs'] = copy.deepcopy(ret) __context__['pkg.origin'] = origins if not versions_as_list: __salt__['pkg_resource.stringify'](ret) if salt.utils.is_true(with_origin): return dict([ (x, {'origin': origins.get(x, ''), 'version': y}) for x, y in six.iteritems(ret) ]) return ret def install(name=None, refresh=False, fromrepo=None, pkgs=None, sources=None, **kwargs): ''' Install package(s) using ``pkg_add(1)`` name The name of the package to be installed. refresh Whether or not to refresh the package database before installing. fromrepo or packageroot Specify a package repository from which to install. Overrides the system default, as well as the PACKAGEROOT environment variable. packagesite Specify the exact directory from which to install the remote package. Overrides the PACKAGESITE environment variable, if present. Multiple Package Installation Options: pkgs A list of packages to install from a software repository. Must be passed as a python list. CLI Example: .. code-block:: bash salt '*' pkg.install pkgs='["foo", "bar"]' sources A list of packages to install. 
Must be passed as a list of dicts, with the keys being package names, and the values being the source URI or local path to the package. CLI Example: .. code-block:: bash salt '*' pkg.install sources='[{"foo": "salt://foo.deb"}, {"bar": "salt://bar.deb"}]' Return a dict containing the new package names and versions:: {'<package>': {'old': '<old-version>', 'new': '<new-version>'}} CLI Example: .. code-block:: bash salt '*' pkg.install <package name> ''' try: pkg_params, pkg_type = __salt__['pkg_resource.parse_targets']( name, pkgs, sources, **kwargs ) except MinionError as exc: raise CommandExecutionError(exc) if not pkg_params: return {} packageroot = kwargs.get('packageroot') if not fromrepo and packageroot: fromrepo = packageroot env = _get_repo_options(fromrepo, kwargs.get('packagesite')) args = [] if pkg_type == 'repository': args.append('-r') # use remote repo args.extend(pkg_params) old = list_pkgs() out = __salt__['cmd.run_all']( ['pkg_add'] + args, env=env, output_loglevel='trace', python_shell=False ) if out['retcode'] != 0 and out['stderr']: errors = [out['stderr']] else: errors = [] __context__.pop('pkg.list_pkgs', None) new = list_pkgs() _rehash() ret = salt.utils.compare_dicts(old, new) if errors: raise CommandExecutionError( 'Problem encountered installing package(s)', info={'errors': errors, 'changes': ret} ) return ret def upgrade(): ''' Upgrades are not supported with ``pkg_add(1)``. This function is included for API compatibility only and always returns an empty dict. CLI Example: .. code-block:: bash salt '*' pkg.upgrade ''' return {} def remove(name=None, pkgs=None, **kwargs): ''' Remove packages using ``pkg_delete(1)`` name The name of the package to be deleted. Multiple Package Options: pkgs A list of packages to delete. Must be passed as a python list. The ``name`` parameter will be ignored if this option is passed. .. versionadded:: 0.16.0 Returns a dict containing the changes. CLI Example: .. code-block:: bash salt '*' pkg.remove <package name> salt '*' pkg.remove <package1>,<package2>,<package3> salt '*' pkg.remove pkgs='["foo", "bar"]' ''' try: pkg_params = __salt__['pkg_resource.parse_targets'](name, pkgs)[0] except MinionError as exc: raise CommandExecutionError(exc) old = list_pkgs() targets, errors = _match([x for x in pkg_params]) for error in errors: log.error(error) if not targets: return {} out = __salt__['cmd.run_all']( ['pkg_delete'] + targets, output_loglevel='trace', python_shell=False ) if out['retcode'] != 0 and out['stderr']: errors = [out['stderr']] else: errors = [] __context__.pop('pkg.list_pkgs', None) new = list_pkgs() ret = salt.utils.compare_dicts(old, new) if errors: raise CommandExecutionError( 'Problem encountered removing package(s)', info={'errors': errors, 'changes': ret} ) return ret # Support pkg.delete to remove packages to more closely match pkg_delete delete = salt.utils.alias_function(remove, 'delete') # No equivalent to purge packages, use remove instead purge = salt.utils.alias_function(remove, 'purge') def _rehash(): ''' Recomputes internal hash table for the PATH variable. Use whenever a new command is created during the current session. ''' shell = __salt__['environ.get']('SHELL') if shell.split('/')[-1] in ('csh', 'tcsh'): __salt__['cmd.shell']('rehash', output_loglevel='trace') def file_list(*packages): ''' List the files that belong to a package. Not specifying any packages will return a list of _every_ file on the system's package database (not generally recommended). CLI Examples: .. 
code-block:: bash salt '*' pkg.file_list httpd salt '*' pkg.file_list httpd postfix salt '*' pkg.file_list ''' ret = file_dict(*packages) files = [] for pkg_files in six.itervalues(ret['files']): files.extend(pkg_files) ret['files'] = files return ret def file_dict(*packages): ''' List the files that belong to a package, grouped by package. Not specifying any packages will return a list of _every_ file on the system's package database (not generally recommended). CLI Examples: .. code-block:: bash salt '*' pkg.file_list httpd salt '*' pkg.file_list httpd postfix salt '*' pkg.file_list ''' errors = [] files = {} if packages: match_pattern = '\'{0}-[0-9]*\'' cmd = ['pkg_info', '-QL'] + [match_pattern.format(p) for p in packages] else: cmd = ['pkg_info', '-QLa'] ret = __salt__['cmd.run_all'](cmd, output_loglevel='trace', python_shell=False) for line in ret['stderr'].splitlines(): errors.append(line) pkg = None for line in ret['stdout'].splitlines(): if pkg is not None and line.startswith('/'): files[pkg].append(line) elif ':/' in line: pkg, fn = line.split(':', 1) pkg, ver = pkg.rsplit('-', 1) files[pkg] = [fn] else: continue # unexpected string return {'errors': errors, 'files': files}
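# --- Illustrative sketch (not part of the original execution module) ---------
# Several functions above (_match, list_pkgs, file_dict) split an installed
# package string such as 'zsh-5.0.8_1' into a name and a version with
# rsplit('-', 1). A minimal, hedged demonstration of that convention; the
# package strings are made-up examples, not output captured from pkg_info.
def _demo_split_pkg_string():
    examples = ['zsh-5.0.8_1', 'gnome-terminal-3.16.2']
    split = {}
    for full in examples:
        # Only the final hyphen separates name from version, so multi-word
        # names such as 'gnome-terminal' stay intact.
        pkgname, pkgver = full.rsplit('-', 1)
        split[pkgname] = pkgver
    return split  # {'zsh': '5.0.8_1', 'gnome-terminal': '3.16.2'}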
# Copyright 2019 The TensorFlow Authors All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Performs image retrieval on Revisited Oxford/Paris datasets.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import argparse import os import sys import time import numpy as np from scipy import spatial from skimage import measure from skimage import transform import tensorflow as tf from google.protobuf import text_format from tensorflow.python.platform import app from delf import aggregation_config_pb2 from delf import datum_io from delf import feature_aggregation_similarity from delf import feature_io from delf.python.detect_to_retrieve import dataset cmd_args = None # Aliases for aggregation types. _VLAD = aggregation_config_pb2.AggregationConfig.VLAD _ASMK = aggregation_config_pb2.AggregationConfig.ASMK _ASMK_STAR = aggregation_config_pb2.AggregationConfig.ASMK_STAR # Extensions. _DELF_EXTENSION = '.delf' _VLAD_EXTENSION_SUFFIX = 'vlad' _ASMK_EXTENSION_SUFFIX = 'asmk' _ASMK_STAR_EXTENSION_SUFFIX = 'asmk_star' # Precision-recall ranks to use in metric computation. _PR_RANKS = (1, 5, 10) # Pace to log. _STATUS_CHECK_LOAD_ITERATIONS = 50 _STATUS_CHECK_GV_ITERATIONS = 10 # Output file names. _METRICS_FILENAME = 'metrics.txt' # Re-ranking / geometric verification parameters. _NUM_TO_RERANK = 100 _FEATURE_DISTANCE_THRESHOLD = 0.9 _NUM_RANSAC_TRIALS = 1000 _MIN_RANSAC_SAMPLES = 3 _RANSAC_RESIDUAL_THRESHOLD = 10 def _ReadAggregatedDescriptors(input_dir, image_list, config): """Reads aggregated descriptors. Args: input_dir: Directory where aggregated descriptors are located. image_list: List of image names for which to load descriptors. config: AggregationConfig used for images. Returns: aggregated_descriptors: List containing #images items, each a 1D NumPy array. visual_words: If using VLAD aggregation, returns an empty list. Otherwise, returns a list containing #images items, each a 1D NumPy array. """ # Compose extension of aggregated descriptors. extension = '.' if config.use_regional_aggregation: extension += 'r' if config.aggregation_type == _VLAD: extension += _VLAD_EXTENSION_SUFFIX elif config.aggregation_type == _ASMK: extension += _ASMK_EXTENSION_SUFFIX elif config.aggregation_type == _ASMK_STAR: extension += _ASMK_STAR_EXTENSION_SUFFIX else: raise ValueError('Invalid aggregation type: %d' % config.aggregation_type) num_images = len(image_list) aggregated_descriptors = [] visual_words = [] print('Starting to collect descriptors for %d images...' 
% num_images) start = time.clock() for i in range(num_images): if i > 0 and i % _STATUS_CHECK_LOAD_ITERATIONS == 0: elapsed = (time.clock() - start) print('Reading descriptors for image %d out of %d, last %d ' 'images took %f seconds' % (i, num_images, _STATUS_CHECK_LOAD_ITERATIONS, elapsed)) start = time.clock() descriptors_filename = image_list[i] + extension descriptors_fullpath = os.path.join(input_dir, descriptors_filename) if config.aggregation_type == _VLAD: aggregated_descriptors.append(datum_io.ReadFromFile(descriptors_fullpath)) else: d, v = datum_io.ReadPairFromFile(descriptors_fullpath) if config.aggregation_type == _ASMK_STAR: d = d.astype('uint8') aggregated_descriptors.append(d) visual_words.append(v) return aggregated_descriptors, visual_words def _MatchFeatures(query_locations, query_descriptors, index_image_locations, index_image_descriptors): """Matches local features using geometric verification. First, finds putative local feature matches by matching `query_descriptors` against a KD-tree from the `index_image_descriptors`. Then, attempts to fit an affine transformation between the putative feature corresponces using their locations. Args: query_locations: Locations of local features for query image. NumPy array of shape [#query_features, 2]. query_descriptors: Descriptors of local features for query image. NumPy array of shape [#query_features, depth]. index_image_locations: Locations of local features for index image. NumPy array of shape [#index_image_features, 2]. index_image_descriptors: Descriptors of local features for index image. NumPy array of shape [#index_image_features, depth]. Returns: score: Number of inliers of match. If no match is found, returns 0. """ num_features_query = query_locations.shape[0] num_features_index_image = index_image_locations.shape[0] if not num_features_query or not num_features_index_image: return 0 # Find nearest-neighbor matches using a KD tree. index_image_tree = spatial.cKDTree(index_image_descriptors) _, indices = index_image_tree.query( query_descriptors, distance_upper_bound=_FEATURE_DISTANCE_THRESHOLD) # Select feature locations for putative matches. query_locations_to_use = np.array([ query_locations[i,] for i in range(num_features_query) if indices[i] != num_features_index_image ]) index_image_locations_to_use = np.array([ index_image_locations[indices[i],] for i in range(num_features_query) if indices[i] != num_features_index_image ]) # If there are no putative matches, early return 0. if not query_locations_to_use.shape[0]: return 0 # Perform geometric verification using RANSAC. _, inliers = measure.ransac( (index_image_locations_to_use, query_locations_to_use), transform.AffineTransform, min_samples=_MIN_RANSAC_SAMPLES, residual_threshold=_RANSAC_RESIDUAL_THRESHOLD, max_trials=_NUM_RANSAC_TRIALS) if inliers is None: inliers = [] return sum(inliers) def _RerankByGeometricVerification(input_ranks, initial_scores, query_name, index_names, query_features_dir, index_features_dir, junk_ids): """Re-ranks retrieval results using geometric verification. Args: input_ranks: 1D NumPy array with indices of top-ranked index images, sorted from the most to the least similar. initial_scores: 1D NumPy array with initial similarity scores between query and index images. Entry i corresponds to score for image i. query_name: Name for query image (string). index_names: List of names for index images (strings). query_features_dir: Directory where query local feature file is located (string). 
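# --- Editor's illustrative sketch (synthetic data, not the DELF pipeline) -----
# _MatchFeatures() needs real .delf feature files, so the helper below replays
# the same two-stage idea on fabricated descriptors and locations: (1) putative
# matches from a KD-tree query with a distance cutoff, (2) RANSAC fitting of an
# affine transform, with the score being the inlier count.  The array sizes are
# arbitrary; the 0.9 / 3 / 10 / 1000 constants mirror the module defaults above.

def _toy_geometric_verification():
    import numpy as np
    from scipy import spatial
    from skimage import measure, transform

    rng = np.random.RandomState(0)
    index_descriptors = rng.rand(50, 40)       # 50 fake 40-D descriptors
    index_locations = rng.rand(50, 2) * 100    # their 2-D keypoint locations

    # Query = same keypoints translated, descriptors slightly perturbed.
    query_descriptors = index_descriptors + rng.normal(scale=0.01,
                                                       size=(50, 40))
    query_locations = index_locations + np.array([5.0, -3.0])

    # 1) Putative matches: unmatched queries get an index equal to the tree size.
    tree = spatial.cKDTree(index_descriptors)
    _, indices = tree.query(query_descriptors, distance_upper_bound=0.9)
    matched = indices != index_descriptors.shape[0]

    # 2) Geometric verification: affine RANSAC, score = number of inliers.
    _, inliers = measure.ransac(
        (index_locations[indices[matched]], query_locations[matched]),
        transform.AffineTransform,
        min_samples=3,
        residual_threshold=10,
        max_trials=1000)
    return int(np.sum(inliers))   # all 50 for this exactly-translated toy geometry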
index_features_dir: Directory where index local feature files are located (string). junk_ids: Set with indices of junk images which should not be considered during re-ranking. Returns: output_ranks: 1D NumPy array with index image indices, sorted from the most to the least similar according to the geometric verification and initial scores. Raises: ValueError: If `input_ranks`, `initial_scores` and `index_names` do not have the same number of entries. """ num_index_images = len(index_names) if len(input_ranks) != num_index_images: raise ValueError('input_ranks and index_names have different number of ' 'elements: %d vs %d' % (len(input_ranks), len(index_names))) if len(initial_scores) != num_index_images: raise ValueError('initial_scores and index_names have different number of ' 'elements: %d vs %d' % (len(initial_scores), len(index_names))) # Filter out junk images from list that will be re-ranked. input_ranks_for_gv = [] for ind in input_ranks: if ind not in junk_ids: input_ranks_for_gv.append(ind) num_to_rerank = min(_NUM_TO_RERANK, len(input_ranks_for_gv)) # Load query image features. query_features_path = os.path.join(query_features_dir, query_name + _DELF_EXTENSION) query_locations, _, query_descriptors, _, _ = feature_io.ReadFromFile( query_features_path) # Initialize list containing number of inliers and initial similarity scores. inliers_and_initial_scores = [] for i in range(num_index_images): inliers_and_initial_scores.append([0, initial_scores[i]]) # Loop over top-ranked images and get results. print('Starting to re-rank') for i in range(num_to_rerank): if i > 0 and i % _STATUS_CHECK_GV_ITERATIONS == 0: print('Re-ranking: i = %d out of %d' % (i, num_to_rerank)) index_image_id = input_ranks_for_gv[i] # Load index image features. index_image_features_path = os.path.join( index_features_dir, index_names[index_image_id] + _DELF_EXTENSION) (index_image_locations, _, index_image_descriptors, _, _) = feature_io.ReadFromFile(index_image_features_path) inliers_and_initial_scores[index_image_id][0] = _MatchFeatures( query_locations, query_descriptors, index_image_locations, index_image_descriptors) # Sort based on (inliers_score, initial_score). def _InliersInitialScoresSorting(k): """Helper function to sort list based on two entries. Args: k: Index into `inliers_and_initial_scores`. Returns: Tuple containing inlier score and initial score. """ return (inliers_and_initial_scores[k][0], inliers_and_initial_scores[k][1]) output_ranks = sorted( range(num_index_images), key=_InliersInitialScoresSorting, reverse=True) return output_ranks def _SaveMetricsFile(mean_average_precision, mean_precisions, mean_recalls, pr_ranks, output_path): """Saves aggregated retrieval metrics to text file. Args: mean_average_precision: Dict mapping each dataset protocol to a float. mean_precisions: Dict mapping each dataset protocol to a NumPy array of floats with shape [len(pr_ranks)]. mean_recalls: Dict mapping each dataset protocol to a NumPy array of floats with shape [len(pr_ranks)]. pr_ranks: List of integers. output_path: Full file path. 
""" with tf.gfile.GFile(output_path, 'w') as f: for k in sorted(mean_average_precision.keys()): f.write('{}\n mAP={}\n mP@k{} {}\n mR@k{} {}\n'.format( k, np.around(mean_average_precision[k] * 100, decimals=2), np.array(pr_ranks), np.around(mean_precisions[k] * 100, decimals=2), np.array(pr_ranks), np.around(mean_recalls[k] * 100, decimals=2))) def main(argv): if len(argv) > 1: raise RuntimeError('Too many command-line arguments.') # Parse dataset to obtain query/index images, and ground-truth. print('Parsing dataset...') query_list, index_list, ground_truth = dataset.ReadDatasetFile( cmd_args.dataset_file_path) num_query_images = len(query_list) num_index_images = len(index_list) (_, medium_ground_truth, hard_ground_truth) = dataset.ParseEasyMediumHardGroundTruth(ground_truth) print('done! Found %d queries and %d index images' % (num_query_images, num_index_images)) # Parse AggregationConfig protos. query_config = aggregation_config_pb2.AggregationConfig() with tf.gfile.GFile(cmd_args.query_aggregation_config_path, 'r') as f: text_format.Merge(f.read(), query_config) index_config = aggregation_config_pb2.AggregationConfig() with tf.gfile.GFile(cmd_args.index_aggregation_config_path, 'r') as f: text_format.Merge(f.read(), index_config) # Read aggregated descriptors. query_aggregated_descriptors, query_visual_words = _ReadAggregatedDescriptors( cmd_args.query_aggregation_dir, query_list, query_config) index_aggregated_descriptors, index_visual_words = _ReadAggregatedDescriptors( cmd_args.index_aggregation_dir, index_list, index_config) # Create similarity computer. similarity_computer = ( feature_aggregation_similarity.SimilarityAggregatedRepresentation( index_config)) # Compute similarity between query and index images, potentially re-ranking # with geometric verification. ranks_before_gv = np.zeros([num_query_images, num_index_images], dtype='int32') if cmd_args.use_geometric_verification: medium_ranks_after_gv = np.zeros([num_query_images, num_index_images], dtype='int32') hard_ranks_after_gv = np.zeros([num_query_images, num_index_images], dtype='int32') for i in range(num_query_images): print('Performing retrieval with query %d (%s)...' % (i, query_list[i])) start = time.clock() # Compute similarity between aggregated descriptors. similarities = np.zeros([num_index_images]) for j in range(num_index_images): similarities[j] = similarity_computer.ComputeSimilarity( query_aggregated_descriptors[i], index_aggregated_descriptors[j], query_visual_words[i], index_visual_words[j]) ranks_before_gv[i] = np.argsort(-similarities) # Re-rank using geometric verification. if cmd_args.use_geometric_verification: medium_ranks_after_gv[i] = _RerankByGeometricVerification( ranks_before_gv[i], similarities, query_list[i], index_list, cmd_args.query_features_dir, cmd_args.index_features_dir, set(medium_ground_truth[i]['junk'])) hard_ranks_after_gv[i] = _RerankByGeometricVerification( ranks_before_gv[i], similarities, query_list[i], index_list, cmd_args.query_features_dir, cmd_args.index_features_dir, set(hard_ground_truth[i]['junk'])) elapsed = (time.clock() - start) print('done! Retrieval for query %d took %f seconds' % (i, elapsed)) # Create output directory if necessary. if not tf.gfile.Exists(cmd_args.output_dir): tf.gfile.MakeDirs(cmd_args.output_dir) # Compute metrics. 
medium_metrics = dataset.ComputeMetrics(ranks_before_gv, medium_ground_truth, _PR_RANKS) hard_metrics = dataset.ComputeMetrics(ranks_before_gv, hard_ground_truth, _PR_RANKS) if cmd_args.use_geometric_verification: medium_metrics_after_gv = dataset.ComputeMetrics(medium_ranks_after_gv, medium_ground_truth, _PR_RANKS) hard_metrics_after_gv = dataset.ComputeMetrics(hard_ranks_after_gv, hard_ground_truth, _PR_RANKS) # Write metrics to file. mean_average_precision_dict = { 'medium': medium_metrics[0], 'hard': hard_metrics[0] } mean_precisions_dict = {'medium': medium_metrics[1], 'hard': hard_metrics[1]} mean_recalls_dict = {'medium': medium_metrics[2], 'hard': hard_metrics[2]} if cmd_args.use_geometric_verification: mean_average_precision_dict.update({ 'medium_after_gv': medium_metrics_after_gv[0], 'hard_after_gv': hard_metrics_after_gv[0] }) mean_precisions_dict.update({ 'medium_after_gv': medium_metrics_after_gv[1], 'hard_after_gv': hard_metrics_after_gv[1] }) mean_recalls_dict.update({ 'medium_after_gv': medium_metrics_after_gv[2], 'hard_after_gv': hard_metrics_after_gv[2] }) _SaveMetricsFile(mean_average_precision_dict, mean_precisions_dict, mean_recalls_dict, _PR_RANKS, os.path.join(cmd_args.output_dir, _METRICS_FILENAME)) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.register('type', 'bool', lambda v: v.lower() == 'true') parser.add_argument( '--index_aggregation_config_path', type=str, default='/tmp/index_aggregation_config.pbtxt', help=""" Path to index AggregationConfig proto text file. This is used to load the aggregated descriptors from the index, and to define the parameters used in computing similarity for aggregated descriptors. """) parser.add_argument( '--query_aggregation_config_path', type=str, default='/tmp/query_aggregation_config.pbtxt', help=""" Path to query AggregationConfig proto text file. This is only used to load the aggregated descriptors for the queries. """) parser.add_argument( '--dataset_file_path', type=str, default='/tmp/gnd_roxford5k.mat', help=""" Dataset file for Revisited Oxford or Paris dataset, in .mat format. """) parser.add_argument( '--index_aggregation_dir', type=str, default='/tmp/index_aggregation', help=""" Directory where index aggregated descriptors are located. """) parser.add_argument( '--query_aggregation_dir', type=str, default='/tmp/query_aggregation', help=""" Directory where query aggregated descriptors are located. """) parser.add_argument( '--use_geometric_verification', type=lambda x: (str(x).lower() == 'true'), default=False, help=""" If True, performs re-ranking using local feature-based geometric verification. """) parser.add_argument( '--index_features_dir', type=str, default='/tmp/index_features', help=""" Only used if `use_geometric_verification` is True. Directory where index local image features are located, all in .delf format. """) parser.add_argument( '--query_features_dir', type=str, default='/tmp/query_features', help=""" Only used if `use_geometric_verification` is True. Directory where query local image features are located, all in .delf format. """) parser.add_argument( '--output_dir', type=str, default='/tmp/retrieval', help=""" Directory where retrieval output will be written to. A file containing metrics for this run is saved therein, with file name "metrics.txt". """) cmd_args, unparsed = parser.parse_known_args() app.run(main=main, argv=[sys.argv[0]] + unparsed)
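# --- Editor's illustrative sketch (made-up scores, not real retrieval output) -
# After geometric verification, _RerankByGeometricVerification() orders index
# images by the pair (inlier count, initial similarity), both descending, via
# the _InliersInitialScoresSorting key.  The snippet below shows that two-key
# sort on four fabricated index images.

inliers_and_initial_scores_example = [
    [0, 0.91],   # image 0: outside the re-ranked top, keeps its initial score
    [34, 0.75],  # image 1: many inliers, so it moves to the front
    [12, 0.80],  # image 2
    [12, 0.60],  # image 3: ties on inliers fall back to the initial score
]

output_ranks_example = sorted(
    range(len(inliers_and_initial_scores_example)),
    key=lambda k: (inliers_and_initial_scores_example[k][0],
                   inliers_and_initial_scores_example[k][1]),
    reverse=True)

print(output_ranks_example)   # [1, 2, 3, 0]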
# Copyright (c) 2008-2011 by Enthought, Inc. # Copyright (c) 2013-2017 Continuum Analytics, Inc. # All rights reserved. from __future__ import absolute_import, unicode_literals import ctypes import logging import os from os.path import isdir, join, exists import pywintypes import sys import locale from .utils import rm_empty_dir, rm_rf from .knownfolders import get_folder_path, FOLDERID # KNOWNFOLDERID does provide a direct path to Quick Launch. No additional path necessary. from .winshortcut import create_shortcut # This allows debugging installer issues using DebugView from Microsoft. OutputDebugString = ctypes.windll.kernel32.OutputDebugStringW OutputDebugString.argtypes = [ctypes.c_wchar_p] class DbgViewHandler(logging.Handler): def emit(self, record): OutputDebugString(self.format(record)) logger = logging.getLogger("menuinst_win32") logger.setLevel(logging.DEBUG) stream_handler = logging.StreamHandler() stream_handler.setLevel(logging.WARNING) dbgview = DbgViewHandler() dbgview.setLevel(logging.DEBUG) logger.addHandler(dbgview) logger.addHandler(stream_handler) # When running as 'nt authority/system' as sometimes people do via SCCM, # various folders do not exist, such as QuickLaunch. This doesn't matter # as we'll use the "system" key finally and check for the "quicklaunch" # subkey before adding any Quick Launch menu items. # It can happen that some of the dirs[] entires refer to folders that do not # exist, in which case, the 2nd entry of the value tuple is a sub-class of # Exception. dirs_src = {"system": { "desktop": get_folder_path(FOLDERID.PublicDesktop), "start": get_folder_path(FOLDERID.CommonPrograms), "documents": get_folder_path(FOLDERID.PublicDocuments), "profile": get_folder_path(FOLDERID.Profile)}, "user": { "desktop": get_folder_path(FOLDERID.Desktop), "start": get_folder_path(FOLDERID.Programs), "quicklaunch": get_folder_path(FOLDERID.QuickLaunch), "documents": get_folder_path(FOLDERID.Documents), "profile": get_folder_path(FOLDERID.Profile)}} def folder_path(preferred_mode, check_other_mode, key): ''' This function implements all heuristics and workarounds for messed up KNOWNFOLDERID registry values. It's also verbose (OutputDebugStringW) about whether fallbacks worked or whether they would have worked if check_other_mode had been allowed. ''' other_mode = 'system' if preferred_mode == 'user' else 'user' path, exception = dirs_src[preferred_mode][key] if not exception: return path logger.info("WARNING: menuinst key: '%s'\n" " path: '%s'\n" " .. excepted with: '%s' in knownfolders.py, implementing workarounds .." % (key, path, type(exception).__name__)) # Since I have seen 'user', 'documents' set as '\\vmware-host\Shared Folders\Documents' # when there's no such server, we check 'user', 'profile' + '\Documents' before maybe # trying the other_mode (though I have chickened out on that idea). if preferred_mode == 'user' and key == 'documents': user_profile, exception = dirs_src['user']['profile'] if not exception: path = join(user_profile, 'Documents') if os.access(path, os.W_OK): logger.info(" .. worked-around to: '%s'" % (path)) return path path, exception = dirs_src[other_mode][key] # Do not fall back to something we cannot write to. if exception: if check_other_mode: logger.info(" .. despite 'check_other_mode'\n" " and 'other_mode' 'path' of '%s'\n" " it excepted with: '%s' in knownfolders.py" % (path, type(exception).__name__)) else: logger.info(" .. 
'check_other_mode' is False,\n" " and 'other_mode' 'path' is '%s'\n" " but it excepted anyway with: '%s' in knownfolders.py" % (path, type(exception).__name__)) return None if not check_other_mode: logger.info(" .. due to lack of 'check_other_mode' not picking\n" " non-excepting path of '%s'\n in knownfolders.py" % (path)) return None return path def quoted(s): """ quotes a string if necessary. """ # strip any existing quotes s = s.strip(u'"') if u' ' in s or u'/' in s: return u'"%s"' % s else: return s def ensure_pad(name, pad="_"): """ Examples: >>> ensure_pad('conda') '_conda_' """ if not name or name[0] == name[-1] == pad: return name else: return "%s%s%s" % (pad, name, pad) def to_unicode(var, codec=locale.getpreferredencoding()): if sys.version_info[0] < 3 and isinstance(var, unicode): return var if not codec: codec = "utf-8" if hasattr(var, "decode"): var = var.decode(codec) return var def to_bytes(var, codec=locale.getpreferredencoding()): if isinstance(var, bytes): return var if not codec: codec="utf-8" if hasattr(var, "encode"): var = var.encode(codec) return var unicode_root_prefix = to_unicode(sys.prefix) if u'\\envs\\' in unicode_root_prefix: logger.warn('menuinst called from non-root env %s', unicode_root_prefix) def substitute_env_variables(text, dir): # When conda is using Menuinst, only the root conda installation ever # calls menuinst. Thus, these calls to sys refer to the root conda # installation, NOT the child environment py_major_ver = sys.version_info[0] py_bitness = 8 * tuple.__itemsize__ env_prefix = to_unicode(dir['prefix']) text = to_unicode(text) env_name = to_unicode(dir['env_name']) for a, b in ( (u'${PREFIX}', env_prefix), (u'${ROOT_PREFIX}', unicode_root_prefix), (u'${PYTHON_SCRIPTS}', os.path.normpath(join(env_prefix, u'Scripts')).replace(u"\\", u"/")), (u'${MENU_DIR}', join(env_prefix, u'Menu')), (u'${PERSONALDIR}', dir['documents']), (u'${USERPROFILE}', dir['profile']), (u'${ENV_NAME}', env_name), (u'${PY_VER}', u'%d' % (py_major_ver)), (u'${PLATFORM}', u"(%s-bit)" % py_bitness), ): if b: text = text.replace(a, b) return text class Menu(object): def __init__(self, name, prefix=unicode_root_prefix, env_name=u"", mode=None): """ Prefix is the system prefix to be used -- this is needed since there is the possibility of a different Python's packages being managed. """ # bytestrings passed in need to become unicode self.prefix = to_unicode(prefix) used_mode = mode if mode else ('user' if exists(join(self.prefix, u'.nonadmin')) else 'system') logger.debug("Menu: name: '%s', prefix: '%s', env_name: '%s', mode: '%s', used_mode: '%s'" % (name, self.prefix, env_name, mode, used_mode)) try: self.set_dir(name, self.prefix, env_name, used_mode) except (WindowsError, pywintypes.error): # We get here if we aren't elevated. This is different from # permissions: a user can have permission, but elevation is still # required. If the process isn't elevated, we get the # WindowsError if 'user' in dirs_src and used_mode == 'system': logger.warn("Insufficient permissions to write menu folder. " "Falling back to user location") try: self.set_dir(name, self.prefix, env_name, 'user') except: pass else: logger.fatal("Unable to create AllUsers menu folder") def set_dir(self, name, prefix, env_name, mode): self.mode = mode self.dir = dict() # I have chickened out on allowing check_other_mode. 
Really there needs # to be 3 distinct cases that 'menuinst' cares about: # priv-user doing system install # priv-user doing user-only install # non-priv-user doing user-only install # (priv-user only exists in an AllUsers installation). check_other_mode = False for k, v in dirs_src[mode].items(): # We may want to cache self.dir to some files, one for AllUsers # (system) installs and one for each subsequent user install? self.dir[k] = folder_path(mode, check_other_mode, k) self.dir['prefix'] = prefix self.dir['env_name'] = env_name folder_name = substitute_env_variables(name, self.dir) self.path = join(self.dir["start"], folder_name) self.create() def create(self): if not isdir(self.path): os.mkdir(self.path) def remove(self): rm_empty_dir(self.path) def extend_script_args(args, shortcut): try: args.append(shortcut['scriptargument']) except KeyError: pass try: args.extend(shortcut['scriptarguments']) except KeyError: pass def quote_args(args): # cmd.exe /K or /C expects a single string argument and requires # doubled-up quotes when any sub-arguments have spaces: # https://stackoverflow.com/a/6378038/3257826 if (len(args) > 2 and ("CMD.EXE" in args[0].upper() or "%COMSPEC%" in args[0].upper()) and (args[1].upper() == '/K' or args[1].upper() == '/C') and any(' ' in arg for arg in args[2:]) ): args = [ ensure_pad(args[0], '"'), # cmd.exe args[1], # /K or /C '"%s"' % (' '.join(ensure_pad(arg, '"') for arg in args[2:])), # double-quoted ] else: args = [quoted(arg) for arg in args] return args class ShortCut(object): def __init__(self, menu, shortcut): self.menu = menu self.shortcut = shortcut def remove(self): self.create(remove=True) def create(self, remove=False): # Substitute env variables early because we may need to escape spaces in the value. args = [] fix_win_slashes = [0] prefix = self.menu.prefix.replace('/', '\\') root_py = join(unicode_root_prefix, u"python.exe") root_pyw = join(unicode_root_prefix, u"pythonw.exe") env_py = join(prefix, u"python.exe") env_pyw = join(prefix, u"pythonw.exe") cwp_py = [root_py, join(unicode_root_prefix, u'cwp.py'), prefix, env_py] cwp_pyw = [root_pyw, join(unicode_root_prefix, u'cwp.py'), prefix, env_pyw] if "pywscript" in self.shortcut: args = cwp_pyw fix_win_slashes = [len(args)] args += self.shortcut["pywscript"].split() elif "pyscript" in self.shortcut: args = cwp_py fix_win_slashes = [len(args)] args += self.shortcut["pyscript"].split() elif "webbrowser" in self.shortcut: args = [root_pyw, '-m', 'webbrowser', '-t', self.shortcut['webbrowser']] elif "script" in self.shortcut: # It is unclear whether running through cwp.py is what we want here. In # the long term I would rather this was made an explicit choice. 
args = [root_py, join(unicode_root_prefix, u'cwp.py'), prefix] fix_win_slashes = [len(args)] args += self.shortcut["script"].split() extend_script_args(args, self.shortcut) elif "system" in self.shortcut: args = self.shortcut["system"].split() extend_script_args(args, self.shortcut) else: raise Exception("Nothing to do: %r" % self.shortcut) args = [substitute_env_variables(arg, self.menu.dir) for arg in args] for fws in fix_win_slashes: args[fws] = args[fws].replace('/', '\\') args = quote_args(args) cmd = args[0] args = args[1:] logger.debug('Shortcut cmd is %s, args are %s' % (cmd, args)) workdir = self.shortcut.get('workdir', '') icon = self.shortcut.get('icon', '') workdir = substitute_env_variables(workdir, self.menu.dir) icon = substitute_env_variables(icon, self.menu.dir) # Fix up the '/' to '\' workdir = workdir.replace('/', '\\') icon = icon.replace('/', '\\') # Create the working directory if it doesn't exist if workdir: if not isdir(workdir): os.makedirs(workdir) else: workdir = '%HOMEPATH%' # Menu link dst_dirs = [self.menu.path] # Desktop link if self.shortcut.get('desktop'): dst_dirs.append(self.menu.dir['desktop']) # Quicklaunch link if self.shortcut.get('quicklaunch') and 'quicklaunch' in self.menu.dir: dst_dirs.append(self.menu.dir['quicklaunch']) name_suffix = " ({})".format(self.menu.dir['env_name']) if self.menu.dir['env_name'] else "" for dst_dir in dst_dirs: dst = join(dst_dir, self.shortcut['name'] + name_suffix + '.lnk') if remove: rm_rf(dst) else: # The API for the call to 'create_shortcut' has 3 # required arguments (path, description and filename) # and 4 optional ones (args, working_dir, icon_path and # icon_index). create_shortcut( u'' + cmd, u'' + self.shortcut['name'] + name_suffix, u'' + dst, u' '.join(arg for arg in args), u'' + workdir, u'' + icon, )
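# --- Editor's illustrative sketch (hypothetical paths, not a real shortcut) ---
# quote_args() above only special-cases cmd.exe /K (or /C): cmd.exe expects a
# single argument after the switch, with doubled-up quotes around any
# sub-argument that contains spaces (see the Stack Overflow link cited in the
# code).  The helper below reproduces that rule in isolation; the .bat path and
# "my env" name are invented for the example.

def quote_cmd_exe_args(args):
    """Sketch of the quote_args() rule for a cmd.exe /K or /C invocation."""
    def pad(s, ch='"'):
        return s if s.startswith(ch) and s.endswith(ch) else ch + s + ch

    if (len(args) > 2
            and 'CMD.EXE' in args[0].upper()
            and args[1].upper() in ('/K', '/C')
            and any(' ' in a for a in args[2:])):
        # One doubled-up-quoted string after /K or /C.
        return [pad(args[0]), args[1],
                '"%s"' % ' '.join(pad(a) for a in args[2:])]
    return ['"%s"' % a if ' ' in a else a for a in args]


print(quote_cmd_exe_args(
    ['C:\\Windows\\System32\\cmd.exe', '/K',
     'C:\\Program Files\\Example\\activate.bat', 'my env']))
# ['"C:\\Windows\\System32\\cmd.exe"', '/K',
#  '""C:\\Program Files\\Example\\activate.bat" "my env""']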
# -*- coding: utf-8 -*- # Copyright 2022 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import warnings from typing import Callable, Dict, Optional, Sequence, Tuple, Union from google.api_core import grpc_helpers from google.api_core import operations_v1 from google.api_core import gapic_v1 import google.auth # type: ignore from google.auth import credentials as ga_credentials # type: ignore from google.auth.transport.grpc import SslCredentials # type: ignore import grpc # type: ignore from google.cloud.dialogflowcx_v3.types import agent from google.cloud.dialogflowcx_v3.types import agent as gcdc_agent from google.longrunning import operations_pb2 # type: ignore from google.protobuf import empty_pb2 # type: ignore from .base import AgentsTransport, DEFAULT_CLIENT_INFO class AgentsGrpcTransport(AgentsTransport): """gRPC backend transport for Agents. Service for managing [Agents][google.cloud.dialogflow.cx.v3.Agent]. This class defines the same methods as the primary client, so the primary client can load the underlying transport implementation and call it. It sends protocol buffers over the wire using gRPC (which is built on top of HTTP/2); the ``grpcio`` package must be installed. """ _stubs: Dict[str, Callable] def __init__( self, *, host: str = "dialogflow.googleapis.com", credentials: ga_credentials.Credentials = None, credentials_file: str = None, scopes: Sequence[str] = None, channel: grpc.Channel = None, api_mtls_endpoint: str = None, client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, ssl_channel_credentials: grpc.ChannelCredentials = None, client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, quota_project_id: Optional[str] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, always_use_jwt_access: Optional[bool] = False, ) -> None: """Instantiate the transport. Args: host (Optional[str]): The hostname to connect to. credentials (Optional[google.auth.credentials.Credentials]): The authorization credentials to attach to requests. These credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. This argument is ignored if ``channel`` is provided. credentials_file (Optional[str]): A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. This argument is ignored if ``channel`` is provided. scopes (Optional(Sequence[str])): A list of scopes. This argument is ignored if ``channel`` is provided. channel (Optional[grpc.Channel]): A ``Channel`` instance through which to make calls. api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. If provided, it overrides the ``host`` argument and tries to create a mutual TLS channel with client SSL credentials from ``client_cert_source`` or application default SSL credentials. client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): Deprecated. A callback to provide client SSL certificate bytes and private key bytes, both in PEM format. 
It is ignored if ``api_mtls_endpoint`` is None. ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials for the grpc channel. It is ignored if ``channel`` is provided. client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): A callback to provide client certificate bytes and private key bytes, both in PEM format. It is used to configure a mutual TLS channel. It is ignored if ``channel`` or ``ssl_channel_credentials`` is provided. quota_project_id (Optional[str]): An optional project to use for billing and quota. client_info (google.api_core.gapic_v1.client_info.ClientInfo): The client info used to send a user-agent string along with API requests. If ``None``, then default info will be used. Generally, you only need to set this if you're developing your own client library. always_use_jwt_access (Optional[bool]): Whether self signed JWT should be used for service account credentials. Raises: google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport creation failed for any reason. google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` and ``credentials_file`` are passed. """ self._grpc_channel = None self._ssl_channel_credentials = ssl_channel_credentials self._stubs: Dict[str, Callable] = {} self._operations_client: Optional[operations_v1.OperationsClient] = None if api_mtls_endpoint: warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) if client_cert_source: warnings.warn("client_cert_source is deprecated", DeprecationWarning) if channel: # Ignore credentials if a channel was passed. credentials = False # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None else: if api_mtls_endpoint: host = api_mtls_endpoint # Create SSL credentials with client_cert_source or application # default SSL credentials. if client_cert_source: cert, key = client_cert_source() self._ssl_channel_credentials = grpc.ssl_channel_credentials( certificate_chain=cert, private_key=key ) else: self._ssl_channel_credentials = SslCredentials().ssl_credentials else: if client_cert_source_for_mtls and not ssl_channel_credentials: cert, key = client_cert_source_for_mtls() self._ssl_channel_credentials = grpc.ssl_channel_credentials( certificate_chain=cert, private_key=key ) # The base transport sets the host, credentials and scopes super().__init__( host=host, credentials=credentials, credentials_file=credentials_file, scopes=scopes, quota_project_id=quota_project_id, client_info=client_info, always_use_jwt_access=always_use_jwt_access, ) if not self._grpc_channel: self._grpc_channel = type(self).create_channel( self._host, # use the credentials which are saved credentials=self._credentials, # Set ``credentials_file`` to ``None`` here as # the credentials that we saved earlier should be used. credentials_file=None, scopes=self._scopes, ssl_credentials=self._ssl_channel_credentials, quota_project_id=quota_project_id, options=[ ("grpc.max_send_message_length", -1), ("grpc.max_receive_message_length", -1), ], ) # Wrap messages. This must be done after self._grpc_channel exists self._prep_wrapped_messages(client_info) @classmethod def create_channel( cls, host: str = "dialogflow.googleapis.com", credentials: ga_credentials.Credentials = None, credentials_file: str = None, scopes: Optional[Sequence[str]] = None, quota_project_id: Optional[str] = None, **kwargs, ) -> grpc.Channel: """Create and return a gRPC channel object. Args: host (Optional[str]): The host for the channel to use. 
credentials (Optional[~.Credentials]): The authorization credentials to attach to requests. These credentials identify this application to the service. If none are specified, the client will attempt to ascertain the credentials from the environment. credentials_file (Optional[str]): A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. This argument is mutually exclusive with credentials. scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. These are only used when credentials are not specified and are passed to :func:`google.auth.default`. quota_project_id (Optional[str]): An optional project to use for billing and quota. kwargs (Optional[dict]): Keyword arguments, which are passed to the channel creation. Returns: grpc.Channel: A gRPC channel object. Raises: google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` and ``credentials_file`` are passed. """ return grpc_helpers.create_channel( host, credentials=credentials, credentials_file=credentials_file, quota_project_id=quota_project_id, default_scopes=cls.AUTH_SCOPES, scopes=scopes, default_host=cls.DEFAULT_HOST, **kwargs, ) @property def grpc_channel(self) -> grpc.Channel: """Return the channel designed to connect to this service. """ return self._grpc_channel @property def operations_client(self) -> operations_v1.OperationsClient: """Create the client designed to process long-running operations. This property caches on the instance; repeated calls return the same client. """ # Quick check: Only create a new client if we do not already have one. if self._operations_client is None: self._operations_client = operations_v1.OperationsClient(self.grpc_channel) # Return the client from cache. return self._operations_client @property def list_agents( self, ) -> Callable[[agent.ListAgentsRequest], agent.ListAgentsResponse]: r"""Return a callable for the list agents method over gRPC. Returns the list of all agents in the specified location. Returns: Callable[[~.ListAgentsRequest], ~.ListAgentsResponse]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "list_agents" not in self._stubs: self._stubs["list_agents"] = self.grpc_channel.unary_unary( "/google.cloud.dialogflow.cx.v3.Agents/ListAgents", request_serializer=agent.ListAgentsRequest.serialize, response_deserializer=agent.ListAgentsResponse.deserialize, ) return self._stubs["list_agents"] @property def get_agent(self) -> Callable[[agent.GetAgentRequest], agent.Agent]: r"""Return a callable for the get agent method over gRPC. Retrieves the specified agent. Returns: Callable[[~.GetAgentRequest], ~.Agent]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "get_agent" not in self._stubs: self._stubs["get_agent"] = self.grpc_channel.unary_unary( "/google.cloud.dialogflow.cx.v3.Agents/GetAgent", request_serializer=agent.GetAgentRequest.serialize, response_deserializer=agent.Agent.deserialize, ) return self._stubs["get_agent"] @property def create_agent( self, ) -> Callable[[gcdc_agent.CreateAgentRequest], gcdc_agent.Agent]: r"""Return a callable for the create agent method over gRPC. 
Creates an agent in the specified location. Note: You should always train flows prior to sending them queries. See the `training documentation <https://cloud.google.com/dialogflow/cx/docs/concept/training>`__. Returns: Callable[[~.CreateAgentRequest], ~.Agent]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "create_agent" not in self._stubs: self._stubs["create_agent"] = self.grpc_channel.unary_unary( "/google.cloud.dialogflow.cx.v3.Agents/CreateAgent", request_serializer=gcdc_agent.CreateAgentRequest.serialize, response_deserializer=gcdc_agent.Agent.deserialize, ) return self._stubs["create_agent"] @property def update_agent( self, ) -> Callable[[gcdc_agent.UpdateAgentRequest], gcdc_agent.Agent]: r"""Return a callable for the update agent method over gRPC. Updates the specified agent. Note: You should always train flows prior to sending them queries. See the `training documentation <https://cloud.google.com/dialogflow/cx/docs/concept/training>`__. Returns: Callable[[~.UpdateAgentRequest], ~.Agent]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "update_agent" not in self._stubs: self._stubs["update_agent"] = self.grpc_channel.unary_unary( "/google.cloud.dialogflow.cx.v3.Agents/UpdateAgent", request_serializer=gcdc_agent.UpdateAgentRequest.serialize, response_deserializer=gcdc_agent.Agent.deserialize, ) return self._stubs["update_agent"] @property def delete_agent(self) -> Callable[[agent.DeleteAgentRequest], empty_pb2.Empty]: r"""Return a callable for the delete agent method over gRPC. Deletes the specified agent. Returns: Callable[[~.DeleteAgentRequest], ~.Empty]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "delete_agent" not in self._stubs: self._stubs["delete_agent"] = self.grpc_channel.unary_unary( "/google.cloud.dialogflow.cx.v3.Agents/DeleteAgent", request_serializer=agent.DeleteAgentRequest.serialize, response_deserializer=empty_pb2.Empty.FromString, ) return self._stubs["delete_agent"] @property def export_agent( self, ) -> Callable[[agent.ExportAgentRequest], operations_pb2.Operation]: r"""Return a callable for the export agent method over gRPC. Exports the specified agent to a binary file. This method is a `long-running operation <https://cloud.google.com/dialogflow/cx/docs/how/long-running-operation>`__. The returned ``Operation`` type has the following method-specific fields: - ``metadata``: An empty `Struct message <https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#struct>`__ - ``response``: [ExportAgentResponse][google.cloud.dialogflow.cx.v3.ExportAgentResponse] Returns: Callable[[~.ExportAgentRequest], ~.Operation]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "export_agent" not in self._stubs: self._stubs["export_agent"] = self.grpc_channel.unary_unary( "/google.cloud.dialogflow.cx.v3.Agents/ExportAgent", request_serializer=agent.ExportAgentRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, ) return self._stubs["export_agent"] @property def restore_agent( self, ) -> Callable[[agent.RestoreAgentRequest], operations_pb2.Operation]: r"""Return a callable for the restore agent method over gRPC. Restores the specified agent from a binary file. Replaces the current agent with a new one. Note that all existing resources in agent (e.g. intents, entity types, flows) will be removed. This method is a `long-running operation <https://cloud.google.com/dialogflow/cx/docs/how/long-running-operation>`__. The returned ``Operation`` type has the following method-specific fields: - ``metadata``: An empty `Struct message <https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#struct>`__ - ``response``: An `Empty message <https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#empty>`__ Note: You should always train flows prior to sending them queries. See the `training documentation <https://cloud.google.com/dialogflow/cx/docs/concept/training>`__. Returns: Callable[[~.RestoreAgentRequest], ~.Operation]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "restore_agent" not in self._stubs: self._stubs["restore_agent"] = self.grpc_channel.unary_unary( "/google.cloud.dialogflow.cx.v3.Agents/RestoreAgent", request_serializer=agent.RestoreAgentRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, ) return self._stubs["restore_agent"] @property def validate_agent( self, ) -> Callable[[agent.ValidateAgentRequest], agent.AgentValidationResult]: r"""Return a callable for the validate agent method over gRPC. Validates the specified agent and creates or updates validation results. The agent in draft version is validated. Please call this API after the training is completed to get the complete validation results. Returns: Callable[[~.ValidateAgentRequest], ~.AgentValidationResult]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "validate_agent" not in self._stubs: self._stubs["validate_agent"] = self.grpc_channel.unary_unary( "/google.cloud.dialogflow.cx.v3.Agents/ValidateAgent", request_serializer=agent.ValidateAgentRequest.serialize, response_deserializer=agent.AgentValidationResult.deserialize, ) return self._stubs["validate_agent"] @property def get_agent_validation_result( self, ) -> Callable[[agent.GetAgentValidationResultRequest], agent.AgentValidationResult]: r"""Return a callable for the get agent validation result method over gRPC. Gets the latest agent validation result. Agent validation is performed when ValidateAgent is called. Returns: Callable[[~.GetAgentValidationResultRequest], ~.AgentValidationResult]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. 
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "get_agent_validation_result" not in self._stubs:
            self._stubs["get_agent_validation_result"] = self.grpc_channel.unary_unary(
                "/google.cloud.dialogflow.cx.v3.Agents/GetAgentValidationResult",
                request_serializer=agent.GetAgentValidationResultRequest.serialize,
                response_deserializer=agent.AgentValidationResult.deserialize,
            )
        return self._stubs["get_agent_validation_result"]

    def close(self):
        self.grpc_channel.close()


__all__ = ("AgentsGrpcTransport",)
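# --- Editor's illustrative sketch (placeholder names, no gRPC dependency) -----
# Each RPC property on AgentsGrpcTransport builds its gRPC stub lazily on first
# access and caches it in self._stubs, so repeated attribute access reuses one
# callable.  The toy class below shows just that caching pattern; the lambda
# "stub factory" is a stand-in for grpc_channel.unary_unary and is not part of
# the Dialogflow API.

class _LazyStubExample:
    def __init__(self, make_stub):
        self._make_stub = make_stub     # stand-in for grpc_channel.unary_unary
        self._stubs = {}

    @property
    def list_agents(self):
        # Create the callable once, then reuse the cached instance.
        if "list_agents" not in self._stubs:
            self._stubs["list_agents"] = self._make_stub("ListAgents")
        return self._stubs["list_agents"]


_transport = _LazyStubExample(lambda rpc: (lambda request: "%s(%s)" % (rpc, request)))
print(_transport.list_agents("req"))                     # ListAgents(req)
print(_transport.list_agents is _transport.list_agents)  # True -> cached, not rebuilt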
#!/usr/bin/env python # -*- coding: utf-8 - # # This file is part of the restpose python module, released under the MIT # license. See the COPYING file for more information. """ An example of implemnting a search for the nearest item. This example requires the "restpose" and "flask" python modules to be installed. To load some sample data in:: python find_my_nearest.py csv find_my_nearest_data.csv To perform a search:: python find_my_nearest.py search lat=51.5357 lon=-0.1557318936202792 type=zoo type=tv To start the server running: python serve # add a "debug" parameter to start in debug mode. When the server is running, go to http://localhost:5000/ to get a form for entering some example searches, and go to http://localhost:5000/search to get the search results (as JSON). Note that the form is limited in the types of search it can create - any label fields can be searched, and can be searched for multiple values by supplying them multiple times. Results are returned in order of distance from the supplied point, closest first, filtered to match the supplied labels. """ import csv import flask import json import restpose import sys class Context(object): def __init__(self): self.collection = restpose.Server().collection('find_my_nearest') self.places = self.collection.doc_type('place') def add_item(self, id, name, description, url, postcode, geolocation, fbid, labels): """Add an item to the collection (or update an old item). :param id: A unique identifier for the item (an arbitrary string) :param name: A name for the item (plain text, no length limit). :param description: A description for the item (plain text, no length limit). :param postcode: A postcode for the item. :param geolocation: A [latitude, longitude] pair for the item. :param fbid: An identifier for the item on facebook. :param labels: A dictionary of extra labels to associate with the item. """ fields = dict(id=id, name_text=name, description_text=description, url=url, postcode_tag=postcode, fbid_tag=fbid) fields['lonlat'] = dict(lat=geolocation[0], lon=geolocation[1]) for k, v in labels.iteritems(): fields[k + 'label_tag'] = v self.places.add_doc(fields) def import_from_rows(self, rows): """Import some data from an iterator over rows. This can be used, for example, to import data from CSV. Each row should contain the following: - ID: the identifier for the item (an arbitrary string). - name: A name for the item (plain text, no length limit). - description: A description for the item (plain text, no length limit). - postcode: A postcode for the item. - latitude: The latitude of the item (as a decimal). - longitude: The longitude of the item (as a decimal). - fbid: An identifier for the item on facebook (an arbitrary string). - labels: A semi-colon separated list of labels for the item. Each label has a category and a value, separated by '='. For example, "activity=tour;activity=feeding". Each category can appear multiple times, or not at all. Returns a list of any errors which occurred while importing. """ for row in rows: (id, name, description, url, postcode, latitude, longitude, fbid, labels) = row latitude = float(latitude) longitude = float(longitude) # unpack the labels unpacked_labels = {} for label in labels.split(';'): cat, val = label.split('=') unpacked_labels.setdefault(cat.strip(), []).append(val.strip()) self.add_item(id, name, description, url, postcode, [latitude, longitude], fbid, unpacked_labels) return self.collection.checkpoint().wait().errors def clear(self): """Clear the entire collection. 
""" self.collection.delete() def search(self, **kwargs): """Perform a search. kwargs contains the parameters to search for; the values should always be lists of values to search for. """ # Ensure that values are lists for k, v in kwargs.items(): if (not hasattr(v, '__iter__') and not isinstance(v, basestring)): v = [v] kwargs[k] = v if len(v) == 0: del kwargs[k] # Parse lat, lon parameters lat = filter(lambda x: x != '', kwargs.get('lat', [])) lon = filter(lambda x: x != '', kwargs.get('lon', [])) if len(lat) != 0 or len(lon) != 0: if len(lat) == 0 or len(lon) == 0: raise ValueError('Must supply both lat and lon parameters ' 'if either supplied') if len(lat) != 1: raise ValueError('Must supply exactly 1 latitude parameter') if len(lon) != 1: raise ValueError('Must supply exactly 1 longitude parameter') lat = float(lat[0]) lon = float(lon[0]) q = self.places.field('lonlat').distscore(dict(lat=lat, lon=lon)) else: q = self.places.all() # Handle any label filters for k, v in kwargs.items(): if k in ('lat', 'lon', 'search'): continue if len(v) == 0 or v[0] == '': continue field = self.places.field(k + 'label_tag') q = q.filter(field.is_in(v)) print q._build_search() # Tidy up the result structures. results = [] for item in q: data = {} for k, v in item.data.items(): if k.endswith('label_tag'): k = k[:-9] # Labels can occur multiple times, so leave their value as # a list. elif k == 'type': # Don't want to return the type of the item continue else: if k.endswith('_tag'): k = k[:-4] if k.endswith('_text'): k = k[:-5] assert len(v) == 1 v = v[0] data[k] = v results.append(data) return results def do_cmd(cmd, args): """Some command line actions. - clear: Clear the database - csv <filename>: Load some data from a CSV file. - search <param=val> <param=val>: Perform a search, using specified parameters. """ if cmd == 'clear': context = Context() context.clear() elif cmd == 'csv': context = Context() filename = args[0] with open(filename, "r") as fd: reader = csv.reader(fd, dialect='excel') errors = context.import_from_rows(reader) if errors: print "Errors while indexing", errors print "Collection now contains %d places" % ( context.places.all().matches_estimated ) elif cmd == 'search': context = Context() params = {} for arg in args: k, v = arg.split('=') params.setdefault(k.strip(), []).append(v.strip()) print context.search(**params) elif cmd == 'serve': context = Context() app = flask.Flask('find_my_nearest') if len(args) > 0 and args[0] == 'debug': app.debug = True @app.route('/search') def search(): kwargs = dict(flask.request.args.lists()) try: return flask.jsonify(dict(results=context.search(**kwargs))) except ValueError, e: return flask.jsonify(dict(error=str(e))) @app.route('/') def top(): return '''<?doctype html><html> <head><title>Search</title></head><body> <form action="/search" method="GET"> <label for="lat">Latitude:</label> <input type="text" id="lat" name="lat" placeholder="Latitude"> <br> <label for="lon">Longitude:</label> <input type="text" id="lon" name="lon" placeholder="Longitude"> <br> Some example labels:<br> <label for="type">Type:</label> <input type="text" id="type" name="type" placeholder="Type; eg. tv"> <br> <label for="activity">Activity:</label> <input type="text" id="activity" name="activity" placeholder="Activity; eg. tour"> <br> <input type="submit" name="search" value="Search"> </form></body></html> ''' if app.debug: app.run() else: app.run(host='0.0.0.0') else: print "Unknown command" if __name__ == '__main__': do_cmd(sys.argv[1], sys.argv[2:])
# epydoc -- Introspection # # Copyright (C) 2005 Edward Loper # Author: Edward Loper <edloper@loper.org> # URL: <http://epydoc.sf.net> # # $Id: docintrospecter.py 1678 2008-01-29 17:21:29Z edloper $ """ Extract API documentation about python objects by directly introspecting their values. The function L{introspect_docs()}, which provides the main interface of this module, examines a Python objects via introspection, and uses the information it finds to create an L{APIDoc} objects containing the API documentation for that objects. The L{register_introspecter()} method can be used to extend the functionality of C{docintrospector}, by providing methods that handle special value types. """ __docformat__ = 'epytext en' ###################################################################### ## Imports ###################################################################### import inspect, re, sys, os.path, imp # API documentation encoding: from epydoc.apidoc import * # Type comparisons: from types import * # Error reporting: from epydoc import log # Helper functions: from epydoc.util import * # For extracting encoding for docstrings: import epydoc.docparser # Builtin values import __builtin__ # Backwards compatibility from epydoc.compat import * ###################################################################### ## Caches ###################################################################### _valuedoc_cache = {} """A cache containing the API documentation for values that we've already seen. This cache is implemented as a dictionary that maps a value's pyid to its L{ValueDoc}. Note that if we encounter a value but decide not to introspect it (because it's imported from another module), then C{_valuedoc_cache} will contain an entry for the value, but the value will not be listed in L{_introspected_values}.""" _introspected_values = {} """A record which values we've introspected, encoded as a dictionary from pyid to C{bool}.""" def clear_cache(): """ Discard any cached C{APIDoc} values that have been computed for introspected values. """ _valuedoc_cache.clear() _introspected_values.clear() ###################################################################### ## Introspection ###################################################################### def introspect_docs(value=None, name=None, filename=None, context=None, is_script=False, module_name=None): """ Generate the API documentation for a specified object by introspecting Python values, and return it as a L{ValueDoc}. The object to generate documentation for may be specified using the C{value} parameter, the C{filename} parameter, I{or} the C{name} parameter. (It is an error to specify more than one of these three parameters, or to not specify any of them.) @param value: The python object that should be documented. @param filename: The name of the file that contains the python source code for a package, module, or script. If C{filename} is specified, then C{introspect} will return a C{ModuleDoc} describing its contents. @param name: The fully-qualified python dotted name of any value (including packages, modules, classes, and functions). C{DocParser} will automatically figure out which module(s) it needs to import in order to find the documentation for the specified object. @param context: The API documentation for the class of module that contains C{value} (if available). @param module_name: The name of the module where the value is defined. 
Useful to retrieve the docstring encoding if there is no way to detect the module by introspection (such as in properties) """ if value is None and name is not None and filename is None: value = get_value_from_name(DottedName(name)) elif value is None and name is None and filename is not None: if is_script: value = get_value_from_scriptname(filename) else: value = get_value_from_filename(filename, context) elif name is None and filename is None: # it's ok if value is None -- that's a value, after all. pass else: raise ValueError("Expected exactly one of the following " "arguments: value, name, filename") pyid = id(value) # If we've already introspected this value, then simply return # its ValueDoc from our cache. if pyid in _introspected_values: # If the file is a script, then adjust its name. if is_script and filename is not None: _valuedoc_cache[pyid].canonical_name = DottedName( munge_script_name(str(filename))) return _valuedoc_cache[pyid] # Create an initial value doc for this value & add it to the cache. val_doc = _get_valuedoc(value) # Introspect the value. _introspected_values[pyid] = True introspect_func = _get_introspecter(value) introspect_func(value, val_doc, module_name=module_name) # Set canonical name, if it was given if val_doc.canonical_name is UNKNOWN and name is not None: val_doc.canonical_name = DottedName(name) # If the file is a script, then adjust its name. if is_script and filename is not None: val_doc.canonical_name = DottedName(munge_script_name(str(filename))) if val_doc.canonical_name is UNKNOWN and filename is not None: shadowed_name = DottedName(value.__name__) log.warning("Module %s is shadowed by a variable with " "the same name." % shadowed_name) val_doc.canonical_name = DottedName(str(shadowed_name)+"'") return val_doc def _get_valuedoc(value): """ If a C{ValueDoc} for the given value exists in the valuedoc cache, then return it; otherwise, create a new C{ValueDoc}, add it to the cache, and return it. When possible, the new C{ValueDoc}'s C{pyval}, C{repr}, and C{canonical_name} attributes will be set appropriately. """ pyid = id(value) val_doc = _valuedoc_cache.get(pyid) if val_doc is None: try: canonical_name = get_canonical_name(value, strict=True) except DottedName.InvalidDottedName: canonical_name = UNKNOWN val_doc = ValueDoc(pyval=value, canonical_name = canonical_name, docs_extracted_by='introspecter') _valuedoc_cache[pyid] = val_doc # If it's a module, then do some preliminary introspection. # Otherwise, check what the containing module is (used e.g. # to decide what markup language should be used for docstrings) if inspect.ismodule(value): introspect_module(value, val_doc, preliminary=True) val_doc.defining_module = val_doc else: module_name = str(get_containing_module(value)) module = sys.modules.get(module_name) if module is not None and inspect.ismodule(module): val_doc.defining_module = _get_valuedoc(module) return val_doc #//////////////////////////////////////////////////////////// # Module Introspection #//////////////////////////////////////////////////////////// #: A list of module variables that should not be included in a #: module's API documentation. UNDOCUMENTED_MODULE_VARS = ( '__builtins__', '__doc__', '__all__', '__file__', '__path__', '__name__', '__extra_epydoc_fields__', '__docformat__') def introspect_module(module, module_doc, module_name=None, preliminary=False): """ Add API documentation information about the module C{module} to C{module_doc}. 
""" module_doc.specialize_to(ModuleDoc) # Record the module's docformat if hasattr(module, '__docformat__'): module_doc.docformat = unicode(module.__docformat__) # Record the module's filename if hasattr(module, '__file__'): try: module_doc.filename = unicode(module.__file__) except KeyboardInterrupt: raise except: pass if module_doc.filename is not UNKNOWN: try: module_doc.filename = py_src_filename(module_doc.filename) except ValueError: pass # If this is just a preliminary introspection, then don't do # anything else. (Typically this is true if this module was # imported, but is not included in the set of modules we're # documenting.) module_doc.variables = {} if preliminary: return # Record the module's docstring if hasattr(module, '__doc__'): module_doc.docstring = get_docstring(module) # If the module has a __path__, then it's (probably) a # package; so set is_package=True and record its __path__. if hasattr(module, '__path__'): module_doc.is_package = True try: module_doc.path = [unicode(p) for p in module.__path__] except KeyboardInterrupt: raise except: pass else: module_doc.is_package = False # Make sure we have a name for the package. dotted_name = module_doc.canonical_name if dotted_name is UNKNOWN: dotted_name = DottedName(module.__name__) name_without_primes = DottedName(str(dotted_name).replace("'", "")) # Record the module's parent package, if it has one. if len(dotted_name) > 1: package_name = str(dotted_name.container()) package = sys.modules.get(package_name) if package is not None: module_doc.package = introspect_docs(package) else: module_doc.package = None # Initialize the submodules property module_doc.submodules = [] # Add the module to its parent package's submodules list. if module_doc.package not in (None, UNKNOWN): module_doc.package.submodules.append(module_doc) # Look up the module's __all__ attribute (public names). public_names = None if hasattr(module, '__all__'): try: public_names = set([str(name) for name in module.__all__]) except KeyboardInterrupt: raise except: pass # Record the module's variables. module_doc.variables = {} for child_name in dir(module): if child_name in UNDOCUMENTED_MODULE_VARS: continue child = getattr(module, child_name) # Create a VariableDoc for the child, and introspect its # value if it's defined in this module. container = get_containing_module(child) if ((container is not None and container == name_without_primes) or (public_names is not None and child_name in public_names)): # Local variable. child_val_doc = introspect_docs(child, context=module_doc, module_name=dotted_name) child_var_doc = VariableDoc(name=child_name, value=child_val_doc, is_imported=False, container=module_doc, docs_extracted_by='introspecter') elif container is None or module_doc.canonical_name is UNKNOWN: # Don't introspect stuff "from __future__" if is_future_feature(child): continue # Possibly imported variable. child_val_doc = introspect_docs(child, context=module_doc) child_var_doc = VariableDoc(name=child_name, value=child_val_doc, container=module_doc, docs_extracted_by='introspecter') else: # Imported variable. child_val_doc = _get_valuedoc(child) child_var_doc = VariableDoc(name=child_name, value=child_val_doc, is_imported=True, container=module_doc, docs_extracted_by='introspecter') # If the module's __all__ attribute is set, use it to set the # variables public/private status and imported status. 
if public_names is not None: if child_name in public_names: child_var_doc.is_public = True if not isinstance(child_var_doc, ModuleDoc): child_var_doc.is_imported = False else: child_var_doc.is_public = False module_doc.variables[child_name] = child_var_doc return module_doc #//////////////////////////////////////////////////////////// # Class Introspection #//////////////////////////////////////////////////////////// #: A list of class variables that should not be included in a #: class's API documentation. UNDOCUMENTED_CLASS_VARS = ( '__doc__', '__module__', '__dict__', '__weakref__', '__slots__', '__pyx_vtable__') def introspect_class(cls, class_doc, module_name=None): """ Add API documentation information about the class C{cls} to C{class_doc}. """ class_doc.specialize_to(ClassDoc) # Record the class's docstring. class_doc.docstring = get_docstring(cls) # Record the class's __all__ attribute (public names). public_names = None if hasattr(cls, '__all__'): try: public_names = set([str(name) for name in cls.__all__]) except KeyboardInterrupt: raise except: pass # Start a list of subclasses. class_doc.subclasses = [] # Sometimes users will define a __metaclass__ that copies all # class attributes from bases directly into the derived class's # __dict__ when the class is created. (This saves the lookup time # needed to search the base tree for an attribute.) But for the # docs, we only want to list these copied attributes in the # parent. So only add an attribute if it is not identical to an # attribute of a base class. (Unfortunately, this can sometimes # cause an attribute to look like it was inherited, even though it # wasn't, if it happens to have the exact same value as the # corresponding base's attribute.) An example of a case where # this helps is PyQt -- subclasses of QWidget get about 300 # methods injected into them. base_children = {} # Record the class's base classes; and add the class to its # base class's subclass lists. if hasattr(cls, '__bases__'): try: bases = list(cls.__bases__) except: bases = None log.warning("Class '%s' defines __bases__, but it does not " "contain an iterable; ignoring base list." % getattr(cls, '__name__', '??')) if bases is not None: class_doc.bases = [] for base in bases: basedoc = introspect_docs(base) class_doc.bases.append(basedoc) basedoc.subclasses.append(class_doc) bases.reverse() for base in bases: if hasattr(base, '__dict__'): base_children.update(base.__dict__) # The module name is not defined if the class is being introspected # as another class base. if module_name is None and class_doc.defining_module not in (None, UNKNOWN): module_name = class_doc.defining_module.canonical_name # Record the class's local variables. 
class_doc.variables = {} if hasattr(cls, '__dict__'): private_prefix = '_%s__' % getattr(cls, '__name__', '<none>') for child_name, child in cls.__dict__.items(): if (child_name in base_children and base_children[child_name] == child): continue if child_name.startswith(private_prefix): child_name = child_name[len(private_prefix)-2:] if child_name in UNDOCUMENTED_CLASS_VARS: continue val_doc = introspect_docs(child, context=class_doc, module_name=module_name) var_doc = VariableDoc(name=child_name, value=val_doc, container=class_doc, docs_extracted_by='introspecter') if public_names is not None: var_doc.is_public = (child_name in public_names) class_doc.variables[child_name] = var_doc return class_doc #//////////////////////////////////////////////////////////// # Routine Introspection #//////////////////////////////////////////////////////////// def introspect_routine(routine, routine_doc, module_name=None): """Add API documentation information about the function C{routine} to C{routine_doc} (specializing it to C{Routine_doc}).""" routine_doc.specialize_to(RoutineDoc) # Extract the underying function if isinstance(routine, MethodType): func = routine.im_func elif isinstance(routine, staticmethod): func = routine.__get__(0) elif isinstance(routine, classmethod): func = routine.__get__(0).im_func else: func = routine # Record the function's docstring. routine_doc.docstring = get_docstring(func) # Record the function's signature. if isinstance(func, FunctionType): (args, vararg, kwarg, defaults) = inspect.getargspec(func) # Add the arguments. routine_doc.posargs = args routine_doc.vararg = vararg routine_doc.kwarg = kwarg # Set default values for positional arguments. routine_doc.posarg_defaults = [None]*len(args) if defaults is not None: offset = len(args)-len(defaults) for i in range(len(defaults)): default_val = introspect_docs(defaults[i]) routine_doc.posarg_defaults[i+offset] = default_val # If it's a bound method, then strip off the first argument. if isinstance(routine, MethodType) and routine.im_self is not None: routine_doc.posargs = routine_doc.posargs[1:] routine_doc.posarg_defaults = routine_doc.posarg_defaults[1:] # Set the routine's line number. if hasattr(func, 'func_code'): routine_doc.lineno = func.func_code.co_firstlineno else: # [XX] I should probably use UNKNOWN here?? # dvarrazzo: if '...' is to be changed, also check that # `docstringparser.process_arg_field()` works correctly. # See SF bug #1556024. routine_doc.posargs = ['...'] routine_doc.posarg_defaults = [None] routine_doc.kwarg = None routine_doc.vararg = None # Change type, if appropriate. if isinstance(routine, staticmethod): routine_doc.specialize_to(StaticMethodDoc) if isinstance(routine, classmethod): routine_doc.specialize_to(ClassMethodDoc) return routine_doc #//////////////////////////////////////////////////////////// # Property Introspection #//////////////////////////////////////////////////////////// def introspect_property(prop, prop_doc, module_name=None): """Add API documentation information about the property C{prop} to C{prop_doc} (specializing it to C{PropertyDoc}).""" prop_doc.specialize_to(PropertyDoc) # Record the property's docstring. prop_doc.docstring = get_docstring(prop, module_name=module_name) # Record the property's access functions. 
if hasattr(prop, 'fget'): prop_doc.fget = introspect_docs(prop.fget) prop_doc.fset = introspect_docs(prop.fset) prop_doc.fdel = introspect_docs(prop.fdel) return prop_doc #//////////////////////////////////////////////////////////// # Generic Value Introspection #//////////////////////////////////////////////////////////// def introspect_other(val, val_doc, module_name=None): """Specialize val_doc to a C{GenericValueDoc} and return it.""" val_doc.specialize_to(GenericValueDoc) return val_doc #//////////////////////////////////////////////////////////// # Helper functions #//////////////////////////////////////////////////////////// def isclass(object): """ Return true if the given object is a class. In particular, return true if object is an instance of C{types.TypeType} or of C{types.ClassType}. This is used instead of C{inspect.isclass()}, because the latter returns true for objects that are not classes (in particular, it returns true for any object that has a C{__bases__} attribute, including objects that define C{__getattr__} to always return a value). """ return isinstance(object, tuple(_CLASS_TYPES)) _CLASS_TYPES = set([TypeType, ClassType]) """A list of types that should be treated as classes.""" def register_class_type(typ): """Add a type to the lists of types that should be treated as classes. By default, this list contains C{TypeType} and C{ClassType}.""" _CLASS_TYPES.add(typ) __future_check_works = None def is_future_feature(object): """ Return True if C{object} results from a C{from __future__ import feature} statement. """ # Guard from unexpected implementation changes of the __future__ module. global __future_check_works if __future_check_works is not None: if __future_check_works: import __future__ return isinstance(object, __future__._Feature) else: return False else: __future_check_works = True try: return is_future_feature(object) except: __future_check_works = False log.warning("Troubles inspecting __future__. Python implementation" " may have been changed.") return False def get_docstring(value, module_name=None): """ Return the docstring for the given value; or C{None} if it does not have a docstring. @rtype: C{unicode} """ docstring = getattr(value, '__doc__', None) if docstring is None: return None elif isinstance(docstring, unicode): return docstring elif isinstance(docstring, str): try: return unicode(docstring, 'ascii') except UnicodeDecodeError: if module_name is None: module_name = get_containing_module(value) if module_name is not None: try: module = get_value_from_name(module_name) filename = py_src_filename(module.__file__) encoding = epydoc.docparser.get_module_encoding(filename) return unicode(docstring, encoding) except KeyboardInterrupt: raise except Exception: pass if hasattr(value, '__name__'): name = value.__name__ else: name = repr(value) log.warning("%s's docstring is not a unicode string, but it " "contains non-ascii data -- treating it as " "latin-1." % name) return unicode(docstring, 'latin-1') return None elif value is BuiltinMethodType: # Don't issue a warning for this special case. return None else: if hasattr(value, '__name__'): name = value.__name__ else: name = repr(value) log.warning("%s's docstring is not a string -- ignoring it." % name) return None def get_canonical_name(value, strict=False): """ @return: the canonical name for C{value}, or C{UNKNOWN} if no canonical name can be found. 
    Currently, C{get_canonical_name} can find canonical names for:
    modules; functions; non-nested classes; methods of non-nested
    classes; and some class methods of non-nested classes.

    @rtype: L{DottedName} or C{UNKNOWN}
    """
    if not hasattr(value, '__name__'): return UNKNOWN

    # Get the name via introspection.
    if isinstance(value, ModuleType):
        try:
            dotted_name = DottedName(value.__name__, strict=strict)
            # If the module is shadowed by a variable in its parent
            # package(s), then add a prime mark to the end, to
            # differentiate it from the variable that shadows it.
            if verify_name(value, dotted_name) is UNKNOWN:
                log.warning("Module %s is shadowed by a variable with "
                            "the same name." % dotted_name)
                # Note -- this return bypasses verify_name check:
                return DottedName(value.__name__+"'")
        except DottedName.InvalidDottedName:
            # Name is not a valid Python identifier -- treat as script.
            if hasattr(value, '__file__'):
                filename = '%s' % value.__file__
                dotted_name = DottedName(munge_script_name(filename))
    elif isclass(value):
        if value.__module__ == '__builtin__':
            dotted_name = DottedName(value.__name__, strict=strict)
        else:
            dotted_name = DottedName(value.__module__, value.__name__,
                                     strict=strict)
    elif (inspect.ismethod(value) and value.im_self is not None and
          value.im_class is ClassType and
          not value.__name__.startswith('<')): # class method.
        class_name = get_canonical_name(value.im_self)
        if class_name is UNKNOWN: return UNKNOWN
        dotted_name = DottedName(class_name, value.__name__, strict=strict)
    elif (inspect.ismethod(value) and
          not value.__name__.startswith('<')):
        class_name = get_canonical_name(value.im_class)
        if class_name is UNKNOWN: return UNKNOWN
        dotted_name = DottedName(class_name, value.__name__, strict=strict)
    elif (isinstance(value, FunctionType) and
          not value.__name__.startswith('<')):
        module_name = _find_function_module(value)
        if module_name is None: return UNKNOWN
        dotted_name = DottedName(module_name, value.__name__, strict=strict)
    else:
        return UNKNOWN

    return verify_name(value, dotted_name)

def verify_name(value, dotted_name):
    """
    Verify the name.  E.g., if it's a nested class, then we won't be
    able to find it with the name we constructed.
    """
    if dotted_name is UNKNOWN: return UNKNOWN
    if len(dotted_name) == 1 and hasattr(__builtin__, dotted_name[0]):
        return dotted_name
    named_value = sys.modules.get(dotted_name[0])
    if named_value is None: return UNKNOWN
    for identifier in dotted_name[1:]:
        try: named_value = getattr(named_value, identifier)
        except: return UNKNOWN
    if value is named_value:
        return dotted_name
    else:
        return UNKNOWN

# [xx] not used:
def value_repr(value):
    try:
        s = '%r' % value
        if isinstance(s, str):
            s = decode_with_backslashreplace(s)
        return s
    except:
        return UNKNOWN

def get_containing_module(value):
    """
    Return the name of the module containing the given value, or
    C{None} if the module name can't be determined.
    @rtype: L{DottedName}
    """
    if inspect.ismodule(value):
        return DottedName(value.__name__)
    elif isclass(value):
        return DottedName(value.__module__)
    elif (inspect.ismethod(value) and value.im_self is not None and
          value.im_class is ClassType): # class method.
        return DottedName(value.im_self.__module__)
    elif inspect.ismethod(value):
        return DottedName(value.im_class.__module__)
    elif inspect.isroutine(value):
        module = _find_function_module(value)
        if module is None: return None
        return DottedName(module)
    else:
        return None

def _find_function_module(func):
    """
    @return: The module that defines the given function.
    @rtype: C{module}
    @param func: The function whose module should be found.
@type func: C{function} """ if hasattr(func, '__module__'): return func.__module__ try: module = inspect.getmodule(func) if module: return module.__name__ except KeyboardInterrupt: raise except: pass # This fallback shouldn't usually be needed. But it is needed in # a couple special cases (including using epydoc to document # itself). In particular, if a module gets loaded twice, using # two different names for the same file, then this helps. for module in sys.modules.values(): if (hasattr(module, '__dict__') and hasattr(func, 'func_globals') and func.func_globals is module.__dict__): return module.__name__ return None #//////////////////////////////////////////////////////////// # Introspection Dispatch Table #//////////////////////////////////////////////////////////// _introspecter_registry = [] def register_introspecter(applicability_test, introspecter, priority=10): """ Register an introspecter function. Introspecter functions take two arguments, a python value and a C{ValueDoc} object, and should add information about the given value to the the C{ValueDoc}. Usually, the first line of an inspecter function will specialize it to a sublass of C{ValueDoc}, using L{ValueDoc.specialize_to()}: >>> def typical_introspecter(value, value_doc): ... value_doc.specialize_to(SomeSubclassOfValueDoc) ... <add info to value_doc> @param priority: The priority of this introspecter, which determines the order in which introspecters are tried -- introspecters with lower numbers are tried first. The standard introspecters have priorities ranging from 20 to 30. The default priority (10) will place new introspecters before standard introspecters. """ _introspecter_registry.append( (priority, applicability_test, introspecter) ) _introspecter_registry.sort() def _get_introspecter(value): for (priority, applicability_test, introspecter) in _introspecter_registry: if applicability_test(value): return introspecter else: return introspect_other # Register the standard introspecter functions. def is_classmethod(v): return isinstance(v, classmethod) def is_staticmethod(v): return isinstance(v, staticmethod) def is_property(v): return isinstance(v, property) register_introspecter(inspect.ismodule, introspect_module, priority=20) register_introspecter(isclass, introspect_class, priority=24) register_introspecter(inspect.isroutine, introspect_routine, priority=28) register_introspecter(is_property, introspect_property, priority=30) # Register getset_descriptor as a property try: import array getset_type = type(array.array.typecode) del array def is_getset(v): return isinstance(v, getset_type) register_introspecter(is_getset, introspect_property, priority=32) except: pass # Register member_descriptor as a property try: import datetime member_type = type(datetime.timedelta.days) del datetime def is_member(v): return isinstance(v, member_type) register_introspecter(is_member, introspect_property, priority=34) except: pass #//////////////////////////////////////////////////////////// # Import support #//////////////////////////////////////////////////////////// def get_value_from_filename(filename, context=None): # Normalize the filename. filename = os.path.normpath(os.path.abspath(filename)) # Divide the filename into a base directory and a name. (For # packages, use the package's parent directory as the base, and # the directory name as its name). 
basedir = os.path.split(filename)[0] name = os.path.splitext(os.path.split(filename)[1])[0] if name == '__init__': basedir, name = os.path.split(basedir) name = DottedName(name) # If the context wasn't provided, then check if the file is in a # package directory. If so, then update basedir & name to contain # the topmost package's directory and the fully qualified name for # this file. (This update assume the default value of __path__ # for the parent packages; if the parent packages override their # __path__s, then this can cause us not to find the value.) if context is None: while is_package_dir(basedir): basedir, pkg_name = os.path.split(basedir) name = DottedName(pkg_name, name) # If a parent package was specified, then find the directory of # the topmost package, and the fully qualified name for this file. if context is not None: # Combine the name. name = DottedName(context.canonical_name, name) # Find the directory of the base package. while context not in (None, UNKNOWN): pkg_dir = os.path.split(context.filename)[0] basedir = os.path.split(pkg_dir)[0] context = context.package # Import the module. (basedir is the directory of the module's # topmost package, or its own directory if it's not in a package; # and name is the fully qualified dotted name for the module.) old_sys_path = sys.path[:] try: sys.path.insert(0, basedir) # This will make sure that we get the module itself, even # if it is shadowed by a variable. (E.g., curses.wrapper): _import(str(name)) if str(name) in sys.modules: return sys.modules[str(name)] else: # Use this as a fallback -- it *shouldn't* ever be needed. return get_value_from_name(name) finally: sys.path = old_sys_path def get_value_from_scriptname(filename): name = munge_script_name(filename) return _import(name, filename) def get_value_from_name(name, globs=None): """ Given a name, return the corresponding value. @param globs: A namespace to check for the value, if there is no module containing the named value. Defaults to __builtin__. """ name = DottedName(name) # Import the topmost module/package. If we fail, then check if # the requested name refers to a builtin. try: module = _import(name[0]) except ImportError, e: if globs is None: globs = __builtin__.__dict__ if name[0] in globs: try: return _lookup(globs[name[0]], name[1:]) except: raise e else: raise # Find the requested value in the module/package or its submodules. for i in range(1, len(name)): try: return _lookup(module, name[i:]) except ImportError: pass module = _import('.'.join(name[:i+1])) module = _lookup(module, name[1:i+1]) return module def _lookup(module, name): val = module for i, identifier in enumerate(name): try: val = getattr(val, identifier) except AttributeError: exc_msg = ('no variable named %s in %s' % (identifier, '.'.join(name[:1+i]))) raise ImportError(exc_msg) return val def _import(name, filename=None): """ Run the given callable in a 'sandboxed' environment. Currently, this includes saving and restoring the contents of sys and __builtins__; and suppressing stdin, stdout, and stderr. """ # Note that we just do a shallow copy of sys. In particular, # any changes made to sys.modules will be kept. But we do # explicitly store sys.path. old_sys = sys.__dict__.copy() old_sys_path = sys.path[:] old_builtins = __builtin__.__dict__.copy() # Add the current directory to sys.path, in case they're trying to # import a module by name that resides in the current directory. 
# But add it to the end -- otherwise, the explicit directory added # in get_value_from_filename might get overwritten sys.path.append('') # Suppress input and output. (These get restored when we restore # sys to old_sys). sys.stdin = sys.stdout = sys.stderr = _dev_null sys.__stdin__ = sys.__stdout__ = sys.__stderr__ = _dev_null # Remove any command-line arguments sys.argv = ['(imported)'] try: try: if filename is None: return __import__(name) else: # For importing scripts: return imp.load_source(name, filename) except KeyboardInterrupt: raise except: exc_typ, exc_val, exc_tb = sys.exc_info() if exc_val is None: estr = '%s' % (exc_typ,) else: estr = '%s: %s' % (exc_typ.__name__, exc_val) if exc_tb.tb_next is not None: estr += ' (line %d)' % (exc_tb.tb_next.tb_lineno,) raise ImportError(estr) finally: # Restore the important values that we saved. __builtin__.__dict__.clear() __builtin__.__dict__.update(old_builtins) sys.__dict__.clear() sys.__dict__.update(old_sys) sys.path = old_sys_path def introspect_docstring_lineno(api_doc): """ Try to determine the line number on which the given item's docstring begins. Return the line number, or C{None} if the line number can't be determined. The line number of the first line in the file is 1. """ if api_doc.docstring_lineno is not UNKNOWN: return api_doc.docstring_lineno if isinstance(api_doc, ValueDoc) and api_doc.pyval is not UNKNOWN: try: lines, lineno = inspect.findsource(api_doc.pyval) if not isinstance(api_doc, ModuleDoc): lineno += 1 for lineno in range(lineno, len(lines)): if lines[lineno].split('#', 1)[0].strip(): api_doc.docstring_lineno = lineno + 1 return lineno + 1 except IOError: pass except TypeError: pass except IndexError: log.warning('inspect.findsource(%s) raised IndexError' % api_doc.canonical_name) return None class _DevNull: """ A "file-like" object that discards anything that is written and always reports end-of-file when read. C{_DevNull} is used by L{_import()} to discard output when importing modules; and to ensure that stdin appears closed. 
""" def __init__(self): self.closed = 1 self.mode = 'r+' self.softspace = 0 self.name='</dev/null>' def close(self): pass def flush(self): pass def read(self, size=0): return '' def readline(self, size=0): return '' def readlines(self, sizehint=0): return [] def seek(self, offset, whence=0): pass def tell(self): return 0L def truncate(self, size=0): pass def write(self, str): pass def writelines(self, sequence): pass xreadlines = readlines _dev_null = _DevNull() ###################################################################### ## Zope InterfaceClass ###################################################################### try: from zope.interface.interface import InterfaceClass as _ZopeInterfaceClass register_class_type(_ZopeInterfaceClass) except: pass ###################################################################### ## Zope Extension classes ###################################################################### try: # Register type(ExtensionClass.ExtensionClass) from ExtensionClass import ExtensionClass as _ExtensionClass _ZopeType = type(_ExtensionClass) def _is_zope_type(val): return isinstance(val, _ZopeType) register_introspecter(_is_zope_type, introspect_class) # Register ExtensionClass.*MethodType from ExtensionClass import PythonMethodType as _ZopeMethodType from ExtensionClass import ExtensionMethodType as _ZopeCMethodType def _is_zope_method(val): return isinstance(val, (_ZopeMethodType, _ZopeCMethodType)) register_introspecter(_is_zope_method, introspect_routine) except: pass # [xx] 0 # hm.. otherwise the following gets treated as a docstring! ouch! """ ###################################################################### ## Zope Extension... ###################################################################### class ZopeIntrospecter(Introspecter): VALUEDOC_CLASSES = Introspecter.VALUEDOC_CLASSES.copy() VALUEDOC_CLASSES.update({ 'module': ZopeModuleDoc, 'class': ZopeClassDoc, 'interface': ZopeInterfaceDoc, 'attribute': ZopeAttributeDoc, }) def add_module_child(self, child, child_name, module_doc): if isinstance(child, zope.interfaces.Interface): module_doc.add_zope_interface(child_name) else: Introspecter.add_module_child(self, child, child_name, module_doc) def add_class_child(self, child, child_name, class_doc): if isinstance(child, zope.interfaces.Interface): class_doc.add_zope_interface(child_name) else: Introspecter.add_class_child(self, child, child_name, class_doc) def introspect_zope_interface(self, interface, interfacename): pass # etc... """
""" Test the file recognizer capabilities. """ import gzip import os import shutil import socket import sys import nose from grin import FileRecognizer def empty_file(filename, open=open): f = open(filename, 'wb') f.close() def binary_file(filename, open=open): f = open(filename, 'wb') f.write(''.join(map(chr, range(256)))) f.close() def text_file(filename, open=open): lines = ['foo\n', 'bar\n'] * 100 lines.append('baz\n') lines.extend(['foo\n', 'bar\n'] * 100) f = open(filename, 'wb') f.writelines(lines) f.close() def fake_gzip_file(filename, open=open): """ Write out a binary file that has the gzip magic header bytes, but is not a gzip file. """ GZIP_MAGIC = '\037\213' f = open(filename, 'wb') f.write(GZIP_MAGIC) f.write(''.join(map(chr, range(256)))) f.close() def binary_middle(filename, open=open): """ Write out a file that is text for the first 100 bytes, then 100 binary bytes, then 100 text bytes to test that the recognizer only reads some of the file. """ text = 'a'*100 + '\0'*100 + 'b'*100 f = open(filename, 'wb') f.write(text) f.close() def socket_file(filename): s = socket.socket(socket.AF_UNIX) s.bind(filename) def unreadable_file(filename): """ Write a file that does not have read permissions. """ text_file(filename) os.chmod(filename, 0200) def unreadable_dir(filename): """ Make a directory that does not have read permissions. """ os.mkdir(filename) os.chmod(filename, 0300) def unexecutable_dir(filename): """ Make a directory that does not have execute permissions. """ os.mkdir(filename) os.chmod(filename, 0600) def totally_unusable_dir(filename): """ Make a directory that has neither read nor execute permissions. """ os.mkdir(filename) os.chmod(filename, 0100) def setup(): # Make files to test individual recognizers. empty_file('empty') binary_file('binary') binary_middle('binary_middle') text_file('text') text_file('text~') text_file('text#') text_file('foo.bar.baz') os.mkdir('dir') binary_file('.binary') text_file('.text') empty_file('empty.gz', open=gzip.open) binary_file('binary.gz', open=gzip.open) text_file('text.gz', open=gzip.open) binary_file('.binary.gz', open=gzip.open) text_file('.text.gz', open=gzip.open) fake_gzip_file('fake.gz') os.mkdir('.dir') os.symlink('binary', 'binary_link') os.symlink('text', 'text_link') os.symlink('dir', 'dir_link') os.symlink('.binary', '.binary_link') os.symlink('.text', '.text_link') os.symlink('.dir', '.dir_link') unreadable_file('unreadable_file') unreadable_dir('unreadable_dir') unexecutable_dir('unexecutable_dir') totally_unusable_dir('totally_unusable_dir') os.symlink('unreadable_file', 'unreadable_file_link') os.symlink('unreadable_dir', 'unreadable_dir_link') os.symlink('unexecutable_dir', 'unexecutable_dir_link') os.symlink('totally_unusable_dir', 'totally_unusable_dir_link') text_file('text.skip_ext') os.mkdir('dir.skip_ext') text_file('text.dont_skip_ext') os.mkdir('skip_dir') text_file('fake_skip_dir') socket_file('socket_test') # Make a directory tree to test tree-walking. 
os.mkdir('tree') os.mkdir('tree/.hidden_dir') os.mkdir('tree/dir') os.mkdir('tree/dir/subdir') text_file('tree/dir/text') text_file('tree/dir/subdir/text') text_file('tree/text') text_file('tree/text.skip_ext') os.mkdir('tree/dir.skip_ext') text_file('tree/dir.skip_ext/text') text_file('tree/text.dont_skip_ext') binary_file('tree/binary') os.mkdir('tree/skip_dir') text_file('tree/skip_dir/text') os.mkdir('tree/.skip_hidden_dir') text_file('tree/.skip_hidden_file') os.mkdir('tree/unreadable_dir') text_file('tree/unreadable_dir/text') os.chmod('tree/unreadable_dir', 0300) os.mkdir('tree/unexecutable_dir') text_file('tree/unexecutable_dir/text') os.chmod('tree/unexecutable_dir', 0600) os.mkdir('tree/totally_unusable_dir') text_file('tree/totally_unusable_dir/text') os.chmod('tree/totally_unusable_dir', 0100) def ensure_deletability(arg, dirname, fnames): """ os.path.walk() callback function which will make sure every directory is readable and executable so that it may be easily deleted. """ for fn in fnames: fn = os.path.join(dirname, fn) if os.path.isdir(fn): os.chmod(fn, 0700) def teardown(): files_to_delete = ['empty', 'binary', 'binary_middle', 'text', 'text~', 'empty.gz', 'binary.gz', 'text.gz', 'dir', 'binary_link', 'text_link', 'dir_link', '.binary', '.text', '.binary.gz', '.text.gz', 'fake.gz', '.dir', '.binary_link', '.text_link', '.dir_link', 'unreadable_file', 'unreadable_dir', 'unexecutable_dir', 'totally_unusable_dir', 'unreadable_file_link', 'unreadable_dir_link', 'unexecutable_dir_link', 'totally_unusable_dir_link', 'text.skip_ext', 'text.dont_skip_ext', 'dir.skip_ext', 'skip_dir', 'fake_skip_dir', 'text#', 'foo.bar.baz', ] for filename in files_to_delete: try: if os.path.islink(filename) or os.path.isfile(filename): os.unlink(filename) else: os.rmdir(filename) except Exception, e: print >>sys.stderr, 'Could not delete %s: %s' % (filename, e) os.unlink('socket_test') os.path.walk('tree', ensure_deletability, None) shutil.rmtree('tree') def test_binary(): fr = FileRecognizer() assert fr.is_binary('binary') assert fr.recognize_file('binary') == 'binary' assert fr.recognize('binary') == 'binary' def test_text(): fr = FileRecognizer() assert not fr.is_binary('text') assert fr.recognize_file('text') == 'text' assert fr.recognize('text') == 'text' def test_gzipped(): fr = FileRecognizer() assert fr.is_binary('text.gz') assert fr.recognize_file('text.gz') == 'gzip' assert fr.recognize('text.gz') == 'gzip' assert fr.is_binary('binary.gz') assert fr.recognize_file('binary.gz') == 'binary' assert fr.recognize('binary.gz') == 'binary' assert fr.is_binary('fake.gz') assert fr.recognize_file('fake.gz') == 'binary' assert fr.recognize('fake.gz') == 'binary' def test_binary_middle(): fr = FileRecognizer(binary_bytes=100) assert not fr.is_binary('binary_middle') assert fr.recognize_file('binary_middle') == 'text' assert fr.recognize('binary_middle') == 'text' fr = FileRecognizer(binary_bytes=101) assert fr.is_binary('binary_middle') assert fr.recognize_file('binary_middle') == 'binary' assert fr.recognize('binary_middle') == 'binary' def test_socket(): fr= FileRecognizer() assert fr.recognize('socket_test') == 'skip' def test_dir(): fr = FileRecognizer() assert fr.recognize_directory('dir') == 'directory' assert fr.recognize('dir') == 'directory' def test_skip_symlinks(): fr = FileRecognizer(skip_symlink_files=True, skip_symlink_dirs=True) assert fr.recognize('binary_link') == 'link' assert fr.recognize_file('binary_link') == 'link' assert fr.recognize('text_link') == 'link' assert 
fr.recognize_file('text_link') == 'link' assert fr.recognize('dir_link') == 'link' assert fr.recognize_directory('dir_link') == 'link' def test_do_not_skip_symlinks(): fr = FileRecognizer(skip_symlink_files=False, skip_symlink_dirs=False) assert fr.recognize('binary_link') == 'binary' assert fr.recognize_file('binary_link') == 'binary' assert fr.recognize('text_link') == 'text' assert fr.recognize_file('text_link') == 'text' assert fr.recognize('dir_link') == 'directory' assert fr.recognize_directory('dir_link') == 'directory' def test_skip_hidden(): fr = FileRecognizer(skip_hidden_files=True, skip_hidden_dirs=True) assert fr.recognize('.binary') == 'skip' assert fr.recognize_file('.binary') == 'skip' assert fr.recognize('.text') == 'skip' assert fr.recognize_file('.text') == 'skip' assert fr.recognize('.dir') == 'skip' assert fr.recognize_directory('.dir') == 'skip' assert fr.recognize('.binary_link') == 'skip' assert fr.recognize_file('.binary_link') == 'skip' assert fr.recognize('.text_link') == 'skip' assert fr.recognize_file('.text_link') == 'skip' assert fr.recognize('.dir_link') == 'skip' assert fr.recognize_directory('.dir_link') == 'skip' assert fr.recognize('.text.gz') == 'skip' assert fr.recognize_file('.text.gz') == 'skip' assert fr.recognize('.binary.gz') == 'skip' assert fr.recognize_file('.binary.gz') == 'skip' def test_skip_backup(): fr = FileRecognizer(skip_backup_files=True) assert fr.recognize_file('text~') == 'skip' def test_do_not_skip_backup(): fr = FileRecognizer(skip_backup_files=False) assert fr.recognize_file('text~') == 'text' def test_skip_weird_exts(): fr = FileRecognizer(skip_exts=set()) assert fr.recognize_file('text#') == 'text' assert fr.recognize_file('foo.bar.baz') == 'text' fr = FileRecognizer(skip_exts=set(['#', '.bar.baz'])) assert fr.recognize_file('text#') == 'skip' assert fr.recognize_file('foo.bar.baz') == 'skip' def test_do_not_skip_hidden_or_symlinks(): fr = FileRecognizer(skip_hidden_files=False, skip_hidden_dirs=False, skip_symlink_dirs=False, skip_symlink_files=False) assert fr.recognize('.binary') == 'binary' assert fr.recognize_file('.binary') == 'binary' assert fr.recognize('.text') == 'text' assert fr.recognize_file('.text') == 'text' assert fr.recognize('.dir') == 'directory' assert fr.recognize_directory('.dir') == 'directory' assert fr.recognize('.binary_link') == 'binary' assert fr.recognize_file('.binary_link') == 'binary' assert fr.recognize('.text_link') == 'text' assert fr.recognize_file('.text_link') == 'text' assert fr.recognize('.dir_link') == 'directory' assert fr.recognize_directory('.dir_link') == 'directory' assert fr.recognize('.text.gz') == 'gzip' assert fr.recognize_file('.text.gz') == 'gzip' assert fr.recognize('.binary.gz') == 'binary' assert fr.recognize_file('.binary.gz') == 'binary' def test_do_not_skip_hidden_but_skip_symlinks(): fr = FileRecognizer(skip_hidden_files=False, skip_hidden_dirs=False, skip_symlink_dirs=True, skip_symlink_files=True) assert fr.recognize('.binary') == 'binary' assert fr.recognize_file('.binary') == 'binary' assert fr.recognize('.text') == 'text' assert fr.recognize_file('.text') == 'text' assert fr.recognize('.dir') == 'directory' assert fr.recognize_directory('.dir') == 'directory' assert fr.recognize('.binary_link') == 'link' assert fr.recognize_file('.binary_link') == 'link' assert fr.recognize('.text_link') == 'link' assert fr.recognize_file('.text_link') == 'link' assert fr.recognize('.dir_link') == 'link' assert fr.recognize_directory('.dir_link') == 'link' assert 
fr.recognize('.text.gz') == 'gzip' assert fr.recognize_file('.text.gz') == 'gzip' assert fr.recognize('.binary.gz') == 'binary' assert fr.recognize_file('.binary.gz') == 'binary' def test_lack_of_permissions(): fr = FileRecognizer() assert fr.recognize('unreadable_file') == 'unreadable' assert fr.recognize_file('unreadable_file') == 'unreadable' assert fr.recognize('unreadable_dir') == 'directory' assert fr.recognize_directory('unreadable_dir') == 'directory' assert fr.recognize('unexecutable_dir') == 'directory' assert fr.recognize_directory('unexecutable_dir') == 'directory' assert fr.recognize('totally_unusable_dir') == 'directory' assert fr.recognize_directory('totally_unusable_dir') == 'directory' def test_symlink_src_unreadable(): fr = FileRecognizer(skip_symlink_files=False, skip_symlink_dirs=False) assert fr.recognize('unreadable_file_link') == 'unreadable' assert fr.recognize_file('unreadable_file_link') == 'unreadable' assert fr.recognize('unreadable_dir_link') == 'directory' assert fr.recognize_directory('unreadable_dir_link') == 'directory' assert fr.recognize('unexecutable_dir_link') == 'directory' assert fr.recognize_directory('unexecutable_dir_link') == 'directory' assert fr.recognize('totally_unusable_dir_link') == 'directory' assert fr.recognize_directory('totally_unusable_dir_link') == 'directory' def test_skip_ext(): fr = FileRecognizer(skip_exts=set(['.skip_ext'])) assert fr.recognize('text.skip_ext') == 'skip' assert fr.recognize_file('text.skip_ext') == 'skip' assert fr.recognize('text') == 'text' assert fr.recognize_file('text') == 'text' assert fr.recognize('text.dont_skip_ext') == 'text' assert fr.recognize_file('text.dont_skip_ext') == 'text' assert fr.recognize('dir.skip_ext') == 'directory' assert fr.recognize_directory('dir.skip_ext') == 'directory' def test_skip_dir(): fr = FileRecognizer(skip_dirs=set(['skip_dir', 'fake_skip_dir'])) assert fr.recognize('skip_dir') == 'skip' assert fr.recognize_directory('skip_dir') == 'skip' assert fr.recognize('fake_skip_dir') == 'text' assert fr.recognize_file('fake_skip_dir') == 'text' def test_walking(): fr = FileRecognizer(skip_hidden_files=True, skip_hidden_dirs=True, skip_exts=set(['.skip_ext']),skip_dirs=set(['skip_dir'])) truth = [ ('tree/binary', 'binary'), ('tree/dir.skip_ext/text', 'text'), ('tree/dir/subdir/text', 'text'), ('tree/dir/text', 'text'), ('tree/text', 'text'), ('tree/text.dont_skip_ext', 'text'), ] result = sorted(fr.walk('tree')) assert result == truth def predot(): os.chdir('tree') def postdot(): os.chdir('..') @nose.with_setup(predot, postdot) def test_dot(): fr = FileRecognizer(skip_hidden_files=True, skip_hidden_dirs=True, skip_exts=set(['.skip_ext']),skip_dirs=set(['skip_dir'])) truth = [ ('./binary', 'binary'), ('./dir.skip_ext/text', 'text'), ('./dir/subdir/text', 'text'), ('./dir/text', 'text'), ('./text', 'text'), ('./text.dont_skip_ext', 'text'), ] result = sorted(fr.walk('.')) assert result == truth def predotdot(): os.chdir('tree') os.chdir('dir') def postdotdot(): os.chdir('..') os.chdir('..') @nose.with_setup(predotdot, postdotdot) def test_dot_dot(): fr = FileRecognizer(skip_hidden_files=True, skip_hidden_dirs=True, skip_exts=set(['.skip_ext']),skip_dirs=set(['skip_dir'])) truth = [ ('../binary', 'binary'), ('../dir.skip_ext/text', 'text'), ('../dir/subdir/text', 'text'), ('../dir/text', 'text'), ('../text', 'text'), ('../text.dont_skip_ext', 'text'), ] result = sorted(fr.walk('..')) assert result == truth
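# --- Illustrative sketch (not part of the original test module) ------------
# The tests above poke FileRecognizer one path at a time; a typical caller
# combines the skip options and walks a whole tree, acting only on entries
# recognized as 'text' or 'gzip'.  The directory name and skip sets below
# are placeholders.

def _count_searchable_files(root='some_dir'):
    fr = FileRecognizer(skip_hidden_files=True, skip_hidden_dirs=True,
                        skip_backup_files=True,
                        skip_exts=set(['.pyc', '.o']),
                        skip_dirs=set(['.svn', 'CVS']))
    searchable = 0
    # walk() yields (filename, kind) pairs, as checked in test_walking().
    for filename, kind in fr.walk(root):
        if kind in ('text', 'gzip'):
            searchable += 1
    return searchable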
import calendar from django.test import TestCase from ..serializer import ProjectFormSerializer, DataSerializer from geokey.categories.tests.model_factories import ( TextFieldFactory, NumericFieldFactory, DateFieldFactory, TimeFieldFactory, LookupFieldFactory, LookupValueFactory, DateTimeFieldFactory, MultipleLookupFieldFactory, MultipleLookupValueFactory ) from geokey.projects.tests.model_factories import ProjectFactory from geokey.categories.tests.model_factories import CategoryFactory from geokey.contributions.tests.model_factories import ObservationFactory class ProjectFormSerializerTest(TestCase): # ######################################################################## # Test helpers # ######################################################################## def test_create_item(self): serializer = ProjectFormSerializer() item = serializer.create_item('True', 'true') self.assertEqual(item.find('value').text, 'true') self.assertEqual(item.find('label').text, 'True') def test_create_label(self): serializer = ProjectFormSerializer() label = serializer.create_label('Field name') self.assertEqual(label.tag, 'label') self.assertEqual(label.text, 'Field name') def test_create_base_input(self): serializer = ProjectFormSerializer() field = TextFieldFactory() xml = serializer.create_base_input(field) self.assertEqual(xml.tag, 'input') self.assertEqual( xml.attrib['ref'], field.key + '_' + str(field.category.id) ) field = TextFieldFactory(**{'required': True}) xml = serializer.create_base_input(field) self.assertEqual(xml.tag, 'input') self.assertEqual( xml.attrib['ref'], field.key + '_' + str(field.category.id) ) self.assertEqual(xml.attrib['required'], 'true') def test_create_base_select(self): serializer = ProjectFormSerializer() field = LookupFieldFactory() xml = serializer.create_base_select(field, 'radio') self.assertEqual(xml.tag, 'radio') self.assertEqual( xml.attrib['ref'], field.key + '_' + str(field.category.id) ) field = LookupFieldFactory(**{'required': True}) xml = serializer.create_base_select(field, 'radio') self.assertEqual(xml.tag, 'radio') self.assertEqual( xml.attrib['ref'], field.key + '_' + str(field.category.id) ) self.assertEqual(xml.attrib['required'], 'true') def test_get_photo_input(self): serializer = ProjectFormSerializer() xml = serializer.get_photo_input() self.assertEqual(xml.tag, 'photo') def test_get_video_input(self): serializer = ProjectFormSerializer() xml = serializer.get_video_input() self.assertEqual(xml.tag, 'video') # ######################################################################## # Test serializers # ######################################################################## def test_serialize_textfield(self): field = TextFieldFactory() serializer = ProjectFormSerializer() xml = serializer.serialize_textfield(field) self.assertEqual(xml.tag, 'input') self.assertEqual( xml.attrib['ref'], field.key + '_' + str(field.category.id) ) self.assertEqual(xml[0].tag, 'label') self.assertEqual(xml[0].text, field.name) with self.assertRaises(KeyError): xml.attrib['decimal'] xml.attrib['date'] def test_serialize_number_field(self): field = NumericFieldFactory() serializer = ProjectFormSerializer() xml = serializer.serialize_numericfield(field) self.assertEqual(xml.tag, 'input') self.assertEqual( xml.attrib['ref'], field.key + '_' + str(field.category.id) ) self.assertEqual(xml.attrib['decimal'], 'true') self.assertEqual(xml[0].tag, 'label') self.assertEqual(xml[0].text, field.name) with self.assertRaises(KeyError): xml.attrib['date'] xml.attrib['min'] 
xml.attrib['max'] def test_serialize_number_field_with_minfield(self): field = NumericFieldFactory(**{'required': True, 'minval': 12}) serializer = ProjectFormSerializer() xml = serializer.serialize_numericfield(field) self.assertEqual(xml.tag, 'input') self.assertEqual( xml.attrib['ref'], field.key + '_' + str(field.category.id) ) self.assertEqual(xml.attrib['required'], 'true') self.assertEqual(xml.attrib['decimal'], 'true') self.assertEqual(xml.attrib['min'], '12') with self.assertRaises(KeyError): xml.attrib['max'] xml.attrib['date'] self.assertEqual(xml[0].tag, 'label') self.assertEqual(xml[0].text, field.name) def test_serialize_number_field_with_maxfield(self): field = NumericFieldFactory(**{'required': True, 'maxval': 12}) serializer = ProjectFormSerializer() xml = serializer.serialize_numericfield(field) self.assertEqual(xml.tag, 'input') self.assertEqual( xml.attrib['ref'], field.key + '_' + str(field.category.id) ) self.assertEqual(xml.attrib['required'], 'true') self.assertEqual(xml.attrib['decimal'], 'true') self.assertEqual(xml.attrib['max'], '12') with self.assertRaises(KeyError): xml.attrib['min'] xml.attrib['date'] self.assertEqual(xml[0].tag, 'label') self.assertEqual(xml[0].text, field.name) def test_serialize_number_field_with_minmaxfield(self): field = NumericFieldFactory( **{'required': True, 'minval': 2, 'maxval': 12}) serializer = ProjectFormSerializer() xml = serializer.serialize_numericfield(field) self.assertEqual(xml.tag, 'input') self.assertEqual( xml.attrib['ref'], field.key + '_' + str(field.category.id) ) self.assertEqual(xml.attrib['required'], 'true') self.assertEqual(xml.attrib['decimal'], 'true') self.assertEqual(xml.attrib['min'], '2') self.assertEqual(xml.attrib['max'], '12') self.assertEqual(xml[0].tag, 'label') self.assertEqual(xml[0].text, field.name) with self.assertRaises(KeyError): xml.attrib['date'] def test_serialize_single_lookup_field(self): field = LookupFieldFactory() val1 = LookupValueFactory(**{'field': field, 'name': 'Kermit'}) val2 = LookupValueFactory(**{'field': field, 'name': 'Ms. Piggy'}) val3 = LookupValueFactory(**{'field': field, 'name': 'Gonzo'}) serializer = ProjectFormSerializer() xml = serializer.serialize_singlelookup_field(field) self.assertEqual(xml.tag, 'radio') self.assertEqual( xml.attrib['ref'], field.key + '_' + str(field.category.id) ) self.assertEqual(len(xml.findall('item')), 3) for item in xml.findall('item'): self.assertIn( item.find('label').text, [val1.name, val2.name, val3.name] ) self.assertIn( item.find('value').text, [str(val1.id), str(val2.id), str(val3.id)] ) def test_serialize_multiple_lookup_field(self): field = MultipleLookupFieldFactory() val1 = MultipleLookupValueFactory(**{'field': field, 'name': 'Kermit'}) val2 = MultipleLookupValueFactory( **{'field': field, 'name': 'Ms. 
Piggy'} ) val3 = MultipleLookupValueFactory(**{'field': field, 'name': 'Gonzo'}) serializer = ProjectFormSerializer() xml = serializer.serialize_multiplelookup_field(field) self.assertEqual(xml.tag, 'select') self.assertEqual( xml.attrib['ref'], field.key + '_' + str(field.category.id) ) self.assertEqual(len(xml.findall('item')), 3) for item in xml.findall('item'): self.assertIn( item.find('label').text, [val1.name, val2.name, val3.name] ) self.assertIn( item.find('value').text, [str(val1.id), str(val2.id), str(val3.id)] ) def test_serialize_date_field(self): field = DateFieldFactory() serializer = ProjectFormSerializer() xml = serializer.serialize_date_field(field) self.assertEqual(xml.tag, 'input') self.assertEqual( xml.attrib['ref'], field.key + '_' + str(field.category.id) ) self.assertEqual(xml.attrib['date'], 'dd/MM/yyyy') self.assertEqual(xml[0].tag, 'label') self.assertEqual(xml[0].text, field.name) with self.assertRaises(KeyError): xml.attrib['min'] xml.attrib['max'] xml.attrib['decimal'] def test_serialize_time_field(self): field = TimeFieldFactory() serializer = ProjectFormSerializer() xml = serializer.serialize_time_field(field) self.assertEqual(xml.tag, 'input') self.assertEqual( xml.attrib['ref'], field.key + '_' + str(field.category.id) ) self.assertEqual(xml.attrib['time'], 'HH:mm') self.assertEqual(xml[0].tag, 'label') self.assertEqual(xml[0].text, field.name) with self.assertRaises(KeyError): xml.attrib['min'] xml.attrib['max'] xml.attrib['decimal'] def test_serialize_datetime_field(self): field = DateTimeFieldFactory() serializer = ProjectFormSerializer() xml = serializer.serialize_date_field(field) self.assertEqual(xml.tag, 'input') self.assertEqual( xml.attrib['ref'], field.key + '_' + str(field.category.id) ) self.assertEqual(xml.attrib['date'], 'dd/MM/yyyy') self.assertEqual(xml[0].tag, 'label') self.assertEqual(xml[0].text, field.name) with self.assertRaises(KeyError): xml.attrib['min'] xml.attrib['max'] xml.attrib['decimal'] def test_serialize_project(self): project = ProjectFactory() type1 = CategoryFactory.create(**{'project': project}) TextFieldFactory(**{'category': type1}) type2 = CategoryFactory.create(**{'project': project}) TextFieldFactory(**{'category': type2}) type3 = CategoryFactory.create(**{'project': project}) TextFieldFactory(**{'category': type3}) serializer = ProjectFormSerializer() xml = serializer.serialize(project, 'http://192.168.57.10:8000') self.assertEqual( xml.find('model').find('submission').attrib['id'], str(project.id) ) self.assertEqual( xml.find('model').find('submission').attrib['projectName'], project.name.replace(' ', '_') ) self.assertEqual( xml.find('form').find('location').attrib['required'], 'true' ) self.assertEqual( xml.find('form').find('location').find('label').text, 'Location' ) class SerializeDataTest(TestCase): def test_serialize_observation(self): observation = ObservationFactory.create() observation.properties = {'thekey': '46'} serializer = DataSerializer() xml = serializer.serialize_entry_to_xml(observation) self.assertEqual(str(observation.id), xml.find('id').text) self.assertEqual( str(calendar.timegm(observation.created_at.utctimetuple())), xml.find('created').text) self.assertEqual( observation.created_at.strftime('%Y-%m-%d %H:%M:%S'), xml.find('uploaded').text ) self.assertEqual( xml.find('thekey_%s' % observation.category.id).text, '46' ) def test_serialize_observation_with_null_val(self): observation = ObservationFactory.create() observation.properties = {'thekey': None} serializer = DataSerializer() xml = 
serializer.serialize_entry_to_xml(observation) self.assertEqual(str(observation.id), xml.find('id').text) self.assertEqual( str(calendar.timegm(observation.created_at.utctimetuple())), xml.find('created').text) self.assertEqual( observation.created_at.strftime('%Y-%m-%d %H:%M:%S'), xml.find('uploaded').text ) self.assertEqual( xml.find('thekey_%s' % observation.category.id).text, 'Null' ) def test_serialize_entry_to_tsv(self): observation = ObservationFactory.create() observation.properties = {'thekey': None} serializer = DataSerializer() tsv = serializer.serialize_entry_to_tsv(observation) self.assertIn('thekey_%s\tNull' % observation.category.id, tsv) def test_serialize_all_to_xml(self): number = 20 project = ProjectFactory.create(**{'isprivate': False}) ObservationFactory.create_batch( number, **{'project': project, 'properties': {'key': 'value'}} ) serializer = DataSerializer() xml = serializer.serialize_to_xml(project) self.assertEqual(len(xml.findall('entry')), number) def test_serialize_all_to_tsv(self): number = 20 project = ProjectFactory.create(**{'isprivate': False}) ObservationFactory.create_batch( number, **{'project': project, 'properties': {'key': 'value'}} ) serializer = DataSerializer() tsv = serializer.serialize_to_tsv(project) self.assertEqual(20, tsv.count('\n'))
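# --- Illustrative sketch (not part of the original test module) ------------
# One plausible way to drive the two serializers together for a project
# export, mirroring the calls exercised in the tests above; the base URL is
# a placeholder.

def _export_project(project, base_url='http://example.com'):
    form_xml = ProjectFormSerializer().serialize(project, base_url)
    data_xml = DataSerializer().serialize_to_xml(project)
    data_tsv = DataSerializer().serialize_to_tsv(project)
    return form_xml, data_xml, data_tsv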
#!/usr/bin/python2.4 # # Copyright (C) 2009 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Defines document-based classes. This module defines classes that are used to modify and describe documents and their operations. """ __author__ = 'davidbyttow@google.com (David Byttow)' import logging import util class Range(object): """Represents a start and end range with integers. Ranges map positions in the document. A range must have at least a length of zero. If zero, the range is considered to be a single point (collapsed). """ java_class = 'com.google.wave.api.Range' def __init__(self, start=0, end=1): """Initializes the range with a start and end position. Args: start: Start index of the range. end: End index of the range. Raises: ValueError: Value error if the range is invalid (less than zero). """ self.start = start self.end = end if self.end - self.start < 0: raise ValueError('Range cannot be less than 0') def __str__(self): return 'Range(' + str(self.start) + ', ' + str(self.end) + ')' def IsCollapsed(self): """"Returns true if this represents a single point as opposed to a range.""" return self.end == self.start class Annotation(object): """Represents an annotation on a document. Annotations are key/value pairs over a range of content. Annotations can be used to store data or to be interpreted by a client when displaying the data. """ java_class = 'com.google.wave.api.Annotation' def __init__(self, name, value, r=None): """Initializes this annotation with a name and value pair and a range. Args: name: Key name for this annotation. value: Value of this annotation. r: Range that this annotation is valid over. """ self.name = name self.value = value self.range = r or Range() class StringEnum(object): """Enum like class that is configured with a list of values. This class effectively implements an enum for Elements, except for that the actual values of the enums will be the string values.""" def __init__(self, *values): for name in values: setattr(self, name, name) ELEMENT_TYPE = StringEnum('INLINE_BLIP', 'INPUT', 'CHECK', 'LABEL', 'BUTTON', 'RADIO_BUTTON', 'RADIO_BUTTON_GROUP','PASSWORD', 'TEXTAREA', 'GADGET', 'IMAGE') class Element(object): """Elements are non-text content within a document. These are generally abstracted from the Robot. Although a Robot can query the properties of an element it can only interact with the specific types that the element represents. Properties of elements are both accesible directly (image.url) and through the properties dictionary (image.properties['url']). In general Element should not be instantiated by robots, but rather rely on the derrived classes. """ java_class = 'com.google.wave.api.Element' def __init__(self, element_type, **properties): """Initializes self with the specified type and any properties. Args: element_type: string typed member of ELEMENT_TYPE properties: either a dictionary of initial properties, or a dictionary with just one member properties that is itself a dictionary of properties. 
    This allows us to both use
      e = Element(atype, prop1=val1, prop2=val2, ...)
    and
      e = Element(atype, properties={prop1: val1, prop2: val2, ...}).
    """
    if len(properties) == 1 and 'properties' in properties:
      properties = properties['properties']
    self.type = element_type
    for key, val in properties.items():
      setattr(self, key, val)

  def Serialize(self):
    """Custom serializer for Elements.

    Elements need their non-standard attributes returned in a dict named
    properties.
    """
    props = {}
    data = {}
    for attr in dir(self):
      if attr.startswith('_'):
        continue
      val = getattr(self, attr)
      if val is None or callable(val):
        continue
      val = util.Serialize(val)
      if attr == 'type' or attr == 'java_class':
        data[attr] = val
      else:
        props[attr] = val
    data['properties'] = util.Serialize(props)
    return data


class FormElement(Element):

  java_class = 'com.google.wave.api.FormElement'

  def __init__(self, element_type, name, value='',
               default_value='', label=''):
    super(FormElement, self).__init__(element_type,
                                      name=name,
                                      value=value,
                                      default_value=default_value,
                                      label=label)


class Gadget(Element):

  java_class = 'com.google.wave.api.Gadget'

  def __init__(self, url='', props=None):
    if props is None:
      props = {}
    props['url'] = url
    logging.info('CONSTRUCTING gadget with:' + str(props))
    super(Gadget, self).__init__(ELEMENT_TYPE.GADGET, properties=props)


class Image(Element):

  java_class = 'com.google.wave.api.Image'

  def __init__(self, url='', width=None, height=None, attachment_id=None,
               caption=None):
    super(Image, self).__init__(ELEMENT_TYPE.IMAGE,
                                url=url, width=width, height=height,
                                attachment_id=attachment_id, caption=caption)


def ElementFromJson(json):
  """Construct one of the type of elements given a json object."""
  etype = json['type']
  logging.info('constructing: ' + str(json))
  if etype == ELEMENT_TYPE.GADGET:
    props = json['properties'].copy()
    url = props['url']
    del props['url']
    return Gadget(url=url, props=props)
  elif etype == ELEMENT_TYPE.IMAGE:
    # The image attributes live in the serialized 'properties' dict
    # (see Element.Serialize above).
    props = json.get('properties', {})
    return Image(url=props.get('url', ''),
                 width=props.get('width'),
                 height=props.get('height'),
                 attachment_id=props.get('attachment_id'),
                 caption=props.get('caption'))
  else:
    return FormElement(element_type=etype,
                       name=json['name'],
                       value=json.get('value', ''),
                       default_value=json.get('default_value', ''),
                       label=json.get('label', ''))
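# --- Illustrative sketch (not part of the original module) -----------------
# Element state round-trips through the generic 'properties' dict: attributes
# passed to a subclass constructor come back from Serialize() under
# data['properties'], and ElementFromJson() rebuilds a typed element from
# such a structure.  The URLs below are placeholders.

def _element_examples():
  gadget = Gadget(url='http://example.com/gadget.xml',
                  props={'state': 'initial'})
  data = gadget.Serialize()
  # 'type' and 'java_class' stay at the top level of data; 'url' and 'state'
  # are folded into data['properties'].

  image = ElementFromJson({'type': ELEMENT_TYPE.IMAGE,
                           'properties': {'url': 'http://example.com/a.png',
                                          'width': 320,
                                          'height': 240}})
  return data, image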
# Copyright 2016 Arie Bregman # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import argparse import getpass import sys def create_build_parser(client_subparsers, parent_parser): """Creates build parser""" build_parser = client_subparsers.add_parser("build", parents=[parent_parser]) build_action_subparser = build_parser.add_subparsers(title="action", dest="build_command") # Stop build build_stop_parser = build_action_subparser.add_parser( "stop", help="stop a given build", parents=[parent_parser]) build_stop_parser.add_argument("--number", "-b", dest="build_number", required=False, help='build number') build_stop_parser.add_argument('--name', '-j', dest="job_name", required=True, help='job name', nargs=1) def create_job_parser(client_subparsers, parent_parser): """Creates job parser""" if not sys.stdin.isatty(): default_args = sys.stdin.read().splitlines() default_nargs = '*' # when pipeing no args are accepted (empty stdin) else: default_args = None default_nargs = '+' # without piping, no args would generare error job_parser = client_subparsers.add_parser("job", parents=[parent_parser]) job_action_subparser = job_parser.add_subparsers(title="action", dest="job_command") # Count job_count_parser = job_action_subparser.add_parser( "count", help="print number of jobs", parents=[parent_parser]) job_count_parser.add_argument('string', help='job name or part of it', nargs='?') # List job_list_parser = job_action_subparser.add_parser( "list", help="list job(s)", parents=[parent_parser]) job_list_parser.add_argument('name', nargs='*', default=default_args, help='job name or part of it') # Delete job_delete_parser = job_action_subparser.add_parser( "delete", help="delete job", parents=[parent_parser]) job_delete_parser.add_argument('name', nargs=default_nargs, default=default_args, help='the name of the job(s) to delete') # Console Output job_output_parser = job_action_subparser.add_parser( "output", help="Print job console text", parents=[parent_parser]) job_output_parser.add_argument('name', help='the name of the job') job_output_parser.add_argument('--build', required=False, type=int, help='build number', dest='build_num') job_output_parser.add_argument('--current', action="store_true", help='Display current build output', dest='current') # Build job job_build_parser = job_action_subparser.add_parser( "build", help="build job", parents=[parent_parser]) job_build_parser.add_argument( 'name', help='the name of the job to build', nargs=default_nargs, default=default_args) job_build_parser.add_argument( '-p', '--parameters', type=str, help='params for parameterized job') job_build_parser.add_argument( '-y', '--params_yml', type=str, help='YAML file with params for parameterized job') # Copy job job_copy_parser = job_action_subparser.add_parser( "copy", help="copy job", parents=[parent_parser]) job_copy_parser.add_argument( 'source_job_name', help='the name of the job to copy') job_copy_parser.add_argument( 'dest_job_name', help='the name of the new job') # Disable job job_disable_parser = 
job_action_subparser.add_parser( "disable", help="disable job", parents=[parent_parser]) job_disable_parser.add_argument( 'name', help='the name of the job to disable', nargs=default_nargs, default=default_args) # Enable job job_enable_parser = job_action_subparser.add_parser( "enable", help="enables job", parents=[parent_parser]) job_enable_parser.add_argument('name', help='the name of the job to enable', nargs=default_nargs, default=default_args) # Print information on last build job_last_build_parser = job_action_subparser.add_parser( "last_build", help="Print information on last build", parents=[parent_parser]) job_last_build_parser.add_argument( 'name', help='the name of the job') def create_view_parser(client_subparsers, parent_parser): """Creates view parser""" view_parser = client_subparsers.add_parser("view", parents=[parent_parser]) view_action_subparser = view_parser.add_subparsers(title="action", dest="view_command") # View sub-commands # List views view_list_parser = view_action_subparser.add_parser( "list", help="list view(s)", parents=[parent_parser]) view_list_parser.add_argument('name', help='view name or part of it', nargs='?') # Delete view view_delete_parser = view_action_subparser.add_parser( "delete", help="delete view", parents=[parent_parser]) view_delete_parser.add_argument('name', help='the name of the view to delete') # Jobs under a specific view view_jobs_parser = view_action_subparser.add_parser( "jobs", help="List all the jobs under specific view", parents=[parent_parser]) view_jobs_parser.add_argument( 'name', help='the name of the view') # Create view view_create_parser = view_action_subparser.add_parser( "create", help="create view", parents=[parent_parser]) view_create_parser.add_argument( 'name', help='name of the view', nargs='?') # Rename view view_rename_parser = view_action_subparser.add_parser( "rename", help="rename view", parents=[parent_parser]) view_rename_parser.add_argument( 'name', help='the current name of the view') view_rename_parser.add_argument( 'new_name', help='the new name of the view') def create_node_parser(client_subparsers, parent_parser): """Creates node parser""" # Node parser node_parser = client_subparsers.add_parser("node", parents=[parent_parser]) node_action_subparser = node_parser.add_subparsers(title="action", dest="node_command") # Node sub-commands # List nodes node_list_parser = node_action_subparser.add_parser( "list", help="list node(s)", parents=[parent_parser]) node_list_parser.add_argument('name', help='node name or part of it', nargs='?') # Delete node node_delete_parser = node_action_subparser.add_parser( "delete", help="delete node", parents=[parent_parser]) node_delete_parser.add_argument('name', help='the name of the node to delete') # Create node node_create_parser = node_action_subparser.add_parser( "create", help="create node", parents=[parent_parser]) node_create_parser.add_argument('name', help='The name of the node') node_create_parser.add_argument('--description', default=None, required=False, help='The description of the node') node_create_parser.add_argument('--remotefs', default="/var/lib/jenkins", help='Remote filesystem location to use') node_create_parser.add_argument('--labels', default=None, help='Labels to associate with node') node_create_parser.add_argument('--exclusive', type=bool, default=False, help='Use this node for tied jobs only') node_create_parser.add_argument('--executors', type=int, default=2, help='The number of executors') # Info on node node_info_parser = 
node_action_subparser.add_parser( "info", help="Print info on node", parents=[parent_parser]) node_info_parser.add_argument('name', help='the name of the node') def create_plugin_parser(client_subparsers, parent_parser): """Creates plugin parser""" # Plugin parser plugin_parser = client_subparsers.add_parser("plugin", parents=[parent_parser]) plugin_action_subparser = plugin_parser.add_subparsers( title="action", dest="plugin_command") # Plugin sub-commands plugin_action_subparser.add_parser( "list", help="list plugin(s)", parents=[parent_parser]) plugin_info_parser = plugin_action_subparser.add_parser( "info", help="Print information on specified plugin", parents=[parent_parser]) plugin_info_parser.add_argument('name', help='the plugin name', nargs=1) def create_parser(): """Returns argument parser""" # Jcli top level parser parent_parser = argparse.ArgumentParser(add_help=False) main_parser = argparse.ArgumentParser() main_parser.add_argument( '--server', '-s', default='jenkins', dest="section", help="Section name in config file, defaults to 'jenkins'") main_parser.add_argument( '--user', '-u', default=getpass.getuser(), help='username') main_parser.add_argument( '--config', '-c', dest="config", help='client configuration file') main_parser.add_argument( '--debug', required=False, action='store_true', dest="debug", help='debug flag') client_subparsers = main_parser.add_subparsers( title="client", dest="main_command") create_job_parser(client_subparsers, parent_parser) create_build_parser(client_subparsers, parent_parser) create_view_parser(client_subparsers, parent_parser) create_node_parser(client_subparsers, parent_parser) create_plugin_parser(client_subparsers, parent_parser) return main_parser
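

# A minimal sketch of exercising the parser built above, e.g. from a test or an
# interactive session run from a terminal (so the stdin-piping branch in
# create_job_parser() is not taken); the command line shown is illustrative only.
if __name__ == '__main__':
    parser = create_parser()
    args = parser.parse_args(['--user', 'admin', 'job', 'list', 'coverage'])
    # main_command='job', job_command='list', name=['coverage']
    print(args.main_command, args.job_command, args.name)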
# Copyright 2019 The Magenta Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """SketchRNN data loading and image manipulation utilities.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import random import numpy as np def get_bounds(data, factor=10): """Return bounds of data.""" min_x = 0 max_x = 0 min_y = 0 max_y = 0 abs_x = 0 abs_y = 0 for i in range(len(data)): x = float(data[i, 0]) / factor y = float(data[i, 1]) / factor abs_x += x abs_y += y min_x = min(min_x, abs_x) min_y = min(min_y, abs_y) max_x = max(max_x, abs_x) max_y = max(max_y, abs_y) return (min_x, max_x, min_y, max_y) def slerp(p0, p1, t): """Spherical interpolation.""" omega = np.arccos(np.dot(p0 / np.linalg.norm(p0), p1 / np.linalg.norm(p1))) so = np.sin(omega) return np.sin((1.0 - t) * omega) / so * p0 + np.sin(t * omega) / so * p1 def lerp(p0, p1, t): """Linear interpolation.""" return (1.0 - t) * p0 + t * p1 # A note on formats: # Sketches are encoded as a sequence of strokes. stroke-3 and stroke-5 are # different stroke encodings. # stroke-3 uses 3-tuples, consisting of x-offset, y-offset, and a binary # variable which is 1 if the pen is lifted between this position and # the next, and 0 otherwise. # stroke-5 consists of x-offset, y-offset, and p_1, p_2, p_3, a binary # one-hot vector of 3 possible pen states: pen down, pen up, end of sketch. # See section 3.1 of https://arxiv.org/abs/1704.03477 for more detail. # Sketch-RNN takes input in stroke-5 format, with sketches padded to a common # maximum length and prefixed by the special start token [0, 0, 1, 0, 0] # The QuickDraw dataset is stored using stroke-3. def strokes_to_lines(strokes): """Convert stroke-3 format to polyline format.""" x = 0 y = 0 lines = [] line = [] for i in range(len(strokes)): if strokes[i, 2] == 1: x += float(strokes[i, 0]) y += float(strokes[i, 1]) line.append([x, y]) lines.append(line) line = [] else: x += float(strokes[i, 0]) y += float(strokes[i, 1]) line.append([x, y]) return lines def lines_to_strokes(lines): """Convert polyline format to stroke-3 format.""" eos = 0 strokes = [[0, 0, 0]] for line in lines: linelen = len(line) for i in range(linelen): eos = 0 if i < linelen - 1 else 1 strokes.append([line[i][0], line[i][1], eos]) strokes = np.array(strokes) strokes[1:, 0:2] -= strokes[:-1, 0:2] return strokes[1:, :] def augment_strokes(strokes, prob=0.0): """Perform data augmentation by randomly dropping out strokes.""" # drop each point within a line segments with a probability of prob # note that the logic in the loop prevents points at the ends to be dropped. result = [] prev_stroke = [0, 0, 1] count = 0 stroke = [0, 0, 1] # Added to be safe. 
for i in range(len(strokes)): candidate = [strokes[i][0], strokes[i][1], strokes[i][2]] if candidate[2] == 1 or prev_stroke[2] == 1: count = 0 else: count += 1 urnd = np.random.rand() # uniform random variable if candidate[2] == 0 and prev_stroke[2] == 0 and count > 2 and urnd < prob: stroke[0] += candidate[0] stroke[1] += candidate[1] else: stroke = candidate prev_stroke = stroke result.append(stroke) return np.array(result) def scale_bound(stroke, average_dimension=10.0): """Scale an entire image to be less than a certain size.""" # stroke is a numpy array of [dx, dy, pstate], average_dimension is a float. # modifies stroke directly. bounds = get_bounds(stroke, 1) max_dimension = max(bounds[1] - bounds[0], bounds[3] - bounds[2]) stroke[:, 0:2] /= (max_dimension / average_dimension) def to_normal_strokes(big_stroke): """Convert from stroke-5 format (from sketch-rnn paper) back to stroke-3.""" l = 0 for i in range(len(big_stroke)): if big_stroke[i, 4] > 0: l = i break if l == 0: l = len(big_stroke) result = np.zeros((l, 3)) result[:, 0:2] = big_stroke[0:l, 0:2] result[:, 2] = big_stroke[0:l, 3] return result def clean_strokes(sample_strokes, factor=100): """Cut irrelevant end points, scale to pixel space and store as integer.""" # Useful function for exporting data to .json format. copy_stroke = [] added_final = False for j in range(len(sample_strokes)): finish_flag = int(sample_strokes[j][4]) if finish_flag == 0: copy_stroke.append([ int(round(sample_strokes[j][0] * factor)), int(round(sample_strokes[j][1] * factor)), int(sample_strokes[j][2]), int(sample_strokes[j][3]), finish_flag ]) else: copy_stroke.append([0, 0, 0, 0, 1]) added_final = True break if not added_final: copy_stroke.append([0, 0, 0, 0, 1]) return copy_stroke def to_big_strokes(stroke, max_len=250): """Converts from stroke-3 to stroke-5 format and pads to given length.""" # (But does not insert special start token). result = np.zeros((max_len, 5), dtype=float) l = len(stroke) assert l <= max_len result[0:l, 0:2] = stroke[:, 0:2] result[0:l, 3] = stroke[:, 2] result[0:l, 2] = 1 - result[0:l, 3] result[l:, 4] = 1 return result def get_max_len(strokes): """Return the maximum length of an array of strokes.""" max_len = 0 for stroke in strokes: ml = len(stroke) if ml > max_len: max_len = ml return max_len class DataLoader(object): """Class for loading data.""" def __init__(self, strokes, batch_size=100, max_seq_length=250, scale_factor=1.0, random_scale_factor=0.0, augment_stroke_prob=0.0, limit=1000): self.batch_size = batch_size # minibatch size self.max_seq_length = max_seq_length # N_max in sketch-rnn paper self.scale_factor = scale_factor # divide offsets by this factor self.random_scale_factor = random_scale_factor # data augmentation method # Removes large gaps in the data. x and y offsets are clamped to have # absolute value no greater than this limit. 
self.limit = limit self.augment_stroke_prob = augment_stroke_prob # data augmentation method self.start_stroke_token = [0, 0, 1, 0, 0] # S_0 in sketch-rnn paper # sets self.strokes (list of ndarrays, one per sketch, in stroke-3 format, # sorted by size) self.preprocess(strokes) def preprocess(self, strokes): """Remove entries from strokes having > max_seq_length points.""" raw_data = [] seq_len = [] count_data = 0 for i in range(len(strokes)): data = strokes[i] if len(data) <= (self.max_seq_length): count_data += 1 # removes large gaps from the data data = np.minimum(data, self.limit) data = np.maximum(data, -self.limit) data = np.array(data, dtype=np.float32) data[:, 0:2] /= self.scale_factor raw_data.append(data) seq_len.append(len(data)) seq_len = np.array(seq_len) # nstrokes for each sketch idx = np.argsort(seq_len) self.strokes = [] for i in range(len(seq_len)): self.strokes.append(raw_data[idx[i]]) print("total images <= max_seq_len is %d" % count_data) self.num_batches = int(count_data / self.batch_size) def random_sample(self): """Return a random sample, in stroke-3 format as used by draw_strokes.""" sample = np.copy(random.choice(self.strokes)) return sample def random_scale(self, data): """Augment data by stretching x and y axis randomly [1-e, 1+e].""" x_scale_factor = ( np.random.random() - 0.5) * 2 * self.random_scale_factor + 1.0 y_scale_factor = ( np.random.random() - 0.5) * 2 * self.random_scale_factor + 1.0 result = np.copy(data) result[:, 0] *= x_scale_factor result[:, 1] *= y_scale_factor return result def calculate_normalizing_scale_factor(self): """Calculate the normalizing factor explained in appendix of sketch-rnn.""" data = [] for i in range(len(self.strokes)): if len(self.strokes[i]) > self.max_seq_length: continue for j in range(len(self.strokes[i])): data.append(self.strokes[i][j, 0]) data.append(self.strokes[i][j, 1]) data = np.array(data) return np.std(data) def normalize(self, scale_factor=None): """Normalize entire dataset (delta_x, delta_y) by the scaling factor.""" if scale_factor is None: scale_factor = self.calculate_normalizing_scale_factor() self.scale_factor = scale_factor for i in range(len(self.strokes)): self.strokes[i][:, 0:2] /= self.scale_factor def _get_batch_from_indices(self, indices): """Given a list of indices, return the potentially augmented batch.""" x_batch = [] seq_len = [] for idx in range(len(indices)): i = indices[idx] data = self.random_scale(self.strokes[i]) data_copy = np.copy(data) if self.augment_stroke_prob > 0: data_copy = augment_strokes(data_copy, self.augment_stroke_prob) x_batch.append(data_copy) length = len(data_copy) seq_len.append(length) seq_len = np.array(seq_len, dtype=int) # We return three things: stroke-3 format, stroke-5 format, list of seq_len. 
return x_batch, self.pad_batch(x_batch, self.max_seq_length), seq_len def random_batch(self): """Return a randomised portion of the training data.""" idx = np.random.permutation(range(0, len(self.strokes)))[0:self.batch_size] return self._get_batch_from_indices(idx) def get_batch(self, idx): """Get the idx'th batch from the dataset.""" assert idx >= 0, "idx must be non negative" assert idx < self.num_batches, "idx must be less than the number of batches" start_idx = idx * self.batch_size indices = range(start_idx, start_idx + self.batch_size) return self._get_batch_from_indices(indices) def pad_batch(self, batch, max_len): """Pad the batch to be stroke-5 bigger format as described in paper.""" result = np.zeros((self.batch_size, max_len + 1, 5), dtype=float) assert len(batch) == self.batch_size for i in range(self.batch_size): l = len(batch[i]) assert l <= max_len result[i, 0:l, 0:2] = batch[i][:, 0:2] result[i, 0:l, 3] = batch[i][:, 2] result[i, 0:l, 2] = 1 - result[i, 0:l, 3] result[i, l:, 4] = 1 # put in the first token, as described in sketch-rnn methodology result[i, 1:, :] = result[i, :-1, :] result[i, 0, :] = 0 result[i, 0, 2] = self.start_stroke_token[2] # setting S_0 from paper. result[i, 0, 3] = self.start_stroke_token[3] result[i, 0, 4] = self.start_stroke_token[4] return result
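

# A minimal sketch of driving the DataLoader above end to end; `sketches` is a
# stand-in for a real stroke-3 dataset (e.g. QuickDraw .npz contents) and its
# values are illustrative only.
if __name__ == '__main__':
  sketches = [np.array([[5, 5, 0], [10, 0, 1], [-3, 8, 1]], dtype=np.float32)
              for _ in range(300)]
  loader = DataLoader(sketches, batch_size=100, max_seq_length=250)
  loader.normalize()                       # divide offsets by the dataset std
  x3, x5, seq_len = loader.random_batch()  # stroke-3 list, padded stroke-5, lengths
  print(x5.shape, seq_len[:5])             # (100, 251, 5) and the first lengths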
"""Short messaging system. """ __docformat__ = 'restructuredtext en' from .utils import * class SmsMessage(Cached): """Represents an SMS message. """ _ValidateHandle = int def __repr__(self): return Cached.__repr__(self, 'Id') def _Alter(self, AlterName, Args=None): return self._Owner._Alter('SMS', self.Id, AlterName, Args) def _Init(self): self._MakeOwner() def _Property(self, PropName, Set=None, Cache=True): return self._Owner._Property('SMS', self.Id, PropName, Set, Cache) def Delete(self): """Deletes this SMS message. """ self._Owner._DoCommand('DELETE SMS %s' % self.Id) def MarkAsSeen(self): """Marks this SMS message as seen. """ self._Owner._DoCommand('SET SMS %s SEEN' % self.Id) def Send(self): """Sends this SMS message. """ self._Alter('SEND') def _GetBody(self): return self._Property('BODY') def _SetBody(self, Value): self._Property('BODY', Value) Body = property(_GetBody, _SetBody, doc="""Text of this SMS message. :type: unicode """) def _GetChunks(self): return SmsChunkCollection(self, range(int(chop(self._Property('CHUNKING', Cache=False))[0]))) Chunks = property(_GetChunks, doc="""Chunks of this SMS message. More than one if this is a multi-part message. :type: `SmsChunkCollection` """) def _GetDatetime(self): from datetime import datetime return datetime.fromtimestamp(self.Timestamp) Datetime = property(_GetDatetime, doc="""Timestamp of this SMS message as datetime object. :type: datetime.datetime """) def _GetFailureReason(self): return str(self._Property('FAILUREREASON')) FailureReason = property(_GetFailureReason, doc="""Reason an SMS message failed. Read this if `Status` == `enums.smsMessageStatusFailed`. :type: `enums`.smsFailureReason* """) def _GetId(self): return self._Handle Id = property(_GetId, doc="""Unique SMS message Id. :type: int """) def _GetIsFailedUnseen(self): return (self._Property('IS_FAILED_UNSEEN') == 'TRUE') IsFailedUnseen = property(_GetIsFailedUnseen, doc="""Tells if a failed SMS message was unseen. :type: bool """) def _GetPrice(self): return int(self._Property('PRICE')) Price = property(_GetPrice, doc="""SMS price. Expressed using `PricePrecision`. For a value expressed using `PriceCurrency`, use `PriceValue`. :type: int :see: `PriceCurrency`, `PricePrecision`, `PriceToText`, `PriceValue` """) def _GetPriceCurrency(self): return self._Property('PRICE_CURRENCY') PriceCurrency = property(_GetPriceCurrency, doc="""SMS price currency. :type: unicode :see: `Price`, `PricePrecision`, `PriceToText`, `PriceValue` """) def _GetPricePrecision(self): return int(self._Property('PRICE_PRECISION')) PricePrecision = property(_GetPricePrecision, doc="""SMS price precision. :type: int :see: `Price`, `PriceCurrency`, `PriceToText`, `PriceValue` """) def _GetPriceToText(self): return ('%s %.3f' % (self.PriceCurrency, self.PriceValue)).strip() PriceToText = property(_GetPriceToText, doc="""SMS price as properly formatted text with currency. :type: unicode :see: `Price`, `PriceCurrency`, `PricePrecision`, `PriceValue` """) def _GetPriceValue(self): if self.Price < 0: return 0.0 return float(self.Price) / (10 ** self.PricePrecision) PriceValue = property(_GetPriceValue, doc="""SMS price. Expressed in `PriceCurrency`. :type: float :see: `Price`, `PriceCurrency`, `PricePrecision`, `PriceToText` """) def _GetReplyToNumber(self): return str(self._Property('REPLY_TO_NUMBER')) def _SetReplyToNumber(self, Value): self._Property('REPLY_TO_NUMBER', Value) ReplyToNumber = property(_GetReplyToNumber, _SetReplyToNumber, doc="""Reply-to number for this SMS message. 
:type: str """) def _SetSeen(self, Value): from warnings import warn warn('SmsMessage.Seen = x: Use SmsMessage.MarkAsSeen() instead.', DeprecationWarning, stacklevel=2) if Value: self.MarkAsSeen() else: raise SkypeError(0, 'Seen can only be set to True') Seen = property(fset=_SetSeen, doc="""Set the read status of the SMS message. Accepts only True value. :type: bool :deprecated: Extremely unpythonic, use `MarkAsSeen` instead. """) def _GetStatus(self): return str(self._Property('STATUS')) Status = property(_GetStatus, doc="""SMS message status. :type: `enums`.smsMessageStatus* """) def _GetTargetNumbers(self): return tuple(split(self._Property('TARGET_NUMBERS'), ', ')) def _SetTargetNumbers(self, Value): self._Property('TARGET_NUMBERS', ', '.join(Value)) TargetNumbers = property(_GetTargetNumbers, _SetTargetNumbers, doc="""Target phone numbers. :type: tuple of str """) def _GetTargets(self): return SmsTargetCollection(self, split(self._Property('TARGET_NUMBERS'), ', ')) Targets = property(_GetTargets, doc="""Target objects. :type: `SmsTargetCollection` """) def _GetTimestamp(self): return float(self._Property('TIMESTAMP')) Timestamp = property(_GetTimestamp, doc="""Timestamp of this SMS message. :type: float :see: `Datetime` """) def _GetType(self): return str(self._Property('TYPE')) Type = property(_GetType, doc="""SMS message type :type: `enums`.smsMessageType* """) class SmsMessageCollection(CachedCollection): _CachedType = SmsMessage class SmsChunk(Cached): """Represents a single chunk of a multi-part SMS message. """ _ValidateHandle = int def __repr__(self): return Cached.__repr__(self, 'Id', 'Message') def _GetCharactersLeft(self): count, left = list(map(int, chop(self.Message._Property('CHUNKING', Cache=False)))) if self.Id == count - 1: return left return 0 CharactersLeft = property(_GetCharactersLeft, doc="""CharactersLeft. :type: int """) def _GetId(self): return self._Handle Id = property(_GetId, doc="""SMS chunk Id. :type: int """) def _GetMessage(self): return self._Owner Message = property(_GetMessage, doc="""SMS message associated with this chunk. :type: `SmsMessage` """) def _GetText(self): return self.Message._Property('CHUNK %s' % self.Id) Text = property(_GetText, doc="""Text (body) of this SMS chunk. :type: unicode """) class SmsChunkCollection(CachedCollection): _CachedType = SmsChunk class SmsTarget(Cached): """Represents a single target of a multi-target SMS message. """ _ValidateHandle = str def __repr__(self): return Cached.__repr__(self, 'Number', 'Message') def _GetMessage(self): return self._Owner Message = property(_GetMessage, doc="""An SMS message object this target refers to. :type: `SmsMessage` """) def _GetNumber(self): return self._Handle Number = property(_GetNumber, doc="""Target phone number. :type: str """) def _GetStatus(self): for t in split(self.Message._Property('TARGET_STATUSES'), ', '): number, status = t.split('=') if number == self.Number: return str(status) Status = property(_GetStatus, doc="""Status of this target. :type: `enums`.smsTargetStatus* """) class SmsTargetCollection(CachedCollection): _CachedType = SmsTarget
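

# A minimal, hedged sketch of reading an existing SmsMessage; `sms` is assumed
# to be an instance obtained from the surrounding Skype wrapper, and only the
# properties defined above are used.
def print_sms_summary(sms):
    print('SMS %d (%s) to %s' % (sms.Id, sms.Status, ', '.join(sms.TargetNumbers)))
    print('  price: %s' % sms.PriceToText)
    for chunk in sms.Chunks:
        print('  chunk %d: %s' % (chunk.Id, chunk.Text))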
""" Support for WeMo switches. For more details about this component, please refer to the documentation at https://home-assistant.io/components/switch.wemo/ """ import asyncio import logging from datetime import datetime, timedelta import requests import async_timeout from homeassistant.components.switch import SwitchDevice from homeassistant.exceptions import PlatformNotReady from homeassistant.util import convert from homeassistant.const import ( STATE_OFF, STATE_ON, STATE_STANDBY, STATE_UNKNOWN) DEPENDENCIES = ['wemo'] SCAN_INTERVAL = timedelta(seconds=10) _LOGGER = logging.getLogger(__name__) ATTR_SENSOR_STATE = 'sensor_state' ATTR_SWITCH_MODE = 'switch_mode' ATTR_CURRENT_STATE_DETAIL = 'state_detail' ATTR_COFFEMAKER_MODE = 'coffeemaker_mode' MAKER_SWITCH_MOMENTARY = 'momentary' MAKER_SWITCH_TOGGLE = 'toggle' WEMO_ON = 1 WEMO_OFF = 0 WEMO_STANDBY = 8 def setup_platform(hass, config, add_entities_callback, discovery_info=None): """Set up discovered WeMo switches.""" from pywemo import discovery if discovery_info is not None: location = discovery_info['ssdp_description'] mac = discovery_info['mac_address'] try: device = discovery.device_from_description(location, mac) except (requests.exceptions.ConnectionError, requests.exceptions.Timeout) as err: _LOGGER.error('Unable to access %s (%s)', location, err) raise PlatformNotReady if device: add_entities_callback([WemoSwitch(device)]) class WemoSwitch(SwitchDevice): """Representation of a WeMo switch.""" def __init__(self, device): """Initialize the WeMo switch.""" self.wemo = device self.insight_params = None self.maker_params = None self.coffeemaker_mode = None self._state = None self._mode_string = None self._available = True self._update_lock = None self._model_name = self.wemo.model_name self._name = self.wemo.name self._serialnumber = self.wemo.serialnumber def _subscription_callback(self, _device, _type, _params): """Update the state by the Wemo device.""" _LOGGER.info("Subscription update for %s", self.name) updated = self.wemo.subscription_update(_type, _params) self.hass.add_job( self._async_locked_subscription_callback(not updated)) async def _async_locked_subscription_callback(self, force_update): """Handle an update from a subscription.""" # If an update is in progress, we don't do anything if self._update_lock.locked(): return await self._async_locked_update(force_update) self.async_schedule_update_ha_state() @property def unique_id(self): """Return the ID of this WeMo switch.""" return self._serialnumber @property def name(self): """Return the name of the switch if any.""" return self._name @property def device_state_attributes(self): """Return the state attributes of the device.""" attr = {} if self.maker_params: # Is the maker sensor on or off. if self.maker_params['hassensor']: # Note a state of 1 matches the WeMo app 'not triggered'! if self.maker_params['sensorstate']: attr[ATTR_SENSOR_STATE] = STATE_OFF else: attr[ATTR_SENSOR_STATE] = STATE_ON # Is the maker switch configured as toggle(0) or momentary (1). 
if self.maker_params['switchmode']: attr[ATTR_SWITCH_MODE] = MAKER_SWITCH_MOMENTARY else: attr[ATTR_SWITCH_MODE] = MAKER_SWITCH_TOGGLE if self.insight_params or (self.coffeemaker_mode is not None): attr[ATTR_CURRENT_STATE_DETAIL] = self.detail_state if self.insight_params: attr['on_latest_time'] = \ WemoSwitch.as_uptime(self.insight_params['onfor']) attr['on_today_time'] = \ WemoSwitch.as_uptime(self.insight_params['ontoday']) attr['on_total_time'] = \ WemoSwitch.as_uptime(self.insight_params['ontotal']) attr['power_threshold_w'] = \ convert( self.insight_params['powerthreshold'], float, 0.0 ) / 1000.0 if self.coffeemaker_mode is not None: attr[ATTR_COFFEMAKER_MODE] = self.coffeemaker_mode return attr @staticmethod def as_uptime(_seconds): """Format seconds into uptime string in the format: 00d 00h 00m 00s.""" uptime = datetime(1, 1, 1) + timedelta(seconds=_seconds) return "{:0>2d}d {:0>2d}h {:0>2d}m {:0>2d}s".format( uptime.day-1, uptime.hour, uptime.minute, uptime.second) @property def current_power_w(self): """Return the current power usage in W.""" if self.insight_params: return convert( self.insight_params['currentpower'], float, 0.0 ) / 1000.0 @property def today_energy_kwh(self): """Return the today total energy usage in kWh.""" if self.insight_params: miliwatts = convert(self.insight_params['todaymw'], float, 0.0) return round(miliwatts / (1000.0 * 1000.0 * 60), 2) @property def detail_state(self): """Return the state of the device.""" if self.coffeemaker_mode is not None: return self._mode_string if self.insight_params: standby_state = int(self.insight_params['state']) if standby_state == WEMO_ON: return STATE_ON if standby_state == WEMO_OFF: return STATE_OFF if standby_state == WEMO_STANDBY: return STATE_STANDBY return STATE_UNKNOWN @property def is_on(self): """Return true if switch is on. Standby is on.""" return self._state @property def available(self): """Return true if switch is available.""" return self._available @property def icon(self): """Return the icon of device based on its type.""" if self._model_name == 'CoffeeMaker': return 'mdi:coffee' return None def turn_on(self, **kwargs): """Turn the switch on.""" self.wemo.on() def turn_off(self, **kwargs): """Turn the switch off.""" self.wemo.off() async def async_added_to_hass(self): """Wemo switch added to HASS.""" # Define inside async context so we know our event loop self._update_lock = asyncio.Lock() registry = self.hass.components.wemo.SUBSCRIPTION_REGISTRY await self.hass.async_add_job(registry.register, self.wemo) registry.on(self.wemo, None, self._subscription_callback) async def async_update(self): """Update WeMo state. Wemo has an aggressive retry logic that sometimes can take over a minute to return. If we don't get a state after 5 seconds, assume the Wemo switch is unreachable. If update goes through, it will be made available again. 
""" # If an update is in progress, we don't do anything if self._update_lock.locked(): return try: with async_timeout.timeout(5): await asyncio.shield(self._async_locked_update(True)) except asyncio.TimeoutError: _LOGGER.warning('Lost connection to %s', self.name) self._available = False async def _async_locked_update(self, force_update): """Try updating within an async lock.""" async with self._update_lock: await self.hass.async_add_job(self._update, force_update) def _update(self, force_update): """Update the device state.""" try: self._state = self.wemo.get_state(force_update) if self._model_name == 'Insight': self.insight_params = self.wemo.insight_params self.insight_params['standby_state'] = ( self.wemo.get_standby_state) elif self._model_name == 'Maker': self.maker_params = self.wemo.maker_params elif self._model_name == 'CoffeeMaker': self.coffeemaker_mode = self.wemo.mode self._mode_string = self.wemo.mode_string if not self._available: _LOGGER.info('Reconnected to %s', self.name) self._available = True except AttributeError as err: _LOGGER.warning("Could not update status for %s (%s)", self.name, err) self._available = False
# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Metadata request handler.""" import hashlib import hmac import os from oslo_log import log as logging from oslo_utils import encodeutils from oslo_utils import secretutils as secutils from oslo_utils import strutils import webob.dec import webob.exc from nova.api.metadata import base from nova.api import wsgi from nova import cache_utils import nova.conf from nova import context as nova_context from nova import exception from nova.i18n import _ from nova.network import neutron as neutronapi CONF = nova.conf.CONF LOG = logging.getLogger(__name__) # 160 networks is large enough to satisfy most cases. # Yet while reaching 182 networks Neutron server will break as URL length # exceeds the maximum. Left this at 160 to allow additional parameters when # they're needed. MAX_QUERY_NETWORKS = 160 class MetadataRequestHandler(wsgi.Application): """Serve metadata.""" def __init__(self): self._cache = cache_utils.get_client( expiration_time=CONF.api.metadata_cache_expiration) if (CONF.neutron.service_metadata_proxy and not CONF.neutron.metadata_proxy_shared_secret): LOG.warning("metadata_proxy_shared_secret is not configured, " "the metadata information returned by the proxy " "cannot be trusted") def get_metadata_by_remote_address(self, address): if not address: raise exception.FixedIpNotFoundForAddress(address=address) cache_key = 'metadata-%s' % address data = self._cache.get(cache_key) if data: LOG.debug("Using cached metadata for %s", address) return data try: data = base.get_metadata_by_address(address) except exception.NotFound: LOG.exception('Failed to get metadata for IP %s', address) return None if CONF.api.metadata_cache_expiration > 0: self._cache.set(cache_key, data) return data def get_metadata_by_instance_id(self, instance_id, address): cache_key = 'metadata-%s' % instance_id data = self._cache.get(cache_key) if data: LOG.debug("Using cached metadata for instance %s", instance_id) return data try: data = base.get_metadata_by_instance_id(instance_id, address) except exception.NotFound: return None if CONF.api.metadata_cache_expiration > 0: self._cache.set(cache_key, data) return data @webob.dec.wsgify(RequestClass=wsgi.Request) def __call__(self, req): if os.path.normpath(req.path_info) == "/": resp = base.ec2_md_print(base.VERSIONS + ["latest"]) req.response.body = encodeutils.to_utf8(resp) req.response.content_type = base.MIME_TYPE_TEXT_PLAIN return req.response # Convert webob.headers.EnvironHeaders to a dict and mask any sensitive # details from the logs. 
if CONF.debug: headers = {k: req.headers[k] for k in req.headers} LOG.debug('Metadata request headers: %s', strutils.mask_dict_password(headers)) if CONF.neutron.service_metadata_proxy: if req.headers.get('X-Metadata-Provider'): meta_data = self._handle_instance_id_request_from_lb(req) else: meta_data = self._handle_instance_id_request(req) else: if req.headers.get('X-Instance-ID'): LOG.warning( "X-Instance-ID present in request headers. The " "'service_metadata_proxy' option must be " "enabled to process this header.") meta_data = self._handle_remote_ip_request(req) if meta_data is None: raise webob.exc.HTTPNotFound() try: data = meta_data.lookup(req.path_info) except base.InvalidMetadataPath: raise webob.exc.HTTPNotFound() if callable(data): return data(req, meta_data) resp = base.ec2_md_print(data) req.response.body = encodeutils.to_utf8(resp) req.response.content_type = meta_data.get_mimetype() return req.response def _handle_remote_ip_request(self, req): remote_address = req.remote_addr if CONF.api.use_forwarded_for: remote_address = req.headers.get('X-Forwarded-For', remote_address) try: meta_data = self.get_metadata_by_remote_address(remote_address) except Exception: LOG.exception('Failed to get metadata for IP %s', remote_address) msg = _('An unknown error has occurred. ' 'Please try your request again.') raise webob.exc.HTTPInternalServerError(explanation=str(msg)) if meta_data is None: LOG.error('Failed to get metadata for IP %s: no metadata', remote_address) return meta_data def _handle_instance_id_request(self, req): instance_id = req.headers.get('X-Instance-ID') tenant_id = req.headers.get('X-Tenant-ID') signature = req.headers.get('X-Instance-ID-Signature') remote_address = req.headers.get('X-Forwarded-For') # Ensure that only one header was passed if instance_id is None: msg = _('X-Instance-ID header is missing from request.') elif signature is None: msg = _('X-Instance-ID-Signature header is missing from request.') elif tenant_id is None: msg = _('X-Tenant-ID header is missing from request.') elif not isinstance(instance_id, str): msg = _('Multiple X-Instance-ID headers found within request.') elif not isinstance(tenant_id, str): msg = _('Multiple X-Tenant-ID headers found within request.') else: msg = None if msg: raise webob.exc.HTTPBadRequest(explanation=msg) self._validate_shared_secret(instance_id, signature, remote_address) return self._get_meta_by_instance_id(instance_id, tenant_id, remote_address) def _get_instance_id_from_lb(self, provider_id, instance_address): # We use admin context, admin=True to lookup the # inter-Edge network port context = nova_context.get_admin_context() neutron = neutronapi.get_client(context, admin=True) # Tenant, instance ids are found in the following method: # X-Metadata-Provider contains id of the metadata provider, and since # overlapping networks cannot be connected to the same metadata # provider, the combo of tenant's instance IP and the metadata # provider has to be unique. # # The networks which are connected to the metadata provider are # retrieved in the 1st call to neutron.list_subnets() # In the 2nd call we read the ports which belong to any of the # networks retrieved above, and have the X-Forwarded-For IP address. # This combination has to be unique as explained above, and we can # read the instance_id, tenant_id from that port entry. 
# Retrieve networks which are connected to metadata provider md_subnets = neutron.list_subnets( context, advanced_service_providers=[provider_id], fields=['network_id']) if not md_subnets or not md_subnets.get('subnets'): msg = _('Could not find any subnets for provider %s') % provider_id LOG.error(msg) raise webob.exc.HTTPBadRequest(explanation=msg) md_networks = [subnet['network_id'] for subnet in md_subnets['subnets']] try: # Retrieve the instance data from the instance's port ports = [] while md_networks: ports.extend(neutron.list_ports( context, fixed_ips='ip_address=' + instance_address, network_id=md_networks[:MAX_QUERY_NETWORKS], fields=['device_id', 'tenant_id'])['ports']) md_networks = md_networks[MAX_QUERY_NETWORKS:] except Exception as e: LOG.error('Failed to get instance id for metadata ' 'request, provider %(provider)s ' 'networks %(networks)s ' 'requester %(requester)s. Error: %(error)s', {'provider': provider_id, 'networks': md_networks, 'requester': instance_address, 'error': e}) msg = _('An unknown error has occurred. ' 'Please try your request again.') raise webob.exc.HTTPBadRequest(explanation=msg) if len(ports) != 1: msg = _('Expected a single port matching provider %(pr)s ' 'and IP %(ip)s. Found %(count)d.') % { 'pr': provider_id, 'ip': instance_address, 'count': len(ports)} LOG.error(msg) raise webob.exc.HTTPBadRequest(explanation=msg) instance_data = ports[0] instance_id = instance_data['device_id'] tenant_id = instance_data['tenant_id'] # instance_data is unicode-encoded, while cache_utils doesn't like # that. Therefore we convert to str if isinstance(instance_id, str): instance_id = instance_id.encode('utf-8') return instance_id, tenant_id def _handle_instance_id_request_from_lb(self, req): remote_address = req.headers.get('X-Forwarded-For') if remote_address is None: msg = _('X-Forwarded-For is missing from request.') raise webob.exc.HTTPBadRequest(explanation=msg) provider_id = req.headers.get('X-Metadata-Provider') if provider_id is None: msg = _('X-Metadata-Provider is missing from request.') raise webob.exc.HTTPBadRequest(explanation=msg) instance_address = remote_address.split(',')[0] # If authentication token is set, authenticate if CONF.neutron.metadata_proxy_shared_secret: signature = req.headers.get('X-Metadata-Provider-Signature') self._validate_shared_secret(provider_id, signature, instance_address) cache_key = 'provider-%s-%s' % (provider_id, instance_address) data = self._cache.get(cache_key) if data: LOG.debug("Using cached metadata for %s for %s", provider_id, instance_address) instance_id, tenant_id = data else: instance_id, tenant_id = self._get_instance_id_from_lb( provider_id, instance_address) if CONF.api.metadata_cache_expiration > 0: self._cache.set(cache_key, (instance_id, tenant_id)) LOG.debug('Instance %s with address %s matches provider %s', instance_id, remote_address, provider_id) return self._get_meta_by_instance_id(instance_id, tenant_id, instance_address) def _validate_shared_secret(self, requestor_id, signature, requestor_address): expected_signature = hmac.new( encodeutils.to_utf8(CONF.neutron.metadata_proxy_shared_secret), encodeutils.to_utf8(requestor_id), hashlib.sha256).hexdigest() if (not signature or not secutils.constant_time_compare(expected_signature, signature)): if requestor_id: LOG.warning('X-Instance-ID-Signature: %(signature)s does ' 'not match the expected value: ' '%(expected_signature)s for id: ' '%(requestor_id)s. 
Request From: ' '%(requestor_address)s', {'signature': signature, 'expected_signature': expected_signature, 'requestor_id': requestor_id, 'requestor_address': requestor_address}) msg = _('Invalid proxy request signature.') raise webob.exc.HTTPForbidden(explanation=msg) def _get_meta_by_instance_id(self, instance_id, tenant_id, remote_address): try: meta_data = self.get_metadata_by_instance_id(instance_id, remote_address) except Exception: LOG.exception('Failed to get metadata for instance id: %s', instance_id) msg = _('An unknown error has occurred. ' 'Please try your request again.') raise webob.exc.HTTPInternalServerError(explanation=str(msg)) if meta_data is None: LOG.error('Failed to get metadata for instance id: %s', instance_id) elif meta_data.instance.project_id != tenant_id: LOG.warning("Tenant_id %(tenant_id)s does not match tenant_id " "of instance %(instance_id)s.", {'tenant_id': tenant_id, 'instance_id': instance_id}) # causes a 404 to be raised meta_data = None return meta_data
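

# A minimal, hedged sketch of how a metadata proxy could compute the value of
# X-Instance-ID-Signature that _validate_shared_secret() checks above;
# `shared_secret` and `instance_id` are placeholders supplied by the caller, and
# hashlib/hmac are already imported at the top of this module.
def sign_instance_id(shared_secret, instance_id):
    return hmac.new(shared_secret.encode('utf-8'),
                    instance_id.encode('utf-8'),
                    hashlib.sha256).hexdigest()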
"""Directory based queue. A port of Perl module Directory::Queue http://search.cpan.org/~lcons/Directory-Queue/ The documentation from Directory::Queue module was adapted for Python. The goal of this module is to offer a simple queue system using the underlying filesystem for storage, security and to prevent race conditions via atomic operations. It focuses on simplicity, robustness and scalability. This module allows multiple concurrent readers and writers to interact with the same queue. Provides ======== Classes: * :py:class:`dirq.queue.Queue` directory based queue * :py:class:`dirq.QueueSimple.QueueSimple` simple directory based queue * :py:class:`dirq.queue.QueueSet` set of directory based queues * :py:class:`dirq.Exceptions.QueueError` exception Documentation ============= =========== Queue class =========== :py:class:`dirq.queue.Queue` - directory based queue. Usage:: from dirq.queue import Queue # simple schema: # - there must be a "body" which is a string # - there can be a "header" which is a table/dictionary schema = {"body": "string", "header": "table?"} queuedir = "/tmp/test" # sample producer dirq = Queue(queuedir, schema=schema) import os for count in range(1,101): name = dirq.add({"body" : "element %i"%count, "header": dict(os.environ)}) print("# added element %i as %s" % (count, name)) # sample consumer dirq = Queue(queuedir, schema=schema) name = dirq.first() while name: if not dirq.lock(name): name = dirq.next() continue print("# reading element %s" % name) data = dirq.get(name) # one can use data['body'] and data['header'] here... # one could use dirq.unlock(name) to only browse the queue... dirq.remove(name) name = dirq.next() Terminology ----------- An element is something that contains one or more pieces of data. A simple string may be an element but more complex schemas can also be used, see the *Schema* section for more information. A queue is a "best effort FIFO" collection of elements. It is very hard to guarantee pure FIFO behavior with multiple writers using the same queue. Consider for instance: . Writer1: calls the add() method . Writer2: calls the add() method . Writer2: the add() method returns . Writer1: the add() method returns Who should be first in the queue, Writer1 or Writer2? For simplicity, this implementation provides only "best effort FIFO", i.e. there is a very high probability that elements are processed in FIFO order but this is not guaranteed. This is achieved by using a high-resolution time function and having elements sorted by the time the element's final directory gets created. Locking ------- Adding an element is not a problem because the add() method is atomic. In order to support multiple processes interacting with the same queue, advisory locking is used. Processes should first lock an element before working with it. In fact, the get() and remove() methods raise an exception if they are called on unlocked elements. If the process that created the lock dies without unlocking the ele- ment, we end up with a staled lock. The purge() method can be used to remove these staled locks. An element can basically be in only one of two states: locked or unlocked. A newly created element is unlocked as a writer usually does not need to do anything more with the element once dropped in the queue. Iterators return all the elements, regardless of their states. There is no method to get an element state as this information is usu- ally useless since it may change at any time. 
Instead, programs should directly try to lock elements to make sure they are indeed locked. Constructor ----------- For the signature of the Queue constructor see documentation to the respective __init__() method. Schema ------ The schema defines how user supplied data is stored in the queue. It is only required by the add() and get() methods. The schema must be a dictionary containing key/value pairs. The key must contain only alphanumerical characters. It identifies the piece of data and will be used as file name when storing the data inside the element directory. The value represents the type of the given piece of data. It can be: binary the data is a sequence of binary bytes, it will be stored directly in a plain file with no further encoding string the data is a text string (i.e. a sequence of characters), it will be UTF-8 encoded table the data is a reference to a hash of text strings, it will be seri- alized and UTF-8 encoded before being stored in a file By default, all pieces of data are mandatory. If you append a question mark to the type, this piece of data will be marked as optional. See the comments in the *Usage* section for more information. To comply with Directory::Queue implementation it is allowed to append '*' (asterisk) to data type specification, which in Directory::Queue means switching to working with element references in add() and get() operations. This is irrelevant for the Python implementation. Directory Structure ------------------- All the directories holding the elements and all the files holding the data pieces are located under the queue toplevel directory. This direc- tory can contain: temporary the directory holding temporary elements, i.e. the elements being added obsolete the directory holding obsolete elements, i.e. the elements being removed NNNNNNNN an intermediate directory holding elements; NNNNNNNN is an 8-digits long hexadecimal number In any of the above directories, an element is stored as a single directory with a 14-digits long hexadecimal name SSSSSSSSMMMMMR where: SSSSSSSS represents the number of seconds since the Epoch MMMMM represents the microsecond part of the time since the Epoch R is a random digit used to reduce name collisions Finally, inside an element directory, the different pieces of data are stored into different files, named according to the schema. A locked element contains in addition a directory named "locked". Security -------- There are no specific security mechanisms in this module. The elements are stored as plain files and directories. The filesystem security features (owner, group, permissions, ACLs...) should be used to adequately protect the data. By default, the process' umask is respected. See the class constructor documentation if you want an other behavior. If multiple readers and writers with different uids are expected, the easiest solution is to have all the files and directories inside the toplevel directory world-writable (i.e. umask=0). Then, the permissions of the toplevel directory itself (e.g. group-writable) are enough to control who can access the queue. ============== QueueSet class ============== :py:class:`dirq.queue.QueueSet` - interface to a set of Queue objects Usage:: from dirq.queue import Queue, QueueSet dq1 = Queue("/tmp/q1") dq2 = Queue("/tmp/q2") dqset = QueueSet(dq1, dq2) # dqs = [dq1, dq2] # dqset = QueueSet(dqs) (dq, elt) = dqset.first() while dq: # you can now process the element elt of queue dq... 
(dq, elt) = dqset.next() Description ----------- This class can be used to put different queues into a set and browse them as one queue. The elements from all queues are merged together and sorted independently from the queue they belong to. Constructor ----------- For the signature of the QueueSet constructor see documentation to the respective :py:meth:`dirq.queue.QueueSet.__init__` method. Author ------ Konstantin Skaburskas <konstantin.skaburskas@gmail.com> License and Copyright --------------------- ASL 2.0 Copyright (C) 2010-2012 CERN """ import dirq __author__ = dirq.AUTHOR __version__ = dirq.VERSION __date__ = dirq.DATE import errno import os import re import sys import time __all__ = ['Queue', 'QueueSet', 'QueueError'] from dirq.QueueBase import QueueBase, _DirElemRegexp, _DirectoryRegexp, \ _ElementRegexp from dirq.QueueBase import (_name, _special_mkdir, _special_rmdir, _file_read, _file_write, _directory_contents, _warn) from dirq.Exceptions import QueueError, QueueLockError from dirq.utils import VALID_STR_TYPES, VALID_INT_TYPES # name of the directory holding temporary elements TEMPORARY_DIRECTORY = "temporary" # name of the directory holding obsolete elements OBSOLETE_DIRECTORY = "obsolete" # name of the directory indicating a locked element LOCKED_DIRECTORY = "locked" # # global variables # __FileRegexp = "[0-9a-zA-Z]+" _FileRegexp = re.compile("^(%s)$" % __FileRegexp) _KeyValRegexp = re.compile('^([^\x09\x0a]*)\x09([^\x09\x0a]*)$') _Byte2Esc = {"\\" : r"\\", "\t" : r"\t", "\n" : r"\n"} _Esc2Byte = dict([(value, key) for key, value in _Byte2Esc.items()]) # # Helper Functions # def _hash2string(data): """Transform a hash of strings into a string. Raise: QueueError - invalid type of a value in hash (allowed string or unicode) Note: the keys are sorted so that identical hashes yield to identical strings """ string = '' for key in sorted(data.keys()): val = data[key] if type(val) not in VALID_STR_TYPES: raise QueueError("invalid hash value type: %r"%val) key = re.sub('(\\\\|\x09|\x0a)', lambda m: _Byte2Esc[m.group(1)], key) val = re.sub('(\\\\|\x09|\x0a)', lambda m: _Byte2Esc[m.group(1)], val) string = '%s%s' % (string, '%s\x09%s\x0a' % (key, val)) return string def _string2hash(given): """Transform a string into a hash of strings. 
Raise: QueueError - unexpected hash line Note: duplicate keys are not checked (the last one wins) """ _hash = dict() if not given: return _hash for line in given.strip('\n').split('\x0a'): match = _KeyValRegexp.match(line) if not match: raise QueueError("unexpected hash line: %s" % line) key = re.sub(r'(\\\\|\\t|\\n)', lambda m: _Esc2Byte[str(m.group(1))], match.group(1)) val = re.sub(r'(\\\\|\\t|\\n)', lambda m: _Esc2Byte[str(m.group(1))], match.group(2)) _hash[key] = val return _hash def _older(path, given_time): """ Check if a path is old enough: * return true if the path exists and is (strictly) older than given time * return false if it does not exist or it is newer * die in case of any other error Raise: OSError - can't stat given path Note: lstat() is used so symlinks are not followed """ try: stat = os.lstat(path) except Exception: error = sys.exc_info()[1] if error.errno != errno.ENOENT: raise OSError("cannot lstat(%s): %s"%(path, error)) # RACE: this path does not exist (anymore) return False else: return stat.st_mtime < given_time def __subdirs_num_nlink(path): """Count the number of sub-directories in the given directory: * return 0 if the directory does not exist (anymore) * die in case of any other error Raise: OSError - can't stat given path Note: * lstat() is used so symlinks are not followed * this only checks the number of links * we do not even check that the path indeed points to a directory! """ try: stat = os.lstat(path) except Exception: error = sys.exc_info()[1] if error.errno != errno.ENOENT: raise OSError("cannot lstat(%s): %s"%(path, error)) # RACE: this path does not exist (anymore) return 0 else: return stat.st_nlink - 2 def __subdirs_num_count(path): """Count the number of sub-directories in the given directory: - return 0 if the directory does not exist (anymore) For systems where we cannot rely on the number of links, so we simply count the number of sub-directories. """ return len(_directory_contents(path, missingok=True)) if sys.platform in ['win32', 'cygwin']: _subdirs_num = __subdirs_num_count else: _subdirs_num = __subdirs_num_nlink def _check_element(name): """Check the given string to make sure it represents a valid element name. Raise: QueueError - given element is invalid """ if not _DirElemRegexp.match(name): raise QueueError("invalid element name: %s"%name) def _count(path): """Return the number of elements in the queue, regardless of their state. Raise: OSError - can't list/stat element directories """ count = 0 for name in [x for x in _directory_contents(path)]: subdirs = _subdirs_num('%s/%s' % (path, name)) if subdirs: count += subdirs return count # # Object Oriented Interface # class Queue(QueueBase): """Directory based queue. """ def __init__(self, path, umask=None, maxelts=16000, schema=dict()): """Check and set schema. Build the queue directory structure. 
Arguments: path the queue toplevel directory umask the umask to use when creating files and directories (default: use the running process' umask) maxelts the maximum number of elements that an intermediate directory can hold (default: 16,000) schema the schema defining how to interpret user supplied data (mandatory if elements are added or read) Raise: TypeError - wrong input data types provided QueueError - problems with the queue schema definition OSError - can't create directory structure """ super(Queue, self).__init__(path, umask=umask) if type(maxelts) in VALID_INT_TYPES: self.maxelts = maxelts else: raise TypeError("'maxelts' should be int or long") # check schema self.type = {} self.mandatory = {} if schema: if not isinstance(schema, dict): raise QueueError("invalid schema: %r"%schema) for name in schema.keys(): if not _FileRegexp.match(name): raise QueueError("invalid schema name: %r"%name) if not isinstance(schema[name], str): raise QueueError("invalid data type for schema "+\ "specification: %r"%type(schema[name])) match = re.match('(binary|string|table)([\?\*]{0,2})?$', schema[name]) if not match: raise QueueError("invalid schema data type: %r" % schema[name]) self.type[name] = match.group(1) if not re.search('\?', match.group(2)): self.mandatory[name] = True if not self.mandatory: raise QueueError("invalid schema: no mandatory data") # create directories for directory in (TEMPORARY_DIRECTORY, OBSOLETE_DIRECTORY): _special_mkdir('%s/%s'%(self.path, directory), self.umask) def _is_locked_nlink(self, ename, _time=None): """Check if an element is locked. Note: this is only an indication as the state may be changed by another process Uses number of links (st_nlink) returned by os.lstat() applied to the element directory. Arguments: ename - name of an element _time - consider only locks older than the given time Return: True - if element exists and locked. If _time is provided, only return True on locks older than this time (needed by purge). False - in other cases Raises: OSError - if unable to stat() the element """ path = '%s/%s' % (self.path, ename) try: stat = os.lstat(path) except Exception: error = sys.exc_info()[1] if error.errno != errno.ENOENT: raise OSError("cannot lstat(%s): %s" % (path, error)) return False # locking increases number of links if not stat.st_nlink > 2: return False # check age if _time is given if _time and stat.st_mtime >= _time: return False return os.path.exists('%s/%s' % (path, LOCKED_DIRECTORY)) def _is_locked_nonlink(self, ename, _time=None): """See _is_locked_nlink(). This version doesn't use nlink (slower). """ path = '%s/%s' % (self.path, ename) if not os.path.exists('%s/%s' % (path, LOCKED_DIRECTORY)): return False elif not _time: return True # element exists and locked, and we were asked to act upon its age try: stat = os.lstat(path) except Exception: error = sys.exc_info()[1] if error.errno != errno.ENOENT: raise OSError("cannot lstat(%s): %s" % (path, error)) return False return stat.st_mtime < _time if sys.platform in ['win32', 'cygwin']: _is_locked = _is_locked_nonlink else: _is_locked = _is_locked_nlink def _build_elements(self): """Build list of elements. 
Raise: OSError - can't list element directories """ while self.dirs: directory = self.dirs.pop(0) _list = [] for name in _directory_contents( '%s/%s'%(self.path, directory), True): if _ElementRegexp.match(name): _list.append(name) if not _list: continue self.elts = ['%s/%s' % (directory, x) for x in sorted(_list)] return True return False def count(self): """Return the number of elements in the queue, regardless of their state. Raise: OSError - can't list/stat element directories """ return _count(self.path) def lock(self, ename, permissive=True): """Lock an element. Arguments: ename - name of an element permissive - work in permissive mode Return: * True on success * False in case the element could not be locked (in permissive mode) Raise: QueueError - invalid element name OSError - can't create lock (mkdir()/lstat() failed) Note: * locking can fail: * if the element has been locked by somebody else (EEXIST) * if the element has been removed by somebody else (ENOENT) * if the optional second argument is true, it is not an error if the element cannot be locked (permissive mode), this is the default * the directory's mtime will change automatically (after a successful mkdir()), this will later be used to detect stalled locks """ _check_element(ename) path = '%s/%s/%s' % (self.path, ename, LOCKED_DIRECTORY) try: if self.umask != None: oldumask = os.umask(self.umask) os.mkdir(path) os.umask(oldumask) else: os.mkdir(path) os.lstat(path) except Exception: error = sys.exc_info()[1] if permissive: # RACE: the locked directory already exists if error.errno == errno.EEXIST: return False # RACE: the element directory does not exist anymore if error.errno == errno.ENOENT: return False # otherwise this is unexpected... raise OSError("cannot mkdir(%s): %s" % (path, error)) try: os.lstat(path) except Exception: error = sys.exc_info()[1] if permissive: # RACE: the element directory does not exist anymore (this can # happen if an other process locked & removed the element # while our mkdir() was in progress... yes, this can happen!) if error.errno == errno.ENOENT: return False # otherwise this is unexpected... raise OSError("cannot lstat(%s): %s"%(path, str(error))) return True def unlock(self, ename, permissive=False): """Unlock an element. Arguments: ename - name of an element permissive - work in permissive mode Return: * true on success * false in case the element could not be unlocked (in permissive mode) Raise: QueueError - invalid element name OSError - can't remove lock (rmdir() failed) Note: * unlocking can fail: * if the element has been unlocked by somebody else (ENOENT) * if the element has been removed by somebody else (ENOENT) * if the optional second argument is true, it is not an error if the element cannot be unlocked (permissive mode), this is _not_ the default """ _check_element(ename) path = '%s/%s/%s' % (self.path, ename, LOCKED_DIRECTORY) try: os.rmdir(path) except Exception: error = sys.exc_info()[1] if permissive: # RACE: the element directory or its lock does not exist anymore if error.errno == errno.ENOENT: return False raise OSError("cannot rmdir(%s): %s"%(path, error)) else: return True def remove(self, ename): """Remove locked element from the queue. Arguments: ename - name of an element Raise: QueueError - invalid element name; element not locked; unexpected file in the element directory OSError - can't rename/remove a file/directory Note: doesn't return anything explicitly (i.e. 
returns NoneType) or fails """ _check_element(ename) if not self._is_locked(ename): raise QueueError("cannot remove %s: not locked"%ename) # move the element out of its intermediate directory path = '%s/%s' % (self.path, ename) while True: temp = '%s/%s/%s' % (self.path, OBSOLETE_DIRECTORY, _name()) try: os.rename(path, temp) break except Exception: error = sys.exc_info()[1] if error.errno != errno.ENOTEMPTY and \ error.errno != errno.EEXIST: raise OSError("cannot rename(%s, %s): %s"%(ename, temp, error)) # RACE: the target directory was already present... # remove the data files for name in _directory_contents(temp): if name == LOCKED_DIRECTORY: continue if not _FileRegexp.match(name): raise QueueError("unexpected file in %s: %s"%(temp, name)) path = '%s/%s' % (temp, name) try: os.unlink(path) except Exception: error = sys.exc_info()[1] raise OSError("cannot unlink(%s): %s"%(path, error)) # remove the locked directory path = '%s/%s' % (temp, LOCKED_DIRECTORY) while True: try: os.rmdir(path) except Exception: error = sys.exc_info()[1] raise OSError("cannot rmdir(%s): %s"%(path, error)) try: os.rmdir(temp) return except Exception: error = sys.exc_info()[1] if error.errno != errno.ENOTEMPTY and \ error.errno != errno.EEXIST: raise OSError("cannot rmdir(%s): %s"%(temp, error)) # RACE: this can happen if an other process managed to lock # this element while it was being removed (see the comment in # the lock() method) so we try to remove the lock again # and again... def dequeue(self, ename, permissive=True): """Dequeue an element from the queue. Removes element from the queue. Performs operations: lock(name), get(name), remove(name) Arguments: ename - name of an element Return: dictionary representing an element Raise: QueueLockError - coulnd't lock element QueueError - problems with schema/data types/etc. OSError - problems opening/closing directory/file """ if not self.lock(ename, permissive=permissive): raise QueueLockError("couldn't lock element: %s" % ename) element = self.get(ename) self.remove(ename) return element def get(self, ename): """Get an element data from a locked element. Arguments: ename - name of an element Return: dictionary representing an element Raise: QueueError - schema is unknown; unexpected data type in the schema specification; missing mandatory file of the element OSError - problems opening/closing file IOError - file read error """ if not self.type: raise QueueError("unknown schema") _check_element(ename) if not self._is_locked(ename): raise QueueError("cannot get %s: not locked"%ename) data = {} for dname in self.type.keys(): path = '%s/%s/%s' % (self.path, ename, dname) try: os.lstat(path) except Exception: error = sys.exc_info()[1] if error.errno != errno.ENOENT: raise OSError("cannot lstat(%s): %s"%(path, error)) if dname in self.mandatory: raise QueueError("missing data file: %s"%path) else: continue if self.type[dname] == 'binary': data[dname] = _file_read(path, 0) elif self.type[dname] == 'string': data[dname] = _file_read(path, 1) elif self.type[dname] == 'table': data[dname] = _string2hash(_file_read(path, 1)) else: raise QueueError("unexpected data type: %s"%self.type[dname]) return data def get_element(self, ename, permissive=True): """Get an element from the queue. Element will not be removed. 
Operations performed: lock(name), get(name), unlock(name) Arguments: ename - name of an element Raise: QueueLockError - couldn't lock element """ if not self.lock(ename, permissive=permissive): raise QueueLockError("couldn't lock element: %s" % ename) element = self.get(ename) self.unlock(ename, permissive=permissive) return element def _insertion_directory(self): """Return the name of the intermediate directory that can be used for insertion: * if there is none, an initial one will be created * if it is full, a new one will be created * in any case the name will match $_DirectoryRegexp Raise: OSError - can't list/make element directories """ _list = [] # get the list of existing directories for name in _directory_contents(self.path): if _DirectoryRegexp.match(name): _list.append(name) # handle the case with no directories yet if not _list: name = '%08x' % 0 _special_mkdir('%s/%s' % (self.path, name), self.umask) return name # check the last directory _list.sort() name = _list[-1] subdirs = _subdirs_num('%s/%s' % (self.path, name)) if subdirs: if subdirs < self.maxelts: return name else: # RACE: at this point, the directory does not exist anymore, # so it must have been purged after we listed the directory # contents. We do not try to do more and simply create a new # directory pass # we need a new directory name = '%08x' % (int(name, 16) + 1) _special_mkdir('%s/%s' % (self.path, name), self.umask) return name def add(self, data): """Add a new element to the queue and return its name. Arguments: data - element as a dictionary (should conform to the schema) Raise: QueueError - problem with schema definition or data OSError - problem putting element on disk Note: the destination directory must _not_ be created beforehand as it would be seen as a valid (but empty) element directory by another process, we therefore use rename() from a temporary directory """ if not self.type: raise QueueError("unknown schema") while True: temp = '%s/%s/%s' % (self.path, TEMPORARY_DIRECTORY, _name()) if _special_mkdir(temp, self.umask): break for name in data.keys(): if name not in self.type: raise QueueError("unexpected data: %s"%name) if self.type[name] == 'binary': if type(data[name]) not in VALID_STR_TYPES: raise QueueError("unexpected binary data in %s: %r"%(name, data[name])) _file_write('%s/%s' % (temp, name), 0, self.umask, data[name]) elif self.type[name] == 'string': if type(data[name]) not in VALID_STR_TYPES: raise QueueError("unexpected string data in %s: %r"%(name, data[name])) _file_write('%s/%s' % (temp, name), 1, self.umask, data[name]) elif self.type[name] == 'table': if not isinstance(data[name], dict): raise QueueError("unexpected table data in %s: %r"%(name, data[name])) _file_write('%s/%s' % (temp, name), 1, self.umask, _hash2string(data[name])) else: raise QueueError("unexpected data type in %s: %r"%(name, self.type[name])) for name in self.mandatory.keys(): if name not in data: raise QueueError("missing mandatory data: %s"%name) while True: name = '%s/%s' % (self._insertion_directory(), _name()) path = '%s/%s' % (self.path, name) try: os.rename(temp, path) return name except Exception: error = sys.exc_info()[1] if error.errno != errno.ENOTEMPTY and \ error.errno != errno.EEXIST: raise OSError("cannot rename(%s, %s): %s"%(temp, path, error)) # RACE: the target directory was already present... enqueue = add def _volatile(self): """Return the list of volatile (i.e. temporary or obsolete) directories. 
""" _list = [] for name in _directory_contents( '%s/%s'%(self.path,TEMPORARY_DIRECTORY), True): if _ElementRegexp.match(name): _list.append('%s/%s' % (TEMPORARY_DIRECTORY, name)) for name in _directory_contents( '%s/%s'%(self.path,OBSOLETE_DIRECTORY), True): if _ElementRegexp.match(name): _list.append('%s/%s' % (OBSOLETE_DIRECTORY, name)) return _list def purge(self, maxtemp=300, maxlock=600): """Purge the queue: * delete unused intermediate directories * delete too old temporary directories * unlock too old locked directories Arguments: maxtemp - maximum time for a temporary element. If 0, temporary elements will not be removed. maxlock - maximum time for a locked element. If 0, locked elements will not be unlocked. Raise: OSError - problem deleting element from disk Note: this uses first()/next() to iterate so this will reset the cursor """ # get the list of intermediate directories _list = [] for name in _directory_contents(self.path): if _DirectoryRegexp.match(name): _list.append(name) _list.sort() # try to purge all but last one if len(_list) > 1: _list.pop() for name in _list: path = '%s/%s' % (self.path, name) if _subdirs_num(path): continue _special_rmdir(path) # remove the volatile directories which are too old if maxtemp: oldtime = time.time() - maxtemp for name in self._volatile(): path = '%s/%s' % (self.path, name) if _older(path, oldtime): _warn("* removing too old volatile element: %s" % name) for file_name in _directory_contents(path, True): if file_name == LOCKED_DIRECTORY: continue fpath = '%s/%s' % (path, file_name) try: os.unlink(fpath) except Exception: error = sys.exc_info()[1] if error.errno != errno.ENOENT: raise OSError("cannot unlink(%s): %s"%(fpath, error)) _special_rmdir('%s/%s' % (path, LOCKED_DIRECTORY)) _special_rmdir(path) # iterate to find abandoned locked entries if maxlock: oldtime = time.time() - maxlock name = self.first() while name: if self._is_locked(name, oldtime): # TODO: check if remove_element is needed or # "unlocking" instead. _warn("* removing too old locked element: %s" % name) self.unlock(name, True) name = self.next() # For backward compatibility. from dirq.QueueSet import QueueSet
import django_filters from django_filters.widgets import BooleanWidget from waldur_core.core import filters as core_filters from waldur_core.structure import filters as structure_filters from waldur_openstack.openstack_tenant.utils import get_valid_availability_zones from . import models class FlavorFilter(structure_filters.ServicePropertySettingsFilter): name_iregex = django_filters.CharFilter(field_name='name', lookup_expr='iregex') o = django_filters.OrderingFilter(fields=('cores', 'ram', 'disk')) class Meta(structure_filters.ServicePropertySettingsFilter.Meta): model = models.Flavor fields = dict( { 'cores': ['exact', 'gte', 'lte'], 'ram': ['exact', 'gte', 'lte'], 'disk': ['exact', 'gte', 'lte'], }, **{ field: ['exact'] for field in structure_filters.ServicePropertySettingsFilter.Meta.fields } ) class NetworkFilter(structure_filters.ServicePropertySettingsFilter): class Meta(structure_filters.ServicePropertySettingsFilter.Meta): model = models.Network fields = structure_filters.ServicePropertySettingsFilter.Meta.fields + ( 'type', 'is_external', ) class SubNetFilter(structure_filters.ServicePropertySettingsFilter): network = core_filters.URLFilter( view_name='openstacktenant-network-detail', field_name='network__uuid' ) network_uuid = django_filters.UUIDFilter(field_name='network__uuid') class Meta(structure_filters.ServicePropertySettingsFilter.Meta): model = models.SubNet fields = structure_filters.ServicePropertySettingsFilter.Meta.fields + ( 'ip_version', 'enable_dhcp', ) class FloatingIPFilter(structure_filters.ServicePropertySettingsFilter): free = django_filters.BooleanFilter( field_name='internal_ip', lookup_expr='isnull', widget=BooleanWidget ) class Meta(structure_filters.ServicePropertySettingsFilter.Meta): model = models.FloatingIP fields = structure_filters.ServicePropertySettingsFilter.Meta.fields + ( 'runtime_state', 'address', 'is_booked', ) class VolumeFilter(structure_filters.BaseResourceFilter): instance = core_filters.URLFilter( view_name='openstacktenant-instance-detail', field_name='instance__uuid' ) instance_uuid = django_filters.UUIDFilter(field_name='instance__uuid') snapshot = core_filters.URLFilter( view_name='openstacktenant-snapshot-detail', field_name='restoration__snapshot__uuid', ) snapshot_uuid = django_filters.UUIDFilter(field_name='restoration__snapshot__uuid') availability_zone_name = django_filters.CharFilter( field_name='availability_zone__name' ) attach_instance_uuid = django_filters.UUIDFilter(method='filter_attach_instance') def filter_attach_instance(self, queryset, name, value): """ This filter is used by volume attachment dialog for instance. It allows to filter out volumes that could be attached to the given instance. 
""" try: instance = models.Instance.objects.get(uuid=value) except models.Volume.DoesNotExist: return queryset.none() queryset = queryset.filter( service_settings=instance.service_settings, project=instance.project, ).exclude(instance=instance) zones_map = get_valid_availability_zones(instance) if instance.availability_zone and zones_map: zone_names = { nova_zone for (nova_zone, cinder_zone) in zones_map.items() if cinder_zone == instance.availability_zone.name } nova_zones = models.InstanceAvailabilityZone.objects.filter( settings=instance.service_settings, name__in=zone_names, available=True, ) queryset = queryset.filter(availability_zone__in=nova_zones) return queryset class Meta(structure_filters.BaseResourceFilter.Meta): model = models.Volume fields = structure_filters.BaseResourceFilter.Meta.fields + ('runtime_state',) ORDERING_FIELDS = structure_filters.BaseResourceFilter.ORDERING_FIELDS + ( ('instance__name', 'instance_name'), ('size', 'size'), ) class SnapshotFilter(structure_filters.BaseResourceFilter): source_volume_uuid = django_filters.UUIDFilter(field_name='source_volume__uuid') source_volume = core_filters.URLFilter( view_name='openstacktenant-volume-detail', field_name='source_volume__uuid' ) backup_uuid = django_filters.UUIDFilter(field_name='backups__uuid') backup = core_filters.URLFilter( view_name='openstacktenant-backup-detail', field_name='backups__uuid' ) snapshot_schedule = core_filters.URLFilter( view_name='openstacktenant-snapshot-schedule-detail', field_name='snapshot_schedule__uuid', ) snapshot_schedule_uuid = django_filters.UUIDFilter( field_name='snapshot_schedule__uuid' ) class Meta(structure_filters.BaseResourceFilter.Meta): model = models.Snapshot fields = structure_filters.BaseResourceFilter.Meta.fields + ('runtime_state',) ORDERING_FIELDS = structure_filters.BaseResourceFilter.ORDERING_FIELDS + ( ('source_volume__name', 'source_volume_name'), ('size', 'size'), ) class InstanceAvailabilityZoneFilter(structure_filters.ServicePropertySettingsFilter): class Meta(structure_filters.ServicePropertySettingsFilter.Meta): model = models.InstanceAvailabilityZone class InstanceFilter(structure_filters.BaseResourceFilter): external_ip = django_filters.CharFilter( field_name='internal_ips_set__floating_ips__address' ) availability_zone_name = django_filters.CharFilter( field_name='availability_zone__name' ) attach_volume_uuid = django_filters.UUIDFilter(method='filter_attach_volume') def filter_attach_volume(self, queryset, name, value): """ This filter is used by volume attachment dialog. It allows to filter out instances that could be attached to the given volume. 
""" try: volume = models.Volume.objects.get(uuid=value) except models.Volume.DoesNotExist: return queryset.none() queryset = queryset.filter( service_settings=volume.service_settings, project=volume.project ) zones_map = get_valid_availability_zones(volume) if volume.availability_zone and zones_map: zone_names = { nova_zone for (nova_zone, cinder_zone) in zones_map.items() if cinder_zone == volume.availability_zone.name } nova_zones = models.InstanceAvailabilityZone.objects.filter( settings=volume.service_settings, name__in=zone_names, available=True, ) queryset = queryset.filter(availability_zone__in=nova_zones) return queryset class Meta(structure_filters.BaseResourceFilter.Meta): model = models.Instance fields = structure_filters.BaseResourceFilter.Meta.fields + ( 'runtime_state', 'external_ip', ) ORDERING_FIELDS = structure_filters.BaseResourceFilter.ORDERING_FIELDS + ( ('internal_ips_set__fixed_ips__0__ip_address', 'ip_address'), ('internal_ips_set__floating_ips__address', 'external_ips'), ) class BackupFilter(structure_filters.BaseResourceFilter): instance = core_filters.URLFilter( view_name='openstacktenant-instance-detail', field_name='instance__uuid' ) instance_uuid = django_filters.UUIDFilter(field_name='instance__uuid') backup_schedule = core_filters.URLFilter( view_name='openstacktenant-backup-schedule-detail', field_name='backup_schedule__uuid', ) backup_schedule_uuid = django_filters.UUIDFilter(field_name='backup_schedule__uuid') class Meta(structure_filters.BaseResourceFilter.Meta): model = models.Backup class BackupScheduleFilter(structure_filters.BaseResourceFilter): instance = core_filters.URLFilter( view_name='openstacktenant-instance-detail', field_name='instance__uuid' ) instance_uuid = django_filters.UUIDFilter(field_name='instance__uuid') class Meta(structure_filters.BaseResourceFilter.Meta): model = models.BackupSchedule class SnapshotScheduleFilter(structure_filters.BaseResourceFilter): source_volume = core_filters.URLFilter( view_name='openstacktenant-volume-detail', field_name='source_volume__uuid' ) source_volume_uuid = django_filters.UUIDFilter(field_name='source_volume__uuid') class Meta(structure_filters.BaseResourceFilter.Meta): model = models.SnapshotSchedule class SecurityGroupFilter(structure_filters.ServicePropertySettingsFilter): class Meta(structure_filters.ServicePropertySettingsFilter.Meta): model = models.SecurityGroup class ImageFilter(structure_filters.ServicePropertySettingsFilter): class Meta(structure_filters.ServicePropertySettingsFilter.Meta): model = models.Image class VolumeTypeFilter(structure_filters.ServicePropertySettingsFilter): class Meta(structure_filters.ServicePropertySettingsFilter.Meta): model = models.VolumeType class VolumeAvailabilityZoneFilter(structure_filters.ServicePropertySettingsFilter): class Meta(structure_filters.ServicePropertySettingsFilter.Meta): model = models.VolumeAvailabilityZone
from __future__ import absolute_import, unicode_literals __version__ = '0.39' __license__ = 'MIT' import re import os import sys import time import logging import marshal import tempfile import threading from math import log from hashlib import md5 from ._compat import * from . import finalseg if os.name == 'nt': from shutil import move as _replace_file else: _replace_file = os.rename _get_abs_path = lambda path: os.path.normpath(os.path.join(os.getcwd(), path)) DEFAULT_DICT = None DEFAULT_DICT_NAME = "dict.txt" log_console = logging.StreamHandler(sys.stderr) default_logger = logging.getLogger(__name__) default_logger.setLevel(logging.DEBUG) default_logger.addHandler(log_console) DICT_WRITING = {} pool = None re_userdict = re.compile('^(.+?)( [0-9]+)?( [a-z]+)?$', re.U) re_eng = re.compile('[a-zA-Z0-9]', re.U) # \u4E00-\u9FD5a-zA-Z0-9+#&\._ : All non-space characters. Will be handled with re_han # \r\n|\s : whitespace characters. Will not be handled. # re_han_default = re.compile("([\u4E00-\u9FD5a-zA-Z0-9+#&\._%]+)", re.U) # Adding "-" symbol in re_han_default re_han_default = re.compile("([\u4E00-\u9FD5a-zA-Z0-9+#&\._%\-]+)", re.U) re_skip_default = re.compile("(\r\n|\s)", re.U) re_han_cut_all = re.compile("([\u4E00-\u9FD5]+)", re.U) re_skip_cut_all = re.compile("[^a-zA-Z0-9+#\n]", re.U) def setLogLevel(log_level): global logger default_logger.setLevel(log_level) class Tokenizer(object): def __init__(self, dictionary=DEFAULT_DICT): self.lock = threading.RLock() if dictionary == DEFAULT_DICT: self.dictionary = dictionary else: self.dictionary = _get_abs_path(dictionary) self.FREQ = {} self.total = 0 self.user_word_tag_tab = {} self.initialized = False self.tmp_dir = None self.cache_file = None def __repr__(self): return '<Tokenizer dictionary=%r>' % self.dictionary def gen_pfdict(self, f): lfreq = {} ltotal = 0 f_name = resolve_filename(f) for lineno, line in enumerate(f, 1): try: line = line.strip().decode('utf-8') word, freq = line.split(' ')[:2] freq = int(freq) lfreq[word] = freq ltotal += freq for ch in xrange(len(word)): wfrag = word[:ch + 1] if wfrag not in lfreq: lfreq[wfrag] = 0 except ValueError: raise ValueError( 'invalid dictionary entry in %s at Line %s: %s' % (f_name, lineno, line)) f.close() return lfreq, ltotal def initialize(self, dictionary=None): if dictionary: abs_path = _get_abs_path(dictionary) if self.dictionary == abs_path and self.initialized: return else: self.dictionary = abs_path self.initialized = False else: abs_path = self.dictionary with self.lock: try: with DICT_WRITING[abs_path]: pass except KeyError: pass if self.initialized: return default_logger.debug("Building prefix dict from %s ..." 
% (abs_path or 'the default dictionary')) t1 = time.time() if self.cache_file: cache_file = self.cache_file # default dictionary elif abs_path == DEFAULT_DICT: cache_file = "jieba.cache" # custom dictionary else: cache_file = "jieba.u%s.cache" % md5( abs_path.encode('utf-8', 'replace')).hexdigest() cache_file = os.path.join( self.tmp_dir or tempfile.gettempdir(), cache_file) # prevent absolute path in self.cache_file tmpdir = os.path.dirname(cache_file) load_from_cache_fail = True if os.path.isfile(cache_file) and (abs_path == DEFAULT_DICT or os.path.getmtime(cache_file) > os.path.getmtime(abs_path)): default_logger.debug( "Loading model from cache %s" % cache_file) try: with open(cache_file, 'rb') as cf: self.FREQ, self.total = marshal.load(cf) load_from_cache_fail = False except Exception: load_from_cache_fail = True if load_from_cache_fail: wlock = DICT_WRITING.get(abs_path, threading.RLock()) DICT_WRITING[abs_path] = wlock with wlock: self.FREQ, self.total = self.gen_pfdict(self.get_dict_file()) default_logger.debug( "Dumping model to file cache %s" % cache_file) try: # prevent moving across different filesystems fd, fpath = tempfile.mkstemp(dir=tmpdir) with os.fdopen(fd, 'wb') as temp_cache_file: marshal.dump( (self.FREQ, self.total), temp_cache_file) _replace_file(fpath, cache_file) except Exception: default_logger.exception("Dump cache file failed.") try: del DICT_WRITING[abs_path] except KeyError: pass self.initialized = True default_logger.debug( "Loading model cost %.3f seconds." % (time.time() - t1)) default_logger.debug("Prefix dict has been built successfully.") def check_initialized(self): if not self.initialized: self.initialize() def calc(self, sentence, DAG, route): N = len(sentence) route[N] = (0, 0) logtotal = log(self.total) for idx in xrange(N - 1, -1, -1): route[idx] = max((log(self.FREQ.get(sentence[idx:x + 1]) or 1) - logtotal + route[x + 1][0], x) for x in DAG[idx]) def get_DAG(self, sentence): self.check_initialized() DAG = {} N = len(sentence) for k in xrange(N): tmplist = [] i = k frag = sentence[k] while i < N and frag in self.FREQ: if self.FREQ[frag]: tmplist.append(i) i += 1 frag = sentence[k:i + 1] if not tmplist: tmplist.append(k) DAG[k] = tmplist return DAG def __cut_all(self, sentence): dag = self.get_DAG(sentence) old_j = -1 for k, L in iteritems(dag): if len(L) == 1 and k > old_j: yield sentence[k:L[0] + 1] old_j = L[0] else: for j in L: if j > k: yield sentence[k:j + 1] old_j = j def __cut_DAG_NO_HMM(self, sentence): DAG = self.get_DAG(sentence) route = {} self.calc(sentence, DAG, route) x = 0 N = len(sentence) buf = '' while x < N: y = route[x][1] + 1 l_word = sentence[x:y] if re_eng.match(l_word) and len(l_word) == 1: buf += l_word x = y else: if buf: yield buf buf = '' yield l_word x = y if buf: yield buf buf = '' def __cut_DAG(self, sentence): DAG = self.get_DAG(sentence) route = {} self.calc(sentence, DAG, route) x = 0 buf = '' N = len(sentence) while x < N: y = route[x][1] + 1 l_word = sentence[x:y] if y - x == 1: buf += l_word else: if buf: if len(buf) == 1: yield buf buf = '' else: if not self.FREQ.get(buf): recognized = finalseg.cut(buf) for t in recognized: yield t else: for elem in buf: yield elem buf = '' yield l_word x = y if buf: if len(buf) == 1: yield buf elif not self.FREQ.get(buf): recognized = finalseg.cut(buf) for t in recognized: yield t else: for elem in buf: yield elem def cut(self, sentence, cut_all=False, HMM=True): ''' The main function that segments an entire sentence that contains Chinese characters into separated words. 
Parameter: - sentence: The str(unicode) to be segmented. - cut_all: Model type. True for full pattern, False for accurate pattern. - HMM: Whether to use the Hidden Markov Model. ''' sentence = strdecode(sentence) if cut_all: re_han = re_han_cut_all re_skip = re_skip_cut_all else: re_han = re_han_default re_skip = re_skip_default if cut_all: cut_block = self.__cut_all elif HMM: cut_block = self.__cut_DAG else: cut_block = self.__cut_DAG_NO_HMM blocks = re_han.split(sentence) for blk in blocks: if not blk: continue if re_han.match(blk): for word in cut_block(blk): yield word else: tmp = re_skip.split(blk) for x in tmp: if re_skip.match(x): yield x elif not cut_all: for xx in x: yield xx else: yield x def cut_for_search(self, sentence, HMM=True): """ Finer segmentation for search engines. """ words = self.cut(sentence, HMM=HMM) for w in words: if len(w) > 2: for i in xrange(len(w) - 1): gram2 = w[i:i + 2] if self.FREQ.get(gram2): yield gram2 if len(w) > 3: for i in xrange(len(w) - 2): gram3 = w[i:i + 3] if self.FREQ.get(gram3): yield gram3 yield w def lcut(self, *args, **kwargs): return list(self.cut(*args, **kwargs)) def lcut_for_search(self, *args, **kwargs): return list(self.cut_for_search(*args, **kwargs)) _lcut = lcut _lcut_for_search = lcut_for_search def _lcut_no_hmm(self, sentence): return self.lcut(sentence, False, False) def _lcut_all(self, sentence): return self.lcut(sentence, True) def _lcut_for_search_no_hmm(self, sentence): return self.lcut_for_search(sentence, False) def get_dict_file(self): if self.dictionary == DEFAULT_DICT: return get_module_res(DEFAULT_DICT_NAME) else: return open(self.dictionary, 'rb') def load_userdict(self, f): ''' Load personalized dict to improve detect rate. Parameter: - f : A plain text file contains words and their ocurrences. Can be a file-like object, or the path of the dictionary file, whose encoding must be utf-8. Structure of dict file: word1 freq1 word_type1 word2 freq2 word_type2 ... Word type may be ignored ''' self.check_initialized() if isinstance(f, string_types): f_name = f f = open(f, 'rb') else: f_name = resolve_filename(f) for lineno, ln in enumerate(f, 1): line = ln.strip() if not isinstance(line, text_type): try: line = line.decode('utf-8').lstrip('\ufeff') except UnicodeDecodeError: raise ValueError('dictionary file %s must be utf-8' % f_name) if not line: continue # match won't be None because there's at least one character word, freq, tag = re_userdict.match(line).groups() if freq is not None: freq = freq.strip() if tag is not None: tag = tag.strip() self.add_word(word, freq, tag) def add_word(self, word, freq=None, tag=None): """ Add a word to dictionary. freq and tag can be omitted, freq defaults to be a calculated value that ensures the word can be cut out. """ self.check_initialized() word = strdecode(word) freq = int(freq) if freq is not None else self.suggest_freq(word, False) self.FREQ[word] = freq self.total += freq if tag: self.user_word_tag_tab[word] = tag for ch in xrange(len(word)): wfrag = word[:ch + 1] if wfrag not in self.FREQ: self.FREQ[wfrag] = 0 if freq == 0: finalseg.add_force_split(word) def del_word(self, word): """ Convenient function for deleting a word. """ self.add_word(word, 0) def suggest_freq(self, segment, tune=False): """ Suggest word frequency to force the characters in a word to be joined or splitted. Parameter: - segment : The segments that the word is expected to be cut into, If the word should be treated as a whole, use a str. - tune : If True, tune the word frequency. 
Note that HMM may affect the final result. If the result doesn't change, set HMM=False. """ self.check_initialized() ftotal = float(self.total) freq = 1 if isinstance(segment, string_types): word = segment for seg in self.cut(word, HMM=False): freq *= self.FREQ.get(seg, 1) / ftotal freq = max(int(freq * self.total) + 1, self.FREQ.get(word, 1)) else: segment = tuple(map(strdecode, segment)) word = ''.join(segment) for seg in segment: freq *= self.FREQ.get(seg, 1) / ftotal freq = min(int(freq * self.total), self.FREQ.get(word, 0)) if tune: self.add_word(word, freq) return freq def tokenize(self, unicode_sentence, mode="default", HMM=True): """ Tokenize a sentence and yields tuples of (word, start, end) Parameter: - sentence: the str(unicode) to be segmented. - mode: "default" or "search", "search" is for finer segmentation. - HMM: whether to use the Hidden Markov Model. """ if not isinstance(unicode_sentence, text_type): raise ValueError("jieba: the input parameter should be unicode.") start = 0 if mode == 'default': for w in self.cut(unicode_sentence, HMM=HMM): width = len(w) yield (w, start, start + width) start += width else: for w in self.cut(unicode_sentence, HMM=HMM): width = len(w) if len(w) > 2: for i in xrange(len(w) - 1): gram2 = w[i:i + 2] if self.FREQ.get(gram2): yield (gram2, start + i, start + i + 2) if len(w) > 3: for i in xrange(len(w) - 2): gram3 = w[i:i + 3] if self.FREQ.get(gram3): yield (gram3, start + i, start + i + 3) yield (w, start, start + width) start += width def set_dictionary(self, dictionary_path): with self.lock: abs_path = _get_abs_path(dictionary_path) if not os.path.isfile(abs_path): raise Exception("jieba: file does not exist: " + abs_path) self.dictionary = abs_path self.initialized = False # default Tokenizer instance dt = Tokenizer() # global functions get_FREQ = lambda k, d=None: dt.FREQ.get(k, d) add_word = dt.add_word calc = dt.calc cut = dt.cut lcut = dt.lcut cut_for_search = dt.cut_for_search lcut_for_search = dt.lcut_for_search del_word = dt.del_word get_DAG = dt.get_DAG get_dict_file = dt.get_dict_file initialize = dt.initialize load_userdict = dt.load_userdict set_dictionary = dt.set_dictionary suggest_freq = dt.suggest_freq tokenize = dt.tokenize user_word_tag_tab = dt.user_word_tag_tab def _lcut_all(s): return dt._lcut_all(s) def _lcut(s): return dt._lcut(s) def _lcut_no_hmm(s): return dt._lcut_no_hmm(s) def _lcut_all(s): return dt._lcut_all(s) def _lcut_for_search(s): return dt._lcut_for_search(s) def _lcut_for_search_no_hmm(s): return dt._lcut_for_search_no_hmm(s) def _pcut(sentence, cut_all=False, HMM=True): parts = strdecode(sentence).splitlines(True) if cut_all: result = pool.map(_lcut_all, parts) elif HMM: result = pool.map(_lcut, parts) else: result = pool.map(_lcut_no_hmm, parts) for r in result: for w in r: yield w def _pcut_for_search(sentence, HMM=True): parts = strdecode(sentence).splitlines(True) if HMM: result = pool.map(_lcut_for_search, parts) else: result = pool.map(_lcut_for_search_no_hmm, parts) for r in result: for w in r: yield w def enable_parallel(processnum=None): """ Change the module's `cut` and `cut_for_search` functions to the parallel version. Note that this only works using dt, custom Tokenizer instances are not supported. 
""" global pool, dt, cut, cut_for_search from multiprocessing import cpu_count if os.name == 'nt': raise NotImplementedError( "jieba: parallel mode only supports posix system") else: from multiprocessing import Pool dt.check_initialized() if processnum is None: processnum = cpu_count() pool = Pool(processnum) cut = _pcut cut_for_search = _pcut_for_search def disable_parallel(): global pool, dt, cut, cut_for_search if pool: pool.close() pool = None cut = dt.cut cut_for_search = dt.cut_for_search
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union import warnings from azure.core.async_paging import AsyncItemPaged, AsyncList from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error from azure.core.pipeline import PipelineResponse from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod from azure.mgmt.core.exceptions import ARMErrorFormat from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling from ... import models as _models T = TypeVar('T') ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] class VirtualNetworksOperations: """VirtualNetworksOperations async operations. You should not instantiate this class directly. Instead, you should create a Client instance that instantiates it for you and attaches it as an attribute. :ivar models: Alias to model classes used in this operation group. :type models: ~azure.mgmt.connectedvmware.models :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. 
""" models = _models def __init__(self, client, config, serializer, deserializer) -> None: self._client = client self._serialize = serializer self._deserialize = deserializer self._config = config async def _create_initial( self, resource_group_name: str, virtual_network_name: str, body: Optional["_models.VirtualNetwork"] = None, **kwargs: Any ) -> "_models.VirtualNetwork": cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetwork"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2020-10-01-preview" content_type = kwargs.pop("content_type", "application/json") accept = "application/json" # Construct URL url = self._create_initial.metadata['url'] # type: ignore path_format_arguments = { 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') body_content_kwargs = {} # type: Dict[str, Any] if body is not None: body_content = self._serialize.body(body, 'VirtualNetwork') else: body_content = None body_content_kwargs['content'] = body_content request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200, 201]: map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) if response.status_code == 200: deserialized = self._deserialize('VirtualNetwork', pipeline_response) if response.status_code == 201: deserialized = self._deserialize('VirtualNetwork', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized _create_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ConnectedVMwarevSphere/virtualNetworks/{virtualNetworkName}'} # type: ignore async def begin_create( self, resource_group_name: str, virtual_network_name: str, body: Optional["_models.VirtualNetwork"] = None, **kwargs: Any ) -> AsyncLROPoller["_models.VirtualNetwork"]: """Implements virtual network PUT method. Create Or Update virtual network. :param resource_group_name: The Resource Group Name. :type resource_group_name: str :param virtual_network_name: Name of the virtual network resource. :type virtual_network_name: str :param body: Request payload. :type body: ~azure.mgmt.connectedvmware.models.VirtualNetwork :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. 
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. :return: An instance of AsyncLROPoller that returns either VirtualNetwork or the result of cls(response) :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.connectedvmware.models.VirtualNetwork] :raises ~azure.core.exceptions.HttpResponseError: """ polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod] cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetwork"] lro_delay = kwargs.pop( 'polling_interval', self._config.polling_interval ) cont_token = kwargs.pop('continuation_token', None) # type: Optional[str] if cont_token is None: raw_result = await self._create_initial( resource_group_name=resource_group_name, virtual_network_name=virtual_network_name, body=body, cls=lambda x,y,z: x, **kwargs ) kwargs.pop('error_map', None) kwargs.pop('content_type', None) def get_long_running_output(pipeline_response): deserialized = self._deserialize('VirtualNetwork', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized path_format_arguments = { 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'), } if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs) elif polling is False: polling_method = AsyncNoPolling() else: polling_method = polling if cont_token: return AsyncLROPoller.from_continuation_token( polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output ) else: return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) begin_create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ConnectedVMwarevSphere/virtualNetworks/{virtualNetworkName}'} # type: ignore async def get( self, resource_group_name: str, virtual_network_name: str, **kwargs: Any ) -> "_models.VirtualNetwork": """Gets a virtual network. Implements virtual network GET method. :param resource_group_name: The Resource Group Name. :type resource_group_name: str :param virtual_network_name: Name of the virtual network resource. 
:type virtual_network_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: VirtualNetwork, or the result of cls(response) :rtype: ~azure.mgmt.connectedvmware.models.VirtualNetwork :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetwork"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2020-10-01-preview" accept = "application/json" # Construct URL url = self.get.metadata['url'] # type: ignore path_format_arguments = { 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') request = self._client.get(url, query_parameters, header_parameters) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize('VirtualNetwork', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ConnectedVMwarevSphere/virtualNetworks/{virtualNetworkName}'} # type: ignore async def update( self, resource_group_name: str, virtual_network_name: str, body: Optional["_models.ResourcePatch"] = None, **kwargs: Any ) -> "_models.VirtualNetwork": """Updates a virtual network. API to update certain properties of the virtual network resource. :param resource_group_name: The Resource Group Name. :type resource_group_name: str :param virtual_network_name: Name of the virtual network resource. :type virtual_network_name: str :param body: Resource properties to update. 
:type body: ~azure.mgmt.connectedvmware.models.ResourcePatch :keyword callable cls: A custom type or function that will be passed the direct response :return: VirtualNetwork, or the result of cls(response) :rtype: ~azure.mgmt.connectedvmware.models.VirtualNetwork :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetwork"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2020-10-01-preview" content_type = kwargs.pop("content_type", "application/json") accept = "application/json" # Construct URL url = self.update.metadata['url'] # type: ignore path_format_arguments = { 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') body_content_kwargs = {} # type: Dict[str, Any] if body is not None: body_content = self._serialize.body(body, 'ResourcePatch') else: body_content = None body_content_kwargs['content'] = body_content request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize('VirtualNetwork', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ConnectedVMwarevSphere/virtualNetworks/{virtualNetworkName}'} # type: ignore async def _delete_initial( self, resource_group_name: str, virtual_network_name: str, force: Optional[bool] = None, **kwargs: Any ) -> None: cls = kwargs.pop('cls', None) # type: ClsType[None] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2020-10-01-preview" accept = "application/json" # Construct URL url = self._delete_initial.metadata['url'] # type: ignore path_format_arguments = { 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = 
self._serialize.query("api_version", api_version, 'str') if force is not None: query_parameters['force'] = self._serialize.query("force", force, 'bool') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') request = self._client.delete(url, query_parameters, header_parameters) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200, 202, 204]: map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) if cls: return cls(pipeline_response, None, {}) _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ConnectedVMwarevSphere/virtualNetworks/{virtualNetworkName}'} # type: ignore async def begin_delete( self, resource_group_name: str, virtual_network_name: str, force: Optional[bool] = None, **kwargs: Any ) -> AsyncLROPoller[None]: """Deletes an virtual network. Implements virtual network DELETE method. :param resource_group_name: The Resource Group Name. :type resource_group_name: str :param virtual_network_name: Name of the virtual network resource. :type virtual_network_name: str :param force: Whether force delete was specified. :type force: bool :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. 
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response) :rtype: ~azure.core.polling.AsyncLROPoller[None] :raises ~azure.core.exceptions.HttpResponseError: """ polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod] cls = kwargs.pop('cls', None) # type: ClsType[None] lro_delay = kwargs.pop( 'polling_interval', self._config.polling_interval ) cont_token = kwargs.pop('continuation_token', None) # type: Optional[str] if cont_token is None: raw_result = await self._delete_initial( resource_group_name=resource_group_name, virtual_network_name=virtual_network_name, force=force, cls=lambda x,y,z: x, **kwargs ) kwargs.pop('error_map', None) kwargs.pop('content_type', None) def get_long_running_output(pipeline_response): if cls: return cls(pipeline_response, None, {}) path_format_arguments = { 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'), } if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) elif polling is False: polling_method = AsyncNoPolling() else: polling_method = polling if cont_token: return AsyncLROPoller.from_continuation_token( polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output ) else: return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method) begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ConnectedVMwarevSphere/virtualNetworks/{virtualNetworkName}'} # type: ignore def list( self, **kwargs: Any ) -> AsyncIterable["_models.VirtualNetworksList"]: """Implements GET virtualNetworks in a subscription. List of virtualNetworks in a subscription. 
:keyword callable cls: A custom type or function that will be passed the direct response :return: An iterator like instance of either VirtualNetworksList or the result of cls(response) :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.connectedvmware.models.VirtualNetworksList] :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetworksList"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2020-10-01-preview" accept = "application/json" def prepare_request(next_link=None): # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') if not next_link: # Construct URL url = self.list.metadata['url'] # type: ignore path_format_arguments = { 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') request = self._client.get(url, query_parameters, header_parameters) else: url = next_link query_parameters = {} # type: Dict[str, Any] request = self._client.get(url, query_parameters, header_parameters) return request async def extract_data(pipeline_response): deserialized = self._deserialize('VirtualNetworksList', pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return deserialized.next_link or None, AsyncList(list_of_elem) async def get_next(next_link=None): request = prepare_request(next_link) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response) map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) return pipeline_response return AsyncItemPaged( get_next, extract_data ) list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.ConnectedVMwarevSphere/virtualNetworks'} # type: ignore def list_by_resource_group( self, resource_group_name: str, **kwargs: Any ) -> AsyncIterable["_models.VirtualNetworksList"]: """Implements GET virtualNetworks in a resource group. List of virtualNetworks in a resource group. :param resource_group_name: The Resource Group Name. 
:type resource_group_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: An iterator like instance of either VirtualNetworksList or the result of cls(response) :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.connectedvmware.models.VirtualNetworksList] :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetworksList"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2020-10-01-preview" accept = "application/json" def prepare_request(next_link=None): # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') if not next_link: # Construct URL url = self.list_by_resource_group.metadata['url'] # type: ignore path_format_arguments = { 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') request = self._client.get(url, query_parameters, header_parameters) else: url = next_link query_parameters = {} # type: Dict[str, Any] request = self._client.get(url, query_parameters, header_parameters) return request async def extract_data(pipeline_response): deserialized = self._deserialize('VirtualNetworksList', pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return deserialized.next_link or None, AsyncList(list_of_elem) async def get_next(next_link=None): request = prepare_request(next_link) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response) map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) return pipeline_response return AsyncItemPaged( get_next, extract_data ) list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ConnectedVMwarevSphere/virtualNetworks'} # type: ignore
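# --------------------------------------------------------------------------
# Illustrative only (not generated code): how the async operations above are
# typically consumed.  ``client`` stands for an already constructed management
# client from this package; the ``virtual_networks`` attribute name, the
# resource names and the empty create body are assumptions, not taken from
# this file.

async def _example_virtual_networks(client):
    # long-running create: begin_create() returns an AsyncLROPoller
    # (a real call would also pass a VirtualNetwork body)
    poller = await client.virtual_networks.begin_create(
        resource_group_name='example-rg',
        virtual_network_name='example-vnet',
    )
    vnet = await poller.result()
    print(vnet.name)

    # plain GET of the same resource
    vnet = await client.virtual_networks.get('example-rg', 'example-vnet')

    # paged listing: list_by_resource_group() returns an AsyncItemPaged,
    # so iterate it with ``async for`` instead of awaiting the call itself
    async for item in client.virtual_networks.list_by_resource_group('example-rg'):
        print(item.name)

    # long-running delete
    delete_poller = await client.virtual_networks.begin_delete(
        'example-rg', 'example-vnet', force=True)
    await delete_poller.result()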
# Copyright (c) 2014, German Neuroinformatics Node (G-Node) # # All rights reserved. # # Redistribution and use in section and binary forms, with or without # modification, are permitted under the terms of the BSD License. See # LICENSE file in the root of the Project. from __future__ import (absolute_import, division, print_function) import os import unittest import nixio as nix skip_cpp = not hasattr(nix, "core") class _TestSection(unittest.TestCase): backend = None testfilename = "sectiontest.h5" def setUp(self): self.file = nix.File.open(self.testfilename, nix.FileMode.Overwrite, backend=self.backend) self.section = self.file.create_section("test section", "recordingsession") self.other = self.file.create_section("other section", "recordingsession") def tearDown(self): del self.file.sections[self.section.id] del self.file.sections[self.other.id] self.file.close() os.remove(self.testfilename) def test_section_eq(self): assert(self.section == self.section) assert(not self.section == self.other) assert(self.section is not None) def test_section_id(self): assert(self.section.id is not None) def test_section_name(self): assert(self.section.name is not None) def test_section_type(self): def set_none(): self.section.type = None assert(self.section.type is not None) self.assertRaises(Exception, set_none) self.section.type = "foo type" assert(self.section.type == "foo type") def test_section_definition(self): assert(self.section.definition is None) self.section.definition = "definition" assert(self.section.definition == "definition") self.section.definition = None assert(self.section.definition is None) def test_section_mapping(self): assert(self.section.mapping is None) self.section.mapping = "mapping" assert(self.section.mapping == "mapping") self.section.mapping = None assert(self.section.mapping is None) def test_section_repository(self): assert(self.section.repository is None) self.section.repository = "repository" assert(self.section.repository == "repository") self.section.repository = None assert(self.section.repository is None) def test_section_sections(self): assert(len(self.section.sections) == 0) child = self.section.create_section("test section", "electrode") assert(child.parent == self.section) assert(len(self.section.sections) == 1) assert(child in self.section.sections) assert(child.id in self.section.sections) assert("notexist" not in self.section.sections) assert(child.id == self.section.sections[0].id) assert(child.id == self.section.sections[-1].id) del self.section.sections[0] assert(len(self.section.sections) == 0) self.section['easy subsection'] = nix.S('electrode') subject = self.section['subject'] = nix.S('subject') assert(self.section['subject'] == subject) assert(self.section['subject'].id == subject.id) assert('easy subsection' in [v.name for k, v in self.section.sections.items()]) assert('easy subsection' in self.section.sections) assert(self.section['easy subsection'].name == 'easy subsection') def test_section_find_sections(self): for i in range(2): self.section.create_section("level1-p0-s" + str(i), "dummy") for i in range(2): self.section.sections[0].create_section("level2-p1-s" + str(i), "dummy") for i in range(2): self.section.sections[1].create_section("level2-p2-s" + str(i), "dummy") for i in range(2): self.section.sections[0].sections[0].create_section( "level3-p1-s" + str(i), "dummy" ) assert(len(self.section.find_sections()) == 9) assert(len(self.section.find_sections(limit=1)) == 3) assert(len(self.section.find_sections( filtr=lambda x: "level2-p1-s" in 
x.name)) == 2 ) assert(len(self.section.find_sections( filtr=lambda x: "level2-p1-s" in x.name, limit=1)) == 0 ) assert(len(self.section.find_related()) == 3) assert(len(self.section.sections[0].find_related()) == 5) def test_section_properties(self): assert(len(self.section) == 0) prop = self.section.create_property("test prop", nix.DataType.String) assert(len(self.section) == 1) for p in self.section: assert(p in self.section) assert(self.section.has_property_by_name("test prop")) assert(not self.section.has_property_by_name("notexist")) assert(self.section.get_property_by_name("test prop") is not None) assert(self.section.get_property_by_name("notexist") is None) assert(len(self.section.inherited_properties()) == 1) assert(prop in self.section) assert(prop.id in self.section) assert(prop.name in self.section) assert("notexist" not in self.section) props = dict(self.section.items()) assert(props["test prop"] == prop) assert(prop.id == self.section.props[0].id) assert(prop.id == self.section.props[-1].id) # easy prop creation self.section['ep_str'] = 'str' self.section['ep_int'] = 23 self.section['ep_float'] = 42.0 self.section['ep_list'] = [1, 2, 3] self.section['ep_val'] = nix.Value(1.0) self.section['ep_val'] = 2.0 res = [x in self.section for x in ['ep_str', 'ep_int', 'ep_float']] assert(all(res)) assert(self.section['ep_str'] == 'str') assert(self.section['ep_int'] == 23) assert(self.section['ep_float'] == 42.0) assert(self.section['ep_list'] == [1, 2, 3]) def create_hetero_section(): self.section['ep_ex'] = [1, 1.0] self.assertRaises(ValueError, create_hetero_section) sections = [x.id for x in self.section] for x in sections: del self.section[x] assert(len(self.section) == 0) def test_parent(self): self.assertIs(self.section.parent, None) child = self.section.create_section("child section", "sect") self.assertEqual(self.section, child.parent) block = self.file.create_block("block", "section parent test") mdsection = self.file.create_section("block md", "metadata sect") block.metadata = mdsection self.assertIs(block.metadata.parent, None) grp = block.create_group("group", "section parent test") grp.metadata = child self.assertEqual(grp.metadata.parent, self.section) def test_inverse_search(self): block = self.file.create_block("a block", "block with metadata") block.metadata = self.section otherblock = self.file.create_block("b block", "block with metadata") otherblock.metadata = self.other self.assertEqual(len(self.section.referring_blocks), 1) self.assertEqual(len(self.other.referring_blocks), 1) self.assertEqual(self.section.referring_blocks[0], block) self.assertEqual(self.other.referring_blocks[0], otherblock) da_one = block.create_data_array("foo", "data_array", data=range(10)) da_one.metadata = self.other da_two = block.create_data_array("foobar", "data_array", data=[1]) da_two.metadata = self.other self.assertEqual(len(self.other.referring_data_arrays), 2) self.assertIn(da_one, self.other.referring_data_arrays) self.assertIn(da_two, self.other.referring_data_arrays) tag = block.create_tag("tago", "tagtype", [1, 1]) tag.metadata = self.section self.assertEqual(len(self.section.referring_tags), 1) self.assertEqual(len(self.other.referring_tags), 0) self.assertEqual(self.section.referring_tags[0].id, tag.id) mtag = block.create_multi_tag("MultiTagName", "MultiTagType", da_one) mtag.metadata = self.section self.assertEqual(len(self.section.referring_multi_tags), 1) self.assertEqual(len(self.other.referring_multi_tags), 0) self.assertEqual(self.section.referring_multi_tags[0].id, 
                         mtag.id)

        src = block.create_source("sauce", "stype")
        src.metadata = self.other
        self.assertEqual(len(self.other.referring_sources), 1)
        self.assertEqual(len(self.section.referring_sources), 0)
        self.assertEqual(self.other.referring_sources[0].id, src.id)


@unittest.skipIf(skip_cpp, "HDF5 backend not available.")
class TestSectionCPP(_TestSection):
    backend = "hdf5"


class TestSectionPy(_TestSection):
    backend = "h5py"
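# A minimal usage sketch of the section API exercised by the tests above: open
# a file, build a small metadata tree, add a property via item assignment and
# search it. Illustrative only: the file name and metadata values are
# hypothetical, and it assumes the "h5py" backend is available.
def _example_section_usage(filename="example_metadata.h5"):
    nixfile = nix.File.open(filename, nix.FileMode.Overwrite, backend="h5py")
    session = nixfile.create_section("session 1", "recordingsession")
    session["experimenter"] = "jane doe"     # easy property creation
    session["subject"] = nix.S("subject")    # easy subsection creation
    session["subject"]["species"] = "Mus musculus"
    found = session.find_sections(filtr=lambda s: s.type == "subject")
    names = [s.name for s in found]
    nixfile.close()
    return names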
#!/usr/bin/env python # -*- encoding: utf-8 -*- # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. ''' heron.py ''' import logging import tornado.httpclient import tornado.gen from tornado.options import options from fetch import fetch_url_as_json from query import QueryHandler # pylint: disable=bad-whitespace CLUSTER_URL_FMT = "%s/clusters" TOPOLOGIES_URL_FMT = "%s/topologies" EXECUTION_STATE_URL_FMT = "%s/executionstate" % TOPOLOGIES_URL_FMT LOGICALPLAN_URL_FMT = "%s/logicalplan" % TOPOLOGIES_URL_FMT PHYSICALPLAN_URL_FMT = "%s/physicalplan" % TOPOLOGIES_URL_FMT SCHEDULER_LOCATION_URL_FMT = "%s/schedulerlocation" % TOPOLOGIES_URL_FMT METRICS_URL_FMT = "%s/metrics" % TOPOLOGIES_URL_FMT METRICS_QUERY_URL_FMT = "%s/metricsquery" % TOPOLOGIES_URL_FMT METRICS_TIMELINE_URL_FMT = "%s/metricstimeline" % TOPOLOGIES_URL_FMT EXCEPTIONS_URL_FMT = "%s/exceptions" % TOPOLOGIES_URL_FMT EXCEPTION_SUMMARY_URL_FMT = "%s/exceptionsummary" % TOPOLOGIES_URL_FMT INFO_URL_FMT = "%s/info" % TOPOLOGIES_URL_FMT PID_URL_FMT = "%s/pid" % TOPOLOGIES_URL_FMT JSTACK_URL_FMT = "%s/jstack" % TOPOLOGIES_URL_FMT JMAP_URL_FMT = "%s/jmap" % TOPOLOGIES_URL_FMT HISTOGRAM_URL_FMT = "%s/histo" % TOPOLOGIES_URL_FMT FILE_DATA_URL_FMT = "%s/containerfiledata" % TOPOLOGIES_URL_FMT FILE_DOWNLOAD_URL_FMT = "%s/containerfiledownload" % TOPOLOGIES_URL_FMT FILESTATS_URL_FMT = "%s/containerfilestats" % TOPOLOGIES_URL_FMT capacity = "DIVIDE(" \ " DEFAULT(0," \ " MULTIPLY(" \ " TS({0},{1},__execute-count/default)," \ " TS({0},{1},__execute-latency/default)" \ " )" \ " )," \ " 60000000000" \ ")" failures = "DEFAULT(0," \ " DIVIDE(" \ " TS({0},{1},__fail-count/default)," \ " SUM(" \ " DEFAULT(1, TS({0},{1},__execute-count/default))," \ " DEFAULT(0, TS({0},{1},__fail-count/default))" \ " )" \ " )" \ ")" cpu = "DEFAULT(0, TS({0},{1},__jvm-process-cpu-load))" memory = "DIVIDE(" \ " DEFAULT(0, TS({0},{1},__jvm-memory-used-mb))," \ " DEFAULT(1, TS({0},{1},__jvm-memory-mb-total))" \ ")" gc = "RATE(TS({0},{1},__jvm-gc-collection-time-ms))" backpressure = "DEFAULT(0, TS(__stmgr__,*,__time_spent_back_pressure_by_compid/{0}))" queries = dict( cpu=cpu, capacity=capacity, failures=failures, memory=memory, gc=gc, backpressure=backpressure ) def get_tracker_endpoint(): ''' Get the endpoint for heron tracker :return: ''' return options.tracker_url def create_url(fmt): ''' Given an URL format, substitute with tracker service endpoint :param fmt: :return: ''' return fmt % get_tracker_endpoint() @tornado.gen.coroutine def get_clusters(): ''' :return: ''' request_url = create_url(CLUSTER_URL_FMT) raise tornado.gen.Return((yield fetch_url_as_json(request_url))) ################################################################################ @tornado.gen.coroutine def get_topologies(): ''' Get the list of topologies given a data center from heron 
tracker :return: ''' request_url = create_url(TOPOLOGIES_URL_FMT) raise tornado.gen.Return((yield fetch_url_as_json(request_url))) ################################################################################ @tornado.gen.coroutine def get_topologies_states(): ''' Get the list of topologies and their states :return: ''' request_url = create_url(TOPOLOGIES_URL_FMT) + "/states" raise tornado.gen.Return((yield fetch_url_as_json(request_url))) @tornado.gen.coroutine def _get_topologies(cluster, role=None, env=None): endpoint = create_url(TOPOLOGIES_URL_FMT) params = dict(cluster=cluster) if role is not None: params['role'] = role if env is not None: params['environ'] = env request_url = tornado.httputil.url_concat(endpoint, params) raise tornado.gen.Return((yield fetch_url_as_json(request_url))) ################################################################################ def get_cluster_topologies(cluster): ''' Get the list of topologies given a cluster :param cluster: :return: ''' return _get_topologies(cluster) ################################################################################ def get_cluster_role_topologies(cluster, role): ''' Get the list of topologies given a cluster submitted by a given role :param cluster: :param role: :return: ''' return _get_topologies(cluster, role=role) ################################################################################ def get_cluster_role_env_topologies(cluster, role, env): ''' Get the list of topologies given a cluster submitted by a given role under a given environment :param cluster: :param role: :param env: :return: ''' return _get_topologies(cluster, role=role, env=env) ################################################################################ @tornado.gen.coroutine def get_execution_state(cluster, environ, topology, role=None): ''' Get the execution state of a topology in a cluster :param cluster: :param environ: :param topology: :param role: :return: ''' params = dict(cluster=cluster, environ=environ, topology=topology) if role is not None: params['role'] = role request_url = tornado.httputil.url_concat(create_url(EXECUTION_STATE_URL_FMT), params) raise tornado.gen.Return((yield fetch_url_as_json(request_url))) ################################################################################ @tornado.gen.coroutine def get_logical_plan(cluster, environ, topology, role=None): ''' Get the logical plan state of a topology in a cluster :param cluster: :param environ: :param topology: :param role: :return: ''' params = dict(cluster=cluster, environ=environ, topology=topology) if role is not None: params['role'] = role request_url = tornado.httputil.url_concat( create_url(LOGICALPLAN_URL_FMT), params) raise tornado.gen.Return((yield fetch_url_as_json(request_url))) ################################################################################ @tornado.gen.coroutine def get_comps(cluster, environ, topology, role=None): ''' Get the list of component names for the topology from Heron Nest :param cluster: :param environ: :param topology: :param role: :return: ''' params = dict(cluster=cluster, environ=environ, topology=topology) if role is not None: params['role'] = role request_url = tornado.httputil.url_concat( create_url(LOGICALPLAN_URL_FMT), params) lplan = yield fetch_url_as_json(request_url) comps = lplan['spouts'].keys() + lplan['bolts'].keys() raise tornado.gen.Return(comps) ################################################################################ @tornado.gen.coroutine def get_instances(cluster, environ, 
topology, role=None): ''' Get the list of instances for the topology from Heron Nest :param cluster: :param environ: :param topology: :param role: :return: ''' params = dict(cluster=cluster, environ=environ, topology=topology) if role is not None: params['role'] = role request_url = tornado.httputil.url_concat( create_url(PHYSICALPLAN_URL_FMT), params) pplan = yield fetch_url_as_json(request_url) instances = pplan['instances'].keys() raise tornado.gen.Return(instances) ################################################################################ @tornado.gen.coroutine def get_physical_plan(cluster, environ, topology, role=None): ''' Get the physical plan state of a topology in a cluster from tracker :param cluster: :param environ: :param topology: :param role: :return: ''' params = dict(cluster=cluster, environ=environ, topology=topology) if role is not None: params['role'] = role request_url = tornado.httputil.url_concat( create_url(PHYSICALPLAN_URL_FMT), params) raise tornado.gen.Return((yield fetch_url_as_json(request_url))) ################################################################################ @tornado.gen.coroutine def get_scheduler_location(cluster, environ, topology, role=None): ''' Get the scheduler location of a topology in a cluster from tracker :param cluster: :param environ: :param topology: :param role: :return: ''' params = dict(cluster=cluster, environ=environ, topology=topology) if role is not None: params['role'] = role request_url = tornado.httputil.url_concat( create_url(SCHEDULER_LOCATION_URL_FMT), params) raise tornado.gen.Return((yield fetch_url_as_json(request_url))) ################################################################################ @tornado.gen.coroutine def get_component_exceptionsummary(cluster, environ, topology, component, role=None): ''' Get summary of exception for a component :param cluster: :param environ: :param topology: :param component: :param role: :return: ''' params = dict( cluster=cluster, environ=environ, topology=topology, component=component) if role is not None: params['role'] = role request_url = tornado.httputil.url_concat( create_url(EXCEPTION_SUMMARY_URL_FMT), params) raise tornado.gen.Return((yield fetch_url_as_json(request_url))) ################################################################################ @tornado.gen.coroutine def get_component_exceptions(cluster, environ, topology, component, role=None): ''' Get exceptions for 'component' for 'topology' :param cluster: :param environ: :param topology: :param component: :param role: :return: ''' params = dict( cluster=cluster, environ=environ, topology=topology, component=component) if role is not None: params['role'] = role request_url = tornado.httputil.url_concat( create_url(EXCEPTIONS_URL_FMT), params) raise tornado.gen.Return((yield fetch_url_as_json(request_url))) ################################################################################ @tornado.gen.coroutine def get_comp_instance_metrics(cluster, environ, topology, component, metrics, instances, time_range, role=None): ''' Get the metrics for some instances of a topology from tracker :param cluster: :param environ: :param topology: :param component: :param metrics: dict of display name to cuckoo name :param instances: :param time_range: 2-tuple consisting of start and end of range :param role: :return: ''' params = dict( cluster=cluster, environ=environ, topology=topology, component=component) if role is not None: params['role'] = role # form the fetch url request_url = 
tornado.httputil.url_concat( create_url(METRICS_URL_FMT), params) # convert a single instance to a list, if needed all_instances = instances if isinstance(instances, list) else [instances] # append each metric to the url for _, metric_name in metrics.items(): request_url = tornado.httputil.url_concat(request_url, dict(metricname=metric_name[0])) # append each instance to the url for i in all_instances: request_url = tornado.httputil.url_concat(request_url, dict(instance=i)) # append the time interval to the url request_url = tornado.httputil.url_concat(request_url, dict(interval=time_range[1])) raise tornado.gen.Return((yield fetch_url_as_json(request_url))) ################################################################################ @tornado.gen.coroutine def get_comp_metrics(cluster, environ, topology, component, instances, metricnames, time_range, role=None): ''' Get the metrics for all the instances of a topology from Heron Nest :param cluster: :param environ: :param topology: :param component: :param instances: :param metricnames: dict of display name to cuckoo name :param time_range: 2-tuple consisting of start and end of range :param role: :return: ''' params = dict( cluster=cluster, environ=environ, topology=topology, component=component) if role is not None: params['role'] = role # form the url request_url = tornado.httputil.url_concat( create_url(METRICS_URL_FMT), params) # append each metric to the url for metric_name in metricnames: request_url = tornado.httputil.url_concat(request_url, dict(metricname=metric_name)) # append each instance to the url for instance in instances: request_url = tornado.httputil.url_concat(request_url, dict(instance=instance)) # append the time interval to the url request_url = tornado.httputil.url_concat(request_url, dict(interval=time_range[1])) raise tornado.gen.Return((yield fetch_url_as_json(request_url))) ################################################################################ @tornado.gen.coroutine def get_metrics(cluster, environment, topology, timerange, query, role=None): ''' Get the metrics for a topology from tracker :param cluster: :param environment: :param topology: :param timerange: :param query: :param role: :return: ''' params = dict( cluster=cluster, environ=environment, topology=topology, starttime=timerange[0], endtime=timerange[1], query=query) if role is not None: params['role'] = role request_url = tornado.httputil.url_concat( create_url(METRICS_QUERY_URL_FMT), params ) logging.info("get_metrics %s", request_url) raise tornado.gen.Return((yield fetch_url_as_json(request_url))) ################################################################################ @tornado.gen.coroutine def get_comp_metrics_timeline(cluster, environ, topology, component, instances, metricnames, time_range, role=None): ''' Get the minute-by-minute metrics for all instances of a topology from tracker :param cluster: :param environ: :param topology: :param component: :param instances: :param metricnames: dict of display name to cuckoo name :param time_range: 2-tuple consisting of start and end of range :param role: :return: ''' params = dict( cluster=cluster, environ=environ, topology=topology, component=component) if role is not None: params['role'] = role # form the url request_url = tornado.httputil.url_concat(create_url(METRICS_TIMELINE_URL_FMT), params) if role is not None: request_url = tornado.httputil.url_concat(request_url, dict(role=role)) # append each metric to the url for metric_name in metricnames: request_url = 
tornado.httputil.url_concat(request_url, dict(metricname=metric_name)) # append each instance to the url for instance in instances: request_url = tornado.httputil.url_concat(request_url, dict(instance=instance)) # append the time interval to the url request_url = tornado.httputil.url_concat( request_url, dict(starttime=time_range[0], endtime=time_range[1])) raise tornado.gen.Return((yield fetch_url_as_json(request_url))) @tornado.gen.coroutine def get_topology_info(cluster, environ, topology, role=None): ''' :param cluster: :param environ: :param topology: :param role: :return: ''' params = dict( cluster=cluster, environ=environ, topology=topology) if role is not None: params['role'] = role request_url = tornado.httputil.url_concat(create_url(INFO_URL_FMT), params) raise tornado.gen.Return((yield fetch_url_as_json(request_url))) # Get pid of the instance @tornado.gen.coroutine def get_instance_pid(cluster, environ, topology, instance, role=None): ''' :param cluster: :param environ: :param topology: :param instance: :param role: :return: ''' params = dict( cluster=cluster, environ=environ, topology=topology, instance=instance) if role is not None: params['role'] = role request_url = tornado.httputil.url_concat(create_url(PID_URL_FMT), params) raise tornado.gen.Return((yield fetch_url_as_json(request_url))) # Get jstack of instance @tornado.gen.coroutine def get_instance_jstack(cluster, environ, topology, instance, role=None): ''' :param cluster: :param environ: :param topology: :param instance: :param role: :return: ''' params = dict( cluster=cluster, environ=environ, topology=topology, instance=instance) if role is not None: params['role'] = role request_url = tornado.httputil.url_concat( create_url(JSTACK_URL_FMT), params) raise tornado.gen.Return((yield fetch_url_as_json(request_url))) # Get histogram of active memory objects. 
@tornado.gen.coroutine def get_instance_mem_histogram(cluster, environ, topology, instance, role=None): ''' :param cluster: :param environ: :param topology: :param instance: :param role: :return: ''' params = dict( cluster=cluster, environ=environ, topology=topology, instance=instance) if role is not None: params['role'] = role request_url = tornado.httputil.url_concat( create_url(HISTOGRAM_URL_FMT), params) raise tornado.gen.Return((yield fetch_url_as_json(request_url))) # Call heap dump for an instance and save it at /tmp/heap.bin @tornado.gen.coroutine def run_instance_jmap(cluster, environ, topology, instance, role=None): ''' :param cluster: :param environ: :param topology: :param instance: :param role: :return: ''' params = dict( cluster=cluster, environ=environ, topology=topology, instance=instance) if role is not None: params['role'] = role request_url = tornado.httputil.url_concat( create_url(JMAP_URL_FMT), params) if role is not None: request_url = tornado.httputil.url_concat(request_url, dict(role=role)) raise tornado.gen.Return((yield fetch_url_as_json(request_url))) # Get a url to download a file from the container def get_container_file_download_url(cluster, environ, topology, container, path, role=None): ''' :param cluster: :param environ: :param topology: :param container: :param path: :param role: :return: ''' params = dict( cluster=cluster, environ=environ, topology=topology, container=container, path=path) if role is not None: params['role'] = role request_url = tornado.httputil.url_concat( create_url(FILE_DOWNLOAD_URL_FMT), params) if role is not None: request_url = tornado.httputil.url_concat(request_url, dict(role=role)) return request_url # Get file data from the container @tornado.gen.coroutine def get_container_file_data(cluster, environ, topology, container, path, offset, length, role=None): ''' :param cluster: :param environ: :param topology: :param container: :param path: :param offset: :param length: :param role: :return: ''' params = dict( cluster=cluster, environ=environ, topology=topology, container=container, path=path, offset=offset, length=length) if role is not None: params['role'] = role request_url = tornado.httputil.url_concat( create_url(FILE_DATA_URL_FMT), params) if role is not None: request_url = tornado.httputil.url_concat(request_url, dict(role=role)) raise tornado.gen.Return((yield fetch_url_as_json(request_url))) # Get filestats @tornado.gen.coroutine def get_filestats(cluster, environ, topology, container, path, role=None): ''' :param cluster: :param environ: :param topology: :param container: :param path: :param role: :return: ''' params = dict( cluster=cluster, environ=environ, topology=topology, container=container, path=path) if role is not None: params['role'] = role request_url = tornado.httputil.url_concat(create_url(FILESTATS_URL_FMT), params) raise tornado.gen.Return((yield fetch_url_as_json(request_url))) class HeronQueryHandler(QueryHandler): ''' HeronQueryHandler ''' @tornado.gen.coroutine def fetch(self, cluster, metric, topology, component, instance, timerange, environ=None): ''' :param cluster: :param metric: :param topology: :param component: :param instance: :param timerange: :param environ: :return: ''' components = [component] if component != "*" else (yield get_comps(cluster, environ, topology)) futures = [] for comp in components: query = self.get_query(metric, comp, instance) future = get_metrics(cluster, environ, topology, timerange, query) futures.append(future) results = yield futures timelines = [] for result in 
results: timelines.extend(result["timeline"]) result = self.get_metric_response(timerange, timelines, False) raise tornado.gen.Return(result) @tornado.gen.coroutine def fetch_max(self, cluster, metric, topology, component, instance, timerange, environ=None): ''' :param cluster: :param metric: :param topology: :param component: :param instance: :param timerange: :param environ: :return: ''' components = [component] if component != "*" else (yield get_comps(cluster, environ, topology)) result = {} futures = [] for comp in components: query = self.get_query(metric, comp, instance) max_query = "MAX(%s)" % query future = get_metrics(cluster, environ, topology, timerange, max_query) futures.append(future) results = yield futures data = self.compute_max(results) result = self.get_metric_response(timerange, data, True) raise tornado.gen.Return(result) # pylint: disable=unused-argument @tornado.gen.coroutine def fetch_backpressure(self, cluster, metric, topology, component, instance, \ timerange, is_max, environ=None): ''' :param cluster: :param metric: :param topology: :param component: :param instance: :param timerange: :param isMax: :param environ: :return: ''' instances = yield get_instances(cluster, environ, topology) if component != "*": filtered_inst = [instance for instance in instances if instance.split("_")[2] == component] else: filtered_inst = instances futures_dict = {} for inst in filtered_inst: query = queries.get(metric).format(inst) futures_dict[inst] = get_metrics(cluster, environ, topology, timerange, query) res = yield futures_dict if not is_max: timelines = [] for key in res: result = res[key] # Replacing stream manager instance name with component instance name if len(result["timeline"]) > 0: result["timeline"][0]["instance"] = key timelines.extend(result["timeline"]) result = self.get_metric_response(timerange, timelines, is_max) else: data = self.compute_max(res.values()) result = self.get_metric_response(timerange, data, is_max) raise tornado.gen.Return(result) # pylint: disable=no-self-use def compute_max(self, multi_ts): ''' :param multi_ts: :return: ''' if len(multi_ts) > 0 and len(multi_ts[0]["timeline"]) > 0: keys = multi_ts[0]["timeline"][0]["data"].keys() timelines = ([res["timeline"][0]["data"][key] for key in keys] for res in multi_ts) values = (max(v) for v in zip(*timelines)) return dict(zip(keys, values)) return {} # pylint: disable=no-self-use def get_metric_response(self, timerange, data, isMax): ''' :param timerange: :param data: :param isMax: :return: ''' if isMax: return dict( status="success", starttime=timerange[0], endtime=timerange[1], result=dict(timeline=[dict(data=data)]) ) return dict( status="success", starttime=timerange[0], endtime=timerange[1], result=dict(timeline=data) ) # pylint: disable=no-self-use def get_query(self, metric, component, instance): ''' :param metric: :param component: :param instance: :return: ''' q = queries.get(metric) return q.format(component, instance)
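# Hedged illustration (not part of the tracker API above): how the metric query
# templates in `queries` are expanded before being handed to get_metrics(). The
# component and instance names are hypothetical; only str.format is involved,
# so no tracker connection is needed to inspect the resulting query strings.
def example_expanded_queries(component="word-spout", instance="container_1_word-spout_1"):
  '''
  Expand a few of the query templates for a hypothetical component/instance
  :param component:
  :param instance:
  :return:
  '''
  return dict(
      cpu=queries["cpu"].format(component, instance),
      capacity_max="MAX(%s)" % queries["capacity"].format(component, instance),
      backpressure=queries["backpressure"].format(instance)
  )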
""" Base class for item similarity recommenders. """ import numpy as np from itertools import izip from operator import itemgetter from scipy.sparse import csr_matrix from ..sparse import fast_sparse_matrix from ..base_recommender import BaseRecommender class ItemSimilarityRecommender(BaseRecommender): """ Abstract base class for recommenders that generate recommendations from an item similarity matrix. To implement a recommender you just need to supply the compute_similarities() method. """ def _init(self,dataset): if type(dataset) == fast_sparse_matrix: self.dataset = dataset else: self.dataset = fast_sparse_matrix(dataset) self.num_users,self.num_items = self.dataset.shape def fit(self,dataset): """ Learn the complete similarity matrix from a user-item matrix. Parameters ========== dataset : scipy sparse matrix or mrec.sparse.fast_sparse_matrix The matrix of user-item counts, row i holds the counts for the i-th user. """ self._init(dataset) # build up a sparse similarity matrix data = [] row = [] col = [] for j in xrange(self.num_items): w = self.compute_similarities(j) for k,v in enumerate(w): if v != 0: data.append(v) row.append(j) col.append(k) idx = np.array([row,col],dtype='int32') self.similarity_matrix = csr_matrix((data,idx),(self.num_items,self.num_items)) def load_similarity_matrix(self,filepath,num_items,offset=1): """ Parameters ========== filepath : str Filepath to tsv file holding externally computed similarity matrix. num_items : int Total number of items (might exceed highest ID in a sparse similarity matrix). offset : int Item index offset i.e. 1 if indices in file are 1-indexed. """ y = np.loadtxt(filepath) row = y[:,0] col = y[:,1] data = y[:,2] idx = np.array([row,col],dtype='int32')-offset self.similarity_matrix = csr_matrix((data,idx),(num_items,num_items)) def compute_similarities(self,j): """ Compute pairwise similarity scores between the j-th item and every item in the dataset. Parameters ========== j : int Index of item for which to compute similarity scores. Returns ======= similarities : numpy.ndarray Vector of similarity scores. """ pass def get_similar_items(self,j,max_similar_items=30): """ Get the most similar items to a supplied item. Parameters ========== j : int Index of item for which to get similar items. max_similar_items : int Maximum number of similar items to return. Returns ======= sims : list Sorted list of similar items, best first. Each entry is a tuple of the form (i,score). """ if hasattr(self,'similarity_matrix') and self.similarity_matrix is not None: w = zip(self.similarity_matrix[j].indices,self.similarity_matrix[j].data) sims = sorted(w,key=itemgetter(1),reverse=True)[:max_similar_items] sims = [(i,f) for i,f in sims if f > 0] else: w = self.compute_similarities(j) sims = [(i,w[i]) for i in w.argsort()[-1:-max_similar_items-1:-1] if w[i] > 0] return sims def recommend_items(self,dataset,u,max_items=10,return_scores=True): """ Recommend new items for a user. Assumes you've already called fit() to learn the similarity matrix. Parameters ========== dataset : scipy.sparse.csr_matrix User-item matrix containing known items. u : int Index of user for which to make recommendations. max_items : int Maximum number of recommended items to return. return_scores : bool If true return a score along with each recommended item. Returns ======= recs : list List of (idx,score) pairs if return_scores is True, else just a list of idxs. 
""" try: r = (self.similarity_matrix * dataset[u].T).toarray().flatten() except AttributeError: raise AttributeError('you must call fit() before trying to recommend items') known_items = set(dataset[u].indices) recs = [] for i in r.argsort()[::-1]: if i not in known_items: if return_scores: recs.append((i,r[i])) else: recs.append(i) if len(recs) >= max_items: break return recs def batch_recommend_items(self,dataset,max_items=10,return_scores=True,show_progress=False): """ Recommend new items for all users in the training dataset. Assumes you've already called fit() to learn the similarity matrix. Parameters ========== dataset : scipy.sparse.csr_matrix User-item matrix containing known items. max_items : int Maximum number of recommended items to return. return_scores : bool If true return a score along with each recommended item. show_progress: bool If true print something to stdout to show progress. Returns ======= recs : list of lists Each entry is a list of (idx,score) pairs if return_scores is True, else just a list of idxs. """ try: r = dataset * self.similarity_matrix.T except AttributeError: raise AttributeError('you must call fit() before trying to recommend items') # make the predicted score for all known-items # zero or less by substracting the max score from them max_score = r.data.max() # highest predicted score row,col = dataset.nonzero() # locations of known items data = max_score * np.ones(row.shape) r = r - csr_matrix((data,(row,col)),shape=r.shape) num_users = r.shape[0] recs = [[] for u in xrange(num_users)] for u in xrange(num_users): if show_progress and u%1000 == 0: print u,'..', if return_scores: recs[u] = [(i,v) for v,i in sorted(izip(r[u,:].data,r[u,:].indices),reverse=True) if v > 0][:max_items] else: recs[u] = [i for v,i in sorted(izip(r[u,:].data,r[u,:].indices),reverse=True) if v > 0][:max_items] if show_progress: print return recs def range_recommend_items(self,dataset,user_start,user_end,max_items=10,return_scores=True): """ Recommend new items for a range of users in the training dataset. Assumes you've already called fit() to learn the similarity matrix. Parameters ========== dataset : scipy.sparse.csr_matrix User-item matrix containing known items. user_start : int Index of first user in the range to recommend. user_end : int Index one beyond last user in the range to recommend. max_items : int Maximum number of recommended items to return. return_scores : bool If true return a score along with each recommended item. Returns ======= recs : list of lists Each entry is a list of (idx,score) pairs if return_scores is True, else just a list of idxs. """ data_subset = dataset[user_start:user_end,:] try: r = data_subset * self.similarity_matrix.T except AttributeError: raise AttributeError('you must call fit() before trying to recommend items') # make the predicted score for all known-items # zero or less by substracting the max score from them max_score = r.data.max() # highest predicted score row,col = data_subset.nonzero() # locations of known items data = max_score * np.ones(row.shape) r = r - csr_matrix((data,(row,col)),shape=r.shape) recs = [[] for u in xrange(user_start,user_end)] for u in xrange(user_start,user_end): ux = u - user_start ru = r[ux,:] if return_scores: recs[ux] = [(i,v) for v,i in sorted(izip(ru.data,ru.indices),reverse=True) if v > 0][:max_items] else: recs[ux] = [i for v,i in sorted(izip(ru.data,ru.indices),reverse=True) if v > 0][:max_items] return recs
#__BEGIN_LICENSE__ # Copyright (c) 2015, United States Government, as represented by the # Administrator of the National Aeronautics and Space Administration. # All rights reserved. # # The xGDS platform is licensed under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0. # # Unless required by applicable law or agreed to in writing, software distributed # under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR # CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. #__END_LICENSE__ import re import datetime import pytz from dateutil.parser import parse as dateparser import copy import logging import os from django.contrib.contenttypes.fields import GenericRelation from django.db import models from django.conf import settings from django.contrib.auth.models import User from django.core.urlresolvers import reverse from geocamUtil.models.UuidField import UuidField, makeUuid from geocamUtil.models.ExtrasDotField import ExtrasDotField from geocamUtil.modelJson import modelToDict from geocamUtil.dotDict import DotDict from xgds_planner2 import xpjson, statsPlanExporter from geocamUtil.loader import LazyGetModelByName from geocamPycroraptor2.views import getPyraptordClient, stopPyraptordServiceIfRunning from xgds_core.models import NamedURL, NameManager, HasFlight # pylint: disable=C1001,E1101 # SCHEMA = xpjson.loadDocument(settings.XGDS_PLANNER_SCHEMA_PATH) # LIBRARY = xpjson.loadDocument(settings.XGDS_PLANNER_LIBRARY_PATH, schema=SCHEMA) # # _schema = 'xgds_planner2/schema.json' # _library = 'xgds_planner2/library.json' # SIMPLIFIED_SCHEMA_PATH = settings.STATIC_ROOT + _schema # SIMPLIFIED_LIBRARY_PATH = settings.STATIC_ROOT + _library # SIMPLIFIED_SCHEMA_URL = settings.STATIC_URL + _schema # SIMPLIFIED_LIBRARY_URL = settings.STATIC_URL + _library PLAN_SCHEMA_CACHE = {} class AbstractPlanExecution(models.Model, HasFlight): """ Relationship table for managing flight to plan's many to many relationship. 
""" start_time = models.DateTimeField(null=True, blank=True, db_index=True) planned_start_time = models.DateTimeField(null=True, blank=True, db_index=True) end_time = models.DateTimeField(null=True, blank=True, db_index=True) flight = 'set to DEFAULT_FLIGHT_FIELD() or similar in derived classes' plan = 'set to DEFAULT_PLAN_FIELD() or similar in derived classes' def toSimpleDict(self): result = {} result['pk'] = self.pk result['id'] = self.pk result['start_time'] = self.start_time result['planned_start_time'] = self.planned_start_time result['end_time'] = self.end_time if self.plan: result['plan'] = self.plan.pk result['plan_id'] = self.plan.pk else: result['plan'] = None result['plan_id'] = None if self.flight: result['flight'] = self.flight.name result['flight_id'] = self.flight.pk else: result['flight'] = None result['flight_id'] = None return result def __unicode__(self): return str(self.pk) class Meta: abstract = True ordering = ['planned_start_time'] DEFAULT_PLAN_FIELD = lambda: models.ForeignKey('xgds_planner2.Plan', null=True, blank=True) #, related_name="executions") #TODO if you are using a different default flight field then you will have to customize the Plan Execution DEFAULT_FLIGHT_FIELD = lambda: models.ForeignKey('xgds_core.Flight', null=True, blank=True) #, related_name="plans") class PlanExecution(AbstractPlanExecution): flight = DEFAULT_FLIGHT_FIELD() plan = DEFAULT_PLAN_FIELD() class AbstractPlan(models.Model): uuid = UuidField(unique=True, db_index=True) name = models.CharField(max_length=128, db_index=True) dateModified = models.DateTimeField(db_index=True) creator = models.ForeignKey(User, null=True, blank=True, db_index=True) # the canonical serialization of the plan exchanged with javascript clients jsonPlan = ExtrasDotField() # a place to put an auto-generated summary of the plan summary = models.CharField(max_length=4096) # allow users to mark plans as deleted. remember to use this field! 
deleted = models.BooleanField(blank=True, default=False) # allow users to mark plans as read only, so when they are opened they cannot be edited readOnly = models.BooleanField(blank=True, default=False) # cache commonly used stats derived from the plan (relatively expensive to calculate) numStations = models.PositiveIntegerField(default=0) numSegments = models.PositiveIntegerField(default=0) numCommands = models.PositiveIntegerField(default=0) lengthMeters = models.FloatField(null=True, blank=True) estimatedDurationSeconds = models.FloatField(null=True, blank=True) stats = ExtrasDotField() # a place for richer stats such as numCommandsByType namedURLs = GenericRelation(NamedURL) class Meta: ordering = ['-dateModified'] abstract = True @property def acquisition_time(self): return self.dateModified def get_absolute_url(self): return reverse('planner2_plan_save_json', args=[self.pk, self.name]) def extractFromJson(self, overWriteDateModified=True, overWriteUuid=True, request=None): if overWriteUuid: if not self.uuid: self.uuid = makeUuid() self.jsonPlan.uuid = self.uuid self.jsonPlan.serverId = self.pk if overWriteDateModified: self.jsonPlan.dateModified = (datetime.datetime .now(pytz.utc) .replace(microsecond=0) .isoformat()) self.jsonPlan.dateModified = self.jsonPlan.dateModified[:-6]+'Z' self.name = self.jsonPlan.name self.jsonPlan.url = self.get_absolute_url() self.jsonPlan.serverId = self.pk self.dateModified = dateparser(self.jsonPlan.dateModified).replace(tzinfo=pytz.utc) plannerUsers = User.objects.filter(username=self.jsonPlan.creator) if plannerUsers: self.creator = plannerUsers[0] else: self.creator = None # fill in stats try: exporter = statsPlanExporter.StatsPlanExporter() # print ' about to do stats' stats = exporter.exportDbPlan(self, request) for f in ('numStations', 'numSegments', 'numCommands', 'lengthMeters', 'estimatedDurationSeconds'): setattr(self, f, stats[f]) self.stats.numCommandsByType = stats["numCommandsByType"] self.summary = statsPlanExporter.getSummary(stats) except: logging.warning('extractFromJson: could not extract stats from plan %s', self.uuid) raise # FIX return self def getSummaryOfCommandsByType(self): return statsPlanExporter.getSummaryOfCommandsByType(self.stats) # TODO test def toXpjson(self): platform = self.jsonPlan['platform'] if platform: planSchema = getPlanSchema(platform[u'name']) return xpjson.loadDocumentFromDict(self.jsonPlan, schema=planSchema.getSchema()) logging.warning('toXpjson: could not convert to xpjson, probably no schema %s', self.uuid) raise # FIX def escapedName(self): name = re.sub(r'[^\w]', '', self.name) if name == '': return 'plan' else: if self.jsonPlan and self.jsonPlan.planVersion: return name + "_" + self.jsonPlan.planVersion return name def getExportUrl(self, extension): return reverse('planner2_planExport', kwargs={'uuid': self.uuid, 'name': self.escapedName() + extension}) def getExporters(self): import choosePlanExporter # delayed import avoids import loop result = [] for exporterInfo in choosePlanExporter.PLAN_EXPORTERS: info = copy.deepcopy(exporterInfo) info.url = self.getExportUrl(info.extension) result.append(info) return result def getLinks(self): """ The links tab wil be populated with the name, value contents of this dictionary as links, name is the string displayed and link is what will be opened """ result = {"KML": reverse('planner2_planExport', kwargs={'uuid': self.uuid, 'name': self.name + '.kml'})} kwargs = {'plan_id':self.pk, 'crs': settings.XGDS_PLANNER_CRS_UNITS_DEFAULT} if 
settings.XGDS_PLANNER_CRS_UNITS_DEFAULT: result["SummaryCRS"] = reverse('plan_bearing_distance_crs', kwargs=kwargs) else: result["Summary"] = reverse('plan_bearing_distance', kwargs=kwargs) for exporter in self.getExporters(): result[exporter.label] = exporter.url return result def getEscapedId(self): if self.jsonPlan and self.jsonPlan.id: result = re.sub(r'[^\w]', '', self.jsonPlan.id) result = re.sub('_PLAN$', '', result) return result else: return None def toMapDict(self): """ Return a reduced dictionary that will be turned to JSON for rendering in a map Here we are just interested in the route plan and not in activities We just include stations """ result = {} result['id'] = self.uuid result['author'] = self.jsonPlan.creator result['name'] = self.jsonPlan.name result['type'] = 'Plan' if self.jsonPlan.notes: result['notes'] = self.jsonPlan.notes else: result['notes'] = '' stations = [] seq = self.jsonPlan.sequence for el in seq: if el.type == "Station": sta = {} sta['id'] = el.id sta['coords'] = el.geometry.coordinates sta['notes'] = '' if hasattr(el, 'notes'): if el.notes: sta['notes'] = el.notes stations.append(sta) result['stations'] = stations return result def get_tree_json(self): result = {"title": self.name, "key": self.uuid, "tooltip": self.jsonPlan.notes, "data": {"type": "PlanLink", # we cheat so this will be 'live' "json": reverse('planner2_mapJsonPlan', kwargs={'uuid': str(self.uuid)}), "kmlFile": reverse('planner2_planExport', kwargs={'uuid': str(self.uuid), 'name': self.name + '.kml'}), "href": reverse('planner2_edit', kwargs={'plan_id': str(self.pk)}) } } return result @property def executions(self): return LazyGetModelByName(settings.XGDS_PLANNER_PLAN_EXECUTION_MODEL).get().objects.filter(plan=self) def __unicode__(self): if self.name: return self.name else: return 'Unnamed plan ' + self.uuid class Plan(AbstractPlan): pass # PlanSchema used to be a database model, but is now a normal Python # class built from the Django settings. This will not change during # runtime so we should cache these in PLAN_SCHEMA_CACHE. 
class PlanSchema: def __init__(self, platform, schemaDict): self.platform = platform self.schemaSource = settings.PROJ_ROOT + schemaDict['schemaSource'] self.librarySource = settings.PROJ_ROOT + schemaDict['librarySource'] self.simulatorUrl = settings.STATIC_URL + schemaDict['simulatorUrl'] self.simulator = schemaDict['simulator'] schemaSuffix = os.path.join('xgds_planner2', os.path.basename(self.schemaSource)) librarySuffix = os.path.join('xgds_planner2', os.path.basename(self.librarySource)) self.simplifiedSchemaPath = os.path.join(settings.STATIC_ROOT, schemaSuffix) self.simplifiedLibraryPath = os.path.join(settings.STATIC_ROOT, librarySuffix) self.schemaUrl = os.path.join(settings.STATIC_URL, schemaSuffix) self.libraryUrl = os.path.join(settings.STATIC_URL, librarySuffix) self.schema = None self.library = None self.jsonSchema = None self.jsonLibrary = None def getJsonSchema(self): if not self.jsonSchema: try: with open(self.simplifiedSchemaPath) as schemaFile: self.jsonSchema = schemaFile.read() except: # pylint: disable=W0702 logging.warning('could not load XPJSON schema from ' + self.simplifiedSchemaPath) raise return self.jsonSchema def getSchema(self): if not self.schema: try: self.schema = xpjson.loadDocument(self.simplifiedSchemaPath) except: # pylint: disable=W0702 logging.warning('could not load XPJSON schema from ' + self.simplifiedSchemaPath) raise return self.schema def getJsonLibrary(self): if not self.jsonLibrary: try: with open(self.simplifiedLibraryPath) as libraryFile: self.jsonLibrary = libraryFile.read() except: # pylint: disable=W0702 logging.warning('could not load XPJSON library from ' + self.simplifiedLibraryPath) raise return self.jsonLibrary def getLibrary(self): if not self.library: try: self.library = xpjson.loadDocument(self.simplifiedLibraryPath, schema=self.getSchema(), fillInDefaults=True) except: # pylint: disable=W0702 logging.warning('could not load XPJSON library from ' + self.simplifiedLibraryPath) raise return self.library def loadSchema(platform): schemaDict = settings.XGDS_PLANNER_SCHEMAS[platform] schema = PlanSchema(platform, schemaDict) schema.getSchema() schema.getJsonSchema() schema.getLibrary() schema.getJsonLibrary() return schema # get the cached plan schema, building it if need be. def getPlanSchema(platform): result = PLAN_SCHEMA_CACHE.get(platform) if not result: try: result = loadSchema(platform) PLAN_SCHEMA_CACHE[platform] = result except: logging.warning('could not find plan schema for platform %s', platform) raise return result
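# Hedged usage sketch (not part of the production module): the intended path
# from a platform name to parsed XPJSON documents via PLAN_SCHEMA_CACHE. The
# default platform name below is hypothetical and must be a key in
# settings.XGDS_PLANNER_SCHEMAS for the lookup to succeed.
def examplePlanSchemaLookup(platform='GenericVehicle'):
    planSchema = getPlanSchema(platform)  # built once, then served from the cache
    schema = planSchema.getSchema()       # parsed XPJSON schema document
    library = planSchema.getLibrary()     # command library with defaults filled in
    return schema, library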
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import sys from collections import defaultdict from contextlib import contextmanager from itertools import groupby from paste.deploy.converters import asbool from tg import tmpl_context as c, app_globals as g from pymongo.errors import DuplicateKeyError, InvalidDocument, OperationFailure from ming.orm import mapper, session, Mapper from ming.orm.declarative import MappedClass from allura.tasks.index_tasks import add_artifacts from allura.lib.exceptions import CompoundError from allura.lib import helpers as h from allura.lib import utils from . import base import six class ShowModelsCommand(base.Command): min_args = 1 max_args = 1 usage = '<ini file>' summary = 'Show the inheritance graph of all Ming models' parser = base.Command.standard_parser(verbose=True) def command(self): self.basic_setup() graph = build_model_inheritance_graph() for depth, cls in dfs(MappedClass, graph): for line in dump_cls(depth, cls): print(line) class ReindexCommand(base.Command): min_args = 1 max_args = 1 usage = '<ini file>' summary = 'Reindex into solr and re-shortlink all artifacts' parser = base.Command.standard_parser(verbose=True) parser.add_option('-p', '--project', dest='project', default=None, help='project to reindex') parser.add_option('--project-regex', dest='project_regex', default='', help='Restrict reindex to projects for which the shortname matches ' 'the provided regex.') parser.add_option( '-n', '--neighborhood', dest='neighborhood', default=None, help='neighborhood to reindex (e.g. p)') parser.add_option('--solr', action='store_true', dest='solr', help='Solr needs artifact references to already exist.') parser.add_option( '--skip-solr-delete', action='store_true', dest='skip_solr_delete', help='Skip clearing solr index.') parser.add_option('--refs', action='store_true', dest='refs', help='Update artifact references and shortlinks') parser.add_option('--tasks', action='store_true', dest='tasks', help='Run each individual index operation as a background task. ' 'Note: this is often better, since tasks have "request" objects ' 'which are needed for some markdown macros to run properly') parser.add_option('--solr-hosts', dest='solr_hosts', help='Override the solr host(s) to post to. 
Comma-separated list of solr server URLs') parser.add_option( '--max-chunk', dest='max_chunk', type=int, default=100 * 1000, help='Max number of artifacts to index in one Solr update command') parser.add_option('--ming-config', dest='ming_config', help='Path (absolute, or relative to ' 'Allura root) to .ini file defining ming configuration.') def command(self): from allura import model as M self.basic_setup() graph = build_model_inheritance_graph() if self.options.project: q_project = dict(shortname=self.options.project) elif self.options.project_regex: q_project = dict(shortname={'$regex': self.options.project_regex}) elif self.options.neighborhood: neighborhood_id = M.Neighborhood.query.get( url_prefix='/%s/' % self.options.neighborhood)._id q_project = dict(neighborhood_id=neighborhood_id) else: q_project = {} # if none specified, do all if not self.options.solr and not self.options.refs: self.options.solr = self.options.refs = True for projects in utils.chunked_find(M.Project, q_project): for p in projects: c.project = p base.log.info('Reindex project %s', p.shortname) # Clear index for this project if self.options.solr and not self.options.skip_solr_delete: g.solr.delete(q='project_id_s:%s' % p._id) if self.options.refs: M.ArtifactReference.query.remove( {'artifact_reference.project_id': p._id}) M.Shortlink.query.remove({'project_id': p._id}) app_config_ids = [ac._id for ac in p.app_configs] # Traverse the inheritance graph, finding all artifacts that # belong to this project for _, a_cls in dfs(M.Artifact, graph): base.log.info(' %s', a_cls) ref_ids = [] # Create artifact references and shortlinks for a in a_cls.query.find(dict(app_config_id={'$in': app_config_ids})): if self.options.verbose: base.log.info(' %s', a.shorthand_id()) if self.options.refs: try: M.ArtifactReference.from_artifact(a) M.Shortlink.from_artifact(a) except Exception: base.log.exception( 'Making ArtifactReference/Shortlink from %s', a) continue ref_ids.append(a.index_id()) M.main_orm_session.flush() M.artifact_orm_session.clear() try: self._chunked_add_artifacts(ref_ids) except CompoundError as err: base.log.exception( 'Error indexing artifacts:\n%r', err) base.log.error('%s', err.format_error()) M.main_orm_session.flush() M.main_orm_session.clear() base.log.info('Reindex %s', 'queued' if self.options.tasks else 'done') @property def add_artifact_kwargs(self): if self.options.solr_hosts: return {'solr_hosts': self.options.solr_hosts.split(',')} return {} def _chunked_add_artifacts(self, ref_ids): # ref_ids contains solr index ids which can easily be over # 100 bytes. Here we allow for 160 bytes avg, plus # room for other document overhead. for chunk in utils.chunked_list(ref_ids, self.options.max_chunk): if self.options.tasks: self._post_add_artifacts(chunk) else: add_artifacts(chunk, update_solr=self.options.solr, update_refs=self.options.refs, **self.add_artifact_kwargs) def _post_add_artifacts(self, chunk): """ Post task, recursively splitting and re-posting if the resulting mongo document is too large. 
""" try: with self.ming_config(self.options.ming_config): add_artifacts.post(chunk, update_solr=self.options.solr, update_refs=self.options.refs, **self.add_artifact_kwargs) except InvalidDocument as e: # there are many types of InvalidDocument, only recurse if its # expected to help if e.args[0].startswith('BSON document too large'): self._post_add_artifacts(chunk[:len(chunk) // 2]) self._post_add_artifacts(chunk[len(chunk) // 2:]) else: raise @property def ming_config(self): """Return a contextmanager for the provided ming config, if there is one. Otherwise return a no-op contextmanager. """ def noop_cm(*args): yield if self.options.ming_config: return h.ming_config_from_ini return contextmanager(noop_cm) class EnsureIndexCommand(base.Command): min_args = 1 max_args = 1 usage = '[<ini file>]' summary = 'Create all the Mongo indexes specified by Ming models' parser = base.Command.standard_parser(verbose=True) parser.add_option('--clean', action='store_true', dest='clean', help='Drop any unneeded indexes') def command(self): from allura import model as M main_session_classes = [M.main_orm_session, M.repository_orm_session, M.task_orm_session, M.main_explicitflush_orm_session] if asbool(self.config.get('activitystream.recording.enabled', False)): from activitystream.storage.mingstorage import activity_odm_session main_session_classes.append(activity_odm_session) self.basic_setup() # by db, then collection name main_indexes = defaultdict(lambda: defaultdict(list)) project_indexes = defaultdict(list) # by collection name base.log.info('Collecting indexes...') for m in Mapper.all_mappers(): mgr = m.collection.m cname = mgr.collection_name cls = m.mapped_class if cname is None: base.log.info('... skipping abstract class %s', cls) continue base.log.info('... for class %s', cls) if session(cls) in main_session_classes: idx = main_indexes[session(cls)][cname] else: idx = project_indexes[cname] idx.extend(mgr.indexes) base.log.info('Updating indexes for main DB') for odm_session, db_indexes in main_indexes.items(): db = odm_session.impl.db for name, indexes in db_indexes.items(): self._update_indexes(db[name], indexes) base.log.info('Updating indexes for project DB') db = M.project_doc_session.db base.log.info('... DB: %s', db) for name, indexes in project_indexes.items(): self._update_indexes(db[name], indexes) base.log.info('Done updating indexes') def _update_indexes(self, collection, indexes): uindexes = { # convert list to tuple so it's hashable for 'set' tuple(i.index_spec): i for i in indexes if i.unique} indexes = { tuple(i.index_spec): i for i in indexes if not i.unique} prev_indexes = {} prev_uindexes = {} unique_flag_drop = {} unique_flag_add = {} try: existing_indexes = collection.index_information().items() except OperationFailure: # exception is raised if db or collection doesn't exist yet existing_indexes = {} for iname, fields in existing_indexes: if iname == '_id_': continue keys = tuple(fields['key']) if fields.get('unique'): if keys in indexes: unique_flag_drop[iname] = keys else: prev_uindexes[iname] = keys else: if keys in uindexes: unique_flag_add[iname] = keys else: prev_indexes[iname] = keys for iname, keys in unique_flag_drop.items(): self._recreate_index(collection, iname, list(keys), unique=False) for iname, keys in unique_flag_add.items(): self._recreate_index(collection, iname, list(keys), unique=True) # Ensure all indexes for keys, idx in uindexes.items(): base.log.info('...... 
ensure %s:%s', collection.name, idx) while True: try: index_options = idx.index_options.copy() if idx.fields == ('_id',): # as of mongo 3.4 _id fields can't have these options set # _id is always non-sparse and unique anyway del index_options['sparse'] del index_options['unique'] collection.ensure_index(idx.index_spec, **index_options) break except DuplicateKeyError as err: base.log.info('Found dupe key(%s), eliminating dupes', err) self._remove_dupes(collection, idx.index_spec) for keys, idx in indexes.items(): base.log.info('...... ensure %s:%s', collection.name, idx) collection.ensure_index(idx.index_spec, background=True, **idx.index_options) # Drop obsolete indexes for iname, keys in prev_indexes.items(): if keys not in indexes: if self.options.clean: base.log.info('...... drop index %s:%s', collection.name, iname) collection.drop_index(iname) else: base.log.info('...... potentially unneeded index, could be removed by running with --clean %s:%s', collection.name, iname) for iname, keys in prev_uindexes.items(): if keys not in uindexes: if self.options.clean: base.log.info('...... drop index %s:%s', collection.name, iname) collection.drop_index(iname) else: base.log.info('...... potentially unneeded index, could be removed by running with --clean %s:%s', collection.name, iname) def _recreate_index(self, collection, iname, keys, **creation_options): '''Recreate an index with new creation options, using a temporary index so that at no time is an index missing from the specified keys''' superset_keys = keys + [('temporary_extra_field_for_indexing', 1)] base.log.info('...... ensure index %s:%s', collection.name, superset_keys) superset_index = collection.ensure_index(superset_keys) base.log.info('...... drop index %s:%s', collection.name, iname) collection.drop_index(iname) base.log.info('...... ensure index %s:%s %s', collection.name, keys, creation_options) collection.ensure_index(keys, **creation_options) base.log.info('...... drop index %s:%s', collection.name, superset_index) collection.drop_index(superset_index) def _remove_dupes(self, collection, spec): iname = collection.create_index(spec) fields = [f[0] for f in spec] q = collection.find({}, projection=fields).sort(spec) def keyfunc(doc): return tuple(doc.get(f, None) for f in fields) dupes = [] for key, doc_iter in groupby(q, key=keyfunc): docs = list(doc_iter) if len(docs) > 1: base.log.info('Found dupes with %s', key) dupes += [doc['_id'] for doc in docs[1:]] collection.drop_index(iname) collection.remove(dict(_id={'$in': dupes})) def build_model_inheritance_graph(): graph = {m.mapped_class: ([], []) for m in Mapper.all_mappers()} for cls, (parents, children) in graph.items(): for b in cls.__bases__: if b not in graph: continue parents.append(b) graph[b][1].append(cls) return graph def dump_cls(depth, cls): indent = ' ' * 4 * depth yield indent + f'{cls.__module__}.{cls.__name__}' m = mapper(cls) for p in m.properties: s = indent * 2 + ' - ' + str(p) if hasattr(p, 'field_type'): s += ' (%s)' % p.field_type yield s def dfs(root, graph, depth=0): yield depth, root for node in graph[root][1]: yield from dfs(node, graph, depth + 1)
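# Hedged illustration (not part of the Allura commands): the graph consumed by
# dfs() maps each class to a ([parents], [children]) pair, mirroring what
# build_model_inheritance_graph() produces from the Ming mappers. The toy
# classes below are hypothetical stand-ins, used only to show traversal order.
def _demo_dfs_traversal():
    class A: pass
    class B(A): pass
    class C(B): pass
    graph = {A: ([], [B]), B: ([A], [C]), C: ([B], [])}
    # -> [(0, 'A'), (1, 'B'), (2, 'C')]
    return [(depth, cls.__name__) for depth, cls in dfs(A, graph)]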
# Created By: Virgil Dupras # Created On: 2004-12-27 # Copyright 2010 Hardcoded Software (http://www.hardcoded.net) # # This software is licensed under the "BSD" License as described in the "LICENSE" file, # which should be included with this package. The terms are also available at # http://www.hardcoded.net/licenses/bsd_license import hsfs as fs from jobprogress import job from hscommon.util import nonone from hscommon.conflict import get_conflicted_name, is_conflicted class _CopyOf: #--- Public def copy(self, refnode): self.copyof = refnode def detach_copy(self, keep_original_files=False, keep_original_dirs=False): if self.is_container: keep = keep_original_dirs else: keep = keep_original_files if keep: self.copyof = self.original else: self.copyof = None for child in self: child.detach_copy(keep_original_files,keep_original_dirs) #--- Properties copyof = None @property def original(self): if hasattr(self.copyof, 'original'): return self.copyof.original else: return nonone(self.copyof, self) class Node(fs.Node): #--- Override def __init__(self, parent=None, name=''): try: super(Node, self).__init__(parent,name) except fs.AlreadyExistsError: newname = parent._resolve_conflict(parent[name], self, name) if newname: if isinstance(newname, str): super(Node, self).__init__(parent, newname) else: raise def _set_name(self, newname): try: super(Node, self)._set_name(newname) except fs.AlreadyExistsError: newname = self.parent._resolve_conflict(self.parent[newname], self, newname) if newname: if isinstance(newname, str): super(Node, self)._set_name(newname) else: raise #--- Public def delete(self): self.parent = None def move(self, dest, newname=None): dest.add_child(self, newname) def rename(self, newname): self.name = newname class File(fs.File, Node, _CopyOf): #--- Public def copy(self, reffile): super(File,self).copy(reffile) for attrname in reffile.INITIAL_INFO: if attrname in reffile.__dict__: setattr(self, attrname, getattr(reffile, attrname)) self.INITIAL_INFO = reffile.INITIAL_INFO class Directory(fs.Directory, Node, _CopyOf): """A Directory that you can manipulate at will This is the opposite of auto.Directory. When you subclass this, you have to manually add/delete/move everything. Littles notes: You might notice that some AlreadyExistsError are raised in this unit. You might think "hey, fs.Directory covers all possible occurance of AlreadyExistsError, why do you duplicate code here?" It is true that fs.Directory takes care of all this. However, if you look at the code after the raise (in this unit), you will see that , first, it is only in move. And what's special about move funcs is that you can change the name as you move. And to do this, you must delete the child from it's former parent before you add it in it's new parent. If you don't check for conflict *before* and there's is a conflict occuring, you're left with a parent less child. 
""" #--- Class Attributes cls_file_class = File #--- Overrides def __init__(self, parent=None, dirname=''): if isinstance(parent, Directory): self.__case_sensitive = parent.case_sensitive else: self.__case_sensitive = True self._attrs_to_read = None super(Directory, self).__init__(parent, dirname) def _do_hash(self, value): if (not self.case_sensitive) and isinstance(value, str): return value.lower() else: return value #--- Protected def _conflict_check(self, name, node): if name in self: newname = self._resolve_conflict(self[name], node, name) if newname: return newname else: raise fs.AlreadyExistsError(name, self) else: return name def _resolve_conflict(self, offended, offender, conflicted_name): # Virtual """Override this to automatically resolve a name conflict instead of raising an AlreadyExistsError. If you return something else than None or '', there will be a second try to add name. There is no third try. if the result of ResolveConflict is also conflictual, an error will be raised. You can also return a True value that is not a string, and it will cancel the exception raise, but not make a second try. """ #--- Public def add_child(self, child, newname=None): if child in self: return child if not newname: newname = child.name newname = self._conflict_check(newname, child) if not isinstance(newname, str): return child #Just don't perform the add, _resolve_conflict has taken #care of everything child.parent = None child.name = newname child.parent = self if isinstance(child, Directory): child.case_sensitive = self.case_sensitive return child def add_dir_copy(self, refdir, newname='', job=job.nulljob): if not newname: newname = refdir.name result = self._create_sub_dir(newname, False) result.copy(refdir, job) self.add_child(result) return result def add_file_copy(self, reffile, newname=''): if not newname: newname = reffile.name reffile._read_all_info(self._attrs_to_read) result = self._create_sub_file(newname, False) result.copy(reffile) self.add_child(result) return result def add_path(self, path): """ Creates the first item of path (a tuple), and calls _AddPath in this new directory. If the directory already exists, uses this directory. Returns the added (or found) directory. 
""" if not path: return self else: try: founddir = self[path[0]] if not isinstance(founddir, Directory): raise fs.InvalidPath(founddir) except KeyError: founddir = self._create_sub_dir(path[0]) return founddir.add_path(path[1:]) def clean_empty_dirs(self): for directory in self.dirs: directory.clean_empty_dirs() to_delete = (d for d in self.dirs if not len(d)) for directory in to_delete: directory.delete() def copy(self, refdir, job=job.nulljob): super(Directory, self).copy(refdir) filecount = refdir.filecount dircount = refdir.dircount if filecount > 0: job = job.start_subjob(dircount + 1) job.start_job(filecount) else: job = job.start_subjob(dircount) for myfile in refdir.files: self.add_file_copy(myfile) job.add_progress() for directory in refdir.dirs: self.add_dir_copy(directory, '', job) def new_directory(self, name): return self._create_sub_dir(name) def new_file(self, name): return self._create_sub_file(name) #--- Properties @property def case_sensitive(self): return self.__case_sensitive @case_sensitive.setter def case_sensitive(self, value): if value != self.__case_sensitive: self.__case_sensitive = value self._rebuild_hashes() for subdir in self: if isinstance(subdir, Directory): subdir.case_sensitive = value class AutoResolve(Directory): #---Override def _resolve_conflict(self, offended, offender, conflicted_name): if offended.is_container and offender.is_container: should_merge = self.on_should_merge(offender, offended) if should_merge: # There's a circular reference problem from .fs_utils import smart_move smart_move(offender, offended) offender.delete() return True return get_conflicted_name(self, conflicted_name) #---Events def on_should_merge(self, source, dest): if (self.parent is not None) and hasattr(self.parent, 'on_should_merge'): return self.parent.on_should_merge(source, dest) #---Properties @property def allconflicts(self): return self.get_stat('conflicts', []) @property def conflicts(self): return [y for y in self.files if is_conflicted(y.name)] class AutoMerge(AutoResolve): def on_should_merge(self, source, dest): return True
#!/usr/bin/env python # Copyright 2012 The Swarming Authors. All rights reserved. # Use of this source code is governed under the Apache License, Version 2.0 that # can be found in the LICENSE file. """Front end tool to operate on .isolate files. This includes creating, merging or compiling them to generate a .isolated file. See more information at https://code.google.com/p/swarming/wiki/IsolateDesign https://code.google.com/p/swarming/wiki/IsolateUserGuide """ # Run ./isolate.py --help for more detailed information. __version__ = '0.4.4' import datetime import itertools import logging import optparse import os import re import subprocess import sys import auth import isolate_format import isolated_format import isolateserver import run_isolated from third_party import colorama from third_party.depot_tools import fix_encoding from third_party.depot_tools import subcommand from utils import logging_utils from utils import file_path from utils import fs from utils import tools # Exit code of 'archive' and 'batcharchive' if the command fails due to an error # in *.isolate file (format error, or some referenced files are missing, etc.) EXIT_CODE_ISOLATE_ERROR = 1 # Exit code of 'archive' and 'batcharchive' if the command fails due to # a network or server issue. It is an infrastructure failure. EXIT_CODE_UPLOAD_ERROR = 101 # Supported version of *.isolated.gen.json files consumed by CMDbatcharchive. ISOLATED_GEN_JSON_VERSION = 1 class ExecutionError(Exception): """A generic error occurred.""" def __str__(self): return self.args[0] ### Path handling code. def recreate_tree(outdir, indir, infiles, action, as_hash): """Creates a new tree with only the input files in it. Arguments: outdir: Output directory to create the files in. indir: Root directory the infiles are based in. infiles: dict of files to map from |indir| to |outdir|. action: One of accepted action of file_path.link_file(). as_hash: Output filename is the hash instead of relfile. """ logging.info( 'recreate_tree(outdir=%s, indir=%s, files=%d, action=%s, as_hash=%s)' % (outdir, indir, len(infiles), action, as_hash)) assert os.path.isabs(outdir) and outdir == os.path.normpath(outdir), outdir if not os.path.isdir(outdir): logging.info('Creating %s' % outdir) fs.makedirs(outdir) for relfile, metadata in infiles.iteritems(): infile = os.path.join(indir, relfile) if as_hash: # Do the hashtable specific checks. if 'l' in metadata: # Skip links when storing a hashtable. continue outfile = os.path.join(outdir, metadata['h']) if os.path.isfile(outfile): # Just do a quick check that the file size matches. No need to stat() # again the input file, grab the value from the dict. if not 's' in metadata: raise isolated_format.MappingError( 'Misconfigured item %s: %s' % (relfile, metadata)) if metadata['s'] == fs.stat(outfile).st_size: continue else: logging.warn('Overwritting %s' % metadata['h']) fs.remove(outfile) else: outfile = os.path.join(outdir, relfile) outsubdir = os.path.dirname(outfile) if not os.path.isdir(outsubdir): fs.makedirs(outsubdir) if 'l' in metadata: pointed = metadata['l'] logging.debug('Symlink: %s -> %s' % (outfile, pointed)) # symlink doesn't exist on Windows. fs.symlink(pointed, outfile) # pylint: disable=E1101 else: file_path.link_file(outfile, infile, action) ### Variable stuff. def _normalize_path_variable(cwd, relative_base_dir, key, value): """Normalizes a path variable into a relative directory. """ # Variables could contain / or \ on windows. Always normalize to # os.path.sep. 
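  # Illustrative example (hypothetical values): with cwd='/b/src' and
  # value='out/Release', the joined path '/b/src/out/Release' is normalized
  # and then re-expressed relative to the directory holding the .isolate file.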
x = os.path.join(cwd, value.strip().replace('/', os.path.sep)) normalized = file_path.get_native_path_case(os.path.normpath(x)) if not os.path.isdir(normalized): raise ExecutionError('%s=%s is not a directory' % (key, normalized)) # All variables are relative to the .isolate file. normalized = os.path.relpath(normalized, relative_base_dir) logging.debug( 'Translated variable %s from %s to %s', key, value, normalized) return normalized def normalize_path_variables(cwd, path_variables, relative_base_dir): """Processes path variables as a special case and returns a copy of the dict. For each 'path' variable: first normalizes it based on |cwd|, verifies it exists then sets it as relative to relative_base_dir. """ logging.info( 'normalize_path_variables(%s, %s, %s)', cwd, path_variables, relative_base_dir) assert isinstance(cwd, unicode), cwd assert isinstance(relative_base_dir, unicode), relative_base_dir relative_base_dir = file_path.get_native_path_case(relative_base_dir) return dict( (k, _normalize_path_variable(cwd, relative_base_dir, k, v)) for k, v in path_variables.iteritems()) ### Internal state files. def isolatedfile_to_state(filename): """For a '.isolate' file, returns the path to the saved '.state' file.""" return filename + '.state' def chromium_save_isolated(isolated, data, path_variables, algo): """Writes one or many .isolated files. This slightly increases the cold cache cost but greatly reduce the warm cache cost by splitting low-churn files off the master .isolated file. It also reduces overall isolateserver memcache consumption. """ slaves = [] def extract_into_included_isolated(prefix): new_slave = { 'algo': data['algo'], 'files': {}, 'version': data['version'], } for f in data['files'].keys(): if f.startswith(prefix): new_slave['files'][f] = data['files'].pop(f) if new_slave['files']: slaves.append(new_slave) # Split test/data/ in its own .isolated file. extract_into_included_isolated(os.path.join('test', 'data', '')) # Split everything out of PRODUCT_DIR in its own .isolated file. if path_variables.get('PRODUCT_DIR'): extract_into_included_isolated(path_variables['PRODUCT_DIR']) files = [] for index, f in enumerate(slaves): slavepath = isolated[:-len('.isolated')] + '.%d.isolated' % index tools.write_json(slavepath, f, True) data.setdefault('includes', []).append( isolated_format.hash_file(slavepath, algo)) files.append(os.path.basename(slavepath)) files.extend(isolated_format.save_isolated(isolated, data)) return files class Flattenable(object): """Represents data that can be represented as a json file.""" MEMBERS = () def flatten(self): """Returns a json-serializable version of itself. Skips None entries. 
""" items = ((member, getattr(self, member)) for member in self.MEMBERS) return dict((member, value) for member, value in items if value is not None) @classmethod def load(cls, data, *args, **kwargs): """Loads a flattened version.""" data = data.copy() out = cls(*args, **kwargs) for member in out.MEMBERS: if member in data: # Access to a protected member XXX of a client class # pylint: disable=W0212 out._load_member(member, data.pop(member)) if data: raise ValueError( 'Found unexpected entry %s while constructing an object %s' % (data, cls.__name__), data, cls.__name__) return out def _load_member(self, member, value): """Loads a member into self.""" setattr(self, member, value) @classmethod def load_file(cls, filename, *args, **kwargs): """Loads the data from a file or return an empty instance.""" try: out = cls.load(tools.read_json(filename), *args, **kwargs) logging.debug('Loaded %s(%s)', cls.__name__, filename) except (IOError, ValueError) as e: # On failure, loads the default instance. out = cls(*args, **kwargs) logging.warn('Failed to load %s: %s', filename, e) return out class SavedState(Flattenable): """Describes the content of a .state file. This file caches the items calculated by this script and is used to increase the performance of the script. This file is not loaded by run_isolated.py. This file can always be safely removed. It is important to note that the 'files' dict keys are using native OS path separator instead of '/' used in .isolate file. """ MEMBERS = ( # Value of sys.platform so that the file is rejected if loaded from a # different OS. While this should never happen in practice, users are ... # "creative". 'OS', # Algorithm used to generate the hash. The only supported value is at the # time of writting 'sha-1'. 'algo', # List of included .isolated files. Used to support/remember 'slave' # .isolated files. Relative path to isolated_basedir. 'child_isolated_files', # Cache of the processed command. This value is saved because .isolated # files are never loaded by isolate.py so it's the only way to load the # command safely. 'command', # GYP variables that are used to generate conditions. The most frequent # example is 'OS'. 'config_variables', # GYP variables that will be replaced in 'command' and paths but will not be # considered a relative directory. 'extra_variables', # Cache of the files found so the next run can skip hash calculation. 'files', # Path of the original .isolate file. Relative path to isolated_basedir. 'isolate_file', # GYP variables used to generate the .isolated files paths based on path # variables. Frequent examples are DEPTH and PRODUCT_DIR. 'path_variables', # If the generated directory tree should be read-only. Defaults to 1. 'read_only', # Relative cwd to use to start the command. 'relative_cwd', # Root directory the files are mapped from. 'root_dir', # Version of the saved state file format. Any breaking change must update # the value. 'version', ) # Bump this version whenever the saved state changes. It is also keyed on the # .isolated file version so any change in the generator will invalidate .state # files. EXPECTED_VERSION = isolated_format.ISOLATED_FILE_VERSION + '.2' def __init__(self, isolated_basedir): """Creates an empty SavedState. Arguments: isolated_basedir: the directory where the .isolated and .isolated.state files are saved. 
""" super(SavedState, self).__init__() assert os.path.isabs(isolated_basedir), isolated_basedir assert os.path.isdir(isolated_basedir), isolated_basedir self.isolated_basedir = isolated_basedir # The default algorithm used. self.OS = sys.platform self.algo = isolated_format.SUPPORTED_ALGOS['sha-1'] self.child_isolated_files = [] self.command = [] self.config_variables = {} self.extra_variables = {} self.files = {} self.isolate_file = None self.path_variables = {} # Defaults to 1 when compiling to .isolated. self.read_only = None self.relative_cwd = None self.root_dir = None self.version = self.EXPECTED_VERSION def update_config(self, config_variables): """Updates the saved state with only config variables.""" self.config_variables.update(config_variables) def update(self, isolate_file, path_variables, extra_variables): """Updates the saved state with new data to keep GYP variables and internal reference to the original .isolate file. """ assert os.path.isabs(isolate_file) # Convert back to a relative path. On Windows, if the isolate and # isolated files are on different drives, isolate_file will stay an absolute # path. isolate_file = file_path.safe_relpath(isolate_file, self.isolated_basedir) # The same .isolate file should always be used to generate the .isolated and # .isolated.state. assert isolate_file == self.isolate_file or not self.isolate_file, ( isolate_file, self.isolate_file) self.extra_variables.update(extra_variables) self.isolate_file = isolate_file self.path_variables.update(path_variables) def update_isolated(self, command, infiles, read_only, relative_cwd): """Updates the saved state with data necessary to generate a .isolated file. The new files in |infiles| are added to self.files dict but their hash is not calculated here. """ self.command = command # Add new files. for f in infiles: self.files.setdefault(f, {}) # Prune extraneous files that are not a dependency anymore. for f in set(self.files).difference(set(infiles)): del self.files[f] if read_only is not None: self.read_only = read_only self.relative_cwd = relative_cwd def to_isolated(self): """Creates a .isolated dictionary out of the saved state. https://code.google.com/p/swarming/wiki/IsolatedDesign """ def strip(data): """Returns a 'files' entry with only the whitelisted keys.""" return dict((k, data[k]) for k in ('h', 'l', 'm', 's') if k in data) out = { 'algo': isolated_format.SUPPORTED_ALGOS_REVERSE[self.algo], 'files': dict( (filepath, strip(data)) for filepath, data in self.files.iteritems()), # The version of the .state file is different than the one of the # .isolated file. 'version': isolated_format.ISOLATED_FILE_VERSION, } if self.command: out['command'] = self.command out['read_only'] = self.read_only if self.read_only is not None else 1 if self.relative_cwd: out['relative_cwd'] = self.relative_cwd return out @property def isolate_filepath(self): """Returns the absolute path of self.isolate_file.""" return os.path.normpath( os.path.join(self.isolated_basedir, self.isolate_file)) # Arguments number differs from overridden method @classmethod def load(cls, data, isolated_basedir): # pylint: disable=W0221 """Special case loading to disallow different OS. It is not possible to load a .isolated.state files from a different OS, this file is saved in OS-specific format. """ out = super(SavedState, cls).load(data, isolated_basedir) if data.get('OS') != sys.platform: raise isolated_format.IsolatedError('Unexpected OS %s', data.get('OS')) # Converts human readable form back into the proper class type. 
algo = data.get('algo') if not algo in isolated_format.SUPPORTED_ALGOS: raise isolated_format.IsolatedError('Unknown algo \'%s\'' % out.algo) out.algo = isolated_format.SUPPORTED_ALGOS[algo] # Refuse the load non-exact version, even minor difference. This is unlike # isolateserver.load_isolated(). This is because .isolated.state could have # changed significantly even in minor version difference. if out.version != cls.EXPECTED_VERSION: raise isolated_format.IsolatedError( 'Unsupported version \'%s\'' % out.version) # The .isolate file must be valid. If it is not present anymore, zap the # value as if it was not noted, so .isolate_file can safely be overriden # later. if out.isolate_file and not fs.isfile(out.isolate_filepath): out.isolate_file = None if out.isolate_file: # It could be absolute on Windows if the drive containing the .isolate and # the drive containing the .isolated files differ, .e.g .isolate is on # C:\\ and .isolated is on D:\\ . assert not os.path.isabs(out.isolate_file) or sys.platform == 'win32' assert fs.isfile(out.isolate_filepath), out.isolate_filepath return out def flatten(self): """Makes sure 'algo' is in human readable form.""" out = super(SavedState, self).flatten() out['algo'] = isolated_format.SUPPORTED_ALGOS_REVERSE[out['algo']] return out def __str__(self): def dict_to_str(d): return ''.join('\n %s=%s' % (k, d[k]) for k in sorted(d)) out = '%s(\n' % self.__class__.__name__ out += ' command: %s\n' % self.command out += ' files: %d\n' % len(self.files) out += ' isolate_file: %s\n' % self.isolate_file out += ' read_only: %s\n' % self.read_only out += ' relative_cwd: %s\n' % self.relative_cwd out += ' child_isolated_files: %s\n' % self.child_isolated_files out += ' path_variables: %s\n' % dict_to_str(self.path_variables) out += ' config_variables: %s\n' % dict_to_str(self.config_variables) out += ' extra_variables: %s\n' % dict_to_str(self.extra_variables) return out class CompleteState(object): """Contains all the state to run the task at hand.""" def __init__(self, isolated_filepath, saved_state): super(CompleteState, self).__init__() assert isolated_filepath is None or os.path.isabs(isolated_filepath) self.isolated_filepath = isolated_filepath # Contains the data to ease developer's use-case but that is not strictly # necessary. self.saved_state = saved_state @classmethod def load_files(cls, isolated_filepath): """Loads state from disk.""" assert os.path.isabs(isolated_filepath), isolated_filepath isolated_basedir = os.path.dirname(isolated_filepath) return cls( isolated_filepath, SavedState.load_file( isolatedfile_to_state(isolated_filepath), isolated_basedir)) def load_isolate( self, cwd, isolate_file, path_variables, config_variables, extra_variables, blacklist, ignore_broken_items): """Updates self.isolated and self.saved_state with information loaded from a .isolate file. Processes the loaded data, deduce root_dir, relative_cwd. """ # Make sure to not depend on os.getcwd(). assert os.path.isabs(isolate_file), isolate_file isolate_file = file_path.get_native_path_case(isolate_file) logging.info( 'CompleteState.load_isolate(%s, %s, %s, %s, %s, %s)', cwd, isolate_file, path_variables, config_variables, extra_variables, ignore_broken_items) # Config variables are not affected by the paths and must be used to # retrieve the paths, so update them first. self.saved_state.update_config(config_variables) with fs.open(isolate_file, 'r') as f: # At that point, variables are not replaced yet in command and infiles. 
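      # (For example, a command entry may still read '<(PRODUCT_DIR)/unit_tests'
      # at this point; eval_variables() below performs the substitution.)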
# infiles may contain directory entries and is in posix style. command, infiles, read_only, isolate_cmd_dir = ( isolate_format.load_isolate_for_config( os.path.dirname(isolate_file), f.read(), self.saved_state.config_variables)) # Processes the variables with the new found relative root. Note that 'cwd' # is used when path variables are used. path_variables = normalize_path_variables( cwd, path_variables, isolate_cmd_dir) # Update the rest of the saved state. self.saved_state.update(isolate_file, path_variables, extra_variables) total_variables = self.saved_state.path_variables.copy() total_variables.update(self.saved_state.config_variables) total_variables.update(self.saved_state.extra_variables) command = [ isolate_format.eval_variables(i, total_variables) for i in command ] total_variables = self.saved_state.path_variables.copy() total_variables.update(self.saved_state.extra_variables) infiles = [ isolate_format.eval_variables(f, total_variables) for f in infiles ] # root_dir is automatically determined by the deepest root accessed with the # form '../../foo/bar'. Note that path variables must be taken in account # too, add them as if they were input files. self.saved_state.root_dir = isolate_format.determine_root_dir( isolate_cmd_dir, infiles + self.saved_state.path_variables.values()) # The relative directory is automatically determined by the relative path # between root_dir and the directory containing the .isolate file, # isolate_base_dir. relative_cwd = os.path.relpath(isolate_cmd_dir, self.saved_state.root_dir) # Now that we know where the root is, check that the path_variables point # inside it. for k, v in self.saved_state.path_variables.iteritems(): dest = os.path.join(isolate_cmd_dir, relative_cwd, v) if not file_path.path_starts_with(self.saved_state.root_dir, dest): raise isolated_format.MappingError( 'Path variable %s=%r points outside the inferred root directory ' '%s; %s' % (k, v, self.saved_state.root_dir, dest)) # Normalize the files based to self.saved_state.root_dir. It is important to # keep the trailing os.path.sep at that step. infiles = [ file_path.relpath( file_path.normpath(os.path.join(isolate_cmd_dir, f)), self.saved_state.root_dir) for f in infiles ] follow_symlinks = sys.platform != 'win32' # Expand the directories by listing each file inside. Up to now, trailing # os.path.sep must be kept. infiles = isolated_format.expand_directories_and_symlinks( self.saved_state.root_dir, infiles, tools.gen_blacklist(blacklist), follow_symlinks, ignore_broken_items) # Finally, update the new data to be able to generate the foo.isolated file, # the file that is used by run_isolated.py. self.saved_state.update_isolated(command, infiles, read_only, relative_cwd) logging.debug(self) def files_to_metadata(self, subdir): """Updates self.saved_state.files with the files' mode and hash. If |subdir| is specified, filters to a subdirectory. The resulting .isolated file is tainted. See isolated_format.file_to_metadata() for more information. 
""" for infile in sorted(self.saved_state.files): if subdir and not infile.startswith(subdir): self.saved_state.files.pop(infile) else: filepath = os.path.join(self.root_dir, infile) self.saved_state.files[infile] = isolated_format.file_to_metadata( filepath, self.saved_state.files[infile], self.saved_state.read_only, self.saved_state.algo) def save_files(self): """Saves self.saved_state and creates a .isolated file.""" logging.debug('Dumping to %s' % self.isolated_filepath) self.saved_state.child_isolated_files = chromium_save_isolated( self.isolated_filepath, self.saved_state.to_isolated(), self.saved_state.path_variables, self.saved_state.algo) total_bytes = sum( i.get('s', 0) for i in self.saved_state.files.itervalues()) if total_bytes: # TODO(maruel): Stats are missing the .isolated files. logging.debug('Total size: %d bytes' % total_bytes) saved_state_file = isolatedfile_to_state(self.isolated_filepath) logging.debug('Dumping to %s' % saved_state_file) tools.write_json(saved_state_file, self.saved_state.flatten(), True) @property def root_dir(self): return self.saved_state.root_dir def __str__(self): def indent(data, indent_length): """Indents text.""" spacing = ' ' * indent_length return ''.join(spacing + l for l in str(data).splitlines(True)) out = '%s(\n' % self.__class__.__name__ out += ' root_dir: %s\n' % self.root_dir out += ' saved_state: %s)' % indent(self.saved_state, 2) return out def load_complete_state(options, cwd, subdir, skip_update): """Loads a CompleteState. This includes data from .isolate and .isolated.state files. Never reads the .isolated file. Arguments: options: Options instance generated with process_isolate_options. For either options.isolate and options.isolated, if the value is set, it is an absolute path. cwd: base directory to be used when loading the .isolate file. subdir: optional argument to only process file in the subdirectory, relative to CompleteState.root_dir. skip_update: Skip trying to load the .isolate file and processing the dependencies. It is useful when not needed, like when tracing. """ assert not options.isolate or os.path.isabs(options.isolate) assert not options.isolated or os.path.isabs(options.isolated) cwd = file_path.get_native_path_case(unicode(cwd)) if options.isolated: # Load the previous state if it was present. Namely, "foo.isolated.state". # Note: this call doesn't load the .isolate file. complete_state = CompleteState.load_files(options.isolated) else: # Constructs a dummy object that cannot be saved. Useful for temporary # commands like 'run'. There is no directory containing a .isolated file so # specify the current working directory as a valid directory. complete_state = CompleteState(None, SavedState(os.getcwd())) if not options.isolate: if not complete_state.saved_state.isolate_file: if not skip_update: raise ExecutionError('A .isolate file is required.') isolate = None else: isolate = complete_state.saved_state.isolate_filepath else: isolate = options.isolate if complete_state.saved_state.isolate_file: rel_isolate = file_path.safe_relpath( options.isolate, complete_state.saved_state.isolated_basedir) if rel_isolate != complete_state.saved_state.isolate_file: # This happens if the .isolate file was moved for example. In this case, # discard the saved state. logging.warning( '--isolated %s != %s as saved in %s. 
Discarding saved state', rel_isolate, complete_state.saved_state.isolate_file, isolatedfile_to_state(options.isolated)) complete_state = CompleteState( options.isolated, SavedState(complete_state.saved_state.isolated_basedir)) if not skip_update: # Then load the .isolate and expands directories. complete_state.load_isolate( cwd, isolate, options.path_variables, options.config_variables, options.extra_variables, options.blacklist, options.ignore_broken_items) # Regenerate complete_state.saved_state.files. if subdir: subdir = unicode(subdir) # This is tricky here. If it is a path, take it from the root_dir. If # it is a variable, it must be keyed from the directory containing the # .isolate file. So translate all variables first. translated_path_variables = dict( (k, os.path.normpath(os.path.join(complete_state.saved_state.relative_cwd, v))) for k, v in complete_state.saved_state.path_variables.iteritems()) subdir = isolate_format.eval_variables(subdir, translated_path_variables) subdir = subdir.replace('/', os.path.sep) if not skip_update: complete_state.files_to_metadata(subdir) return complete_state def create_isolate_tree(outdir, root_dir, files, relative_cwd, read_only): """Creates a isolated tree usable for test execution. Returns the current working directory where the isolated command should be started in. """ # Forcibly copy when the tree has to be read only. Otherwise the inode is # modified, and this cause real problems because the user's source tree # becomes read only. On the other hand, the cost of doing file copy is huge. if read_only not in (0, None): action = file_path.COPY else: action = file_path.HARDLINK_WITH_FALLBACK recreate_tree( outdir=outdir, indir=root_dir, infiles=files, action=action, as_hash=False) cwd = os.path.normpath(os.path.join(outdir, relative_cwd)) if not fs.isdir(cwd): # It can happen when no files are mapped from the directory containing the # .isolate file. But the directory must exist to be the current working # directory. fs.makedirs(cwd) run_isolated.change_tree_read_only(outdir, read_only) return cwd @tools.profile def prepare_for_archival(options, cwd): """Loads the isolated file and create 'infiles' for archival.""" complete_state = load_complete_state( options, cwd, options.subdir, False) # Make sure that complete_state isn't modified until save_files() is # called, because any changes made to it here will propagate to the files # created (which is probably not intended). complete_state.save_files() infiles = complete_state.saved_state.files # Add all the .isolated files. isolated_hash = [] isolated_files = [ options.isolated, ] + complete_state.saved_state.child_isolated_files for item in isolated_files: item_path = os.path.join( os.path.dirname(complete_state.isolated_filepath), item) # Do not use isolated_format.hash_file() here because the file is # likely smallish (under 500kb) and its file size is needed. with fs.open(item_path, 'rb') as f: content = f.read() isolated_hash.append( complete_state.saved_state.algo(content).hexdigest()) isolated_metadata = { 'h': isolated_hash[-1], 's': len(content), 'priority': '0' } infiles[item_path] = isolated_metadata return complete_state, infiles, isolated_hash def isolate_and_archive(trees, isolate_server, namespace): """Isolates and uploads a bunch of isolated trees. Args: trees: list of pairs (Options, working directory) that describe what tree to isolate. Options are processed by 'process_isolate_options'. isolate_server: URL of Isolate Server to upload to. namespace: namespace to upload to. 
Returns a dict {target name -> isolate hash or None}, where target name is a name of *.isolated file without an extension (e.g. 'base_unittests'). Have multiple failure modes: * If the upload fails due to server or network error returns None. * If some *.isolate file is incorrect (but rest of them are fine and were successfully uploaded), returns a dict where the value of the entry corresponding to invalid *.isolate file is None. """ if not trees: return {} # Helper generator to avoid materializing the full (huge) list of files until # the very end (in upload_tree). def emit_files(root_dir, files): for path, meta in files.iteritems(): yield (os.path.join(root_dir, path), meta) # Process all *.isolate files, it involves parsing, file system traversal and # hashing. The result is a list of generators that produce files to upload # and the mapping {target name -> hash of *.isolated file} to return from # this function. files_generators = [] isolated_hashes = {} with tools.Profiler('Isolate'): for opts, cwd in trees: target_name = os.path.splitext(os.path.basename(opts.isolated))[0] try: complete_state, files, isolated_hash = prepare_for_archival(opts, cwd) files_generators.append(emit_files(complete_state.root_dir, files)) isolated_hashes[target_name] = isolated_hash[0] print('%s %s' % (isolated_hash[0], target_name)) except Exception: logging.exception('Exception when isolating %s', target_name) isolated_hashes[target_name] = None # All bad? Nothing to upload. if all(v is None for v in isolated_hashes.itervalues()): return isolated_hashes # Now upload all necessary files at once. with tools.Profiler('Upload'): try: isolateserver.upload_tree( base_url=isolate_server, infiles=itertools.chain(*files_generators), namespace=namespace) except Exception: logging.exception('Exception while uploading files') return None return isolated_hashes def parse_archive_command_line(args, cwd): """Given list of arguments for 'archive' command returns parsed options. Used by CMDbatcharchive to parse options passed via JSON. See also CMDarchive. """ parser = optparse.OptionParser() add_isolate_options(parser) add_subdir_option(parser) options, args = parser.parse_args(args) if args: parser.error('Unsupported argument: %s' % args) process_isolate_options(parser, options, cwd) return options ### Commands. def CMDarchive(parser, args): """Creates a .isolated file and uploads the tree to an isolate server. All the files listed in the .isolated file are put in the isolate server cache via isolateserver.py. """ add_isolate_options(parser) add_subdir_option(parser) isolateserver.add_isolate_server_options(parser) auth.add_auth_options(parser) options, args = parser.parse_args(args) if args: parser.error('Unsupported argument: %s' % args) process_isolate_options(parser, options) auth.process_auth_options(parser, options) isolateserver.process_isolate_server_options(parser, options, True) result = isolate_and_archive( [(options, unicode(os.getcwd()))], options.isolate_server, options.namespace) if result is None: return EXIT_CODE_UPLOAD_ERROR assert len(result) == 1, result if result.values()[0] is None: return EXIT_CODE_ISOLATE_ERROR return 0 @subcommand.usage('-- GEN_JSON_1 GEN_JSON_2 ...') def CMDbatcharchive(parser, args): """Archives multiple isolated trees at once. Using single command instead of multiple sequential invocations allows to cut redundant work when isolated trees share common files (e.g. file hashes are checked only once, their presence on the server is checked only once, and so on). 
Takes a list of paths to *.isolated.gen.json files that describe what trees to isolate. Format of files is: { "version": 1, "dir": <absolute path to a directory all other paths are relative to>, "args": [list of command line arguments for single 'archive' command] } """ isolateserver.add_isolate_server_options(parser) isolateserver.add_archive_options(parser) auth.add_auth_options(parser) parser.add_option( '--dump-json', metavar='FILE', help='Write isolated hashes of archived trees to this file as JSON') options, args = parser.parse_args(args) auth.process_auth_options(parser, options) isolateserver.process_isolate_server_options(parser, options, True) # Validate all incoming options, prepare what needs to be archived as a list # of tuples (archival options, working directory). work_units = [] for gen_json_path in args: # Validate JSON format of a *.isolated.gen.json file. data = tools.read_json(gen_json_path) if data.get('version') != ISOLATED_GEN_JSON_VERSION: parser.error('Invalid version in %s' % gen_json_path) cwd = data.get('dir') if not isinstance(cwd, unicode) or not fs.isdir(cwd): parser.error('Invalid dir in %s' % gen_json_path) args = data.get('args') if (not isinstance(args, list) or not all(isinstance(x, unicode) for x in args)): parser.error('Invalid args in %s' % gen_json_path) # Convert command line (embedded in JSON) to Options object. work_units.append((parse_archive_command_line(args, cwd), cwd)) # Perform the archival, all at once. isolated_hashes = isolate_and_archive( work_units, options.isolate_server, options.namespace) # TODO(vadimsh): isolate_and_archive returns None on upload failure, there's # no way currently to figure out what *.isolated file from a batch were # successfully uploaded, so consider them all failed (and emit empty dict # as JSON result). if options.dump_json: tools.write_json(options.dump_json, isolated_hashes or {}, False) if isolated_hashes is None: return EXIT_CODE_UPLOAD_ERROR # isolated_hashes[x] is None if 'x.isolate' contains a error. if not all(isolated_hashes.itervalues()): return EXIT_CODE_ISOLATE_ERROR return 0 def CMDcheck(parser, args): """Checks that all the inputs are present and generates .isolated.""" add_isolate_options(parser) add_subdir_option(parser) options, args = parser.parse_args(args) if args: parser.error('Unsupported argument: %s' % args) process_isolate_options(parser, options) complete_state = load_complete_state( options, os.getcwd(), options.subdir, False) # Nothing is done specifically. Just store the result and state. complete_state.save_files() return 0 def CMDremap(parser, args): """Creates a directory with all the dependencies mapped into it. Useful to test manually why a test is failing. The target executable is not run. 
""" add_isolate_options(parser) add_outdir_options(parser) add_skip_refresh_option(parser) options, args = parser.parse_args(args) if args: parser.error('Unsupported argument: %s' % args) cwd = os.getcwd() process_isolate_options(parser, options, cwd, require_isolated=False) process_outdir_options(parser, options, cwd) complete_state = load_complete_state(options, cwd, None, options.skip_refresh) if not fs.isdir(options.outdir): fs.makedirs(options.outdir) print('Remapping into %s' % options.outdir) if fs.listdir(options.outdir): raise ExecutionError('Can\'t remap in a non-empty directory') create_isolate_tree( options.outdir, complete_state.root_dir, complete_state.saved_state.files, complete_state.saved_state.relative_cwd, complete_state.saved_state.read_only) if complete_state.isolated_filepath: complete_state.save_files() return 0 @subcommand.usage('-- [extra arguments]') def CMDrun(parser, args): """Runs the test executable in an isolated (temporary) directory. All the dependencies are mapped into the temporary directory and the directory is cleaned up after the target exits. Argument processing stops at -- and these arguments are appended to the command line of the target to run. For example, use: isolate.py run --isolated foo.isolated -- --gtest_filter=Foo.Bar """ add_isolate_options(parser) add_skip_refresh_option(parser) options, args = parser.parse_args(args) process_isolate_options(parser, options, require_isolated=False) complete_state = load_complete_state( options, os.getcwd(), None, options.skip_refresh) cmd = complete_state.saved_state.command + args if not cmd: raise ExecutionError('No command to run.') cmd = tools.fix_python_path(cmd) outdir = run_isolated.make_temp_dir( u'isolate-%s' % datetime.date.today(), os.path.dirname(complete_state.root_dir)) try: # TODO(maruel): Use run_isolated.run_tha_test(). cwd = create_isolate_tree( outdir, complete_state.root_dir, complete_state.saved_state.files, complete_state.saved_state.relative_cwd, complete_state.saved_state.read_only) file_path.ensure_command_has_abs_path(cmd, cwd) logging.info('Running %s, cwd=%s' % (cmd, cwd)) try: result = subprocess.call(cmd, cwd=cwd) except OSError: sys.stderr.write( 'Failed to executed the command; executable is missing, maybe you\n' 'forgot to map it in the .isolate file?\n %s\n in %s\n' % (' '.join(cmd), cwd)) result = 1 finally: file_path.rmtree(outdir) if complete_state.isolated_filepath: complete_state.save_files() return result def _process_variable_arg(option, opt, _value, parser): """Called by OptionParser to process a --<foo>-variable argument.""" if not parser.rargs: raise optparse.OptionValueError( 'Please use %s FOO=BAR or %s FOO BAR' % (opt, opt)) k = parser.rargs.pop(0) variables = getattr(parser.values, option.dest) if '=' in k: k, v = k.split('=', 1) else: if not parser.rargs: raise optparse.OptionValueError( 'Please use %s FOO=BAR or %s FOO BAR' % (opt, opt)) v = parser.rargs.pop(0) if not re.match('^' + isolate_format.VALID_VARIABLE + '$', k): raise optparse.OptionValueError( 'Variable \'%s\' doesn\'t respect format \'%s\'' % (k, isolate_format.VALID_VARIABLE)) variables.append((k, v.decode('utf-8'))) def add_variable_option(parser): """Adds --isolated and --<foo>-variable to an OptionParser.""" parser.add_option( '-s', '--isolated', metavar='FILE', help='.isolated file to generate or read') # Keep for compatibility. TODO(maruel): Remove once not used anymore. 
parser.add_option( '-r', '--result', dest='isolated', help=optparse.SUPPRESS_HELP) is_win = sys.platform in ('win32', 'cygwin') # There is really 3 kind of variables: # - path variables, like DEPTH or PRODUCT_DIR that should be # replaced opportunistically when tracing tests. # - extraneous things like EXECUTABE_SUFFIX. # - configuration variables that are to be used in deducing the matrix to # reduce. # - unrelated variables that are used as command flags for example. parser.add_option( '--config-variable', action='callback', callback=_process_variable_arg, default=[], dest='config_variables', metavar='FOO BAR', help='Config variables are used to determine which conditions should be ' 'matched when loading a .isolate file, default: %default. ' 'All 3 kinds of variables are persistent accross calls, they are ' 'saved inside <.isolated>.state') parser.add_option( '--path-variable', action='callback', callback=_process_variable_arg, default=[], dest='path_variables', metavar='FOO BAR', help='Path variables are used to replace file paths when loading a ' '.isolate file, default: %default') parser.add_option( '--extra-variable', action='callback', callback=_process_variable_arg, default=[('EXECUTABLE_SUFFIX', '.exe' if is_win else '')], dest='extra_variables', metavar='FOO BAR', help='Extraneous variables are replaced on the \'command\' entry and on ' 'paths in the .isolate file but are not considered relative paths.') def add_isolate_options(parser): """Adds --isolate, --isolated, --out and --<foo>-variable options.""" isolateserver.add_archive_options(parser) group = optparse.OptionGroup(parser, 'Common options') group.add_option( '-i', '--isolate', metavar='FILE', help='.isolate file to load the dependency data from') add_variable_option(group) group.add_option( '--ignore_broken_items', action='store_true', default=bool(os.environ.get('ISOLATE_IGNORE_BROKEN_ITEMS')), help='Indicates that invalid entries in the isolated file to be ' 'only be logged and not stop processing. Defaults to True if ' 'env var ISOLATE_IGNORE_BROKEN_ITEMS is set') parser.add_option_group(group) def add_subdir_option(parser): parser.add_option( '--subdir', help='Filters to a subdirectory. Its behavior changes depending if it ' 'is a relative path as a string or as a path variable. Path ' 'variables are always keyed from the directory containing the ' '.isolate file. Anything else is keyed on the root directory.') def add_skip_refresh_option(parser): parser.add_option( '--skip-refresh', action='store_true', help='Skip reading .isolate file and do not refresh the hash of ' 'dependencies') def add_outdir_options(parser): """Adds --outdir, which is orthogonal to --isolate-server. Note: On upload, separate commands are used between 'archive' and 'hashtable'. On 'download', the same command can download from either an isolate server or a file system. """ parser.add_option( '-o', '--outdir', metavar='DIR', help='Directory used to recreate the tree.') def process_outdir_options(parser, options, cwd): if not options.outdir: parser.error('--outdir is required.') if file_path.is_url(options.outdir): parser.error('Can\'t use an URL for --outdir.') options.outdir = unicode(options.outdir).replace('/', os.path.sep) # outdir doesn't need native path case since tracing is never done from there. options.outdir = os.path.abspath( os.path.normpath(os.path.join(cwd, options.outdir))) # In theory, we'd create the directory outdir right away. Defer doing it in # case there's errors in the command line. 
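# Illustrative invocation only (file names and values are hypothetical); both
# the FOO=BAR and the "FOO BAR" forms are accepted by _process_variable_arg:
#
#   isolate.py check \
#       --isolate base/base_unittests.isolate \
#       --isolated out/base_unittests.isolated \
#       --config-variable OS=linux \
#       --path-variable PRODUCT_DIR out/Release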
def process_isolate_options(parser, options, cwd=None, require_isolated=True): """Handles options added with 'add_isolate_options'. Mutates |options| in place, by normalizing path to isolate file, values of variables, etc. """ cwd = file_path.get_native_path_case(unicode(cwd or os.getcwd())) # Parse --isolated option. if options.isolated: options.isolated = os.path.abspath( os.path.join(cwd, unicode(options.isolated).replace('/', os.path.sep))) if require_isolated and not options.isolated: parser.error('--isolated is required.') if options.isolated and not options.isolated.endswith('.isolated'): parser.error('--isolated value must end with \'.isolated\'') # Processes all the --<foo>-variable flags. def try_make_int(s): """Converts a value to int if possible, converts to unicode otherwise.""" try: return int(s) except ValueError: return s.decode('utf-8') options.config_variables = dict( (k, try_make_int(v)) for k, v in options.config_variables) options.path_variables = dict(options.path_variables) options.extra_variables = dict(options.extra_variables) # Normalize the path in --isolate. if options.isolate: # TODO(maruel): Work with non-ASCII. # The path must be in native path case for tracing purposes. options.isolate = unicode(options.isolate).replace('/', os.path.sep) options.isolate = os.path.abspath(os.path.join(cwd, options.isolate)) options.isolate = file_path.get_native_path_case(options.isolate) def main(argv): dispatcher = subcommand.CommandDispatcher(__name__) parser = logging_utils.OptionParserWithLogging( version=__version__, verbose=int(os.environ.get('ISOLATE_DEBUG', 0))) try: return dispatcher.execute(parser, argv) except isolated_format.MappingError as e: print >> sys.stderr, 'Failed to find an input file: %s' % e return 1 except ExecutionError as e: print >> sys.stderr, 'Execution failure: %s' % e return 1 if __name__ == '__main__': fix_encoding.fix_encoding() tools.disable_buffering() colorama.init() sys.exit(main(sys.argv[1:]))
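# ---------------------------------------------------------------------------
# Illustrative *.isolated.gen.json consumed by CMDbatcharchive above. The
# directory and arguments are hypothetical; the structure follows the format
# documented in that command's docstring (ISOLATED_GEN_JSON_VERSION == 1):
#
#   {
#     "version": 1,
#     "dir": "/b/build/src/out/Release",
#     "args": [
#       "--isolate", "../../base/base_unittests.isolate",
#       "--isolated", "base_unittests.isolated",
#       "--config-variable", "OS=linux"
#     ]
#   }
# ---------------------------------------------------------------------------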
"""The tests for the pilight component.""" import logging import unittest from unittest.mock import patch import socket from datetime import timedelta import pytest from homeassistant import core as ha from homeassistant.setup import setup_component from homeassistant.components import pilight from homeassistant.util import dt as dt_util from tests.common import get_test_home_assistant, assert_setup_component _LOGGER = logging.getLogger(__name__) class PilightDaemonSim: """Class to fake the interface of the pilight python package. Is used in an asyncio loop, thus the mock cannot be accessed to determine if methods where called?! This is solved here in a hackish way by printing errors that can be checked using logging.error mocks. """ callback = None called = None test_message = {"protocol": "kaku_switch", "uuid": "1-2-3-4", "message": { "id": 0, "unit": 0, "off": 1}} def __init__(self, host, port): """Init pilight client, ignore parameters.""" pass def send_code(self, call): # pylint: disable=no-self-use """Handle pilight.send service callback.""" _LOGGER.error('PilightDaemonSim payload: ' + str(call)) def start(self): """Handle homeassistant.start callback. Also sends one test message after start up """ _LOGGER.error('PilightDaemonSim start') # Fake one code receive after daemon started if not self.called: self.callback(self.test_message) self.called = True def stop(self): # pylint: disable=no-self-use """Handle homeassistant.stop callback.""" _LOGGER.error('PilightDaemonSim stop') def set_callback(self, function): """Handle pilight.pilight_received event callback.""" self.callback = function _LOGGER.error('PilightDaemonSim callback: ' + str(function)) @pytest.mark.skip("Flaky") class TestPilight(unittest.TestCase): """Test the Pilight component.""" def setUp(self): # pylint: disable=invalid-name """Set up things to be run when tests are started.""" self.hass = get_test_home_assistant() self.skip_teardown_stop = False def tearDown(self): """Stop everything that was started.""" if not self.skip_teardown_stop: self.hass.stop() @patch('homeassistant.components.pilight._LOGGER.error') def test_connection_failed_error(self, mock_error): """Try to connect at 127.0.0.1:5001 with socket error.""" with assert_setup_component(4): with patch('pilight.pilight.Client', side_effect=socket.error) as mock_client: assert not setup_component( self.hass, pilight.DOMAIN, {pilight.DOMAIN: {}}) mock_client.assert_called_once_with(host=pilight.DEFAULT_HOST, port=pilight.DEFAULT_PORT) assert 1 == mock_error.call_count @patch('homeassistant.components.pilight._LOGGER.error') def test_connection_timeout_error(self, mock_error): """Try to connect at 127.0.0.1:5001 with socket timeout.""" with assert_setup_component(4): with patch('pilight.pilight.Client', side_effect=socket.timeout) as mock_client: assert not setup_component( self.hass, pilight.DOMAIN, {pilight.DOMAIN: {}}) mock_client.assert_called_once_with(host=pilight.DEFAULT_HOST, port=pilight.DEFAULT_PORT) assert 1 == mock_error.call_count @patch('pilight.pilight.Client', PilightDaemonSim) @patch('homeassistant.core._LOGGER.error') @patch('tests.components.test_pilight._LOGGER.error') def test_send_code_no_protocol(self, mock_pilight_error, mock_error): """Try to send data without protocol information, should give error.""" with assert_setup_component(4): assert setup_component( self.hass, pilight.DOMAIN, {pilight.DOMAIN: {}}) # Call without protocol info, should be ignored with error self.hass.services.call(pilight.DOMAIN, pilight.SERVICE_NAME, 
service_data={'noprotocol': 'test', 'value': 42}, blocking=True) self.hass.block_till_done() error_log_call = mock_error.call_args_list[-1] assert 'required key not provided @ data[\'protocol\']' in \ str(error_log_call) @patch('pilight.pilight.Client', PilightDaemonSim) @patch('tests.components.test_pilight._LOGGER.error') def test_send_code(self, mock_pilight_error): """Try to send proper data.""" with assert_setup_component(4): assert setup_component( self.hass, pilight.DOMAIN, {pilight.DOMAIN: {}}) # Call with protocol info, should not give error service_data = {'protocol': 'test', 'value': 42} self.hass.services.call(pilight.DOMAIN, pilight.SERVICE_NAME, service_data=service_data, blocking=True) self.hass.block_till_done() error_log_call = mock_pilight_error.call_args_list[-1] service_data['protocol'] = [service_data['protocol']] assert str(service_data) in str(error_log_call) @patch('pilight.pilight.Client', PilightDaemonSim) @patch('homeassistant.components.pilight._LOGGER.error') def test_send_code_fail(self, mock_pilight_error): """Check IOError exception error message.""" with assert_setup_component(4): with patch('pilight.pilight.Client.send_code', side_effect=IOError): assert setup_component( self.hass, pilight.DOMAIN, {pilight.DOMAIN: {}}) # Call with protocol info, should not give error service_data = {'protocol': 'test', 'value': 42} self.hass.services.call(pilight.DOMAIN, pilight.SERVICE_NAME, service_data=service_data, blocking=True) self.hass.block_till_done() error_log_call = mock_pilight_error.call_args_list[-1] assert 'Pilight send failed' in str(error_log_call) @patch('pilight.pilight.Client', PilightDaemonSim) @patch('tests.components.test_pilight._LOGGER.error') def test_send_code_delay(self, mock_pilight_error): """Try to send proper data with delay afterwards.""" with assert_setup_component(4): assert setup_component( self.hass, pilight.DOMAIN, {pilight.DOMAIN: {pilight.CONF_SEND_DELAY: 5.0}}) # Call with protocol info, should not give error service_data1 = {'protocol': 'test11', 'value': 42} service_data2 = {'protocol': 'test22', 'value': 42} self.hass.services.call(pilight.DOMAIN, pilight.SERVICE_NAME, service_data=service_data1, blocking=True) self.hass.services.call(pilight.DOMAIN, pilight.SERVICE_NAME, service_data=service_data2, blocking=True) service_data1['protocol'] = [service_data1['protocol']] service_data2['protocol'] = [service_data2['protocol']] self.hass.bus.fire(ha.EVENT_TIME_CHANGED, {ha.ATTR_NOW: dt_util.utcnow()}) self.hass.block_till_done() error_log_call = mock_pilight_error.call_args_list[-1] assert str(service_data1) in str(error_log_call) new_time = dt_util.utcnow() + timedelta(seconds=5) self.hass.bus.fire(ha.EVENT_TIME_CHANGED, {ha.ATTR_NOW: new_time}) self.hass.block_till_done() error_log_call = mock_pilight_error.call_args_list[-1] assert str(service_data2) in str(error_log_call) @patch('pilight.pilight.Client', PilightDaemonSim) @patch('tests.components.test_pilight._LOGGER.error') def test_start_stop(self, mock_pilight_error): """Check correct startup and stop of pilight daemon.""" with assert_setup_component(4): assert setup_component( self.hass, pilight.DOMAIN, {pilight.DOMAIN: {}}) # Test startup self.hass.start() self.hass.block_till_done() error_log_call = mock_pilight_error.call_args_list[-2] assert 'PilightDaemonSim callback' in str(error_log_call) error_log_call = mock_pilight_error.call_args_list[-1] assert 'PilightDaemonSim start' in str(error_log_call) # Test stop self.skip_teardown_stop = True self.hass.stop() 
error_log_call = mock_pilight_error.call_args_list[-1] assert 'PilightDaemonSim stop' in str(error_log_call) @patch('pilight.pilight.Client', PilightDaemonSim) @patch('homeassistant.core._LOGGER.info') def test_receive_code(self, mock_info): """Check if code receiving via pilight daemon works.""" with assert_setup_component(4): assert setup_component( self.hass, pilight.DOMAIN, {pilight.DOMAIN: {}}) # Test startup self.hass.start() self.hass.block_till_done() expected_message = dict( {'protocol': PilightDaemonSim.test_message['protocol'], 'uuid': PilightDaemonSim.test_message['uuid']}, **PilightDaemonSim.test_message['message']) error_log_call = mock_info.call_args_list[-1] # Check if all message parts are put on event bus for key, value in expected_message.items(): assert str(key) in str(error_log_call) assert str(value) in str(error_log_call) @patch('pilight.pilight.Client', PilightDaemonSim) @patch('homeassistant.core._LOGGER.info') def test_whitelist_exact_match(self, mock_info): """Check whitelist filter with matched data.""" with assert_setup_component(4): whitelist = { 'protocol': [PilightDaemonSim.test_message['protocol']], 'uuid': [PilightDaemonSim.test_message['uuid']], 'id': [PilightDaemonSim.test_message['message']['id']], 'unit': [PilightDaemonSim.test_message['message']['unit']]} assert setup_component( self.hass, pilight.DOMAIN, {pilight.DOMAIN: {"whitelist": whitelist}}) self.hass.start() self.hass.block_till_done() expected_message = dict( {'protocol': PilightDaemonSim.test_message['protocol'], 'uuid': PilightDaemonSim.test_message['uuid']}, **PilightDaemonSim.test_message['message']) info_log_call = mock_info.call_args_list[-1] # Check if all message parts are put on event bus for key, value in expected_message.items(): assert str(key) in str(info_log_call) assert str(value) in str(info_log_call) @patch('pilight.pilight.Client', PilightDaemonSim) @patch('homeassistant.core._LOGGER.info') def test_whitelist_partial_match(self, mock_info): """Check whitelist filter with partially matched data, should work.""" with assert_setup_component(4): whitelist = { 'protocol': [PilightDaemonSim.test_message['protocol']], 'id': [PilightDaemonSim.test_message['message']['id']]} assert setup_component( self.hass, pilight.DOMAIN, {pilight.DOMAIN: {"whitelist": whitelist}}) self.hass.start() self.hass.block_till_done() expected_message = dict( {'protocol': PilightDaemonSim.test_message['protocol'], 'uuid': PilightDaemonSim.test_message['uuid']}, **PilightDaemonSim.test_message['message']) info_log_call = mock_info.call_args_list[-1] # Check if all message parts are put on event bus for key, value in expected_message.items(): assert str(key) in str(info_log_call) assert str(value) in str(info_log_call) @patch('pilight.pilight.Client', PilightDaemonSim) @patch('homeassistant.core._LOGGER.info') def test_whitelist_or_match(self, mock_info): """Check whitelist filter with several subsection, should work.""" with assert_setup_component(4): whitelist = { 'protocol': [PilightDaemonSim.test_message['protocol'], 'other_protocol'], 'id': [PilightDaemonSim.test_message['message']['id']]} assert setup_component( self.hass, pilight.DOMAIN, {pilight.DOMAIN: {"whitelist": whitelist}}) self.hass.start() self.hass.block_till_done() expected_message = dict( {'protocol': PilightDaemonSim.test_message['protocol'], 'uuid': PilightDaemonSim.test_message['uuid']}, **PilightDaemonSim.test_message['message']) info_log_call = mock_info.call_args_list[-1] # Check if all message parts are put on event bus for key, 
value in expected_message.items(): assert str(key) in str(info_log_call) assert str(value) in str(info_log_call) @patch('pilight.pilight.Client', PilightDaemonSim) @patch('homeassistant.core._LOGGER.info') def test_whitelist_no_match(self, mock_info): """Check whitelist filter with unmatched data, should not work.""" with assert_setup_component(4): whitelist = { 'protocol': ['wrong_protocol'], 'id': [PilightDaemonSim.test_message['message']['id']]} assert setup_component( self.hass, pilight.DOMAIN, {pilight.DOMAIN: {"whitelist": whitelist}}) self.hass.start() self.hass.block_till_done() info_log_call = mock_info.call_args_list[-1] assert not ('Event pilight_received' in info_log_call) class TestPilightCallrateThrottler(unittest.TestCase): """Test the Throttler used to throttle calls to send_code.""" def setUp(self): # pylint: disable=invalid-name """Set up things to be run when tests are started.""" self.hass = get_test_home_assistant() def tearDown(self): """Stop everything that was started.""" self.hass.stop() def test_call_rate_delay_throttle_disabled(self): """Test that the limiter is a noop if no delay set.""" runs = [] limit = pilight.CallRateDelayThrottle(self.hass, 0.0) action = limit.limited(lambda x: runs.append(x)) for i in range(3): action(i) assert runs == [0, 1, 2] def test_call_rate_delay_throttle_enabled(self): """Test that throttling actually work.""" runs = [] delay = 5.0 limit = pilight.CallRateDelayThrottle(self.hass, delay) action = limit.limited(lambda x: runs.append(x)) for i in range(3): action(i) assert runs == [] exp = [] now = dt_util.utcnow() for i in range(3): exp.append(i) shifted_time = now + (timedelta(seconds=delay + 0.1) * i) self.hass.bus.fire(ha.EVENT_TIME_CHANGED, {ha.ATTR_NOW: shifted_time}) self.hass.block_till_done() assert runs == exp
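# --- Illustrative sketch (not part of the original test module) ---
# The throttle tests above exercise pilight.CallRateDelayThrottle, which
# spaces successive calls by a configured delay using callbacks scheduled on
# the Home Assistant clock.  The class below is only a minimal,
# self-contained illustration of the same idea using blocking sleeps; the
# name SimpleDelayThrottle and its API are hypothetical, not Home Assistant
# code.
import time


class SimpleDelayThrottle(object):
    """Run wrapped calls no closer together than `min_interval` seconds."""

    def __init__(self, min_interval):
        self._min_interval = min_interval
        self._last_call = None

    def limited(self, action):
        """Return a wrapper around `action` that enforces the minimum interval."""
        def wrapper(*args, **kwargs):
            now = time.monotonic()
            if self._last_call is not None:
                wait = self._min_interval - (now - self._last_call)
                if wait > 0:
                    time.sleep(wait)
            self._last_call = time.monotonic()
            return action(*args, **kwargs)
        return wrapper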
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Multi-GPU tests for MirroredStrategy.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import sys from tensorflow.contrib.distribute.python import mirrored_strategy from tensorflow.contrib.distribute.python import strategy_test_lib from tensorflow.contrib.distribute.python import values from tensorflow.core.protobuf import config_pb2 from tensorflow.python.data.ops import dataset_ops from tensorflow.python.eager import context from tensorflow.python.eager import test from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import test_util from tensorflow.python.layers import core from tensorflow.python.ops import rnn from tensorflow.python.ops import rnn_cell_impl from tensorflow.python.ops import variable_scope from tensorflow.python.ops import variables from tensorflow.python.training import distribute as distribute_lib GPU_TEST = "test_gpu" in sys.argv[0] class MirroredTwoDeviceDistributionTest(strategy_test_lib.DistributionTestBase): def _get_distribution_strategy(self): devices = ["/device:CPU:0", "/device:GPU:0"] if GPU_TEST: self.assertGreater(context.num_gpus(), 0) if context.num_gpus() > 1: devices = ["/device:GPU:0", "/device:GPU:1"] print(self.id().split(".")[-1], "devices:", ", ".join(devices)) return mirrored_strategy.MirroredStrategy(devices) def testMinimizeLossEager(self): if not GPU_TEST: self.skipTest("Not GPU test") self._test_minimize_loss_eager(self._get_distribution_strategy()) def testMinimizeLossGraph(self): soft_placement = not GPU_TEST print("testMinimizeLossGraph soft_placement:", soft_placement) self._test_minimize_loss_graph( self._get_distribution_strategy(), soft_placement=soft_placement) def testMapReduce(self): if not GPU_TEST: self.skipTest("Not GPU test") self._test_map_reduce(self._get_distribution_strategy()) def testDeviceIndex(self): if not GPU_TEST: self.skipTest("Not GPU test") self._test_device_index(self._get_distribution_strategy()) def testTowerId(self): if not GPU_TEST: self.skipTest("Not GPU test") self._test_tower_id(self._get_distribution_strategy()) def testNumTowers(self): if not GPU_TEST: self.skipTest("Not GPU test") self.assertEqual(2, self._get_distribution_strategy().num_towers) @test_util.run_in_graph_and_eager_modes() def testCallAndMergeExceptions(self): if not GPU_TEST: self.skipTest("Not GPU test") self._test_call_and_merge_exceptions(self._get_distribution_strategy()) @test_util.run_in_graph_and_eager_modes() def testRunRegroupError(self): def run_fn(device_id): # Generates a list with different lengths on different devices. # Will fail in _regroup() (if more than one device). 
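# (Tower 0 returns [] and tower 1 returns [0], so the per-tower results have
# mismatched structures and cannot be merged back into a single distributed
# value.)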
return list(range(device_id)) dist = self._get_distribution_strategy() with dist.scope(), self.assertRaises(AssertionError): dist.call_for_each_tower(run_fn, dist.worker_device_index) @test_util.run_in_graph_and_eager_modes() def testReduceToCpu(self): if not GPU_TEST: self.skipTest("Not GPU test") def run_fn(device_id): return device_id dist = self._get_distribution_strategy() with dist.scope(): result = dist.call_for_each_tower(run_fn, dist.worker_device_index) reduced = dist.reduce("sum", result, destinations="/device:CPU:0") unwrapped = dist.unwrap(reduced) self.assertEqual(1, len(unwrapped)) expected = sum(range(len(dist.worker_devices))) self.assertEqual(expected, self.evaluate(unwrapped[0])) class MirroredStrategyVariableCreationTest(test.TestCase): config = config_pb2.ConfigProto() config.allow_soft_placement = True def _skip_eager_if_gpus_less_than(self, num_gpus): if context.num_gpus() < num_gpus and context.executing_eagerly(): self.skipTest("Enough GPUs not available for this test in eager mode.") @test_util.run_in_graph_and_eager_modes(config=config) def testSingleVariable(self): self._skip_eager_if_gpus_less_than(1) def model_fn(): # This variable should be created only once across the threads because of # special variable_creator functions used by `dist.call_for_each_tower`. v = variable_scope.variable(1.0, name="foo") distribute_lib.get_tower_context().merge_call(lambda _: _) return v dist = mirrored_strategy.MirroredStrategy( ["/device:GPU:0", "/device:CPU:0"]) with dist.scope(): result = dist.call_for_each_tower(model_fn, run_concurrently=False) self.assertIsInstance(result, values.MirroredVariable) self.assertEquals("foo:0", result.name) @test_util.run_in_graph_and_eager_modes(config=config) def testUnnamedVariable(self): self._skip_eager_if_gpus_less_than(1) def model_fn(): v = variable_scope.variable(1.0) distribute_lib.get_tower_context().merge_call(lambda _: _) return v dist = mirrored_strategy.MirroredStrategy( ["/device:GPU:0", "/device:CPU:0"]) with dist.scope(): result = dist.call_for_each_tower(model_fn, run_concurrently=False) self.assertIsInstance(result, values.MirroredVariable) # Default name of "Variable" will be used. 
self.assertEquals("Variable:0", result.name) @test_util.run_in_graph_and_eager_modes(config=config) def testMultipleVariables(self): self._skip_eager_if_gpus_less_than(1) def model_fn(): vs = [] for i in range(5): vs.append(variable_scope.variable(1.0, name="foo" + str(i))) distribute_lib.get_tower_context().merge_call(lambda _: _) return vs dist = mirrored_strategy.MirroredStrategy( ["/device:GPU:0", "/device:CPU:0"]) with dist.scope(): result = dist.call_for_each_tower(model_fn, run_concurrently=False) for i, v in enumerate(result): self.assertIsInstance(v, values.MirroredVariable) self.assertEquals("foo" + str(i) + ":0", v.name) @test_util.run_in_graph_and_eager_modes(config=config) def testMultipleVariablesWithSameCanonicalName(self): self._skip_eager_if_gpus_less_than(1) def model_fn(): vs = [] vs.append(variable_scope.variable(1.0, name="foo/bar")) vs.append(variable_scope.variable(1.0, name="foo_1/bar")) vs.append(variable_scope.variable(1.0, name="foo_1/bar_1")) vs.append(variable_scope.variable(1.0, name="foo/bar_1")) distribute_lib.get_tower_context().merge_call(lambda _: _) return vs dist = mirrored_strategy.MirroredStrategy( ["/device:GPU:0", "/device:CPU:0"]) with dist.scope(): result = dist.call_for_each_tower(model_fn, run_concurrently=False) for v in result: self.assertIsInstance(v, values.MirroredVariable) self.assertEquals(4, len(result)) self.assertEquals("foo/bar:0", result[0].name) self.assertEquals("foo_1/bar:0", result[1].name) self.assertEquals("foo_1/bar_1:0", result[2].name) self.assertEquals("foo/bar_1:0", result[3].name) @test_util.run_in_graph_and_eager_modes(config=config) def testVariableWithSameCanonicalNameAcrossThreads(self): self._skip_eager_if_gpus_less_than(1) def model_fn(device_id): v = variable_scope.variable(1.0, name="foo_" + str(device_id)) distribute_lib.get_tower_context().merge_call(lambda _: _) return v dist = mirrored_strategy.MirroredStrategy( ["/device:GPU:0", "/device:CPU:0"]) with dist.scope(): result = dist.call_for_each_tower( model_fn, dist.worker_device_index, run_concurrently=False) self.assertIsInstance(result, values.MirroredVariable) # The resulting mirrored variable will use the name from the first device. self.assertEquals("foo_0:0", result.name) @test_util.run_in_graph_and_eager_modes(config=config) def testWithLayers(self): self._skip_eager_if_gpus_less_than(1) def model_fn(features): with variable_scope.variable_scope("common"): layer1 = core.Dense(1) layer1(features) layer2 = core.Dense(1) layer2(features) # This will pause the current thread, and execute the other thread. 
distribute_lib.get_tower_context().merge_call(lambda _: _) layer3 = core.Dense(1) layer3(features) return [(layer1.kernel, layer1.bias), (layer2.kernel, layer2.bias), (layer3.kernel, layer3.bias)] dist = mirrored_strategy.MirroredStrategy( ["/device:GPU:0", "/device:CPU:0"]) features = dist.distribute_dataset( lambda: dataset_ops.Dataset.from_tensors([[1.]]).repeat(10) ).make_one_shot_iterator().get_next() with dist.scope(): result = dist.call_for_each_tower( model_fn, features, run_concurrently=False) suffixes = ["", "_1", "_2"] for (kernel, bias), suffix in zip(result, suffixes): self.assertIsInstance(kernel, values.MirroredVariable) self.assertEquals("common/dense" + suffix + "/kernel:0", kernel.name) self.assertIsInstance(bias, values.MirroredVariable) self.assertEquals("common/dense" + suffix + "/bias:0", bias.name) @test_util.run_in_graph_and_eager_modes(config=config) def testWithGetVariableAndVariableScope(self): self._skip_eager_if_gpus_less_than(1) def model_fn(): v0 = variable_scope.get_variable("var-thread0", [1]) with variable_scope.variable_scope("common"): v1 = variable_scope.get_variable("var-thread1", [1]) # This will pause the current thread, and execute the other thread. distribute_lib.get_tower_context().merge_call(lambda _: _) v2 = variable_scope.get_variable("var-thread2", [1]) return v0, v1, v2 devices = ["/device:CPU:0", "/device:GPU:0"] dist = mirrored_strategy.MirroredStrategy(devices) with dist.scope(): with variable_scope.variable_scope("main"): v = variable_scope.get_variable("var-main0", [1]) self.assertEquals("main/var-main0:0", v.name) result = dist.call_for_each_tower(model_fn, run_concurrently=False) self.assertEquals(3, len(result)) v0, v1, v2 = result self.assertIsInstance(v0, values.MirroredVariable) self.assertEquals("main/var-thread0:0", v0.name) self.assertIsInstance(v1, values.MirroredVariable) self.assertEquals("main/common/var-thread1:0", v1.name) self.assertIsInstance(v2, values.MirroredVariable) self.assertEquals("main/common/var-thread2:0", v2.name) @test_util.run_in_graph_and_eager_modes(config=config) def testThreeDevices(self): self._skip_eager_if_gpus_less_than(2) def model_fn(): v = variable_scope.variable(1.0, name="foo") distribute_lib.get_tower_context().merge_call(lambda _: _) return v dist = mirrored_strategy.MirroredStrategy( ["/device:GPU:0", "/device:GPU:1", "/device:CPU:0"]) with dist.scope(): result = dist.call_for_each_tower(model_fn, run_concurrently=False) self.assertIsInstance(result, values.MirroredVariable) self.assertEquals("foo:0", result.name) @test_util.run_in_graph_and_eager_modes(config=config) def testNonMatchingVariableCreation(self): self._skip_eager_if_gpus_less_than(1) def model_fn(name): v = variable_scope.variable(1.0, name=name) distribute_lib.get_tower_context().merge_call(lambda _: _) return v dist = mirrored_strategy.MirroredStrategy( ["/device:GPU:0", "/device:CPU:0"]) with dist.scope(): names = values.DistributedValues({ "/device:CPU:0": "foo", "/device:GPU:0": "bar" }) with self.assertRaises(RuntimeError): _ = dist.call_for_each_tower(model_fn, names, run_concurrently=False) @test_util.run_in_graph_and_eager_modes(config=config) def testTowerLocalVariable(self): self._skip_eager_if_gpus_less_than(1) all_v_sum = {} all_v_mean = {} def model_fn(device_id): tower_context = distribute_lib.get_tower_context() with tower_context.tower_local_var_scope("sum"): v_sum = variable_scope.variable(1.0) with tower_context.tower_local_var_scope("mean"): v_mean = variable_scope.variable(4.0) 
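# Tower-local variables are not mirrored: each tower keeps its own value, and
# the "sum"/"mean" scopes above determine whether the per-tower values are
# summed or averaged when the variable is fetched, as asserted further below.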
self.assertTrue(isinstance(v_sum, values.TowerLocalVariable)) self.assertTrue(isinstance(v_mean, values.TowerLocalVariable)) updates = [v_sum.assign_add(2.0 + device_id), v_mean.assign(6.0 * device_id)] all_v_sum[device_id] = v_sum all_v_mean[device_id] = v_mean return updates, v_sum, v_mean dist = mirrored_strategy.MirroredStrategy( ["/device:GPU:0", "/device:CPU:0"]) with dist.scope(): # Create "sum" and "mean" versions of TowerLocalVariables. ret_ops, ret_v_sum, ret_v_mean = dist.call_for_each_tower( model_fn, dist.worker_device_index, run_concurrently=False) # Should see the same wrapping instance in all towers. self.assertIs(all_v_sum[0], ret_v_sum) self.assertIs(all_v_mean[0], ret_v_mean) for i in range(1, dist.num_towers): self.assertIs(all_v_sum[0], all_v_sum[1]) self.assertIs(all_v_mean[0], all_v_mean[1]) # Apply updates self.evaluate(variables.global_variables_initializer()) self.evaluate([y for x in ret_ops for y in dist.unwrap(x)]) expected_sum = 0.0 expected_mean = 0.0 for i, d in enumerate(dist.worker_devices): # Should see different values on different devices. v_sum_value = self.evaluate(ret_v_sum.get(d).read_value()) v_mean_value = self.evaluate(ret_v_mean.get(d).read_value()) expected = i + 3.0 self.assertEqual(expected, v_sum_value) expected_sum += expected expected = i * 6.0 self.assertEqual(expected, v_mean_value) expected_mean += expected expected_mean /= len(dist.worker_devices) # Without get(device), should return the value you get by # applying the reduction across all towers (whether you use # fetch(), get(), or nothing). self.assertEqual(expected_sum, self.evaluate(dist.fetch(ret_v_sum))) self.assertEqual(expected_mean, self.evaluate(dist.fetch(ret_v_mean))) self.assertEqual(expected_sum, self.evaluate(ret_v_sum.get())) self.assertEqual(expected_mean, self.evaluate(ret_v_mean.get())) if not context.executing_eagerly(): self.assertEqual(expected_sum, self.evaluate(ret_v_sum)) self.assertEqual(expected_mean, self.evaluate(ret_v_mean)) # NOTE(priyag): Names and name scopes are ignored in eager, hence we are not # testing this in eager mode. 
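# (The graph-mode tests below check that ops and variables created on tower 1
# pick up a "tower_1/" name-scope prefix, while tower 0 keeps the unprefixed
# names.)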
def testNameScope(self): def model_fn(): with ops.name_scope("foo"): a = constant_op.constant(1.0, name="a") distribute_lib.get_tower_context().merge_call(lambda _: _) b = constant_op.constant(1.0, name="b") return a, b dist = mirrored_strategy.MirroredStrategy( ["/device:GPU:0", "/device:CPU:0"]) with context.graph_mode(), dist.scope(): with ops.name_scope("main"): result = dist.call_for_each_tower(model_fn, run_concurrently=False) self.assertEquals(2, len(result)) for v, name in zip(result, ["a", "b"]): self.assertIsInstance(v, values.DistributedValues) v0, v1 = dist.unwrap(v) self.assertEquals("main/foo/" + name + ":0", v0.name) self.assertEquals("main/tower_1/foo/" + name + ":0", v1.name) def testWithDefaultName(self): def model_fn(): with ops.name_scope(None, "foo"): a = constant_op.constant(1.0, name="a") distribute_lib.get_tower_context().merge_call(lambda _: _) b = constant_op.constant(2.0, name="b") return a, b dist = mirrored_strategy.MirroredStrategy( ["/device:GPU:0", "/device:CPU:0"]) with context.graph_mode(), dist.scope(): result = dist.call_for_each_tower(model_fn, run_concurrently=False) self.assertEquals(2, len(result)) for v, name in zip(result, ["a", "b"]): self.assertIsInstance(v, values.DistributedValues) v0, v1 = dist.unwrap(v) self.assertEquals("foo/" + name + ":0", v0.name) self.assertEquals("tower_1/foo/" + name + ":0", v1.name) def testDynamicRnnVariables(self): def model_fn(): inputs = constant_op.constant(2 * [2 * [[0.0, 1.0, 2.0, 3.0, 4.0]]]) cell_fw = rnn_cell_impl.LSTMCell(300) cell_bw = rnn_cell_impl.LSTMCell(300) (outputs, _) = rnn.bidirectional_dynamic_rnn( cell_fw, cell_bw, inputs, dtype=dtypes.float32) return outputs dist = mirrored_strategy.MirroredStrategy( ["/device:GPU:0", "/device:CPU:0"]) with context.graph_mode(), dist.scope(): result = dist.call_for_each_tower(model_fn, run_concurrently=False) # Two variables are created by the RNN layer. self.assertEquals(2, len(result)) for v in result: self.assertIsInstance(v, values.DistributedValues) _, v1 = dist.unwrap(v) self.assertStartsWith(v1.name, "tower_1/") if __name__ == "__main__": test.main()
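# --- Illustrative sketch (not part of the original test file) ---
# Minimal end-to-end use of the contrib MirroredStrategy calls exercised
# above (scope, call_for_each_tower, reduce, unwrap), following the same
# pattern as testReduceToCpu.  It is a sketch only and assumes the listed
# devices are actually available.
def _example_sum_of_tower_ids():
  dist = mirrored_strategy.MirroredStrategy(["/device:CPU:0", "/device:GPU:0"])
  with dist.scope():
    # Each tower returns its own tower/device index (0 or 1).
    per_tower = dist.call_for_each_tower(lambda device_id: device_id,
                                         dist.worker_device_index)
    # Sum the per-tower values and place the result on the CPU.
    reduced = dist.reduce("sum", per_tower, destinations="/device:CPU:0")
    return dist.unwrap(reduced)[0]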
"""Generated message classes for cloudresourcemanager version v1beta1. The Google Cloud Resource Manager API provides methods for creating, reading, and updating project metadata. """ # NOTE: This file is autogenerated and should not be edited by hand. from googlecloudsdk.third_party.apitools.base.protorpclite import messages as _messages from googlecloudsdk.third_party.apitools.base.py import encoding package = 'cloudresourcemanager' class Binding(_messages.Message): """Associates `members` with a `role`. Fields: members: Specifies the identities requesting access for a Cloud Platform resource. `members` can have the following values: * `allUsers`: A special identifier that represents anyone who is on the internet; with or without a Google account. * `allAuthenticatedUsers`: A special identifier that represents anyone who is authenticated with a Google account or a service account. * `user:{emailid}`: An email address that represents a specific Google account. For example, `alice@gmail.com` or `joe@example.com`. * `serviceAccount:{emailid}`: An email address that represents a service account. For example, `my-other- app@appspot.gserviceaccount.com`. * `group:{emailid}`: An email address that represents a Google group. For example, `admins@example.com`. * `domain:{domain}`: A Google Apps domain name that represents all the users of that domain. For example, `google.com` or `example.com`. role: Role that is assigned to `members`. For example, `roles/viewer`, `roles/editor`, or `roles/owner`. Required """ members = _messages.StringField(1, repeated=True) role = _messages.StringField(2) class CloudresourcemanagerOrganizationsGetIamPolicyRequest(_messages.Message): """A CloudresourcemanagerOrganizationsGetIamPolicyRequest object. Fields: getIamPolicyRequest: A GetIamPolicyRequest resource to be passed as the request body. resource: REQUIRED: The resource for which the policy is being requested. `resource` is usually specified as a path, such as `projects/*project*/zones/*zone*/disks/*disk*`. The format for the path specified in this value is resource specific and is specified in the `getIamPolicy` documentation. """ getIamPolicyRequest = _messages.MessageField('GetIamPolicyRequest', 1) resource = _messages.StringField(2, required=True) class CloudresourcemanagerOrganizationsGetRequest(_messages.Message): """A CloudresourcemanagerOrganizationsGetRequest object. Fields: organizationId: The id of the Organization resource to fetch. """ organizationId = _messages.StringField(1, required=True) class CloudresourcemanagerOrganizationsListRequest(_messages.Message): """A CloudresourcemanagerOrganizationsListRequest object. Fields: filter: An optional query string used to filter the Organizations to return in the response. Filter rules are case-insensitive. Organizations may be filtered by `owner.directoryCustomerId` or by `domain`, where the domain is a Google for Work domain, for example: |Filter|Description| |------|-----------| |owner.directorycustomerid:123456789|Organizations with `owner.directory_customer_id` equal to `123456789`.| |domain:google.com|Organizations corresponding to the domain `google.com`.| This field is optional. pageSize: The maximum number of Organizations to return in the response. This field is optional. pageToken: A pagination token returned from a previous call to `ListOrganizations` that indicates from where listing should continue. This field is optional. 
""" filter = _messages.StringField(1) pageSize = _messages.IntegerField(2, variant=_messages.Variant.INT32) pageToken = _messages.StringField(3) class CloudresourcemanagerOrganizationsSetIamPolicyRequest(_messages.Message): """A CloudresourcemanagerOrganizationsSetIamPolicyRequest object. Fields: resource: REQUIRED: The resource for which the policy is being specified. `resource` is usually specified as a path, such as `projects/*project*/zones/*zone*/disks/*disk*`. The format for the path specified in this value is resource specific and is specified in the `setIamPolicy` documentation. setIamPolicyRequest: A SetIamPolicyRequest resource to be passed as the request body. """ resource = _messages.StringField(1, required=True) setIamPolicyRequest = _messages.MessageField('SetIamPolicyRequest', 2) class CloudresourcemanagerOrganizationsTestIamPermissionsRequest(_messages.Message): """A CloudresourcemanagerOrganizationsTestIamPermissionsRequest object. Fields: resource: REQUIRED: The resource for which the policy detail is being requested. `resource` is usually specified as a path, such as `projects/*project*/zones/*zone*/disks/*disk*`. The format for the path specified in this value is resource specific and is specified in the `testIamPermissions` documentation. testIamPermissionsRequest: A TestIamPermissionsRequest resource to be passed as the request body. """ resource = _messages.StringField(1, required=True) testIamPermissionsRequest = _messages.MessageField('TestIamPermissionsRequest', 2) class CloudresourcemanagerProjectsDeleteRequest(_messages.Message): """A CloudresourcemanagerProjectsDeleteRequest object. Fields: projectId: The Project ID (for example, `foo-bar-123`). Required. """ projectId = _messages.StringField(1, required=True) class CloudresourcemanagerProjectsGetIamPolicyRequest(_messages.Message): """A CloudresourcemanagerProjectsGetIamPolicyRequest object. Fields: getIamPolicyRequest: A GetIamPolicyRequest resource to be passed as the request body. resource: REQUIRED: The resource for which the policy is being requested. `resource` is usually specified as a path, such as `projects/*project*/zones/*zone*/disks/*disk*`. The format for the path specified in this value is resource specific and is specified in the `getIamPolicy` documentation. """ getIamPolicyRequest = _messages.MessageField('GetIamPolicyRequest', 1) resource = _messages.StringField(2, required=True) class CloudresourcemanagerProjectsGetRequest(_messages.Message): """A CloudresourcemanagerProjectsGetRequest object. Fields: projectId: The Project ID (for example, `my-project-123`). Required. """ projectId = _messages.StringField(1, required=True) class CloudresourcemanagerProjectsListRequest(_messages.Message): """A CloudresourcemanagerProjectsListRequest object. Fields: filter: An expression for filtering the results of the request. Filter rules are case insensitive. The fields eligible for filtering are: + `name` + `id` + <code>labels.<em>key</em></code> where *key* is the name of a label Some examples of using labels as filters: |Filter|Description| |------|-----------| |name:*|The project has a name.| |name:Howl|The project's name is `Howl` or `howl`.| |name:HOWL|Equivalent to above.| |NAME:howl|Equivalent to above.| |labels.color:*|The project has the label `color`.| |labels.color:red|The project's label `color` has the value `red`.| |labels.color:red&nbsp;label.size:big|The project's label `color` has the value `red` and its label `size` has the value `big`. Optional. 
pageSize: The maximum number of Projects to return in the response. The server can return fewer Projects than requested. If unspecified, server picks an appropriate default. Optional. pageToken: A pagination token returned from a previous call to ListProjects that indicates from where listing should continue. Optional. """ filter = _messages.StringField(1) pageSize = _messages.IntegerField(2, variant=_messages.Variant.INT32) pageToken = _messages.StringField(3) class CloudresourcemanagerProjectsSetIamPolicyRequest(_messages.Message): """A CloudresourcemanagerProjectsSetIamPolicyRequest object. Fields: resource: REQUIRED: The resource for which the policy is being specified. `resource` is usually specified as a path, such as `projects/*project*/zones/*zone*/disks/*disk*`. The format for the path specified in this value is resource specific and is specified in the `setIamPolicy` documentation. setIamPolicyRequest: A SetIamPolicyRequest resource to be passed as the request body. """ resource = _messages.StringField(1, required=True) setIamPolicyRequest = _messages.MessageField('SetIamPolicyRequest', 2) class CloudresourcemanagerProjectsTestIamPermissionsRequest(_messages.Message): """A CloudresourcemanagerProjectsTestIamPermissionsRequest object. Fields: resource: REQUIRED: The resource for which the policy detail is being requested. `resource` is usually specified as a path, such as `projects/*project*/zones/*zone*/disks/*disk*`. The format for the path specified in this value is resource specific and is specified in the `testIamPermissions` documentation. testIamPermissionsRequest: A TestIamPermissionsRequest resource to be passed as the request body. """ resource = _messages.StringField(1, required=True) testIamPermissionsRequest = _messages.MessageField('TestIamPermissionsRequest', 2) class CloudresourcemanagerProjectsUndeleteRequest(_messages.Message): """A CloudresourcemanagerProjectsUndeleteRequest object. Fields: projectId: The project ID (for example, `foo-bar-123`). Required. undeleteProjectRequest: A UndeleteProjectRequest resource to be passed as the request body. """ projectId = _messages.StringField(1, required=True) undeleteProjectRequest = _messages.MessageField('UndeleteProjectRequest', 2) class Empty(_messages.Message): """A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for `Empty` is empty JSON object `{}`. """ class GetIamPolicyRequest(_messages.Message): """Request message for `GetIamPolicy` method.""" class ListOrganizationsResponse(_messages.Message): """The response returned from the `ListOrganizations` method. Fields: nextPageToken: A pagination token to be used to retrieve the next page of results. If the result is too large to fit within the page size specified in the request, this field will be set with a token that can be used to fetch the next page of results. If this field is empty, it indicates that this response contains the last page of results. organizations: The list of Organizations that matched the list query, possibly paginated. """ nextPageToken = _messages.StringField(1) organizations = _messages.MessageField('Organization', 2, repeated=True) class ListProjectsResponse(_messages.Message): """A page of the response received from the ListProjects method. 
A paginated response where more pages are available has `next_page_token` set. This token can be used in a subsequent request to retrieve the next request page. Fields: nextPageToken: Pagination token. If the result set is too large to fit in a single response, this token is returned. It encodes the position of the current result cursor. Feeding this value into a new list request with the `page_token` parameter gives the next page of the results. When `next_page_token` is not filled in, there is no next page and the list returned is the last page in the result set. Pagination tokens have a limited lifetime. projects: The list of Projects that matched the list filter. This list can be paginated. """ nextPageToken = _messages.StringField(1) projects = _messages.MessageField('Project', 2, repeated=True) class Organization(_messages.Message): """The root node in the resource hierarchy to which a particular entity's (e.g., company) resources belong. Fields: creationTime: Timestamp when the Organization was created. Assigned by the server. @OutputOnly displayName: A friendly string to be used to refer to the Organization in the UI. This field is required. organizationId: An immutable id for the Organization that is assigned on creation. This should be omitted when creating a new Organization. This field is read-only. owner: The owner of this Organization. The owner should be specified on creation. Once set, it cannot be changed. This field is required. """ creationTime = _messages.StringField(1) displayName = _messages.StringField(2) organizationId = _messages.StringField(3) owner = _messages.MessageField('OrganizationOwner', 4) class OrganizationOwner(_messages.Message): """The entity that owns an Organization. The lifetime of the Organization and all of its descendants are bound to the `OrganizationOwner`. If the `OrganizationOwner` is deleted, the Organization and all its descendants will be deleted. Fields: directoryCustomerId: The Google for Work customer id used in the Directory API. """ directoryCustomerId = _messages.StringField(1) class Policy(_messages.Message): """Defines an Identity and Access Management (IAM) policy. It is used to specify access control policies for Cloud Platform resources. A `Policy` consists of a list of `bindings`. A `Binding` binds a list of `members` to a `role`, where the members can be user accounts, Google groups, Google domains, and service accounts. A `role` is a named list of permissions defined by IAM. **Example** { "bindings": [ { "role": "roles/owner", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-other- app@appspot.gserviceaccount.com", ] }, { "role": "roles/viewer", "members": ["user:sean@example.com"] } ] } For a description of IAM and its features, see the [IAM developer's guide](https://cloud.google.com/iam). Fields: bindings: Associates a list of `members` to a `role`. Multiple `bindings` must not be specified for the same `role`. `bindings` with no members will result in an error. etag: `etag` is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. It is strongly suggested that systems make use of the `etag` in the read- modify-write cycle to perform policy updates in order to avoid race conditions: An `etag` is returned in the response to `getIamPolicy`, and systems are expected to put that etag in the request to `setIamPolicy` to ensure that their change will be applied to the same version of the policy. 
If no `etag` is provided in the call to `setIamPolicy`, then the existing policy is overwritten blindly. version: Version of the `Policy`. The default version is 0. """ bindings = _messages.MessageField('Binding', 1, repeated=True) etag = _messages.BytesField(2) version = _messages.IntegerField(3, variant=_messages.Variant.INT32) class Project(_messages.Message): """A Project is a high-level Google Cloud Platform entity. It is a container for ACLs, APIs, AppEngine Apps, VMs, and other Google Cloud Platform resources. Enums: LifecycleStateValueValuesEnum: The Project lifecycle state. Read-only. Messages: LabelsValue: The labels associated with this Project. Label keys must be between 1 and 63 characters long and must conform to the following regular expression: \[a-z\](\[-a-z0-9\]*\[a-z0-9\])?. Label values must be between 0 and 63 characters long and must conform to the regular expression (\[a-z\](\[-a-z0-9\]*\[a-z0-9\])?)?. No more than 256 labels can be associated with a given resource. Clients should store labels in a representation such as JSON that does not depend on specific characters being disallowed. Example: <code>"environment" : "dev"</code> Read-write. Fields: createTime: Creation time. Read-only. labels: The labels associated with this Project. Label keys must be between 1 and 63 characters long and must conform to the following regular expression: \[a-z\](\[-a-z0-9\]*\[a-z0-9\])?. Label values must be between 0 and 63 characters long and must conform to the regular expression (\[a-z\](\[-a-z0-9\]*\[a-z0-9\])?)?. No more than 256 labels can be associated with a given resource. Clients should store labels in a representation such as JSON that does not depend on specific characters being disallowed. Example: <code>"environment" : "dev"</code> Read-write. lifecycleState: The Project lifecycle state. Read-only. name: The user-assigned name of the Project. It must be 4 to 30 characters. Allowed characters are: lowercase and uppercase letters, numbers, hyphen, single-quote, double-quote, space, and exclamation point. Example: <code>My Project</code> Read-write. parent: An optional reference to a parent Resource. The only supported parent type is "organization". Once set, the parent cannot be modified. Read-write. projectId: The unique, user-assigned ID of the Project. It must be 6 to 30 lowercase letters, digits, or hyphens. It must start with a letter. Trailing hyphens are prohibited. Example: <code>tokyo-rain-123</code> Read-only after creation. projectNumber: The number uniquely identifying the project. Example: <code>415104041262</code> Read-only. """ class LifecycleStateValueValuesEnum(_messages.Enum): """The Project lifecycle state. Read-only. Values: LIFECYCLE_STATE_UNSPECIFIED: Unspecified state. This is only used/useful for distinguishing unset values. ACTIVE: The normal and active state. DELETE_REQUESTED: The project has been marked for deletion by the user (by invoking DeleteProject) or by the system (Google Cloud Platform). This can generally be reversed by invoking UndeleteProject. DELETE_IN_PROGRESS: The process of deleting the project has begun. Reversing the deletion is no longer possible. """ LIFECYCLE_STATE_UNSPECIFIED = 0 ACTIVE = 1 DELETE_REQUESTED = 2 DELETE_IN_PROGRESS = 3 @encoding.MapUnrecognizedFields('additionalProperties') class LabelsValue(_messages.Message): """The labels associated with this Project. Label keys must be between 1 and 63 characters long and must conform to the following regular expression: \[a-z\](\[-a-z0-9\]*\[a-z0-9\])?. 
Label values must be between 0 and 63 characters long and must conform to the regular expression (\[a-z\](\[-a-z0-9\]*\[a-z0-9\])?)?. No more than 256 labels can be associated with a given resource. Clients should store labels in a representation such as JSON that does not depend on specific characters being disallowed. Example: <code>"environment" : "dev"</code> Read- write. Messages: AdditionalProperty: An additional property for a LabelsValue object. Fields: additionalProperties: Additional properties of type LabelsValue """ class AdditionalProperty(_messages.Message): """An additional property for a LabelsValue object. Fields: key: Name of the additional property. value: A string attribute. """ key = _messages.StringField(1) value = _messages.StringField(2) additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True) createTime = _messages.StringField(1) labels = _messages.MessageField('LabelsValue', 2) lifecycleState = _messages.EnumField('LifecycleStateValueValuesEnum', 3) name = _messages.StringField(4) parent = _messages.MessageField('ResourceId', 5) projectId = _messages.StringField(6) projectNumber = _messages.IntegerField(7) class ResourceId(_messages.Message): """A container to reference an id for any resource type. A `resource` in Google Cloud Platform is a generic term for something you (a developer) may want to interact with through one of our API's. Some examples are an AppEngine app, a Compute Engine instance, a Cloud SQL database, and so on. Fields: id: Required field for the type-specific id. This should correspond to the id used in the type-specific API's. type: Required field representing the resource type this id is for. At present, the only valid type is "organization". """ id = _messages.StringField(1) type = _messages.StringField(2) class SetIamPolicyRequest(_messages.Message): """Request message for `SetIamPolicy` method. Fields: policy: REQUIRED: The complete policy to be applied to the `resource`. The size of the policy is limited to a few 10s of KB. An empty policy is a valid policy but certain Cloud Platform services (such as Projects) might reject them. """ policy = _messages.MessageField('Policy', 1) class StandardQueryParameters(_messages.Message): """Query parameters accepted by all methods. Enums: FXgafvValueValuesEnum: V1 error format. AltValueValuesEnum: Data format for response. Fields: f__xgafv: V1 error format. access_token: OAuth access token. alt: Data format for response. bearer_token: OAuth bearer token. callback: JSONP fields: Selector specifying which fields to include in a partial response. key: API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. oauth_token: OAuth 2.0 token for the current user. pp: Pretty-print response. prettyPrint: Returns response with indentations and line breaks. quotaUser: Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. trace: A tracing token of the form "token:<tokenid>" to include in api requests. uploadType: Legacy upload protocol for media (e.g. "media", "multipart"). upload_protocol: Upload protocol for media (e.g. "raw", "multipart"). """ class AltValueValuesEnum(_messages.Enum): """Data format for response. 
Values: json: Responses with Content-Type of application/json media: Media download with context-dependent Content-Type proto: Responses with Content-Type of application/x-protobuf """ json = 0 media = 1 proto = 2 class FXgafvValueValuesEnum(_messages.Enum): """V1 error format. Values: _1: v1 error format _2: v2 error format """ _1 = 0 _2 = 1 f__xgafv = _messages.EnumField('FXgafvValueValuesEnum', 1) access_token = _messages.StringField(2) alt = _messages.EnumField('AltValueValuesEnum', 3, default=u'json') bearer_token = _messages.StringField(4) callback = _messages.StringField(5) fields = _messages.StringField(6) key = _messages.StringField(7) oauth_token = _messages.StringField(8) pp = _messages.BooleanField(9, default=True) prettyPrint = _messages.BooleanField(10, default=True) quotaUser = _messages.StringField(11) trace = _messages.StringField(12) uploadType = _messages.StringField(13) upload_protocol = _messages.StringField(14) class TestIamPermissionsRequest(_messages.Message): """Request message for `TestIamPermissions` method. Fields: permissions: The set of permissions to check for the `resource`. Permissions with wildcards (such as '*' or 'storage.*') are not allowed. For more information see IAM Overview. """ permissions = _messages.StringField(1, repeated=True) class TestIamPermissionsResponse(_messages.Message): """Response message for `TestIamPermissions` method. Fields: permissions: A subset of `TestPermissionsRequest.permissions` that the caller is allowed. """ permissions = _messages.StringField(1, repeated=True) class UndeleteProjectRequest(_messages.Message): """The request sent to the UndeleteProject method.""" encoding.AddCustomJsonFieldMapping( StandardQueryParameters, 'f__xgafv', '$.xgafv', package=u'cloudresourcemanager') encoding.AddCustomJsonEnumMapping( StandardQueryParameters.FXgafvValueValuesEnum, '_1', '1', package=u'cloudresourcemanager') encoding.AddCustomJsonEnumMapping( StandardQueryParameters.FXgafvValueValuesEnum, '_2', '2', package=u'cloudresourcemanager')
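# --- Illustrative sketch (not part of the generated module) ---
# Example of building one of the request messages defined above and
# serializing it with the apitools encoding helpers imported at the top of
# this file; the filter value is hypothetical.
def _example_list_projects_request():
  request = CloudresourcemanagerProjectsListRequest(
      filter='labels.color:red', pageSize=10)
  # MessageToJson renders the populated fields as a JSON string.
  return encoding.MessageToJson(request)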
# ============================================================================= # Copyright (c) 2016, Cisco Systems, Inc # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF # THE POSSIBILITY OF SUCH DAMAGE. # ============================================================================= from flask import Blueprint from flask import jsonify from flask import render_template from flask import request from flask import redirect from flask import url_for from flask import abort from flask import send_file from flask import flash from flask_login import login_required from flask_login import current_user from database import DBSession from models import logger from models import CustomCommandProfile from constants import get_temp_directory from wtforms import Form from wtforms import StringField from wtforms import TextAreaField from wtforms.validators import Length, required from utils import create_temp_user_directory from utils import create_directory from utils import make_file_writable from common import create_or_update_custom_command_profile from common import delete_custom_command_profile from common import get_custom_command_profile from common import can_delete import os import json custom_command = Blueprint('custom_command', __name__, url_prefix='/custom_command_profiles') @custom_command.route('/', methods=['GET', 'POST']) @login_required def home(): msg = '' # Custom Command Profile import if request.method == 'POST': file = request.files['file'] if file: if not allowed_file(file.filename): msg = "Incorrect file format -- " + file.filename + " must be .json" else: file_path = os.path.join(get_temp_directory(), "custom_command_profiles.json") file.save(file_path) failed = "" with open(file_path, 'r') as f: try: s = json.load(f) except: msg = "Incorrect file format -- " + file.filename + " must be a valid JSON file." flash(msg, 'import_feedback') return redirect(url_for(".home")) if "CSM Server:Custom Command Profile" not in s.keys(): msg = file.filename + " is not in the correct Custom Command Profile format." 
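# The else branch below handles a structurally valid import file: profile
# names that already exist are de-duplicated by appending " - copy" until the
# name is unique, and any profile that fails to insert is reported back to
# the user.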
else: db_session = DBSession custom_profiles = [p for (p,) in DBSession().query(CustomCommandProfile.profile_name).all()] d = s["CSM Server:Custom Command Profile"] for profile_name in d.keys(): name = '' if profile_name in custom_profiles: name = profile_name # Will keep appending ' - copy' until it hits a unique name while name in custom_profiles: name += " - copy" msg += profile_name + ' -> ' + name + '\n' custom_profiles.append(name) if len(name) < 100 and len(profile_name) < 100: try: profile = CustomCommandProfile( profile_name=name if name else profile_name, command_list=d[profile_name], created_by=current_user.username ) db_session.add(profile) db_session.commit() except: failed += profile_name + '\n' else: failed += profile_name + ' (name too long)\n' if msg: msg = "The following profiles already exist and will try to be imported under modified names:\n\n" + \ msg + '\n' if failed: msg += 'The following profiles failed to import:\n\n' + failed elif failed: msg = 'The following profiles failed to import:\n\n' + failed else: msg = "Custom Command Profile import was successful!" # delete file os.remove(file_path) flash(msg, 'import_feedback') return redirect(url_for(".home")) custom_command_profile_form = CustomCommandProfileForm(request.form) return render_template('custom_command/custom_command_profile.html', form=custom_command_profile_form) @custom_command.route('/api/get_command_profiles') @login_required def api_get_command_profiles(): custom_profiles = DBSession().query(CustomCommandProfile).filter().all() rows = [] for profile in custom_profiles: row = {'id': profile.id, 'profile_name': profile.profile_name, 'command_list': profile.command_list, 'created_by': profile.created_by} rows.append(row) return jsonify(**{'data': rows}) @custom_command.route('/command_profile/create', methods=['GET', 'POST']) @login_required def command_profile_create(): db_session = DBSession() form = CustomCommandProfileForm(request.form) if request.method == 'POST' and form.validate(): command_profile = get_custom_command_profile(db_session, form.profile_name.data) if command_profile is not None: return render_template('custom_command/command_profile_edit.html', form=form, duplicate_error=True) create_or_update_custom_command_profile( db_session=db_session, profile_name=form.profile_name.data, command_list=','.join([l for l in form.command_list.data.splitlines() if l]), created_by=current_user.username ) return redirect(url_for('custom_command.home')) else: return render_template('custom_command/command_profile_edit.html', form=form) @custom_command.route('/command_profile/<profile_name>/edit', methods=['GET', 'POST']) @login_required def command_profile_edit(profile_name): db_session = DBSession() command_profile = get_custom_command_profile(db_session, profile_name) if command_profile is None: abort(404) form = CustomCommandProfileForm(request.form) if request.method == 'POST' and form.validate(): if profile_name != form.profile_name.data and \ get_custom_command_profile(db_session, form.profile_name.data) is not None: return render_template('custom_command/command_profile_edit.html', form=form, duplicate_error=True) create_or_update_custom_command_profile( db_session=db_session, profile_name=form.profile_name.data, command_list=','.join([l for l in form.command_list.data.splitlines() if l]), created_by=current_user.username, custom_command_profile=get_custom_command_profile(db_session, profile_name) ) return redirect(url_for('custom_command.home')) else: form.profile_name.data = 
command_profile.profile_name if command_profile.command_list is not None: form.command_list.data = '\n'.join(command_profile.command_list.split(',')) return render_template('custom_command/command_profile_edit.html', form=form) @custom_command.route('/command_profile/<profile_name>/delete', methods=['DELETE']) @login_required def custom_command_profile_delete(profile_name): if not can_delete(current_user): abort(401) db_session = DBSession() try: delete_custom_command_profile(db_session, profile_name) return jsonify({'status': 'OK'}) except Exception as e: logger.exception('custom_command_profile_delete hit exception.') return jsonify({'status': e.message}) @custom_command.route('/export_command_profiles', methods=['POST']) @login_required def export_command_profiles(): db_session = DBSession() profiles_list = request.args.getlist('profiles_list[]')[0].split(",") db_profiles = db_session.query(CustomCommandProfile).all() d = {"CSM Server:Custom Command Profile": {}} for profile in db_profiles: if profile.profile_name in profiles_list: d["CSM Server:Custom Command Profile"][profile.profile_name] = profile.command_list temp_user_dir = create_temp_user_directory(current_user.username) custom_command_export_temp_path = os.path.normpath(os.path.join(temp_user_dir, "custom_command_export")) create_directory(custom_command_export_temp_path) make_file_writable(custom_command_export_temp_path) with open(os.path.join(custom_command_export_temp_path, 'custom_command_profiles.json'), 'w') as command_export_file: command_export_file.write(json.dumps(d, indent=2)) return send_file(os.path.join(custom_command_export_temp_path, 'custom_command_profiles.json'), as_attachment=True) def allowed_file(filename): return '.' in filename and \ filename.rsplit('.', 1)[1] in ['json'] class CustomCommandProfileForm(Form): profile_name = StringField('Profile Name', [required(), Length(max=30)]) command_list = TextAreaField('CLI Commands')
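# --- Illustrative sketch (not part of the original blueprint) ---
# Shape of the JSON document produced by export_command_profiles() and
# consumed by the import branch of home(): a single top-level key mapping
# profile names to their comma-separated command lists.  The profile name
# and commands below are hypothetical.
def _example_export_payload():
    return {
        "CSM Server:Custom Command Profile": {
            "asr9k-checks": "show platform,show redundancy summary"
        }
    }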
from django.db import models from django.forms import ModelForm from django.conf.global_settings import LANGUAGES from django.contrib.auth.models import User class CreatedUpdatedModel(models.Model): created = models.DateTimeField(auto_now_add=True) updated = models.DateTimeField(auto_now=True) class Meta: abstract = True class Skill( models.Model ): user = models.ForeignKey(User) name = models.CharField(max_length=4096) def myToObj ( self ): return { "id" : self.id , "name": self.name } def fill ( self, data ): self.name = data["name"] def __str__( self ) : return self.name class PieceCategory( models.Model ): user = models.ForeignKey(User) name = models.CharField(max_length=4096, default="") description = models.CharField(max_length=4096, default="") language = models.CharField(max_length=7, choices=LANGUAGES, default="en") tags = models.CharField(max_length=4096, default="") pieces = models.TextField(default="") def myToObj ( self ): data = { "id" : self.id , "name": self.name, "description" : self.description } data["language"] = self.language data["tags"] = self.tags data["pieces"] = self.pieces return data def fill ( self, data ): self.name = data["name"] self.description = data["description"] self.tags = data["tags"] self.language = data["language"] self.pieces = data["pieces"] def __str__( self ) : return self.name class Piece( models.Model ): user = models.ForeignKey(User) content = models.TextField() language = models.CharField(max_length=7, choices=LANGUAGES) tags = models.CharField(max_length=4096, default="") legend = models.CharField(max_length=9064, default="") def myToObj ( self ): return { "id" : self.id , "content" : self.content, "language" : self.language, "tags" : self.tags, "legend" : self.legend } def fill( self, data ) : self.content = data["content"] self.language = data["language"] self.tags = data["tags"] self.legend = data["legend"] def __str__( self ) : return self.content class Cover( CreatedUpdatedModel ): user = models.ForeignKey(User) name = models.CharField(max_length=4096) content = models.TextField(default="") def myToObj ( self ): return { "id" : self.id, "name" : self.name, "content" : self.content } def fill( data ): self.name = data.name self.content = data.content def __str__( self ) : return self.name class Application( CreatedUpdatedModel ): user = models.ForeignKey(User) portal = models.CharField(blank=True, max_length=200) portal_link = models.CharField(blank=True, max_length=200, default="") company = models.CharField(blank=True, max_length=200) company_link = models.CharField(blank=True, max_length=200, default="") position = models.CharField(blank=True, max_length=300) position_link = models.CharField(blank=True, max_length=300, default="") salary = models.CharField(blank=True, max_length=100) contract = models.CharField(blank=True, max_length=300) latitude = models.CharField(blank=True, max_length=20) longitude = models.CharField(blank=True, max_length=20) skills = models.CharField(blank=True, max_length=200) written = models.BooleanField(default=False) called = models.BooleanField(default=False) interviewed = models.BooleanField(default=False) followup = models.BooleanField(default=False) notes = models.TextField(blank=True) next = models.TextField(blank=True) cover = models.TextField(blank=True) address1 = models.CharField(blank=True, max_length=100) address2 = models.CharField(blank=True, max_length=100) c1name = models.CharField(blank=True, max_length=40) c1mail = models.CharField(blank=True, max_length=40) c1phone = 
models.CharField(blank=True, max_length=20) c2name = models.CharField(blank=True, max_length=40) c2mail = models.CharField(blank=True, max_length=40) c2phone = models.CharField(blank=True, max_length=20) c3name = models.CharField(blank=True, max_length=40) c3mail = models.CharField(blank=True, max_length=40) c3phone = models.CharField(blank=True, max_length=20) c4name = models.CharField(blank=True, max_length=40) c4mail = models.CharField(blank=True, max_length=40) c4phone = models.CharField(blank=True, max_length=20) def myToObj ( self ): data = { "id" : self.id, "created" : self.created.strftime('%Y-%m-%d %H:%M') , "updated" : self.updated.strftime('%Y-%m-%d %H:%M') } data["portal"] = self.portal data["company"] = self.company data["position"] = self.position data["portal_link"] = self.portal_link data["company_link"] = self.company_link data["position_link"] = self.position_link data["salary"] = self.salary data["contract"] = self.contract data["latitude"] = self.latitude data["longitude"] = self.longitude data["skills"] = self.skills data["written"] = self.written data["called"] = self.called data["interviewed"] = self.interviewed data["followup"] = self.followup data["notes"] = self.notes data["next"] = self.next data["cover"] = self.cover data["address1"] = self.address1 data["address2"] = self.address2 data["c1name"] = self.c1name data["c1mail"] = self.c1mail data["c1phone"] = self.c1phone data["c2name"] = self.c2name data["c2mail"] = self.c2mail data["c2phone"] = self.c2phone data["c3name"] = self.c3name data["c3mail"] = self.c3mail data["c3phone"] = self.c3phone data["c4name"] = self.c4name data["c4mail"] = self.c4mail data["c4phone"] = self.c4phone return data def fill( self, data ) : self.company = data["company"] self.portal = data["portal"] self.position = data["position"] self.company_link = data["company_link"] self.portal_link = data["portal_link"] self.position_link = data["position_link"] self.salary = data["salary"] self.contract = data["contract"] self.latitude = data["latitude"] self.longitude = data["longitude"] self.skills = data["skills"] self.written = data["written"] self.called = data["called"] self.interviewed = data["interviewed"] self.followup = data["followup"] self.notes = data["notes"] self.next = data["next"] self.cover = data["cover"] self.address1 = data["address1"] self.address2 = data["address2"] self.c1name = data["c1name"] self.c1mail = data["c1mail"] self.c1phone = data["c1phone"] self.c2name = data["c2name"] self.c2mail = data["c2mail"] self.c2phone = data["c2phone"] self.c3name = data["c3name"] self.c3mail = data["c3mail"] self.c3phone = data["c3phone"] self.c4name = data["c4name"] self.c4mail = data["c4mail"] self.c4phone = data["c4phone"] def __str__( self ) : return self.company # User class Profile( CreatedUpdatedModel ): user = models.OneToOneField(User) uuid = models.UUIDField() bio = models.TextField() website = models.URLField(null=True) has_avatar = models.BooleanField(default=False) avatar = models.CharField(max_length=4096) tutorial = models.IntegerField() def myToObj ( self ): data = {} data["user"] = { "id" : self.user.id, "name" : self.user.name } data["uuid"] = self.uuid data["bio"] = self.bio data["website"] = self.website data["has_avatar"] = self.has_avatar data["avatar"] = self.avatar data["tutorial"] = self.tutorial return data def __str__( self ) : return self.user class ProfileForm(ModelForm): class Meta: model = Profile fields = ['user', 'bio', 'website'] class SkillForm(ModelForm): class Meta: model = Skill fields = 
['name'] class ApplicationForm(ModelForm): class Meta: model = Application fields = ['company','portal', 'position','skills', 'written', 'called', 'interviewed', 'followup', 'notes', 'next', 'cover', 'address1', 'address2', 'c1name', 'c1mail', 'c1phone', 'c2name', 'c2mail', 'c2phone','c3name', 'c3mail', 'c3phone','c4name', 'c4mail', 'c4phone'] class PieceCategoryForm(ModelForm): class Meta: model = PieceCategory fields = ['name', 'description', 'tags', 'language', 'pieces'] class PieceForm(ModelForm): class Meta: model = Piece fields = ['language', 'tags', 'legend', 'content']
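# --- Illustrative sketch (not part of the original models module) ---
# The myToObj()/fill() methods above act as a hand-rolled serialization
# layer.  A view that returns the current user's applications as JSON
# (hypothetical; this project's real views are not shown here) could use
# them like this:
def example_applications_json(request):
    import json
    from django.http import HttpResponse
    rows = [a.myToObj() for a in Application.objects.filter(user=request.user)]
    return HttpResponse(json.dumps(rows), content_type="application/json")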
### Author: yela @cyela4t ### Description: Gravity makes squares falling down. Try to erase the colorful evil squares before it is too late! ### Category: Game ### License: MIT ### Appname : SQUARE WITH GRAVITY ## https://github.com/annaeg/square-with-gravity import ugfx import pyb import buttons # IMU is the Inertial Measurement Unit combines accelerometer and gyroscope. # This uses the https://github.com/emfcamp/Mk3-Firmware/blob/master/lib/imu.py from imu import IMU SCREEN_WIDTH = 320 SCREEN_HEIGHT = 240 # More delay will be a more cear background, but the falling is not so nice anymore # Delay should be always 10*VELOCITY. This feels nice. # If you double the delay, then set the square spawn time half VELOCITY = 15 # how fast a square falls down DELAY = VELOCITY*10 # delay between two rounds SQR_SPAWN = 15 # spawn a square every x rounds SQR_MIN = 20 # min size of a square SQR_MAX = 45 # max size of a square SIMILARITY = 35 # how similar a square color has to be compared to the BG color to vanish DYING_COUNTER = 6 # how long a dead square will be visible imu = IMU() ugfx.init() buttons.init() class Color: def __init__(self): self.red = pyb.rng() % (0xFF + 1) self.blue = pyb.rng() % (0xFF + 1) self.green = pyb.rng() % (0xFF + 1) def get_color(self): return (self.red<<16) + (self.blue<<8) + (self.green) def change(self, color, intensity): # intensity can be negative color += intensity if(color < 0): color = 0 if(color > 0xFF): color = 0xFF return color def more_red(self, intensity): self.red = self.change(self.red, intensity) self.green = self.change(self.green, intensity *-1) self.blue = self.change(self.blue, intensity *-1) def more_green(self, intensity): self.green = self.change(self.green, intensity) self.red = self.change(self.red, intensity *-1) self.blue = self.change(self.blue, intensity *-1) def more_blue(self, intensity): self.blue = self.change(self.blue, intensity) self.red = self.change(self.red, intensity *-1) self.green = self.change(self.green, intensity *-1) def rotate(self): red = self.red blue = self.blue green = self.green self.red = blue self.blue = green self.green = red def similar_to(self, color): if(abs(self.red-color.red) < SIMILARITY and abs(self.blue-color.blue) < SIMILARITY and abs(self.green-color.green) < SIMILARITY): return True return False def draw(self): ugfx.clear(ugfx.html_color(self.get_color())) class Square: def __init__(self): self.width = (pyb.rng() % (SQR_MAX - SQR_MIN + 1)) + SQR_MIN self.x = (int) (SCREEN_WIDTH/2 - self.width/2) self.y = (int) (SCREEN_HEIGHT/2 - self.width/2) self.color = Color() self.build = False self.dead = False self.dead_counter = 0 # Checks if there is a collision: def collides(self, square): if(self is square): return False if(self.x + self.width < square.x): return False if(self.x > square.x + square.width): return False if(self.y + self.width < square.y): return False if(self.y > square.y + square.width): return False return True def fall(self, acc, squares, x_boost, y_boost): if(self.build): return # (the boost can have values between 5 and -5) # x and y will have values between 10 and -10: x = (int) (acc['x'] * VELOCITY) y = (int) (acc['y'] * VELOCITY) self.x = self.x + x + x_boost self.y = self.y + y + y_boost for s in squares: if(self.collides(s)): self.build = True if(self.y + self.width >= SCREEN_HEIGHT): self.y = SCREEN_HEIGHT - self.width self.build = True elif(self.y < 0): self.y = 0 self.build = True if(self.x + self.width >= SCREEN_WIDTH): self.x = SCREEN_WIDTH - self.width self.build = True elif(self.x < 
0): self.x = 0 self.build = True def draw(self): if(self.dead): self.dead_counter = self.dead_counter + 1 if(self.dead_counter % 2 == 1): ugfx.area(self.x, self.y, self.width, self.width, ugfx.BLACK) else: ugfx.area(self.x, self.y, self.width, self.width, ugfx.WHITE) else: ugfx.area(self.x, self.y, self.width, self.width, ugfx.html_color(self.color.get_color())) def init(): ugfx.clear(ugfx.BLUE) ugfx.set_default_font(ugfx.FONT_TITLE) ugfx.text(95, 50, "SQUARE", ugfx.YELLOW) ugfx.text(60, 80, "WITH GRAVITY", ugfx.YELLOW) ugfx.set_default_font(ugfx.FONT_MEDIUM_BOLD) ugfx.text(70, 180, "PRESS A TO START", ugfx.YELLOW) ugfx.text(70, 200, "PRESS B FOR HELP", ugfx.YELLOW) ugfx.area(260, 95, 25, 25, ugfx.RED) ugfx.area(240, 120, 30, 30, ugfx.YELLOW) ugfx.area(250, 150, 60, 60, ugfx.html_color(0xFF00FF)) pyb.delay(1000) def repaint(squares, color, score): color.draw() for s in squares: s.draw() ugfx.text(10, 10, "SCORE: " + str(score), ugfx.YELLOW) def game_over(score): ugfx.set_default_font(ugfx.FONT_MEDIUM_BOLD) ugfx.text(100, 120, "GAME OVER!", ugfx.YELLOW) ugfx.set_default_font(ugfx.FONT_SMALL) ugfx.text(10, 220, "Press Menu for a new game", ugfx.YELLOW) def new_game(): color = Color() squares = [] # Influence the falling of the square with the joystic. This will set the boost: x_boost = 0 y_boost = 0 start_square = Square() squares.append(start_square) score = start_square.width counter = 0 while not buttons.is_pressed("BTN_MENU"): pyb.delay(DELAY) acc = imu.get_acceleration() x = acc['x'] # RED y = acc['y'] # GREEN z = acc['z'] # BLUE if(abs(x) > abs(y) and abs(x) > abs(z)): color.more_red((int) (x * 10)) elif(abs(y) > abs(x) and abs(y) > abs(z)): color.more_blue((int) (y * 10)) elif(abs(z) > abs(x) and abs(z) > abs(y)): color.more_green((int) (y * 10)) if buttons.is_pressed("BTN_A"): color.rotate() if buttons.is_pressed("BTN_B"): for s in squares: s.color.rotate() if buttons.is_pressed("JOY_UP"): y_boost = -5 if buttons.is_pressed("JOY_DOWN"): y_boost = 5 if buttons.is_pressed("JOY_LEFT"): x_boost = -5 if buttons.is_pressed("JOY_RIGHT"): x_boost = 5 for s in squares: if(s.dead): if(s.dead_counter >= DYING_COUNTER): squares.remove(s) else: s.fall(acc, squares, x_boost, y_boost) if(color.similar_to(s.color)): s.dead = True x_boost = 0 y_boost = 0 repaint(squares, color, score) counter = counter + 1 if(counter == SQR_SPAWN): s = Square() squares.append(s) for s2 in squares: if(s.collides(s2)): # GAME OVER game_over(score) while not buttons.is_pressed("BTN_MENU"): pyb.delay(DELAY) return score = score + s.width counter = 0 init() while not buttons.is_pressed("BTN_MENU"): if buttons.is_pressed("BTN_A"): new_game() init() if buttons.is_pressed("BTN_B"): ugfx.clear(ugfx.BLUE) ugfx.set_default_font(ugfx.FONT_SMALL) ugfx.text(10, 10, "* The BG color changes based on orientation", ugfx.YELLOW) ugfx.text(10, 30, "* A rotates BG color, B rotates square colors", ugfx.YELLOW) ugfx.text(10, 50, "* Squares always fall down", ugfx.YELLOW) ugfx.text(10, 70, "* Control the falling square with the joystick", ugfx.YELLOW) ugfx.text(10, 90, "* A square vanishes,", ugfx.YELLOW) ugfx.text(50, 110, "if the background color is the same", ugfx.YELLOW) ugfx.text(10, 130, "* Game is over,", ugfx.YELLOW) ugfx.text(50, 150, "if a new square has not enough space", ugfx.YELLOW) ugfx.set_default_font(ugfx.FONT_MEDIUM_BOLD) ugfx.text(70, 180, "PRESS A TO START", ugfx.YELLOW) while not buttons.is_pressed("BTN_A"): pyb.delay(DELAY) new_game() init() pyb.delay(DELAY)
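# Square.collides() above is an axis-aligned bounding-box (AABB) overlap
# test, and Color.similar_to() is a per-channel threshold. Below is a
# minimal sketch of the same two rules in plain CPython (no ugfx/pyb/imu
# required), useful for checking the logic off the badge; the function
# names here are illustrative, not part of the game.
def squares_overlap(ax, ay, aw, bx, by, bw):
    # Two axis-aligned squares overlap unless one lies entirely to the
    # left/right of the other or entirely above/below it.
    if ax + aw < bx or bx + bw < ax:
        return False
    if ay + aw < by or by + bw < ay:
        return False
    return True


def colors_similar(c1, c2, similarity=35):
    # Colors are (r, g, b); "similar" means every channel differs by less
    # than the SIMILARITY threshold used in the game.
    return all(abs(a - b) < similarity for a, b in zip(c1, c2))


if __name__ == '__main__':
    assert squares_overlap(0, 0, 20, 10, 10, 20)
    assert not squares_overlap(0, 0, 20, 50, 0, 20)
    assert colors_similar((100, 100, 100), (120, 90, 110))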
#!/usr/bin/python # -*- coding: utf-8 -*- # thumbor imaging service # https://github.com/globocom/thumbor/wiki # Licensed under the MIT license: # http://www.opensource.org/licenses/mit-license # Copyright (c) 2011 globo.com timehome@corp.globo.com from pyvows import Vows, expect from thumbor.config import Config STORAGE_DEFAULT_VALUE = 'thumbor.storages.file_storage' TEST_DATA = ( ('MAX_WIDTH', 0), ('MAX_HEIGHT', 0), ('ALLOWED_SOURCES', []), ('QUALITY', 80), ('LOADER', 'thumbor.loaders.http_loader'), ('STORAGE', STORAGE_DEFAULT_VALUE), ('ENGINE', 'thumbor.engines.pil'), ('GIF_ENGINE', 'thumbor.engines.gif'), ('ALLOW_UNSAFE_URL', True), ('FILE_LOADER_ROOT_PATH', '/tmp'), ('STORAGE_EXPIRATION_SECONDS', 60 * 60 * 24 * 30), ('STORES_CRYPTO_KEY_FOR_EACH_IMAGE', False), ('MONGO_STORAGE_SERVER_HOST', 'localhost'), ('MONGO_STORAGE_SERVER_PORT', 27017), ('MONGO_STORAGE_SERVER_DB', 'thumbor'), ('MONGO_STORAGE_SERVER_COLLECTION', 'images'), ('REDIS_STORAGE_SERVER_HOST', 'localhost'), ('REDIS_STORAGE_SERVER_PORT', 6379), ('REDIS_STORAGE_SERVER_DB', 0), ('MIXED_STORAGE_FILE_STORAGE', 'thumbor.storages.no_storage'), ('MIXED_STORAGE_CRYPTO_STORAGE', 'thumbor.storages.no_storage'), ('MIXED_STORAGE_DETECTOR_STORAGE', 'thumbor.storages.no_storage'), ('DETECTORS', []), ('FACE_DETECTOR_CASCADE_FILE', 'haarcascade_frontalface_alt.xml'), ('FILTERS', [ 'thumbor.filters.brightness', 'thumbor.filters.colorize', 'thumbor.filters.contrast', 'thumbor.filters.rgb', 'thumbor.filters.round_corner', 'thumbor.filters.quality', 'thumbor.filters.noise', 'thumbor.filters.watermark', 'thumbor.filters.equalize', 'thumbor.filters.fill', 'thumbor.filters.sharpen', 'thumbor.filters.strip_icc', 'thumbor.filters.frame', 'thumbor.filters.grayscale', 'thumbor.filters.rotate', 'thumbor.filters.format', 'thumbor.filters.max_bytes', 'thumbor.filters.convolution', 'thumbor.filters.blur', 'thumbor.filters.extract_focal', 'thumbor.filters.no_upscale', 'thumbor.filters.saturation', 'thumbor.filters.max_age', 'thumbor.filters.curve', ]) ) @Vows.batch class Configuration(Vows.Context): class DefaultThumborConf(Vows.Context): def topic(self): for data in TEST_DATA: yield data class VerifyDefaultValueContext(Vows.Context): def topic(self, data): key, default_value = data cfg = Config() return (getattr(cfg, key), default_value) def should_have_default_value(self, topic): expect(topic).not_to_be_an_error() expect(topic).to_length(2) actual, expected = topic expect(actual).not_to_be_null() expect(actual).to_equal(expected) class WhenSettingAnAlias(Vows.Context): def topic(self): Config.alias('OTHER_ENGINE', 'ENGINE') return Config(OTHER_ENGINE='x') def should_set_engine_attribute(self, config): expect(config.ENGINE).to_equal('x') def should_set_other_engine_attribute(self, config): expect(config.OTHER_ENGINE).to_equal('x') class WhenSettingAnAliasedKey(Vows.Context): def topic(self): Config.alias('LOADER_ALIAS', 'LOADER') return Config(LOADER='y') def should_set_loader_attribute(self, config): expect(config.LOADER).to_equal('y') def should_set_loader_alias_attribute(self, config): expect(config.LOADER_ALIAS).to_equal('y') class WithAliasedAliases(Vows.Context): def topic(self): Config.alias('STORAGE_ALIAS', 'STORAGE') Config.alias('STORAGE_ALIAS_ALIAS', 'STORAGE_ALIAS') return Config(STORAGE_ALIAS_ALIAS='z') def should_set_storage_attribute(self, config): expect(config.STORAGE).to_equal('z') def should_set_storage_alias_attribute(self, config): expect(config.STORAGE_ALIAS).to_equal('z') def should_set_storage_alias_alias_attribute(self, config): 
expect(config.STORAGE_ALIAS_ALIAS).to_equal('z') class WithDefaultValues(Vows.Context): def topic(self): return Config() def should_set_storage_attribute(self, config): expect(config.STORAGE).to_equal(STORAGE_DEFAULT_VALUE) def should_set_storage_alias_attribute(self, config): expect(config.STORAGE_ALIAS).to_equal(STORAGE_DEFAULT_VALUE) def should_set_storage_alias_alias_attribute(self, config): expect(config.STORAGE_ALIAS_ALIAS).to_equal(STORAGE_DEFAULT_VALUE) def should_be_a_derpconf(self, config): expect(config.__class__.__module__).to_equal('derpconf.config') #class ConfigContext(Vows.Context): #def _camel_split(self, string): #return re.sub('((?=[A-Z][a-z])|(?<=[a-z])(?=[A-Z])|(?=[0-9]\b))', ' ', string).strip() #def _config_name(self): #return '_'.join(self._camel_split(self.__class__.__name__).split(' ')).upper() #def topic(self): #config = self._config_name() #return getattr(conf, config) #def is_not_an_error(self, topic): #expect(topic).not_to_be_an_error() #class NumericConfigContext(ConfigContext): #def is_numeric(self, topic): #expect(topic).to_be_numeric() #@Vows.batch #class Configuration(Vows.Context): #class Defaults(Vows.Context): ##class SecurityKey(ConfigContext): ##def defaults_to_null(self, topic): ##expect(topic).to_be_null() #class AllowUnsafeUrl(ConfigContext): #def defaults_to_true(self, topic): #expect(topic).to_be_true() #class MaxWidth(NumericConfigContext): #def defaults_to_0(self, topic): #expect(topic).to_equal(0) #class MaxHeight(NumericConfigContext): #def defaults_to_0(self, topic): #expect(topic).to_equal(0) ##class AllowedSources(ConfigContext): ##def defaults_to_empty(self, topic): ##expect(topic).to_be_empty() #class Quality(NumericConfigContext): #def defaults_to_85(self, topic): #expect(topic).to_equal(85) ##class Loader(ConfigContext): ##def defaults_to_http_loader(self, topic): ##expect(topic).to_equal('thumbor.loaders.http_loader') #class MaxSourceSize(NumericConfigContext): #def defaults_to_0(self, topic): #expect(topic).to_equal(0) #class RequestTimeoutSeconds(NumericConfigContext): #def defaults_to_120(self, topic): #expect(topic).to_equal(120) #class Engine(ConfigContext): #def defaults_to_pil(self, topic): #expect(topic).to_equal('thumbor.engines.pil') #class Storage(ConfigContext): #def defaults_to_file_storage(self, topic): #expect(topic).to_equal('thumbor.storages.file_storage') #class StorageExpirationSeconds(NumericConfigContext): #def defaults_to_one_month(self, topic): #expect(topic).to_equal(60 * 60 * 24 * 30) #class MongoStorage(Vows.Context): #class MongoStorageServerHost(ConfigContext): #def defaults_to_localhost(self, topic): #expect(topic).to_equal('localhost') #class MongoStorageServerPort(NumericConfigContext): #def defaults_to_27017(self, topic): #expect(topic).to_equal(27017) #class MongoStorageServerDb(ConfigContext): #def defaults_to_thumbor(self, topic): #expect(topic).to_equal('thumbor') #class MongoStorageServerCollection(ConfigContext): #def defaults_to_images(self, topic): #expect(topic).to_equal('images') #class RedisStorage(Vows.Context): #class RedisStorageServerHost(ConfigContext): #def defaults_to_localhost(self, topic): #expect(topic).to_equal('localhost') #class RedisStorageServerPort(NumericConfigContext): #def defaults_to_6379(self, topic): #expect(topic).to_equal(6379) #class RedisStorageServerDb(NumericConfigContext): #def defaults_to_0(self, topic): #expect(topic).to_equal(0) #class MySqlStorage(Vows.Context): #class MysqlStorageServerHost(ConfigContext): #def defaults_to_localhost(self, topic): 
#expect(topic).to_equal('localhost') #class MysqlStorageServerPort(NumericConfigContext): #def defaults_to_3306(self, topic): #expect(topic).to_equal(3306) #class MysqlStorageServerUser(ConfigContext): #def defaults_to_root(self, topic): #expect(topic).to_equal('root') #class MysqlStorageServerPassword(ConfigContext): #def defaults_to_empty(self, topic): #expect(topic).to_be_empty() #class MysqlStorageServerDb(ConfigContext): #def defaults_to_thumbor(self, topic): #expect(topic).to_equal('thumbor') #class MysqlStorageServerTable(ConfigContext): #def defaults_to_images(self, topic): #expect(topic).to_equal('images') #class Engines(Vows.Context): #class ImageMagick(Vows.Context): #class MagickwandPath(ConfigContext): #def defaults_to_empty(self, topic): #expect(topic).to_be_empty() #class Json(Vows.Context): #class MetaCallbackName(ConfigContext): #def defaults_to_null(self, topic): #expect(topic).to_be_null() #class Detectors(ConfigContext): #def default_includes_face_detector(self, topic): #expect(topic).to_include('thumbor.detectors.face_detector') #def default_includes_feature_detector(self, topic): #expect(topic).to_include('thumbor.detectors.feature_detector') #class FaceDetector(Vows.Context): #class FaceDetectorCascadeFile(ConfigContext): #def defaults_to_haarcascade_frontalface_alt(self, topic): #expect(topic).to_equal('haarcascade_frontalface_alt.xml')
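# The vows above exercise default values and Config.alias(), where several
# attribute names resolve to one underlying key (setting OTHER_ENGINE also
# sets ENGINE, and aliases of aliases still reach STORAGE). The class below
# is a self-contained sketch of that aliasing idea -- not derpconf's actual
# implementation -- under the assumption that aliases simply map back to a
# canonical key.
class AliasedConfig(object):
    _aliases = {}                                  # alias name -> canonical key
    _defaults = {'ENGINE': 'thumbor.engines.pil'}  # illustrative default

    @classmethod
    def alias(cls, new_name, existing_name):
        cls._aliases[new_name] = existing_name

    def __init__(self, **kwargs):
        self._values = dict(self._defaults)
        for key, value in kwargs.items():
            self._values[self._canonical(key)] = value

    def _canonical(self, key):
        # Follow alias chains (aliases of aliases) down to the real key.
        while key in self._aliases:
            key = self._aliases[key]
        return key

    def __getattr__(self, key):
        try:
            return self._values[self._canonical(key)]
        except KeyError:
            raise AttributeError(key)


AliasedConfig.alias('OTHER_ENGINE', 'ENGINE')
cfg = AliasedConfig(OTHER_ENGINE='x')
assert cfg.ENGINE == cfg.OTHER_ENGINE == 'x'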
from string import Template from gettext import gettext as _ from django.core.exceptions import ValidationError, ObjectDoesNotExist from django.db import models from cyder.base.utils import transaction_atomic from cyder.core.ctnr.models import Ctnr from cyder.cydhcp.interface.static_intr.models import StaticInterface from cyder.cydns.domain.models import Domain from cyder.cydns.address_record.models import AddressRecord from cyder.cydns.validation import validate_label, validate_fqdn from cyder.cydns.view.models import View from cyder.cydns.models import CydnsRecord class Nameserver(CydnsRecord): """ Name server for forward domains:: >>> Nameserver(domain = domain, server = server) Sometimes a name server needs a glue record. A glue record can either be an AddressRecord or a StaticInterface. These two types are represented but the attributes `addr_glue` and `intr_glue`, which are both FK's enforced by the DB. If there are two A or two Interfaces, or one A and one Interface that fit the criterion of being a NS's glue record, the user should have the choice to choose between records. Because of this, a glue record will only be automatically assigned to a NS if a) The NS doesn't have a glue record or b) the glue record the NS has isn't valid. """ @property def pretty_name(self): return self.server id = models.AutoField(primary_key=True) domain = models.ForeignKey(Domain, null=False, help_text="The domain this " "record is for.") server = models.CharField(max_length=255, validators=[validate_fqdn], help_text="The name of the server this records " "points to.") # "If the name server does lie within the domain it should have a # corresponding A record." addr_glue = models.ForeignKey(AddressRecord, null=True, blank=True, related_name="nameserver_set") intr_glue = models.ForeignKey(StaticInterface, null=True, blank=True, related_name="nameserver_set") template = _("{bind_name:$lhs_just} {ttl:$ttl_just} " "{rdclass:$rdclass_just} " "{rdtype:$rdtype_just} {server:$rhs_just}.") search_fields = ("server", "domain__name") class Meta: app_label = 'cyder' db_table = "nameserver" unique_together = ("domain", "server") def __unicode__(self): return u'{} NS {}'.format(self.domain.name, self.server) @staticmethod def filter_by_ctnr(ctnr, objects=None): objects = objects or Nameserver.objects objects = objects.filter(domain__in=ctnr.domains.all()) return objects @property def rdtype(self): return 'NS' def bind_render_record(self, pk=False, **kwargs): # We need to override this because fqdn is actually self.domain.name template = Template(self.template).substitute(**self.justs) return template.format( rdtype=self.rdtype, rdclass='IN', bind_name=self.domain.name + '.', **self.__dict__ ) def details(self): """For tables.""" data = super(Nameserver, self).details() data['data'] = [ ('Domain', 'domain', self.domain), ('Server', 'server', self.server), ('Glue', None, self.get_glue()), ] return data @staticmethod def eg_metadata(): """EditableGrid metadata.""" return {'metadata': [ {'name': 'domain', 'datatype': 'string', 'editable': False}, {'name': 'server', 'datatype': 'string', 'editable': True}, {'name': 'glue', 'datatype': 'string', 'editable': True}, ]} # TODO, make this a property def get_glue(self): if self.addr_glue: return self.addr_glue elif self.intr_glue: return self.intr_glue else: return None def set_glue(self, glue): if isinstance(glue, AddressRecord): self.addr_glue = glue self.intr_glue = None elif isinstance(glue, StaticInterface): self.addr_glue = None self.intr_glue = glue elif isinstance(glue, 
type(None)): self.addr_glue = None self.intr_glue = None else: raise ValueError("Cannot assing {0}: Nameserver.glue must be of " "either type AddressRecord or type " "StaticInterface.".format(glue)) @transaction_atomic def del_glue(self): if self.addr_glue: self.addr_glue.delete(commit=False) elif self.intr_glue: self.intr_glue.delete(commit=False) else: raise AttributeError("'Nameserver' object has no attribute 'glue'") glue = property(get_glue, set_glue, del_glue, "The Glue property.") def __init__(self, *args, **kwargs): super(Nameserver, self).__init__(*args, **kwargs) self.ctnr = Ctnr.objects.get(name="global") @transaction_atomic def delete(self, *args, **kwargs): self.check_no_ns_soa_condition(self.domain) super(Nameserver, self).delete(*args, **kwargs) @transaction_atomic def save(self, *args, **kwargs): self.full_clean() super(Nameserver, self).save(*args, **kwargs) def clean(self, *args, **kwargs): try: self.domain except Domain.DoesNotExist: return # clean_fields already seen `domain`'s own ValidationError. self.check_for_cname() if not self.needs_glue(): self.glue = None else: # Try to find any glue record. It will be the first eligible # The resolution is: # * Address records are searched. # * Interface records are searched. # AddressRecords take higher priority over interface records. glue_label = self.server[:self.server.find('.')] # foo.com -> foo if (self.glue and self.glue.label == glue_label and self.glue.domain == self.domain): # Our glue record is valid. Don't go looking for a new one. pass else: # Ok, our glue record wasn't valid, let's find a new one. addr_glue = AddressRecord.objects.filter(label=glue_label, domain=self.domain) intr_glue = StaticInterface.objects.filter(label=glue_label, domain=self.domain) if not (addr_glue or intr_glue): raise ValidationError( "This NS needs a glue record. Create a glue " "record for the server before creating " "the NS record." ) else: if addr_glue: self.glue = addr_glue[0] else: self.glue = intr_glue[0] def clean_views(self, views): # Forms will call this function with the set of views it is about to # set on this object. Make sure we aren't serving as the NS for a view # that we are about to remove. removed_views = set(View.objects.all()) - set(views) for view in removed_views: if (self.domain.soa and self.domain.soa.root_domain == self.domain and self.domain.nameserver_set.filter(views=view).count() == 1 and # We are it! self.domain.soa.has_record_set(exclude_ns=True, view=view)): raise ValidationError( "Other records in this nameserver's zone are " "relying on it's existance in the {0} view. You can't " "remove it's memebership of the {0} view.".format(view) ) def check_no_ns_soa_condition(self, domain): if (domain.soa and domain.soa.root_domain == domain and domain.nameserver_set.count() == 1 and # We are it! domain.soa.has_record_set(exclude_ns=True)): raise ValidationError( "Other records in this nameserver's zone are " "relying on it's existance as it is the only nameserver " "at the root of the zone." ) def needs_glue(self): # Replace the domain portion of the server with "". # if domain == foo.com and server == ns1.foo.com. # ns1.foo.com --> ns1 try: possible_label = self.server.replace("." + self.domain.name, "") except ObjectDoesNotExist: return False if possible_label == self.server: return False try: validate_label(possible_label) except ValidationError: # It's not a valid label return False return True
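# Nameserver.needs_glue() above decides whether a glue record is required by
# stripping the zone name from the server FQDN (ns1.foo.com in zone foo.com
# leaves the label "ns1"; a server outside the zone needs no glue). This is
# a small pure-Python sketch of that derivation; glue_label is an
# illustrative name, not part of cyder.
def glue_label(server, domain):
    """Return the label needing glue, or None if no glue is required."""
    suffix = '.' + domain
    if not server.endswith(suffix):
        # Server lies outside the zone, so no glue record is needed.
        return None
    # Multi-label servers (ns1.sub.foo.com) keep everything left of the zone.
    label = server[:-len(suffix)]
    return label or None


assert glue_label('ns1.foo.com', 'foo.com') == 'ns1'
assert glue_label('ns1.bar.com', 'foo.com') is None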
import os.path, time import win32com.client class ExcelConstant: xlAutomatic = -4105 #Direction xlToRight = -4161 #Alignment xlLeft = -4131 xlRight = -4152 #Border xlDiagonalDown = 5 xlDiagonalUp = 6 xlEdgeBottom = 9 xlEdgeLeft = 7 xlEdgeRight = 10 xlEdgeTop = 8 xlInsideHorizontal = 12 xlInsideVertical = 11 #xlBorderWeight xlHairline = 1 xlMedium = -4138 xlThick = 4 xlThin = 2 #xlUnderlineStyle xlUnderlineStyleDouble = -4119 xlUnderlineStyleDoubleAccounting = 5 xlUnderlineStyleNone = -4142 xlUnderlineStyleSingle = 2 xlUnderlineStyleSingleAccounting = 4 #xlLineStyle xlContinuous = 1 xlSolid = 1 xlDash = -4115 xlDashDot = 4 xlDashDotDot = 5 xlDot = -4118 xlDouble = -4119 xlLineStyleNone = -4142 xlSlantDashDot = 13 def __init__(self): pass class Font: def __init__(self): self.name = 'Arial' self.size = 12 self.underline = ExcelConstant.xlUnderlineStyleNone self.colorIndex = 0 self.bold = False self.italic = False class Group: def __init__(self): self.cols = [] self.values = [] self.groupRows = [] self.groupFormat = {'FONT': Font()} self.custom = {} self.groupHeader = False self.groupFooter = False self.groupHeaderFormat = {'FONT': Font()} self.groupHeaderText = [] def isEqual(self, row): if self.values == []: return False for col in self.cols: if self.values[col] != row[col]: return False return True class ReportData: def __init__(self): self.headerRowNo = 1 font = Font() font.bold = True self.headerFormat = {'FONT': font} self.headerText = [] self.rows = [] self.groups = [] self.newSheet = False self.custom = [] self.showColCount = 0 def addGroup(self, cols, format, custom, groupHeaderFormat = {'FONT': Font()}, groupHeaderText = []): group = Group() group.cols = cols group.groupFormat = format group.custom = custom group.groupHeaderFormat = groupHeaderFormat group.groupHeaderText = groupHeaderText self.groups.append(group) def clone(self): aclone = ReportData() class ExcelReport: ROW = 'row' COL = 'col' def __init__(self): self.__excelApp = win32com.client.Dispatch("Excel.Application") self.reportData = [] self.__currentRow = {self.ROW: 1} self.reportSumCols = [] def addReportData(self, reportData, reportSumCols = {'SUM': [], 'COL_TEXT': [], 'CUSTOM_TEXT': {'COL': [], 'TEXT': []}}): # for compatible with old report w/o showColCount if reportData.showColCount == 0: reportData.showColCount = len(reportData.headerText) self.reportData.append(reportData) self.reportSumCols.append(reportSumCols) def reportStart(self, excelApp): pass def reportEnd(self, excelApp): pass def reportHeader(self, currentRow, reportIndex, excelApp): colNo = 1 """ # these codes run 2 times slower than the following one excelApp.Range(excelApp.Cells(currentRow[self.ROW], colNo), excelApp.Cells(currentRow[self.ROW], len(self.reportData[reportIndex].headerText))).Select() excelApp.Selection.Font.Name = self.reportData[reportIndex].headerFormat['FONT'].name excelApp.Selection.Font.Size = self.reportData[reportIndex].headerFormat['FONT'].size excelApp.Selection.Font.Underline = self.reportData[reportIndex].headerFormat['FONT'].underline excelApp.Selection.Font.ColorIndex = self.reportData[reportIndex].headerFormat['FONT'].colorIndex excelApp.Selection.Font.Bold = self.reportData[reportIndex].headerFormat['FONT'].bold excelApp.Selection.Font.Italic = self.reportData[reportIndex].headerFormat['FONT'].italic """ if len(self.reportData[reportIndex].headerText) > 0: excelApp.Range(excelApp.Cells(currentRow[self.ROW], colNo), excelApp.Cells(currentRow[self.ROW], len(self.reportData[reportIndex].headerText))).Font.Name = 
self.reportData[reportIndex].headerFormat['FONT'].name excelApp.Range(excelApp.Cells(currentRow[self.ROW], colNo), excelApp.Cells(currentRow[self.ROW], len(self.reportData[reportIndex].headerText))).Font.Size = self.reportData[reportIndex].headerFormat['FONT'].size excelApp.Range(excelApp.Cells(currentRow[self.ROW], colNo), excelApp.Cells(currentRow[self.ROW], len(self.reportData[reportIndex].headerText))).Font.Underline = self.reportData[reportIndex].headerFormat['FONT'].underline excelApp.Range(excelApp.Cells(currentRow[self.ROW], colNo), excelApp.Cells(currentRow[self.ROW], len(self.reportData[reportIndex].headerText))).Font.ColorIndex = self.reportData[reportIndex].headerFormat['FONT'].colorIndex excelApp.Range(excelApp.Cells(currentRow[self.ROW], colNo), excelApp.Cells(currentRow[self.ROW], len(self.reportData[reportIndex].headerText))).Font.Bold = self.reportData[reportIndex].headerFormat['FONT'].bold excelApp.Range(excelApp.Cells(currentRow[self.ROW], colNo), excelApp.Cells(currentRow[self.ROW], len(self.reportData[reportIndex].headerText))).Font.Italic = self.reportData[reportIndex].headerFormat['FONT'].italic for i in range(len(self.reportData[reportIndex].headerText)): excelApp.Cells(currentRow[self.ROW], colNo).Value = self.reportData[reportIndex].headerText[i] colNo = colNo + 1 def reportFooter(self, currentRow, reportIndex, excelApp): for col in self.reportSumCols[reportIndex]['SUM']: totalValue = 0 for row in self.reportData[reportIndex].rows: if col < len(row): totalValue = totalValue + row[col] excelApp.Cells(currentRow[self.ROW], col + 1).Value = totalValue for col in self.reportSumCols[reportIndex]['COL_TEXT']: for row in self.reportData[reportIndex].rows: if col < len(row): value = row[col] break excelApp.Cells(currentRow[self.ROW], col + 1).Value = value for i in range(len(self.reportSumCols[reportIndex]['CUSTOM_TEXT']['COL'])): excelApp.Cells(currentRow[self.ROW], self.reportSumCols[reportIndex]['CUSTOM_TEXT']['COL'][i] + 1).Value = self.reportSumCols[reportIndex]['CUSTOM_TEXT']['TEXT'][i] currentRow[self.ROW] = currentRow[self.ROW] + 3 excelApp.Cells.Select() excelApp.Cells.EntireColumn.AutoFit() excelApp.Range("A1").Select() def groupHeader(self, currentRow, reportIndex, group, excelApp): colNo = 1 if len(group.groupHeaderText) > 0: excelApp.Range(excelApp.Cells(currentRow[self.ROW], colNo), excelApp.Cells(currentRow[self.ROW], len(group.groupHeaderText))).Font.Name = group.groupHeaderFormat['FONT'].name excelApp.Range(excelApp.Cells(currentRow[self.ROW], colNo), excelApp.Cells(currentRow[self.ROW], len(group.groupHeaderText))).Font.Size = group.groupHeaderFormat['FONT'].size excelApp.Range(excelApp.Cells(currentRow[self.ROW], colNo), excelApp.Cells(currentRow[self.ROW], len(group.groupHeaderText))).Font.Underline = group.groupHeaderFormat['FONT'].underline excelApp.Range(excelApp.Cells(currentRow[self.ROW], colNo), excelApp.Cells(currentRow[self.ROW], len(group.groupHeaderText))).Font.ColorIndex = group.groupHeaderFormat['FONT'].colorIndex excelApp.Range(excelApp.Cells(currentRow[self.ROW], colNo), excelApp.Cells(currentRow[self.ROW], len(group.groupHeaderText))).Font.Bold = group.groupHeaderFormat['FONT'].bold excelApp.Range(excelApp.Cells(currentRow[self.ROW], colNo), excelApp.Cells(currentRow[self.ROW], len(group.groupHeaderText))).Font.Italic = group.groupHeaderFormat['FONT'].italic for i in range(len(group.groupHeaderText)): excelApp.Cells(currentRow[self.ROW], colNo).Value = group.groupHeaderText[i] colNo = colNo + 1 def groupFooter(self, currentRow, 
reportIndex, group, excelApp): for col in group.custom['SUM']: totalValue = 0 for row in group.groupRows: totalValue = totalValue + row[col] excelApp.Cells(currentRow[self.ROW], col + 1).Value = totalValue for col in group.custom['COL_TEXT']: for row in group.groupRows: value = row[col] break excelApp.Cells(currentRow[self.ROW], col + 1).Value = value for i in range(len(group.custom['CUSTOM_TEXT']['COL'])): text = group.custom['CUSTOM_TEXT']['TEXT'][i] for row in group.groupRows: for col in range(len(row) - 1, -1, -1): text = text.replace('%c' + str(col), str(row[col])) break excelApp.Cells(currentRow[self.ROW], group.custom['CUSTOM_TEXT']['COL'][i] + 1).Value = text excelApp.Range(excelApp.Cells(currentRow[self.ROW], 1), excelApp.Cells(currentRow[self.ROW], self.reportData[reportIndex].showColCount)).Font.Name = group.groupFormat['FONT'].name excelApp.Range(excelApp.Cells(currentRow[self.ROW], 1), excelApp.Cells(currentRow[self.ROW], self.reportData[reportIndex].showColCount)).Font.Size = group.groupFormat['FONT'].size excelApp.Range(excelApp.Cells(currentRow[self.ROW], 1), excelApp.Cells(currentRow[self.ROW], self.reportData[reportIndex].showColCount)).Font.Underline = group.groupFormat['FONT'].underline excelApp.Range(excelApp.Cells(currentRow[self.ROW], 1), excelApp.Cells(currentRow[self.ROW], self.reportData[reportIndex].showColCount)).Font.ColorIndex = group.groupFormat['FONT'].colorIndex excelApp.Range(excelApp.Cells(currentRow[self.ROW], 1), excelApp.Cells(currentRow[self.ROW], self.reportData[reportIndex].showColCount)).Font.Italic = group.groupFormat['FONT'].italic excelApp.Range(excelApp.Cells(currentRow[self.ROW], 1), excelApp.Cells(currentRow[self.ROW], self.reportData[reportIndex].showColCount)).Font.Bold = group.groupFormat['FONT'].bold currentRow[self.ROW] = currentRow[self.ROW] + 2 def detail(self, currentRow, reportIndex, row, excelApp): excelApp.Range(excelApp.Cells(currentRow[self.ROW], 1) , excelApp.Cells(currentRow[self.ROW], len(row))).Value = row #for itemIndex in range(self.reportData[reportIndex].showColCount): # if itemIndex < len(row): # excelApp.Cells(currentRow[self.ROW], itemIndex + 1).Value = row[itemIndex] currentRow[self.ROW] = currentRow[self.ROW] + 1 def newSheet(self, reportIndex, sheetIndex, excelApp): pass def show(self): self.__excelApp.DisplayAlerts = True self.__excelApp.WorkBooks.Add() self.__generate() self.__excelApp.Visible = True def save(self, path): self.__excelApp.WorkBooks.Add() self.__excelApp.DisplayAlerts = True self.__generate() self.__excelApp.ActiveWorkbook.SaveAs(path) self.__excelApp.DisplayAlerts = False self.__excelApp.Quit() def saveNoQuit(self, path): self.__excelApp.WorkBooks.Add() self.__excelApp.DisplayAlerts = True self.__generate() self.__excelApp.ActiveWorkbook.SaveAs(path) #self.__excelApp.DisplayAlerts = False #self.__excelApp.Quit() #use this when u need to show and save at the same time def saveWithOutGenerate(self, path): self.__excelApp.ActiveWorkbook.SaveAs(path) self.__excelApp.DisplayAlerts = False self.__excelApp.Quit() def __generate(self): self.reportStart(self.__excelApp) sheetIndex = 0 for reportIndex in range(len(self.reportData)): if reportIndex != 0: if self.reportData[reportIndex].newSheet: self.__excelApp.Worksheets.Add(None, self.__excelApp.ActiveSheet) self.__currentRow[self.ROW] = 1 sheetIndex = sheetIndex + 1 self.newSheet(reportIndex, sheetIndex, self.__excelApp) else: self.newSheet(reportIndex, sheetIndex, self.__excelApp) self.reportHeader(self.__currentRow, reportIndex, self.__excelApp) if 
len(self.reportData[reportIndex].headerText) > 0: self.__currentRow[self.ROW] = self.__currentRow[self.ROW] + 1 for i in range(len(self.reportData[reportIndex].rows)): if i == 0: for j in range(len(self.reportData[reportIndex].groups)): self.reportData[reportIndex].groups[j].values = self.reportData[reportIndex].rows[i] self.reportData[reportIndex].groups[j].groupHeader = True else: for j in range(len(self.reportData[reportIndex].groups)): if j != 0 and self.reportData[reportIndex].groups[j - 1].groupHeader: self.reportData[reportIndex].groups[j].groupHeader = True self.reportData[reportIndex].groups[j].values = self.reportData[reportIndex].rows[i] self.reportData[reportIndex].groups[j].groupRows = [] continue if not self.reportData[reportIndex].groups[j].isEqual(self.reportData[reportIndex].rows[i]): self.reportData[reportIndex].groups[j].groupHeader = True self.reportData[reportIndex].groups[j].values = self.reportData[reportIndex].rows[i] self.reportData[reportIndex].groups[j].groupRows = [] else: self.reportData[reportIndex].groups[j].groupHeader = False for group in self.reportData[reportIndex].groups: isGroupStart = False if group.groupHeader: for j in range(len(self.reportData[reportIndex].rows)): if j < i: continue if group.isEqual(self.reportData[reportIndex].rows[j]): isGroupStart = True group.groupRows.append(self.reportData[reportIndex].rows[j]) else: if isGroupStart: break self.groupHeader(self.__currentRow, reportIndex, group, self.__excelApp) if len(group.groupHeaderText) > 0: self.__currentRow[self.ROW] = self.__currentRow[self.ROW] + 1 group.groupHeader = False #for group in self.reportData[reportIndex].groups: # if group.groupHeader: # for j in range(len(self.reportData[reportIndex].rows)): # if group.isEqual(self.reportData[reportIndex].rows[j]): # group.groupRows.append(self.reportData[reportIndex].rows[j]) # self.groupHeader(self.__currentRow, reportIndex, group, self.__excelApp) # group.groupHeader = False self.detail(self.__currentRow, reportIndex, self.reportData[reportIndex].rows[i], self.__excelApp) if i == len(self.reportData[reportIndex].rows) - 1: for j in range(len(self.reportData[reportIndex].groups)): self.reportData[reportIndex].groups[j].groupFooter = True else: for j in range(len(self.reportData[reportIndex].groups)): if j != len(self.reportData[reportIndex].groups) - 1 and self.reportData[reportIndex].groups[j + 1].groupFooter: self.reportData[reportIndex].groups[j].groupFooter = True self.reportData[reportIndex].groups[j].values = [] continue if not self.reportData[reportIndex].groups[j].isEqual(self.reportData[reportIndex].rows[i + 1]): self.reportData[reportIndex].groups[j].groupFooter = True self.reportData[reportIndex].groups[j].values = [] else: self.reportData[reportIndex].groups[j].groupFooter = False for j in range(len(self.reportData[reportIndex].groups) - 1, -1, -1): if self.reportData[reportIndex].groups[j].groupFooter: self.groupFooter(self.__currentRow, reportIndex, self.reportData[reportIndex].groups[j], self.__excelApp) self.reportData[reportIndex].groups[j].groupFooter = False self.reportData[reportIndex].groups[j].groupRows = [] # self.__currentRow[self.ROW] = self.__currentRow[self.ROW] + 1 # point back to the correct report end row as it was incremented by the end of the loop # self.__currentRow[self.ROW] = self.__currentRow[self.ROW] - 1 self.reportFooter(self.__currentRow, reportIndex, self.__excelApp) # end of report loop self.reportEnd(self.__excelApp) class CommonLayoutReport(ExcelReport): def __init__(self): 
ExcelReport.__init__(self) def addReportData(self, reportData, reportSumCols = {'SUM': [], 'COL_TEXT': [], 'CUSTOM_TEXT': {'COL': [], 'TEXT': []}}): ExcelReport.addReportData(self, reportData, reportSumCols) def newSheet(self, reportIndex, sheetIndex, excelApp): ExcelReport.newSheet(self, reportIndex, sheetIndex, excelApp) excelApp.Cells.Select() excelApp.Selection.Font.Name = 'Arial' def reportHeader(self, currentRow, reportIndex, excelApp): ExcelReport.reportHeader(self, currentRow, reportIndex, excelApp) excelApp.Range(excelApp.Cells(currentRow[self.ROW], 1), excelApp.Cells(currentRow[self.ROW], self.reportData[reportIndex].showColCount)).Select() excelApp.Selection.Borders(ExcelConstant.xlEdgeBottom).LineStyle = ExcelConstant.xlContinuous #excelApp.Range(excelApp.Cells(currentRow[self.ROW], 1), excelApp.Cells(currentRow[self.ROW], len(self.reportData[reportIndex].headerText))).Borders(ExcelConstant.xlEdgeBottom).LineStyle = ExcelConstant.xlContinuous def reportFooter(self, currentRow, reportIndex, excelApp): #currentRow[self.ROW] = currentRow[self.ROW] + 1 ExcelReport.reportFooter(self, currentRow, reportIndex, excelApp) excelApp.Range(excelApp.Cells(currentRow[self.ROW] - 3, 1), excelApp.Cells(currentRow[self.ROW] - 3, self.reportData[reportIndex].showColCount)).Select() excelApp.Selection.Borders(ExcelConstant.xlEdgeBottom).LineStyle = ExcelConstant.xlContinuous excelApp.Selection.Font.Bold = True """ excelApp.Range(excelApp.Cells(currentRow[self.ROW], 1), excelApp.Cells(currentRow[self.ROW], len(self.reportData[reportIndex].headerText))).Borders(ExcelConstant.xlEdgeBottom).LineStyle = ExcelConstant.xlContinuous excelApp.Range(excelApp.Cells(currentRow[self.ROW], 1), excelApp.Cells(currentRow[self.ROW], len(self.reportData[reportIndex].headerText))).Font.Bold = True """ def groupHeader(self, currentRow, reportIndex, group, excelApp): ExcelReport.groupHeader(self, currentRow, reportIndex, group, excelApp) def groupFooter(self, currentRow, reportIndex, group, excelApp): ExcelReport.groupFooter(self, currentRow, reportIndex, group, excelApp) #excelApp.Range(excelApp.Cells(currentRow[self.ROW], 1), excelApp.Cells(currentRow[self.ROW], self.reportData[reportIndex].showColCount)).Borders(ExcelConstant.xlEdgeTop).LineStyle = ExcelConstant.xlContinuous #excelApp.Range(excelApp.Cells(currentRow[self.ROW], 1), excelApp.Cells(currentRow[self.ROW], self.reportData[reportIndex].showColCount)).Borders(ExcelConstant.xlEdgeBottom).LineStyle = ExcelConstant.xlContinuous excelApp.Range(excelApp.Cells(currentRow[self.ROW] - 2, 1), excelApp.Cells(currentRow[self.ROW] - 2, self.reportData[reportIndex].showColCount)).Borders(ExcelConstant.xlEdgeTop).LineStyle = ExcelConstant.xlContinuous excelApp.Range(excelApp.Cells(currentRow[self.ROW] - 2, 1), excelApp.Cells(currentRow[self.ROW] - 2, self.reportData[reportIndex].showColCount)).Borders(ExcelConstant.xlEdgeBottom).LineStyle = ExcelConstant.xlContinuous #currentRow[self.ROW] = currentRow[self.ROW] + 1 """ # these codes run 2 times slower than the following one excelApp.Range(excelApp.Cells(currentRow[self.ROW], 1), excelApp.Cells(currentRow[self.ROW], len(self.reportData[reportIndex].headerText))).Select() excelApp.Selection.Borders(ExcelConstant.xlEdgeTop).LineStyle = ExcelConstant.xlContinuous excelApp.Selection.Borders(ExcelConstant.xlEdgeBottom).LineStyle = ExcelConstant.xlContinuous excelApp.Selection.Font.Name = group.groupFormat['FONT'].name excelApp.Selection.Font.Size = group.groupFormat['FONT'].size excelApp.Selection.Font.Underline = 
group.groupFormat['FONT'].underline excelApp.Selection.Font.ColorIndex = group.groupFormat['FONT'].colorIndex excelApp.Selection.Font.Italic = group.groupFormat['FONT'].italic excelApp.Selection.Font.Bold = group.groupFormat['FONT'].bold """ #Example ''' rpt = CommonLayoutReport() font = Font() font.bold = True font.italic = True reportData1 = ReportData() reportData1.newSheet = True reportData1.headerText = ['Book', 'Instrument', 'Position', 'Market Value', 'Avg Price'] reportData1.rows = [['PORT1', '0001.HK', 1000, 2000, 100], ['PORT1', '0001.HK', 2000, 4000, 102], ['PORT1', '0005.HK', 1000, 2000, 150], ['PORT2', '0008.HK', 3000, 2000, 5], ['PORT3', '3968.HK', 300, 200, 14.5]] reportData1.addGroup([0], {'FONT': font}, {'SUM': [2, 3], 'COL_TEXT': [0], 'CUSTOM_TEXT': {'COL': [], 'TEXT': []}}) reportData1.addGroup([1], {'FONT': font}, {'SUM': [2, 3], 'COL_TEXT': [], 'CUSTOM_TEXT': {'COL': [1], 'TEXT': ['Sub Total']}}) #reportData2 = ReportData() #reportData2.newSheet = True #reportData2.headerText = ['Book', 'Instrument', 'Position', 'Market Value'] #reportData2.rows = [['PORT1', '0001.HK', 1000, 2000], ['PORT1', '0001.HK', 2000, 4000], ['PORT1', '0005.HK', 1000, 2000], ['PORT2', '0008.HK', 3000, 2000], ['PORT3', '3968.HK', 300, 200]] #reportData2.addGroup([0, 1], {'FONT': font}, {'SUM': [3], 'COL_TEXT': [0], 'CUSTOM_TEXT': {'COL': [], 'TEXT': []}}) #reportData3 = ReportData() #reportData3.newSheet = False #reportData3.headerText = ['Book', 'Instrument', 'Position', 'Market Value', 'Gamma'] #reportData3.rows = [['PORT1', '0001.HK', 1000, 2000, 0.5], ['PORT1', '0001.HK', 2000, 4000, 0.5], ['PORT1', '0005.HK', 1000, 2000, 0.3], ['PORT2', '0008.HK', 3000, 2000, 0.9], ['PORT3', '3968.HK', 300, 200, 1]] rpt.addReportData(reportData1, {'SUM': [3], 'COL_TEXT': [1], 'CUSTOM_TEXT': {'COL': [1], 'TEXT': ['Total']}}) #rpt.addReportData(reportData2, {'SUM': [2], 'COL_TEXT': [1], 'CUSTOM_TEXT': {'COL': [1], 'TEXT': ['Total']}}) #rpt.addReportData(reportData3, {'SUM': [3], 'COL_TEXT': [1], 'CUSTOM_TEXT': {'COL': [1], 'TEXT': ['Total']}}) rpt.show() #rpt.save("C:\\testing.xls") '''
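# The group footers above sum the configured 'SUM' columns over each group's
# rows before writing the subtotal row. Below is a pure-Python sketch of the
# same subtotal/grand-total computation using itertools.groupby, on data
# shaped like the commented example at the bottom of the module; no
# Excel/win32com is required, and group_subtotals is an illustrative name.
from itertools import groupby


def group_subtotals(rows, group_col, sum_cols):
    """Yield (group_key, {col: subtotal}) for rows sorted by group_col."""
    for key, grouped in groupby(rows, key=lambda r: r[group_col]):
        grouped = list(grouped)
        yield key, {c: sum(r[c] for r in grouped) for c in sum_cols}


rows = [['PORT1', '0001.HK', 1000, 2000, 100],
        ['PORT1', '0005.HK', 1000, 2000, 150],
        ['PORT2', '0008.HK', 3000, 2000, 5]]

for book, totals in group_subtotals(rows, group_col=0, sum_cols=[2, 3]):
    print(book, totals)              # e.g. PORT1 {2: 2000, 3: 4000}
print('grand total', sum(r[3] for r in rows))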
import oomlout_SVGG as oomSVG details = list() parts = list() def oompAddDetail(category,code,name,sort="",extra1="",extra2=""): details.append(oompDetail(category,code,name,sort,extra1="",extra2="")) def getDetailFromCode(category,code): for x in details: if x.category == category: if x.code == code: return x return oompDetail("","","","") def makeAllLabels(): x=0 def makeLabels(part): makeLabel("front",part) makeLabel("inventory",part) makeLabel("spec",part) def makeLabel(lType,part): templateFile = "templates/OOMP-label-" + str(lType) + ".tmpl.svg" outFile = getPartDirectory(part) + "label-" + lType + ".svg" pdfFile = getPartDirectory(part) + "label-" + lType + ".pdf" loi = [] for tag in part.tags: loi.append([tag.name,tag.value]) oomSVG.searchAndReplaceSVG(loi,templateFile,outFile) oomSVG.toPDF(outFile,pdfFile) def getPartDirectory(part): oompID = part.getTag("oompID").value print("Part: " + str(part)) print("Part Directory: " + oompID) return "parts/" + oompID + "/" def getPartByID(part): ## print(" Get Part By ID: " + part) for x in parts: if x.getTag("oompID").value == part: return x return oompItem("") def getPartByHex(hexid): ## print(" Get Part By ID: " + part) for x in parts: x.getTag("hexid").value if x.getTag("hexid").value == hexid: return x return oompItem("") def getDetailByCode(category, code): ## print(" Get Part By ID: " + code) for x in details: #print(" Matching: " + x.code + " with -- " + code) if x.code == code: return x return oompDetail("","","","") def printParts(): print("OOMP Parts") for x in parts: print(" Part: " + str(x.fullString())) class oompItem: def __init__(self,index): self.tags=list() self.index=index self.addTag("index", index) def __str__(self): rv = "" rv = rv + self.getName() #for x in self.tags: # rv = rv + " " + str(x) return rv def fullString(self): rv = "" rv = rv + self.getName() + "\n" for x in self.tags: rv = rv + "" + str(x) ## if not isinstance(x.value, list): ## rv = rv + " " + str(x) ## else: #tag has a list ## rv = rv + " " + str(x) + "\n" ## for y in x.value: ## if isinstance(y, list): ## rv = rv + " " + str(y) + "\n" ## for c in y: ## rv = rv + " " + str(c) + "\n" ## else: ## rv = rv + " " + str(y) + "\n" return rv def addTag(self,name,value): self.tags.append(oompTag(name,value)) def addTagSupplied(self,name,value,tag): tag.append(oompTag(name,value)) return tag def getTag(self,name): if name == "oompID": id = self.getTag("oompType").value + "-" + self.getTag("oompSize").value + "-" + self.getTag("oompColor").value + "-" + self.getTag("oompDesc").value + "-" + self.getTag("oompIndex").value return(oompTag("oompID", id)) elif name == "taxaID": id = self.getTag("taxaDomain").value.upper() + "-" + self.getTag("taxaKingdom").value.upper() + "-" + self.getTag("taxaDivision").value.upper() + "-" + self.getTag("taxaClass").value.upper() + "-" + self.getTag("taxaOrder").value.upper() + "-" + self.getTag("taxaFamily").value.upper() + "-" + self.getTag("taxaGenus").value.upper() + "-" + self.getTag("taxaSpecies").value.upper() return(oompTag("oompID", id)) #elif name == "hexID": # hexValue = hex(self.getTag("index").value).replace("0x","").upper() # return(oompTag("hexID",hexValue)) elif name == "name": id = self.getTag("namename").value if id == "" or id == "XXXXX": name = "" #size value = getDetailFromCode("size",self.getTag("oompSize").value).name if value != "": name = name + value + " " #desc value = getDetailFromCode("desc",self.getTag("oompDesc").value).name if value != "": name = name + value + " " #color value = 
getDetailFromCode("color",self.getTag("oompColor").value).name if value != "": name = name + value + " " #type value = getDetailFromCode("type",self.getTag("oompType").value).name if value != "": name = name + value + " " #index value = getDetailFromCode("index",self.getTag("oompIndex").value).name if value != "": name = name + value + " " name = name.strip() return(oompTag("name", name)) else: return self.getTag("namename") else: if name == "namename": name = "name" for x in self.tags: if x.name == name: return x return oompTag("","") def getName(self): return "OOMP Item: " + self.getTag("oompID").value + " " + self.getTag("name").value class oompTag: def __init__(self, name, value): self.name = name self.value = value def __str__(self): if isinstance(self.value, list): rv = "oompTagCC " + self.name + "\n" for x in self.value: rv = rv + " " + str(x) return rv elif isinstance(self.value, oompTag): return " " + self.name + " \n" + str(self.value) + "\n" else: return " " + str(self.name) + " : " + str(self.value)+ "\n" def getValue(self): return self.value class oompDetail: def __init__(self, category, code, name, sort="", extra1="", extra2=""): self.category = category self.code = code self.name = name self.sort = sort self.extra1 = extra1 self.extra2 = extra2 def __str__(self): return self.category + " " + self.code + " " + self.name + " " + self.sort #### import detail lists import OOMPdetailsType import OOMPdetailsSize import OOMPdetailsColor import OOMPdetailsDesc import OOMPdetailsIndex #### import parts import OOMPparts
"""audio_read reads in a whole audio file with resampling.""" # Equivalent to: #import librosa #def audio_read(filename, sr=11025, channels=1): # """Read a soundfile, return (d, sr).""" # d, sr = librosa.load(filename, sr=sr, mono=(channels == 1)) # return d, sr # The code below is adapted from: # https://github.com/bmcfee/librosa/blob/master/librosa/core/audio.py # This is its original copyright notice: # Copyright (c) 2014, Brian McFee, Matt McVicar, Dawen Liang, Colin Raffel, Douglas Repetto, Dan Ellis. # # Permission to use, copy, modify, and/or distribute this software for any purpose with or without fee is hereby granted, provided that the above copyright notice and this permission notice appear in all copies. # # THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. import os import numpy as np def audio_read(filename, sr=None, channels=None): """Read a soundfile, return (d, sr).""" # Hacked version of librosa.load and audioread/ff. offset = 0.0 duration = None dtype = np.float32 y = [] with FFmpegAudioFile(os.path.realpath(filename), sample_rate=sr, channels=channels) as input_file: sr = input_file.sample_rate channels = input_file.channels s_start = int(np.floor(sr * offset) * channels) if duration is None: s_end = np.inf else: s_end = s_start + int(np.ceil(sr * duration) * channels) num_read = 0 for frame in input_file: frame = buf_to_float(frame, dtype=dtype) num_read_prev = num_read num_read += len(frame) if num_read < s_start: # offset is after the current frame, keep reading. continue if s_end < num_read_prev: # we're off the end. stop reading break if s_end < num_read: # the end is in this frame. crop. frame = frame[:s_end - num_read_prev] if num_read_prev <= s_start < num_read: # beginning is in this frame frame = frame[(s_start - num_read_prev):] # tack on the current frame y.append(frame) if not len(y): # Zero-length read y = np.zeros(0, dtype=dtype) else: y = np.concatenate(y) if channels > 1: y = y.reshape((-1, 2)).T # Final cleanup for dtype and contiguity y = np.ascontiguousarray(y, dtype=dtype) return (y, sr) def buf_to_float(x, n_bytes=2, dtype=np.float32): """Convert an integer buffer to floating point values. This is primarily useful when loading integer-valued wav data into numpy arrays. .. seealso:: :func:`librosa.util.buf_to_float` :parameters: - x : np.ndarray [dtype=int] The integer-valued data buffer - n_bytes : int [1, 2, 4] The number of bytes per sample in ``x`` - dtype : numeric type The target output type (default: 32-bit float) :return: - x_float : np.ndarray [dtype=float] The input data buffer cast to floating point """ # Invert the scale of the data scale = 1./float(1 << ((8 * n_bytes) - 1)) # Construct the format string fmt = '<i{:d}'.format(n_bytes) # Rescale and format the data buffer return scale * np.frombuffer(x, fmt).astype(dtype) # The code below is adapted from: # https://github.com/sampsyo/audioread/blob/master/audioread/ffdec.py # Below is its original copyright notice: # This file is part of audioread. # Copyright 2014, Adrian Sampson. 
# # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. import subprocess import re import threading import time try: import queue except ImportError: import Queue as queue class QueueReaderThread(threading.Thread): """A thread that consumes data from a filehandle and sends the data over a Queue. """ def __init__(self, fh, blocksize=1024, discard=False): super(QueueReaderThread, self).__init__() self.fh = fh self.blocksize = blocksize self.daemon = True self.discard = discard self.queue = None if discard else queue.Queue() def run(self): while True: data = self.fh.read(self.blocksize) if not self.discard: self.queue.put(data) if not data: # Stream closed (EOF). break class FFmpegAudioFile(object): """An audio file decoded by the ffmpeg command-line utility.""" def __init__(self, filename, channels=None, sample_rate=None, block_size=4096): if not os.path.isfile(filename): raise ValueError(filename + " not found.") popen_args = ['ffmpeg', '-i', filename, '-f', 's16le'] self.channels = channels self.sample_rate = sample_rate if channels: popen_args.extend(['-ac', str(channels)]) if sample_rate: popen_args.extend(['-ar', str(sample_rate)]) popen_args.append('-') self.proc = subprocess.Popen( popen_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE ) # Start another thread to consume the standard output of the # process, which contains raw audio data. self.stdout_reader = QueueReaderThread(self.proc.stdout, block_size) self.stdout_reader.start() # Read relevant information from stderr. try: self._get_info() except ValueError: raise ValueError("Error reading header info from " + filename) # Start a separate thread to read the rest of the data from # stderr. This (a) avoids filling up the OS buffer and (b) # collects the error output for diagnosis. self.stderr_reader = QueueReaderThread(self.proc.stderr) self.stderr_reader.start() def read_data(self, timeout=10.0): """Read blocks of raw PCM data from the file.""" # Read from stdout in a separate thread and consume data from # the queue. start_time = time.time() while True: # Wait for data to be available or a timeout. data = None try: data = self.stdout_reader.queue.get(timeout=timeout) if data: yield data else: # End of file. break except queue.Empty: # Queue read timed out. end_time = time.time() if not data: if end_time - start_time >= timeout: # Nothing interesting has happened for a while -- # FFmpeg is probably hanging. raise ValueError('ffmpeg output: {}'.format( ''.join(self.stderr_reader.queue.queue) )) else: start_time = end_time # Keep waiting. continue def _get_info(self): """Reads the tool's output from its stderr stream, extracts the relevant information, and parses it. """ out_parts = [] while True: line = self.proc.stderr.readline() if not line: # EOF and data not found. raise ValueError("stream info not found") # In Python 3, result of reading from stderr is bytes. 
if isinstance(line, bytes): line = line.decode('utf8', 'ignore') line = line.strip().lower() if 'no such file' in line: raise IOError('file not found') elif 'invalid data found' in line: raise UnsupportedError() elif 'duration:' in line: out_parts.append(line) elif 'audio:' in line: out_parts.append(line) self._parse_info(''.join(out_parts)) break def _parse_info(self, s): """Given relevant data from the ffmpeg output, set audio parameter fields on this object. """ # Sample rate. match = re.search(r'(\d+) hz', s) if match: self.sample_rate_orig = int(match.group(1)) else: self.sample_rate_orig = 0 if self.sample_rate is None: self.sample_rate = self.sample_rate_orig # Channel count. match = re.search(r'hz, ([^,]+),', s) if match: mode = match.group(1) if mode == 'stereo': self.channels_orig = 2 else: match = re.match(r'(\d+) ', mode) if match: self.channels_orig = int(match.group(1)) else: self.channels_orig = 1 else: self.channels_orig = 0 if self.channels is None: self.channels = self.channels_orig # Duration. match = re.search( r'duration: (\d+):(\d+):(\d+).(\d)', s ) if match: durparts = list(map(int, match.groups())) duration = ( durparts[0] * 60 * 60 + durparts[1] * 60 + durparts[2] + float(durparts[3]) / 10 ) self.duration = duration else: # No duration found. self.duration = 0 def close(self): """Close the ffmpeg process used to perform the decoding.""" # Kill the process if it is still running. if hasattr(self, 'proc') and self.proc.returncode is None: self.proc.kill() self.proc.wait() def __del__(self): self.close() # Iteration. def __iter__(self): return self.read_data() # Context manager. def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): self.close() return False
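# FFmpegAudioFile streams raw s16le PCM from ffmpeg's stdout, and
# buf_to_float() rescales those integer samples to floats in [-1, 1).
# A short check of that rescaling on a synthetic int16 buffer; it assumes
# only numpy and the buf_to_float() function defined above.
import numpy as np

int_samples = np.array([-32768, -16384, 0, 16384, 32767], dtype='<i2')
float_samples = buf_to_float(int_samples.tobytes(), n_bytes=2)

# The scale factor is 1 / 2**15, so -32768 maps to -1.0 and 32767 to just
# below 1.0, and the result keeps the requested float32 dtype.
assert np.isclose(float_samples[0], -1.0)
assert np.isclose(float_samples[-1], 32767.0 / 32768.0)
assert float_samples.dtype == np.float32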
import copy import quantities as pq import scipy as sp import scipy.signal import scipy.special import tools default_kernel_area_fraction = 0.99999 class Kernel(object): """ Base class for kernels. """ def __init__(self, kernel_size, normalize): """ :param kernel_size: Parameter controlling the kernel size. :type kernel_size: Quantity 1D :param bool normalize: Whether to normalize the kernel to unit area. """ self.kernel_size = kernel_size self.normalize = normalize def __call__(self, t, kernel_size=None): """ Evaluates the kernel at all time points in the array `t`. :param t: Time points to evaluate the kernel at. :type t: Quantity 1D :param kernel_size: If not `None` this overwrites the kernel size of the `Kernel` instance. :type kernel_size: Quantity scalar :returns: The result of the kernel evaluations. :rtype: Quantity 1D """ if kernel_size is None: kernel_size = self.kernel_size if self.normalize: normalization = self.normalization_factor(kernel_size) else: normalization = 1.0 * pq.dimensionless return self._evaluate(t, kernel_size) * normalization def _evaluate(self, t, kernel_size): """ Evaluates the kernel. :param t: Time points to evaluate the kernel at. :type t: Quantity 1D :param kernel_size: Controls the width of the kernel. :type kernel_size: Quantity scalar :returns: The result of the kernel evaluations. :rtype: Quantity 1D """ raise NotImplementedError() def normalization_factor(self, kernel_size): """ Returns the factor needed to normalize the kernel to unit area. :param kernel_size: Controls the width of the kernel. :type kernel_size: Quantity scalar :returns: Factor to normalize the kernel to unit width. :rtype: Quantity scalar """ raise NotImplementedError() def boundary_enclosing_at_least(self, fraction): """ Calculates the boundary :math:`b` so that the integral from :math:`-b` to :math:`b` encloses at least a certain fraction of the integral over the complete kernel. :param float fraction: Fraction of the whole area which at least has to be enclosed. :returns: boundary :rtype: Quantity scalar """ raise NotImplementedError() def is_symmetric(self): """ Should return `True` if the kernel is symmetric. """ return False def summed_dist_matrix(self, vectors, presorted=False): """ Calculates the sum of all element pair distances for each pair of vectors. If :math:`(a_1, \\dots, a_n)` and :math:`(b_1, \\dots, b_m)` are the :math:`u`-th and :math:`v`-th vector from `vectors` and :math:`K` the kernel, the resulting entry in the 2D array will be :math:`D_{uv} = \\sum_{i=1}^{n} \\sum_{j=1}^{m} K(a_i - b_j)`. :param sequence vectors: A sequence of Quantity 1D to calculate the summed distances for each pair. The required units depend on the kernel. Usually it will be the inverse unit of the kernel size. :param bool presorted: Some optimized specializations of this function may need sorted vectors. Set `presorted` to `True` if you know that the passed vectors are already sorted to skip the sorting and thus increase performance. :rtype: Quantity 2D """ D = sp.empty((len(vectors), len(vectors))) if len(vectors) > 0: might_have_units = self(vectors[0]) if hasattr(might_have_units, 'units'): D = D * might_have_units.units else: D = D * pq.dimensionless for i, j in sp.ndindex(len(vectors), len(vectors)): D[i, j] = sp.sum(self( (vectors[i] - sp.atleast_2d(vectors[j]).T).flatten())) return D class KernelFromFunction(Kernel): """ Creates a kernel form a function. Please note, that not all methods for such a kernel are implemented. 
""" def __init__(self, kernel_func, kernel_size): Kernel.__init__(self, kernel_size, normalize=False) self._evaluate = kernel_func def is_symmetric(self): return False def as_kernel_of_size(obj, kernel_size): """ Returns a kernel of desired size. :param obj: Either an existing kernel or a kernel function. A kernel function takes two arguments. First a `Quantity 1D` of evaluation time points and second a kernel size. :type obj: Kernel or func :param kernel_size: Desired size of the kernel. :type kernel_size: Quantity 1D :returns: A :class:`Kernel` with the desired kernel size. If `obj` is already a :class:`Kernel` instance, a shallow copy of this instance with changed kernel size will be returned. If `obj` is a function it will be wrapped in a :class:`Kernel` instance. :rtype: :class:`Kernel` """ if isinstance(obj, Kernel): obj = copy.copy(obj) obj.kernel_size = kernel_size else: obj = KernelFromFunction(obj, kernel_size) return obj class SymmetricKernel(Kernel): """ Base class for symmetric kernels. """ def __init__(self, kernel_size, normalize): """ :param kernel_size: Parameter controlling the kernel size. :type kernel_size: Quantity 1D :param bool normalize: Whether to normalize the kernel to unit area. """ Kernel.__init__(self, kernel_size, normalize) def is_symmetric(self): return True def summed_dist_matrix(self, vectors, presorted=False): D = sp.empty((len(vectors), len(vectors))) if len(vectors) > 0: might_have_units = self(vectors[0]) if hasattr(might_have_units, 'units'): D = D * might_have_units.units for i in xrange(len(vectors)): for j in xrange(i, len(vectors)): D[i, j] = D[j, i] = sp.sum(self( (vectors[i] - sp.atleast_2d(vectors[j]).T).flatten())) return D class CausalDecayingExpKernel(Kernel): r""" Unnormalized: :math:`K(t) = \exp(-\frac{t}{\tau}) \Theta(t)` with :math:`\Theta(t) = \left\{\begin{array}{ll}0, & x < 0\\ 1, & x \geq 0\end{array}\right.` and kernel size :math:`\tau`. Normalized to unit area: :math:`K'(t) = \frac{1}{\tau} K(t)` """ @staticmethod def evaluate(t, kernel_size): return sp.piecewise( t, [t < 0, t >= 0], [ lambda t: 0, lambda t: sp.exp( (-t * pq.dimensionless / kernel_size).simplified)]) def _evaluate(self, t, kernel_size): return self.evaluate(t, kernel_size) def normalization_factor(self, kernel_size): return 1.0 / kernel_size def __init__(self, kernel_size=1.0 * pq.s, normalize=True): Kernel.__init__(self, kernel_size, normalize) def boundary_enclosing_at_least(self, fraction): return -self.kernel_size * sp.log(1.0 - fraction) class GaussianKernel(SymmetricKernel): r""" Unnormalized: :math:`K(t) = \exp(-\frac{t^2}{2 \sigma^2})` with kernel size :math:`\sigma` (corresponds to the standard deviation of a Gaussian distribution). Normalized to unit area: :math:`K'(t) = \frac{1}{\sigma \sqrt{2 \pi}} K(t)` """ @staticmethod def evaluate(t, kernel_size): return sp.exp( -0.5 * (t * pq.dimensionless / kernel_size).simplified ** 2) def _evaluate(self, t, kernel_size): return self.evaluate(t, kernel_size) def normalization_factor(self, kernel_size): return 1.0 / (sp.sqrt(2.0 * sp.pi) * kernel_size) def __init__(self, kernel_size=1.0 * pq.s, normalize=True): Kernel.__init__(self, kernel_size, normalize) def boundary_enclosing_at_least(self, fraction): return self.kernel_size * sp.sqrt(2.0) * \ scipy.special.erfinv(fraction + scipy.special.erf(0.0)) class LaplacianKernel(SymmetricKernel): r""" Unnormalized: :math:`K(t) = \exp(-|\frac{t}{\tau}|)` with kernel size :math:`\tau`. 
Normalized to unit area: :math:`K'(t) = \frac{1}{2 \tau} K(t)` """ @staticmethod def evaluate(t, kernel_size): return sp.exp( -(sp.absolute(t) * pq.dimensionless / kernel_size).simplified) def _evaluate(self, t, kernel_size): return self.evaluate(t, kernel_size) def normalization_factor(self, kernel_size): return 0.5 / kernel_size def __init__(self, kernel_size=1.0 * pq.s, normalize=True): Kernel.__init__(self, kernel_size, normalize) def boundary_enclosing_at_least(self, fraction): return -self.kernel_size * sp.log(1.0 - fraction) def summed_dist_matrix(self, vectors, presorted=False): # This implementation is based on # # Houghton, C., & Kreuz, T. (2012). On the efficient calculation of van # Rossum distances. Network: Computation in Neural Systems, 23(1-2), # 48-58. # # Note that the cited paper contains some errors: In formula (9) the # left side of the equation should be divided by two and in the last # sum in this equation it should say `j|v_i >= u_i` instead of # `j|v_i > u_i`. Also, in equation (11) it should say `j|u_i >= v_i` # instead of `j|u_i > v_i`. # # Given N vectors with n entries on average the run-time complexity is # O(N^2 * n). O(N^2 + N * n) memory will be needed. if len(vectors) <= 0: return sp.zeros((0, 0)) if not presorted: vectors = [v.copy() for v in vectors] for v in vectors: v.sort() sizes = sp.asarray([v.size for v in vectors]) values = sp.empty((len(vectors), max(1, sizes.max()))) values.fill(sp.nan) for i, v in enumerate(vectors): if v.size > 0: values[i, :v.size] = \ (v / self.kernel_size * pq.dimensionless).simplified exp_diffs = sp.exp(values[:, :-1] - values[:, 1:]) markage = sp.zeros(values.shape) for u in xrange(len(vectors)): markage[u, 0] = 0 for i in xrange(sizes[u] - 1): markage[u, i + 1] = (markage[u, i] + 1.0) * exp_diffs[u, i] # Same vector terms D = sp.empty((len(vectors), len(vectors))) D[sp.diag_indices_from(D)] = sizes + 2.0 * sp.sum(markage, axis=1) # Cross vector terms for u in xrange(D.shape[0]): all_ks = sp.searchsorted(values[u], values, 'left') - 1 for v in xrange(u): js = sp.searchsorted(values[v], values[u], 'right') - 1 ks = all_ks[v] slice_j = sp.s_[sp.searchsorted(js, 0):sizes[u]] slice_k = sp.s_[sp.searchsorted(ks, 0):sizes[v]] D[u, v] = sp.sum( sp.exp(values[v][js[slice_j]] - values[u][slice_j]) * (1.0 + markage[v][js[slice_j]])) D[u, v] += sp.sum( sp.exp(values[u][ks[slice_k]] - values[v][slice_k]) * (1.0 + markage[u][ks[slice_k]])) D[v, u] = D[u, v] if self.normalize: normalization = self.normalization_factor(self.kernel_size) else: normalization = 1.0 return normalization * D class RectangularKernel(SymmetricKernel): r""" Unnormalized: :math:`K(t) = \left\{\begin{array}{ll}1, & |t| < \tau \\ 0, & |t| \geq \tau\end{array} \right.` with kernel size :math:`\tau` corresponding to the half width. Normalized to unit area: :math:`K'(t) = \frac{1}{2 \tau} K(t)` """ @staticmethod def evaluate(t, half_width): return sp.absolute(t) < half_width def _evaluate(self, t, kernel_size): return self.evaluate(t, kernel_size) def normalization_factor(self, half_width): return 0.5 / half_width def __init__(self, half_width=1.0 * pq.s, normalize=True): Kernel.__init__(self, half_width, normalize) def boundary_enclosing_at_least(self, fraction): return self.kernel_size class TriangularKernel(SymmetricKernel): r""" Unnormalized: :math:`K(t) = \left\{ \begin{array}{ll}1 - \frac{|t|}{\tau}, & |t| < \tau \\ 0, & |t| \geq \tau \end{array} \right.` with kernel size :math:`\tau` corresponding to the half width. 
Normalized to unit area: :math:`K'(t) = \frac{1}{\tau} K(t)` """ @staticmethod def evaluate(t, half_width): return sp.maximum( 0.0, (1.0 - sp.absolute(t.rescale(half_width.units)) * pq.dimensionless / half_width).magnitude) def _evaluate(self, t, kernel_size): return self.evaluate(t, kernel_size) def normalization_factor(self, half_width): return 1.0 / half_width def __init__(self, half_width=1.0 * pq.s, normalize=True): Kernel.__init__(self, half_width, normalize) def boundary_enclosing_at_least(self, fraction): return self.kernel_size def discretize_kernel( kernel, sampling_rate, area_fraction=default_kernel_area_fraction, num_bins=None, ensure_unit_area=False): """ Discretizes a kernel. :param kernel: The kernel or kernel function. If a kernel function is used it should take exactly one 1-D array as argument. :type kernel: :class:`Kernel` or function :param float area_fraction: Fraction between 0 and 1 (exclusive) of the integral of the kernel which will be at least covered by the discretization. Will be ignored if `num_bins` is not `None`. If `area_fraction` is used, the kernel has to provide a method :meth:`boundary_enclosing_at_least` (see :meth:`.Kernel.boundary_enclosing_at_least`). :param sampling_rate: Sampling rate for the discretization. The unit will typically be a frequency unit. :type sampling_rate: Quantity scalar :param int num_bins: Number of bins to use for the discretization. :param bool ensure_unit_area: If `True`, the area of the discretized kernel will be normalized to 1.0. :rtype: Quantity 1D """ t_step = 1.0 / sampling_rate if num_bins is not None: start = -num_bins // 2 stop = num_bins // 2 elif area_fraction is not None: boundary = kernel.boundary_enclosing_at_least(area_fraction) if hasattr(boundary, 'rescale'): boundary = boundary.rescale(t_step.units) start = sp.ceil(-boundary / t_step) stop = sp.floor(boundary / t_step) + 1 else: raise ValueError( "One of area_fraction and num_bins must not be None.") k = kernel(sp.arange(start, stop) * t_step) if ensure_unit_area: k /= sp.sum(k) * t_step return k def smooth( binned, kernel, sampling_rate, mode='same', **kernel_discretization_params): """ Smoothes a binned representation (e.g. of a spike train) by convolving with a kernel. :param binned: Bin array to smooth. :type binned: 1-D array :param kernel: The kernel instance to convolve with. :type kernel: :class:`Kernel` :param sampling_rate: The sampling rate which will be used to discretize the kernel. It should be equal to the sampling rate used to obtain `binned`. The unit will typically be a frequency unit. :type sampling_rate: Quantity scalar :param mode: * 'same': The default which returns an array of the same size as `binned` * 'full': Returns an array with a bin for each shift where `binned` and the discretized kernel overlap by at least one bin. * 'valid': Returns only the discretization bins where the discretized kernel and `binned` completely overlap. See also `numpy.convolve <http://docs.scipy.org/doc/numpy/reference/generated/numpy.convolve.html>`_. :type mode: {'same', 'full', 'valid'} :param dict kernel_discretization_params: Additional discretization arguments which will be passed to :func:`.discretize_kernel`. :returns: The smoothed representation of `binned`. 
:rtype: Quantity 1D """ k = discretize_kernel( kernel, sampling_rate=sampling_rate, **kernel_discretization_params) return scipy.signal.convolve(binned, k, mode) * k.units def st_convolve( train, kernel, sampling_rate, mode='same', binning_params=None, kernel_discretization_params=None): """ Convolves a :class:`neo.core.SpikeTrain` with a kernel. :param train: Spike train to convolve. :type train: :class:`neo.core.SpikeTrain` :param kernel: The kernel instance to convolve with. :type kernel: :class:`Kernel` :param sampling_rate: The sampling rate which will be used to bin the spike train. The unit will typically be a frequency unit. :type sampling_rate: Quantity scalar :param mode: * 'same': The default which returns an array covering the whole duration of the spike train `train`. * 'full': Returns an array with additional discretization bins in the beginning and end so that for each spike the whole discretized kernel is included. * 'valid': Returns only the discretization bins where the discretized kernel and spike train completely overlap. See also :func:`scipy.signal.convolve`. :type mode: {'same', 'full', 'valid'} :param dict binning_params: Additional discretization arguments which will be passed to :func:`.tools.bin_spike_trains`. :param dict kernel_discretization_params: Additional discretization arguments which will be passed to :func:`.discretize_kernel`. :returns: The convolved spike train, the boundaries of the discretization bins :rtype: (Quantity 1D, Quantity 1D with the inverse units of `sampling_rate`) """ if binning_params is None: binning_params = {} if kernel_discretization_params is None: kernel_discretization_params = {} binned, bins = tools.bin_spike_trains( {0: [train]}, sampling_rate, **binning_params) binned = binned[0][0] #sampling_rate = binned.size / (bins[-1] - bins[0]) result = smooth( binned, kernel, sampling_rate, mode, **kernel_discretization_params) assert (result.size - binned.size) % 2 == 0 num_additional_bins = (result.size - binned.size) // 2 if len(binned): bins = sp.linspace( bins[0] - num_additional_bins / sampling_rate, bins[-1] + num_additional_bins / sampling_rate, result.size + 1) else: bins = [] * pq.s return result, bins
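# Usage sketch (illustrative, not part of the original module): smoothing a
# binned spike train with a Gaussian kernel using the classes and functions
# defined above. The bin values and the kernel width are arbitrary examples.
if __name__ == '__main__':
    import quantities as pq
    import scipy as sp

    # 1 s of data binned at 1 kHz with three "spikes".
    binned = sp.zeros(1000)
    binned[[120, 480, 870]] = 1.0

    kernel = GaussianKernel(kernel_size=20.0 * pq.ms, normalize=True)

    # Convolve the binned train with the discretized kernel; the result is a
    # rate-like Quantity carrying the inverse units of the kernel size.
    rate = smooth(binned, kernel, sampling_rate=1000.0 * pq.Hz, mode='same')
    print(rate.size, rate.units)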
# -*- coding: utf-8 -*- from __future__ import absolute_import from __future__ import print_function from __future__ import with_statement import json import os import platform import locale from re import sub from .const import IS_PY2 from .const import PLUGIN_NAME from .const import PRETTIER_CONFIG_FILES from .const import PRETTIER_IGNORE_FILE if IS_PY2: text_type = unicode string_types = (str, unicode) else: text_type = str string_types = (str,) def contains(needle, haystack): if not needle or not haystack: return False return needle in haystack def find_prettier_config(start_dir, alt_dirs=None): """ Find a prettier config file by searching up the file hierarchy. Hat tip to SublimeLinter 3! :param start_dir: The search start path. :param alt_dirs: If alt_dirs is not empty and the file hierarchy search failed, those directories are also checked. """ dirs = generate_dirs(start_dir, limit=500) for d in dirs: for config_file in PRETTIER_CONFIG_FILES: target = os.path.join(d, config_file) if os.path.exists(target): if config_file == 'package.json' and not _prettier_opts_in_package_json(target): continue return target if alt_dirs is None: alt_dirs = [] if '~' not in alt_dirs: alt_dirs.append('~') for d in alt_dirs: d = os.path.expanduser(d) for config_file in PRETTIER_CONFIG_FILES: target = os.path.join(d, config_file) if os.path.exists(target): if config_file == 'package.json' and not _prettier_opts_in_package_json(target): continue return target return None def generate_dirs(start_dir, limit=None): """ Generate directories, starting from start_dir. Hat tip goes to SublimeLinter 3. :param start_dir: The search start path. :param limit: If limit is None, the search will continue up to the root directory. Otherwise a maximum of limit directories will be checked. """ right = True while right and (limit is None or limit > 0): yield start_dir start_dir, right = os.path.split(start_dir) if limit is not None: limit -= 1 def _prettier_opts_in_package_json(package_json_file): try: with open(package_json_file) as package_file: json_data = json.load(package_file) except Exception: from .sthelper import log_warn log_warn("Could not parse package.json file: '{0}'. Any Prettier options " "defined in this file will be ignored.".format(package_json_file), True) return False try: if json_data['prettier']: return True return False except KeyError: return False def is_mac_os(): return platform.system() == 'Darwin' def is_windows(): return platform.system() == 'Windows' or os.name == 'nt' def to_str(value): if value is None: return '' if value is True: return 'true' if value is False: return 'false' return text_type(value) def is_bool_str(val): """Determine if the specified string :val is 'true' or 'false'. :param val: The value to check. :return: True if if val: is a boolean string, otherwise False. :rtype: bool """ if val is None: return False if isinstance(val, string_types): val = val.lower().strip() if val == 'true' or val == 'false': return True return False def trim_trailing_ws_and_lines(val): """Trim trailing whitespace and line-breaks at the end of a string. :param val: The value to trim. :return: The val with trailing whitespace and line-breaks removed. """ if val is None: return val val = sub(r'\s+\Z', '', val) return val def repeat_str(str_to_repeat, repeat_length): """Repeat a string to a certain length. :param str_to_repeat: The string to repeat. Normally a single char. :param repeat_length: The amount of times to repeat the string. :return: The repeated string. 
""" quotient, remainder = divmod(repeat_length, len(str_to_repeat)) return str_to_repeat * quotient + str_to_repeat[:remainder] def list_to_str(list_to_convert): """Convert a list of values into string. Each value will be separated by a single space. :param list_to_convert: The list to convert to a string. :return: The list converted into a string. """ return ' '.join(to_str(item) for item in list_to_convert) def is_str_empty_or_whitespace_only(txt): if not txt or len(txt) == 0: return True # strip all whitespace/invisible chars to determine textual content: txt = sub(r'\s+', '', txt) if not txt or len(txt) == 0: return True return False def is_str_none_or_empty(val): """Determine if the specified str val is None or an empty. :param val: The str to check. :return: True if if val: is None or an empty, otherwise False. :rtype: bool """ if val is None: return True if isinstance(val, string_types): val = val.strip() if not val: return True return False def get_file_abs_dir(filepath): return os.path.abspath(os.path.dirname(filepath)) def env_path_contains(path_to_look_for, env_path=None): """Check if the specified path is listed in OS environment path. :param path_to_look_for: The path the search for. :param env_path: The environment path str. :return: True if the find_path exists in the env_path. :rtype: bool """ if not path_to_look_for: return False if not env_path: env_path = os.environ['PATH'] path_to_look_for = str.replace(path_to_look_for, os.pathsep, '') paths = env_path.split(os.pathsep) for path in paths: if path == path_to_look_for: return True return False def env_path_exists(path): if not path: return False if os.path.exists(str.replace(path, os.pathsep, '')): return True return False def which(executable, path=None): if is_str_none_or_empty(executable): return None executable = os.path.normpath(executable) if os.path.isfile(executable) and os.access(executable, os.X_OK): return executable if is_str_none_or_empty(path): path = os.environ.get("PATH", os.defpath) if not path: return None if not is_windows(): # add '/usr/local/bin' on macos/linux if not already in path. usr_local_bin = ':/usr/local/bin' if not env_path_contains(usr_local_bin, path) \ and env_path_exists(usr_local_bin): path += usr_local_bin search_paths = path.split(os.pathsep) if is_windows(): # The current directory takes precedence on Windows. if os.curdir not in search_paths: search_paths.insert(0, os.curdir) # PATHEXT is necessary to check on Windows. pathext = os.environ.get("PATHEXT", "").split(os.pathsep) # See if the given file matches any of the expected path # extensions. This will allow us to short circuit when given # "python.exe". If it does match, only test that one, otherwise we # have to try others. # hat tip: https://github.com/pydanny/whichcraft/blob/master/whichcraft.py if any(executable.lower().endswith(ext.lower()) for ext in pathext): executable_files = [executable] else: executable_files = [executable + ext for ext in pathext] else: # On other platforms you don't have things like PATHEXT to tell you # what file suffixes are executable, so just pass on cmd as-is. 
executable_files = [executable] dirs_seen = set() for directory in search_paths: dir_normalized = os.path.normcase(directory) if dir_normalized not in dirs_seen: dirs_seen.add(dir_normalized) for exec_file in executable_files: exec_file_path = os.path.normpath(os.path.join(directory, exec_file)) if os.path.isfile(exec_file_path) and os.access(exec_file_path, os.X_OK): return exec_file_path return None def get_proc_env(): env = None if not is_windows(): env = os.environ.copy() usr_path = ':/usr/local/bin' if not env_path_contains(usr_path) and env_path_exists(usr_path): env['PATH'] += usr_path return env def in_source_file_path_or_project_root(source_file_dir, st_project_path, filename): # check in source file dir: source_file_dir_ignore_path = os.path.join(source_file_dir, filename) if os.path.exists(source_file_dir_ignore_path): return source_file_dir_ignore_path # check in sublime text project root dir: sublime_text_project_dir_path = os.path.join(st_project_path, filename) if os.path.exists(sublime_text_project_dir_path): return sublime_text_project_dir_path return None def resolve_prettier_ignore_path(source_file_dir, st_project_path): """Look for a '.prettierignore' Try to resolve a '.prettieringore' file in source file dir, or ST project root (#97). :return: The path (str) to a '.prettierignore' file (if one exists) in the active Sublime Text Project Window. """ return in_source_file_path_or_project_root(source_file_dir, st_project_path, PRETTIER_IGNORE_FILE) def format_error_message(error_message, error_code): # inject a line break between the error message, and debug output (legibility purposes): error_message = error_message.replace('[error] stdin: ', '\n[error] stdin: ') return '\nPrettier reported the following output:\n\n' \ '{0}\n' \ '\nPrettier process finished with exit code {1}.\n' \ .format(error_message, '{0}'.format(error_code)) def format_debug_message(label, message, debug_enabled=False): if not debug_enabled: return header = ' {0} DEBUG - {1} '.format(PLUGIN_NAME, label) horizontal_rule = repeat_str('-', len(header)) print('\n{0}\n{1}\n{2}\n\n''{3}'.format( horizontal_rule, header, horizontal_rule, message)) def get_cli_arg_value(additional_cli_args, arg_key, arg_val_can_be_empty=False, default=None): if not additional_cli_args or not arg_key: return default if not isinstance(additional_cli_args, dict): return default result = None for key, val in additional_cli_args.items(): if key == arg_key: if arg_val_can_be_empty: result = key else: result = val break if result is None: return default return result def ensure_file_has_ext(file_name, file_ext): if not file_name.endswith(file_ext): return '{0}{1}'.format(file_name, file_ext) return file_name def normalize_line_endings(lines): if not lines: return lines return lines.replace('\r\n', '\n').replace('\r', '\n') def decode_bytes(bytes_to_decode): """ Decode and return a byte string using utf-8, falling back to system's encoding if that fails. Source: Sublime Linter https://github.com/SublimeLinter/SublimeLinter/blob/master/lint/util.py#L272 """ if not bytes_to_decode: return '' try: return bytes_to_decode.decode('utf-8') except UnicodeError: return bytes_to_decode.decode(locale.getpreferredencoding(), errors='replace')
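# Usage sketch (illustrative, not part of the plugin): combining the helpers
# above to locate a prettier executable and a prettier config file for a given
# source file. `__file__` stands in for the path of the file being formatted.
if __name__ == '__main__':
    source_file_dir = get_file_abs_dir(__file__)

    prettier_cli = which('prettier')
    prettier_config = find_prettier_config(source_file_dir, alt_dirs=['~'])

    print('prettier executable:', prettier_cli)
    print('prettier config:', prettier_config)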
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- from typing import TYPE_CHECKING import warnings from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error from azure.core.paging import ItemPaged from azure.core.pipeline import PipelineResponse from azure.core.pipeline.transport import HttpRequest, HttpResponse from azure.mgmt.core.exceptions import ARMErrorFormat from .. import models if TYPE_CHECKING: # pylint: disable=unused-import,ungrouped-imports from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union T = TypeVar('T') ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] class DataConnectorsOperations(object): """DataConnectorsOperations operations. You should not instantiate this class directly. Instead, you should create a Client instance that instantiates it for you and attaches it as an attribute. :ivar models: Alias to model classes used in this operation group. :type models: ~azure.mgmt.securityinsight.models :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. """ models = models def __init__(self, client, config, serializer, deserializer): self._client = client self._serialize = serializer self._deserialize = deserializer self._config = config def list( self, resource_group_name, # type: str workspace_name, # type: str **kwargs # type: Any ): # type: (...) -> Iterable["models.DataConnectorList"] """Gets all data connectors. :param resource_group_name: The name of the resource group within the user's subscription. The name is case insensitive. :type resource_group_name: str :param workspace_name: The name of the workspace. 
:type workspace_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: An iterator like instance of either DataConnectorList or the result of cls(response) :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.securityinsight.models.DataConnectorList] :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["models.DataConnectorList"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2020-01-01" accept = "application/json" def prepare_request(next_link=None): # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') if not next_link: # Construct URL url = self.list.metadata['url'] # type: ignore path_format_arguments = { 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', pattern=r'^[0-9A-Fa-f]{8}-([0-9A-Fa-f]{4}-){3}[0-9A-Fa-f]{12}$'), 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'), 'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str', max_length=90, min_length=1), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') request = self._client.get(url, query_parameters, header_parameters) else: url = next_link query_parameters = {} # type: Dict[str, Any] request = self._client.get(url, query_parameters, header_parameters) return request def extract_data(pipeline_response): deserialized = self._deserialize('DataConnectorList', pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return deserialized.next_link or None, iter(list_of_elem) def get_next(next_link=None): request = prepare_request(next_link) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) return pipeline_response return ItemPaged( get_next, extract_data ) list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/providers/Microsoft.SecurityInsights/dataConnectors'} # type: ignore def get( self, resource_group_name, # type: str workspace_name, # type: str data_connector_id, # type: str **kwargs # type: Any ): # type: (...) -> "models.DataConnector" """Gets a data connector. :param resource_group_name: The name of the resource group within the user's subscription. The name is case insensitive. :type resource_group_name: str :param workspace_name: The name of the workspace. :type workspace_name: str :param data_connector_id: Connector ID. 
:type data_connector_id: str :keyword callable cls: A custom type or function that will be passed the direct response :return: DataConnector, or the result of cls(response) :rtype: ~azure.mgmt.securityinsight.models.DataConnector :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["models.DataConnector"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2020-01-01" accept = "application/json" # Construct URL url = self.get.metadata['url'] # type: ignore path_format_arguments = { 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', pattern=r'^[0-9A-Fa-f]{8}-([0-9A-Fa-f]{4}-){3}[0-9A-Fa-f]{12}$'), 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'), 'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str', max_length=90, min_length=1), 'dataConnectorId': self._serialize.url("data_connector_id", data_connector_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') request = self._client.get(url, query_parameters, header_parameters) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) deserialized = self._deserialize('DataConnector', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/providers/Microsoft.SecurityInsights/dataConnectors/{dataConnectorId}'} # type: ignore def create_or_update( self, resource_group_name, # type: str workspace_name, # type: str data_connector_id, # type: str data_connector, # type: "models.DataConnector" **kwargs # type: Any ): # type: (...) -> "models.DataConnector" """Creates or updates the data connector. :param resource_group_name: The name of the resource group within the user's subscription. The name is case insensitive. :type resource_group_name: str :param workspace_name: The name of the workspace. :type workspace_name: str :param data_connector_id: Connector ID. :type data_connector_id: str :param data_connector: The data connector. 
:type data_connector: ~azure.mgmt.securityinsight.models.DataConnector :keyword callable cls: A custom type or function that will be passed the direct response :return: DataConnector, or the result of cls(response) :rtype: ~azure.mgmt.securityinsight.models.DataConnector :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["models.DataConnector"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2020-01-01" content_type = kwargs.pop("content_type", "application/json") accept = "application/json" # Construct URL url = self.create_or_update.metadata['url'] # type: ignore path_format_arguments = { 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', pattern=r'^[0-9A-Fa-f]{8}-([0-9A-Fa-f]{4}-){3}[0-9A-Fa-f]{12}$'), 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'), 'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str', max_length=90, min_length=1), 'dataConnectorId': self._serialize.url("data_connector_id", data_connector_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') body_content_kwargs = {} # type: Dict[str, Any] body_content = self._serialize.body(data_connector, 'DataConnector') body_content_kwargs['content'] = body_content request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200, 201]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) if response.status_code == 200: deserialized = self._deserialize('DataConnector', pipeline_response) if response.status_code == 201: deserialized = self._deserialize('DataConnector', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/providers/Microsoft.SecurityInsights/dataConnectors/{dataConnectorId}'} # type: ignore def delete( self, resource_group_name, # type: str workspace_name, # type: str data_connector_id, # type: str **kwargs # type: Any ): # type: (...) -> None """Delete the data connector. :param resource_group_name: The name of the resource group within the user's subscription. The name is case insensitive. :type resource_group_name: str :param workspace_name: The name of the workspace. :type workspace_name: str :param data_connector_id: Connector ID. 
:type data_connector_id: str :keyword callable cls: A custom type or function that will be passed the direct response :return: None, or the result of cls(response) :rtype: None :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType[None] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2020-01-01" accept = "application/json" # Construct URL url = self.delete.metadata['url'] # type: ignore path_format_arguments = { 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', pattern=r'^[0-9A-Fa-f]{8}-([0-9A-Fa-f]{4}-){3}[0-9A-Fa-f]{12}$'), 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'), 'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str', max_length=90, min_length=1), 'dataConnectorId': self._serialize.url("data_connector_id", data_connector_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') request = self._client.delete(url, query_parameters, header_parameters) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200, 204]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) if cls: return cls(pipeline_response, None, {}) delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/providers/Microsoft.SecurityInsights/dataConnectors/{dataConnectorId}'} # type: ignore
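# Usage sketch (illustrative, not generated code): this operations class is
# normally reached through the service client rather than instantiated
# directly. The client and credential classes below are assumptions based on
# the azure-mgmt-securityinsight and azure-identity packages.
if __name__ == '__main__':
    from azure.identity import DefaultAzureCredential
    from azure.mgmt.securityinsight import SecurityInsights

    client = SecurityInsights(
        credential=DefaultAzureCredential(),
        subscription_id='<subscription-id>',
    )

    # The generated client attaches this operation group as `data_connectors`.
    for connector in client.data_connectors.list(
            resource_group_name='<resource-group>',
            workspace_name='<workspace>'):
        print(connector.name, connector.kind)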
################################################################################ # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ################################################################################ import os import shutil import sys import unittest import uuid from pyflink.pyflink_gateway_server import on_windows from pyflink.table import DataTypes, TableEnvironment, EnvironmentSettings from pyflink.table import expressions as expr from pyflink.table.udf import udf from pyflink.testing import source_sink_utils from pyflink.testing.test_case_utils import (PyFlinkStreamTableTestCase, PyFlinkBatchTableTestCase) class DependencyTests(object): def test_add_python_file(self): python_file_dir = os.path.join(self.tempdir, "python_file_dir_" + str(uuid.uuid4())) os.mkdir(python_file_dir) python_file_path = os.path.join(python_file_dir, "test_dependency_manage_lib.py") with open(python_file_path, 'w') as f: f.write("def add_two(a):\n raise Exception('This function should not be called!')") self.t_env.add_python_file(python_file_path) python_file_dir_with_higher_priority = os.path.join( self.tempdir, "python_file_dir_" + str(uuid.uuid4())) os.mkdir(python_file_dir_with_higher_priority) python_file_path_higher_priority = os.path.join(python_file_dir_with_higher_priority, "test_dependency_manage_lib.py") with open(python_file_path_higher_priority, 'w') as f: f.write("def add_two(a):\n return a + 2") self.t_env.add_python_file(python_file_path_higher_priority) def plus_two(i): from test_dependency_manage_lib import add_two return add_two(i) self.t_env.create_temporary_system_function( "add_two", udf(plus_two, DataTypes.BIGINT(), DataTypes.BIGINT())) table_sink = source_sink_utils.TestAppendSink( ['a', 'b'], [DataTypes.BIGINT(), DataTypes.BIGINT()]) self.t_env.register_table_sink("Results", table_sink) t = self.t_env.from_elements([(1, 2), (2, 5), (3, 1)], ['a', 'b']) t.select(expr.call("add_two", t.a), t.a).execute_insert("Results").wait() actual = source_sink_utils.results() self.assert_equals(actual, ["+I[3, 1]", "+I[4, 2]", "+I[5, 3]"]) class BatchDependencyTests(DependencyTests, PyFlinkBatchTableTestCase): pass class StreamDependencyTests(DependencyTests, PyFlinkStreamTableTestCase): def setUp(self): super(StreamDependencyTests, self).setUp() origin_execution_mode = os.environ['_python_worker_execution_mode'] os.environ['_python_worker_execution_mode'] = "loopback" try: self.st_env = TableEnvironment.create(EnvironmentSettings.in_streaming_mode()) finally: if origin_execution_mode is not None: os.environ['_python_worker_execution_mode'] = origin_execution_mode def test_set_requirements_without_cached_directory(self): requirements_txt_path = os.path.join(self.tempdir, str(uuid.uuid4())) with open(requirements_txt_path, 'w') as f: f.write("cloudpickle==1.2.2") 
self.st_env.set_python_requirements(requirements_txt_path) def check_requirements(i): import cloudpickle # noqa # pylint: disable=unused-import assert '_PYTHON_REQUIREMENTS_INSTALL_DIR' in os.environ return i self.st_env.create_temporary_system_function( "check_requirements", udf(check_requirements, DataTypes.BIGINT(), DataTypes.BIGINT())) table_sink = source_sink_utils.TestAppendSink( ['a', 'b'], [DataTypes.BIGINT(), DataTypes.BIGINT()]) self.st_env.register_table_sink("Results", table_sink) t = self.st_env.from_elements([(1, 2), (2, 5), (3, 1)], ['a', 'b']) t.select(expr.call('check_requirements', t.a), t.a).execute_insert("Results").wait() actual = source_sink_utils.results() self.assert_equals(actual, ["+I[1, 1]", "+I[2, 2]", "+I[3, 3]"]) def test_set_requirements_with_cached_directory(self): tmp_dir = self.tempdir requirements_txt_path = os.path.join(tmp_dir, "requirements_txt_" + str(uuid.uuid4())) with open(requirements_txt_path, 'w') as f: f.write("python-package1==0.0.0") requirements_dir_path = os.path.join(tmp_dir, "requirements_dir_" + str(uuid.uuid4())) os.mkdir(requirements_dir_path) package_file_name = "python-package1-0.0.0.tar.gz" with open(os.path.join(requirements_dir_path, package_file_name), 'wb') as f: import base64 # This base64 data is encoded from a python package file which includes a # "python_package1" module. The module contains a "plus(a, b)" function. # The base64 can be recomputed by following code: # base64.b64encode(open("python-package1-0.0.0.tar.gz", "rb").read()).decode("utf-8") f.write(base64.b64decode( "H4sICNefrV0C/2Rpc3QvcHl0aG9uLXBhY2thZ2UxLTAuMC4wLnRhcgDtmVtv2jAYhnPtX2H1CrRCY+ckI" "XEx7axuUA11u5imyICTRc1JiVnHfv1MKKWjYxwKEdPehws7xkmUfH5f+3PyqfqWpa1cjG5EKFnLbOvfhX" "FQTI3nOPPSdavS5Pa8nGMwy3Esi3ke9wyTObbnGNQxamBSKlFQavzUryG8ldG6frpbEGx4yNmDLMp/hPy" "P8b+6fNN613vdP1z8XdteG3+ug/17/F3Hcw1qIv5H54NUYiyUaH2SRRllaYeytkl6IpEdujI2yH2XapCQ" "wSRJRDHt0OveZa//uUfeZonUvUO5bHo+0ZcoVo9bMhFRvGx9H41kWj447aUsR0WUq+pui8arWKggK5Jli" "wGOo/95q79ovXi6/nfyf246Dof/n078fT9KI+X77Xx6BP83bX4Xf5NxT7dz7toO/L8OxjKgeTwpG+KcDp" "sdQjWFVJMipYI+o0MCk4X/t2UYtqI0yPabCHb3f861XcD/Ty/+Y5nLdCzT0dSPo/SmbKsf6un+b7KV+Ls" "W4/D/OoC9w/930P9eGwM75//csrD+Q/6P/P/k9D/oX3988Wqw1bS/tf6tR+s/m3EG/ddBqXO9XKf15C8p" "P9k4HZBtBgzZaVW5vrfKcj+W32W82ygEB9D/Xu9+4/qfP9L/rBv0X1v87yONKRX61/qfzwqjIDzIPTbv/" "7or3/88i0H/tfBFW7s/s/avRInQH06ieEy7tDrQeYHUdRN7wP+n/vf62LOH/pld7f9xz7a5Pfufedy0oP" "86iJI8KxStAq6yLC4JWdbbVbWRikR2z1ZGytk5vauW3QdnBFE6XqwmykazCesAAAAAAAAAAAAAAAAAAAA" "AAAAAAAAAAAAAAOBw/AJw5CHBAFAAAA==")) self.st_env.set_python_requirements(requirements_txt_path, requirements_dir_path) def add_one(i): from python_package1 import plus return plus(i, 1) self.st_env.create_temporary_system_function( "add_one", udf(add_one, DataTypes.BIGINT(), DataTypes.BIGINT())) table_sink = source_sink_utils.TestAppendSink( ['a', 'b'], [DataTypes.BIGINT(), DataTypes.BIGINT()]) self.st_env.register_table_sink("Results", table_sink) t = self.st_env.from_elements([(1, 2), (2, 5), (3, 1)], ['a', 'b']) t.select(expr.call('add_one', t.a), t.a).execute_insert("Results").wait() actual = source_sink_utils.results() self.assert_equals(actual, ["+I[2, 1]", "+I[3, 2]", "+I[4, 3]"]) def test_add_python_archive(self): tmp_dir = self.tempdir archive_dir_path = os.path.join(tmp_dir, "archive_" + str(uuid.uuid4())) os.mkdir(archive_dir_path) with open(os.path.join(archive_dir_path, "data.txt"), 'w') as f: f.write("2") archive_file_path = \ shutil.make_archive(os.path.dirname(archive_dir_path), 'zip', archive_dir_path) 
self.t_env.add_python_archive(archive_file_path, "data") def add_from_file(i): with open("data/data.txt", 'r') as f: return i + int(f.read()) self.t_env.create_temporary_system_function("add_from_file", udf(add_from_file, DataTypes.BIGINT(), DataTypes.BIGINT())) table_sink = source_sink_utils.TestAppendSink( ['a', 'b'], [DataTypes.BIGINT(), DataTypes.BIGINT()]) self.t_env.register_table_sink("Results", table_sink) t = self.t_env.from_elements([(1, 2), (2, 5), (3, 1)], ['a', 'b']) t.select(expr.call('add_from_file', t.a), t.a).execute_insert("Results").wait() actual = source_sink_utils.results() self.assert_equals(actual, ["+I[3, 1]", "+I[4, 2]", "+I[5, 3]"]) @unittest.skipIf(on_windows(), "Symbolic link is not supported on Windows, skipping.") def test_set_environment(self): python_exec = sys.executable tmp_dir = self.tempdir python_exec_link_path = os.path.join(tmp_dir, "py_exec") os.symlink(python_exec, python_exec_link_path) self.st_env.get_config().set_python_executable(python_exec_link_path) def check_python_exec(i): import os assert os.environ["python"] == python_exec_link_path return i self.st_env.create_temporary_system_function( "check_python_exec", udf(check_python_exec, DataTypes.BIGINT(), DataTypes.BIGINT())) def check_pyflink_gateway_disabled(i): from pyflink.java_gateway import get_gateway get_gateway() return i self.st_env.create_temporary_system_function( "check_pyflink_gateway_disabled", udf(check_pyflink_gateway_disabled, DataTypes.BIGINT(), DataTypes.BIGINT())) table_sink = source_sink_utils.TestAppendSink( ['a', 'b'], [DataTypes.BIGINT(), DataTypes.BIGINT()]) self.st_env.register_table_sink("Results", table_sink) t = self.st_env.from_elements([(1, 2), (2, 5), (3, 1)], ['a', 'b']) t.select( expr.call('check_python_exec', t.a), expr.call('check_pyflink_gateway_disabled', t.a)) \ .execute_insert("Results").wait() actual = source_sink_utils.results() self.assert_equals(actual, ["+I[1, 1]", "+I[2, 2]", "+I[3, 3]"]) if __name__ == "__main__": try: import xmlrunner testRunner = xmlrunner.XMLTestRunner(output='target/test-reports') except ImportError: testRunner = None unittest.main(testRunner=testRunner, verbosity=2)
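# Usage sketch (illustrative, not part of the test suite): the dependency
# management APIs exercised by the tests above, called directly on a
# TableEnvironment. All file paths below are placeholders.
if __name__ == '__main__':
    from pyflink.table import EnvironmentSettings, TableEnvironment

    t_env = TableEnvironment.create(EnvironmentSettings.in_streaming_mode())

    # Ship a local Python module so UDFs can import it on the workers.
    t_env.add_python_file('/tmp/deps/my_udf_lib.py')

    # Install third-party requirements on the workers; a pre-downloaded cache
    # directory can be passed as the optional second argument.
    t_env.set_python_requirements('/tmp/deps/requirements.txt')

    # Ship an archive; it is extracted under the target directory name 'data'.
    t_env.add_python_archive('/tmp/deps/resources.zip', 'data')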
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc. # Copyright 2011 Eldar Nugaev # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import uuid import mock import six import webob from nova.api.openstack.compute import floating_ips as fips_v21 from nova.api.openstack import extensions from nova import compute from nova.compute import utils as compute_utils from nova import context from nova import db from nova import exception from nova import network from nova import objects from nova.objects import base as obj_base from nova import test from nova.tests.unit.api.openstack import fakes from nova.tests.unit import fake_network FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa' TEST_INST = 1 WRONG_INST = 9999 def network_api_get_floating_ip(self, context, id): return {'id': 1, 'address': '10.10.10.10', 'pool': 'nova', 'fixed_ip_id': None} def network_api_get_floating_ip_by_address(self, context, address): return {'id': 1, 'address': '10.10.10.10', 'pool': 'nova', 'fixed_ip_id': 10} def network_api_get_floating_ips_by_project(self, context): return [{'id': 1, 'address': '10.10.10.10', 'pool': 'nova', 'fixed_ip': {'address': '10.0.0.1', 'instance_uuid': FAKE_UUID, 'instance': objects.Instance( **{'uuid': FAKE_UUID})}}, {'id': 2, 'pool': 'nova', 'interface': 'eth0', 'address': '10.10.10.11', 'fixed_ip': None}] def compute_api_get(self, context, instance_id, expected_attrs=None): return objects.Instance(uuid=FAKE_UUID, id=instance_id, instance_type_id=1, host='bob') def network_api_allocate(self, context): return '10.10.10.10' def network_api_release(self, context, address): pass def compute_api_associate(self, context, instance_id, address): pass def network_api_associate(self, context, floating_address, fixed_address): pass def network_api_disassociate(self, context, instance, floating_address): pass def fake_instance_get(context, instance_id): return objects.Instance(**{ "id": 1, "uuid": uuid.uuid4(), "name": 'fake', "user_id": 'fakeuser', "project_id": '123'}) def stub_nw_info(test): def get_nw_info_for_instance(instance): return fake_network.fake_get_instance_nw_info(test) return get_nw_info_for_instance def get_instance_by_floating_ip_addr(self, context, address): return None class FloatingIpTestNeutronV21(test.NoDBTestCase): floating_ips = fips_v21 def setUp(self): super(FloatingIpTestNeutronV21, self).setUp() self.flags(use_neutron=True) self.controller = self.floating_ips.FloatingIPController() def test_floatingip_delete(self): req = fakes.HTTPRequest.blank('') fip_val = {'address': '1.1.1.1', 'fixed_ip_id': '192.168.1.2'} with test.nested( mock.patch.object(self.controller.network_api, 'disassociate_floating_ip'), mock.patch.object(self.controller.network_api, 'disassociate_and_release_floating_ip'), mock.patch.object(self.controller.network_api, 'release_floating_ip'), mock.patch.object(self.controller.network_api, 'get_instance_id_by_floating_address', return_value=None), mock.patch.object(self.controller.network_api, 'get_floating_ip', return_value=fip_val)) as ( 
disoc_fip, dis_and_del, rel_fip, _, _): self.controller.delete(req, 1) self.assertFalse(disoc_fip.called) self.assertFalse(rel_fip.called) # Only disassociate_and_release_floating_ip is # called if using neutron self.assertTrue(dis_and_del.called) def _test_floatingip_delete_not_found(self, ex, expect_ex=webob.exc.HTTPNotFound): req = fakes.HTTPRequest.blank('') with mock.patch.object(self.controller.network_api, 'get_floating_ip', side_effect=ex): self.assertRaises(expect_ex, self.controller.delete, req, 1) def test_floatingip_delete_not_found_ip(self): ex = exception.FloatingIpNotFound(id=1) self._test_floatingip_delete_not_found(ex) def test_floatingip_delete_not_found(self): ex = exception.NotFound self._test_floatingip_delete_not_found(ex) def test_floatingip_delete_invalid_id(self): ex = exception.InvalidID(id=1) self._test_floatingip_delete_not_found(ex, webob.exc.HTTPBadRequest) class FloatingIpTestV21(test.TestCase): floating_ip = "10.10.10.10" floating_ip_2 = "10.10.10.11" floating_ips = fips_v21 validation_error = exception.ValidationError def _create_floating_ips(self, floating_ips=None): """Create a floating IP object.""" if floating_ips is None: floating_ips = [self.floating_ip] elif not isinstance(floating_ips, (list, tuple)): floating_ips = [floating_ips] dict_ = {'pool': 'nova', 'host': 'fake_host'} return db.floating_ip_bulk_create( self.context, [dict(address=ip, **dict_) for ip in floating_ips], ) def _delete_floating_ip(self): db.floating_ip_destroy(self.context, self.floating_ip) def setUp(self): super(FloatingIpTestV21, self).setUp() self.stubs.Set(compute.api.API, "get", compute_api_get) self.stubs.Set(network.api.API, "get_floating_ip", network_api_get_floating_ip) self.stubs.Set(network.api.API, "get_floating_ip_by_address", network_api_get_floating_ip_by_address) self.stubs.Set(network.api.API, "get_floating_ips_by_project", network_api_get_floating_ips_by_project) self.stubs.Set(network.api.API, "release_floating_ip", network_api_release) self.stubs.Set(network.api.API, "disassociate_floating_ip", network_api_disassociate) self.stubs.Set(network.api.API, "get_instance_id_by_floating_address", get_instance_by_floating_ip_addr) self.stubs.Set(compute_utils, "get_nw_info_for_instance", stub_nw_info(self)) fake_network.stub_out_nw_api_get_instance_nw_info(self) self.stub_out('nova.db.instance_get', fake_instance_get) self.context = context.get_admin_context() self._create_floating_ips() self.ext_mgr = extensions.ExtensionManager() self.ext_mgr.extensions = {} self.controller = self.floating_ips.FloatingIPController() self.manager = self.floating_ips.\ FloatingIPActionController(self.ext_mgr) self.fake_req = fakes.HTTPRequest.blank('') def tearDown(self): self._delete_floating_ip() super(FloatingIpTestV21, self).tearDown() def test_floatingip_delete(self): fip_val = {'address': '1.1.1.1', 'fixed_ip_id': '192.168.1.2'} with test.nested( mock.patch.object(self.controller.network_api, 'disassociate_floating_ip'), mock.patch.object(self.controller.network_api, 'release_floating_ip'), mock.patch.object(self.controller.network_api, 'get_instance_id_by_floating_address', return_value=None), mock.patch.object(self.controller.network_api, 'get_floating_ip', return_value=fip_val)) as ( disoc_fip, rel_fip, _, _): self.controller.delete(self.fake_req, 1) self.assertTrue(disoc_fip.called) self.assertTrue(rel_fip.called) def _test_floatingip_delete_not_found(self, ex, expect_ex=webob.exc.HTTPNotFound): with mock.patch.object(self.controller.network_api, 'get_floating_ip', 
side_effect=ex): self.assertRaises(expect_ex, self.controller.delete, self.fake_req, 1) def test_floatingip_delete_not_found_ip(self): ex = exception.FloatingIpNotFound(id=1) self._test_floatingip_delete_not_found(ex) def test_floatingip_delete_not_found(self): ex = exception.NotFound self._test_floatingip_delete_not_found(ex) def test_floatingip_delete_invalid_id(self): ex = exception.InvalidID(id=1) self._test_floatingip_delete_not_found(ex, webob.exc.HTTPBadRequest) def test_translate_floating_ip_view(self): floating_ip_address = self.floating_ip floating_ip = db.floating_ip_get_by_address(self.context, floating_ip_address) # NOTE(vish): network_get uses the id not the address floating_ip = db.floating_ip_get(self.context, floating_ip['id']) floating_obj = objects.FloatingIP() objects.FloatingIP._from_db_object(self.context, floating_obj, floating_ip) view = self.floating_ips._translate_floating_ip_view(floating_obj) self.assertIn('floating_ip', view) self.assertTrue(view['floating_ip']['id']) self.assertEqual(view['floating_ip']['ip'], floating_obj.address) self.assertIsNone(view['floating_ip']['fixed_ip']) self.assertIsNone(view['floating_ip']['instance_id']) def test_translate_floating_ip_view_neutronesque(self): uuid = 'ca469a10-fa76-11e5-86aa-5e5517507c66' fixed_id = 'ae900cf4-fb73-11e5-86aa-5e5517507c66' floating_ip = objects.floating_ip.NeutronFloatingIP(id=uuid, address='1.2.3.4', pool='pool', context='ctxt', fixed_ip_id=fixed_id) view = self.floating_ips._translate_floating_ip_view(floating_ip) self.assertEqual(uuid, view['floating_ip']['id']) def test_translate_floating_ip_view_dict(self): floating_ip = {'id': 0, 'address': '10.0.0.10', 'pool': 'nova', 'fixed_ip': None} view = self.floating_ips._translate_floating_ip_view(floating_ip) self.assertIn('floating_ip', view) def test_translate_floating_ip_view_obj(self): fip = objects.FixedIP(address='192.168.1.2', instance_uuid=FAKE_UUID) floater = self._build_floating_ip('10.0.0.2', fip) result = self.floating_ips._translate_floating_ip_view(floater) expected = self._build_expected(floater, fip.address, fip.instance_uuid) self._test_result(expected, result) def test_translate_floating_ip_bad_address(self): fip = objects.FixedIP(instance_uuid=FAKE_UUID) floater = self._build_floating_ip('10.0.0.2', fip) result = self.floating_ips._translate_floating_ip_view(floater) expected = self._build_expected(floater, None, fip.instance_uuid) self._test_result(expected, result) def test_translate_floating_ip_bad_instance_id(self): fip = objects.FixedIP(address='192.168.1.2') floater = self._build_floating_ip('10.0.0.2', fip) result = self.floating_ips._translate_floating_ip_view(floater) expected = self._build_expected(floater, fip.address, None) self._test_result(expected, result) def test_translate_floating_ip_bad_instance_and_address(self): fip = objects.FixedIP() floater = self._build_floating_ip('10.0.0.2', fip) result = self.floating_ips._translate_floating_ip_view(floater) expected = self._build_expected(floater, None, None) self._test_result(expected, result) def test_translate_floating_ip_null_fixed(self): floater = self._build_floating_ip('10.0.0.2', None) result = self.floating_ips._translate_floating_ip_view(floater) expected = self._build_expected(floater, None, None) self._test_result(expected, result) def test_translate_floating_ip_unset_fixed(self): floater = objects.FloatingIP(id=1, address='10.0.0.2', pool='foo') result = self.floating_ips._translate_floating_ip_view(floater) expected = self._build_expected(floater, None, 
None) self._test_result(expected, result) def test_translate_floating_ips_view(self): mock_trans = mock.Mock() mock_trans.return_value = {'floating_ip': 'foo'} self.floating_ips._translate_floating_ip_view = mock_trans fip1 = objects.FixedIP(address='192.168.1.2', instance_uuid=FAKE_UUID) fip2 = objects.FixedIP(address='192.168.1.3', instance_uuid=FAKE_UUID) floaters = [self._build_floating_ip('10.0.0.2', fip1), self._build_floating_ip('10.0.0.3', fip2)] result = self.floating_ips._translate_floating_ips_view(floaters) called_floaters = [call[0][0] for call in mock_trans.call_args_list] self.assertTrue(any(obj_base.obj_equal_prims(floaters[0], f) for f in called_floaters), "_translate_floating_ip_view was not called with all " "floating ips") self.assertTrue(any(obj_base.obj_equal_prims(floaters[1], f) for f in called_floaters), "_translate_floating_ip_view was not called with all " "floating ips") expected_result = {'floating_ips': ['foo', 'foo']} self.assertEqual(expected_result, result) def test_floating_ips_list(self): res_dict = self.controller.index(self.fake_req) response = {'floating_ips': [{'instance_id': FAKE_UUID, 'ip': '10.10.10.10', 'pool': 'nova', 'fixed_ip': '10.0.0.1', 'id': 1}, {'instance_id': None, 'ip': '10.10.10.11', 'pool': 'nova', 'fixed_ip': None, 'id': 2}]} self.assertEqual(res_dict, response) def test_floating_ip_release_nonexisting(self): def fake_get_floating_ip(*args, **kwargs): raise exception.FloatingIpNotFound(id=id) self.stubs.Set(network.api.API, "get_floating_ip", fake_get_floating_ip) ex = self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete, self.fake_req, '9876') self.assertIn("Floating IP not found for ID 9876", ex.explanation) def test_floating_ip_release_race_cond(self): def fake_get_floating_ip(*args, **kwargs): return {'fixed_ip_id': 1, 'address': self.floating_ip} def fake_get_instance_by_floating_ip_addr(*args, **kwargs): return 'test-inst' def fake_disassociate_floating_ip(*args, **kwargs): raise exception.FloatingIpNotAssociated(args[3]) self.stubs.Set(network.api.API, "get_floating_ip", fake_get_floating_ip) self.stubs.Set(self.floating_ips, "get_instance_by_floating_ip_addr", fake_get_instance_by_floating_ip_addr) self.stubs.Set(self.floating_ips, "disassociate_floating_ip", fake_disassociate_floating_ip) delete = self.controller.delete res = delete(self.fake_req, '9876') # NOTE: on v2.1, http status code is set as wsgi_code of API # method instead of status_int in a response object. 
if isinstance(self.controller, fips_v21.FloatingIPController): status_int = delete.wsgi_code else: status_int = res.status_int self.assertEqual(status_int, 202) def test_floating_ip_show(self): res_dict = self.controller.show(self.fake_req, 1) self.assertEqual(res_dict['floating_ip']['id'], 1) self.assertEqual(res_dict['floating_ip']['ip'], '10.10.10.10') self.assertIsNone(res_dict['floating_ip']['instance_id']) def test_floating_ip_show_not_found(self): def fake_get_floating_ip(*args, **kwargs): raise exception.FloatingIpNotFound(id='fake') self.stubs.Set(network.api.API, "get_floating_ip", fake_get_floating_ip) ex = self.assertRaises(webob.exc.HTTPNotFound, self.controller.show, self.fake_req, '9876') self.assertIn("Floating IP not found for ID 9876", ex.explanation) def test_show_associated_floating_ip(self): def get_floating_ip(self, context, id): return {'id': 1, 'address': '10.10.10.10', 'pool': 'nova', 'fixed_ip': {'address': '10.0.0.1', 'instance_uuid': FAKE_UUID, 'instance': {'uuid': FAKE_UUID}}} self.stubs.Set(network.api.API, "get_floating_ip", get_floating_ip) res_dict = self.controller.show(self.fake_req, 1) self.assertEqual(res_dict['floating_ip']['id'], 1) self.assertEqual(res_dict['floating_ip']['ip'], '10.10.10.10') self.assertEqual(res_dict['floating_ip']['fixed_ip'], '10.0.0.1') self.assertEqual(res_dict['floating_ip']['instance_id'], FAKE_UUID) def test_recreation_of_floating_ip(self): self._delete_floating_ip() self._create_floating_ips() def test_floating_ip_in_bulk_creation(self): self._delete_floating_ip() self._create_floating_ips([self.floating_ip, self.floating_ip_2]) all_ips = db.floating_ip_get_all(self.context) ip_list = [ip['address'] for ip in all_ips] self.assertIn(self.floating_ip, ip_list) self.assertIn(self.floating_ip_2, ip_list) def test_fail_floating_ip_in_bulk_creation(self): self.assertRaises(exception.FloatingIpExists, self._create_floating_ips, [self.floating_ip, self.floating_ip_2]) all_ips = db.floating_ip_get_all(self.context) ip_list = [ip['address'] for ip in all_ips] self.assertIn(self.floating_ip, ip_list) self.assertNotIn(self.floating_ip_2, ip_list) def test_floating_ip_allocate_no_free_ips(self): def fake_allocate(*args, **kwargs): raise exception.NoMoreFloatingIps() self.stubs.Set(network.api.API, "allocate_floating_ip", fake_allocate) ex = self.assertRaises(webob.exc.HTTPNotFound, self.controller.create, self.fake_req) self.assertIn('No more floating IPs', ex.explanation) def test_floating_ip_allocate_no_free_ips_pool(self): def fake_allocate(*args, **kwargs): raise exception.NoMoreFloatingIps() self.stubs.Set(network.api.API, "allocate_floating_ip", fake_allocate) ex = self.assertRaises(webob.exc.HTTPNotFound, self.controller.create, self.fake_req, {'pool': 'non_existent_pool'}) self.assertIn('No more floating IPs in pool non_existent_pool', ex.explanation) @mock.patch.object(network.api.API, 'allocate_floating_ip', side_effect=exception.FloatingIpBadRequest( 'Bad floatingip request: Network ' 'c8f0e88f-ae41-47cb-be6c-d8256ba80576 does not contain any ' 'IPv4 subnet')) def test_floating_ip_allocate_no_ipv4_subnet(self, allocate_mock): ex = self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create, self.fake_req, {'pool': 'non_existent_pool'}) self.assertIn("does not contain any IPv4 subnet", six.text_type(ex)) @mock.patch('nova.network.api.API.allocate_floating_ip', side_effect=exception.FloatingIpLimitExceeded()) def test_floating_ip_allocate_over_quota(self, allocate_mock): ex = self.assertRaises(webob.exc.HTTPForbidden, 
self.controller.create, self.fake_req) self.assertIn('IP allocation over quota', ex.explanation) @mock.patch('nova.network.api.API.allocate_floating_ip', side_effect=exception.FloatingIpLimitExceeded()) def test_floating_ip_allocate_quota_exceed_in_pool(self, allocate_mock): ex = self.assertRaises(webob.exc.HTTPForbidden, self.controller.create, self.fake_req, {'pool': 'non_existent_pool'}) self.assertIn('IP allocation over quota in pool non_existent_pool.', ex.explanation) @mock.patch('nova.network.api.API.allocate_floating_ip', side_effect=exception.FloatingIpPoolNotFound()) def test_floating_ip_create_with_unknown_pool(self, allocate_mock): ex = self.assertRaises(webob.exc.HTTPNotFound, self.controller.create, self.fake_req, {'pool': 'non_existent_pool'}) self.assertIn('Floating IP pool not found.', ex.explanation) def test_floating_ip_allocate(self): def fake1(*args, **kwargs): pass def fake2(*args, **kwargs): return {'id': 1, 'address': '10.10.10.10', 'pool': 'nova'} self.stubs.Set(network.api.API, "allocate_floating_ip", fake1) self.stubs.Set(network.api.API, "get_floating_ip_by_address", fake2) res_dict = self.controller.create(self.fake_req) ip = res_dict['floating_ip'] expected = { "id": 1, "instance_id": None, "ip": "10.10.10.10", "fixed_ip": None, "pool": 'nova'} self.assertEqual(ip, expected) def test_floating_ip_release(self): self.controller.delete(self.fake_req, 1) def _test_floating_ip_associate(self, fixed_address): def fake_associate_floating_ip(*args, **kwargs): self.assertEqual(fixed_address, kwargs['fixed_address']) self.stubs.Set(network.api.API, "associate_floating_ip", fake_associate_floating_ip) body = dict(addFloatingIp=dict(address=self.floating_ip)) rsp = self.manager._add_floating_ip(self.fake_req, TEST_INST, body=body) self.assertEqual(202, rsp.status_int) def test_floating_ip_associate(self): self._test_floating_ip_associate(fixed_address='192.168.1.100') @mock.patch.object(network.model.NetworkInfo, 'fixed_ips') def test_associate_floating_ip_v4v6_fixed_ip(self, fixed_ips_mock): fixed_address = '192.168.1.100' fixed_ips_mock.return_value = [{'address': 'fc00:2001:db8::100'}, {'address': ''}, {'address': fixed_address}] self._test_floating_ip_associate(fixed_address=fixed_address) @mock.patch.object(network.model.NetworkInfo, 'fixed_ips', return_value=[{'address': 'fc00:2001:db8::100'}]) def test_associate_floating_ip_v6_fixed_ip(self, fixed_ips_mock): body = dict(addFloatingIp=dict(address=self.floating_ip)) self.assertRaises(webob.exc.HTTPBadRequest, self.manager._add_floating_ip, self.fake_req, TEST_INST, body=body) def test_floating_ip_associate_invalid_instance(self): def fake_get(self, context, id, expected_attrs=None): raise exception.InstanceNotFound(instance_id=id) self.stubs.Set(compute.api.API, "get", fake_get) body = dict(addFloatingIp=dict(address=self.floating_ip)) self.assertRaises(webob.exc.HTTPNotFound, self.manager._add_floating_ip, self.fake_req, 'test_inst', body=body) def test_associate_not_allocated_floating_ip_to_instance(self): def fake_associate_floating_ip(self, context, instance, floating_address, fixed_address, affect_auto_assigned=False): raise exception.FloatingIpNotFoundForAddress( address=floating_address) self.stubs.Set(network.api.API, "associate_floating_ip", fake_associate_floating_ip) floating_ip = '10.10.10.11' body = dict(addFloatingIp=dict(address=floating_ip)) ex = self.assertRaises(webob.exc.HTTPNotFound, self.manager._add_floating_ip, self.fake_req, TEST_INST, body=body) self.assertIn("floating IP not found", 
ex.explanation) @mock.patch.object(network.api.API, 'associate_floating_ip', side_effect=exception.Forbidden) def test_associate_floating_ip_forbidden(self, associate_mock): body = dict(addFloatingIp=dict(address='10.10.10.11')) self.assertRaises(webob.exc.HTTPForbidden, self.manager._add_floating_ip, self.fake_req, TEST_INST, body=body) def test_associate_floating_ip_bad_address_key(self): body = dict(addFloatingIp=dict(bad_address='10.10.10.11')) req = fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action') self.assertRaises(self.validation_error, self.manager._add_floating_ip, req, 'test_inst', body=body) def test_associate_floating_ip_bad_addfloatingip_key(self): body = dict(bad_addFloatingIp=dict(address='10.10.10.11')) req = fakes.HTTPRequest.blank('/v2/fake/servers/test_inst/action') self.assertRaises(self.validation_error, self.manager._add_floating_ip, req, 'test_inst', body=body) def test_floating_ip_disassociate(self): def get_instance_by_floating_ip_addr(self, context, address): if address == '10.10.10.10': return TEST_INST self.stubs.Set(network.api.API, "get_instance_id_by_floating_address", get_instance_by_floating_ip_addr) body = dict(removeFloatingIp=dict(address='10.10.10.10')) rsp = self.manager._remove_floating_ip(self.fake_req, TEST_INST, body=body) self.assertEqual(202, rsp.status_int) def test_floating_ip_disassociate_missing(self): body = dict(removeFloatingIp=dict(address='10.10.10.10')) self.assertRaises(webob.exc.HTTPConflict, self.manager._remove_floating_ip, self.fake_req, 'test_inst', body=body) def test_floating_ip_associate_non_existent_ip(self): def fake_network_api_associate(self, context, instance, floating_address=None, fixed_address=None): floating_ips = ["10.10.10.10", "10.10.10.11"] if floating_address not in floating_ips: raise exception.FloatingIpNotFoundForAddress( address=floating_address) self.stubs.Set(network.api.API, "associate_floating_ip", fake_network_api_associate) body = dict(addFloatingIp=dict(address='1.1.1.1')) self.assertRaises(webob.exc.HTTPNotFound, self.manager._add_floating_ip, self.fake_req, TEST_INST, body=body) def test_floating_ip_disassociate_non_existent_ip(self): def network_api_get_floating_ip_by_address(self, context, floating_address): floating_ips = ["10.10.10.10", "10.10.10.11"] if floating_address not in floating_ips: raise exception.FloatingIpNotFoundForAddress( address=floating_address) self.stubs.Set(network.api.API, "get_floating_ip_by_address", network_api_get_floating_ip_by_address) body = dict(removeFloatingIp=dict(address='1.1.1.1')) self.assertRaises(webob.exc.HTTPNotFound, self.manager._remove_floating_ip, self.fake_req, TEST_INST, body=body) def test_floating_ip_disassociate_wrong_instance_uuid(self): def get_instance_by_floating_ip_addr(self, context, address): if address == '10.10.10.10': return TEST_INST self.stubs.Set(network.api.API, "get_instance_id_by_floating_address", get_instance_by_floating_ip_addr) wrong_uuid = 'aaaaaaaa-ffff-ffff-ffff-aaaaaaaaaaaa' body = dict(removeFloatingIp=dict(address='10.10.10.10')) self.assertRaises(webob.exc.HTTPConflict, self.manager._remove_floating_ip, self.fake_req, wrong_uuid, body=body) def test_floating_ip_disassociate_wrong_instance_id(self): def get_instance_by_floating_ip_addr(self, context, address): if address == '10.10.10.10': return WRONG_INST self.stubs.Set(network.api.API, "get_instance_id_by_floating_address", get_instance_by_floating_ip_addr) body = dict(removeFloatingIp=dict(address='10.10.10.10')) self.assertRaises(webob.exc.HTTPConflict, 
self.manager._remove_floating_ip, self.fake_req, TEST_INST, body=body) def test_floating_ip_disassociate_auto_assigned(self): def fake_get_floating_ip_addr_auto_assigned(self, context, address): return {'id': 1, 'address': '10.10.10.10', 'pool': 'nova', 'fixed_ip_id': 10, 'auto_assigned': 1} def get_instance_by_floating_ip_addr(self, context, address): if address == '10.10.10.10': return TEST_INST def network_api_disassociate(self, context, instance, floating_address): raise exception.CannotDisassociateAutoAssignedFloatingIP() self.stubs.Set(network.api.API, "get_floating_ip_by_address", fake_get_floating_ip_addr_auto_assigned) self.stubs.Set(network.api.API, "get_instance_id_by_floating_address", get_instance_by_floating_ip_addr) self.stubs.Set(network.api.API, "disassociate_floating_ip", network_api_disassociate) body = dict(removeFloatingIp=dict(address='10.10.10.10')) self.assertRaises(webob.exc.HTTPForbidden, self.manager._remove_floating_ip, self.fake_req, TEST_INST, body=body) def test_floating_ip_disassociate_map_authorization_exc(self): def fake_get_floating_ip_addr_auto_assigned(self, context, address): return {'id': 1, 'address': '10.10.10.10', 'pool': 'nova', 'fixed_ip_id': 10, 'auto_assigned': 1} def get_instance_by_floating_ip_addr(self, context, address): if address == '10.10.10.10': return TEST_INST def network_api_disassociate(self, context, instance, address): raise exception.Forbidden() self.stubs.Set(network.api.API, "get_floating_ip_by_address", fake_get_floating_ip_addr_auto_assigned) self.stubs.Set(network.api.API, "get_instance_id_by_floating_address", get_instance_by_floating_ip_addr) self.stubs.Set(network.api.API, "disassociate_floating_ip", network_api_disassociate) body = dict(removeFloatingIp=dict(address='10.10.10.10')) self.assertRaises(webob.exc.HTTPForbidden, self.manager._remove_floating_ip, self.fake_req, TEST_INST, body=body) # these are a few bad param tests def test_bad_address_param_in_remove_floating_ip(self): body = dict(removeFloatingIp=dict(badparam='11.0.0.1')) self.assertRaises(self.validation_error, self.manager._remove_floating_ip, self.fake_req, TEST_INST, body=body) def test_missing_dict_param_in_remove_floating_ip(self): body = dict(removeFloatingIp='11.0.0.1') self.assertRaises(self.validation_error, self.manager._remove_floating_ip, self.fake_req, TEST_INST, body=body) def test_missing_dict_param_in_add_floating_ip(self): body = dict(addFloatingIp='11.0.0.1') self.assertRaises(self.validation_error, self.manager._add_floating_ip, self.fake_req, TEST_INST, body=body) def _build_floating_ip(self, address, fixed_ip): floating = objects.FloatingIP(id=1, address=address, pool='foo', fixed_ip=fixed_ip) return floating def _build_expected(self, floating_ip, fixed_ip, instance_id): return {'floating_ip': {'id': floating_ip.id, 'ip': floating_ip.address, 'pool': floating_ip.pool, 'fixed_ip': fixed_ip, 'instance_id': instance_id}} def _test_result(self, expected, actual): expected_fl = expected['floating_ip'] actual_fl = actual['floating_ip'] self.assertEqual(expected_fl, actual_fl) class ExtendedFloatingIpTestV21(test.TestCase): floating_ip = "10.10.10.10" floating_ip_2 = "10.10.10.11" floating_ips = fips_v21 def _create_floating_ips(self, floating_ips=None): """Create a floating IP object.""" if floating_ips is None: floating_ips = [self.floating_ip] elif not isinstance(floating_ips, (list, tuple)): floating_ips = [floating_ips] dict_ = {'pool': 'nova', 'host': 'fake_host'} return db.floating_ip_bulk_create( self.context, [dict(address=ip, 
**dict_) for ip in floating_ips], ) def _delete_floating_ip(self): db.floating_ip_destroy(self.context, self.floating_ip) def setUp(self): super(ExtendedFloatingIpTestV21, self).setUp() self.stubs.Set(compute.api.API, "get", compute_api_get) self.stubs.Set(network.api.API, "get_floating_ip", network_api_get_floating_ip) self.stubs.Set(network.api.API, "get_floating_ip_by_address", network_api_get_floating_ip_by_address) self.stubs.Set(network.api.API, "get_floating_ips_by_project", network_api_get_floating_ips_by_project) self.stubs.Set(network.api.API, "release_floating_ip", network_api_release) self.stubs.Set(network.api.API, "disassociate_floating_ip", network_api_disassociate) self.stubs.Set(network.api.API, "get_instance_id_by_floating_address", get_instance_by_floating_ip_addr) self.stubs.Set(compute_utils, "get_nw_info_for_instance", stub_nw_info(self)) fake_network.stub_out_nw_api_get_instance_nw_info(self) self.stub_out('nova.db.instance_get', fake_instance_get) self.context = context.get_admin_context() self._create_floating_ips() self.ext_mgr = extensions.ExtensionManager() self.ext_mgr.extensions = {} self.ext_mgr.extensions['os-floating-ips'] = True self.ext_mgr.extensions['os-extended-floating-ips'] = True self.controller = self.floating_ips.FloatingIPController() self.manager = self.floating_ips.\ FloatingIPActionController(self.ext_mgr) self.fake_req = fakes.HTTPRequest.blank('') def tearDown(self): self._delete_floating_ip() super(ExtendedFloatingIpTestV21, self).tearDown() def test_extended_floating_ip_associate_fixed(self): fixed_address = '192.168.1.101' def fake_associate_floating_ip(*args, **kwargs): self.assertEqual(fixed_address, kwargs['fixed_address']) self.stubs.Set(network.api.API, "associate_floating_ip", fake_associate_floating_ip) body = dict(addFloatingIp=dict(address=self.floating_ip, fixed_address=fixed_address)) rsp = self.manager._add_floating_ip(self.fake_req, TEST_INST, body=body) self.assertEqual(202, rsp.status_int) def test_extended_floating_ip_associate_fixed_not_allocated(self): def fake_associate_floating_ip(*args, **kwargs): pass self.stubs.Set(network.api.API, "associate_floating_ip", fake_associate_floating_ip) body = dict(addFloatingIp=dict(address=self.floating_ip, fixed_address='11.11.11.11')) ex = self.assertRaises(webob.exc.HTTPBadRequest, self.manager._add_floating_ip, self.fake_req, TEST_INST, body=body) self.assertIn("Specified fixed address not assigned to instance", ex.explanation) class FloatingIPPolicyEnforcementV21(test.NoDBTestCase): def setUp(self): super(FloatingIPPolicyEnforcementV21, self).setUp() self.controller = fips_v21.FloatingIPController() self.req = fakes.HTTPRequest.blank('') def _common_policy_check(self, func, *arg, **kwarg): rule_name = "os_compute_api:os-floating-ips" rule = {rule_name: "project:non_fake"} self.policy.set_rules(rule) exc = self.assertRaises( exception.PolicyNotAuthorized, func, *arg, **kwarg) self.assertEqual( "Policy doesn't allow %s to be performed." 
% rule_name, exc.format_message()) def test_index_policy_failed(self): self._common_policy_check(self.controller.index, self.req) def test_show_policy_failed(self): self._common_policy_check(self.controller.show, self.req, FAKE_UUID) def test_create_policy_failed(self): self._common_policy_check(self.controller.create, self.req) def test_delete_policy_failed(self): self._common_policy_check(self.controller.delete, self.req, FAKE_UUID) class FloatingIPActionPolicyEnforcementV21(test.NoDBTestCase): def setUp(self): super(FloatingIPActionPolicyEnforcementV21, self).setUp() self.controller = fips_v21.FloatingIPActionController() self.req = fakes.HTTPRequest.blank('') def _common_policy_check(self, func, *arg, **kwarg): rule_name = "os_compute_api:os-floating-ips" rule = {rule_name: "project:non_fake"} self.policy.set_rules(rule) exc = self.assertRaises( exception.PolicyNotAuthorized, func, *arg, **kwarg) self.assertEqual( "Policy doesn't allow %s to be performed." % rule_name, exc.format_message()) def test_add_policy_failed(self): body = dict(addFloatingIp=dict(address='10.10.10.11')) self._common_policy_check( self.controller._add_floating_ip, self.req, FAKE_UUID, body=body) def test_remove_policy_failed(self): body = dict(removeFloatingIp=dict(address='10.10.10.10')) self._common_policy_check( self.controller._remove_floating_ip, self.req, FAKE_UUID, body=body) class FloatingIpsDeprecationTest(test.NoDBTestCase): def setUp(self): super(FloatingIpsDeprecationTest, self).setUp() self.req = fakes.HTTPRequest.blank('', version='2.36') self.controller = fips_v21.FloatingIPController() def test_all_apis_return_not_found(self): self.assertRaises(exception.VersionNotFoundForAPIMethod, self.controller.show, self.req, fakes.FAKE_UUID) self.assertRaises(exception.VersionNotFoundForAPIMethod, self.controller.index, self.req) self.assertRaises(exception.VersionNotFoundForAPIMethod, self.controller.create, self.req, {}) self.assertRaises(exception.VersionNotFoundForAPIMethod, self.controller.delete, self.req, fakes.FAKE_UUID)
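# NOTE(editor): illustrative sketch only, not part of the upstream test suite.
# The _translate_floating_ip_view tests earlier in this file all assert the
# response shape built by _build_expected(); spelled out with hypothetical
# values (addresses as used by _build_floating_ip above) it is simply:
EXAMPLE_TRANSLATED_VIEW = {
    'floating_ip': {
        'id': 1,                     # floating_ip.id
        'ip': '10.0.0.2',            # floating_ip.address
        'pool': 'foo',               # floating_ip.pool
        'fixed_ip': '192.168.1.2',   # fixed IP address, or None when unset
        'instance_id': FAKE_UUID,    # associated instance UUID, or None
    },
}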
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import datetime as dt import calendar import tg from pylons import tmpl_context as c from webob import Request, exc from bson import ObjectId from ming.orm.ormsession import ThreadLocalORMSession from nose.tools import ( assert_equals, assert_equal, assert_raises, assert_is_none, assert_is, assert_true, assert_false, ) from mock import Mock, MagicMock, patch from allura import model as M from allura.app import Application from allura.lib import plugin from allura.lib import phone from allura.lib import helpers as h from allura.lib.utils import TruthyCallable from allura.lib.plugin import ProjectRegistrationProvider from allura.lib.plugin import ThemeProvider from allura.lib.exceptions import ProjectConflict, ProjectShortnameInvalid from allura.tests.decorators import audits from alluratest.controller import setup_basic_test, setup_global_objects class TestProjectRegistrationProvider(object): def setUp(self): self.provider = ProjectRegistrationProvider() @patch('allura.lib.security.has_access') def test_validate_project_15char_user(self, has_access): has_access.return_value = TruthyCallable(lambda: True) nbhd = M.Neighborhood() self.provider.validate_project( neighborhood=nbhd, shortname='u/' + ('a' * 15), project_name='15 char username', user=MagicMock(), user_project=True, private_project=False, ) @patch('allura.model.Project') def test_shortname_validator(self, Project): Project.query.get.return_value = None nbhd = Mock() v = self.provider.shortname_validator.to_python v('thisislegit', neighborhood=nbhd) assert_raises(ProjectShortnameInvalid, v, 'not valid', neighborhood=nbhd) assert_raises(ProjectShortnameInvalid, v, 'this-is-valid-but-too-long', neighborhood=nbhd) assert_raises(ProjectShortnameInvalid, v, 'this is invalid and too long', neighborhood=nbhd) assert_raises(ProjectShortnameInvalid, v, 'end-dash-', neighborhood=nbhd) Project.query.get.return_value = Mock() assert_raises(ProjectConflict, v, 'thisislegit', neighborhood=nbhd) class TestProjectRegistrationProviderParseProjectFromUrl(object): def setUp(self): setup_basic_test() ThreadLocalORMSession.close_all() setup_global_objects() self.provider = ProjectRegistrationProvider() self.parse = self.provider.project_from_url def test_empty_url(self): assert_equal((None, u'Empty url'), self.parse(None)) assert_equal((None, u'Empty url'), self.parse('')) assert_equal((None, u'Empty url'), self.parse('/')) def test_neighborhood_not_found(self): assert_equal((None, u'Neighborhood not found'), self.parse('/nbhd/project')) def test_project_not_found(self): assert_equal((None, u'Project not found'), self.parse('/p/project')) assert_equal((None, u'Project not found'), self.parse('project')) def test_ok_full(self): p = M.Project.query.get(shortname='test') adobe = 
M.Project.query.get(shortname='adobe-1') assert_equal((p, None), self.parse('p/test')) assert_equal((p, None), self.parse('/p/test')) assert_equal((p, None), self.parse('/p/test/tickets/1')) assert_equal((p, None), self.parse('http://localhost:8080/p/test/tickets/1')) assert_equal((adobe, None), self.parse('/adobe/adobe-1/')) def test_only_shortname_multiple_projects_matched(self): adobe_n = M.Neighborhood.query.get(url_prefix='/adobe/') M.Project(shortname='test', neighborhood_id=adobe_n._id) ThreadLocalORMSession.flush_all() assert_equal((None, u'Too many matches for project: 2'), self.parse('test')) def test_only_shortname_ok(self): p = M.Project.query.get(shortname='test') adobe = M.Project.query.get(shortname='adobe-1') assert_equal((p, None), self.parse('test')) assert_equal((adobe, None), self.parse('adobe-1')) def test_subproject(self): p = M.Project.query.get(shortname='test/sub1') assert_equal((p, None), self.parse('p/test/sub1')) assert_equal((p, None), self.parse('p/test/sub1/something')) assert_equal((p, None), self.parse('http://localhost:8080/p/test/sub1')) assert_equal((p, None), self.parse('http://localhost:8080/p/test/sub1/something')) def test_subproject_not_found(self): p = M.Project.query.get(shortname='test') assert_equal((p, None), self.parse('http://localhost:8080/p/test/not-a-sub')) class UserMock(object): def __init__(self): self.tool_data = {} self._projects = [] def get_tool_data(self, tool, key): return self.tool_data.get(tool, {}).get(key, None) def set_tool_data(self, tool, **kw): d = self.tool_data.setdefault(tool, {}) d.update(kw) def set_projects(self, projects): self._projects = projects def my_projects_by_role_name(self, role): return self._projects class TestProjectRegistrationProviderPhoneVerification(object): def setUp(self): self.p = ProjectRegistrationProvider() self.user = UserMock() self.nbhd = MagicMock() def test_phone_verified_disabled(self): with h.push_config(tg.config, **{'project.verify_phone': 'false'}): assert_true(self.p.phone_verified(self.user, self.nbhd)) @patch.object(plugin.security, 'has_access', autospec=True) def test_phone_verified_admin(self, has_access): has_access.return_value.return_value = True with h.push_config(tg.config, **{'project.verify_phone': 'true'}): assert_true(self.p.phone_verified(self.user, self.nbhd)) @patch.object(plugin.security, 'has_access', autospec=True) def test_phone_verified_project_admin(self, has_access): has_access.return_value.return_value = False with h.push_config(tg.config, **{'project.verify_phone': 'true'}): self.user.set_projects([Mock()]) assert_false(self.p.phone_verified(self.user, self.nbhd)) self.user.set_projects([Mock(neighborhood_id=self.nbhd._id)]) assert_true(self.p.phone_verified(self.user, self.nbhd)) @patch.object(plugin.security, 'has_access', autospec=True) def test_phone_verified(self, has_access): has_access.return_value.return_value = False with h.push_config(tg.config, **{'project.verify_phone': 'true'}): assert_false(self.p.phone_verified(self.user, self.nbhd)) self.user.set_tool_data('phone_verification', number_hash='123') assert_true(self.p.phone_verified(self.user, self.nbhd)) @patch.object(plugin, 'g') def test_verify_phone_disabled(self, g): g.phone_service = Mock(spec=phone.PhoneService) with h.push_config(tg.config, **{'project.verify_phone': 'false'}): result = self.p.verify_phone(self.user, '12345') assert_false(g.phone_service.verify.called) assert_equal(result, {'status': 'ok'}) @patch.object(plugin, 'g') def test_verify_phone(self, g): g.phone_service = 
Mock(spec=phone.PhoneService) with h.push_config(tg.config, **{'project.verify_phone': 'true'}): result = self.p.verify_phone(self.user, '123 45 45') g.phone_service.verify.assert_called_once_with('1234545') assert_equal(result, g.phone_service.verify.return_value) @patch.object(plugin, 'g') def test_check_phone_verification_disabled(self, g): g.phone_service = Mock(spec=phone.PhoneService) with h.push_config(tg.config, **{'project.verify_phone': 'false'}): result = self.p.check_phone_verification( self.user, 'request-id', '1111', 'hash') assert_false(g.phone_service.check.called) assert_equal(result, {'status': 'ok'}) @patch.object(plugin.h, 'auditlog_user', autospec=True) @patch.object(plugin, 'g') def test_check_phone_verification_fail(self, g, audit): g.phone_service = Mock(spec=phone.PhoneService) with h.push_config(tg.config, **{'project.verify_phone': 'true'}): result = self.p.check_phone_verification( self.user, 'request-id', '1111', 'hash') g.phone_service.check.assert_called_once_with( 'request-id', '1111') assert_equal(result, g.phone_service.check.return_value) assert_equal( self.user.get_tool_data('phone_verification', 'number_hash'), None) audit.assert_called_once_with( 'Phone verification failed. Hash: hash', user=self.user) @patch.object(plugin.h, 'auditlog_user', autospec=True) @patch.object(plugin, 'g') def test_check_phone_verification_success(self, g, audit): g.phone_service = Mock(spec=phone.PhoneService) with h.push_config(tg.config, **{'project.verify_phone': 'true'}): g.phone_service.check.return_value = {'status': 'ok'} result = self.p.check_phone_verification( self.user, 'request-id', '1111', 'hash') g.phone_service.check.assert_called_once_with( 'request-id', '1111') assert_equal( self.user.get_tool_data('phone_verification', 'number_hash'), 'hash') audit.assert_called_once_with( 'Phone verification succeeded. 
Hash: hash', user=self.user) class TestThemeProvider(object): @patch('allura.lib.plugin.c', MagicMock()) @patch('allura.model.notification.SiteNotification') @patch('pylons.response') @patch('pylons.request') def test_get_site_notification_no_note(self, request, response, SiteNotification): SiteNotification.current.return_value = None assert_is_none(ThemeProvider().get_site_notification()) assert not response.set_cookie.called @patch('allura.lib.plugin.c', MagicMock()) @patch('allura.model.notification.SiteNotification') @patch('pylons.response') @patch('pylons.request') def test_get_site_notification_closed(self, request, response, SiteNotification): SiteNotification.current.return_value._id = 'deadbeef' SiteNotification.current.return_value.user_role = None SiteNotification.current.return_value.page_regex = None SiteNotification.current.return_value.page_tool_type = None request.cookies = {'site-notification': 'deadbeef-1-true'} assert_is_none(ThemeProvider().get_site_notification()) assert not response.set_cookie.called @patch('allura.lib.plugin.c', MagicMock()) @patch('allura.model.notification.SiteNotification') @patch('pylons.response') @patch('pylons.request') def test_get_site_notification_impressions_over(self, request, response, SiteNotification): note = SiteNotification.current.return_value note._id = 'deadbeef' note.impressions = 2 note.user_role = None note.page_regex = None note.page_tool_type = None request.cookies = {'site-notification': 'deadbeef-3-false'} assert_is_none(ThemeProvider().get_site_notification()) assert not response.set_cookie.called @patch('allura.lib.plugin.c', MagicMock()) @patch('allura.model.notification.SiteNotification') @patch('pylons.response') @patch('pylons.request') def test_get_site_notification_impressions_under(self, request, response, SiteNotification): note = SiteNotification.current.return_value note._id = 'deadbeef' note.impressions = 2 note.user_role = None note.page_regex = None note.page_tool_type = None request.cookies = {'site-notification': 'deadbeef-1-false'} assert_is(ThemeProvider().get_site_notification(), note) response.set_cookie.assert_called_once_with( 'site-notification', 'deadbeef-2-False', max_age=dt.timedelta(days=365)) @patch('allura.lib.plugin.c', MagicMock()) @patch('allura.model.notification.SiteNotification') @patch('pylons.response') @patch('pylons.request') def test_get_site_notification_impressions_persistent(self, request, response, SiteNotification): note = SiteNotification.current.return_value note._id = 'deadbeef' note.impressions = 0 note.user_role = None note.page_regex = None note.page_tool_type = None request.cookies = {'site-notification': 'deadbeef-1000-false'} assert_is(ThemeProvider().get_site_notification(), note) @patch('allura.lib.plugin.c', MagicMock()) @patch('allura.model.notification.SiteNotification') @patch('pylons.response') @patch('pylons.request') def test_get_site_notification_new_notification(self, request, response, SiteNotification): note = SiteNotification.current.return_value note._id = 'deadbeef' note.impressions = 1 note.user_role = None note.page_regex = None note.page_tool_type = None request.cookies = {'site-notification': '0ddba11-1000-true'} assert_is(ThemeProvider().get_site_notification(), note) response.set_cookie.assert_called_once_with( 'site-notification', 'deadbeef-1-False', max_age=dt.timedelta(days=365)) @patch('allura.lib.plugin.c', MagicMock()) @patch('allura.model.notification.SiteNotification') @patch('pylons.response') @patch('pylons.request') def 
test_get_site_notification_no_cookie(self, request, response, SiteNotification): note = SiteNotification.current.return_value note._id = 'deadbeef' note.impressions = 0 note.user_role = None note.page_regex = None note.page_tool_type = None request.cookies = {} assert_is(ThemeProvider().get_site_notification(), note) response.set_cookie.assert_called_once_with( 'site-notification', 'deadbeef-1-False', max_age=dt.timedelta(days=365)) @patch('allura.lib.plugin.c', MagicMock()) @patch('allura.model.notification.SiteNotification') @patch('pylons.response') @patch('pylons.request') def test_get_site_notification_bad_cookie(self, request, response, SiteNotification): note = SiteNotification.current.return_value note._id = 'deadbeef' note.impressions = 0 note.user_role = None note.page_regex = None note.page_tool_type = None request.cookies = {'site-notification': 'deadbeef-1000-true-bad'} assert_is(ThemeProvider().get_site_notification(), note) response.set_cookie.assert_called_once_with( 'site-notification', 'deadbeef-1-False', max_age=dt.timedelta(days=365)) @patch('allura.app.g') @patch('allura.lib.plugin.g') def test_app_icon_str(self, plugin_g, app_g): class TestApp(Application): icons = { 24: 'images/testapp_24.png', } plugin_g.entry_points = {'tool': {'testapp': TestApp}} assert_equals(ThemeProvider().app_icon_url('testapp', 24), app_g.theme_href.return_value) app_g.theme_href.assert_called_with('images/testapp_24.png') @patch('allura.lib.plugin.g') def test_app_icon_str_invalid(self, g): g.entry_points = {'tool': {'testapp': Mock()}} assert_equals(ThemeProvider().app_icon_url('invalid', 24), None) @patch('allura.app.g') def test_app_icon_app(self, g): class TestApp(Application): icons = { 24: 'images/testapp_24.png', } app = TestApp(None, None) assert_equals(ThemeProvider().app_icon_url(app, 24), g.theme_href.return_value) g.theme_href.assert_called_with('images/testapp_24.png') @patch('allura.lib.plugin.c') @patch('allura.model.notification.SiteNotification') @patch('pylons.response', MagicMock()) @patch('pylons.request', MagicMock()) def test_get_site_notification_with_role(self, SiteNotification, c): note = SiteNotification.current.return_value note.user_role = 'Test' note.page_regex = None note.page_tool_type = None projects = c.user.my_projects_by_role_name c.user.is_anonymous.return_value = True assert_is(ThemeProvider().get_site_notification(), None) c.user.is_anonymous.return_value = False projects.return_value = [] assert_is(ThemeProvider().get_site_notification(), None) projects.return_value = [Mock()] projects.return_value[0].is_user_project = True assert_is(ThemeProvider().get_site_notification(), None) projects.return_value[0].is_user_project = False assert_is(ThemeProvider().get_site_notification(), note) projects.projects.return_value = [Mock(), Mock()] assert_is(ThemeProvider().get_site_notification(), note) @patch('allura.lib.plugin.c', MagicMock()) @patch('allura.model.notification.SiteNotification') @patch('pylons.response', MagicMock()) @patch('pylons.request', MagicMock()) def test_get_site_notification_without_role(self, SiteNotification): note = SiteNotification.current.return_value note.user_role = None note.page_regex = None note.page_tool_type = None assert_is(ThemeProvider().get_site_notification(), note) @patch('allura.lib.plugin.c', MagicMock()) @patch('re.search') @patch('allura.model.notification.SiteNotification') @patch('pylons.response', MagicMock()) @patch('pylons.request', MagicMock()) def test_get_site_notification_with_page_regex(self, 
SiteNotification, search): note = SiteNotification.current.return_value note.user_role = None note.page_regex = 'test' note.page_tool_type = None search.return_value = True assert_is(ThemeProvider().get_site_notification(), note) search.return_value = None assert_is(ThemeProvider().get_site_notification(), None) @patch('allura.lib.plugin.c') @patch('allura.model.notification.SiteNotification') @patch('pylons.response', MagicMock()) @patch('pylons.request', MagicMock()) def test_get_site_notification_with_page_tool_type(self, SiteNotification, c): note = SiteNotification.current.return_value note.user_role = None note.page_regex = None c.app = Mock() note.page_tool_type.lower.return_value = 'test1' c.app.config.tool_name.lower.return_value = 'test1' assert_is(ThemeProvider().get_site_notification(), note) c.app.config.tool_name.lower.return_value = 'test2' assert_is(ThemeProvider().get_site_notification(), None) c.app = None assert_is(ThemeProvider().get_site_notification(), None) @patch('allura.lib.plugin.c') @patch('pylons.request') @patch('allura.model.notification.SiteNotification') @patch('pylons.response', MagicMock()) @patch('pylons.request', MagicMock()) def test_get_site_notification_with_page_tool_type_page_regex(self, SiteNotification, request, c): note = SiteNotification.current.return_value note.user_role = None note.page_regex = 'test' c.app = Mock() note.page_tool_type.lower.return_value = 'test1' request.path_qs = 'ttt' c.app.config.tool_name.lower.return_value = 'test2' assert_is(ThemeProvider().get_site_notification(), None) request.path_qs = 'test' assert_is(ThemeProvider().get_site_notification(), None) request.path_qs = 'ttt' c.app.config.tool_name.lower.return_value = 'test1' assert_is(ThemeProvider().get_site_notification(), None) request.path_qs = 'test' assert_is(ThemeProvider().get_site_notification(), note) c.app = None assert_is(ThemeProvider().get_site_notification(), None) request.path_qs = 'ttt' assert_is(ThemeProvider().get_site_notification(), None) @patch('allura.model.notification.SiteNotification') def test_get__site_notification(self, SiteNotification): note = SiteNotification.current.return_value note._id = 'test_id' note.user_role = None note.page_regex = None note.page_tool_type = None get_note = ThemeProvider()._get_site_notification() assert isinstance(get_note, tuple) assert len(get_note) is 2 assert get_note[0] is note assert get_note[1] == 'test_id-1-False' @patch('allura.model.notification.SiteNotification') @patch('pylons.request') def test_get_site_notifications_with_api_cookie(self, request, SiteNotification): note = SiteNotification.current.return_value note._id = 'test_id' note.user_role = None note.page_regex = None note.page_tool_type = None request.cookies = {} get_note = ThemeProvider()._get_site_notification( site_notification_cookie_value='test_id-1-False' ) assert get_note[0] is note assert get_note[1] == 'test_id-2-False' class TestLocalAuthenticationProvider(object): def setUp(self): setup_basic_test() ThreadLocalORMSession.close_all() setup_global_objects() self.provider = plugin.LocalAuthenticationProvider(Request.blank('/')) def test_password_encoder(self): # Verify salt ep = self.provider._encode_password assert ep('test_pass') != ep('test_pass') assert ep('test_pass', '0000') == ep('test_pass', '0000') def test_set_password_with_old_password(self): user = Mock() user.__ming__ = Mock() self.provider.validate_password = lambda u, p: False assert_raises( exc.HTTPUnauthorized, self.provider.set_password, user, 'old', 'new') 
assert_equal(user._encode_password.call_count, 0) self.provider.validate_password = lambda u, p: True self.provider.set_password(user, 'old', 'new') user._encode_password.assert_called_once_with('new') @patch('allura.lib.plugin.datetime', autospec=True) def test_set_password_sets_last_updated(self, dt_mock): user = Mock() user.__ming__ = Mock() user.last_password_updated = None self.provider.set_password(user, None, 'new') assert_equal(user.last_password_updated, dt_mock.utcnow.return_value) def test_get_last_password_updated_not_set(self): user = Mock() user._id = ObjectId() user.last_password_updated = None upd = self.provider.get_last_password_updated(user) gen_time = dt.datetime.utcfromtimestamp( calendar.timegm(user._id.generation_time.utctimetuple())) assert_equal(upd, gen_time) def test_get_last_password_updated(self): user = Mock() user.last_password_updated = dt.datetime(2014, 06, 04, 13, 13, 13) upd = self.provider.get_last_password_updated(user) assert_equal(upd, user.last_password_updated) def test_enable_user(self): user = Mock(disabled=True, __ming__=Mock(), is_anonymous=lambda: False, _id=ObjectId()) c.user = Mock(username='test-admin') with audits('Account enabled', user=True, actor='test-admin'): self.provider.enable_user(user) ThreadLocalORMSession.flush_all() assert_equal(user.disabled, False) def test_disable_user(self): user = Mock(disabled=False, __ming__=Mock(), is_anonymous=lambda: False, _id=ObjectId()) c.user = Mock(username='test-admin') with audits('Account disabled', user=True, actor='test-admin'): self.provider.disable_user(user) ThreadLocalORMSession.flush_all() assert_equal(user.disabled, True) class TestAuthenticationProvider(object): def setUp(self): setup_basic_test() self.provider = plugin.AuthenticationProvider(Request.blank('/')) self.pwd_updated = dt.datetime.utcnow() - dt.timedelta(days=100) self.provider.get_last_password_updated = lambda u: self.pwd_updated self.user = Mock() def test_is_password_expired_disabled(self): assert_false(self.provider.is_password_expired(self.user)) def test_is_password_expired_days(self): with h.push_config(tg.config, **{'auth.pwdexpire.days': '180'}): assert_false(self.provider.is_password_expired(self.user)) with h.push_config(tg.config, **{'auth.pwdexpire.days': '90'}): assert_true(self.provider.is_password_expired(self.user)) def test_is_password_expired_before(self): before = dt.datetime.utcnow() - dt.timedelta(days=180) before = calendar.timegm(before.timetuple()) with h.push_config(tg.config, **{'auth.pwdexpire.before': str(before)}): assert_false(self.provider.is_password_expired(self.user)) before = dt.datetime.utcnow() - dt.timedelta(days=1) before = calendar.timegm(before.timetuple()) with h.push_config(tg.config, **{'auth.pwdexpire.before': str(before)}): assert_true(self.provider.is_password_expired(self.user))
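# NOTE(editor): minimal sketch, not Allura's actual implementation. It only
# illustrates the behaviour pinned down by the password-expiry tests above:
# a password counts as expired when it was last updated more than
# 'auth.pwdexpire.days' days ago, or before the unix timestamp given in
# 'auth.pwdexpire.before'. The helper name is hypothetical; it relies on the
# tg, dt and calendar imports already present at the top of this module.
def _example_is_password_expired(last_updated):
    days = int(tg.config.get('auth.pwdexpire.days', 0))
    before = int(tg.config.get('auth.pwdexpire.before', 0))
    if days and last_updated < dt.datetime.utcnow() - dt.timedelta(days=days):
        return True
    if before and calendar.timegm(last_updated.timetuple()) < before:
        return True
    return False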
#!/usr/bin/env python """ Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import functools import os from resource_management.libraries.functions import conf_select from resource_management.libraries.functions import format from resource_management.libraries.functions import get_kinit_path from resource_management.libraries.functions import stack_select from resource_management.libraries.functions.default import default from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources from resource_management.libraries.resources.hdfs_resource import HdfsResource from resource_management.libraries.script import Script from resource_management.libraries.functions.version import format_stack_version from resource_management.libraries.functions.stack_features import check_stack_feature from resource_management.libraries.functions import StackFeature import status_params # server configurations config = Script.get_config() tmp_dir = Script.get_tmp_dir() hdp_version = default("/commandParams/version", None) hostname = config['hostname'] metron_home = status_params.metron_home metron_apps_hdfs_dir = config['configurations']['metron-env']['metron_apps_hdfs_dir'] parsers = status_params.parsers parser_error_topic = config['configurations']['metron-parsers-env']['parser_error_topic'] geoip_hdfs_dir = metron_apps_hdfs_dir + "/geo/default/" asn_hdfs_dir = metron_apps_hdfs_dir + "/asn/default/" hbase_coprocessor_local_dir = format("{metron_home}/coprocessor") hbase_coprocessor_hdfs_dir = metron_apps_hdfs_dir + "/coprocessor" metron_user = status_params.metron_user metron_group = config['configurations']['metron-env']['metron_group'] metron_log_dir = config['configurations']['metron-env']['metron_log_dir'] metron_pid_dir = config['configurations']['metron-env']['metron_pid_dir'] metron_rest_host = status_params.metron_rest_host metron_rest_port = status_params.metron_rest_port metron_management_ui_host = status_params.metron_management_ui_host metron_management_ui_port = status_params.metron_management_ui_port metron_management_ui_path = metron_home + '/web/management-ui/' metron_alerts_ui_host = status_params.metron_alerts_ui_host metron_alerts_ui_port = status_params.metron_alerts_ui_port metron_alerts_ui_path = metron_home + '/web/alerts-ui/' metron_jvm_flags = config['configurations']['metron-rest-env']['metron_jvm_flags'] # Construct the profiles as a temp variable first. 
Only the first time it's set will carry through metron_spring_profiles_active = config['configurations']['metron-rest-env']['metron_spring_profiles_active'] metron_ldap_enabled = config['configurations']['metron-security-env']['metron.ldap.enabled'] if metron_ldap_enabled: if not len(metron_spring_profiles_active) == 0: metron_spring_profiles_active += ',ldap' else: metron_spring_profiles_active = 'ldap' metron_jdbc_driver = config['configurations']['metron-rest-env']['metron_jdbc_driver'] metron_jdbc_url = config['configurations']['metron-rest-env']['metron_jdbc_url'] metron_jdbc_username = config['configurations']['metron-rest-env']['metron_jdbc_username'] metron_jdbc_password = config['configurations']['metron-rest-env']['metron_jdbc_password'] metron_jdbc_platform = config['configurations']['metron-rest-env']['metron_jdbc_platform'] metron_jdbc_client_path = config['configurations']['metron-rest-env']['metron_jdbc_client_path'] metron_spring_options = config['configurations']['metron-rest-env']['metron_spring_options'] metron_escalation_topic = config['configurations']['metron-rest-env']['metron_escalation_topic'] metron_config_path = metron_home + '/config' metron_zookeeper_config_dir = status_params.metron_zookeeper_config_dir metron_zookeeper_config_path = status_params.metron_zookeeper_config_path # indicates if zk_load_configs.sh --mode PUSH has been executed zk_configured_flag_file = status_params.zk_configured_flag_file parsers_configured_flag_file = status_params.parsers_configured_flag_file parsers_acl_configured_flag_file = status_params.parsers_acl_configured_flag_file enrichment_kafka_configured_flag_file = status_params.enrichment_kafka_configured_flag_file enrichment_kafka_acl_configured_flag_file = status_params.enrichment_kafka_acl_configured_flag_file enrichment_hbase_configured_flag_file = status_params.enrichment_hbase_configured_flag_file enrichment_hbase_coprocessor_configured_flag_file = status_params.enrichment_hbase_coprocessor_configured_flag_file enrichment_hbase_acl_configured_flag_file = status_params.enrichment_hbase_acl_configured_flag_file enrichment_maxmind_configured_flag_file = status_params.enrichment_maxmind_configured_flag_file indexing_configured_flag_file = status_params.indexing_configured_flag_file indexing_acl_configured_flag_file = status_params.indexing_acl_configured_flag_file indexing_hbase_configured_flag_file = status_params.indexing_hbase_configured_flag_file indexing_hbase_acl_configured_flag_file = status_params.indexing_hbase_acl_configured_flag_file indexing_hdfs_perm_configured_flag_file = status_params.indexing_hdfs_perm_configured_flag_file elasticsearch_template_installed_flag_file = status_params.elasticsearch_template_installed_flag_file solr_schema_installed_flag_file = status_params.solr_schema_installed_flag_file rest_kafka_configured_flag_file = status_params.rest_kafka_configured_flag_file rest_kafka_acl_configured_flag_file = status_params.rest_kafka_acl_configured_flag_file rest_hbase_configured_flag_file = status_params.rest_hbase_configured_flag_file rest_hbase_acl_configured_flag_file = status_params.rest_hbase_acl_configured_flag_file metron_knox_installed_flag_file = status_params.metron_knox_installed_flag_file global_properties_template = config['configurations']['metron-env']['elasticsearch-properties'] # Elasticsearch hosts and port management es_cluster_name = config['configurations']['metron-env']['es_cluster_name'] es_hosts = config['configurations']['metron-env']['es_hosts'] es_host_list = 
es_hosts.split(",") es_http_port = config['configurations']['metron-env']['es_http_port'] es_url = ",".join([host + ":" + es_http_port for host in es_host_list]) es_http_url = es_host_list[0] + ":" + es_http_port es_date_format = config['configurations']['metron-env']['es_date_format'] # hadoop params stack_root = Script.get_stack_root() # This is the cluster group named 'hadoop'. Its membership is the stack process user ids not individual users. # The config name 'user_group' is out of our control and a bit misleading, so it is renamed to 'hadoop_group'. hadoop_group = config['configurations']['cluster-env']['user_group'] hadoop_home_dir = stack_select.get_hadoop_dir("home") hadoop_bin_dir = stack_select.get_hadoop_dir("bin") hadoop_conf_dir = conf_select.get_hadoop_conf_dir() kafka_home = os.path.join(stack_root, "current", "kafka-broker") kafka_bin_dir = os.path.join(kafka_home, "bin") # zookeeper zk_hosts = default("/clusterHostInfo/zookeeper_hosts", []) has_zk_host = not len(zk_hosts) == 0 zookeeper_quorum = None if has_zk_host: if 'zoo.cfg' in config['configurations'] and 'clientPort' in config['configurations']['zoo.cfg']: zookeeper_clientPort = config['configurations']['zoo.cfg']['clientPort'] else: zookeeper_clientPort = '2181' zookeeper_quorum = (':' + zookeeper_clientPort + ',').join(config['clusterHostInfo']['zookeeper_hosts']) # last port config zookeeper_quorum += ':' + zookeeper_clientPort # Solr params solr_version = '6.6.2' solr_home = '/var/solr/solr-' + solr_version solr_zookeeper_url = format(format(config['configurations']['metron-env']['solr_zookeeper_url'])) solr_user = config['configurations']['solr-config-env']['solr_config_user'] solr_principal_name = config['configurations']['solr-config-env']['solr_principal_name'] solr_keytab_path = config['configurations']['solr-config-env']['solr_keytab_path'] # HDFS hdfs_url = status_params.hdfs_url # Storm storm_rest_addr = status_params.storm_rest_addr # Zeppelin zeppelin_server_url = status_params.zeppelin_server_url # Kafka kafka_hosts = default("/clusterHostInfo/kafka_broker_hosts", []) has_kafka_host = not len(kafka_hosts) == 0 kafka_brokers = None if has_kafka_host: if 'port' in config['configurations']['kafka-broker']: kafka_broker_port = config['configurations']['kafka-broker']['port'] else: kafka_broker_port = '6667' kafka_brokers = (':' + kafka_broker_port + ',').join(config['clusterHostInfo']['kafka_broker_hosts']) kafka_brokers += ':' + kafka_broker_port # the double "format" is not an error - we are pulling in a jinja-templated param. 
This is a bit of a hack, but works # well enough until we find a better way via Ambari metron_temp_grok_path = format(format(config['configurations']['metron-rest-env']['metron_temp_grok_path'])) metron_topic_retention = config['configurations']['metron-env']['metron_topic_retention'] local_grok_patterns_dir = format("{metron_home}/patterns") hdfs_grok_patterns_dir = format("{metron_apps_hdfs_dir}/patterns") # for create_hdfs_directory security_enabled = config['configurations']['cluster-env']['security_enabled'] hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab'] hdfs_user = config['configurations']['hadoop-env']['hdfs_user'] hdfs_principal_name = config['configurations']['hadoop-env']['hdfs_principal_name'] smokeuser_principal = config['configurations']['cluster-env']['smokeuser_principal_name'] kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None)) hdfs_site = config['configurations']['hdfs-site'] default_fs = config['configurations']['core-site']['fs.defaultFS'] dfs_type = default("/commandParams/dfs_type", "") # create partial functions with common arguments for every HdfsResource call # to create/delete hdfs directory/file/copyfromlocal we need to call params.HdfsResource in code HdfsResource = functools.partial( HdfsResource, user=hdfs_user, hdfs_resource_ignore_file="/var/lib/ambari-agent/data/.hdfs_resource_ignore", security_enabled=security_enabled, keytab=hdfs_user_keytab, kinit_path_local=kinit_path_local, hadoop_bin_dir=hadoop_bin_dir, hadoop_conf_dir=hadoop_conf_dir, principal_name=hdfs_principal_name, hdfs_site=hdfs_site, default_fs=default_fs, immutable_paths=get_not_managed_resources(), dfs_type=dfs_type ) # Metron HBase configuration enrichment_hbase_provider_impl = 'org.apache.metron.hbase.HTableProvider' enrichment_hbase_table = status_params.enrichment_hbase_table enrichment_hbase_cf = status_params.enrichment_hbase_cf # coprocessor config for enrichment list enrichment_list_hbase_provider_impl = status_params.enrichment_list_hbase_provider_impl enrichment_list_hbase_coprocessor_impl = status_params.enrichment_list_hbase_coprocessor_impl enrichment_list_hbase_table = status_params.enrichment_list_hbase_table enrichment_list_hbase_cf = status_params.enrichment_list_hbase_cf update_hbase_table = status_params.update_hbase_table update_hbase_cf = status_params.update_hbase_cf threatintel_hbase_table = status_params.threatintel_hbase_table threatintel_hbase_cf = status_params.threatintel_hbase_cf # Kafka Topics ambari_kafka_service_check_topic = 'ambari_kafka_service_check' consumer_offsets_topic = '__consumer_offsets' # ES Templates bro_index_path = tmp_dir + "/bro_index.template" snort_index_path = tmp_dir + "/snort_index.template" yaf_index_path = tmp_dir + "/yaf_index.template" error_index_path = tmp_dir + "/error_index.template" meta_index_path = tmp_dir + "/metaalert_index.template" # Solr Schemas bro_schema_path = metron_home + "/config/schema/bro" snort_schema_path = metron_home + "/config/schema/snort" yaf_schema_path = metron_home + "/config/schema/yaf" error_schema_path = metron_home + "/config/schema/error" meta_schema_path = metron_home + "/config/schema/metaalert" # Zeppelin Notebooks metron_config_zeppelin_path = format("{metron_config_path}/zeppelin") zeppelin_shiro_ini_content = status_params.zeppelin_shiro_ini_content # kafka_security kafka_security_protocol = config['configurations']['kafka-broker'].get('security.inter.broker.protocol', 'PLAINTEXT') kafka_user = 
config['configurations']['kafka-env']['kafka_user'] storm_user = config['configurations']['storm-env']['storm_user'] # HBase user table creation and ACLs hbase_user = config['configurations']['hbase-env']['hbase_user'] # Security security_enabled = status_params.security_enabled client_jaas_path = metron_home + '/client_jaas.conf' client_jaas_arg = '-Djava.security.auth.login.config=' + metron_home + '/client_jaas.conf' enrichment_topology_worker_childopts = client_jaas_arg if security_enabled else '' profiler_topology_worker_childopts = client_jaas_arg if security_enabled else '' indexing_topology_worker_childopts = client_jaas_arg if security_enabled else '' pcap_topology_worker_childopts = client_jaas_arg if security_enabled else '' metron_jvm_flags += (' ' + client_jaas_arg) if security_enabled else '' topology_auto_credentials = config['configurations']['storm-site'].get('nimbus.credential.renewers.classes', []) # Needed for storm.config, because it needs Java String topology_auto_credentials_double_quotes = str(topology_auto_credentials).replace("'", '"') if security_enabled: hostname_lowercase = config['hostname'].lower() metron_principal_name = status_params.metron_principal_name metron_keytab_path = status_params.metron_keytab_path kinit_path_local = status_params.kinit_path_local hbase_principal_name = config['configurations']['hbase-env']['hbase_principal_name'] hbase_keytab_path = config['configurations']['hbase-env']['hbase_user_keytab'] kafka_principal_raw = config['configurations']['kafka-env']['kafka_principal_name'] kafka_principal_name = kafka_principal_raw.replace('_HOST', hostname_lowercase) kafka_keytab_path = config['configurations']['kafka-env']['kafka_keytab'] metron_client_jaas_conf_template = config['configurations']['metron-client-jaas-conf']['content'] nimbus_seeds = config['configurations']['storm-site']['nimbus.seeds'] # Check whether Solr mpack is installed if 'solr-config-env' in config['configurations']: solr_principal_name = solr_principal_name.replace('_HOST', hostname_lowercase) # LDAP metron_ldap_url = config['configurations']['metron-security-env']['metron.ldap.url'] metron_ldap_userdn = config['configurations']['metron-security-env']['metron.ldap.bind.dn'] metron_ldap_password = config['configurations']['metron-security-env']['metron.ldap.bind.password'] metron_ldap_user_pattern = config['configurations']['metron-security-env']['metron.ldap.user.dnpattern'] metron_ldap_user_password = config['configurations']['metron-security-env']['metron.ldap.user.password'] metron_ldap_user_dnbase = config['configurations']['metron-security-env']['metron.ldap.user.basedn'] metron_ldap_user_searchbase = config['configurations']['metron-security-env']['metron.ldap.user.searchbase'] metron_ldap_user_searchfilter = config['configurations']['metron-security-env']['metron.ldap.user.searchfilter'] metron_ldap_group_searchbase = config['configurations']['metron-security-env']['metron.ldap.group.searchbase'] metron_ldap_group_searchfilter = config['configurations']['metron-security-env']['metron.ldap.group.searchfilter'] metron_ldap_group_role = config['configurations']['metron-security-env']['metron.ldap.group.roleattribute'] metron_ldap_ssl_truststore = config['configurations']['metron-security-env']['metron.ldap.ssl.truststore'] metron_ldap_ssl_truststore_password = config['configurations']['metron-security-env']['metron.ldap.ssl.truststore.password'] # Roles metron_user_role = config['configurations']['metron-security-env']['metron_user_role'] metron_admin_role =
config['configurations']['metron-security-env']['metron_admin_role'] # REST metron_rest_pid_dir = config['configurations']['metron-rest-env']['metron_rest_pid_dir'] metron_rest_pid = 'metron-rest.pid' metron_indexing_classpath = config['configurations']['metron-rest-env']['metron_indexing_classpath'] metron_rest_classpath = config['configurations']['metron-rest-env']['metron_rest_classpath'] metron_sysconfig = config['configurations']['metron-rest-env']['metron_sysconfig'] user_settings_hbase_table = status_params.user_settings_hbase_table user_settings_hbase_cf = status_params.user_settings_hbase_cf source_type_field = config['configurations']['metron-rest-env']['source_type_field'] threat_triage_score_field = config['configurations']['metron-rest-env']['threat_triage_score_field'] # Enrichment metron_enrichment_topology = status_params.metron_enrichment_topology geoip_url = config['configurations']['metron-enrichment-env']['geoip_url'] asn_url = config['configurations']['metron-enrichment-env']['asn_url'] enrichment_host_known_hosts = config['configurations']['metron-enrichment-env']['enrichment_host_known_hosts'] # Enrichment - Kafka enrichment_kafka_start = config['configurations']['metron-enrichment-env']['enrichment_kafka_start'] enrichment_input_topic = status_params.enrichment_input_topic enrichment_output_topic = config['configurations']['metron-enrichment-env']['enrichment_output_topic'] enrichment_error_topic = config['configurations']['metron-enrichment-env']['enrichment_error_topic'] threatintel_error_topic = config['configurations']['metron-enrichment-env']['threatintel_error_topic'] enrichment_kafka_writer_batch_size = config['configurations']['metron-enrichment-env']['enrichment_kafka_writer_batch_size'] enrichment_kafka_writer_batch_timeout = config['configurations']['metron-enrichment-env']['enrichment_kafka_writer_batch_timeout'] # Enrichment - Storm common parameters enrichment_workers = config['configurations']['metron-enrichment-env']['enrichment_workers'] enrichment_acker_executors = config['configurations']['metron-enrichment-env']['enrichment_acker_executors'] if not len(enrichment_topology_worker_childopts) == 0: enrichment_topology_worker_childopts += ' ' enrichment_topology_worker_childopts += config['configurations']['metron-enrichment-env']['enrichment_topology_worker_childopts'] enrichment_topology_max_spout_pending = config['configurations']['metron-enrichment-env']['enrichment_topology_max_spout_pending'] enrichment_topology = config['configurations']['metron-enrichment-env']['enrichment_topology'] # Enrichment - Split Join topology enrichment_join_cache_size = config['configurations']['metron-enrichment-env']['enrichment_join_cache_size'] threatintel_join_cache_size = config['configurations']['metron-enrichment-env']['threatintel_join_cache_size'] enrichment_kafka_spout_parallelism = config['configurations']['metron-enrichment-env']['enrichment_kafka_spout_parallelism'] enrichment_split_parallelism = config['configurations']['metron-enrichment-env']['enrichment_split_parallelism'] enrichment_stellar_parallelism = config['configurations']['metron-enrichment-env']['enrichment_stellar_parallelism'] enrichment_join_parallelism = config['configurations']['metron-enrichment-env']['enrichment_join_parallelism'] threat_intel_split_parallelism = config['configurations']['metron-enrichment-env']['threat_intel_split_parallelism'] threat_intel_stellar_parallelism = config['configurations']['metron-enrichment-env']['threat_intel_stellar_parallelism'] 
threat_intel_join_parallelism = config['configurations']['metron-enrichment-env']['threat_intel_join_parallelism'] kafka_writer_parallelism = config['configurations']['metron-enrichment-env']['kafka_writer_parallelism'] # Enrichment - Unified topology unified_kafka_spout_parallelism = config['configurations']['metron-enrichment-env']['unified_kafka_spout_parallelism'] unified_enrichment_parallelism = config['configurations']['metron-enrichment-env']['unified_enrichment_parallelism'] unified_threat_intel_parallelism = config['configurations']['metron-enrichment-env']['unified_threat_intel_parallelism'] unified_kafka_writer_parallelism = config['configurations']['metron-enrichment-env']['unified_kafka_writer_parallelism'] unified_enrichment_cache_size = config['configurations']['metron-enrichment-env']['unified_enrichment_cache_size'] unified_threat_intel_cache_size = config['configurations']['metron-enrichment-env']['unified_threat_intel_cache_size'] unified_enrichment_threadpool_size = config['configurations']['metron-enrichment-env']['unified_enrichment_threadpool_size'] unified_enrichment_threadpool_type = config['configurations']['metron-enrichment-env']['unified_enrichment_threadpool_type'] # Profiler metron_profiler_topology = 'profiler' profiler_input_topic = config['configurations']['metron-enrichment-env']['enrichment_output_topic'] profiler_kafka_start = config['configurations']['metron-profiler-env']['profiler_kafka_start'] profiler_period_duration = config['configurations']['metron-profiler-env']['profiler_period_duration'] profiler_period_units = config['configurations']['metron-profiler-env']['profiler_period_units'] profiler_window_duration = config['configurations']['metron-profiler-env']['profiler_window_duration'] profiler_window_units = config['configurations']['metron-profiler-env']['profiler_window_units'] profiler_ttl = config['configurations']['metron-profiler-env']['profiler_ttl'] profiler_ttl_units = config['configurations']['metron-profiler-env']['profiler_ttl_units'] profiler_hbase_batch = config['configurations']['metron-profiler-env']['profiler_hbase_batch'] profiler_hbase_flush_interval = config['configurations']['metron-profiler-env']['profiler_hbase_flush_interval'] profiler_topology_workers = config['configurations']['metron-profiler-env']['profiler_topology_workers'] profiler_acker_executors = config['configurations']['metron-profiler-env']['profiler_acker_executors'] profiler_hbase_table = config['configurations']['metron-profiler-env']['profiler_hbase_table'] profiler_hbase_cf = config['configurations']['metron-profiler-env']['profiler_hbase_cf'] profiler_configured_flag_file = status_params.profiler_configured_flag_file profiler_acl_configured_flag_file = status_params.profiler_acl_configured_flag_file profiler_hbase_configured_flag_file = status_params.profiler_hbase_configured_flag_file profiler_hbase_acl_configured_flag_file = status_params.profiler_hbase_acl_configured_flag_file if not len(profiler_topology_worker_childopts) == 0: profiler_topology_worker_childopts += ' ' profiler_topology_worker_childopts += config['configurations']['metron-profiler-env']['profiler_topology_worker_childopts'] profiler_max_routes_per_bolt=config['configurations']['metron-profiler-env']['profiler_max_routes_per_bolt'] profiler_window_lag=config['configurations']['metron-profiler-env']['profiler_window_lag'] profiler_window_lag_units=config['configurations']['metron-profiler-env']['profiler_window_lag_units'] 
profiler_topology_message_timeout_secs=config['configurations']['metron-profiler-env']['profiler_topology_message_timeout_secs'] profiler_topology_max_spout_pending=config['configurations']['metron-profiler-env']['profiler_topology_max_spout_pending'] profiler_kafka_writer_batch_size = config['configurations']['metron-profiler-env']['profiler_kafka_writer_batch_size'] profiler_kafka_writer_batch_timeout = config['configurations']['metron-profiler-env']['profiler_kafka_writer_batch_timeout'] # Indexing ra_indexing_kafka_start = config['configurations']['metron-indexing-env']['ra_indexing_kafka_start'] batch_indexing_kafka_start = config['configurations']['metron-indexing-env']['batch_indexing_kafka_start'] indexing_input_topic = status_params.indexing_input_topic indexing_error_topic = config['configurations']['metron-indexing-env']['indexing_error_topic'] metron_random_access_indexing_topology = status_params.metron_random_access_indexing_topology metron_batch_indexing_topology = status_params.metron_batch_indexing_topology ra_indexing_writer = config['configurations']['metron-indexing-env']['ra_indexing_writer'] batch_indexing_writer_class_name = config['configurations']['metron-indexing-env']['batch_indexing_writer_class_name'] ra_indexing_workers = config['configurations']['metron-indexing-env']['ra_indexing_workers'] batch_indexing_workers = config['configurations']['metron-indexing-env']['batch_indexing_workers'] ra_indexing_acker_executors = config['configurations']['metron-indexing-env']['ra_indexing_acker_executors'] batch_indexing_acker_executors = config['configurations']['metron-indexing-env']['batch_indexing_acker_executors'] if not len(indexing_topology_worker_childopts) == 0: indexing_topology_worker_childopts += ' ' indexing_topology_worker_childopts += config['configurations']['metron-indexing-env']['indexing_topology_worker_childopts'] ra_indexing_topology_max_spout_pending = config['configurations']['metron-indexing-env']['ra_indexing_topology_max_spout_pending'] batch_indexing_topology_max_spout_pending = config['configurations']['metron-indexing-env']['batch_indexing_topology_max_spout_pending'] ra_indexing_kafka_spout_parallelism = config['configurations']['metron-indexing-env']['ra_indexing_kafka_spout_parallelism'] batch_indexing_kafka_spout_parallelism = config['configurations']['metron-indexing-env']['batch_indexing_kafka_spout_parallelism'] ra_indexing_writer_parallelism = config['configurations']['metron-indexing-env']['ra_indexing_writer_parallelism'] hdfs_writer_parallelism = config['configurations']['metron-indexing-env']['hdfs_writer_parallelism'] # the double "format" is not an error - we are pulling in a jinja-templated param. 
This is a bit of a hack, but works # well enough until we find a better way via Ambari metron_apps_indexed_hdfs_dir = format(format(config['configurations']['metron-indexing-env']['metron_apps_indexed_hdfs_dir'])) bolt_hdfs_rotation_policy = config['configurations']['metron-indexing-env']['bolt_hdfs_rotation_policy'] bolt_hdfs_rotation_policy_units = config['configurations']['metron-indexing-env']['bolt_hdfs_rotation_policy_units'] bolt_hdfs_rotation_policy_count = config['configurations']['metron-indexing-env']['bolt_hdfs_rotation_policy_count'] # PCAP metron_pcap_topology = status_params.metron_pcap_topology pcap_input_topic = status_params.pcap_input_topic pcap_base_path = config['configurations']['metron-pcap-env']['pcap_base_path'] pcap_base_interim_result_path = config['configurations']['metron-pcap-env']['pcap_base_interim_result_path'] pcap_final_output_path = config['configurations']['metron-pcap-env']['pcap_final_output_path'] pcap_page_size = config['configurations']['metron-pcap-env']['pcap_page_size'] pcap_yarn_queue = config['configurations']['metron-pcap-env']['pcap_yarn_queue'] pcap_finalizer_threadpool_size= config['configurations']['metron-pcap-env']['pcap_finalizer_threadpool_size'] pcap_configured_flag_file = status_params.pcap_configured_flag_file pcap_perm_configured_flag_file = status_params.pcap_perm_configured_flag_file pcap_acl_configured_flag_file = status_params.pcap_acl_configured_flag_file pcap_topology_workers = config['configurations']['metron-pcap-env']['pcap_topology_workers'] if not len(pcap_topology_worker_childopts) == 0: pcap_topology_worker_childopts += ' ' pcap_topology_worker_childopts += config['configurations']['metron-pcap-env']['pcap_topology_worker_childopts'] spout_kafka_topic_pcap = config['configurations']['metron-pcap-env']['spout_kafka_topic_pcap'] hdfs_sync_every = config['configurations']['metron-pcap-env']['hdfs_sync_every'] hdfs_replication_factor = config['configurations']['metron-pcap-env']['hdfs_replication_factor'] kafka_pcap_start = config['configurations']['metron-pcap-env']['kafka_pcap_start'] kafka_pcap_numpackets = config['configurations']['metron-pcap-env']['kafka_pcap_numpackets'] kafka_pcap_maxtimems = config['configurations']['metron-pcap-env']['kafka_pcap_maxtimems'] kafka_pcap_tsscheme = config['configurations']['metron-pcap-env']['kafka_pcap_tsscheme'] kafka_pcap_out = config['configurations']['metron-pcap-env']['kafka_pcap_out'] kafka_pcap_ts_granularity = config['configurations']['metron-pcap-env']['kafka_pcap_ts_granularity'] kafka_spout_parallelism = config['configurations']['metron-pcap-env']['kafka_spout_parallelism'] # MapReduce metron_user_hdfs_dir = '/user/' + metron_user metron_user_hdfs_dir_configured_flag_file = status_params.metron_user_hdfs_dir_configured_flag_file # Knox knox_user = config['configurations']['knox-env']['knox_user'] knox_group = config['configurations']['knox-env']['knox_group'] metron_knox_root_path = '/gateway/metron' metron_rest_path = '/api/v1' metron_alerts_ui_login_path = '/login' metron_management_ui_login_path = '/login' metron_knox_enabled = config['configurations']['metron-security-env']['metron.knox.enabled'] metron_knox_sso_pubkey = config['configurations']['metron-security-env']['metron.knox.sso.pubkey'] metron_knox_sso_token_ttl = config['configurations']['metron-security-env']['metron.knox.sso.token.ttl'] if metron_knox_enabled: metron_rest_path = metron_knox_root_path + '/metron-rest' + metron_rest_path metron_alerts_ui_login_path = metron_knox_root_path + '/metron-alerts/' 
    metron_management_ui_login_path = metron_knox_root_path + '/metron-management/sensors'
    if not len(metron_spring_options) == 0:
        metron_spring_options += ' '
    metron_spring_options += '--knox.root=' + metron_knox_root_path + '/metron-rest'
    metron_spring_options += ' --knox.sso.pubkey=' + metron_knox_sso_pubkey
    if not len(metron_spring_profiles_active) == 0:
        metron_spring_profiles_active += ','
    metron_spring_profiles_active += 'knox'

knox_home = os.path.join(stack_root, "current", "knox-server")
knox_hosts = default("/clusterHostInfo/knox_gateway_hosts", [])
knox_host = ''
if not len(knox_hosts) == 0:
    knox_host = knox_hosts[0]
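# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the mpack above): the params file pre-binds
# the common HdfsResource keyword arguments with functools.partial so that the
# service scripts only have to pass the path, type and action. The stand-in
# below (fake_hdfs_resource) is hypothetical and exists only to show that
# pre-binding pattern in isolation; the real resource comes from Ambari's
# resource_management library.
# ---------------------------------------------------------------------------
import functools


def fake_hdfs_resource(path, type=None, action=None, owner=None, mode=None,
                       user=None, security_enabled=False, **common_kwargs):
    # Toy stand-in: just report what would be created and with which
    # pre-bound connection settings.
    print("{0} {1} (type={2}, owner={3}, user={4}, kerberos={5})".format(
        action, path, type, owner, user, security_enabled))


# Bind the cluster-wide arguments once, much like the partial above...
HdfsResourceSketch = functools.partial(
    fake_hdfs_resource,
    user='hdfs',
    security_enabled=False)

# ...so each call site only names the resource it needs.
HdfsResourceSketch('/apps/metron/patterns', type='directory',
                   action='create_on_execute', owner='metron', mode=0o755)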
# stdlib import os import re import requests import time import socket import urllib2 from collections import defaultdict, Counter, deque # project from checks import AgentCheck from config import _is_affirmative from utils.dockerutil import find_cgroup, find_cgroup_filename_pattern, get_client, MountException, \ set_docker_settings, image_tag_extractor, container_name_extractor from utils.kubeutil import get_kube_labels from utils.platform import Platform EVENT_TYPE = 'docker' SERVICE_CHECK_NAME = 'docker.service_up' SIZE_REFRESH_RATE = 5 # Collect container sizes every 5 iterations of the check MAX_CGROUP_LISTING_RETRIES = 3 CONTAINER_ID_RE = re.compile('[0-9a-f]{64}') POD_NAME_LABEL = "io.kubernetes.pod.name" GAUGE = AgentCheck.gauge RATE = AgentCheck.rate HISTORATE = AgentCheck.generate_historate_func(["container_name"]) HISTO = AgentCheck.generate_histogram_func(["container_name"]) FUNC_MAP = { GAUGE: {True: HISTO, False: GAUGE}, RATE: {True: HISTORATE, False: RATE} } CGROUP_METRICS = [ { "cgroup": "memory", "file": "memory.stat", "metrics": { "cache": ("docker.mem.cache", GAUGE), "rss": ("docker.mem.rss", GAUGE), "swap": ("docker.mem.swap", GAUGE), }, "to_compute": { # We only get these metrics if they are properly set, i.e. they are a "reasonable" value "docker.mem.limit": (["hierarchical_memory_limit"], lambda x: float(x) if float(x) < 2 ** 60 else None, GAUGE), "docker.mem.sw_limit": (["hierarchical_memsw_limit"], lambda x: float(x) if float(x) < 2 ** 60 else None, GAUGE), "docker.mem.in_use": (["rss", "hierarchical_memory_limit"], lambda x,y: float(x)/float(y) if float(y) < 2 ** 60 else None, GAUGE), "docker.mem.sw_in_use": (["swap", "rss", "hierarchical_memsw_limit"], lambda x,y,z: float(x + y)/float(z) if float(z) < 2 ** 60 else None, GAUGE) } }, { "cgroup": "cpuacct", "file": "cpuacct.stat", "metrics": { "user": ("docker.cpu.user", RATE), "system": ("docker.cpu.system", RATE), }, }, { "cgroup": "blkio", "file": 'blkio.throttle.io_service_bytes', "metrics": { "io_read": ("docker.io.read_bytes", RATE), "io_write": ("docker.io.write_bytes", RATE), }, }, ] DEFAULT_CONTAINER_TAGS = [ "docker_image", "image_name", "image_tag", ] DEFAULT_PERFORMANCE_TAGS = [ "container_name", "docker_image", "image_name", "image_tag", ] DEFAULT_IMAGE_TAGS = [ 'image_name', 'image_tag' ] TAG_EXTRACTORS = { "docker_image": lambda c: [c["Image"]], "image_name": lambda c: image_tag_extractor(c, 0), "image_tag": lambda c: image_tag_extractor(c, 1), "container_command": lambda c: [c["Command"]], "container_name": container_name_extractor, } CONTAINER = "container" PERFORMANCE = "performance" FILTERED = "filtered" IMAGE = "image" def get_mountpoints(docker_root): mountpoints = {} for metric in CGROUP_METRICS: mountpoints[metric["cgroup"]] = find_cgroup(metric["cgroup"], docker_root) return mountpoints def get_filters(include, exclude): # The reasoning is to check exclude first, so we can skip if there is no exclude if not exclude: return filtered_tag_names = [] exclude_patterns = [] include_patterns = [] # Compile regex for rule in exclude: exclude_patterns.append(re.compile(rule)) filtered_tag_names.append(rule.split(':')[0]) for rule in include: include_patterns.append(re.compile(rule)) filtered_tag_names.append(rule.split(':')[0]) return set(exclude_patterns), set(include_patterns), set(filtered_tag_names) class DockerDaemon(AgentCheck): """Collect metrics and events from Docker API and cgroups.""" def __init__(self, name, init_config, agentConfig, instances=None): if instances is not None and 
len(instances) > 1: raise Exception("Docker check only supports one configured instance.") AgentCheck.__init__(self, name, init_config, agentConfig, instances=instances) self.init_success = False self.init() def is_k8s(self): return self.is_check_enabled("kubernetes") def init(self): try: # We configure the check with the right cgroup settings for this host # Just needs to be done once instance = self.instances[0] set_docker_settings(self.init_config, instance) self.client = get_client() self._docker_root = self.init_config.get('docker_root', '/') self._mountpoints = get_mountpoints(self._docker_root) self.cgroup_listing_retries = 0 self._latest_size_query = 0 self._filtered_containers = set() self._disable_net_metrics = False # At first run we'll just collect the events from the latest 60 secs self._last_event_collection_ts = int(time.time()) - 60 # Set tagging options self.custom_tags = instance.get("tags", []) self.collect_labels_as_tags = instance.get("collect_labels_as_tags", []) self.kube_labels = {} self.use_histogram = _is_affirmative(instance.get('use_histogram', False)) performance_tags = instance.get("performance_tags", DEFAULT_PERFORMANCE_TAGS) self.tag_names = { CONTAINER: instance.get("container_tags", DEFAULT_CONTAINER_TAGS), PERFORMANCE: performance_tags, IMAGE: instance.get('image_tags', DEFAULT_IMAGE_TAGS) } # Set filtering settings if not instance.get("exclude"): self._filtering_enabled = False if instance.get("include"): self.log.warning("You must specify an exclude section to enable filtering") else: self._filtering_enabled = True include = instance.get("include", []) exclude = instance.get("exclude", []) self._exclude_patterns, self._include_patterns, _filtered_tag_names = get_filters(include, exclude) self.tag_names[FILTERED] = _filtered_tag_names # Other options self.collect_image_stats = _is_affirmative(instance.get('collect_images_stats', False)) self.collect_container_size = _is_affirmative(instance.get('collect_container_size', False)) self.collect_events = _is_affirmative(instance.get('collect_events', True)) self.collect_image_size = _is_affirmative(instance.get('collect_image_size', False)) self.collect_ecs_tags = _is_affirmative(instance.get('ecs_tags', True)) and Platform.is_ecs_instance() self.ecs_tags = {} except Exception, e: self.log.critical(e) self.warning("Initialization failed. Will retry at next iteration") else: self.init_success = True def check(self, instance): """Run the Docker check for one instance.""" if not self.init_success: # Initialization can fail if cgroups are not ready. 
So we retry if needed # https://github.com/DataDog/dd-agent/issues/1896 self.init() if not self.init_success: # Initialization failed, will try later return # Report image metrics if self.collect_image_stats: self._count_and_weigh_images() if self.collect_ecs_tags: self.refresh_ecs_tags() if self.is_k8s(): self.kube_labels = get_kube_labels() # Get the list of containers and the index of their names containers_by_id = self._get_and_count_containers() containers_by_id = self._crawl_container_pids(containers_by_id) # Report performance container metrics (cpu, mem, net, io) self._report_performance_metrics(containers_by_id) if self.collect_container_size: self._report_container_size(containers_by_id) # Send events from Docker API if self.collect_events: self._process_events(containers_by_id) def _count_and_weigh_images(self): try: tags = self._get_tags() active_images = self.client.images(all=False) active_images_len = len(active_images) all_images_len = len(self.client.images(quiet=True, all=True)) self.gauge("docker.images.available", active_images_len, tags=tags) self.gauge("docker.images.intermediate", (all_images_len - active_images_len), tags=tags) if self.collect_image_size: self._report_image_size(active_images) except Exception, e: # It's not an important metric, keep going if it fails self.warning("Failed to count Docker images. Exception: {0}".format(e)) def _get_and_count_containers(self): """List all the containers from the API, filter and count them.""" # Querying the size of containers is slow, we don't do it at each run must_query_size = self.collect_container_size and self._latest_size_query == 0 self._latest_size_query = (self._latest_size_query + 1) % SIZE_REFRESH_RATE running_containers_count = Counter() all_containers_count = Counter() try: containers = self.client.containers(all=True, size=must_query_size) except Exception, e: message = "Unable to list Docker containers: {0}".format(e) self.service_check(SERVICE_CHECK_NAME, AgentCheck.CRITICAL, message=message) raise Exception(message) else: self.service_check(SERVICE_CHECK_NAME, AgentCheck.OK) # Filter containers according to the exclude/include rules self._filter_containers(containers) containers_by_id = {} for container in containers: container_name = container_name_extractor(container)[0] container_status_tags = self._get_tags(container, CONTAINER) all_containers_count[tuple(sorted(container_status_tags))] += 1 if self._is_container_running(container): running_containers_count[tuple(sorted(container_status_tags))] += 1 # Check if the container is included/excluded via its tags if self._is_container_excluded(container): self.log.debug("Container {0} is excluded".format(container_name)) continue containers_by_id[container['Id']] = container for tags, count in running_containers_count.iteritems(): self.gauge("docker.containers.running", count, tags=list(tags)) for tags, count in all_containers_count.iteritems(): stopped_count = count - running_containers_count[tags] self.gauge("docker.containers.stopped", stopped_count, tags=list(tags)) return containers_by_id def _is_container_running(self, container): """Tell if a container is running, according to its status. There is no "nice" API field to figure it out. We just look at the "Status" field, knowing how it is generated. 
See: https://github.com/docker/docker/blob/v1.6.2/daemon/state.go#L35 """ return container["Status"].startswith("Up") or container["Status"].startswith("Restarting") def _get_tags(self, entity=None, tag_type=None): """Generate the tags for a given entity (container or image) according to a list of tag names.""" # Start with custom tags tags = list(self.custom_tags) # Collect pod names as tags on kubernetes if self.is_k8s() and POD_NAME_LABEL not in self.collect_labels_as_tags: self.collect_labels_as_tags.append(POD_NAME_LABEL) if entity is not None: pod_name = None # Get labels as tags labels = entity.get("Labels") if labels is not None: for k in self.collect_labels_as_tags: if k in labels: v = labels[k] if k == POD_NAME_LABEL and self.is_k8s(): pod_name = v k = "pod_name" if "-" in pod_name: replication_controller = "-".join(pod_name.split("-")[:-1]) if "/" in replication_controller: namespace, replication_controller = replication_controller.split("/", 1) tags.append("kube_namespace:%s" % namespace) tags.append("kube_replication_controller:%s" % replication_controller) if not v: tags.append(k) else: tags.append("%s:%s" % (k,v)) if k == POD_NAME_LABEL and self.is_k8s() and k not in labels: tags.append("pod_name:no_pod") # Get entity specific tags if tag_type is not None: tag_names = self.tag_names[tag_type] for tag_name in tag_names: tag_value = self._extract_tag_value(entity, tag_name) if tag_value is not None: for t in tag_value: tags.append('%s:%s' % (tag_name, str(t).strip())) # Add ECS tags if self.collect_ecs_tags: entity_id = entity.get("Id") if entity_id in self.ecs_tags: ecs_tags = self.ecs_tags[entity_id] tags.extend(ecs_tags) # Add kube labels if self.is_k8s(): kube_tags = self.kube_labels.get(pod_name) if kube_tags: tags.extend(list(kube_tags)) return tags def _extract_tag_value(self, entity, tag_name): """Extra tag information from the API result (containers or images). Cache extracted tags inside the entity object. 
""" if tag_name not in TAG_EXTRACTORS: self.warning("{0} isn't a supported tag".format(tag_name)) return # Check for already extracted tags if "_tag_values" not in entity: entity["_tag_values"] = {} if tag_name not in entity["_tag_values"]: entity["_tag_values"][tag_name] = TAG_EXTRACTORS[tag_name](entity) return entity["_tag_values"][tag_name] def refresh_ecs_tags(self): ecs_config = self.client.inspect_container('ecs-agent') ip = ecs_config.get('NetworkSettings', {}).get('IPAddress') ports = ecs_config.get('NetworkSettings', {}).get('Ports') port = ports.keys()[0].split('/')[0] if ports else None ecs_tags = {} if ip and port: tasks = requests.get('http://%s:%s/v1/tasks' % (ip, port)).json() for task in tasks.get('Tasks', []): for container in task.get('Containers', []): tags = ['task_name:%s' % task['Family'], 'task_version:%s' % task['Version']] ecs_tags[container['DockerId']] = tags self.ecs_tags = ecs_tags def _filter_containers(self, containers): if not self._filtering_enabled: return self._filtered_containers = set() for container in containers: container_tags = self._get_tags(container, FILTERED) if self._are_tags_filtered(container_tags): container_name = container_name_extractor(container)[0] self._filtered_containers.add(container_name) self.log.debug("Container {0} is filtered".format(container["Names"][0])) def _are_tags_filtered(self, tags): if self._tags_match_patterns(tags, self._exclude_patterns): if self._tags_match_patterns(tags, self._include_patterns): return False return True return False def _tags_match_patterns(self, tags, filters): for rule in filters: for tag in tags: if re.match(rule, tag): return True return False def _is_container_excluded(self, container): """Check if a container is excluded according to the filter rules. Requires _filter_containers to run first. """ container_name = container_name_extractor(container)[0] return container_name in self._filtered_containers def _report_container_size(self, containers_by_id): container_list_with_size = None for container in containers_by_id.itervalues(): if self._is_container_excluded(container): continue tags = self._get_tags(container, PERFORMANCE) m_func = FUNC_MAP[GAUGE][self.use_histogram] if "SizeRw" in container: m_func(self, 'docker.container.size_rw', container['SizeRw'], tags=tags) if "SizeRootFs" in container: m_func( self, 'docker.container.size_rootfs', container['SizeRootFs'], tags=tags) def _report_image_size(self, images): for image in images: tags = self._get_tags(image, IMAGE) if 'VirtualSize' in image: self.gauge('docker.image.virtual_size', image['VirtualSize'], tags=tags) if 'Size' in image: self.gauge('docker.image.size', image['Size'], tags=tags) # Performance metrics def _report_performance_metrics(self, containers_by_id): containers_without_proc_root = [] for container in containers_by_id.itervalues(): if self._is_container_excluded(container) or not self._is_container_running(container): continue tags = self._get_tags(container, PERFORMANCE) self._report_cgroup_metrics(container, tags) if "_proc_root" not in container: containers_without_proc_root.append(container_name_extractor(container)[0]) continue self._report_net_metrics(container, tags) if containers_without_proc_root: message = "Couldn't find pid directory for container: {0}. They'll be missing network metrics".format( ",".join(containers_without_proc_root)) if not self.is_k8s(): self.warning(message) else: # On kubernetes, this is kind of expected. 
Network metrics will be collected by the kubernetes integration anyway self.log.debug(message) def _report_cgroup_metrics(self, container, tags): try: for cgroup in CGROUP_METRICS: stat_file = self._get_cgroup_file(cgroup["cgroup"], container['Id'], cgroup['file']) stats = self._parse_cgroup_file(stat_file) if stats: for key, (dd_key, metric_func) in cgroup['metrics'].iteritems(): metric_func = FUNC_MAP[metric_func][self.use_histogram] if key in stats: metric_func(self, dd_key, int(stats[key]), tags=tags) # Computed metrics for mname, (key_list, fct, metric_func) in cgroup.get('to_compute', {}).iteritems(): values = [stats[key] for key in key_list if key in stats] if len(values) != len(key_list): self.log.debug("Couldn't compute {0}, some keys were missing.".format(mname)) continue value = fct(*values) metric_func = FUNC_MAP[metric_func][self.use_histogram] if value is not None: metric_func(self, mname, value, tags=tags) except MountException as ex: if self.cgroup_listing_retries > MAX_CGROUP_LISTING_RETRIES: raise ex else: self.warning("Couldn't find the cgroup files. Skipping the CGROUP_METRICS for now." "Will retry {0} times before failing.".format(MAX_CGROUP_LISTING_RETRIES - self.cgroup_listing_retries)) self.cgroup_listing_retries += 1 else: self.cgroup_listing_retries = 0 def _report_net_metrics(self, container, tags): """Find container network metrics by looking at /proc/$PID/net/dev of the container process.""" if self._disable_net_metrics: self.log.debug("Network metrics are disabled. Skipping") return proc_net_file = os.path.join(container['_proc_root'], 'net/dev') try: with open(proc_net_file, 'r') as fp: lines = fp.readlines() """Two first lines are headers: Inter-| Receive | Transmit face |bytes packets errs drop fifo frame compressed multicast|bytes packets errs drop fifo colls carrier compressed """ for l in lines[2:]: cols = l.split(':', 1) interface_name = str(cols[0]).strip() if interface_name == 'eth0': x = cols[1].split() m_func = FUNC_MAP[RATE][self.use_histogram] m_func(self, "docker.net.bytes_rcvd", long(x[0]), tags) m_func(self, "docker.net.bytes_sent", long(x[8]), tags) break except Exception, e: # It is possible that the container got stopped between the API call and now self.warning("Failed to report IO metrics from file {0}. Exception: {1}".format(proc_net_file, e)) def _process_events(self, containers_by_id): try: api_events = self._get_events() aggregated_events = self._pre_aggregate_events(api_events, containers_by_id) events = self._format_events(aggregated_events, containers_by_id) except (socket.timeout, urllib2.URLError): self.warning('Timeout when collecting events. Events will be missing.') return except Exception, e: self.warning("Unexpected exception when collecting events: {0}. " "Events will be missing".format(e)) return for ev in events: self.log.debug("Creating event: %s" % ev['msg_title']) self.event(ev) def _get_events(self): """Get the list of events.""" now = int(time.time()) events = [] event_generator = self.client.events(since=self._last_event_collection_ts, until=now, decode=True) for event in event_generator: if event != '': events.append(event) self._last_event_collection_ts = now return events def _pre_aggregate_events(self, api_events, containers_by_id): # Aggregate events, one per image. Put newer events first. 
events = defaultdict(deque) for event in api_events: # Skip events related to filtered containers container = containers_by_id.get(event['id']) if container is not None and self._is_container_excluded(container): self.log.debug("Excluded event: container {0} status changed to {1}".format( event['id'], event['status'])) continue # Known bug: from may be missing if 'from' in event: events[event['from']].appendleft(event) return events def _format_events(self, aggregated_events, containers_by_id): events = [] for image_name, event_group in aggregated_events.iteritems(): max_timestamp = 0 status = defaultdict(int) status_change = [] container_names = set() for event in event_group: max_timestamp = max(max_timestamp, int(event['time'])) status[event['status']] += 1 container_name = event['id'][:11] if event['id'] in containers_by_id: container_name = container_name_extractor(containers_by_id[event['id']])[0] container_names.add(container_name) status_change.append([container_name, event['status']]) status_text = ", ".join(["%d %s" % (count, st) for st, count in status.iteritems()]) msg_title = "%s %s on %s" % (image_name, status_text, self.hostname) msg_body = ( "%%%\n" "{image_name} {status} on {hostname}\n" "```\n{status_changes}\n```\n" "%%%" ).format( image_name=image_name, status=status_text, hostname=self.hostname, status_changes="\n".join( ["%s \t%s" % (change[1].upper(), change[0]) for change in status_change]) ) events.append({ 'timestamp': max_timestamp, 'host': self.hostname, 'event_type': EVENT_TYPE, 'msg_title': msg_title, 'msg_text': msg_body, 'source_type_name': EVENT_TYPE, 'event_object': 'docker:%s' % image_name, 'tags': ['container_name:%s' % c_name for c_name in container_names] }) return events # Cgroups def _get_cgroup_file(self, cgroup, container_id, filename): """Find a specific cgroup file, containing metrics to extract.""" params = { "mountpoint": self._mountpoints[cgroup], "id": container_id, "file": filename, } return find_cgroup_filename_pattern(self._mountpoints, container_id) % (params) def _parse_cgroup_file(self, stat_file): """Parse a cgroup pseudo file for key/values.""" self.log.debug("Opening cgroup file: %s" % stat_file) try: with open(stat_file, 'r') as fp: if 'blkio' in stat_file: return self._parse_blkio_metrics(fp.read().splitlines()) else: return dict(map(lambda x: x.split(' ', 1), fp.read().splitlines())) except IOError: # It is possible that the container got stopped between the API call and now self.log.info("Can't open %s. Metrics for this container are skipped." % stat_file) def _parse_blkio_metrics(self, stats): """Parse the blkio metrics.""" metrics = { 'io_read': 0, 'io_write': 0, } for line in stats: if 'Read' in line: metrics['io_read'] += int(line.split()[2]) if 'Write' in line: metrics['io_write'] += int(line.split()[2]) return metrics # proc files def _crawl_container_pids(self, container_dict): """Crawl `/proc` to find container PIDs and add them to `containers_by_id`.""" proc_path = os.path.join(self._docker_root, 'proc') pid_dirs = [_dir for _dir in os.listdir(proc_path) if _dir.isdigit()] if len(pid_dirs) == 0: self.warning("Unable to find any pid directory in {0}. " "If you are running the agent in a container, make sure to " 'share the volume properly: "/proc:/host/proc:ro". ' "See https://github.com/DataDog/docker-dd-agent/blob/master/README.md for more information. 
" "Network metrics will be missing".format(proc_path)) self._disable_net_metrics = True return container_dict self._disable_net_metrics = False for folder in pid_dirs: try: path = os.path.join(proc_path, folder, 'cgroup') with open(path, 'r') as f: content = [line.strip().split(':') for line in f.readlines()] except Exception, e: self.warning("Cannot read %s : %s" % (path, str(e))) continue try: for line in content: if line[1] in ('cpu,cpuacct', 'cpuacct,cpu', 'cpuacct') and 'docker' in line[2]: cpuacct = line[2] break else: continue match = CONTAINER_ID_RE.search(cpuacct) if match: container_id = match.group(0) container_dict[container_id]['_pid'] = folder container_dict[container_id]['_proc_root'] = os.path.join(proc_path, folder) except Exception, e: self.warning("Cannot parse %s content: %s" % (path, str(e))) continue return container_dict
# coding=utf-8 # Copyright 2014 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). from __future__ import (absolute_import, division, generators, nested_scopes, print_function, unicode_literals, with_statement) import glob import os import time from pants.util.contextutil import temporary_dir from pants.util.dirutil import safe_delete, safe_mkdir, touch from pants_test.pants_run_integration_test import PantsRunIntegrationTest class CacheCleanupIntegrationTest(PantsRunIntegrationTest): def _create_platform_args(self, version): return [("""--jvm-platform-platforms={{'default': {{'target': '{version}'}}}}""" .format(version=version)), '--jvm-platform-default-platform=default'] def _run_pants_get_artifact_dir(self, args, cache_dir, subdir, num_files_to_insert, expected_num_files, config=None, prev_dirs=[]): """Run Pants with the given `args` and `config`, delete the results, add some files, then run pants again and ensure there are exactly `expected_num_files` in the output. Pants needs to be run twice because we don't know what the results directory will be named before we run Pants, and we want to insert files into that specific directory to test cache cleanup procedures. """ self.assert_success(self.run_pants(args, config=config)) artifact_base_dir = self.get_cache_subdir(cache_dir, other_dirs=prev_dirs) artifact_dir = os.path.join(artifact_base_dir, subdir) for tgz in glob.glob(os.path.join(artifact_dir, '*.tgz')): safe_delete(tgz) for i in range(0, num_files_to_insert): touch(os.path.join(artifact_dir, 'old_cache_test{}'.format(i + 1))) self.assert_success(self.run_pants(args, config=config)) self.assertEqual(len(os.listdir(artifact_dir)), expected_num_files) return artifact_base_dir def test_buildcache_leave_one(self): """Ensure that max-old of 1 removes all but one files""" with temporary_dir() as cache_dir: config = {'cache.compile.zinc': {'write_to': [cache_dir]}} java_6_args = self._create_platform_args(6) + [ 'compile.zinc', 'testprojects/src/java/org/pantsbuild/testproject/unicode/main', '--cache-max-entries-per-target=1', ] java_6_artifact_base_dir = self._run_pants_get_artifact_dir( java_6_args, cache_dir, 'testprojects.src.java.org.pantsbuild.testproject.unicode.main.main', num_files_to_insert=5, # One artifact for java 6 expected_num_files=1, config=config, ) # Rerun for java 7 java_7_args = self._create_platform_args(7) + [ 'compile.zinc', 'testprojects/src/java/org/pantsbuild/testproject/unicode/main', '--cache-max-entries-per-target=1', ] self._run_pants_get_artifact_dir( java_7_args, cache_dir, 'testprojects.src.java.org.pantsbuild.testproject.unicode.main.main', num_files_to_insert=2, # One artifact for java 6 expected_num_files=1, config=config, # java 7 platform args should change the name of the cache directory prev_dirs=[java_6_artifact_base_dir], ) def test_buildcache_leave_none(self): """Ensure that max-old of zero removes all files This test should ensure that conditional doesn't change to the simpler test of if max_old since we need to handle zero as well. 
""" with temporary_dir() as cache_dir: config = {'cache.compile.zinc': {'write_to': [cache_dir]}} java_6_args = self._create_platform_args(6) + [ 'compile.zinc', 'testprojects/src/java/org/pantsbuild/testproject/unicode/main', '--cache-max-entries-per-target=0', ] java_6_artifact_base_dir = self._run_pants_get_artifact_dir( java_6_args, cache_dir, 'testprojects.src.java.org.pantsbuild.testproject.unicode.main.main', num_files_to_insert=5, # Cache cleanup disabled for 0 expected_num_files=6, config=config, ) # Rerun for java 7 java_7_args = self._create_platform_args(7) + [ 'compile.zinc', 'testprojects/src/java/org/pantsbuild/testproject/unicode/main', '--cache-max-entries-per-target=0', ] self._run_pants_get_artifact_dir( java_7_args, cache_dir, 'testprojects.src.java.org.pantsbuild.testproject.unicode.main.main', num_files_to_insert=2, # Cache cleanup disabled for 0 expected_num_files=3, config=config, # java 7 platform args should change the name of the cache directory prev_dirs=[java_6_artifact_base_dir], ) def test_workdir_stale_builds_cleanup(self): """Ensure that current and previous build result_dirs and the newest `--workdir-max-build-entries` number of dirs will be kept, and the rest will be purged. """ with temporary_dir() as tmp_dir: workdir = os.path.join(tmp_dir, '.pants.d') self.assert_success(self.run_pants_with_workdir([ 'compile', 'export-classpath', 'testprojects/src/java/org/pantsbuild/testproject/unicode/main', ], workdir)) # Use the static exported classpath symlink to access the artifact in workdir # in order to avoid computing hashed task version used in workdir. classpath = 'dist/export-classpath/testprojects.src.java.org.pantsbuild.testproject.unicode.main.main-0.jar' # <workdir>/compile/zinc/d4600a981d5d/testprojects.src.java.org.pantsbuild.testproject.unicode.main.main/1a317a2504f6/z.jar' jar_path_in_pantsd = os.path.realpath(classpath) # <workdir>/compile/zinc/d4600a981d5d/testprojects.src.java.org.pantsbuild.testproject.unicode.main.main/ target_dir_in_pantsd = os.path.dirname(os.path.dirname(jar_path_in_pantsd)) old_cache_dirnames = set([ 'old_cache_test1_dir/', 'old_cache_test2_dir/', 'old_cache_test3_dir/', ]) new_cache_dirnames = set([ 'old_cache_test4_dir/', 'old_cache_test5_dir/', ]) old_cache_entries = {os.path.join(target_dir_in_pantsd, subdir) for subdir in old_cache_dirnames} new_cache_entries = {os.path.join(target_dir_in_pantsd, subdir) for subdir in new_cache_dirnames} for old_entry in old_cache_entries: safe_mkdir(old_entry) # sleep for a bit so these files are all newer than the other ones time.sleep(1.1) for new_entry in new_cache_entries: safe_mkdir(new_entry) expected_dirs = set([os.path.join(target_dir_in_pantsd, 'current/')]) | old_cache_entries | new_cache_entries # stable symlink, current version directory, and synthetically created directories. 
remaining_cache_dir_fingerprinted = self.get_cache_subdir(target_dir_in_pantsd, other_dirs=expected_dirs) fingerprinted_realdir = os.path.realpath(os.path.join(target_dir_in_pantsd, 'current')) self.assertEqual( fingerprinted_realdir, remaining_cache_dir_fingerprinted.rstrip('/')) max_entries_per_target = 2 self.assert_success(self.run_pants_with_workdir([ 'compile', 'export-classpath', 'testprojects/src/java/org/pantsbuild/testproject/unicode/main', '--workdir-max-build-entries={}'.format(max_entries_per_target) ], workdir)) # stable (same as before), current, and 2 newest dirs self.assertEqual(os.path.dirname(os.path.dirname(os.path.realpath(classpath))), target_dir_in_pantsd) newest_expected_dirs = expected_dirs - old_cache_entries other_cache_dir_fingerprinted = self.get_cache_subdir(target_dir_in_pantsd, other_dirs=newest_expected_dirs) self.assertEqual(other_cache_dir_fingerprinted, remaining_cache_dir_fingerprinted) self.assertEqual( os.path.realpath(os.path.join(target_dir_in_pantsd, 'current')), fingerprinted_realdir) self.assert_success(self.run_pants_with_workdir([ 'compile', 'export-classpath', 'testprojects/src/java/org/pantsbuild/testproject/unicode/main', '--compile-zinc-debug-symbols', '--workdir-max-build-entries={}'.format(max_entries_per_target) ], workdir)) # stable, current, and 2 newest dirs self.assertEqual(os.path.dirname(os.path.dirname(os.path.realpath(classpath))), target_dir_in_pantsd) new_cache_dir_fingerprinted = self.get_cache_subdir(target_dir_in_pantsd, other_dirs=newest_expected_dirs) # subsequent run with --compile-zinc-debug-symbols will invalidate previous build thus triggering the clean up. self.assertNotEqual(new_cache_dir_fingerprinted, remaining_cache_dir_fingerprinted) new_fingerprinted_realdir = os.path.realpath(os.path.join(target_dir_in_pantsd, 'current')) self.assertEqual(new_fingerprinted_realdir, new_cache_dir_fingerprinted.rstrip('/'))
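# ---------------------------------------------------------------------------
# Illustrative sketch (not Pants' implementation): the tests above exercise a
# "keep the newest N result dirs per target, purge the rest" policy, with 0
# meaning cleanup is disabled. A minimal mtime-based version of that policy,
# run against a throwaway temp directory, could look like this.
# ---------------------------------------------------------------------------
import os
import shutil
import tempfile
import time


def prune_all_but_newest(root, max_entries):
  """Keep the `max_entries` most recently modified subdirs of `root`."""
  if max_entries <= 0:  # 0 disables cleanup, as in the zero-entries test.
    return
  entries = [os.path.join(root, name) for name in os.listdir(root)]
  entries.sort(key=os.path.getmtime, reverse=True)
  for stale in entries[max_entries:]:
    shutil.rmtree(stale)


root = tempfile.mkdtemp()
base = time.time()
for i in range(5):
  entry = os.path.join(root, 'result_dir_{0}'.format(i))
  os.mkdir(entry)
  os.utime(entry, (base + i, base + i))  # strictly increasing mtimes
prune_all_but_newest(root, max_entries=2)
print(sorted(os.listdir(root)))  # the two newest dirs survive
shutil.rmtree(root)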
# Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import six from oslo_log import log as logging from tempest_lib import exceptions as lib_exc from tempest.api.compute import base from tempest.common.utils import data_utils from tempest import config from tempest import test CONF = config.CONF LOG = logging.getLogger(__name__) class AuthorizationTestJSON(base.BaseV2ComputeTest): credentials = ['primary', 'alt'] @classmethod def skip_checks(cls): super(AuthorizationTestJSON, cls).skip_checks() if not CONF.service_available.glance: raise cls.skipException('Glance is not available.') @classmethod def setup_credentials(cls): # No network resources required for this test cls.set_network_resources() super(AuthorizationTestJSON, cls).setup_credentials() @classmethod def setup_clients(cls): super(AuthorizationTestJSON, cls).setup_clients() cls.client = cls.os.servers_client cls.images_client = cls.os.images_client cls.glance_client = cls.os.image_client cls.keypairs_client = cls.os.keypairs_client cls.security_client = cls.os.security_groups_client cls.rule_client = cls.os.security_group_rules_client cls.alt_client = cls.alt_manager.servers_client cls.alt_images_client = cls.alt_manager.images_client cls.alt_keypairs_client = cls.alt_manager.keypairs_client cls.alt_security_client = cls.alt_manager.security_groups_client cls.alt_rule_client = cls.alt_manager.security_group_rules_client @classmethod def resource_setup(cls): super(AuthorizationTestJSON, cls).resource_setup() server = cls.create_test_server(wait_until='ACTIVE') cls.server = cls.client.show_server(server['id'])['server'] name = data_utils.rand_name('image') body = cls.glance_client.create_image(name=name, container_format='bare', disk_format='raw', is_public=False)['image'] image_id = body['id'] image_file = six.StringIO(('*' * 1024)) body = cls.glance_client.update_image(image_id, data=image_file)['image'] cls.glance_client.wait_for_image_status(image_id, 'active') cls.image = cls.images_client.show_image(image_id)['image'] cls.keypairname = data_utils.rand_name('keypair') cls.keypairs_client.create_keypair(name=cls.keypairname) name = data_utils.rand_name('security') description = data_utils.rand_name('description') cls.security_group = cls.security_client.create_security_group( name=name, description=description)['security_group'] parent_group_id = cls.security_group['id'] ip_protocol = 'tcp' from_port = 22 to_port = 22 cls.rule = cls.rule_client.create_security_group_rule( parent_group_id=parent_group_id, ip_protocol=ip_protocol, from_port=from_port, to_port=to_port)['security_group_rule'] @classmethod def resource_cleanup(cls): if hasattr(cls, 'image'): cls.images_client.delete_image(cls.image['id']) if hasattr(cls, 'keypairname'): cls.keypairs_client.delete_keypair(cls.keypairname) if hasattr(cls, 'security_group'): cls.security_client.delete_security_group(cls.security_group['id']) super(AuthorizationTestJSON, cls).resource_cleanup() 
@test.idempotent_id('56816e4a-bd34-47b5-aee9-268c3efeb5d4') def test_get_server_for_alt_account_fails(self): # A GET request for a server on another user's account should fail self.assertRaises(lib_exc.NotFound, self.alt_client.show_server, self.server['id']) @test.idempotent_id('fb8a4870-6d9d-44ad-8375-95d52e98d9f6') def test_delete_server_for_alt_account_fails(self): # A DELETE request for another user's server should fail self.assertRaises(lib_exc.NotFound, self.alt_client.delete_server, self.server['id']) @test.idempotent_id('d792f91f-1d49-4eb5-b1ff-b229c4b9dc64') def test_update_server_for_alt_account_fails(self): # An update server request for another user's server should fail self.assertRaises(lib_exc.NotFound, self.alt_client.update_server, self.server['id'], name='test') @test.idempotent_id('488f24df-d7f7-4207-949a-f17fcb8e8769') def test_list_server_addresses_for_alt_account_fails(self): # A list addresses request for another user's server should fail self.assertRaises(lib_exc.NotFound, self.alt_client.list_addresses, self.server['id']) @test.idempotent_id('00b442d0-2e72-40e7-9b1f-31772e36da01') def test_list_server_addresses_by_network_for_alt_account_fails(self): # A list address/network request for another user's server should fail server_id = self.server['id'] self.assertRaises(lib_exc.NotFound, self.alt_client.list_addresses_by_network, server_id, 'public') @test.idempotent_id('cc90b35a-19f0-45d2-b680-2aabf934aa22') def test_list_servers_with_alternate_tenant(self): # A list on servers from one tenant should not # show on alternate tenant # Listing servers from alternate tenant alt_server_ids = [] body = self.alt_client.list_servers() alt_server_ids = [s['id'] for s in body['servers']] self.assertNotIn(self.server['id'], alt_server_ids) @test.idempotent_id('376dbc16-0779-4384-a723-752774799641') def test_change_password_for_alt_account_fails(self): # A change password request for another user's server should fail self.assertRaises(lib_exc.NotFound, self.alt_client.change_password, self.server['id'], 'newpass') @test.idempotent_id('14cb5ff5-f646-45ca-8f51-09081d6c0c24') def test_reboot_server_for_alt_account_fails(self): # A reboot request for another user's server should fail self.assertRaises(lib_exc.NotFound, self.alt_client.reboot_server, self.server['id'], 'HARD') @test.idempotent_id('8a0bce51-cd00-480b-88ba-dbc7d8408a37') def test_rebuild_server_for_alt_account_fails(self): # A rebuild request for another user's server should fail self.assertRaises(lib_exc.NotFound, self.alt_client.rebuild_server, self.server['id'], self.image_ref_alt) @test.idempotent_id('e4da647e-f982-4e61-9dad-1d1abebfb933') def test_resize_server_for_alt_account_fails(self): # A resize request for another user's server should fail self.assertRaises(lib_exc.NotFound, self.alt_client.resize_server, self.server['id'], self.flavor_ref_alt) @test.idempotent_id('a9fe8112-0ffa-4902-b061-f892bd5fe0d3') def test_create_image_for_alt_account_fails(self): # A create image request for another user's server should fail self.assertRaises(lib_exc.NotFound, self.alt_images_client.create_image, self.server['id'], name='testImage') @test.idempotent_id('95d445f6-babc-4f2e-aea3-aa24ec5e7f0d') def test_create_server_with_unauthorized_image(self): # Server creation with another user's image should fail self.assertRaises(lib_exc.BadRequest, self.alt_client.create_server, name='test', imageRef=self.image['id'], flavorRef=self.flavor_ref) @test.idempotent_id('acf8724b-142b-4044-82c3-78d31a533f24') def 
test_create_server_fails_when_tenant_incorrect(self): # BUG(sdague): this test should fail because of bad auth url, # which means that when we run with a service catalog without # project_id in the urls, it should fail to fail, and thus # fail the test. It does not. # # The 400 BadRequest is clearly ambiguous, and something else # is wrong about this request. This should be fixed. # # A create server request should fail if the tenant id does not match # the current user # Change the base URL to impersonate another user self.alt_client.auth_provider.set_alt_auth_data( request_part='url', auth_data=self.client.auth_provider.auth_data ) self.assertRaises(lib_exc.BadRequest, self.alt_client.create_server, name='test', imageRef=self.image['id'], flavorRef=self.flavor_ref) @test.idempotent_id('f03d1ded-7fd4-4d29-bc13-e2391f29c625') def test_create_keypair_in_analt_user_tenant(self): """create keypair should not function for alternate tenant POST {alt_service_url}/os-keypairs Attempt to create a keypair against an alternate tenant by changing using a different tenant's service url. This should return a BadRequest. This tests basic tenant isolation protections. NOTE(sdague): if the environment does not use project_id in the service urls, this test is not valid. Skip under these conditions. """ if self.alt_keypairs_client.base_url == self.keypairs_client.base_url: raise self.skipException("Service urls don't include project_id") k_name = data_utils.rand_name('keypair') try: # Change the base URL to impersonate another user self.alt_keypairs_client.auth_provider.set_alt_auth_data( request_part='url', auth_data=self.keypairs_client.auth_provider.auth_data ) resp = {} resp['status'] = None self.assertRaises(lib_exc.BadRequest, self.alt_keypairs_client.create_keypair, name=k_name) finally: # Next request the base_url is back to normal if (resp['status'] is not None): self.alt_keypairs_client.delete_keypair(k_name) LOG.error("Create keypair request should not happen " "if the tenant id does not match the current user") @test.idempotent_id('85bcdd8f-56b4-4868-ae56-63fbf6f7e405') def test_get_keypair_of_alt_account_fails(self): # A GET request for another user's keypair should fail self.assertRaises(lib_exc.NotFound, self.alt_keypairs_client.show_keypair, self.keypairname) @test.idempotent_id('6d841683-a8e0-43da-a1b8-b339f7692b61') def test_delete_keypair_of_alt_account_fails(self): # A DELETE request for another user's keypair should fail self.assertRaises(lib_exc.NotFound, self.alt_keypairs_client.delete_keypair, self.keypairname) @test.idempotent_id('fcb2e144-36e3-4dfb-9f9f-e72fcdec5656') def test_get_image_for_alt_account_fails(self): # A GET request for an image on another user's account should fail self.assertRaises(lib_exc.NotFound, self.alt_images_client.show_image, self.image['id']) @test.idempotent_id('9facb962-f043-4a9d-b9ee-166a32dea098') def test_delete_image_for_alt_account_fails(self): # A DELETE request for another user's image should fail self.assertRaises(lib_exc.NotFound, self.alt_images_client.delete_image, self.image['id']) @test.idempotent_id('752c917e-83be-499d-a422-3559127f7d3c') def test_create_security_group_in_analt_user_tenant(self): """create security group should not function for alternate tenant POST {alt_service_url}/os-security-groups Attempt to create a security group against an alternate tenant by changing using a different tenant's service url. This should return a BadRequest. This tests basic tenant isolation protections. 
NOTE(sdague): if the environment does not use project_id in the service urls, this test is not valid. Skip under these conditions. """ if self.alt_security_client.base_url == self.security_client.base_url: raise self.skipException("Service urls don't include project_id") s_name = data_utils.rand_name('security') s_description = data_utils.rand_name('security') try: # Change the base URL to impersonate another user self.alt_security_client.auth_provider.set_alt_auth_data( request_part='url', auth_data=self.security_client.auth_provider.auth_data ) resp = {} resp['status'] = None self.assertRaises(lib_exc.BadRequest, self.alt_security_client.create_security_group, name=s_name, description=s_description) finally: # Next request the base_url is back to normal if resp['status'] is not None: self.alt_security_client.delete_security_group(resp['id']) LOG.error("Create Security Group request should not happen if" "the tenant id does not match the current user") @test.idempotent_id('9db3590f-4d15-4e5f-985e-b28514919a6f') def test_get_security_group_of_alt_account_fails(self): # A GET request for another user's security group should fail self.assertRaises(lib_exc.NotFound, self.alt_security_client.show_security_group, self.security_group['id']) @test.idempotent_id('155387a5-2bbc-4acf-ab06-698dae537ea5') def test_delete_security_group_of_alt_account_fails(self): # A DELETE request for another user's security group should fail self.assertRaises(lib_exc.NotFound, self.alt_security_client.delete_security_group, self.security_group['id']) @test.idempotent_id('b2b76de0-210a-4089-b921-591c9ec552f6') def test_create_security_group_rule_in_analt_user_tenant(self): """create security group rule should not function for alternate tenant POST {alt_service_url}/os-security-group-rules Attempt to create a security group rule against an alternate tenant by changing using a different tenant's service url. This should return a BadRequest. This tests basic tenant isolation protections. NOTE(sdague): if the environment does not use project_id in the service urls, this test is not valid. Skip under these conditions. """ if self.alt_security_client.base_url == self.security_client.base_url: raise self.skipException("Service urls don't include project_id") parent_group_id = self.security_group['id'] ip_protocol = 'icmp' from_port = -1 to_port = -1 try: # Change the base URL to impersonate another user self.alt_rule_client.auth_provider.set_alt_auth_data( request_part='url', auth_data=self.rule_client.auth_provider.auth_data ) resp = {} resp['status'] = None self.assertRaises(lib_exc.BadRequest, self.alt_rule_client. 
create_security_group_rule, parent_group_id=parent_group_id, ip_protocol=ip_protocol, from_port=from_port, to_port=to_port) finally: # Next request the base_url is back to normal if resp['status'] is not None: self.alt_rule_client.delete_security_group_rule(resp['id']) LOG.error("Create security group rule request should not " "happen if the tenant id does not match the" " current user") @test.idempotent_id('c6044177-37ef-4ce4-b12c-270ddf26d7da') def test_delete_security_group_rule_of_alt_account_fails(self): # A DELETE request for another user's security group rule # should fail self.assertRaises(lib_exc.NotFound, self.alt_rule_client.delete_security_group_rule, self.rule['id']) @test.idempotent_id('c5f52351-53d9-4fc9-83e5-917f7f5e3d71') def test_set_metadata_of_alt_account_server_fails(self): # A set metadata for another user's server should fail req_metadata = {'meta1': 'data1', 'meta2': 'data2'} self.assertRaises(lib_exc.NotFound, self.alt_client.set_server_metadata, self.server['id'], req_metadata) @test.idempotent_id('fb6f51e9-df15-4939-898d-1aca38c258f0') def test_set_metadata_of_alt_account_image_fails(self): # A set metadata for another user's image should fail req_metadata = {'meta1': 'value1', 'meta2': 'value2'} self.assertRaises(lib_exc.NotFound, self.alt_images_client.set_image_metadata, self.image['id'], req_metadata) @test.idempotent_id('dea1936a-473d-49f2-92ad-97bb7aded22e') def test_get_metadata_of_alt_account_server_fails(self): # A get metadata for another user's server should fail req_metadata = {'meta1': 'data1'} self.client.set_server_metadata(self.server['id'], req_metadata) self.addCleanup(self.client.delete_server_metadata_item, self.server['id'], 'meta1') self.assertRaises(lib_exc.NotFound, self.alt_client.get_server_metadata_item, self.server['id'], 'meta1') @test.idempotent_id('16b2d724-0d3b-4216-a9fa-97bd4d9cf670') def test_get_metadata_of_alt_account_image_fails(self): # A get metadata for another user's image should fail req_metadata = {'meta1': 'value1'} self.addCleanup(self.images_client.delete_image_metadata_item, self.image['id'], 'meta1') self.images_client.set_image_metadata(self.image['id'], req_metadata) self.assertRaises(lib_exc.NotFound, self.alt_images_client.show_image_metadata_item, self.image['id'], 'meta1') @test.idempotent_id('79531e2e-e721-493c-8b30-a35db36fdaa6') def test_delete_metadata_of_alt_account_server_fails(self): # A delete metadata for another user's server should fail req_metadata = {'meta1': 'data1'} self.addCleanup(self.client.delete_server_metadata_item, self.server['id'], 'meta1') self.client.set_server_metadata(self.server['id'], req_metadata) self.assertRaises(lib_exc.NotFound, self.alt_client.delete_server_metadata_item, self.server['id'], 'meta1') @test.idempotent_id('a5175dcf-cef8-43d6-9b77-3cb707d62e94') def test_delete_metadata_of_alt_account_image_fails(self): # A delete metadata for another user's image should fail req_metadata = {'meta1': 'data1'} self.addCleanup(self.images_client.delete_image_metadata_item, self.image['id'], 'meta1') self.images_client.set_image_metadata(self.image['id'], req_metadata) self.assertRaises(lib_exc.NotFound, self.alt_images_client.delete_image_metadata_item, self.image['id'], 'meta1') @test.idempotent_id('b0c1e7a0-8853-40fd-8384-01f93d116cae') def test_get_console_output_of_alt_account_server_fails(self): # A Get Console Output for another user's server should fail self.assertRaises(lib_exc.NotFound, self.alt_client.get_console_output, self.server['id'], 10)
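# --- Illustrative sketch (not part of the Tempest suite above) --------------
# The keypair and security-group tests above share one pattern: temporarily
# point the alternate client at the other tenant's service URL, assert the
# request is rejected, and clean up in ``finally`` only if something was
# unexpectedly created. The stand-ins below (FakeKeypairsClient, AuthProvider,
# BadRequest) are invented for illustration; they are not Tempest APIs.

import unittest


class BadRequest(Exception):
    """Stand-in for tempest.lib.exceptions.BadRequest."""


class AuthProvider(object):
    """Toy auth provider that can borrow another tenant's URL for one call."""

    def __init__(self, url):
        self.auth_data = url
        self._alt_url = None

    def set_alt_auth_data(self, request_part, auth_data):
        assert request_part == 'url'
        self._alt_url = auth_data

    def current_url(self):
        # The alternate URL, if set, is consumed by exactly one request.
        url, self._alt_url = self._alt_url or self.auth_data, None
        return url


class FakeKeypairsClient(object):
    """Toy client that rejects requests sent to a foreign tenant URL."""

    def __init__(self, own_url):
        self.own_url = own_url
        self.auth_provider = AuthProvider(own_url)

    def create_keypair(self, name):
        if self.auth_provider.current_url() != self.own_url:
            raise BadRequest('tenant id does not match the requested URL')
        return {'name': name}


class TenantIsolationSketch(unittest.TestCase):
    def test_create_keypair_in_alt_tenant_fails(self):
        keypairs_client = FakeKeypairsClient('https://nova/v2/tenant-a')
        alt_keypairs_client = FakeKeypairsClient('https://nova/v2/tenant-b')
        created = None
        try:
            # Impersonate the other tenant by swapping the URL auth data,
            # exactly as the tests above do with set_alt_auth_data().
            alt_keypairs_client.auth_provider.set_alt_auth_data(
                request_part='url',
                auth_data=keypairs_client.auth_provider.auth_data)
            with self.assertRaises(BadRequest):
                created = alt_keypairs_client.create_keypair(name='kp-test')
        finally:
            # The cleanup branch in the tests above runs only if the request
            # unexpectedly succeeded; here that simply means ``created`` is set.
            self.assertIsNone(created)


if __name__ == '__main__':
    unittest.main()
# -----------------------------------------------------------------------------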
# Copyright (c) 2012 NetApp, Inc. All rights reserved. # Copyright (c) 2014 Navneet Singh. All rights reserved. # Copyright (c) 2014 Glenn Gobeli. All rights reserved. # Copyright (c) 2014 Clinton Knight. All rights reserved. # Copyright (c) 2015 Alex Meade. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ NetApp API for Data ONTAP and OnCommand DFM. Contains classes required to issue API calls to Data ONTAP and OnCommand DFM. """ from eventlet import greenthread from eventlet import semaphore from lxml import etree from oslo_log import log as logging import random import six from six.moves import urllib from cinder import exception from cinder.i18n import _, _LE from cinder import ssh_utils from cinder import utils LOG = logging.getLogger(__name__) EAPIPRIVILEGE = '13003' EAPINOTFOUND = '13005' ESIS_CLONE_NOT_LICENSED = '14956' ESNAPSHOTNOTALLOWED = '13023' class NaServer(object): """Encapsulates server connection logic.""" TRANSPORT_TYPE_HTTP = 'http' TRANSPORT_TYPE_HTTPS = 'https' SERVER_TYPE_FILER = 'filer' SERVER_TYPE_DFM = 'dfm' URL_FILER = 'servlets/netapp.servlets.admin.XMLrequest_filer' URL_DFM = 'apis/XMLrequest' NETAPP_NS = 'http://www.netapp.com/filer/admin' STYLE_LOGIN_PASSWORD = 'basic_auth' STYLE_CERTIFICATE = 'certificate_auth' def __init__(self, host, server_type=SERVER_TYPE_FILER, transport_type=TRANSPORT_TYPE_HTTP, style=STYLE_LOGIN_PASSWORD, username=None, password=None, port=None): self._host = host self.set_server_type(server_type) self.set_transport_type(transport_type) self.set_style(style) if port: self.set_port(port) self._username = username self._password = password self._refresh_conn = True LOG.debug('Using NetApp controller: %s', self._host) def set_transport_type(self, transport_type): """Set the transport type protocol for API. Supports http and https transport types. """ if not transport_type: raise ValueError('No transport type specified') if transport_type.lower() not in ( NaServer.TRANSPORT_TYPE_HTTP, NaServer.TRANSPORT_TYPE_HTTPS): raise ValueError('Unsupported transport type') self._protocol = transport_type.lower() if self._protocol == NaServer.TRANSPORT_TYPE_HTTP: if self._server_type == NaServer.SERVER_TYPE_FILER: self.set_port(80) else: self.set_port(8088) else: if self._server_type == NaServer.SERVER_TYPE_FILER: self.set_port(443) else: self.set_port(8488) self._refresh_conn = True def set_style(self, style): """Set the authorization style for communicating with the server. Supports basic_auth for now. Certificate_auth mode to be done. """ if style.lower() not in (NaServer.STYLE_LOGIN_PASSWORD, NaServer.STYLE_CERTIFICATE): raise ValueError('Unsupported authentication style') self._auth_style = style.lower() def set_server_type(self, server_type): """Set the target server type. Supports filer and dfm server types. 
""" if server_type.lower() not in (NaServer.SERVER_TYPE_FILER, NaServer.SERVER_TYPE_DFM): raise ValueError('Unsupported server type') self._server_type = server_type.lower() if self._server_type == NaServer.SERVER_TYPE_FILER: self._url = NaServer.URL_FILER else: self._url = NaServer.URL_DFM self._ns = NaServer.NETAPP_NS self._refresh_conn = True def set_api_version(self, major, minor): """Set the API version.""" try: self._api_major_version = int(major) self._api_minor_version = int(minor) self._api_version = six.text_type(major) + "." + \ six.text_type(minor) except ValueError: raise ValueError('Major and minor versions must be integers') self._refresh_conn = True def get_api_version(self): """Gets the API version tuple.""" if hasattr(self, '_api_version'): return (self._api_major_version, self._api_minor_version) return None def set_port(self, port): """Set the server communication port.""" try: int(port) except ValueError: raise ValueError('Port must be integer') self._port = six.text_type(port) self._refresh_conn = True def set_timeout(self, seconds): """Sets the timeout in seconds.""" try: self._timeout = int(seconds) except ValueError: raise ValueError('timeout in seconds must be integer') def set_vfiler(self, vfiler): """Set the vfiler to use if tunneling gets enabled.""" self._vfiler = vfiler def set_vserver(self, vserver): """Set the vserver to use if tunneling gets enabled.""" self._vserver = vserver @utils.trace_api def send_http_request(self, na_element, enable_tunneling=False): """Invoke the API on the server.""" if not na_element or not isinstance(na_element, NaElement): raise ValueError('NaElement must be supplied to invoke API') request, request_element = self._create_request(na_element, enable_tunneling) if not hasattr(self, '_opener') or not self._opener \ or self._refresh_conn: self._build_opener() try: if hasattr(self, '_timeout'): response = self._opener.open(request, timeout=self._timeout) else: response = self._opener.open(request) except urllib.error.HTTPError as e: raise NaApiError(e.code, e.msg) except Exception: LOG.exception(_LE("Error communicating with NetApp filer.")) raise NaApiError('Unexpected error') response_xml = response.read() response_element = self._get_result(response_xml) return response_element def invoke_successfully(self, na_element, enable_tunneling=False): """Invokes API and checks execution status as success. Need to set enable_tunneling to True explicitly to achieve it. This helps to use same connection instance to enable or disable tunneling. The vserver or vfiler should be set before this call otherwise tunneling remains disabled. """ result = self.send_http_request(na_element, enable_tunneling) if result.has_attr('status') and result.get_attr('status') == 'passed': return result code = result.get_attr('errno')\ or result.get_child_content('errorno')\ or 'ESTATUSFAILED' if code == ESIS_CLONE_NOT_LICENSED: msg = 'Clone operation failed: FlexClone not licensed.' 
else: msg = result.get_attr('reason')\ or result.get_child_content('reason')\ or 'Execution status is failed due to unknown reason' raise NaApiError(code, msg) def _create_request(self, na_element, enable_tunneling=False): """Creates request in the desired format.""" netapp_elem = NaElement('netapp') netapp_elem.add_attr('xmlns', self._ns) if hasattr(self, '_api_version'): netapp_elem.add_attr('version', self._api_version) if enable_tunneling: self._enable_tunnel_request(netapp_elem) netapp_elem.add_child_elem(na_element) request_d = netapp_elem.to_string() request = urllib.request.Request( self._get_url(), data=request_d, headers={'Content-Type': 'text/xml', 'charset': 'utf-8'}) return request, netapp_elem def _enable_tunnel_request(self, netapp_elem): """Enables vserver or vfiler tunneling.""" if hasattr(self, '_vfiler') and self._vfiler: if hasattr(self, '_api_major_version') and \ hasattr(self, '_api_minor_version') and \ self._api_major_version >= 1 and \ self._api_minor_version >= 7: netapp_elem.add_attr('vfiler', self._vfiler) else: raise ValueError('ontapi version has to be atleast 1.7' ' to send request to vfiler') if hasattr(self, '_vserver') and self._vserver: if hasattr(self, '_api_major_version') and \ hasattr(self, '_api_minor_version') and \ self._api_major_version >= 1 and \ self._api_minor_version >= 15: netapp_elem.add_attr('vfiler', self._vserver) else: raise ValueError('ontapi version has to be atleast 1.15' ' to send request to vserver') def _parse_response(self, response): """Get the NaElement for the response.""" if not response: raise NaApiError('No response received') xml = etree.XML(response) return NaElement(xml) def _get_result(self, response): """Gets the call result.""" processed_response = self._parse_response(response) return processed_response.get_child_by_name('results') def _get_url(self): return '%s://%s:%s/%s' % (self._protocol, self._host, self._port, self._url) def _build_opener(self): if self._auth_style == NaServer.STYLE_LOGIN_PASSWORD: auth_handler = self._create_basic_auth_handler() else: auth_handler = self._create_certificate_auth_handler() opener = urllib.request.build_opener(auth_handler) self._opener = opener def _create_basic_auth_handler(self): password_man = urllib.request.HTTPPasswordMgrWithDefaultRealm() password_man.add_password(None, self._get_url(), self._username, self._password) auth_handler = urllib.request.HTTPBasicAuthHandler(password_man) return auth_handler def _create_certificate_auth_handler(self): raise NotImplementedError() def __str__(self): return "server: %s" % self._host class NaElement(object): """Class wraps basic building block for NetApp API request.""" def __init__(self, name): """Name of the element or etree.Element.""" if isinstance(name, etree._Element): self._element = name else: self._element = etree.Element(name) def get_name(self): """Returns the tag name of the element.""" return self._element.tag def set_content(self, text): """Set the text string for the element.""" self._element.text = text def get_content(self): """Get the text for the element.""" return self._element.text def add_attr(self, name, value): """Add the attribute to the element.""" self._element.set(name, value) def add_attrs(self, **attrs): """Add multiple attributes to the element.""" for attr in attrs.keys(): self._element.set(attr, attrs.get(attr)) def add_child_elem(self, na_element): """Add the child element to the element.""" if isinstance(na_element, NaElement): self._element.append(na_element._element) return raise def 
get_child_by_name(self, name): """Get the child element by the tag name.""" for child in self._element.iterchildren(): if child.tag == name or etree.QName(child.tag).localname == name: return NaElement(child) return None def get_child_content(self, name): """Get the content of the child.""" for child in self._element.iterchildren(): if child.tag == name or etree.QName(child.tag).localname == name: return child.text return None def get_children(self): """Get the children for the element.""" return [NaElement(el) for el in self._element.iterchildren()] def has_attr(self, name): """Checks whether element has attribute.""" attributes = self._element.attrib or {} return name in attributes.keys() def get_attr(self, name): """Get the attribute with the given name.""" attributes = self._element.attrib or {} return attributes.get(name) def get_attr_names(self): """Returns the list of attribute names.""" attributes = self._element.attrib or {} return list(attributes.keys()) def add_new_child(self, name, content, convert=False): """Add child with tag name and content. Convert replaces entity refs to chars. """ child = NaElement(name) if convert: content = NaElement._convert_entity_refs(content) child.set_content(content) self.add_child_elem(child) @staticmethod def _convert_entity_refs(text): """Converts entity refs to chars to handle etree auto conversions.""" text = text.replace("&lt;", "<") text = text.replace("&gt;", ">") return text @staticmethod def create_node_with_children(node, **children): """Creates and returns named node with children.""" parent = NaElement(node) for child in children.keys(): parent.add_new_child(child, children.get(child, None)) return parent def add_node_with_children(self, node, **children): """Creates named node with children.""" parent = NaElement.create_node_with_children(node, **children) self.add_child_elem(parent) def to_string(self, pretty=False, method='xml', encoding='UTF-8'): """Prints the element to string.""" return etree.tostring(self._element, method=method, encoding=encoding, pretty_print=pretty) def __str__(self): xml = self.to_string(pretty=True) if six.PY3: xml = xml.decode('utf-8') return xml def __eq__(self, other): return str(self) == str(other) def __ne__(self, other): return not self.__eq__(other) def __hash__(self): return hash(str(self)) def __repr__(self): return str(self) def __getitem__(self, key): """Dict getter method for NaElement. Returns NaElement list if present, text value in case no NaElement node children or attribute value if present. """ child = self.get_child_by_name(key) if child: if child.get_children(): return child else: return child.get_content() elif self.has_attr(key): return self.get_attr(key) raise KeyError(_('No element by given name %s.') % (key)) def __setitem__(self, key, value): """Dict setter method for NaElement. Accepts dict, list, tuple, str, int, float and long as valid value. """ if key: if value: if isinstance(value, NaElement): child = NaElement(key) child.add_child_elem(value) self.add_child_elem(child) elif isinstance(value, six.integer_types + (str, float)): self.add_new_child(key, six.text_type(value)) elif isinstance(value, (list, tuple, dict)): child = NaElement(key) child.translate_struct(value) self.add_child_elem(child) else: raise TypeError(_('Not a valid value for NaElement.')) else: self.add_child_elem(NaElement(key)) else: raise KeyError(_('NaElement name cannot be null.')) def translate_struct(self, data_struct): """Convert list, tuple, dict to NaElement and appends. Example usage: 1. .. 
code-block:: xml <root> <elem1>vl1</elem1> <elem2>vl2</elem2> <elem3>vl3</elem3> </root> The above can be achieved by doing .. code-block:: python root = NaElement('root') root.translate_struct({'elem1': 'vl1', 'elem2': 'vl2', 'elem3': 'vl3'}) 2. .. code-block:: xml <root> <elem1>vl1</elem1> <elem2>vl2</elem2> <elem1>vl3</elem1> </root> The above can be achieved by doing .. code-block:: python root = NaElement('root') root.translate_struct([{'elem1': 'vl1', 'elem2': 'vl2'}, {'elem1': 'vl3'}]) """ if isinstance(data_struct, (list, tuple)): for el in data_struct: if isinstance(el, (list, tuple, dict)): self.translate_struct(el) else: self.add_child_elem(NaElement(el)) elif isinstance(data_struct, dict): for k in data_struct.keys(): child = NaElement(k) if isinstance(data_struct[k], (dict, list, tuple)): child.translate_struct(data_struct[k]) else: if data_struct[k]: child.set_content(six.text_type(data_struct[k])) self.add_child_elem(child) else: raise ValueError(_('Type cannot be converted into NaElement.')) class NaApiError(Exception): """Base exception class for NetApp API errors.""" def __init__(self, code='unknown', message='unknown'): self.code = code self.message = message def __str__(self, *args, **kwargs): return 'NetApp API failed. Reason - %s:%s' % (self.code, self.message) class SSHUtil(object): """Encapsulates connection logic and command execution for SSH client.""" MAX_CONCURRENT_SSH_CONNECTIONS = 5 RECV_TIMEOUT = 3 CONNECTION_KEEP_ALIVE = 600 WAIT_ON_STDOUT_TIMEOUT = 3 def __init__(self, host, username, password, port=22): self.ssh_pool = self._init_ssh_pool(host, port, username, password) # Note(cfouts) Number of SSH connections made to the backend need to be # limited. Use of SSHPool allows connections to be cached and reused # instead of creating a new connection each time a command is executed # via SSH. self.ssh_connect_semaphore = semaphore.Semaphore( self.MAX_CONCURRENT_SSH_CONNECTIONS) def _init_ssh_pool(self, host, port, username, password): return ssh_utils.SSHPool(host, port, self.CONNECTION_KEEP_ALIVE, username, password) def execute_command(self, client, command_text, timeout=RECV_TIMEOUT): LOG.debug("execute_command() - Sending command.") stdin, stdout, stderr = client.exec_command(command_text) stdin.close() self._wait_on_stdout(stdout, timeout) output = stdout.read() LOG.debug("Output of length %(size)d received.", {'size': len(output)}) stdout.close() stderr.close() return output def execute_command_with_prompt(self, client, command, expected_prompt_text, prompt_response, timeout=RECV_TIMEOUT): LOG.debug("execute_command_with_prompt() - Sending command.") stdin, stdout, stderr = client.exec_command(command) self._wait_on_stdout(stdout, timeout) response = stdout.channel.recv(999) if response.strip() != expected_prompt_text: msg = _("Unexpected output. Expected [%(expected)s] but " "received [%(output)s]") % { 'expected': expected_prompt_text, 'output': response.strip(), } LOG.error(msg) stdin.close() stdout.close() stderr.close() raise exception.VolumeBackendAPIException(msg) else: LOG.debug("execute_command_with_prompt() - Sending answer") stdin.write(prompt_response + '\n') stdin.flush() stdin.close() stdout.close() stderr.close() def _wait_on_stdout(self, stdout, timeout=WAIT_ON_STDOUT_TIMEOUT): wait_time = 0.0 # NOTE(cfouts): The server does not always indicate when EOF is reached # for stdout. The timeout exists for this reason and an attempt is made # to read from stdout. 
while not stdout.channel.exit_status_ready(): # period is 10 - 25 centiseconds period = random.randint(10, 25) / 100.0 greenthread.sleep(period) wait_time += period if wait_time > timeout: LOG.debug("Timeout exceeded while waiting for exit status.") break
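# --- Usage sketch for the NetApp API classes above ---------------------------
# A typical round trip: build a ZAPI request as an NaElement tree, send it
# through NaServer.invoke_successfully() with vserver tunneling enabled, and
# read fields off the returned NaElement. The import path ``netapp_api`` and
# the ZAPI call name / fields used here are placeholders for illustration;
# the request itself will not run without a reachable Data ONTAP system.

from netapp_api import NaApiError, NaElement, NaServer


def get_volume_size(host, username, password, vserver, volume_name):
    server = NaServer(host,
                      transport_type=NaServer.TRANSPORT_TYPE_HTTPS,
                      username=username,
                      password=password)
    server.set_api_version(1, 15)      # >= 1.15 allows vserver tunneling
    server.set_vserver(vserver)
    server.set_timeout(30)

    # Builds <volume-size><volume>NAME</volume></volume-size>
    request = NaElement('volume-size')
    request.add_new_child('volume', volume_name)

    try:
        result = server.invoke_successfully(request, enable_tunneling=True)
    except NaApiError as e:
        # e.code / e.message carry the ZAPI errno and reason string.
        raise RuntimeError('volume-size call failed: %s' % e)

    # get_child_content() returns None if the element is missing.
    return result.get_child_content('volume-size')
# -----------------------------------------------------------------------------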
from .. utils import TranspileTestCase, UnaryOperationTestCase, BinaryOperationTestCase, InplaceOperationTestCase class FrozensetTests(TranspileTestCase): pass class UnaryFrozensetOperationTests(UnaryOperationTestCase, TranspileTestCase): data_type = 'frozenset' not_implemented = [ 'test_unary_invert', 'test_unary_negative', 'test_unary_not', 'test_unary_positive', ] class BinaryFrozensetOperationTests(BinaryOperationTestCase, TranspileTestCase): data_type = 'frozenset' not_implemented = [ 'test_add_bool', 'test_add_bytearray', 'test_add_bytes', 'test_add_class', 'test_add_complex', 'test_add_dict', 'test_add_float', 'test_add_frozenset', 'test_add_int', 'test_add_list', 'test_add_None', 'test_add_NotImplemented', 'test_add_range', 'test_add_set', 'test_add_slice', 'test_add_str', 'test_add_tuple', 'test_and_bool', 'test_and_bytearray', 'test_and_bytes', 'test_and_class', 'test_and_complex', 'test_and_dict', 'test_and_float', 'test_and_frozenset', 'test_and_int', 'test_and_list', 'test_and_None', 'test_and_NotImplemented', 'test_and_range', 'test_and_set', 'test_and_slice', 'test_and_str', 'test_and_tuple', 'test_eq_bool', 'test_eq_bytearray', 'test_eq_bytes', 'test_eq_class', 'test_eq_complex', 'test_eq_dict', 'test_eq_float', 'test_eq_frozenset', 'test_eq_int', 'test_eq_list', 'test_eq_None', 'test_eq_NotImplemented', 'test_eq_range', 'test_eq_set', 'test_eq_slice', 'test_eq_str', 'test_eq_tuple', 'test_floor_divide_bool', 'test_floor_divide_bytearray', 'test_floor_divide_bytes', 'test_floor_divide_class', 'test_floor_divide_complex', 'test_floor_divide_dict', 'test_floor_divide_float', 'test_floor_divide_frozenset', 'test_floor_divide_int', 'test_floor_divide_list', 'test_floor_divide_None', 'test_floor_divide_NotImplemented', 'test_floor_divide_range', 'test_floor_divide_set', 'test_floor_divide_slice', 'test_floor_divide_str', 'test_floor_divide_tuple', 'test_ge_bool', 'test_ge_bytearray', 'test_ge_bytes', 'test_ge_class', 'test_ge_complex', 'test_ge_dict', 'test_ge_float', 'test_ge_frozenset', 'test_ge_int', 'test_ge_list', 'test_ge_None', 'test_ge_NotImplemented', 'test_ge_range', 'test_ge_set', 'test_ge_slice', 'test_ge_str', 'test_ge_tuple', 'test_gt_bool', 'test_gt_bytearray', 'test_gt_bytes', 'test_gt_class', 'test_gt_complex', 'test_gt_dict', 'test_gt_float', 'test_gt_frozenset', 'test_gt_int', 'test_gt_list', 'test_gt_None', 'test_gt_NotImplemented', 'test_gt_range', 'test_gt_set', 'test_gt_slice', 'test_gt_str', 'test_gt_tuple', 'test_le_bool', 'test_le_bytearray', 'test_le_bytes', 'test_le_class', 'test_le_complex', 'test_le_dict', 'test_le_float', 'test_le_frozenset', 'test_le_int', 'test_le_list', 'test_le_None', 'test_le_NotImplemented', 'test_le_range', 'test_le_set', 'test_le_slice', 'test_le_str', 'test_le_tuple', 'test_lshift_bool', 'test_lshift_bytearray', 'test_lshift_bytes', 'test_lshift_class', 'test_lshift_complex', 'test_lshift_dict', 'test_lshift_float', 'test_lshift_frozenset', 'test_lshift_int', 'test_lshift_list', 'test_lshift_None', 'test_lshift_NotImplemented', 'test_lshift_range', 'test_lshift_set', 'test_lshift_slice', 'test_lshift_str', 'test_lshift_tuple', 'test_lt_bool', 'test_lt_bytearray', 'test_lt_bytes', 'test_lt_class', 'test_lt_complex', 'test_lt_dict', 'test_lt_float', 'test_lt_frozenset', 'test_lt_int', 'test_lt_list', 'test_lt_None', 'test_lt_NotImplemented', 'test_lt_range', 'test_lt_set', 'test_lt_slice', 'test_lt_str', 'test_lt_tuple', 'test_modulo_bool', 'test_modulo_bytearray', 'test_modulo_bytes', 'test_modulo_class', 
'test_modulo_complex', 'test_modulo_dict', 'test_modulo_float', 'test_modulo_frozenset', 'test_modulo_int', 'test_modulo_list', 'test_modulo_None', 'test_modulo_NotImplemented', 'test_modulo_range', 'test_modulo_set', 'test_modulo_slice', 'test_modulo_str', 'test_modulo_tuple', 'test_multiply_bool', 'test_multiply_bytearray', 'test_multiply_bytes', 'test_multiply_class', 'test_multiply_complex', 'test_multiply_dict', 'test_multiply_float', 'test_multiply_frozenset', 'test_multiply_int', 'test_multiply_list', 'test_multiply_None', 'test_multiply_NotImplemented', 'test_multiply_range', 'test_multiply_set', 'test_multiply_slice', 'test_multiply_str', 'test_multiply_tuple', 'test_ne_bool', 'test_ne_bytearray', 'test_ne_bytes', 'test_ne_class', 'test_ne_complex', 'test_ne_dict', 'test_ne_float', 'test_ne_frozenset', 'test_ne_int', 'test_ne_list', 'test_ne_None', 'test_ne_NotImplemented', 'test_ne_range', 'test_ne_set', 'test_ne_slice', 'test_ne_str', 'test_ne_tuple', 'test_or_bool', 'test_or_bytearray', 'test_or_bytes', 'test_or_class', 'test_or_complex', 'test_or_dict', 'test_or_float', 'test_or_frozenset', 'test_or_int', 'test_or_list', 'test_or_None', 'test_or_NotImplemented', 'test_or_range', 'test_or_set', 'test_or_slice', 'test_or_str', 'test_or_tuple', 'test_power_bool', 'test_power_bytearray', 'test_power_bytes', 'test_power_class', 'test_power_complex', 'test_power_dict', 'test_power_float', 'test_power_frozenset', 'test_power_int', 'test_power_list', 'test_power_None', 'test_power_NotImplemented', 'test_power_range', 'test_power_set', 'test_power_slice', 'test_power_str', 'test_power_tuple', 'test_rshift_bool', 'test_rshift_bytearray', 'test_rshift_bytes', 'test_rshift_class', 'test_rshift_complex', 'test_rshift_dict', 'test_rshift_float', 'test_rshift_frozenset', 'test_rshift_int', 'test_rshift_list', 'test_rshift_None', 'test_rshift_NotImplemented', 'test_rshift_range', 'test_rshift_set', 'test_rshift_slice', 'test_rshift_str', 'test_rshift_tuple', 'test_subscr_bool', 'test_subscr_bytearray', 'test_subscr_bytes', 'test_subscr_class', 'test_subscr_complex', 'test_subscr_dict', 'test_subscr_float', 'test_subscr_frozenset', 'test_subscr_int', 'test_subscr_list', 'test_subscr_None', 'test_subscr_NotImplemented', 'test_subscr_range', 'test_subscr_set', 'test_subscr_slice', 'test_subscr_str', 'test_subscr_tuple', 'test_subtract_bool', 'test_subtract_bytearray', 'test_subtract_bytes', 'test_subtract_class', 'test_subtract_complex', 'test_subtract_dict', 'test_subtract_float', 'test_subtract_frozenset', 'test_subtract_int', 'test_subtract_list', 'test_subtract_None', 'test_subtract_NotImplemented', 'test_subtract_range', 'test_subtract_set', 'test_subtract_slice', 'test_subtract_str', 'test_subtract_tuple', 'test_true_divide_bool', 'test_true_divide_bytearray', 'test_true_divide_bytes', 'test_true_divide_class', 'test_true_divide_complex', 'test_true_divide_dict', 'test_true_divide_float', 'test_true_divide_frozenset', 'test_true_divide_int', 'test_true_divide_list', 'test_true_divide_None', 'test_true_divide_NotImplemented', 'test_true_divide_range', 'test_true_divide_set', 'test_true_divide_slice', 'test_true_divide_str', 'test_true_divide_tuple', 'test_xor_bool', 'test_xor_bytearray', 'test_xor_bytes', 'test_xor_class', 'test_xor_complex', 'test_xor_dict', 'test_xor_float', 'test_xor_frozenset', 'test_xor_int', 'test_xor_list', 'test_xor_None', 'test_xor_NotImplemented', 'test_xor_range', 'test_xor_set', 'test_xor_slice', 'test_xor_str', 'test_xor_tuple', ] class 
InplaceFrozensetOperationTests(InplaceOperationTestCase, TranspileTestCase): data_type = 'frozenset' not_implemented = [ 'test_add_bool', 'test_add_bytearray', 'test_add_bytes', 'test_add_class', 'test_add_complex', 'test_add_dict', 'test_add_float', 'test_add_frozenset', 'test_add_int', 'test_add_list', 'test_add_None', 'test_add_NotImplemented', 'test_add_range', 'test_add_set', 'test_add_slice', 'test_add_str', 'test_add_tuple', 'test_and_bool', 'test_and_bytearray', 'test_and_bytes', 'test_and_class', 'test_and_complex', 'test_and_dict', 'test_and_float', 'test_and_frozenset', 'test_and_int', 'test_and_list', 'test_and_None', 'test_and_NotImplemented', 'test_and_range', 'test_and_set', 'test_and_slice', 'test_and_str', 'test_and_tuple', 'test_floor_divide_bool', 'test_floor_divide_bytearray', 'test_floor_divide_bytes', 'test_floor_divide_class', 'test_floor_divide_complex', 'test_floor_divide_dict', 'test_floor_divide_float', 'test_floor_divide_frozenset', 'test_floor_divide_int', 'test_floor_divide_list', 'test_floor_divide_None', 'test_floor_divide_NotImplemented', 'test_floor_divide_range', 'test_floor_divide_set', 'test_floor_divide_slice', 'test_floor_divide_str', 'test_floor_divide_tuple', 'test_lshift_bool', 'test_lshift_bytearray', 'test_lshift_bytes', 'test_lshift_class', 'test_lshift_complex', 'test_lshift_dict', 'test_lshift_float', 'test_lshift_frozenset', 'test_lshift_int', 'test_lshift_list', 'test_lshift_None', 'test_lshift_NotImplemented', 'test_lshift_range', 'test_lshift_set', 'test_lshift_slice', 'test_lshift_str', 'test_lshift_tuple', 'test_modulo_bool', 'test_modulo_bytearray', 'test_modulo_bytes', 'test_modulo_class', 'test_modulo_complex', 'test_modulo_dict', 'test_modulo_float', 'test_modulo_frozenset', 'test_modulo_int', 'test_modulo_list', 'test_modulo_None', 'test_modulo_NotImplemented', 'test_modulo_range', 'test_modulo_set', 'test_modulo_slice', 'test_modulo_str', 'test_modulo_tuple', 'test_multiply_bool', 'test_multiply_bytearray', 'test_multiply_bytes', 'test_multiply_class', 'test_multiply_complex', 'test_multiply_dict', 'test_multiply_float', 'test_multiply_frozenset', 'test_multiply_int', 'test_multiply_list', 'test_multiply_None', 'test_multiply_NotImplemented', 'test_multiply_range', 'test_multiply_set', 'test_multiply_slice', 'test_multiply_str', 'test_multiply_tuple', 'test_or_bool', 'test_or_bytearray', 'test_or_bytes', 'test_or_class', 'test_or_complex', 'test_or_dict', 'test_or_float', 'test_or_frozenset', 'test_or_int', 'test_or_list', 'test_or_None', 'test_or_NotImplemented', 'test_or_range', 'test_or_set', 'test_or_slice', 'test_or_str', 'test_or_tuple', 'test_power_bool', 'test_power_bytearray', 'test_power_bytes', 'test_power_class', 'test_power_complex', 'test_power_dict', 'test_power_float', 'test_power_frozenset', 'test_power_int', 'test_power_list', 'test_power_None', 'test_power_NotImplemented', 'test_power_range', 'test_power_set', 'test_power_slice', 'test_power_str', 'test_power_tuple', 'test_rshift_bool', 'test_rshift_bytearray', 'test_rshift_bytes', 'test_rshift_class', 'test_rshift_complex', 'test_rshift_dict', 'test_rshift_float', 'test_rshift_frozenset', 'test_rshift_int', 'test_rshift_list', 'test_rshift_None', 'test_rshift_NotImplemented', 'test_rshift_range', 'test_rshift_set', 'test_rshift_slice', 'test_rshift_str', 'test_rshift_tuple', 'test_subtract_bool', 'test_subtract_bytearray', 'test_subtract_bytes', 'test_subtract_class', 'test_subtract_complex', 'test_subtract_dict', 'test_subtract_float', 
'test_subtract_frozenset', 'test_subtract_int', 'test_subtract_list', 'test_subtract_None', 'test_subtract_NotImplemented', 'test_subtract_range', 'test_subtract_set', 'test_subtract_slice', 'test_subtract_str', 'test_subtract_tuple', 'test_true_divide_bool', 'test_true_divide_bytearray', 'test_true_divide_bytes', 'test_true_divide_class', 'test_true_divide_complex', 'test_true_divide_dict', 'test_true_divide_float', 'test_true_divide_frozenset', 'test_true_divide_int', 'test_true_divide_list', 'test_true_divide_None', 'test_true_divide_NotImplemented', 'test_true_divide_range', 'test_true_divide_set', 'test_true_divide_slice', 'test_true_divide_str', 'test_true_divide_tuple', 'test_xor_bool', 'test_xor_bytearray', 'test_xor_bytes', 'test_xor_class', 'test_xor_complex', 'test_xor_dict', 'test_xor_float', 'test_xor_frozenset', 'test_xor_int', 'test_xor_list', 'test_xor_None', 'test_xor_NotImplemented', 'test_xor_range', 'test_xor_set', 'test_xor_slice', 'test_xor_str', 'test_xor_tuple', ]
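# --- Illustrative sketch of the pattern used above ---------------------------
# The frozenset test classes above only set ``data_type`` and a long
# ``not_implemented`` list; the per-operation tests are generated by the base
# classes imported from ``..utils``. The sketch below shows one way such a
# base class could generate a test per (operation, right-hand type) and skip
# the names listed in ``not_implemented``. It is a guess at the mechanism for
# illustration only, not the actual ``..utils`` implementation.

import unittest


class BinaryOperationSketchBase(unittest.TestCase):
    data_type = None
    not_implemented = []

    operations = {'add': '+', 'and': '&', 'or': '|', 'xor': '^'}
    rhs_samples = {'int': '3', 'frozenset': 'frozenset({1, 2})'}

    @classmethod
    def add_generated_tests(cls):
        for op_name, op in cls.operations.items():
            for type_name, rhs in cls.rhs_samples.items():
                name = 'test_%s_%s' % (op_name, type_name)

                def make_test(op=op, rhs=rhs):
                    def test(self):
                        # A real harness would transpile the expression and
                        # compare the output with CPython; here we only check
                        # that evaluating it either works or raises TypeError.
                        try:
                            eval('frozenset({1, 2}) %s %s' % (op, rhs))
                        except TypeError:
                            pass
                    return test

                test = make_test()
                if name in cls.not_implemented:
                    test = unittest.skip('marked not_implemented')(test)
                setattr(cls, name, test)


class BinaryFrozensetSketchTests(BinaryOperationSketchBase):
    data_type = 'frozenset'
    not_implemented = ['test_add_int', 'test_add_frozenset']


BinaryFrozensetSketchTests.add_generated_tests()

if __name__ == '__main__':
    unittest.main()
# -----------------------------------------------------------------------------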
#!/usr/bin/env python # -*- coding: utf-8 -*- """Invoke tasks. To run a task, run ``$ invoke <COMMAND>``. To see a list of commands, run ``$ invoke --list``. """ import os import sys import json import platform import subprocess import logging from time import sleep import invoke from invoke import Collection from website import settings from .utils import pip_install, bin_prefix logging.getLogger('invoke').setLevel(logging.CRITICAL) # gets the root path for all the scripts that rely on it HERE = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..')) WHEELHOUSE_PATH = os.environ.get('WHEELHOUSE') CONSTRAINTS_PATH = os.path.join(HERE, 'requirements', 'constraints.txt') ns = Collection() try: from admin import tasks as admin_tasks ns.add_collection(Collection.from_module(admin_tasks), name='admin') except ImportError: pass def task(*args, **kwargs): """Behaves the same way as invoke.task. Adds the task to the root namespace. """ if len(args) == 1 and callable(args[0]): new_task = invoke.task(args[0]) ns.add_task(new_task) return new_task def decorator(f): new_task = invoke.task(f, *args, **kwargs) ns.add_task(new_task) return new_task return decorator @task def server(ctx, host=None, port=5000, debug=True, gitlogs=False): """Run the app server.""" if os.environ.get('WERKZEUG_RUN_MAIN') == 'true' or not debug: if os.environ.get('WEB_REMOTE_DEBUG', None): import pydevd # e.g. '127.0.0.1:5678' remote_parts = os.environ.get('WEB_REMOTE_DEBUG').split(':') pydevd.settrace(remote_parts[0], port=int(remote_parts[1]), suspend=False, stdoutToServer=True, stderrToServer=True) if gitlogs: git_logs(ctx) from website.app import init_app os.environ['DJANGO_SETTINGS_MODULE'] = 'api.base.settings' app = init_app(set_backends=True, routes=True) settings.API_SERVER_PORT = port else: from framework.flask import app context = None if settings.SECURE_MODE: context = (settings.OSF_SERVER_CERT, settings.OSF_SERVER_KEY) app.run(host=host, port=port, debug=debug, threaded=debug, extra_files=[settings.ASSET_HASH_PATH], ssl_context=context) @task def git_logs(ctx, branch=None): from scripts.meta import gatherer gatherer.main(branch=branch) @task def apiserver(ctx, port=8000, wait=True, autoreload=True, host='127.0.0.1', pty=True): """Run the API server.""" env = os.environ.copy() cmd = 'DJANGO_SETTINGS_MODULE=api.base.settings {} manage.py runserver {}:{} --nothreading'\ .format(sys.executable, host, port) if not autoreload: cmd += ' --noreload' if settings.SECURE_MODE: cmd = cmd.replace('runserver', 'runsslserver') cmd += ' --certificate {} --key {}'.format(settings.OSF_SERVER_CERT, settings.OSF_SERVER_KEY) if wait: return ctx.run(cmd, echo=True, pty=pty) from subprocess import Popen return Popen(cmd, shell=True, env=env) @task def adminserver(ctx, port=8001, host='127.0.0.1', pty=True): """Run the Admin server.""" env = 'DJANGO_SETTINGS_MODULE="admin.base.settings"' cmd = '{} python manage.py runserver {}:{} --nothreading'.format(env, host, port) if settings.SECURE_MODE: cmd = cmd.replace('runserver', 'runsslserver') cmd += ' --certificate {} --key {}'.format(settings.OSF_SERVER_CERT, settings.OSF_SERVER_KEY) ctx.run(cmd, echo=True, pty=pty) @task def shell(ctx, transaction=True, print_sql=False, notebook=False): cmd = 'DJANGO_SETTINGS_MODULE="api.base.settings" python manage.py osf_shell' if print_sql: cmd += ' --print-sql' if notebook: cmd += ' --notebook' if not transaction: cmd += ' --no-transaction' return ctx.run(cmd, pty=True, echo=True) @task def sharejs(ctx, host=None, port=None, 
db_url=None, cors_allow_origin=None): """Start a local ShareJS server.""" if host: os.environ['SHAREJS_SERVER_HOST'] = host if port: os.environ['SHAREJS_SERVER_PORT'] = port if db_url: os.environ['SHAREJS_DB_URL'] = db_url if cors_allow_origin: os.environ['SHAREJS_CORS_ALLOW_ORIGIN'] = cors_allow_origin if settings.SENTRY_DSN: os.environ['SHAREJS_SENTRY_DSN'] = settings.SENTRY_DSN share_server = os.path.join(settings.ADDON_PATH, 'wiki', 'shareServer.js') ctx.run('node {0}'.format(share_server)) @task(aliases=['celery']) def celery_worker(ctx, level='debug', hostname=None, beat=False, queues=None, concurrency=None, max_tasks_per_child=None): """Run the Celery process.""" os.environ['DJANGO_SETTINGS_MODULE'] = 'api.base.settings' cmd = 'celery worker -A framework.celery_tasks -Ofair -l {0}'.format(level) if hostname: cmd = cmd + ' --hostname={}'.format(hostname) # beat sets up a cron like scheduler, refer to website/settings if beat: cmd = cmd + ' --beat' if queues: cmd = cmd + ' --queues={}'.format(queues) if concurrency: cmd = cmd + ' --concurrency={}'.format(concurrency) if max_tasks_per_child: cmd = cmd + ' --maxtasksperchild={}'.format(max_tasks_per_child) ctx.run(bin_prefix(cmd), pty=True) @task(aliases=['beat']) def celery_beat(ctx, level='debug', schedule=None): """Run the Celery process.""" os.environ['DJANGO_SETTINGS_MODULE'] = 'api.base.settings' # beat sets up a cron like scheduler, refer to website/settings cmd = 'celery beat -A framework.celery_tasks -l {0} --pidfile='.format(level) if schedule: cmd = cmd + ' --schedule={}'.format(schedule) ctx.run(bin_prefix(cmd), pty=True) @task def migrate_search(ctx, delete=True, remove=False, index=settings.ELASTIC_INDEX): """Migrate the search-enabled models.""" from website.app import init_app init_app(routes=False, set_backends=False) from website.search_migration.migrate import migrate # NOTE: Silence the warning: # "InsecureRequestWarning: Unverified HTTPS request is being made. Adding certificate verification is strongly advised." SILENT_LOGGERS = ['py.warnings'] for logger in SILENT_LOGGERS: logging.getLogger(logger).setLevel(logging.ERROR) migrate(delete, remove=remove, index=index) @task def rebuild_search(ctx): """Delete and recreate the index for elasticsearch""" from website.app import init_app import requests from website import settings init_app(routes=False, set_backends=True) if not settings.ELASTIC_URI.startswith('http'): protocol = 'http://' if settings.DEBUG_MODE else 'https://' else: protocol = '' url = '{protocol}{uri}/{index}'.format( protocol=protocol, uri=settings.ELASTIC_URI.rstrip('/'), index=settings.ELASTIC_INDEX, ) print('Deleting index {}'.format(settings.ELASTIC_INDEX)) print('----- DELETE {}*'.format(url)) requests.delete(url + '*') print('Creating index {}'.format(settings.ELASTIC_INDEX)) print('----- PUT {}'.format(url)) requests.put(url) migrate_search(ctx, delete=False) @task def mailserver(ctx, port=1025): """Run a SMTP test server.""" cmd = 'python -m smtpd -n -c DebuggingServer localhost:{port}'.format(port=port) ctx.run(bin_prefix(cmd), pty=True) @task def jshint(ctx): """Run JSHint syntax check""" js_folder = os.path.join(HERE, 'website', 'static', 'js') jshint_bin = os.path.join(HERE, 'node_modules', '.bin', 'jshint') cmd = '{} {}'.format(jshint_bin, js_folder) ctx.run(cmd, echo=True) @task(aliases=['flake8']) def flake(ctx): ctx.run('flake8 .', echo=True) @task(aliases=['req']) def requirements(ctx, base=False, addons=False, release=False, dev=False, quick=False): """Install python dependencies. 
Examples: inv requirements inv requirements --quick Quick requirements are, in order, addons, dev and the base requirements. You should be able to use --quick for day to day development. By default, base requirements will run. However, if any set of addons, release, or dev are chosen, base will have to be mentioned explicitly in order to run. This is to remain compatible with previous usages. Release requirements will prevent dev, and base from running. """ if quick: base = True addons = True dev = True if not(addons or dev): base = True if release or addons: addon_requirements(ctx) # "release" takes precedence if release: req_file = os.path.join(HERE, 'requirements', 'release.txt') ctx.run( pip_install(req_file, constraints_file=CONSTRAINTS_PATH), echo=True ) else: if dev: # then dev requirements req_file = os.path.join(HERE, 'requirements', 'dev.txt') ctx.run( pip_install(req_file, constraints_file=CONSTRAINTS_PATH), echo=True ) if base: # then base requirements req_file = os.path.join(HERE, 'requirements.txt') ctx.run( pip_install(req_file, constraints_file=CONSTRAINTS_PATH), echo=True ) # fix URITemplate name conflict h/t @github ctx.run('pip uninstall uritemplate.py --yes || true') ctx.run('pip install --no-cache-dir uritemplate.py==0.3.0') @task def test_module(ctx, module=None, numprocesses=None, nocapture=False, params=None): """Helper for running tests. """ os.environ['DJANGO_SETTINGS_MODULE'] = 'osf_tests.settings' import pytest if not numprocesses: from multiprocessing import cpu_count numprocesses = cpu_count() # NOTE: Subprocess to compensate for lack of thread safety in the httpretty module. # https://github.com/gabrielfalcao/HTTPretty/issues/209#issue-54090252 if nocapture: args = [] else: args = ['-s'] if numprocesses > 1: args += ['-n {}'.format(numprocesses), '--max-slave-restart=0'] modules = [module] if isinstance(module, basestring) else module args.extend(modules) if params: params = [params] if isinstance(params, basestring) else params args.extend(params) retcode = pytest.main(args) sys.exit(retcode) OSF_TESTS = [ 'osf_tests', ] ELSE_TESTS = [ 'tests', ] API_TESTS1 = [ 'api_tests/identifiers', 'api_tests/institutions', 'api_tests/licenses', 'api_tests/logs', 'api_tests/metaschemas', 'api_tests/preprint_providers', 'api_tests/preprints', 'api_tests/registrations', 'api_tests/users', ] API_TESTS2 = [ 'api_tests/nodes', ] API_TESTS3 = [ 'api_tests/addons_tests', 'api_tests/applications', 'api_tests/base', 'api_tests/collections', 'api_tests/comments', 'api_tests/files', 'api_tests/guids', 'api_tests/reviews', 'api_tests/search', 'api_tests/taxonomies', 'api_tests/test', 'api_tests/tokens', 'api_tests/view_only_links', 'api_tests/wikis', ] ADDON_TESTS = [ 'addons', ] ADMIN_TESTS = [ 'admin_tests', ] @task def test_osf(ctx, numprocesses=None): """Run the OSF test suite.""" print('Testing modules "{}"'.format(OSF_TESTS)) test_module(ctx, module=OSF_TESTS, numprocesses=numprocesses) @task def test_else(ctx, numprocesses=None): """Run the old test suite.""" print('Testing modules "{}"'.format(ELSE_TESTS)) test_module(ctx, module=ELSE_TESTS, numprocesses=numprocesses) @task def test_api1(ctx, numprocesses=None): """Run the API test suite.""" print('Testing modules "{}"'.format(API_TESTS1 + ADMIN_TESTS)) test_module(ctx, module=API_TESTS1 + ADMIN_TESTS, numprocesses=numprocesses) @task def test_api2(ctx, numprocesses=None): """Run the API test suite.""" print('Testing modules "{}"'.format(API_TESTS2)) test_module(ctx, module=API_TESTS2, numprocesses=numprocesses) @task def 
test_api3(ctx, numprocesses=None): """Run the API test suite.""" print('Testing modules "{}"'.format(API_TESTS3 + OSF_TESTS)) test_module(ctx, module=API_TESTS3 + OSF_TESTS, numprocesses=numprocesses) @task def test_admin(ctx, numprocesses=None): """Run the Admin test suite.""" print('Testing module "admin_tests"') test_module(ctx, module=ADMIN_TESTS, numprocesses=numprocesses) @task def test_addons(ctx, numprocesses=None): """Run all the tests in the addons directory. """ print('Testing modules "{}"'.format(ADDON_TESTS)) test_module(ctx, module=ADDON_TESTS, numprocesses=numprocesses) @task def test_varnish(ctx): """Run the Varnish test suite.""" proc = apiserver(ctx, wait=False, autoreload=False) try: sleep(5) test_module(ctx, module='api/caching/tests/test_caching.py') finally: proc.kill() @task def test(ctx, all=False, syntax=False): """ Run unit tests: OSF (always), plus addons and syntax checks (optional) """ if syntax: flake(ctx) jshint(ctx) test_else(ctx) # /tests test_api1(ctx) test_api2(ctx) test_api3(ctx) # also /osf_tests if all: test_addons(ctx) # TODO: Enable admin tests test_admin(ctx) karma(ctx) @task def test_js(ctx): jshint(ctx) karma(ctx) @task def test_travis_addons(ctx, numprocesses=None): """ Run half of the tests to help travis go faster. Lints and Flakes happen everywhere to keep from wasting test time. """ flake(ctx) jshint(ctx) test_addons(ctx, numprocesses=numprocesses) @task def test_travis_else(ctx, numprocesses=None): """ Run other half of the tests to help travis go faster. Lints and Flakes happen everywhere to keep from wasting test time. """ flake(ctx) jshint(ctx) test_else(ctx, numprocesses=numprocesses) @task def test_travis_api1_and_js(ctx, numprocesses=None): flake(ctx) jshint(ctx) # TODO: Uncomment when https://github.com/travis-ci/travis-ci/issues/8836 is resolved # karma(ctx) test_api1(ctx, numprocesses=numprocesses) @task def test_travis_api2(ctx, numprocesses=None): flake(ctx) jshint(ctx) test_api2(ctx, numprocesses=numprocesses) @task def test_travis_api3_and_osf(ctx, numprocesses=None): flake(ctx) jshint(ctx) test_api3(ctx, numprocesses=numprocesses) @task def test_travis_varnish(ctx): """ Run the fast and quirky JS tests and varnish tests in isolation """ flake(ctx) jshint(ctx) test_js(ctx) test_varnish(ctx) @task def karma(ctx): """Run JS tests with Karma. Requires Chrome to be installed.""" ctx.run('yarn test', echo=True) @task def wheelhouse(ctx, addons=False, release=False, dev=False, pty=True): """Build wheels for python dependencies. 
Examples: inv wheelhouse --dev inv wheelhouse --addons inv wheelhouse --release """ if release or addons: for directory in os.listdir(settings.ADDON_PATH): path = os.path.join(settings.ADDON_PATH, directory) if os.path.isdir(path): req_file = os.path.join(path, 'requirements.txt') if os.path.exists(req_file): cmd = 'pip wheel --find-links={} -r {} --wheel-dir={} -c {}'.format( WHEELHOUSE_PATH, req_file, WHEELHOUSE_PATH, CONSTRAINTS_PATH, ) ctx.run(cmd, pty=pty) if release: req_file = os.path.join(HERE, 'requirements', 'release.txt') elif dev: req_file = os.path.join(HERE, 'requirements', 'dev.txt') else: req_file = os.path.join(HERE, 'requirements.txt') cmd = 'pip wheel --find-links={} -r {} --wheel-dir={} -c {}'.format( WHEELHOUSE_PATH, req_file, WHEELHOUSE_PATH, CONSTRAINTS_PATH, ) ctx.run(cmd, pty=pty) @task def addon_requirements(ctx): """Install all addon requirements.""" for directory in os.listdir(settings.ADDON_PATH): path = os.path.join(settings.ADDON_PATH, directory) requirements_file = os.path.join(path, 'requirements.txt') if os.path.isdir(path) and os.path.isfile(requirements_file): print('Installing requirements for {0}'.format(directory)) ctx.run( pip_install(requirements_file, constraints_file=CONSTRAINTS_PATH), echo=True ) print('Finished installing addon requirements') @task def travis_addon_settings(ctx): for directory in os.listdir(settings.ADDON_PATH): path = os.path.join(settings.ADDON_PATH, directory, 'settings') if os.path.isdir(path): try: open(os.path.join(path, 'local-travis.py')) ctx.run('cp {path}/local-travis.py {path}/local.py'.format(path=path)) except IOError: pass @task def copy_addon_settings(ctx): for directory in os.listdir(settings.ADDON_PATH): path = os.path.join(settings.ADDON_PATH, directory, 'settings') if os.path.isdir(path) and not os.path.isfile(os.path.join(path, 'local.py')): try: open(os.path.join(path, 'local-dist.py')) ctx.run('cp {path}/local-dist.py {path}/local.py'.format(path=path)) except IOError: pass @task def copy_settings(ctx, addons=False): # Website settings if not os.path.isfile('website/settings/local.py'): print('Creating local.py file') ctx.run('cp website/settings/local-dist.py website/settings/local.py') # Addon settings if addons: copy_addon_settings(ctx) @task(aliases=['bower']) def bower_install(ctx): print('Installing bower-managed packages') bower_bin = os.path.join(HERE, 'node_modules', '.bin', 'bower') ctx.run('{} prune --allow-root'.format(bower_bin), echo=True) ctx.run('{} install --allow-root'.format(bower_bin), echo=True) @task def docker_init(ctx): """Initial docker setup""" print('You will be asked for your sudo password to continue...') if platform.system() == 'Darwin': # Mac OSX ctx.run('sudo ifconfig lo0 alias 192.168.168.167') else: print('Your system is not recognized, you will have to setup docker manually') def ensure_docker_env_setup(ctx): if hasattr(os.environ, 'DOCKER_ENV_SETUP') and os.environ['DOCKER_ENV_SETUP'] == '1': pass else: os.environ['WEB_REMOTE_DEBUG'] = '192.168.168.167:11000' os.environ['API_REMOTE_DEBUG'] = '192.168.168.167:12000' os.environ['WORKER_REMOTE_DEBUG'] = '192.168.168.167:13000' os.environ['DOCKER_ENV_SETUP'] = '1' docker_init(ctx) @task def docker_requirements(ctx): ensure_docker_env_setup(ctx) ctx.run('docker-compose up requirements requirements_mfr requirements_wb') @task def docker_appservices(ctx): ensure_docker_env_setup(ctx) ctx.run('docker-compose up assets fakecas elasticsearch tokumx postgres') @task def docker_osf(ctx): ensure_docker_env_setup(ctx) 
ctx.run('docker-compose up mfr wb web api') @task def clear_sessions(ctx, months=1, dry_run=False): from website.app import init_app init_app(routes=False, set_backends=True) from scripts import clear_sessions clear_sessions.clear_sessions_relative(months=months, dry_run=dry_run) # Release tasks @task def hotfix(ctx, name, finish=False, push=False): """Rename hotfix branch to hotfix/<next-patch-version> and optionally finish hotfix. """ print('Checking out master to calculate curent version') ctx.run('git checkout master') latest_version = latest_tag_info()['current_version'] print('Current version is: {}'.format(latest_version)) major, minor, patch = latest_version.split('.') next_patch_version = '.'.join([major, minor, str(int(patch) + 1)]) print('Bumping to next patch version: {}'.format(next_patch_version)) print('Renaming branch...') new_branch_name = 'hotfix/{}'.format(next_patch_version) ctx.run('git checkout {}'.format(name), echo=True) ctx.run('git branch -m {}'.format(new_branch_name), echo=True) if finish: ctx.run('git flow hotfix finish {}'.format(next_patch_version), echo=True, pty=True) if push: ctx.run('git push --tags origin master', echo=True) ctx.run('git push origin develop', echo=True) @task def feature(ctx, name, finish=False, push=False): """Rename the current branch to a feature branch and optionally finish it.""" print('Renaming branch...') ctx.run('git branch -m feature/{}'.format(name), echo=True) if finish: ctx.run('git flow feature finish {}'.format(name), echo=True) if push: ctx.run('git push origin develop', echo=True) # Adapted from bumpversion def latest_tag_info(): try: # git-describe doesn't update the git-index, so we do that # subprocess.check_output(["git", "update-index", "--refresh"]) # get info about the latest tag in git describe_out = subprocess.check_output([ 'git', 'describe', '--dirty', '--tags', '--long', '--abbrev=40' ], stderr=subprocess.STDOUT ).decode().split('-') except subprocess.CalledProcessError as err: raise err # logger.warn("Error when running git describe") return {} info = {} if describe_out[-1].strip() == 'dirty': info['dirty'] = True describe_out.pop() info['commit_sha'] = describe_out.pop().lstrip('g') info['distance_to_latest_tag'] = int(describe_out.pop()) info['current_version'] = describe_out.pop().lstrip('v') # assert type(info["current_version"]) == str assert 0 == len(describe_out) return info # Tasks for generating and bundling SSL certificates # See http://cosdev.readthedocs.org/en/latest/osf/ops.html for details @task def generate_key(ctx, domain, bits=2048): cmd = 'openssl genrsa -des3 -out {0}.key {1}'.format(domain, bits) ctx.run(cmd) @task def generate_key_nopass(ctx, domain): cmd = 'openssl rsa -in {domain}.key -out {domain}.key.nopass'.format( domain=domain ) ctx.run(cmd) @task def generate_csr(ctx, domain): cmd = 'openssl req -new -key {domain}.key.nopass -out {domain}.csr'.format( domain=domain ) ctx.run(cmd) @task def request_ssl_cert(ctx, domain): """Generate a key, a key with password removed, and a signing request for the specified domain. Usage: > invoke request_ssl_cert pizza.osf.io """ generate_key(ctx, domain) generate_key_nopass(ctx, domain) generate_csr(ctx, domain) @task def bundle_certs(ctx, domain, cert_path): """Concatenate certificates from NameCheap in the correct order. Certificate files must be in the same directory. 
""" cert_files = [ '{0}.crt'.format(domain), 'COMODORSADomainValidationSecureServerCA.crt', 'COMODORSAAddTrustCA.crt', 'AddTrustExternalCARoot.crt', ] certs = ' '.join( os.path.join(cert_path, cert_file) for cert_file in cert_files ) cmd = 'cat {certs} > {domain}.bundle.crt'.format( certs=certs, domain=domain, ) ctx.run(cmd) @task def clean_assets(ctx): """Remove built JS files.""" public_path = os.path.join(HERE, 'website', 'static', 'public') js_path = os.path.join(public_path, 'js') ctx.run('rm -rf {0}'.format(js_path), echo=True) @task(aliases=['pack']) def webpack(ctx, clean=False, watch=False, dev=False, colors=False): """Build static assets with webpack.""" if clean: clean_assets(ctx) args = ['yarn run webpack-{}'.format('dev' if dev else 'prod')] args += ['--progress'] if watch: args += ['--watch'] if colors: args += ['--colors'] command = ' '.join(args) ctx.run(command, echo=True) @task() def build_js_config_files(ctx): from website import settings print('Building JS config files...') with open(os.path.join(settings.STATIC_FOLDER, 'built', 'nodeCategories.json'), 'wb') as fp: json.dump(settings.NODE_CATEGORY_MAP, fp) print('...Done.') @task() def assets(ctx, dev=False, watch=False, colors=False): """Install and build static assets.""" command = 'yarn install --frozen-lockfile' if not dev: command += ' --production' ctx.run(command, echo=True) bower_install(ctx) build_js_config_files(ctx) # Always set clean=False to prevent possible mistakes # on prod webpack(ctx, clean=False, watch=watch, dev=dev, colors=colors) @task def generate_self_signed(ctx, domain): """Generate self-signed SSL key and certificate. """ cmd = ( 'openssl req -x509 -nodes -days 365 -newkey rsa:2048' ' -keyout {0}.key -out {0}.crt' ).format(domain) ctx.run(cmd) @task def update_citation_styles(ctx): from scripts import parse_citation_styles total = parse_citation_styles.main() print('Parsed {} styles'.format(total)) @task def clean(ctx, verbose=False): ctx.run('find . -name "*.pyc" -delete', echo=True) @task(default=True) def usage(ctx): ctx.run('invoke --list') ### Maintenance Tasks ### @task def set_maintenance(ctx, message='', level=1, start=None, end=None): from website.app import setup_django setup_django() from website.maintenance import set_maintenance """Display maintenance notice across OSF applications (incl. preprints, registries, etc.) start - Start time for the maintenance period end - End time for the mainteance period NOTE: If no start or end values are provided, default to starting now and ending 24 hours from now. message - Message to display. If omitted, will be: "The site will undergo maintenance between <localized start time> and <localized end time>. Thank you for your patience." level - Severity level. Modifies the color of the displayed notice. Must be one of 1 (info), 2 (warning), 3 (danger). Examples: invoke set_maintenance --start 2016-03-16T15:41:00-04:00 --end 2016-03-16T15:42:00-04:00 invoke set_maintenance --message 'The OSF is experiencing issues connecting to a 3rd party service' --level 2 --start 2016-03-16T15:41:00-04:00 --end 2016-03-16T15:42:00-04:00 """ state = set_maintenance(message, level, start, end) print('Maintenance notice up {} to {}.'.format(state['start'], state['end'])) @task def unset_maintenance(ctx): from website.app import setup_django setup_django() from website.maintenance import unset_maintenance print('Taking down maintenance notice...') unset_maintenance() print('...Done.')
import ast from sys import version_info from pyflakes import messages as m, checker from pyflakes.test.harness import TestCase, skipIf, skip class Test(TestCase): def test_undefined(self): self.flakes('bar', m.UndefinedName) def test_definedInListComp(self): self.flakes('[a for a in range(10) if a]') @skipIf(version_info < (3,), 'in Python 2 list comprehensions execute in the same scope') def test_undefinedInListComp(self): self.flakes(''' [a for a in range(10)] a ''', m.UndefinedName) @skipIf(version_info < (3,), 'in Python 2 exception names stay bound after the except: block') def test_undefinedExceptionName(self): """Exception names can't be used after the except: block. The exc variable is unused inside the exception handler.""" self.flakes(''' try: raise ValueError('ve') except ValueError as exc: pass exc ''', m.UndefinedName, m.UnusedVariable) def test_namesDeclaredInExceptBlocks(self): """Locals declared in except: blocks can be used after the block. This shows the example in test_undefinedExceptionName is different.""" self.flakes(''' try: raise ValueError('ve') except ValueError as exc: e = exc e ''') @skip('error reporting disabled due to false positives below') def test_undefinedExceptionNameObscuringLocalVariable(self): """Exception names obscure locals, can't be used after. Last line will raise UnboundLocalError on Python 3 after exiting the except: block. Note next two examples for false positives to watch out for.""" self.flakes(''' exc = 'Original value' try: raise ValueError('ve') except ValueError as exc: pass exc ''', m.UndefinedName) @skipIf(version_info < (3,), 'in Python 2 exception names stay bound after the except: block') def test_undefinedExceptionNameObscuringLocalVariable2(self): """Exception names are unbound after the `except:` block. Last line will raise UnboundLocalError on Python 3 but would print out 've' on Python 2. The exc variable is unused inside the exception handler.""" self.flakes(''' try: raise ValueError('ve') except ValueError as exc: pass print(exc) exc = 'Original value' ''', m.UndefinedName, m.UnusedVariable) def test_undefinedExceptionNameObscuringLocalVariableFalsePositive1(self): """Exception names obscure locals, can't be used after. Unless. Last line will never raise UnboundLocalError because it's only entered if no exception was raised.""" # The exc variable is unused inside the exception handler. expected = [] if version_info < (3,) else [m.UnusedVariable] self.flakes(''' exc = 'Original value' try: raise ValueError('ve') except ValueError as exc: print('exception logged') raise exc ''', *expected) def test_delExceptionInExcept(self): """The exception name can be deleted in the except: block.""" self.flakes(''' try: pass except Exception as exc: del exc ''') def test_undefinedExceptionNameObscuringLocalVariableFalsePositive2(self): """Exception names obscure locals, can't be used after. Unless. Last line will never raise UnboundLocalError because `error` is only falsy if the `except:` block has not been entered.""" # The exc variable is unused inside the exception handler. expected = [] if version_info < (3,) else [m.UnusedVariable] self.flakes(''' exc = 'Original value' error = None try: raise ValueError('ve') except ValueError as exc: error = 'exception logged' if error: print(error) else: exc ''', *expected) @skip('error reporting disabled due to false positives below') def test_undefinedExceptionNameObscuringGlobalVariable(self): """Exception names obscure globals, can't be used after. 
Last line will raise UnboundLocalError on both Python 2 and Python 3 because the existence of that exception name creates a local scope placeholder for it, obscuring any globals, etc.""" self.flakes(''' exc = 'Original value' def func(): try: pass # nothing is raised except ValueError as exc: pass # block never entered, exc stays unbound exc ''', m.UndefinedLocal) @skip('error reporting disabled due to false positives below') def test_undefinedExceptionNameObscuringGlobalVariable2(self): """Exception names obscure globals, can't be used after. Last line will raise NameError on Python 3 because the name is locally unbound after the `except:` block, even if it's nonlocal. We should issue an error in this case because code only working correctly if an exception isn't raised, is invalid. Unless it's explicitly silenced, see false positives below.""" self.flakes(''' exc = 'Original value' def func(): global exc try: raise ValueError('ve') except ValueError as exc: pass # block never entered, exc stays unbound exc ''', m.UndefinedLocal) def test_undefinedExceptionNameObscuringGlobalVariableFalsePositive1(self): """Exception names obscure globals, can't be used after. Unless. Last line will never raise NameError because it's only entered if no exception was raised.""" # The exc variable is unused inside the exception handler. expected = [] if version_info < (3,) else [m.UnusedVariable] self.flakes(''' exc = 'Original value' def func(): global exc try: raise ValueError('ve') except ValueError as exc: print('exception logged') raise exc ''', *expected) def test_undefinedExceptionNameObscuringGlobalVariableFalsePositive2(self): """Exception names obscure globals, can't be used after. Unless. Last line will never raise NameError because `error` is only falsy if the `except:` block has not been entered.""" # The exc variable is unused inside the exception handler. expected = [] if version_info < (3,) else [m.UnusedVariable] self.flakes(''' exc = 'Original value' def func(): global exc error = None try: raise ValueError('ve') except ValueError as exc: error = 'exception logged' if error: print(error) else: exc ''', *expected) def test_functionsNeedGlobalScope(self): self.flakes(''' class a: def b(): fu fu = 1 ''') def test_builtins(self): self.flakes('range(10)') def test_builtinWindowsError(self): """ C{WindowsError} is sometimes a builtin name, so no warning is emitted for using it. """ self.flakes('WindowsError') @skipIf(version_info < (3, 6), 'new feature in 3.6') def test_moduleAnnotations(self): """ Use of the C{__annotations__} in module scope should not emit an undefined name warning when version is greater than or equal to 3.6. """ self.flakes('__annotations__') def test_magicGlobalsFile(self): """ Use of the C{__file__} magic global should not emit an undefined name warning. """ self.flakes('__file__') def test_magicGlobalsBuiltins(self): """ Use of the C{__builtins__} magic global should not emit an undefined name warning. """ self.flakes('__builtins__') def test_magicGlobalsName(self): """ Use of the C{__name__} magic global should not emit an undefined name warning. """ self.flakes('__name__') def test_magicGlobalsPath(self): """ Use of the C{__path__} magic global should not emit an undefined name warning, if you refer to it from a file called __init__.py. 
""" self.flakes('__path__', m.UndefinedName) self.flakes('__path__', filename='package/__init__.py') def test_magicModuleInClassScope(self): """ Use of the C{__module__} magic builtin should not emit an undefined name warning if used in class scope. """ self.flakes('__module__', m.UndefinedName) self.flakes(''' class Foo: __module__ ''') self.flakes(''' class Foo: def bar(self): __module__ ''', m.UndefinedName) @skipIf(version_info < (3, 3), "Python >= 3.3 only") def test_magicQualnameInClassScope(self): """ Use of the C{__qualname__} magic builtin should not emit an undefined name warning if used in class scope. """ self.flakes('__qualname__', m.UndefinedName) self.flakes(''' class Foo: __qualname__ ''') self.flakes(''' class Foo: def bar(self): __qualname__ ''', m.UndefinedName) def test_globalImportStar(self): """Can't find undefined names with import *.""" self.flakes('from fu import *; bar', m.ImportStarUsed, m.ImportStarUsage) @skipIf(version_info >= (3,), 'obsolete syntax') def test_localImportStar(self): """ A local import * still allows undefined names to be found in upper scopes. """ self.flakes(''' def a(): from fu import * bar ''', m.ImportStarUsed, m.UndefinedName, m.UnusedImport) @skipIf(version_info >= (3,), 'obsolete syntax') def test_unpackedParameter(self): """Unpacked function parameters create bindings.""" self.flakes(''' def a((bar, baz)): bar; baz ''') def test_definedByGlobal(self): """ "global" can make an otherwise undefined name in another function defined. """ self.flakes(''' def a(): global fu; fu = 1 def b(): fu ''') self.flakes(''' def c(): bar def b(): global bar; bar = 1 ''') def test_definedByGlobalMultipleNames(self): """ "global" can accept multiple names. """ self.flakes(''' def a(): global fu, bar; fu = 1; bar = 2 def b(): fu; bar ''') def test_globalInGlobalScope(self): """ A global statement in the global scope is ignored. """ self.flakes(''' global x def foo(): print(x) ''', m.UndefinedName) def test_global_reset_name_only(self): """A global statement does not prevent other names being undefined.""" # Only different undefined names are reported. # See following test that fails where the same name is used. self.flakes(''' def f1(): s def f2(): global m ''', m.UndefinedName) @skip("todo") def test_unused_global(self): """An unused global statement does not define the name.""" self.flakes(''' def f1(): m def f2(): global m ''', m.UndefinedName) def test_del(self): """Del deletes bindings.""" self.flakes('a = 1; del a; a', m.UndefinedName) def test_delGlobal(self): """Del a global binding from a function.""" self.flakes(''' a = 1 def f(): global a del a a ''') def test_delUndefined(self): """Del an undefined name.""" self.flakes('del a', m.UndefinedName) def test_delConditional(self): """ Ignores conditional bindings deletion. """ self.flakes(''' context = None test = True if False: del(test) assert(test) ''') def test_delConditionalNested(self): """ Ignored conditional bindings deletion even if they are nested in other blocks. """ self.flakes(''' context = None test = True if False: with context(): del(test) assert(test) ''') def test_delWhile(self): """ Ignore bindings deletion if called inside the body of a while statement. """ self.flakes(''' def test(): foo = 'bar' while False: del foo assert(foo) ''') def test_delWhileTestUsage(self): """ Ignore bindings deletion if called inside the body of a while statement and name is used inside while's test part. 
""" self.flakes(''' def _worker(): o = True while o is not True: del o o = False ''') def test_delWhileNested(self): """ Ignore bindings deletions if node is part of while's test, even when del is in a nested block. """ self.flakes(''' context = None def _worker(): o = True while o is not True: while True: with context(): del o o = False ''') def test_globalFromNestedScope(self): """Global names are available from nested scopes.""" self.flakes(''' a = 1 def b(): def c(): a ''') def test_laterRedefinedGlobalFromNestedScope(self): """ Test that referencing a local name that shadows a global, before it is defined, generates a warning. """ self.flakes(''' a = 1 def fun(): a a = 2 return a ''', m.UndefinedLocal) def test_laterRedefinedGlobalFromNestedScope2(self): """ Test that referencing a local name in a nested scope that shadows a global declared in an enclosing scope, before it is defined, generates a warning. """ self.flakes(''' a = 1 def fun(): global a def fun2(): a a = 2 return a ''', m.UndefinedLocal) def test_intermediateClassScopeIgnored(self): """ If a name defined in an enclosing scope is shadowed by a local variable and the name is used locally before it is bound, an unbound local warning is emitted, even if there is a class scope between the enclosing scope and the local scope. """ self.flakes(''' def f(): x = 1 class g: def h(self): a = x x = None print(x, a) print(x) ''', m.UndefinedLocal) def test_doubleNestingReportsClosestName(self): """ Test that referencing a local name in a nested scope that shadows a variable declared in two different outer scopes before it is defined in the innermost scope generates an UnboundLocal warning which refers to the nearest shadowed name. """ exc = self.flakes(''' def a(): x = 1 def b(): x = 2 # line 5 def c(): x x = 3 return x return x return x ''', m.UndefinedLocal).messages[0] # _DoctestMixin.flakes adds two lines preceding the code above. expected_line_num = 7 if self.withDoctest else 5 self.assertEqual(exc.message_args, ('x', expected_line_num)) def test_laterRedefinedGlobalFromNestedScope3(self): """ Test that referencing a local name in a nested scope that shadows a global, before it is defined, generates a warning. """ self.flakes(''' def fun(): a = 1 def fun2(): a a = 1 return a return a ''', m.UndefinedLocal) def test_undefinedAugmentedAssignment(self): self.flakes( ''' def f(seq): a = 0 seq[a] += 1 seq[b] /= 2 c[0] *= 2 a -= 3 d += 4 e[any] = 5 ''', m.UndefinedName, # b m.UndefinedName, # c m.UndefinedName, m.UnusedVariable, # d m.UndefinedName, # e ) def test_nestedClass(self): """Nested classes can access enclosing scope.""" self.flakes(''' def f(foo): class C: bar = foo def f(self): return foo return C() f(123).f() ''') def test_badNestedClass(self): """Free variables in nested classes must bind at class creation.""" self.flakes(''' def f(): class C: bar = foo foo = 456 return foo f() ''', m.UndefinedName) def test_definedAsStarArgs(self): """Star and double-star arg names are defined.""" self.flakes(''' def f(a, *b, **c): print(a, b, c) ''') @skipIf(version_info < (3,), 'new in Python 3') def test_definedAsStarUnpack(self): """Star names in unpack are defined.""" self.flakes(''' a, *b = range(10) print(a, b) ''') self.flakes(''' *a, b = range(10) print(a, b) ''') self.flakes(''' a, *b, c = range(10) print(a, b, c) ''') @skipIf(version_info < (3,), 'new in Python 3') def test_usedAsStarUnpack(self): """ Star names in unpack are used if RHS is not a tuple/list literal. 
""" self.flakes(''' def f(): a, *b = range(10) ''') self.flakes(''' def f(): (*a, b) = range(10) ''') self.flakes(''' def f(): [a, *b, c] = range(10) ''') @skipIf(version_info < (3,), 'new in Python 3') def test_unusedAsStarUnpack(self): """ Star names in unpack are unused if RHS is a tuple/list literal. """ self.flakes(''' def f(): a, *b = any, all, 4, 2, 'un' ''', m.UnusedVariable, m.UnusedVariable) self.flakes(''' def f(): (*a, b) = [bool, int, float, complex] ''', m.UnusedVariable, m.UnusedVariable) self.flakes(''' def f(): [a, *b, c] = 9, 8, 7, 6, 5, 4 ''', m.UnusedVariable, m.UnusedVariable, m.UnusedVariable) @skipIf(version_info < (3,), 'new in Python 3') def test_keywordOnlyArgs(self): """Keyword-only arg names are defined.""" self.flakes(''' def f(*, a, b=None): print(a, b) ''') self.flakes(''' import default_b def f(*, a, b=default_b): print(a, b) ''') @skipIf(version_info < (3,), 'new in Python 3') def test_keywordOnlyArgsUndefined(self): """Typo in kwonly name.""" self.flakes(''' def f(*, a, b=default_c): print(a, b) ''', m.UndefinedName) @skipIf(version_info < (3,), 'new in Python 3') def test_annotationUndefined(self): """Undefined annotations.""" self.flakes(''' from abc import note1, note2, note3, note4, note5 def func(a: note1, *args: note2, b: note3=12, **kw: note4) -> note5: pass ''') self.flakes(''' def func(): d = e = 42 def func(a: {1, d}) -> (lambda c: e): pass ''') @skipIf(version_info < (3,), 'new in Python 3') def test_metaClassUndefined(self): self.flakes(''' from abc import ABCMeta class A(metaclass=ABCMeta): pass ''') def test_definedInGenExp(self): """ Using the loop variable of a generator expression results in no warnings. """ self.flakes('(a for a in [1, 2, 3] if a)') self.flakes('(b for b in (a for a in [1, 2, 3] if a) if b)') def test_undefinedInGenExpNested(self): """ The loop variables of generator expressions nested together are not defined in the other generator. """ self.flakes('(b for b in (a for a in [1, 2, 3] if b) if b)', m.UndefinedName) self.flakes('(b for b in (a for a in [1, 2, 3] if a) if a)', m.UndefinedName) def test_undefinedWithErrorHandler(self): """ Some compatibility code checks explicitly for NameError. It should not trigger warnings. """ self.flakes(''' try: socket_map except NameError: socket_map = {} ''') self.flakes(''' try: _memoryview.contiguous except (NameError, AttributeError): raise RuntimeError("Python >= 3.3 is required") ''') # If NameError is not explicitly handled, generate a warning self.flakes(''' try: socket_map except: socket_map = {} ''', m.UndefinedName) self.flakes(''' try: socket_map except Exception: socket_map = {} ''', m.UndefinedName) def test_definedInClass(self): """ Defined name for generator expressions and dict/set comprehension. """ self.flakes(''' class A: T = range(10) Z = (x for x in T) L = [x for x in T] B = dict((i, str(i)) for i in T) ''') self.flakes(''' class A: T = range(10) X = {x for x in T} Y = {x:x for x in T} ''') def test_definedInClassNested(self): """Defined name for nested generator expressions in a class.""" self.flakes(''' class A: T = range(10) Z = (x for x in (a for a in T)) ''') def test_undefinedInLoop(self): """ The loop variable is defined after the expression is computed. 
""" self.flakes(''' for i in range(i): print(i) ''', m.UndefinedName) self.flakes(''' [42 for i in range(i)] ''', m.UndefinedName) self.flakes(''' (42 for i in range(i)) ''', m.UndefinedName) def test_definedFromLambdaInDictionaryComprehension(self): """ Defined name referenced from a lambda function within a dict/set comprehension. """ self.flakes(''' {lambda: id(x) for x in range(10)} ''') def test_definedFromLambdaInGenerator(self): """ Defined name referenced from a lambda function within a generator expression. """ self.flakes(''' any(lambda: id(x) for x in range(10)) ''') def test_undefinedFromLambdaInDictionaryComprehension(self): """ Undefined name referenced from a lambda function within a dict/set comprehension. """ self.flakes(''' {lambda: id(y) for x in range(10)} ''', m.UndefinedName) def test_undefinedFromLambdaInComprehension(self): """ Undefined name referenced from a lambda function within a generator expression. """ self.flakes(''' any(lambda: id(y) for x in range(10)) ''', m.UndefinedName) def test_dunderClass(self): """ `__class__` is defined in class scope under Python 3, but is not in Python 2. """ code = ''' class Test(object): def __init__(self): print(__class__.__name__) self.x = 1 t = Test() ''' if version_info < (3,): self.flakes(code, m.UndefinedName) else: self.flakes(code) class NameTests(TestCase): """ Tests for some extra cases of name handling. """ def test_impossibleContext(self): """ A Name node with an unrecognized context results in a RuntimeError being raised. """ tree = ast.parse("x = 10") file_tokens = checker.make_tokens("x = 10") # Make it into something unrecognizable. tree.body[0].targets[0].ctx = object() self.assertRaises(RuntimeError, checker.Checker, tree, file_tokens=file_tokens)
# Copyright 2022 The Magenta Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Transcription metrics calculations.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import functools from magenta.models.onsets_frames_transcription import constants from magenta.models.onsets_frames_transcription import data from magenta.models.onsets_frames_transcription import infer_util import mir_eval from note_seq import sequences_lib from note_seq.protobuf import music_pb2 import numpy as np import tensorflow.compat.v1 as tf # Disable for Numpy and Pandas containers. # pylint: disable=g-explicit-length-test def f1_score(precision, recall): """Creates an op for calculating the F1 score. Args: precision: A tensor representing precision. recall: A tensor representing recall. Returns: A tensor with the result of the F1 calculation. """ return tf.where( tf.greater(precision + recall, 0), 2 * ( (precision * recall) / (precision + recall)), 0) def accuracy_without_true_negatives(true_positives, false_positives, false_negatives): """Creates an op for calculating accuracy without true negatives. Args: true_positives: A tensor representing true_positives. false_positives: A tensor representing false_positives. false_negatives: A tensor representing false_negatives. Returns: A tensor with the result of the calculation. 
""" return tf.where( tf.greater(true_positives + false_positives + false_negatives, 0), true_positives / (true_positives + false_positives + false_negatives), 0) def calculate_frame_metrics(frame_labels, frame_predictions): """Calculate frame-based metrics.""" frame_labels_bool = tf.cast(frame_labels, tf.bool) frame_predictions_bool = tf.cast(frame_predictions, tf.bool) frame_true_positives = tf.reduce_sum(tf.to_float(tf.logical_and( tf.equal(frame_labels_bool, True), tf.equal(frame_predictions_bool, True)))) frame_false_positives = tf.reduce_sum(tf.to_float(tf.logical_and( tf.equal(frame_labels_bool, False), tf.equal(frame_predictions_bool, True)))) frame_false_negatives = tf.reduce_sum(tf.to_float(tf.logical_and( tf.equal(frame_labels_bool, True), tf.equal(frame_predictions_bool, False)))) frame_accuracy = ( tf.reduce_sum( tf.to_float(tf.equal(frame_labels_bool, frame_predictions_bool))) / tf.cast(tf.size(frame_labels), tf.float32)) frame_precision = tf.where( tf.greater(frame_true_positives + frame_false_positives, 0), tf.div(frame_true_positives, frame_true_positives + frame_false_positives), 0) frame_recall = tf.where( tf.greater(frame_true_positives + frame_false_negatives, 0), tf.div(frame_true_positives, frame_true_positives + frame_false_negatives), 0) frame_f1_score = f1_score(frame_precision, frame_recall) frame_accuracy_without_true_negatives = accuracy_without_true_negatives( frame_true_positives, frame_false_positives, frame_false_negatives) return { 'true_positives': [frame_true_positives], 'false_positives': [frame_false_positives], 'false_negatives': [frame_false_negatives], 'accuracy': [frame_accuracy], 'accuracy_without_true_negatives': [ frame_accuracy_without_true_negatives], 'precision': [frame_precision], 'recall': [frame_recall], 'f1_score': [frame_f1_score], } def _calculate_metrics_py(frame_probs, onset_probs, frame_predictions, onset_predictions, offset_predictions, velocity_values, sequence_label_str, frame_labels, sequence_id, hparams, min_pitch, max_pitch, onsets_only, restrict_to_pitch=None): """Python logic for calculating metrics on a single example.""" tf.logging.info('Calculating metrics for %s with length %d', sequence_id, frame_labels.shape[0]) sequence_prediction = infer_util.predict_sequence( frame_probs=frame_probs, onset_probs=onset_probs, frame_predictions=frame_predictions, onset_predictions=onset_predictions, offset_predictions=offset_predictions, velocity_values=velocity_values, min_pitch=min_pitch, hparams=hparams, onsets_only=onsets_only) note_density = len(sequence_prediction.notes) / sequence_prediction.total_time sequence_label = music_pb2.NoteSequence.FromString(sequence_label_str) if hparams.backward_shift_amount_ms: def shift_notesequence(ns_time): return ns_time + hparams.backward_shift_amount_ms / 1000. 
shifted_sequence_label, skipped_notes = ( sequences_lib.adjust_notesequence_times(sequence_label, shift_notesequence)) assert skipped_notes == 0 sequence_label = shifted_sequence_label est_intervals, est_pitches, est_velocities = ( sequences_lib.sequence_to_valued_intervals( sequence_prediction, restrict_to_pitch=restrict_to_pitch)) ref_intervals, ref_pitches, ref_velocities = ( sequences_lib.sequence_to_valued_intervals( sequence_label, restrict_to_pitch=restrict_to_pitch)) processed_frame_predictions = sequences_lib.sequence_to_pianoroll( sequence_prediction, frames_per_second=data.hparams_frames_per_second(hparams), min_pitch=min_pitch, max_pitch=max_pitch).active if processed_frame_predictions.shape[0] < frame_labels.shape[0]: # Pad transcribed frames with silence. pad_length = frame_labels.shape[0] - processed_frame_predictions.shape[0] processed_frame_predictions = np.pad(processed_frame_predictions, [(0, pad_length), (0, 0)], 'constant') elif processed_frame_predictions.shape[0] > frame_labels.shape[0]: # Truncate transcribed frames. processed_frame_predictions = ( processed_frame_predictions[:frame_labels.shape[0], :]) if len(ref_pitches) == 0: tf.logging.info( 'Reference pitches were length 0, returning empty metrics for %s:', sequence_id) return tuple([[]] * 13 + [processed_frame_predictions]) note_precision, note_recall, note_f1, _ = ( mir_eval.transcription.precision_recall_f1_overlap( ref_intervals=ref_intervals, ref_pitches=ref_pitches, est_intervals=est_intervals, est_pitches=est_pitches, offset_ratio=None)) (note_with_velocity_precision, note_with_velocity_recall, note_with_velocity_f1, _) = ( mir_eval.transcription_velocity.precision_recall_f1_overlap( ref_intervals=ref_intervals, ref_pitches=ref_pitches, ref_velocities=ref_velocities, est_intervals=est_intervals, est_pitches=est_pitches, est_velocities=est_velocities, offset_ratio=None)) (note_with_offsets_precision, note_with_offsets_recall, note_with_offsets_f1, _) = ( mir_eval.transcription.precision_recall_f1_overlap( ref_intervals=ref_intervals, ref_pitches=ref_pitches, est_intervals=est_intervals, est_pitches=est_pitches)) (note_with_offsets_velocity_precision, note_with_offsets_velocity_recall, note_with_offsets_velocity_f1, _) = ( mir_eval.transcription_velocity.precision_recall_f1_overlap( ref_intervals=ref_intervals, ref_pitches=ref_pitches, ref_velocities=ref_velocities, est_intervals=est_intervals, est_pitches=est_pitches, est_velocities=est_velocities)) tf.logging.info( 'Metrics for %s: Note F1 %f, Note w/ velocity F1 %f, Note w/ offsets F1 ' '%f, Note w/ offsets & velocity: %f', sequence_id, note_f1, note_with_velocity_f1, note_with_offsets_f1, note_with_offsets_velocity_f1) # Return 1-d tensors for the metrics return ([note_precision], [note_recall], [note_f1], [note_density], [note_with_velocity_precision], [note_with_velocity_recall], [note_with_velocity_f1], [note_with_offsets_precision], [note_with_offsets_recall], [note_with_offsets_f1 ], [note_with_offsets_velocity_precision], [note_with_offsets_velocity_recall], [note_with_offsets_velocity_f1 ], [processed_frame_predictions]) def calculate_metrics(frame_probs, onset_probs, frame_predictions, onset_predictions, offset_predictions, velocity_values, sequence_label, frame_labels, sequence_id, hparams, min_pitch, max_pitch, onsets_only=False, pitch_map=None): """Calculate metrics for a single example.""" def make_metrics(note_precision, note_recall, note_f1, note_density, note_with_velocity_precision, note_with_velocity_recall, note_with_velocity_f1, 
note_with_offsets_precision, note_with_offsets_recall, note_with_offsets_f1, note_with_offsets_velocity_precision, note_with_offsets_velocity_recall, note_with_offsets_velocity_f1, processed_frame_predictions, frame_labels, onsets_only=False, prefix=''): """Create a dict of onset, offset, frame and velocity metrics.""" def _add_prefix(name): return '_'.join(x for x in [prefix, name] if x) def _metrics(precision, recall, f1, name): """Create and return a dict of metrics.""" metrics = { _add_prefix(name) + '_precision': precision, _add_prefix(name) + '_recall': recall, _add_prefix(name) + '_f1_score': f1, } return metrics frame_metrics = calculate_frame_metrics( frame_labels=frame_labels, frame_predictions=processed_frame_predictions) metrics = _metrics(frame_metrics['precision'], frame_metrics['recall'], frame_metrics['f1_score'], 'frame') metrics.update({ _add_prefix('frame_accuracy'): frame_metrics['accuracy'], _add_prefix('frame_accuracy_without_true_negatives'): frame_metrics['accuracy_without_true_negatives'], _add_prefix('note_density'): note_density, }) metrics.update(_metrics(note_precision, note_recall, note_f1, 'note')) metrics.update( _metrics(note_with_velocity_precision, note_with_velocity_recall, note_with_velocity_f1, 'note_with_velocity')) if not onsets_only: metrics.update( _metrics(note_with_offsets_precision, note_with_offsets_recall, note_with_offsets_f1, 'note_with_offsets')) metrics.update( _metrics( note_with_offsets_velocity_precision, note_with_offsets_velocity_recall, note_with_offsets_velocity_f1, 'note_with_offsets_velocity')) return metrics (note_precision, note_recall, note_f1, note_density, note_with_velocity_precision, note_with_velocity_recall, note_with_velocity_f1, note_with_offsets_precision, note_with_offsets_recall, note_with_offsets_f1, note_with_offsets_velocity_precision, note_with_offsets_velocity_recall, note_with_offsets_velocity_f1, processed_frame_predictions) = tf.py_func( functools.partial( _calculate_metrics_py, hparams=hparams, min_pitch=min_pitch, max_pitch=max_pitch, onsets_only=onsets_only), inp=[ frame_probs, onset_probs, frame_predictions, onset_predictions, offset_predictions, velocity_values, sequence_label, frame_labels, sequence_id ], Tout=([tf.float64] * 13) + [tf.float32], stateful=False) metrics = make_metrics( note_precision, note_recall, note_f1, note_density, note_with_velocity_precision, note_with_velocity_recall, note_with_velocity_f1, note_with_offsets_precision, note_with_offsets_recall, note_with_offsets_f1, note_with_offsets_velocity_precision, note_with_offsets_velocity_recall, note_with_offsets_velocity_f1, processed_frame_predictions, frame_labels, onsets_only=onsets_only) if pitch_map: for pitch, name in pitch_map.items(): (note_precision, note_recall, note_f1, note_density, note_with_velocity_precision, note_with_velocity_recall, note_with_velocity_f1, note_with_offsets_precision, note_with_offsets_recall, note_with_offsets_f1, note_with_offsets_velocity_precision, note_with_offsets_velocity_recall, note_with_offsets_velocity_f1, processed_frame_predictions) = tf.py_func( functools.partial( _calculate_metrics_py, hparams=hparams, min_pitch=min_pitch, max_pitch=max_pitch, onsets_only=onsets_only, restrict_to_pitch=pitch), inp=[ frame_probs, onset_probs, frame_predictions, onset_predictions, offset_predictions, velocity_values, sequence_label, frame_labels, sequence_id + name ], Tout=([tf.float64] * 13) + [tf.float32], stateful=False) metrics.update( make_metrics( note_precision, note_recall, note_f1, note_density, 
note_with_velocity_precision, note_with_velocity_recall, note_with_velocity_f1, note_with_offsets_precision, note_with_offsets_recall, note_with_offsets_f1, note_with_offsets_velocity_precision, note_with_offsets_velocity_recall, note_with_offsets_velocity_f1, processed_frame_predictions, frame_labels, onsets_only=onsets_only, prefix='pitch/' + name)) return metrics def define_metrics(frame_probs, onset_probs, frame_predictions, onset_predictions, offset_predictions, velocity_values, length, sequence_label, frame_labels, sequence_id, hparams, min_pitch=constants.MIN_MIDI_PITCH, max_pitch=constants.MAX_MIDI_PITCH, prefix='', onsets_only=False, pitch_map=None): """Create a metric name to tf.metric pair dict for transcription metrics.""" with tf.device('/device:CPU:*'): metrics = collections.defaultdict(list) for i in range(hparams.eval_batch_size): for k, v in calculate_metrics( frame_probs=frame_probs[i][:length[i]], onset_probs=onset_probs[i][:length[i]], frame_predictions=frame_predictions[i][:length[i]], onset_predictions=onset_predictions[i][:length[i]], offset_predictions=offset_predictions[i][:length[i]], velocity_values=velocity_values[i][:length[i]], sequence_label=sequence_label[i], frame_labels=frame_labels[i][:length[i]], sequence_id=sequence_id[i], hparams=hparams, min_pitch=min_pitch, max_pitch=max_pitch, onsets_only=onsets_only, pitch_map=pitch_map).items(): metrics[k].append(v) return {'metrics/' + prefix + k: v for k, v in metrics.items()}
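# --- Hedged example (not part of the metrics module above) ---
# A minimal NumPy sketch of the frame-level precision/recall/F1 logic that
# calculate_frame_metrics implements with TF ops, handy for sanity-checking
# small arrays by hand. The example labels/predictions are made up.
if __name__ == '__main__':
    example_labels = np.array([[1, 0, 1], [0, 0, 1]], dtype=bool)
    example_predictions = np.array([[1, 0, 0], [0, 1, 1]], dtype=bool)

    tp = np.logical_and(example_labels, example_predictions).sum()
    fp = np.logical_and(~example_labels, example_predictions).sum()
    fn = np.logical_and(example_labels, ~example_predictions).sum()

    precision = tp / (tp + fp) if (tp + fp) else 0.0
    recall = tp / (tp + fn) if (tp + fn) else 0.0
    f1 = (2 * precision * recall / (precision + recall)
          if (precision + recall) else 0.0)
    print('precision=%.3f recall=%.3f f1=%.3f' % (precision, recall, f1))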
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Nova base exception handling. Includes decorator for re-raising Nova-type exceptions. SHOULD include dedicated exception logging. """ import functools import itertools import webob.exc from nova.openstack.common import excutils from nova.openstack.common import log as logging LOG = logging.getLogger(__name__) class ConvertedException(webob.exc.WSGIHTTPException): def __init__(self, code=0, title="", explanation=""): self.code = code self.title = title self.explanation = explanation super(ConvertedException, self).__init__() class ProcessExecutionError(IOError): def __init__(self, stdout=None, stderr=None, exit_code=None, cmd=None, description=None): self.exit_code = exit_code self.stderr = stderr self.stdout = stdout self.cmd = cmd self.description = description if description is None: description = _('Unexpected error while running command.') if exit_code is None: exit_code = '-' message = _('%(description)s\nCommand: %(cmd)s\n' 'Exit code: %(exit_code)s\nStdout: %(stdout)r\n' 'Stderr: %(stderr)r') % locals() IOError.__init__(self, message) def wrap_db_error(f): def _wrap(*args, **kwargs): try: return f(*args, **kwargs) except UnicodeEncodeError: raise InvalidUnicodeParameter() except Exception, e: LOG.exception(_('DB exception wrapped.')) raise DBError(e) _wrap.func_name = f.func_name return _wrap def wrap_exception(notifier=None, publisher_id=None, event_type=None, level=None): """This decorator wraps a method to catch any exceptions that may get thrown. It logs the exception as well as optionally sending it to the notification system. """ # TODO(sandy): Find a way to import nova.notifier.api so we don't have # to pass it in as a parameter. Otherwise we get a cyclic import of # nova.notifier.api -> nova.utils -> nova.exception :( # TODO(johannes): Also, it would be nice to use # utils.save_and_reraise_exception() without an import loop def inner(f): def wrapped(*args, **kw): try: return f(*args, **kw) except Exception, e: with excutils.save_and_reraise_exception(): if notifier: payload = dict(args=args, exception=e) payload.update(kw) # Use a temp vars so we don't shadow # our outer definitions. temp_level = level if not temp_level: temp_level = notifier.ERROR temp_type = event_type if not temp_type: # If f has multiple decorators, they must use # functools.wraps to ensure the name is # propagated. temp_type = f.__name__ context = get_context_from_function_and_args(f, args, kw) notifier.notify(context, publisher_id, temp_type, temp_level, payload) return functools.wraps(f)(wrapped) return inner class NovaException(Exception): """Base Nova Exception To correctly use this class, inherit from it and define a 'message' property. That message will get printf'd with the keyword arguments provided to the constructor. 
""" message = _("An unknown exception occurred.") code = 500 headers = {} safe = False def __init__(self, message=None, **kwargs): self.kwargs = kwargs if 'code' not in self.kwargs: try: self.kwargs['code'] = self.code except AttributeError: pass if not message: try: message = self.message % kwargs except Exception as e: # kwargs doesn't match a variable in the message # log the issue and the kwargs LOG.exception(_('Exception in string format operation')) for name, value in kwargs.iteritems(): LOG.error("%s: %s" % (name, value)) # at least get the core message out if something happened message = self.message super(NovaException, self).__init__(message) class EC2APIError(NovaException): message = _("Unknown") def __init__(self, message=None, code=None): self.msg = message self.code = code outstr = '%s' % message super(EC2APIError, self).__init__(outstr) class DBError(NovaException): """Wraps an implementation specific exception.""" def __init__(self, inner_exception=None): self.inner_exception = inner_exception super(DBError, self).__init__(str(inner_exception)) class DeprecatedConfig(NovaException): message = _("Fatal call to deprecated config %(msg)s") class DecryptionFailure(NovaException): message = _("Failed to decrypt text") class VirtualInterfaceCreateException(NovaException): message = _("Virtual Interface creation failed") class VirtualInterfaceMacAddressException(NovaException): message = _("5 attempts to create virtual interface" "with unique mac address failed") class GlanceConnectionFailed(NovaException): message = _("Connection to glance host %(host)s:%(port)s failed: " "%(reason)s") class NotAuthorized(NovaException): message = _("Not authorized.") code = 403 class AdminRequired(NotAuthorized): message = _("User does not have admin privileges") class PolicyNotAuthorized(NotAuthorized): message = _("Policy doesn't allow %(action)s to be performed.") class ImageNotAuthorized(NovaException): message = _("Not authorized for image %(image_id)s.") class Invalid(NovaException): message = _("Unacceptable parameters.") code = 400 class InvalidSnapshot(Invalid): message = _("Invalid snapshot") + ": %(reason)s" class VolumeUnattached(Invalid): message = _("Volume %(volume_id)s is not attached to anything") class VolumeAttached(Invalid): message = _("Volume %(volume_id)s is still attached, detach volume first.") class InvalidKeypair(Invalid): message = _("Keypair data is invalid") class SfJsonEncodeFailure(NovaException): message = _("Failed to load data into json format") class InvalidRequest(Invalid): message = _("The request is invalid.") class InvalidInput(Invalid): message = _("Invalid input received") + ": %(reason)s" class InvalidVolumeType(Invalid): message = _("Invalid volume type") + ": %(reason)s" class InvalidVolume(Invalid): message = _("Invalid volume") + ": %(reason)s" class InvalidMetadata(Invalid): message = _("Invalid metadata") + ": %(reason)s" class InvalidPortRange(Invalid): message = _("Invalid port range %(from_port)s:%(to_port)s. %(msg)s") class InvalidIpProtocol(Invalid): message = _("Invalid IP protocol %(protocol)s.") class InvalidContentType(Invalid): message = _("Invalid content type %(content_type)s.") class InvalidCidr(Invalid): message = _("Invalid cidr %(cidr)s.") class InvalidUnicodeParameter(Invalid): message = _("Invalid Parameter: " "Unicode is not supported by the current database.") # Cannot be templated as the error syntax varies. # msg needs to be constructed when raised. 
class InvalidParameterValue(Invalid): message = _("%(err)s") class InvalidAggregateAction(Invalid): message = _("Cannot perform action '%(action)s' on aggregate " "%(aggregate_id)s. Reason: %(reason)s.") class InvalidGroup(Invalid): message = _("Group not valid. Reason: %(reason)s") class InvalidSortKey(Invalid): message = _("Sort key supplied was not valid.") class InstanceInvalidState(Invalid): message = _("Instance %(instance_uuid)s in %(attr)s %(state)s. Cannot " "%(method)s while the instance is in this state.") class InstanceNotRunning(Invalid): message = _("Instance %(instance_id)s is not running.") class InstanceNotInRescueMode(Invalid): message = _("Instance %(instance_id)s is not in rescue mode") class InstanceNotReady(Invalid): message = _("Instance %(instance_id)s is not ready") class InstanceSuspendFailure(Invalid): message = _("Failed to suspend instance") + ": %(reason)s" class InstanceResumeFailure(Invalid): message = _("Failed to resume server") + ": %(reason)s." class InstanceRebootFailure(Invalid): message = _("Failed to reboot instance") + ": %(reason)s" class InstanceTerminationFailure(Invalid): message = _("Failed to terminate instance") + ": %(reason)s" class ServiceUnavailable(Invalid): message = _("Service is unavailable at this time.") class ComputeResourcesUnavailable(ServiceUnavailable): message = _("Insufficient compute resources.") class ComputeServiceUnavailable(ServiceUnavailable): message = _("Compute service is unavailable at this time.") class UnableToMigrateToSelf(Invalid): message = _("Unable to migrate instance (%(instance_id)s) " "to current host (%(host)s).") class InvalidHypervisorType(Invalid): message = _("The supplied hypervisor type of is invalid.") class DestinationHypervisorTooOld(Invalid): message = _("The instance requires a newer hypervisor version than " "has been provided.") class DestinationDiskExists(Invalid): message = _("The supplied disk path (%(path)s) already exists, " "it is expected not to exist.") class InvalidDevicePath(Invalid): message = _("The supplied device path (%(path)s) is invalid.") class DevicePathInUse(Invalid): message = _("The supplied device path (%(path)s) is in use.") class DeviceIsBusy(Invalid): message = _("The supplied device (%(device)s) is busy.") class InvalidCPUInfo(Invalid): message = _("Unacceptable CPU info") + ": %(reason)s" class InvalidIpAddressError(Invalid): message = _("%(address)s is not a valid IP v4/6 address.") class InvalidVLANTag(Invalid): message = _("VLAN tag is not appropriate for the port group " "%(bridge)s. Expected VLAN tag is %(tag)s, " "but the one associated with the port group is %(pgroup)s.") class InvalidVLANPortGroup(Invalid): message = _("vSwitch which contains the port group %(bridge)s is " "not associated with the desired physical adapter. 
" "Expected vSwitch is %(expected)s, but the one associated " "is %(actual)s.") class InvalidDiskFormat(Invalid): message = _("Disk format %(disk_format)s is not acceptable") class ImageUnacceptable(Invalid): message = _("Image %(image_id)s is unacceptable: %(reason)s") class InstanceUnacceptable(Invalid): message = _("Instance %(instance_id)s is unacceptable: %(reason)s") class InvalidEc2Id(Invalid): message = _("Ec2 id %(ec2_id)s is unacceptable.") class InvalidUUID(Invalid): message = _("Expected a uuid but received %(uuid)s.") class ConstraintNotMet(NovaException): message = _("Constraint not met.") code = 412 class NotFound(NovaException): message = _("Resource could not be found.") code = 404 class VirtDriverNotFound(NotFound): message = _("Could not find driver for connection_type %(name)s") class PersistentVolumeFileNotFound(NotFound): message = _("Volume %(volume_id)s persistence file could not be found.") class VolumeNotFound(NotFound): message = _("Volume %(volume_id)s could not be found.") class SfAccountNotFound(NotFound): message = _("Unable to locate account %(account_name)s on " "Solidfire device") class VolumeMetadataNotFound(NotFound): message = _("Volume %(volume_id)s has no metadata with " "key %(metadata_key)s.") class VolumeTypeNotFound(NotFound): message = _("Volume type %(volume_type_id)s could not be found.") class VolumeTypeNotFoundByName(VolumeTypeNotFound): message = _("Volume type with name %(volume_type_name)s " "could not be found.") class VolumeTypeExtraSpecsNotFound(NotFound): message = _("Volume Type %(volume_type_id)s has no extra specs with " "key %(extra_specs_key)s.") class SnapshotNotFound(NotFound): message = _("Snapshot %(snapshot_id)s could not be found.") class VolumeIsBusy(NovaException): message = _("deleting volume %(volume_name)s that has snapshot") class SnapshotIsBusy(NovaException): message = _("deleting snapshot %(snapshot_name)s that has " "dependent volumes") class ISCSITargetNotFoundForVolume(NotFound): message = _("No target id found for volume %(volume_id)s.") class ISCSITargetCreateFailed(NovaException): message = _("Failed to create iscsi target for volume %(volume_id)s.") class ISCSITargetRemoveFailed(NovaException): message = _("Failed to remove iscsi target for volume %(volume_id)s.") class DiskNotFound(NotFound): message = _("No disk at %(location)s") class VolumeDriverNotFound(NotFound): message = _("Could not find a handler for %(driver_type)s volume.") class InvalidImageRef(Invalid): message = _("Invalid image href %(image_href)s.") class ImageNotFound(NotFound): message = _("Image %(image_id)s could not be found.") class ImageNotFoundEC2(ImageNotFound): message = _("Image %(image_id)s could not be found. The nova EC2 API " "assigns image ids dynamically when they are listed for the " "first time. 
Have you listed image ids since adding this " "image?") class ProjectNotFound(NotFound): message = _("Project %(project_id)s could not be found.") class StorageRepositoryNotFound(NotFound): message = _("Cannot find SR to read/write VDI.") class NetworkInUse(NovaException): message = _("Network %(network_id)s is still in use.") class NetworkNotCreated(NovaException): message = _("%(req)s is required to create a network.") class NetworkNotFound(NotFound): message = _("Network %(network_id)s could not be found.") class NetworkNotFoundForBridge(NetworkNotFound): message = _("Network could not be found for bridge %(bridge)s") class NetworkNotFoundForUUID(NetworkNotFound): message = _("Network could not be found for uuid %(uuid)s") class NetworkNotFoundForCidr(NetworkNotFound): message = _("Network could not be found with cidr %(cidr)s.") class NetworkNotFoundForInstance(NetworkNotFound): message = _("Network could not be found for instance %(instance_id)s.") class NoNetworksFound(NotFound): message = _("No networks defined.") class NetworkNotFoundForProject(NotFound): message = _("Either Network uuid %(network_uuid)s is not present or " "is not assigned to the project %(project_id)s.") class NetworkHostNotSet(NovaException): message = _("Host is not set to the network (%(network_id)s).") class DatastoreNotFound(NotFound): message = _("Could not find the datastore reference(s) which the VM uses.") class PortInUse(NovaException): message = _("Port %(port_id)s is still in use.") class PortNotFound(NotFound): message = _("Port %(port_id)s could not be found.") class FixedIpNotFound(NotFound): message = _("No fixed IP associated with id %(id)s.") class FixedIpNotFoundForAddress(FixedIpNotFound): message = _("Fixed ip not found for address %(address)s.") class FixedIpNotFoundForInstance(FixedIpNotFound): message = _("Instance %(instance_uuid)s has zero fixed ips.") class FixedIpNotFoundForNetworkHost(FixedIpNotFound): message = _("Network host %(host)s has zero fixed ips " "in network %(network_id)s.") class FixedIpNotFoundForSpecificInstance(FixedIpNotFound): message = _("Instance %(instance_uuid)s doesn't have fixed ip '%(ip)s'.") class FixedIpNotFoundForNetwork(FixedIpNotFound): message = _("Fixed IP address (%(address)s) does not exist in " "network (%(network_uuid)s).") class FixedIpAlreadyInUse(NovaException): message = _("Fixed IP address %(address)s is already in use on instance " "%(instance_uuid)s.") class FixedIpAssociatedWithMultipleInstances(NovaException): message = _("More than one instance is associated with fixed ip address " "'%(address)s'.") class FixedIpInvalid(Invalid): message = _("Fixed IP address %(address)s is invalid.") class NoMoreFixedIps(NovaException): message = _("Zero fixed ips available.") class NoFixedIpsDefined(NotFound): message = _("Zero fixed ips could be found.") #TODO(bcwaldon): EOL this exception! 
class Duplicate(NovaException): pass class FloatingIpExists(Duplicate): message = _("Floating ip %(address)s already exists.") class FloatingIpNotFound(NotFound): message = _("Floating ip not found for id %(id)s.") class FloatingIpDNSExists(Invalid): message = _("The DNS entry %(name)s already exists in domain %(domain)s.") class FloatingIpNotFoundForAddress(FloatingIpNotFound): message = _("Floating ip not found for address %(address)s.") class FloatingIpNotFoundForHost(FloatingIpNotFound): message = _("Floating ip not found for host %(host)s.") class NoMoreFloatingIps(FloatingIpNotFound): message = _("Zero floating ips available.") safe = True class FloatingIpAssociated(NovaException): message = _("Floating ip %(address)s is associated.") class FloatingIpNotAssociated(NovaException): message = _("Floating ip %(address)s is not associated.") class NoFloatingIpsDefined(NotFound): message = _("Zero floating ips exist.") class NoFloatingIpInterface(NotFound): message = _("Interface %(interface)s not found.") class KeypairNotFound(NotFound): message = _("Keypair %(name)s not found for user %(user_id)s") class CertificateNotFound(NotFound): message = _("Certificate %(certificate_id)s not found.") class ServiceNotFound(NotFound): message = _("Service %(service_id)s could not be found.") class HostNotFound(NotFound): message = _("Host %(host)s could not be found.") class ComputeHostNotFound(HostNotFound): message = _("Compute host %(host)s could not be found.") class HostBinaryNotFound(NotFound): message = _("Could not find binary %(binary)s on host %(host)s.") class InvalidReservationExpiration(Invalid): message = _("Invalid reservation expiration %(expire)s.") class InvalidQuotaValue(Invalid): message = _("Change would make usage less than 0 for the following " "resources: %(unders)s") class QuotaNotFound(NotFound): message = _("Quota could not be found") class QuotaResourceUnknown(QuotaNotFound): message = _("Unknown quota resources %(unknown)s.") class ProjectQuotaNotFound(QuotaNotFound): message = _("Quota for project %(project_id)s could not be found.") class QuotaClassNotFound(QuotaNotFound): message = _("Quota class %(class_name)s could not be found.") class QuotaUsageNotFound(QuotaNotFound): message = _("Quota usage for project %(project_id)s could not be found.") class ReservationNotFound(QuotaNotFound): message = _("Quota reservation %(uuid)s could not be found.") class OverQuota(NovaException): message = _("Quota exceeded for resources: %(overs)s") class SecurityGroupNotFound(NotFound): message = _("Security group %(security_group_id)s not found.") class SecurityGroupNotFoundForProject(SecurityGroupNotFound): message = _("Security group %(security_group_id)s not found " "for project %(project_id)s.") class SecurityGroupNotFoundForRule(SecurityGroupNotFound): message = _("Security group with rule %(rule_id)s not found.") class SecurityGroupExistsForInstance(Invalid): message = _("Security group %(security_group_id)s is already associated" " with the instance %(instance_id)s") class SecurityGroupNotExistsForInstance(Invalid): message = _("Security group %(security_group_id)s is not associated with" " the instance %(instance_id)s") class MigrationNotFound(NotFound): message = _("Migration %(migration_id)s could not be found.") class MigrationNotFoundByStatus(MigrationNotFound): message = _("Migration not found for instance %(instance_id)s " "with status %(status)s.") class ConsolePoolNotFound(NotFound): message = _("Console pool %(pool_id)s could not be found.") class 
ConsolePoolNotFoundForHostType(NotFound): message = _("Console pool of type %(console_type)s " "for compute host %(compute_host)s " "on proxy host %(host)s not found.") class ConsoleNotFound(NotFound): message = _("Console %(console_id)s could not be found.") class ConsoleNotFoundForInstance(ConsoleNotFound): message = _("Console for instance %(instance_uuid)s could not be found.") class ConsoleNotFoundInPoolForInstance(ConsoleNotFound): message = _("Console for instance %(instance_uuid)s " "in pool %(pool_id)s could not be found.") class ConsoleTypeInvalid(Invalid): message = _("Invalid console type %(console_type)s ") class InstanceTypeNotFound(NotFound): message = _("Instance type %(instance_type_id)s could not be found.") class InstanceTypeNotFoundByName(InstanceTypeNotFound): message = _("Instance type with name %(instance_type_name)s " "could not be found.") class FlavorNotFound(NotFound): message = _("Flavor %(flavor_id)s could not be found.") class FlavorAccessNotFound(NotFound): message = _("Flavor access not found for %(flavor_id) / " "%(project_id) combination.") class SchedulerHostFilterNotFound(NotFound): message = _("Scheduler Host Filter %(filter_name)s could not be found.") class SchedulerCostFunctionNotFound(NotFound): message = _("Scheduler cost function %(cost_fn_str)s could" " not be found.") class SchedulerWeightFlagNotFound(NotFound): message = _("Scheduler weight flag not found: %(flag_name)s") class InstanceMetadataNotFound(NotFound): message = _("Instance %(instance_uuid)s has no metadata with " "key %(metadata_key)s.") class InstanceSystemMetadataNotFound(NotFound): message = _("Instance %(instance_uuid)s has no system metadata with " "key %(metadata_key)s.") class InstanceTypeExtraSpecsNotFound(NotFound): message = _("Instance Type %(instance_type_id)s has no extra specs with " "key %(extra_specs_key)s.") class FileNotFound(NotFound): message = _("File %(file_path)s could not be found.") class NoFilesFound(NotFound): message = _("Zero files could be found.") class SwitchNotFoundForNetworkAdapter(NotFound): message = _("Virtual switch associated with the " "network adapter %(adapter)s not found.") class NetworkAdapterNotFound(NotFound): message = _("Network adapter %(adapter)s could not be found.") class ClassNotFound(NotFound): message = _("Class %(class_name)s could not be found: %(exception)s") class NotAllowed(NovaException): message = _("Action not allowed.") class ImageRotationNotAllowed(NovaException): message = _("Rotation is not allowed for snapshots") class RotationRequiredForBackup(NovaException): message = _("Rotation param is required for backup image_type") class KeyPairExists(Duplicate): message = _("Key pair %(key_name)s already exists.") class InstanceExists(Duplicate): message = _("Instance %(name)s already exists.") class InstanceTypeExists(Duplicate): message = _("Instance Type %(name)s already exists.") class FlavorAccessExists(Duplicate): message = _("Flavor access alreay exists for flavor %(flavor_id)s " "and project %(project_id)s combination.") class VolumeTypeExists(Duplicate): message = _("Volume Type %(name)s already exists.") class InvalidSharedStorage(NovaException): message = _("%(path)s is not on shared storage: %(reason)s") class InvalidLocalStorage(NovaException): message = _("%(path)s is not on local storage: %(reason)s") class MigrationError(NovaException): message = _("Migration error") + ": %(reason)s" class MalformedRequestBody(NovaException): message = _("Malformed message body: %(reason)s") # NOTE(johannes): NotFound should 
only be used when a 404 error is # appropriate to be returned class ConfigNotFound(NovaException): message = _("Could not find config at %(path)s") class PasteAppNotFound(NovaException): message = _("Could not load paste app '%(name)s' from %(path)s") class CannotResizeToSameFlavor(NovaException): message = _("When resizing, instances must change flavor!") class ImageTooLarge(NovaException): message = _("Image is larger than instance type allows") class InstanceTypeMemoryTooSmall(NovaException): message = _("Instance type's memory is too small for requested image.") class InstanceTypeDiskTooSmall(NovaException): message = _("Instance type's disk is too small for requested image.") class InsufficientFreeMemory(NovaException): message = _("Insufficient free memory on compute node to start %(uuid)s.") class CouldNotFetchMetrics(NovaException): message = _("Could not fetch bandwidth/cpu/disk metrics for this host.") class NoValidHost(NovaException): message = _("No valid host was found. %(reason)s") class WillNotSchedule(NovaException): message = _("Host %(host)s is not up or doesn't exist.") class QuotaError(NovaException): message = _("Quota exceeded") + ": code=%(code)s" code = 413 headers = {'Retry-After': 0} safe = True class TooManyInstances(QuotaError): message = _("Quota exceeded for %(overs)s: Requested %(req)s," " but already used %(used)d of %(allowed)d %(resource)s") class VolumeSizeTooLarge(QuotaError): message = _("Maximum volume size exceeded") class VolumeLimitExceeded(QuotaError): message = _("Maximum number of volumes allowed (%(allowed)d) exceeded") class FloatingIpLimitExceeded(QuotaError): message = _("Maximum number of floating ips exceeded") class MetadataLimitExceeded(QuotaError): message = _("Maximum number of metadata items exceeds %(allowed)d") class OnsetFileLimitExceeded(QuotaError): message = _("Personality file limit exceeded") class OnsetFilePathLimitExceeded(QuotaError): message = _("Personality file path too long") class OnsetFileContentLimitExceeded(QuotaError): message = _("Personality file content too long") class KeypairLimitExceeded(QuotaError): message = _("Maximum number of key pairs exceeded") class SecurityGroupLimitExceeded(QuotaError): message = _("Maximum number of security groups or rules exceeded") class AggregateError(NovaException): message = _("Aggregate %(aggregate_id)s: action '%(action)s' " "caused an error: %(reason)s.") class AggregateNotFound(NotFound): message = _("Aggregate %(aggregate_id)s could not be found.") class AggregateNameExists(Duplicate): message = _("Aggregate %(aggregate_name)s already exists.") class AggregateHostNotFound(NotFound): message = _("Aggregate %(aggregate_id)s has no host %(host)s.") class AggregateMetadataNotFound(NotFound): message = _("Aggregate %(aggregate_id)s has no metadata with " "key %(metadata_key)s.") class AggregateHostExists(Duplicate): message = _("Aggregate %(aggregate_id)s already has host %(host)s.") class DuplicateSfVolumeNames(Duplicate): message = _("Detected more than one volume with name %(vol_name)s") class VolumeTypeCreateFailed(NovaException): message = _("Cannot create volume_type with " "name %(name)s and specs %(extra_specs)s") class VolumeBackendAPIException(NovaException): message = _("Bad or unexpected response from the storage volume " "backend API: %(data)s") class NfsException(NovaException): message = _("Unknown NFS exception") class NfsNoSharesMounted(NotFound): message = _("No mounted NFS shares found") class NfsNoSuitableShareFound(NotFound): message = _("There is no share 
which can host %(volume_size)sG") class InstanceTypeCreateFailed(NovaException): message = _("Unable to create instance type") class InstancePasswordSetFailed(NovaException): message = _("Failed to set admin password on %(instance)s " "because %(reason)s") safe = True class SolidFireAPIException(NovaException): message = _("Bad response from SolidFire API") class SolidFireAPIDataException(SolidFireAPIException): message = _("Error in SolidFire API response: data=%(data)s") class DuplicateVlan(Duplicate): message = _("Detected existing vlan with id %(vlan)d") class InstanceNotFound(NotFound): message = _("Instance %(instance_id)s could not be found.") class MarkerNotFound(NotFound): message = _("Marker %(marker)s could not be found.") class InvalidInstanceIDMalformed(Invalid): message = _("Invalid id: %(val)s (expecting \"i-...\").") class CouldNotFetchImage(NovaException): message = _("Could not fetch image %(image_id)s") class TaskAlreadyRunning(NovaException): message = _("Task %(task_name)s is already running on host %(host)s") class TaskNotRunning(NovaException): message = _("Task %(task_name)s is not running on host %(host)s") class InstanceIsLocked(InstanceInvalidState): message = _("Instance %(instance_uuid)s is locked") class ConfigDriveMountFailed(NovaException): message = _("Could not mount vfat config drive. %(operation)s failed. " "Error: %(error)s") class ConfigDriveUnknownFormat(NovaException): message = _("Unknown config drive format %(format)s. Select one of " "iso9660 or vfat.") class InstanceUserDataTooLarge(NovaException): message = _("User data too large. User data must be no larger than " "%(maxsize)s bytes once base64 encoded. Your data is " "%(length)d bytes") class InstanceUserDataMalformed(NovaException): message = _("User data needs to be valid base 64.") class UnexpectedTaskStateError(NovaException): message = _("unexpected task state: expecting %(expected)s but " "the actual state is %(actual)s") class CryptoCAFileNotFound(FileNotFound): message = _("The CA file for %(project)s could not be found") class CryptoCRLFileNotFound(FileNotFound): message = _("The CRL file for %(project)s could not be found") def get_context_from_function_and_args(function, args, kwargs): """Find an arg of type RequestContext and return it. This is useful in a couple of decorators where we don't know much about the function we're wrapping. """ # import here to avoid circularity: from nova import context for arg in itertools.chain(kwargs.values(), args): if isinstance(arg, context.RequestContext): return arg return None
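# --- Hedged example (not part of nova/exception.py above) ---
# A minimal sketch of how these exception classes are meant to be used: the
# class-level `message` is a printf-style template filled in with the keyword
# arguments passed to the constructor. `WidgetNotFound` is a hypothetical
# subclass used only for illustration (it assumes `_` has been installed by
# gettext, as it already is for the rest of this module).
if __name__ == '__main__':
    class WidgetNotFound(NotFound):
        message = _("Widget %(widget_id)s could not be found.")

    try:
        raise WidgetNotFound(widget_id='w-123')
    except NotFound as exc:
        # Prints: Widget w-123 could not be found.
        print(exc)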
import sys import sysconfig import os import re import errno import shlex import shutil import subprocess import itertools import concurrent.futures from site import USER_SITE from glob import iglob from collections import namedtuple, deque from xml.sax.saxutils import escape from distutils import version from email.parser import HeaderParser import urllib.request import xmlrpc.client import pkg_resources try: import docutils.core except ImportError: docutils = None from AnyQt.QtWidgets import ( QWidget, QDialog, QLabel, QLineEdit, QTreeView, QHeaderView, QTextBrowser, QDialogButtonBox, QProgressDialog, QVBoxLayout, QStyle, QStyledItemDelegate, QStyleOptionViewItem, QApplication, QHBoxLayout ) from AnyQt.QtGui import ( QStandardItemModel, QStandardItem, QPalette, QTextOption ) from AnyQt.QtCore import ( QSortFilterProxyModel, QItemSelectionModel, Qt, QObject, QMetaObject, QEvent, QSize, QTimer, QThread, Q_ARG ) from AnyQt.QtCore import pyqtSignal as Signal, pyqtSlot as Slot from ..config import ADDON_KEYWORD from ..gui.utils import message_warning, message_information, \ message_critical as message_error, \ OSX_NSURL_toLocalFile from ..help.manager import get_dist_meta, trim, parse_meta OFFICIAL_ADDONS = [ "Orange-Bioinformatics", "Orange3-DataFusion", "Orange3-Prototypes", "Orange3-Text", "Orange3-Network", "Orange3-Associate", ] Installable = namedtuple( "Installable", ["name", "version", "summary", "description", "package_url", "release_urls"] ) ReleaseUrl = namedtuple( "ReleaseUrl", ["filename", "url", "size", "python_version", "package_type" ] ) Available = namedtuple( "Available", ["installable"] ) Installed = namedtuple( "Installed", ["installable", "local"] ) def is_updatable(item): if isinstance(item, Available): return False elif item.installable is None: return False else: inst, dist = item try: v1 = version.StrictVersion(dist.version) v2 = version.StrictVersion(inst.version) except ValueError: pass else: return v1 < v2 return (version.LooseVersion(dist.version) < version.LooseVersion(inst.version)) class TristateCheckItemDelegate(QStyledItemDelegate): """ A QStyledItemDelegate which properly toggles Qt.ItemIsTristate check state transitions on user interaction. 
""" def editorEvent(self, event, model, option, index): flags = model.flags(index) if not flags & Qt.ItemIsUserCheckable or \ not option.state & QStyle.State_Enabled or \ not flags & Qt.ItemIsEnabled: return False checkstate = model.data(index, Qt.CheckStateRole) if checkstate is None: return False widget = option.widget style = widget.style() if widget else QApplication.style() if event.type() in {QEvent.MouseButtonPress, QEvent.MouseButtonRelease, QEvent.MouseButtonDblClick}: pos = event.pos() opt = QStyleOptionViewItem(option) self.initStyleOption(opt, index) rect = style.subElementRect( QStyle.SE_ItemViewItemCheckIndicator, opt, widget) if event.button() != Qt.LeftButton or not rect.contains(pos): return False if event.type() in {QEvent.MouseButtonPress, QEvent.MouseButtonDblClick}: return True elif event.type() == QEvent.KeyPress: if event.key() != Qt.Key_Space and event.key() != Qt.Key_Select: return False else: return False if model.flags(index) & Qt.ItemIsTristate: checkstate = (checkstate + 1) % 3 else: checkstate = \ Qt.Unchecked if checkstate == Qt.Checked else Qt.Checked return model.setData(index, checkstate, Qt.CheckStateRole) def get_meta_from_archive(path): """Return project name, version and summary extracted from sdist or wheel metadata in a ZIP or tar.gz archive, or None if metadata can't be found.""" def is_metadata(fname): return fname.endswith(('PKG-INFO', 'METADATA')) meta = None if path.endswith(('.zip', '.whl')): from zipfile import ZipFile with ZipFile(path) as archive: meta = next(filter(is_metadata, archive.namelist()), None) if meta: meta = archive.read(meta).decode('utf-8') elif path.endswith(('.tar.gz', '.tgz')): import tarfile with tarfile.open(path) as archive: meta = next(filter(is_metadata, archive.getnames()), None) if meta: meta = archive.extractfile(meta).read().decode('utf-8') if meta: meta = parse_meta(meta) return [meta.get(key, '') for key in ('Name', 'Version', 'Description', 'Summary')] class AddonManagerWidget(QWidget): statechanged = Signal() def __init__(self, parent=None, **kwargs): super(AddonManagerWidget, self).__init__(parent, **kwargs) self.__items = [] self.setLayout(QVBoxLayout()) self.__header = QLabel( wordWrap=True, textFormat=Qt.RichText ) self.__search = QLineEdit( placeholderText=self.tr("Filter") ) self.layout().addWidget(self.__search) self.__view = view = QTreeView( rootIsDecorated=False, editTriggers=QTreeView.NoEditTriggers, selectionMode=QTreeView.SingleSelection, alternatingRowColors=True ) self.__view.setItemDelegateForColumn(0, TristateCheckItemDelegate()) self.layout().addWidget(view) self.__model = model = QStandardItemModel() model.setHorizontalHeaderLabels(["", "Name", "Version", "Action"]) model.dataChanged.connect(self.__data_changed) proxy = QSortFilterProxyModel( filterKeyColumn=1, filterCaseSensitivity=Qt.CaseInsensitive ) proxy.setSourceModel(model) self.__search.textChanged.connect(proxy.setFilterFixedString) view.setModel(proxy) view.selectionModel().selectionChanged.connect( self.__update_details ) header = self.__view.header() header.setSectionResizeMode(0, QHeaderView.Fixed) header.setSectionResizeMode(2, QHeaderView.ResizeToContents) self.__details = QTextBrowser( frameShape=QTextBrowser.NoFrame, readOnly=True, lineWrapMode=QTextBrowser.WidgetWidth, openExternalLinks=True, ) self.__details.setWordWrapMode(QTextOption.WordWrap) palette = QPalette(self.palette()) palette.setColor(QPalette.Base, Qt.transparent) self.__details.setPalette(palette) self.layout().addWidget(self.__details) def set_items(self, 
items): self.__items = items model = self.__model model.clear() model.setHorizontalHeaderLabels(["", "Name", "Version", "Action"]) for item in items: if isinstance(item, Installed): installed = True ins, dist = item name = dist.project_name summary = get_dist_meta(dist).get("Summary", "") version = ins.version if ins is not None else dist.version else: installed = False (ins,) = item dist = None name = ins.name summary = ins.summary version = ins.version updatable = is_updatable(item) item1 = QStandardItem() item1.setFlags(Qt.ItemIsEnabled | Qt.ItemIsSelectable | Qt.ItemIsUserCheckable | (Qt.ItemIsTristate if updatable else 0)) if installed and updatable: item1.setCheckState(Qt.PartiallyChecked) elif installed: item1.setCheckState(Qt.Checked) else: item1.setCheckState(Qt.Unchecked) item2 = QStandardItem(name) item2.setFlags(Qt.ItemIsEnabled | Qt.ItemIsSelectable) item2.setToolTip(summary) item2.setData(item, Qt.UserRole) if updatable: version = "{} < {}".format(dist.version, ins.version) item3 = QStandardItem(version) item3.setFlags(Qt.ItemIsEnabled | Qt.ItemIsSelectable) item4 = QStandardItem() item4.setFlags(Qt.ItemIsEnabled | Qt.ItemIsSelectable) model.appendRow([item1, item2, item3, item4]) self.__view.resizeColumnToContents(0) self.__view.setColumnWidth( 1, max(150, self.__view.sizeHintForColumn(1))) self.__view.setColumnWidth( 2, max(150, self.__view.sizeHintForColumn(2))) if self.__items: self.__view.selectionModel().select( self.__view.model().index(0, 0), QItemSelectionModel.Select | QItemSelectionModel.Rows ) def item_state(self): steps = [] for i, item in enumerate(self.__items): modelitem = self.__model.item(i, 0) state = modelitem.checkState() if modelitem.flags() & Qt.ItemIsTristate and state == Qt.Checked: steps.append((Upgrade, item)) elif isinstance(item, Available) and state == Qt.Checked: steps.append((Install, item)) elif isinstance(item, Installed) and state == Qt.Unchecked: steps.append((Uninstall, item)) return steps def __selected_row(self): indices = self.__view.selectedIndexes() if indices: proxy = self.__view.model() indices = [proxy.mapToSource(index) for index in indices] return indices[0].row() else: return -1 def set_install_projects(self, names): """Mark for installation the add-ons that match any of names""" model = self.__model for row in range(model.rowCount()): item = model.item(row, 1) if item.text() in names: model.item(row, 0).setCheckState(Qt.Checked) def __data_changed(self, topleft, bottomright): rows = range(topleft.row(), bottomright.row() + 1) for i in rows: modelitem = self.__model.item(i, 0) actionitem = self.__model.item(i, 3) item = self.__items[i] state = modelitem.checkState() flags = modelitem.flags() if flags & Qt.ItemIsTristate and state == Qt.Checked: actionitem.setText("Update") elif isinstance(item, Available) and state == Qt.Checked: actionitem.setText("Install") elif isinstance(item, Installed) and state == Qt.Unchecked: actionitem.setText("Uninstall") else: actionitem.setText("") self.statechanged.emit() def __update_details(self): index = self.__selected_row() if index == -1: self.__details.setText("") else: item = self.__model.item(index, 1) item = item.data(Qt.UserRole) assert isinstance(item, (Installed, Available)) # if isinstance(item, Available): # self.__installed_label.setText("") # self.__available_label.setText(str(item.available.version)) # elif item.installable is not None: # self.__installed_label.setText(str(item.local.version)) # self.__available_label.setText(str(item.available.version)) # else: # 
self.__installed_label.setText(str(item.local.version)) # self.__available_label.setText("") text = self._detailed_text(item) self.__details.setText(text) def _detailed_text(self, item): if isinstance(item, Installed): remote, dist = item if remote is None: meta = get_dist_meta(dist) description = meta.get("Description") or meta.get('Summary') else: description = remote.description else: description = item[0].description if docutils is not None: try: html = docutils.core.publish_string( trim(description), writer_name="html", settings_overrides={ "output-encoding": "utf-8", # "embed-stylesheet": False, # "stylesheet": [], # "stylesheet_path": [] } ).decode("utf-8") except docutils.utils.SystemMessage: html = "<pre>{}<pre>".format(escape(description)) except Exception: html = "<pre>{}<pre>".format(escape(description)) else: html = "<pre>{}<pre>".format(escape(description)) return html def sizeHint(self): return QSize(480, 420) def method_queued(method, sig, conntype=Qt.QueuedConnection): name = method.__name__ obj = method.__self__ assert isinstance(obj, QObject) def call(*args): args = [Q_ARG(atype, arg) for atype, arg in zip(sig, args)] return QMetaObject.invokeMethod(obj, name, conntype, *args) return call class AddonManagerDialog(QDialog): _packages = None def __init__(self, parent=None, **kwargs): super().__init__(parent, acceptDrops=True, **kwargs) self.setLayout(QVBoxLayout()) self.layout().setContentsMargins(0, 0, 0, 0) self.addonwidget = AddonManagerWidget() self.layout().addWidget(self.addonwidget) info_bar = QWidget() info_layout = QHBoxLayout() info_bar.setLayout(info_layout) self.layout().addWidget(info_bar) buttons = QDialogButtonBox( orientation=Qt.Horizontal, standardButtons=QDialogButtonBox.Ok | QDialogButtonBox.Cancel ) buttons.accepted.connect(self.__accepted) buttons.rejected.connect(self.reject) self.layout().addWidget(buttons) # No system access => install into user site-packages self.user_install = not os.access(sysconfig.get_path("purelib"), os.W_OK) self._executor = concurrent.futures.ThreadPoolExecutor(max_workers=1) if AddonManagerDialog._packages is None: self._f_pypi_addons = self._executor.submit(list_pypi_addons) else: self._f_pypi_addons = concurrent.futures.Future() self._f_pypi_addons.set_result(AddonManagerDialog._packages) self._f_pypi_addons.add_done_callback( method_queued(self._set_packages, (object,)) ) self.__progress = QProgressDialog( self, Qt.Sheet, minimum=0, maximum=0, labelText=self.tr("Retrieving package list"), sizeGripEnabled=False, windowTitle="Progress" ) self.__progress.rejected.connect(self.reject) self.__thread = None self.__installer = None @Slot(object) def _set_packages(self, f): if self.__progress.isVisible(): self.__progress.close() try: packages = f.result() except (IOError, OSError) as err: message_warning( "Could not retrieve package list", title="Error", informative_text=str(err), parent=self ) packages = [] except Exception: raise else: AddonManagerDialog._packages = packages installed = list_installed_addons() dists = {dist.project_name: dist for dist in installed} packages = {pkg.name: pkg for pkg in packages} # For every pypi available distribution not listed by # list_installed_addons, check if it is actually already # installed. ws = pkg_resources.WorkingSet() for pkg_name in set(packages.keys()).difference(set(dists.keys())): try: d = ws.find(pkg_resources.Requirement.parse(pkg_name)) except pkg_resources.VersionConflict: pass except ValueError: # Requirements.parse error ? 
pass else: if d is not None: dists[d.project_name] = d project_names = unique( itertools.chain(packages.keys(), dists.keys()) ) items = [] for name in project_names: if name in dists and name in packages: item = Installed(packages[name], dists[name]) elif name in dists: item = Installed(None, dists[name]) elif name in packages: item = Available(packages[name]) else: assert False items.append(item) self.addonwidget.set_items(items) def showEvent(self, event): super().showEvent(event) if not self._f_pypi_addons.done(): QTimer.singleShot(0, self.__progress.show) def done(self, retcode): super().done(retcode) self._f_pypi_addons.cancel() self._executor.shutdown(wait=False) if self.__thread is not None: self.__thread.quit() self.__thread.wait(1000) def closeEvent(self, event): super().closeEvent(event) self._f_pypi_addons.cancel() self._executor.shutdown(wait=False) if self.__thread is not None: self.__thread.quit() self.__thread.wait(1000) ADDON_EXTENSIONS = ('.zip', '.whl', '.tar.gz') def dragEnterEvent(self, event): urls = event.mimeData().urls() if any((OSX_NSURL_toLocalFile(url) or url.toLocalFile()) .endswith(self.ADDON_EXTENSIONS) for url in urls): event.acceptProposedAction() def dropEvent(self, event): """Allow dropping add-ons (zip or wheel archives) on this dialog to install them""" packages = [] names = [] for url in event.mimeData().urls(): path = OSX_NSURL_toLocalFile(url) or url.toLocalFile() if path.endswith(self.ADDON_EXTENSIONS): name, vers, summary, descr = (get_meta_from_archive(path) or (os.path.basename(path), '', '', '')) names.append(name) packages.append( Installable(name, vers, summary, descr or summary, path, [path])) future = concurrent.futures.Future() future.set_result((AddonManagerDialog._packages or []) + packages) self._set_packages(future) self.addonwidget.set_install_projects(names) def __accepted(self): steps = self.addonwidget.item_state() if steps: # Move all uninstall steps to the front steps = sorted( steps, key=lambda step: 0 if step[0] == Uninstall else 1 ) self.__installer = Installer(steps=steps, user_install=self.user_install) self.__thread = QThread(self) self.__thread.start() self.__installer.moveToThread(self.__thread) self.__installer.finished.connect(self.__on_installer_finished) self.__installer.error.connect(self.__on_installer_error) self.__installer.installStatusChanged.connect( self.__progress.setLabelText) self.__progress.show() self.__progress.setLabelText("Installing") self.__installer.start() else: self.accept() def __on_installer_error(self, command, pkg, retcode, output): message_error( "An error occurred while running a subprocess", title="Error", informative_text="{} exited with non zero status.".format(command), details="".join(output), parent=self ) self.reject() def __on_installer_finished(self): message = ( ("Changes successfully applied in <i>{}</i>.<br>".format( USER_SITE) if self.user_install else '') + "Please restart Orange for changes to take effect.") message_information(message, parent=self) self.accept() class SafeUrllibTransport(xmlrpc.client.Transport): """Urllib for HTTPS connections that automatically handles proxies.""" def single_request(self, host, handler, request_body, verbose=False): req = urllib.request.Request('https://%s%s' % (host, handler), request_body) req.add_header('User-agent', self.user_agent) req.add_header('Content-Type', 'text/xml') self.verbose = verbose opener = urllib.request.build_opener() return self.parse_response(opener.open(req)) def list_pypi_addons(): """ List add-ons available on pypi. 
""" from ..config import ADDON_PYPI_SEARCH_SPEC pypi = xmlrpc.client.ServerProxy( "https://pypi.python.org/pypi/", transport=xmlrpc.client.SafeTransport() ) addons = pypi.search(ADDON_PYPI_SEARCH_SPEC) for addon in OFFICIAL_ADDONS: if not any(a for a in addons if a['name'] == addon): addons.append({"name": addon, "version": '0'}) multicall = xmlrpc.client.MultiCall(pypi) for addon in addons: name = addon["name"] multicall.package_releases(name) releases = multicall() multicall = xmlrpc.client.MultiCall(pypi) for addon, versions in zip(addons, releases): # Workaround for PyPI bug of search not returning the latest versions # https://bitbucket.org/pypa/pypi/issues/326/my-package-doesnt-appear-in-the-search version_ = max(versions, key=version.LooseVersion) name = addon["name"] multicall.release_data(name, version_) multicall.release_urls(name, version_) results = list(multicall()) release_data = results[::2] release_urls = results[1::2] packages = [] for release, urls in zip(release_data, release_urls): if release and urls: # ignore releases without actual source/wheel/egg files, # or with empty metadata (deleted from PyPi?). urls = [ReleaseUrl(url["filename"], url["url"], url["size"], url["python_version"], url["packagetype"]) for url in urls] packages.append( Installable(release["name"], release["version"], release["summary"], release["description"], release["package_url"], urls) ) return packages def list_installed_addons(): from ..config import ADDON_ENTRY workingset = pkg_resources.WorkingSet(sys.path) return [ep.dist for ep in workingset.iter_entry_points(ADDON_ENTRY)] def unique(iterable): seen = set() def observed(el): observed = el in seen seen.add(el) return observed return (el for el in iterable if not observed(el)) def _env_with_proxies(): """ Return system environment with proxies obtained from urllib so that they can be used with pip. 
""" proxies = urllib.request.getproxies() env = dict(os.environ) if "http" in proxies: env["HTTP_PROXY"] = proxies["http"] if "https" in proxies: env["HTTPS_PROXY"] = proxies["https"] return env Install, Upgrade, Uninstall = 1, 2, 3 class Installer(QObject): installStatusChanged = Signal(str) started = Signal() finished = Signal() error = Signal(str, object, int, list) def __init__(self, parent=None, steps=[], user_install=False): QObject.__init__(self, parent) self.__interupt = False self.__queue = deque(steps) self.__user_install = user_install def start(self): QTimer.singleShot(0, self._next) def interupt(self): self.__interupt = True def setStatusMessage(self, message): self.__statusMessage = message self.installStatusChanged.emit(message) @Slot() def _next(self): def fmt_cmd(cmd): return "Command failed: python " + " ".join(map(shlex.quote, cmd)) command, pkg = self.__queue.popleft() if command == Install: inst = pkg.installable inst_name = inst.name if inst.package_url.startswith("http://") else inst.package_url self.setStatusMessage("Installing {}".format(inst.name)) cmd = (["-m", "pip", "install"] + (["--user"] if self.__user_install else []) + [inst_name]) process = python_process(cmd, bufsize=-1, universal_newlines=True, env=_env_with_proxies()) retcode, output = self.__subprocessrun(process) if retcode != 0: self.error.emit(fmt_cmd(cmd), pkg, retcode, output) return elif command == Upgrade: inst = pkg.installable inst_name = inst.name if inst.package_url.startswith("http://") else inst.package_url self.setStatusMessage("Upgrading {}".format(inst.name)) cmd = (["-m", "pip", "install", "--upgrade", "--no-deps"] + (["--user"] if self.__user_install else []) + [inst_name]) process = python_process(cmd, bufsize=-1, universal_newlines=True, env=_env_with_proxies()) retcode, output = self.__subprocessrun(process) if retcode != 0: self.error.emit(fmt_cmd(cmd), pkg, retcode, output) return # Why is this here twice?? cmd = (["-m", "pip", "install"] + (["--user"] if self.__user_install else []) + [inst_name]) process = python_process(cmd, bufsize=-1, universal_newlines=True, env=_env_with_proxies()) retcode, output = self.__subprocessrun(process) if retcode != 0: self.error.emit(fmt_cmd(cmd), pkg, retcode, output) return elif command == Uninstall: dist = pkg.local self.setStatusMessage("Uninstalling {}".format(dist.project_name)) cmd = ["-m", "pip", "uninstall", "--yes", dist.project_name] process = python_process(cmd, bufsize=-1, universal_newlines=True, env=_env_with_proxies()) retcode, output = self.__subprocessrun(process) if self.__user_install: # Remove the package forcefully; pip doesn't (yet) uninstall # --user packages (or any package outside sys.prefix?) # google: pip "Not uninstalling ?" 
"outside environment" install_path = os.path.join( USER_SITE, re.sub('[^\w]', '_', dist.project_name)) pip_record = next(iglob(install_path + '*.dist-info/RECORD'), None) if pip_record: with open(pip_record) as f: files = [line.rsplit(',', 2)[0] for line in f] else: files = [os.path.join( USER_SITE, 'orangecontrib', dist.project_name.split('-')[-1].lower()),] for match in itertools.chain(files, iglob(install_path + '*')): print('rm -rf', match) if os.path.isdir(match): shutil.rmtree(match) elif os.path.exists(match): os.unlink(match) if retcode != 0: self.error.emit(fmt_cmd(cmd), pkg, retcode, output) return if self.__queue: QTimer.singleShot(0, self._next) else: self.finished.emit() def __subprocessrun(self, process): output = [] while process.poll() is None: try: line = process.stdout.readline() except IOError as ex: if ex.errno != errno.EINTR: raise else: output.append(line) print(line, end="") # Read remaining output if any line = process.stdout.read() if line: output.append(line) print(line, end="") return process.returncode, output def python_process(args, script_name=None, cwd=None, env=None, **kwargs): """ Run a `sys.executable` in a subprocess with `args`. """ executable = sys.executable if os.name == "nt" and os.path.basename(executable) == "pythonw.exe": # Don't run the script with a 'gui' (detached) process. dirname = os.path.dirname(executable) executable = os.path.join(dirname, "python.exe") # by default a new console window would show up when executing the # script startupinfo = subprocess.STARTUPINFO() if hasattr(subprocess, "STARTF_USESHOWWINDOW"): startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW else: # This flag was missing in inital releases of 2.7 startupinfo.dwFlags |= subprocess._subprocess.STARTF_USESHOWWINDOW kwargs["startupinfo"] = startupinfo if script_name is not None: script = script_name else: script = executable process = subprocess.Popen( [script] + args, executable=executable, cwd=cwd, env=env, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, **kwargs ) return process
from __future__ import annotations from statsmodels.compat.pandas import ( is_float_index, is_int_index, is_numeric_dtype, ) import numbers import warnings import numpy as np from pandas import ( DatetimeIndex, Index, Period, PeriodIndex, RangeIndex, Series, Timestamp, date_range, period_range, to_datetime, ) from pandas.tseries.frequencies import to_offset from statsmodels.base.data import PandasData import statsmodels.base.model as base import statsmodels.base.wrapper as wrap from statsmodels.tools.sm_exceptions import ValueWarning _tsa_doc = """ %(model)s Parameters ---------- %(params)s dates : array_like, optional An array-like object of datetime objects. If a pandas object is given for endog or exog, it is assumed to have a DateIndex. freq : str, optional The frequency of the time-series. A Pandas offset or 'B', 'D', 'W', 'M', 'A', or 'Q'. This is optional if dates are given. %(extra_params)s %(extra_sections)s""" _model_doc = "Timeseries model base class" _generic_params = base._model_params_doc _missing_param_doc = base._missing_param_doc def get_index_loc(key, index): """ Get the location of a specific key in an index Parameters ---------- key : label The key for which to find the location if the underlying index is a DateIndex or a location if the underlying index is a RangeIndex or an Index with an integer dtype. index : pd.Index The index to search. Returns ------- loc : int The location of the key index : pd.Index The index including the key; this is a copy of the original index unless the index had to be expanded to accommodate `key`. index_was_expanded : bool Whether or not the index was expanded to accommodate `key`. Notes ----- If `key` is past the end of of the given index, and the index is either an Index with an integral dtype or a date index, this function extends the index up to and including key, and then returns the location in the new index. """ base_index = index index = base_index date_index = isinstance(base_index, (PeriodIndex, DatetimeIndex)) int_index = is_int_index(base_index) range_index = isinstance(base_index, RangeIndex) index_class = type(base_index) nobs = len(index) # Special handling for RangeIndex if range_index and isinstance(key, (int, np.integer)): # Negative indices (that lie in the Index) if key < 0 and -key <= nobs: key = nobs + key # Out-of-sample (note that we include key itself in the new index) elif key > nobs - 1: # See gh5835. Remove the except after pandas 0.25 required. try: base_index_start = base_index.start base_index_step = base_index.step except AttributeError: base_index_start = base_index._start base_index_step = base_index._step stop = base_index_start + (key + 1) * base_index_step index = RangeIndex( start=base_index_start, stop=stop, step=base_index_step ) # Special handling for NumericIndex if ( not range_index and int_index and not date_index and isinstance(key, (int, np.integer)) ): # Negative indices (that lie in the Index) if key < 0 and -key <= nobs: key = nobs + key # Out-of-sample (note that we include key itself in the new index) elif key > base_index[-1]: index = Index(np.arange(base_index[0], int(key + 1))) # Special handling for date indexes if date_index: # Use index type to choose creation function if index_class is DatetimeIndex: index_fn = date_range else: index_fn = period_range # Integer key (i.e. 
already given a location) if isinstance(key, (int, np.integer)): # Negative indices (that lie in the Index) if key < 0 and -key < nobs: key = index[nobs + key] # Out-of-sample (note that we include key itself in the new # index) elif key > len(base_index) - 1: index = index_fn( start=base_index[0], periods=int(key + 1), freq=base_index.freq, ) key = index[-1] else: key = index[key] # Other key types (i.e. string date or some datetime-like object) else: # Covert the key to the appropriate date-like object if index_class is PeriodIndex: date_key = Period(key, freq=base_index.freq) else: date_key = Timestamp(key) # Out-of-sample if date_key > base_index[-1]: # First create an index that may not always include `key` index = index_fn( start=base_index[0], end=date_key, freq=base_index.freq ) # Now make sure we include `key` if not index[-1] == date_key: index = index_fn( start=base_index[0], periods=len(index) + 1, freq=base_index.freq, ) # To avoid possible inconsistencies with `get_loc` below, # set the key directly equal to the last index location key = index[-1] # Get the location if date_index: # (note that get_loc will throw a KeyError if key is invalid) loc = index.get_loc(key) elif int_index or range_index: # For NumericIndex and RangeIndex, key is assumed to be the location # and not an index value (this assumption is required to support # RangeIndex) try: index[key] # We want to raise a KeyError in this case, to keep the exception # consistent across index types. # - Attempting to index with an out-of-bound location (e.g. # index[10] on an index of length 9) will raise an IndexError # (as of Pandas 0.22) # - Attemtping to index with a type that cannot be cast to integer # (e.g. a non-numeric string) will raise a ValueError if the # index is RangeIndex (otherwise will raise an IndexError) # (as of Pandas 0.22) except (IndexError, ValueError) as e: raise KeyError(str(e)) loc = key else: loc = index.get_loc(key) # Check if we now have a modified index index_was_expanded = index is not base_index # Return the index through the end of the loc / slice if isinstance(loc, slice): end = loc.stop - 1 else: end = loc return loc, index[: end + 1], index_was_expanded def get_index_label_loc(key, index, row_labels): """ Get the location of a specific key in an index or model row labels Parameters ---------- key : label The key for which to find the location if the underlying index is a DateIndex or is only being used as row labels, or a location if the underlying index is a RangeIndex or a NumericIndex. index : pd.Index The index to search. row_labels : pd.Index Row labels to search if key not found in index Returns ------- loc : int The location of the key index : pd.Index The index including the key; this is a copy of the original index unless the index had to be expanded to accommodate `key`. index_was_expanded : bool Whether or not the index was expanded to accommodate `key`. Notes ----- This function expands on `get_index_loc` by first trying the given base index (or the model's index if the base index was not given) and then falling back to try again with the model row labels as the base index. """ try: loc, index, index_was_expanded = get_index_loc(key, index) except KeyError as e: try: if not isinstance(key, (int, np.integer)): loc = row_labels.get_loc(key) else: raise # Require scalar # Pandas may return a slice if there are multiple matching # locations that are monotonic increasing (otherwise it may # return an array of integer locations, see below). 
if isinstance(loc, slice): loc = loc.start if isinstance(loc, np.ndarray): # Pandas may return a mask (boolean array), for e.g.: # pd.Index(list('abcb')).get_loc('b') if loc.dtype == bool: # Return the first True value # (we know there is at least one True value if we're # here because otherwise the get_loc call would have # raised an exception) loc = np.argmax(loc) # Finally, Pandas may return an integer array of # locations that match the given value, for e.g. # pd.DatetimeIndex(['2001-02', '2001-01']).get_loc('2001') # (this appears to be slightly undocumented behavior, since # only int, slice, and mask are mentioned in docs for # pandas.Index.get_loc as of 0.23.4) else: loc = loc[0] if not isinstance(loc, numbers.Integral): raise index = row_labels[: loc + 1] index_was_expanded = False except: raise e return loc, index, index_was_expanded def get_prediction_index( start, end, nobs, base_index, index=None, silent=False, index_none=False, index_generated=None, data=None, ) -> tuple[int, int, int, Index | None]: """ Get the location of a specific key in an index or model row labels Parameters ---------- start : label The key at which to start prediction. Depending on the underlying model's index, may be an integer, a date (string, datetime object, pd.Timestamp, or pd.Period object), or some other object in the model's row labels. end : label The key at which to end prediction (note that this key will be *included* in prediction). Depending on the underlying model's index, may be an integer, a date (string, datetime object, pd.Timestamp, or pd.Period object), or some other object in the model's row labels. nobs : int base_index : pd.Index index : pd.Index, optional Optionally an index to associate the predicted results to. If None, an attempt is made to create an index for the predicted results from the model's index or model's row labels. silent : bool, optional Argument to silence warnings. Returns ------- start : int The index / observation location at which to begin prediction. end : int The index / observation location at which to end in-sample prediction. The maximum value for this is nobs-1. out_of_sample : int The number of observations to forecast after the end of the sample. prediction_index : pd.Index or None The index associated with the prediction results. This index covers the range [start, end + out_of_sample]. If the model has no given index and no given row labels (i.e. endog/exog is not Pandas), then this will be None. Notes ----- The arguments `start` and `end` behave differently, depending on if they are integer or not. If either is an integer, then it is assumed to refer to a *location* in the index, not to an index value. On the other hand, if it is a date string or some other type of object, then it is assumed to refer to an index *value*. In all cases, the returned `start` and `end` values refer to index *locations* (so in the former case, the given location is validated and returned whereas in the latter case a location is found that corresponds to the given index value). This difference in behavior is necessary to support `RangeIndex`. This is because integers for a RangeIndex could refer either to index values or to index locations in an ambiguous way (while for `NumericIndex`, since we have required them to be full indexes, there is no ambiguity). """ # Convert index keys (start, end) to index locations and get associated # indexes. 
try: start, _, start_oos = get_index_label_loc( start, base_index, data.row_labels ) except KeyError: raise KeyError( "The `start` argument could not be matched to a" " location related to the index of the data." ) if end is None: end = max(start, len(base_index) - 1) try: end, end_index, end_oos = get_index_label_loc( end, base_index, data.row_labels ) except KeyError: raise KeyError( "The `end` argument could not be matched to a" " location related to the index of the data." ) # Handle slices (if the given index keys cover more than one date) if isinstance(start, slice): start = start.start if isinstance(end, slice): end = end.stop - 1 # Get the actual index for the prediction prediction_index = end_index[start:] # Validate prediction options if end < start: raise ValueError("Prediction must have `end` after `start`.") # Handle custom prediction index # First, if we were given an index, check that it's the right size and # use it if so if index is not None: if not len(prediction_index) == len(index): raise ValueError( "Invalid `index` provided in prediction." " Must have length consistent with `start`" " and `end` arguments." ) # But if we weren't given Pandas input, this index will not be # used because the data will not be wrapped; in that case, issue # a warning if not isinstance(data, PandasData) and not silent: warnings.warn( "Because the model data (`endog`, `exog`) were" " not given as Pandas objects, the prediction" " output will be Numpy arrays, and the given" " `index` argument will only be used" " internally.", ValueWarning, stacklevel=2, ) prediction_index = Index(index) # Now, if we *do not* have a supported index, but we were given some # kind of index... elif index_generated and not index_none: # If we are in sample, and have row labels, use them if data.row_labels is not None and not (start_oos or end_oos): prediction_index = data.row_labels[start : end + 1] # Otherwise, warn the user that they will get an NumericIndex else: if not silent: warnings.warn( "No supported index is available." " Prediction results will be given with" " an integer index beginning at `start`.", ValueWarning, stacklevel=2, ) warnings.warn( "No supported index is available. In the next" " version, calling this method in a model" " without a supported index will result in an" " exception.", DeprecationWarning, stacklevel=2, ) elif index_none: prediction_index = None # For backwards compatibility, set `predict_*` values if prediction_index is not None: data.predict_start = prediction_index[0] data.predict_end = prediction_index[-1] data.predict_dates = prediction_index else: data.predict_start = None data.predict_end = None data.predict_dates = None # Compute out-of-sample observations out_of_sample = max(end - (nobs - 1), 0) end -= out_of_sample return start, end, out_of_sample, prediction_index class TimeSeriesModel(base.LikelihoodModel): __doc__ = _tsa_doc % { "model": _model_doc, "params": _generic_params, "extra_params": _missing_param_doc, "extra_sections": "", } def __init__( self, endog, exog=None, dates=None, freq=None, missing="none", **kwargs ): super().__init__(endog, exog, missing=missing, **kwargs) # Date handling in indexes self._init_dates(dates, freq) def _init_dates(self, dates=None, freq=None): """ Initialize dates Parameters ---------- dates : array_like, optional An array like object containing dates. freq : str, tuple, datetime.timedelta, DateOffset or None, optional A frequency specification for either `dates` or the row labels from the endog / exog data. 
Notes ----- Creates `self._index` and related attributes. `self._index` is always a Pandas index, and it is always NumericIndex, DatetimeIndex, or PeriodIndex. If Pandas objects, endog / exog may have any type of index. If it is an NumericIndex with values 0, 1, ..., nobs-1 or if it is (coerceable to) a DatetimeIndex or PeriodIndex *with an associated frequency*, then it is called a "supported" index. Otherwise it is called an "unsupported" index. Supported indexes are standardized (i.e. a list of date strings is converted to a DatetimeIndex) and the result is put in `self._index`. Unsupported indexes are ignored, and a supported NumericIndex is generated and put in `self._index`. Warnings are issued in this case to alert the user if the returned index from some operation (e.g. forecasting) is different from the original data's index. However, whenever possible (e.g. purely in-sample prediction), the original index is returned. The benefit of supported indexes is that they allow *forecasting*, i.e. it is possible to extend them in a reasonable way. Thus every model must have an underlying supported index, even if it is just a generated NumericIndex. """ # Get our index from `dates` if available, otherwise from whatever # Pandas index we might have retrieved from endog, exog if dates is not None: index = dates else: index = self.data.row_labels # Sanity check that we do not have a `freq` without an index if index is None and freq is not None: raise ValueError("Frequency provided without associated index.") # If an index is available, see if it is a date-based index or if it # can be coerced to one. (If it cannot we'll fall back, below, to an # internal, 0, 1, ... nobs-1 integer index for modeling purposes) inferred_freq = False if index is not None: # Try to coerce to date-based index if not isinstance(index, (DatetimeIndex, PeriodIndex)): try: # Only try to coerce non-numeric index types (string, # list of date-times, etc.) # Note that np.asarray(Float64Index([...])) yields an # object dtype array in earlier versions of Pandas (and so # will not have is_numeric_dtype == True), so explicitly # check for it here. But note also that in very early # Pandas (~0.12), Float64Index does not exist (and so the # statsmodels compat makes it an empty tuple, so in that # case also check if the first element is a float. _index = np.asarray(index) if ( is_numeric_dtype(_index) or is_float_index(index) or (isinstance(_index[0], float)) ): raise ValueError("Numeric index given") # If a non-index Pandas series was given, only keep its # values (because we must have a pd.Index type, below, and # pd.to_datetime will return a Series when passed # non-list-like objects) if isinstance(index, Series): index = index.values # All coercion is done via pd.to_datetime # Note: date coercion via pd.to_datetime does not handle # string versions of PeriodIndex objects most of the time. _index = to_datetime(index) # Older versions of Pandas can sometimes fail here and # return a numpy array - check to make sure it's an index if not isinstance(_index, Index): raise ValueError("Could not coerce to date index") index = _index except: # Only want to actually raise an exception if `dates` was # provided but cannot be coerced. If we got the index from # the row_labels, we'll just ignore it and use the integer # index below if dates is not None: raise ValueError( "Non-date index index provided to" " `dates` argument." 
) # Now, if we were given, or coerced, a date-based index, make sure # it has an associated frequency if isinstance(index, (DatetimeIndex, PeriodIndex)): # If no frequency, try to get an inferred frequency if freq is None and index.freq is None: freq = index.inferred_freq # If we got an inferred frequncy, alert the user if freq is not None: inferred_freq = True if freq is not None: warnings.warn( "No frequency information was" " provided, so inferred frequency %s" " will be used." % freq, ValueWarning, stacklevel = 2, ) # Convert the passed freq to a pandas offset object if freq is not None: freq = to_offset(freq) # Now, if no frequency information is available from the index # itself or from the `freq` argument, raise an exception if freq is None and index.freq is None: # But again, only want to raise the exception if `dates` # was provided. if dates is not None: raise ValueError( "No frequency information was" " provided with date index and no" " frequency could be inferred." ) # However, if the index itself has no frequency information but # the `freq` argument is available (or was inferred), construct # a new index with an associated frequency elif freq is not None and index.freq is None: resampled_index = date_range( start=index[0], end=index[-1], freq=freq ) if not inferred_freq and not resampled_index.equals(index): raise ValueError( "The given frequency argument could" " not be matched to the given index." ) index = resampled_index # Finally, if the index itself has a frequency and there was # also a given frequency, raise an exception if they are not # equal elif ( freq is not None and not inferred_freq and not (index.freq == freq) ): raise ValueError( "The given frequency argument is" " incompatible with the given index." ) # Finally, raise an exception if we could not coerce to date-based # but we were given a frequency argument elif freq is not None: raise ValueError( "Given index could not be coerced to dates" " but `freq` argument was provided." ) # Get attributes of the index has_index = index is not None date_index = isinstance(index, (DatetimeIndex, PeriodIndex)) period_index = isinstance(index, PeriodIndex) int_index = is_int_index(index) range_index = isinstance(index, RangeIndex) has_freq = index.freq is not None if date_index else None increment = Index(range(self.endog.shape[0])) is_increment = index.equals(increment) if int_index else None if date_index: try: is_monotonic = index.is_monotonic_increasing except AttributeError: # Remove after pandas 1.5 is minimum is_monotonic = index.is_monotonic else: is_monotonic = None # Issue warnings for unsupported indexes if has_index and not (date_index or range_index or is_increment): warnings.warn( "An unsupported index was provided and will be" " ignored when e.g. forecasting.", ValueWarning, stacklevel=2, ) if date_index and not has_freq: warnings.warn( "A date index has been provided, but it has no" " associated frequency information and so will be" " ignored when e.g. forecasting.", ValueWarning, stacklevel=2, ) if date_index and not is_monotonic: warnings.warn( "A date index has been provided, but it is not" " monotonic and so will be ignored when e.g." 
" forecasting.", ValueWarning, stacklevel=2, ) # Construct the internal index index_generated = False valid_index = ( (date_index and has_freq and is_monotonic) or (int_index and is_increment) or range_index ) if valid_index: _index = index else: _index = increment index_generated = True self._index = _index self._index_generated = index_generated self._index_none = index is None self._index_int64 = int_index and not range_index and not date_index self._index_dates = date_index and not index_generated self._index_freq = self._index.freq if self._index_dates else None self._index_inferred_freq = inferred_freq # For backwards compatibility, set data.dates, data.freq self.data.dates = self._index if self._index_dates else None self.data.freq = self._index.freqstr if self._index_dates else None def _get_index_loc(self, key, base_index=None): """ Get the location of a specific key in an index Parameters ---------- key : label The key for which to find the location if the underlying index is a DateIndex or a location if the underlying index is a RangeIndex or an NumericIndex. base_index : pd.Index, optional Optionally the base index to search. If None, the model's index is searched. Returns ------- loc : int The location of the key index : pd.Index The index including the key; this is a copy of the original index unless the index had to be expanded to accommodate `key`. index_was_expanded : bool Whether or not the index was expanded to accommodate `key`. Notes ----- If `key` is past the end of of the given index, and the index is either an NumericIndex or a date index, this function extends the index up to and including key, and then returns the location in the new index. """ if base_index is None: base_index = self._index return get_index_loc(key, base_index) def _get_index_label_loc(self, key, base_index=None): """ Get the location of a specific key in an index or model row labels Parameters ---------- key : label The key for which to find the location if the underlying index is a DateIndex or is only being used as row labels, or a location if the underlying index is a RangeIndex or an NumericIndex. base_index : pd.Index, optional Optionally the base index to search. If None, the model's index is searched. Returns ------- loc : int The location of the key index : pd.Index The index including the key; this is a copy of the original index unless the index had to be expanded to accommodate `key`. index_was_expanded : bool Whether or not the index was expanded to accommodate `key`. Notes ----- This method expands on `_get_index_loc` by first trying the given base index (or the model's index if the base index was not given) and then falling back to try again with the model row labels as the base index. """ if base_index is None: base_index = self._index return get_index_label_loc(key, base_index, self.data.row_labels) def _get_prediction_index(self, start, end, index=None, silent=False) -> tuple[int, int, int, Index | None]: """ Get the location of a specific key in an index or model row labels Parameters ---------- start : label The key at which to start prediction. Depending on the underlying model's index, may be an integer, a date (string, datetime object, pd.Timestamp, or pd.Period object), or some other object in the model's row labels. end : label The key at which to end prediction (note that this key will be *included* in prediction). 
Depending on the underlying model's index, may be an integer, a date (string, datetime object, pd.Timestamp, or pd.Period object), or some other object in the model's row labels. index : pd.Index, optional Optionally an index to associate the predicted results to. If None, an attempt is made to create an index for the predicted results from the model's index or model's row labels. silent : bool, optional Argument to silence warnings. Returns ------- start : int The index / observation location at which to begin prediction. end : int The index / observation location at which to end in-sample prediction. The maximum value for this is nobs-1. out_of_sample : int The number of observations to forecast after the end of the sample. prediction_index : pd.Index or None The index associated with the prediction results. This index covers the range [start, end + out_of_sample]. If the model has no given index and no given row labels (i.e. endog/exog is not Pandas), then this will be None. Notes ----- The arguments `start` and `end` behave differently, depending on if they are integer or not. If either is an integer, then it is assumed to refer to a *location* in the index, not to an index value. On the other hand, if it is a date string or some other type of object, then it is assumed to refer to an index *value*. In all cases, the returned `start` and `end` values refer to index *locations* (so in the former case, the given location is validated and returned whereas in the latter case a location is found that corresponds to the given index value). This difference in behavior is necessary to support `RangeIndex`. This is because integers for a RangeIndex could refer either to index values or to index locations in an ambiguous way (while for `NumericIndex`, since we have required them to be full indexes, there is no ambiguity). """ nobs = len(self.endog) return get_prediction_index( start, end, nobs, base_index=self._index, index=index, silent=silent, index_none=self._index_none, index_generated=self._index_generated, data=self.data, ) def _get_exog_names(self): return self.data.xnames def _set_exog_names(self, vals): if not isinstance(vals, list): vals = [vals] self.data.xnames = vals # TODO: This is an antipattern, fix/remove with VAR # overwrite with writable property for (V)AR models exog_names = property( _get_exog_names, _set_exog_names, None, "The names of the exogenous variables.", ) class TimeSeriesModelResults(base.LikelihoodModelResults): def __init__(self, model, params, normalized_cov_params, scale=1.0): self.data = model.data super().__init__(model, params, normalized_cov_params, scale) class TimeSeriesResultsWrapper(wrap.ResultsWrapper): _attrs = {} _wrap_attrs = wrap.union_dicts( base.LikelihoodResultsWrapper._wrap_attrs, _attrs ) _methods = {"predict": "dates"} _wrap_methods = wrap.union_dicts( base.LikelihoodResultsWrapper._wrap_methods, _methods ) wrap.populate_wrapper( TimeSeriesResultsWrapper, TimeSeriesModelResults # noqa:E305 )
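# --- Illustrative usage sketch (not part of statsmodels) ----------------------
# A small example of how get_index_loc() treats integer keys as *locations* on
# a supported date index, extending the index when the key lies out of sample.
# The values in the trailing comments follow from the behaviour documented
# above; run the block to confirm them in your environment.

if __name__ == "__main__":
    import pandas as pd

    idx = pd.date_range("2000-01-01", periods=3, freq="D")

    # In-sample: an integer key is a location, so the returned index is
    # truncated at that location and is not expanded.
    loc, new_index, expanded = get_index_loc(1, idx)
    print(loc, len(new_index), expanded)  # 1 2 False

    # Out-of-sample: the index is extended up to and including the key.
    loc, new_index, expanded = get_index_loc(4, idx)
    print(loc, len(new_index), expanded)  # 4 5 True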
"""Utilities for drawing random numbers.""" import contextlib import numpy as np import zlib from caput import config from ..core import task _rng = None _default_bitgen = np.random.SFC64 def default_rng(): """Returns an instance of the default random number generator to use. This creates a randomly seeded generator using the fast SFC64 bit generator underneath. This is only initialise on the first call, subsequent calls will return the same Generator. Returns ------- rng : np.random.Generator """ global _rng if _rng is None: _rng = np.random.Generator(_default_bitgen()) return _rng def complex_normal(size=None, loc=0.0, scale=1.0, dtype=None, rng=None, out=None): """ Get a set of complex normal variables. By default generate standard complex normal variables. Parameters ---------- size : tuple Shape of the array of variables. loc : np.ndarray or complex float, optional The mean of the complex output. Can be any array which broadcasts against an array of `size`. scale : np.ndarray or float, optional The standard deviation of the complex output. Can be any array which broadcasts against an array of `size`. dtype : {np.complex64, np.complex128}, optional Output datatype. rng : np.random.Generator, optional Generator object to use. out : np.ndarray[shape], optional Array to place output directly into. Returns ------- out : np.ndarray[shape] Complex gaussian variates. """ # Validate/set size argument if size is None and out is None: size = (1,) elif out is not None and size is None: size = out.shape elif out is not None and size is not None and out.shape != size: raise ValueError( f"Shape of output array ({out.shape}) != size argument ({size}" ) # Validate/set dtype argument if dtype is None and out is None: dtype = np.complex128 elif dtype is None and out is not None: dtype = out.dtype.type elif out is not None and dtype is not None and out.dtype.type != dtype: raise ValueError( f"Dtype of output array ({out.dtype.type}) != dtype argument ({dtype}" ) if rng is None: rng = default_rng() _type_map = { np.complex64: np.float32, np.complex128: np.float64, } if dtype not in _type_map: raise ValueError( f"Only dtype must be complex64 or complex128. Got dtype={dtype}." ) if out is None: out = np.ndarray(size, dtype=dtype) # Fill the complex array by creating a real type view of it rtype = _type_map[dtype] rsize = size[:-1] + (size[-1] * 2,) rng.standard_normal(rsize, dtype=rtype, out=out.view(rtype)) # Use inplace ops for scaling and adding to avoid intermediate arrays rscale = scale / 2**0.5 out *= rscale # Don't bother with the additions if not needed if np.any(loc != 0.0): out += loc return out def standard_complex_normal(shape, dtype=None, rng=None): """ Get a set of standard complex normal variables. Parameters ---------- shape : tuple Shape of the array of variables. dtype : {np.complex64, np.complex128}, optional Output datatype. rng : np.random.Generator, optional Generator object to use. Returns ------- out : np.ndarray[shape] Complex gaussian variates. """ return complex_normal(shape, dtype=dtype, rng=rng) def standard_complex_wishart(m, n, rng=None): """Draw a standard Wishart matrix. Parameters ---------- m : integer Number of variables (i.e. size of matrix). n : integer Number of measurements the covariance matrix is estimated from. rng : np.random.Generator, optional Random number generator to use. 
Returns ------- B : np.ndarray[m, m] """ if rng is None: rng = default_rng() # Fill in normal variables in the lower triangle T = np.zeros((m, m), dtype=np.complex128) T[np.tril_indices(m, k=-1)] = ( rng.standard_normal(m * (m - 1) // 2) + 1.0j * rng.standard_normal(m * (m - 1) // 2) ) / 2**0.5 # Gamma variables on the diagonal for i in range(m): T[i, i] = rng.gamma(n - i) ** 0.5 # Return the square to get the Wishart matrix return np.dot(T, T.T.conj()) def complex_wishart(C, n, rng=None): """Draw a complex Wishart matrix. Parameters ---------- C_exp : np.ndarray[:, :] Expected covaraince matrix. n : integer Number of measurements the covariance matrix is estimated from. rng : np.random.Generator, optional Random number generator to use. Returns ------- C_samp : np.ndarray Sample covariance matrix. """ import scipy.linalg as la # Find Cholesky of C L = la.cholesky(C, lower=True) # Generate a standard Wishart A = standard_complex_wishart(C.shape[0], n, rng=rng) # Transform to get the Wishart variable return np.dot(L, np.dot(A, L.T.conj())) @contextlib.contextmanager def mpi_random_seed(seed, extra=0, gen=None): """Use a specific random seed and return to the original state on exit. This is designed to work for MPI computations, incrementing the actual seed of each process by the MPI rank. Overall each process gets the numpy seed: `numpy_seed = seed + mpi_rank + 4096 * extra`. This can work for either the global numpy.random context or for new np.random.Generator. Parameters ---------- seed : int Base seed to set. If seed is :obj:`None`, re-seed randomly. extra : int, optional An extra part of the seed, which should be changed for calculations using the same seed, but that want different random sequences. gen: :class: `Generator` A RandomGen bit_generator whose internal seed state we are going to influence. Yields ------ If we are setting the numpy.random context, nothing is yielded. :class: `Generator` If we are setting the RandomGen bit_generator, it will be returned. """ import warnings from caput import mpiutil warnings.warn( "This routine has fatal flaws. Try using `RandomTask` instead", category=DeprecationWarning, ) # Just choose a random number per process as the seed if nothing was set. if seed is None: seed = np.random.randint(2**30) # Construct the new process specific seed new_seed = seed + mpiutil.rank + 4096 * extra np.random.seed(new_seed) # we will be setting the numpy.random context if gen is None: # Copy the old state for restoration later. old_state = np.random.get_state() # Enter the context block, and reset the state on exit. try: yield finally: np.random.set_state(old_state) # we will be setting the randomgen context else: # Copy the old state for restoration later. old_state = gen.state # Enter the context block, and reset the state on exit. try: yield gen finally: gen.state = old_state class RandomTask(task.MPILoggedTask): """A base class for MPI tasks that needs to generate random numbers. Attributes ---------- seed : int, optional Set the seed for use in the task. If not set, a random seed is generated and broadcast to all ranks. The seed being used is logged, to repeat a previous run, simply set this as the seed parameter. """ seed = config.Property(proptype=int, default=None) _rng = None @property def rng(self): """A random number generator for this task. .. warning:: Initialising the RNG is a collective operation if the seed is not set, and so all ranks must participate in the first access of this property. 
Returns ------- rng : np.random.Generator A deterministically seeded random number generator suitable for use in MPI jobs. """ if self._rng is None: # Generate a new base seed for all MPI ranks if self.seed is None: # Use seed sequence to generate a random seed seed = np.random.SeedSequence().entropy seed = self.comm.bcast(seed, root=0) else: seed = self.seed self.log.info("Using random seed: %i", seed) # Construct the new MPI-process and task specific seed. This mixes an # integer checksum of the class name with the MPI-rank to generate a new # hash. # NOTE: the slightly odd (rank + 1) is to ensure that even rank=0 mixes in # the class seed cls_name = "%s.%s" % (self.__module__, self.__class__.__name__) cls_seed = zlib.adler32(cls_name.encode()) new_seed = seed + (self.comm.rank + 1) * cls_seed self._rng = np.random.Generator(_default_bitgen(new_seed)) return self._rng
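# --- Illustrative usage sketch (not part of the module above) -----------------
# A short example of drawing complex normal variates with complex_normal().
# ``scale`` is the standard deviation of the complex value, so the real and
# imaginary parts each carry half of the variance.  The printed values are
# statistical estimates and only approach the commented numbers for large
# arrays; the explicit seed is an arbitrary choice for reproducibility.

if __name__ == "__main__":
    rng = np.random.Generator(_default_bitgen(1234))

    z = complex_normal(size=(1000, 1000), scale=2.0, dtype=np.complex128, rng=rng)

    print(z.var())       # approximately 4.0 (= scale ** 2)
    print(z.real.var())  # approximately 2.0 (= scale ** 2 / 2)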
"""Test Axis config flow.""" from unittest.mock import Mock, patch from homeassistant.components import axis from homeassistant.components.axis import config_flow from tests.common import MockConfigEntry, mock_coro async def test_configured_devices(hass): """Test that configured devices works as expected.""" result = config_flow.configured_devices(hass) assert not result entry = MockConfigEntry( domain=axis.DOMAIN, data={axis.config_flow.CONF_MAC: "1234"} ) entry.add_to_hass(hass) result = config_flow.configured_devices(hass) assert len(result) == 1 async def test_flow_works(hass): """Test that config flow works.""" with patch("axis.AxisDevice") as mock_device: def mock_constructor(loop, host, username, password, port, web_proto): """Fake the controller constructor.""" mock_device.loop = loop mock_device.host = host mock_device.username = username mock_device.password = password mock_device.port = port return mock_device mock_device.side_effect = mock_constructor mock_device.vapix.params.system_serialnumber = "serialnumber" mock_device.vapix.params.prodnbr = "prodnbr" mock_device.vapix.params.prodtype = "prodtype" mock_device.vapix.params.firmware_version = "firmware_version" result = await hass.config_entries.flow.async_init( config_flow.DOMAIN, context={"source": "user"} ) assert result["type"] == "form" assert result["step_id"] == "user" result = await hass.config_entries.flow.async_configure( result["flow_id"], user_input={ config_flow.CONF_HOST: "1.2.3.4", config_flow.CONF_USERNAME: "user", config_flow.CONF_PASSWORD: "pass", config_flow.CONF_PORT: 80, }, ) assert result["type"] == "create_entry" assert result["title"] == "{} - {}".format("prodnbr", "serialnumber") assert result["data"] == { axis.CONF_DEVICE: { config_flow.CONF_HOST: "1.2.3.4", config_flow.CONF_USERNAME: "user", config_flow.CONF_PASSWORD: "pass", config_flow.CONF_PORT: 80, }, config_flow.CONF_MAC: "serialnumber", config_flow.CONF_MODEL: "prodnbr", config_flow.CONF_NAME: "prodnbr 0", } async def test_flow_fails_already_configured(hass): """Test that config flow fails on already configured device.""" flow = config_flow.AxisFlowHandler() flow.hass = hass entry = MockConfigEntry( domain=axis.DOMAIN, data={axis.config_flow.CONF_MAC: "1234"} ) entry.add_to_hass(hass) mock_device = Mock() mock_device.vapix.params.system_serialnumber = "1234" with patch( "homeassistant.components.axis.config_flow.get_device", return_value=mock_coro(mock_device), ): result = await flow.async_step_user( user_input={ config_flow.CONF_HOST: "1.2.3.4", config_flow.CONF_USERNAME: "user", config_flow.CONF_PASSWORD: "pass", config_flow.CONF_PORT: 80, } ) assert result["errors"] == {"base": "already_configured"} async def test_flow_fails_faulty_credentials(hass): """Test that config flow fails on faulty credentials.""" flow = config_flow.AxisFlowHandler() flow.hass = hass with patch( "homeassistant.components.axis.config_flow.get_device", side_effect=config_flow.AuthenticationRequired, ): result = await flow.async_step_user( user_input={ config_flow.CONF_HOST: "1.2.3.4", config_flow.CONF_USERNAME: "user", config_flow.CONF_PASSWORD: "pass", config_flow.CONF_PORT: 80, } ) assert result["errors"] == {"base": "faulty_credentials"} async def test_flow_fails_device_unavailable(hass): """Test that config flow fails on device unavailable.""" flow = config_flow.AxisFlowHandler() flow.hass = hass with patch( "homeassistant.components.axis.config_flow.get_device", side_effect=config_flow.CannotConnect, ): result = await flow.async_step_user( user_input={ 
config_flow.CONF_HOST: "1.2.3.4", config_flow.CONF_USERNAME: "user", config_flow.CONF_PASSWORD: "pass", config_flow.CONF_PORT: 80, } ) assert result["errors"] == {"base": "device_unavailable"} async def test_flow_create_entry(hass): """Test that create entry can generate a name without other entries.""" flow = config_flow.AxisFlowHandler() flow.hass = hass flow.model = "model" result = await flow._create_entry() assert result["data"][config_flow.CONF_NAME] == "model 0" async def test_flow_create_entry_more_entries(hass): """Test that create entry can generate a name with other entries.""" entry = MockConfigEntry( domain=axis.DOMAIN, data={config_flow.CONF_NAME: "model 0", config_flow.CONF_MODEL: "model"}, ) entry.add_to_hass(hass) entry2 = MockConfigEntry( domain=axis.DOMAIN, data={config_flow.CONF_NAME: "model 1", config_flow.CONF_MODEL: "model"}, ) entry2.add_to_hass(hass) flow = config_flow.AxisFlowHandler() flow.hass = hass flow.model = "model" result = await flow._create_entry() assert result["data"][config_flow.CONF_NAME] == "model 2" async def test_zeroconf_flow(hass): """Test that zeroconf discovery for new devices work.""" with patch.object(axis, "get_device", return_value=mock_coro(Mock())): result = await hass.config_entries.flow.async_init( config_flow.DOMAIN, data={ config_flow.CONF_HOST: "1.2.3.4", config_flow.CONF_PORT: 80, "hostname": "name", "properties": {"macaddress": "00408C12345"}, }, context={"source": "zeroconf"}, ) assert result["type"] == "form" assert result["step_id"] == "user" async def test_zeroconf_flow_known_device(hass): """Test that zeroconf discovery for known devices work. This is legacy support from devices registered with configurator. """ with patch( "homeassistant.components.axis.config_flow.load_json", return_value={ "00408C12345": { config_flow.CONF_HOST: "2.3.4.5", config_flow.CONF_USERNAME: "user", config_flow.CONF_PASSWORD: "pass", config_flow.CONF_PORT: 80, } }, ), patch("axis.AxisDevice") as mock_device: def mock_constructor(loop, host, username, password, port, web_proto): """Fake the controller constructor.""" mock_device.loop = loop mock_device.host = host mock_device.username = username mock_device.password = password mock_device.port = port return mock_device mock_device.side_effect = mock_constructor mock_device.vapix.params.system_serialnumber = "serialnumber" mock_device.vapix.params.prodnbr = "prodnbr" mock_device.vapix.params.prodtype = "prodtype" mock_device.vapix.params.firmware_version = "firmware_version" result = await hass.config_entries.flow.async_init( config_flow.DOMAIN, data={ config_flow.CONF_HOST: "1.2.3.4", config_flow.CONF_PORT: 80, "hostname": "name", "properties": {"macaddress": "00408C12345"}, }, context={"source": "zeroconf"}, ) assert result["type"] == "create_entry" async def test_zeroconf_flow_already_configured(hass): """Test that zeroconf doesn't setup already configured devices.""" entry = MockConfigEntry( domain=axis.DOMAIN, data={ axis.CONF_DEVICE: {axis.config_flow.CONF_HOST: "1.2.3.4"}, axis.config_flow.CONF_MAC: "00408C12345", }, ) entry.add_to_hass(hass) result = await hass.config_entries.flow.async_init( config_flow.DOMAIN, data={ config_flow.CONF_HOST: "1.2.3.4", config_flow.CONF_USERNAME: "user", config_flow.CONF_PASSWORD: "pass", config_flow.CONF_PORT: 80, "hostname": "name", "properties": {"macaddress": "00408C12345"}, }, context={"source": "zeroconf"}, ) assert result["type"] == "abort" assert result["reason"] == "already_configured" async def test_zeroconf_flow_ignore_non_axis_device(hass): """Test 
that zeroconf doesn't setup devices with link local addresses.""" result = await hass.config_entries.flow.async_init( config_flow.DOMAIN, data={ config_flow.CONF_HOST: "169.254.3.4", "properties": {"macaddress": "01234567890"}, }, context={"source": "zeroconf"}, ) assert result["type"] == "abort" assert result["reason"] == "not_axis_device" async def test_zeroconf_flow_ignore_link_local_address(hass): """Test that zeroconf doesn't setup devices with link local addresses.""" result = await hass.config_entries.flow.async_init( config_flow.DOMAIN, data={ config_flow.CONF_HOST: "169.254.3.4", "properties": {"macaddress": "00408C12345"}, }, context={"source": "zeroconf"}, ) assert result["type"] == "abort" assert result["reason"] == "link_local_address" async def test_zeroconf_flow_bad_config_file(hass): """Test that zeroconf discovery with bad config files abort.""" with patch( "homeassistant.components.axis.config_flow.load_json", return_value={ "00408C12345": { config_flow.CONF_HOST: "2.3.4.5", config_flow.CONF_USERNAME: "user", config_flow.CONF_PASSWORD: "pass", config_flow.CONF_PORT: 80, } }, ), patch( "homeassistant.components.axis.config_flow.DEVICE_SCHEMA", side_effect=config_flow.vol.Invalid(""), ): result = await hass.config_entries.flow.async_init( config_flow.DOMAIN, data={ config_flow.CONF_HOST: "1.2.3.4", "hostname": "name", "properties": {"macaddress": "00408C12345"}, }, context={"source": "zeroconf"}, ) assert result["type"] == "abort" assert result["reason"] == "bad_config_file" async def test_import_flow_works(hass): """Test that import flow works.""" with patch("axis.AxisDevice") as mock_device: def mock_constructor(loop, host, username, password, port, web_proto): """Fake the controller constructor.""" mock_device.loop = loop mock_device.host = host mock_device.username = username mock_device.password = password mock_device.port = port return mock_device mock_device.side_effect = mock_constructor mock_device.vapix.params.system_serialnumber = "serialnumber" mock_device.vapix.params.prodnbr = "prodnbr" mock_device.vapix.params.prodtype = "prodtype" mock_device.vapix.params.firmware_version = "firmware_version" result = await hass.config_entries.flow.async_init( config_flow.DOMAIN, data={ config_flow.CONF_HOST: "1.2.3.4", config_flow.CONF_USERNAME: "user", config_flow.CONF_PASSWORD: "pass", config_flow.CONF_PORT: 80, config_flow.CONF_NAME: "name", }, context={"source": "import"}, ) assert result["type"] == "create_entry" assert result["title"] == "{} - {}".format("prodnbr", "serialnumber") assert result["data"] == { axis.CONF_DEVICE: { config_flow.CONF_HOST: "1.2.3.4", config_flow.CONF_USERNAME: "user", config_flow.CONF_PASSWORD: "pass", config_flow.CONF_PORT: 80, }, config_flow.CONF_MAC: "serialnumber", config_flow.CONF_MODEL: "prodnbr", config_flow.CONF_NAME: "name", }