def random_pairs_without_replacement_large_frames(n, shape, random_state=None):
    """Make a sample of random pairs without replacement."""
    n_max = max_pairs(shape)

    sample = np.array([])

    # Run as long as the number of pairs is less than the requested number
    # of pairs n.
    while len(sample) < n:
        # The number of pairs to sample (sample twice as many record pairs
        # because the duplicates are dropped).
        n_sample_size = (n - len(sample)) * 2
        sample = random_state.randint(n_max, size=n_sample_size)

        # concatenate pairs and deduplicate
        pairs_non_unique = np.append(sample, sample)
        sample = _unique_rows_numpy(pairs_non_unique)

    # return 2d indices
    if len(shape) == 1:
        return _map_tril_1d_on_2d(sample[0:n], shape[0])
    else:
        return np.unravel_index(sample[0:n], shape)
def success(headers=None, data=''):
    """Generate success JSON to send to client."""
    passed_headers = {} if headers is None else headers

    if isinstance(data, dict):
        data = json.dumps(data)

    ret_headers = {'status': 'ok'}
    ret_headers.update(passed_headers)
    return server_responce(ret_headers, data)
def augment_observation(observation, reward, cum_reward, frame_index,
                        bar_color=None, header_height=27):
    """Augments an observation with debug info."""
    img = PIL_Image().new("RGB", (observation.shape[1], header_height,))
    draw = PIL_ImageDraw().Draw(img)
    draw.text(
        (1, 0), "c:{:3}, r:{:3}".format(int(cum_reward), int(reward)),
        fill=(255, 0, 0))
    draw.text(
        (1, 15), "f:{:3}".format(int(frame_index)),
        fill=(255, 0, 0))
    header = np.copy(np.asarray(img))
    del img
    if bar_color is not None:
        header[0, :, :] = bar_color
    return np.concatenate([header, observation], axis=0)
def _result(self, timeout=None):
    """Return the result, if available.

    It may take an unknown amount of time to return the result, so a
    timeout option is provided. If the given number of seconds pass with
    no result, a TimeoutError will be thrown.

    If a previous call timed out, additional calls to this function will
    still wait for a result and return it if available. If a result was
    returned on one call, additional calls will return/raise the same
    result.
    """
    if timeout is None:
        warnings.warn("Unlimited timeouts are deprecated.",
                      DeprecationWarning, stacklevel=3)
        # Queue.get(None) won't get interrupted by Ctrl-C...
        timeout = 2 ** 31
    self._result_set.wait(timeout)
    # In Python 2.6 we can't rely on the return result of wait(), so we
    # have to check manually:
    if not self._result_set.is_set():
        raise TimeoutError()
    self._result_retrieved = True
    return self._value
def to_add(self):
    '''To add the entity.'''
    kwd = {
        'pager': '',
    }
    self.render('misc/entity/entity_add.html',
                cfg=config.CMS_CFG,
                kwd=kwd,
                userinfo=self.userinfo)
def register_quantity(quantity, name):
    """Register `name` as a type to validate as an instance of the class of
    `quantity`."""
    x = TypeDefinition(name, (quantity.__class__,), ())
    Validator.types_mapping[name] = x
def fetch_url(url):
    """Fetch the given url, strip formfeeds and decode it into the defined
    encoding."""
    with closing(urllib.urlopen(url)) as f:
        if f.code == 200:
            response = f.read()
            return strip_formfeeds(response).decode(ENCODING)
def OnSelectCard(self, event):
    """Called when the user selects a card in the tree."""
    item = event.GetItem()
    if item:
        itemdata = self.readertreepanel.cardtreectrl.GetItemPyData(item)
        if isinstance(itemdata, smartcard.Card.Card):
            self.dialogpanel.OnSelectCard(itemdata)
        else:
            self.dialogpanel.OnDeselectCard(itemdata)
def create(self, validated_data):
    '''We want to set the username to be the same as the email, and use the
    correct create function to make use of password hashing.'''
    validated_data['username'] = validated_data['email']
    admin = validated_data.pop('is_superuser', None)
    if admin is True:
        user = User.objects.create_superuser(**validated_data)
    else:
        user = User.objects.create_user(**validated_data)
    return user
def list(cls, args):  # pylint: disable=unused-argument
    """List all installed NApps and inform whether they are enabled."""
    mgr = NAppsManager()

    # Add status
    napps = [napp + ('[ie]',) for napp in mgr.get_enabled()]
    napps += [napp + ('[i-]',) for napp in mgr.get_disabled()]

    # Sort, add description and reorder columns
    napps.sort()
    napps_ordered = []
    for user, name, status in napps:
        description = mgr.get_description(user, name)
        version = mgr.get_version(user, name)
        napp_id = f'{user}/{name}'
        if version:
            napp_id += f':{version}'

        napps_ordered.append((status, napp_id, description))

    cls.print_napps(napps_ordered)
async def main():
    """The main part of the example script."""
    async with aiohttp.ClientSession() as session:
        zaehler = Volkszaehler(loop, session, UUID, host=HOST)

        # Get the data
        await zaehler.get_data()

        print("Average:", zaehler.average)
        print("Max:", zaehler.max)
        print("Min:", zaehler.min)
        print("Consumption:", zaehler.consumption)
        print("Data tuples:", zaehler.tuples)
def get_DOI(self):
    """This method defines how the Article tries to detect the DOI.

    It attempts to determine the article DOI string by DTD-appropriate
    inspection of the article metadata. This method should be made as
    flexible as necessary to properly collect the DOI for any XML
    publishing specification.

    Returns
    -------
    doi : str or None
        The full (publisher/article) DOI string for the article, or None on
        failure.
    """
    if self.dtd_name == 'JPTS':
        doi = self.root.xpath("./front/article-meta/article-id[@pub-id-type='doi']")
        if doi:
            return doi[0].text
        log.warning('Unable to locate DOI string for this article')
        return None
    else:
        log.warning('Unable to locate DOI string for this article')
        return None
def compute_kwinners(x, k, dutyCycles, boostStrength):
    """Use the boost strength to compute a boost factor for each unit
    represented in x. These factors are used to increase the impact of each
    unit to improve their chances of being chosen. This encourages
    participation of more columns in the learning process.

    The boosting function is a curve defined as:

        boostFactors = exp[-boostStrength * (dutyCycle - targetDensity)]

    Intuitively this means that units that have been active (i.e. in the
    top-k) at the target activation level have a boost factor of 1, meaning
    their activity is not boosted. Columns whose duty cycle drops too much
    below that of their neighbors are boosted depending on how infrequently
    they have been active. Units that have been active more than the target
    activation level have a boost factor below 1, meaning their activity is
    suppressed and they are less likely to be in the top-k.

    Note that we do not transmit the boosted values. We only use boosting to
    determine the winning units.

    The target activation density for each unit is k / number of units. The
    boostFactor depends on the dutyCycle via an exponential curve that equals
    1 when dutyCycle == targetDensity and decays towards 0 as the dutyCycle
    grows beyond it.

    :param x: Current activity of each unit.
    :param k: The activity of the top k units will be allowed to remain, the
        rest are set to zero.
    :param dutyCycles: The averaged duty cycle of each unit.
    :param boostStrength: A boost strength of 0.0 has no effect on x.

    :return: A tensor representing the activity of x after k-winner take all.
    """
    k = tf.convert_to_tensor(k, dtype=tf.int32)
    boostStrength = tf.math.maximum(boostStrength, 0.0, name="boostStrength")
    targetDensity = tf.cast(k / x.shape[1], tf.float32, name="targetDensity")
    boostFactors = tf.exp((targetDensity - dutyCycles) * boostStrength,
                          name="boostFactors")
    boosted = tf.multiply(x, boostFactors, name="boosted")

    # Take the boosted version of the input x, find the top k winners.
    # Compute an output that contains the values of x corresponding to the
    # top k boosted values.
    topk, _ = tf.math.top_k(input=boosted, k=k, sorted=False,
                            name="compute_kwinners")
    bottom = tf.reduce_min(topk, axis=1, keepdims=True,
                           name="compute_kwinners")
    mask = tf.cast(tf.greater_equal(boosted, bottom), dtype=x.dtype,
                   name="compute_kwinners")
    return x * mask
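A minimal usage sketch, assuming TensorFlow 2.x imported as `tf` and the function above; the tensor values are made up for illustration. With boostStrength=0.0 every boost factor is 1, so only the raw top-k activations survive:

import tensorflow as tf

x = tf.constant([[0.1, 0.9, 0.3, 0.7]], dtype=tf.float32)             # activity of 4 units
dutyCycles = tf.constant([0.25, 0.25, 0.25, 0.25], dtype=tf.float32)  # uniform history
out = compute_kwinners(x, k=2, dutyCycles=dutyCycles, boostStrength=0.0)
# out == [[0.0, 0.9, 0.0, 0.7]]: the two largest activations are kept, the rest zeroed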
def config(name='DATABASE_URL', default='sqlite://:memory:'):
    """Returns configured DATABASE dictionary from DATABASE_URL."""
    config = {}

    s = env(name, default)

    if s:
        config = parse_database_url(s)

    return config
def pick_env_and_run_and_report(self,
                                env: env_tools.PreparedEnv,
                                env_py2: Optional[env_tools.PreparedEnv],
                                verbose: bool,
                                previous_failures: Set['Check']
                                ) -> CheckResult:
    """Evaluates this check in python 3 or 2.7, and reports to github.

    If the prepared environments are not linked to a github repository,
    with a known access token, reporting to github is skipped.

    Args:
        env: A prepared python 3 environment.
        env_py2: A prepared python 2.7 environment.
        verbose: When set, more progress output is produced.
        previous_failures: Checks that have already run and failed.

    Returns:
        A CheckResult instance.
    """
    env.report_status_to_github('pending', 'Running...', self.context())
    chosen_env = cast(env_tools.PreparedEnv,
                      env_py2 if self.needs_python2_env() else env)
    os.chdir(cast(str, chosen_env.destination_directory))

    result = self.run(chosen_env, verbose, previous_failures)

    if result.unexpected_error is not None:
        env.report_status_to_github('error',
                                    'Unexpected error.',
                                    self.context())
    else:
        env.report_status_to_github(
            'success' if result.success else 'failure',
            result.message,
            self.context())

    return result
def xstep(self):
    r"""Minimise Augmented Lagrangian with respect to :math:`\mathbf{x}`."""
    self.X = np.asarray(sl.cho_solve_ATAI(
        self.D, self.rho, self.DTS + self.rho * (self.Y - self.U),
        self.lu, self.piv), dtype=self.dtype)

    if self.opt['LinSolveCheck']:
        b = self.DTS + self.rho * (self.Y - self.U)
        ax = self.D.T.dot(self.D.dot(self.X)) + self.rho * self.X
        self.xrrs = sl.rrs(ax, b)
    else:
        self.xrrs = None
def rotate(self, shift):
    '''Rotate 90 degrees clockwise `shift` times. If `shift` is negative,
    rotate counter-clockwise.'''
    self.child_corners.values[:] = np.roll(self.child_corners.values,
                                           shift, axis=0)
    self.update_transform()
def _add_to_ref(self, rec_curr, line):
    """Add new fields to the current reference."""
    # Examples of record lines containing ':' include:
    #   id: GO:000002
    #   name: mitochondrial genome maintenance
    #   namespace: biological_process
    #   def: "The maintenance of ...
    #   is_a: GO:0007005 ! mitochondrion organization
    if line[:4] == "id: ":
        assert not rec_curr.item_id
        item_id = line[4:]
        rec_curr.item_id = item_id
        rec_curr.id = item_id
    elif line[:8] == "alt_id: ":
        rec_curr.alt_ids.add(line[8:])
    elif line[:6] == "name: ":
        assert not rec_curr.name
        rec_curr.name = line[6:]
    elif line[:11] == "namespace: ":
        assert not rec_curr.namespace
        rec_curr.namespace = line[11:]
    elif line[:6] == "is_a: ":
        rec_curr._parents.add(line[6:].split()[0])
    elif line[:13] == "is_obsolete: " and line[13:] == "true":
        rec_curr.is_obsolete = True
    elif self.optobj and ':' in line:
        self.optobj.update_rec(rec_curr, line)
def _resetSelection(self, moveToTop=False):
    """Reset selection. If moveToTop is True - move cursor to the top
    position."""
    ancor, pos = self._qpart.selectedPosition
    dst = min(ancor, pos) if moveToTop else pos
    self._qpart.cursorPosition = dst
def verification_count(self):
    """Get Verification Count.

    Uses HEAD to /verifications interface.

    :Returns: (int) Number of verifications
    """
    response = self._head(url.verifications)
    self._check_response(response, 200)
    return int(response.headers.get('x-verification-count', -1))
def verify_rsa_sha1(request, rsa_public_key):
    """Verify a RSASSA-PKCS #1 v1.5 base64 encoded signature.

    Per `section 3.4.3`_ of the spec.

    Note this method requires the jwt and cryptography libraries.

    .. _`section 3.4.3`: https://tools.ietf.org/html/rfc5849#section-3.4.3

    To satisfy `RFC2616 section 5.2`_ item 1, the request argument's uri
    attribute MUST be an absolute URI whose netloc part identifies the
    origin server or gateway on which the resource resides. Any Host item
    of the request argument's headers dict attribute will be ignored.

    .. _`RFC2616 section 5.2`: https://tools.ietf.org/html/rfc2616#section-5.2
    """
    norm_params = normalize_parameters(request.params)
    bs_uri = base_string_uri(request.uri)
    sig_base_str = signature_base_string(
        request.http_method, bs_uri, norm_params).encode('utf-8')
    sig = binascii.a2b_base64(request.signature.encode('utf-8'))

    alg = _jwt_rs1_signing_algorithm()
    key = _prepare_key_plus(alg, rsa_public_key)

    verify_ok = alg.verify(sig_base_str, key, sig)
    if not verify_ok:
        log.debug('Verify RSA-SHA1 failed: signature base string: %s',
                  sig_base_str)
    return verify_ok
def glob(self, pat):
    """`pat` can be an extended glob pattern, e.g. `'**/*.less'`
    This code handles negations similarly to node.js' minimatch, i.e.
    a leading `!` will negate the entire pattern.
    """
    r = ""
    negate = int(pat.startswith('!'))
    i = negate

    while i < len(pat):
        if pat[i:i + 3] == '**/':
            r += "(?:.*/)?"
            i += 3
        elif pat[i] == "*":
            r += "[^/]*"
            i += 1
        elif pat[i] == ".":
            r += "[.]"
            i += 1
        elif pat[i] == "?":
            r += "."
            i += 1
        else:
            r += pat[i]
            i += 1
    r += r'\Z(?ms)'
    # print '\n\npat', pat
    # print 'regex:', r
    # print [s.relpath(self).replace('\\', '/') for s in self]
    rx = re.compile(r)

    def match(d):
        m = rx.match(d)
        return not m if negate else m

    return [s for s in self if match(s.relpath(self).replace('\\', '/'))]
def get_instance(self, payload):
    """Build an instance of TaskChannelInstance

    :param dict payload: Payload response from the API

    :returns: twilio.rest.taskrouter.v1.workspace.task_channel.TaskChannelInstance
    :rtype: twilio.rest.taskrouter.v1.workspace.task_channel.TaskChannelInstance
    """
    return TaskChannelInstance(
        self._version,
        payload,
        workspace_sid=self._solution['workspace_sid'],
    )
def check_name(name, safe_chars):
    '''Check whether the specified name contains invalid characters.'''
    regexp = re.compile('[^{0}]'.format(safe_chars))
    if regexp.search(name):
        raise SaltCloudException(
            '{0} contains characters not supported by this cloud provider. '
            'Valid characters are: {1}'.format(name, safe_chars)
        )
def get_collector_path(base_url=None):
    """Returns the IOpipe collector's path. By default this is `/v0/event`.

    :param base_url: An optional base URL to use.
    :returns: The collector's path.
    :rtype: str
    """
    if not base_url:
        return "/v0/event"
    event_url = urlparse(base_url)
    event_path = urljoin(event_url.path, "v0/event")
    if not event_path.startswith("/"):
        event_path = "/%s" % event_path
    if event_url.query:
        event_path = "?".join([event_path, event_url.query])
    return event_path
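A brief usage sketch (the base URL below is hypothetical):

get_collector_path()                                          # '/v0/event'
get_collector_path('https://example.com/custom/?token=abc')   # '/custom/v0/event?token=abc'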
def nodeSatisfiesNumericFacet(cntxt: Context, n: Node, nc: ShExJ.NodeConstraint,
                              _c: DebugContext) -> bool:
    """`5.4.5 XML Schema Numeric Facet Constraints
    <http://shex.io/shex-semantics/#xs-numeric>`_

    Numeric facet constraints apply to the numeric value of RDF Literals with
    datatypes listed in SPARQL 1.1 Operand Data Types [sparql11-query].
    Numeric constraints on non-numeric values fail. totaldigits and
    fractiondigits constraints on values not derived from xsd:decimal fail.
    """
    if nc.mininclusive is not None or nc.minexclusive is not None or \
            nc.maxinclusive is not None or nc.maxexclusive is not None or \
            nc.totaldigits is not None or nc.fractiondigits is not None:
        if is_numeric(n):
            v = n.value
            if isinstance(v, numbers.Number):
                if ((nc.mininclusive is None or v >= nc.mininclusive) and
                        (nc.minexclusive is None or v > nc.minexclusive) and
                        (nc.maxinclusive is None or v <= nc.maxinclusive) and
                        (nc.maxexclusive is None or v < nc.maxexclusive) and
                        (nc.totaldigits is None or
                         (total_digits(n) is not None and
                          total_digits(n) <= nc.totaldigits)) and
                        (nc.fractiondigits is None or
                         (fraction_digits(n) is not None and
                          fraction_digits(n) <= nc.fractiondigits))):
                    return True
                else:
                    if nc.mininclusive is not None and v < nc.mininclusive:
                        cntxt.fail_reason = f"Numeric value violation - minimum inclusive: {nc.mininclusive} actual: {v}"
                    elif nc.minexclusive is not None and v <= nc.minexclusive:
                        cntxt.fail_reason = f"Numeric value violation - minimum exclusive: {nc.minexclusive} actual: {v}"
                    elif nc.maxinclusive is not None and v > nc.maxinclusive:
                        cntxt.fail_reason = f"Numeric value violation - maximum inclusive: {nc.maxinclusive} actual: {v}"
                    elif nc.maxexclusive is not None and v >= nc.maxexclusive:
                        cntxt.fail_reason = f"Numeric value violation - maximum exclusive: {nc.maxexclusive} actual: {v}"
                    elif nc.totaldigits is not None and \
                            (total_digits(n) is None or total_digits(n) > nc.totaldigits):
                        cntxt.fail_reason = f"Numeric value violation - max total digits: {nc.totaldigits} value: {v}"
                    elif nc.fractiondigits is not None and \
                            (fraction_digits(n) is None or fraction_digits(n) > nc.fractiondigits):
                        cntxt.fail_reason = f"Numeric value violation - max fractional digits: {nc.fractiondigits} value: {v}"
                    else:
                        cntxt.fail_reason = "Impossible error - kick the programmer"
                    return False
            else:
                cntxt.fail_reason = f"Numeric test on non-number: {v}"
                return False
        else:
            cntxt.fail_reason = f"Numeric test on non-number: {n}"
            return False
    return True
def normalize_input_value(value):
    """Returns an input value normalized for RightScale API 2.0.

    This typically means adjusting the *input type* prefix to be one of the
    valid values::

        blank
        ignore
        inherit
        text:
        env:
        cred:
        key:
        array:

    This list comes from the table published here:

    http://reference.rightscale.com/api1.5/resources/ResourceInputs.html#multi_update

    If unspecified, value is assumed to be of type ``text``.
    """
    if value in ('blank', 'ignore', 'inherit'):
        return value
    # assume any unspecified or unknown types are text
    tokens = value.split(':')
    if (len(tokens) < 2
            or tokens[0] not in ('text', 'env', 'cred', 'key', 'array')):
        return 'text:%s' % value
    return value
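A few illustrative calls (the credential name is made up):

normalize_input_value('ignore')             # -> 'ignore'
normalize_input_value('cred:MY_PASSWORD')   # -> 'cred:MY_PASSWORD' (already prefixed)
normalize_input_value('hello world')        # -> 'text:hello world' (no known prefix)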
def bake(self):
    """Bake a `rubocop` command so it's ready to execute and returns None.

    :return: None
    """
    self._rubocop_command = sh.rubocop.bake(
        self.options, self._tests, _env=self.env, _out=LOG.out,
        _err=LOG.error)
def lookup(self, istring):
    """istring = the ilwd:char string corresponding to a unique id"""
    try:
        return self.uqids[istring]
    except KeyError:
        curs = self.curs
        curs.execute('VALUES BLOB(GENERATE_UNIQUE())')
        self.uqids[istring] = curs.fetchone()[0]
        return self.uqids[istring]
def h(self):
    r"""Returns the step size to be used in numerical differentiation with
    respect to the model parameters.

    The step size is given as a vector with length ``n_modelparams`` so
    that each model parameter can be weighted independently.
    """
    if np.size(self._h) > 1:
        assert np.size(self._h) == self.n_modelparams
        return self._h
    else:
        return self._h * np.ones(self.n_modelparams)
def validate_api_call(schema, raw_request, raw_response):
    """Validate the request/response cycle of an api call against a swagger
    schema. Request/Response objects from the `requests` and `urllib`
    library are supported.
    """
    request = normalize_request(raw_request)

    with ErrorDict() as errors:
        try:
            validate_request(
                request=request,
                schema=schema,
            )
        except ValidationError as err:
            errors['request'].add_error(err.messages or getattr(err, 'detail'))
            return

        response = normalize_response(raw_response, raw_request)

        try:
            validate_response(
                response=response,
                request_method=request.method,
                schema=schema
            )
        except ValidationError as err:
            errors['response'].add_error(err.messages or getattr(err, 'detail'))
def _wait_ready(self, timeout_sec=1):
    """Wait until the PN532 is ready to receive commands. At most wait
    timeout_sec seconds for the PN532 to be ready. If the PN532 is ready
    before the timeout is exceeded then True will be returned, otherwise
    False is returned when the timeout is exceeded.
    """
    start = time.time()

    # Send a SPI status read command and read response.
    self._gpio.set_low(self._cs)
    self._busy_wait_ms(2)
    response = self._spi.transfer([PN532_SPI_STATREAD, 0x00])
    self._gpio.set_high(self._cs)

    # Loop until a ready response is received.
    while response[1] != PN532_SPI_READY:
        # Check if the timeout has been exceeded.
        if time.time() - start >= timeout_sec:
            return False
        # Wait a little while and try reading the status again.
        time.sleep(0.01)
        self._gpio.set_low(self._cs)
        self._busy_wait_ms(2)
        response = self._spi.transfer([PN532_SPI_STATREAD, 0x00])
        self._gpio.set_high(self._cs)
    return True
def IncrementCounter(self, metric_name, delta=1, fields=None):
    """See base class."""
    if delta < 0:
        raise ValueError("Invalid increment for counter: %d." % delta)
    self._counter_metrics[metric_name].Increment(delta, fields)
def intersect_range_array(bed1, beds2, payload=None, is_sorted=False):
    """Does not do a merge if the payload has been set

    :param bed1:
    :param beds2:
    :param payload: payload=1 return the payload of bed1 on each of the
        intersect set, payload=2 return the payload of bed2 on each of the
        union set, payload=3 return the payload of bed1 and bed2 on each of
        the union set
    :param is_sorted:
    :type bed1: GenomicRange
    :type beds2: GenomicRange
    :type payload: int
    :type is_sorted: bool
    """
    if not is_sorted:
        beds2 = sort_ranges(beds2)
    output = []
    for bed2 in beds2:
        cval = bed2.cmp(bed1)
        # print str(cval) + " " + bed1.get_range_string() + " " + bed2.get_range_string()
        if cval == -1:
            continue
        elif cval == 0:
            output.append(bed1.intersect(bed2))
            if payload == 1:
                output[-1].set_payload(bed1.payload)
            if payload == 2:
                output[-1].set_payload(bed2.payload)
        elif cval == 1:
            break
    if payload:
        return sort_ranges(output)
    return merge_ranges(output)
def main():
    '''Main routine.'''
    # validate command line arguments
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--uri', '-u', required=True, action='store',
                            help='Template URI')
    arg_parser.add_argument('--params', '-p', required=True, action='store',
                            help='Parameters json file')
    arg_parser.add_argument('--rg', '-g', required=True, action='store',
                            help='Resource Group name')
    arg_parser.add_argument('--sub', '-s', required=False, action='store',
                            help='subscription id (optional)')

    args = arg_parser.parse_args()

    template_uri = args.uri
    params = args.params
    rgname = args.rg
    subscription_id = args.sub

    # load parameters file
    try:
        with open(params) as params_file:
            param_data = json.load(params_file)
    except FileNotFoundError:
        print('Error: Expecting ' + params + ' in current folder')
        sys.exit()

    access_token = azurerm.get_access_token_from_cli()
    if subscription_id is None:
        subscription_id = azurerm.get_subscription_from_cli()
    deployment_name = Haikunator().haikunate()
    print('Deployment name:' + deployment_name)

    deploy_return = azurerm.deploy_template_uri(
        access_token, subscription_id, rgname, deployment_name, template_uri,
        param_data)
    print(json.dumps(deploy_return.json(), sort_keys=False, indent=2,
                     separators=(',', ': ')))
def offset(self, offset):
    """Fetch results after `offset` value"""
    clone = self._clone()
    if isinstance(offset, int):
        clone._offset = offset
    return clone
def get_comparable_values(self):
    """Return a tuple of values representing the unicity of the object"""
    return (not self.generic, int(self.code), str(self.message),
            str(self.description))
def InstallNanny(self):
    """Install the nanny program."""
    # We need to copy the nanny sections to the registry to ensure the
    # service is correctly configured.
    new_config = config.CONFIG.MakeNewConfig()
    new_config.SetWriteBack(config.CONFIG["Config.writeback"])

    for option in self.nanny_options:
        new_config.Set(option, config.CONFIG.Get(option))

    new_config.Write()

    args = [
        config.CONFIG["Nanny.binary"], "--service_key",
        config.CONFIG["Client.config_key"], "install"
    ]

    logging.debug("Calling %s", (args,))
    output = subprocess.check_output(
        args, shell=True, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
    logging.debug("%s", output)
def add_new_target(self, address, target_type, target_base=None,
                   dependencies=None, derived_from=None, **kwargs):
    """Creates a new target, adds it to the context and returns it.

    This method ensures the target resolves files against the given
    target_base, creating the directory if needed and registering a source
    root.

    :API: public
    """
    rel_target_base = target_base or address.spec_path
    abs_target_base = os.path.join(get_buildroot(), rel_target_base)
    if not os.path.exists(abs_target_base):
        os.makedirs(abs_target_base)
    # TODO: Adding source roots on the fly like this is yucky, but hopefully this
    # method will go away entirely under the new engine.  It's primarily used for injecting
    # synthetic codegen targets, and that isn't how codegen will work in the future.
    if not self.source_roots.find_by_path(rel_target_base):
        # TODO: Set the lang and root category (source/test/thirdparty) based on the target type?
        self.source_roots.add_source_root(rel_target_base)
    if dependencies:
        dependencies = [dep.address for dep in dependencies]

    self.build_graph.inject_synthetic_target(
        address=address,
        target_type=target_type,
        dependencies=dependencies,
        derived_from=derived_from,
        **kwargs)
    new_target = self.build_graph.get_target(address)
    return new_target
def deal_with_changeset_stack_policy(self, fqn, stack_policy):
    """Set a stack policy when using changesets.

    ChangeSets don't allow you to set stack policies in the same call to
    update them. This sets it before executing the changeset if the stack
    policy is passed in.

    Args:
        stack_policy (:class:`stacker.providers.base.Template`): A template
            object representing a stack policy.
    """
    if stack_policy:
        kwargs = generate_stack_policy_args(stack_policy)
        kwargs["StackName"] = fqn
        logger.debug("Setting stack policy on %s.", fqn)
        self.cloudformation.set_stack_policy(**kwargs)
def Print(self, output_writer):
    """Prints a human readable version of the filter.

    Args:
        output_writer (CLIOutputWriter): output writer.
    """
    if self._date_time_ranges:
        for date_time_range in self._date_time_ranges:
            if date_time_range.start_date_time is None:
                end_time_string = date_time_range.end_date_time.CopyToDateTimeString()
                output_writer.Write('\t{0:s} after {1:s}\n'.format(
                    date_time_range.time_value, end_time_string))

            elif date_time_range.end_date_time is None:
                start_time_string = (
                    date_time_range.start_date_time.CopyToDateTimeString())
                output_writer.Write('\t{0:s} before {1:s}\n'.format(
                    date_time_range.time_value, start_time_string))

            else:
                start_time_string = (
                    date_time_range.start_date_time.CopyToDateTimeString())
                end_time_string = date_time_range.end_date_time.CopyToDateTimeString()
                output_writer.Write('\t{0:s} between {1:s} and {2:s}\n'.format(
                    date_time_range.time_value, start_time_string,
                    end_time_string))
def __create_log_props(cls, log_props, _getdict, _setdict):  # @NoSelf
    """Creates all the logical properties.

    The list of names of properties to be created is passed with frozenset
    log_props. The getter/setter information is taken from _{get,set}dict.

    This method resolves also wildcards in names, and performs all checks
    to ensure correctness.

    Returns the frozen set of the actually created properties (as not all
    log_props may be really created, e.g. when no getter is provided, and a
    warning is issued).
    """
    real_log_props = set()
    resolved_getdict = {}
    resolved_setdict = {}

    for _dict_name, _dict, _resolved_dict in (
            ("getter", _getdict, resolved_getdict),
            ("setter", _setdict, resolved_setdict)):

        # first resolve all wildcards
        for pat, ai in ((pat, ai)
                        for pat, ai in _dict.items()
                        if frozenset(pat) & WILDCARDS):
            matches = fnmatch.filter(log_props, pat)
            for match in matches:
                if match in _resolved_dict:
                    raise NameError("In class %s.%s %s property '%s' "
                                    "is matched multiple times"
                                    " by patterns" %
                                    (cls.__module__, cls.__name__,
                                     _dict_name, match))
                _resolved_dict[match] = ai

            if not matches:
                logger.warning("In class %s.%s %s pattern '%s' "
                               "did not match any existing "
                               "logical property",
                               cls.__module__, cls.__name__,
                               _dict_name, pat)

        # now adds the exact matches (no wilcards) which override
        # the pattern-matches
        _resolved_dict.update((name, ai)
                              for name, ai in _dict.items()
                              if name in log_props)

        # checks that all getter/setter have a corresponding logical
        # property
        not_found = [name for name in _resolved_dict
                     if name not in log_props]
        if not_found:
            logger.warning("In class %s.%s logical %s were declared for "
                           "non-existent observables: %s",
                           cls.__module__, cls.__name__,
                           _dict_name, str(not_found))

    # creates the properties
    for name in log_props:
        # finds the getter
        ai_get = resolved_getdict.get(name, None)
        if ai_get:
            # decorator-based
            _getter = type(cls).get_getter(cls, name, ai_get.func,
                                           ai_get.has_args)
            _deps = ai_get.deps
        else:
            # old style
            _getter = type(cls).get_getter(cls, name)
            if _getter is None:
                raise RuntimeError("In class %s.%s "
                                   "logical observable '%s' "
                                   "has no getter method" %
                                   (cls.__module__, cls.__name__, name))

            _deps = type(cls)._get_old_style_getter_deps(cls, name, _getter)

        # finds the setter
        ai_set = resolved_setdict.get(name, None)
        if ai_set:
            # decorator-based
            if ai_get:
                _setter = type(cls).get_setter(cls, name,
                                               ai_set.func, ai_set.has_args,
                                               ai_get.func, ai_get.has_args)
            else:
                # the getter is old style. _getter is already
                # resolved wrt the name it may take, so
                # getter_takes_name is False
                _setter = type(cls).get_setter(cls, name,
                                               ai_set.func, ai_set.has_args,
                                               _getter, False)
        else:
            # old style setter
            if ai_get:
                _setter = type(cls).get_setter(cls, name,
                                               None, None,
                                               ai_get.func, ai_get.has_args)
            else:
                _setter = type(cls).get_setter(cls, name)

        # creates the logical property, here _setter can be None
        prop = PropertyMeta.LogicalOP(_getter, _setter, frozenset(_deps))
        setattr(cls, name, prop)
        real_log_props.add(name)

    # checks that all setters have a getter
    setters_no_getters = (set(resolved_setdict) - real_log_props) & log_props
    if setters_no_getters:
        logger.warning("In class %s.%s logical setters have no "
                       "getters: %s",
                       cls.__module__, cls.__name__,
                       ", ".join(setters_no_getters))

    return frozenset(real_log_props)
def get_filtered_keys(self, suffix, *args, **kwargs):
    """Returns the index key for the given args "value" (`args`)

    Parameters
    ----------
    kwargs: dict
        use_lua: bool
            Default to ``True``, if scripting is supported.
            If ``True``, the process of reading from the sorted-set,
            extracting the primary keys, excluding some values if needed,
            and putting the primary keys in a set or zset, is done in lua
            at the redis level.
            Else, data is fetched, manipulated here, then returned to redis.

    For the other parameters, see ``BaseIndex.get_filtered_keys``
    """
    accepted_key_types = kwargs.get('accepted_key_types', None)

    if accepted_key_types \
            and 'set' not in accepted_key_types \
            and 'zset' not in accepted_key_types:
        raise ImplementationError(
            '%s can only return keys of type "set" or "zset"' %
            self.__class__.__name__
        )

    key_type = 'set' if not accepted_key_types or 'set' in accepted_key_types else 'zset'
    tmp_key = unique_key(self.connection)
    args = list(args)

    # special "in" case: we get n keys and make an unionstore with them then
    # return this key
    if suffix == 'in':
        values = set(args.pop())
        if not values:
            return []  # no keys

        in_keys = [
            self.get_filtered_keys('eq', *(args + [value]), **kwargs)[0][0]
            for value in values
        ]

        if key_type == 'set':
            self.connection.sunionstore(tmp_key, *in_keys)
        else:
            self.connection.zunionstore(tmp_key, *in_keys)

        # we can delete the temporary keys
        for in_key in in_keys:
            self.connection.delete(in_key)

        return [(tmp_key, key_type, True)]

    use_lua = self.model.database.support_scripting() and kwargs.get('use_lua', True)

    key = self.get_storage_key(*args)
    value = self.normalize_value(args[-1], transform=False)

    real_suffix = self.remove_prefix(suffix)

    if use_lua:
        start, end, exclude = self.get_boundaries(real_suffix, value)
        self.call_script(key, tmp_key, key_type, start, end, exclude)
    else:
        pks = self.get_pks_for_filter(key, real_suffix, value)
        if pks:
            if key_type == 'set':
                self.connection.sadd(tmp_key, *pks)
            else:
                self.connection.zadd(tmp_key, **{pk: idx for idx, pk in enumerate(pks)})

    return [(tmp_key, key_type, True)]
def load_config(self, config_file_name):
    """Load configuration file from prt or str.

    Configuration file type is extracted from the file suffix - prt or str.

    :param config_file_name: full path to the configuration file.
        IxTclServer must have access to the file location: either the config
        file is on a shared folder, or IxTclServer runs on the client
        machine.
    """
    config_file_name = config_file_name.replace('\\', '/')
    ext = path.splitext(config_file_name)[-1].lower()
    if ext == '.prt':
        self.api.call_rc('port import "{}" {}'.format(config_file_name, self.uri))
    elif ext == '.str':
        self.reset()
        self.api.call_rc('stream import "{}" {}'.format(config_file_name, self.uri))
    else:
        raise ValueError('Configuration file type {} not supported.'.format(ext))
    self.write()
    self.discover()
def main():
    """Main function for the deprecated 'sl' command."""
    print("ERROR: Use the 'slcli' command instead.", file=sys.stderr)
    print("> slcli %s" % ' '.join(sys.argv[1:]), file=sys.stderr)
    exit(-1)
def split_file_urls_by_size(self, size):
    """Return tuple that contains a list of large files and a list of small
    files based on the size parameter.

    :param size: int: size (in bytes) that determines if a file is large or
        small
    :return: ([ProjectFileUrl], [ProjectFileUrl]): (large file urls, small
        file urls)
    """
    large_items = []
    small_items = []
    for file_url in self.file_urls:
        if file_url.size >= size:
            large_items.append(file_url)
        else:
            small_items.append(file_url)
    return large_items, small_items
def _getphoto_location(self, pid):
    """Asks fb for photo location information

    returns tuple with lat, lon, accuracy
    """
    logger.debug('%s - Getting location from fb' % (pid))
    lat = None
    lon = None
    accuracy = None

    resp = self.fb.photos_geo_getLocation(photo_id=pid)
    if resp.attrib['stat'] != 'ok':
        logger.error("%s - fb: photos_geo_getLocation failed with status: %s",
                     pid, resp.attrib['stat'])
        return (None, None, None)
    for location in resp.find('photo'):
        lat = location.attrib['latitude']
        lon = location.attrib['longitude']
        accuracy = location.attrib['accuracy']
    return (lat, lon, accuracy)
def preprocessFastqs(fastqFNs, seqFNPrefix, offsetFN, abtFN, areUniform, logger):
    '''This function does the grunt work behind string extraction for fastq files

    @param fastqFNs - a list of .fq filenames for parsing
    @param seqFNPrefix - this is always of the form '<DIR>/seqs.npy'
    @param offsetFN - this is always of the form '<DIR>/offsets.npy'
    @param abtFN - this is always of the form '<DIR>/about.npy'
    @param areUniform - True if all sequences are of uniform length
    @param logger - logger object for output
    '''
    # create a seqArray
    seqArray = []

    tempFileId = 0
    seqsPerFile = 1000000
    maxSeqLen = -1
    numSeqs = 0

    subSortFNs = []

    for fnID, fn in enumerate(fastqFNs):
        # open the file and read in, starting from the second line, every 4th line
        logger.info('Loading \'' + fn + '\'...')
        if fn.endswith('.gz'):
            fp = gzip.open(fn, 'r')
        else:
            fp = open(fn, 'r')
        i = -1

        # go through each line
        for line in fp:
            if i % 4 == 0:
                seqArray.append((line.strip('\n') + '$', fnID, i / 4))
                if len(seqArray) == seqsPerFile:
                    if not areUniform or maxSeqLen == -1:
                        maxSeqLen = 0
                        for seq, fID, seqID in seqArray:
                            if len(seq) > maxSeqLen:
                                maxSeqLen = len(seq)

                    tempFN = seqFNPrefix + '.sortTemp.' + str(tempFileId) + '.npy'
                    subSortFNs.append(tempFN)

                    tempArray = np.lib.format.open_memmap(
                        tempFN, 'w+', 'a' + str(maxSeqLen) + ',<u1,<u8',
                        (len(seqArray),))
                    tempArray[:] = sorted(seqArray)
                    numSeqs += len(seqArray)
                    del tempArray
                    tempFileId += 1
                    seqArray = []
            i += 1
        fp.close()

    if len(seqArray) > 0:
        if not areUniform or maxSeqLen == -1:
            maxSeqLen = 0
            for seq, fID, seqID in seqArray:
                if len(seq) > maxSeqLen:
                    maxSeqLen = len(seq)

        tempFN = seqFNPrefix + '.sortTemp.' + str(tempFileId) + '.npy'
        subSortFNs.append(tempFN)

        tempArray = np.lib.format.open_memmap(
            tempFN, 'w+', 'a' + str(maxSeqLen) + ',<u1,<u8', (len(seqArray),))
        tempArray[:] = sorted(seqArray)
        numSeqs += len(seqArray)
        del tempArray
        tempFileId += 1
        seqArray = []

    logger.info('Pre-sorting ' + str(numSeqs) + ' sequences...')
    iters = []
    for fn in subSortFNs:
        iters.append(customiter(np.load(fn, 'r')))

    # save it
    tempFN = seqFNPrefix + '.temp.npy'
    fp = open(tempFN, 'w+')

    aboutFile = np.lib.format.open_memmap(abtFN, 'w+', '<u1,<u8', (numSeqs,))
    ind = 0

    for tup in heapq.merge(*iters):
        (seq, fID, seqID) = tup
        aboutFile[ind] = (fID, seqID)
        fp.write(seq)
        ind += 1

    fp.close()

    # clean up disk space
    for fn in subSortFNs:
        os.remove(fn)

    # convert the sequences into uint8s and then save it
    del seqArray
    seqArray = np.memmap(tempFN)

    if areUniform:
        uniformLength = maxSeqLen
    else:
        uniformLength = 0

    logger.info('Saving sorted sequences for BWT construction...')
    MSBWTGen.writeSeqsToFiles(seqArray, seqFNPrefix, offsetFN, uniformLength)

    # wipe this
    del seqArray
    os.remove(tempFN)
def destroy(self):
    """Destroy and close the App.

    :return: None.

    :note: Once destroyed an App can no longer be used.
    """
    # if this is the main_app - set the _main_app class variable to `None`.
    if self == App._main_app:
        App._main_app = None
    self.tk.destroy()
def is_valid_github_uri(uri: URI, expected_path_terms: Tuple[str, ...]) -> bool:
    """Return a bool indicating whether or not the URI fulfills the following specs.

    Valid Github URIs *must*:
    - Have 'https' scheme
    - Have 'api.github.com' authority
    - Have a path that contains all "expected_path_terms"
    """
    if not is_text(uri):
        return False

    parsed = parse.urlparse(uri)
    path, scheme, authority = parsed.path, parsed.scheme, parsed.netloc
    if not all((path, scheme, authority)):
        return False

    if any(term for term in expected_path_terms if term not in path):
        return False

    if scheme != "https":
        return False

    if authority != GITHUB_API_AUTHORITY:
        return False
    return True
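A usage sketch with a made-up blob URI (assuming GITHUB_API_AUTHORITY is set to 'api.github.com'):

uri = "https://api.github.com/repos/some-org/some-repo/git/blobs/abc123"
is_valid_github_uri(uri, ("repos", "blobs"))                     # True
is_valid_github_uri(uri, ("commits",))                           # False: 'commits' not in the path
is_valid_github_uri("http://api.github.com/repos", ("repos",))   # False: scheme is not https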
def listdir(dir_pathname, recursive=True, topdown=True, followlinks=False):
    """Enlists all items using their absolute paths in a directory,
    optionally recursively.

    :param dir_pathname:
        The directory to traverse.
    :param recursive:
        ``True`` for walking recursively through the directory tree;
        ``False`` otherwise.
    :param topdown:
        Please see the documentation for :func:`os.walk`
    :param followlinks:
        Please see the documentation for :func:`os.walk`
    """
    for root, dirnames, filenames in walk(dir_pathname, recursive, topdown,
                                          followlinks):
        for dirname in dirnames:
            yield absolute_path(os.path.join(root, dirname))
        for filename in filenames:
            yield absolute_path(os.path.join(root, filename))
def run(user, port=4242):
    """Build a temporary directory with a visualization and serve it over
    HTTP.

    Examples
    --------
    >>> bandicoot.visualization.run(U)
    Successfully exported the visualization to /tmp/tmpsIyncS
    Serving bandicoot visualization at http://0.0.0.0:4242
    """
    owd = os.getcwd()
    dir = export(user)
    os.chdir(dir)

    Handler = SimpleHTTPServer.SimpleHTTPRequestHandler
    try:
        httpd = SocketServer.TCPServer(("", port), Handler)
        print("Serving bandicoot visualization at http://0.0.0.0:%i" % port)
        httpd.serve_forever()
    except KeyboardInterrupt:
        print("^C received, shutting down the web server")
        httpd.server_close()
    finally:
        os.chdir(owd)
def shorten_aead(aead):
    """Produce pretty-printable version of long AEAD."""
    head = aead.data[:4].encode('hex')
    tail = aead.data[-4:].encode('hex')
    return "%s...%s" % (head, tail)
def _set_interface_detail(self, v, load=False):
    """
    Setter method for interface_detail, mapped from YANG variable
    /isis_state/interface_detail (container)

    If this variable is read-only (config: false) in the source YANG file,
    then _set_interface_detail is considered as a private method. Backends
    looking to populate this variable should do so via calling
    thisObj._set_interface_detail() directly.

    YANG Description: ISIS Interface info detail
    """
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(
            v,
            base=interface_detail.interface_detail,
            is_container='container',
            presence=False,
            yang_name="interface-detail",
            rest_name="interface-detail",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions={u'tailf-common': {u'callpoint': u'isis-port-isis-detail', u'cli-suppress-show-path': None}},
            namespace='urn:brocade.com:mgmt:brocade-isis-operational',
            defining_module='brocade-isis-operational',
            yang_type='container',
            is_config=False)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """interface_detail must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=interface_detail.interface_detail, is_container='container', presence=False, yang_name="interface-detail", rest_name="interface-detail", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'isis-port-isis-detail', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='container', is_config=False)""",
        })

    self.__interface_detail = t
    if hasattr(self, '_set'):
        self._set()
def show_firmware_version_output_show_firmware_version_os_version(self, **kwargs):
    """Auto Generated Code"""
    config = ET.Element("config")
    show_firmware_version = ET.Element("show_firmware_version")
    config = show_firmware_version
    output = ET.SubElement(show_firmware_version, "output")
    show_firmware_version = ET.SubElement(output, "show-firmware-version")
    os_version = ET.SubElement(show_firmware_version, "os-version")
    os_version.text = kwargs.pop('os_version')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def normalize_variable_name(node, reachability_tester):
    # type: (Dict[str, Any], ReferenceReachabilityTester) -> Optional[str]
    """Returns normalized variable name.

    Normalizing means that variable names get explicit visibility by
    visibility prefix such as: "g:", "s:", ...

    Returns None if the specified node is unanalyzable.
    A node is unanalyzable if:
    - the node is not identifier-like
    - the node is named dynamically
    """
    node_type = NodeType(node['type'])

    if not is_analyzable_identifier(node):
        return None

    if node_type is NodeType.IDENTIFIER:
        return _normalize_identifier_value(node, reachability_tester)

    # Nodes identifier-like without identifier is always normalized because
    # the nodes can not have a visibility prefix.
    if node_type in IdentifierLikeNodeTypes:
        return node['value']
def _partialParseQUnits(self, s, sourceTime):
    """
    test if giving C{s} matched CRE_QUNITS, used by L{parse()}

    @type  s:          string
    @param s:          date/time text to evaluate
    @type  sourceTime: struct_time
    @param sourceTime: C{struct_time} value to use as the base

    @rtype:  tuple
    @return: tuple of remained date/time text, datetime object and a
             boolean value to describe if matched or not
    """
    parseStr = None
    chunk1 = chunk2 = ''

    # Quantity + Units
    m = self.ptc.CRE_QUNITS.search(s)
    if m is not None:
        debug and log.debug('CRE_QUNITS matched')
        if self._UnitsTrapped(s, m, 'qunits'):
            debug and log.debug('day suffix trapped by qunit match')
        else:
            if (m.group('qty') != s):
                # capture remaining string
                parseStr = m.group('qty')
                chunk1 = s[:m.start('qty')].strip()
                chunk2 = s[m.end('qty'):].strip()

                if chunk1[-1:] == '-':
                    parseStr = '-%s' % parseStr
                    chunk1 = chunk1[:-1]

                s = '%s %s' % (chunk1, chunk2)
            else:
                parseStr = s
                s = ''

    if parseStr:
        debug and log.debug('found (qunits) [%s][%s][%s]',
                            parseStr, chunk1, chunk2)
        sourceTime = self._evalQUnits(parseStr, sourceTime)

    return s, sourceTime, bool(parseStr)
def to_object(self, data):
    """Deserialize input data

    :param data: serialized input Data object
    :return: Deserialized object
    """
    if not isinstance(data, Data):
        return data

    if is_null_data(data):
        return None

    inp = self._create_data_input(data)
    try:
        type_id = data.get_type()
        serializer = self._registry.serializer_by_type_id(type_id)
        if serializer is None:
            if self._active:
                raise HazelcastSerializationError(
                    "Missing Serializer for type-id:{}".format(type_id))
            else:
                raise HazelcastInstanceNotActiveError()
        return serializer.read(inp)
    except:
        handle_exception(sys.exc_info()[1], sys.exc_info()[2])
    finally:
        pass
def parse_navigation_html_to_tree(html, id):
    """Parse the given ``html`` (an etree object) to a tree.
    The ``id`` is required in order to assign the top-level tree id value.
    """
    def xpath(x):
        return html.xpath(x, namespaces=HTML_DOCUMENT_NAMESPACES)

    try:
        value = xpath('//*[@data-type="binding"]/@data-value')[0]
        is_translucent = value == 'translucent'
    except IndexError:
        is_translucent = False

    if is_translucent:
        id = TRANSLUCENT_BINDER_ID

    tree = {
        'id': id,
        'title': xpath('//*[@data-type="document-title"]/text()')[0],
        'contents': [x for x in _nav_to_tree(xpath('//xhtml:nav')[0])],
    }
    return tree
def create(cls, request_inquiries, total_amount_inquired,
           monetary_account_id=None, status=None, event_id=None,
           custom_headers=None):
    """Create a request batch by sending an array of single request objects,
    that will become part of the batch.

    :type user_id: int
    :type monetary_account_id: int
    :param request_inquiries: The list of request inquiries we want to send
        in 1 batch.
    :type request_inquiries: list[RequestInquiry]
    :param total_amount_inquired: The total amount originally inquired for
        this batch.
    :type total_amount_inquired: object_.Amount
    :param status: The status of the request.
    :type status: str
    :param event_id: The ID of the associated event if the request batch was
        made using 'split the bill'.
    :type event_id: int
    :type custom_headers: dict[str, str]|None

    :rtype: BunqResponseInt
    """
    if custom_headers is None:
        custom_headers = {}

    request_map = {
        cls.FIELD_REQUEST_INQUIRIES: request_inquiries,
        cls.FIELD_STATUS: status,
        cls.FIELD_TOTAL_AMOUNT_INQUIRED: total_amount_inquired,
        cls.FIELD_EVENT_ID: event_id
    }
    request_map_string = converter.class_to_json(request_map)
    request_map_string = cls._remove_field_for_request(request_map_string)

    api_client = client.ApiClient(cls._get_api_context())
    request_bytes = request_map_string.encode()
    endpoint_url = cls._ENDPOINT_URL_CREATE.format(
        cls._determine_user_id(),
        cls._determine_monetary_account_id(monetary_account_id))
    response_raw = api_client.post(endpoint_url, request_bytes,
                                   custom_headers)

    return BunqResponseInt.cast_from_bunq_response(
        cls._process_for_id(response_raw))
def templateParametersStringAsRestList(self, nodeByRefid):
    '''
    .. todo::

       document this, create another method for creating this without the
       need for generating links, to be used in making the node titles and
       labels
    '''
    if not self.template_params:
        return None
    else:
        param_stream = StringIO()
        for param_t, decl_n, def_n in self.template_params:
            refid, typeid = param_t
            # Say you wanted a custom link text 'custom', and somewhere else
            # you had an internal link '.. _some_link:'.  Then you do
            #     `custom <some_link_>`_
            # LOL.  RST is confusing
            if refid:
                # Easy case: the refid is something Exhale is explicitly documenting
                if refid in nodeByRefid:
                    link = "{0}_".format(nodeByRefid[refid].link_name)
                else:
                    # It's going to get generated by Breathe down the line, we need
                    # to reference the page the directive will appear on.
                    parent_refid = ""
                    for key in nodeByRefid:
                        if len(key) > len(parent_refid) and key in refid:
                            parent_refid = key
                    parent = nodeByRefid[parent_refid]
                    parent_page = os.path.basename(parent.file_name.replace(".rst", ".html"))
                    link = "{page}#{refid}".format(page=parent_page, refid=refid)
                param_stream.write(
                    "#. `{typeid} <{link}>`_".format(
                        typeid=typeid,
                        # Not necessarily an ExhaleNode link, should be a link by
                        # the time Breathe is finished?
                        link=link
                    )
                )
                close_please = False
            else:
                param_stream.write("#. ``{typeid}".format(typeid=typeid))
                close_please = True

            # The type is in there, but when parsed it may have given something like
            # `class X` for the typeid (meaning nothing else to write).  For others,
            # the decl_n is the declared name of the template parameter.  E.g. it
            # was parsed as `typeid <- class` and `decl_n <- X`.
            if decl_n:
                param_stream.write(" ")
                if not close_please:
                    param_stream.write("``")
                param_stream.write("{decl_n}".format(decl_n=decl_n))
                close_please = True

            # When templates provide a default value, `def_n` is it.  When parsed,
            # if the `decl_n` and `def_n` are the same, `def_n` is explicitly set
            # to be None.
            if def_n:
                param_stream.write(" ")
                if not close_please:
                    param_stream.write("``")
                param_stream.write("= {def_n}``".format(def_n=def_n))
                close_please = True

            if close_please:
                param_stream.write("``")

            param_stream.write("\n")

        param_stream.write("\n")
        param_value = param_stream.getvalue()
        param_stream.close()
        return param_value
def markdown_search_user(request):
    """
    Json usernames of the users registered & actived.

    url(method=get):
        /martor/search-user/?username={username}

    Response:
        error:
            - `status` is status code (204)
            - `error` is error message.
        success:
            - `status` is status code (200)
            - `data` is list dict of usernames.
                {'status': 200,
                 'data': [
                    {'username': 'john'},
                    {'username': 'albert'}]
                }
    """
    data = {}
    username = request.GET.get('username')
    if username is not None \
            and username != '' \
            and ' ' not in username:
        users = User.objects.filter(
            Q(username__icontains=username)
        ).filter(is_active=True)
        if users.exists():
            data.update({
                'status': 200,
                'data': [{'username': u.username} for u in users]
            })
            return HttpResponse(
                json.dumps(data, cls=LazyEncoder),
                content_type='application/json')
        data.update({
            'status': 204,
            'error': _('No users registered as `%(username)s` '
                       'or user is unactived.') % {'username': username}
        })
    else:
        data.update({
            'status': 204,
            'error': _('Validation Failed for field `username`')
        })
    return HttpResponse(
        json.dumps(data, cls=LazyEncoder),
        content_type='application/json')
def is_all_field_none(self):
    """
    :rtype: bool
    """
    if self._id_ is not None:
        return False
    if self._time_responded is not None:
        return False
    if self._time_expiry is not None:
        return False
    if self._monetary_account_id is not None:
        return False
    if self._amount_inquired is not None:
        return False
    if self._amount_responded is not None:
        return False
    if self._alias is not None:
        return False
    if self._counterparty_alias is not None:
        return False
    if self._description is not None:
        return False
    if self._attachment is not None:
        return False
    if self._status is not None:
        return False
    if self._minimum_age is not None:
        return False
    if self._require_address is not None:
        return False
    if self._address_shipping is not None:
        return False
    if self._address_billing is not None:
        return False
    if self._geolocation is not None:
        return False
    if self._redirect_url is not None:
        return False
    if self._type_ is not None:
        return False
    if self._sub_type is not None:
        return False
    if self._allow_chat is not None:
        return False
    if self._eligible_whitelist_id is not None:
        return False

    return True
def getStickXY(TableName):
    """Get X and Y for fine plotting of a stick spectrum.

    Usage: X, Y = getStickXY(TableName).
    """
    cent, intens = getColumns(TableName, ('nu', 'sw'))
    n = len(cent)
    cent_ = zeros(n * 3)
    intens_ = zeros(n * 3)
    for i in range(n):
        intens_[3 * i] = 0
        intens_[3 * i + 1] = intens[i]
        intens_[3 * i + 2] = 0
        cent_[(3 * i):(3 * i + 3)] = cent[i]
    return cent_, intens_
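A small worked example (the values are invented): for line centers cent = [1000.0, 2000.0] and intensities intens = [0.5, 0.8], the function returns

X = [1000., 1000., 1000., 2000., 2000., 2000.]
Y = [0., 0.5, 0., 0., 0.8, 0.]

so plotting X against Y draws one vertical stick per spectral line.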
def parse_and_normalize_url_date(date_str):
    """Parse a ISO 8601 date-time with optional timezone.

    - Return as datetime with timezone adjusted to UTC.
    - Return naive date-time set to UTC.
    """
    if date_str is None:
        return None
    try:
        return d1_common.date_time.dt_from_iso8601_str(date_str)
    except d1_common.date_time.iso8601.ParseError as e:
        raise d1_common.types.exceptions.InvalidRequest(
            0,
            'Invalid date format for URL parameter. date="{}" error="{}"'.format(
                date_str, str(e)
            ),
        )
def list_themes(dark=True):
    """List all installed theme files."""
    dark = "dark" if dark else "light"
    themes = os.scandir(os.path.join(MODULE_DIR, "colorschemes", dark))
    return [t for t in themes if os.path.isfile(t.path)]
def subjects_download(self, subject_id):
    """Get data file for subject with given identifier.

    Parameters
    ----------
    subject_id : string
        Unique subject identifier

    Returns
    -------
    FileInfo
        Information about subject's data file on disk or None if identifier
        is unknown
    """
    # Retrieve subject to ensure that it exist
    subject = self.subjects_get(subject_id)
    if subject is None:
        # Return None if subject is unknown
        return None
    else:
        # Reference and information for original uploaded file
        return FileInfo(
            subject.data_file,
            subject.properties[datastore.PROPERTY_MIMETYPE],
            subject.properties[datastore.PROPERTY_FILENAME])
def create_device_role ( role , color ) : '''. . versionadded : : 2019.2.0 Create a device role role String of device role , e . g . , ` ` router ` ` CLI Example : . . code - block : : bash salt myminion netbox . create _ device _ role router'''
nb_role = get_ ( 'dcim' , 'device-roles' , name = role ) if nb_role : return False else : payload = { 'name' : role , 'slug' : slugify ( role ) , 'color' : color } role = _add ( 'dcim' , 'device-roles' , payload ) if role : return { 'dcim' : { 'device-roles' : payload } } else : return False
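A minimal stand-in for the slugify helper used above (the real helper handles unicode and more edge cases; this version only covers plain ASCII names):

import re

def slugify(value):
    # Lowercase, then collapse runs of non-alphanumerics into single hyphens
    value = value.strip().lower()
    return re.sub(r"[^a-z0-9]+", "-", value).strip("-")

print(slugify("Core Router"))  # core-router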
def _execute_level ( self , level , audio_file_mfcc , text_files , sync_roots , force_aba_auto = False ) : """Compute the alignment for all the nodes in the given level . Return a pair ( next _ level _ text _ files , next _ level _ sync _ roots ) , containing two lists of text file subtrees and sync map subtrees on the next level . : param int level : the level : param audio _ file _ mfcc : the audio MFCC representation for this level : type audio _ file _ mfcc : : class : ` ~ aeneas . audiofilemfcc . AudioFileMFCC ` : param list text _ files : a list of : class : ` ~ aeneas . textfile . TextFile ` objects , each representing a ( sub ) tree of the Task text file : param list sync _ roots : a list of : class : ` ~ aeneas . tree . Tree ` objects , each representing a SyncMapFragment tree , one for each element in ` ` text _ files ` ` : param bool force _ aba _ auto : if ` ` True ` ` , force using the AUTO ABA algorithm : rtype : ( list , list )"""
self . _set_synthesizer ( ) next_level_text_files = [ ] next_level_sync_roots = [ ] for text_file_index , text_file in enumerate ( text_files ) : self . log ( [ u"Text level %d, fragment %d" , level , text_file_index ] ) self . log ( [ u" Len: %d" , len ( text_file ) ] ) sync_root = sync_roots [ text_file_index ] if ( level > 1 ) and ( len ( text_file ) == 1 ) : self . log ( u"Level > 1 and only one text fragment => return trivial tree" ) self . _append_trivial_tree ( text_file , sync_root ) elif ( level > 1 ) and ( sync_root . value . begin == sync_root . value . end ) : self . log ( u"Level > 1 and parent has begin == end => return trivial tree" ) self . _append_trivial_tree ( text_file , sync_root ) else : self . log ( u"Level == 1 or more than one text fragment with non-zero parent => compute tree" ) if not sync_root . is_empty : begin = sync_root . value . begin end = sync_root . value . end self . log ( [ u" Setting begin: %.3f" , begin ] ) self . log ( [ u" Setting end: %.3f" , end ] ) audio_file_mfcc . set_head_middle_tail ( head_length = begin , middle_length = ( end - begin ) ) else : self . log ( u" No begin or end to set" ) self . _execute_inner ( audio_file_mfcc , text_file , sync_root = sync_root , force_aba_auto = force_aba_auto , log = False , leaf_level = ( level == 3 ) ) # store next level roots next_level_text_files . extend ( text_file . children_not_empty ) # we added head and tail , we must not pass them to the next level next_level_sync_roots . extend ( sync_root . children [ 1 : - 1 ] ) self . _clear_cache_synthesizer ( ) return ( next_level_text_files , next_level_sync_roots )
def _mk_adjacency_matrix ( self , section , proportion , flats , elev , mag , dX , dY ) : """Calculates the adjacency ( connectivity ) matrix . This matrix tells which pixels drain to which . For example , the pixel i will receive area from np . nonzero ( A [ i , : ] ) at the proportions given in A [ i , : ] . So , the row gives the pixel drained to , and the columns the pixels drained from ."""
shp = section . shape mat_data = np . row_stack ( ( proportion , 1 - proportion ) ) NN = np . prod ( shp ) i12 = np . arange ( NN ) . reshape ( shp ) j1 = - np . ones_like ( i12 ) j2 = - np . ones_like ( i12 ) # make the connectivity for the non - flats / pits j1 , j2 = self . _mk_connectivity ( section , i12 , j1 , j2 ) j = np . row_stack ( ( j1 , j2 ) ) i = np . row_stack ( ( i12 , i12 ) ) # connectivity for flats / pits if self . drain_pits : pit_i , pit_j , pit_prop , flats , mag = self . _mk_connectivity_pits ( i12 , flats , elev , mag , dX , dY ) j = np . concatenate ( [ j . ravel ( ) , pit_j ] ) . astype ( 'int64' ) i = np . concatenate ( [ i . ravel ( ) , pit_i ] ) . astype ( 'int64' ) mat_data = np . concatenate ( [ mat_data . ravel ( ) , pit_prop ] ) elif self . drain_flats : j1 , j2 , mat_data , flat_i , flat_j , flat_prop = self . _mk_connectivity_flats ( i12 , j1 , j2 , mat_data , flats , elev , mag ) j = np . concatenate ( [ j . ravel ( ) , flat_j ] ) . astype ( 'int64' ) i = np . concatenate ( [ i . ravel ( ) , flat_j ] ) . astype ( 'int64' ) mat_data = np . concatenate ( [ mat_data . ravel ( ) , flat_prop ] ) # This prevents no - data values , remove connections when not present , # and makes sure that floating point precision errors do not # create circular references where a lower elevation cell drains # to a higher elevation cell I = ~ np . isnan ( mat_data ) & ( j != - 1 ) & ( mat_data > 1e-8 ) & ( elev . ravel ( ) [ j ] <= elev . ravel ( ) [ i ] ) mat_data = mat_data [ I ] j = j [ I ] i = i [ I ] # % % Make the matrix and initialize # What is A ? The row i area receives area contributions from the # entries in its columns . If all the entries in my columns have # drained , then I can drain . A = sps . csc_matrix ( ( mat_data . ravel ( ) , np . row_stack ( ( j . ravel ( ) , i . ravel ( ) ) ) ) , shape = ( NN , NN ) ) normalize = np . array ( A . sum ( 0 ) + 1e-16 ) . squeeze ( ) A = np . dot ( A , sps . diags ( 1 / normalize , 0 ) ) return A
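The core trick above is assembling a sparse matrix from (row, column, proportion) triplets and then normalizing each column so the contributions received by a cell sum to one. A toy scipy sketch of that pattern (the connectivity data here is made up):

import numpy as np
import scipy.sparse as sps

# Toy triplets: cell j drains into cell i with the given proportion
i = np.array([1, 2, 2, 3])
j = np.array([0, 0, 1, 2])
prop = np.array([0.4, 0.6, 1.0, 1.0])

A = sps.csc_matrix((prop, (j, i)), shape=(4, 4))

# Column-normalize, guarding against empty columns with a tiny epsilon
col_sums = np.asarray(A.sum(axis=0)).ravel() + 1e-16
A = A @ sps.diags(1.0 / col_sums, 0)

print(A.toarray())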
def float_constructor ( loader , node ) : """Construct Decimal from YAML float encoding ."""
s = loader . construct_scalar ( node ) if s == '.inf' : return Decimal ( 'Infinity' ) elif s == '-.inf' : return - Decimal ( 'Infinity' ) elif s == '.nan' : return Decimal ( 'NaN' ) return Decimal ( s )
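A minimal sketch of registering this constructor with PyYAML so that YAML floats come back as Decimal; attaching it to SafeLoader is an assumption (any loader class works the same way), and it reuses the float_constructor defined above:

import yaml

yaml.SafeLoader.add_constructor('tag:yaml.org,2002:float', float_constructor)

print(yaml.safe_load("price: 19.99"))  # {'price': Decimal('19.99')}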
def overlap1d ( l1 , l2 , PAx , PBx , gamma ) : """The one - dimensional component of the overlap integral . Taken from THO eq . 2.12 > > > isclose ( overlap1d ( 0,0,0,0,1 ) , 1.0) True"""
total = 0 for i in range ( 1 + int ( floor ( 0.5 * ( l1 + l2 ) ) ) ) : total += binomial_prefactor ( 2 * i , l1 , l2 , PAx , PBx ) * fact2 ( 2 * i - 1 ) / pow ( 2 * gamma , i ) return total
def analyze ( self , text ) : """Analyze text and return pretty format . Args : text : string , the input text . Returns : res : dict . Examples : > > > text = ' President Obama is speaking at the White House . ' > > > model . analyze ( text ) { " words " : [ " President " , " Obama " , " is " , " speaking " , " at " , " the " , " White " , " House . " ] , " entities " : [ { " beginOffset " : 1 , " endOffset " : 2 , " score " : 1 , " text " : " Obama " , " type " : " PER " } , { " beginOffset " : 6 , " endOffset " : 8 , " score " : 1 , " text " : " White House . " , " type " : " ORG " } ] }"""
pred = self . predict_proba ( text ) tags = self . _get_tags ( pred ) prob = self . _get_prob ( pred ) res = self . _build_response ( text , tags , prob ) return res
def modify_content ( request , page_id , content_type , language_id ) : """Modify the content of a page ."""
page = get_object_or_404 ( Page , pk = page_id ) perm = request . user . has_perm ( 'pages.change_page' ) if perm and request . method == 'POST' : content = request . POST . get ( 'content' , False ) if not content : raise Http404 page = Page . objects . get ( pk = page_id ) if settings . PAGE_CONTENT_REVISION : Content . objects . create_content_if_changed ( page , language_id , content_type , content ) else : Content . objects . set_or_create_content ( page , language_id , content_type , content ) page . invalidate ( ) # to update last modification date page . save ( ) return HttpResponse ( 'ok' ) raise Http404
def Start ( self , Minimized = False , Nosplash = False ) : """Starts Skype application . : Parameters : Minimized : bool If True , Skype is started minimized in system tray . Nosplash : bool If True , no splash screen is displayed upon startup ."""
self . _Skype . _Api . startup ( Minimized , Nosplash )
def get_slice ( self , slice_type , slice_number , time_point = 0 ) : """Returns a slice of the dataset . slice . data contains the window / levelled values , in uint8 slice . original _ data contains the original data for this slice : param time _ point : in case of 4d nifti the 4th dimension : param slice _ number : the slice number : param slice _ type : the slice type ( AXIAL , SAGITTAL , CORONAL )"""
slice_ = Slice ( ) slice_ . slice_number = slice_number # assert that slice _ number is within the range assert slice_number >= 0 assert slice_number < self . _get_number_of_slices ( slice_type ) slice_data = None if slice_type == SliceType . AXIAL : slice_data = self . __get_raw_slice__ ( slice_number , self . axial_orientation , time_point ) slice_ . slice_orientation = self . axial_orientation elif slice_type == SliceType . SAGITTAL : slice_data = self . __get_raw_slice__ ( slice_number , self . sagittal_orientation , time_point ) slice_ . slice_orientation = self . sagittal_orientation elif slice_type == SliceType . CORONAL : slice_data = self . __get_raw_slice__ ( slice_number , self . coronal_orientation , time_point ) slice_ . slice_orientation = self . coronal_orientation # make a copy of the slice _ so we do not modify the original slice_ . original_data = slice_data return slice_
def user_create ( name , passwd , database = None , user = None , password = None , host = None , port = None ) : '''Create a cluster admin or a database user . If a database is specified : it will create database user . If a database is not specified : it will create a cluster admin . name User name for the new user to create passwd Password for the new user to create database The database to create the user in user The user to connect as password The password of the user host The host to connect to port The port to connect to CLI Example : . . code - block : : bash salt ' * ' influxdb08 . user _ create < name > < passwd > salt ' * ' influxdb08 . user _ create < name > < passwd > < database > salt ' * ' influxdb08 . user _ create < name > < passwd > < database > < user > < password > < host > < port >'''
if user_exists ( name , database , user , password , host , port ) : if database : log . info ( 'User \'%s\' already exists for DB \'%s\'' , name , database ) else : log . info ( 'Cluster admin \'%s\' already exists' , name ) return False client = _client ( user = user , password = password , host = host , port = port ) if not database : return client . add_cluster_admin ( name , passwd ) client . switch_database ( database ) return client . add_database_user ( name , passwd )
def main ( ) : '''Main function'''
# We should only steal the root logger if we ' re the application , not the module logging . basicConfig ( level = logging . DEBUG ) args = get_args ( ) if args . password : password = args . password else : password = getpass ( prompt = 'Enter password for {}@{}: ' . format ( args . user , args . host ) ) opsview = Opsview ( host = args . host , port = args . port , use_ssl = args . ssl , verify_ssl = not args . skip_ssl_verification , username = args . user , password = password , verbose = args . verbose , ) d = { } with open ( 'vcenter.json' ) as f : d = json . load ( f ) logger . debug ( pformat ( opsview . create_host ( params = d , verbose = args . verbose ) ) )
def normalize_allele_name ( raw_allele , omit_dra1 = False , infer_class2_pair = True ) : """MHC alleles are named with a frustratingly loose system . It ' s not uncommon to see dozens of different forms for the same allele . Note : this function works with both class I and class II allele names ( including alpha / beta pairs ) . For example , these all refer to the same MHC sequence : - HLA - A * 02:01 - HLA - A02:01 - HLA - A : 02:01 - HLA - A0201 - HLA - A00201 Additionally , for human alleles , the species prefix is often omitted : - A * 02:01 - A * 00201 - A * 0201 - A02:01 - A : 02:01 - A : 002:01 - A0201 - A00201 We might also encounter " 6 digit " and " 8 digit " MHC types ( which specify variants that don ' t affect amino acid sequence ) , for our purposes these should be truncated to their " 4 - digit " forms : - A * 02:01:01 - A * 02:01:01:01 There are also suffixes which we ' re going to ignore : - HLA - A * 02:01:01G And lastly , for human alleles , there are serotypes which we ' ll treat as approximately equal to a 4 - digit type . - HLA - A2 - A2 These should all be normalized to : HLA - A * 02:01"""
cache_key = ( raw_allele , omit_dra1 , infer_class2_pair ) if cache_key in _normalized_allele_cache : return _normalized_allele_cache [ cache_key ] parsed_alleles = parse_classi_or_classii_allele_name ( raw_allele , infer_pair = infer_class2_pair ) species = parsed_alleles [ 0 ] . species normalized_list = [ species ] # Optionally omit the alpha allele , e . g . for IEDB predictors . if omit_dra1 and len ( parsed_alleles ) == 2 : alpha , beta = parsed_alleles # by convention the alpha allele is omitted since it ' s assumed # to be DRA1*01:01 if alpha == _DRA1_0101 : parsed_alleles = [ beta ] for parsed_allele in parsed_alleles : if len ( parsed_allele . allele_family ) > 0 : normalized_list . append ( "%s*%s:%s" % ( parsed_allele . gene , parsed_allele . allele_family , parsed_allele . allele_code ) ) else : # mice don ' t have allele families # e . g . H - 2 - Kd # species = H - 2 # gene = K # allele = d normalized_list . append ( "%s%s" % ( parsed_allele . gene , parsed_allele . allele_code ) ) normalized = "-" . join ( normalized_list ) _normalized_allele_cache [ cache_key ] = normalized return normalized
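A heavily simplified, self-contained sketch of the 4-digit normalization idea for plain HLA class I names; the regex and supported forms are illustrative only, whereas the real parser above also handles class II pairs, serotypes and non-human species:

import re

def normalize_simple_classi(raw):
    # Strip decoration, then pull out gene letter + 2-digit family + 2-digit code
    s = raw.upper().replace("HLA-", "").replace("*", "").replace(":", "")
    match = re.match(r"^([A-C])0*(\d{2})(\d{2})", s)
    if match is None:
        raise ValueError("unrecognized allele: %r" % raw)
    gene, family, code = match.groups()
    return "HLA-%s*%s:%s" % (gene, family, code)

for name in ["A0201", "HLA-A*02:01", "A*02:01", "A00201"]:
    print(normalize_simple_classi(name))  # every form prints HLA-A*02:01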
async def get_value ( self ) : """Get the value from the API . Make sure to use a lock in order not to fetch the value twice at the same time ."""
cc = self . request . custom_content async with self . lock : if self . content_key not in cc : cc [ self . content_key ] = await self . call_api ( ) return cc [ self . content_key ]
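The same fetch-once-under-a-lock caching pattern as a self-contained asyncio sketch (class and key names are illustrative):

import asyncio

class CachedFetcher:
    def __init__(self):
        self._lock = asyncio.Lock()
        self._cache = {}
        self.api_calls = 0

    async def _call_api(self):
        self.api_calls += 1
        await asyncio.sleep(0.01)   # stand-in for the real network round trip
        return {"value": 42}

    async def get_value(self):
        async with self._lock:      # a second caller waits instead of fetching twice
            if "payload" not in self._cache:
                self._cache["payload"] = await self._call_api()
            return self._cache["payload"]

async def main():
    fetcher = CachedFetcher()
    await asyncio.gather(fetcher.get_value(), fetcher.get_value())
    print(fetcher.api_calls)        # 1

asyncio.run(main())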
def _safe_sendBreak_v2_7 ( self ) : # pylint : disable = invalid - name """! pyserial 2.7 API implementation of sendBreak / setBreak @ details Below API is deprecated for pyserial 3 . x versions ! http : / / pyserial . readthedocs . org / en / latest / pyserial _ api . html # serial . Serial . sendBreak http : / / pyserial . readthedocs . org / en / latest / pyserial _ api . html # serial . Serial . setBreak"""
result = True try : self . sendBreak ( ) except : # pylint : disable = bare - except # In Linux a termios . error is raised in sendBreak and in setBreak . # The following setBreak ( ) is needed to release the reset signal on the target mcu . try : self . setBreak ( False ) except : # pylint : disable = bare - except result = False return result
def change_max_svc_check_attempts ( self , service , check_attempts ) : """Modify max service check attempt Format of the line that triggers function call : : CHANGE _ MAX _ SVC _ CHECK _ ATTEMPTS ; < host _ name > ; < service _ description > ; < check _ attempts > : param service : service to edit : type service : alignak . objects . service . Service : param check _ attempts : new value to set : type check _ attempts : int : return : None"""
service . modified_attributes |= DICT_MODATTR [ "MODATTR_MAX_CHECK_ATTEMPTS" ] . value service . max_check_attempts = check_attempts if service . state_type == u'HARD' and service . state == u'OK' and service . attempt > 1 : service . attempt = service . max_check_attempts self . send_an_element ( service . get_update_status_brok ( ) )
def matched_interpreters ( interpreters , constraints ) : """Given some filters , yield any interpreter that matches at least one of them . : param interpreters : a list of PythonInterpreter objects for filtering : param constraints : A sequence of strings that constrain the interpreter compatibility for this pex . Each string uses the Requirement - style format , e . g . ' CPython > = 3 ' or ' > = 2.7 , < 3 ' for requirements agnostic to interpreter class . Multiple requirement strings may be combined into a list to OR the constraints , such as [ ' CPython > = 2.7 , < 3 ' , ' CPython > = 3.4 ' ] . : return interpreter : returns a generator that yields compatible interpreters"""
for interpreter in interpreters : if any ( interpreter . identity . matches ( filt ) for filt in constraints ) : TRACER . log ( "Constraints on interpreters: %s, Matching Interpreter: %s" % ( constraints , interpreter . binary ) , V = 3 ) yield interpreter
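Stripped of the pex-specific types, the filter is "yield every item satisfying at least one constraint". A dependency-free sketch with a toy match predicate standing in for identity.matches:

def matched_items(items, constraints, matches):
    # Yield each item for which any single constraint is satisfied (constraints are OR-ed)
    for item in items:
        if any(matches(item, constraint) for constraint in constraints):
            yield item

def version_at_least(version, constraint):
    # Toy predicate: constraint looks like ">=3.7"
    return tuple(map(int, version.split("."))) >= tuple(map(int, constraint[2:].split(".")))

print(list(matched_items(["2.7.18", "3.6.9", "3.8.10"], [">=3.7"], version_at_least)))  # ['3.8.10']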
def crypto_aead_chacha20poly1305_encrypt ( message , aad , nonce , key ) : """Encrypt the given ` ` message ` ` using the " legacy " construction described in draft - agl - tls - chacha20poly1305. : param message : : type message : bytes : param aad : : type aad : bytes : param nonce : : type nonce : bytes : param key : : type key : bytes : return : authenticated ciphertext : rtype : bytes"""
ensure ( isinstance ( message , bytes ) , 'Input message type must be bytes' , raising = exc . TypeError ) mlen = len ( message ) ensure ( mlen <= crypto_aead_chacha20poly1305_MESSAGEBYTES_MAX , 'Message must be at most {0} bytes long' . format ( crypto_aead_chacha20poly1305_MESSAGEBYTES_MAX ) , raising = exc . ValueError ) ensure ( isinstance ( aad , bytes ) or ( aad is None ) , 'Additional data must be bytes or None' , raising = exc . TypeError ) ensure ( isinstance ( nonce , bytes ) and len ( nonce ) == crypto_aead_chacha20poly1305_NPUBBYTES , 'Nonce must be a {0} bytes long bytes sequence' . format ( crypto_aead_chacha20poly1305_NPUBBYTES ) , raising = exc . TypeError ) ensure ( isinstance ( key , bytes ) and len ( key ) == crypto_aead_chacha20poly1305_KEYBYTES , 'Key must be a {0} bytes long bytes sequence' . format ( crypto_aead_chacha20poly1305_KEYBYTES ) , raising = exc . TypeError ) if aad : _aad = aad aalen = len ( aad ) else : _aad = ffi . NULL aalen = 0 mlen = len ( message ) mxout = mlen + crypto_aead_chacha20poly1305_ietf_ABYTES clen = ffi . new ( "unsigned long long *" ) ciphertext = ffi . new ( "unsigned char[]" , mxout ) res = lib . crypto_aead_chacha20poly1305_encrypt ( ciphertext , clen , message , mlen , _aad , aalen , ffi . NULL , nonce , key ) ensure ( res == 0 , "Encryption failed." , raising = exc . CryptoError ) return ffi . buffer ( ciphertext , clen [ 0 ] ) [ : ]
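A hedged usage sketch through PyNaCl's public bindings module, assuming the legacy decrypt counterpart is exposed alongside the encrypt call above; the legacy construction takes a 32-byte key and an 8-byte nonce:

import os
from nacl import bindings

key = os.urandom(32)    # crypto_aead_chacha20poly1305_KEYBYTES
nonce = os.urandom(8)   # crypto_aead_chacha20poly1305_NPUBBYTES (64-bit nonce in the legacy construction)

ciphertext = bindings.crypto_aead_chacha20poly1305_encrypt(b"attack at dawn", b"header", nonce, key)
plaintext = bindings.crypto_aead_chacha20poly1305_decrypt(ciphertext, b"header", nonce, key)
assert plaintext == b"attack at dawn"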
def p_param_definition ( p ) : """param _ definition : param _ def"""
p [ 0 ] = p [ 1 ] if p [ 0 ] is not None : p [ 0 ] . byref = OPTIONS . byref . value
def loadCats ( self , ids = [ ] ) : """Load cats with the specified ids . : param ids ( int array ) : integer ids specifying cats : return : cats ( object array ) : loaded cat objects"""
if _isArrayLike ( ids ) : return [ self . cats [ id ] for id in ids ] elif type ( ids ) == int : return [ self . cats [ ids ] ]
def start ( self ) : """Starts the coordinator thread and all related worker threads ."""
assert not self . interrupted for thread in self . worker_threads : thread . start ( ) WorkerThread . start ( self )
def Space ( self , n = 1 , dl = 0 ) : """Press the space key n times ."""
self . Delay ( dl ) self . keyboard . tap_key ( " " , n )
def augpath ( path , augsuf = '' , augext = '' , augpref = '' , augdir = None , newext = None , newfname = None , ensure = False , prefix = None , suffix = None ) : """augments end of path before the extension . augpath Args : path ( str ) : augsuf ( str ) : augment filename before extension Returns : str : newpath Example : > > > # DISABLE _ DOCTEST > > > from utool . util _ path import * # NOQA > > > path = ' somefile . txt ' > > > augsuf = ' _ aug ' > > > newpath = augpath ( path , augsuf ) > > > result = str ( newpath ) > > > print ( result ) somefile _ aug . txt Example : > > > # DISABLE _ DOCTEST > > > from utool . util _ path import * # NOQA > > > path = ' somefile . txt ' > > > augsuf = ' _ aug2' > > > newext = ' . bak ' > > > augdir = ' backup ' > > > newpath = augpath ( path , augsuf , newext = newext , augdir = augdir ) > > > result = str ( newpath ) > > > print ( result ) backup / somefile _ aug2 . bak"""
if prefix is not None : augpref = prefix if suffix is not None : augsuf = suffix # Breakup path dpath , fname = split ( path ) fname_noext , ext = splitext ( fname ) if newfname is not None : fname_noext = newfname # Augment ext if newext is None : newext = ext # Augment fname new_fname = '' . join ( ( augpref , fname_noext , augsuf , newext , augext ) ) # Augment dpath if augdir is not None : new_dpath = join ( dpath , augdir ) if ensure : # create new dir if needed ensuredir ( new_dpath ) else : new_dpath = dpath # Recombine into new path newpath = join ( new_dpath , new_fname ) return newpath
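The core split-and-recombine step, as a tiny stand-alone sketch using os.path (covering only the suffix case from the doctest above):

import os.path

def aug_filename(path, suffix):
    # Insert ``suffix`` between the file stem and its extension
    dpath, fname = os.path.split(path)
    stem, ext = os.path.splitext(fname)
    return os.path.join(dpath, stem + suffix + ext)

print(aug_filename("somefile.txt", "_aug"))  # somefile_aug.txt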
def deploy ( self , initial_instance_count , instance_type , accelerator_type = None , endpoint_name = None , use_compiled_model = False , update_endpoint = False , ** kwargs ) : """Deploy the trained model to an Amazon SageMaker endpoint and return a ` ` sagemaker . RealTimePredictor ` ` object . More information : http : / / docs . aws . amazon . com / sagemaker / latest / dg / how - it - works - training . html Args : initial _ instance _ count ( int ) : Minimum number of EC2 instances to deploy to an endpoint for prediction . instance _ type ( str ) : Type of EC2 instance to deploy to an endpoint for prediction , for example , ' ml . c4 . xlarge ' . accelerator _ type ( str ) : Type of Elastic Inference accelerator to attach to an endpoint for model loading and inference , for example , ' ml . eia1 . medium ' . If not specified , no Elastic Inference accelerator will be attached to the endpoint . For more information : https : / / docs . aws . amazon . com / sagemaker / latest / dg / ei . html endpoint _ name ( str ) : Name to use for creating an Amazon SageMaker endpoint . If not specified , the name of the training job is used . use _ compiled _ model ( bool ) : Flag to select whether to use compiled ( optimized ) model . Default : False . update _ endpoint ( bool ) : Flag to update the model in an existing Amazon SageMaker endpoint . If True , this will deploy a new EndpointConfig to an already existing endpoint and delete resources corresponding to the previous EndpointConfig . Default : False tags ( List [ dict [ str , str ] ] ) : Optional . The list of tags to attach to this specific endpoint . Example : > > > tags = [ { ' Key ' : ' tagname ' , ' Value ' : ' tagvalue ' } ] For more information about tags , see https : / / boto3 . amazonaws . com / v1 / documentation / api / latest / reference / services / sagemaker . html # SageMaker . Client . add _ tags * * kwargs : Passed to invocation of ` ` create _ model ( ) ` ` . Implementations may customize ` ` create _ model ( ) ` ` to accept ` ` * * kwargs ` ` to customize model creation during deploy . For more , see the implementation docs . Returns : sagemaker . predictor . RealTimePredictor : A predictor that provides a ` ` predict ( ) ` ` method , which can be used to send requests to the Amazon SageMaker endpoint and obtain inferences ."""
self . _ensure_latest_training_job ( ) endpoint_name = endpoint_name or self . latest_training_job . name self . deploy_instance_type = instance_type if use_compiled_model : family = '_' . join ( instance_type . split ( '.' ) [ : - 1 ] ) if family not in self . _compiled_models : raise ValueError ( "No compiled model for {}. " "Please compile one with compile_model before deploying." . format ( family ) ) model = self . _compiled_models [ family ] else : model = self . create_model ( ** kwargs ) return model . deploy ( instance_type = instance_type , initial_instance_count = initial_instance_count , accelerator_type = accelerator_type , endpoint_name = endpoint_name , update_endpoint = update_endpoint , tags = self . tags )
def has_node_with_value ( self , value ) : """Whether any node in ` ` self . node _ list ` ` has the value ` ` value ` ` . Args : value ( Any ) : The value to find in ` ` self . node _ list ` ` Returns : bool Example : > > > from blur . markov . node import Node > > > node _ 1 = Node ( ' One ' ) > > > graph = Graph ( [ node _ 1 ] ) > > > graph . has _ node _ with _ value ( ' One ' ) True > > > graph . has _ node _ with _ value ( ' Foo ' ) False"""
for node in self . node_list : if node . value == value : return True else : return False
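The same membership test can be expressed with any(), which sidesteps the for/else subtlety in the loop above; the Node stand-in here exists only to make the example runnable:

from collections import namedtuple

Node = namedtuple("Node", "value")   # stand-in for the real Node class

def has_node_with_value(node_list, value):
    return any(node.value == value for node in node_list)

print(has_node_with_value([Node("One")], "One"))  # True
print(has_node_with_value([Node("One")], "Foo"))  # False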
def getRemote ( self , name : str = None , ha : HA = None ) : """Find the remote by name or ha . : param name : the name of the remote to find : param ha : host address pair the remote to find : raises : RemoteNotFound"""
return self . findInRemotesByName ( name ) if name else self . findInRemotesByHA ( ha )
def get_annotations ( self , atype = None , label = None ) : """Retrieve the annotations for this item from the server : type atype : String : param atype : return only results with a matching Type field : type label : String : param label : return only results with a matching Label field : rtype : String : returns : the annotations as a JSON string : raises : APIError if the API request is not successful"""
return self . client . get_item_annotations ( self . url ( ) , atype , label )
def plot_quadpole_evolution ( dataobj , quadpole , cols , threshold = 5 , rolling = False , ax = None ) : """Visualize time - lapse evolution of a single quadrupole . Parameters dataobj : : py : class : ` pandas . DataFrame ` DataFrame containing the data . Please refer to the documentation for required columns . quadpole : list of integers Electrode numbers of the quadrupole . cols : str The column / parameter to plot over time . threshold : float Allowed percentage deviation from the rolling median . rolling : bool Calculate rolling median values ( the default is False ) . ax : mpl . axes Optional axes object to plot to ."""
if isinstance ( dataobj , pd . DataFrame ) : df = dataobj else : df = dataobj . data subquery = df . query ( 'a == {0} and b == {1} and m == {2} and n == {3}' . format ( * quadpole ) ) # rhoa = subquery [ ' rho _ a ' ] . values # rhoa [ 30 ] = 300 # subquery [ ' rho _ a ' ] = rhoa if ax is not None : fig = ax . get_figure ( ) else : fig , ax = plt . subplots ( 1 , 1 , figsize = ( 20 / 2.54 , 7 / 2.54 ) ) ax . plot ( subquery [ 'timestep' ] , subquery [ cols ] , '.' , color = 'blue' , label = 'valid data' , ) if rolling : # rolling mean rolling_m = subquery . rolling ( 3 , center = True , min_periods = 1 ) . median ( ) ax . plot ( rolling_m [ 'timestep' ] . values , rolling_m [ 'rho_a' ] . values , '-' , label = 'rolling median' , ) ax . fill_between ( rolling_m [ 'timestep' ] . values , rolling_m [ 'rho_a' ] . values * ( 1 - threshold ) , rolling_m [ 'rho_a' ] . values * ( 1 + threshold ) , alpha = 0.4 , color = 'blue' , label = '{0}\% confidence region' . format ( threshold * 100 ) , ) # find all values that deviate by more than X percent from the # rolling _ m bad_values = ( np . abs ( np . abs ( subquery [ 'rho_a' ] . values - rolling_m [ 'rho_a' ] . values ) / rolling_m [ 'rho_a' ] . values ) > threshold ) bad = subquery . loc [ bad_values ] ax . plot ( bad [ 'timestep' ] . values , bad [ 'rho_a' ] . values , '.' , # s = 15, color = 'r' , label = 'discarded data' , ) ax . legend ( loc = 'upper center' , fontsize = 6 ) # ax . set _ xlim ( 10 , 20) ax . set_ylabel ( r'$\rho_a$ [$\Omega$m]' ) ax . set_xlabel ( 'timestep' ) return fig , ax
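A self-contained pandas sketch of the rolling-median band used above to flag outliers; the data is synthetic and the threshold is expressed here as a fractional deviation:

import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
df = pd.DataFrame({"timestep": np.arange(30), "rho_a": 100 + rng.normal(0, 1, 30)})
df.loc[15, "rho_a"] = 140                     # inject one obvious outlier

threshold = 0.05                              # 5 % allowed deviation from the rolling median
rolling_median = df["rho_a"].rolling(3, center=True, min_periods=1).median()
bad = (df["rho_a"] - rolling_median).abs() / rolling_median > threshold

print(df.loc[bad, ["timestep", "rho_a"]])     # only the injected outlier is flagged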
def comment_lines ( lines , prefix ) : """Return commented lines"""
if not prefix : return lines return [ prefix + ' ' + line if line else prefix for line in lines ]
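For illustration, how the helper above behaves on a small input; blank lines get the bare prefix and an empty prefix leaves the lines untouched:

print(comment_lines(["first line", "", "third line"], "#"))
# ['# first line', '#', '# third line']
print(comment_lines(["untouched"], ""))
# ['untouched']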
def _parse_publisher ( details ) : """Parse publisher of the book . Args : details ( obj ) : HTMLElement containing slice of the page with details . Returns : str / None : Publisher ' s name as string or None if not found ."""
publisher = _get_td_or_none ( details , "ctl00_ContentPlaceHolder1_tblRowNakladatel" ) # publisher is not specified if not publisher : return None publisher = dhtmlparser . removeTags ( publisher ) . strip ( ) # return None instead of blank string if not publisher : return None return publisher
def bin_stream ( stream , content_type , status = '200 OK' , headers = None ) : """Utility method for constructing a binary response . : param Any stream : The response body stream : param str content _ type : The content - type of the response : param str status : The HTTP status line : param list [ tuple [ str , str ] ] headers : Additional headers for this response : return : WbResponse that is a binary stream : rtype : WbResponse"""
def_headers = [ ( 'Content-Type' , content_type ) ] if headers : def_headers += headers status_headers = StatusAndHeaders ( status , def_headers ) return WbResponse ( status_headers , value = stream )
def move ( self , path , raise_if_exists = False ) : """Call MockFileSystem ' s move command"""
self . fs . move ( self . path , path , raise_if_exists )
def PushItem ( self , item , block = True ) : """Push an item on to the queue . If no ZeroMQ socket has been created , one will be created the first time this method is called . Args : item ( object ) : item to push on the queue . block ( Optional [ bool ] ) : whether the push should be performed in blocking or non - blocking mode . Raises : QueueAlreadyClosed : if the queue is closed . QueueFull : if the internal buffer was full and it was not possible to push the item to the buffer within the timeout . RuntimeError : if closed event is missing ."""
if not self . _closed_event : raise RuntimeError ( 'Missing closed event.' ) if self . _closed_event . is_set ( ) : raise errors . QueueAlreadyClosed ( ) if not self . _zmq_socket : self . _CreateZMQSocket ( ) try : if block : self . _queue . put ( item , timeout = self . timeout_seconds ) else : self . _queue . put ( item , block = False ) except Queue . Full as exception : raise errors . QueueFull ( exception )
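The internal buffer above follows standard-library queue semantics; a minimal sketch of the blocking and non-blocking put paths, without any ZeroMQ involvement:

import queue

buffer = queue.Queue(maxsize=1)
buffer.put("first item", block=False)       # fits; the buffer is now full

try:
    buffer.put("second item", timeout=0.1)  # blocking put that times out because the buffer is full
except queue.Full:
    print("buffer full")                    # the wrapper above re-raises this as errors.QueueFull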
def _deconstruct_url ( self , url : str ) -> List [ str ] : """Split a regular URL into parts : param url : A normalized URL : return : Parts of the URL : raises kua . routes . RouteError : If the depth of the URL exceeds the max depth of the deepest registered pattern : private :"""
parts = url . split ( '/' , self . _max_depth + 1 ) if depth_of ( parts ) > self . _max_depth : raise RouteError ( 'No match' ) return parts
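A small standalone illustration of the maxsplit cap; the max_depth value is arbitrary and depth_of is assumed to count the produced parts:

max_depth = 3

shallow = "users/42/posts".split("/", max_depth + 1)
deep = "a/b/c/d/e/f".split("/", max_depth + 1)

print(shallow)  # ['users', '42', 'posts'] -> within the depth limit
print(deep)     # ['a', 'b', 'c', 'd', 'e/f'] -> too many parts, rejected with RouteError('No match')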