def set_proxy_bypass(domains, network_service="Ethernet"):
    '''
    Sets the domains that can bypass the proxy.

    domains
        An array of domains allowed to bypass the proxy

    network_service
        The network service to apply the changes to; this is only necessary
        on macOS

    CLI Example:

    .. code-block:: bash

        salt '*' proxy.set_proxy_bypass "['127.0.0.1', 'localhost']"
    '''
    servers_str = ' '.join(domains)
    cmd = 'networksetup -setproxybypassdomains {0} {1}'.format(network_service, servers_str)
    out = __salt__['cmd.run'](cmd)
    return 'error' not in out

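# A minimal sketch of the command the function builds (default "Ethernet"
# service); the __salt__ dunder only exists inside a running salt minion, so
# only the string construction is shown here.
domains = ['127.0.0.1', 'localhost']
cmd = 'networksetup -setproxybypassdomains {0} {1}'.format('Ethernet', ' '.join(domains))
print(cmd)  # networksetup -setproxybypassdomains Ethernet 127.0.0.1 localhost
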
def clean(self):
    """Checks for the identification and password.

    If the combination can't be found, raises an invalid sign-in error."""
    identification = self.cleaned_data.get('identification')
    password = self.cleaned_data.get('password')
    if identification and password:
        self.user_cache = authenticate(identification=identification, password=password)
        if self.user_cache is None:
            raise forms.ValidationError(_(u"Please enter a correct "
                                          "username or email address and password. "
                                          "Note that both fields are case-sensitive."))
    return self.cleaned_data

def ppo_original_world_model_stochastic_discrete():
    """Atari parameters with stochastic discrete world model as policy."""
    hparams = ppo_original_params()
    hparams.policy_network = "next_frame_basic_stochastic_discrete"
    hparams_keys = hparams.values().keys()
    video_hparams = basic_stochastic.next_frame_basic_stochastic_discrete()
    for (name, value) in six.iteritems(video_hparams.values()):
        if name in hparams_keys:
            hparams.set_hparam(name, value)
        else:
            hparams.add_hparam(name, value)
    # To avoid OOM. Probably way too small.
    hparams.optimization_batch_size = 1
    hparams.weight_decay = 0
    return hparams

def dtime(sdat, tstart=None, tend=None):
    """Time increment dt.

    Compute dt as a function of time.

    Args:
        sdat (:class:`~stagpy.stagyydata.StagyyData`): a StagyyData instance.
        tstart (float): time at which the computation should start. Use the
            beginning of the time series data if set to None.
        tend (float): time at which the computation should end. Use the
            end of the time series data if set to None.
    Returns:
        tuple of :class:`numpy.array`: dt and time arrays."""
    tseries = sdat.tseries_between(tstart, tend)
    time = tseries['t'].values
    return time[1:] - time[:-1], time[:-1]

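# The slicing behind the return value, in plain NumPy (no StagyyData needed):
# dt[i] covers the interval starting at time[i].
import numpy as np

time = np.array([0.0, 0.5, 1.5, 3.0])
print(time[1:] - time[:-1])  # [0.5 1.  1.5]
print(time[:-1])             # [0.  0.5 1.5]
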
def _iter_full_paths(path_list):
    """Iterates over all paths that are in a directory and its subdirectories,
    returning fully-specified paths."""
    for path in path_list:
        if not os.path.isdir(path):
            full_path = os.path.realpath(path)
            yield full_path
        else:
            for root, dirs, filenames in os.walk(path):
                for filename in filenames:
                    full_path = os.path.realpath(os.path.join(root, filename))
                    yield full_path

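# Usage sketch with hypothetical paths: plain files yield their resolved real
# path, directories are walked recursively and every contained file is yielded.
for full in _iter_full_paths(['setup.py', 'src/']):
    print(full)
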
def moderate_model(ParentModel, publication_date_field=None, enable_comments_field=None):
    """Register a parent model (e.g. ``Blog`` or ``Article``) that should receive comment moderation.

    :param ParentModel: The parent model, e.g. a ``Blog`` or ``Article`` model.
    :param publication_date_field: The field name of a :class:`~django.db.models.DateTimeField` in the parent model which stores the publication date.
    :type publication_date_field: str
    :param enable_comments_field: The field name of a :class:`~django.db.models.BooleanField` in the parent model which stores whether comments are enabled.
    :type enable_comments_field: str"""
    attrs = {
        'auto_close_field': publication_date_field,
        'auto_moderate_field': publication_date_field,
        'enable_field': enable_comments_field,
    }
    ModerationClass = type(ParentModel.__name__ + 'Moderator', (FluentCommentsModerator,), attrs)
    moderator.register(ParentModel, ModerationClass)

def create_service(self, name, **kwargs):
    """Creates a service with a name. All other parameters are optional. They
    are: `note`, `hourly_rate`, `billable`, and `archived`."""
    data = self._wrap_dict("service", kwargs)
    data["service"]["name"] = name
    return self.post("/services.json", data=data)

def Kdiag(self, X, target):
    """Compute the diagonal of the covariance matrix for X."""
    np.add(target, self.variance, target)

def get_winfunc(libname, funcname, restype=None, argtypes=(), _libcache={}):
    """Retrieve a function from a library/DLL, and set the data types."""
    if libname not in _libcache:
        _libcache[libname] = windll.LoadLibrary(libname)
    func = getattr(_libcache[libname], funcname)
    func.argtypes = argtypes
    func.restype = restype
    return func

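# Hedged usage sketch (Windows only): fetch kernel32!GetTickCount, a real Win32
# API taking no arguments and returning the uptime in milliseconds as a DWORD.
import ctypes

GetTickCount = get_winfunc("kernel32", "GetTickCount", restype=ctypes.c_uint32)
print(GetTickCount())  # e.g. 123456789
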
def _combine_coverages(items, work_dir, input_backs=None):
    """Combine coverage cnns calculated for individual inputs into single file.

    Optionally moves over pre-calculated coverage samples from a background file."""
    out_file = os.path.join(work_dir, "sample_coverages.txt")
    if not utils.file_exists(out_file):
        with file_transaction(items[0], out_file) as tx_out_file:
            with open(tx_out_file, 'w') as out_f:
                for data in items:
                    cov_file = tz.get_in(["depth", "bins", "seq2c"], data)
                    with open(cov_file) as cov_f:
                        out_f.write(cov_f.read())
                if input_backs:
                    for input_back in input_backs:
                        with open(input_back) as in_handle:
                            for line in in_handle:
                                if len(line.split()) >= 4:
                                    out_f.write(line)
    return out_file

def validate(ctx, mapfiles, expand):
    """Validate Mapfile(s) against the Mapfile schema.

    The MAPFILES argument is a list of paths, either to individual Mapfiles, or folders containing Mapfiles.
    Wildcards are supported (natively on Linux, and up to one level deep on Windows).
    Validation errors are reported to the console. The program returns the error count - this will be 0 if no
    validation errors are encountered.

    Example of validating a single Mapfile:

        mappyfile validate C:/Temp/valid.map

    Example of validating two folders containing Mapfiles, without expanding INCLUDES:

        mappyfile validate C:/Temp/*.map D:/GitHub/mappyfile/tests/mapfiles/*.map --no-expand"""
    all_mapfiles = get_mapfiles(mapfiles)
    if len(all_mapfiles) == 0:
        click.echo("No Mapfiles found at the following paths: {}".format(",".join(mapfiles)))
        return
    validation_count = 0
    errors = 0
    for fn in all_mapfiles:
        fn = click.format_filename(fn)
        d = mappyfile.open(fn, expand_includes=expand, include_position=True)
        validation_messages = mappyfile.validate(d)
        if validation_messages:
            for v in validation_messages:
                v["fn"] = fn
                msg = "{fn} (Line: {line} Column: {column}) {message} - {error}".format(**v)
                click.echo(msg)
                errors += 1
        else:
            click.echo("{} validated successfully".format(fn))
            validation_count += 1
    click.echo("{} file(s) validated ({} successfully)".format(len(all_mapfiles), validation_count))
    sys.exit(errors)

def _transform(self, data, transform, step_size):
    '''Transform the data. If the transform is not supported by this series,
    returns the data unaltered.'''
    if transform == 'mean':
        total = sum(data)
        count = len(data)
        data = float(total) / float(count) if count > 0 else 0
    elif transform == 'count':
        data = len(data)
    elif transform == 'min':
        data = min(data or [0])
    elif transform == 'max':
        data = max(data or [0])
    elif transform == 'sum':
        data = sum(data)
    elif transform == 'rate':
        data = len(data) / float(step_size)
    elif callable(transform):
        data = transform(data)
    return data

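# Worked examples of the supported transforms on samples = [3, 1, 4, 1, 5]
# with a hypothetical 10-second step (shown for orientation only):
#   mean -> 2.8      count -> 5      min -> 1      max -> 5
#   sum  -> 14       rate  -> 5 / 10.0 = 0.5 events per second
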
def sense_ttf(self, target):
    """Sense for a Type F Target is supported for 212 and 424 kbps."""
    log.debug("polling for NFC-F technology")
    if target.brty not in ("212F", "424F"):
        message = "unsupported bitrate {0}".format(target.brty)
        raise nfc.clf.UnsupportedTargetError(message)
    self.chipset.in_set_rf(target.brty)
    self.chipset.in_set_protocol(self.chipset.in_set_protocol_defaults)
    self.chipset.in_set_protocol(initial_guard_time=24)
    sensf_req = (target.sensf_req if target.sensf_req
                 else bytearray.fromhex("00FFFF0100"))
    log.debug("send SENSF_REQ " + hexlify(sensf_req))
    try:
        frame = chr(len(sensf_req) + 1) + sensf_req
        frame = self.chipset.in_comm_rf(frame, 10)
    except CommunicationError as error:
        if error != "RECEIVE_TIMEOUT_ERROR":
            log.debug(error)
        return None
    if len(frame) >= 18 and frame[0] == len(frame) and frame[1] == 1:
        log.debug("rcvd SENSF_RES " + hexlify(frame[1:]))
        return nfc.clf.RemoteTarget(target.brty, sensf_res=frame[1:])

def generate_epochs_info(epoch_list):
    """Use epoch_list to generate epoch_info defined below.

    Parameters
        epoch_list: list of 3D (binary) arrays in shape [condition, nEpochs, nTRs]
            Contains specification of epochs and conditions, assuming
            1. all subjects have the same number of epochs;
            2. len(epoch_list) equals the number of subjects;
            3. an epoch is always a continuous time course.
    Returns
        epoch_info: list of tuple (label, sid, start, end).
            label is the condition label of the epoch;
            sid is the subject id, corresponding to the index of raw_data;
            start is the start TR of an epoch (inclusive);
            end is the end TR of an epoch (exclusive).
            Assuming len(labels) equals the number of epochs and
            the epochs of the same sid are adjacent in epoch_info."""
    time1 = time.time()
    epoch_info = []
    for sid, epoch in enumerate(epoch_list):
        for cond in range(epoch.shape[0]):
            sub_epoch = epoch[cond, :, :]
            for eid in range(epoch.shape[1]):
                r = np.sum(sub_epoch[eid, :])
                if r > 0:  # there is an epoch in this condition
                    start = np.nonzero(sub_epoch[eid, :])[0][0]
                    epoch_info.append((cond, sid, start, start + r))
    time2 = time.time()
    logger.debug('epoch separation done, takes %.2f s' % (time2 - time1))
    return epoch_info

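# A tiny hand-built input (one subject, one condition, 8 TRs): the binary mask
# marks TRs 2-4 as a single epoch, giving one (label, sid, start, end) tuple.
import numpy as np

mask = np.zeros((1, 1, 8), dtype=int)  # [condition, nEpochs, nTRs]
mask[0, 0, 2:5] = 1
info = generate_epochs_info([mask])
assert info[0] == (0, 0, 2, 5)         # end is exclusive
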
def get_user(self, user):
    """Get user's data (first and last name, email, etc).

    Args:
        user (string): User name.
    Returns:
        (dictionary): User's data encoded in a dictionary.
    Raises:
        requests.HTTPError on failure."""
    self.project_service.set_auth(self._token_project)
    return self.project_service.get_user(user)

def create(cls, **kwargs):
    """Build and return a `ScatterGather` object"""
    linkname = kwargs.setdefault('linkname', cls.clientclass.linkname_default)
    # Don't use setdefault b/c we don't want to build a JobArchive
    # unless it is needed
    job_archive = kwargs.get('job_archive', None)
    if job_archive is None:
        job_archive = JobArchive.build_temp_job_archive()
        kwargs.setdefault('job_archive', job_archive)
    kwargs_client = dict(linkname=linkname,
                         link_prefix=kwargs.get('link_prefix', ''),
                         file_stage=kwargs.get('file_stage', None),
                         job_archive=job_archive)
    link = cls.clientclass.create(**kwargs_client)
    sg = cls(link, **kwargs)
    return sg

def load(self, filename):
    '''Load points from a file.

    Returns number of points loaded.'''
    f = open(filename, mode='r')
    self.clear()
    for line in f:
        if line.startswith('#'):
            continue
        line = line.strip()
        if not line:
            continue
        a = line.split()
        if len(a) != 2:
            raise MAVFenceError("invalid fence point line: %s" % line)
        self.add_latlon(float(a[0]), float(a[1]))
    f.close()
    return len(self.points)

def insert_before(self, obj, value, recursive=True):
    """Insert *value* immediately before *obj*.

    *obj* can be either a string, a :class:`.Node`, or another
    :class:`.Wikicode` object (as created by :meth:`get_sections`, for
    example). If *obj* is a string, we will operate on all instances of
    that string within the code, otherwise only on the specific instance
    given. *value* can be anything parsable by :func:`.parse_anything`. If
    *recursive* is ``True``, we will try to find *obj* within our child
    nodes even if it is not a direct descendant of this :class:`.Wikicode`
    object. If *obj* is not found, :exc:`ValueError` is raised."""
    if isinstance(obj, (Node, Wikicode)):
        context, index = self._do_strong_search(obj, recursive)
        context.insert(index.start, value)
    else:
        for exact, context, index in self._do_weak_search(obj, recursive):
            if exact:
                context.insert(index.start, value)
            else:
                obj = str(obj)
                self._slice_replace(context, index, obj, str(value) + obj)

def filter_args_to_dict(filter_dict, accepted_filter_keys=[]):
    """Cast and validate filter args.

    :param filter_dict: Filter kwargs
    :param accepted_filter_keys: List of keys that are acceptable to use."""
    out_dict = {}
    for k, v in filter_dict.items():
        # make sure that the filter k is acceptable
        # and that there is a value associated with the key
        if k not in accepted_filter_keys or v is None:
            logger.debug('Filter was not in accepted_filter_keys or value is None.')
            # skip it
            continue
        filter_type = filter_type_map.get(k, None)
        if filter_type is None:
            logger.debug('Filter key not found in map.')
            # hmm, this was an acceptable filter type but not in the map...
            # Going to skip it.
            continue
        # map of casting functions to filter types
        filter_cast_map = {'int': cast_integer_filter, 'datetime': cast_datetime_filter}
        cast_function = filter_cast_map.get(filter_type, None)
        # if we get a cast function, call it with v. If not, just use v.
        if cast_function:
            out_value = cast_function(v)
        else:
            out_value = v
        out_dict[k] = out_value
    return out_dict

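# Hypothetical usage sketch, assuming filter_type_map maps 'age' -> 'int' (so
# cast_integer_filter is applied) and has no entry for 'name': 'age' is cast,
# 'name' is skipped with a debug message, and 'colour' is rejected outright.
filters = {'age': '42', 'name': 'ada', 'colour': None}
print(filter_args_to_dict(filters, accepted_filter_keys=['age', 'name']))
# -> {'age': 42}
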
def gen_radio_edit(sig_dic):
    '''Editing for HTML radio control.'''
    edit_zuoxiang = '''
    <label for="{0}"><span>
    <a class="glyphicon glyphicon-star" style="color: red;font-size: xx-small;">
    </a>{1}</span>
    '''.format(sig_dic['en'], sig_dic['zh'])
    dic_tmp = sig_dic['dic']
    for key in dic_tmp.keys():
        tmp_str = '''
        <input id="{0}" name="{0}" type="radio" class="form-control" value="{1}"
        {{% if '{0}' in postinfo.extinfo and postinfo.extinfo['{0}'] == '{1}' %}}
        checked
        {{% end %}}
        >{2}
        '''.format(sig_dic['en'], key, dic_tmp[key])
        edit_zuoxiang += tmp_str
    edit_zuoxiang += '''</label>'''
    return edit_zuoxiang

def fog(x, severity=1):
    """Fog corruption to images.

    Adding fog to images. Fog is generated by diamond-square algorithm.

    Args:
        x: numpy array, uncorrupted image, assumed to have uint8 pixel in [0,255].
        severity: integer, severity of corruption.
    Returns:
        numpy array, image with uint8 pixels in [0,255]. Added fog."""
    c = [(1.5, 2), (2., 2), (2.5, 1.7), (2.5, 1.5), (3., 1.4)][severity - 1]
    x = np.array(x) / 255.
    max_val = x.max()
    mapsize = 512
    shape = x.shape
    max_length = max(shape[0], shape[1])
    if max_length > mapsize:
        mapsize = 2 ** int(np.ceil(np.log2(float(max_length))))
    tmp = plasma_fractal(mapsize=mapsize, wibbledecay=c[1])
    tmp = tmp[:x.shape[0], :x.shape[1]]
    tmp = tmp[..., np.newaxis]
    x += c[0] * tmp
    x_clip = np.clip(x * max_val / (max_val + c[0]), 0, 1) * 255
    return around_and_astype(x_clip)

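# The blend step in isolation: add a fog field scaled by c[0], then renormalize
# so the brightest original pixel stays the brightest after clipping. Random
# noise stands in for the plasma fractal here (illustration only, not real fog).
import numpy as np

img = np.random.rand(4, 4, 3)            # stand-in image in [0, 1]
fog_field = np.random.rand(4, 4, 1)      # stand-in for plasma_fractal output
c0 = 1.5
out = np.clip((img + c0 * fog_field) * img.max() / (img.max() + c0), 0, 1)
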
def fetchThreadMessages(self, thread_id=None, limit=20, before=None):
    """Get the last messages in a thread.

    :param thread_id: User/Group ID to get messages from. See :ref:`intro_threads`
    :param limit: Max. number of messages to retrieve
    :param before: A timestamp, indicating from which point to retrieve messages
    :type limit: int
    :type before: int
    :return: :class:`models.Message` objects
    :rtype: list
    :raises: FBchatException if request failed"""
    thread_id, thread_type = self._getThread(thread_id, None)
    params = {
        "id": thread_id,
        "message_limit": limit,
        "load_messages": True,
        "load_read_receipts": True,
        "before": before,
    }
    j = self.graphql_request(GraphQL(doc_id="1860982147341344", params=params))
    if j.get("message_thread") is None:
        raise FBchatException("Could not fetch thread {}: {}".format(thread_id, j))
    messages = [Message._from_graphql(message)
                for message in j["message_thread"]["messages"]["nodes"]]
    messages.reverse()
    read_receipts = j["message_thread"]["read_receipts"]["nodes"]
    for message in messages:
        for receipt in read_receipts:
            if int(receipt["watermark"]) >= int(message.timestamp):
                message.read_by.append(receipt["actor"]["id"])
    return messages

def remove_column(self, key):
    """:param key: str of the column to remove from every row in the table
    :return: None"""
    if isinstance(key, int):
        index = key
        key = self.row_columns[key]
    else:
        index = self._column_index[key]
    for row in self.table:
        row.pop(index)
    self.row_columns = self.row_columns[:index] + self.row_columns[index + 1:]
    self.pop_column(key)

def enkf(self):
    """Loop over time windows and apply da.

    :return:"""
    for cycle_index, time_point in enumerate(self.timeline):
        if cycle_index >= len(self.timeline) - 1:
            # Logging: last update cycle has finished
            break
        print("Print information about this assimilation Cycle ???")  # should be handled in Logger
        # each cycle should have a dictionary of template files and instruction
        # files to update the model input files
        # get current cycle update information
        current_cycle_files = self.cycle_update_files[cycle_index]
        # (1) update model input files for this cycle
        self.model_temporal_evolotion(cycle_index, current_cycle_files)
        # (2) generate new Pst object for the current time cycle
        current_pst = copy.deepcopy(self.pst)
        # update observation dataframe
        # update parameter dataframe
        # update in/out files if needed
        # At this stage the problem is equivalent to the smoother problem
        self.smoother(current_pst)

def _wait_for_machine_finish(self, name):
    """Internal method.

    Wait until the machine is really destroyed, i.e. the machine does not exist.

    :param name: str machine name
    :return: True or exception"""
    # TODO: rewrite it using probes module in utils
    for foo in range(constants.DEFAULT_RETRYTIMEOUT):
        time.sleep(constants.DEFAULT_SLEEP)
        out = run_cmd(["machinectl", "--no-pager", "status", name],
                      ignore_status=True, return_output=True)
        if out != 0:
            return True
    raise ConuException("Unable to stop machine %s within %d" % (name, constants.DEFAULT_RETRYTIMEOUT))

def is_magic(self):
    """Return True iff this method is a magic method (e.g., `__str__`)."""
    return (self.name.startswith('__')
            and self.name.endswith('__')
            and self.name not in VARIADIC_MAGIC_METHODS)

def check_permission(self, request, page, permission):
    """Runs the custom permission check and raises an
    exception if False."""
    if not getattr(page, "can_" + permission)(request):
        raise PermissionDenied

def _get_primary_type(ttypes, parent, logstream=stderr):
    """Check for multiple transcript types and, if possible, select one."""
    if len(ttypes) > 1:
        if logstream:  # pragma: no branch
            message = '[tag::transcript::primary_transcript]'
            message += ' WARNING: feature {:s}'.format(parent.slug)
            message += ' has multiple associated transcript types'
            message += ' {}'.format(ttypes)
            print(message, file=logstream)
        if 'mRNA' not in ttypes:
            message = ('cannot resolve multiple transcript types if "mRNA" is'
                       ' not one of those types {}'.format(ttypes))
            raise Exception(message)
        ttypes = ['mRNA']
    return ttypes[0]

def trace(self, *attributes):
    """Function decorator that traces functions.

    NOTE: Must be placed after the @app.route decorator.

    @param attributes any number of flask.Request attributes
    (strings) to be set as tags on the created span"""
    def decorator(f):
        def wrapper(*args, **kwargs):
            if self._trace_all_requests:
                return f(*args, **kwargs)
            self._before_request_fn(list(attributes))
            try:
                r = f(*args, **kwargs)
            except Exception as e:
                self._after_request_fn(error=e)
                raise
            self._after_request_fn()
            return r
        wrapper.__name__ = f.__name__
        return wrapper
    return decorator

def example_df():
    """Create an example dataframe."""
    country_names = ['Germany', 'France', 'Indonesia', 'Ireland', 'Spain', 'Vatican']
    population = [82521653, 66991000, 255461700, 4761865, 46549045, None]
    population_time = [dt.datetime(2016, 12, 1),
                       dt.datetime(2017, 1, 1),
                       dt.datetime(2017, 1, 1),
                       None,                      # Ireland
                       dt.datetime(2017, 6, 1),   # Spain
                       None]
    euro = [True, True, False, True, True, True]
    df = pd.DataFrame({'country': country_names,
                       'population': population,
                       'population_time': population_time,
                       'EUR': euro})
    df = df[['country', 'population', 'population_time', 'EUR']]
    return df

def verify(self, **kwargs):
    """Authorization Request parameters that are OPTIONAL in the OAuth 2.0
    specification MAY be included in the OpenID Request Object without also
    passing them as OAuth 2.0 Authorization Request parameters, with one
    exception: The scope parameter MUST always be present in OAuth 2.0
    Authorization Request parameters.

    All parameter values that are present both in the OAuth 2.0
    Authorization Request and in the OpenID Request Object MUST exactly
    match."""
    super(AuthorizationRequest, self).verify(**kwargs)
    clear_verified_claims(self)
    args = {}
    for arg in ["keyjar", "opponent_id", "sender", "alg", "encalg", "encenc"]:
        try:
            args[arg] = kwargs[arg]
        except KeyError:
            pass
    if "opponent_id" not in kwargs:
        args["opponent_id"] = self["client_id"]
    if "request" in self:
        if isinstance(self["request"], str):
            # Try to decode the JWT; checks the signature
            oidr = OpenIDRequest().from_jwt(str(self["request"]), **args)
            # check if something is changed in the original message
            for key, val in oidr.items():
                if key in self:
                    if self[key] != val:
                        # log but otherwise ignore
                        logger.warning('{} != {}'.format(self[key], val))
            # remove all claims
            _keys = list(self.keys())
            for key in _keys:
                if key not in oidr:
                    del self[key]
            self.update(oidr)
            # replace the JWT with the parsed and verified instance
            self[verified_claim_name("request")] = oidr
    if "id_token_hint" in self:
        if isinstance(self["id_token_hint"], str):
            idt = IdToken().from_jwt(str(self["id_token_hint"]), **args)
            self["verified_id_token_hint"] = idt
    if "response_type" not in self:
        raise MissingRequiredAttribute("response_type missing", self)
    _rt = self["response_type"]
    if "id_token" in _rt:
        if "nonce" not in self:
            raise MissingRequiredAttribute("Nonce missing", self)
        else:
            try:
                if self['nonce'] != kwargs['nonce']:
                    raise ValueError('Nonce in id_token not matching nonce in authz request')
            except KeyError:
                pass
    if "openid" not in self.get("scope", []):
        raise MissingRequiredValue("openid not in scope", self)
    if "offline_access" in self.get("scope", []):
        if "prompt" not in self or "consent" not in self["prompt"]:
            raise MissingRequiredValue("consent in prompt", self)
    if "prompt" in self:
        if "none" in self["prompt"] and len(self["prompt"]) > 1:
            raise InvalidRequest("prompt none combined with other value", self)
    return True

def _get_state():
    '''Returns the state of connman'''
    try:
        return pyconnman.ConnManager().get_property('State')
    except KeyError:
        return 'offline'
    except dbus.DBusException as exc:
        raise salt.exceptions.CommandExecutionError('Connman daemon error: {0}'.format(exc))

def _get_and_write_fp(self, iso_path, outfp, blocksize):
    # type: (bytes, BinaryIO, int) -> None
    '''An internal method to fetch a single file from the ISO and write it out
    to the file object.

    Parameters:
        iso_path - The absolute path to the file to get data from.
        outfp - The file object to write data to.
        blocksize - The blocksize to use when copying data.
    Returns:
        Nothing.'''
    try:
        return self._get_file_from_iso_fp(outfp, blocksize, None, None, iso_path)
    except pycdlibexception.PyCdlibException:
        pass
    try:
        return self._get_file_from_iso_fp(outfp, blocksize, iso_path, None, None)
    except pycdlibexception.PyCdlibException:
        pass
    self._get_file_from_iso_fp(outfp, blocksize, None, iso_path, None)

def _pre_job_handling(self, job):
    """Some code executed before actually processing the job.

    :param VFGJob job: the VFGJob object.
    :return: None"""
    # did we reach the final address?
    if self._final_address is not None and job.addr == self._final_address:
        # our analysis should be terminated here
        l.debug("%s is viewed as a final state. Skip.", job)
        raise AngrSkipJobNotice()
    l.debug("Handling VFGJob %s", job)
    if not self._top_task:
        l.debug("No more tasks available. Skip the job.")
        raise AngrSkipJobNotice()
    assert isinstance(self._top_task, FunctionAnalysis)
    if job not in self._top_task.jobs:
        # it seems that all jobs of the top task have been done. unwind the task stack
        # make sure this job is at least recorded somewhere
        unwind_count = None
        for i, task in enumerate(reversed(self._task_stack)):
            if isinstance(task, FunctionAnalysis):
                if job in task.jobs:
                    # nice
                    unwind_count = i
        if unwind_count is None:
            l.debug("%s is not recorded. Skip the job.", job)
            raise AngrSkipJobNotice()
        else:
            # unwind the stack till the target, unless we see any pending jobs for each new top task
            for i in range(unwind_count):
                if isinstance(self._top_task, FunctionAnalysis):
                    # are there any pending jobs belonging to the current function that we should handle first?
                    pending_job_key = self._get_pending_job(self._top_task.function_address)
                    if pending_job_key is not None:
                        # ah there is
                        # analyze it first
                        self._trace_pending_job(pending_job_key)
                        l.debug("A pending job is found for function %#x. Delay %s.",
                                self._top_task.function_address, job)
                        raise AngrDelayJobNotice()
                task = self._task_stack.pop()
                if not task.done:
                    l.warning("Removing an unfinished task %s. Might be a bug.", task)
            assert job in self._top_task.jobs
    # check if this is considered to be a final state
    if self._final_state_callback is not None and self._final_state_callback(job.state, job.call_stack):
        l.debug("%s.state is considered as a final state. Skip the job.", job)
        self.final_states.append(job.state)
        raise AngrSkipJobNotice()
    # increment the execution counter
    self._execution_counter[job.addr] += 1
    self._top_task.jobs.remove(job)
    # set up some essential variables and parameters
    job.call_stack_suffix = job.get_call_stack_suffix()
    job.jumpkind = 'Ijk_Boring' if job.state.history.jumpkind is None else job.state.history.jumpkind
    src_block_id = job.src_block_id
    src_exit_stmt_idx = job.src_exit_stmt_idx
    addr = job.state.solver.eval(job.state.regs.ip)
    input_state = job.state
    block_id = BlockID.new(addr, job.call_stack_suffix, job.jumpkind)
    if self._tracing_times[block_id] > self._max_iterations:
        l.debug('%s has been traced too many times. Skip', job)
        raise AngrSkipJobNotice()
    self._tracing_times[block_id] += 1
    if block_id not in self._nodes:
        vfg_node = VFGNode(addr, block_id, state=input_state)
        self._nodes[block_id] = vfg_node
    else:
        vfg_node = self._nodes[block_id]
    job.vfg_node = vfg_node
    # log the current state
    vfg_node.state = input_state
    # Execute this basic block with input state, and get a new SimSuccessors instance
    # unused result var is `error_occurred`
    job.sim_successors, _, restart_analysis = self._get_simsuccessors(input_state, addr)
    if restart_analysis:
        # We should restart the analysis because something must have changed in the very initial state
        raise AngrVFGRestartAnalysisNotice()
    if job.sim_successors is None:
        # Ouch, we cannot get the SimSuccessors for some reason. Skip this guy
        l.debug('Cannot create SimSuccessors for %s. Skip.', job)
        raise AngrSkipJobNotice()
    self._graph_add_edge(src_block_id, block_id,
                         jumpkind=job.jumpkind, src_exit_stmt_idx=src_exit_stmt_idx)

def save(self, update_site=False, *args, **kwargs):
    """Set the site to the current site when the record is first
    created, or the ``update_site`` argument is explicitly set
    to ``True``."""
    if update_site or (self.id is None and self.site_id is None):
        self.site_id = current_site_id()
    super(SiteRelated, self).save(*args, **kwargs)

def connections_from_graph(env, G, edge_data=False):
    """Create connections for agents in the given environment from the given
    NetworkX graph structure.

    :param env:
        Environment where the agents live. The environment should be derived
        from :class:`~creamas.core.environment.Environment`,
        :class:`~creamas.mp.MultiEnvironment` or
        :class:`~creamas.ds.DistributedEnvironment`.
    :param G:
        NetworkX graph structure, either :class:`networkx.graph.Graph` or
        :class:`networkx.digraph.DiGraph`. The graph needs to have the same
        number of nodes as the environment has agents (excluding the managers).
    :param bool edge_data:
        If ``True``, edge data from the given graph is copied to the agents'
        :attr:`connections`.

    .. note::

        By design, manager agents are excluded from the connections and should
        not be counted towards environment's agent count.

    The created connections are stored in each agent's
    :attr:`~creamas.core.agent.CreativeAgent.connections` and the possible
    edge data is stored as key-value pairs in the connection dictionary.

    The agents are sorted by their environments' hosts and ports before each
    agent is mapped to a node in **G**. This should cause some network
    generation methods in NetworkX, e.g.
    :func:`~networkx.generators.random_graphs.connected_watts_strogatz_graph`,
    to create more connections between agents in the same environment and/or
    node when using :class:`~creamas.mp.MultiEnvironment` or
    :class:`~creamas.ds.DistributedEnvironment`."""
    if not issubclass(G.__class__, (Graph, DiGraph)):
        raise TypeError("Graph structure must be derived from Networkx's "
                        "Graph or DiGraph.")
    if not hasattr(env, 'get_agents'):
        raise TypeError("Parameter 'env' must have get_agents.")
    addrs = env.get_agents(addr=True)
    if len(addrs) != len(G):
        raise ValueError("The number of graph nodes and agents in the "
                         "environment (excluding the manager agent) must "
                         "match. Now got {} nodes and {} agents."
                         .format(len(G), len(addrs)))
    # Sort agent addresses to the order they were added to the environment.
    addrs = sort_addrs(addrs)
    _addrs2nodes(addrs, G)
    conn_map = _edges2conns(G, edge_data)
    env.create_connections(conn_map)

def comment_filter(comment_text):
    """Passed comment text to be rendered through the function defined
    by the ``COMMENT_FILTER`` setting. If no function is defined
    (the default), Django's ``linebreaksbr`` and ``urlize`` filters
    are used."""
    filter_func = settings.COMMENT_FILTER
    if not filter_func:
        def filter_func(s):
            return linebreaksbr(urlize(s, autoescape=True), autoescape=True)
    elif not callable(filter_func):
        filter_func = import_dotted_path(filter_func)
    return filter_func(comment_text)

def saved(name,
          source='running',
          user=None,
          group=None,
          mode=None,
          attrs=None,
          makedirs=False,
          dir_mode=None,
          replace=True,
          backup='',
          show_changes=True,
          create=True,
          tmp_dir='',
          tmp_ext='',
          encoding=None,
          encoding_errors='strict',
          allow_empty=False,
          follow_symlinks=True,
          check_cmd=None,
          win_owner=None,
          win_perms=None,
          win_deny_perms=None,
          win_inheritance=True,
          win_perms_reset=False,
          **kwargs):
    '''
    .. versionadded:: 2019.2.0

    Save the configuration to a file on the local file system.

    name
        Absolute path to file where to save the configuration.
        To push the files to the Master, use
        :mod:`cp.push <salt.modules.cp.push>` Execution function.

    source: ``running``
        The configuration source. Choose from: ``running``, ``candidate``,
        ``startup``. Default: ``running``.

    user
        The user to own the file, this defaults to the user salt is running as
        on the minion

    group
        The group ownership set for the file, this defaults to the group salt
        is running as on the minion. On Windows, this is ignored

    mode
        The permissions to set on this file, e.g. ``644``, ``0775``, or
        ``4664``.
        The default mode for new files and directories corresponds to the
        umask of the salt process. The mode of existing files and directories
        will only be changed if ``mode`` is specified.

        .. note::
            This option is **not** supported on Windows.

    attrs
        The attributes to have on this file, e.g. ``a``, ``i``. The attributes
        can be any or a combination of the following characters:
        ``aAcCdDeijPsStTu``.

        .. note::
            This option is **not** supported on Windows.

    makedirs: ``False``
        If set to ``True``, then the parent directories will be created to
        facilitate the creation of the named file. If ``False``, and the parent
        directory of the destination file doesn't exist, the state will fail.

    dir_mode
        If directories are to be created, passing this option specifies the
        permissions for those directories. If this is not set, directories
        will be assigned permissions by adding the execute bit to the mode of
        the files.
        The default mode for new files and directories corresponds to the umask
        of the salt process. For existing files and directories it's not enforced.

    replace: ``True``
        If set to ``False`` and the file already exists, the file will not be
        modified even if changes would otherwise be made. Permissions and
        ownership will still be enforced, however.

    backup
        Overrides the default backup mode for this specific file. See
        :ref:`backup_mode documentation <file-state-backups>` for more details.

    show_changes: ``True``
        Output a unified diff of the old file and the new file. If ``False``
        return a boolean if any changes were made.

    create: ``True``
        If set to ``False``, then the file will only be managed if the file
        already exists on the system.

    encoding
        If specified, then the specified encoding will be used. Otherwise, the
        file will be encoded using the system locale (usually UTF-8). See
        https://docs.python.org/3/library/codecs.html#standard-encodings for
        the list of available encodings.

    encoding_errors: ``'strict'``
        Error encoding scheme. Default is ```'strict'```.
        See https://docs.python.org/2/library/codecs.html#codec-base-classes
        for the list of available schemes.

    allow_empty: ``True``
        If set to ``False``, then the state will fail if the contents specified
        by ``contents_pillar`` or ``contents_grains`` are empty.

    follow_symlinks: ``True``
        If the desired path is a symlink follow it and make changes to the
        file to which the symlink points.

    check_cmd
        The specified command will be run with an appended argument of a
        *temporary* file containing the new managed contents. If the command
        exits with a zero status the new managed contents will be written to
        the managed destination. If the command exits with a nonzero exit
        code, the state will fail and no changes will be made to the file.

    tmp_dir
        Directory for temp file created by ``check_cmd``. Useful for checkers
        dependent on config file location (e.g. daemons restricted to their
        own config directories by an apparmor profile).

    tmp_ext
        Suffix for temp file created by ``check_cmd``. Useful for checkers
        dependent on config file extension (e.g. the init-checkconf upstart
        config checker).

    win_owner: ``None``
        The owner of the directory. If this is not passed, user will be used. If
        user is not passed, the account under which Salt is running will be
        used.

    win_perms: ``None``
        A dictionary containing permissions to grant and their propagation. For
        example: ``{'Administrators': {'perms': 'full_control'}}`` Can be a
        single basic perm or a list of advanced perms. ``perms`` must be
        specified. ``applies_to`` does not apply to file objects.

    win_deny_perms: ``None``
        A dictionary containing permissions to deny and their propagation. For
        example: ``{'Administrators': {'perms': 'full_control'}}`` Can be a
        single basic perm or a list of advanced perms. ``perms`` must be
        specified. ``applies_to`` does not apply to file objects.

    win_inheritance: ``True``
        True to inherit permissions from the parent directory, False not to
        inherit permission.

    win_perms_reset: ``False``
        If ``True`` the existing DACL will be cleared and replaced with the
        settings defined in this function. If ``False``, new entries will be
        appended to the existing DACL. Default is ``False``.

    State SLS Example:

    .. code-block:: yaml

        /var/backups/{{ opts.id }}/{{ salt.status.time('%s') }}.cfg:
          netconfig.saved:
            - source: running
            - makedirs: true

    The state SLS above would create a backup config grouping the files by the
    Minion ID, in chronological files. For example, if the state is executed
    on the 3rd of August 2018, at 5:15PM, on the Minion ``core1.lon01``, the
    configuration would be saved in the file:
    ``/var/backups/core01.lon01/1533316558.cfg``
    '''
    ret = __salt__['net.config'](source=source)
    if not ret['result']:
        return {
            'name': name,
            'changes': {},
            'result': False,
            'comment': ret['comment']
        }
    return __states__['file.managed'](name,
                                      user=user,
                                      group=group,
                                      mode=mode,
                                      attrs=attrs,
                                      makedirs=makedirs,
                                      dir_mode=dir_mode,
                                      replace=replace,
                                      backup=backup,
                                      show_changes=show_changes,
                                      create=create,
                                      contents=ret['out'][source],
                                      tmp_dir=tmp_dir,
                                      tmp_ext=tmp_ext,
                                      encoding=encoding,
                                      encoding_errors=encoding_errors,
                                      allow_empty=allow_empty,
                                      follow_symlinks=follow_symlinks,
                                      check_cmd=check_cmd,
                                      win_owner=win_owner,
                                      win_perms=win_perms,
                                      win_deny_perms=win_deny_perms,
                                      win_inheritance=win_inheritance,
                                      win_perms_reset=win_perms_reset,
                                      **kwargs)

def encode_chunk(dataframe):
    """Return a file-like object of CSV-encoded rows.

    Args:
        dataframe (pandas.DataFrame): A chunk of a dataframe to encode"""
    csv_buffer = six.StringIO()
    dataframe.to_csv(csv_buffer,
                     index=False,
                     header=False,
                     encoding="utf-8",
                     float_format="%.15g",
                     date_format="%Y-%m-%d %H:%M:%S.%f")
    # Convert to a BytesIO buffer so that unicode text is properly handled.
    # See: https://github.com/pydata/pandas-gbq/issues/106
    body = csv_buffer.getvalue()
    if isinstance(body, bytes):
        body = body.decode("utf-8")
    body = body.encode("utf-8")
    return six.BytesIO(body)

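# Usage sketch: encode a small frame and read the bytes back (pandas and six
# assumed available, matching the function's own imports).
import pandas as pd

chunk = pd.DataFrame({"a": [1.5, 2.0], "b": ["x", "y"]})
print(encode_chunk(chunk).read())  # b'1.5,x\n2,y\n'
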
def to_dict(self):
    """Converts the set of parameters into a dict"""
    return dict((parameter.name, parameter.value) for parameter in self.values())

def by_group(self):  # pragma: no cover
    """Display group membership sorted by group.

    Returns:
        Array with a dictionary of group membership.
        For example: {'testgroup': ['test.user', 'test.user2']}"""
    group_membership = {}
    for record in self.__get_groups_with_membership():
        group_membership[record.cn.value] = [i for i in record.memberUid.values]
    return group_membership

def cycle_string(key_arn, source_plaintext, botocore_session=None):
    """Encrypts and then decrypts a string under a KMS customer master key (CMK).

    :param str key_arn: Amazon Resource Name (ARN) of the KMS CMK
    :param bytes source_plaintext: Data to encrypt
    :param botocore_session: existing botocore session instance
    :type botocore_session: botocore.session.Session"""
    # Create a KMS master key provider
    kms_kwargs = dict(key_ids=[key_arn])
    if botocore_session is not None:
        kms_kwargs["botocore_session"] = botocore_session
    master_key_provider = aws_encryption_sdk.KMSMasterKeyProvider(**kms_kwargs)
    # Encrypt the plaintext source data
    ciphertext, encryptor_header = aws_encryption_sdk.encrypt(
        source=source_plaintext, key_provider=master_key_provider)
    # Decrypt the ciphertext
    cycled_plaintext, decrypted_header = aws_encryption_sdk.decrypt(
        source=ciphertext, key_provider=master_key_provider)
    # Verify that the "cycled" (encrypted, then decrypted) plaintext is identical to the source plaintext
    assert cycled_plaintext == source_plaintext
    # Verify that the encryption context used in the decrypt operation includes all key pairs from
    # the encrypt operation. (The SDK can add pairs, so don't require an exact match.)
    # In production, always use a meaningful encryption context. In this sample, we omit the
    # encryption context (no key pairs).
    assert all(pair in decrypted_header.encryption_context.items()
               for pair in encryptor_header.encryption_context.items())

def run(connection):
    """Ensure that we have snapshots for a given volume.

    :type connection: boto.ec2.connection.EC2Connection
    :param connection: EC2 connection object
    :returns: None"""
    volumes = volume_manager.get_watched_volumes(connection)
    for volume in volumes:
        _ensure_snapshot(connection, volume)
        _remove_old_snapshots(connection, volume)

def results(self):
    """Print results"""
    print("")
    per = int(round((float(self.cf) / (self.cf + self.cn)) * 100))
    # use inclusive boundaries so per == 90 or per == 60 still picks a color
    if per >= 90:
        color = self.meta.color["GREEN"]
    elif per >= 60:
        color = self.meta.color["YELLOW"]
    else:
        color = self.meta.color["RED"]
    health = "{0}{1}%{2}".format(color, str(per), self.meta.color["ENDC"])
    self.msg.template(78)
    print("| {0}{1}{2}{3}{4}".format("Total files", " " * 7, "Not installed", " " * 40, "Health"))
    self.msg.template(78)
    print("| {0}{1}{2}{3}{4:>4}".format(self.cf, " " * (18 - len(str(self.cf))),
                                        self.cn, " " * (55 - len(str(self.cn))), health))
    self.msg.template(78)

def get_what_txt(self):
    """Overrides the base behaviour defined in ValidationError in order to add details about the class field.

    :return:"""
    return 'field [{field}] for class [{clazz}]'.format(
        field=self.get_variable_str(),
        clazz=self.validator.get_validated_class_display_name())

def host_diskpool_get_info(self, disk_pool=None):
    """Retrieve diskpool information.

    :param str disk_pool: the disk pool info. It uses ':' to separate
        disk pool type and pool name, e.g. "ECKD:eckdpool" or "FBA:fbapool"
    :returns: Dictionary describing disk pool usage info"""
    # disk_pool must be assigned. disk_pool defaults to None because
    # it is more convenient for users to just type the function name when
    # they want to get the disk pool info of CONF.zvm.disk_pool
    if disk_pool is None:
        disk_pool = CONF.zvm.disk_pool
    if ':' not in disk_pool:
        msg = ('Invalid input parameter disk_pool, expect ":" in '
               'disk_pool, eg. ECKD:eckdpool')
        LOG.error(msg)
        raise exception.SDKInvalidInputFormat(msg)
    diskpool_type = disk_pool.split(':')[0].upper()
    diskpool_name = disk_pool.split(':')[1]
    if diskpool_type not in ('ECKD', 'FBA'):
        msg = ('Invalid disk pool type found in disk_pool, expect '
               'disk_pool like ECKD:eckdpool or FBA:fbapool')
        LOG.error(msg)
        raise exception.SDKInvalidInputFormat(msg)
    action = "get information of disk pool: '%s'" % disk_pool
    with zvmutils.log_and_reraise_sdkbase_error(action):
        return self._hostops.diskpool_get_info(diskpool_name)

def _hash_to_sh_address(script_hash, witness=False, cashaddr=True):
    '''bytes, bool, bool -> str

    cashaddrs are preferred where possible,
    but cashaddr is ignored in most cases.
    Is there a better way to structure this?'''
    addr_bytes = bytearray()
    if riemann.network.CASHADDR_P2SH is not None and cashaddr:
        addr_bytes.extend(riemann.network.CASHADDR_P2SH)
        addr_bytes.extend(script_hash)
        return riemann.network.CASHADDR_ENCODER.encode(addr_bytes)
    if witness:
        addr_bytes.extend(riemann.network.P2WSH_PREFIX)
        addr_bytes.extend(script_hash)
        return riemann.network.SEGWIT_ENCODER.encode(addr_bytes)
    else:
        addr_bytes.extend(riemann.network.P2SH_PREFIX)
        addr_bytes.extend(script_hash)
        return riemann.network.LEGACY_ENCODER.encode(addr_bytes)

def check_stat(self, path):
    """Checks logfile stat information for excluding files not in the datetime period.

    On Linux it's possible to check only the modification time, because file
    creation info is not available, so only older files can be excluded.
    On Unix BSD systems and Windows, information about file creation date and
    times is available, so newer files can be excluded too."""
    statinfo = os.stat(path)
    st_mtime = datetime.fromtimestamp(statinfo.st_mtime)
    if platform.system() == 'Linux':
        check = st_mtime >= self.start_dt
    else:
        st_ctime = datetime.fromtimestamp(statinfo.st_ctime)
        check = st_mtime >= self.start_dt and st_ctime <= self.end_dt
    if not check:
        logger.info("file %r not in datetime period!", path)
    return check

def setDocuments(self, documenting_pid, documented_pid):
    """Add a CiTO, the Citation Typing Ontology, triple asserting that
    ``documenting_pid`` documents ``documented_pid``.

    Adds assertion: ``documenting_pid cito:documents documented_pid``

    Args:
        documenting_pid: str
            PID of a Science Object that documents ``documented_pid``.
        documented_pid: str
            PID of a Science Object that is documented by ``documenting_pid``."""
    self._check_initialized()
    documenting_id = self.getObjectByPid(documenting_pid)
    documented_id = self.getObjectByPid(documented_pid)
    self.add((documenting_id, CITO.documents, documented_id))

def split_shard(self, project_name, logstore_name, shardId, split_hash):
    """Split a readwrite shard into two shards.

    An unsuccessful operation will cause a LogException.

    :type project_name: string
    :param project_name: the Project name

    :type logstore_name: string
    :param logstore_name: the logstore name

    :type shardId: int
    :param shardId: the shard id

    :type split_hash: string
    :param split_hash: the internal hash between the shard begin and end hash

    :return: ListShardResponse

    :raise: LogException"""
    headers = {}
    params = {"action": "split", "key": split_hash}
    resource = "/logstores/" + logstore_name + "/shards/" + str(shardId)
    (resp, header) = self._send("POST", project_name, None, resource, params, headers)
    return ListShardResponse(resp, header)

def vector_generate(start_pt, end_pt, normalize=False):
    """Generates a vector from 2 input points.

    :param start_pt: start point of the vector
    :type start_pt: list, tuple
    :param end_pt: end point of the vector
    :type end_pt: list, tuple
    :param normalize: if True, the generated vector is normalized
    :type normalize: bool
    :return: a vector from start_pt to end_pt
    :rtype: list"""
    try:
        if start_pt is None or len(start_pt) == 0 or end_pt is None or len(end_pt) == 0:
            raise ValueError("Input points cannot be empty")
    except TypeError as e:
        print("An error occurred: {}".format(e.args[-1]))
        raise TypeError("Input must be a list or tuple")
    except Exception:
        raise
    ret_vec = []
    for sp, ep in zip(start_pt, end_pt):
        ret_vec.append(ep - sp)
    if normalize:
        ret_vec = vector_normalize(ret_vec)
    return ret_vec

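# Usage sketch: the vector from (1, 0, 0) to (4, 4, 0) is (3, 4, 0); with
# normalize=True it would be scaled to unit length by vector_normalize.
print(vector_generate((1.0, 0.0, 0.0), (4.0, 4.0, 0.0)))  # [3.0, 4.0, 0.0]
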
def xgboost_installed():
    """Checks that *xgboost* is available."""
    try:
        import xgboost
    except ImportError:
        return False
    from xgboost.core import _LIB
    try:
        _LIB.XGBoosterDumpModelEx
    except AttributeError:
        # The version is not recent enough even though it is version 0.6.
        # You need to install xgboost from github and not from pypi.
        return False
    from xgboost import __version__
    vers = LooseVersion(__version__)
    allowed = LooseVersion('0.7')
    if vers < allowed:
        warnings.warn('The converter works for xgboost >= 0.7. Earlier versions might not.')
    return True

def get_baremetal_physnet(self, context):
    """Returns dictionary which contains mac to hostname mapping"""
    port = context.current
    host_id = context.host
    cmd = ['show network physical-topology hosts']
    try:
        response = self._run_eos_cmds(cmd)
        binding_profile = port.get(portbindings.PROFILE, {})
        link_info = binding_profile.get('local_link_information', [])
        for link in link_info:
            switch_id = link.get('switch_id')
            for host in response[0]['hosts'].values():
                if switch_id == host['name']:
                    physnet = host['hostname']
                    LOG.debug("get_physical_network: Physical Network for "
                              "%(host)s is %(physnet)s",
                              {'host': host_id, 'physnet': physnet})
                    return physnet
        LOG.debug("Physical network not found for %(host)s", {'host': host_id})
    except Exception as exc:
        LOG.error(_LE('command %(cmd)s failed with %(exc)s'),
                  {'cmd': cmd, 'exc': exc})
    return None

def currentText(self):
    """Returns the text that is currently available. If the user has set
    standard text, that is returned; otherwise the hint is returned.

    :return <str>"""
    text = nativestring(self.text())
    if text or not self.useHintValue():
        return text
    return self.hint()

def write_input(self, output_dir, make_dir_if_not_present=True, write_cif=False,
                write_path_cif=False, write_endpoint_inputs=False):
    """NEB inputs have a special directory structure where inputs are in 00,
    01, 02, ....

    Args:
        output_dir (str): Directory to output the VASP input files
        make_dir_if_not_present (bool): Set to True if you want the
            directory (and the whole path) to be created if it is not
            present.
        write_cif (bool): If true, writes a cif along with each POSCAR.
        write_path_cif (bool): If true, writes a cif for each image.
        write_endpoint_inputs (bool): If true, writes input files for
            running endpoint calculations."""
    output_dir = Path(output_dir)
    if make_dir_if_not_present and not output_dir.exists():
        output_dir.mkdir(parents=True)
    self.incar.write_file(str(output_dir / 'INCAR'))
    self.kpoints.write_file(str(output_dir / 'KPOINTS'))
    self.potcar.write_file(str(output_dir / 'POTCAR'))
    for i, p in enumerate(self.poscars):
        d = output_dir / str(i).zfill(2)
        if not d.exists():
            d.mkdir(parents=True)
        p.write_file(str(d / 'POSCAR'))
        if write_cif:
            p.structure.to(filename=str(d / '{}.cif'.format(i)))
    if write_endpoint_inputs:
        end_point_param = MITRelaxSet(self.structures[0],
                                      user_incar_settings=self.user_incar_settings)
        for image in ['00', str(len(self.structures) - 1).zfill(2)]:
            end_point_param.incar.write_file(str(output_dir / image / 'INCAR'))
            end_point_param.kpoints.write_file(str(output_dir / image / 'KPOINTS'))
            end_point_param.potcar.write_file(str(output_dir / image / 'POTCAR'))
    if write_path_cif:
        sites = set()
        lattice = self.structures[0].lattice
        for site in chain(*(s.sites for s in self.structures)):
            sites.add(PeriodicSite(site.species, site.frac_coords, lattice))
        nebpath = Structure.from_sites(sorted(sites))
        nebpath.to(filename=str(output_dir / 'path.cif'))

def sent_tokenize(self, text, **kwargs):
    """Returns a list of sentences.

    Each sentence is a space-separated string of tokens (words).
    Handles common cases of abbreviations (e.g., etc., ...).
    Punctuation marks are split from other words. Periods (or ?!) mark the end of a sentence.
    Headings without an ending period are inferred by line breaks."""
    sentences = find_sentences(text,
                               punctuation=kwargs.get("punctuation", PUNCTUATION),
                               abbreviations=kwargs.get("abbreviations", ABBREVIATIONS_DE),
                               replace=kwargs.get("replace", replacements),
                               linebreak=r"\n{2,}")
    return sentences

def get_fields(model, fields=None):
    """Assigns fields for model."""
    include = [f.strip() for f in fields.split(',')] if fields else None
    return utils.get_fields(model, include)

def RS(S, second_pass=False):
    """Compute a C/F splitting using Ruge-Stuben coarsening

    Parameters
    ----------
    S : csr_matrix
        Strength of connection matrix indicating the strength between nodes i
        and j (S_ij)
    second_pass : bool, default False
        Perform second pass of classical AMG coarsening. Can be important for
        classical AMG interpolation. Typically not done in parallel (e.g. Hypre).

    Returns
    -------
    splitting : ndarray
        Array of length of S of ones (coarse) and zeros (fine)

    Examples
    --------
    >>> from pyamg.gallery import poisson
    >>> from pyamg.classical import RS
    >>> S = poisson((7,), format='csr')  # 1D mesh with 7 vertices
    >>> splitting = RS(S)

    See Also
    --------
    amg_core.rs_cf_splitting

    References
    ----------
    .. [1] Ruge JW, Stuben K. "Algebraic multigrid (AMG)"
       In Multigrid Methods, McCormick SF (ed.),
       Frontiers in Applied Mathematics, vol. 3.
       SIAM: Philadelphia, PA, 1987; 73-130.
    """
    if not isspmatrix_csr(S):
        raise TypeError('expected csr_matrix')
    S = remove_diagonal(S)
    # transpose S for efficient column access
    T = S.T.tocsr()
    splitting = np.empty(S.shape[0], dtype='intc')
    influence = np.zeros((S.shape[0],), dtype='intc')
    amg_core.rs_cf_splitting(S.shape[0], S.indptr, S.indices,
                             T.indptr, T.indices, influence, splitting)
    if second_pass:
        amg_core.rs_cf_splitting_pass2(S.shape[0], S.indptr, S.indices, splitting)
    return splitting

def get_user(self, identified_with, identifier, req, resp, resource, uri_kwargs):
    """Return default user object."""
    return self.user

def _db_install(self, db_name):
    """Install nipap database schema"""
    self._logger.info("Installing NIPAP database schemas into db")
    self._execute(db_schema.ip_net % (db_name))
    self._execute(db_schema.functions)
    self._execute(db_schema.triggers)

def submit_msql_object_query ( object_query , client = None ) :
"""Submit ` object _ query ` to MemberSuite , returning
. models . MemberSuiteObjects .
So this is a converter from MSQL to . models . MemberSuiteObjects .
Returns query results as a list of MemberSuiteObjects .""" | client = client or get_new_client ( )
if not client . session_id :
client . request_session ( )
result = client . execute_object_query ( object_query )
execute_msql_result = result [ "body" ] [ "ExecuteMSQLResult" ]
membersuite_object_list = [ ]
if execute_msql_result [ "Success" ] :
result_value = execute_msql_result [ "ResultValue" ]
if result_value [ "ObjectSearchResult" ] [ "Objects" ] : # Multiple results .
membersuite_object_list = [ ]
for obj in ( result_value [ "ObjectSearchResult" ] [ "Objects" ] [ "MemberSuiteObject" ] ) :
membersuite_object = membersuite_object_factory ( obj )
membersuite_object_list . append ( membersuite_object )
elif result_value [ "SingleObject" ] [ "ClassType" ] : # Only one result .
membersuite_object = membersuite_object_factory ( execute_msql_result [ "ResultValue" ] [ "SingleObject" ] )
membersuite_object_list . append ( membersuite_object )
elif ( result_value [ "ObjectSearchResult" ] [ "Objects" ] is None and result_value [ "SingleObject" ] [ "ClassType" ] is None ) :
raise NoResultsError ( result = execute_msql_result )
return membersuite_object_list
else : # @ TODO Fix - exposing only the first of possibly many errors here .
raise ExecuteMSQLError ( result = execute_msql_result ) |
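A hypothetical call site, assuming a configured MemberSuite client from the same package; the query string and field names are illustrative:

client = get_new_client()                      # assumed helper from this module
objects = submit_msql_object_query(
    "SELECT Objects() FROM Individual WHERE LastName = 'Smith'",
    client=client,
)
for obj in objects:
    print(obj)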
def _apicall ( self , method , ** params ) :
"""Call an API method and return response data . For more info , see :
https : / / ccp . netcup . net / run / webservice / servers / endpoint""" | LOGGER . debug ( '%s(%r)' , method , params )
auth = { 'customernumber' : self . _get_provider_option ( 'auth_customer_id' ) , 'apikey' : self . _get_provider_option ( 'auth_api_key' ) , }
if method == 'login' :
auth [ 'apipassword' ] = self . _get_provider_option ( 'auth_api_password' )
else :
auth [ 'apisessionid' ] = self . api_session_id
if not all ( auth . values ( ) ) :
raise Exception ( 'No valid authentication mechanism found' )
data = self . _request ( 'POST' , url = '' , data = { 'action' : method , 'param' : dict ( params , ** auth ) , } )
if data [ 'status' ] != 'success' :
raise Exception ( "{} ({})" . format ( data [ 'longmessage' ] , data [ 'statuscode' ] ) )
return data . get ( 'responsedata' , { } ) |
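A sketch of the session handshake this wrapper implies, per the netcup endpoint docs referenced above; `provider` stands in for an instance of the surrounding class and the action names are assumptions:

session = provider._apicall('login')              # sends apipassword, returns apisessionid
provider.api_session_id = session['apisessionid']
records = provider._apicall('infoDnsRecords', domainname='example.com')
provider._apicall('logout')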
def delete_persistent_data ( role , zk_node ) :
"""Deletes any persistent data associated with the specified role , and zk node .
: param role : the mesos role to delete , or None to omit this
: type role : str
: param zk _ node : the zookeeper node to be deleted , or None to skip this deletion
: type zk _ node : str""" | if role :
destroy_volumes ( role )
unreserve_resources ( role )
if zk_node :
delete_zk_node ( zk_node ) |
def snake_case_backend_name ( self ) :
"""CamelCase - > camel _ case""" | s1 = re . sub ( '(.)([A-Z][a-z]+)' , r'\1_\2' , type ( self ) . __name__ )
return re . sub ( '([a-z0-9])([A-Z])' , r'\1_\2' , s1 ) . lower ( ) |
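The two-pass regex generalizes beyond backend class names; a self-contained sketch of the same transformation:

import re

def to_snake_case(name):
    # First pass: insert "_" before a capitalized word that follows any char.
    s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
    # Second pass: split a lowercase/digit followed by a capital (handles acronyms).
    return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()

assert to_snake_case('CamelCase') == 'camel_case'
assert to_snake_case('HTTPServerBackend') == 'http_server_backend'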
def find_resource ( r , * , pkg = 'cyther' ) :
"""Finds a given cyther resource in the ' test ' subdirectory in
' cyther ' package""" | file_path = pkg_resources . resource_filename ( pkg , os . path . join ( 'test' , r ) )
if not os . path . isfile ( file_path ) :
msg = "Resource '{}' does not exist"
raise FileNotFoundError ( msg . format ( file_path ) )
return file_path |
def _add_ticks ( ax : Axes , h1 : Histogram1D , kwargs : dict ) :
"""Customize ticks for an axis ( 1D histogram ) .
Parameters
ticks : { " center " , " edge " } , optional
Position of the ticks
tick _ handler : Callable [ [ Histogram1D , float , float ] , Tuple [ List [ float ] , List [ str ] ] ]""" | ticks = kwargs . pop ( "ticks" , None )
tick_handler = kwargs . pop ( "tick_handler" , None )
if tick_handler :
if ticks :
raise ValueError ( "Cannot specify both ticks and tick_handler" )
ticks , labels = tick_handler ( h1 , * ax . get_xlim ( ) )
ax . set_xticks ( ticks )
ax . set_xticklabels ( labels )
if ticks == "center" :
ax . set_xticks ( h1 . bin_centers )
if ticks == "edge" :
ax . set_xticks ( h1 . bin_left_edges ) |
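A sketch of a custom ` tick _ handler ` , assuming a physt-style ` Histogram1D ` exposing ` bin _ left _ edges ` ; the callback receives the histogram and current x-limits and returns tick positions and labels:

def every_other_edge(h1, xmin, xmax):
    # Label every other bin edge that falls inside the visible range.
    edges = [e for e in h1.bin_left_edges if xmin <= e <= xmax][::2]
    return edges, ["%.2f" % e for e in edges]

# plot(h1, tick_handler=every_other_edge)   # hypothetical call site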
def _read_para_hip_transport_mode ( self , code , cbit , clen , * , desc , length , version ) :
"""Read HIP HIP _ TRANSPORT _ MODE parameter .
Structure of HIP HIP _ TRANSPORT _ MODE parameter [ RFC 6261 ] :
0                   1                   2                   3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|             Type              |            Length             |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|             Port              |          Mode ID #1           |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|          Mode ID #2           |          Mode ID #3           |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|          Mode ID #n           |            Padding            |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
Octets Bits Name Description
0 0 hip _ transport _ mode . type Parameter Type
1 15 hip _ transport _ mode . critical Critical Bit
2 16 hip _ transport _ mode . length Length of Contents
4 32 hip _ transport _ mode . port Port
6 48 hip _ transport _ mode . id Mode ID
? ? - Padding""" | if clen % 2 != 0 :
raise ProtocolError ( f'HIPv{version}: [ParamNo {code}] invalid format' )
_port = self . _read_unpack ( 2 )
_mdid = list ( )
for _ in range ( ( clen - 2 ) // 2 ) :
_mdid . append ( _TP_MODE_ID . get ( self . _read_unpack ( 2 ) , 'Unassigned' ) )
hip_transport_mode = dict ( type = desc , critical = cbit , length = clen , port = _port , id = tuple ( _mdid ) , )
_plen = length - clen
if _plen :
self . _read_fileng ( _plen )
return hip_transport_mode |
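A standalone sketch of the same wire layout using ` struct ` , assuming ` payload ` holds the parameter contents after the 4-byte Type/Length header (a 2-byte port followed by 2-byte mode IDs):

import struct

def parse_transport_mode(payload: bytes):
    if len(payload) % 2 != 0:
        raise ValueError('invalid format')
    port = struct.unpack('!H', payload[:2])[0]                      # network byte order
    mode_ids = struct.unpack('!%dH' % ((len(payload) - 2) // 2), payload[2:])
    return port, mode_ids

print(parse_transport_mode(bytes.fromhex('26260001')))   # (9766, (1,))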
def get_minions ( ) :
'''Return a list of minions''' | log . debug ( 'sdstack_etcd returner <get_minions> called' )
ret = [ ]
client , path = _get_conn ( __opts__ )
items = client . get ( '/' . join ( ( path , 'minions' ) ) )
for item in items . children :
comps = str ( item . key ) . split ( '/' )
ret . append ( comps [ - 1 ] )
return ret |
def align ( fastq_file , pair_file , ref_file , names , align_dir , data , extra_args = None ) :
"""Alignment with bowtie2.""" | config = data [ "config" ]
analysis_config = ANALYSIS . get ( data [ "analysis" ] . lower ( ) )
assert analysis_config , "Analysis %s is not supported by bowtie2" % ( data [ "analysis" ] )
out_file = os . path . join ( align_dir , "{0}-sort.bam" . format ( dd . get_sample_name ( data ) ) )
if data . get ( "align_split" ) :
final_file = out_file
out_file , data = alignprep . setup_combine ( final_file , data )
fastq_file , pair_file = alignprep . split_namedpipe_cls ( fastq_file , pair_file , data )
else :
final_file = None
if not utils . file_exists ( out_file ) and ( final_file is None or not utils . file_exists ( final_file ) ) :
with postalign . tobam_cl ( data , out_file , pair_file is not None ) as ( tobam_cl , tx_out_file ) :
cl = [ config_utils . get_program ( "bowtie2" , config ) ]
cl += extra_args if extra_args is not None else [ ]
cl += [ "-q" , "-x" , ref_file ]
cl += analysis_config . get ( "params" , [ ] )
if pair_file :
cl += [ "-1" , fastq_file , "-2" , pair_file ]
else :
cl += [ "-U" , fastq_file ]
if names and "rg" in names :
cl += [ "--rg-id" , names [ "rg" ] ]
for key , tag in [ ( "sample" , "SM" ) , ( "pl" , "PL" ) , ( "pu" , "PU" ) , ( "lb" , "LB" ) ] :
if names . get ( key ) :
cl += [ "--rg" , "%s:%s" % ( tag , names [ key ] ) ]
cl += _bowtie2_args_from_config ( config , cl )
cl = [ str ( i ) for i in cl ]
cmd = "unset JAVA_HOME && " + " " . join ( cl ) + " | " + tobam_cl
do . run ( cmd , "Aligning %s and %s with Bowtie2." % ( fastq_file , pair_file ) )
return out_file |
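A minimal sketch of the flag assembly for a paired-end run, mirroring the branches above (file names, read-group values, and the --sensitive preset are hypothetical stand-ins for ` analysis _ config ` params):

cl = ["bowtie2", "-q", "-x", "ref/genome"]
cl += ["--sensitive"]                                      # analysis_config params
cl += ["-1", "sample_R1.fq.gz", "-2", "sample_R2.fq.gz"]   # paired-end inputs
cl += ["--rg-id", "sample1", "--rg", "SM:sample1", "--rg", "PL:illumina"]
print(" ".join(cl))   # the real function pipes this into the tobam_cl sort-to-BAM step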
def _GetElementDataTypeDefinition ( self , data_type_definition ) :
"""Retrieves the element data type definition .
Args :
data _ type _ definition ( DataTypeDefinition ) : data type definition .
Returns :
DataTypeDefinition : element data type definition .
Raises :
FormatError : if the element data type cannot be determined from the data
type definition .""" | if not data_type_definition :
raise errors . FormatError ( 'Missing data type definition' )
element_data_type_definition = getattr ( data_type_definition , 'element_data_type_definition' , None )
if not element_data_type_definition :
raise errors . FormatError ( 'Invalid data type definition missing element' )
return element_data_type_definition |
def refresh_committed_offsets_if_needed ( self ) :
"""Fetch committed offsets for assigned partitions .""" | if self . _subscription . needs_fetch_committed_offsets :
offsets = self . fetch_committed_offsets ( self . _subscription . assigned_partitions ( ) )
for partition , offset in six . iteritems ( offsets ) : # verify assignment is still active
if self . _subscription . is_assigned ( partition ) :
self . _subscription . assignment [ partition ] . committed = offset . offset
self . _subscription . needs_fetch_committed_offsets = False |
def _get_approved_attributes ( self , idp , idp_policy , sp_entity_id , state ) :
"""Returns a list of approved attributes
: type idp : saml . server . Server
: type idp _ policy : saml2 . assertion . Policy
: type sp _ entity _ id : str
: type state : satosa . state . State
: rtype : list [ str ]
: param idp : The saml frontend idp server
: param idp _ policy : The idp policy
: param sp _ entity _ id : The requesting sp entity id
: param state : The current state
: return : A list containing approved attributes""" | name_format = idp_policy . get_name_form ( sp_entity_id )
attrconvs = idp . config . attribute_converters
idp_policy . acs = attrconvs
attribute_filter = [ ]
for aconv in attrconvs :
if aconv . name_format == name_format :
all_attributes = { v : None for v in aconv . _fro . values ( ) }
attribute_filter = list ( idp_policy . restrict ( all_attributes , sp_entity_id , idp . metadata ) . keys ( ) )
break
attribute_filter = self . converter . to_internal_filter ( self . attribute_profile , attribute_filter )
satosa_logging ( logger , logging . DEBUG , "Filter: %s" % attribute_filter , state )
return attribute_filter |
def is_valid_package_name ( name , raise_error = False ) :
"""Test the validity of a package name string .
Args :
name ( str ) : Name to test .
raise _ error ( bool ) : If True , raise an exception on failure
Returns :
bool .""" | is_valid = PACKAGE_NAME_REGEX . match ( name )
if raise_error and not is_valid :
raise PackageRequestError ( "Not a valid package name: %r" % name )
return is_valid |
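` PACKAGE _ NAME _ REGEX ` is defined elsewhere in the package; a plausible stand-in for illustration only:

import re

PACKAGE_NAME_REGEX = re.compile(r'^[a-zA-Z_][a-zA-Z0-9_]*$')   # assumed shape

assert bool(PACKAGE_NAME_REGEX.match('my_package'))
assert not PACKAGE_NAME_REGEX.match('2bad-name')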
def update_result_ctrl ( self , event ) :
"""Update event result following execution by main window""" | # Check to see if macro window still exists
if not self :
return
printLen = 0
self . result_ctrl . SetValue ( '' )
if hasattr ( event , 'msg' ) : # Output of script ( from print statements , for example )
self . result_ctrl . AppendText ( event . msg )
printLen = len ( event . msg )
if hasattr ( event , 'err' ) : # Error messages
errLen = len ( event . err )
errStyle = wx . TextAttr ( wx . RED )
self . result_ctrl . AppendText ( event . err )
self . result_ctrl . SetStyle ( printLen , printLen + errLen , errStyle )
if not hasattr ( event , 'err' ) or event . err == '' : # No error passed . Close dialog if user requested it .
if self . _ok_pressed :
self . Destroy ( )
self . _ok_pressed = False |
def install ( config_file , store , overwrite = False , hooks = False , hook_type = 'pre-commit' , skip_on_missing_conf = False , ) :
"""Install the pre - commit hooks .""" | if cmd_output ( 'git' , 'config' , 'core.hooksPath' , retcode = None ) [ 1 ] . strip ( ) :
logger . error ( 'Cowardly refusing to install hooks with `core.hooksPath` set.\n' 'hint: `git config --unset-all core.hooksPath`' , )
return 1
hook_path , legacy_path = _hook_paths ( hook_type )
mkdirp ( os . path . dirname ( hook_path ) )
# If we have an existing hook , move it to pre - commit . legacy
if os . path . lexists ( hook_path ) and not is_our_script ( hook_path ) :
shutil . move ( hook_path , legacy_path )
# If we specify overwrite , we simply delete the legacy file
if overwrite and os . path . exists ( legacy_path ) :
os . remove ( legacy_path )
elif os . path . exists ( legacy_path ) :
output . write_line ( 'Running in migration mode with existing hooks at {}\n' 'Use -f to use only pre-commit.' . format ( legacy_path ) , )
params = { 'CONFIG' : config_file , 'HOOK_TYPE' : hook_type , 'INSTALL_PYTHON' : sys . executable , 'SKIP_ON_MISSING_CONFIG' : skip_on_missing_conf , }
with io . open ( hook_path , 'w' ) as hook_file :
contents = resource_text ( 'hook-tmpl' )
before , rest = contents . split ( TEMPLATE_START )
to_template , after = rest . split ( TEMPLATE_END )
before = before . replace ( '#!/usr/bin/env python3' , shebang ( ) )
hook_file . write ( before + TEMPLATE_START )
for line in to_template . splitlines ( ) :
var = line . split ( ) [ 0 ]
hook_file . write ( '{} = {!r}\n' . format ( var , params [ var ] ) )
hook_file . write ( TEMPLATE_END + after )
make_executable ( hook_path )
output . write_line ( 'pre-commit installed at {}' . format ( hook_path ) )
# If they requested we install all of the hooks , do so .
if hooks :
install_hooks ( config_file , store )
return 0 |
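The templating step above rewrites the assignment lines between the two markers; a reduced sketch of that mechanism (the marker strings and template line are assumptions, not pre-commit's exact values):

TEMPLATE_START = '# start templated\n'
TEMPLATE_END = '# end templated\n'
contents = '#!/bin/sh\n' + TEMPLATE_START + 'CONFIG = None\n' + TEMPLATE_END + 'exec hook\n'
params = {'CONFIG': '.pre-commit-config.yaml'}

before, rest = contents.split(TEMPLATE_START)
to_template, after = rest.split(TEMPLATE_END)
filled = before + TEMPLATE_START
for line in to_template.splitlines():
    var = line.split()[0]                          # variable name on each templated line
    filled += '{} = {!r}\n'.format(var, params[var])
filled += TEMPLATE_END + after
print(filled)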
def ecef2enuv ( u : float , v : float , w : float , lat0 : float , lon0 : float , deg : bool = True ) -> Tuple [ float , float , float ] :
"""VECTOR from observer to target ECEF = > ENU
Parameters
u : float or numpy . ndarray of float
target x ECEF coordinate ( meters )
v : float or numpy . ndarray of float
target y ECEF coordinate ( meters )
w : float or numpy . ndarray of float
target z ECEF coordinate ( meters )
lat0 : float
Observer geodetic latitude
lon0 : float
Observer geodetic longitude
deg : bool , optional
degrees input / output ( False : radians in / out )
Returns
uEast : float or numpy . ndarray of float
target east ENU coordinate ( meters )
vNorth : float or numpy . ndarray of float
target north ENU coordinate ( meters )
wUp : float or numpy . ndarray of float
target up ENU coordinate ( meters )""" | if deg :
lat0 = radians ( lat0 )
lon0 = radians ( lon0 )
t = cos ( lon0 ) * u + sin ( lon0 ) * v
uEast = - sin ( lon0 ) * u + cos ( lon0 ) * v
wUp = cos ( lat0 ) * t + sin ( lat0 ) * w
vNorth = - sin ( lat0 ) * t + cos ( lat0 ) * w
return uEast , vNorth , wUp |
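A quick sanity check against the rotation above: seen from the north pole, a vector along ECEF +Z should come out entirely "up":

e, n, u = ecef2enuv(0.0, 0.0, 1.0, lat0=90.0, lon0=0.0)
assert abs(e) < 1e-12 and abs(n) < 1e-12 and abs(u - 1.0) < 1e-12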
def geneways_action_to_indra_statement_type ( actiontype , plo ) :
"""Return INDRA Statement corresponding to Geneways action type .
Parameters
actiontype : str
The verb extracted by the Geneways processor
plo : str
A one character string designating whether Geneways classifies
this verb as a physical , logical , or other interaction
Returns
statement _ generator :
If there is no mapping to INDRA statements from this action type
the return value is None .
If there is such a mapping , statement _ generator is an anonymous
function that takes in the subject agent , object agent , and evidence ,
in that order , and returns an INDRA statement object .""" | actiontype = actiontype . lower ( )
statement_generator = None
is_direct = ( plo == 'P' )
if actiontype == 'bind' :
statement_generator = lambda substance1 , substance2 , evidence : Complex ( [ substance1 , substance2 ] , evidence = evidence )
is_direct = True
elif actiontype == 'phosphorylate' :
statement_generator = lambda substance1 , substance2 , evidence : Phosphorylation ( substance1 , substance2 , evidence = evidence )
is_direct = True
return ( statement_generator , is_direct ) |
def sort_idxs ( cls , similarities ) :
"Sorts ` similarities ` and return the indexes in pairs ordered by highest similarity ." | idxs = cls . largest_indices ( similarities , len ( similarities ) )
idxs = [ ( idxs [ 0 ] [ i ] , idxs [ 1 ] [ i ] ) for i in range ( len ( idxs [ 0 ] ) ) ]
return [ e for l in idxs for e in l ] |
def get_roles ( self ) :
"""Get the roles associated with the hosts .
Returns
dict of role - > [ host ]""" | machines = self . c_resources [ "machines" ]
result = { }
for desc in machines :
roles = utils . get_roles_as_list ( desc )
hosts = self . _denormalize ( desc )
for role in roles :
result . setdefault ( role , [ ] )
result [ role ] . extend ( hosts )
return result |
def disable_paging ( self , command = "no pager" , delay_factor = 1 ) :
"""Disable paging""" | return super ( QuantaMeshSSH , self ) . disable_paging ( command = command ) |
def compute_between_collection_interval_duration ( self , prefix ) :
"""Calculates BETWEEN - collection intervals for the current collection and measure type
and takes their mean .
: param str prefix : Prefix for the key entry in self . measures .
Negative intervals ( for overlapping clusters ) are counted as 0 seconds . Intervals are
calculated as being the difference between the ending time of the last word in a collection
and the start time of the first word in the subsequent collection .
Note that these intervals are not necessarily silences , and may include asides , filled
pauses , words from the examiner , etc .
Adds the following measures to the self . measures dictionary :
- TIMING _ ( similarity _ measure ) _ ( collection _ type ) _ between _ collection _ interval _ duration _ mean :
average interval duration separating clusters""" | durations = [ ]
# duration of each collection
for collection in self . collection_list : # Entry , with timing , in timed _ response for first word in collection
start = collection [ 0 ] . start_time
# Entry , with timing , in timed _ response for last word in collection
end = collection [ - 1 ] . end_time
durations . append ( ( start , end ) )
# calculate between - collection intervals
interstices = [ durations [ i + 1 ] [ 0 ] - durations [ i ] [ 1 ] for i in range ( len ( durations ) - 1 ) ]
# Replace negative interstices ( for overlapping clusters ) with
# interstices of duration 0
interstices = [ max ( 0 , interval ) for interval in interstices ]
self . measures [ prefix + 'between_collection_interval_duration_mean' ] = get_mean ( interstices ) if len ( interstices ) > 0 else 'NA'
if not self . quiet :
print ( )
print ( self . current_similarity_measure + " between-" + self . current_collection_type + " durations" )
table = [ ( self . current_collection_type + " 1 (start,end)" , "Interval" , self . current_collection_type + " 2 (start,end)" ) ] + [ ( str ( d1 ) , str ( i1 ) , str ( d2 ) ) for d1 , i1 , d2 in zip ( durations [ : - 1 ] , interstices , durations [ 1 : ] ) ]
print_table ( table )
print ( )
print ( "Mean " + self . current_similarity_measure + " between-" + self . current_collection_type + " duration" , self . measures [ prefix + 'between_collection_interval_duration_mean' ] )
def str_encode ( value , encoder = 'base64' ) :
'''. . versionadded : : 2014.7.0
value
The value to be encoded .
encoder : base64
The encoder to use on the subsequent string .
CLI Example :
. . code - block : : bash
salt ' * ' random . str _ encode ' I am a new string ' base64''' | if six . PY2 :
try :
out = value . encode ( encoder )
except LookupError :
raise SaltInvocationError ( 'You must specify a valid encoder' )
except AttributeError :
raise SaltInvocationError ( 'Value must be an encode-able string' )
else :
if isinstance ( value , six . string_types ) :
value = value . encode ( __salt_system_encoding__ )
if encoder == 'base64' :
try :
out = base64 . b64encode ( value )
out = out . decode ( __salt_system_encoding__ )
except TypeError :
raise SaltInvocationError ( 'Value must be an encode-able string' )
else :
try :
out = value . encode ( encoder )
except LookupError :
raise SaltInvocationError ( 'You must specify a valid encoder' )
except AttributeError :
raise SaltInvocationError ( 'Value must be an encode-able string' )
return out |
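For reference, the CLI example above resolves to a base64 encoding of the string's bytes on Python 3; a minimal equivalent:

import base64

value = 'I am a new string'
out = base64.b64encode(value.encode('utf-8')).decode('utf-8')
assert out == 'SSBhbSBhIG5ldyBzdHJpbmc='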
def is_protein_or_chemical ( agent ) :
'''Return True if the agent is a protein / protein family or chemical .''' | # Default is True if agent is None
if agent is None :
return True
dbs = set ( [ 'UP' , 'HGNC' , 'CHEBI' , 'PFAM-DEF' , 'IP' , 'INDRA' , 'PUBCHEM' , 'CHEMBL' ] )
agent_refs = set ( agent . db_refs . keys ( ) )
if agent_refs . intersection ( dbs ) :
return True
return False |
def set_from_matrix44 ( self , mat ) :
"""Create a new Quat from a Matrix44.
Note that the matrix and indexes are column major .""" | # Matrix trace
trace = mat . data [ 0 ] [ 0 ] + mat . data [ 1 ] [ 1 ] + mat . data [ 2 ] [ 2 ] + 1.0
if trace > 0.00000001 : # n4 is norm of quaternion multiplied by 4.
n4 = math . sqrt ( trace ) * 2
self . x = ( mat . data [ 1 ] [ 2 ] - mat . data [ 2 ] [ 1 ] ) / n4
self . y = ( mat . data [ 2 ] [ 0 ] - mat . data [ 0 ] [ 2 ] ) / n4
self . z = ( mat . data [ 0 ] [ 1 ] - mat . data [ 1 ] [ 0 ] ) / n4
self . w = n4 / 4.0
return self
# TODO : unittests for code below when trace is small .
# matrix trace < = 0
if mat . data [ 0 ] [ 0 ] > mat . data [ 1 ] [ 1 ] and mat . data [ 0 ] [ 0 ] > mat . data [ 2 ] [ 2 ] :
s = 2.0 * math . sqrt ( 1.0 + mat . data [ 0 ] [ 0 ] - mat . data [ 1 ] [ 1 ] - mat . data [ 2 ] [ 2 ] )
self . x = s / 4.0
self . y = ( mat . data [ 1 ] [ 0 ] + mat . data [ 0 ] [ 1 ] ) / s
self . z = ( mat . data [ 2 ] [ 0 ] + mat . data [ 0 ] [ 2 ] ) / s
self . w = ( mat . data [ 2 ] [ 1 ] - mat . data [ 1 ] [ 2 ] ) / s
return self
elif mat . data [ 1 ] [ 1 ] > mat . data [ 2 ] [ 2 ] :
s = 2.0 * math . sqrt ( 1.0 - mat . data [ 0 ] [ 0 ] + mat . data [ 1 ] [ 1 ] - mat . data [ 2 ] [ 2 ] )
self . x = ( mat . data [ 1 ] [ 0 ] + mat . data [ 0 ] [ 1 ] ) / s
self . y = s / 4.0
self . z = ( mat . data [ 2 ] [ 1 ] + mat . data [ 1 ] [ 2 ] ) / s
self . w = ( mat . data [ 2 ] [ 0 ] - mat . data [ 0 ] [ 2 ] ) / s
return self
else :
s = 2.0 * math . sqrt ( 1.0 - mat . data [ 0 ] [ 0 ] - mat . data [ 1 ] [ 1 ] + mat . data [ 2 ] [ 2 ] )
self . x = ( mat . data [ 2 ] [ 0 ] + mat . data [ 0 ] [ 2 ] ) / s
self . y = ( mat . data [ 2 ] [ 1 ] + mat . data [ 1 ] [ 2 ] ) / s
self . z = s / 4.0
self . w = ( mat . data [ 1 ] [ 0 ] - mat . data [ 0 ] [ 1 ] ) / s
return self |
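A quick check of the trace branch, kept as comments since the ` Quat ` / ` Matrix44 ` constructors are not shown here: the identity rotation has trace 4, so n4 = 4, the off-diagonal differences vanish, and the result is the unit quaternion.

# q = Quat().set_from_matrix44(Matrix44.identity())   # hypothetical constructors
# assert (q.x, q.y, q.z, q.w) == (0.0, 0.0, 0.0, 1.0)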
def get_active_conditions ( self , manager ) :
'''Returns a generator which yields groups of lists of conditions .
> > > conditions = switch . get _ active _ conditions ( )
> > > for set _ id , label , field , value , exc in conditions : # doctest : + SKIP
> > > print ( " % ( label ) s : % ( field ) s = % ( value ) s ( exclude : % ( exc ) s ) "
> > > % ( label , field . label , value , exc ) ) # doctest : + SKIP''' | for condition_set in sorted ( manager . get_condition_sets ( ) , key = lambda x : x . get_group_label ( ) ) :
ns = condition_set . get_namespace ( )
condition_set_id = condition_set . get_id ( )
if ns in self . value :
group = condition_set . get_group_label ( )
for name , field in condition_set . fields . items ( ) :
for value in self . value [ ns ] . get ( name , [ ] ) :
try :
yield ( condition_set_id , group , field , value [ 1 ] , value [ 0 ] == EXCLUDE )
except TypeError :
continue |
def _set_flask_alembic ( ) :
"""Add the SQLAlchemy object in the global extension""" | from flask_alembic import Alembic
application . app . extensions [ "sqlalchemy" ] = type ( '' , ( ) , { "db" : db } )
alembic = Alembic ( )
alembic . init_app ( application . app )
return alembic |
def search ( self , category = None , cuisine = None , location = ( None , None ) , radius = None , tl_coord = ( None , None ) , br_coord = ( None , None ) , name = None , country = None , locality = None , region = None , postal_code = None , street_address = None , website_url = None , has_menu = None , open_at = None ) :
"""Locu Venue Search API Call Wrapper
Args :
* Note that none of the arguments are required
category : List of category types that need to be filtered by : [ ' restaurant ' , ' spa ' , ' beauty salon ' , ' gym ' , ' laundry ' , ' hair care ' , ' other ' ]
type : [ string ]
cuisine : List of cuisine types that need to be filtered by : [ ' american ' , ' italian ' , . . . ]
type : [ string ]
location : Tuple that consists of ( latitude , longtitude ) coordinates
type : tuple ( float , float )
radius : Radius around the given lat , long
type : float
tl _ coord : Tuple that consists of ( latitude , longtitude ) for bounding box top left coordinates
type : tuple ( float , float )
br _ coord : Tuple that consists of ( latitude , longtitude ) for bounding box bottom right coordinates
type : tuple ( float , float )
name : Name of the venue
type : string
country : Country where venue is located
type : string
locality : Locality . Ex ' San Francisco '
type : string
region : Region / state . Ex . ' CA '
type : string
postal _ code : Postal code
type : string
street _ address : Address
type : string
open _ at : Search for venues open at the specified time
type : datetime
website _ url : Filter by the venue website url
type : string
has _ menu : Filter venues that have menus in them
type : boolean
Returns :
A dictionary with a data returned by the server
Raises :
HttpException with the error message from the server""" | params = self . _get_params ( category = category , cuisine = cuisine , location = location , radius = radius , tl_coord = tl_coord , br_coord = br_coord , name = name , country = country , locality = locality , region = region , postal_code = postal_code , street_address = street_address , website_url = website_url , has_menu = has_menu , open_at = open_at )
return self . _create_query ( 'search' , params ) |
def plotE ( self , * args , ** kwargs ) :
"""NAME :
plotE
PURPOSE :
plot E ( . ) along the orbit
INPUT :
bovy _ plot . bovy _ plot inputs
OUTPUT :
figure to output device
HISTORY :
2014-06-16 - Written - Bovy ( IAS )""" | if kwargs . pop ( 'normed' , False ) :
kwargs [ 'd2' ] = 'Enorm'
else :
kwargs [ 'd2' ] = 'E'
return self . plot ( * args , ** kwargs ) |
def panels ( self ) :
"""Add 2 panels to the figure , top for signal and bottom for gene models""" | ax1 = self . fig . add_subplot ( 211 )
ax2 = self . fig . add_subplot ( 212 , sharex = ax1 )
return ( ax2 , self . gene_panel ) , ( ax1 , self . signal_panel ) |
def column_stack_2d ( data ) :
"""Perform column - stacking on a list of 2d data blocks .""" | return list ( list ( itt . chain . from_iterable ( _ ) ) for _ in zip ( * data ) ) |
def to_pdf ( self , outFileName , imageFileName = None , showBoundingboxes = False , fontname = "Helvetica" , invisibleText = False , interwordSpaces = False , ) :
"""Creates a PDF file with an image superimposed on top of the text .
Text is positioned according to the bounding box of the lines in
the hOCR file .
The image need not be identical to the image used to create the hOCR
file .
It can have a lower resolution , different color mode , etc .""" | # create the PDF file
# page size in points ( 1/72 in . )
pdf = Canvas ( outFileName , pagesize = ( self . width , self . height ) , pageCompression = 1 )
# draw bounding box for each paragraph
# cyan stroke colour for paragraph bounding boxes
pdf . setStrokeColorRGB ( 0 , 1 , 1 )
# cyan fill colour for paragraph bounding boxes
pdf . setFillColorRGB ( 0 , 1 , 1 )
# zero line width for the bounding box border
pdf . setLineWidth ( 0 )
for elem in self . hocr . findall ( ".//%sp[@class='%s']" % ( self . xmlns , "ocr_par" ) ) :
elemtxt = self . _get_element_text ( elem ) . rstrip ( )
if len ( elemtxt ) == 0 :
continue
pxl_coords = self . element_coordinates ( elem )
pt = self . pt_from_pixel ( pxl_coords )
# draw the bbox border
if showBoundingboxes :
pdf . rect ( pt . x1 , self . height - pt . y2 , pt . x2 - pt . x1 , pt . y2 - pt . y1 , fill = 1 )
found_lines = False
for line in self . hocr . findall ( ".//%sspan[@class='%s']" % ( self . xmlns , "ocr_line" ) ) :
found_lines = True
self . _do_line ( pdf , line , "ocrx_word" , fontname , invisibleText , interwordSpaces , showBoundingboxes , )
if not found_lines : # Tesseract did not report any lines ( just words )
root = self . hocr . find ( ".//%sdiv[@class='%s']" % ( self . xmlns , "ocr_page" ) )
self . _do_line ( pdf , root , "ocrx_word" , fontname , invisibleText , interwordSpaces , showBoundingboxes , )
# put the image on the page , scaled to fill the page
if imageFileName is not None :
pdf . drawImage ( imageFileName , 0 , 0 , width = self . width , height = self . height )
# finish up the page and save it
pdf . showPage ( )
pdf . save ( ) |
def draw_instances ( self , X , y , ** kwargs ) :
"""Draw the instances colored by the target y such that each line is a
single instance . This is the " slow " mode of drawing , since each
instance has to be drawn individually . However , in so doing , the
density of instances in braids is more apparent since lines have an
independent alpha that is compounded in the figure .
This is the default method of drawing .
Parameters
X : ndarray of shape n x m
A matrix of n instances with m features
y : ndarray of length n
An array or series of target or class values
Notes
This method can be used to draw additional instances onto the parallel
coordinates before the figure is finalized .""" | # Get alpha from param or default
alpha = self . alpha or 0.25
for idx in range ( len ( X ) ) :
Xi = X [ idx ]
yi = y [ idx ]
# TODO : generalize this duplicated code into a single function
if isinstance ( yi , str ) :
label = yi
else : # TODO : what happens if yi is not in classes ? !
label = self . classes_ [ yi ]
self . ax . plot ( self . _increments , Xi , color = self . _colors [ label ] , alpha = alpha , ** kwargs )
return self . ax |
def export_image3d ( input , output , size = ( 800 , 600 ) , pcb_rotate = ( 0 , 0 , 0 ) , timeout = 20 , showgui = False ) :
'''Exporting eagle . brd file into 3D image file
using Eagle3D and povray .
GUI is not displayed if ` ` pyvirtualdisplay ` ` is installed .
If export is blocked somehow ( e . g . popup window is displayed ) then after timeout operation is canceled with exception .
Problem can be investigated by setting ' showgui ' flag .
: param input : eagle . brd file name
: param output : image file name ( . png )
: param timeout : operation is canceled after this timeout ( sec )
: param showgui : eagle GUI is displayed
: param size : tuple ( width , height ) , image size
: rtype : None''' | input = norm_path ( input )
output = norm_path ( output )
ext = os . path . splitext ( input ) [ 1 ]
if ext not in [ '.brd' ] :
raise ValueError ( 'Input extension is not ".brd", brd=' + str ( input ) )
commands = [ ]
eagle3d = Path ( __file__ ) . dirname ( ) / 'eagle3d'
ulp = ( eagle3d / '3d50.ulp' ) . abspath ( )
commands += [ 'RUN ' + ulp ]
commands += [ 'QUIT' ]
def render ( dir , f ) : # povray has strange file access policy ,
# better to generate under tmp
# cli doc :
# http : / / library . thinkquest . org / 3285 / language / cmdln . html
templ = '#local pcb_rotate_%s = %s'
pov = Path ( f . replace ( '.brd' , '.pov' ) )
if pcb_rotate != ( 0 , 0 , 0 ) :
s = pov . bytes ( )
s = s . replace ( templ % ( 'x' , 0 ) , templ % ( 'x' , pcb_rotate [ 0 ] ) )
s = s . replace ( templ % ( 'y' , 0 ) , templ % ( 'y' , pcb_rotate [ 1 ] ) )
s = s . replace ( templ % ( 'z' , 0 ) , templ % ( 'z' , pcb_rotate [ 2 ] ) )
pov . write_bytes ( s )
fpng = Path ( f . replace ( '.brd' , '.png' ) )
cmd = [ ]
cmd += [ "povray" ]
cmd += [ "-d" ]
# no display
cmd += [ "-a" ]
# anti - aliasing
cmd += [ '+W' + str ( size [ 0 ] ) ]
# width
cmd += [ '+H' + str ( size [ 1 ] ) ]
# height
cmd += [ '-o' + fpng ]
cmd += [ '-L' + eagle3d ]
cmd += [ pov ]
p = Proc ( cmd ) . call ( )
if not fpng . exists ( ) :
raise EagleError ( 'povray error, proc=%s' % p )
fpng . copy ( output )
command_eagle ( input = input , timeout = timeout , commands = commands , showgui = showgui , callback = render ) |
def plot_curvature ( self , curv_type = 'mean' , ** kwargs ) :
"""Plots the curvature of the external surface of the grid
Parameters
curv _ type : str , optional
One of the following strings indicating curvature types
- mean
- gaussian
- maximum
- minimum
* * kwargs : optional
Optional keyword arguments . See help ( vtki . plot )
Returns
cpos : list
Camera position , focal point , and view up . Used for storing and
setting camera view .""" | trisurf = self . extract_surface ( ) . tri_filter ( )
return trisurf . plot_curvature ( curv_type , ** kwargs ) |
def chemicals ( self ) :
"""List of namedtuples representing chemical entities in the form
( source , chemical _ name , cas _ registry _ number ) . In case multiple
numbers are given , they are joined on " ; " .""" | path = [ 'enhancement' , 'chemicalgroup' , 'chemicals' ]
items = listify ( chained_get ( self . _head , path , [ ] ) )
chemical = namedtuple ( 'Chemical' , 'source chemical_name cas_registry_number' )
out = [ ]
for item in items :
for chem in listify ( item [ 'chemical' ] ) :
number = chem . get ( 'cas-registry-number' )
try : # Multiple numbers given
num = ";" . join ( [ n [ '$' ] for n in number ] )
except TypeError :
num = number
new = chemical ( source = item [ '@source' ] , cas_registry_number = num , chemical_name = chem [ 'chemical-name' ] )
out . append ( new )
return out or None |
def stream_url ( self ) :
'''Stream URL for this song - not re - encoded''' | path = '/Audio/{}/universal' . format ( self . id )
return self . connector . get_url ( path , userId = self . connector . userid , MaxStreamingBitrate = 140000000 , Container = 'opus' , TranscodingContainer = 'opus' , AudioCodec = 'opus' , MaxSampleRate = 48000 , PlaySessionId = 1496213367201 # TODO no hard code
) |
def _split_python ( python ) :
"""Split Python source into chunks .
Chunks are separated by at least two return lines . The break must not
be followed by a space . Also , long Python strings spanning several lines
are not splitted .""" | python = _preprocess ( python )
if not python :
return [ ]
lexer = PythonSplitLexer ( )
lexer . read ( python )
return lexer . chunks |
def strftime ( self , date_format ) :
"""Convert to Index using specified date _ format .
Return an Index of formatted strings specified by date _ format , which
supports the same string format as the python standard library . Details
of the string format can be found in ` python string format
doc < % ( URL ) s > ` _ _ .
Parameters
date _ format : str
Date format string ( e . g . " % % Y - % % m - % % d " ) .
Returns
Index
Index of formatted strings .
See Also
to _ datetime : Convert the given argument to datetime .
DatetimeIndex . normalize : Return DatetimeIndex with times to midnight .
DatetimeIndex . round : Round the DatetimeIndex to the specified freq .
DatetimeIndex . floor : Floor the DatetimeIndex to the specified freq .
Examples
> > > rng = pd . date _ range ( pd . Timestamp ( " 2018-03-10 09:00 " ) ,
. . . periods = 3 , freq = ' s ' )
> > > rng . strftime ( ' % % B % % d , % % Y , % % r ' )
Index ( [ ' March 10 , 2018 , 09:00:00 AM ' , ' March 10 , 2018 , 09:00:01 AM ' ,
' March 10 , 2018 , 09:00:02 AM ' ] ,
dtype = ' object ' )""" | from pandas import Index
return Index ( self . _format_native_types ( date_format = date_format ) ) |
def cancel ( self , username , project , build_num ) :
"""Cancel the build and return its summary .""" | method = 'POST'
url = ( '/project/{username}/{project}/{build_num}/cancel?' 'circle-token={token}' . format ( username = username , project = project , build_num = build_num , token = self . client . api_token ) )
json_data = self . client . request ( method , url )
return json_data |
def _get_I ( self , a , b , size , plus_transpose = True ) :
"""Return I matrix in Chaput ' s PRL paper .
None is returned if I is zero matrix .""" | r_sum = np . zeros ( ( 3 , 3 ) , dtype = 'double' , order = 'C' )
for r in self . _rotations_cartesian :
for i in range ( 3 ) :
for j in range ( 3 ) :
r_sum [ i , j ] += r [ a , i ] * r [ b , j ]
if plus_transpose :
r_sum += r_sum . T
# Return None not to consume computer for diagonalization
if ( np . abs ( r_sum ) < 1e-10 ) . all ( ) :
return None
# Same as np . kron ( np . eye ( size ) , r _ sum ) , but written as below
# to be sure the values in memory are C - contiguous with ' double ' .
I_mat = np . zeros ( ( 3 * size , 3 * size ) , dtype = 'double' , order = 'C' )
for i in range ( size ) :
I_mat [ ( i * 3 ) : ( ( i + 1 ) * 3 ) , ( i * 3 ) : ( ( i + 1 ) * 3 ) ] = r_sum
return I_mat |
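The block-diagonal fill is equivalent to the Kronecker product the comment alludes to; a check under that reading:

import numpy as np

size = 2
r_sum = np.arange(9.0).reshape(3, 3)
I_mat = np.zeros((3 * size, 3 * size))
for i in range(size):
    I_mat[i * 3:(i + 1) * 3, i * 3:(i + 1) * 3] = r_sum   # same fill as above
assert np.allclose(I_mat, np.kron(np.eye(size), r_sum))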