signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
---|---|
def better_exec_command(ssh, command, msg):
    """Run *command* over *ssh*, raising ParamikoError if it fails.

    Unlike paramiko.SSHClient.exec_command this call is synchronous: it
    blocks until the remote exit status is known.

    :param ssh: a paramiko SSH client
    :param command: the command to execute
    :param msg: message to include when raising on failure
    :returns: the underlying paramiko.Channel so the caller can read
        stdout or write to stdin
    :raises SSHException: if paramiko would raise an SSHException
    :raises ParamikoError: if the command exits with a non-zero status
    """
    channel = ssh.get_transport().open_session()
    channel.exec_command(command)
    if channel.recv_exit_status() != 0:
        # Drain stderr in 1024-byte chunks so the whole message is captured.
        chunks = []
        chunk = channel.recv_stderr(1024)
        while chunk:
            chunks.append(chunk)
            chunk = channel.recv_stderr(1024)
        err_msg = ''.join(chunks)
        logger.error(err_msg)
        raise ParamikoError(msg, err_msg)
    return channel
def from_element(self, element, defaults=None):
    """Populate object attributes from an SVD XML element.

    :param element: XML element exposing ``find`` (child elements) and
        ``get`` (attributes)
    :param defaults: fallback values, either a mapping or an SvdElement
        whose attributes are used; default is no fallbacks
    """
    # Use a None sentinel instead of a mutable default argument.
    if defaults is None:
        defaults = {}
    elif isinstance(defaults, SvdElement):
        defaults = vars(defaults)
    for key in self.props:
        try:
            value = element.find(key).text
        except AttributeError:  # No child element; maybe it's an attribute?
            default = defaults[key] if key in defaults else None
            value = element.get(key, default)
        if value is not None:
            if key in self.props_to_integer:
                try:
                    value = int(value)
                except ValueError:  # Not decimal, so it has to be hex.
                    value = int(value, 16)
            elif key in self.props_to_boolean:
                value = value.lower() in ("yes", "true", "t", "1")
            setattr(self, key, value)
def _refresh_editor_and_scrollbars(self):
    """Refresh the editor content and its scroll bars.

    A fake resize event (width grown by one pixel) is generated to force
    the scroll bars to refresh; see http://www.qtcentre.org/threads/44803
    for the underlying Qt issue. There is no visual effect — the editor
    never actually grows.
    """
    TextHelper(self.editor).mark_whole_doc_dirty()
    self.editor.repaint()
    grown = self.editor.size()
    grown.setWidth(grown.width() + 1)
    self.editor.resizeEvent(QResizeEvent(self.editor.size(), grown))
def update_wrapper(wrapper, wrapped, assigned=functools.WRAPPER_ASSIGNMENTS, updated=functools.WRAPPER_UPDATES):
    """Patch two bugs in functools.update_wrapper.

    Attributes missing on *wrapped* are skipped instead of raising
    (http://bugs.python.org/issue3445), and ``__wrapped__`` is set last so
    it is not clobbered by the copy (https://bugs.python.org/issue17482).
    """
    present = tuple(attr for attr in assigned if hasattr(wrapped, attr))
    result = functools.update_wrapper(wrapper, wrapped, present, updated)
    result.__wrapped__ = wrapped
    return result
def to_unicode(x, unaccent=False):
    """Convert *x* to a text string, optionally stripping accents.

    :param x: value to convert via ``str``
    :param unaccent: when True, drop Unicode combining marks (category
        'Mn') after NFD normalization
    :returns: the converted string
    """
    text = str(x)
    if not unaccent:
        return text
    return ''.join(
        ch for ch in unicodedata.normalize('NFD', text)
        if unicodedata.category(ch) != 'Mn'
    )
def _get_areas ( self ) :
"""Return surface elements area values in a numpy array .""" | if self . areas is None :
self . areas = [ ]
for surf in self . surfaces :
self . areas . append ( surf . get_area ( ) )
self . areas = numpy . array ( self . areas )
return self . areas |
def from_json_dict(cls, json_dict):
    # type: (Dict[str, Any]) -> EnumSpec
    """Make an EnumSpec object from a dictionary containing its properties.

    :param dict json_dict: This dictionary must contain an ``'enum'`` key
        specifying the permitted values. In addition, it must contain a
        ``'hashing'`` key, whose contents are passed to
        :class:`FieldHashingProperties`.
    """
    # noinspection PyCompatibility
    spec = cast(EnumSpec, super().from_json_dict(json_dict))  # appease mypy
    spec.values = set(json_dict['format']['values'])
    return spec
def ld_prune(df, ld_beds, snvs=None):
    """Prune set of GWAS based on LD and significance.

    A graph of all SNVs is constructed with edges for LD >= 0.8 and the most
    significant SNV per connected component is kept.

    Parameters
    ----------
    df : pandas.DataFrame
        Pandas dataframe with unique SNVs. The index is of the form chrom:pos
        where pos is the one-based position of the SNV. The columns must
        include chrom, start, end, and pvalue. chrom, start, end make a
        zero-based bed file with the SNV coordinates.
    ld_beds : dict
        Dict whose keys are chromosomes and whose values are filenames of
        tabixed LD bed files. An LD bed file looks like
        "chr1 11007 11008 11008:11012:1" where the first three columns are the
        zero-based half-open coordinate of the SNV and the fourth column has
        the one-based coordinate of the SNV followed by the one-based
        coordinate of a different SNV and the LD between them. The bed file
        should also contain the reciprocal line for each LD relationship.
    snvs : list
        List of SNVs to filter against. If a SNV is not in this list, it will
        not be included.

    Returns
    -------
    out : pandas.DataFrame
        Same format as the input dataframe but with only independent SNVs.

    NOTE(review): this uses the deprecated pandas ``.ix`` indexer and the
    Python 2 ``.next()`` iterator protocol — it will not run on Python 3 /
    modern pandas without migration.
    """
    import networkx as nx
    import tabix
    if snvs:
        df = df.ix[set(df.index) & set(snvs)]
    keep = set()
    for chrom in ld_beds.keys():
        tdf = df[df['chrom'].astype(str) == chrom]
        if tdf.shape[0] > 0:
            f = tabix.open(ld_beds[chrom])
            # Make a dict where each key is a SNP and the values are all of the
            # other SNPs in LD with the key.
            ld_d = {}
            for j in tdf.index:
                p = tdf.ix[j, 'end']
                ld_d[p] = []
                try:
                    # Tabix query is zero-based half-open, so [p-1, p) covers
                    # the single base at one-based position p.
                    r = f.query(chrom, p - 1, p)
                    while True:
                        try:
                            n = r.next()
                            p1, p2, r2 = n[-1].split(':')
                            if float(r2) >= 0.8:
                                ld_d[p].append(int(p2))
                        except StopIteration:
                            break
                except TabixError:
                    # No LD data for this position; treat it as independent.
                    continue
            # Make adjacency matrix for LD.
            cols = sorted(list(set([item for sublist in ld_d.values() for item in sublist])))
            t = pd.DataFrame(0, index=ld_d.keys(), columns=cols)
            for k in ld_d.keys():
                t.ix[k, ld_d[k]] = 1
            t.index = ['{}:{}'.format(chrom, x) for x in t.index]
            t.columns = ['{}:{}'.format(chrom, x) for x in t.columns]
            # Keep all SNPs not in LD with any others. These will be in the
            # index but not in the columns.
            keep |= set(t.index) - set(t.columns)
            # Filter so we only have SNPs that are in LD with at least one
            # other SNP.
            ind = list(set(t.columns) & set(t.index))
            # Keep one most sig. SNP per connected subgraph.
            t = t.ix[ind, ind]
            g = nx.Graph(t.values)
            c = nx.connected_components(g)
            while True:
                try:
                    sg = c.next()
                    s = tdf.ix[t.index[list(sg)]]
                    # Ties on the minimum p-value resolve to the first row.
                    keep.add(s[s.pvalue == s.pvalue.min()].index[0])
                except StopIteration:
                    break
    out = df.ix[keep]
    return out
def set_meta(self, name, format, *args):
    """Set certificate metadata from a formatted string.

    Thin wrapper over the CZMQ binding; *format* and *args* are passed
    straight through to ``lib.zcert_set_meta``.
    NOTE(review): ``format`` shadows the builtin, but is kept for API
    compatibility with the underlying C signature.
    """
    return lib.zcert_set_meta(self._as_parameter_, name, format, *args)
def _get_config_dir ( ) :
"""Returns the sawtooth configuration directory based on the
SAWTOOTH _ HOME environment variable ( if set ) or OS defaults .""" | if 'SAWTOOTH_HOME' in os . environ :
return os . path . join ( os . environ [ 'SAWTOOTH_HOME' ] , 'etc' )
if os . name == 'nt' :
base_dir = os . path . dirname ( os . path . dirname ( os . path . abspath ( sys . argv [ 0 ] ) ) )
return os . path . join ( base_dir , 'conf' )
return '/etc/sawtooth' |
def pause_knocks(obj):
    """Context manager to suspend sending knocks for the given model.

    :param obj: model instance

    NOTE(review): there is no try/finally, so knocks stay disabled if the
    managed body raises — confirm this is intended.
    """
    if not hasattr(_thread_locals, 'knock_enabled'):
        _thread_locals.knock_enabled = {}
    model = obj.__class__
    model._disconnect()
    _thread_locals.knock_enabled[model] = False
    yield
    _thread_locals.knock_enabled[model] = True
    model._connect()
def apply_encoding_options(self, min_token_count=1, limit_top_tokens=None):
    """Applies the given settings for subsequent calls to `encode_texts` and
    `decode_texts`. This allows you to play with different settings without
    having to re-run tokenization on the entire corpus.

    Args:
        min_token_count: The minimum token count (frequency) in order to
            include during encoding. All tokens below this frequency will be
            encoded to `0` which corresponds to unknown token.
            (Default value = 1)
        limit_top_tokens: The maximum number of tokens to keep, based on their
            frequency. Only the most common `limit_top_tokens` tokens will be
            kept. Set to None to keep everything. (Default value: None)

    Raises:
        ValueError: if the vocabulary has not been built yet, or if
            `min_token_count` is below 1.
    """
    if not self.has_vocab:
        raise ValueError("You need to build the vocabulary using `build_vocab` "
                         "before using `apply_encoding_options`")
    if min_token_count < 1:
        raise ValueError("`min_token_count` should atleast be 1")
    # Remove tokens with freq < min_token_count.
    token_counts = [x for x in self._token_counts.items() if x[1] >= min_token_count]
    # Clip to the limit_top_tokens most frequent tokens.
    if limit_top_tokens is not None:
        token_counts.sort(key=lambda x: x[1], reverse=True)
        token_counts = token_counts[:limit_top_tokens]
    # BUG FIX: the original else-branch used `zip(*token_counts)[0]`, which
    # raises TypeError on Python 3 (zip objects are not subscriptable);
    # materialize with list() as the other branch already did. Also guard
    # against an empty token list, which would raise IndexError.
    filtered_tokens = list(zip(*token_counts))[0] if token_counts else ()
    # Generate indices based on filtered tokens.
    self.create_token_indices(filtered_tokens)
def _unkown_type ( self , uridecodebin , decodebin , caps ) :
"""The callback for decodebin ' s " unknown - type " signal .""" | # This is called * before * the stream becomes ready when the
# file can ' t be read .
streaminfo = caps . to_string ( )
if not streaminfo . startswith ( 'audio/' ) : # Ignore non - audio ( e . g . , video ) decode errors .
return
self . read_exc = UnknownTypeError ( streaminfo )
self . ready_sem . release ( ) |
def version_cmd(argv=sys.argv[1:]):  # pragma: no cover
    """\
Print the version number of Palladium.

Usage:
  pld-version [options]

Options:
  -h --help  Show this screen.
"""
    # NOTE: the docstring doubles as the docopt usage specification, so
    # editing it changes the CLI parsing behavior.
    docopt(version_cmd.__doc__, argv=argv)
    print(__version__)
def noaa_to_lpd(files):
    """Convert NOAA format to LiPD format.

    :param dict files: Files metadata, keyed by extension; only the ".txt"
        entries are processed. Each entry provides "dir", "filename_ext"
        and "filename_no_ext".
    :return None:
    """
    logger_noaa.info("enter process_noaa")
    # only continue if the user selected a mode correctly
    logger_noaa.info("Found {} NOAA txt file(s)".format(str(len(files[".txt"]))))
    print("Found {} NOAA txt file(s)".format(str(len(files[".txt"]))))
    # Process each available file of the specified .lpd or .txt type
    for file in files[".txt"]:
        # try to filter out example files and stuff without real data
        if "template" not in file["filename_ext"] and "example" not in file["filename_ext"]:
            os.chdir(file["dir"])
            print('processing: {}'.format(file["filename_ext"]))
            logger_noaa.info("processing: {}".format(file["filename_ext"]))
            # Unzip file and get tmp directory path
            dir_tmp = create_tmp_dir()
            try:
                NOAA_LPD(file["dir"], dir_tmp, file["filename_no_ext"]).main()
            except Exception as e:
                # Conversion failure is reported but does not stop the batch;
                # the (possibly partial) tmp dir is still zipped below.
                print("Error: Unable to convert file: {}, {}".format(file["filename_no_ext"], e))
            # Create the lipd archive in the original file's directory.
            zipper(root_dir=dir_tmp, name="bag", path_name_ext=os.path.join(file["dir"], file["filename_no_ext"] + ".lpd"))
            # Delete tmp folder and all contents
            os.chdir(file["dir"])
            try:
                shutil.rmtree(dir_tmp)
            except FileNotFoundError:
                # directory is already gone. keep going.
                pass
    logger_noaa.info("exit noaa_to_lpd")
    return
def to_point(self, timestamp):
    """Get a Point conversion of this aggregation.

    :type timestamp: :class:`datetime.datetime`
    :param timestamp: The time to report the point as having been recorded.
    :rtype: :class:`opencensus.metrics.export.point.Point`
    :return: a :class:`opencensus.metrics.export.value.ValueLong`-valued
        Point with value equal to `count_data`.
    """
    return point.Point(value.ValueLong(self.count_data), timestamp)
def get_kernel_spec(self, kernel_name):
    """Returns a :class:`KernelSpec` instance for the given kernel_name.

    The special CURRENT_ENV_KERNEL_NAME resolves to the kernel of the
    running environment; anything else is delegated to the parent class,
    which raises :exc:`NoSuchKernel` for unknown names.
    """
    if kernel_name != CURRENT_ENV_KERNEL_NAME:
        return super(NbvalKernelspecManager, self).get_kernel_spec(kernel_name)
    return self.kernel_spec_class(
        resource_dir=ipykernel.kernelspec.RESOURCES,
        **ipykernel.kernelspec.get_kernel_dict())
def plot_data(orig_data, data):
    '''Scatter-plot the original (red) and processed (blue) data in 3D.'''
    import numpy as np
    from mpl_toolkits.mplot3d import Axes3D
    import matplotlib.pyplot as plt
    for points, color in ((orig_data, 'r'), (data, 'b')):
        fig = plt.figure()
        axes = fig.add_subplot(111, projection='3d')
        axes.scatter(
            [p.x for p in points],
            [p.y for p in points],
            [p.z for p in points],
            c=color, marker='o')
        axes.set_xlabel('X Label')
        axes.set_ylabel('Y Label')
        axes.set_zlabel('Z Label')
    plt.show()
def rooms(self, sid, namespace=None):
    """Return the rooms a client is in.

    :param sid: Session ID of the client.
    :param namespace: The Socket.IO namespace for the event; when omitted
        the default namespace ``'/'`` is used.
    """
    return self.manager.get_rooms(sid, namespace or '/')
def insert_attribute(self, att, index):
    """Inserts the attribute at the specified location.

    :param att: the attribute to insert
    :type att: Attribute
    :param index: the index to insert the attribute at
    :type index: int
    """
    # Delegates to the JVM-side insertAttributeAt via javabridge/JNI.
    javabridge.call(self.jobject, "insertAttributeAt", "(Lweka/core/Attribute;I)V", att.jobject, index)
def project_describe(object_id, input_params=None, always_retry=True, **kwargs):
    """Invokes the /project-xxxx/describe API method.

    For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Projects#API-method%3A-%2Fproject-xxxx%2Fdescribe

    :param object_id: project ID, e.g. ``project-xxxx``
    :param input_params: JSON-serializable request body (default: empty)
    :param always_retry: whether the request may be retried safely
    """
    # Use a None sentinel instead of a mutable default dict, which would be
    # shared across calls if it were ever mutated downstream.
    if input_params is None:
        input_params = {}
    return DXHTTPRequest('/%s/describe' % object_id, input_params,
                         always_retry=always_retry, **kwargs)
def update_widget_channels(self):
    """Push the currently selected x/y axis channels to the gate manager.

    Does nothing unless both list boxes have a selection.
    """
    x_sel = self.x_axis_list.GetSelection()
    y_sel = self.y_axis_list.GetSelection()
    if x_sel < 0 or y_sel < 0:
        return
    channels = (self.x_axis_list.GetString(x_sel),
                self.y_axis_list.GetString(y_sel))
    self.fcgatemanager.set_axes(channels, self.ax)
def reread(self):
    """Read the configuration file and substitute references into the
    checks configuration.

    :returns: True when the parsed checks changed; otherwise whatever the
        credentials reread reported.
    """
    logger.debug("Loading settings from %s", os.path.abspath(self.filename))
    conf = self.read_conf()
    creds_changed = self.creds.reread()
    parsed_checks = self.parser.parse_checks(conf)
    if self.checks == parsed_checks:
        return creds_changed
    self.checks = parsed_checks
    return True
def get_workflow_status_of(brain_or_object, state_var="review_state"):
    """Get the current workflow status of the given brain or context.

    :param brain_or_object: A single catalog brain or content object
    :type brain_or_object: ATContentType/DexterityContentType/CatalogBrain
    :param state_var: The name of the state variable
    :type state_var: string
    :returns: Status
    :rtype: str
    """
    obj = get_object(brain_or_object)
    return get_tool("portal_workflow").getInfoFor(ob=obj, name=state_var)
def attention_lm_moe_base_memeff():
    """Base model with attention expert, configured for memory efficiency."""
    hparams = attention_lm_moe_base_long_seq()
    hparams.use_sepconv = False
    # Diet experts plus memory-efficient FFN/attention trade recomputation
    # for a smaller activation-memory footprint.
    hparams.diet_experts = True
    hparams.layer_preprocess_sequence = "n"
    hparams.layer_postprocess_sequence = "da"
    hparams.layer_prepostprocess_dropout = 0.0
    hparams.memory_efficient_ffn = True
    hparams.attention_type = AttentionType.MEMORY_EFFICIENT
    hparams.num_heads = 8
    hparams.factored_logits = True
    return hparams
def set_headers(self) -> None:
    """Sets the content and caching headers on the response.

    .. versionadded:: 3.1
    """
    self.set_header("Accept-Ranges", "bytes")
    self.set_etag_header()
    if self.modified is not None:
        self.set_header("Last-Modified", self.modified)
    ctype = self.get_content_type()
    if ctype:
        self.set_header("Content-Type", ctype)
    cache_time = self.get_cache_time(self.path, self.modified, ctype)
    if cache_time > 0:
        expires = datetime.datetime.utcnow() + datetime.timedelta(seconds=cache_time)
        self.set_header("Expires", expires)
        self.set_header("Cache-Control", "max-age=" + str(cache_time))
    self.set_extra_headers(self.path)
def prefixes(self):
    """List all distinct (non-None) prefixes used by this graph's nodes."""
    found = {self.prefix(node) for node in self.nodes()}
    found.discard(None)
    return list(found)
def _prepare_uri(self, path, query_params={}):
    """Prepares a full URI with the selected information.

    ``path``:
        Path can be in one of two formats:
        - If :attr:`server` was defined, the ``path`` will be appended
          to the existing host, or
        - an absolute URL.
    ``query_params``:
        Used to generate a query string, which will be appended to the
        end of the absolute URL.

    Returns an absolute URL.

    NOTE(review): uses Python 2 ``urllib.urlencode``/``urlparse``; a
    Python 3 port needs ``urllib.parse``. Also ``query_params={}`` is a
    mutable default (read-only here, but fragile).
    """
    query_str = urllib.urlencode(query_params)
    # If we have a relative path (as opposed to a full URL), build it off
    # the connection info.
    if path.startswith('/') and self.server:
        protocol = self.protocol
        server = self.server
    else:
        protocol, server, path, _, _, _ = urlparse.urlparse(path)
        assert server, "%s is not a valid URL" % path
    return urlparse.urlunparse((protocol, server, path, None, query_str, None))
def CopyFromStringTuple(self, time_elements_tuple):
    """Copies time elements from string-based time elements tuple.

    Args:
      time_elements_tuple (Optional[tuple[str, str, str, str, str, str]]):
          time elements, contains year, month, day of month, hours, minutes
          and seconds.

    Raises:
      ValueError: if the time elements tuple is invalid.
    """
    if len(time_elements_tuple) < 6:
        raise ValueError((
            'Invalid time elements tuple at least 6 elements required,'
            'got: {0:d}').format(len(time_elements_tuple)))
    # Parse the six elements data-driven; each failure reports the element
    # name exactly as the per-field handlers did.
    element_names = ('year', 'month', 'day of month', 'hours', 'minutes', 'seconds')
    parsed_elements = []
    for element_name, string_value in zip(element_names, time_elements_tuple):
        try:
            parsed_elements.append(int(string_value, 10))
        except (TypeError, ValueError):
            raise ValueError('Invalid {0:s} value: {1!s}'.format(element_name, string_value))
    self._normalized_timestamp = None
    self._number_of_seconds = self._GetNumberOfSecondsFromElements(*parsed_elements)
    self._time_elements_tuple = tuple(parsed_elements)
def plt_goids(gosubdag, fout_img, goids, **kws_plt):
    """Plot GO IDs in a DAG (Directed Acyclic Graph).

    :param gosubdag: source GoSubDag providing go2obj and rcntobj
    :param fout_img: output image filename
    :param goids: GO IDs to include in the plotted sub-DAG
    :returns: the GoSubDagPlot instance used to render the image
    """
    # Note: kws_plt is forwarded to both the sub-DAG and the plotter.
    gosubdag_plt = GoSubDag(goids, gosubdag.go2obj, rcntobj=gosubdag.rcntobj, **kws_plt)
    godagplot = GoSubDagPlot(gosubdag_plt, **kws_plt)
    godagplot.plt_dag(fout_img)
    return godagplot
def execute(self, sql, args=None):
    """It is used for update, delete records.

    :param sql string: the sql statement like 'select * from %s'
    :param args list: When set None, will use dbi execute(sql), else
        dbi execute(sql, args); args should be a tuple, or a list of
        lists/tuples for executemany.

    eg::

        execute('insert into users values (%s, %s)', [(1L, 'blablabla'), (2L, 'animer')])
        execute('delete from users')

    :returns: lastrowid for INSERT statements, otherwise the affected
        rowcount.
    :raises DBError: wrapping any underlying driver exception.
    """
    con = self.pool.pop()
    c = None
    try:
        c = con.cursor()
        LOGGER.debug("Execute sql: " + sql + " args:" + str(args))
        if type(args) is tuple:
            c.execute(sql, args)
        elif type(args) is list:
            # A list of rows triggers executemany; a flat one-element list
            # is treated as a single parameter set.
            if len(args) > 1 and type(args[0]) in (list, tuple):
                c.executemany(sql, args)
            else:
                c.execute(sql, args)
        elif args is None:
            c.execute(sql)
        # NOTE(review): any other args type silently executes nothing and
        # falls through to the return below — confirm this is intended.
        if sql.lstrip()[:6].upper() == 'INSERT':
            return c.lastrowid
        return c.rowcount
    except Exception as e:
        LOGGER.error("Error Execute on %s", str(e))
        raise DBError(str(e))
    finally:
        # Close the cursor and return the connection to the pool even on error.
        c and c.close()
        con and self.pool.push(con)
def get_lab_text(lab_slug, language):
    """Gets text description in English or Italian from a single lab from
    makeinitaly.foundation.

    :param lab_slug: wiki page title of the lab
    :param language: human-readable language selector; anything that is not
        recognized as Italian falls back to English
    :returns: the first segment of the cleaned wiki page content
    """
    if language == "English" or language == "english" or language == "EN" or language == "En":
        language = "en"
    elif language == "Italian" or language == "italian" or language == "IT" or language == "It" or language == "it":
        language = "it"
    else:
        language = "en"
    wiki = MediaWiki(makeinitaly__foundation_api_url)
    wiki_response = wiki.call(
        {'action': 'query',
         'titles': lab_slug + "/" + language,
         'prop': 'revisions',
         'rvprop': 'content'})
    # If we don't know the pageid...
    # NOTE(review): with multiple pages in the response only the last
    # iterated page's content survives — confirm single-page responses.
    for i in wiki_response["query"]["pages"]:
        if "revisions" in wiki_response["query"]["pages"][i]:
            content = wiki_response["query"]["pages"][i]["revisions"][0]["*"]
        else:
            content = ""
    # Clean the resulting string/list
    newstr01 = content.replace("}}", "")
    newstr02 = newstr01.replace("{{", "")
    result = newstr02.rstrip("\n|").split("\n|")
    return result[0]
def pypi_search(self):
    """Search PyPI by metadata keyword.

    e.g. yolk -S name=yolk AND license=GPL

    @param spec: Cheese Shop search spec
    @type spec: list of strings

    spec examples:
        ["name=yolk"]
        ["license=GPL"]
        ["name=yolk", "AND", "license=GPL"]

    @returns: 0 on success or 1 if mal-formed search spec
    """
    spec = self.pkg_spec
    # Add remaining cli arguments to options.pypi_search
    search_arg = self.options.pypi_search
    spec.insert(0, search_arg.strip())
    (spec, operator) = self.parse_search_spec(spec)
    if not spec:
        return 1
    for pkg in self.pypi.search(spec, operator):
        if pkg['summary']:
            # NOTE(review): encoding to bytes before print is a Python 2
            # idiom; on Python 3 this prints b'...' representations.
            summary = pkg['summary'].encode('utf-8')
        else:
            summary = ""
        print("""%s (%s):
    %s
""" % (pkg['name'].encode('utf-8'), pkg["version"], summary))
    return 0
def secure(new_user=env.user):
    """Minimal security steps for brand new servers.

    Installs system updates, creates new user (with sudo privileges) for
    future usage, and disables root login via SSH.
    """
    for command in (
            "apt-get update -q",
            "apt-get upgrade -y -q",
            "adduser --gecos '' %s" % new_user,
            "usermod -G sudo %s" % new_user,
            "sed -i 's:RootLogin yes:RootLogin no:' /etc/ssh/sshd_config",
            "service ssh restart"):
        run(command)
    print(green("Security steps completed. Log in to the server as '%s' from "
                "now on." % new_user, bold=True))
def rectangle(self, edge_style, pat, x1, y1, x2, y2, shadow=None):
    """Draw a rectangle with EDGE_STYLE, filled with PAT, covering the
    bounding box (X1, Y1, X2, Y2).

    SHADOW is either None or a tuple (XDELTA, YDELTA, fillstyle); when
    given, a shadow of FILLSTYLE is drawn beneath the polygon at that
    offset.
    """
    corners = [(x1, y1), (x1, y2), (x2, y2), (x2, y1)]
    self.polygon(edge_style, pat, corners, shadow)
def get_symmetry_operations(self, cartesian=False):
    """Return symmetry operations as a list of SymmOp objects.

    By default returns fractional-coordinate symmops; pass
    ``cartesian=True`` for cartesian ones.

    Returns:
        ([SymmOp]): List of symmetry operations.
    """
    rotations, translations = self._get_symmetry()
    lattice = self._structure.lattice.matrix
    basis = lattice.T
    basis_inv = np.linalg.inv(basis)
    ops = []
    for rot, trans in zip(rotations, translations):
        if cartesian:
            # Change of basis: fractional operation -> cartesian space.
            rot = np.dot(basis, np.dot(rot, basis_inv))
            trans = np.dot(trans, lattice)
        ops.append(SymmOp.from_rotation_and_translation(rot, trans))
    return ops
def newchild(self, chld=False):
    """Like givebirth(), but also appends the new child to the list of
    children.

    NOTE(review): when *chld* is a list, each element's parent is set but
    the list object itself is appended to ``children`` as a single entry —
    confirm this is intended.
    """
    if not chld:
        chld = self.givebirth()
    as_list = chld if type(chld) is list else [chld]
    for child in as_list:
        child.parent = self
    self.children.append(chld)
    return chld
def user_can_edit_news(user):
    """Check if the user has permission to edit any of the registered
    NewsItem types.

    Active superusers can edit news iff any news types are registered;
    other users need an add/change/delete permission on some type.
    """
    newsitem_models = [model.get_newsitem_model() for model in NEWSINDEX_MODEL_CLASSES]
    if user.is_active and user.is_superuser:
        return bool(newsitem_models)
    return any(
        user.has_perm(perm)
        for NewsItem in newsitem_models
        for perm in format_perms(NewsItem, ['add', 'change', 'delete']))
def next(self):
    """Return next line without end of line marker or raise StopIteration.

    Also tallies CRLF vs LF line endings (recording up to 5 example line
    numbers of each) and reports invalid line endings and invalid line
    separators to ``self._problems``.
    """
    try:
        next_line = next(self._f)
    except StopIteration:
        self._FinalCheck()
        raise
    self._line_number += 1
    # Capture the trailing run of CR/LF bytes, if any.
    m_eol = re.search(r"[\x0a\x0d]*$", next_line)
    if m_eol.group() == "\x0d\x0a":
        self._crlf += 1
        if self._crlf <= 5:
            self._crlf_examples.append(self._line_number)
    elif m_eol.group() == "\x0a":
        self._lf += 1
        if self._lf <= 5:
            self._lf_examples.append(self._line_number)
    elif m_eol.group() == "":
        # Should only happen at the end of the file
        try:
            next(self._f)
            raise RuntimeError("Unexpected row without new line sequence")
        except StopIteration:
            # Will be raised again when EndOfLineChecker.next() is next called
            pass
    else:
        # NOTE(review): 'string_escape' is a Python 2-only codec; this
        # branch would fail on Python 3 ('unicode_escape' is the analogue).
        self._problems.InvalidLineEnd(
            codecs.getencoder('string_escape')(m_eol.group())[0],
            (self._name, self._line_number))
    next_line_contents = next_line[0:m_eol.start()]
    for seq, name in INVALID_LINE_SEPARATOR_UTF8.items():
        if next_line_contents.find(seq) != -1:
            self._problems.OtherProblem(
                "Line contains %s" % name,
                context=(self._name, self._line_number))
    return next_line_contents
def autodecode(b):
    """Try to decode ``bytes`` to text - try default encoding first,
    otherwise try to autodetect.

    Args:
        b (bytes): byte string
    Returns:
        str: decoded text string
    """
    import warnings
    try:
        return b.decode()
    except UnicodeError:
        # Lazy import: chardet is only needed on the fallback path.
        import chardet
        result = chardet.detect(b)
        if result['confidence'] < 0.95:
            warnings.warn('autodecode failed with utf-8; guessing %s' % result['encoding'])
        # BUG FIX: decode the original bytes; the original called .decode on
        # the chardet result dict, which raises AttributeError.
        return b.decode(result['encoding'])
def probe(path):
    """Probe a repository for its type.

    :param str path: The path of the repository
    :raises UnknownVCSType: if the repository type couldn't be inferred
    :returns str: either ``git``, ``hg``, or ``svn``

    This function employs some heuristics to guess the type of the
    repository, checked in order: working-tree git, mercurial, bare git
    layout, subversion layout.
    """
    import os
    from .common import UnknownVCSType

    def _dir(name):
        return os.path.isdir(os.path.join(path, name))

    def _file(name):
        return os.path.isfile(os.path.join(path, name))

    if _dir('.git'):
        return 'git'
    if _dir('.hg'):
        return 'hg'
    if _file('config') and _dir('objects') and _dir('refs') and _dir('branches'):
        # Bare git repository layout.
        return 'git'
    if _file('format') and _dir('conf') and _dir('db') and _dir('locks'):
        return 'svn'
    raise UnknownVCSType(path)
def insert_before(self, sibling, row=None):
    """insert_before(sibling, row=None)

    :param sibling: A valid :obj:`Gtk.TreeIter`, or :obj:`None`
    :type sibling: :obj:`Gtk.TreeIter` or :obj:`None`
    :param row: a list of values to apply to the newly inserted row or
        :obj:`None`
    :type row: [:obj:`object`] or :obj:`None`
    :returns: :obj:`Gtk.TreeIter` pointing to the new row
    :rtype: :obj:`Gtk.TreeIter`

    Inserts a new row before `sibling`. If `sibling` is :obj:`None`, then
    the row will be appended to the end of the list.

    The row will be empty if `row` is :obj:`None`. To fill in values, you
    need to call :obj:`Gtk.ListStore.set`\\() or
    :obj:`Gtk.ListStore.set_value`\\().

    If `row` isn't :obj:`None` it has to be a list of values which will be
    used to fill the row.
    """
    # Call the base-class implementation unbound to avoid recursing into
    # this override.
    treeiter = Gtk.ListStore.insert_before(self, sibling)
    if row is not None:
        self.set_row(treeiter, row)
    return treeiter
def main(self, x):
    """Transposed FIR structure: multiply-accumulate chain over TAPS."""
    taps = self.TAPS
    last = len(taps) - 1
    self.acc[0] = x * taps[-1]
    for stage in range(1, len(self.acc)):
        # Each stage adds the next (reversed-order) tap product to the
        # previously updated accumulator value.
        self.acc[stage] = self.acc[stage - 1] + x * taps[last - stage]
    self.out = self.acc[-1]
    return self.out
def cholesky(A, ordering_method='default', return_type=RETURN_P_L, use_long=False):
    '''Compute the Cholesky decomposition P A P' = L L'.

    :param A: sparse symmetric positive definite matrix (sorted squared csc)
    :param ordering_method: fill-reducing ordering; must be one of
        CHOLMOD_ORDERING_METHODS
    :param return_type: one of RETURN_L, RETURN_L_D, RETURN_P_L,
        RETURN_P_L_D selecting which factors are returned
    :param use_long: use 64-bit indices (retried automatically on overflow)
    :returns: tuple of factors according to *return_type*
    :raises ValueError: for an unknown ordering_method/return_type or an
        unsupported combination
    :raises util.math.matrix.NoPositiveDefiniteMatrixError: if A is not
        positive definite
    '''
    logger.debug('Calculating cholesky decomposition for matrix {!r} with ordering method {}, return type {} and use_long {}.'.format(A, ordering_method, return_type, use_long))
    ## check input
    return_types = (RETURN_L, RETURN_L_D, RETURN_P_L, RETURN_P_L_D)
    if ordering_method not in CHOLMOD_ORDERING_METHODS:
        raise ValueError('Unknown ordering method {}. Only values in {} are supported.'.format(ordering_method, CHOLMOD_ORDERING_METHODS))
    if return_type not in return_types:
        raise ValueError('Unknown return type {}. Only values in {} are supported.'.format(return_type, return_types))
    if ordering_method != 'natural' and return_type in (RETURN_L, RETURN_L_D):
        # Without the permutation, the factor only matches A for the
        # identity ("natural") ordering.
        raise ValueError('Return type {} is only supported for "natural" ordering method.'.format(return_type))
    # TODO symmetry check
    A = util.math.sparse.check.sorted_squared_csc(A)
    ## calculate cholesky decomposition
    try:
        try:
            f = sksparse.cholmod.cholesky(A, ordering_method=ordering_method, use_long=use_long)
        except sksparse.cholmod.CholmodTooLargeError as e:
            if not use_long:
                warnings.warn('Problem too large for int, switching to long.')
                return cholesky(A, ordering_method=ordering_method, return_type=return_type, use_long=True)
            else:
                raise
    except sksparse.cholmod.CholmodNotPositiveDefiniteError as e:
        raise util.math.matrix.NoPositiveDefiniteMatrixError(A, 'Row/column {} makes matrix not positive definite.'.format(e.column))
    del A
    ## calculate permutation matrix
    p = f.P()
    n = len(p)
    if return_type in (RETURN_P_L, RETURN_P_L_D):
        P = scipy.sparse.dok_matrix((n, n), dtype=np.int8)
        for i in range(n):
            P[i, p[i]] = 1
        P = P.tocsr()
        # BUG FIX: astype returns a new matrix; the original discarded the
        # result, making the call a no-op.
        P = P.astype(np.int8)
    ## return P, L
    if return_type in (RETURN_L, RETURN_P_L):
        L = f.L().tocsr()
        if return_type == RETURN_L:
            # 'natural' ordering was enforced above, so p must be identity.
            assert np.all(p == np.arange(n))
            logger.debug('Returning lower triangular matrix {!r}.'.format(L))
            return (L,)
        else:
            logger.debug('Returning permutation matrix {!r} and lower triangular matrix {!r}.'.format(P, L))
            return (P, L)
    ## return P, L, D
    if return_type in (RETURN_L_D, RETURN_P_L_D):
        L, D = f.L_D()
        # Do not use f.L_D() -> higher memory consumption
        # LD = f.LD()
        if return_type == RETURN_L_D:
            # BUG FIX: the original passed (P, L, D) to a two-placeholder
            # format — logging P (unbound in this branch, a NameError) and
            # L instead of L and D.
            logger.debug('Returning lower triangular matrix {!r} and diagonal matrix {!r}.'.format(L, D))
            return (L, D)
        else:
            logger.debug('Returning permutation matrix {!r}, lower triangular matrix {!r} and diagonal matrix {!r}.'.format(P, L, D))
            return (P, L, D)
def add(name, device):
    '''Add new device to RAID array.

    CLI Example:

    .. code-block:: bash

        salt '*' raid.add /dev/md0 /dev/sda1
    '''
    # Delegate to mdadm in manage mode; success is a zero exit status.
    command = 'mdadm --manage {0} --add {1}'.format(name, device)
    return __salt__['cmd.retcode'](command) == 0
def _flush_batch_incr_counter ( self ) :
"""Increments any unflushed counter values .""" | for key , count in six . iteritems ( self . _counter_dict ) :
if count == 0 :
continue
args = list ( key ) + [ count ]
self . _incr_counter ( * args )
self . _counter_dict [ key ] = 0 |
def get_all_reserved_instances_offerings(self, reserved_instances_id=None, instance_type=None, availability_zone=None, product_description=None, filters=None):
    """Describes Reserved Instance offerings that are available for purchase.

    :type reserved_instances_id: str
    :param reserved_instances_id: Only show offerings with these IDs.

    :type instance_type: str
    :param instance_type: Only show offerings of this instance type.

    :type availability_zone: str
    :param availability_zone: Only show offerings in this Availability Zone.

    :type product_description: str
    :param product_description: Only show offerings with this product
        description.

    :type filters: dict
    :param filters: Optional request filters (name -> value); the set of
        allowable names/values depends on the request - see the EC2 API
        guide for details.

    :rtype: list
    :return: A list of
        :class:`boto.ec2.reservedinstance.ReservedInstancesOffering`
    """
    params = {}
    # Only truthy arguments are forwarded, mirroring the API's optional
    # request parameters.
    optional = (('ReservedInstancesId', reserved_instances_id),
                ('InstanceType', instance_type),
                ('AvailabilityZone', availability_zone),
                ('ProductDescription', product_description))
    for param_name, param_value in optional:
        if param_value:
            params[param_name] = param_value
    if filters:
        self.build_filter_params(params, filters)
    return self.get_list('DescribeReservedInstancesOfferings', params,
                         [('item', ReservedInstancesOffering)], verb='POST')
def get_fine_tune_model(symbol, arg_params, num_classes, layer_name, dtype='float32'):
    """Build a fine-tuning network from a pre-trained symbol.

    symbol: the pre-trained network symbol
    arg_params: the argument parameters of the pre-trained model
    num_classes: the number of classes for the fine-tune dataset
    layer_name: the layer name before the last fully-connected layer
    dtype: 'float32' or 'float16'; float16 activations are cast back to
        float32 before the softmax.
    """
    internals = symbol.get_internals()
    net = internals[layer_name + '_output']
    net = mx.symbol.FullyConnected(data=net, num_hidden=num_classes, name='fc')
    if dtype == 'float16':
        # Softmax is computed in float32 for numerical stability.
        net = mx.sym.Cast(data=net, dtype=np.float32)
    net = mx.symbol.SoftmaxOutput(data=net, name='softmax')
    # Drop pre-trained weights belonging to any 'fc' layer so the new
    # classification head starts from a fresh initialization.
    kept_args = {k: v for k, v in arg_params.items() if 'fc' not in k}
    return (net, kept_args)
def element_info(cls_or_slf, node, siblings, level, value_dims):
    """Return the information summary for an Element.

    The summary is the component type followed by the key dimension
    names in brackets and, when ``value_dims`` is true, the value
    dimension names in parentheses, separated by the printer's tab.
    Returns a ``(level, [(level, info)])`` pair for the tree printer.
    """
    parts = [cls_or_slf.component_type(node)]
    if node.kdims:
        parts.append('[%s]' % ','.join(dim.name for dim in node.kdims))
    if value_dims and node.vdims:
        parts.append('(%s)' % ','.join(dim.name for dim in node.vdims))
    info = cls_or_slf.tab.join(parts)
    return level, [(level, info)]
def get_autoflow(cls, obj, name):
    """Return the autoflow tensor-storage dict kept on ``obj`` under ``name``.

    The storage dict lives on ``obj`` as an attribute named with the
    autoflow prefix plus ``name``; it is created and attached on first
    access (or when still empty).

    :param obj: target GPflow object.
    :param name: unique part of the autoflow attribute's name.
    :raises ValueError: if ``name`` is not a string.
    """
    if not isinstance(name, str):
        raise ValueError('Name must be string.')
    attribute_name = cls.__autoflow_prefix__ + name
    storage = misc.get_attribute(obj, attribute_name, allow_fail=True, default={})
    if not storage:
        # First access: make sure the dict is attached to the object so
        # later lookups find the same storage.
        setattr(obj, attribute_name, storage)
    return storage
def pack_value(self, val):
    """Pack a sequence of values as big-endian 16-bit integers.

    ``val`` may be a bytes object (each byte becomes one 16-bit value)
    or a sequence of integers.  Returns ``(data, length, None)``.  When
    ``self.pad`` is set, an odd number of items is followed by two NUL
    bytes so the payload stays 32-bit aligned.
    """
    if isinstance(val, bytes):
        # assumes iterbytes yields one int per byte -- TODO confirm
        val = list(iterbytes(val))
    length = len(val)
    padding = b'\0\0' * (length % 2) if self.pad else b''
    packed = struct.pack('>' + 'H' * length, *val)
    return packed + padding, length, None
def getChecked(self):
    """Gets the checked attributes

    :returns: list<str> -- checked attribute names
    """
    layout = self.layout()
    # Collect the text of every checked widget in the layout, in order.
    widgets = (layout.itemAt(i).widget() for i in range(layout.count()))
    return [str(w.text()) for w in widgets if w.isChecked()]
def update_notification_settings(self, api_token, event, service, should_notify):
    """Update a user's notification settings.

    :param api_token: The user's login api_token.
    :type api_token: str
    :param event: Update the notification settings of this event.
    :type event: str
    :param service: ``email`` or ``push``
    :type service: str
    :param should_notify: If ``0`` notify, otherwise do not.
    :type should_notify: int
    :return: The HTTP response to the request.
    :rtype: :class:`requests.Response`
    """
    request_data = {
        'token': api_token,
        'notification_type': event,
        'service': service,
        'dont_notify': should_notify,
    }
    return self._post('update_notification_setting', request_data)
def request_update(self, context):
    """Requests a sink info update (sink_info_cb is called)"""
    # Fire the asynchronous query for the current sink; only the callback
    # side effect matters, so the operation handle is released right away.
    operation = pa_context_get_sink_info_by_name(
        context, self.current_sink.encode(), self._sink_info_cb, None)
    pa_operation_unref(operation)
def plot_graph_folium(G, graph_map=None, popup_attribute=None, tiles='cartodbpositron', zoom=1, fit_bounds=True, edge_color='#333333', edge_width=5, edge_opacity=1):
    """Plot a graph on an interactive folium web map.

    Note that anything larger than a small city can take a long time to
    plot and create a large web map file that is very slow to load.

    Parameters
    ----------
    G : networkx multidigraph
    graph_map : folium.folium.Map
        if not None, plot the graph on this preexisting folium map object
    popup_attribute : string
        edge attribute to display in a pop-up when an edge is clicked
    tiles : string
        name of a folium tileset
    zoom : int
        initial zoom level for the map
    fit_bounds : bool
        if True, fit the map to the boundaries of the graph's edges
    edge_color : string
    edge_width : numeric
    edge_opacity : numeric

    Returns
    -------
    graph_map : folium.folium.Map
    """
    # folium is an optional dependency; fail loudly if it is missing.
    if not folium:
        raise ImportError('The folium package must be installed to use this optional feature.')
    # Only the edges are drawn; build their GeoDataFrame once.
    edges = graph_to_gdfs(G, nodes=False, fill_edge_geometry=True)
    # Center the map on the centroid of all edge geometries.
    centroid_x, centroid_y = edges.unary_union.centroid.xy
    center = (centroid_y[0], centroid_x[0])
    if graph_map is None:
        graph_map = folium.Map(location=center, zoom_start=zoom, tiles=tiles)
    # Add one polyline per edge row.
    for _, edge in edges.iterrows():
        polyline = make_folium_polyline(edge=edge, edge_color=edge_color,
                                        edge_width=edge_width,
                                        edge_opacity=edge_opacity,
                                        popup_attribute=popup_attribute)
        polyline.add_to(graph_map)
    if fit_bounds:
        # total_bounds is (minx, miny, maxx, maxy); folium expects
        # [southwest, northeast] as (lat, lng) pairs.
        minx, miny, maxx, maxy = edges.total_bounds
        graph_map.fit_bounds([(miny, minx), (maxy, maxx)])
    return graph_map
def disassemble(nex):
    """Given a NumExpr object, return a list which is the program disassembled.

    Each element of the returned list is an ``(op, dest, arg1, arg2)``
    tuple, one per 4-byte instruction in ``nex.program``.
    """
    # Invert the opcode table so instruction bytes map back to op names.
    rev_opcodes = {}
    for op in interpreter.opcodes:
        rev_opcodes[interpreter.opcodes[op]] = op
    # Register layout: r0 is the output, then one register per signature
    # input, then the constants, then the temporaries.
    r_constants = 1 + len(nex.signature)
    r_temps = r_constants + len(nex.constants)

    def getArg(pc, offset):
        # Decode the operand at position ``offset`` (1..3) of the
        # instruction starting at byte ``pc``.
        if sys.version_info[0] < 3:
            arg = ord(nex.program[pc + offset])
            op = rev_opcodes.get(ord(nex.program[pc]))
        else:
            # Indexing a bytes object already yields ints on Python 3.
            arg = nex.program[pc + offset]
            op = rev_opcodes.get(nex.program[pc])
        try:
            # The op name encodes operand kinds after an underscore
            # (e.g. b'add_ddd'); pick the kind letter for this operand.
            code = op.split(b'_')[1][offset - 1]
        except IndexError:
            # Fewer kind letters than operand slots: this slot is unused.
            return None
        if sys.version_info[0] > 2:
            # int.to_bytes is not available in Python < 3.2
            # code = code.to_bytes(1, sys.byteorder)
            code = bytes([code])
        if arg == 255:
            # 255 marks an unused operand.
            return None
        if code != b'n':
            # Register operand: classify by the register-number ranges above.
            if arg == 0:
                return b'r0'
            elif arg < r_constants:
                return ('r%d[%s]' % (arg, nex.input_names[arg - 1])).encode('ascii')
            elif arg < r_temps:
                return ('c%d[%s]' % (arg, nex.constants[arg - r_constants])).encode('ascii')
            else:
                return ('t%d' % (arg,)).encode('ascii')
        else:
            # 'n' kind: the operand is a literal number; return it as-is.
            return arg

    source = []
    # Instructions are fixed-width: one opcode byte plus three operand bytes.
    for pc in range(0, len(nex.program), 4):
        if sys.version_info[0] < 3:
            op = rev_opcodes.get(ord(nex.program[pc]))
        else:
            op = rev_opcodes.get(nex.program[pc])
        dest = getArg(pc, 1)
        arg1 = getArg(pc, 2)
        arg2 = getArg(pc, 3)
        source.append((op, dest, arg1, arg2))
    return source
def guess_path_encoding(file_path, default=DEFAULT_ENCODING):
    """Guess the character encoding of the file at ``file_path``.

    Convenience wrapper around ``guess_file_encoding`` that takes care
    of opening (and closing) the file in binary mode.
    """
    with io.open(file_path, 'rb') as file_handle:
        return guess_file_encoding(file_handle, default=default)
def dynamicmap_memoization(callable_obj, streams):
    """Temporarily disable memoization on ``callable_obj`` when required.

    Context-manager generator used when a DynamicMap invokes its
    Callable: memoization is switched off for the duration of the block
    if any of the supplied streams is transient and currently in a
    triggered state, and the previous memoization flag is restored
    afterwards (even on error).

    :param callable_obj: object carrying a ``_stream_memoization`` flag.
    :param streams: stream objects exposing ``transient`` and
        ``_triggering`` attributes.
    """
    saved_state = bool(callable_obj._stream_memoization)
    callable_obj._stream_memoization &= not any(
        s.transient and s._triggering for s in streams)
    # The original carried a bare ``except: raise`` before the finally
    # clause; it was redundant -- ``finally`` alone already restores the
    # flag on any exception while letting it propagate.
    try:
        yield
    finally:
        callable_obj._stream_memoization = saved_state
def schema(self):
    """The DQL syntax for creating this item"""
    # Base form: "<name> <type> <index-type> <key-type>('<index-name>'"
    definition = "%s %s %s %s('%s'" % (self.name, self.data_type,
                                       self.index_type, self.key_type,
                                       self.index_name)
    if self.includes is not None:
        # Append the projected attributes as a quoted list.
        include_list = ", ".join("'%s'" % attr for attr in self.includes)
        definition += ", [%s]" % include_list
    return definition + ")"
def getChangeSets(self):
    """Get all the ChangeSets of this workitem

    :return: a :class:`list` containing all the
        :class:`rtcclient.models.ChangeSet` objects
    :rtype: list
    """
    # The customized attribute identifies change sets in the RTC API.
    return self.rtc_obj._get_paged_resources(
        "ChangeSet",
        workitem_id=self.identifier,
        customized_attr=("rtc_cm:com.ibm.team.filesystem.workitems."
                         "change_set.com.ibm.team.scm.ChangeSet"),
        page_size="10")
def _print_message ( self , text , fd = None ) :
'''Note : this overrides an existing method in ArgumentParser''' | # Since we have the async - > sync - > async problem , queue up and print at exit
self . root . mesgs . extend ( text . split ( '\n' ) ) |
def signature_validate_single(signature, error=None):
    "is signature a single valid type."
    error, my_error = _get_error(error)
    # A nonzero return from libdbus means the signature is valid.
    valid = dbus.dbus_signature_validate_single(signature.encode(), error._dbobj)
    result = valid != 0
    my_error.raise_if_set()
    return result
async def read(self, n=None):
    """Read all content"""
    # Once the body has been streamed there is nothing left to return.
    if self._streamed:
        return b''
    chunks = []
    async for chunk in self:
        chunks.append(chunk)
    return b''.join(chunks)
def to_dlpack_for_read(data):
    """Returns a reference view of NDArray that represents as
    DLManagedTensor until all previous write operations on the current
    array are finished.

    Parameters
    ----------
    data : NDArray
        input data.

    Returns
    -------
    PyCapsule (the pointer of DLManagedTensor)
        a reference view of NDArray that represents as DLManagedTensor.
    """
    # Block until pending writes complete so the view observes them.
    data.wait_to_read()
    handle = DLPackHandle()
    check_call(_LIB.MXNDArrayToDLPack(data.handle, ctypes.byref(handle)))
    return ctypes.pythonapi.PyCapsule_New(handle, _c_str_dltensor, _c_dlpack_deleter)
def list_entries(self, projects, filter_=None, order_by=None, page_size=None, page_token=None):
    """Return a page of log entry resources.

    See
    https://cloud.google.com/logging/docs/reference/v2/rest/v2/entries/list

    :type projects: list of strings
    :param projects: project IDs to include.
    :type filter_: str
    :param filter_: advanced-filters expression.
    :type order_by: str
    :param order_by: ``ASCENDING`` or ``DESCENDING``.
    :type page_size: int
    :param page_size: maximum number of entries per page.
    :type page_token: str
    :param page_token: opaque marker for the next "page" of entries.
    :rtype: :class:`~google.api_core.page_iterator.Iterator`
    :returns: iterator of accessible log entries.
    """
    extra_params = {"projectIds": projects}
    # Only explicitly-provided options are forwarded.
    for param_name, param_value in (("filter", filter_),
                                    ("orderBy", order_by),
                                    ("pageSize", page_size)):
        if param_value is not None:
            extra_params[param_name] = param_value
    # Loggers are created lazily by entry_from_resource; share a single
    # mutable cache so the same Logger object is reused across entries.
    loggers = {}
    iterator = page_iterator.HTTPIterator(
        client=self._client,
        api_request=self._client._connection.api_request,
        path="/entries:list",
        item_to_value=functools.partial(_item_to_entry, loggers=loggers),
        items_key="entries",
        page_token=page_token,
        extra_params=extra_params,
    )
    # This method uses POST to make a read-only request.
    iterator._HTTP_METHOD = "POST"
    return iterator
def orbit(self, x1_px, y1_px, x2_px, y2_px):
    """Causes the camera to "orbit" around the target point.
    This is also called "tumbling" in some software packages."""
    # Convert the drag distance in pixels into rotation angles.
    pixels_per_degree = self.vport_radius_px / float(self.orbit_speed)
    radians_per_pixel = 1.0 / pixels_per_degree * np.pi / 180.0
    target_to_pos = self.position - self.target
    # Horizontal drag: rotate about the ground (world-up) axis.
    yaw = Matrix4x4.rotation_around_origin(
        (x1_px - x2_px) * radians_per_pixel, self.ground)
    target_to_pos = yaw * target_to_pos
    self.up = yaw * self.up
    # Vertical drag: rotate about the camera's right axis (up x view).
    right = (self.up ^ target_to_pos).normalized()
    pitch = Matrix4x4.rotation_around_origin(
        (y1_px - y2_px) * radians_per_pixel, right)
    target_to_pos = pitch * target_to_pos
    self.up = pitch * self.up
    self.position = self.target + target_to_pos
def add_match_rules(self, match_rules):
    """Add the given match rules to the ruleset.

    Handles a single rule or a list of rules.

    :param match_rules: Object representing YAML section from config
        file -- either one rule dict or a list of rule dicts.
    :return:

    Example match_rules object:
        [{'filename-starts-with': 'abc'}, {'filename-ends-with': 'xyz'}]
    """
    # isinstance is the idiomatic (and subclass-friendly) type check,
    # unlike the original ``type(match_rules) == list`` comparison.
    if isinstance(match_rules, list):
        for rule in match_rules:
            self.add_match_rule(rule)
    else:
        # Handle a single rule being passed in that's not in a list.
        self.add_match_rule(match_rules)
def get_next_tag(cls, el):
    """Get next sibling tag.

    Walks the ``next_sibling`` chain from ``el`` and returns the first
    sibling for which ``cls.is_tag`` is true, or ``None`` when the chain
    ends without one.
    """
    sibling = el.next_sibling
    # Check for the end of the chain *before* calling is_tag, so is_tag
    # is never invoked on None (the original evaluated is_tag(None) on
    # the final iteration and relied on it handling None gracefully).
    while sibling is not None and not cls.is_tag(sibling):
        sibling = sibling.next_sibling
    return sibling
def angle_between_vectors(x, y):
    """Compute the angle between vector x and y.

    Returns the angle in degrees.  0 is returned when the dot product is
    zero (orthogonal vectors), preserving the original short-circuit.
    """
    dp = dot_product(x, y)
    if dp == 0:
        return 0
    xm = magnitude(x)
    ym = magnitude(y)
    # Clamp the cosine into [-1, 1]: floating-point rounding can push
    # dp/(xm*ym) fractionally outside acos' domain for (anti)parallel
    # vectors, which would raise ValueError.
    cosine = max(-1.0, min(1.0, dp / (xm * ym)))
    return math.acos(cosine) * (180. / math.pi)
def load_csv_data(fname, tag):
    """Load data from a comma separated SuperMAG file

    Parameters
    ----------
    fname : (str)
        CSV SuperMAG file name
    tag : (str)
        Denotes type of file to load.  Accepted types are 'indices', 'all',
        'stations', and '' (for just magnetometer measurements).

    Returns
    -------
    data : (pandas.DataFrame)
        Pandas DataFrame
    """
    import re
    if tag == "stations":
        # Because there may be multiple operators, the default pandas reader
        # cannot be used.
        ddict = dict()
        dkeys = list()
        date_list = list()
        # Open and read the file
        with open(fname, "r") as fopen:
            # The file name is assumed to end in "_<year>.<ext>"; that
            # year becomes the timestamp for every station row.
            dtime = pds.datetime.strptime(fname.split("_")[-1].split(".")[0], "%Y")
            for fline in fopen.readlines():
                # Split on commas/newlines, dropping empty fields.
                sline = [ll for ll in re.split(r'[,\n]+', fline) if len(ll) > 0]
                if len(ddict.items()) == 0:
                    # First line: header.  Column names become dict keys
                    # (dashes replaced so they are valid identifiers).
                    for kk in sline:
                        kk = re.sub("-", "_", kk)
                        ddict[kk] = list()
                        dkeys.append(kk)
                else:
                    date_list.append(dtime)
                    for i, ll in enumerate(sline):
                        if i >= 1 and i <= 4:
                            # Columns 1-4 are numeric (presumably station
                            # coordinates -- TODO confirm against format).
                            ddict[dkeys[i]].append(float(ll))
                        elif i == 6:
                            ddict[dkeys[i]].append(int(ll))
                        elif i < len(dkeys):
                            ddict[dkeys[i]].append(ll)
                        else:
                            # Extra fields (multiple operators) are folded
                            # into the last column, space separated.
                            ddict[dkeys[-1]][-1] += " {:s}".format(ll)
        # Create a data frame for this file
        data = pds.DataFrame(ddict, index=date_list, columns=ddict.keys())
    else:
        # Define the date parser
        def parse_smag_date(dd):
            # NOTE(review): uses pysat.datetime here but pds.datetime above;
            # both presumably alias datetime.datetime -- confirm.
            return pysat.datetime.strptime(dd, "%Y-%m-%d %H:%M:%S")
        # Load the file into a data frame, parsing column 0 as the index.
        data = pds.read_csv(fname, parse_dates={'datetime': [0]},
                            date_parser=parse_smag_date,
                            index_col='datetime')
    return data
async def running(self):
    """Start websocket connection.

    Connects to the host's websocket endpoint, parses every text frame
    as JSON into ``self._data`` and notifies the owner via
    ``async_session_handler_callback('data')``.  On connection failure
    or unexpected termination, a reconnect is scheduled with
    ``self.retry()`` unless the session was explicitly stopped.
    """
    url = 'http://{}:{}'.format(self.host, self.port)
    try:
        async with self.session.ws_connect(url) as ws:
            self.state = STATE_RUNNING
            async for msg in ws:
                if self.state == STATE_STOPPED:
                    # Stop was requested while we were reading frames.
                    break
                elif msg.type == aiohttp.WSMsgType.TEXT:
                    self._data = json.loads(msg.data)
                    # Signal the owner that fresh data is available.
                    self.async_session_handler_callback('data')
                    _LOGGER.debug('Websocket data: %s', msg.data)
                elif msg.type == aiohttp.WSMsgType.CLOSED:
                    break
                elif msg.type == aiohttp.WSMsgType.ERROR:
                    break
    except aiohttp.ClientConnectorError:
        # Host unreachable; schedule a reconnect attempt.
        if self.state != STATE_STOPPED:
            self.retry()
    except Exception as err:
        _LOGGER.error('Unexpected error %s', err)
        if self.state != STATE_STOPPED:
            self.retry()
    else:
        # Frame loop ended normally (closed/error frame or remote
        # hangup): reconnect unless we were asked to stop.
        if self.state != STATE_STOPPED:
            self.retry()
def errors_handler(self, *custom_filters, exception=None, run_task=None, **kwargs):
    """Decorator for errors handler

    :param exception: you can make handler for specific errors type
    :param run_task: run callback in task (no wait results)
    :return: decorator that registers the callback and returns it unchanged
    """
    def decorator(callback):
        # Wrap first (task scheduling), then register with all filters.
        handler = self._wrap_async_task(callback, run_task)
        self.register_errors_handler(handler, *custom_filters,
                                     exception=exception, **kwargs)
        return callback
    return decorator
def after_model_change(self, form, model, is_created):
    """Save model.

    Post-save hook: configures dynamic knowledge bases from the form's
    collection/expression fields, and stores the uploaded taxonomy file
    for taxonomy knowledge bases.
    """
    super(KnowledgeAdmin, self).after_model_change(form, model, is_created)
    from invenio_collections.models import Collection
    if form.kbtype.data == KnwKB.KNWKB_TYPES['dynamic']:
        id_collection = form.id_collection.data or None
        collection = Collection.query.filter_by(
            id=id_collection).one() if id_collection else None
        model.set_dyn_config(field=form.output_tag.data,
                             expression=form.search_expression.data,
                             collection=collection)
    if form.kbtype.data == KnwKB.KNWKB_TYPES['taxonomy']:
        if form.tfile.data:
            file_name = model.get_filename()
            file_data = request.files[form.tfile.name].read()
            # FileStorage.read() returns bytes, so the file must be
            # opened in binary mode: 'w' raises TypeError on Python 3
            # and risks newline corruption of the uploaded content.
            with open(file_name, 'wb') as f:
                f.write(file_data)
def interpolate_data(self, data, limit, method):
    """Interpolate dataframe.

    Parameters
    ----------
    data : pd.DataFrame()
        Dataframe to interpolate
    limit : int
        Interpolation limit.
    method : str
        Interpolation method.

    Returns
    -------
    pd.DataFrame()
        Dataframe containing interpolated data
    """
    # Thin pass-through to pandas' interpolation.
    return data.interpolate(how="index", limit=limit, method=method)
def get_sequence(self, chrom, start, end, strand='+', indexing=(-1, 0)):
    """Fetch the bases ``start``..``end`` of ``chrom`` from the indexed fasta.

    chromosome is entered relative to the file it was built with, so it can
    be 'chr11' or '11'.  start/end are coordinates, which default to python
    style [0,1) internally, so positions should be entered with (1,1)
    indexing; this can be changed with the ``indexing`` keyword.  The
    default is for everything to be relative to the positive strand.

    :raises ValueError: if the requested range lies outside the chromosome.
    """
    try:
        divisor = int(self.sequence_index[chrom][2])
    except KeyError:
        # Index may not be loaded yet; build it and retry once.
        self.open_fasta_index()
        try:
            divisor = int(self.sequence_index[chrom][2])
        except KeyError:
            sys.stderr.write("%s cannot be found within the fasta index file.\n" % chrom)
            return ""
    start += indexing[0]
    end += indexing[1]
    # is it a valid position?
    if (start < 0 or end > int(self.sequence_index[chrom][0])):
        raise ValueError("The range %d-%d is invalid. Valid range for this feature is 1-%d." % (start - indexing[0], end - indexing[1], int(self.sequence_index[chrom][0])))
    # go to start of chromosome
    seekpos = int(self.sequence_index[chrom][1])
    # Bug fix: use floor division here.  With '/' this arithmetic yields
    # a float on Python 3 and file.seek() rejects non-integer offsets;
    # '//' preserves the original Python 2 integer semantics on both.
    # The divisor is the fasta line width, so start // divisor counts
    # how many newlines precede the start position.
    seekpos += start + start // divisor
    slen = end - start
    # a hack of sorts but it works and is easy
    endpos = int(slen + (slen // divisor) + 1)
    self.fasta_file.seek(seekpos, 0)
    output = self.fasta_file.read(endpos)
    output = output.replace('\n', '')
    out = output[:slen]
    if strand == '+' or strand == 1:
        return out
    if strand == '-' or strand == -1:
        return _reverse_complement(out)
def scipy_solve_symm_block_tridiag(H_diag, H_upper_diag, v, ab=None):
    """use scipy.linalg.solveh_banded to solve a symmetric block tridiagonal system

    ``ab`` (lower banded storage) may be supplied directly; otherwise it
    is built from the block diagonal / upper-diagonal pair.

    see https://docs.scipy.org/doc/scipy/reference/generated/scipy.linalg.solveh_banded.html
    """
    from scipy.linalg import solveh_banded
    if ab is None:
        ab = convert_block_tridiag_to_banded(H_diag, H_upper_diag)
    # Solve on the flattened right-hand side, then restore its shape.
    solution = solveh_banded(ab, v.ravel(), lower=True)
    return solution.reshape(v.shape)
def merge_obs(self):
    """Match forecasts and observations.

    For every model type/name pair, left-joins the forecast frame
    (keyed by its index) against the observation frame's "Step_ID"
    column and stores the result in ``self.matched_forecasts``.
    """
    for model_type in self.model_types:
        merged = {}
        for model_name in self.model_names[model_type]:
            merged[model_name] = pd.merge(self.forecasts[model_type][model_name],
                                          self.obs,
                                          right_on="Step_ID",
                                          how="left",
                                          left_index=True)
        self.matched_forecasts[model_type] = merged
def create_instance(self, **kwargs):
    """Creates a new virtual server instance.

    .. warning:: This will add charges to your account.

    Accepts the same keyword arguments as ``_generate_create_dict``
    (cpus, memory, hourly, hostname, domain, local_disk, datacenter,
    os_code, image_id, dedicated, public_vlan, private_vlan,
    public_security_groups, private_security_groups, disks, post_uri,
    private, ssh_keys, nic_speed, flavor, host_id, ...), plus:

    :param string tags: comma separated list of tags to set on the VS
        after it has been created.
    :return: dict describing the newly created instance.
    """
    # Tags are not part of the ordering template; pull them out first.
    tags = kwargs.pop('tags', None)
    new_instance = self.guest.createObject(self._generate_create_dict(**kwargs))
    if tags is not None:
        # Tags can only be attached once the guest exists.
        self.set_tags(tags, guest_id=new_instance['id'])
    return new_instance
def _register_template(cls, template_bytes):
    '''Registers the template for the widget and hooks init_template

    Scans the class for methods marked as template callbacks and for
    ``_Child`` placeholder attributes, binds them to the Gtk template,
    and wraps ``init_template`` so the bindings are resolved when an
    instance is created.
    '''
    # This implementation won't work if there are nested templates, but
    # we can't do that anyways due to PyGObject limitations so it's ok
    if not hasattr(cls, 'set_template'):
        raise TypeError("Requires PyGObject 3.13.2 or greater")
    cls.set_template(template_bytes)
    bound_methods = set()
    bound_widgets = set()
    # Walk the class, find marked callbacks and child attributes
    for name in dir(cls):
        o = getattr(cls, name, None)
        if inspect.ismethod(o):
            if hasattr(o, '_gtk_callback'):
                bound_methods.add(name)
                # Don't need to call this, as connect_func always gets called
                # cls.bind_template_callback_full(name, o)
        elif isinstance(o, _Child):
            cls.bind_template_child_full(name, True, 0)
            bound_widgets.add(name)
    # Have to setup a special connect function to connect at template init
    # because the methods are not bound yet
    cls.set_connect_func(_connect_func, cls)
    cls.__gtemplate_methods__ = bound_methods
    cls.__gtemplate_widgets__ = bound_widgets
    base_init_template = cls.init_template
    # Replace init_template so instances run our binding logic after the
    # stock template initialization.
    cls.init_template = lambda s: _init_template(s, cls, base_init_template)
def recvall(self, timeout=0.5):
    """Receive the RCON command response

    :param timeout: The timeout between consequent data receive
    :return str: The RCON command response with header stripped out
    """
    response = ''
    # Non-blocking reads let us poll until the server goes quiet.
    self.socket.setblocking(False)
    start = time.time()
    while True:
        # Stop once some data arrived and the line was idle for
        # ``timeout``, or after twice the timeout with no data at all.
        if response and time.time() - start > timeout:
            break
        elif time.time() - start > timeout * 2:
            break
        try:
            data = self.socket.recv(4096)
            if data:
                # Strip the RCON reply header from each received chunk.
                # NOTE(review): assumes text-mode chunks (str); on
                # Python 3 recv returns bytes -- confirm target version.
                response += data.replace(self._rconreplystring, '')
                # Reset the idle clock on every received chunk.
                start = time.time()
            else:
                time.sleep(0.1)
        except socket.error:
            # No data ready on the non-blocking socket; keep polling.
            pass
    return response.strip()
def inheritanceTree(self):
    """Returns the inheritance tree for this schema, traversing up the
    hierarchy for the inherited schema instances.

    :return: <generator>
    """
    ancestor_name = self.inherits()
    while ancestor_name:
        ancestor = orb.system.schema(ancestor_name)
        if not ancestor:
            raise orb.errors.ModelNotFound(schema=ancestor_name)
        yield ancestor
        ancestor_name = ancestor.inherits()
def _get_neighbor_conf(neigh_ip_address):
    """Returns neighbor configuration for given neighbor ip address.

    Raises exception if no neighbor with `neigh_ip_address` exists.
    """
    neigh_conf = CORE_MANAGER.neighbors_conf.get_neighbor_conf(neigh_ip_address)
    if not neigh_conf:
        raise RuntimeConfigError(
            desc='No Neighbor configuration with IP'
                 ' address %s' % neigh_ip_address)
    # Sanity check: the configuration store must hand back NeighborConf.
    assert isinstance(neigh_conf, NeighborConf)
    return neigh_conf
def get_distance(F, x):
    """Helper function for margin-based loss. Return a distance matrix given a matrix.

    Computes the pairwise Euclidean distances between the rows of ``x``
    via the expansion |a-b|^2 = |a|^2 + |b|^2 - 2 a.b; the identity is
    added before the square root so the zero diagonal stays sqrt-safe.
    """
    n = x.shape[0]
    row_norms = F.sum(x ** 2.0, axis=1, keepdims=True)
    squared = row_norms + row_norms.transpose() - (2.0 * F.dot(x, x.transpose()))
    # Adding identity to make sqrt work.
    return F.sqrt(squared + F.array(np.identity(n)))
def IsAllSpent(self):
    """Flag indicating if all balance is spend.

    Returns:
        bool: True when no item is still in the Confirmed state.
    """
    return not any(item == CoinState.Confirmed for item in self.Items)
def is_identity(self):
    """If `self` is I, returns True, otherwise False."""
    # No terms at all counts as the identity.
    if not self.terms:
        return True
    if len(self.terms) != 1:
        return False
    only_term = self.terms[0]
    # A single operator-free term with unit coefficient is the identity.
    return not only_term.ops and only_term.coeff == 1.0
def evaluate(self, values):
    """Evaluate the "OR" expression.

    The right side is only evaluated when the left is falsy, and the raw
    operand value (not a coerced bool) is returned -- exactly the
    semantics of Python's ``or`` operator.
    """
    left_result = self.left.evaluate(values)
    if left_result:
        return left_result
    return self.right.evaluate(values)
def _LookUpSeasonDirectory ( self , showID , showDir , seasonNum ) :
"""Look up season directory . First attempt to find match from database ,
otherwise search TV show directory . If no match is found in the database
the user can choose to accept a match from the TV show directory , enter
a new directory name to use or accept an autogenerated name .
Parameters
showID : int
Show ID number
showDir : string
Path to show file directory
seasonNum : int
Season number
Returns
string
Name of season directory to use . This can be a blank string to
use the root show directory , an autogenerated string or a user
given string .""" | goodlogging . Log . Info ( "RENAMER" , "Looking up season directory for show {0}" . format ( showID ) )
goodlogging . Log . IncreaseIndent ( )
# Look up existing season folder from database
seasonDirName = self . _db . SearchSeasonDirTable ( showID , seasonNum )
if seasonDirName is not None :
goodlogging . Log . Info ( "RENAMER" , "Found season directory match from database: {0}" . format ( seasonDirName ) )
else : # Look up existing season folder in show directory
goodlogging . Log . Info ( "RENAMER" , "Looking up season directory (Season {0}) in {1}" . format ( seasonNum , showDir ) )
if os . path . isdir ( showDir ) is False :
goodlogging . Log . Info ( "RENAMER" , "Show directory ({0}) is not an existing directory" . format ( showDir ) )
seasonDirName = self . _CreateNewSeasonDir ( seasonNum )
else :
matchDirList = [ ]
for dirName in os . listdir ( showDir ) :
subDir = os . path . join ( showDir , dirName )
if os . path . isdir ( subDir ) :
seasonResult = re . findall ( "Season" , dirName )
if len ( seasonResult ) > 0 :
numResult = re . findall ( "[0-9]+" , dirName )
numResult = set ( numResult )
if len ( numResult ) == 1 :
if int ( numResult . pop ( ) ) == int ( seasonNum ) :
matchDirList . append ( dirName )
if self . _skipUserInput is True :
if len ( matchDirList ) == 1 :
userAcceptance = matchDirList [ 0 ]
goodlogging . Log . Info ( "RENAMER" , "Automatic selection of season directory: {0}" . format ( seasonDirName ) )
else :
userAcceptance = None
goodlogging . Log . Info ( "RENAMER" , "Could not make automatic selection of season directory" )
else :
listDirPrompt = "enter 'ls' to list all items in show directory"
userAcceptance = util . UserAcceptance ( matchDirList , promptComment = listDirPrompt , xStrOverride = "to create new season directory" )
if userAcceptance in matchDirList :
seasonDirName = userAcceptance
elif userAcceptance is None :
seasonDirName = self . _CreateNewSeasonDir ( seasonNum )
else :
recursiveSelectionComplete = False
promptOnly = False
dirLookup = userAcceptance
while recursiveSelectionComplete is False :
dirList = os . listdir ( showDir )
if dirLookup . lower ( ) == 'ls' :
dirLookup = ''
promptOnly = True
if len ( dirList ) == 0 :
goodlogging . Log . Info ( "RENAMER" , "Show directory is empty" )
else :
goodlogging . Log . Info ( "RENAMER" , "Show directory contains: {0}" . format ( ', ' . join ( dirList ) ) )
else :
matchDirList = util . GetBestMatch ( dirLookup , dirList )
response = util . UserAcceptance ( matchDirList , promptComment = listDirPrompt , promptOnly = promptOnly , xStrOverride = "to create new season directory" )
promptOnly = False
if response in matchDirList :
seasonDirName = response
recursiveSelectionComplete = True
elif response is None :
seasonDirName = self . _CreateNewSeasonDir ( seasonNum )
recursiveSelectionComplete = True
else :
dirLookup = response
# Add season directory to database
if seasonDirName is not None :
self . _db . AddSeasonDirTable ( showID , seasonNum , seasonDirName )
goodlogging . Log . DecreaseIndent ( )
return seasonDirName |
def get_info(self, security_symbols, info_field_codes):
    """Queries data from a /<security_type>/info endpoint.

    Args:
        security_symbols (list): List of string symbols
        info_field_codes (list): List of string info field codes

    Returns:
        dict of the decoded json from server response.

    Notes:
        The max length of any list arg is 100
    """
    symbols = self._str_or_list(security_symbols)
    fields = self._str_or_list(info_field_codes)
    # Build the endpoint path, then issue the request with no extra params.
    return self._get_data(self._build_url_path(symbols, 'info', fields), None)
def find_usage(self):
    """Determine the current usage for each limit of this service,
    and update corresponding Limit via
    :py:meth:`~.AwsLimit._add_current_usage`.
    """
    logger.debug("Checking usage for service %s", self.service_name)
    self.connect()
    self.connect_resource()
    # Discard usage recorded by any previous run before re-counting.
    for lim in self.limits.values():
        lim._reset_usage()
    # Each helper populates usage for its corresponding limit(s).
    self._find_usage_instances()
    self._find_usage_networking_sgs()
    self._find_usage_networking_eips()
    self._find_usage_networking_eni_sg()
    self._find_usage_spot_instances()
    self._find_usage_spot_fleets()
    # Record that usage has been gathered (flag presumably consulted by
    # reporting callers elsewhere — not visible in this chunk).
    self._have_usage = True
    logger.debug("Done checking usage.")
def pandas_dataframe_to_unit_arrays(df, column_units=None):
    """Attach units to data in pandas dataframes and return united arrays.

    Parameters
    ----------
    df : `pandas.DataFrame`
        Data in pandas dataframe.
    column_units : dict
        Dictionary of units to attach to columns of the dataframe. Overrides
        the units attribute if it is attached to the dataframe.

    Returns
    -------
    dict
        Dictionary containing united arrays with keys corresponding to the
        dataframe column names.

    Raises
    ------
    ValueError
        If no units are given and the dataframe has no ``units`` attribute.
    """
    if not column_units:
        try:
            column_units = df.units
        except AttributeError:
            # BUGFIX: message previously referred to a nonexistent
            # 'col_units' argument; the parameter is 'column_units'.
            raise ValueError('No units attribute attached to pandas '
                             'dataframe and column_units not given.')
    # Iterate through columns attaching units if we have them, if not, don't touch it
    res = {}
    for column in df:
        if column in column_units and column_units[column]:
            res[column] = df[column].values * units(column_units[column])
        else:
            res[column] = df[column].values
    return res
def sanity_check_ir_blocks_from_frontend(ir_blocks, query_metadata_table):
    """Assert that IR blocks originating from the frontend do not have nonsensical structure.

    Args:
        ir_blocks: list of BasicBlocks representing the IR to sanity-check
        query_metadata_table: metadata about query locations, consulted by the
            final two registration checks below

    Raises:
        AssertionError, if the IR has unexpected structure. If the IR produced by the front-end
        cannot be successfully and correctly used to generate MATCH or Gremlin due to a bug,
        this is the method that should catch the problem.
    """
    if not ir_blocks:
        raise AssertionError(u'Received no ir_blocks: {}'.format(ir_blocks))
    # Each helper asserts one structural invariant of the IR and raises
    # AssertionError on violation; they are all run unconditionally.
    _sanity_check_fold_scope_locations_are_unique(ir_blocks)
    _sanity_check_no_nested_folds(ir_blocks)
    _sanity_check_query_root_block(ir_blocks)
    _sanity_check_output_source_follower_blocks(ir_blocks)
    _sanity_check_block_pairwise_constraints(ir_blocks)
    _sanity_check_mark_location_preceding_optional_traverse(ir_blocks)
    _sanity_check_every_location_is_marked(ir_blocks)
    _sanity_check_coerce_type_outside_of_fold(ir_blocks)
    # These two checks also consult the query metadata table.
    _sanity_check_all_marked_locations_are_registered(ir_blocks, query_metadata_table)
    _sanity_check_registered_locations_parent_locations(query_metadata_table)
def yield_sequences_in_list(paths):
    """Yield the discrete sequences within paths. This does not try to
    determine if the files actually exist on disk, it assumes you already
    know that.

    Args:
        paths (list[str]): a list of paths

    Yields:
        :obj:`FileSequence`:
    """
    # Map (dirname, basename, ext) -> set of frame strings seen for that key.
    seqs = {}
    _check = DISK_RE.match

    for match in ifilter(None, imap(_check, imap(utils.asString, paths))):
        dirname, basename, frame, ext = match.groups()
        if not basename and not ext:
            continue
        key = (dirname, basename, ext)
        seqs.setdefault(key, set())
        if frame:
            seqs[key].add(frame)

    for (dirname, basename, ext), frames in seqs.iteritems():
        # build the FileSequence behind the scenes, rather than dupe work
        seq = FileSequence.__new__(FileSequence)
        seq._dir = dirname or ''
        seq._base = basename or ''
        seq._ext = ext or ''
        if frames:
            # BUGFIX (dead code): the previous `... if frames else None`
            # ternary was redundant — this branch already guarantees frames
            # is non-empty.
            seq._frameSet = FrameSet(set(imap(int, frames)))
            # Padding is derived from the shortest frame string seen.
            seq._pad = FileSequence.getPaddingChars(min(imap(len, frames)))
        else:
            seq._frameSet = None
            seq._pad = ''
        seq.__init__(str(seq))
        yield seq
def register_deliver_command(self, deliver_func):
    """Add 'deliver' command for transferring a project to another user.

    :param deliver_func: function to run when the user chooses this option
    """
    description = "Initiate delivery of a project to another user. Removes other user's current permissions. " "Send message to D4S2 service to send email and allow access to the project once user " "acknowledges receiving the data."
    # Create the 'deliver' sub-command parser.
    deliver_parser = self.subparsers.add_parser('deliver', description=description)
    add_project_name_or_id_arg(deliver_parser)
    # Recipient: a required, mutually exclusive group filled by the
    # user-arg and email-arg helpers (exactly one must be supplied).
    user_or_email = deliver_parser.add_mutually_exclusive_group(required=True)
    add_user_arg(user_or_email)
    add_email_arg(user_or_email)
    add_share_usernames_arg(deliver_parser)
    add_share_emails_arg(deliver_parser)
    _add_copy_project_arg(deliver_parser)
    _add_resend_arg(deliver_parser, "Resend delivery")
    # Optional, mutually exclusive include/exclude filtering arguments.
    include_or_exclude = deliver_parser.add_mutually_exclusive_group(required=False)
    _add_include_arg(include_or_exclude)
    _add_exclude_arg(include_or_exclude)
    _add_message_file(deliver_parser, "Filename containing a message to be sent with the delivery. " "Pass - to read from stdin.")
    # Dispatch to deliver_func when this sub-command is chosen.
    deliver_parser.set_defaults(func=deliver_func)
def _get_gecos(name):
    '''Retrieve GECOS field info and return it in dictionary form'''
    fields = pwd.getpwnam(name).pw_gecos.split(',', 3)
    if not fields:
        return {}
    # Assign empty strings for any unspecified trailing GECOS fields.
    fields.extend([''] * (4 - len(fields)))
    keys = ('fullname', 'roomnumber', 'workphone', 'homephone')
    return {key: six.text_type(value) for key, value in zip(keys, fields)}
def is_convex(self):
    """Check if a mesh is convex or not.

    Returns
    -------
    is_convex : bool
        Is mesh convex or not
    """
    # An empty mesh is never considered convex.
    return False if self.is_empty else bool(convex.is_convex(self))
def get_zone(self, zone_id, records=True):
    """Get a zone and, optionally, its records.

    :param zone_id: the zone identifier
    :param records: when True, include the zone's resource records
    :returns: A dictionary containing a large amount of information about
        the specified zone.
    """
    mask = 'resourceRecords' if records else None
    return self.service.getObject(id=zone_id, mask=mask)
def step_command_output_should_contain_text(context, text):
    '''EXAMPLE:
    Then the command output should contain "TEXT"
    '''
    expected_text = text
    has_placeholders = "{__WORKDIR__}" in text or "{__CWD__}" in text
    if has_placeholders:
        # Substitute workdir/cwd placeholders with normalized paths.
        expected_text = textutil.template_substitute(
            text,
            __WORKDIR__=posixpath_normpath(context.workdir),
            __CWD__=posixpath_normpath(os.getcwd()),
        )
    actual_output = context.command_result.output
    with on_assert_failed_print_details(actual_output, expected_text):
        textutil.assert_normtext_should_contain(actual_output, expected_text)
def image_augmentation(images, do_colors=False, crop_size=None):
    """Image augmentation: cropping, flipping, and color transforms."""
    crop = [299, 299] if crop_size is None else crop_size
    images = tf.random_crop(images, crop + [3])
    images = tf.image.random_flip_left_right(images)
    if not do_colors:
        return images
    # More augmentation, but might be slow.
    images = tf.image.random_brightness(images, max_delta=32. / 255.)
    images = tf.image.random_saturation(images, lower=0.5, upper=1.5)
    images = tf.image.random_hue(images, max_delta=0.2)
    images = tf.image.random_contrast(images, lower=0.5, upper=1.5)
    return images
def postbuild_arch(self, arch):
    '''Run any post-build tasks for the Recipe. By default, this checks if
    any postbuild_archname methods exist for the archname of the
    current architecture, and runs them if so.
    '''
    hook_name = "postbuild_{}".format(arch.arch)
    hook = getattr(self, hook_name, None)
    if hook is not None:
        hook()
def OSIncludes(self):
    """Microsoft Windows SDK Include directories, as a list of paths."""
    include = os.path.join(self.si.WindowsSdkDir, 'include')
    # Old SDK layouts (VC <= 10.0) only split out the 'gl' headers.
    if self.vc_ver <= 10.0:
        return [include, os.path.join(include, 'gl')]
    # VC 14.0+ nests headers under a versioned subdirectory.
    sdkver = self._sdk_subdir if self.vc_ver >= 14.0 else ''
    return [
        os.path.join(include, '%sshared' % sdkver),
        os.path.join(include, '%sum' % sdkver),
        os.path.join(include, '%swinrt' % sdkver),
    ]