Dataset columns:

    column     type      observed range
    query      string    5 to 1.23k chars
    positive   string    53 to 15.2k chars
    id_        int64     0 to 252k
    task_name  string    87 to 242 chars
    negative   sequence  20 to 553 items

Each record below pairs a natural-language query (a docstring) with a positive (the matching source code), followed by an id_, a task_name (the source URL of the positive), and a negative (a tokenized, non-matching function).
Return either the full or truncated version of a QIIME-formatted taxonomy string.
def split_phylogeny(p, level="s"):
    level = level + "__"
    result = p.split(level)
    return result[0] + level + result[1].split(";")[0]
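A minimal usage sketch (the lineage string below is invented for illustration, not taken from the source):

    tax = "k__Bacteria; p__Firmicutes; c__Bacilli; s__"
    split_phylogeny(tax, level="p")  # -> "k__Bacteria; p__Firmicutes"

With level="p" the string is split on "p__" and everything after the phylum name (up to the next ";") is dropped.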
0
https://github.com/smdabdoub/phylotoast/blob/0b74ef171e6a84761710548501dfac71285a58a3/phylotoast/util.py#L159-L177
[ "def", "listen_user_events", "(", "self", ")", ":", "if", "not", "self", ".", "_user_id", ":", "raise", "AmigoCloudError", "(", "self", ".", "error_msg", "[", "'logged_in_websockets'", "]", ")", "response", "=", "self", ".", "get", "(", "'/me/start_websocket_session'", ")", "websocket_session", "=", "response", "[", "'websocket_session'", "]", "auth_data", "=", "{", "'userid'", ":", "self", ".", "_user_id", ",", "'websocket_session'", ":", "websocket_session", "}", "self", ".", "amigosocket", ".", "emit", "(", "'authenticate'", ",", "auth_data", ")" ]
Check whether the supplied directory path exists and, if not, create it. The method catches OSError exceptions and returns a descriptive message instead of re-raising the error.
def ensure_dir(d):
    if not os.path.exists(d):
        try:
            os.makedirs(d)
        except OSError as oe:
            # should not happen with os.makedirs
            # ENOENT: No such file or directory
            if oe.errno == errno.ENOENT:
                msg = twdd("""One or more directories in the path ({}) do not exist.
                           If you are specifying a new directory for output, please
                           ensure all other directories in the path currently
                           exist.""")
                return msg.format(d)
            else:
                msg = twdd("""An error occurred trying to create the output
                           directory ({}) with message: {}""")
                return msg.format(d, oe.strerror)
1
https://github.com/smdabdoub/phylotoast/blob/0b74ef171e6a84761710548501dfac71285a58a3/phylotoast/util.py#L180-L206
[ "def", "getTotalAssociations", "(", "self", ",", "wifiInterfaceId", "=", "1", ",", "timeout", "=", "1", ")", ":", "namespace", "=", "Wifi", ".", "getServiceType", "(", "\"getTotalAssociations\"", ")", "+", "str", "(", "wifiInterfaceId", ")", "uri", "=", "self", ".", "getControlURL", "(", "namespace", ")", "results", "=", "self", ".", "execute", "(", "uri", ",", "namespace", ",", "\"GetTotalAssociations\"", ",", "timeout", "=", "timeout", ")", "return", "int", "(", "results", "[", "\"NewTotalAssociations\"", "]", ")" ]
Takes either a file path or an open file handle, checks validity, and returns an open file handle or raises an appropriate Exception.
def file_handle(fnh, mode="rU"):
    handle = None
    if isinstance(fnh, file):  # Python 2 built-in file type
        if fnh.closed:
            raise ValueError("Input file is closed.")
        handle = fnh
    elif isinstance(fnh, str):
        handle = open(fnh, mode)
    return handle
2
https://github.com/smdabdoub/phylotoast/blob/0b74ef171e6a84761710548501dfac71285a58a3/phylotoast/util.py#L209-L231
[ "def", "get_batch_size", "(", "batch", ":", "Union", "[", "Dict", ",", "torch", ".", "Tensor", "]", ")", "->", "int", ":", "if", "isinstance", "(", "batch", ",", "torch", ".", "Tensor", ")", ":", "return", "batch", ".", "size", "(", "0", ")", "# type: ignore", "elif", "isinstance", "(", "batch", ",", "Dict", ")", ":", "return", "get_batch_size", "(", "next", "(", "iter", "(", "batch", ".", "values", "(", ")", ")", ")", ")", "else", ":", "return", "0" ]
Find the user-specified categories in the map and create a dictionary to contain the relevant data for each type within the categories. Multiple categories will have their types combined such that each possible combination will have its own entry in the dictionary.
def gather_categories(imap, header, categories=None):
    # If no categories provided, return all SampleIDs
    if categories is None:
        return {"default": DataCategory(set(imap.keys()), {})}
    cat_ids = [header.index(cat) for cat in categories
               if cat in header and "=" not in cat]
    table = OrderedDict()
    conditions = defaultdict(set)
    for i, cat in enumerate(categories):
        if "=" in cat and cat.split("=")[0] in header:
            cat_name = header[header.index(cat.split("=")[0])]
            conditions[cat_name].add(cat.split("=")[1])
    # If invalid categories or conditions identified, return all SampleIDs
    if not cat_ids and not conditions:
        return {"default": DataCategory(set(imap.keys()), {})}
    # If only category column given, return column-wise SampleIDs
    if cat_ids and not conditions:
        for sid, row in imap.items():
            cat_name = "_".join([row[cid] for cid in cat_ids])
            if cat_name not in table:
                table[cat_name] = DataCategory(set(), {})
            table[cat_name].sids.add(sid)
        return table
    # Collect all condition names
    cond_ids = set()
    for k in conditions:
        try:
            cond_ids.add(header.index(k))
        except ValueError:
            continue
    idx_to_test = set(cat_ids).union(cond_ids)
    # If column name and condition given, return overlapping SampleIDs of
    # column and condition combinations
    for sid, row in imap.items():
        if all([row[header.index(c)] in conditions[c] for c in conditions]):
            key = "_".join([row[idx] for idx in idx_to_test])
            if key not in table:
                table[key] = DataCategory(set(), {})
            table[key].sids.add(sid)
    if len(table) == 0:
        return {"default": DataCategory(set(imap.keys()), {})}
    return table
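A minimal usage sketch with an invented mapping table (DataCategory is the sids/results container referenced in the code above):

    header = ["SampleID", "Treatment", "Smoker"]
    imap = {"S1": ["S1", "Control", "Yes"],
            "S2": ["S2", "Fecal", "No"]}
    gather_categories(imap, header, ["Treatment"])
    # -> {"Control": DataCategory(sids={"S1"}, ...),
    #     "Fecal":   DataCategory(sids={"S2"}, ...)}
    gather_categories(imap, header, ["Smoker=Yes"])
    # -> {"Yes": DataCategory(sids={"S1"}, ...)}; only samples whose
    #    Smoker column equals "Yes" are kept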
3
https://github.com/smdabdoub/phylotoast/blob/0b74ef171e6a84761710548501dfac71285a58a3/phylotoast/util.py#L238-L309
[ "def", "_getLPA", "(", "self", ")", ":", "return", "str", "(", "self", ".", "line", ")", "+", "\":\"", "+", "str", "(", "self", ".", "pos", ")", "+", "\":\"", "+", "str", "(", "self", ".", "absPosition", ")" ]
Parses the UniFrac results file into a dictionary.
def parse_unifrac(unifracFN):
    with open(unifracFN, "rU") as uF:
        first = uF.next().split("\t")  # .next(): Python 2 file iteration
        lines = [line.strip() for line in uF]

    unifrac = {"pcd": OrderedDict(), "eigvals": [], "varexp": []}
    if first[0] == "pc vector number":
        return parse_unifrac_v1_8(unifrac, lines)
    elif first[0] == "Eigvals":
        return parse_unifrac_v1_9(unifrac, lines)
    else:
        raise ValueError("File format not supported/recognized. Please check input "
                         "unifrac file.")
4
https://github.com/smdabdoub/phylotoast/blob/0b74ef171e6a84761710548501dfac71285a58a3/phylotoast/util.py#L311-L334
[ "def", "_get_port_speed_price_id", "(", "items", ",", "port_speed", ",", "no_public", ",", "location", ")", ":", "for", "item", "in", "items", ":", "if", "utils", ".", "lookup", "(", "item", ",", "'itemCategory'", ",", "'categoryCode'", ")", "!=", "'port_speed'", ":", "continue", "# Check for correct capacity and if the item matches private only", "if", "any", "(", "[", "int", "(", "utils", ".", "lookup", "(", "item", ",", "'capacity'", ")", ")", "!=", "port_speed", ",", "_is_private_port_speed_item", "(", "item", ")", "!=", "no_public", ",", "not", "_is_bonded", "(", "item", ")", "]", ")", ":", "continue", "for", "price", "in", "item", "[", "'prices'", "]", ":", "if", "not", "_matches_location", "(", "price", ",", "location", ")", ":", "continue", "return", "price", "[", "'id'", "]", "raise", "SoftLayer", ".", "SoftLayerError", "(", "\"Could not find valid price for port speed: '%s'\"", "%", "port_speed", ")" ]
Function to parse data from the older version of the UniFrac file obtained from QIIME version 1.8 and earlier.
def parse_unifrac_v1_8(unifrac, file_data):
    for line in file_data:
        if line == "":
            break
        line = line.split("\t")
        unifrac["pcd"][line[0]] = [float(e) for e in line[1:]]
    unifrac["eigvals"] = [float(entry) for entry in file_data[-2].split("\t")[1:]]
    unifrac["varexp"] = [float(entry) for entry in file_data[-1].split("\t")[1:]]
    return unifrac
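Since parse_unifrac dispatches on a "pc vector number" first field, the pre-1.9 layout this parser expects is roughly: a header line (already consumed by the caller), one tab-separated coordinate row per sample, a blank line, then eigenvalue and percent-explained rows as the last two lines. A schematic sketch inferred from the code, not a verbatim file:

    pc vector number<TAB>1<TAB>2<TAB>...
    Sample1<TAB>0.12<TAB>-0.03<TAB>...
    Sample2<TAB>0.08<TAB>0.11<TAB>...

    eigvals<TAB>4.2<TAB>1.7<TAB>...
    % variation explained<TAB>35.1<TAB>14.2<TAB>...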
5
https://github.com/smdabdoub/phylotoast/blob/0b74ef171e6a84761710548501dfac71285a58a3/phylotoast/util.py#L337-L356
[ "def", "working_directory", "(", "self", ")", ":", "if", "self", ".", "chroot_directory", "and", "not", "self", ".", "_working_directory", ".", "startswith", "(", "self", ".", "chroot_directory", ")", ":", "return", "self", ".", "chroot_directory", "+", "self", ".", "_working_directory", "else", ":", "return", "self", ".", "_working_directory" ]
Function to parse data from the newer version of the UniFrac file obtained from QIIME version 1.9 and later.
def parse_unifrac_v1_9(unifrac, file_data):
    unifrac["eigvals"] = [float(entry) for entry in file_data[0].split("\t")]
    unifrac["varexp"] = [float(entry) * 100 for entry in file_data[3].split("\t")]
    for line in file_data[8:]:
        if line == "":
            break
        line = line.split("\t")
        unifrac["pcd"][line[0]] = [float(e) for e in line[1:]]
    return unifrac
6
https://github.com/smdabdoub/phylotoast/blob/0b74ef171e6a84761710548501dfac71285a58a3/phylotoast/util.py#L359-L378
[ "def", "stop", "(", "self", ")", ":", "self", ".", "_stopped", "=", "True", "for", "result", "in", "self", ".", "_results", ":", "result", ".", "_set_result", "(", "Failure", "(", "ReactorStopped", "(", ")", ")", ")" ]
Determine the color-category mapping. If color_column was specified, map the category names to color values; otherwise, use the palettable colors to automatically generate a set of colors for the group values.
def color_mapping(sample_map, header, group_column, color_column=None):
    group_colors = OrderedDict()
    group_gather = gather_categories(sample_map, header, [group_column])
    if color_column is not None:
        color_gather = gather_categories(sample_map, header, [color_column])
        # match sample IDs between color_gather and group_gather
        for group in group_gather:
            for color in color_gather:
                # allow incomplete assignment of colors: if the group sids
                # overlap at all with the color sids, consider it a match
                if group_gather[group].sids.intersection(color_gather[color].sids):
                    group_colors[group] = color
    else:
        bcolors = itertools.cycle(Set3_12.hex_colors)
        for group in group_gather:
            group_colors[group] = bcolors.next()  # Python 2 iterator protocol
    return group_colors
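A minimal usage sketch (mapping data invented; Set3_12 is the palettable color set imported by the original module; ordering of the result may vary):

    header = ["SampleID", "Group", "Color"]
    sample_map = {"S1": ["S1", "Control", "#1f77b4"],
                  "S2": ["S2", "Fecal", "#ff7f0e"]}
    color_mapping(sample_map, header, "Group", color_column="Color")
    # -> OrderedDict([("Control", "#1f77b4"), ("Fecal", "#ff7f0e")])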
7
https://github.com/smdabdoub/phylotoast/blob/0b74ef171e6a84761710548501dfac71285a58a3/phylotoast/util.py#L380-L419
[ "def", "update_topology", "(", ")", ":", "for", "topology", "in", "Topology", ".", "objects", ".", "all", "(", ")", ":", "try", ":", "topology", ".", "update", "(", ")", "except", "Exception", "as", "e", ":", "msg", "=", "'Failed to update {}'", ".", "format", "(", "topology", ".", "__repr__", "(", ")", ")", "logger", ".", "exception", "(", "msg", ")", "print", "(", "'{0}: {1}\\n'", "'see networking.log for more information\\n'", ".", "format", "(", "msg", ",", "e", ".", "__class__", ")", ")" ]
Return the reverse complement of a read.
def rev_c(read):
    rc = []
    rc_nucs = {'A': 'T', 'T': 'A', 'G': 'C', 'C': 'G', 'N': 'N'}
    for base in read:
        rc.extend(rc_nucs[base.upper()])
    return rc[::-1]
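For illustration (input invented):

    rev_c("ATGC")  # -> ['G', 'C', 'A', 'T']

Note that the complement of each base is collected into a list and the list is reversed, so the caller gets a list of characters rather than a string.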
8
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/shuffle_genome.py#L27-L35
[ "def", "get_site_url", "(", ")", ":", "site_url", "=", "getattr", "(", "_THREAD_LOCAL", ",", "_THREAD_SITE_URL", ",", "None", ")", "if", "site_url", "is", "None", ":", "site_url", "=", "SITE_URL", "or", "get_site_url_", "(", ")", "setattr", "(", "_THREAD_LOCAL", ",", "_THREAD_SITE_URL", ",", "site_url", ")", "return", "site_url" ]
Randomly shuffle a genome.
def shuffle_genome(genome, cat, fraction=float(100), plot=True,
                   alpha=0.1, beta=100000, min_length=1000, max_length=200000):
    header = '>randomized_%s' % (genome.name)
    sequence = list(''.join([i[1] for i in parse_fasta(genome)]))
    length = len(sequence)
    shuffled = []
    # break genome into pieces
    while sequence is not False:
        s = int(random.gammavariate(alpha, beta))
        if s <= min_length or s >= max_length:
            continue
        if len(sequence) < s:
            seq = sequence[0:]
        else:
            seq = sequence[0:s]
        sequence = sequence[s:]
        # if bool(random.getrandbits(1)) is True:
        #     seq = rev_c(seq)
        #     print('fragment length: %s reverse complement: True' % ('{:,}'.format(s)), file=sys.stderr)
        # else:
        #     print('fragment length: %s reverse complement: False' % ('{:,}'.format(s)), file=sys.stderr)
        shuffled.append(''.join(seq))
        if sequence == []:
            break
    # shuffle pieces
    random.shuffle(shuffled)
    # subset fragments
    if fraction == float(100):
        subset = shuffled
    else:
        max_pieces = int(length * fraction / 100)
        subset, total = [], 0
        for fragment in shuffled:
            length = len(fragment)
            if total + length <= max_pieces:
                subset.append(fragment)
                total += length
            else:
                diff = max_pieces - total
                subset.append(fragment[0:diff])
                break
    # combine sequences, if requested
    if cat is True:
        yield [header, ''.join(subset)]
    else:
        for i, seq in enumerate(subset):
            yield ['%s fragment:%s' % (header, i), seq]
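A minimal usage sketch, assuming an open FASTA file handle (parse_fasta is imported in the original script; the filename is invented):

    with open('genome.fa') as genome:
        for name, seq in shuffle_genome(genome, cat=False, fraction=50):
            print('%s\n%s' % (name, seq))

Fragment sizes are drawn from random.gammavariate(alpha, beta) and rejected until they fall between min_length and max_length.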
9
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/shuffle_genome.py#L37-L87
[ "def", "process_new_issues", "(", "self", ",", "volumes", ",", "existing_issues", ")", ":", "new_issues", "=", "{", "}", "for", "issue_id", ",", "volume", "in", "volumes", ".", "items", "(", ")", ":", "state", "=", "EBSIssueState", ".", "DETECTED", ".", "value", "if", "issue_id", "in", "existing_issues", ":", "issue", "=", "existing_issues", "[", "issue_id", "]", "data", "=", "{", "'state'", ":", "state", ",", "'notes'", ":", "issue", ".", "notes", ",", "'last_notice'", ":", "issue", ".", "last_notice", "}", "if", "issue", ".", "update", "(", "data", ")", ":", "new_issues", ".", "setdefault", "(", "issue", ".", "volume", ".", "account", ",", "[", "]", ")", ".", "append", "(", "issue", ")", "self", ".", "log", ".", "debug", "(", "'Updated EBSVolumeAuditIssue {}'", ".", "format", "(", "issue_id", ")", ")", "else", ":", "properties", "=", "{", "'volume_id'", ":", "volume", ".", "id", ",", "'account_id'", ":", "volume", ".", "account_id", ",", "'location'", ":", "volume", ".", "location", ",", "'state'", ":", "state", ",", "'last_change'", ":", "datetime", ".", "now", "(", ")", ",", "'last_notice'", ":", "None", ",", "'notes'", ":", "[", "]", "}", "issue", "=", "EBSVolumeAuditIssue", ".", "create", "(", "issue_id", ",", "properties", "=", "properties", ")", "new_issues", ".", "setdefault", "(", "issue", ".", "volume", ".", "account", ",", "[", "]", ")", ".", "append", "(", "issue", ")", "return", "new_issues" ]
If the fit contains statistically insignificant parameters, remove them. Returns a pruned fit where all parameters have p-values of the t-statistic below p_max.
def _prune(self, fit, p_max):
    def remove_from_model_desc(x, model_desc):
        """Return a model_desc without x."""
        rhs_termlist = []
        for t in model_desc.rhs_termlist:
            if not t.factors:
                # intercept, add anyway
                rhs_termlist.append(t)
            elif not x == t.factors[0]._varname:
                # this is not the term with x
                rhs_termlist.append(t)
        md = ModelDesc(model_desc.lhs_termlist, rhs_termlist)
        return md

    corrected_model_desc = ModelDesc(fit.model.formula.lhs_termlist[:],
                                     fit.model.formula.rhs_termlist[:])
    pars_to_prune = fit.pvalues.where(fit.pvalues > p_max).dropna().index.tolist()
    try:
        pars_to_prune.remove('Intercept')
    except ValueError:
        pass
    while pars_to_prune:
        corrected_model_desc = remove_from_model_desc(pars_to_prune[0],
                                                      corrected_model_desc)
        fit = fm.ols(corrected_model_desc, data=self.df).fit()
        pars_to_prune = fit.pvalues.where(fit.pvalues > p_max).dropna().index.tolist()
        try:
            pars_to_prune.remove('Intercept')
        except ValueError:
            pass
    return fit
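The loop is a simple backward elimination: after each removal the model is refit, because dropping one term changes the p-values of the remaining terms. A hypothetical trace (numbers invented):

    # start: terms A, B, C with p-values 0.02, 0.30, 0.08; p_max = 0.05
    # pass 1: B > p_max -> drop B, refit; new p-values: A = 0.01, C = 0.04
    # pass 2: nothing above p_max -> return the fit containing A and C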
10
https://github.com/opengridcc/opengrid/blob/69b8da3c8fcea9300226c45ef0628cd6d4307651/opengrid/library/regression.py#L222-L272
[ "def", "write_asc_file", "(", "filename", ",", "data", ",", "xsize", ",", "ysize", ",", "geotransform", ",", "nodata_value", ")", ":", "UtilClass", ".", "mkdir", "(", "os", ".", "path", ".", "dirname", "(", "FileClass", ".", "get_file_fullpath", "(", "filename", ")", ")", ")", "header", "=", "'NCOLS %d\\n'", "'NROWS %d\\n'", "'XLLCENTER %f\\n'", "'YLLCENTER %f\\n'", "'CELLSIZE %f\\n'", "'NODATA_VALUE %f'", "%", "(", "xsize", ",", "ysize", ",", "geotransform", "[", "0", "]", "+", "0.5", "*", "geotransform", "[", "1", "]", ",", "geotransform", "[", "3", "]", "-", "(", "ysize", "-", "0.5", ")", "*", "geotransform", "[", "1", "]", ",", "geotransform", "[", "1", "]", ",", "nodata_value", ")", "with", "open", "(", "filename", ",", "'w'", ",", "encoding", "=", "'utf-8'", ")", "as", "f", ":", "f", ".", "write", "(", "header", ")", "for", "i", "in", "range", "(", "0", ",", "ysize", ")", ":", "for", "j", "in", "range", "(", "0", ",", "xsize", ")", ":", "f", ".", "write", "(", "'%s\\t'", "%", "repr", "(", "data", "[", "i", "]", "[", "j", "]", ")", ")", "f", ".", "write", "(", "'\\n'", ")", "f", ".", "close", "(", ")" ]
Return the best fit, based on rsquared.
def find_best_rsquared(list_of_fits):
    res = sorted(list_of_fits, key=lambda x: x.rsquared)
    return res[-1]
11
https://github.com/opengridcc/opengrid/blob/69b8da3c8fcea9300226c45ef0628cd6d4307651/opengrid/library/regression.py#L275-L278
[ "def", "message", "(", "self", ",", "assistant_id", ",", "session_id", ",", "input", "=", "None", ",", "context", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "assistant_id", "is", "None", ":", "raise", "ValueError", "(", "'assistant_id must be provided'", ")", "if", "session_id", "is", "None", ":", "raise", "ValueError", "(", "'session_id must be provided'", ")", "if", "input", "is", "not", "None", ":", "input", "=", "self", ".", "_convert_model", "(", "input", ",", "MessageInput", ")", "if", "context", "is", "not", "None", ":", "context", "=", "self", ".", "_convert_model", "(", "context", ",", "MessageContext", ")", "headers", "=", "{", "}", "if", "'headers'", "in", "kwargs", ":", "headers", ".", "update", "(", "kwargs", ".", "get", "(", "'headers'", ")", ")", "sdk_headers", "=", "get_sdk_headers", "(", "'conversation'", ",", "'V2'", ",", "'message'", ")", "headers", ".", "update", "(", "sdk_headers", ")", "params", "=", "{", "'version'", ":", "self", ".", "version", "}", "data", "=", "{", "'input'", ":", "input", ",", "'context'", ":", "context", "}", "url", "=", "'/v2/assistants/{0}/sessions/{1}/message'", ".", "format", "(", "*", "self", ".", "_encode_path_vars", "(", "assistant_id", ",", "session_id", ")", ")", "response", "=", "self", ".", "request", "(", "method", "=", "'POST'", ",", "url", "=", "url", ",", "headers", "=", "headers", ",", "params", "=", "params", ",", "json", "=", "data", ",", "accept_json", "=", "True", ")", "return", "response" ]
Return a df with predictions and confidence interval.
def _predict(self, fit, df):
    # Add model results to data as column 'predictions'
    df_res = df.copy()
    if 'Intercept' in fit.model.exog_names:
        df_res['Intercept'] = 1.0
    df_res['predicted'] = fit.predict(df_res)
    if not self.allow_negative_predictions:
        df_res.loc[df_res['predicted'] < 0, 'predicted'] = 0
    prstd, interval_l, interval_u = wls_prediction_std(
        fit, df_res[fit.model.exog_names], alpha=1 - self.confint)
    df_res['interval_l'] = interval_l
    df_res['interval_u'] = interval_u
    if 'Intercept' in df_res:
        df_res.drop(labels=['Intercept'], axis=1, inplace=True)
    return df_res
12
https://github.com/opengridcc/opengrid/blob/69b8da3c8fcea9300226c45ef0628cd6d4307651/opengrid/library/regression.py#L292-L338
[ "def", "user_deleted_from_site_event", "(", "event", ")", ":", "userid", "=", "event", ".", "principal", "catalog", "=", "api", ".", "portal", ".", "get_tool", "(", "'portal_catalog'", ")", "query", "=", "{", "'object_provides'", ":", "WORKSPACE_INTERFACE", "}", "query", "[", "'workspace_members'", "]", "=", "userid", "workspaces", "=", "[", "IWorkspace", "(", "b", ".", "_unrestrictedGetObject", "(", ")", ")", "for", "b", "in", "catalog", ".", "unrestrictedSearchResults", "(", "query", ")", "]", "for", "workspace", "in", "workspaces", ":", "workspace", ".", "remove_from_team", "(", "userid", ")" ]
Calculate the relative abundance of each OTU ID in a sample.
def relative_abundance(biomf, sampleIDs=None):
    if sampleIDs is None:
        sampleIDs = biomf.ids()
    else:
        try:
            for sid in sampleIDs:
                assert sid in biomf.ids()
        except AssertionError:
            raise ValueError(
                "\nError while calculating relative abundances: The sampleIDs "
                "provided do not match the sampleIDs in biom file. Please double "
                "check the sampleIDs provided.\n")
    otuIDs = biomf.ids(axis="observation")
    norm_biomf = biomf.norm(inplace=False)
    return {sample: {otuID: norm_biomf.get_value_by_ids(otuID, sample)
                     for otuID in otuIDs}
            for sample in sampleIDs}
13
https://github.com/smdabdoub/phylotoast/blob/0b74ef171e6a84761710548501dfac71285a58a3/phylotoast/biom_calc.py#L11-L41
[ "def", "getNewQuery", "(", "connection", "=", "None", ",", "commitOnEnd", "=", "False", ",", "*", "args", ",", "*", "*", "kargs", ")", ":", "if", "connection", "is", "None", ":", "return", "query", ".", "PySQLQuery", "(", "getNewConnection", "(", "*", "args", ",", "*", "*", "kargs", ")", ",", "commitOnEnd", "=", "commitOnEnd", ")", "else", ":", "#Updated 7/24/08 to include commitOnEnd here", "#-Chandler Prall", "return", "query", ".", "PySQLQuery", "(", "connection", ",", "commitOnEnd", "=", "commitOnEnd", ")" ]
Calculate the mean OTU abundance percentage.
def mean_otu_pct_abundance(ra, otuIDs):
    sids = ra.keys()
    otumeans = defaultdict(int)
    for oid in otuIDs:
        otumeans[oid] = sum([ra[sid][oid] for sid in sids
                             if oid in ra[sid]]) / len(sids) * 100
    return otumeans
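A worked example with invented numbers; note that samples missing the OTU still count in the denominator, because len(sids) is fixed:

    ra = {"S1": {"OTU1": 0.10}, "S2": {"OTU1": 0.30}}
    mean_otu_pct_abundance(ra, ["OTU1"])["OTU1"]
    # -> (0.10 + 0.30) / 2 * 100 = 20.0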
14
https://github.com/smdabdoub/phylotoast/blob/0b74ef171e6a84761710548501dfac71285a58a3/phylotoast/biom_calc.py#L44-L67
[ "def", "format_log_context", "(", "msg", ",", "connection", "=", "None", ",", "keyspace", "=", "None", ")", ":", "connection_info", "=", "connection", "or", "'DEFAULT_CONNECTION'", "if", "keyspace", ":", "msg", "=", "'[Connection: {0}, Keyspace: {1}] {2}'", ".", "format", "(", "connection_info", ",", "keyspace", ",", "msg", ")", "else", ":", "msg", "=", "'[Connection: {0}] {1}'", ".", "format", "(", "connection_info", ",", "msg", ")", "return", "msg" ]
Calculate the mean relative abundance percentage.
def MRA(biomf, sampleIDs=None, transform=None):
    ra = relative_abundance(biomf, sampleIDs)
    if transform is not None:
        ra = {sample: {otuID: transform(abd) for otuID, abd in ra[sample].items()}
              for sample in ra.keys()}
    otuIDs = biomf.ids(axis="observation")
    return mean_otu_pct_abundance(ra, otuIDs)
15
https://github.com/smdabdoub/phylotoast/blob/0b74ef171e6a84761710548501dfac71285a58a3/phylotoast/biom_calc.py#L70-L92
[ "def", "unpack_rsp", "(", "cls", ",", "rsp_pb", ")", ":", "ret_type", "=", "rsp_pb", ".", "retType", "ret_msg", "=", "rsp_pb", ".", "retMsg", "if", "ret_type", "!=", "RET_OK", ":", "return", "RET_ERROR", ",", "ret_msg", ",", "None", "res", "=", "{", "}", "if", "rsp_pb", ".", "HasField", "(", "'s2c'", ")", ":", "res", "[", "'server_version'", "]", "=", "rsp_pb", ".", "s2c", ".", "serverVer", "res", "[", "'login_user_id'", "]", "=", "rsp_pb", ".", "s2c", ".", "loginUserID", "res", "[", "'conn_id'", "]", "=", "rsp_pb", ".", "s2c", ".", "connID", "res", "[", "'conn_key'", "]", "=", "rsp_pb", ".", "s2c", ".", "connAESKey", "res", "[", "'keep_alive_interval'", "]", "=", "rsp_pb", ".", "s2c", ".", "keepAliveInterval", "else", ":", "return", "RET_ERROR", ",", "\"rsp_pb error\"", ",", "None", "return", "RET_OK", ",", "\"\"", ",", "res" ]
Calculate the total number of sequences in each OTU or SampleID.
def raw_abundance(biomf, sampleIDs=None, sample_abd=True):
    results = defaultdict(int)
    if sampleIDs is None:
        sampleIDs = biomf.ids()
    else:
        try:
            for sid in sampleIDs:
                assert sid in biomf.ids()
        except AssertionError:
            raise ValueError(
                "\nError while calculating raw total abundances: The sampleIDs "
                "provided do not match the sampleIDs in biom file. Please double "
                "check the sampleIDs provided.\n")
    otuIDs = biomf.ids(axis="observation")
    for sampleID in sampleIDs:
        for otuID in otuIDs:
            abd = biomf.get_value_by_ids(otuID, sampleID)
            if sample_abd:
                results[sampleID] += abd
            else:
                results[otuID] += abd
    return results
16
https://github.com/smdabdoub/phylotoast/blob/0b74ef171e6a84761710548501dfac71285a58a3/phylotoast/biom_calc.py#L95-L135
[ "def", "internal2external_grad", "(", "xi", ",", "bounds", ")", ":", "ge", "=", "np", ".", "empty_like", "(", "xi", ")", "for", "i", ",", "(", "v", ",", "bound", ")", "in", "enumerate", "(", "zip", "(", "xi", ",", "bounds", ")", ")", ":", "a", "=", "bound", "[", "0", "]", "# minimum", "b", "=", "bound", "[", "1", "]", "# maximum", "if", "a", "==", "None", "and", "b", "==", "None", ":", "# No constraints", "ge", "[", "i", "]", "=", "1.0", "elif", "b", "==", "None", ":", "# only min", "ge", "[", "i", "]", "=", "v", "/", "np", ".", "sqrt", "(", "v", "**", "2", "+", "1", ")", "elif", "a", "==", "None", ":", "# only max", "ge", "[", "i", "]", "=", "-", "v", "/", "np", ".", "sqrt", "(", "v", "**", "2", "+", "1", ")", "else", ":", "# both min and max", "ge", "[", "i", "]", "=", "(", "b", "-", "a", ")", "*", "np", ".", "cos", "(", "v", ")", "/", "2.", "return", "ge" ]
Function to transform the total abundance calculation for each sample ID to another format based on a user-given transformation function.
def transform_raw_abundance(biomf, fn=math.log10, sampleIDs=None, sample_abd=True):
    totals = raw_abundance(biomf, sampleIDs, sample_abd)
    return {sid: fn(abd) for sid, abd in totals.items()}
17
https://github.com/smdabdoub/phylotoast/blob/0b74ef171e6a84761710548501dfac71285a58a3/phylotoast/biom_calc.py#L138-L155
[ "def", "parse_instance", "(", "self", ",", "global_params", ",", "region", ",", "reservation", ")", ":", "for", "i", "in", "reservation", "[", "'Instances'", "]", ":", "instance", "=", "{", "}", "vpc_id", "=", "i", "[", "'VpcId'", "]", "if", "'VpcId'", "in", "i", "and", "i", "[", "'VpcId'", "]", "else", "ec2_classic", "manage_dictionary", "(", "self", ".", "vpcs", ",", "vpc_id", ",", "VPCConfig", "(", "self", ".", "vpc_resource_types", ")", ")", "instance", "[", "'reservation_id'", "]", "=", "reservation", "[", "'ReservationId'", "]", "instance", "[", "'id'", "]", "=", "i", "[", "'InstanceId'", "]", "get_name", "(", "i", ",", "instance", ",", "'InstanceId'", ")", "get_keys", "(", "i", ",", "instance", ",", "[", "'KeyName'", ",", "'LaunchTime'", ",", "'InstanceType'", ",", "'State'", ",", "'IamInstanceProfile'", ",", "'SubnetId'", "]", ")", "# Network interfaces & security groups", "manage_dictionary", "(", "instance", ",", "'network_interfaces'", ",", "{", "}", ")", "for", "eni", "in", "i", "[", "'NetworkInterfaces'", "]", ":", "nic", "=", "{", "}", "get_keys", "(", "eni", ",", "nic", ",", "[", "'Association'", ",", "'Groups'", ",", "'PrivateIpAddresses'", ",", "'SubnetId'", ",", "'Ipv6Addresses'", "]", ")", "instance", "[", "'network_interfaces'", "]", "[", "eni", "[", "'NetworkInterfaceId'", "]", "]", "=", "nic", "self", ".", "vpcs", "[", "vpc_id", "]", ".", "instances", "[", "i", "[", "'InstanceId'", "]", "]", "=", "instance" ]
Compute the Mann-Whitney U test for unequal group sample sizes.
def print_MannWhitneyU(div_calc):
    try:
        x = div_calc.values()[0].values()  # Python 2: values() returns a list
        y = div_calc.values()[1].values()
    except Exception:
        return ("Error setting up input arrays for Mann-Whitney U Test. Skipping "
                "significance testing.")
    T, p = stats.mannwhitneyu(x, y)
    print "\nMann-Whitney U test statistic:", T
    print "Two-tailed p-value: {}".format(2 * p)
18
https://github.com/smdabdoub/phylotoast/blob/0b74ef171e6a84761710548501dfac71285a58a3/bin/diversity.py#L54-L66
[ "def", "_extract_ocsp_certs", "(", "self", ",", "ocsp_response", ")", ":", "status", "=", "ocsp_response", "[", "'response_status'", "]", ".", "native", "if", "status", "==", "'successful'", ":", "response_bytes", "=", "ocsp_response", "[", "'response_bytes'", "]", "if", "response_bytes", "[", "'response_type'", "]", ".", "native", "==", "'basic_ocsp_response'", ":", "response", "=", "response_bytes", "[", "'response'", "]", ".", "parsed", "if", "response", "[", "'certs'", "]", ":", "for", "other_cert", "in", "response", "[", "'certs'", "]", ":", "if", "self", ".", "certificate_registry", ".", "add_other_cert", "(", "other_cert", ")", ":", "self", ".", "_revocation_certs", "[", "other_cert", ".", "issuer_serial", "]", "=", "other_cert" ]
Compute the Kruskal-Wallis H-test for independent samples. A typical rule is that each group must have at least 5 measurements.
def print_KruskalWallisH(div_calc):
    calc = defaultdict(list)
    try:
        for k1, v1 in div_calc.iteritems():  # Python 2 dict iteration
            for k2, v2 in v1.iteritems():
                calc[k1].append(v2)
    except Exception:
        return ("Error setting up input arrays for Kruskal-Wallis H-Test. Skipping "
                "significance testing.")
    h, p = stats.kruskal(*calc.values())
    print "\nKruskal-Wallis H-test statistic for {} groups: {}".format(
        str(len(div_calc)), h)
    print "p-value: {}".format(p)
19
https://github.com/smdabdoub/phylotoast/blob/0b74ef171e6a84761710548501dfac71285a58a3/bin/diversity.py#L69-L84
[ "def", "_record_offset", "(", "self", ")", ":", "offset", "=", "self", ".", "blob_file", ".", "tell", "(", ")", "self", ".", "event_offsets", ".", "append", "(", "offset", ")" ]
Parses the given options passed in at the command line.
def handle_program_options():
    parser = argparse.ArgumentParser(
        description="Calculate the alpha diversity of a set of samples "
                    "using one or more metrics and output a kernel density "
                    "estimator-smoothed histogram of the results.")
    parser.add_argument("-m", "--map_file",
                        help="QIIME mapping file.")
    parser.add_argument("-i", "--biom_fp",
                        help="Path to the BIOM table")
    parser.add_argument("-c", "--category",
                        help="Specific category from the mapping file.")
    parser.add_argument("-d", "--diversity", default=["shannon"], nargs="+",
                        help="The alpha diversity metric. Default value is "
                             "'shannon', which will calculate the Shannon entropy. "
                             "Multiple metrics can be specified (space separated). "
                             "The full list of metrics is available at: "
                             "http://scikit-bio.org/docs/latest/generated/skbio.diversity.alpha.html. "
                             "Beta diversity metrics will be supported in the "
                             "future.")
    parser.add_argument("--x_label", default=[None], nargs="+",
                        help="The name of the diversity metric to be displayed on "
                             "the plot as the X-axis label. If multiple metrics "
                             "are specified, then multiple entries for the X-axis "
                             "label should be given.")
    parser.add_argument("--color_by",
                        help="A column name in the mapping file containing "
                             "hexadecimal (#FF0000) color values that will be "
                             "used to color the groups. Each sample ID must have "
                             "a color entry.")
    parser.add_argument("--plot_title", default="",
                        help="A descriptive title that will appear at the top of "
                             "the output plot. Surround with quotes if there are "
                             "spaces in the title.")
    parser.add_argument("-o", "--output_dir", default=".",
                        help="The directory plots will be saved to.")
    parser.add_argument("--image_type", default="png",
                        help="The type of image to save: png, svg, pdf, eps, etc...")
    parser.add_argument("--save_calculations",
                        help="Path and name of text file to store the calculated "
                             "diversity metrics.")
    parser.add_argument("--suppress_stats", action="store_true",
                        help="Do not display significance testing results, which "
                             "are shown by default.")
    parser.add_argument("--show_available_metrics", action="store_true",
                        help="Supply this parameter to see which alpha diversity "
                             "metrics are available for usage. No calculations "
                             "will be performed if this parameter is provided.")
    return parser.parse_args()
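A hypothetical invocation of the resulting CLI (file and column names invented for illustration):

    python diversity.py -m mapping.txt -i otu_table.biom -c Treatment \
        -d shannon simpson --plot_title "Alpha diversity" -o plots/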
20
https://github.com/smdabdoub/phylotoast/blob/0b74ef171e6a84761710548501dfac71285a58a3/bin/diversity.py#L122-L168
[ "def", "identical", "(", "self", ",", "other", ")", ":", "try", ":", "return", "(", "self", ".", "name", "==", "other", ".", "name", "and", "self", ".", "_all_compat", "(", "other", ",", "'identical'", ")", ")", "except", "(", "TypeError", ",", "AttributeError", ")", ":", "return", "False" ]
Make a BLAST database.
def blastdb(fasta, maxfile=10000000):
    db = fasta.rsplit('.', 1)[0]
    type = check_type(fasta)
    if type == 'nucl':
        type = ['nhr', type]
    else:
        type = ['phr', type]
    if os.path.exists('%s.%s' % (db, type[0])) is False and \
            os.path.exists('%s.00.%s' % (db, type[0])) is False:
        print('# ... making blastdb for: %s' % (fasta), file=sys.stderr)
        os.system('makeblastdb -in %s -out %s -dbtype %s -max_file_sz %s >> log.txt'
                  % (fasta, db, type[1], maxfile))
    else:
        print('# ... database found for: %s' % (fasta), file=sys.stderr)
    return db
21
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/search.py#L28-L46
[ "def", "access_required", "(", "config", "=", "None", ")", ":", "def", "_access_required", "(", "http_method_handler", ")", ":", "def", "secure_http_method_handler", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# authentication context must be set", "if", "not", "self", ".", "__provider_config__", ".", "authentication", ":", "_message", "=", "\"Service available to authenticated users only, no auth context provider set in handler\"", "authentication_error", "=", "prestans", ".", "exception", ".", "AuthenticationError", "(", "_message", ")", "authentication_error", ".", "request", "=", "self", ".", "request", "raise", "authentication_error", "# check for access by calling is_authorized_user", "if", "not", "self", ".", "__provider_config__", ".", "authentication", ".", "is_authorized_user", "(", "config", ")", ":", "_message", "=", "\"Service available to authorized users only\"", "authorization_error", "=", "prestans", ".", "exception", ".", "AuthorizationError", "(", "_message", ")", "authorization_error", ".", "request", "=", "self", ".", "request", "raise", "authorization_error", "http_method_handler", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", "return", "wraps", "(", "http_method_handler", ")", "(", "secure_http_method_handler", ")", "return", "_access_required" ]
Make a USEARCH database.
def usearchdb(fasta, alignment='local', usearch_loc='usearch'):
    if '.udb' in fasta:
        print('# ... database found: %s' % (fasta), file=sys.stderr)
        return fasta
    type = check_type(fasta)
    db = '%s.%s.udb' % (fasta.rsplit('.', 1)[0], type)
    if os.path.exists(db) is False:
        print('# ... making usearch db for: %s' % (fasta), file=sys.stderr)
        if alignment == 'local':
            os.system('%s -makeudb_ublast %s -output %s >> log.txt'
                      % (usearch_loc, fasta, db))
        elif alignment == 'global':
            os.system('%s -makeudb_usearch %s -output %s >> log.txt'
                      % (usearch_loc, fasta, db))
    else:
        print('# ... database found for: %s' % (fasta), file=sys.stderr)
    return db
22
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/search.py#L68-L85
[ "def", "noise", "(", "mesh", ",", "magnitude", "=", "None", ")", ":", "if", "magnitude", "is", "None", ":", "magnitude", "=", "mesh", ".", "scale", "/", "100.0", "random", "=", "(", "np", ".", "random", ".", "random", "(", "mesh", ".", "vertices", ".", "shape", ")", "-", ".5", ")", "*", "magnitude", "vertices_noise", "=", "mesh", ".", "vertices", ".", "copy", "(", ")", "+", "random", "# make sure we've re- ordered faces randomly", "triangles", "=", "np", ".", "random", ".", "permutation", "(", "vertices_noise", "[", "mesh", ".", "faces", "]", ")", "mesh_type", "=", "util", ".", "type_named", "(", "mesh", ",", "'Trimesh'", ")", "permutated", "=", "mesh_type", "(", "*", "*", "triangles_module", ".", "to_kwargs", "(", "triangles", ")", ")", "return", "permutated" ]
Pretty print.
def _pp(dict_data):
    for key, val in dict_data.items():
        # pylint: disable=superfluous-parens
        print('{0:<11}: {1}'.format(key, val))
23
https://github.com/mkouhei/bootstrap-py/blob/95d56ed98ef409fd9f019dc352fd1c3711533275/bootstrap_py/control.py#L11-L15
[ "def", "_add_dependency", "(", "self", ",", "dependency", ",", "var_name", "=", "None", ")", ":", "if", "var_name", "is", "None", ":", "var_name", "=", "next", "(", "self", ".", "temp_var_names", ")", "# Don't add duplicate dependencies", "if", "(", "dependency", ",", "var_name", ")", "not", "in", "self", ".", "dependencies", ":", "self", ".", "dependencies", ".", "append", "(", "(", "dependency", ",", "var_name", ")", ")", "return", "var_name" ]
Print licenses.
def print_licences(params, metadata):
    if hasattr(params, 'licenses'):
        if params.licenses:
            _pp(metadata.licenses_desc())
            sys.exit(0)
24
https://github.com/mkouhei/bootstrap-py/blob/95d56ed98ef409fd9f019dc352fd1c3711533275/bootstrap_py/control.py#L27-L36
[ "def", "on_response", "(", "self", ",", "ch", ",", "method_frame", ",", "props", ",", "body", ")", ":", "LOGGER", ".", "debug", "(", "\"rabbitmq.Requester.on_response\"", ")", "if", "self", ".", "corr_id", "==", "props", ".", "correlation_id", ":", "self", ".", "response", "=", "{", "'props'", ":", "props", ",", "'body'", ":", "body", "}", "else", ":", "LOGGER", ".", "warn", "(", "\"rabbitmq.Requester.on_response - discarded response : \"", "+", "str", "(", "props", ".", "correlation_id", ")", ")", "LOGGER", ".", "debug", "(", "\"natsd.Requester.on_response - discarded response : \"", "+", "str", "(", "{", "'properties'", ":", "props", ",", "'body'", ":", "body", "}", ")", ")" ]
Check repository existence.
def check_repository_existence(params):
    repodir = os.path.join(params.outdir, params.name)
    if os.path.isdir(repodir):
        raise Conflict(
            'Package repository "{0}" already exists.'.format(repodir))
25
https://github.com/mkouhei/bootstrap-py/blob/95d56ed98ef409fd9f019dc352fd1c3711533275/bootstrap_py/control.py#L39-L47
[ "def", "OnAdjustVolume", "(", "self", ",", "event", ")", ":", "self", ".", "volume", "=", "self", ".", "player", ".", "audio_get_volume", "(", ")", "if", "event", ".", "GetWheelRotation", "(", ")", "<", "0", ":", "self", ".", "volume", "=", "max", "(", "0", ",", "self", ".", "volume", "-", "10", ")", "elif", "event", ".", "GetWheelRotation", "(", ")", ">", "0", ":", "self", ".", "volume", "=", "min", "(", "200", ",", "self", ".", "volume", "+", "10", ")", "self", ".", "player", ".", "audio_set_volume", "(", "self", ".", "volume", ")" ]
Generate a package repository.
def generate_package(params):
    pkg_data = package.PackageData(params)
    pkg_tree = package.PackageTree(pkg_data)
    pkg_tree.generate()
    pkg_tree.move()
    VCS(os.path.join(pkg_tree.outdir, pkg_tree.name), pkg_tree.pkg_data)
26
https://github.com/mkouhei/bootstrap-py/blob/95d56ed98ef409fd9f019dc352fd1c3711533275/bootstrap_py/control.py#L59-L68
[ "def", "main", "(", ")", ":", "alarm", "=", "XBeeAlarm", "(", "'/dev/ttyUSB0'", ",", "'\\x56\\x78'", ")", "routine", "=", "SimpleWakeupRoutine", "(", "alarm", ")", "from", "time", "import", "sleep", "while", "True", ":", "\"\"\"\n Run the routine with 10 second delays\n \"\"\"", "try", ":", "print", "\"Waiting 5 seconds...\"", "sleep", "(", "5", ")", "print", "\"Firing\"", "routine", ".", "trigger", "(", ")", "except", "KeyboardInterrupt", ":", "break" ]
Print single (unpaired) reads to stderr.
def print_single(line, rev):
    if rev is True:
        seq = rc(['', line[9]])[1]
        qual = line[10][::-1]
    else:
        seq = line[9]
        qual = line[10]
    fq = ['@%s' % line[0], seq, '+%s' % line[0], qual]
    print('\n'.join(fq), file=sys.stderr)
27
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/sam2fastq.py#L13-L24
[ "def", "set_cache_dir", "(", "directory", ")", ":", "global", "cache_dir", "if", "directory", "is", "None", ":", "cache_dir", "=", "None", "return", "if", "not", "os", ".", "path", ".", "exists", "(", "directory", ")", ":", "os", ".", "makedirs", "(", "directory", ")", "if", "not", "os", ".", "path", ".", "isdir", "(", "directory", ")", ":", "raise", "ValueError", "(", "\"not a directory\"", ")", "cache_dir", "=", "directory" ]
Convert SAM to FASTQ.
def sam2fastq(sam, singles=False, force=False):
    L, R = None, None
    for line in sam:
        if line.startswith('@') is True:
            continue
        line = line.strip().split()
        # decode the SAM FLAG field (line[1]) into individual bits,
        # least significant bit first
        bit = [True if i == '1' else False
               for i in bin(int(line[1])).split('b')[1][::-1]]
        while len(bit) < 8:
            bit.append(False)
        pair, proper, na, nap, rev, mrev, left, right = bit
        # make sure read is paired
        if pair is False:
            if singles is True:
                print_single(line, rev)
            continue
        # check if sequence is reverse-complemented
        if rev is True:
            seq = rc(['', line[9]])[1]
            qual = line[10][::-1]
        else:
            seq = line[9]
            qual = line[10]
        # check if read is forward or reverse, return when both have been found
        if left is True:
            if L is not None and force is False:
                print('sam file is not sorted', file=sys.stderr)
                print('\te.g.: %s' % (line[0]), file=sys.stderr)
                exit()
            if L is not None:
                L = None
                continue
            L = ['@%s' % line[0], seq, '+%s' % line[0], qual]
            if R is not None:
                yield L
                yield R
                L, R = None, None
        if right is True:
            if R is not None and force is False:
                print('sam file is not sorted', file=sys.stderr)
                print('\te.g.: %s' % (line[0]), file=sys.stderr)
                exit()
            if R is not None:
                R = None
                continue
            R = ['@%s' % line[0], seq, '+%s' % line[0], qual]
            if L is not None:
                yield L
                yield R
                L, R = None, None
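The bit list mirrors the standard SAM FLAG definitions (0x1 paired, 0x10 reverse strand, 0x40 first in pair, 0x80 second in pair). A worked decoding of FLAG 99 (0x1 + 0x2 + 0x20 + 0x40):

    bits = [c == '1' for c in bin(99).split('b')[1][::-1]]
    bits += [False] * (8 - len(bits))
    pair, proper, na, nap, rev, mrev, left, right = bits
    # -> pair=True, proper=True, rev=False, mrev=True, left=True, right=False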
28
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/sam2fastq.py#L26-L78
[ "def", "get_changed_devices", "(", "self", ",", "timestamp", ")", ":", "if", "timestamp", "is", "None", ":", "payload", "=", "{", "}", "else", ":", "payload", "=", "{", "'timeout'", ":", "SUBSCRIPTION_WAIT", ",", "'minimumdelay'", ":", "SUBSCRIPTION_MIN_WAIT", "}", "payload", ".", "update", "(", "timestamp", ")", "# double the timeout here so requests doesn't timeout before vera", "payload", ".", "update", "(", "{", "'id'", ":", "'lu_sdata'", ",", "}", ")", "logger", ".", "debug", "(", "\"get_changed_devices() requesting payload %s\"", ",", "str", "(", "payload", ")", ")", "r", "=", "self", ".", "data_request", "(", "payload", ",", "TIMEOUT", "*", "2", ")", "r", ".", "raise_for_status", "(", ")", "# If the Vera disconnects before writing a full response (as lu_sdata", "# will do when interrupted by a Luup reload), the requests module will", "# happily return 200 with an empty string. So, test for empty response,", "# so we don't rely on the JSON parser to throw an exception.", "if", "r", ".", "text", "==", "\"\"", ":", "raise", "PyveraError", "(", "\"Empty response from Vera\"", ")", "# Catch a wide swath of what the JSON parser might throw, within", "# reason. Unfortunately, some parsers don't specifically return", "# json.decode.JSONDecodeError, but so far most seem to derive what", "# they do throw from ValueError, so that's helpful.", "try", ":", "result", "=", "r", ".", "json", "(", ")", "except", "ValueError", "as", "ex", ":", "raise", "PyveraError", "(", "\"JSON decode error: \"", "+", "str", "(", "ex", ")", ")", "if", "not", "(", "type", "(", "result", ")", "is", "dict", "and", "'loadtime'", "in", "result", "and", "'dataversion'", "in", "result", ")", ":", "raise", "PyveraError", "(", "\"Unexpected/garbled response from Vera\"", ")", "# At this point, all good. Update timestamp and return change data.", "device_data", "=", "result", ".", "get", "(", "'devices'", ")", "timestamp", "=", "{", "'loadtime'", ":", "result", ".", "get", "(", "'loadtime'", ")", ",", "'dataversion'", ":", "result", ".", "get", "(", "'dataversion'", ")", "}", "return", "[", "device_data", ",", "timestamp", "]" ]
Sort a SAM file.
def sort_sam(sam, sort):
    tempdir = '%s/' % (os.path.abspath(sam).rsplit('/', 1)[0])
    if sort is True:
        mapping = '%s.sorted.sam' % (sam.rsplit('.', 1)[0])
        if sam != '-':
            if os.path.exists(mapping) is False:
                # sbuffer: sort memory buffer size (GB), defined elsewhere
                # in the original script
                os.system("sort -k1 --buffer-size=%sG -T %s -o %s %s"
                          % (sbuffer, tempdir, mapping, sam))
        else:
            mapping = 'stdin-sam.sorted.sam'
            p = Popen("sort -k1 --buffer-size=%sG -T %s -o %s"
                      % (sbuffer, tempdir, mapping),
                      stdin=sys.stdin, shell=True)
            p.communicate()
        mapping = open(mapping)
    else:
        if sam == '-':
            mapping = sys.stdin
        else:
            mapping = open(sam)
    return mapping
29
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/subset_sam.py#L14-L37
[ "def", "cmd_oreoled", "(", "self", ",", "args", ")", ":", "if", "len", "(", "args", ")", "<", "4", ":", "print", "(", "\"Usage: oreoled LEDNUM RED GREEN BLUE <RATE>\"", ")", "return", "lednum", "=", "int", "(", "args", "[", "0", "]", ")", "pattern", "=", "[", "0", "]", "*", "24", "pattern", "[", "0", "]", "=", "ord", "(", "'R'", ")", "pattern", "[", "1", "]", "=", "ord", "(", "'G'", ")", "pattern", "[", "2", "]", "=", "ord", "(", "'B'", ")", "pattern", "[", "3", "]", "=", "ord", "(", "'0'", ")", "pattern", "[", "4", "]", "=", "0", "pattern", "[", "5", "]", "=", "int", "(", "args", "[", "1", "]", ")", "pattern", "[", "6", "]", "=", "int", "(", "args", "[", "2", "]", ")", "pattern", "[", "7", "]", "=", "int", "(", "args", "[", "3", "]", ")", "self", ".", "master", ".", "mav", ".", "led_control_send", "(", "self", ".", "settings", ".", "target_system", ",", "self", ".", "settings", ".", "target_component", ",", "lednum", ",", "255", ",", "8", ",", "pattern", ")" ]
Randomly subset a SAM file.
def sub_sam(sam, percent, sort=True, sbuffer=False):
    mapping = sort_sam(sam, sort)
    pool = [1 for i in range(0, percent)] + [0 for i in range(0, 100 - percent)]
    c = cycle([1, 2])
    for line in mapping:
        line = line.strip().split()
        if line[0].startswith('@'):
            # get the sam header
            yield line
            continue
        if int(line[1]) <= 20:
            # is this from a single read?
            if random.choice(pool) == 1:
                yield line
        else:
            n = next(c)
            if n == 1:
                prev = line
            if n == 2 and random.choice(pool) == 1:
                yield prev
                yield line
30
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/subset_sam.py#L39-L60
[ "def", "post", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "try", ":", "resp", "=", "self", ".", "session", ".", "post", "(", "*", "args", ",", "*", "*", "kwargs", ")", "if", "resp", ".", "status_code", "in", "_EXCEPTIONS_BY_CODE", ":", "raise", "_EXCEPTIONS_BY_CODE", "[", "resp", ".", "status_code", "]", "(", "resp", ".", "reason", ")", "if", "resp", ".", "status_code", "!=", "requests", ".", "codes", "[", "'ok'", "]", ":", "raise", "exceptions", ".", "Etcd3Exception", "(", "resp", ".", "reason", ")", "except", "requests", ".", "exceptions", ".", "Timeout", "as", "ex", ":", "raise", "exceptions", ".", "ConnectionTimeoutError", "(", "six", ".", "text_type", "(", "ex", ")", ")", "except", "requests", ".", "exceptions", ".", "ConnectionError", "as", "ex", ":", "raise", "exceptions", ".", "ConnectionFailedError", "(", "six", ".", "text_type", "(", "ex", ")", ")", "return", "resp", ".", "json", "(", ")" ]
Convert FASTQ (fq) to FASTA (fa).
def fq2fa(fq):
    c = cycle([1, 2, 3, 4])
    for line in fq:
        n = next(c)
        if n == 1:
            seq = ['>%s' % (line.strip().split('@', 1)[1])]
        if n == 2:
            seq.append(line.strip())
            yield seq
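For illustration (a minimal invented FASTQ record; the '+' and quality lines are simply skipped):

    fq = ['@read1\n', 'ACGT\n', '+\n', 'IIII\n']
    list(fq2fa(fq))  # -> [['>read1', 'ACGT']]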
31
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/fastq2fasta.py#L11-L22
[ "def", "run_configurations", "(", "callback", ",", "sections_reader", ")", ":", "base", "=", "dict", "(", "OPTIONS", ")", "sections", "=", "sections_reader", "(", ")", "if", "sections", "is", "None", ":", "logger", ".", "info", "(", "\"Configuration not found in .ini files. \"", "\"Running with default settings\"", ")", "recompile", "(", ")", "elif", "sections", "==", "[", "]", ":", "logger", ".", "info", "(", "\"Configuration does not match current runtime. \"", "\"Exiting\"", ")", "results", "=", "[", "]", "for", "section", ",", "options", "in", "sections", ":", "OPTIONS", ".", "clear", "(", ")", "OPTIONS", ".", "update", "(", "base", ")", "OPTIONS", ".", "update", "(", "options", ")", "logger", ".", "debug", "(", "\"Running configuration from section \\\"%s\\\". OPTIONS: %r\"", ",", "section", ",", "OPTIONS", ")", "results", ".", "append", "(", "callback", "(", ")", ")", "return", "results" ]
Converts the return value of the wrapped function to the type of the first arg, or to the type specified by the value of the 'return_type' kwarg.
def change_return_type(f):
    @wraps(f)
    def wrapper(*args, **kwargs):
        if kwargs.has_key('return_type'):  # Python 2 dict method
            return_type = kwargs['return_type']
            kwargs.pop('return_type')
            return return_type(f(*args, **kwargs))
        elif len(args) > 0:
            return_type = type(args[0])
            return return_type(f(*args, **kwargs))
        else:
            return f(*args, **kwargs)
    return wrapper
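A minimal usage sketch (the decorated function is invented for illustration):

    @change_return_type
    def union(a, b):
        return set(a) | set(b)

    union({1, 2}, [2, 3])                    # -> {1, 2, 3} (type of the first arg)
    union({1, 2}, [2, 3], return_type=list)  # -> a list of 1, 2, 3 (forced by kwarg)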
32
https://github.com/elbow-jason/Uno-deprecated/blob/4ad07d7b84e5b6e3e2b2c89db69448906f24b4e4/uno/decorators.py#L11-L27
[ "def", "is_running", "(", "self", ")", ":", "self", ".", "__update_status", "(", ")", "return", "self", ".", "status", "==", "Status", ".", "UP", "or", "self", ".", "status", "==", "Status", ".", "DECOMMISSIONED" ]
Converts all args to set type via the setify function.
def convert_args_to_sets(f):
    @wraps(f)
    def wrapper(*args, **kwargs):
        args = (setify(x) for x in args)
        return f(*args, **kwargs)
    return wrapper
33
https://github.com/elbow-jason/Uno-deprecated/blob/4ad07d7b84e5b6e3e2b2c89db69448906f24b4e4/uno/decorators.py#L30-L38
[ "def", "remove_pickle_problems", "(", "obj", ")", ":", "if", "hasattr", "(", "obj", ",", "\"doc_loader\"", ")", ":", "obj", ".", "doc_loader", "=", "None", "if", "hasattr", "(", "obj", ",", "\"embedded_tool\"", ")", ":", "obj", ".", "embedded_tool", "=", "remove_pickle_problems", "(", "obj", ".", "embedded_tool", ")", "if", "hasattr", "(", "obj", ",", "\"steps\"", ")", ":", "obj", ".", "steps", "=", "[", "remove_pickle_problems", "(", "s", ")", "for", "s", "in", "obj", ".", "steps", "]", "return", "obj" ]
Creates entry objects from the fetched page.
def _init_entri(self, laman):
    sup = BeautifulSoup(laman.text, 'html.parser')
    estr = ''
    for label in sup.find('hr').next_siblings:
        if label.name == 'hr':
            self.entri.append(Entri(estr))
            break
        if label.name == 'h2':
            if estr:
                self.entri.append(Entri(estr))
            estr = ''
        estr += str(label).strip()
34
https://github.com/laymonage/kbbi-python/blob/1a52ba8bcc6dc4c5c1215f9e00207aca264287d6/kbbi/kbbi.py#L46-L63
[ "def", "clone", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "_client", ".", "clone_scope", "(", "*", "args", ",", "source_scope", "=", "self", ",", "*", "*", "kwargs", ")" ]
Processes the root words (kata dasar) contained in the entry name.
def _init_kata_dasar(self, dasar):
    for tiap in dasar:
        kata = tiap.find('a')
        dasar_no = kata.find('sup')
        kata = ambil_teks_dalam_label(kata)
        self.kata_dasar.append(
            kata + ' [{}]'.format(dasar_no.text.strip()) if dasar_no else kata)
35
https://github.com/laymonage/kbbi-python/blob/1a52ba8bcc6dc4c5c1215f9e00207aca264287d6/kbbi/kbbi.py#L126-L139
[ "def", "fast_sync_snapshot_decompress", "(", "snapshot_path", ",", "output_dir", ")", ":", "if", "not", "tarfile", ".", "is_tarfile", "(", "snapshot_path", ")", ":", "return", "{", "'error'", ":", "'Not a tarfile-compatible archive: {}'", ".", "format", "(", "snapshot_path", ")", "}", "if", "not", "os", ".", "path", ".", "exists", "(", "output_dir", ")", ":", "os", ".", "makedirs", "(", "output_dir", ")", "with", "tarfile", ".", "TarFile", ".", "bz2open", "(", "snapshot_path", ",", "'r'", ")", "as", "f", ":", "tarfile", ".", "TarFile", ".", "extractall", "(", "f", ",", "path", "=", "output_dir", ")", "return", "{", "'status'", ":", "True", "}" ]
Returns the serialized form of this Entri object.
def serialisasi(self):
    return {
        "nama": self.nama,
        "nomor": self.nomor,
        "kata_dasar": self.kata_dasar,
        "pelafalan": self.pelafalan,
        "bentuk_tidak_baku": self.bentuk_tidak_baku,
        "varian": self.varian,
        "makna": [makna.serialisasi() for makna in self.makna]
    }
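A sketch of the dictionary this returns for a hypothetical entry (values invented; the shape of each makna item comes from Makna.serialisasi further down):

    {
        "nama": "contoh",
        "nomor": "",
        "kata_dasar": [],
        "pelafalan": "",
        "bentuk_tidak_baku": [],
        "varian": [],
        "makna": [{"kelas": {...}, "submakna": "...", "info": "", "contoh": [...]}]
    }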
36
https://github.com/laymonage/kbbi-python/blob/1a52ba8bcc6dc4c5c1215f9e00207aca264287d6/kbbi/kbbi.py#L141-L156
[ "def", "construct_ctcp", "(", "*", "parts", ")", ":", "message", "=", "' '", ".", "join", "(", "parts", ")", "message", "=", "message", ".", "replace", "(", "'\\0'", ",", "CTCP_ESCAPE_CHAR", "+", "'0'", ")", "message", "=", "message", ".", "replace", "(", "'\\n'", ",", "CTCP_ESCAPE_CHAR", "+", "'n'", ")", "message", "=", "message", ".", "replace", "(", "'\\r'", ",", "CTCP_ESCAPE_CHAR", "+", "'r'", ")", "message", "=", "message", ".", "replace", "(", "CTCP_ESCAPE_CHAR", ",", "CTCP_ESCAPE_CHAR", "+", "CTCP_ESCAPE_CHAR", ")", "return", "CTCP_DELIMITER", "+", "message", "+", "CTCP_DELIMITER" ]
Returns the string representation of all meanings (makna) of this entry.
def _makna(self):
    if len(self.makna) > 1:
        return '\n'.join(
            str(i) + ". " + str(makna)
            for i, makna in enumerate(self.makna, 1))
    return str(self.makna[0])
37
https://github.com/laymonage/kbbi-python/blob/1a52ba8bcc6dc4c5c1215f9e00207aca264287d6/kbbi/kbbi.py#L158-L170
[ "def", "as_of", "(", "self", ",", "qtime", "=", "None", ")", ":", "clone", "=", "self", ".", "_clone", "(", ")", "clone", ".", "querytime", "=", "QueryTime", "(", "time", "=", "qtime", ",", "active", "=", "True", ")", "return", "clone" ]
Returns the string representation of this entry's name.
def _nama(self):
    hasil = self.nama
    if self.nomor:
        hasil += " [{}]".format(self.nomor)
    if self.kata_dasar:
        hasil = " » ".join(self.kata_dasar) + " » " + hasil
    return hasil
38
https://github.com/laymonage/kbbi-python/blob/1a52ba8bcc6dc4c5c1215f9e00207aca264287d6/kbbi/kbbi.py#L172-L184
[ "def", "as_of", "(", "self", ",", "qtime", "=", "None", ")", ":", "clone", "=", "self", ".", "_clone", "(", ")", "clone", ".", "querytime", "=", "QueryTime", "(", "time", "=", "qtime", ",", "active", "=", "True", ")", "return", "clone" ]
Returns the string representation of this entry's variants. Can be used for both Varian and Bentuk tidak baku (nonstandard forms).
def _varian(self, varian):
    if varian == self.bentuk_tidak_baku:
        nama = "Bentuk tidak baku"
    elif varian == self.varian:
        nama = "Varian"
    else:
        return ''
    return nama + ': ' + ', '.join(varian)
39
https://github.com/laymonage/kbbi-python/blob/1a52ba8bcc6dc4c5c1215f9e00207aca264287d6/kbbi/kbbi.py#L186-L202
[ "def", "as_of", "(", "self", ",", "qtime", "=", "None", ")", ":", "clone", "=", "self", ".", "_clone", "(", ")", "clone", ".", "querytime", "=", "QueryTime", "(", "time", "=", "qtime", ",", "active", "=", "True", ")", "return", "clone" ]
Processes the word classes (kelas kata) contained in a meaning.
def _init_kelas(self, makna_label):
    kelas = makna_label.find(color='red')
    lain = makna_label.find(color='darkgreen')
    info = makna_label.find(color='green')
    if kelas:
        kelas = kelas.find_all('span')
    if lain:
        self.kelas = {lain.text.strip(): lain['title'].strip()}
        self.submakna = lain.next_sibling.strip()
        self.submakna += ' ' + makna_label.find(color='grey').text.strip()
    else:
        self.kelas = {k.text.strip(): k['title'].strip()
                      for k in kelas} if kelas else {}
    self.info = info.text.strip() if info else ''
40
https://github.com/laymonage/kbbi-python/blob/1a52ba8bcc6dc4c5c1215f9e00207aca264287d6/kbbi/kbbi.py#L239-L259
[ "def", "crc", "(", "self", ")", ":", "# will make sure everything has been transferred", "# to datastore that needs to be before returning crc", "result", "=", "self", ".", "_data", ".", "fast_hash", "(", ")", "if", "hasattr", "(", "self", ".", "mesh", ",", "'crc'", ")", ":", "# bitwise xor combines hashes better than a sum", "result", "^=", "self", ".", "mesh", ".", "crc", "(", ")", "return", "result" ]
Processes the usage examples (contoh) contained in a meaning.
def _init_contoh(self, makna_label):
    indeks = makna_label.text.find(': ')
    if indeks != -1:
        contoh = makna_label.text[indeks + 2:].strip()
        self.contoh = contoh.split('; ')
    else:
        self.contoh = []
41
https://github.com/laymonage/kbbi-python/blob/1a52ba8bcc6dc4c5c1215f9e00207aca264287d6/kbbi/kbbi.py#L261-L273
[ "def", "setHalfLife", "(", "self", ",", "halfLife", ",", "timeUnit", ")", ":", "self", ".", "_timeUnit", "=", "timeUnit", "self", ".", "_decayFactor", "=", "exp", "(", "log", "(", "0.5", ")", "/", "halfLife", ")", "return", "self" ]
Return the serialized form of this Makna object .
def serialisasi ( self ) : return { "kelas" : self . kelas , "submakna" : self . submakna , "info" : self . info , "contoh" : self . contoh }
42
https://github.com/laymonage/kbbi-python/blob/1a52ba8bcc6dc4c5c1215f9e00207aca264287d6/kbbi/kbbi.py#L275-L287
[ "def", "escape_latex", "(", "text", ")", ":", "text", "=", "unicode", "(", "text", ".", "decode", "(", "'utf-8'", ")", ")", "CHARS", "=", "{", "'&'", ":", "r'\\&'", ",", "'%'", ":", "r'\\%'", ",", "'$'", ":", "r'\\$'", ",", "'#'", ":", "r'\\#'", ",", "'_'", ":", "r'\\_'", ",", "'{'", ":", "r'\\{'", ",", "'}'", ":", "r'\\}'", ",", "'~'", ":", "r'\\~{}'", ",", "'^'", ":", "r'\\^{}'", ",", "'\\\\'", ":", "r'\\textbackslash{}'", ",", "}", "escaped", "=", "\"\"", ".", "join", "(", "[", "CHARS", ".", "get", "(", "char", ",", "char", ")", "for", "char", "in", "text", "]", ")", "return", "escaped", ".", "encode", "(", "'utf-8'", ")" ]
Build Sphinx documentation .
def build_sphinx ( pkg_data , projectdir ) : try : version , _minor_version = pkg_data . version . rsplit ( '.' , 1 ) except ValueError : version = pkg_data . version args = ' ' . join ( ( 'sphinx-quickstart' , '--sep' , '-q' , '-p "{name}"' , '-a "{author}"' , '-v "{version}"' , '-r "{release}"' , '-l en' , '--suffix=.rst' , '--master=index' , '--ext-autodoc' , '--ext-viewcode' , '--makefile' , '{projectdir}' ) ) . format ( name = pkg_data . name , author = pkg_data . author , version = version , release = pkg_data . version , projectdir = projectdir ) if subprocess . call ( shlex . split ( args ) ) == 0 : _touch_gitkeep ( projectdir )
43
https://github.com/mkouhei/bootstrap-py/blob/95d56ed98ef409fd9f019dc352fd1c3711533275/bootstrap_py/docs.py#L8-L40
[ "def", "relative_humidity_wet_psychrometric", "(", "dry_bulb_temperature", ",", "web_bulb_temperature", ",", "pressure", ",", "*", "*", "kwargs", ")", ":", "return", "(", "psychrometric_vapor_pressure_wet", "(", "dry_bulb_temperature", ",", "web_bulb_temperature", ",", "pressure", ",", "*", "*", "kwargs", ")", "/", "saturation_vapor_pressure", "(", "dry_bulb_temperature", ")", ")" ]
make bowtie db
def bowtiedb ( fa , keepDB ) : btdir = '%s/bt2' % ( os . getcwd ( ) ) # make directory for bowtie2 databases if not os . path . exists ( btdir ) : os . mkdir ( btdir ) btdb = '%s/%s' % ( btdir , fa . rsplit ( '/' , 1 ) [ - 1 ] ) if keepDB is True : if os . path . exists ( '%s.1.bt2' % ( btdb ) ) : return btdb p = subprocess . Popen ( 'bowtie2-build -q %s %s' % ( fa , btdb ) , shell = True ) p . communicate ( ) return btdb
44
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/crossmap.py#L16-L31
[ "def", "parse", "(", "self", ",", "content", ")", ":", "raw", "=", "{", "}", "try", ":", "root", "=", "etree", ".", "fromstring", "(", "content", ")", "except", "SyntaxError", "as", "e", ":", "raise", "ValueError", "(", "*", "e", ".", "args", ")", "for", "child", "in", "root", ":", "raw", "[", "child", ".", "tag", "]", "=", "child", ".", "text", "formatted", "=", "self", ".", "format", "(", "raw", ")", "msg_type", "=", "formatted", "[", "'type'", "]", "msg_parser", "=", "getattr", "(", "self", ",", "'parse_%s'", "%", "msg_type", ",", "None", ")", "if", "callable", "(", "msg_parser", ")", ":", "parsed", "=", "msg_parser", "(", "raw", ")", "else", ":", "parsed", "=", "self", ".", "parse_invalid_type", "(", "raw", ")", "formatted", ".", "update", "(", "parsed", ")", "return", "formatted" ]
generate bowtie2 command
def bowtie ( sam , btd , f , r , u , opt , no_shrink , threads ) : bt2 = 'bowtie2 -x %s -p %s ' % ( btd , threads ) if f is not False : bt2 += '-1 %s -2 %s ' % ( f , r ) if u is not False : bt2 += '-U %s ' % ( u ) bt2 += opt if no_shrink is False : if f is False : bt2 += ' | shrinksam -u -k %s-shrunk.sam ' % ( sam ) else : bt2 += ' | shrinksam -k %s-shrunk.sam ' % ( sam ) else : bt2 += ' > %s.sam' % ( sam ) return bt2
45
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/crossmap.py#L33-L50
[ "def", "format_string", "(", "self", ",", "s", ",", "args", ",", "kwargs", ")", ":", "if", "isinstance", "(", "s", ",", "Markup", ")", ":", "formatter", "=", "SandboxedEscapeFormatter", "(", "self", ",", "s", ".", "escape", ")", "else", ":", "formatter", "=", "SandboxedFormatter", "(", "self", ")", "kwargs", "=", "_MagicFormatMapping", "(", "args", ",", "kwargs", ")", "rv", "=", "formatter", ".", "vformat", "(", "s", ",", "args", ",", "kwargs", ")", "return", "type", "(", "s", ")", "(", "rv", ")" ]
map all read sets against all fasta files
def crossmap ( fas , reads , options , no_shrink , keepDB , threads , cluster , nodes ) : if cluster is True : threads = '48' btc = [ ] for fa in fas : btd = bowtiedb ( fa , keepDB ) F , R , U = reads if F is not False : if U is False : u = False for i , f in enumerate ( F ) : r = R [ i ] if U is not False : u = U [ i ] sam = '%s/%s-vs-%s' % ( os . getcwd ( ) , fa . rsplit ( '/' , 1 ) [ - 1 ] , f . rsplit ( '/' , 1 ) [ - 1 ] . rsplit ( '.' , 3 ) [ 0 ] ) btc . append ( bowtie ( sam , btd , f , r , u , options , no_shrink , threads ) ) else : f = False r = False for u in U : sam = '%s/%s-vs-%s' % ( os . getcwd ( ) , fa . rsplit ( '/' , 1 ) [ - 1 ] , u . rsplit ( '/' , 1 ) [ - 1 ] . rsplit ( '.' , 3 ) [ 0 ] ) btc . append ( bowtie ( sam , btd , f , r , u , options , no_shrink , threads ) ) if cluster is False : for i in btc : p = subprocess . Popen ( i , shell = True ) p . communicate ( ) else : ID = '' . join ( random . choice ( [ str ( i ) for i in range ( 0 , 9 ) ] ) for _ in range ( 5 ) ) for node , commands in enumerate ( chunks ( btc , nodes ) , 1 ) : bs = open ( '%s/crossmap-qsub.%s.%s.sh' % ( os . getcwd ( ) , ID , node ) , 'w' ) print ( '\n' . join ( commands ) , file = bs ) bs . close ( ) p = subprocess . Popen ( 'qsub -V -N crossmap %s' % ( bs . name ) , shell = True ) p . communicate ( )
46
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/crossmap.py#L55-L96
[ "async", "def", "socket_connection", "(", "self", ")", ":", "if", "not", "self", ".", "_registered", ":", "_LOGGER", ".", "error", "(", "'Client not registered, cannot start socket.'", ")", "return", "url", "=", "'{}?DeviceID={}&api_key={}'", ".", "format", "(", "self", ".", "construct_url", "(", "SOCKET_URL", ")", ",", "self", ".", "_api_id", ",", "self", ".", "_api_key", ")", "fail_count", "=", "0", "while", "True", ":", "_LOGGER", ".", "debug", "(", "'Attempting Socket Connection.'", ")", "try", ":", "with", "async_timeout", ".", "timeout", "(", "DEFAULT_TIMEOUT", ",", "loop", "=", "self", ".", "_event_loop", ")", ":", "self", ".", "wsck", "=", "await", "self", ".", "_api_session", ".", "ws_connect", "(", "url", ")", "# Enable sever session updates:", "try", ":", "msg", "=", "await", "self", ".", "wsck", ".", "send_str", "(", "'{\"MessageType\":\"SessionsStart\", \"Data\": \"0,1500\"}'", ")", "except", "Exception", "as", "err", ":", "# Catch all for now", "_LOGGER", ".", "error", "(", "'Failure setting session updates: %s'", ",", "err", ")", "raise", "ValueError", "(", "'Session updates error.'", ")", "_LOGGER", ".", "debug", "(", "'Socket Connected!'", ")", "fail_count", "=", "0", "while", "True", ":", "msg", "=", "await", "self", ".", "wsck", ".", "receive", "(", ")", "if", "msg", ".", "type", "==", "aiohttp", ".", "WSMsgType", ".", "text", ":", "# Process data", "self", ".", "process_msg", "(", "msg", ".", "data", ")", "elif", "msg", ".", "type", "==", "aiohttp", ".", "WSMsgType", ".", "closed", ":", "raise", "ValueError", "(", "'Websocket was closed.'", ")", "elif", "msg", ".", "type", "==", "aiohttp", ".", "WSMsgType", ".", "error", ":", "_LOGGER", ".", "debug", "(", "'Websocket encountered an error: %s'", ",", "msg", ")", "raise", "ValueError", "(", "'Websocket error.'", ")", "except", "(", "aiohttp", ".", "ClientError", ",", "asyncio", ".", "TimeoutError", ",", "aiohttp", ".", "WSServerHandshakeError", ",", "ConnectionRefusedError", ",", "OSError", ",", "ValueError", ")", "as", "err", ":", "if", "not", "self", ".", "_shutdown", ":", "fail_count", "+=", "1", "_LOGGER", ".", "debug", "(", "'Websocket unintentionally closed.'", "' Trying reconnect in %ss. Error: %s'", ",", "(", "fail_count", "*", "5", ")", "+", "5", ",", "err", ")", "await", "asyncio", ".", "sleep", "(", "15", ",", "self", ".", "_event_loop", ")", "continue", "else", ":", "break" ]
Returns a connection object from the router given args .
def get_conn ( self , * args , * * kwargs ) : connections = self . __connections_for ( 'get_conn' , args = args , kwargs = kwargs ) if len ( connections ) == 1 : return connections [ 0 ] else : return connections
47
https://github.com/disqus/nydus/blob/9b505840da47a34f758a830c3992fa5dcb7bb7ad/nydus/db/base.py#L100-L113
[ "def", "denormalize_volume", "(", "volume", ")", ":", "id", "=", "volume", ".", "get", "(", "'id'", ",", "None", ")", "res", "=", "dict", "(", ")", "res", ".", "update", "(", "volume", "[", "'metadata'", "]", ")", "denorm_attachments", "=", "list", "(", ")", "for", "a", "in", "volume", "[", "'attachments'", "]", ":", "denorm_attachments", ".", "append", "(", "Archivant", ".", "denormalize_attachment", "(", "a", ")", ")", "res", "[", "'_attachments'", "]", "=", "denorm_attachments", "return", "id", ",", "res" ]
return the non - direct init if the direct algorithm has been selected .
def __get_nondirect_init ( self , init ) : crc = init for i in range ( self . Width ) : bit = crc & 0x01 if bit : crc ^= self . Poly crc >>= 1 if bit : crc |= self . MSB_Mask return crc & self . Mask
48
https://github.com/scottrice/pysteam/blob/1eb2254b5235a053a953e596fa7602d0b110245d/pysteam/_crc_algorithms.py#L98-L110
[ "def", "vocab_token_counts", "(", "text_filepattern", ",", "max_lines", ")", ":", "ret", "=", "{", "}", "for", "i", ",", "line", "in", "enumerate", "(", "_read_filepattern", "(", "text_filepattern", ",", "max_lines", "=", "max_lines", ")", ")", ":", "if", "\",\"", "not", "in", "line", ":", "tf", ".", "logging", ".", "warning", "(", "\"Malformed vocab line #%d '%s'\"", ",", "i", ",", "line", ")", "continue", "token", ",", "count", "=", "line", ".", "rsplit", "(", "\",\"", ",", "1", ")", "ret", "[", "_native_to_unicode", "(", "token", ")", "]", "=", "int", "(", "count", ")", "return", "ret" ]
reflect a data word , i . e . reverse the bit order .
def reflect ( self , data , width ) : x = data & 0x01 for i in range ( width - 1 ) : data >>= 1 x = ( x << 1 ) | ( data & 0x01 ) return x
49
https://github.com/scottrice/pysteam/blob/1eb2254b5235a053a953e596fa7602d0b110245d/pysteam/_crc_algorithms.py#L115-L123
[ "def", "update", "(", "self", ",", "friendly_name", "=", "values", ".", "unset", ",", "unique_name", "=", "values", ".", "unset", ")", ":", "return", "self", ".", "_proxy", ".", "update", "(", "friendly_name", "=", "friendly_name", ",", "unique_name", "=", "unique_name", ",", ")" ]
Classic simple and slow CRC implementation . This function iterates bit by bit over the augmented input message and returns the calculated CRC value at the end .
def bit_by_bit ( self , in_data ) : # If the input data is a string, convert to bytes. if isinstance ( in_data , str ) : in_data = [ ord ( c ) for c in in_data ] register = self . NonDirectInit for octet in in_data : if self . ReflectIn : octet = self . reflect ( octet , 8 ) for i in range ( 8 ) : topbit = register & self . MSB_Mask register = ( ( register << 1 ) & self . Mask ) | ( ( octet >> ( 7 - i ) ) & 0x01 ) if topbit : register ^= self . Poly for i in range ( self . Width ) : topbit = register & self . MSB_Mask register = ( ( register << 1 ) & self . Mask ) if topbit : register ^= self . Poly if self . ReflectOut : register = self . reflect ( register , self . Width ) return register ^ self . XorOut
50
https://github.com/scottrice/pysteam/blob/1eb2254b5235a053a953e596fa7602d0b110245d/pysteam/_crc_algorithms.py#L128-L156
[ "def", "get_default_storage_policy_of_datastore", "(", "profile_manager", ",", "datastore", ")", ":", "# Retrieve all datastores visible", "hub", "=", "pbm", ".", "placement", ".", "PlacementHub", "(", "hubId", "=", "datastore", ".", "_moId", ",", "hubType", "=", "'Datastore'", ")", "log", ".", "trace", "(", "'placement_hub = %s'", ",", "hub", ")", "try", ":", "policy_id", "=", "profile_manager", ".", "QueryDefaultRequirementProfile", "(", "hub", ")", "except", "vim", ".", "fault", ".", "NoPermission", "as", "exc", ":", "log", ".", "exception", "(", "exc", ")", "raise", "VMwareApiError", "(", "'Not enough permissions. Required privilege: '", "'{0}'", ".", "format", "(", "exc", ".", "privilegeId", ")", ")", "except", "vim", ".", "fault", ".", "VimFault", "as", "exc", ":", "log", ".", "exception", "(", "exc", ")", "raise", "VMwareApiError", "(", "exc", ".", "msg", ")", "except", "vmodl", ".", "RuntimeFault", "as", "exc", ":", "log", ".", "exception", "(", "exc", ")", "raise", "VMwareRuntimeError", "(", "exc", ".", "msg", ")", "policy_refs", "=", "get_policies_by_id", "(", "profile_manager", ",", "[", "policy_id", "]", ")", "if", "not", "policy_refs", ":", "raise", "VMwareObjectRetrievalError", "(", "'Storage policy with id \\'{0}\\' was '", "'not found'", ".", "format", "(", "policy_id", ")", ")", "return", "policy_refs", "[", "0", "]" ]
This function generates the CRC table used for the table_driven CRC algorithm . The Python version cannot handle tables of an index width other than 8 . See the generated C code for tables with different sizes instead .
def gen_table ( self ) : table_length = 1 << self . TableIdxWidth tbl = [ 0 ] * table_length for i in range ( table_length ) : register = i if self . ReflectIn : register = self . reflect ( register , self . TableIdxWidth ) register = register << ( self . Width - self . TableIdxWidth + self . CrcShift ) for j in range ( self . TableIdxWidth ) : if register & ( self . MSB_Mask << self . CrcShift ) != 0 : register = ( register << 1 ) ^ ( self . Poly << self . CrcShift ) else : register = ( register << 1 ) if self . ReflectIn : register = self . reflect ( register >> self . CrcShift , self . Width ) << self . CrcShift tbl [ i ] = register & ( self . Mask << self . CrcShift ) return tbl
51
https://github.com/scottrice/pysteam/blob/1eb2254b5235a053a953e596fa7602d0b110245d/pysteam/_crc_algorithms.py#L190-L212
[ "def", "setGradingNotFinishedStateAction", "(", "self", ",", "request", ",", "queryset", ")", ":", "for", "subm", "in", "queryset", ":", "subm", ".", "state", "=", "Submission", ".", "GRADING_IN_PROGRESS", "subm", ".", "save", "(", ")" ]
The Standard table_driven CRC algorithm .
def table_driven ( self , in_data ) : # If the input data is a string, convert to bytes. if isinstance ( in_data , str ) : in_data = [ ord ( c ) for c in in_data ] tbl = self . gen_table ( ) register = self . DirectInit << self . CrcShift if not self . ReflectIn : for octet in in_data : tblidx = ( ( register >> ( self . Width - self . TableIdxWidth + self . CrcShift ) ) ^ octet ) & 0xff register = ( ( register << ( self . TableIdxWidth - self . CrcShift ) ) ^ tbl [ tblidx ] ) & ( self . Mask << self . CrcShift ) register = register >> self . CrcShift else : register = self . reflect ( register , self . Width + self . CrcShift ) << self . CrcShift for octet in in_data : tblidx = ( ( register >> self . CrcShift ) ^ octet ) & 0xff register = ( ( register >> self . TableIdxWidth ) ^ tbl [ tblidx ] ) & ( self . Mask << self . CrcShift ) register = self . reflect ( register , self . Width + self . CrcShift ) & self . Mask if self . ReflectOut : register = self . reflect ( register , self . Width ) return register ^ self . XorOut
52
https://github.com/scottrice/pysteam/blob/1eb2254b5235a053a953e596fa7602d0b110245d/pysteam/_crc_algorithms.py#L217-L242
[ "def", "remove", "(", "self", ",", "membership_id", ")", ":", "path", "=", "'{}/remove'", ".", "format", "(", "membership_id", ")", "url", "=", "utils", ".", "urljoin", "(", "self", ".", "url", ",", "path", ")", "payload", "=", "{", "'membership_id'", ":", "membership_id", "}", "response", "=", "self", ".", "session", ".", "post", "(", "url", ",", "json", "=", "payload", ")", "return", "response", ".", "ok" ]
parse masked sequence into non - masked and masked regions
def parse_masked ( seq , min_len ) : nm , masked = [ ] , [ [ ] ] prev = None for base in seq [ 1 ] : if base . isupper ( ) : nm . append ( base ) if masked != [ [ ] ] and len ( masked [ - 1 ] ) < min_len : nm . extend ( masked [ - 1 ] ) del masked [ - 1 ] prev = False elif base . islower ( ) : if prev is False : masked . append ( [ ] ) masked [ - 1 ] . append ( base ) prev = True return nm , masked
53
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/strip_masked.py#L13-L31
[ "def", "dl_cub", "(", "cub_url", ",", "cub_archive_name", ")", ":", "with", "open", "(", "cub_archive_name", ",", "'wb'", ")", "as", "f", ":", "remote_file", "=", "urllib2", ".", "urlopen", "(", "cub_url", ")", "meta", "=", "remote_file", ".", "info", "(", ")", "# The server may provide us with the size of the file.", "cl_header", "=", "meta", ".", "getheaders", "(", "\"Content-Length\"", ")", "remote_file_size", "=", "int", "(", "cl_header", "[", "0", "]", ")", "if", "len", "(", "cl_header", ")", ">", "0", "else", "None", "# Initialise variables", "local_file_size", "=", "0", "block_size", "=", "128", "*", "1024", "# Do the download", "while", "True", ":", "data", "=", "remote_file", ".", "read", "(", "block_size", ")", "if", "not", "data", ":", "break", "f", ".", "write", "(", "data", ")", "local_file_size", "+=", "len", "(", "data", ")", "if", "(", "remote_file_size", "is", "not", "None", "and", "not", "local_file_size", "==", "remote_file_size", ")", ":", "log", ".", "warn", "(", "\"Local file size '{}' \"", "\"does not match remote '{}'\"", ".", "format", "(", "local_file_size", ",", "remote_file_size", ")", ")", "remote_file", ".", "close", "(", ")" ]
remove masked regions from a fasta file when they are at least min_len long
def strip_masked ( fasta , min_len , print_masked ) : for seq in parse_fasta ( fasta ) : nm , masked = parse_masked ( seq , min_len ) nm = [ '%s removed_masked >=%s' % ( seq [ 0 ] , min_len ) , '' . join ( nm ) ] yield [ 0 , nm ] if print_masked is True : for i , m in enumerate ( [ i for i in masked if i != [ ] ] , 1 ) : m = [ '%s insertion:%s' % ( seq [ 0 ] , i ) , '' . join ( m ) ] yield [ 1 , m ]
54
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/strip_masked.py#L33-L45
[ "def", "__launchThreads", "(", "self", ",", "numThreads", ")", ":", "i", "=", "0", "while", "i", "<", "numThreads", ":", "self", ".", "__logger", ".", "debug", "(", "\"Launching thread number \"", "+", "str", "(", "i", ")", ")", "i", "+=", "1", "newThr", "=", "Thread", "(", "target", "=", "self", ".", "__processUsers", ")", "newThr", ".", "setDaemon", "(", "True", ")", "self", ".", "__threads", ".", "add", "(", "newThr", ")", "newThr", ".", "start", "(", ")" ]
Return arcsine transformed relative abundance from a BIOM format file .
def get_relative_abundance ( biomfile ) : biomf = biom . load_table ( biomfile ) norm_biomf = biomf . norm ( inplace = False ) rel_abd = { } for sid in norm_biomf . ids ( ) : rel_abd [ sid ] = { } for otuid in norm_biomf . ids ( "observation" ) : otuname = oc . otu_name ( norm_biomf . metadata ( otuid , axis = "observation" ) [ "taxonomy" ] ) otuname = " " . join ( otuname . split ( "_" ) ) abd = norm_biomf . get_value_by_ids ( otuid , sid ) rel_abd [ sid ] [ otuname ] = abd ast_rel_abd = bc . arcsine_sqrt_transform ( rel_abd ) return ast_rel_abd
55
https://github.com/smdabdoub/phylotoast/blob/0b74ef171e6a84761710548501dfac71285a58a3/bin/network_plots_gephi.py#L33-L57
[ "def", "is_expired", "(", "self", ",", "max_idle_seconds", ")", ":", "now", "=", "current_time", "(", ")", "return", "(", "self", ".", "expiration_time", "is", "not", "None", "and", "self", ".", "expiration_time", "<", "now", ")", "or", "(", "max_idle_seconds", "is", "not", "None", "and", "self", ".", "last_access_time", "+", "max_idle_seconds", "<", "now", ")" ]
Find an OTU ID in a Newick - format tree . Return the starting position of the ID or None if not found .
def find_otu ( otuid , tree ) : for m in re . finditer ( otuid , tree ) : before , after = tree [ m . start ( ) - 1 ] , tree [ m . start ( ) + len ( otuid ) ] if before in [ "(" , "," , ")" ] and after in [ ":" , ";" ] : return m . start ( ) return None
56
https://github.com/smdabdoub/phylotoast/blob/0b74ef171e6a84761710548501dfac71285a58a3/bin/iTol.py#L17-L26
[ "async", "def", "_wait_exponentially", "(", "self", ",", "exception", ",", "max_wait_time", "=", "300", ")", ":", "wait_time", "=", "min", "(", "(", "2", "**", "self", ".", "_connection_attempts", ")", "+", "random", ".", "random", "(", ")", ",", "max_wait_time", ")", "try", ":", "wait_time", "=", "exception", ".", "response", "[", "\"headers\"", "]", "[", "\"Retry-After\"", "]", "except", "(", "KeyError", ",", "AttributeError", ")", ":", "pass", "self", ".", "_logger", ".", "debug", "(", "\"Waiting %s seconds before reconnecting.\"", ",", "wait_time", ")", "await", "asyncio", ".", "sleep", "(", "float", "(", "wait_time", ")", ")" ]
Replace the OTU ids in the Newick phylogenetic tree format with truncated OTU names
def newick_replace_otuids ( tree , biomf ) : for val , id_ , md in biomf . iter ( axis = "observation" ) : otu_loc = find_otu ( id_ , tree ) if otu_loc is not None : tree = tree [ : otu_loc ] + oc . otu_name ( md [ "taxonomy" ] ) + tree [ otu_loc + len ( id_ ) : ] return tree
57
https://github.com/smdabdoub/phylotoast/blob/0b74ef171e6a84761710548501dfac71285a58a3/bin/iTol.py#L29-L40
[ "def", "_eval_progress", "(", "self", ",", "match", ")", ":", "_locals", "=", "{", "k", ":", "safe_float", "(", "v", ")", "for", "k", ",", "v", "in", "match", ".", "groupdict", "(", ")", ".", "items", "(", ")", "}", "if", "\"x\"", "not", "in", "_locals", ":", "_locals", "[", "\"x\"", "]", "=", "[", "safe_float", "(", "x", ")", "for", "x", "in", "match", ".", "groups", "(", ")", "]", "try", ":", "return", "int", "(", "eval", "(", "self", ".", "progress_expr", ",", "{", "}", ",", "_locals", ")", ")", "except", ":", "return", "None" ]
return genome info for choosing representative
def genome_info ( genome , info ) : try : scg = info [ '#SCGs' ] dups = info [ '#SCG duplicates' ] length = info [ 'genome size (bp)' ] return [ scg - dups , length , genome ] except : return [ False , False , info [ 'genome size (bp)' ] , genome ]
58
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/cluster_ani.py#L97-L112
[ "def", "_send_and_wait", "(", "self", ",", "*", "*", "kwargs", ")", ":", "frame_id", "=", "self", ".", "next_frame_id", "kwargs", ".", "update", "(", "dict", "(", "frame_id", "=", "frame_id", ")", ")", "self", ".", "_send", "(", "*", "*", "kwargs", ")", "timeout", "=", "datetime", ".", "now", "(", ")", "+", "const", ".", "RX_TIMEOUT", "while", "datetime", ".", "now", "(", ")", "<", "timeout", ":", "try", ":", "frame", "=", "self", ".", "_rx_frames", ".", "pop", "(", "frame_id", ")", "raise_if_error", "(", "frame", ")", "return", "frame", "except", "KeyError", ":", "sleep", "(", "0.1", ")", "continue", "_LOGGER", ".", "exception", "(", "\"Did not receive response within configured timeout period.\"", ")", "raise", "exceptions", ".", "ZigBeeResponseTimeout", "(", ")" ]
choose representative genome and print cluster information
def print_clusters ( fastas , info , ANI ) : header = [ '#cluster' , 'num. genomes' , 'rep.' , 'genome' , '#SCGs' , '#SCG duplicates' , 'genome size (bp)' , 'fragments' , 'list' ] yield header in_cluster = [ ] for cluster_num , cluster in enumerate ( connected_components ( ANI ) ) : cluster = sorted ( [ genome_info ( genome , info [ genome ] ) for genome in cluster ] , key = lambda x : x [ 0 : ] , reverse = True ) rep = cluster [ 0 ] [ - 1 ] cluster = [ i [ - 1 ] for i in cluster ] size = len ( cluster ) for genome in cluster : in_cluster . append ( genome ) try : stats = [ size , rep , genome , info [ genome ] [ '#SCGs' ] , info [ genome ] [ '#SCG duplicates' ] , info [ genome ] [ 'genome size (bp)' ] , info [ genome ] [ '# contigs' ] , cluster ] except : stats = [ size , rep , genome , 'n/a' , 'n/a' , info [ genome ] [ 'genome size (bp)' ] , info [ genome ] [ '# contigs' ] , cluster ] if rep == genome : stats = [ '*%s' % ( cluster_num ) ] + stats else : stats = [ cluster_num ] + stats yield stats # print singletons try : start = cluster_num + 1 except : start = 0 fastas = set ( [ i . rsplit ( '.' , 1 ) [ 0 ] . rsplit ( '/' , 1 ) [ - 1 ] . rsplit ( '.contigs' ) [ 0 ] for i in fastas ] ) for cluster_num , genome in enumerate ( fastas . difference ( set ( in_cluster ) ) , start ) : try : stats = [ '*%s' % ( cluster_num ) , 1 , genome , genome , info [ genome ] [ '#SCGs' ] , info [ genome ] [ '#SCG duplicates' ] , info [ genome ] [ 'genome size (bp)' ] , info [ genome ] [ '# contigs' ] , [ genome ] ] except : stats = [ '*%s' % ( cluster_num ) , 1 , genome , genome , 'n/a' , 'n/a' , info [ genome ] [ 'genome size (bp)' ] , info [ genome ] [ '# contigs' ] , [ genome ] ] yield stats
59
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/cluster_ani.py#L114-L163
[ "def", "delete", "(", "self", ",", "data_src", ")", ":", "items", "=", "self", ".", "objects", "[", "data_src", "]", ".", "data", ".", "keys", "(", ")", "# items to edit", "self", ".", "reg", ".", "unregister", "(", "items", ")", "# remove items from Registry", "self", ".", "layer", ".", "pop", "(", "data_src", ")", "# remove data source from layer", "self", ".", "objects", ".", "pop", "(", "data_src", ")", "# remove data_source object", "self", ".", "sources", ".", "pop", "(", "data_src", ")" ]
convert ggKbase genome info tables to dictionary
def parse_ggKbase_tables ( tables , id_type ) : g2info = { } for table in tables : for line in open ( table ) : line = line . strip ( ) . split ( '\t' ) if line [ 0 ] . startswith ( 'name' ) : header = line header [ 4 ] = 'genome size (bp)' header [ 12 ] = '#SCGs' header [ 13 ] = '#SCG duplicates' continue name , code , info = line [ 0 ] , line [ 1 ] , line info = [ to_int ( i ) for i in info ] if id_type is False : # try to use name and code ID if 'UNK' in code or 'unknown' in code : code = name if ( name != code ) and ( name in g2info or code in g2info ) : print ( '# duplicate name or code in table(s)' , file = sys . stderr ) print ( '# %s and/or %s' % ( name , code ) , file = sys . stderr ) exit ( ) if name not in g2info : g2info [ name ] = { item : stat for item , stat in zip ( header , info ) } if code not in g2info : g2info [ code ] = { item : stat for item , stat in zip ( header , info ) } else : if id_type == 'name' : ID = name elif id_type == 'code' : ID = code else : print ( '# specify name or code column using -id' , file = sys . stderr ) exit ( ) ID = ID . replace ( ' ' , '' ) g2info [ ID ] = { item : stat for item , stat in zip ( header , info ) } if g2info [ ID ] [ 'genome size (bp)' ] == '' : g2info [ ID ] [ 'genome size (bp)' ] = 0 return g2info
60
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/cluster_ani.py#L174-L213
[ "def", "minutes_for_sessions_in_range", "(", "self", ",", "start_session_label", ",", "end_session_label", ")", ":", "first_minute", ",", "_", "=", "self", ".", "open_and_close_for_session", "(", "start_session_label", ")", "_", ",", "last_minute", "=", "self", ".", "open_and_close_for_session", "(", "end_session_label", ")", "return", "self", ".", "minutes_in_range", "(", "first_minute", ",", "last_minute", ")" ]
convert checkM genome info tables to dictionary
def parse_checkM_tables ( tables ) : g2info = { } for table in tables : for line in open ( table ) : line = line . strip ( ) . split ( '\t' ) if line [ 0 ] . startswith ( 'Bin Id' ) : header = line header [ 8 ] = 'genome size (bp)' header [ 5 ] = '#SCGs' header [ 6 ] = '#SCG duplicates' continue ID , info = line [ 0 ] , line info = [ to_int ( i ) for i in info ] ID = ID . replace ( ' ' , '' ) g2info [ ID ] = { item : stat for item , stat in zip ( header , info ) } if g2info [ ID ] [ 'genome size (bp)' ] == '' : g2info [ ID ] [ 'genome size (bp)' ] = 0 return g2info
61
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/cluster_ani.py#L215-L235
[ "def", "start_transmit", "(", "self", ",", "blocking", "=", "False", ",", "start_packet_groups", "=", "True", ",", "*", "ports", ")", ":", "port_list", "=", "self", ".", "set_ports_list", "(", "*", "ports", ")", "if", "start_packet_groups", ":", "port_list_for_packet_groups", "=", "self", ".", "ports", ".", "values", "(", ")", "port_list_for_packet_groups", "=", "self", ".", "set_ports_list", "(", "*", "port_list_for_packet_groups", ")", "self", ".", "api", ".", "call_rc", "(", "'ixClearTimeStamp {}'", ".", "format", "(", "port_list_for_packet_groups", ")", ")", "self", ".", "api", ".", "call_rc", "(", "'ixStartPacketGroups {}'", ".", "format", "(", "port_list_for_packet_groups", ")", ")", "self", ".", "api", ".", "call_rc", "(", "'ixStartTransmit {}'", ".", "format", "(", "port_list", ")", ")", "time", ".", "sleep", "(", "0.2", ")", "if", "blocking", ":", "self", ".", "wait_transmit", "(", "*", "ports", ")" ]
get genome lengths
def genome_lengths ( fastas , info ) : if info is False : info = { } for genome in fastas : name = genome . rsplit ( '.' , 1 ) [ 0 ] . rsplit ( '/' , 1 ) [ - 1 ] . rsplit ( '.contigs' ) [ 0 ] if name in info : continue length = 0 fragments = 0 for seq in parse_fasta ( genome ) : length += len ( seq [ 1 ] ) fragments += 1 info [ name ] = { 'genome size (bp)' : length , '# contigs' : fragments } return info
62
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/cluster_ani.py#L237-L253
[ "def", "serverinfo", "(", "url", "=", "'http://localhost:8080/manager'", ",", "timeout", "=", "180", ")", ":", "data", "=", "_wget", "(", "'serverinfo'", ",", "{", "}", ",", "url", ",", "timeout", "=", "timeout", ")", "if", "data", "[", "'res'", "]", "is", "False", ":", "return", "{", "'error'", ":", "data", "[", "'msg'", "]", "}", "ret", "=", "{", "}", "data", "[", "'msg'", "]", ".", "pop", "(", "0", ")", "for", "line", "in", "data", "[", "'msg'", "]", ":", "tmp", "=", "line", ".", "split", "(", "':'", ")", "ret", "[", "tmp", "[", "0", "]", ".", "strip", "(", ")", "]", "=", "tmp", "[", "1", "]", ".", "strip", "(", ")", "return", "ret" ]
Returns a list of db keys to route the given call to .
def get_dbs ( self , attr , args , kwargs , * * fkwargs ) : if not self . _ready : if not self . setup_router ( args = args , kwargs = kwargs , * * fkwargs ) : raise self . UnableToSetupRouter ( ) retval = self . _pre_routing ( attr = attr , args = args , kwargs = kwargs , * * fkwargs ) if retval is not None : args , kwargs = retval if not ( args or kwargs ) : return self . cluster . hosts . keys ( ) try : db_nums = self . _route ( attr = attr , args = args , kwargs = kwargs , * * fkwargs ) except Exception as e : self . _handle_exception ( e ) db_nums = [ ] return self . _post_routing ( attr = attr , db_nums = db_nums , args = args , kwargs = kwargs , * * fkwargs )
63
https://github.com/disqus/nydus/blob/9b505840da47a34f758a830c3992fa5dcb7bb7ad/nydus/db/routers/base.py#L50-L81
[ "async", "def", "write", "(", "self", ",", "writer", ":", "Any", ",", "close_boundary", ":", "bool", "=", "True", ")", "->", "None", ":", "if", "not", "self", ".", "_parts", ":", "return", "for", "part", ",", "encoding", ",", "te_encoding", "in", "self", ".", "_parts", ":", "await", "writer", ".", "write", "(", "b'--'", "+", "self", ".", "_boundary", "+", "b'\\r\\n'", ")", "await", "writer", ".", "write", "(", "part", ".", "_binary_headers", ")", "if", "encoding", "or", "te_encoding", ":", "w", "=", "MultipartPayloadWriter", "(", "writer", ")", "if", "encoding", ":", "w", ".", "enable_compression", "(", "encoding", ")", "if", "te_encoding", ":", "w", ".", "enable_encoding", "(", "te_encoding", ")", "await", "part", ".", "write", "(", "w", ")", "# type: ignore", "await", "w", ".", "write_eof", "(", ")", "else", ":", "await", "part", ".", "write", "(", "writer", ")", "await", "writer", ".", "write", "(", "b'\\r\\n'", ")", "if", "close_boundary", ":", "await", "writer", ".", "write", "(", "b'--'", "+", "self", ".", "_boundary", "+", "b'--\\r\\n'", ")" ]
Call this method to perform any setup
def setup_router ( self , args , kwargs , * * fkwargs ) : self . _ready = self . _setup_router ( args = args , kwargs = kwargs , * * fkwargs ) return self . _ready
64
https://github.com/disqus/nydus/blob/9b505840da47a34f758a830c3992fa5dcb7bb7ad/nydus/db/routers/base.py#L87-L93
[ "def", "load_stl_ascii", "(", "file_obj", ")", ":", "# the first line is the header", "header", "=", "file_obj", ".", "readline", "(", ")", "# make sure header is a string, not bytes", "if", "hasattr", "(", "header", ",", "'decode'", ")", ":", "try", ":", "header", "=", "header", ".", "decode", "(", "'utf-8'", ")", "except", "BaseException", ":", "header", "=", "''", "# save header to metadata", "metadata", "=", "{", "'header'", ":", "header", "}", "# read all text into one string", "text", "=", "file_obj", ".", "read", "(", ")", "# convert bytes to string", "if", "hasattr", "(", "text", ",", "'decode'", ")", ":", "text", "=", "text", ".", "decode", "(", "'utf-8'", ")", "# split by endsolid keyword", "text", "=", "text", ".", "lower", "(", ")", ".", "split", "(", "'endsolid'", ")", "[", "0", "]", "# create array of splits", "blob", "=", "np", ".", "array", "(", "text", ".", "strip", "(", ")", ".", "split", "(", ")", ")", "# there are 21 'words' in each face", "face_len", "=", "21", "# length of blob should be multiple of face_len", "if", "(", "len", "(", "blob", ")", "%", "face_len", ")", "!=", "0", ":", "raise", "HeaderError", "(", "'Incorrect length STL file!'", ")", "face_count", "=", "int", "(", "len", "(", "blob", ")", "/", "face_len", ")", "# this offset is to be added to a fixed set of tiled indices", "offset", "=", "face_len", "*", "np", ".", "arange", "(", "face_count", ")", ".", "reshape", "(", "(", "-", "1", ",", "1", ")", ")", "normal_index", "=", "np", ".", "tile", "(", "[", "2", ",", "3", ",", "4", "]", ",", "(", "face_count", ",", "1", ")", ")", "+", "offset", "vertex_index", "=", "np", ".", "tile", "(", "[", "8", ",", "9", ",", "10", ",", "12", ",", "13", ",", "14", ",", "16", ",", "17", ",", "18", "]", ",", "(", "face_count", ",", "1", ")", ")", "+", "offset", "# faces are groups of three sequential vertices", "faces", "=", "np", ".", "arange", "(", "face_count", "*", "3", ")", ".", "reshape", "(", "(", "-", "1", ",", "3", ")", ")", "face_normals", "=", "blob", "[", "normal_index", "]", ".", "astype", "(", "'<f8'", ")", "vertices", "=", "blob", "[", "vertex_index", ".", "reshape", "(", "(", "-", "1", ",", "3", ")", ")", "]", ".", "astype", "(", "'<f8'", ")", "return", "{", "'vertices'", ":", "vertices", ",", "'faces'", ":", "faces", ",", "'metadata'", ":", "metadata", ",", "'face_normals'", ":", "face_normals", "}" ]
Perform routing and return db_nums
def _route ( self , attr , args , kwargs , * * fkwargs ) : return self . cluster . hosts . keys ( )
65
https://github.com/disqus/nydus/blob/9b505840da47a34f758a830c3992fa5dcb7bb7ad/nydus/db/routers/base.py#L111-L115
[ "def", "get_thumbnail", "(", "self", ",", "video_path", ",", "at_time", "=", "0.5", ")", ":", "filename", "=", "os", ".", "path", ".", "basename", "(", "video_path", ")", "filename", ",", "__", "=", "os", ".", "path", ".", "splitext", "(", "filename", ")", "_", ",", "image_path", "=", "tempfile", ".", "mkstemp", "(", "suffix", "=", "'_{}.jpg'", ".", "format", "(", "filename", ")", ")", "video_duration", "=", "self", ".", "get_media_info", "(", "video_path", ")", "[", "'duration'", "]", "if", "at_time", ">", "video_duration", ":", "raise", "exceptions", ".", "InvalidTimeError", "(", ")", "thumbnail_time", "=", "at_time", "cmds", "=", "[", "self", ".", "ffmpeg_path", ",", "'-i'", ",", "video_path", ",", "'-vframes'", ",", "'1'", "]", "cmds", ".", "extend", "(", "[", "'-ss'", ",", "str", "(", "thumbnail_time", ")", ",", "'-y'", ",", "image_path", "]", ")", "process", "=", "self", ".", "_spawn", "(", "cmds", ")", "self", ".", "_check_returncode", "(", "process", ")", "if", "not", "os", ".", "path", ".", "getsize", "(", "image_path", ")", ":", "# we somehow failed to generate thumbnail", "os", ".", "unlink", "(", "image_path", ")", "raise", "exceptions", ".", "InvalidTimeError", "(", ")", "return", "image_path" ]
Iterates through all connections which were previously listed as unavailable and marks any that have expired their retry_timeout as being up .
def check_down_connections ( self ) : now = time . time ( ) for db_num , marked_down_at in list ( self . _down_connections . items ( ) ) : if marked_down_at + self . retry_timeout <= now : self . mark_connection_up ( db_num )
66
https://github.com/disqus/nydus/blob/9b505840da47a34f758a830c3992fa5dcb7bb7ad/nydus/db/routers/base.py#L175-L184
[ "def", "transform_sparql_construct", "(", "rdf", ",", "construct_query", ")", ":", "logging", ".", "debug", "(", "\"performing SPARQL CONSTRUCT transformation\"", ")", "if", "construct_query", "[", "0", "]", "==", "'@'", ":", "# actual query should be read from file", "construct_query", "=", "file", "(", "construct_query", "[", "1", ":", "]", ")", ".", "read", "(", ")", "logging", ".", "debug", "(", "\"CONSTRUCT query: %s\"", ",", "construct_query", ")", "newgraph", "=", "Graph", "(", ")", "for", "triple", "in", "rdf", ".", "query", "(", "construct_query", ")", ":", "newgraph", ".", "add", "(", "triple", ")", "return", "newgraph" ]
Marks all connections which were previously listed as unavailable as being up .
def flush_down_connections ( self ) : self . _get_db_attempts = 0 for db_num in list ( self . _down_connections . keys ( ) ) : self . mark_connection_up ( db_num )
67
https://github.com/disqus/nydus/blob/9b505840da47a34f758a830c3992fa5dcb7bb7ad/nydus/db/routers/base.py#L186-L192
[ "def", "_op_generic_Ctz", "(", "self", ",", "args", ")", ":", "wtf_expr", "=", "claripy", ".", "BVV", "(", "self", ".", "_from_size", ",", "self", ".", "_from_size", ")", "for", "a", "in", "reversed", "(", "range", "(", "self", ".", "_from_size", ")", ")", ":", "bit", "=", "claripy", ".", "Extract", "(", "a", ",", "a", ",", "args", "[", "0", "]", ")", "wtf_expr", "=", "claripy", ".", "If", "(", "bit", "==", "1", ",", "claripy", ".", "BVV", "(", "a", ",", "self", ".", "_from_size", ")", ",", "wtf_expr", ")", "return", "wtf_expr" ]
Compute standby power
def standby ( df , resolution = '24h' , time_window = None ) : if df . empty : raise EmptyDataFrame ( ) df = pd . DataFrame ( df ) # if df was a pd.Series, convert to DataFrame def parse_time ( t ) : if isinstance ( t , numbers . Number ) : return pd . Timestamp . utcfromtimestamp ( t ) . time ( ) else : return pd . Timestamp ( t ) . time ( ) # first filter based on the time-window if time_window is not None : t_start = parse_time ( time_window [ 0 ] ) t_end = parse_time ( time_window [ 1 ] ) if t_start > t_end : # start before midnight df = df [ ( df . index . time >= t_start ) | ( df . index . time < t_end ) ] else : df = df [ ( df . index . time >= t_start ) & ( df . index . time < t_end ) ] return df . resample ( resolution ) . min ( )
68
https://github.com/opengridcc/opengrid/blob/69b8da3c8fcea9300226c45ef0628cd6d4307651/opengrid/library/analysis.py#L72-L115
[ "def", "generate_http_manifest", "(", "self", ")", ":", "base_path", "=", "os", ".", "path", ".", "dirname", "(", "self", ".", "translate_path", "(", "self", ".", "path", ")", ")", "self", ".", "dataset", "=", "dtoolcore", ".", "DataSet", ".", "from_uri", "(", "base_path", ")", "admin_metadata_fpath", "=", "os", ".", "path", ".", "join", "(", "base_path", ",", "\".dtool\"", ",", "\"dtool\"", ")", "with", "open", "(", "admin_metadata_fpath", ")", "as", "fh", ":", "admin_metadata", "=", "json", ".", "load", "(", "fh", ")", "http_manifest", "=", "{", "\"admin_metadata\"", ":", "admin_metadata", ",", "\"manifest_url\"", ":", "self", ".", "generate_url", "(", "\".dtool/manifest.json\"", ")", ",", "\"readme_url\"", ":", "self", ".", "generate_url", "(", "\"README.yml\"", ")", ",", "\"overlays\"", ":", "self", ".", "generate_overlay_urls", "(", ")", ",", "\"item_urls\"", ":", "self", ".", "generate_item_urls", "(", ")", "}", "return", "bytes", "(", "json", ".", "dumps", "(", "http_manifest", ")", ",", "\"utf-8\"", ")" ]
Compute the share of the standby power in the total consumption .
def share_of_standby ( df , resolution = '24h' , time_window = None ) : p_sb = standby ( df , resolution , time_window ) df = df . resample ( resolution ) . mean ( ) p_tot = df . sum ( ) p_standby = p_sb . sum ( ) share_standby = p_standby / p_tot res = share_standby . iloc [ 0 ] return res
69
https://github.com/opengridcc/opengrid/blob/69b8da3c8fcea9300226c45ef0628cd6d4307651/opengrid/library/analysis.py#L118-L146
[ "def", "getAsKmlPngAnimation", "(", "self", ",", "session", ",", "projectFile", "=", "None", ",", "path", "=", "None", ",", "documentName", "=", "None", ",", "colorRamp", "=", "None", ",", "alpha", "=", "1.0", ",", "noDataValue", "=", "0", ",", "drawOrder", "=", "0", ",", "cellSize", "=", "None", ",", "resampleMethod", "=", "'NearestNeighbour'", ")", ":", "# Prepare rasters", "timeStampedRasters", "=", "self", ".", "_assembleRasterParams", "(", "projectFile", ",", "self", ".", "rasters", ")", "# Make sure the raster field is valid", "converter", "=", "RasterConverter", "(", "sqlAlchemyEngineOrSession", "=", "session", ")", "# Configure color ramp", "if", "isinstance", "(", "colorRamp", ",", "dict", ")", ":", "converter", ".", "setCustomColorRamp", "(", "colorRamp", "[", "'colors'", "]", ",", "colorRamp", "[", "'interpolatedPoints'", "]", ")", "else", ":", "converter", ".", "setDefaultColorRamp", "(", "colorRamp", ")", "if", "documentName", "is", "None", ":", "documentName", "=", "self", ".", "fileExtension", "kmlString", ",", "binaryPngStrings", "=", "converter", ".", "getAsKmlPngAnimation", "(", "tableName", "=", "WMSDatasetRaster", ".", "tableName", ",", "timeStampedRasters", "=", "timeStampedRasters", ",", "rasterIdFieldName", "=", "'id'", ",", "rasterFieldName", "=", "'raster'", ",", "documentName", "=", "documentName", ",", "alpha", "=", "alpha", ",", "drawOrder", "=", "drawOrder", ",", "cellSize", "=", "cellSize", ",", "noDataValue", "=", "noDataValue", ",", "resampleMethod", "=", "resampleMethod", ")", "if", "path", ":", "directory", "=", "os", ".", "path", ".", "dirname", "(", "path", ")", "archiveName", "=", "(", "os", ".", "path", ".", "split", "(", "path", ")", "[", "1", "]", ")", ".", "split", "(", "'.'", ")", "[", "0", "]", "kmzPath", "=", "os", ".", "path", ".", "join", "(", "directory", ",", "(", "archiveName", "+", "'.kmz'", ")", ")", "with", "ZipFile", "(", "kmzPath", ",", "'w'", ")", "as", "kmz", ":", "kmz", ".", "writestr", "(", "archiveName", "+", "'.kml'", ",", "kmlString", ")", "for", "index", ",", "binaryPngString", "in", "enumerate", "(", "binaryPngStrings", ")", ":", "kmz", ".", "writestr", "(", "'raster{0}.png'", ".", "format", "(", "index", ")", ",", "binaryPngString", ")", "return", "kmlString", ",", "binaryPngStrings" ]
Count the number of on - toggles for gas boilers
def count_peaks ( ts ) : on_toggles = ts . diff ( ) > 3000 shifted = np . logical_not ( on_toggles . shift ( 1 ) ) result = on_toggles & shifted count = result . sum ( ) return count
70
https://github.com/opengridcc/opengrid/blob/69b8da3c8fcea9300226c45ef0628cd6d4307651/opengrid/library/analysis.py#L149-L169
[ "def", "users", "(", "self", ",", "start", "=", "1", ",", "num", "=", "10", ",", "sortField", "=", "\"fullName\"", ",", "sortOrder", "=", "\"asc\"", ",", "role", "=", "None", ")", ":", "users", "=", "[", "]", "url", "=", "self", ".", "_url", "+", "\"/users\"", "params", "=", "{", "\"f\"", ":", "\"json\"", ",", "\"start\"", ":", "start", ",", "\"num\"", ":", "num", "}", "if", "not", "role", "is", "None", ":", "params", "[", "'role'", "]", "=", "role", "if", "not", "sortField", "is", "None", ":", "params", "[", "'sortField'", "]", "=", "sortField", "if", "not", "sortOrder", "is", "None", ":", "params", "[", "'sortOrder'", "]", "=", "sortOrder", "from", ".", "_community", "import", "Community", "res", "=", "self", ".", "_post", "(", "url", "=", "url", ",", "param_dict", "=", "params", ",", "securityHandler", "=", "self", ".", "_securityHandler", ",", "proxy_url", "=", "self", ".", "_proxy_url", ",", "proxy_port", "=", "self", ".", "_proxy_port", ")", "if", "\"users\"", "in", "res", ":", "if", "len", "(", "res", "[", "'users'", "]", ")", ">", "0", ":", "parsed", "=", "urlparse", ".", "urlparse", "(", "self", ".", "_url", ")", "if", "parsed", ".", "netloc", ".", "lower", "(", ")", ".", "find", "(", "'arcgis.com'", ")", "==", "-", "1", ":", "cURL", "=", "\"%s://%s/%s/sharing/rest/community\"", "%", "(", "parsed", ".", "scheme", ",", "parsed", ".", "netloc", ",", "parsed", ".", "path", "[", "1", ":", "]", ".", "split", "(", "'/'", ")", "[", "0", "]", ")", "else", ":", "cURL", "=", "\"%s://%s/sharing/rest/community\"", "%", "(", "parsed", ".", "scheme", ",", "parsed", ".", "netloc", ")", "com", "=", "Community", "(", "url", "=", "cURL", ",", "securityHandler", "=", "self", ".", "_securityHandler", ",", "proxy_url", "=", "self", ".", "_proxy_url", ",", "proxy_port", "=", "self", ".", "_proxy_port", ")", "for", "r", "in", "res", "[", "'users'", "]", ":", "users", ".", "append", "(", "com", ".", "users", ".", "user", "(", "r", "[", "\"username\"", "]", ")", ")", "res", "[", "'users'", "]", "=", "users", "return", "res" ]
Calculate the ratio of input vs . norm over a given interval .
def load_factor ( ts , resolution = None , norm = None ) : if norm is None : norm = ts . max ( ) if resolution is not None : ts = ts . resample ( rule = resolution ) . mean ( ) lf = ts / norm return lf
71
https://github.com/opengridcc/opengrid/blob/69b8da3c8fcea9300226c45ef0628cd6d4307651/opengrid/library/analysis.py#L172-L199
[ "def", "unbind", "(", "self", ",", "devices_to_unbind", ")", ":", "if", "self", ".", "entity_api_key", "==", "\"\"", ":", "return", "{", "'status'", ":", "'failure'", ",", "'response'", ":", "'No API key found in request'", "}", "url", "=", "self", ".", "base_url", "+", "\"api/0.1.0/subscribe/unbind\"", "headers", "=", "{", "\"apikey\"", ":", "self", ".", "entity_api_key", "}", "data", "=", "{", "\"exchange\"", ":", "\"amq.topic\"", ",", "\"keys\"", ":", "devices_to_unbind", ",", "\"queue\"", ":", "self", ".", "entity_id", "}", "with", "self", ".", "no_ssl_verification", "(", ")", ":", "r", "=", "requests", ".", "delete", "(", "url", ",", "json", "=", "data", ",", "headers", "=", "headers", ")", "print", "(", "r", ")", "response", "=", "dict", "(", ")", "if", "\"No API key\"", "in", "str", "(", "r", ".", "content", ".", "decode", "(", "\"utf-8\"", ")", ")", ":", "response", "[", "\"status\"", "]", "=", "\"failure\"", "r", "=", "json", ".", "loads", "(", "r", ".", "content", ".", "decode", "(", "\"utf-8\"", ")", ")", "[", "'message'", "]", "elif", "'unbind'", "in", "str", "(", "r", ".", "content", ".", "decode", "(", "\"utf-8\"", ")", ")", ":", "response", "[", "\"status\"", "]", "=", "\"success\"", "r", "=", "r", ".", "content", ".", "decode", "(", "\"utf-8\"", ")", "else", ":", "response", "[", "\"status\"", "]", "=", "\"failure\"", "r", "=", "r", ".", "content", ".", "decode", "(", "\"utf-8\"", ")", "response", "[", "\"response\"", "]", "=", "str", "(", "r", ")", "return", "response" ]
get top hits after sorting by column number
def top_hits ( hits , num , column , reverse ) : hits . sort ( key = itemgetter ( column ) , reverse = reverse ) for hit in hits [ 0 : num ] : yield hit
72
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/besthits.py#L17-L23
[ "def", "_openResources", "(", "self", ")", ":", "try", ":", "rate", ",", "data", "=", "scipy", ".", "io", ".", "wavfile", ".", "read", "(", "self", ".", "_fileName", ",", "mmap", "=", "True", ")", "except", "Exception", "as", "ex", ":", "logger", ".", "warning", "(", "ex", ")", "logger", ".", "warning", "(", "\"Unable to read wav with memmory mapping. Trying without now.\"", ")", "rate", ",", "data", "=", "scipy", ".", "io", ".", "wavfile", ".", "read", "(", "self", ".", "_fileName", ",", "mmap", "=", "False", ")", "self", ".", "_array", "=", "data", "self", ".", "attributes", "[", "'rate'", "]", "=", "rate" ]
parse b6 output with sorting
def numBlast_sort ( blast , numHits , evalueT , bitT ) : header = [ '#query' , 'target' , 'pident' , 'alen' , 'mismatch' , 'gapopen' , 'qstart' , 'qend' , 'tstart' , 'tend' , 'evalue' , 'bitscore' ] yield header hmm = { h : [ ] for h in header } for line in blast : if line . startswith ( '#' ) : continue line = line . strip ( ) . split ( '\t' ) # Evalue and Bitscore thresholds line [ 10 ] , line [ 11 ] = float ( line [ 10 ] ) , float ( line [ 11 ] ) evalue , bit = line [ 10 ] , line [ 11 ] if evalueT is not False and evalue > evalueT : continue if bitT is not False and bit < bitT : continue for i , h in zip ( line , header ) : hmm [ h ] . append ( i ) hmm = pd . DataFrame ( hmm ) for query , df in hmm . groupby ( by = [ '#query' ] ) : df = df . sort_values ( by = [ 'bitscore' ] , ascending = False ) for hit in df [ header ] . values [ 0 : numHits ] : yield hit
73
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/besthits.py#L25-L50
[ "def", "bind_texture", "(", "texture", ")", ":", "if", "not", "getattr", "(", "texture", ",", "'image'", ",", "None", ")", ":", "texture", ".", "image", "=", "load_image", "(", "texture", ".", "path", ")", "glEnable", "(", "texture", ".", "image", ".", "target", ")", "glBindTexture", "(", "texture", ".", "image", ".", "target", ",", "texture", ".", "image", ".", "id", ")", "gl", ".", "glTexParameterf", "(", "texture", ".", "image", ".", "target", ",", "gl", ".", "GL_TEXTURE_WRAP_S", ",", "gl", ".", "GL_CLAMP_TO_EDGE", ")", "gl", ".", "glTexParameterf", "(", "texture", ".", "image", ".", "target", ",", "gl", ".", "GL_TEXTURE_WRAP_T", ",", "gl", ".", "GL_CLAMP_TO_EDGE", ")" ]
parse b6 output
def numBlast ( blast , numHits , evalueT = False , bitT = False , sort = False ) : if sort is True : for hit in numBlast_sort ( blast , numHits , evalueT , bitT ) : yield hit return header = [ '#query' , 'target' , 'pident' , 'alen' , 'mismatch' , 'gapopen' , 'qstart' , 'qend' , 'tstart' , 'tend' , 'evalue' , 'bitscore' ] yield header prev , hits = None , [ ] for line in blast : line = line . strip ( ) . split ( '\t' ) ID = line [ 0 ] line [ 10 ] , line [ 11 ] = float ( line [ 10 ] ) , float ( line [ 11 ] ) evalue , bit = line [ 10 ] , line [ 11 ] if ID != prev : if len ( hits ) > 0 : # column is 1 + line index for hit in top_hits ( hits , numHits , 11 , True ) : yield hit hits = [ ] if evalueT == False and bitT == False : hits . append ( line ) elif evalue <= evalueT and bitT == False : hits . append ( line ) elif evalue <= evalueT and bit >= bitT : hits . append ( line ) elif evalueT == False and bit >= bitT : hits . append ( line ) prev = ID for hit in top_hits ( hits , numHits , 11 , True ) : yield hit
74
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/besthits.py#L52-L85
[ "def", "__getBio", "(", "self", ",", "web", ")", ":", "bio", "=", "web", ".", "find_all", "(", "\"div\"", ",", "{", "\"class\"", ":", "\"user-profile-bio\"", "}", ")", "if", "bio", ":", "try", ":", "bio", "=", "bio", "[", "0", "]", ".", "text", "if", "bio", "and", "GitHubUser", ".", "isASCII", "(", "bio", ")", ":", "bioText", "=", "bio", ".", "replace", "(", "\"\\n\"", ",", "\"\"", ")", "bioText", "=", "bioText", ".", "replace", "(", "\"\\t\"", ",", "\" \"", ")", ".", "replace", "(", "\"\\\"\"", ",", "\"\"", ")", "bioText", "=", "bioText", ".", "replace", "(", "\"\\'\"", ",", "\"\"", ")", ".", "replace", "(", "\"\\\\\"", ",", "\"\"", ")", "self", ".", "bio", "=", "bioText", "else", ":", "self", ".", "bio", "=", "\"\"", "except", "IndexError", "as", "error", ":", "print", "(", "\"There was an error with the user \"", "+", "self", ".", "name", ")", "print", "(", "error", ")", "except", "AttributeError", "as", "error", ":", "print", "(", "\"There was an error with the user \"", "+", "self", ".", "name", ")", "print", "(", "error", ")" ]
parse hmm domain table output ; this version is faster but does not work unless the table is sorted
def numDomtblout ( domtblout , numHits , evalueT , bitT , sort ) : if sort is True : for hit in numDomtblout_sort ( domtblout , numHits , evalueT , bitT ) : yield hit return header = [ '#target name' , 'target accession' , 'tlen' , 'query name' , 'query accession' , 'qlen' , 'full E-value' , 'full score' , 'full bias' , 'domain #' , '# domains' , 'domain c-Evalue' , 'domain i-Evalue' , 'domain score' , 'domain bias' , 'hmm from' , 'hmm to' , 'seq from' , 'seq to' , 'env from' , 'env to' , 'acc' , 'target description' ] yield header prev , hits = None , [ ] for line in domtblout : if line . startswith ( '#' ) : continue # parse line and get description line = line . strip ( ) . split ( ) desc = ' ' . join ( line [ 18 : ] ) line = line [ 0 : 18 ] line . append ( desc ) # create ID based on query name and domain number ID = line [ 0 ] + line [ 9 ] # domain c-Evalue and domain score thresholds line [ 11 ] , line [ 13 ] = float ( line [ 11 ] ) , float ( line [ 13 ] ) evalue , bitscore = line [ 11 ] , line [ 13 ] if ID != prev : if len ( hits ) > 0 : for hit in top_hits ( hits , numHits , 13 , True ) : yield hit hits = [ ] if evalueT == False and bitT == False : hits . append ( line ) elif evalue <= evalueT and bitT == False : hits . append ( line ) elif evalue <= evalueT and bitscore >= bitT : hits . append ( line ) elif evalueT == False and bitscore >= bitT : hits . append ( line ) prev = ID for hit in top_hits ( hits , numHits , 13 , True ) : yield hit
75
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/besthits.py#L121-L168
[ "def", "files_comments_delete", "(", "self", ",", "*", ",", "file", ":", "str", ",", "id", ":", "str", ",", "*", "*", "kwargs", ")", "->", "SlackResponse", ":", "kwargs", ".", "update", "(", "{", "\"file\"", ":", "file", ",", "\"id\"", ":", "id", "}", ")", "return", "self", ".", "api_call", "(", "\"files.comments.delete\"", ",", "json", "=", "kwargs", ")" ]
convert stockholm to fasta
def stock2fa ( stock ) : seqs = { } for line in stock : if line . startswith ( '#' ) is False and line . startswith ( ' ' ) is False and len ( line ) > 3 : id , seq = line . strip ( ) . split ( ) id = id . rsplit ( '/' , 1 ) [ 0 ] id = re . split ( '[0-9]\|' , id , 1 ) [ - 1 ] if id not in seqs : seqs [ id ] = [ ] seqs [ id ] . append ( seq ) if line . startswith ( '//' ) : break return seqs
76
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/stockholm2fa.py#L11-L26
[ "def", "list_tables", "(", ")", ":", "tables", "=", "[", "]", "try", ":", "table_list", "=", "DYNAMODB_CONNECTION", ".", "list_tables", "(", ")", "while", "True", ":", "for", "table_name", "in", "table_list", "[", "u'TableNames'", "]", ":", "tables", ".", "append", "(", "get_table", "(", "table_name", ")", ")", "if", "u'LastEvaluatedTableName'", "in", "table_list", ":", "table_list", "=", "DYNAMODB_CONNECTION", ".", "list_tables", "(", "table_list", "[", "u'LastEvaluatedTableName'", "]", ")", "else", ":", "break", "except", "DynamoDBResponseError", "as", "error", ":", "dynamodb_error", "=", "error", ".", "body", "[", "'__type'", "]", ".", "rsplit", "(", "'#'", ",", "1", ")", "[", "1", "]", "if", "dynamodb_error", "==", "'ResourceNotFoundException'", ":", "logger", ".", "error", "(", "'No tables found'", ")", "elif", "dynamodb_error", "==", "'AccessDeniedException'", ":", "logger", ".", "debug", "(", "'Your AWS API keys lack access to listing tables. '", "'That is an issue if you are trying to use regular '", "'expressions in your table configuration.'", ")", "elif", "dynamodb_error", "==", "'UnrecognizedClientException'", ":", "logger", ".", "error", "(", "'Invalid security token. Are your AWS API keys correct?'", ")", "else", ":", "logger", ".", "error", "(", "(", "'Unhandled exception: {0}: {1}. '", "'Please file a bug report at '", "'https://github.com/sebdah/dynamic-dynamodb/issues'", ")", ".", "format", "(", "dynamodb_error", ",", "error", ".", "body", "[", "'message'", "]", ")", ")", "except", "JSONResponseError", "as", "error", ":", "logger", ".", "error", "(", "'Communication error: {0}'", ".", "format", "(", "error", ")", ")", "sys", ".", "exit", "(", "1", ")", "return", "tables" ]
Return boolean time series following given week schedule .
def week_schedule ( index , on_time = None , off_time = None , off_days = None ) : if on_time is None : on_time = '9:00' if off_time is None : off_time = '17:00' if off_days is None : off_days = [ 'Sunday' , 'Monday' ] if not isinstance ( on_time , datetime . time ) : on_time = pd . to_datetime ( on_time , format = '%H:%M' ) . time ( ) if not isinstance ( off_time , datetime . time ) : off_time = pd . to_datetime ( off_time , format = '%H:%M' ) . time ( ) times = ( index . time >= on_time ) & ( index . time < off_time ) & ( ~ index . weekday_name . isin ( off_days ) ) return pd . Series ( times , index = index )
77
https://github.com/opengridcc/opengrid/blob/69b8da3c8fcea9300226c45ef0628cd6d4307651/opengrid/library/utils.py#L10-L47
[ "def", "removeAllEntitlements", "(", "self", ",", "appId", ")", ":", "params", "=", "{", "\"f\"", ":", "\"json\"", ",", "\"appId\"", ":", "appId", "}", "url", "=", "self", ".", "_url", "+", "\"/licenses/removeAllEntitlements\"", "return", "self", ".", "_post", "(", "url", "=", "url", ",", "param_dict", "=", "params", ",", "proxy_url", "=", "self", ".", "_proxy_url", ",", "proxy_port", "=", "self", ".", "_proxy_port", ")" ]
Draw a carpet plot of a pandas timeseries .
def carpet ( timeseries , * * kwargs ) : # define optional input parameters cmap = kwargs . pop ( 'cmap' , cm . coolwarm ) norm = kwargs . pop ( 'norm' , LogNorm ( ) ) interpolation = kwargs . pop ( 'interpolation' , 'nearest' ) cblabel = kwargs . pop ( 'zlabel' , timeseries . name if timeseries . name else '' ) title = kwargs . pop ( 'title' , 'carpet plot: ' + timeseries . name if timeseries . name else '' ) # data preparation if timeseries . dropna ( ) . empty : print ( 'skipped {} - no data' . format ( title ) ) return ts = timeseries . resample ( '15min' ) . interpolate ( ) vmin = max ( 0.1 , kwargs . pop ( 'vmin' , ts [ ts > 0 ] . min ( ) ) ) vmax = max ( vmin , kwargs . pop ( 'vmax' , ts . quantile ( .999 ) ) ) # convert to dataframe with date as index and time as columns by # first replacing the index by a MultiIndex mpldatetimes = date2num ( ts . index . to_pydatetime ( ) ) ts . index = pd . MultiIndex . from_arrays ( [ np . floor ( mpldatetimes ) , 2 + mpldatetimes % 1 ] ) # '2 +': matplotlib bug workaround. # and then unstacking the second index level to columns df = ts . unstack ( ) # data plotting fig , ax = plt . subplots ( ) # define the extent of the axes (remark the +- 0.5 for the y axis in order to obtain aligned date ticks) extent = [ df . columns [ 0 ] , df . columns [ - 1 ] , df . index [ - 1 ] + 0.5 , df . index [ 0 ] - 0.5 ] im = plt . imshow ( df , vmin = vmin , vmax = vmax , extent = extent , cmap = cmap , aspect = 'auto' , norm = norm , interpolation = interpolation , * * kwargs ) # figure formatting # x axis ax . xaxis_date ( ) ax . xaxis . set_major_locator ( HourLocator ( interval = 2 ) ) ax . xaxis . set_major_formatter ( DateFormatter ( '%H:%M' ) ) ax . xaxis . grid ( True ) plt . xlabel ( 'UTC Time' ) # y axis ax . yaxis_date ( ) dmin , dmax = ax . yaxis . get_data_interval ( ) number_of_days = ( num2date ( dmax ) - num2date ( dmin ) ) . days # AutoDateLocator is not suited in case few data is available if abs ( number_of_days ) <= 35 : ax . yaxis . set_major_locator ( DayLocator ( ) ) else : ax . yaxis . set_major_locator ( AutoDateLocator ( ) ) ax . yaxis . set_major_formatter ( DateFormatter ( "%a, %d %b %Y" ) ) # plot colorbar cbticks = np . logspace ( np . log10 ( vmin ) , np . log10 ( vmax ) , 11 , endpoint = True ) cb = plt . colorbar ( format = '%.0f' , ticks = cbticks ) cb . set_label ( cblabel ) # plot title plt . title ( title ) return im
78
https://github.com/opengridcc/opengrid/blob/69b8da3c8fcea9300226c45ef0628cd6d4307651/opengrid/library/plotting.py#L34-L125
[ "async", "def", "remove", "(", "self", ",", "von_wallet", ":", "Wallet", ")", "->", "None", ":", "LOGGER", ".", "debug", "(", "'WalletManager.remove >>> wallet %s'", ",", "von_wallet", ")", "await", "von_wallet", ".", "remove", "(", ")", "LOGGER", ".", "debug", "(", "'WalletManager.remove <<<'", ")" ]
calculate percent identity
def calc_pident_ignore_gaps ( a , b ) : m = 0 # matches mm = 0 # mismatches for A , B in zip ( list ( a ) , list ( b ) ) : if A == '-' or A == '.' or B == '-' or B == '.' : continue if A == B : m += 1 else : mm += 1 try : return float ( float ( m ) / float ( ( m + mm ) ) ) * 100 except : return 0
79
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/compare_aligned.py#L34-L50
[ "def", "BuildChecks", "(", "self", ",", "request", ")", ":", "result", "=", "[", "]", "if", "request", ".", "HasField", "(", "\"start_time\"", ")", "or", "request", ".", "HasField", "(", "\"end_time\"", ")", ":", "def", "FilterTimestamp", "(", "file_stat", ",", "request", "=", "request", ")", ":", "return", "file_stat", ".", "HasField", "(", "\"st_mtime\"", ")", "and", "(", "file_stat", ".", "st_mtime", "<", "request", ".", "start_time", "or", "file_stat", ".", "st_mtime", ">", "request", ".", "end_time", ")", "result", ".", "append", "(", "FilterTimestamp", ")", "if", "request", ".", "HasField", "(", "\"min_file_size\"", ")", "or", "request", ".", "HasField", "(", "\"max_file_size\"", ")", ":", "def", "FilterSize", "(", "file_stat", ",", "request", "=", "request", ")", ":", "return", "file_stat", ".", "HasField", "(", "\"st_size\"", ")", "and", "(", "file_stat", ".", "st_size", "<", "request", ".", "min_file_size", "or", "file_stat", ".", "st_size", ">", "request", ".", "max_file_size", ")", "result", ".", "append", "(", "FilterSize", ")", "if", "request", ".", "HasField", "(", "\"perm_mode\"", ")", ":", "def", "FilterPerms", "(", "file_stat", ",", "request", "=", "request", ")", ":", "return", "(", "file_stat", ".", "st_mode", "&", "request", ".", "perm_mask", ")", "!=", "request", ".", "perm_mode", "result", ".", "append", "(", "FilterPerms", ")", "if", "request", ".", "HasField", "(", "\"uid\"", ")", ":", "def", "FilterUID", "(", "file_stat", ",", "request", "=", "request", ")", ":", "return", "file_stat", ".", "st_uid", "!=", "request", ".", "uid", "result", ".", "append", "(", "FilterUID", ")", "if", "request", ".", "HasField", "(", "\"gid\"", ")", ":", "def", "FilterGID", "(", "file_stat", ",", "request", "=", "request", ")", ":", "return", "file_stat", ".", "st_gid", "!=", "request", ".", "gid", "result", ".", "append", "(", "FilterGID", ")", "if", "request", ".", "HasField", "(", "\"path_regex\"", ")", ":", "regex", "=", "request", ".", "path_regex", "def", "FilterPath", "(", "file_stat", ",", "regex", "=", "regex", ")", ":", "\"\"\"Suppress any filename not matching the regular expression.\"\"\"", "return", "not", "regex", ".", "Search", "(", "file_stat", ".", "pathspec", ".", "Basename", "(", ")", ")", "result", ".", "append", "(", "FilterPath", ")", "if", "request", ".", "HasField", "(", "\"data_regex\"", ")", ":", "def", "FilterData", "(", "file_stat", ",", "*", "*", "_", ")", ":", "\"\"\"Suppress files that do not match the content.\"\"\"", "return", "not", "self", ".", "TestFileContent", "(", "file_stat", ")", "result", ".", "append", "(", "FilterData", ")", "return", "result" ]
skip column if either is a gap
def remove_gaps ( A , B ) : a_seq , b_seq = [ ] , [ ] for a , b in zip ( list ( A ) , list ( B ) ) : if a == '-' or a == '.' or b == '-' or b == '.' : continue a_seq . append ( a ) b_seq . append ( b ) return '' . join ( a_seq ) , '' . join ( b_seq )
80
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/compare_aligned.py#L52-L62
[ "def", "get_needful_files", "(", "self", ")", ":", "manifest", "=", "self", ".", "storage", ".", "load_manifest", "(", ")", "if", "self", ".", "keep_unhashed_files", ":", "if", "PY3", ":", "needful_files", "=", "set", "(", "manifest", ".", "keys", "(", ")", "|", "manifest", ".", "values", "(", ")", ")", "else", ":", "needful_files", "=", "set", "(", "manifest", ".", "keys", "(", ")", "+", "manifest", ".", "values", "(", ")", ")", "needful_files", "=", "{", "self", ".", "storage", ".", "clean_name", "(", "file", ")", "for", "file", "in", "needful_files", "}", "else", ":", "needful_files", "=", "set", "(", "manifest", ".", "values", "(", ")", ")", "return", "{", "self", ".", "process_file", "(", "file", ")", "for", "file", "in", "needful_files", "}" ]
compare pairs of sequences
def compare_seqs ( seqs ) : A , B , ignore_gaps = seqs a , b = A [ 1 ] , B [ 1 ] # actual sequences if len ( a ) != len ( b ) : print ( '# reads are not the same length' , file = sys . stderr ) exit ( ) if ignore_gaps is True : pident = calc_pident_ignore_gaps ( a , b ) else : pident = calc_pident ( a , b ) return A [ 0 ] , B [ 0 ] , pident
81
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/compare_aligned.py#L64-L77
[ "def", "color_electrodes", "(", "self", ",", "config_nr", ",", "ax", ")", ":", "electrodes", "=", "np", ".", "loadtxt", "(", "options", ".", "config_file", ",", "skiprows", "=", "1", ")", "electrodes", "=", "self", ".", "configs", "[", "~", "np", ".", "isnan", "(", "self", ".", "configs", ")", ".", "any", "(", "1", ")", "]", "electrodes", "=", "electrodes", ".", "astype", "(", "int", ")", "conf", "=", "[", "]", "for", "dim", "in", "range", "(", "0", ",", "electrodes", ".", "shape", "[", "1", "]", ")", ":", "c", "=", "electrodes", "[", "config_nr", ",", "dim", "]", "# c = c.partition('0')", "a", "=", "np", ".", "round", "(", "c", "/", "10000", ")", "-", "1", "b", "=", "np", ".", "mod", "(", "c", ",", "10000", ")", "-", "1", "conf", ".", "append", "(", "a", ")", "conf", ".", "append", "(", "b", ")", "Ex", ",", "Ez", "=", "elem", ".", "get_electrodes", "(", ")", "color", "=", "[", "'#ffed00'", ",", "'#ffed00'", ",", "'#ff0000'", ",", "'#ff0000'", "]", "ax", ".", "scatter", "(", "Ex", "[", "conf", "]", ",", "Ez", "[", "conf", "]", ",", "c", "=", "color", ",", "marker", "=", "'s'", ",", "s", "=", "60", ",", "clip_on", "=", "False", ",", "edgecolors", "=", "'k'", ")" ]
calculate Levenshtein ratio of sequences
def compare_seqs_leven ( seqs ) : A , B , ignore_gaps = seqs a , b = remove_gaps ( A [ 1 ] , B [ 1 ] ) # actual sequences if len ( a ) != len ( b ) : print ( '# reads are not the same length' , file = sys . stderr ) exit ( ) pident = lr ( a , b ) * 100 return A [ 0 ] , B [ 0 ] , pident
82
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/compare_aligned.py#L79-L89
[ "def", "create_calcs", "(", "self", ")", ":", "specs", "=", "self", ".", "_combine_core_aux_specs", "(", ")", "for", "spec", "in", "specs", ":", "spec", "[", "'dtype_out_time'", "]", "=", "_prune_invalid_time_reductions", "(", "spec", ")", "return", "[", "Calc", "(", "*", "*", "sp", ")", "for", "sp", "in", "specs", "]" ]
make pairwise sequence comparisons between aligned sequences
def pairwise_compare ( afa , leven , threads , print_list , ignore_gaps ) : # load sequences into dictionary seqs = { seq [ 0 ] : seq for seq in nr_fasta ( [ afa ] , append_index = True ) } num_seqs = len ( seqs ) # define all pairs pairs = ( ( i [ 0 ] , i [ 1 ] , ignore_gaps ) for i in itertools . combinations ( list ( seqs . values ( ) ) , 2 ) ) pool = multithread ( threads ) # calc percent identity between all pairs - parallelize if leven is True : pident = pool . map ( compare_seqs_leven , pairs ) else : compare = pool . imap_unordered ( compare_seqs , pairs ) pident = [ i for i in tqdm ( compare , total = ( num_seqs * num_seqs ) / 2 ) ] pool . close ( ) pool . terminate ( ) pool . join ( ) return to_dictionary ( pident , print_list )
83
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/compare_aligned.py#L91-L110
[ "def", "refresh", "(", "self", ")", "->", "None", ":", "if", "not", "self", ":", "self", ".", "values", "[", ":", "]", "=", "0.", "elif", "len", "(", "self", ")", "==", "1", ":", "values", "=", "list", "(", "self", ".", "_toy2values", ".", "values", "(", ")", ")", "[", "0", "]", "self", ".", "values", "[", ":", "]", "=", "self", ".", "apply_timefactor", "(", "values", ")", "else", ":", "for", "idx", ",", "date", "in", "enumerate", "(", "timetools", ".", "TOY", ".", "centred_timegrid", "(", "self", ".", "simulationstep", ")", ")", ":", "values", "=", "self", ".", "interp", "(", "date", ")", "self", ".", "values", "[", "idx", "]", "=", "self", ".", "apply_timefactor", "(", "values", ")" ]
print matrix of pidents to stdout
def print_pairwise ( pw , median = False ) : names = sorted ( set ( [ i for i in pw ] ) ) if len ( names ) != 0 : if '>' in names [ 0 ] : yield [ '#' ] + [ i . split ( '>' ) [ 1 ] for i in names if '>' in i ] else : yield [ '#' ] + names for a in names : if '>' in a : yield [ a . split ( '>' ) [ 1 ] ] + [ pw [ a ] [ b ] for b in names ] else : out = [ ] for b in names : if b in pw [ a ] : if median is False : out . append ( max ( pw [ a ] [ b ] ) ) else : out . append ( np . median ( pw [ a ] [ b ] ) ) else : out . append ( '-' ) yield [ a ] + out
84
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/compare_aligned.py#L132-L155
[ "def", "from_manifest", "(", "app", ",", "filename", ",", "raw", "=", "False", ",", "*", "*", "kwargs", ")", ":", "cfg", "=", "current_app", ".", "config", "if", "current_app", ".", "config", ".", "get", "(", "'TESTING'", ")", ":", "return", "# Do not spend time here when testing", "path", "=", "_manifests", "[", "app", "]", "[", "filename", "]", "if", "not", "raw", "and", "cfg", ".", "get", "(", "'CDN_DOMAIN'", ")", "and", "not", "cfg", ".", "get", "(", "'CDN_DEBUG'", ")", ":", "scheme", "=", "'https'", "if", "cfg", ".", "get", "(", "'CDN_HTTPS'", ")", "else", "request", ".", "scheme", "prefix", "=", "'{}://'", ".", "format", "(", "scheme", ")", "if", "not", "path", ".", "startswith", "(", "'/'", ")", ":", "# CDN_DOMAIN has no trailing slash", "path", "=", "'/'", "+", "path", "return", "''", ".", "join", "(", "(", "prefix", ",", "cfg", "[", "'CDN_DOMAIN'", "]", ",", "path", ")", ")", "elif", "not", "raw", "and", "kwargs", ".", "get", "(", "'external'", ",", "False", ")", ":", "if", "path", ".", "startswith", "(", "'/'", ")", ":", "# request.host_url has a trailing slash", "path", "=", "path", "[", "1", ":", "]", "return", "''", ".", "join", "(", "(", "request", ".", "host_url", ",", "path", ")", ")", "return", "path" ]
print stats for comparisons
def print_comps ( comps ) : if comps == [ ] : print ( 'n/a' ) else : print ( '# min: %s, max: %s, mean: %s' % ( min ( comps ) , max ( comps ) , np . mean ( comps ) ) )
85
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/compare_aligned.py#L157-L165
[ "def", "readSources", "(", "self", ")", ":", "for", "sourceCount", ",", "sourceElement", "in", "enumerate", "(", "self", ".", "root", ".", "findall", "(", "\".sources/source\"", ")", ")", ":", "# shall we just read the UFO here?", "filename", "=", "sourceElement", ".", "attrib", ".", "get", "(", "'filename'", ")", "# filename is a path relaive to the documentpath. resolve first.", "sourcePath", "=", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "self", ".", "path", ")", ",", "filename", ")", ")", "sourceName", "=", "sourceElement", ".", "attrib", ".", "get", "(", "'name'", ")", "if", "sourceName", "is", "None", ":", "# if the source element has no name attribute", "# (some authoring tools do not need them)", "# then we should make a temporary one. We still need it for reference.", "sourceName", "=", "\"temp_master.%d\"", "%", "(", "sourceCount", ")", "self", ".", "reportProgress", "(", "\"prep\"", ",", "'load'", ",", "sourcePath", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "sourcePath", ")", ":", "raise", "MutatorError", "(", "\"Source not found at %s\"", "%", "sourcePath", ")", "sourceObject", "=", "self", ".", "_instantiateFont", "(", "sourcePath", ")", "# read the locations", "sourceLocationObject", "=", "None", "sourceLocationObject", "=", "self", ".", "locationFromElement", "(", "sourceElement", ")", "if", "sourceLocationObject", "is", "None", ":", "raise", "MutatorError", "(", "\"No location defined for source %s\"", "%", "sourceName", ")", "# read lib flag", "for", "libElement", "in", "sourceElement", ".", "findall", "(", "'.lib'", ")", ":", "if", "libElement", ".", "attrib", ".", "get", "(", "'copy'", ")", "==", "'1'", ":", "self", ".", "libSource", "=", "sourceName", "# read the groups flag", "for", "groupsElement", "in", "sourceElement", ".", "findall", "(", "'.groups'", ")", ":", "if", "groupsElement", ".", "attrib", ".", "get", "(", "'copy'", ")", "==", "'1'", ":", "self", ".", "groupsSource", "=", "sourceName", "# read the info flag", "for", "infoElement", "in", "sourceElement", ".", "findall", "(", "\".info\"", ")", ":", "if", "infoElement", ".", "attrib", ".", "get", "(", "'copy'", ")", "==", "'1'", ":", "self", ".", "infoSource", "=", "sourceName", "if", "infoElement", ".", "attrib", ".", "get", "(", "'mute'", ")", "==", "'1'", ":", "self", ".", "muted", "[", "'info'", "]", ".", "append", "(", "sourceName", ")", "# read the features flag", "for", "featuresElement", "in", "sourceElement", ".", "findall", "(", "\".features\"", ")", ":", "if", "featuresElement", ".", "attrib", ".", "get", "(", "'copy'", ")", "==", "'1'", ":", "if", "self", ".", "featuresSource", "is", "not", "None", ":", "self", ".", "featuresSource", "=", "None", "else", ":", "self", ".", "featuresSource", "=", "sourceName", "mutedGlyphs", "=", "[", "]", "for", "glyphElement", "in", "sourceElement", ".", "findall", "(", "\".glyph\"", ")", ":", "glyphName", "=", "glyphElement", ".", "attrib", ".", "get", "(", "'name'", ")", "if", "glyphName", "is", "None", ":", "continue", "if", "glyphElement", ".", "attrib", ".", "get", "(", "'mute'", ")", "==", "'1'", ":", "if", "not", "sourceName", "in", "self", ".", "muted", "[", "'glyphs'", "]", ":", "self", ".", "muted", "[", "'glyphs'", "]", "[", "sourceName", "]", "=", "[", "]", "self", ".", "muted", "[", "'glyphs'", "]", "[", "sourceName", "]", ".", "append", "(", "glyphName", ")", "for", "kerningElement", "in", "sourceElement", ".", "findall", "(", "\".kerning\"", ")", ":", "if", 
"kerningElement", ".", "attrib", ".", "get", "(", "'mute'", ")", "==", "'1'", ":", "self", ".", "muted", "[", "'kerning'", "]", ".", "append", "(", "sourceName", ")", "# store", "self", ".", "sources", "[", "sourceName", "]", "=", "sourceObject", ",", "sourceLocationObject", "self", ".", "reportProgress", "(", "\"prep\"", ",", "'done'", ")" ]
print min . pident within each clade and then matrix of between - clade max .
def compare_clades ( pw ) : names = sorted ( set ( [ i for i in pw ] ) ) for i in range ( 0 , 4 ) : wi , bt = { } , { } for a in names : for b in pw [ a ] : if ';' not in a or ';' not in b : continue pident = pw [ a ] [ b ] cA , cB = a . split ( ';' ) [ i ] , b . split ( ';' ) [ i ] if i == 0 and '_' in cA and '_' in cB : cA = cA . rsplit ( '_' , 1 ) [ 1 ] cB = cB . rsplit ( '_' , 1 ) [ 1 ] elif '>' in cA or '>' in cB : cA = cA . split ( '>' ) [ 1 ] cB = cB . split ( '>' ) [ 1 ] if cA == cB : if cA not in wi : wi [ cA ] = [ ] wi [ cA ] . append ( pident ) else : if cA not in bt : bt [ cA ] = { } if cB not in bt [ cA ] : bt [ cA ] [ cB ] = [ ] bt [ cA ] [ cB ] . append ( pident ) print ( '\n# min. within' ) for clade , pidents in list ( wi . items ( ) ) : print ( '\t' . join ( [ 'wi:%s' % str ( i ) , clade , str ( min ( pidents ) ) ] ) ) # print matrix of maximum between groups comps = [ ] print ( '\n# max. between' ) for comp in print_pairwise ( bt ) : if comp is not None : print ( '\t' . join ( [ 'bt:%s' % str ( i ) ] + [ str ( j ) for j in comp ] ) ) if comp [ 0 ] != '#' : comps . extend ( [ j for j in comp [ 1 : ] if j != '-' ] ) print_comps ( comps ) # print matrix of median between groups comps = [ ] print ( '\n# median between' ) for comp in print_pairwise ( bt , median = True ) : if comp is not None : print ( '\t' . join ( [ 'bt:%s' % str ( i ) ] + [ str ( j ) for j in comp ] ) ) if comp [ 0 ] != '#' : comps . extend ( [ j for j in comp [ 1 : ] if j != '-' ] ) print_comps ( comps )
86
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/compare_aligned.py#L167-L216
[ "def", "AddKeywordsForName", "(", "self", ",", "name", ",", "keywords", ")", ":", "data_store", ".", "DB", ".", "IndexAddKeywordsForName", "(", "self", ".", "urn", ",", "name", ",", "keywords", ")" ]
convert matrix to dictionary of comparisons
def matrix2dictionary ( matrix ) : pw = { } for line in matrix : line = line . strip ( ) . split ( '\t' ) if line [ 0 ] . startswith ( '#' ) : names = line [ 1 : ] continue a = line [ 0 ] for i , pident in enumerate ( line [ 1 : ] ) : b = names [ i ] if a not in pw : pw [ a ] = { } if b not in pw : pw [ b ] = { } if pident != '-' : pident = float ( pident ) pw [ a ] [ b ] = pident pw [ b ] [ a ] = pident return pw
87
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/compare_aligned.py#L218-L239
[ "async", "def", "postprocess_websocket", "(", "self", ",", "response", ":", "Optional", "[", "Response", "]", ",", "websocket_context", ":", "Optional", "[", "WebsocketContext", "]", "=", "None", ",", ")", "->", "Response", ":", "websocket_", "=", "(", "websocket_context", "or", "_websocket_ctx_stack", ".", "top", ")", ".", "websocket", "functions", "=", "(", "websocket_context", "or", "_websocket_ctx_stack", ".", "top", ")", ".", "_after_websocket_functions", "blueprint", "=", "websocket_", ".", "blueprint", "if", "blueprint", "is", "not", "None", ":", "functions", "=", "chain", "(", "functions", ",", "self", ".", "after_websocket_funcs", "[", "blueprint", "]", ")", "functions", "=", "chain", "(", "functions", ",", "self", ".", "after_websocket_funcs", "[", "None", "]", ")", "for", "function", "in", "functions", ":", "response", "=", "await", "function", "(", "response", ")", "session_", "=", "(", "websocket_context", "or", "_request_ctx_stack", ".", "top", ")", ".", "session", "if", "not", "self", ".", "session_interface", ".", "is_null_session", "(", "session_", ")", ":", "if", "response", "is", "None", "and", "isinstance", "(", "session_", ",", "SecureCookieSession", ")", "and", "session_", ".", "modified", ":", "self", ".", "logger", ".", "exception", "(", "\"Secure Cookie Session modified during websocket handling. \"", "\"These modifications will be lost as a cookie cannot be set.\"", ")", "else", ":", "await", "self", ".", "save_session", "(", "session_", ",", "response", ")", "return", "response" ]
Set argument parser option .
def setoption ( parser , metadata = None ) : parser . add_argument ( '-v' , action = 'version' , version = __version__ ) subparsers = parser . add_subparsers ( help = 'sub commands help' ) create_cmd = subparsers . add_parser ( 'create' ) create_cmd . add_argument ( 'name' , help = 'Specify Python package name.' ) create_cmd . add_argument ( '-d' , dest = 'description' , action = 'store' , help = 'Short description about your package.' ) create_cmd . add_argument ( '-a' , dest = 'author' , action = 'store' , required = True , help = 'Python package author name.' ) create_cmd . add_argument ( '-e' , dest = 'email' , action = 'store' , required = True , help = 'Python package author email address.' ) create_cmd . add_argument ( '-l' , dest = 'license' , choices = metadata . licenses ( ) . keys ( ) , default = 'GPLv3+' , help = 'Specify license. (default: %(default)s)' ) create_cmd . add_argument ( '-s' , dest = 'status' , choices = metadata . status ( ) . keys ( ) , default = 'Alpha' , help = ( 'Specify development status. ' '(default: %(default)s)' ) ) create_cmd . add_argument ( '--no-check' , action = 'store_true' , help = 'No checking package name in PyPI.' ) create_cmd . add_argument ( '--with-samples' , action = 'store_true' , help = 'Generate package with sample code.' ) group = create_cmd . add_mutually_exclusive_group ( required = True ) group . add_argument ( '-U' , dest = 'username' , action = 'store' , help = 'Specify GitHub username.' ) group . add_argument ( '-u' , dest = 'url' , action = 'store' , type = valid_url , help = 'Python package homepage url.' ) create_cmd . add_argument ( '-o' , dest = 'outdir' , action = 'store' , default = os . path . abspath ( os . path . curdir ) , help = 'Specify output directory. (default: $PWD)' ) list_cmd = subparsers . add_parser ( 'list' ) list_cmd . add_argument ( '-l' , dest = 'licenses' , action = 'store_true' , help = 'show license choices.' )
88
https://github.com/mkouhei/bootstrap-py/blob/95d56ed98ef409fd9f019dc352fd1c3711533275/bootstrap_py/commands.py#L12-L51
[ "def", "write", "(", "self", ",", "splits", "=", "None", ",", "mergers", "=", "None", ",", "dividends", "=", "None", ",", "stock_dividends", "=", "None", ")", ":", "self", ".", "write_frame", "(", "'splits'", ",", "splits", ")", "self", ".", "write_frame", "(", "'mergers'", ",", "mergers", ")", "self", ".", "write_dividend_data", "(", "dividends", ",", "stock_dividends", ")", "# Use IF NOT EXISTS here to allow multiple writes if desired.", "self", ".", "conn", ".", "execute", "(", "\"CREATE INDEX IF NOT EXISTS splits_sids \"", "\"ON splits(sid)\"", ")", "self", ".", "conn", ".", "execute", "(", "\"CREATE INDEX IF NOT EXISTS splits_effective_date \"", "\"ON splits(effective_date)\"", ")", "self", ".", "conn", ".", "execute", "(", "\"CREATE INDEX IF NOT EXISTS mergers_sids \"", "\"ON mergers(sid)\"", ")", "self", ".", "conn", ".", "execute", "(", "\"CREATE INDEX IF NOT EXISTS mergers_effective_date \"", "\"ON mergers(effective_date)\"", ")", "self", ".", "conn", ".", "execute", "(", "\"CREATE INDEX IF NOT EXISTS dividends_sid \"", "\"ON dividends(sid)\"", ")", "self", ".", "conn", ".", "execute", "(", "\"CREATE INDEX IF NOT EXISTS dividends_effective_date \"", "\"ON dividends(effective_date)\"", ")", "self", ".", "conn", ".", "execute", "(", "\"CREATE INDEX IF NOT EXISTS dividend_payouts_sid \"", "\"ON dividend_payouts(sid)\"", ")", "self", ".", "conn", ".", "execute", "(", "\"CREATE INDEX IF NOT EXISTS dividends_payouts_ex_date \"", "\"ON dividend_payouts(ex_date)\"", ")", "self", ".", "conn", ".", "execute", "(", "\"CREATE INDEX IF NOT EXISTS stock_dividend_payouts_sid \"", "\"ON stock_dividend_payouts(sid)\"", ")", "self", ".", "conn", ".", "execute", "(", "\"CREATE INDEX IF NOT EXISTS stock_dividends_payouts_ex_date \"", "\"ON stock_dividend_payouts(ex_date)\"", ")" ]
Parse argument options .
def parse_options ( metadata ) : parser = argparse . ArgumentParser ( description = '%(prog)s usage:' , prog = __prog__ ) setoption ( parser , metadata = metadata ) return parser
89
https://github.com/mkouhei/bootstrap-py/blob/95d56ed98ef409fd9f019dc352fd1c3711533275/bootstrap_py/commands.py#L72-L77
[ "def", "bisine_wahwah_wave", "(", "frequency", ")", ":", "#", "# This is clearly intended to build on the bisine wave defined above,", "# so we can start by generating that.", "waves_a", "=", "bisine_wave", "(", "frequency", ")", "#", "# Then, by reversing axis 2, we swap the stereo channels. By mixing", "# this with `waves_a`, we'll be able to create the desired effect.", "waves_b", "=", "tf", ".", "reverse", "(", "waves_a", ",", "axis", "=", "[", "2", "]", ")", "#", "# Let's have the balance oscillate from left to right four times.", "iterations", "=", "4", "#", "# Now, we compute the balance for each sample: `ts` has values", "# in [0, 1] that indicate how much we should use `waves_a`.", "xs", "=", "tf", ".", "reshape", "(", "tf", ".", "range", "(", "_samples", "(", ")", ",", "dtype", "=", "tf", ".", "float32", ")", ",", "[", "1", ",", "_samples", "(", ")", ",", "1", "]", ")", "thetas", "=", "xs", "/", "_samples", "(", ")", "*", "iterations", "ts", "=", "(", "tf", ".", "sin", "(", "math", ".", "pi", "*", "2", "*", "thetas", ")", "+", "1", ")", "/", "2", "#", "# Finally, we can mix the two together, and we're done.", "wave", "=", "ts", "*", "waves_a", "+", "(", "1.0", "-", "ts", ")", "*", "waves_b", "#", "# Alternately, we can make the effect more pronounced by exaggerating", "# the sample data. Let's emit both variations.", "exaggerated_wave", "=", "wave", "**", "3.0", "return", "tf", ".", "concat", "(", "[", "wave", ",", "exaggerated_wave", "]", ",", "axis", "=", "0", ")" ]
Execute main processes .
def main ( ) : try : pkg_version = Update ( ) if pkg_version . updatable ( ) : pkg_version . show_message ( ) metadata = control . retreive_metadata ( ) parser = parse_options ( metadata ) argvs = sys . argv if len ( argvs ) <= 1 : parser . print_help ( ) sys . exit ( 1 ) args = parser . parse_args ( ) control . print_licences ( args , metadata ) control . check_repository_existence ( args ) control . check_package_existence ( args ) control . generate_package ( args ) except ( RuntimeError , BackendFailure , Conflict ) as exc : sys . stderr . write ( '{0}\n' . format ( exc ) ) sys . exit ( 1 )
90
https://github.com/mkouhei/bootstrap-py/blob/95d56ed98ef409fd9f019dc352fd1c3711533275/bootstrap_py/commands.py#L80-L99
[ "def", "_pare_down_model", "(", "self", ",", "strain_gempro", ",", "genes_to_remove", ")", ":", "# Filter out genes in genes_to_remove which do not show up in the model", "strain_genes", "=", "[", "x", ".", "id", "for", "x", "in", "strain_gempro", ".", "genes", "]", "genes_to_remove", ".", "extend", "(", "self", ".", "missing_in_orthology_matrix", ")", "genes_to_remove", "=", "list", "(", "set", "(", "genes_to_remove", ")", ".", "intersection", "(", "set", "(", "strain_genes", ")", ")", ")", "if", "len", "(", "genes_to_remove", ")", "==", "0", ":", "log", ".", "info", "(", "'{}: no genes marked non-functional'", ".", "format", "(", "strain_gempro", ".", "id", ")", ")", "return", "else", ":", "log", ".", "debug", "(", "'{}: {} genes to be marked non-functional'", ".", "format", "(", "strain_gempro", ".", "id", ",", "len", "(", "genes_to_remove", ")", ")", ")", "# If a COBRApy model exists, utilize the delete_model_genes method", "if", "strain_gempro", ".", "model", ":", "strain_gempro", ".", "model", ".", "_trimmed", "=", "False", "strain_gempro", ".", "model", ".", "_trimmed_genes", "=", "[", "]", "strain_gempro", ".", "model", ".", "_trimmed_reactions", "=", "{", "}", "# Delete genes!", "cobra", ".", "manipulation", ".", "delete_model_genes", "(", "strain_gempro", ".", "model", ",", "genes_to_remove", ")", "if", "strain_gempro", ".", "model", ".", "_trimmed", ":", "log", ".", "info", "(", "'{}: marked {} genes as non-functional, '", "'deactivating {} reactions'", ".", "format", "(", "strain_gempro", ".", "id", ",", "len", "(", "strain_gempro", ".", "model", ".", "_trimmed_genes", ")", ",", "len", "(", "strain_gempro", ".", "model", ".", "_trimmed_reactions", ")", ")", ")", "# Otherwise, just mark the genes as non-functional", "else", ":", "for", "g", "in", "genes_to_remove", ":", "strain_gempro", ".", "genes", ".", "get_by_id", "(", "g", ")", ".", "functional", "=", "False", "log", ".", "info", "(", "'{}: marked {} genes as non-functional'", ".", "format", "(", "strain_gempro", ".", "id", ",", "len", "(", "genes_to_remove", ")", ")", ")" ]
Check key and set default value when it does not exist .
def _check_or_set_default_params ( self ) : if not hasattr ( self , 'date' ) : self . _set_param ( 'date' , datetime . utcnow ( ) . strftime ( '%Y-%m-%d' ) ) if not hasattr ( self , 'version' ) : self . _set_param ( 'version' , self . default_version ) # pylint: disable=no-member if not hasattr ( self , 'description' ) or self . description is None : getattr ( self , '_set_param' ) ( 'description' , self . warning_message )
91
https://github.com/mkouhei/bootstrap-py/blob/95d56ed98ef409fd9f019dc352fd1c3711533275/bootstrap_py/package.py#L44-L52
[ "def", "matchw", "(", "string", ",", "templ", ",", "wstr", ",", "wchr", ")", ":", "# ctypes.c_char(wstr.encode(encoding='UTF-8')", "string", "=", "stypes", ".", "stringToCharP", "(", "string", ")", "templ", "=", "stypes", ".", "stringToCharP", "(", "templ", ")", "wstr", "=", "ctypes", ".", "c_char", "(", "wstr", ".", "encode", "(", "encoding", "=", "'UTF-8'", ")", ")", "wchr", "=", "ctypes", ".", "c_char", "(", "wchr", ".", "encode", "(", "encoding", "=", "'UTF-8'", ")", ")", "return", "bool", "(", "libspice", ".", "matchw_c", "(", "string", ",", "templ", ",", "wstr", ",", "wchr", ")", ")" ]
Move directory from working directory to output directory .
def move ( self ) : if not os . path . isdir ( self . outdir ) : os . makedirs ( self . outdir ) shutil . move ( self . tmpdir , os . path . join ( self . outdir , self . name ) )
92
https://github.com/mkouhei/bootstrap-py/blob/95d56ed98ef409fd9f019dc352fd1c3711533275/bootstrap_py/package.py#L169-L173
[ "def", "check_origin", "(", "self", ",", "origin", ")", ":", "mod_opts", "=", "self", ".", "application", ".", "mod_opts", "if", "mod_opts", ".", "get", "(", "'cors_origin'", ")", ":", "return", "bool", "(", "_check_cors_origin", "(", "origin", ",", "mod_opts", "[", "'cors_origin'", "]", ")", ")", "else", ":", "return", "super", "(", "AllEventsHandler", ",", "self", ")", ".", "check_origin", "(", "origin", ")" ]
Initialize VCS repository .
def vcs_init ( self ) : VCS ( os . path . join ( self . outdir , self . name ) , self . pkg_data )
93
https://github.com/mkouhei/bootstrap-py/blob/95d56ed98ef409fd9f019dc352fd1c3711533275/bootstrap_py/package.py#L185-L187
[ "def", "with_timestamps", "(", "self", ",", "created_at", "=", "None", ",", "updated_at", "=", "None", ")", ":", "if", "not", "created_at", ":", "created_at", "=", "self", ".", "created_at", "(", ")", "if", "not", "updated_at", ":", "updated_at", "=", "self", ".", "updated_at", "(", ")", "return", "self", ".", "with_pivot", "(", "created_at", ",", "updated_at", ")" ]
Finds the location of the current Steam installation on Windows machines . Returns None for any non - Windows machines or for Windows machines where Steam is not installed .
def find_steam_location ( ) : if registry is None : return None key = registry . CreateKey ( registry . HKEY_CURRENT_USER , "Software\Valve\Steam" ) return registry . QueryValueEx ( key , "SteamPath" ) [ 0 ]
94
https://github.com/scottrice/pysteam/blob/1eb2254b5235a053a953e596fa7602d0b110245d/pysteam/winutils.py#L10-L20
[ "def", "remove_node", "(", "self", ",", "node", ")", ":", "affected_nodes", "=", "[", "v", "for", "u", ",", "v", "in", "self", ".", "edges", "(", ")", "if", "u", "==", "node", "]", "for", "affected_node", "in", "affected_nodes", ":", "node_cpd", "=", "self", ".", "get_cpds", "(", "node", "=", "affected_node", ")", "if", "node_cpd", ":", "node_cpd", ".", "marginalize", "(", "[", "node", "]", ",", "inplace", "=", "True", ")", "if", "self", ".", "get_cpds", "(", "node", "=", "node", ")", ":", "self", ".", "remove_cpds", "(", "node", ")", "super", "(", "BayesianModel", ",", "self", ")", ".", "remove_node", "(", "node", ")" ]
Plot PCoA principal coordinates scaled by the relative abundances of otu_name .
def plot_PCoA ( cat_data , otu_name , unifrac , names , colors , xr , yr , outDir , save_as , plot_style ) : fig = plt . figure ( figsize = ( 14 , 8 ) ) ax = fig . add_subplot ( 111 ) for i , cat in enumerate ( cat_data ) : plt . scatter ( cat_data [ cat ] [ "pc1" ] , cat_data [ cat ] [ "pc2" ] , cat_data [ cat ] [ "size" ] , color = colors [ cat ] , alpha = 0.85 , marker = "o" , edgecolor = "black" , label = cat ) lgnd = plt . legend ( loc = "best" , scatterpoints = 3 , fontsize = 13 ) for i in range ( len ( colors . keys ( ) ) ) : lgnd . legendHandles [ i ] . _sizes = [ 80 ] # Change the legend marker size manually plt . title ( " " . join ( otu_name . split ( "_" ) ) , style = "italic" ) plt . ylabel ( "PC2 (Percent Explained Variance {:.3f}%)" . format ( float ( unifrac [ "varexp" ] [ 1 ] ) ) ) plt . xlabel ( "PC1 (Percent Explained Variance {:.3f}%)" . format ( float ( unifrac [ "varexp" ] [ 0 ] ) ) ) plt . xlim ( round ( xr [ 0 ] * 1.5 , 1 ) , round ( xr [ 1 ] * 1.5 , 1 ) ) plt . ylim ( round ( yr [ 0 ] * 1.5 , 1 ) , round ( yr [ 1 ] * 1.5 , 1 ) ) if plot_style : gu . ggplot2_style ( ax ) fc = "0.8" else : fc = "none" fig . savefig ( os . path . join ( outDir , "_" . join ( otu_name . split ( ) ) ) + "." + save_as , facecolor = fc , edgecolor = "none" , format = save_as , bbox_inches = "tight" , pad_inches = 0.2 ) plt . close ( fig )
95
https://github.com/smdabdoub/phylotoast/blob/0b74ef171e6a84761710548501dfac71285a58a3/bin/PCoA_bubble.py#L36-L65
[ "def", "reset", "(", "cls", ")", ":", "cls", ".", "debug", "=", "False", "cls", ".", "disabled", "=", "False", "cls", ".", "overwrite", "=", "False", "cls", ".", "playback_only", "=", "False", "cls", ".", "recv_timeout", "=", "5", "cls", ".", "recv_endmarkers", "=", "[", "]", "cls", ".", "recv_size", "=", "None" ]
Split up the column data in a biom table by mapping category value .
def split_by_category ( biom_cols , mapping , category_id ) : columns = defaultdict ( list ) for i , col in enumerate ( biom_cols ) : columns [ mapping [ col [ 'id' ] ] [ category_id ] ] . append ( ( i , col ) ) return columns
96
https://github.com/smdabdoub/phylotoast/blob/0b74ef171e6a84761710548501dfac71285a58a3/bin/transpose_biom.py#L17-L25
[ "def", "sshagent_run", "(", "cmd", ")", ":", "# Handle context manager modifications", "wrapped_cmd", "=", "_prefix_commands", "(", "_prefix_env_vars", "(", "cmd", ")", ",", "'remote'", ")", "try", ":", "host", ",", "port", "=", "env", ".", "host_string", ".", "split", "(", "':'", ")", "return", "local", "(", "u\"ssh -p %s -A -o StrictHostKeyChecking=no %s@%s '%s'\"", "%", "(", "port", ",", "env", ".", "user", ",", "host", ",", "wrapped_cmd", ")", ")", "except", "ValueError", ":", "return", "local", "(", "u\"ssh -A -o StrictHostKeyChecking=no %s@%s '%s'\"", "%", "(", "env", ".", "user", ",", "env", ".", "host_string", ",", "wrapped_cmd", ")", ")" ]
print line if starts with ...
def print_line ( l ) : print_lines = [ '# STOCKHOLM' , '#=GF' , '#=GS' , ' ' ] if len ( l . split ( ) ) == 0 : return True for start in print_lines : if l . startswith ( start ) : return True return False
97
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/stockholm2oneline.py#L11-L21
[ "def", "setOverlayTransformTrackedDeviceRelative", "(", "self", ",", "ulOverlayHandle", ",", "unTrackedDevice", ")", ":", "fn", "=", "self", ".", "function_table", ".", "setOverlayTransformTrackedDeviceRelative", "pmatTrackedDeviceToOverlayTransform", "=", "HmdMatrix34_t", "(", ")", "result", "=", "fn", "(", "ulOverlayHandle", ",", "unTrackedDevice", ",", "byref", "(", "pmatTrackedDeviceToOverlayTransform", ")", ")", "return", "result", ",", "pmatTrackedDeviceToOverlayTransform" ]
convert stockholm to single line format
def stock2one ( stock ) : lines = { } for line in stock : line = line . strip ( ) if print_line ( line ) is True : yield line continue if line . startswith ( '//' ) : continue ID , seq = line . rsplit ( ' ' , 1 ) if ID not in lines : lines [ ID ] = '' else : # remove preceding white space seq = seq . strip ( ) lines [ ID ] += seq for ID , line in lines . items ( ) : yield '\t' . join ( [ ID , line ] ) yield '\n//'
98
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/stockholm2oneline.py#L23-L44
[ "def", "Run", "(", "self", ")", ":", "global", "DB", "# pylint: disable=global-statement", "global", "REL_DB", "# pylint: disable=global-statement", "global", "BLOBS", "# pylint: disable=global-statement", "if", "flags", ".", "FLAGS", ".", "list_storage", ":", "self", ".", "_ListStorageOptions", "(", ")", "sys", ".", "exit", "(", "0", ")", "try", ":", "cls", "=", "DataStore", ".", "GetPlugin", "(", "config", ".", "CONFIG", "[", "\"Datastore.implementation\"", "]", ")", "except", "KeyError", ":", "msg", "=", "(", "\"No Storage System %s found.\"", "%", "config", ".", "CONFIG", "[", "\"Datastore.implementation\"", "]", ")", "if", "config", ".", "CONFIG", "[", "\"Datastore.implementation\"", "]", "==", "\"SqliteDataStore\"", ":", "msg", "=", "\"The SQLite datastore is no longer supported.\"", "print", "(", "msg", ")", "print", "(", "\"Available options:\"", ")", "self", ".", "_ListStorageOptions", "(", ")", "raise", "ValueError", "(", "msg", ")", "DB", "=", "cls", "(", ")", "# pylint: disable=g-bad-name", "DB", ".", "Initialize", "(", ")", "atexit", ".", "register", "(", "DB", ".", "Flush", ")", "monitor_port", "=", "config", ".", "CONFIG", "[", "\"Monitoring.http_port\"", "]", "if", "monitor_port", "!=", "0", ":", "DB", ".", "InitializeMonitorThread", "(", ")", "# Initialize the blobstore.", "blobstore_name", "=", "config", ".", "CONFIG", ".", "Get", "(", "\"Blobstore.implementation\"", ")", "try", ":", "cls", "=", "blob_store", ".", "REGISTRY", "[", "blobstore_name", "]", "except", "KeyError", ":", "raise", "ValueError", "(", "\"No blob store %s found.\"", "%", "blobstore_name", ")", "BLOBS", "=", "blob_store", ".", "BlobStoreValidationWrapper", "(", "cls", "(", ")", ")", "# Initialize a relational DB if configured.", "rel_db_name", "=", "config", ".", "CONFIG", "[", "\"Database.implementation\"", "]", "if", "not", "rel_db_name", ":", "return", "try", ":", "cls", "=", "registry_init", ".", "REGISTRY", "[", "rel_db_name", "]", "except", "KeyError", ":", "raise", "ValueError", "(", "\"Database %s not found.\"", "%", "rel_db_name", ")", "logging", ".", "info", "(", "\"Using database implementation %s\"", ",", "rel_db_name", ")", "REL_DB", "=", "db", ".", "DatabaseValidationWrapper", "(", "cls", "(", ")", ")" ]
Decorator that converts positional arguments to sets before calling a math function , then casts the result back to the original ( or requested ) type .
def math_func ( f ) : @ wraps ( f ) def wrapper ( * args , * * kwargs ) : if len ( args ) > 0 : return_type = type ( args [ 0 ] ) if 'return_type' in kwargs : return_type = kwargs [ 'return_type' ] kwargs . pop ( 'return_type' ) return return_type ( f ( * args , * * kwargs ) ) args = list ( ( setify ( x ) for x in args ) ) return return_type ( f ( * args , * * kwargs ) ) return wrapper
99
https://github.com/elbow-jason/Uno-deprecated/blob/4ad07d7b84e5b6e3e2b2c89db69448906f24b4e4/uno/helpers.py#L8-L22
[ "def", "do_logout_service", "(", "request", ",", "data", ",", "binding", ",", "config_loader_path", "=", "None", ",", "next_page", "=", "None", ",", "logout_error_template", "=", "'djangosaml2/logout_error.html'", ")", ":", "logger", ".", "debug", "(", "'Logout service started'", ")", "conf", "=", "get_config", "(", "config_loader_path", ",", "request", ")", "state", "=", "StateCache", "(", "request", ".", "session", ")", "client", "=", "Saml2Client", "(", "conf", ",", "state_cache", "=", "state", ",", "identity_cache", "=", "IdentityCache", "(", "request", ".", "session", ")", ")", "if", "'SAMLResponse'", "in", "data", ":", "# we started the logout", "logger", ".", "debug", "(", "'Receiving a logout response from the IdP'", ")", "response", "=", "client", ".", "parse_logout_request_response", "(", "data", "[", "'SAMLResponse'", "]", ",", "binding", ")", "state", ".", "sync", "(", ")", "return", "finish_logout", "(", "request", ",", "response", ",", "next_page", "=", "next_page", ")", "elif", "'SAMLRequest'", "in", "data", ":", "# logout started by the IdP", "logger", ".", "debug", "(", "'Receiving a logout request from the IdP'", ")", "subject_id", "=", "_get_subject_id", "(", "request", ".", "session", ")", "if", "subject_id", "is", "None", ":", "logger", ".", "warning", "(", "'The session does not contain the subject id for user %s. Performing local logout'", ",", "request", ".", "user", ")", "auth", ".", "logout", "(", "request", ")", "return", "render", "(", "request", ",", "logout_error_template", ",", "status", "=", "403", ")", "else", ":", "http_info", "=", "client", ".", "handle_logout_request", "(", "data", "[", "'SAMLRequest'", "]", ",", "subject_id", ",", "binding", ",", "relay_state", "=", "data", ".", "get", "(", "'RelayState'", ",", "''", ")", ")", "state", ".", "sync", "(", ")", "auth", ".", "logout", "(", "request", ")", "return", "HttpResponseRedirect", "(", "get_location", "(", "http_info", ")", ")", "else", ":", "logger", ".", "error", "(", "'No SAMLResponse or SAMLRequest parameter found'", ")", "raise", "Http404", "(", "'No SAMLResponse or SAMLRequest parameter found'", ")" ]
