Columns: content (string, 39 to 9.28k chars); sha1 (string, 40 chars); id (int64, 8 to 710k)
def str_to_fancyc_comment(text):
    """ Return a string as a C formatted comment. """
    l_lines = text.splitlines()
    outstr = "/* " + l_lines[0] + "\n"
    for line in l_lines[1:]:
        outstr += " * " + line + "\n"
    outstr += " */\n"
    return outstr
3ee7fd146c8390346dfa9580870aef0551124339
539,025
def _code_range_to_set(code_range):
    """Converts a code range output by _parse_code_ranges to a set."""
    characters = set()
    for first, last, _ in code_range:
        characters.update(range(first, last + 1))
    return characters
3cb7c2771bec698241ae4d2ae103e4827493e40d
617,676
def easy_product(a, b):
    """Takes two numbers and returns their product"""
    return a * b
8a455a81a8a6b4a3219874d52b50bb84d2587b89
302,801
import textwrap def get_notifiers_provider_config(message, subtitle, title) -> dict: """ Return kwargs that will be passed to `notifiers.notify` method. """ # different providers have different requirements for the `notify` method # most seem to take a `message` parameter, but they also have different # potential formatting requirements for messages. # use the following provider-specific map for `notify` parameters provider_config = { "pushover": { "message": textwrap.dedent( f""" <i>{subtitle}</i> {message} """ ), "title": title, "html": True, }, "slack": {"message": message if message else "task complete"}, } return provider_config
45d78bdd37925d85eab91aaad430696f23cb4c70
691,896
import random
import string


def get_random_string(n):
    """ Create random string with len `n` """
    return ''.join(random.choices(string.ascii_letters + string.digits, k=n))
b1d354d8a7f1a4c3520e865fd87e2f2ca048699e
619,359
def generate_question_with_choices(choices: list[str], message: str) -> list[dict]: """Generate a multiple choice single select question with <choices> and <message> and return it in py-inquirer format :param choices: choices of the question :param message: message of the question """ return [ { 'type': 'list', 'name': 'options', 'message': message, 'choices': choices } ]
1379d4c1ca6dca2e9bae786a9b2937de6cf95ec3
118,696
def top_hat_width(subband_df, subband_f0, DM):
    """top_hat_width(subband_df, subband_f0, DM)
    Returns width of a top-hat pulse to convolve with pulses for dispersion
    broadening. Following Lorimer and Kramer, 2005 (sec 4.1.1 and A2.4)

    subband_df : subband bandwidth (MHz)
    subband_f0 : subband center frequency (MHz)
    DM : dispersion measure (pc/cm^3)

    return top_hat_width (milliseconds)
    """
    D = 4.148808e3  # sec*MHz^2*pc^-1*cm^3, dispersion const
    width_sec = 2 * D * DM * (subband_df) / (subband_f0)**3
    return width_sec * 1.0e+3
5e6d399a3d78952ef6ea7f30101ebc5bddd2bc8a
57,705
def ReadFile(filename):
    """Utility function reads a file and returns its content.

    Args:
      filename: the file to read.

    Returns:
      The file content.
    """
    with open(filename, "r") as data_file:
        return data_file.read()
7487e5c2e238f8890cdb1582ace984278346434e
585,396
import logging def exists_handler_with_name(name: str) -> bool: """ >>> discard = set_stream_handler() >>> assert exists_handler_with_name('stream_handler') >>> discard2 = set_stream_handler_color() >>> assert exists_handler_with_name('stream_handler_color') >>> assert not exists_handler_with_name('unknown_handler') """ handlers = logging.getLogger().handlers for handler in handlers: if hasattr(handler, 'name'): if handler.name == name: return True return False
ab2fc4f6b1a7ae02c74164c7054ad0e4fe24312a
107,918
import re


def _unescape_value(value):
    """Unescape a value."""
    def unescape(c):
        return {
            "\\\\": "\\",
            "\\\"": "\"",
            "\\n": "\n",
            "\\t": "\t",
            "\\b": "\b",
        }[c.group(0)]
    return re.sub(r"(\\.)", unescape, value)
84a2c4cd39e90b219be3ec68e5e88e1e68def56b
539,975
def repeat(string, n): """ Description ---------- Repeat a string 'n' number of times. Parameters ---------- string : str - string to repeat n : int - number of times to repeat Returns ---------- str - string consisting of param string repeated 'n' times Example ---------- >>> repeat('abc', 3) -> 'abcabcabc' """ if not isinstance(string, str): raise TypeError("param 'string' must be a string") if not isinstance(n, int): raise TypeError("param 'n' must be an integer: 0 <= n <= sys.maxsize") if n < 0: raise ValueError("param 'n' must be in range 0 <= n <= sys.maxsize") n_str = "" for _ in range(n): n_str = n_str + string return n_str
7edef4df6df14fd7b9087771b000c026e1715689
88,675
from datetime import datetime def roundTime(dt=None, roundTo=1): """Round a datetime object to any time lapse in seconds dt : datetime.datetime object, default now. roundTo : Closest number of seconds to round to, default 15 seconds. """ if dt == None : dt = datetime.now() seconds = dt.replace(second=0,microsecond=0).timestamp() remainder = seconds % (roundTo * 15) return seconds - remainder
1c383917fb4b0aab130f1ecd8e5f05b069f454b9
398,579
import zipfile def extract_zipfile(source_zip): """ unzips your new zip file into a temporary directory to work in :param source_zip: a .zip file :return: None. should create a temp dir in the PWD then put the .zip contents in it """ # unzip the .zip zip_ref = zipfile.ZipFile(source_zip, 'r') zip_ref.extractall('./temp') return zip_ref
13a9426771bb7209cda5344b36ce49cbe66590c2
68,626
import ipaddress def getResourceManagerIpv4(args): """Retrieve the IPv4 address of the cloudFPGA Resource Manager. :param args The options passed as arguments to the script. :return The IP address as an 'ipaddress.IPv4Address'.""" ipResMngrStr = args.mngr_ipv4 while True: if args.mngr_ipv4 == '': # Take an IP address from the console print("Enter the IPv4 address of the cloudFPGA Resource Manager (e.g. 10.12.0.132)") ipResMngrStr = input() try: ipResMngr = ipaddress.ip_address(ipResMngrStr) except ValueError: print('[ERROR] Unrecognized IPv4 address.') else: return ipResMngr
5f5baecad256bcfde2e555d229448c68da3580a8
514,497
import sqlite3 def get_tile(dbfile, z, x, y): """Get a specific image tile from an sqlite db.""" con = sqlite3.connect(dbfile) with con: cur = con.cursor() cur.execute('SELECT image FROM tiles where z=? AND x=? AND y=?', (z, x, y)) image = cur.fetchone()[0] return str(image)
709e1577ada327c802a34311e193cc19fde1b8e0
440,356
import requests
from bs4 import BeautifulSoup


def _scraper(url):
    """
    Scraper function

    :Param url: url to be scraped
    :Return: BeautifulSoup object
    """
    response = requests.get(url)
    assert response.status_code == 200, "url could not be reached"
    soup = BeautifulSoup(response.content, "html.parser")
    return soup
77fb20d7f3519587195226ccbf71635021350d29
285,165
def test_case_is_success(test):
    """Determine if the test expects a successful parse"""
    if "results" not in test:
        return True
    for result in test["results"]:
        if "error" in result:
            return False
    return True
fee43364a8a11ed42564b07533a2500b69c60894
635,677
def get_recipients(msg, recipients): """Given a parsed message, extract and return recipient list""" msg_fields = ['From', 'To', 'Cc', 'Bcc', 'Reply-To', 'Sender', 'Subject', 'In-Reply-To', 'Message-ID','References'] for f in msg_fields: if msg[f] is None: continue if f == 'Subject' or f == 'Message-ID' or f == 'References': recipients[f] = msg[f] continue if f == 'Cc': copies = msg[f] recipients["Cc"] = copies else: recipients[f] = msg[f] return recipients
e76868e3427e4c578c79b9ac897047a11aaab129
224,876
import posixpath
import ntpath


def _is_abs(path):
    """
    Check if path is absolute on any platform.

    :param str path: Path to validate.

    :returns bool: True if absolute on any platform, False otherwise.
    """
    return posixpath.isabs(path) or ntpath.isabs(path)
08d13e51c42da51196c1e92d90783ab24b6e9382
531,321
def match_any(matcher, values): """Check if the matcher matches *any* of the supplied values. :param matcher(regex pattern): compiled regular expression. :param values(list): list of ``str`` values to match. :returns: ``True`` if *any* of the ``str`` values matches (fullmatch) the matcher; otherwise ``False``. """ for val in values: if matcher.fullmatch(val): return True return False
cd7c8d9cb0e2adf85a665be22acefc34d8058735
304,131
def is_valid_scheme(scheme: str) -> bool:
    """Validate a scheme.

    Arguments:
        scheme: scheme to validate.

    Returns:
        True if is valid else False.
    """
    return scheme.lower() in ("http", "https")
9fccb1eb57e5fe3b36551cb2f84a10b9db7181b1
191,220
def stop_server(proc):
    """
    Stop server process.

    proc: ShellProc
        Process of server to stop.
    """
    return proc.terminate(timeout=10)
c126ca840b56407f1eea8af943f4602532df13df
16,318
def dget(d, dkey, default=None):
    """Dictionary get: gets the field from nested key

    Args:
        d (dict, list): Dictionary/list to retrieve field from
        dkey (str): Nested key to retrieve field from dictionary/list, separated
            by periods. Example: key1.3.key2, this will be the equivalent of
            d['key1'][3]['key2'].
        default (optional): Default object to return if value not found.
            Defaults to None.

    Returns:
        Any: field to return from dict or default if not found.
    """
    keys = dkey.split('.')
    obj = d
    for key in keys:
        if not obj:
            return default
        if key.isdigit():
            index = int(key)
            if isinstance(obj, list) and index < len(obj):
                obj = obj[index]
            else:
                return default
        else:
            if isinstance(obj, dict) and key in obj:
                obj = obj[key]
            else:
                return default
    return obj
813aaad713f2167fac3b479903b021bc409d0d6c
97,611
def NW(N, tp='U'): """function to compute number of edges or number of feasible vertex-pair Args: N (int): number of nodes tp (str, optional): Can be either 'U' for undirected and 'D' for directed graphs. Defaults to 'U'. Returns: int: number of edges or number of feasible vertex-pair between N nodes """ assert tp=='U' or tp=='D', "Invalid type in NW, it shall be either 'U' or 'D'" if tp=='U': return N*(N-1)//2 else: return N*(N-1)
5b13eaf1c2d1b50e9ddd7e9e8dec3b999722dd4f
379,572
def interpolate(x1, y1, x2, y2, current_step, total_steps): """Interpolates between two 2d points. Args: x1, y1, x2, y2: coords representing the two 2d points current_step, total_steps: ints representing the current progress (example 2, 10 represents 20%) Returns: 2-float tuple representing a 2d point """ dx1, dy1 = (x2 - x1) / total_steps, (y2 - y1) / total_steps mx1, my1 = x1 + dx1 * current_step, y1 + dy1 * current_step return mx1, my1
b61c493b86815c5373c414abc4da3fe78c14ca1d
561,975
import torch def move_bdim_to_front(x, result_ndim=None): """ Returns a tensor with a batch dimension at the front. If a batch dimension already exists, move it. Otherwise, create a new batch dimension at the front. If `result_ndim` is not None, ensure that the resulting tensor has rank equal to `result_ndim`. """ x_dim = len(x.shape) x_bdim = x.bdim if x_bdim is None: x = torch.unsqueeze(x, 0) else: x = torch.movedim(x, x_bdim, 0) if result_ndim is None: return x diff = result_ndim - x_dim - (x_bdim is None) for _ in range(diff): x = torch.unsqueeze(x, 1) return x
313a1837b6c3b451cebacaa7815f2631dfa387e5
24
import unicodedata def _is_punctuation(char): """Checks whether `chars` is a punctuation character.""" cp = ord(char) # We treat all non-letter/number ASCII as punctuation. # Characters such as "^", "$", and "`" are not in the Unicode # Punctuation class but we treat them as punctuation anyways, for # consistency. if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)): return True cat = unicodedata.category(char) if cat.startswith("P"): return True return False
ac4eeeeceba447f85cb61d792237c930d9c131f9
691,361
def recursive_linear_search(array: list, target: int, index: int = 0) -> int:
    """
    This is a Python implementation of the recursive linear search algorithm,
    where we apply the linear search algorithm with recursion.

    Parameters:
        1> array  : First parameter to the function is the array (searching space)
                    (accepts both sorted and unsorted arrays).
        2> target : Second parameter to the function is the target, which is the
                    element to be searched for in the array (argument 1).
        3> index  : Third parameter is a default parameter of the function. Its
                    default value is 0; it is the starting position of our search.

    Output:
        This function will return the index (starting from zero) of the target
        (2nd argument) if found in the given array (1st argument), else it will
        return -1.

    Exceptions:
        TypeError : Raised when the 1st argument (array) is any type other than
                    list, tuple or string.
        ValueError: Raised when the 1st argument is of type string and the 2nd
                    argument (target) is of a non-string type, as you cannot
                    search for a non-str element in a str.

    Examples:
    >>> recursive_linear_search([1,2,3,4,5],5)
    4
    >>> recursive_linear_search([5,4,3,2,1],3)
    2
    """
    if type(array) not in (list, tuple, str):
        raise TypeError("Invalid input(argument 1) only list,tuple,str allowed")
    if type(array) == str and type(target) != str:
        raise ValueError("Invalid input(argument 2) cannot search non-str in str")
    if index == len(array):
        return -1
    if array[index] == target:
        return index
    return recursive_linear_search(array, target, index + 1)
e2cb7331813aa11bef8fd19247ff3667b3f1e9ae
77,249
from typing import Dict from typing import List def update_enrollments( tracked_entity: Dict, enrollments_with_new_events: List, ) -> List: """ Adds new events to current program enrollments and adds new enrollments. Returns a complete list of enrollments. """ current_enrollments = tracked_entity.get("enrollments", []) enrollments_by_program_id = {e["program"]: e for e in current_enrollments} for enrol in enrollments_with_new_events: program_id = enrol["program"] if program_id in enrollments_by_program_id: enrollments_by_program_id[program_id]["events"].extend(enrol["events"]) enrollments_by_program_id[program_id]["status"] = enrol["status"] else: enrollments_by_program_id[program_id] = enrol return list(enrollments_by_program_id.values())
1d364cc843026e44d4680d0e55e1275ff2433737
525,845
from datetime import datetime def timestamp2datetime(unixtime : float): """ Convert unix timestamp to datetime :param unixtime: Unix timestamp to be converted in a datetime :type unixtime: int :return: datetime :rtype: datetime """ if unixtime and unixtime > 0: result = datetime.fromtimestamp(unixtime) else: result = None return result
3ddca644bf05babbba88cf0cf027930a3fb49ba5
254,784
def ReadIgnoreFile(infile): """Parses a file indicating which communities should be skipped by RemoveLowComplexityRepeats. The numbers of communities to skip should be separated by commas Lines beginning with # will be treated as comments and skipped (eg providing justification for skipping these communities). """ ignore_list=[] inhandle=open(infile, 'r') for line in inhandle: #Skip comments if line[0]=='#': continue ignore_list+=line.split(',') inhandle.close() return ignore_list
98b25c8b26c0d4a8c320e66a55628c8e62c996c0
533,690
def _to_int(x):
    """Converts a completion and error code as it is listed in 32-bit notation
    in the VPP-4.3.2 specification to the actual integer value.
    """
    if x > 0x7FFFFFFF:
        return int(x - 0x100000000)
    else:
        return int(x)
de155dce733dc93c3112d5bbc10f0dbc20110978
77,536
import uuid def _replace_substrings(code, first_char, last_char): """Replace the substrings between first_char and last_char with a unique id. Return the replaced string and a list of replacement tuples: (unique_id, original_substring) """ substrings = [] level = 0 start = -1 for i in range(len(code)): if code[i] == first_char: level += 1 if level == 1: start = i if code[i] == last_char: level -= 1 if level == 0: substrings.append((start, i + 1)) code_ = code replacements = [] for substr_index in substrings: unique_id = '{}"{}"{}'.format(first_char, str(uuid.uuid4()), last_char) substring = code[substr_index[0]:substr_index[1]] code_ = code_.replace(substring, unique_id) replacements.append((unique_id, substring)) return code_, replacements
4a4b877df5aa6ce889a62f6e3ed9b784e37ad41c
688,805
from typing import Optional


def res_to_q(res: float) -> Optional[float]:
    """
    :param res: resonance value
    :return: Q factor, or None if res out of range [0, 1)
    """
    if not 0.0 <= res < 1.0:
        return None
    return 1.25 / (1 - res) - 1
92e8ee2d0d8636d8d516701e57a5fb8d507765e2
526,773
def transpose(matlist, K): """ Returns the transpose of a matrix Examples ======== >>> from sympy.matrices.densetools import transpose >>> from sympy import ZZ >>> a = [ ... [ZZ(3), ZZ(7), ZZ(4)], ... [ZZ(2), ZZ(4), ZZ(5)], ... [ZZ(6), ZZ(2), ZZ(3)]] >>> transpose(a, ZZ) [[3, 2, 6], [7, 4, 2], [4, 5, 3]] """ return [list(a) for a in (zip(*matlist))]
2f3909e6c7bd0d554520c8d240d7630e0620f3f5
646,412
def split_number(number, multiplier):
    """Decodes a number into two.

    The number = high * multiplier + low, and this method returns
    the tuple (high, low).
    """
    low = int(number % multiplier)
    high = int(number / multiplier)
    return (high, low)
48381531bd177055056641cec5eaee9b2af7a28d
165,162
import re
import string


def clean_string(text):
    """
    Performs basic string cleaning:
        - Clear pdf conversion md strings
        - Remove weird symbols
        - Reduce redundant white spaces

    Parameters
    ----------
    text : str
        string to clean

    Returns
    -------
    text : str
        string that is cleaned by the above process
    """
    # Clear line breaks
    md_break = re.compile('(\\n|\\uf0d8|\\uf0b7|\\uf0fc|\\uf020)')
    text = re.sub(md_break, ' ', text)
    # Clear symbols and numbers
    text = re.sub(
        re.compile(
            '[0-9–°“”�º…' + string.punctuation + ']'),
        ' ', text)
    # Clear multiple whitespace
    text = re.sub(re.compile(r'\s+'), ' ', text)
    return text
afa2fea6dbed2a2fc11b8c71203dbdd6188f4c81
610,999
from typing import List


def add_bcc(cmd: List[int]):
    """Compute BCC (= Block Checking Character) and append to command sequence.

    Returns:
        list of binary data with BCC code.
    """
    check: int = 0x00
    for b in cmd:
        check = check ^ b
    cmd.append(check)
    return cmd
6fac48e348cdb2c6fd63f1545a09fa5ea6ddba61
666,279
def extract_one_string_in_section(section, str_ptr): """Extract one string in an ELF section""" data = section['data'] max_offset = section['size'] offset = str_ptr - section['start'] if offset < 0 or offset >= max_offset: return None ret_str = "" while (offset < max_offset) and (data[offset] != 0): ret_str += chr(data[offset]) offset += 1 return ret_str
7f3e95961d1febd0c5edb2a18e0f6f0e3098a539
490,849
def mean(mylist): """ function to take the mean of a list Parameters ---------- mylist : list list of numbers to take a mean Returns ------- mean_list : float The mean of the list. Examples -------- >>> mean([1,2,3,4,5,6,7]) 4.0 """ if not isinstance(mylist, list): raise TypeError("Mean: %s is not a list!" % mylist) return (sum(mylist) / len(mylist))
5b3c0796e752c2ac384ea00f97ff14ab7d3e7f2d
673,052
def glob_to_sql(string: str) -> str:
    """Convert glob-like wildcards to SQL wildcards

    * becomes %
    ? becomes _
    % becomes \%
    \\ remains \\
    \* remains \*
    \? remains \?

    This also adds a leading and trailing %, unless the pattern begins with
    ^ or ends with $
    """
    # What's with the chr(1) through chr(3) nonsense? It's a trick to
    # hide \\, \* and \? from the %, * and ? substitutions. This trick
    # depends on the substitutions being done in order. chr(1) through
    # chr(3) were picked because I know those characters almost
    # certainly won't be in the input string
    table = (
        (r"\\", chr(1)),
        (r"\*", chr(2)),
        (r"\?", chr(3)),
        (r"%", r"\%"),
        (r"?", "_"),
        (r"*", "%"),
        (chr(1), r"\\"),
        (chr(2), r"\*"),
        (chr(3), r"\?"),
    )
    for (a, b) in table:
        string = string.replace(a, b)
    string = string[1:] if string.startswith("^") else "%" + string
    string = string[:-1] if string.endswith("$") else string + "%"
    return string
bf3ca7bf522292c17c418ccdf75aca21e4f2bc69
70,654
from typing import Dict
from typing import Any
from pathlib import Path
import yaml


def load_config(fpath: str) -> Dict[str, Any]:
    """Loads a configuration from a path to a YAML file, allows for inheritance
    between files using the ``base_config`` key.

    Parameters
    ----------
    fpath : str
        Path to the (YAML) config file
    """
    path = Path(fpath)
    with open(path) as handle:
        config = yaml.load(handle, Loader=yaml.FullLoader)
    if 'base_config' in config:
        base_config = load_config(path.parent / config['base_config'])
        base_config.update(config)
        return base_config
    else:
        return config
8b3d708f4ec441dc5be661efb5fc243d7d4d57fd
84,505
def colnames_to_yeo_7(colnames: list, order: bool = True) -> list: """ takes a list of colnames in the brainnetome format/naming and converts them to yeo_7 regions Examples: >>> print(colnames_to_yeo_7(["108_110", "1_2", "200_218", "148_140"])) >>> print(colnames_to_yeo_7(["108_110", "1_2", "200_218", "148_140"], order=False)) Args: colnames: list of the brainnetome colnames order: whether the resulting colnames should be ordered Returns: list of yeo_7 converted colnames """ lookup = { 1: 6, 2: 4, 3: 7, 4: 6, 5: 7, 6: 7, 7: 3, 8: 3, 9: 2, 10: 2, 11: 7, 12: 6, 13: 7, 14: 7, 15: 4, 16: 6, 17: 6, 18: 6, 19: 6, 20: 6, 21: 6, 22: 6, 23: 7, 24: 6, 25: 3, 26: 3, 27: 5, 28: 6, 29: 6, 30: 3, 31: 6, 32: 6, 33: 7, 34: 7, 35: 7, 36: 6, 37: 4, 38: 4, 39: 4, 40: 4, 41: 7, 42: 7, 43: 7, 44: 7, 45: 5, 46: 6, 47: 5, 48: 5, 49: 5, 50: 5, 51: 7, 52: 7, 53: 2, 54: 2, 55: 3, 56: 3, 57: 2, 58: 2, 59: 2, 60: 2, 61: 4, 62: 4, 63: 3, 64: 3, 65: 4, 66: 2, 67: 2, 68: 2, 69: 5, 70: 5, 71: 2, 72: 2, 73: 2, 74: 2, 75: 2, 76: 2, 77: 5, 78: 5, 79: 7, 80: 7, 81: 7, 82: 6, 83: 7, 84: 7, 85: 3, 86: 3, 87: 7, 88: 7, 89: 5, 90: 5, 91: 3, 92: 3, 93: 5, 94: 5, 95: 7, 96: 5, 97: 3, 98: 3, 99: 6, 100: 6, 101: 5, 102: 5, 103: 5, 104: 5, 105: 1, 106: 1, 107: 3, 108: 1, 109: 5, 110: 5, 111: 5, 112: 1, 113: 1, 114: 1, 115: 5, 116: 5, 117: 5, 118: 5, 119: 1, 120: 1, 121: 7, 122: 7, 123: 4, 124: 4, 125: 3, 126: 3, 127: 3, 128: 3, 129: 3, 130: 3, 131: 2, 132: 2, 133: 3, 134: 3, 135: 1, 136: 1, 137: 6, 138: 6, 139: 3, 140: 3, 141: 7, 142: 6, 143: 3, 144: 7, 145: 2, 146: 2, 147: 6, 148: 6, 149: 2, 150: 3, 151: 1, 152: 1, 153: 7, 154: 7, 155: 2, 156: 2, 157: 2, 158: 2, 159: 3, 160: 2, 161: 2, 162: 2, 163: 2, 164: 2, 165: 0, 166: 6, 167: 4, 168: 4, 169: 4, 170: 4, 171: 2, 172: 2, 173: 4, 174: 4, 175: 7, 176: 7, 177: 0, 178: 0, 179: 7, 180: 4, 181: 7, 182: 1, 183: 4, 184: 4, 185: 4, 186: 4, 187: 7, 188: 7, 189: 1, 190: 1, 191: 1, 192: 1, 193: 1, 194: 1, 195: 1, 196: 1, 197: 1, 198: 1, 199: 1, 200: 1, 201: 3, 202: 1, 203: 1, 204: 1, 205: 1, 206: 1, 207: 1, 208: 1, 209: 1, 210: 1, 211: 0, 212: 0, 213: 0, 214: 0, 215: 0, 216: 0, 217: 0, 218: 0, 219: 0, 220: 0, 221: 0, 222: 0, 223: 0, 224: 0, 225: 0, 226: 0, 227: 0, 228: 0, 229: 0, 230: 0, 231: 0, 232: 0, 233: 0, 234: 0, 235: 0, 236: 0, 237: 0, 238: 0, 239: 0, 240: 0, 241: 0, 242: 0, 243: 0, 244: 0, 245: 0, 246: 0 } splitted = [[int(j) for j in i.split("_")] for i in colnames] new_names = [sorted([lookup[i] for i in j]) if order else [lookup[i] for i in j] for j in splitted] return [str(i[0]) + "_" + str(i[1]) for i in new_names]
eeac8f08c8ff2f9308c9c608fb1c74d5f5bb0387
110,693
def register(func):
    """Method decorator to register CLI commands."""
    func._registered = True
    return func
6cc016eaf99c2cb585fafb0c641e16b145dc8871
360,367
def add_argument_group(parser, title):
    """Add group only if it doesn't exist yet"""
    for group in parser._action_groups:
        if group.title == title:
            return group
    return parser.add_argument_group(title)
e6d5a794004b9478e06b42f4108044e70c7d8e0d
134,745
def change_one(pair, index, new_value):
    """Return new tuple with one element modified."""
    new_pair = list(pair)
    new_pair[index] = new_value
    return tuple(new_pair)
82f3f29639044bdfdf861cc0c7917cf521ac74aa
111,375
def _off_faces(faces): """ Return a string representing the faces in OFF format. Parameters ---------- faces: numpy array of integers A 2D array containing 3 vertex indices per face. Dimension is (m, 3) for m faces. Returns ------- string The OFF format string for the faces. """ if faces.shape[0] == 0: return '' face_reps = ["3 %d %d %d" % (f[0], f[1], f[2]) for f in faces] # vertex count and the 3 vertex indices defining the face, starting at 0 return '\n'.join(face_reps) + '\n'
5488ed221fabe329be876bf5914591a2fd18f2fc
582,142
import torch def pad_batch(items, pad_value=0): """Pad tensors in list to equal length (in the first dim) :param items: :param pad_value: :return: padded_items, orig_lens """ max_len = len(max(items, key=lambda x: len(x))) zeros = (2*torch.as_tensor(items[0]).ndim -1) * [pad_value] padded_items = torch.stack([torch.nn.functional.pad(torch.as_tensor(x), pad= zeros + [max_len - len(x)], value=pad_value) for x in items]) orig_lens = [len(xx) for xx in items] return padded_items, orig_lens
481c3c8dcbc3dfa63cfbe7b71d04d0d897980f5a
289,441
def get_sea_attribute_cmd(seaname): """ Get pvid, pvid_adapter, and virt_adapters from the configured SEA device. Also get the state of the SEA. :param seaname: sea device name :returns: A VIOS command to get the sea adapter's attributes. """ return ("ioscli lsdev -dev %(sea)s -attr pvid,pvid_adapter,virt_adapters;" "ioscli lsdev -type sea | grep %(sea)s" % {'sea': seaname})
18224e14e45b73ff716f4282aaff3e06c4584866
18,867
def get_text_ls(filename):
    """Returns text of file as a list of strings"""
    with open(filename, 'r') as f_in:
        return f_in.readlines()
5b788f38e683c82648224b7cc1b5875a0f602dce
11,083
def get_commerce_url(root_url: str) -> str:
    """Get the Kamereon base commerce url."""
    return f"{root_url}/commerce/v1"
3e56bfcb13d476a2668d756673bf7c0425404baa
576,517
def get_kernel_rpm_release(rpm):
    """
    Get the release of a kernel RPM as an integer.

    :param rpm: An instance of an RPM derived model.
    """
    return int(rpm.release.split('.')[0])
c81e78457e70da29a2b2a4e8d13eb65ef4a16a10
594,794
def _module_name_to_class_name(module_name): """Converts the given name of a module containing a tool test settings class into a class name. """ # Settings for some of our tools have to be treated specially because the # generic conversion below is inadequate. if module_name == 'idaplugin_test_settings': return 'IDAPluginTestSettings' elif module_name == 'r2plugin_test_settings': return 'R2PluginTestSettings' elif module_name == 'bin2pat_test_settings': return 'Bin2PatTestSettings' # Generic conversion. For example, 'tool_test_settings' is converted to # 'ToolTestSettings'. return ''.join(part.title() for part in module_name.split('_'))
f68309c565d9454b9fa1c64cb1ffee102a4a97a3
227,042
import struct
import binascii


def hex2double(s):
    """Convert Ephemeris Time hex string into double."""
    return struct.unpack('d', binascii.unhexlify(s))[0]
2320ba8352cf880ec61e8d867ac7d648f818ae45
670,963
def path_to_key(datastore, path): """ Translates a file system path to a datastore key. The basename becomes the key name and the extension becomes the kind. Examples: /file.ext -> key(ext, file) /parent.ext/file.ext -> key(ext, parent, ext, file) """ key_parts = [] path_parts = path.strip(u'/').split(u'/') for n, x in enumerate(path_parts): name, ext = x.rsplit('.', 1) key_parts.extend([ext, name]) return datastore.key(*key_parts)
53b52478f5c451f17498020b2ec316bbb4e8a822
253,581
import random


def simulate_ip() -> str:
    """
    Simulates a random IP address

    Returns:
        A random IP address (str)
    """
    # each octet of an IPv4 address lies in the range 0-255
    return "{}.{}.{}.{}".format(
        random.randint(0, 255),
        random.randint(0, 255),
        random.randint(0, 255),
        random.randint(0, 255)
    )
faf424a9025244220f2b4c0b9bffad95243a152b
117,390
import re def get_offer_total_floors(html_parser, default_value=''): """ This method returns the maximal number of floors in the building. :param html_parser: a BeautifulSoup object :rtype: string :return: The maximal floor number """ # searching dom for floor data floor_raw_data = html_parser.find(class_="param_floor_no") if hasattr(floor_raw_data, 'span'): floor_data = floor_raw_data.span.text else: return default_value # extracting information about floor match = re.search(r"\w+\s(?P<total>\d+)", floor_data) total_floors = default_value if match: total_floors = match.groupdict().get("total") return total_floors
170e75a04104f6fa1c544788c3d25324edd6b2e8
17,185
def gcd(first_number: int, second_number: int) -> int: """ gcd: Function which finds the GCD of two numbers. The GCD (Greatest Common Divisor) of two numbers is found by the Steins GCD Algorithm. Args: first_number (int): First number second_number (int): Second number Returns: int: Returns the GCD of the two numbers. """ # GCD(0, second_number) == second_number; GCD(first_number, 0) == first_number, GCD(0, 0) == 0. if first_number == 0: return second_number if second_number == 0: return first_number # Finding K, where K is the greatest power of 2 that divides both first_number and second_number. k: int = 0 while ((first_number | second_number) & 1) == 0: first_number = first_number >> 1 second_number = second_number >> 1 k = k + 1 # Dividing first_number by 2 until first_number becomes odd while (first_number & 1) == 0: first_number = first_number >> 1 # From here on, 'first_number' is always odd. while second_number != 0: # If second_number is even, remove all factor of 2 in second_number while (second_number & 1) == 0: second_number = second_number >> 1 # Now first_number and second_number are both odd. Swap if necessary so first_number <= second_number, then set second_number = second_number - first_number (which is even). if first_number > second_number: # Swap u and v. first_number, second_number = second_number, first_number second_number = second_number - first_number # restore common factors of 2 return first_number << k
5475dba0153a9d73e6f339350afe7c811c6e7140
340,624
def transform(token, operations=None):
    """ Apply transformations to token. """
    for operation in operations if operations else []:
        token = operation(token)
        if token is None:
            break
    return token
a77ee0470c7073162f56d0318043b5901183542c
282,678
def _skip_nop_operations(task, pre=None, post=None): """If `task` is a NOP, then skip pre and post Useful for skipping the 'creating node instance' message in case no creating is actually going to happen. """ if not task or task.is_nop(): return [] if pre is None: pre = [] if post is None: post = [] if not isinstance(pre, list): pre = [pre] if not isinstance(post, list): post = [post] return pre + [task] + post
9a6f0e27a46c6df22bcc518ff444cf185da4030b
504,131
def as_digits(number, min_digits): """ break a positive number into an array of digits 1244 -> [1, 2, 4, 4] 0 -> [0] -222999 -> [-222999] """ n = number arr = [] while n >= 10: arr.append(n % 10) n = round(n) // 10 arr.append(round(n)) if len(arr) < min_digits: for i in range(min_digits - len(arr)): arr.append(0) return list(reversed(arr))
da5848506d06da3f10ea5497c7b16c07f72bce5c
187,408
def split_grads_by_size(threshold_size, device_grads):
    """Break gradients into two sets according to tensor size.

    Args:
        threshold_size: int size cutoff for small vs large tensor.
        device_grads: List of lists of (gradient, variable) tuples. The outer
            list is over devices, the inner list is over individual gradients.

    Returns:
        small_grads: Subset of device_grads where shape is <= threshold_size
            elements.
        large_grads: Subset of device_grads where shape is > threshold_size
            elements.
    """
    small_grads = []
    large_grads = []
    for dl in device_grads:
        small_dl = []
        large_dl = []
        for (g, v) in dl:
            tensor_size = g.get_shape().num_elements()
            if tensor_size <= threshold_size:
                small_dl.append([g, v])
            else:
                large_dl.append([g, v])
        if small_dl:
            small_grads.append(small_dl)
        if large_dl:
            large_grads.append(large_dl)
    return small_grads, large_grads
2e182650be07b1ce2bfe6cc3ee4b34c9915160f7
660,477
def is_prime(p):
    """
    Returns True if p is a prime

    This function uses Fermat's little theorem to quickly remove
    most candidates.
    https://en.wikipedia.org/wiki/Fermat%27s_little_theorem
    """
    if p == 2:
        return True
    elif p <= 1 or p % 2 == 0 or 2**(p-1) % p != 1:
        return False
    return all(p % n != 0 for n in range(3, int(p ** 0.5 + 1), 2))
463de539dcc346cf35ad78a1a020782d13acdf93
98,578
import requests def get_pypi_package_data(package_name, version=None): """ Get package data from pypi by the package name https://wiki.python.org/moin/PyPIJSON :param package_name: string :param version: string :return: dict """ pypi_url = 'https://pypi.org/pypi' if version: package_url = '%s/%s/%s/json' % (pypi_url, package_name, version, ) else: package_url = '%s/%s/json' % (pypi_url, package_name, ) try: response = requests.get(package_url) except requests.ConnectionError: raise RuntimeError('Connection error!') # Package not available on pypi if not response.ok: return None return response.json()
955c805b679b6656b5d1b8aead09860723a2a9d3
483,703
def checkbytes_lt128(file):
    """
    Check if all bytes of a file are less than decimal 128.
    Returns : True for an ASCII encoded text file else False.
    """
    with open(file, 'rb') as f:
        content = f.read()
    return all(b < 128 for b in content)
d492dce226a55e12fe34cbac8f32b16267062b42
125,027
def annotation_as_image_size(label):
    """Convert a VOC detection label to the image size.

    Args:
        label (dict): an image label in the VOC detection format.

    Returns:
        tuple: width, height of image.
    """
    width = int(label['annotation']['size']['width'])
    height = int(label['annotation']['size']['height'])
    return width, height
68bc99b67a40ffdd8ca3772189c5be45bd0f775f
295,291
import functools import warnings def deprecated(_func=None, *, print_msg=None): """ This is a decorator which can be used to mark functions as deprecated. It will result in a warning being emitted when the function is used. Args: print_msg: Information to point user to newest version. """ def decorator_deprecated(func): @functools.wraps(func) def wrapper_decorator(*args, **kwargs): with warnings.catch_warnings(): if print_msg is None: warnings.warn(f"\nFunction deprecated: {func.__name__}", category=DeprecationWarning, stacklevel=2) else: warnings.warn(f"\nFunction deprecated: {func.__name__}" f"\n{print_msg}", category=DeprecationWarning, stacklevel=2) return func(*args, **kwargs) return wrapper_decorator if _func is None: return decorator_deprecated else: return decorator_deprecated(_func)
1d2849a95702f470fb8933b695712839fc71a90f
587,112
def smaller_root(a, b, c):
    """
    Returns the smaller root of a quadratic equation with the given coefficients.
    """
    dis = (b ** 2) - (4 * a * c)
    if dis < 0:
        return "Error: No real solutions"
    else:
        s = (-b - (dis ** 0.5)) / (2 * a)
        return s
085905ea3081cb33db98c092a7c7db9589190012
164,773
from typing import Optional from typing import Any def safe_min(*values) -> Optional[Any]: """ Find the min value in a list. Ignore None values. Args: *values: all values to be compared Returns: min value in the list or None Examples: >>> safe_min(None, 5, 2, 1, None, 5) 1 >>> safe_min(None, None) is None True """ min_value = None for value in values: if value is None: continue if min_value is None: min_value = value elif value < min_value: min_value = value return min_value
2c38380df97eb84f04bb751df1725d953806220d
372,806
def all_same(items):
    """Returns bool for checking if all items in a list are the same."""
    return all(x == items[0] for x in items)
e89945fd1c6151fd50db1ce92c7269642dc5eac6
477,577
def solucion_a(texto: str) -> str:
    """Returns the entered text without uppercase letters.

    :param texto: Text
    :type texto: str
    :return: Entered text without uppercase letters
    :rtype: str
    """
    resultado = ""
    for letra in texto:
        if not letra.isupper():
            resultado += letra
    return resultado
637c663f00a4cb58a8e061d61e72eba54f48ace5
259,653
def row_hasindel(r): """ Return True if the row has an indel. Parameters ---------- r : pandas.Series VCF row. Returns ------- bool True if the row has an indel. Examples -------- >>> from fuc import pyvcf >>> data = { ... 'CHROM': ['chr1', 'chr1', 'chr1', 'chr1'], ... 'POS': [100, 101, 102, 103], ... 'ID': ['.', '.', '.', '.'], ... 'REF': ['G', 'CT', 'A', 'C'], ... 'ALT': ['A', 'C', 'C,AT', 'A'], ... 'QUAL': ['.', '.', '.', '.'], ... 'FILTER': ['.', '.', '.', '.'], ... 'INFO': ['.', '.', '.', '.'], ... 'FORMAT': ['GT', 'GT', 'GT', 'GT'], ... 'Steven': ['0/1', '0/1', '1/2', '0/1'], ... } >>> vf = pyvcf.VcfFrame.from_dict([], data) >>> vf.df CHROM POS ID REF ALT QUAL FILTER INFO FORMAT Steven 0 chr1 100 . G A . . . GT 0/1 1 chr1 101 . CT C . . . GT 0/1 2 chr1 102 . A C,AT . . . GT 1/2 3 chr1 103 . C A . . . GT 0/1 >>> vf.df.apply(pyvcf.row_hasindel, axis=1) 0 False 1 True 2 True 3 False dtype: bool """ ref_has = len(r['REF']) > 1 alt_has = max([len(x) for x in r['ALT'].split(',')]) > 1 return ref_has or alt_has
b79053ae0df480662848b05e146452d29375cdea
428,516
def clip_box(box, shape): """ Clip box for given image shape. Args: box (array_like[int]): Box for clipping in the next format: [y_min, x_min, y_max, x_max]. shape (tuple[int]): Shape of image. Returns: array_like[int]: Clipped box. """ ymin, xmin, ymax, xmax = box if ymin < 0: ymin = 0 elif ymin >= shape[0]: ymin = shape[0] - 1 box[0] = ymin if xmin < 0: xmin = 0 elif xmin >= shape[1]: xmin = shape[1] - 1 box[1] = xmin if ymax < 0: ymax = 0 elif ymax >= shape[0]: ymax = shape[0] - 1 box[2] = ymax if xmax < 0: xmax = 0 elif xmax >= shape[1]: xmax = shape[1] - 1 box[3] = xmax return box
7cb329c630dc5eef47e87a8e32dab4d5a97739ae
670,044
from typing import List


def check_padding(padding: List[int], bounds: List[int]):
    """This function checks whether padding is within the limits supported by the NPU"""
    if len(padding) != 4 or len(bounds) != 4:
        return False
    top, left, bottom, right = padding
    topb, leftb, bottomb, rightb = bounds
    return not (top > topb or left > leftb or bottom > bottomb or right > rightb)
c9f2f2e14ddacc0697c905c7d3cc871c4e7cacb2
547,075
def uniq(l):
    """Return list without duplication."""
    return list(set(l))
c1fecd07f5e2c1d0ff7f19a2f4356be15b430a7e
409,212
def filter_interval_tensor(sample, interval, data_frequency, apply_indices=[0]):
    """Cut out a specific interval from a sample of EEG-Data.

    :param sample: sample consisting of channel_number * data_points
    :type sample: tensor
    :param interval: two values specifying the starting and end point in seconds
        of the interval to be cut out. Each sample consists of a 4.5 second
        EEG-data window. These 4.5 seconds consist of:
            Concentration Interval - 0.5 s
            Cue Interval - 0.5 s
            Action Interval - 2.5 s
            Relax Interval - 1 s
    :type interval: list of two floating point numbers
    :param data_frequency: specifies the frequency the provided data was measured at
    :type data_frequency: floating point number
    :param apply_indices: specifies on what elements of sample to apply the function,
        defaults to [0]
    :type apply_indices: list of integers, optional
    :return: cut sample
    :rtype: tensor
    """
    sample = list(sample)
    start_index_interval = int(interval[0] * data_frequency)
    end_index_interval = int(interval[1] * data_frequency)
    for index in apply_indices:
        sample[index] = sample[index][:, start_index_interval:end_index_interval]
    return sample
d1221680d32d07bcfe7f47af8797f895b58d030a
223,031
import re def format_row(row): """Formats the row Args: row (string): The row of the file. Returns: dict: The dictionary containing the following attributes: - query (string): The query. - document (string): The document. - relevance (integer): The relevance label. """ splitted_values = re.split(r"\t+", row) if len(splitted_values) == 3: rel, query, document = splitted_values return { "query": query.strip(), "document": document.strip(), "relevance": 1 if int(rel.strip()) > 0 else 0, } else: return None
da779f84f79e2b9feb8cdb47213525605a8d14cc
124,038
def pkg_as_json(pkg):
    """Return a dictionary of information for the package."""
    result = {
        'name': pkg.name,
        'ensure': pkg.evr,
        'platform': pkg.arch}
    return result
db43d2eadf57f3a27d88d650f562c805fbadb2a2
119,979
from typing import Mapping def replace_name_in_key(key, rename: Mapping[str, str]): """Given a dask collection's key, replace the collection name with a new one. Parameters ---------- key: string or tuple Dask collection's key, which must be either a single string or a tuple whose first element is a string (commonly referred to as a collection's 'name'), rename: Mapping of zero or more names from : to. Extraneous names will be ignored. Names not found in this mapping won't be replaced. Examples -------- >>> replace_name_in_key("foo", {}) 'foo' >>> replace_name_in_key("foo", {"foo": "bar"}) 'bar' >>> replace_name_in_key(("foo-123", 1, 2), {"foo-123": "bar-456"}) ('bar-456', 1, 2) """ if isinstance(key, tuple) and key and isinstance(key[0], str): return (rename.get(key[0], key[0]),) + key[1:] if isinstance(key, str): return rename.get(key, key) raise TypeError(f"Expected str or tuple[str, Hashable, ...]; got {key}")
71f04aa9b6fdca4aa1569cd9d31af5c0126ca90d
152,422
def __sbox_single_byte(byte, sbox):
    """S-Box substitution of a single byte"""
    row = byte // 16
    col = byte % 16
    return sbox[row][col]
3091bf7da9d6365713f11f8659c56d0c815965ec
59,717
def append_lesson_content(from_lesson, to_lesson):
    """Append lesson properties from the sending lesson to the receiving lesson"""
    to_lesson.objectives = from_lesson.objectives
    to_lesson.activity_title = from_lesson.activity
    to_lesson.title = from_lesson.title
    to_lesson.notes = from_lesson.notes
    to_lesson.video = from_lesson.video
    to_lesson.activity_listed = from_lesson.activity_listed
    return to_lesson
c0fbb8c2f649bf5de2e346ab589ce7080caef60f
361,200
def euclidean(p1: tuple[int, int], p2: tuple[int, int]) -> float:
    """Calculate the distance between two points.

    Args:
        p1: First point.
        p2: Second point.

    Returns:
        The distance between the two points.
    """
    diff_x = p1[0] - p2[0]
    diff_y = p1[1] - p2[1]
    return (diff_x ** 2 + diff_y ** 2) ** 0.5
cd78ac92ff6792401a724247739ef6d0db825bb9
617,627
import math


def Diameter(volume, factor=3/math.pi/4, exp=1/3.0):
    """Converts a volume to a diameter.

    d = 2r = 2 * (3/4/pi V)^1/3
    """
    return 2 * (factor * volume) ** exp
9b2d49568e0f2494647152f7e29006d4a770e1ca
217,422
def get_relative_freqs(type2freq): """ Calculates the relative frequency (proportion) of each type in a system Parameters ---------- type2freq: dict Keys are types of a system and values are frequencies of those types Returns ------- type2p: dict Keys are types and values are relative (normalized) frequencies """ n = sum(type2freq.values()) type2p = {t: s / n for t, s in type2freq.items()} return type2p
15c33e0ed235543430e9c1925593916135d96615
72,084
def format_direction(direction): """ Examples -------- >>> format_direction('ra') 'ra' >>> format_direction('el') 'alat' >>> format_direction('az') 'alon' """ lowerdir = direction.lower() if lowerdir == 'el': return 'alat' elif lowerdir == 'az': return 'alon' return direction
d961ff38fd68b19a8a5975cb2fe1a80af09d8c56
632,661
import warnings def data_order(iaf, ibf, order, multite=False): """ Determine the data format and ordering from ``iaf`` and ``ibf`` options If ``iaf`` is not specified (None or empty string), TC pairs are specified if the ``order`` parameter is given and contains the ``l`` character. Otherwise differenced data is assumed. If neither ``ibf`` nor ``order`` are specified, ``ibf=rpt`` is guessed. If ``iaf`` is not ``diff`` and ``order`` does not include the ``l` character it is assumed that the encoded images (e.g. TC pairs) are the fastest varying. If any parameters had to be guessed (rather than inferred from other information) a warning is output. :return: Tuple of: IAF (ASL format, 'tc', 'ct', 'diff', 've', 'vediff', 'hadamard' or 'mp'), ordering (sequence of 2 or three chars, 'l'=labelling images, 'r'=repeats, 't'=TIs/PLDs, 'e'=TEs. The characters are in order from fastest to slowest varying), Boolean indicating whether the block format needed to be guessed """ if order is not None: order = order.lower() if not iaf: if order is not None and "l" in order: # Order specified labelling images so guess TC pairs as this is most common warnings.warn("Data format was not specified - assuming TC pairs") iaf = "tc" elif not order: # No order specified - guess differenced for compatibility with Oxford ASL warnings.warn("Data format was not specified - assuming differenced") iaf = "diff" else: # Order specified and did not include labelling images so we are entitled # to assume differenced data without a warning iaf = "diff" elif iaf not in ("diff", "tc", "ct", "mp", "ve", "vediff", "hadamard"): raise ValueError("Unrecognized data format: iaf=%s" % iaf) ibf_guessed = False if not order: if not ibf: # Guess but defer warning until we have extracted TIs as it doesn't matter # for single TI data ibf = "rpt" ibf_guessed = True order_map = { "rpt" : "tr", "tis" : "rt", } order = order_map.get(ibf.lower(), None) if not order: raise ValueError("Unrecognized data block format: ibf=%s" % ibf) if iaf != "diff" and "l" not in order: order = "l" + order if multite and "e" not in order: # TE ordering is optional since most data is single TE. Insert it at the # start as this seems to make most sense in terms of multi-TE acquisition order = "e" + order for char in order: if char not in ('l', 'r', 't', 'e'): raise ValueError("Unrecognized character in data ordering: '%s'" % char) return iaf, order, ibf_guessed
fff663af2c0092271c62ae74e0fe97a5d7e243a3
214,371
def groupIntersections(intersections: list, key: int) -> dict: """ Function to group horizontal or vertical intersections Groups horizontal or vertical intersections as a list into a dict by the given key Parameters: intersections (list): List of tuples representing intersection points key (int): Tuple index to group by (0 for rows and 1 for columns) Returns: dict: Lists of intersections as values grouped by key """ groupedIntersections = {} for intersection in intersections: keyValue = intersection[key] if keyValue not in groupedIntersections.keys(): groupedIntersections[keyValue] = [] group = groupedIntersections[keyValue] group.append(intersection) return groupedIntersections
45f33715dc9917eb224376c2bedede1a8c49c48f
636,256
from typing import Tuple from typing import Dict from typing import Union from typing import List def parse_stan_vars( names: Tuple[str, ...] ) -> Tuple[Dict[str, Tuple[int, ...]], Dict[str, Tuple[int, ...]]]: """ Parses out Stan variable names (i.e., names not ending in `__`) from list of CSV file column names. Returns a pair of dicts which map variable names to dimensions and variable names to columns, respectively, using zero-based column indexing. Note: assumes: (a) munged varnames and (b) container vars are non-ragged and dense; no checks size, indices. """ if names is None: raise ValueError('missing argument "names"') dims_map: Dict[str, Tuple[int, ...]] = {} cols_map: Dict[str, Tuple[int, ...]] = {} idxs = [] dims: Union[List[str], List[int]] for (idx, name) in enumerate(names): idxs.append(idx) var, *dims = name.split('[') if var.endswith('__'): idxs = [] elif len(dims) == 0: dims_map[var] = () cols_map[var] = tuple(idxs) idxs = [] else: if idx < len(names) - 1 and names[idx + 1].split('[')[0] == var: continue dims = [int(x) for x in dims[0][:-1].split(',')] dims_map[var] = tuple(dims) cols_map[var] = tuple(idxs) idxs = [] return (dims_map, cols_map)
ba334eb66990d58640d9d978786e89e5555b5d71
623,853
def split_phylogeny(p, level="s"): """ Return either the full or truncated version of a QIIME-formatted taxonomy string. :type p: str :param p: A QIIME-formatted taxonomy string: k__Foo; p__Bar; ... :type level: str :param level: The different level of identification are kingdom (k), phylum (p), class (c),order (o), family (f), genus (g) and species (s). If level is not provided, the default level of identification is species. :rtype: str :return: A QIIME-formatted taxonomy string up to the classification given by param level. """ level = level+"__" result = p.split(level) return result[0]+level+result[1].split(";")[0]
b61d5cafa1ca7f56c37195c619e8f02f9b925dab
578,669
import random def split_dataFrames(df, trimmedRUL, splittingRatio): """Split the dataframes according to the indicated splitting ratio""" num_engines = df['Unit Number'].max() shuffledEngines = list(range(1,num_engines+1)) random.shuffle(shuffledEngines) i = int(splittingRatio*num_engines) num_crossVal = i num_train = num_engines - num_crossVal crossVal_engines = shuffledEngines[:i] train_engines = shuffledEngines[i:] trimmedRUL_train = trimmedRUL[:i] trimmedRUL_crossVal = trimmedRUL[i:] df_train = df[df['Unit Number'].isin(train_engines)] df_crossVal = df[df['Unit Number'].isin(crossVal_engines)] return (df_train, df_crossVal, num_train, num_crossVal, trimmedRUL_train, trimmedRUL_crossVal)
1190d9bb55c307daa753956eae88eb4806c52eb8
614,136
def validate_input(user_input, input_map): """ Checks user_input for validity and appends any validation errors to return data. Input: - user_input: Dictionary containing the following input keys: values - amount: float - prefix: string - type: string - base: string - input_map: Dictionary containing the following valid keys: values prefix: - valid_prefix_key: string type: - valid_type_key: string base: - valid_base_key: string Output: Validated user_input dictionary with errors list as key errors """ # Split out user_input keys amt_value = user_input['amount'] amt_prefix = user_input['prefix'].lower() amt_type = user_input['type'].lower() amt_base = user_input['base'].lower() # Set up error list to track any input issues errors = [] # Convert input amount from string to float for use in calculations try: amt_value = float(amt_value) except ValueError: # Append error string to errors list in user_input err_str = "'{}' is not a valid value for amount, use numbers only.".format(amt_value) errors.append(err_str) # Check user input against valid value lists valid_prefix = True if amt_prefix in input_map['prefix'] else False valid_type = True if amt_type in input_map['type'] else False valid_base = True if amt_base in input_map['base'] else False if not valid_prefix: # Append error string to errors list in user_input err_str = "'{}' is not a valid prefix: ({})".format( amt_prefix, ', '.join(input_map['prefix'])) errors.append(err_str) if not valid_type: # Append error string to errors list in user_input err_str = "'{}' is not a valid type: ({})".format( amt_type, ', '.join(input_map['type'])) errors.append(err_str) if not valid_base: # Append error string to errors list in user_input err_str = "'{}' is not a valid base: ({})".format( amt_base, ', '.join(input_map['base'])) errors.append(err_str) validated_input = { 'amount': amt_value, 'prefix': amt_prefix, 'type': amt_type, 'base': amt_base, 'errors': errors } return validated_input
ddeed40bc2cf8294c4fcb86251411290989030c2
422,539
import cgi


def check_content_type(content_type, mime_type):
    """Return True if the content type contains the MIME type"""
    mt, _ = cgi.parse_header(content_type)
    return mime_type == mt
faf22b8c546b6846a690d33fbaa4df5525cfc255
589,367
def has_tag(filename, tag):
    """Check if a filename has a given tag."""
    return f':{tag}' in filename
c6ae18c687a5bfeacfae635af3d858ec8bca0820
579,377
import re def find_and_replace(items, updates): """ aka find_and_replace apply all updates in updates list to all items in items list updates consist of a list of lists where the sub lists contain: (search_string, replace_string) """ for u in updates: search_string = u[0] replace_string = u[1] #print search_string pattern = re.compile(search_string) for item in items: if pattern.search(item): index = items.index(item) items.remove(item) #print "ORIGINAL ITEM: %s" % item item = pattern.sub(replace_string, item) # not sure if python replace is faster than re.sub: #journal.replace(pu[0], pu[1]) #print " NEW ITEM: %s" % item items.insert(index, item) return items
57f6f9ed15feb306584fd217d5b062307f2e6aac
616,783
def concat_attrs(*attrs, separator=' '):
    """
    Helper function to join string attributes using separator.
    """
    return separator.join([attr for attr in attrs if attr])
9cd81916c34fc0a04b65c5676bcea4a3eaa06c19
404,682
from datetime import datetime def is_date_format_correct(date_string): """ Check if date is in format YYYY-MM-DD Return: - True if format correct - False if format wrong """ if len(date_string) != 10: return False try: date = datetime.strptime(date_string, "%Y-%m-%d") except ValueError: return False if date > datetime.now(): return False return True
414093f4c109b8dda35b310acef50ad2d67ad601
518,210
def rekey(x, key_map=None): """Replace the feature keys according to the mapping in `key_map`. For example, if the dataset returns examples of the format: {'foo': 'something', 'bar': 'something else'} and key_map = {'boo': 'foo', 'spar': 'bar'} then this function will return examples with the format {'boo': 'something', 'spar': 'something else'} If a mapping is to an empty key or None, set the new key to an empty string. Args: x: an example to process. key_map: dictionary mapping new keys to original keys Returns: A preprocessed example with the format listed above. """ if key_map: return { new_key: x[old_key] if old_key else '' for new_key, old_key in key_map.items() } return x
b036a3f28eed2a1226dd4ca78de378cc93022a4f
538,670
def read_from_txt(file_path):
    """read data from txt file

    signal content:
        % ...
        % ...
        float; float
        ...
    border content:
        int \t int
        int \t int
        ...

    Args:
        file_path (str): txt file path

    Returns:
        data (list): signal data(float) or border data(int)
    """
    with open(file_path, mode='r', encoding='utf-8') as f:
        flag = ""
        data = []
        lines = f.readlines()
        if lines[0].startswith("%"):
            del lines[0:5]
        if ";" in lines[5]:
            # return signal data in this case
            flag = "signal"
            for line in lines:
                tar = line.split(";")[1]
                data.append(float(tar))
        else:
            # return border data in this case
            flag = "border"
            for line in lines:
                begin, end = line.split("\t")
                data.append([int(begin), int(end)])
    return data, flag
ac8aeebdbc62ef7fe90dddb4a5b500dbec3de698
430,624
def area_rhombus(diagonal_1: float, diagonal_2: float) -> float: """ Calculate the area of a rhombus. >>> area_rhombus(10, 20) 100.0 >>> area_rhombus(-1, -2) Traceback (most recent call last): ... ValueError: area_rhombus() only accepts non-negative values >>> area_rhombus(1, -2) Traceback (most recent call last): ... ValueError: area_rhombus() only accepts non-negative values >>> area_rhombus(-1, 2) Traceback (most recent call last): ... ValueError: area_rhombus() only accepts non-negative values """ if diagonal_1 < 0 or diagonal_2 < 0: raise ValueError("area_rhombus() only accepts non-negative values") return 1 / 2 * diagonal_1 * diagonal_2
5ed8737df583cda7fe976aa66b92f5d0ef568edd
417,977
def getFirstValid(opts, default):
    """Returns the first valid entry from `opts`, or `default` if none found.

    Valid is defined as ``if o`` returns true."""
    for o in opts:
        if o:
            return o
    return default
799a6ea4a993f0a112fa38b882566d72a0d223e0
6,320