Dataset columns:
    content — string, length 39 to 14.9k (a Python code snippet)
    sha1 — string, length 40
    id — int64, 0 to 710k
import typing


def remove_fields_with_value_none(fields: typing.Dict) -> typing.Dict:
    """
    Remove keys whose value is None.

    :param fields: the fields to clean
    :return: a copy of fields, without the None values
    """
    fields = dict((key, value) for key, value in fields.items() if value is not None)  # Strip out None values
    return fields
22d7ac2a77248809c691bdb98f5f6ebaaf6d4f2b
1,572
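A quick usage sketch of the snippet above (the sample dict is hypothetical); note that only None is dropped, not other falsy values:

    remove_fields_with_value_none({'a': 1, 'b': None, 'c': 0})
    # {'a': 1, 'c': 0}  -- 0 is kept, only None is removed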
import typing
import hashlib


def sha512(data: typing.Optional[bytes] = None):
    """Returns a sha512 hash object; optionally initialized with a string."""
    if data is None:
        return hashlib.sha512()
    return hashlib.sha512(data)
067fffc4c006d9c46e5037b07b86149ac15bb573
1,575
def nt(node, tag):
    """Returns the text of the tag, or None if the tag does not exist."""
    # Look the tag up once instead of calling find() three times
    found = node.find(tag)
    if found is not None and found.text is not None:
        return found.text
    return None
7ca5f83cf18f918f594374fa2aa875415238eef6
1,578
def _explored_parameters_in_group(traj, group_node):
    """Checks if one of the parameters in `group_node` is explored.

    :param traj: Trajectory container
    :param group_node: Group node
    :return: `True` or `False`
    """
    explored = False
    for param in traj.f_get_explored_parameters():
        if param in group_node:
            explored = True
            break
    return explored
71cbafbad0dcc3fa9294c0bede5f6a09941d452b
1,581
from pathlib import Path


def _construct_out_filename(fname, group_name):
    """
    Construct a specifically formatted output filename.

    The vrt will be placed adjacent to the HDF5 file, as such
    write access is required.
    """
    basedir = fname.absolute().parent
    basename = fname.with_suffix('.vrt').name.replace('wagl', group_name)
    out_fname = basedir.joinpath(Path(basename))
    return out_fname
117bb8470ab65f0b9fb11bb3151ae653e5e28d23
1,582
def change_app_header(uri, headers, body):
    """Add the Accept header for preview features of the GitHub Apps API."""
    headers["Accept"] = "application/vnd.github.machine-man-preview+json"
    return uri, headers, body
3610d1d482e057ba73a1901aed8430ff35d98f3b
1,588
import itertools


def largest_prime_factor(num):
    """Returns the largest prime factor of num."""
    prime_factors = []
    for n in itertools.count(2):
        if n > num:
            break
        if num % n == 0:
            prime_factors.append(n)
            # Divide out the factor completely; use // to stay in integers
            while num % n == 0:
                num = num // n
    return max(prime_factors)
12100b6cdc2e0553295c1803e699544aa930bbfb
1,590
def round_grade(grade: int) -> int:
    """
    Round the grade according to policy.

    Parameters
    ----------
    grade: int
        Raw grade.

    Returns
    -------
    rounded_grade: int
        Rounded grade.
    """
    if grade < 38:
        rounded_grade = grade
    else:
        closest_multiple_5 = (grade // 5 + 1) * 5
        if (closest_multiple_5 - grade) >= 3:
            rounded_grade = grade
        else:
            rounded_grade = closest_multiple_5
    return rounded_grade
8f1be9575d98b4ed24ff1e5904a5345d7ebc3e48
1,595
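For illustration, a few worked examples of the rounding policy above (inputs hypothetical):

    round_grade(33)  # 33  (below 38: never rounded)
    round_grade(38)  # 40  (next multiple of 5 is 40, difference 2 < 3)
    round_grade(57)  # 57  (next multiple of 5 is 60, difference 3: not rounded)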
def _urpc_test_func_2(buf):
    """!
    @brief u-RPC variable length data test function.
    @param buf A byte string buffer
    @return The same byte string repeated three times
    """
    return buf * 3
f13f7dcf45eaa0706b69eb09c63d29ba2bbd3d60
1,596
def versionString(version):
    """Create version string."""
    ver = [str(v) for v in version]
    numbers, rest = ver[:2 if ver[2] == '0' else 3], ver[3:]
    # Join any remaining parts (e.g. a pre-release tag) with a leading '-';
    # the original concatenated them with no separator at all.
    return '.'.join(numbers) + ('-' + '-'.join(rest) if rest else '')
2feec3f8ac5a1f2b848d0805dfa0c3ff53a44ead
1,597
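A short sketch of the corrected join behaviour (example tuples are hypothetical):

    versionString((1, 2, 0))         # '1.2'  (trailing zero patch dropped)
    versionString((1, 2, 3))         # '1.2.3'
    versionString((1, 2, 3, 'rc1'))  # '1.2.3-rc1'  (with the separator fix above)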
def pymongo_formatter(credentials):
    """Returns a DSN for a pymongo-MongoDB connection.

    Note that the username and password will still be needed separately in the
    constructor.

    Args:
        credentials (dict): The credentials dictionary from the relationships.

    Returns:
        (string) A formatted pymongo DSN.
    """
    return '{0}:{1}/{2}'.format(
        credentials['host'],
        credentials['port'],
        credentials['path']
    )
69216575258f297c368ec3015c1c14569bb82cd2
1,601
def compute_alphabet(sequences):
    """Returns the alphabet used in a set of sequences."""
    alphabet = set()
    for s in sequences:
        alphabet = alphabet.union(set(s))
    return alphabet
cf8f7dc1e31a28fe0910d806d18189aae7d7a85b
1,603
def Diff(a, b):
    """Returns the number of different elements between 2 iterables.

    Args:
        a(iterable): first iterable.
        b(iterable): second iterable.

    Returns:
        int: the number of different elements.
    """
    return sum(map(lambda x, y: bool(x - y), a, b))
0885bd224f956f138e80a4b681ebc581c733cc51
1,604
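A minimal usage sketch (inputs hypothetical); note that bool(x - y) requires numeric elements, and map truncates to the shorter iterable:

    Diff([1, 2, 3], [1, 5, 3])  # 1 -- only the middle elements differ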
def display_datetime(datetime_str, time_zone=None, verbose=True):
    """Returns a formatted datetime with TZ (if provided) or 'Error (Missing)'.

    >>> print(datetime.datetime.utcnow().strftime("%Y/%m/%d %a %I:%M %p"))
    2019/05/19 Sun 01:10 AM
    """
    if datetime_str:  # and type(datetime_str) == datetime.datetime.now():
        if verbose:
            return f'{datetime_str.strftime("%Y/%m/%d %a %I:%M %p")}{f" ({time_zone})" if time_zone else ""}'
        else:
            return f'{datetime_str.strftime("%a %I:%M %p")}{f" ({time_zone})" if time_zone else ""}'
    else:
        return 'Error (Missing)'
45caa488688e790ae19f8f3f2cda2cb0f250b1fd
1,608
import torch


def mask_channels(mask_type, in_channels, out_channels, data_channels=3):
    """Creates an autoregressive channel mask.

    Input:
        mask_type: str
            Either 'A' or 'B'. 'A' for first layer of network, 'B' for all others.
        in_channels: int
            Number of input channels to layer.
        out_channels: int
            Number of output channels of layer.
        data_channels: int
            Number of channels in the input data, e.g. 3 for RGB images. (default = 3).

    Output:
        mask: torch.FloatTensor
            Shape (out_channels, in_channels). A mask with 0 in places for masked elements.
    """
    in_factor = in_channels // data_channels + 1
    out_factor = out_channels // data_channels + 1

    base_mask = torch.ones([data_channels, data_channels])
    if mask_type == 'A':
        base_mask = base_mask.tril(-1)
    else:
        base_mask = base_mask.tril(0)

    mask_p1 = torch.cat([base_mask] * in_factor, dim=1)
    mask_p2 = torch.cat([mask_p1] * out_factor, dim=0)

    mask = mask_p2[0:out_channels, 0:in_channels]
    return mask
772fa71f63d2f31c80966db0b0eb43a70ac5e9a9
1,609
import textwrap


def dedent(text):
    """Remove all common indentation from every line but the 0th.

    This will avoid getting <code> blocks when rendering text via markdown.
    Ignoring the 0th line will also allow the 0th line not to be aligned.

    Args:
        text: A string of text to dedent.

    Returns:
        String dedented by above rules.

    For example:
        assertEquals("bar\nline1\nline2", dedent("bar\n line1\n line2"))
        assertEquals("bar\nline1\nline2", dedent(" bar\n line1\n line2"))
        assertEquals("bar\n line1\nline2", dedent(" bar\n line1\n line2"))
    """
    text = textwrap.dedent(text)
    text_lines = text.split('\n')
    text_not_first = "\n".join(text_lines[1:])
    text_not_first = textwrap.dedent(text_not_first)
    text = text_lines[0] + "\n" + text_not_first
    return text
b450a873c4c2b667d10c66985d19f8057aa205f9
1,610
def join_nonempty(l):
    """Join all of the nonempty strings with a plus sign.

    >>> join_nonempty(('x1 + x2 + x1:x2', 'x3 + x4'))
    'x1 + x2 + x1:x2 + x3 + x4'
    >>> join_nonempty(('abc', '', '123', ''))
    'abc + 123'
    """
    return ' + '.join(s for s in l if s != '')
041948f95caaef14cb96e761f08b4a84fba37d6e
1,615
def sieveEr(N):
    """
    input: positive integer 'N' > 2
    returns a list of prime numbers from 2 up to N.

    This function implements the algorithm called
    sieve of Eratosthenes.
    """
    # precondition
    assert isinstance(N, int) and (N > 2), "'N' must be an int and > 2"

    # beginList: contains all natural numbers from 2 up to N
    beginList = [x for x in range(2, N + 1)]

    ans = []  # this list will be returned

    # actual sieve of Eratosthenes
    for i in range(len(beginList)):
        for j in range(i + 1, len(beginList)):
            if (beginList[i] != 0) and (beginList[j] % beginList[i] == 0):
                beginList[j] = 0

    # filters actual prime numbers.
    ans = [x for x in beginList if x != 0]

    # precondition
    assert isinstance(ans, list), "'ans' must be of type list"

    return ans
8d48d2a491341d5302307597ad64ac4a37b1abb8
1,617
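A quick check of the sieve above (hypothetical input):

    sieveEr(20)  # [2, 3, 5, 7, 11, 13, 17, 19]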
def validate_fields(item, fields=None):
    """
    Check that all requested fields were returned.

    :param item: comment or submission
    :param fields: list[str]
    :return: tuple of (final_fields, missing_fields), each a set of field names
    """
    actual_fields = item.d_.keys()
    if fields is None:
        requested_fields = actual_fields
    else:
        requested_fields = fields
    missing_fields = set(requested_fields).difference(actual_fields)
    # drop extra fields returned from api
    final_fields = set(requested_fields).intersection(actual_fields)
    return final_fields, missing_fields
88bd6d20ba1cc04f8478128f7f32192ef680762b
1,618
from typing import Iterable


def remove_nones(sequence: Iterable) -> list:
    """Removes elements where bool(x) evaluates to False.

    Examples
    --------
    Normal usage::

        remove_nones(['m', '', 'l', 0, 42, False, True])
        # ['m', 'l', 42, True]
    """
    # Note this is redundant with it.chain
    return [x for x in sequence if x]
975c0104b3cc05bb82fa211c1b85b49c7d3cb174
1,619
from typing import List
import pathlib


def retrieve(passed: List[str]) -> List[str]:
    """
    Retrieves all items that are able to be converted, recursively, from the passed list.

    Parameters
    ----------
    passed: List[str]
        The items to search.

    Returns
    -------
    List[str]:
        All found items.
    """
    ret = []
    for item in passed:
        try:
            path = pathlib.Path(item)
            if path.is_file() and path.suffix == ".txt":
                ret += retrieve(path.read_text().split("\n"))
            elif path.is_file():
                ret.append(str(path))
            elif path.is_dir():
                ret += retrieve([str(p) for p in path.iterdir()])
            else:
                ret.append(item)
        except OSError:
            ret.append(item)
    return ret
6789255e302caf9dc6e481df532acec20dfc6b3c
1,620
def bq_to_rows(rows):
    """Reformat BigQuery's output to regular pnguin LOD data.

    Reformat BigQuery's output format so we can put it into a DataFrame

    Args:
        rows (dict): A nested list of key-value tuples that need to be
            converted into a list of dicts

    Returns:
        list: A list of dictionaries based on the input x
    """
    def _reformat(x):
        pairs = x.items()
        row = {}
        for pair in pairs:
            key, value = pair
            row[key] = value
        return row

    return [_reformat(x) for x in rows]
9ff842d1c41d7ebe5c822d4c07b2f26b5524b0fe
1,622
def average_saccades_time(saccades_times):
    """
    :param saccades_times: a list of tuples with (start_time_inclusive, end_time_exclusive)
    :return: returns the average time of saccades
    """
    return sum(saccade_time[1] - saccade_time[0] for saccade_time in saccades_times) / len(saccades_times)
a22a5d89ddd4317fa10ed6f5d920f17560028514
1,625
def _get_connection_params(resource):
    """Extract connection and params from `resource`."""
    args = resource.split(";")
    if len(args) > 1:
        return args[0], args[1:]
    else:
        return args[0], []
87cdb607027774d58d1c3bf97ac164c48c32395c
1,630
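Illustrative calls, assuming semicolon-separated resource strings as the snippet implies:

    _get_connection_params("db.host;timeout=5;retry=2")  # ('db.host', ['timeout=5', 'retry=2'])
    _get_connection_params("db.host")                    # ('db.host', [])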
def import_class(class_object):
    """
    Import a class given a string with its name in the format module.module.classname
    """
    d = class_object.rfind(".")
    class_name = class_object[d + 1:len(class_object)]
    m = __import__(class_object[0:d], globals(), locals(), [class_name])
    return getattr(m, class_name)
82df3ed7d646bd423ccefacc00493e917f13c430
1,633
def prepare_default_result_dict(key, done, nodes):
    """Prepares the default result `dict` using common values returned by any
    operation on the DHT.

    Returns:
        dict: with keys `(k, d, n)` for the key, done and nodes; `n` is a list
        of `dict` with keys `(i, a, x)` for id, address, and expiration.
    """
    d = {
        "k": key,
        "d": done,
    }
    nb = []
    for n in nodes:
        _node = n.getNode()
        nb.append({
            "i": n.getId().toString(),
            "a": _node.getAddr(),
            "x": _node.isExpired()
        })
    d["n"] = nb
    return d
420beb66352fee7b4d38f6b4cf628cbaa86a03df
1,635
def MatchScorer(match, mismatch):
    """Factory function that returns a score function set to match and mismatch.

    match and mismatch should both be numbers. Typically, match should be
    positive and mismatch should be negative.

    Resulting function has signature f(x,y) -> number.
    """
    def scorer(x, y):
        if x == y:
            return match
        else:
            return mismatch
    return scorer
fe3829efc64cb4d9785e52b8af6949c147481902
1,636
def _compute_paddings(height_pad_amt, width_pad_amt, patch_axes):
    """Convert the total pad amounts to the format needed by tf.pad()."""
    top_pad = height_pad_amt // 2
    bottom_pad = height_pad_amt - top_pad
    left_pad = width_pad_amt // 2
    right_pad = width_pad_amt - left_pad
    paddings = [[0, 0] for _ in range(4)]
    paddings[patch_axes[0]] = [top_pad, bottom_pad]
    paddings[patch_axes[1]] = [left_pad, right_pad]
    return paddings
3a5154ba0fa6808bc6dc8e20fcb4203324762ba9
1,637
def get_first(somelist, function):
    """Returns the first item of somelist for which function(item) is True."""
    for item in somelist:
        if function(item):
            return item
    return None
81976910c46102d3b15803d215f3bf5a554f9beb
1,638
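Example usage of the linear scan above (hypothetical inputs):

    get_first([1, 3, 4, 6], lambda x: x % 2 == 0)  # 4 (first even number)
    get_first([1, 3, 5], lambda x: x % 2 == 0)     # None (no match)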
import itertools


def remove_duplicates(llist):
    """
    Removes any and all duplicate entries in the specified list.
    This function is intended to be used during dataset merging and
    therefore must be able to handle list-of-lists.

    :param llist: The list to prune.
    :return: A list of unique elements only.
    """
    if not llist:
        return []
    llist.sort()
    return [x for x, _ in itertools.groupby(llist)]
cbdf1a4db99a7a5fac37f25776cc1387ed8c54e0
1,640
def kubernetes_node_label_to_dict(node_label):
    """Load a Kubernetes node label into a Python dict."""
    if node_label:
        label_name, value = node_label.split("=")
        return {label_name: value}
    return {}
c856d4e6d1f2169f7028ce842edc881cbca4e783
1,643
def smallest_subarray_with_given_sum(arr, s):
    """Find the length of the smallest subarray whose sum is >= s.

    Time: O(n)
    Space: O(1)

    >>> smallest_subarray_with_given_sum([2, 1, 5, 2, 3, 2], 7)
    2
    >>> smallest_subarray_with_given_sum([2, 1, 5, 2, 8], 7)
    1
    >>> smallest_subarray_with_given_sum([3, 4, 1, 1, 6], 8)
    3
    """
    win_sum = 0
    win_start = 0
    min_len = 0

    for win_end in range(len(arr)):
        win_sum += arr[win_end]
        while win_sum >= s:
            cur_len = win_end - win_start + 1
            if min_len == 0 or cur_len < min_len:
                min_len = cur_len
            win_sum -= arr[win_start]
            win_start += 1

    return min_len
4a1d63619fc200c32ffae80dc7d404f486efcdd1
1,648
def teraflops_for_accelerator(accel):
    """
    Stores the number of TFLOPs available to a few accelerators, including driver handicaps.

    Args:
        accel (str): A string descriptor of which accelerator to use. Must be either "3090" or "V100".

    Returns:
        accel_flops (int): an integer of how many TFLOPs are in the accelerator.
    """
    accel_flops = {"3090": 71, "V100": 125}
    return accel_flops[accel]
a491beb06baf73325e2e7b5f0876e98ea312e2aa
1,650
def _name_cleaner(agent_name):
    """Renames agent_name to prettier string for plots."""
    rename_dict = {'correct_ts': 'Correct TS',
                   'kl_ucb': 'KL UCB',
                   'misspecified_ts': 'Misspecified TS',
                   'ucb1': 'UCB1',
                   'ucb-best': 'UCB-best',
                   'nonstationary_ts': 'Nonstationary TS',
                   'stationary_ts': 'Stationary TS',
                   'greedy': 'greedy',
                   'ts': 'TS',
                   'action_0': 'Action 0',
                   'action_1': 'Action 1',
                   'action_2': 'Action 2',
                   'bootstrap': 'bootstrap TS',
                   'laplace': 'Laplace TS',
                   'thoughtful': 'Thoughtful TS',
                   'gibbs': 'Gibbs TS'}
    if agent_name in rename_dict:
        return rename_dict[agent_name]
    else:
        return agent_name
e874745e804e07e385b377ec0ecd4247640ef6ce
1,653
def hamming(s1, s2):
    """Return the hamming distance between 2 DNA sequences."""
    return sum(ch1 != ch2 for ch1, ch2 in zip(s1, s2)) + abs(len(s1) - len(s2))
e3e1f3e9cc883f27d26f00c1b3c9495d29c1a139
1,659
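Usage sketch; the length-difference term penalises unequal-length sequences (inputs hypothetical):

    hamming("GATTACA", "GACTATA")  # 2
    hamming("GATT", "GATTACA")     # 3 (0 mismatches + 3 extra bases)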
def nasnet_dual_path_scheme_ordinal(module, x, _):
    """
    NASNet specific scheme of dual path response for an ordinal module with
    dual inputs/outputs in a DualPathSequential module.

    Parameters
    ----------
    module : nn.Module
        A module.
    x : Tensor
        Current processed tensor.

    Returns
    -------
    x_next : Tensor
        Next processed tensor.
    x : Tensor
        Current processed tensor.
    """
    return module(x), x
aef487a25bc3349f14a112826ee4f8e8912dd324
1,660
def count_path_recursive(m, n):
    """Count number of paths with the recursive method."""
    def traverse(m, n, location=(1, 1)):
        # return 0 if past edge
        if location[0] > m or location[1] > n:
            return 0
        # return 1 if at end position
        if location == (m, n):
            return 1
        return (traverse(m, n, (location[0] + 1, location[1]))
                + traverse(m, n, (location[0], location[1] + 1)))

    return traverse(m, n)
ad31718d179bf46966117ecfa414807e6d356634
1,665
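Sanity check against the closed form C(m+n-2, m-1) for lattice paths (hypothetical inputs):

    count_path_recursive(2, 2)  # 2
    count_path_recursive(3, 3)  # 6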
def _build_xyz_pow(name, pref, l, m, n, shift=2):
    """Builds an individual row contraction line.

    name = pref * xc_pow[l] * yc_pow[m] * zc_pow[n]
    """
    l = l - shift
    m = m - shift
    n = n - shift

    if (pref <= 0) or (l < 0) or (n < 0) or (m < 0):
        return None

    mul = " "
    if pref == 1:
        ret = name + " ="
    else:
        # Basically always an int
        ret = name + " = %2.1f" % float(pref)
        mul = " * "

    if l > 0:
        ret += mul + "xc_pow[%d]" % (l - 1)
        mul = " * "

    if m > 0:
        ret += mul + "yc_pow[%d]" % (m - 1)
        mul = " * "

    if n > 0:
        ret += mul + "zc_pow[%d]" % (n - 1)
        mul = " * "

    if mul == " ":
        ret += " 1"

    return ret
0dbae02252b27845e795a586e2e28b58c948fa1d
1,668
import torch


def hamming_dist(y_true, y_pred):
    """
    Calculate the Hamming distance between a given predicted label and the
    true label. Assumes inputs are torch Variables!

    Args:
        y_true (autograd.Variable): The true label
        y_pred (autograd.Variable): The predicted label

    Returns:
        (float): The Hamming distance between the two vectors
    """
    # Make sure y_pred is rounded to 0/1
    y_pred = torch.round(y_pred)
    result = torch.mean(torch.abs(y_true - y_pred), dim=1)
    result = torch.mean(result, dim=0)
    return float(result.data.cpu().numpy())
0edda102820626b824861ac0f05d4d77f5def432
1,677
import re


def rmchars(value):
    """Remove special characters from alphanumeric values except for period (.)
    and negative (-) characters.

    :param value: Alphanumeric value
    :type value: string
    :returns: Alphanumeric value stripped of any special characters
    :rtype: string

    >>> import utils
    >>> utils.rmchars(value = "*6.5_")
    '6.5'
    >>> utils.rmchars(value = "ICE")
    'ICE'
    >>> utils.rmchars(value = "-4.2")
    '-4.2'
    >>> utils.rmchars(value = "%&!@#8.32&#*;")
    '8.32'
    """
    value = re.sub("[^A-Za-z0-9.-]+", "", value)
    return value
63428103f7da4184c6d9f33a9d05b02ce17f2448
1,679
def _filename_pattern(ext):
    """Returns an re matching native or tfrecord files of format `ext`."""
    return r".*\.{}(\.tfrecord)?(\.gz)?".format(ext)
6ec5a86dbba2432293451ca7dff0a0d1d5091bf0
1,681
def _extract_protocol_layers(deserialized_data):
    """
    Removes unnecessary values from packets dictionaries.

    :param deserialized_data: Deserialized data from tshark.
    :return: List of filtered packets in dictionary format.
    """
    packets_filtered = []
    for packet in deserialized_data:
        packets_filtered.append(packet["_source"]["layers"])
    return packets_filtered
3c3a899909c5278b29ffb402ccb4d8dde24fce3a
1,682
def Get_Histogram_key(qubitOperator):
    """
    Function to obtain histogram key string for Cirq Simulator.

    e.g.
        PauliWord = QubitOperator('X0 Z2 Y3', 0.5j)
        returning: histogram_string = '0,2,3'

    Args:
        qubitOperator (openfermion.ops._qubit_operator.QubitOperator): QubitOperator

    Returns:
        histogram_string (str): Returns string corresponding to histogram key
            (required for Cirq simulator)
    """
    qubit_No, PauliStr = zip(*list(*qubitOperator.terms.keys()))
    histogram_string = ','.join([str(i) for i in qubit_No])
    return histogram_string
f574f7b3f6c43de7b3121d4e49240a84a4bcfdfc
1,686
def origin_trial_function_call(feature_name, execution_context=None):
    """Returns a function call to determine if an origin trial is enabled."""
    return 'RuntimeEnabledFeatures::{feature_name}Enabled({context})'.format(
        feature_name=feature_name,
        context=execution_context if execution_context else "execution_context")
201dbe8449373dbad0144633350d3e6adbb58b80
1,691
def get_bit(byteval, index) -> bool:
    """Retrieve bit value from byte at provided index."""
    return (byteval & (1 << index)) != 0
1fe020449ae2ae2513073835db6f75b24e558fdb
1,692
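Bit indexing is least-significant-first, as a quick sketch shows (values hypothetical):

    get_bit(0b1010, 1)  # True
    get_bit(0b1010, 0)  # False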
def toint16(i):
    """
    Convert a number to a hexadecimal string of length 2
    """
    return f'{i:02x}'
3effd2b3f011a962beac19682ad29e930eb0f057
1,695
def next(space, w_arr):
    """Advance the internal array pointer of an array."""
    length = w_arr.arraylen()
    current_idx = w_arr.current_idx + 1
    if current_idx >= length:
        w_arr.current_idx = length
        return space.w_False
    w_arr.current_idx = current_idx
    return w_arr._current(space)
668fec305ed6bbe05895f317e284c7d2e4f83189
1,702
def clean_record(raw_string: str) -> str:
    """
    Removes all unnecessary signs from a raw_string and returns it.

    :param raw_string: folder or file name to manage
    :return: clean value
    """
    for sign in ("'", '(', ')', '"'):
        raw_string = raw_string.replace(sign, '')
    return raw_string.replace(' ', '-').replace('--', '-')
ea484934dc10da879ede883287fc1d650cda74b8
1,704
def _find_data_between_ranges(data, ranges, top_k):
    """Finds the rows of the data that fall between each range.

    Args:
        data (pd.Series): The predicted probability values for the positive class.
        ranges (list): The threshold ranges defining the bins. Should include 0
            and 1 as the first and last value.
        top_k (int): The number of row indices per bin to include as samples.

    Returns:
        list(list): Each list corresponds to the row indices that fall in the
            range provided.
    """
    results = []
    for i in range(1, len(ranges)):
        mask = data[(data >= ranges[i - 1]) & (data < ranges[i])]
        if top_k != -1:
            results.append(mask.index.tolist()[: min(len(mask), top_k)])
        else:
            results.append(mask.index.tolist())
    return results
323986cba953a724f9cb3bad8b2522fc711529e5
1,706
def path_inclusion_filter_fn(path, param, layer):
    """Returns whether or not layer name is contained in path."""
    return layer in path
c93aa83e67c600cd83d053d50fbeaee4f7eebf94
1,709
def punctuation(chars=r',.\"!@#\$%\^&*(){}\[\]?/;\'`~:<>+=-'):
    """Returns a regex character class matching the given characters.
    Useful to preprocess text. Do not forget to escape special characters.
    """
    return rf'[{chars}]'
b2fd23d8485c3b6d429723a02a95c981982559b5
1,714
def log2_fold_change(df, samp_grps):
    """
    Calculate fold change - fixed as samp_grps.mean_names[0] over
    samp_grps.mean_names[1], where the mean names are sorted alphabetically.
    The log has already been taken, so the L2FC is calculated as mean0 - mean1.

    :param df: expanded and/or filtered dataframe
    :param samp_grps: SampleGroups() object
    :return: dataframe with fold change column appended, with name as in samp_grps.fc_name
    """
    mean1 = samp_grps.mean_names[0]
    mean2 = samp_grps.mean_names[1]
    df[samp_grps.fc_name] = df[mean1] - df[mean2]
    return df
07fcef6f5143095f4f8f77d0251bbd7ecd486fd9
1,716
def clean_key(func):
    """Provides a clean, readable key from the function name and module path."""
    module = func.__module__.replace("formfactoryapp.", "")
    return "%s.%s" % (module, func.__name__)
946288cd231148eb39af5d1e7e0b957d9f2131e8
1,719
def multiply(a, b):
    """
    Multiply values.

    Args:
        a ([float/int]): any value
        b ([float/int]): any value
    """
    return a * b
67a85b1675da48684e9de7e9834d3daa4357699b
1,720
def extract_traceback(notebook):
    """
    Extracts information about an error from the notebook.

    Parameters
    ----------
    notebook: :class:`nbformat.notebooknode.NotebookNode`
        Executed notebook to find an error traceback.

    Returns
    -------
    bool
        Whether the executed notebook has an error traceback.
    int or None
        Number of a cell with a traceback.
        If None, then the notebook doesn't contain an error traceback.
    str
        Error traceback if exists.
    """
    for cell in notebook['cells']:
        # Find a cell output with a traceback and extract the traceback
        outputs = cell.get('outputs', [])
        for output in outputs:
            traceback = output.get('traceback', [])
            if traceback:
                traceback = '\n'.join(traceback)
                return True, cell['execution_count'], traceback
    return False, None, ""
9af26f973e6810936eaa68058efcdb7bc145803b
1,723
def prod_cart(in_list_1: list, in_list_2: list) -> list:
    """
    Compute the Cartesian product of two lists.

    :param in_list_1: the first list to be evaluated
    :param in_list_2: the second list to be evaluated
    :return: the Cartesian product result as [[x,y],..]
    """
    _list = []
    for element_1 in in_list_1:
        for element_2 in in_list_2:
            _list.append([element_1, element_2])
    return _list
9fdbfc558f5ec3b11c78535b9125e0a1c293035e
1,727
async def cors_handler(request, handler):
    """Middleware to add CORS response headers."""
    response = await handler(request)
    response.headers['Access-Control-Allow-Origin'] = '*'
    return response
c9f33261b1fb2e6dc3ab3139e657106a94c5bfd1
1,730
import hmac
import hashlib


def get_proxy_signature(query_dict, secret):
    """
    Calculate the signature of the given query dict as per Shopify's
    documentation for proxy requests.

    See: http://docs.shopify.com/api/tutorials/application-proxies#security
    """
    # Sort and combine query parameters into a single string.
    sorted_params = ''
    for key in sorted(query_dict.keys()):
        sorted_params += "{0}={1}".format(key, ",".join(query_dict.getlist(key)))

    signature = hmac.new(secret.encode('utf-8'), sorted_params.encode('utf-8'), hashlib.sha256)
    return signature.hexdigest()
c234f18c1d44a936c4844ae2fe1b912a624eef61
1,732
import hashlib


def cal_md5(content):
    """
    Compute the MD5 hash of the string `content`.

    :param content:
    :return:
    """
    # encode the string before hashing
    result = hashlib.md5(content.encode())
    # get the hex digest of the hash
    md5 = result.hexdigest()
    return md5
0cd26654c364e34ecc27b0a0b4d410a539e286c3
1,736
def to_square_feet(square_metres):
    """Convert metres^2 to ft^2."""
    return square_metres * 10.7639
50510aad230efcb47662936237a232662fef5596
1,738
def ask_name(question: str = "What is your name?") -> str:
    """Ask for the user's name."""
    return input(question)
1cc9ec4d3bc48d7ae4be1b2cf8eb64a0b4f94b23
1,750
def last(*args):
    """Return last value from any object type - list, tuple, int, string."""
    if len(args) == 1:
        return int(''.join(map(str, args))) if isinstance(args[0], int) else args[0][-1]
    return args[-1]
ad8d836597dd6a5dfe059756b7d8d728f6ea35fc
1,751
def is_float(s):
    """Determine if a string can be converted to a floating point number."""
    try:
        float(s)
    except (TypeError, ValueError):
        # Narrowed from a bare except: these are the errors float() raises
        return False
    return True
2df52b4f8e0835d9f169404a6cb4f003ca661fff
1,752
def get_zcl_attribute_size(code):
    """
    Determine the number of bytes a given ZCL attribute takes up.

    Args:
        code (int): The attribute size code included in the packet.

    Returns:
        int: size of the attribute data in bytes, or -1 for error/no size.
    """
    # Map of ZCL data-type code -> size in bytes. The original flat tuple
    # listed 0x38 twice (2 and 4); per the ZCL data-type table these are
    # semi (0x38, 2 bytes), single (0x39, 4 bytes) and double (0x3a, 8 bytes).
    sizes = {
        0x00: 0,
        0x08: 1, 0x09: 2, 0x0a: 3, 0x0b: 4, 0x0c: 5, 0x0d: 6, 0x0e: 7, 0x0f: 8,
        0x10: 1,
        0x18: 1, 0x19: 2, 0x1a: 3, 0x1b: 4, 0x1c: 5, 0x1d: 6, 0x1e: 7, 0x1f: 8,
        0x20: 1, 0x21: 2, 0x22: 3, 0x23: 4, 0x24: 5, 0x25: 6, 0x26: 7, 0x27: 8,
        0x28: 1, 0x29: 3, 0x2a: 3, 0x2b: 4, 0x2c: 5, 0x2d: 6, 0x2e: 7, 0x2f: 8,
        0x30: 1, 0x31: 2,
        0x38: 2, 0x39: 4, 0x3a: 8,
        0x41: -1, 0x42: -1, 0x43: -1, 0x44: -1, 0x48: -1, 0x4c: -1,
        0x50: -1, 0x51: -1,
        0xe0: 4, 0xe1: 4, 0xe2: 4,
        0xe8: 2, 0xe9: 2, 0xea: 4,
        0xf0: 8, 0xf1: 16,
        0xff: 0,
    }
    return sizes.get(code, -1)
99782c86be2413410c6819a59eadf0daba326af2
1,758
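Hedged usage example against the table above (codes per the ZCL data-type table):

    get_zcl_attribute_size(0x21)  # 2  (16-bit unsigned integer)
    get_zcl_attribute_size(0x42)  # -1 (character string: no fixed size)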
import torch


def index_initial(n_batch, n_ch, tensor=True):
    """Tensor batch and channel index initialization.

    Args:
        n_batch (Int): Number of batch.
        n_ch (Int): Number of channel.
        tensor (bool): Return tensor or numpy array

    Returns:
        Tensor: Batch index
        Tensor: Channel index
    """
    batch_index = []
    for i in range(n_batch):
        batch_index.append([[i]] * n_ch)
    ch_index = []
    for i in range(n_ch):
        ch_index += [[i]]
    ch_index = [ch_index] * n_batch

    if tensor:
        batch_index = torch.tensor(batch_index)
        ch_index = torch.tensor(ch_index)
        if torch.cuda.is_available():
            batch_index = batch_index.cuda()
            ch_index = ch_index.cuda()

    return batch_index, ch_index
52a16ad4afcf931ba4cda9c014d47050970995c5
1,763
def boolToYes(b):
    """Convert a Boolean input into 'yes' or 'no'.

    Args:
        b (bool): The Boolean value to be converted

    Returns:
        str: 'yes' if b is True, and 'no' otherwise.
    """
    if b:
        return "yes"
    else:
        return "no"
ff94b66b5a166592062bf1d5b286b425e7997304
1,773
def vint_mask_for_length(length):
    """
    Returns the bitmask for the first byte of a variable-length integer
    (used for element ID and size descriptors).

    :arg length: the length of the variable-length integer
    :type length: int
    :returns: the bitmask for the first byte of the variable-length integer
    :rtype: int
    """
    return 0b10000000 >> (length - 1)
92fe3cb0fa09713ff4b650349294a2b241bb3918
1,774
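The mask marks the leading length bit of an EBML-style varint; a small sketch (inputs hypothetical):

    vint_mask_for_length(1)  # 0b10000000 (128)
    vint_mask_for_length(2)  # 0b01000000 (64)
    vint_mask_for_length(4)  # 0b00010000 (16)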
def createVskDataDict(labels, data):
    """Creates a dictionary of vsk file values from labels and data.

    Parameters
    ----------
    labels : array
        List of label names for vsk file values.
    data : array
        List of subject measurement values corresponding to the label
        names in `labels`.

    Returns
    -------
    vsk : dict
        Dictionary of vsk file values. Dictionary keys correspond to
        names in `labels` and dictionary values correspond to values in
        `data`.

    Examples
    --------
    This example tests for dictionary equality through python instead of
    doctest since python does not guarantee the order in which dictionary
    elements are printed.

    >>> labels = ['MeanLegLength', 'LeftKneeWidth', 'RightAnkleWidth']
    >>> data = [940.0, 105.0, 70.0]
    >>> res = createVskDataDict(labels, data)
    >>> res == {'MeanLegLength':940.0, 'LeftKneeWidth':105.0, 'RightAnkleWidth':70.0}
    True
    """
    vsk = {}
    # Loop variable renamed so it does not shadow the `data` parameter
    for key, value in zip(labels, data):
        vsk[key] = value
    return vsk
a4669e4a173aaeef534d13faceaeab869eb62cb3
1,779
import math


def fmt_bytes(size_bytes):
    """Return a nice 'total_size' string with Gb, Mb, Kb, and Byte ranges."""
    units = ["Bytes", "KB", "MB", "GB"]
    if size_bytes == 0:
        return f"{0} Bytes"
    for unit in units:
        digits = int(math.log10(size_bytes)) + 1
        if digits < 4:
            return f"{round(size_bytes, 1)} {unit}"
        size_bytes /= 1024
    return f"{size_bytes} TB"
40613403092bdc9d8dca8b0b487d5af6c887b075
1,783
from typing import Dict


def _remove_attribute(note_dict: Dict, attribute: str) -> Dict:
    """Create a copy of the note where a single attribute is removed."""
    d = dict(note_dict)
    d[attribute] = None
    return d
d2659b887c1a2a7c67f6785889db2aa2039f9627
1,786
def only_half_radius(
    subsampled_radius: float, full_diameter: float, radius_constraint: float
):
    """Check if radius is smaller than fraction of full radius."""
    assert 0.0 <= radius_constraint <= 1.0
    return subsampled_radius <= ((full_diameter / 2) * radius_constraint)
565c301932d5445e8bbb594085e65df63814663a
1,793
def sanitise_description(original: str) -> str:
    """
    Remove newlines from ticket descriptions.

    :param original: the string to sanitise
    :return: the same string, with newlines as spaces
    """
    return original.replace("\n", " ")
741aa7df758fb342a0d9a0fa182d24a643f5dbbc
1,794
def splitmod(n, k):
    """
    Split n into k lists containing the elements of n in positions i (mod k).
    Return the heads of the lists and the tails.
    """
    heads = [None] * k
    tails = [None] * k
    i = 0
    while n is not None:
        if heads[i] is None:
            heads[i] = n
        if tails[i] is not None:
            tails[i].next = n
        tails[i] = n
        # Detach the node and advance; RHS is evaluated before assignment
        n.next, n = None, n.next
        i = (i + 1) % k
    return heads, tails
a4a1885ce0c9541c534145d0236996a511cbdd00
1,797
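A runnable sketch under the assumption of a minimal singly-linked node type with a `next` attribute (the `Node` class and `chain` helper here are hypothetical, not part of the original record):

    class Node:
        def __init__(self, value):
            self.value = value
            self.next = None

    def chain(values):
        # Build a linked list from a Python list
        head = None
        for v in reversed(values):
            node = Node(v)
            node.next = head
            head = node
        return head

    heads, tails = splitmod(chain([0, 1, 2, 3, 4]), 2)
    # heads[0] chains 0 -> 2 -> 4, heads[1] chains 1 -> 3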
def _generate_var_name(prefix, field_name):
    """
    Generate the environment variable name, given a prefix and the
    configuration field name.

    Examples:
        >>> _generate_var_name("", "some_var")
        "SOME_VAR"
        >>> _generate_var_name("my_app", "some_var")
        "MY_APP_SOME_VAR"

    :param prefix: the prefix to be used, can be empty
    :param field_name: the name of the field from which the variable is derived
    """
    return (
        "_".join((prefix, field_name)).upper()
        if prefix
        else field_name.upper()
    )
9065d1deb76789582e68df779ec2c961a7d4aedc
1,800
import decimal


def truncate_decimal_places(value: decimal.Decimal, places: int = 1) -> float:
    """
    Truncate a float (i.e round towards zero) to a given number of decimal places.

    NB: Takes a decimal but returns a float!

    >>> truncate_decimal_places(12.364, 1)
    12.3

    >>> truncate_decimal_places(-12.364, 1)
    -12.3  # -12.3 is bigger than -12.4

    >>> truncate_decimal_places(12.364, 0)
    12.0  # rounding to 0 returns float with no decimal part
    """
    if places == 0:
        quantize_string = "1"
    else:
        quantize_string = "0." + ((places - 1) * "0") + "1"

    exponent = decimal.Decimal(quantize_string)
    decimal_result = value.quantize(exponent, rounding=decimal.ROUND_DOWN)
    return float(decimal_result)
11b924a5e4f6560674b1f7378f6a4001a3265a97
1,803
def flip_dict(d):
    """Returns a dict with values and keys reversed.

    Args:
        d: The dict to flip the values and keys of.

    Returns:
        A dict whose keys are the values of the original dict, and whose
        values are the corresponding keys.
    """
    return {v: k for k, v in d.items()}
c9c960209663639613739979c0dc4066a63c44cb
1,805
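One-line usage sketch (input hypothetical; note duplicate values collapse, keeping the last key):

    flip_dict({'a': 1, 'b': 2})  # {1: 'a', 2: 'b'}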
def has_sample(args):
    """Returns if some kind of sample id is given in args."""
    return args.sample or args.samples or args.sample_tag
c2ae87acb11232d7f56cb9e09eb8509720669058
1,807
def __column(matrix, i):
    """Returns a column from a bidimensional Python list (a list of lists)."""
    return [row[i] for row in matrix]
f455245eb8bbda90f185479afc85eecfb481c70c
1,810
def _path_list_creator(path, file_prefix_name, number_of_digits_zfill, file_suffix_name):
    """Creates a list of paths where the files have a predefined prefix,
    an incremental number and a predefined suffix on their name,
    respectively. E.g.: img01.zdf

    Args:
        path: a path that leads to the files directory
        file_prefix_name: a string that comes before the number
        number_of_digits_zfill: a number of digits in the number
        file_suffix_name: a string that comes after the number

    Returns:
        list_of_paths: list of appended paths
    """
    num = 1
    list_of_paths = []

    while True:
        file_path = path / f"{file_prefix_name}{str(num).zfill(number_of_digits_zfill)}{file_suffix_name}"
        list_of_paths.append(file_path)

        next_file_path = path / f"{file_prefix_name}{str(num + 1).zfill(number_of_digits_zfill)}{file_suffix_name}"
        if not next_file_path.exists():
            return list_of_paths

        num = num + 1
4850edbbf544284b0736ee52188bd53119c50fdf
1,813
import re


def tamper(payload, **kwargs):
    """
    Replaces instances of UNION with -.1UNION

    Requirement:
        * MySQL

    Notes:
        * Reference: https://raw.githubusercontent.com/y0unge/Notes/master/SQL%20Injection%20WAF%20Bypassing%20shortcut.pdf

    >>> tamper('1 UNION ALL SELECT')
    '1-.1UNION ALL SELECT'
    >>> tamper('1" UNION ALL SELECT')
    '1"-.1UNION ALL SELECT'
    """
    return re.sub(r"(?i)\s+(UNION )", r"-.1\g<1>", payload) if payload else payload
cbf4fc5b81bc7760aafe6cf65fa498945285e5bb
1,814
import json
from pathlib import Path
from typing import Dict, List, Union


def readJSONLFile(file_name: Union[str, Path]) -> List[Dict]:
    """
    Read a '.jsonl' file and create a list of dicts.

    Args:
        file_name: `Union[str,Path]`
            The file to open

    Returns:
        The list of dictionaries read from the 'file_name'
    """
    if isinstance(file_name, str):
        # Use a context manager so the file handle is closed after reading
        with open(file_name, 'r', encoding='utf-8') as f:
            lines = f.readlines()
    else:
        lines = file_name.read_text('utf-8').splitlines(False)
    return [json.loads(line) for line in lines]
8e33fad766a255578179828dc76ec793c02f90b9
1,818
from typing import List


def max_crossing_sum(lst: List[int], mid: int, n: int) -> int:
    """
    Parameter <mid> is the floor middle index of <lst>.
    Parameter <n> is the length of the input list <lst>.

    Pre: <lst> is a list of integers and len(lst) >= 2.
    Post: returns the maximum contiguous crossing sum starting from the middle of <lst>.

    >>> max_crossing_sum([2, -5, 8, -6, 10, -2], 3, 6)
    12
    """
    left_sum, right_sum, total = 0, 0, 0  # initialize values

    # max sum of the left half, scanning from index mid - 1 ... 0 backward
    k = mid - 1
    i = 0
    while i < mid:
        total += lst[k - i]
        i += 1
        if total > left_sum:
            left_sum = total

    total = 0

    # max sum of the right half
    for i in range(mid, n):  # iterate from index mid ... n - 1
        total += lst[i]
        if total > right_sum:
            right_sum = total

    # note: left_sum and right_sum are each at least zero
    return left_sum + right_sum
3d873907cb7ed0c14152ec3c2e92a742bd52aa85
1,820
def get_duration(df):
    """Get duration of ECG recording.

    Args:
        df (DataFrame): DataFrame with time/voltage data

    Returns:
        float: duration of ECG recording
    """
    start = df.time.iloc[0]
    end = df.time.iloc[-1]
    duration = end - start
    return duration
77698afc8ef7af557628d5fea760dc101c3e6112
1,823
def calc_triangular_number(n: int):
    """
    A triangular number or triangle number counts objects arranged in an
    equilateral triangle.

    More info: https://www.mathsisfun.com/algebra/triangular-numbers.html

    :param n:
    :return:
    """
    return int((n * (n + 1)) / 2)
e3bfefd6e0e9451849cee8f6da252ec128285c85
1,826
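Quick check against the formula n(n+1)/2 (input hypothetical):

    calc_triangular_number(4)  # 10 = 1 + 2 + 3 + 4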
def find_ccs(unmerged):
    """
    Find connected components of a list of sets.

    E.g.
        x = [{'a','b'}, {'a','c'}, {'d'}]
        find_ccs(x)
        [{'a','b','c'}, {'d'}]
    """
    merged = set()
    while unmerged:
        elem = unmerged.pop()
        # Accumulate every overlapping component into elem before re-adding,
        # so an element bridging several components joins them into one.
        for s in merged.copy():
            if not elem.isdisjoint(s):
                merged.remove(s)
                elem = set(elem).union(s)
        merged.add(frozenset(elem))
    return [list(x) for x in merged]
4bff4cc32237dacac7737ff509b4a68143a03914
1,827
def gen_image_name(reference: str) -> str:
    """
    Generate the image name as a signing input, based on the docker reference.

    Args:
        reference: Docker reference for the signed content,
            e.g. registry.redhat.io/redhat/community-operator-index:v4.9
    """
    no_tag = reference.split(":")[0]
    image_parts = no_tag.split("/")
    return "/".join(image_parts[1:])
ccaecfe91b5b16a85e3a3c87b83bbc91e54080b1
1,829
def get_chat_id(update):
    """
    Get chat ID from update.

    Args:
        update (instance): Incoming update.

    Returns:
        (int, None): Chat ID.
    """
    # Simple messages
    if update.message:
        return update.message.chat_id

    # Menu callbacks
    if update.callback_query:
        return update.callback_query.message.chat_id

    return None
1669382fd430b445ea9e3a1306c1e68bf2ec0013
1,830
import requests


def upload_record(data, headers, rdr_project_id):
    """Upload a supplied record to the research data repository."""
    request_url = f"https://api.figsh.com/v2/account/projects/{rdr_project_id}/articles"
    response = requests.post(request_url, headers=headers, json=data)
    return response.json()
7431234757668f9157f90aa8a9c335ee0e2a043b
1,834
def generate_url_fragment(title, blog_post_id):
    """Generates the url fragment for a blog post from the title of the blog post.

    Args:
        title: str. The title of the blog post.
        blog_post_id: str. The unique blog post ID.

    Returns:
        str. The url fragment of the blog post.
    """
    lower_title = title.lower()
    hyphenated_title = lower_title.replace(' ', '-')
    lower_id = blog_post_id.lower()
    return hyphenated_title + '-' + lower_id
c846e6203fa4782c6dc92c892b9e0b6c7a0077b5
1,835
def of_type(_type, value_1, *args) -> bool:
    """
    Check if a collection of values are of the same type.

    Parameters:
        _type (any): The type to check for.
        value_1 (any): The first value to check.
        *args (any): Rest of values to check against given type.

    Returns:
        (bool) whether or not all inputs of given type.
    """
    all_of_type = isinstance(value_1, _type)
    i = len(args)
    while i > 0 and all_of_type:
        all_of_type = isinstance(args[i - 1], _type)
        i -= 1
    return all_of_type
eab1e70655ff74b1cbfc338a893719b7f0681f4a
1,838
def hex_to_byte(hexStr):
    """Convert hex strings to bytes."""
    bytes = []
    hexStr = ''.join(hexStr.split(" "))
    for i in range(0, len(hexStr), 2):
        bytes.append(chr(int(hexStr[i:i + 2], 16)))
    return ''.join(bytes)
a424d65b0a02c0d10ee5c7c25409f4a0ce477528
1,842
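Usage sketch; note the function returns a str of code points rather than a bytes object (input hypothetical):

    hex_to_byte("48 65 6c 6c 6f")  # 'Hello'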
from typing import Tuple
import re


def _parse_cli_variable(mapping_str: str) -> Tuple[str, str]:
    """Checks that the input is of shape `name:value` and then splits it into a tuple."""
    match = re.match(r"(?P<name>.+?):(?P<value>.+)", mapping_str)
    if match is None:
        raise ValueError(f'CLI variable input {mapping_str} is not of form `"name:value"`')
    parsed = match.groupdict()
    return parsed["name"], parsed["value"]
f701b7e85c45c2df35e1252721cd3215357909ba
1,847
def get_point(points, cmp, axis):
    """
    Get a point based on values of either the x or y axis.

    :cmp: Integer less than or greater than 0, representing respectively
        the < and > signs.
    :returns: the index of the point matching the constraints
    """
    index = 0
    for i in range(len(points)):
        if cmp < 0:
            if points[i][axis] < points[index][axis]:
                index = i
        else:
            if points[i][axis] > points[index][axis]:
                index = i
    return index
b59035d390e83b45a0131e28c4acf7e302cf3e45
1,848
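Illustrative calls: with cmp < 0 and axis 0 this returns the index of the leftmost point (points are hypothetical):

    get_point([(1, 2), (3, 1), (0, 5)], -1, 0)  # 2 (x == 0 is the smallest)
    get_point([(1, 2), (3, 1), (0, 5)], 1, 1)   # 2 (y == 5 is the largest)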
import pathlib


def create_jobs_list(chunks, outdir, *filters):
    # TO DO: Figure out the packing/unpacking
    """
    Create a list of dictionaries that hold information for the given chunks.

    Arguments:
        chunks: list: A list of lists. Each nested list contains the filepaths to be processed
        outdir: Path object: The directory where results will be written
        filters: Callables

    Return:
        jobs_list: list: A list of dictionaries that holds information for the
        execution of each chunk. Of the form
            [
              {'chunk_id' : int, (0,1,2,...)
               'out_fp' : Path object, (outdir/chunk_<chunk_id>.fa.gz)
               'fastas' : list of Path objects, ([PosixPath('path/to/PATRIC.faa'),...])
               'filters' : list of functions
              }
            ]
    """
    jobs_list = []
    for i, chunk in enumerate(chunks):
        chunk_id = f"chunk_{i}"
        chunk_out = f"{chunk_id}.fa.gz"
        out_fp = outdir / pathlib.Path(chunk_out)
        # chunk_skipped = f"{chunk_id}.skipped.txt"
        chunk_fastas = chunk
        chunk_dict = {
            "chunk_id": chunk_id,
            "fastas": chunk_fastas,
            "out_fp": out_fp,
            # Should there be an if filters or if len(filters) != 0 ?
            "filters": [f for f in filters],
        }
        jobs_list.append(chunk_dict)
    return jobs_list
433992eb34bc1f80d12f8cdcee3dbd99d04d22c1
1,849
def _check_max_features(importances, max_features):
    """Interpret the max_features value."""
    n_features = len(importances)
    if max_features is None:
        max_features = n_features
    elif isinstance(max_features, int):
        max_features = min(n_features, max_features)
    elif isinstance(max_features, float):
        max_features = int(n_features * max_features)
    return max_features
816daf9d99ac4ecd2d5024a3be63f793d7669e1f
1,854
def is_row_and_col_balanced(T1, T2):
    """
    Partial latin squares T1 and T2 are balanced if the symbols appearing in
    row r of T1 are the same as the symbols appearing in row r of T2, for
    each r, and if the same condition holds on columns.

    EXAMPLES::

        sage: from sage.combinat.matrices.latin import *
        sage: T1 = matrix([[0,1,-1,-1], [-1,-1,-1,-1], [-1,-1,-1,-1], [-1,-1,-1,-1]])
        sage: T2 = matrix([[0,1,-1,-1], [-1,-1,-1,-1], [-1,-1,-1,-1], [-1,-1,-1,-1]])
        sage: is_row_and_col_balanced(T1, T2)
        True
        sage: T2 = matrix([[0,3,-1,-1], [-1,-1,-1,-1], [-1,-1,-1,-1], [-1,-1,-1,-1]])
        sage: is_row_and_col_balanced(T1, T2)
        False
    """
    for r in range(T1.nrows()):
        val1 = set(x for x in T1.row(r) if x >= 0)
        val2 = set(x for x in T2.row(r) if x >= 0)
        if val1 != val2:
            return False
    for c in range(T1.ncols()):
        val1 = set(x for x in T1.column(c) if x >= 0)
        val2 = set(x for x in T2.column(c) if x >= 0)
        if val1 != val2:
            return False
    return True
f0a9d1522da2fc079d4021603198e79c438de727
1,860
def submit(ds, entry_name, molecule, index):
    """
    Submit an optimization job to a QCArchive server.

    Parameters
    ----------
    ds : qcportal.collections.OptimizationDataset
        The QCArchive OptimizationDataset object that this calculation
        belongs to
    entry_name : str
        The base entry name that the conformation belongs to. Usually, this
        is a canonical SMILES, but can be anything as it represents a key in
        a dictionary-like datastructure. This will be used as an entry name
        in the dataset
    molecule : QCMolecule
        The JSON representation of a QCMolecule, which has geometry and
        connectivity present, among others
    index : int
        The conformation identifier of the molecule. This is used to make the
        entry names unique, since each conformation must have its own unique
        entry in the dataset

    Returns
    -------
    (unique_id, success) : tuple
        unique_id : str
            The unique_id that was submitted to the dataset. This is the name
            of the new entry in the dataset.
        success : bool
            Whether the dataset was able to successfully add the entry. If
            this is False, then the entry with the name corresponding to
            unique_id was already present in the dataset.
    """
    # This workaround prevents cmiles from crashing if OE is installed but has
    # no license. Even though rdkit is specified, protomer enumeration is OE-
    # specific and still attempted.
    # oe_flag = cmiles.utils.has_openeye
    # cmiles.utils.has_openeye = False
    # attrs = cmiles.generator.get_molecule_ids(molecule, toolkit="rdkit")
    # cmiles.utils.has_openeye = oe_flag

    CIEHMS = "canonical_isomeric_explicit_hydrogen_mapped_smiles"
    molecule["extras"] = {CIEHMS: entry_name}
    attrs = {CIEHMS: entry_name}

    unique_id = entry_name + f"-{index}"

    success = False
    try:
        ds.add_entry(unique_id, molecule, attributes=attrs, save=False)
        success = True
    except KeyError:
        pass

    return unique_id, success
50a30a25af59906ce5636ce8a176e29befd27d60
1,861
import ctypes


def ctypes_pointer(name):
    """Create a ctypes type representing a C pointer to a custom data type ``name``."""
    return type("c_%s_p" % name, (ctypes.c_void_p,), {})
d87f10ac06391379a24f166272fd42fa938e3676
1,863
def split_and_load(data, ctx_list, batch_axis=0, even_split=True):
    """Splits an NDArray into `len(ctx_list)` slices along `batch_axis` and loads
    each slice to one context in `ctx_list`.

    Parameters
    ----------
    data : NDArray
        A batch of data.
    ctx_list : list of Context
        A list of Contexts.
    batch_axis : int, default 0
        The axis along which to slice.
    even_split : bool, default True
        Whether to force all slices to have the same number of elements.

    Returns
    -------
    list of NDArray
        Each corresponds to a context in `ctx_list`.
    """
    if len(ctx_list) == 1:
        return [d.as_in_context(ctx_list[0]) for d in data]

    size = len(data)
    num_slice = len(ctx_list)
    step = size // num_slice
    for i in range(num_slice):
        for k in range(i * step, (i + 1) * step):
            # as_in_context returns a copy, so keep it; the original version
            # discarded the result and returned the data unmoved
            data[k] = data[k].as_in_context(ctx_list[i])
    return data
4b8f0d1b6b256895da3e37fbb4b1be0cd0da5c46
1,867
def string_to_weld_literal(s):
    """
    Converts a string to a UTF-8 encoded Weld literal byte-vector.

    Examples
    --------
    >>> string_to_weld_literal('hello')
    '[104c,101c,108c,108c,111c]'
    """
    return "[" + ",".join([str(b) + 'c' for b in list(s.encode('utf-8'))]) + "]"
d85b016091988c9307cbed56aafdd5766c3c9be5
1,869