content (string) | sha1 (string, 40 chars) | id (int64)
---|---|---|
def _read_id_not_in_dict(read_ids, read_dict):
"""Return True if all read_ids in a list are not in the read_dict keys, otherwise False"""
for read_id in read_ids:
        if read_id not in read_dict:
return True
return False | 3a0e0926ed33f65cc67139311af1c860f3e371ae | 3,079 |
def number_from_string(s):
"""
Parse and return number from string.
Return float only if number is not an int. Assume number can be parsed from
string.
"""
try:
return int(s)
except ValueError:
return float(s) | 50cc7defe7c60b536d184aaf91c2831ab63043e1 | 3,086 |
def get_average(pixels):
"""
Given a list of pixels, finds the average red, blue, and green values
Input:
pixels (List[Pixel]): list of pixels to be averaged
Returns:
rgb (List[int]): list of average red, green, blue values across pixels respectively
Assumes you are returning in the order: [red, green, blue]
"""
    # Running totals for each colour channel
    pixel_r = 0
    pixel_g = 0
    pixel_b = 0
    # Number of pixels in the list
    n = 0
for pixel in pixels:
n += 1
pixel_r += pixel.red
pixel_g += pixel.green
pixel_b += pixel.blue
pixel_avg = [pixel_r//n, pixel_g//n, pixel_b//n]
return pixel_avg | 9cd694505f8d445732bc178b5d645ff273b298d1 | 3,088 |
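A quick sanity check might look like the following; the Pixel namedtuple is a stand-in assumption, since the original Pixel class is not shown.
from collections import namedtuple

Pixel = namedtuple('Pixel', ['red', 'green', 'blue'])
pixels = [Pixel(255, 0, 0), Pixel(0, 255, 0), Pixel(0, 0, 255)]
print(get_average(pixels))  # [85, 85, 85]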
def _leading_space_count(line):
"""Return number of leading spaces in line."""
i = 0
while i < len(line) and line[i] == ' ':
i += 1
return i | b28daa2845618df5030a79129bb7cec1167b149a | 3,089 |
def isfloat(string: str) -> bool:
"""
    This function receives a string and returns whether it can be parsed as a float.
:param str string: The string to check.
:return: A boolean representing if the string is a float.
:rtype: bool
"""
try:
float(string)
return True
except (ValueError, TypeError):
return False | ac6d8fcbbcf6b8cb442c50895576f417618a7429 | 3,098 |
def hook(t):
"""Calculate the progress from download callbacks (For progress bar)"""
def inner(bytes_amount):
t.update(bytes_amount) # Update progress bar
return inner | d8228b9dec203aaa32d268dea8feef52e8db6137 | 3,102 |
import gzip
import pickle
def load_object(filename):
"""
Load saved object from file
:param filename: The file to load
:return: the loaded object
"""
with gzip.GzipFile(filename, 'rb') as f:
return pickle.load(f) | f7e15216c371e1ab05169d40ca4df15611fa7978 | 3,106 |
def response_map(fetch_map):
"""Create an expected FETCH response map from the given request map.
Most of the keys returned in a FETCH response are unmodified from the
request. The exceptions are BODY.PEEK and BODY partial range. A BODY.PEEK
request is answered without the .PEEK suffix. A partial range (e.g.
BODY[]<0.1000>) has the octet count (1000) removed, since that information
is provided in the literal size (and may be different if the data was
truncated).
"""
if not isinstance(fetch_map, dict):
fetch_map = dict((v, v) for v in fetch_map)
rmap = {}
for k, v in fetch_map.items():
for name in ('BODY', 'BINARY'):
if k.startswith(name):
k = k.replace(name + '.PEEK', name, 1)
if k.endswith('>'):
k = k.rsplit('.', 1)[0] + '>'
rmap[k] = v
return rmap | 42d992662e5bba62046c2fc1a50f0f8275798ef8 | 3,107 |
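A minimal sketch with made-up FETCH items: PEEK suffixes are dropped and the octet count of a partial range is removed, while plain keys map to themselves.
req = ['UID', 'BODY.PEEK[HEADER]', 'BODY.PEEK[]<0.1000>']
print(response_map(req))
# {'UID': 'UID', 'BODY[HEADER]': 'BODY.PEEK[HEADER]', 'BODY[]<0>': 'BODY.PEEK[]<0.1000>'}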
def examine_mode(mode):
"""
Returns a numerical index corresponding to a mode
:param str mode: the subset user wishes to examine
:return: the numerical index
"""
if mode == 'test':
idx_set = 2
elif mode == 'valid':
idx_set = 1
elif mode == 'train':
idx_set = 0
else:
raise NotImplementedError
return idx_set | 4fee6f018cacff4c760cb92ef250cad21b497697 | 3,110 |
def add_classification_categories(json_object, classes_file):
"""
Reads the name of classes from the file *classes_file* and adds them to
the JSON object *json_object*. The function assumes that the first line
corresponds to output no. 0, i.e. we use 0-based indexing.
Modifies json_object in-place.
Args:
json_object: an object created from a json in the format of the detection API output
classes_file: the list of classes that correspond to the output elements of the classifier
Return:
The modified json_object with classification_categories added. If the field 'classification_categories'
already exists, then this function is a no-op.
"""
if ('classification_categories' not in json_object.keys()) or (len(json_object['classification_categories']) == 0):
# Read the name of all classes
with open(classes_file, 'rt') as fi:
class_names = fi.read().splitlines()
# remove empty lines
class_names = [cn for cn in class_names if cn.strip()]
# Create field with name *classification_categories*
json_object['classification_categories'] = dict()
# Add classes using 0-based indexing
for idx, name in enumerate(class_names):
json_object['classification_categories']['%i'%idx] = name
else:
print('WARNING: The input json already contains the list of classification categories.')
return json_object | ef92902210f275238271c21e20f8f0eec90253b0 | 3,111 |
def sumdigits(a: int):
"""Sum of the digits of an integer"""
return sum(map(int, str(a))) | 018bcc429e6ea3842fd9e9e2580820aed29bc0aa | 3,113 |
def _vba_to_python_op(op, is_boolean):
"""
Convert a VBA boolean operator to a Python boolean operator.
"""
op_map = {
"Not" : "not",
"And" : "and",
"AndAlso" : "and",
"Or" : "or",
"OrElse" : "or",
"Eqv" : "|eq|",
"=" : "|eq|",
">" : ">",
"<" : "<",
">=" : ">=",
"=>" : ">=",
"<=" : "<=",
"=<" : "<=",
"<>" : "|neq|",
"is" : "|eq|"
}
if (not is_boolean):
op_map["Not"] = "~"
op_map["And"] = "&"
op_map["AndAlso"] = "&"
op_map["Or"] = "|"
op_map["OrElse"] = "|"
return op_map[op] | a6ed0c65c6c2d2635f14fb664540eaf283ee4065 | 3,116 |
def get_label_number(window):
"""This method assigns to each label of a window a number."""
mode_list = ["bike", "car", "walk", "bus", "train"]
current_label_number = 0
    for idx, mode in enumerate(mode_list):
        if window[1] == mode:
            current_label_number = idx
return current_label_number | 5ed3c683e8619e1b07857992f54079bc68fdfa58 | 3,117 |
def shave_bd(img, bd):
"""
    Shave the border area of spatial views. A common operation in SR.
    :param img: image array of shape (H, W, C)
    :param bd: border width in pixels to remove from each side
    :return: the cropped image
"""
return img[bd:-bd, bd:-bd, :] | 4b822c5e57787edb74955fd350ad361080b8640b | 3,119 |
def generate_dict_entry(key, wordlist):
"""Generate one entry of the python dictionary"""
entry = " '{}': {},\n".format(key, wordlist)
return entry | 57ab3c063df0bde1261602f0c6279c70900a7a88 | 3,124 |
def line(value):
"""
| Line which can be used to cross with functions like RSI or MACD.
| Name: line\_\ **value**\
:param value: Value of the line
:type value: float
"""
def return_function(data):
column_name = f'line_{value}'
if column_name not in data.columns:
data[column_name] = value
return data[column_name].copy()
return return_function | 07b4f9671ae06cf63c02062a9da4eb2a0b1a265a | 3,125 |
def _is_hangul_syllable(i):
"""
Function for determining if a Unicode scalar value i is within the range of Hangul syllables.
:param i: Unicode scalar value to lookup
:return: Boolean: True if the lookup value is within the range of Hangul syllables, otherwise False.
"""
    # Range of Hangul syllables as defined in UnicodeData.txt
    return 0xAC00 <= i <= 0xD7A3 | 793519ec33a8920ea13328b0e5a4f814c859b0d3 | 3,127 |
def validate_model(model):
"""
Validate a single data model parameter or a full data model block by
recursively calling the 'validate' method on each node working from
the leaf nodes up the tree.
:param model: part of data model to validate
:type model: :graphit:GraphAxis
:return: overall successful validation
:rtype: :py:bool
"""
allnodes = model.nodes.keys()
leaves = model.leaves(return_nids=True)
done = []
def _walk_ancestors(nodes, success=True):
parents = []
for node in nodes:
node = model.getnodes(node)
# Continue only if the node was found and it has a 'validate' method
if not node.empty() and hasattr(node, 'validate'):
val = node.validate()
done.append(node.nid)
if not val:
return False
pnid = node.parent().nid
if pnid not in done and pnid in allnodes:
parents.append(pnid)
if parents:
return _walk_ancestors(set(parents), success=success)
return success
# Recursively walk the tree from leaves up to root.
return _walk_ancestors(leaves) | 009c629fe80af65f574c698567cb6b5213e9c888 | 3,128 |
def format_data_hex(data):
"""Convert the bytes array to an hex representation."""
# Bytes are separated by spaces.
return ' '.join('%02X' % byte for byte in data) | 27239052d9ca0b12c19977e79d512e0cab04182e | 3,134 |
def sort_dict(original):
"""Recursively sorts dictionary keys and dictionary values in alphabetical order"""
if isinstance(original, dict):
res = (
dict()
) # Make a new "ordered" dictionary. No need for Collections in Python 3.7+
for k, v in sorted(original.items()):
res[k] = v
d = res
else:
d = original
for k in d:
if isinstance(d[k], str):
continue
if isinstance(d[k], list) and len(d[k]) > 1 and isinstance(d[k][0], str):
d[k] = sorted(d[k])
if isinstance(d[k], dict):
d[k] = sort_dict(d[k])
if isinstance(d[k], list) and len(d[k]) >= 1 and isinstance(d[k][0], dict):
for i in range(len(d[k])):
d[k][i] = sort_dict(d[k][i])
return d | 8c194af76160b0e4d3bad135720e051a4d4622b0 | 3,135 |
from typing import Dict
from typing import List
import math
def best_broaders(supers_for_all_entities: Dict,
per_candidate_links_and_supers: List[Dict],
num_best: int = 5,
super_counts_field: str = "broader_counts",
doprint=False,
representativeness_threshold=0.1):
"""
Returns the best matching super for a candidate class, according to a list of supers for entities in the class
and entities in the whole corpus. If comparing to a taxonomy, a super is a broader.
@param super_counts_field:
@param super_counts: a dictionary that has, for every possible entity, the supers it belongs to
@param per_candidate_links_and_supers: a list of dictionaries, one per candidate. Fro each, at least
two fields are expected "entities" containing the list of entities, and that given by super_counts_field
which is, in turn, a dictionary whose keys are supers and whose values are the number of entities in that
candidate having this broad
@param num_best: maximum number of best matching supers to be returned
@return: for every candidate class, the num_best best matching supers and their log odds ratio
"""
result = []
global_counts = dict()
for ent, bros in supers_for_all_entities.items():
for bro in bros:
global_counts[bro] = global_counts.get(bro, 0) + 1
onlytopmost = []
for can in per_candidate_links_and_supers:
# For this entity, the following dictionaries have an element for every possible super
# Using notation from the paper
        # T_cc : the number of entities narrower to a candidate which are tagged with NER type T
T_cc = {x: y for x, y in can[super_counts_field].items()
if y > representativeness_threshold * len(can["entities"])}
if len(T_cc) == 0:
T_cc = {x: y for x, y in can[super_counts_field].items()}
        # T_w : the number of entities in the whole corpus tagged with T
T_w = {y: global_counts[y] for y in T_cc.keys()}
# w : the total number of entities in the whole corpus
w = float(len(supers_for_all_entities))
# cc : the total number of entities in this candidate
cc = float(len(can["entities"]))
# dict of the form super : log_odds
log_odds_per_super = {x: math.log((T_cc[x] / cc) / (T_w[x] / w))
for x in T_cc.keys()}
logslist = list(log_odds_per_super.items())
logslist.sort(key=lambda x: x[1])
logslist.reverse()
maxbroads = min(len(logslist), num_best)
logodds = []
for bi in range(maxbroads):
logodds.append({"candidatesbroader": logslist[bi][0],
"loggods": logslist[bi][1]})
can["log_odds"] = logodds
if doprint:
print("\t\t---", ", ".join([str(x[1]) for x in logslist[:maxbroads]]))
if len(logslist) > 0:
onlytopmost.append(logslist[0][1])
can["best_match_broader"] = logslist[0][0]
else:
onlytopmost.append(None)
can["best_match_broader"] = None
return onlytopmost | 9aa9826c43e67a28eeca463b107296e093709246 | 3,137 |
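A toy example with made-up entities and a single candidate: two of the four corpus entities fall in the candidate and both are tagged "animal", so the log odds ratio is log((2/2)/(2/4)) = log 2.
supers = {"e1": ["animal"], "e2": ["animal"], "e3": ["plant"], "e4": ["plant"]}
candidate = {"entities": ["e1", "e2"], "broader_counts": {"animal": 2}}
tops = best_broaders(supers, [candidate])
print(candidate["best_match_broader"], tops)  # animal [0.6931471805599453]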
import re
def is_heading(line):
"""Determine whether a given line is a section header
that describes subsequent lines of a report.
"""
    has_cattle = re.search(r'steers?|hfrs?|calves|cows?|bulls?', line, re.IGNORECASE)
has_price = re.search(r'\$[0-9]+\.[0-9]{2}', line)
return bool(has_cattle) and not bool(has_price) | ccbc80f7db61f7ba82aa88e54112d1995d457764 | 3,139 |
import torch
def masks_empty(sample, mask_names):
""" Tests whether a sample has any non-masked values """
return any(not torch.any(sample[name] != 0) for name in mask_names) | 4c13b123fe6f5a17c3cd2ee673c54de331af7b23 | 3,152 |
def _add_left_zeros(number, iteration_digits):
"""Add zeros to the left side of the experiment run number.
Zeros will be added according to missing spaces until iterations_digits are
reached.
"""
number = str(number)
return f'{"0" * (iteration_digits - len(number))}{number}' | e3f86a7e7f276ceff4eb662a3f5bc364b4d10ea3 | 3,154 |
def default_data_to_device(
input, target=None, device: str = "cuda", non_blocking: bool = True
):
"""Sends data output from a PyTorch Dataloader to the device."""
input = input.to(device=device, non_blocking=non_blocking)
if target is not None:
target = target.to(device=device, non_blocking=non_blocking)
return input, target | 8dafddbd52b54a576ddc67d7d79af4372fbd57dc | 3,155 |
import collections
import random
def random_sample_with_weight_and_cost(population, weights, costs, cost_limit):
"""
Like random_sample_with_weight but with the addition of a cost and limit.
While performing random samples (with priority for higher weight) we'll keep track of cost
If cost exceeds the cost limit, we stop selecting
Basically the knapsack problem, but with deliberately random selection rather than dynamic optimization
"""
population_weights = {request: weight for (request, weight) in zip(population, weights)}
population_costs = {request: cost for (request, cost) in zip(population, costs)}
selected = []
not_selected = []
cost = 0
# Create a Counter from the population, assigning count by weight
counter = collections.Counter(population_weights)
while counter:
# Turn the Counter into a list for random selection from
# The list will have n repetitions of an element with weight n
choice = random.choice(list(counter.elements()))
choice_cost = population_costs[choice]
# If the cost would cause us to exceed our limit it shouldn't be selected
if cost + choice_cost > cost_limit:
not_selected.append(choice)
else:
cost += choice_cost
selected.append(choice)
# When chosen (whether selected or not), remove the element from the population
# Effectively removes all repetitions of the element
counter.pop(choice)
return selected, not_selected | 637afd1c0e83bbda879f41bd15feb0f65b238fb3 | 3,159 |
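A hypothetical usage; note that the weights must be positive integers, since they become counts in the Counter.
population = ["req_a", "req_b", "req_c"]
weights = [5, 3, 1]
costs = [2, 2, 2]
selected, rejected = random_sample_with_weight_and_cost(population, weights, costs, cost_limit=4)
print(selected, rejected)  # e.g. ['req_a', 'req_b'] ['req_c'] (selection order is random)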
def _client_row_class(client: dict) -> str:
"""
Set the row class depending on what's in the client record.
"""
required_cols = ['trust_balance', 'refresh_trigger']
for col in required_cols:
if col not in client:
return 'dark'
try:
if client['trust_balance'] > client['refresh_trigger']:
return 'success'
except TypeError:
return 'dark'
return 'danger' | cd5ebd8fd64c7d994d6803df473cd317af65e9ac | 3,161 |
def create_vector_clock(node_id, timeout):
"""This method builds the initial vector clock for a new key.
Parameters
----------
node_id : int
the id of one node in the cluster
timeout : int
the expire timeout of the key
Returns
-------
dict
        the vector clock as dictionary
"""
if node_id is not None and timeout is not None:
return {
"versions": [{"nodeId": node_id, "version": 1}],
"timestamp": timeout
}
else:
raise ValueError("You must gave the node id and the timeout.") | ed6df0e7e493d448f52e5fe47b55df8a1de94543 | 3,164 |
def stellar_radius(M, logg):
"""Calculate stellar radius given mass and logg"""
if not isinstance(M, (int, float)):
raise TypeError('Mass must be int or float. {} type given'.format(type(M)))
if not isinstance(logg, (int, float)):
raise TypeError('logg must be int or float. {} type given'.format(type(logg)))
if M < 0:
raise ValueError('Only positive stellar masses allowed.')
M = float(M)
return M/(10**(logg-4.44)) | 2afbd991c7461d7861370f18d90df840569da857 | 3,166 |
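A sanity check in solar units: the Sun has logg of about 4.44, so one solar mass should return one solar radius.
print(stellar_radius(1.0, 4.44))  # 1.0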
def set_plus_row(sets, row):
"""Update each set in list with values in row."""
for i in range(len(sets)):
sets[i].add(row[i])
return sets | 87f448dc3199c8d3137d5811dd184b3d2bd7cbe3 | 3,167 |
import math
def gauss_distribution(x, mu, sigma):
"""
Calculate value of gauss (normal) distribution
Parameters
----------
    x : float
        Input argument
    mu : float
        Mean of the distribution
    sigma : float
        Standard deviation
    Returns
    -------
    float
        Probability density at x
    """
    return 1 / (math.sqrt(2 * math.pi) * sigma) * math.exp(-(1 / 2) * ((x - mu) / sigma) ** 2) | 05cf2c14b337b45a81ddbe7655b4d7cf21e352cd | 3,169 |
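The density can be cross-checked against the standard library's NormalDist (Python 3.8+):
from statistics import NormalDist

assert abs(gauss_distribution(0.5, 0.0, 1.0) - NormalDist(0.0, 1.0).pdf(0.5)) < 1e-12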
from typing import Dict
def serialize(name: str, engine: str) -> Dict:
"""Get dictionary serialization for a dataset locator.
Parameters
----------
name: string
Unique dataset name.
engine: string
Unique identifier of the database engine (API).
Returns
-------
dict
"""
return {'name': name, 'database': engine} | 9ab11318050caf3feb4664310e491ed48e7e5357 | 3,175 |
def support_acctgroup_acctproject(version):
"""
Whether this Lustre version supports acctgroup and acctproject
"""
if version.lv_name == "es2":
return False
return True | 858ec772a90e66431731ffcdd145fa7e56daad02 | 3,177 |
def set_or_none(list_l):
"""Function to avoid list->set transformation to return set={None}."""
if list_l == [None]:
res = None
else:
res = set(list_l)
return res | ee5fb4539e63afc7fd8013610229d9ab784b88c5 | 3,184 |
import re
def case_mismatch(vm_type, param):
"""Return True if vm_type matches a portion of param in a case
insensitive search, but does not equal that portion;
return False otherwise.
The "portions" of param are delimited by "_".
"""
re_portion = re.compile(
"(^(%(x)s)_)|(_(%(x)s)_)|(_(%(x)s)$)" % dict(x=vm_type), re.IGNORECASE
)
found = re_portion.search(param)
if found:
param_vm_type = [x for x in found.groups()[1::2] if x][0]
return param_vm_type != vm_type
else:
return False | e7fb565ac6e10fd15dd62a64fbf7f14a8bcfde6b | 3,185 |
from collections import deque
from typing import Iterable
from typing import Deque
def array_shift(data: Iterable, shift: int) -> Deque:
"""
left(-) or right(+) shift of array
>>> arr = range(10)
>>> array_shift(arr, -3)
deque([3, 4, 5, 6, 7, 8, 9, 0, 1, 2])
>>> array_shift(arr, 3)
deque([7, 8, 9, 0, 1, 2, 3, 4, 5, 6])
"""
deq = deque(data)
deq.rotate(shift)
return deq | c14e115808592808bc9b0cf20fa8bc3d5ece7768 | 3,189 |
import math
def parents(level, idx):
"""
Return all the (grand-)parents of the Healpix pixel idx at level (in nested format)
:param level: Resolution level
:param idx: Pixel index
:return: All the parents of the pixel
"""
assert idx < 12 * 2 ** (2 * level)
plpairs = []
for ind in range(level, 0, -1):
idx = int(math.floor(idx / 4))
plpairs.append(tuple((ind - 1, idx)))
level -= 1
return plpairs[::-1] | 355c3acffa07065de10049059ef064abefdd7ca0 | 3,190 |
from typing import List
from pathlib import Path
def files_filter_ext(files: List[Path], ext: str) -> List[Path]:
"""Filter files from a list matching a extension.
Args:
files: List of files.
ext: Extension to filter.
Returns:
List of files that have the extension.
"""
return [f for f in files if f.suffix == ext] | 0ed134583f9fa4868111d1475b8be4d67ba4feb7 | 3,193 |
import requests
def get_qid_for_title(title):
"""
Gets the best Wikidata candidate from the title of the paper.
"""
api_call = f"https://www.wikidata.org/w/api.php?action=wbsearchentities&search={title}&language=en&format=json"
api_result = requests.get(api_call).json()
if api_result["success"] == 1:
return(api_result["search"][0]["id"]) | 663db71c7a1bbf1617941ba81c5fa3b7d359e00b | 3,196 |
def get_ind_sphere(mesh, ind_active, origin, radius):
"""Retreives the indices of a sphere object coordintes in a mesh."""
return (
(mesh.gridCC[ind_active, 0] <= origin[0] + radius)
& (mesh.gridCC[ind_active, 0] >= origin[0] - radius)
& (mesh.gridCC[ind_active, 1] <= origin[1] + radius)
& (mesh.gridCC[ind_active, 1] >= origin[1] - radius)
& (mesh.gridCC[ind_active, 2] <= origin[2] + radius)
& (mesh.gridCC[ind_active, 2] >= origin[2] - radius)
) | 9e246c3c0d3d7750a668476f0d0d90b28c46fc27 | 3,197 |
def add_hp_label(merged_annotations_column, label_type):
"""Adds prefix to annotation labels that identify the annotation as
belonging to the provided label_type (e.g. 'h@' for host proteins).
Parameters
----------
    merged_annotations_column : array-like (pandas Series)
An array containing sets of annotations that need to be labeled.
e.g.
0 {GO:0010008, GO:0070062, IPR036865, GO:0048471...
1 {GO:0006351, GO:0070062, GO:0007623, GO:004851...
2 {GO:0019888, GO:0006470, GO:0001754, GO:009024...
label_type : str
The prefix to be appended (without the "@" separator).
Returns
-------
labeled_annotations : array-like (pandas Series)
A new pandas Series where all annotations have received a prefix.
"""
labeled_annotations = merged_annotations_column.map(
lambda x: set([label_type + '@' + i for i in x]))
return labeled_annotations | 648f548931a1fae5d19291d81f2355a0a00877c3 | 3,200 |
def get_val(tup):
"""Get the value from an index-value pair"""
return tup[1] | 5966bbbb28006c46eaf11afaef152573aaaa8d2a | 3,202 |
import re
from typing import OrderedDict
def read_avg_residuemap(infile):
""" Read sequence definition from PSN avg file, returning sequence Map
:param infile: File handle pointing to WORDOM avgpsn output file
:return: Returns an internal.map.Map object mapping the .pdb
residues to WORDOM id's from "Seq" section of the avgpsn-file
"""
    m_start = re.compile(r"^\*\*\* Seq \*\*\*")
    m_end = re.compile(r"^============")
    m_entry = re.compile(r"^\s*\d+\s+.:.\d+\s+\d+\.\d+\s*$")
residuemap = OrderedDict()
reading = False
for line in infile:
if reading:
# Stop reading if end of interaction strength section
if m_end.search(line):
break
else:
if m_entry.search(line):
[num, resname, normfact] = line.split()
residuemap[resname] = int(num)
# Start reading when header found
elif m_start.search(line):
reading = True
return residuemap | 92c4cbe53edcd3d894a038d7cb9308c653e37146 | 3,206 |
import re
def check_playlist_url(playlist_url):
"""Check if a playlist URL is well-formated.
Parameters
----------
playlist_url : str
URL to a YouTube playlist.
Returns
-------
str
        If the URL is well-formatted, return the playlist ID. Otherwise raise a `ValueError`.
"""
match = re.match(
r"https?://www\.youtube\.com/playlist\?list=(.+)",
playlist_url.strip()
)
if match is None:
raise ValueError("Incorrect URL: %s" % playlist_url)
return match.group(1) | b14808e3dc25fcb7f91e9b66ec5f31ae869c6ae5 | 3,212 |
def maxsubarray(list):
"""
Find a maximum subarray following this idea:
Knowing a maximum subarray of list[0..j]
find a maximum subarray of list[0..j+1] which is either
(I) the maximum subarray of list[0..j]
    (II) or is a maximum subarray of list[i..j+1] for some 0 <= i <= j
We can determine (II) in constant time by keeping a max
subarray ending at the current j.
This is done in the first if of the loop, where the max
subarray ending at j is max(previousSumUntilJ + array[j], array[j])
This works because if array[j] + sum so far is less than array[j]
then the sum of the subarray so far is negative (and less than array[j]
in case it is also negative) so it has a bad impact on the
subarray until J sum and we can safely discard it and start anew
from array[j]
Complexity (n = length of list)
Time complexity: O(n)
Space complexity: O(1)
"""
if len(list) == 0:
return (-1, -1, 0)
# keep the max sum of subarray ending in position j
maxSumJ = list[0]
# keep the starting index of the maxSumJ
maxSumJStart = 0
# keep the sum of the maximum subarray found so far
maxSum = list[0]
# keep the starting index of the current max subarray found
maxStart = 0
# keep the ending index of the current max subarray found
maxEnd = 0
for j in range(1, len(list)):
if maxSumJ + list[j] >= list[j]:
maxSumJ = maxSumJ + list[j]
else:
maxSumJ = list[j]
maxSumJStart = j
if maxSum < maxSumJ:
maxSum = maxSumJ
maxStart = maxSumJStart
maxEnd = j
return (maxSum, maxStart, maxEnd) | a991ca09c0594b0d47eb4dd8be44d093d593cd36 | 3,215 |
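The classic test case: in [-2, 1, -3, 4, -1, 2, 1, -5, 4] the maximum subarray is [4, -1, 2, 1], spanning indices 3 to 6 with sum 6.
print(maxsubarray([-2, 1, -3, 4, -1, 2, 1, -5, 4]))  # (6, 3, 6)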
def conditional(condition, decorator):
""" Decorator for a conditionally applied decorator.
Example:
@conditional(get_config('use_cache'), ormcache)
def fn():
pass
"""
if condition:
return decorator
else:
return lambda fn: fn | 7c17ad3aaacffd0008ec1cf66871ea6755f7869a | 3,222 |
def calc_points(goals, assists):
"""
Calculate the total traditional and weighted points for all
players, grouped by player id.
Author: Rasmus Säfvenberg
Parameters
----------
goals : pandas.DataFrame
        A data frame with total goals and weighted goals per player.
assists : pandas.DataFrame
A data frame with total assists and weighted assists per player.
Returns
-------
points : pandas.DataFrame
A data frame with total points and weighted points per player.
"""
# Specify columns to keep for merging
goals = goals[["PlayerId", "PlayerName", "Position", "Goals", "WeightedGoals"]]
assists = assists[["PlayerId", "PlayerName", "Position", "Assists", "WeightedAssists"]]
# Combine goals and assists
points = goals.merge(assists, on=["PlayerId", "PlayerName", "Position"],
how="outer")
# Fill missing values with 0 (some players only score goals etc.)
points.fillna(0, inplace=True)
# Calculate points = goals + assists
points["Points"] = points["Goals"] + points["Assists"]
# Calculate weighted points = weighted goals + weighted assists
points["WeightedPoints"] = points["WeightedGoals"] + points["WeightedAssists"]
# Sort by weighted points
points.sort_values("WeightedPoints", ascending=False, inplace=True)
return points | 1801cf2602a473bdf532e1c0ee58b883dc3e79d1 | 3,233 |
def rosenbrock_grad(x, y):
"""Gradient of Rosenbrock function."""
return (-400 * x * (-(x ** 2) + y) + 2 * x - 2, -200 * x ** 2 + 200 * y) | c7acf0bbe11a6d1cbb38b6853eb1b508e3846657 | 3,236 |
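As a quick check, the gradient vanishes at (1, 1), the global minimum of the Rosenbrock function f(x, y) = (1 - x)^2 + 100(y - x^2)^2.
print(rosenbrock_grad(1.0, 1.0))  # (0.0, 0.0)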
def digitize(n):
"""Convert a number to a reversed array of digits."""
l = list(str(n))
n_l = []
for d in l:
n_l.append(int(d))
n_l.reverse()
return n_l | e4355b68da41e4be87ce18b53afb2a406eb120c7 | 3,238 |
def create_outlier_mask(df, target_var, number_of_stds, grouping_cols=None):
"""
Create a row-wise mask to filter-out outliers based on target_var.
Optionally allows you to filter outliers by group for hier. data.
"""
def flag_outliers_within_groups(df, target_var,
grouping_cols, number_of_stds):
groups = df.groupby(grouping_cols)
means = groups[target_var].transform('mean')
stds = groups[target_var].transform('std')
upper_bound = means + stds * number_of_stds
lower_bound = means - stds * number_of_stds
return df[target_var].between(lower_bound, upper_bound)
def flag_outliers_without_groups(df, target_var, number_of_stds):
mean_val = df[target_var].mean()
std_val = df[target_var].std()
upper_bound = (mean_val + (std_val * number_of_stds))
lower_bound = (mean_val - (std_val * number_of_stds))
return (df[target_var] > lower_bound) & (df[target_var] < upper_bound)
if grouping_cols:
mask = flag_outliers_within_groups(
df=df, target_var=target_var,
number_of_stds=number_of_stds, grouping_cols=grouping_cols
)
else:
mask = flag_outliers_without_groups(
df=df, target_var=target_var,
number_of_stds=number_of_stds
)
return mask | 95a7e3e5a0cb8dcc4aa3da1af7e9cb4111cf6b81 | 3,239 |
def generate_config(context):
""" Generate the deployment configuration. """
resources = []
name = context.properties.get('name', context.env['name'])
resources = [
{
'name': name,
'type': 'appengine.v1.version',
'properties': context.properties
}
]
outputs = [
{
'name': 'name',
'value': '$(ref.{}.name)'.format(name)
},
{
'name': 'createTime',
'value': '$(ref.{}.createTime)'.format(name)
},
{
'name': 'versionUrl',
'value': '$(ref.{}.versionUrl)'.format(name)
}
]
return {'resources': resources, 'outputs': outputs} | 9a997b87a8d4d8f46edbbb9d2da9f523e5e2fdc6 | 3,242 |
def remove_end_same_as_start_transitions(df, start_col, end_col):
"""Remove rows corresponding to transitions where start equals end state.
Millington 2009 used a methodology where if a combination of conditions
didn't result in a transition, this would be represented in the model by
specifying a transition with start and end state being the same, and a
transition time of 0 years.
AgroSuccess will handle 'no transition' rules differently, so these dummy
transitions should be excluded.
"""
    def start_different_to_end(row):
        return row[start_col] != row[end_col]
return df[df.apply(start_different_to_end, axis=1)] | f4b3ddca74e204ed22c75a4f635845869ded9988 | 3,243 |
def sieve(iterable, inspector, *keys):
"""Separates @iterable into multiple lists, with @inspector(item) -> k for k in @keys defining the separation.
e.g., sieve(range(10), lambda x: x % 2, 0, 1) -> [[evens], [odds]]
"""
s = {k: [] for k in keys}
for item in iterable:
k = inspector(item)
if k not in s:
raise KeyError(f"Unexpected key <{k}> found by inspector in sieve.")
        s[k].append(item)
return [s[k] for k in keys] | 6ebb76dfb3131342e08a0be4127fba242d126130 | 3,244 |
def fix_lng_degrees(lng: float) -> float:
"""
For a lng degree outside [-180;180] return the appropriate
degree assuming -180 = 180°W and 180 = 180°E.
"""
sign = 1 if lng > 0 else -1
lng_adj = (abs(lng) % 360) * sign
if lng_adj > 180:
return (lng_adj % 180) - 180
elif lng_adj < -180:
return lng_adj % 180
return lng_adj | bde58152883874095b15ec38cfb24ea68d73c188 | 3,248 |
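A few examples of the wrapping behaviour:
print(fix_lng_degrees(190))   # -170 (190°E wraps to 170°W)
print(fix_lng_degrees(-190))  # 170
print(fix_lng_degrees(540))   # 180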
import typing
import inspect
def resolve_lookup(
context: dict, lookup: str, call_functions: bool = True
) -> typing.Any:
"""
Helper function to extract a value out of a context-dict.
A lookup string can access attributes, dict-keys, methods without parameters and indexes by using the dot-accessor (e.g. ``person.name``)
This is based on the implementation of the variable lookup of the django template system:
https://github.com/django/django/blob/master/django/template/base.py
"""
current = context
for bit in lookup.split("."):
try:
current = current[bit]
except (TypeError, AttributeError, KeyError, ValueError, IndexError):
try:
current = getattr(current, bit)
except (TypeError, AttributeError):
# Reraise if the exception was raised by a @property
if not isinstance(current, dict) and bit in dir(current):
raise
try: # list-index lookup
current = current[int(bit)]
except (
IndexError, # list index out of range
ValueError, # invalid literal for int()
KeyError, # current is a dict without `int(bit)` key
TypeError,
): # unsubscriptable object
return None
# raise LookupError(
# "Failed lookup for key " "[%s] in %r", (bit, current)
# ) # missing attribute
if callable(current) and call_functions:
try: # method call (assuming no args required)
current = current()
except TypeError:
signature = inspect.signature(current) # type: ignore
try:
signature.bind()
except TypeError: # arguments *were* required
pass # but we continue because we might use an attribute on the object instead of calling it
else:
raise
return current | a2090f2488ee10f7c11684952fd7a2498f6d4979 | 3,249 |
def get_str_cmd(cmd_lst):
"""Returns a string with the command to execute"""
params = []
for param in cmd_lst:
if len(param) > 12:
params.append('"{p}"'.format(p=param))
else:
params.append(param)
return ' '.join(params) | a7cc28293eb381604112265a99b9c03e762c2f2c | 3,255 |
def metadata_columns(request, metadata_column_headers):
"""Make a metadata column header and column value dictionary."""
template = 'val{}'
columns = {}
for header in metadata_column_headers:
columns[header] = []
for i in range(0, request.param):
columns[header].append(template.format(i))
return columns | ca1f89935260e9d55d57df5fe5fbb0946b5948ac | 3,262 |
def CMYtoRGB(C, M, Y):
""" convert CMY to RGB color
:param C: C value (0;1)
:param M: M value (0;1)
:param Y: Y value (0;1)
:return: RGB tuple (0;255) """
RGB = [(1.0 - i) * 255.0 for i in (C, M, Y)]
return tuple(RGB) | cfc2c7b91dd7f1faf93351e28ffdd9906613471a | 3,264 |
from typing import Any
import json
def json_safe(arg: Any):
"""
Checks whether arg can be json serialized and if so just returns arg as is
otherwise returns none
"""
    try:
        json.dumps(arg)
        return arg
    except (TypeError, ValueError):
        return None | 97ac87464fb4b31b4fcfc7896252d23a10e57b72 | 3,265 |
import collections
def get_duplicates(lst):
"""Return a list of the duplicate items in the input list."""
return [item for item, count in collections.Counter(lst).items() if count > 1] | 8f10226c904f95efbee447b4da5dc5764b18f6d2 | 3,277 |
import math
def regular_poly_circ_rad_to_side_length(n_sides, rad):
"""Find side length that gives regular polygon with `n_sides` sides an
equivalent area to a circle with radius `rad`."""
p_n = math.pi / n_sides
return 2 * rad * math.sqrt(p_n * math.tan(p_n)) | 939ff5de399d7f0a31750aa03562791ee83ee744 | 3,279 |
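Verifying the equal-area property for a hexagon, using the regular-polygon area formula n*s^2 / (4*tan(pi/n)):
import math

n, rad = 6, 1.0
s = regular_poly_circ_rad_to_side_length(n, rad)
polygon_area = n * s ** 2 / (4 * math.tan(math.pi / n))
assert abs(polygon_area - math.pi * rad ** 2) < 1e-12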
def dbl_colour(days):
"""
Return a colour corresponding to the number of days to double
:param days: int
:return: str
"""
if days >= 28:
return "orange"
elif 0 < days < 28:
return "red"
elif days < -28:
return "green"
else:
return "yellow" | 46af7d57487f17b937ad5b7332879878cbf84220 | 3,280 |
def trimAlphaNum(value):
"""
Trims alpha numeric characters from start and ending of a given value
>>> trimAlphaNum(u'AND 1>(2+3)-- foobar')
u' 1>(2+3)-- '
"""
while value and value[-1].isalnum():
value = value[:-1]
while value and value[0].isalnum():
value = value[1:]
return value | e9d44ea5dbe0948b9db0c71a5ffcdd5c80e95746 | 3,285 |
def _median(data):
"""Return the median (middle value) of numeric data.
When the number of data points is odd, return the middle data point.
When the number of data points is even, the median is interpolated by
taking the average of the two middle values:
    >>> _median([1, 3, 5])
    3
    >>> _median([1, 3, 5, 7])
    4.0
"""
data = sorted(data)
n = len(data)
if n == 0:
raise ValueError("no median for empty data")
if n % 2 == 1:
return data[n // 2]
else:
i = n // 2
return (data[i - 1] + data[i]) / 2 | f05a6b067f95fc9e3fc9350b163b3e89c0792814 | 3,288 |
def kilometers_to_miles(dist_km):
"""Converts km distance to miles
PARAMETERS
----------
dist_km : float
Scalar distance in kilometers
RETURNS
-------
dist_mi : float
        Scalar distance in miles
"""
return dist_km / 1.609344 | 61707d483961e92dcd290c7b0cd8ba8f650c7b5b | 3,290 |
import requests
import time
def SolveCaptcha(api_key, site_key, url):
"""
    Uses the 2Captcha service to solve Captchas for you.
    Captchas are held in iframes; to solve the captcha, you need a part of the url of the iframe. The iframe is usually
    inside a div with id=gRecaptcha. The part of the url we need is the query parameter k, this is called the site_key:
www.google.com/recaptcha/api2/anchor?ar=1&k=6LcleDIUAAAAANqkex-vX88sMHw8FXuJQ3A4JKK9&co=aHR0cHM6Ly93d3cuZGljZS5jb206NDQz&hl=en&v=oqtdXEs9TE9ZUAIhXNz5JBt_&size=normal&cb=rpcg9w84syix
k=6LcleDIUAAAAANqkex-vX88sMHw8FXuJQ3A4JKK9
Here the site_key is 6LcleDIUAAAAANqkex-vX88sMHw8FXuJQ3A4JKK9
You also need to supply the url of the current page you're on.
This function will return a string with the response key from captcha validating the test. This needs to be inserted
into an input field with the id=g-recaptcha-response.
:param api_key: The 2Captcha API key.
:param site_key: The site_key extracted from the Captcha iframe url
:param url: url of the site you're on
:return: The response from captcha validating the test
"""
print("Solving Captcha...")
print("Sending Request...")
request_response = requests.get("https://2captcha.com/in.php?", params={
"googlekey": site_key,
"method": "userrecaptcha",
"pageurl": url,
"key": api_key,
"json": 1,
"invisible": 0,
})
request_response.raise_for_status()
print("Waiting for Response...")
time.sleep(30)
answer_response_json = {'status': 0, 'request': 'CAPCHA_NOT_READY'}
while answer_response_json['request'] == 'CAPCHA_NOT_READY':
answer_response = requests.get("https://2captcha.com/res.php", params={
"key": api_key,
"action": "get",
"id": request_response.json()['request'],
"json": 1
})
answer_response_json = answer_response.json()
print(answer_response_json)
time.sleep(5)
if answer_response_json['status'] == 1:
print("Solved!")
return answer_response_json['request']
elif answer_response_json['request'] == 'ERROR_CAPTCHA_UNSOLVABLE':
raise TimeoutError("ERROR_CAPTCHA_UNSOLVABLE")
else:
raise Exception(answer_response_json['request']) | e610a265d03be65bfd6321a266776a8102c227d0 | 3,295 |
def _cifar_meanstd_normalize(image):
"""Mean + stddev whitening for CIFAR-10 used in ResNets.
Args:
image: Numpy array or TF Tensor, with values in [0, 255]
Returns:
image: Numpy array or TF Tensor, shifted and scaled by mean/stdev on
CIFAR-10 dataset.
"""
# Channel-wise means and std devs calculated from the CIFAR-10 training set
cifar_means = [125.3, 123.0, 113.9]
cifar_devs = [63.0, 62.1, 66.7]
rescaled_means = [x / 255. for x in cifar_means]
rescaled_devs = [x / 255. for x in cifar_devs]
image = (image - rescaled_means) / rescaled_devs
return image | 286ab555d30fd779c093e3b8801821f8370e1ca8 | 3,296 |
def boolean(entry, option_key="True/False", **kwargs):
"""
Simplest check in computer logic, right? This will take user input to flick the switch on or off
Args:
entry (str): A value such as True, On, Enabled, Disabled, False, 0, or 1.
option_key (str): What kind of Boolean we are setting. What Option is this for?
Returns:
Boolean
"""
error = f"Must enter 0 (false) or 1 (true) for {option_key}. Also accepts True, False, On, Off, Yes, No, Enabled, and Disabled"
if not isinstance(entry, str):
raise ValueError(error)
entry = entry.upper()
if entry in ("1", "TRUE", "ON", "ENABLED", "ENABLE", "YES"):
return True
if entry in ("0", "FALSE", "OFF", "DISABLED", "DISABLE", "NO"):
return False
raise ValueError(error) | d62b36d08651d02719b5866b7798c36efd2a018f | 3,297 |
def case_insensitive_equals(name1: str, name2: str) -> bool:
"""
Convenience method to check whether two strings match, irrespective of their case and any surrounding whitespace.
"""
return name1.strip().lower() == name2.strip().lower() | 28b7e5bfb5e69cf425e1e8983895f1ad42b59342 | 3,298 |
def ensure_listable(obj):
"""Ensures obj is a list-like container type"""
return obj if isinstance(obj, (list, tuple, set)) else [obj] | bdc5dbe7e06c1cc13afde28762043ac3fb65e5ac | 3,299 |
from typing import Iterable
from typing import Any
from typing import Tuple
def tuple_from_iterable(val: Iterable[Any]) -> Tuple[Any, ...]:
"""Builds a tuple from an iterable.
Workaround for https://github.com/python-attrs/attrs/issues/519
"""
return tuple(val) | 7880b1395f14aa690f967b9548456105b544d337 | 3,308 |
def sensitivity_metric(event_id_1, event_id_2):
"""Determine similarity between two epochs, given their event ids."""
if event_id_1 == 1 and event_id_2 == 1:
return 0 # Completely similar
if event_id_1 == 2 and event_id_2 == 2:
return 0.5 # Somewhat similar
elif event_id_1 == 1 and event_id_2 == 2:
return 0.5 # Somewhat similar
    elif event_id_1 == 2 and event_id_2 == 1:
return 0.5 # Somewhat similar
else:
return 1 | b04c5fa27ef655dd3f371c3ce6ef0410c55dd05b | 3,309 |
def duracion_promedio_peliculas(p1: dict, p2: dict, p3: dict, p4: dict, p5: dict) -> str:
"""Calcula la duracion promedio de las peliculas que entran por parametro.
Esto es, la duración total de todas las peliculas dividida sobre el numero de peliculas.
Retorna la duracion promedio en una cadena de formato 'HH:MM' ignorando los posibles decimales.
Parametros:
p1 (dict): Diccionario que contiene la informacion de la pelicula 1.
p2 (dict): Diccionario que contiene la informacion de la pelicula 2.
p3 (dict): Diccionario que contiene la informacion de la pelicula 3.
p4 (dict): Diccionario que contiene la informacion de la pelicula 4.
p5 (dict): Diccionario que contiene la informacion de la pelicula 5.
Retorna:
str: la duracion promedio de las peliculas en formato 'HH:MM'.
"""
    # Extract the durations of the movies.
duracion1 = p1["duracion"]
duracion2 = p2["duracion"]
duracion3 = p3["duracion"]
duracion4 = p4["duracion"]
duracion5 = p5["duracion"]
    # Average of the movie durations.
promedio = (duracion1 + duracion2 + duracion3 + duracion4 + duracion5) / 5
    # Convert to 'HH:MM' format.
horas = promedio // 60
minutos = promedio % 60
if horas < 10:
horas = '0' + str(int(horas))
else:
horas = str(int(horas))
if minutos < 10:
minutos = '0' + str(int(minutos))
else:
minutos = str(int(minutos))
return horas + ":" + minutos | a8cfcc96a43480ee6830cc212343a33148036c5d | 3,310 |
def _to_test_data(text):
"""
Lines should be of this format: <word> <normal_form> <tag>.
Lines that starts with "#" and blank lines are skipped.
"""
return [l.split(None, 2) for l in text.splitlines()
if l.strip() and not l.startswith("#")] | 8f0bae9f81d2d14b5654622f1493b23abd88424d | 3,311 |
def align2local(seq):
"""
Returns list such that
'ATG---CTG-CG' ==> [0,1,2,2,2,3,4,5,5,6,7]
Used to go from align -> local space
"""
i = -1
lookup = []
for c in seq:
if c != "-":
i += 1
lookup.append(i)
return lookup | aa914a60d5db7801a3cf1f40e713e95c98cd647e | 3,313 |
def parse_msiinfo_suminfo_output(output_string):
"""
Return a dictionary containing information from the output of `msiinfo suminfo`
"""
# Split lines by newline and place lines into a list
output_list = output_string.splitlines()
results = {}
# Partition lines by the leftmost ":", use the string to the left of ":" as
# the key and use the string to the right of ":" as the value
for output in output_list:
key, _, value = output.partition(':')
if key:
results[key] = value.strip()
return results | 6883e8fba9a37b9f877bdf879ebd14d1120eb88a | 3,317 |
import time
import json
async def ping(ws):
"""Send a ping request on an established websocket connection.
:param ws: an established websocket connection
:return: the ping response
"""
ping_request = {
'emit': "ping",
'payload': {
'timestamp': int(time.time())
}
}
await ws.send(json.dumps(ping_request))
return json.loads(await ws.recv()) | 587d2a72cbc5f50f0ffb0bda63668a0ddaf4c9c3 | 3,319 |
def submit_only_kwargs(kwargs):
"""Strip out kwargs that are not used in submit"""
kwargs = kwargs.copy()
for key in ['patience', 'min_freq', 'max_freq', 'validation',
"max_epochs", "epoch_boost", "train_size", "valid_size"]:
_ = kwargs.pop(key, None)
return kwargs | e93a4b8921c5b80bb487caa6057c1ff7c1701305 | 3,323 |
def min_spacing(mylist):
"""
Find the minimum spacing in the list.
Args:
mylist (list): A list of integer/float.
Returns:
int/float: Minimum spacing within the list.
"""
# Set the maximum of the minimum spacing.
min_space = max(mylist) - min(mylist)
# Iteratively find a smaller spacing.
for item in mylist:
spaces = [abs(item - item2) for item2 in mylist if item != item2]
min_space = min(min_space, min(spaces))
# Return the answer.
return min_space | b8ce0a46bacb7015c9e59b6573bc2fec0252505d | 3,330 |
def mock_mkdir(monkeypatch):
"""Mock the mkdir function."""
def mocked_mkdir(path, mode=0o755):
return True
monkeypatch.setattr("charms.layer.git_deploy.os.mkdir", mocked_mkdir) | e4e78ece1b8e60719fe11eb6808f0f2b99a933c3 | 3,332 |
import re
def is_sedol(value):
"""Checks whether a string is a valid SEDOL identifier.
Regex from here: https://en.wikipedia.org/wiki/SEDOL
:param value: A string to evaluate.
:returns: True if string is in the form of a valid SEDOL identifier."""
    return bool(re.match(r'^[0-9BCDFGHJKLMNPQRSTVWXYZ]{6}\d$', value)) | 207ff94a4df99e7a546440cef1242f9a48435118 | 3,337 |
def _get_split_idx(N, blocksize, pad=0):
"""
Returns a list of indexes dividing an array into blocks of size blocksize
with optional padding. Padding takes into account that the resultant block
must fit within the original array.
Parameters
----------
N : Nonnegative integer
Total array length
blocksize : Nonnegative integer
Size of each block
pad : Nonnegative integer
Pad to add on either side of each index
Returns
-------
split_idx : List of 2-tuples
Indices to create splits
pads_used : List of 2-tuples
Pads that were actually used on either side
Examples
--------
    >>> split_idx, pads_used = _get_split_idx(5, 2)
    >>> print(split_idx)
    [(0, 2), (2, 4), (4, 5)]
    >>> print(pads_used)
    [(0, 0), (0, 0), (0, 0)]
    >>> split_idx, pads_used = _get_split_idx(5, 2, pad=1)
    >>> print(split_idx)
    [(0, 3), (1, 5), (3, 5)]
    >>> print(pads_used)
    [(0, 1), (1, 1), (1, 0)]
"""
num_fullsplits = N // blocksize
remainder = N % blocksize
split_idx = []
pads_used = []
for i in range(num_fullsplits):
start = max(0, i * blocksize - pad)
end = min(N, (i + 1) * blocksize + pad)
split_idx.append((start, end))
leftpad = i * blocksize - start
rightpad = end - (i + 1) * blocksize
pads_used.append((leftpad, rightpad))
# Append the last split if there is a remainder
if remainder:
start = max(0, num_fullsplits * blocksize - pad)
split_idx.append((start, N))
leftpad = num_fullsplits * blocksize - start
pads_used.append((leftpad, 0))
return split_idx, pads_used | 21935190de4c42fa5d7854f6608387dd2f004fbc | 3,340 |
import pkg_resources
def _get_highest_tag(tags):
"""Find the highest tag from a list.
Pass in a list of tag strings and this will return the highest
(latest) as sorted by the pkg_resources version parser.
"""
return max(tags, key=pkg_resources.parse_version) | 8d2580f6f6fbb54108ee14d6d4834d376a65c501 | 3,342 |
def norm(x):
"""Normalize 1D tensor to unit norm"""
mu = x.mean()
std = x.std()
y = (x - mu)/std
return y | ea8546da2ea478edb0727614323bba69f6af288d | 3,344 |
import re
def _remove_invalid_characters(file_name):
"""Removes invalid characters from the given file name."""
return re.sub(r'[/\x00-\x1f]', '', file_name) | 49a9f668e8142855ca4411921c0180977afe0370 | 3,346 |
def items(dic):
"""Py 2/3 compatible way of getting the items of a dictionary."""
try:
return dic.iteritems()
except AttributeError:
return iter(dic.items()) | 2664567765efe172591fafb49a0efa36ab9fcca8 | 3,351 |
def as_string(raw_data):
"""Converts the given raw bytes to a string (removes NULL)"""
return bytearray(raw_data[:-1]) | 6610291bb5b71ffc0be18b4505c95653bdac4c55 | 3,353 |
import math
def generate_trapezoid_profile(max_v, time_to_max_v, dt, goal):
"""Creates a trapezoid profile with the given constraints.
Returns:
t_rec -- list of timestamps
x_rec -- list of positions at each timestep
v_rec -- list of velocities at each timestep
a_rec -- list of accelerations at each timestep
Keyword arguments:
max_v -- maximum velocity of profile
time_to_max_v -- time from rest to maximum velocity
dt -- timestep
goal -- final position when the profile is at rest
"""
t_rec = [0.0]
x_rec = [0.0]
v_rec = [0.0]
a_rec = [0.0]
a = max_v / time_to_max_v
time_at_max_v = goal / max_v - time_to_max_v
# If profile is short
if max_v * time_to_max_v > goal:
time_to_max_v = math.sqrt(goal / a)
time_from_max_v = time_to_max_v
time_total = 2.0 * time_to_max_v
profile_max_v = a * time_to_max_v
else:
time_from_max_v = time_to_max_v + time_at_max_v
time_total = time_from_max_v + time_to_max_v
profile_max_v = max_v
while t_rec[-1] < time_total:
t = t_rec[-1] + dt
t_rec.append(t)
if t < time_to_max_v:
# Accelerate up
a_rec.append(a)
v_rec.append(a * t)
elif t < time_from_max_v:
# Maintain max velocity
a_rec.append(0.0)
v_rec.append(profile_max_v)
elif t < time_total:
# Accelerate down
decel_time = t - time_from_max_v
a_rec.append(-a)
v_rec.append(profile_max_v - a * decel_time)
else:
a_rec.append(0.0)
v_rec.append(0.0)
x_rec.append(x_rec[-1] + v_rec[-1] * dt)
return t_rec, x_rec, v_rec, a_rec | 5851cfab06e20a9e79c3a321bad510d33639aaca | 3,354 |
import six
def str_to_bool(s):
"""Convert a string value to its corresponding boolean value."""
if isinstance(s, bool):
return s
elif not isinstance(s, six.string_types):
raise TypeError('argument must be a string')
true_values = ('true', 'on', '1')
false_values = ('false', 'off', '0')
if s.lower() in true_values:
return True
elif s.lower() in false_values:
return False
else:
        raise ValueError('not a recognized boolean value: {}'.format(s)) | c228321872f253ce3e05c6af9284ec496dea8dcf | 3,355 |
import six
import base64
def Base64WSEncode(s):
"""
Return Base64 web safe encoding of s. Suppress padding characters (=).
Uses URL-safe alphabet: - replaces +, _ replaces /. Will convert s of type
unicode to string type first.
@param s: string to encode as Base64
@type s: string
@return: Base64 representation of s.
@rtype: string
NOTE: Taken from keyczar (Apache 2.0 license)
"""
if isinstance(s, six.text_type):
# Make sure input string is always converted to bytes (if not already)
s = s.encode("utf-8")
return base64.urlsafe_b64encode(s).decode("utf-8").replace("=", "") | cb28001bddec215b763936fde4652289cf6480c0 | 3,361 |
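Because the padding is stripped, decoding requires restoring the '=' characters first; a round-trip sketch:
import base64

encoded = Base64WSEncode("hello")            # 'aGVsbG8'
padded = encoded + "=" * (-len(encoded) % 4)
assert base64.urlsafe_b64decode(padded) == b"hello"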
import requests
import json
def search(keyword, limit=20):
"""
    Search the iTunes podcast directory for the given keyword.
Parameter:
keyword = A string containing the keyword to search.
limit: the maximum results to return,
The default is 20 results.
returns:
A JSON object.
"""
keyword = keyword.replace(' ', '+') # Replace white space with +.
# Set user agent.
user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.47 Safari/537.36'
headers = {'User-Agent': user_agent}
# ITunes podcast search URL.
itunesurl = 'https://itunes.apple.com/search?term=%s&country=us&limit=%d&entity=podcast' % (keyword, limit)
req = requests.get(itunesurl, headers=headers)
return json.loads(req.text) | 922cd7dfaea30e7254c459588d28c33673281dac | 3,367 |
import torch
def permute(x, in_shape='BCD', out_shape='BCD', **kw):
""" Permute the dimensions of a tensor.\n
- `x: Tensor`; The nd-tensor to be permuted.
- `in_shape: str`; The dimension shape of `x`. Can only have characters `'B'` or `'C'` or `'D'`,
which stand for Batch, Channel, or extra Dimensions. The default value `'BCD'` means
the input tensor `x` should be at lest 2-d with shape `(Batch, Channel, Dim0, Dim1, Dim2, ...)`,
where `Dim0, Dim1, Dim2 ...` stand for any number of extra dimensions.
- `out_shape: str or tuple or None`; The dimension shape of returned tensor. Default: `'BCD'`.
If a `str`, it is restricted to the same three characters `'B'`, `'C'` or `'D'` as the `in_shape`.
If a `tuple`, `in_shape` is ignored, and simply `x.permute(out_shape)` is returned.
If `None`, no permution will be performed.
- `return: Tensor`; Permuted nd-tensor. """
if (in_shape == out_shape) or (out_shape is None):
return x
if isinstance(out_shape, (list, tuple, torch.Size)):
return x.permute(*out_shape)
if isinstance(in_shape, str) and isinstance(out_shape, str) :
assert set(in_shape) == set(out_shape) <= {'B', 'C', 'D'}, 'In and out shapes must have save set of chars among B, C, and D.'
in_shape = in_shape.lower().replace('d', '...')
out_shape = out_shape.lower().replace('d', '...')
return torch.einsum(f'{in_shape}->{out_shape}', x)
return x | e74594df581c12891963e931999563374cd89c7d | 3,373 |
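For example, moving the channel axis of a (Batch, Channel, H, W) tensor to the end (torch is imported with the function above):
x = torch.randn(2, 3, 4, 5)
y = permute(x, in_shape='BCD', out_shape='BDC')
print(y.shape)  # torch.Size([2, 4, 5, 3])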
import re
def fullmatch(regex, string, flags=0):
"""Emulate python-3.4 re.fullmatch()."""
matched = re.match(regex, string, flags=flags)
if matched and matched.span()[1] == len(string):
return matched
return None | 72de0abe5c15dd17879b439562747c9093d517c5 | 3,374 |
def get_training_set_count(disc):
"""Returns the total number of training sets of a discipline and all its
child elements.
:param disc: Discipline instance
:type disc: models.Discipline
:return: sum of training sets
:rtype: int
"""
training_set_counter = 0
for child in disc.get_descendants(include_self=True):
training_set_counter += child.training_sets.count()
return training_set_counter | 9b28a9e51e04b559f05f1cc0255a6c65ca4a0980 | 3,377 |
from typing import Dict
from pathlib import Path
import inspect
import json
def load_schema(rel_path: str) -> Dict:
"""
Loads a schema from a relative path of the caller of this function.
:param rel_path: Relative path from the caller. e.g. ../schemas/schema.json
:return: Loaded schema as a `dict`.
"""
caller_path = Path((inspect.stack()[1])[1]).parent
fp = (caller_path / rel_path).resolve()
with open(fp, "r") as fh:
data = json.loads(fh.read())
return data | 297e0e01dd2f4af071ab99ebaf203ddb64525c89 | 3,381 |
from typing import Dict
from pathlib import Path
from typing import Optional
def prioritize(paths: Dict[int, Path], purpose: str) -> Optional[Path]:
"""Returns highest-priority and existing filepath from ``paths``.
Finds existing configuration or data file in ``paths`` with highest
priority and returns it, otherwise returns ``None``.
"""
for key in sorted(paths.keys(), reverse=True):
if purpose == "config":
if paths[key].exists():
return paths[key]
if purpose == "data":
return paths[key] | 2c00d0bfe696040c2c19dc1d8b3393b7be124e11 | 3,384 |
def mode_mods_to_int(mode: str) -> int:
"""Converts mode_mods (str) to mode_mods (int)."""
# NOTE: This is a temporary function to convert the leaderboard mode to an int.
# It will be removed when the site is fully converted to use the new
# stats table.
for mode_num, mode_str in enumerate((
'vn_std', 'vn_taiko', 'vn_catch', 'vn_mania',
'rx_std', 'rx_taiko', 'rx_catch',
'ap_std'
)):
if mode == mode_str:
return mode_num
else:
return 0 | 0bfaa8cf04bcee9395dff719067be9753be075c4 | 3,390 |
def get_path_to_spix(
name: str,
data_directory: str,
thermal: bool,
error: bool = False,
file_ending: str = "_6as.fits",
) -> str:
"""Get the path to the spectral index
Args:
name (str): Name of the galaxy
data_directory (str): dr2 data directory
        thermal (bool): if True, use the non-thermal spectral index file
error (bool): path to error
file_ending (str, optional): File ending. Defaults to ".fits".
Returns:
        str: path to the spectral index FITS file
"""
return f"{data_directory}/magnetic/{name}/{name}_spix{'_non_thermal' if thermal else ''}{'_error' if error else ''}{file_ending}" | bf8fdff001049ed0738ed856e8234c43ce4511b7 | 3,391 |
from bs4 import BeautifulSoup
def parse_object_properties(html):
"""
Extract key-value pairs from the HTML markup.
"""
if isinstance(html, bytes):
html = html.decode('utf-8')
page = BeautifulSoup(html, "html5lib")
propery_ps = page.find_all('p', {'class': "list-group-item-text"})
obj_props_dict = {}
for p in propery_ps:
if 'data-name' in p.attrs:
key = p.attrs['data-name']
value = p.get_text().strip()
obj_props_dict[key] = value
return obj_props_dict | 8eb2d15cb5f46075ec44ff61265a8f70123a8646 | 3,392 |
from bs4 import BeautifulSoup
def parseHtml(html):
"""
    Parse the given HTML with BeautifulSoup.
    Parameters
    ----------
    html : str
        HTML source string
    Returns
    -------
    soup : BeautifulSoup
        BeautifulSoup object
"""
soup = BeautifulSoup(html, 'html.parser')
return soup | e8d7a39a9881606d1dfee810ab1c2cecd11eaba2 | 3,395 |