content | sha1 | id |
---|---|---|
import base64
def hex_to_b64(hex_str):
"""Convert Hex string to Base64 string."""
raw = bytes.fromhex(hex_str)
# str.encode('base64') only works on Python 2; use the base64 module instead
b64_str = base64.b64encode(raw).decode('ascii')
return b64_str | f2fd5f3de60638df19f1b477ea005549c6c43b35 | 119,948 |
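# Illustrative usage sketch (not part of the dataset entry above), assuming Python 3:
# hex "4d616e" decodes to the ASCII bytes of "Man", whose Base64 form is "TWFu".
assert hex_to_b64("4d616e") == "TWFu"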
def normalize(string: str) -> str:
"""[removes spaces and stuff in string]
Args:
string (str): [any string]
Returns:
str: [string without spaces or -_]
"""
string = string.replace(' ', '')
string = string.replace('-', '')
string = string.replace('_', '')
return string.lower() | 57079531dde1bb6e424cf748d895981177890517 | 547,797 |
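# Illustrative usage sketch (not part of the dataset entry above):
assert normalize("Foo-Bar_Baz 1") == "foobarbaz1"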
def detach_virtual_disk_spec(client_factory, device, destroy_disk=False):
"""Builds spec for the detach of an already existing Virtual Disk from VM.
"""
virtual_device_config = client_factory.create(
'ns0:VirtualDeviceConfigSpec')
virtual_device_config.operation = "remove"
if destroy_disk:
virtual_device_config.fileOperation = "destroy"
virtual_device_config.device = device
return virtual_device_config | 007b21df2c17a676d4c4b2a7b6f41c91aedc6a78 | 205,191 |
def lowerBackslash(r, c):
"""
>>> lowerBackslash(6, 3)
[(6, 3), (7, 4), (8, 5)]
>>> lowerBackslash(2, 2)
[(2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8)]
>>> lowerBackslash(1, 8)
[(1, 8)]
>>> lowerBackslash(1, 1)
[(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8)]
"""
lower_x = range(r, 9, 1)
lower_y = range(c, 9, 1)
# zip() returns an iterator in Python 3; materialize it so the doctests hold
return list(zip(lower_x, lower_y)) | 8269b34c71651df32a79f111f18d2a28350807dd | 577,652 |
def line_to_cols(raw_col_vals: str):
"""Transform a raw text line of column names into a list of column names
Args:
raw_col_vals (str): Whitespace-separated column names from the text file
Returns:
A list of the column names in order
"""
change_case = [
"RA",
"DEC",
"Ra",
"Dec",
"X",
"Y",
"ID",
"iD",
"Id",
"A",
"B",
"THETA",
"Theta",
]
# make ra and dec lowercase for ease of access
# split the raw line into tokens; mapping over the string itself would iterate characters
raw_cols = [s.lower() if s in change_case else s for s in raw_col_vals.split()]
# if header line starts with a '#' exclude it
if raw_cols[0] == "#":
return raw_cols[1:]
else:
return raw_cols | a63eb03e051bf7e4a6a719022de907afc709f41c | 466,920 |
def test_sympy_parents(sympy_cls: str, parents: tuple) -> bool:
"""
Returns True if 'sympy_cls' appears among 'parents'.
False, otherwise.
"""
return any(sympy_cls in str(parent) for parent in parents) | ad99314e0119e4b5465e052f1b8d2824cc8a994e | 402,562 |
import copy
def merge_targets(pac_to_super, pac_to_unstuck, pac_to_normal):
"""
Merges the targets dictionaries into a single one.
"""
pac_targets = {}
if pac_to_super is not None:
pac_targets = copy.deepcopy(pac_to_super)
if pac_to_unstuck is not None:
pac_targets.update(pac_to_unstuck)
if pac_to_normal is not None:
pac_targets.update(pac_to_normal)
return pac_targets | 9648d626841dde0cac0bd70b39a24699a9804f0e | 241,271 |
def dynamics(state, time, config, intervention=None):
"""State dynamics for the Chen et al. opioid epidemic model.
Parameters
----------
state: np.ndarray or tuple or list
Instantaneous system state
time: float
Time to evaluate the derivative
config: whynot.simulators.opioid.Config
Configuration parameters for the dynamics
intervention: whynot.simulators.opioid.Intervention
(Optional) Parameters specifying when/how to update the dynamics
Returns
-------
ds_dt: np.ndarray
Derivative of the state with respect to time, evaluated at state and time.
"""
# In the opioid simulator, if there is an intervention,
# it is applied for every time step of the dynamics (since
# interventions are specified on TimeVaryingParams which internally track
# the year).
if intervention:
config = config.update(intervention)
nonmedical_users, oud_users, illicit_users = state
# Update non-medical opioid use cases
nonmedical_increase = config.nonmedical_incidence[time]
nonmedical_decrease = nonmedical_users * (
config.nonmedical_exit
+ config.nonmedical_overdose
+ config.nonmedical_to_oud
+ config.nonmedical_to_illicit
)
nonmedical_delta = nonmedical_increase - nonmedical_decrease
# Update opioid use disorder cases
oud_increase = config.nonmedical_to_oud * nonmedical_users
oud_decrease = oud_users * (
config.oud_exit[time] + config.oud_overdose[time] + config.oud_to_illicit[time]
)
oud_delta = oud_increase - oud_decrease
# Update illicit opiate use cases
illicit_increase = config.illicit_incidence[time] + (
config.oud_to_illicit[time] * oud_users
+ config.nonmedical_to_illicit * nonmedical_users
)
illicit_decrease = illicit_users * (
config.illicit_exit + config.illicit_overdose[time]
)
illicit_delta = illicit_increase - illicit_decrease
return [nonmedical_delta, oud_delta, illicit_delta] | fb67d294539b22425233ad7078e4f771aa22d93d | 550,082 |
def tar_entry_size(filesize):
"""Get the space a file of the given size will actually require in a TAR.
The entry has a 512-byte header followed by the actual file data,
padded to a multiple of 512 bytes if necessary.
Args:
filesize (int): File size in bytes
Returns:
int: Bytes consumed in a TAR archive by this file.
Examples:
::
>>> tar_entry_size(1)
1024
>>> tar_entry_size(511)
1024
>>> tar_entry_size(512)
1024
>>> tar_entry_size(513)
1536
"""
# round up to next multiple of 512
return 512 + filesize + ((512 - filesize) % 512) | be5720f23561201344eba0c46afd0215134c4eda | 457,814 |
def get_mean(dataframe, column, by=None):
""" Return mean values of column x (optionally grouped)
:param dataframe: Data
:type dataframe: pandas.DataFrame
:param column: Column name
:type column: str
:param by: Column names by which to group.
:type by: str|list
:return: mean value, optionally for each group.
:rtype: numpy.float64|pandas.Series
"""
if by is None:
means = dataframe[column].mean()
else:
means = dataframe.groupby(by, observed=True)[column].mean()
return means | 6c4181bf28971a3b0053c1bcad88f75565819a78 | 558,512 |
def tune(scale, acc_rate):
"""
Tunes the scaling parameter for the proposal distribution
according to the acceptance rate over the last tune_interval:
Rate Variance adaptation
---- -------------------
<0.001 x 0.1
<0.05 x 0.5
<0.2 x 0.9
>0.5 x 1.1
>0.75 x 2
>0.95 x 10
"""
# Switch statement
if acc_rate < 0.001:
# reduce by 90 percent
scale *= 0.1
elif acc_rate < 0.05:
# reduce by 50 percent
scale *= 0.5
elif acc_rate < 0.2:
# reduce by ten percent
scale *= 0.9
elif acc_rate > 0.95:
# increase by factor of ten
scale *= 10.0
elif acc_rate > 0.75:
# increase by double
scale *= 2.0
elif acc_rate > 0.5:
# increase by ten percent
scale *= 1.1
return scale | d0af4f5e6a7f4b02aed2b269d7f3b1db9e58409d | 217,743 |
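# Worked examples for tune (illustrative, not part of the dataset entry above):
# a 3% acceptance rate halves the scale, an 80% acceptance rate doubles it.
assert tune(1.0, 0.03) == 0.5
assert tune(1.0, 0.80) == 2.0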
import torch
def calculate_accuracy(output, target):
"""
Top-1 classification accuracy.
"""
with torch.no_grad():
batch_size = output.shape[0]
prediction = torch.argmax(output, dim=1)
return torch.sum(prediction == target).item() / batch_size | 569a66bf755eb36e83d516e5507b0cc0f36a4c01 | 507,954 |
import time
def rfc822date(timeinfo=None,local=1):
"""
Format an RFC-2822 compliant date string.
@param timeinfo: (optional) A sequence as returned by C{time.localtime()}
or C{time.gmtime()}. Default is now.
@param local: (optional) Indicates if the supplied time is local or
universal time, or if no time is given, whether now should be local or
universal time. Default is local, as suggested (SHOULD) by rfc-2822.
@returns: A string representing the time and date in RFC-2822 format.
"""
if not timeinfo:
if local:
timeinfo = time.localtime()
else:
timeinfo = time.gmtime()
if local:
if timeinfo[8]:
# DST
tz = -time.altzone
else:
tz = -time.timezone
(tzhr, tzmin) = divmod(abs(tz), 3600)
if tz:
tzhr *= int(abs(tz)/tz)
(tzmin, tzsec) = divmod(tzmin, 60)
else:
(tzhr, tzmin) = (0,0)
return "%s, %02d %s %04d %02d:%02d:%02d %+03d%02d" % (
['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun'][timeinfo[6]],
timeinfo[2],
['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'][timeinfo[1] - 1],
timeinfo[0], timeinfo[3], timeinfo[4], timeinfo[5],
tzhr, tzmin) | fa1a78a099e73e13384711f613854fa9968a43b7 | 434,029 |
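# Illustrative usage sketch (not part of the dataset entry above): formatting the
# Unix epoch as universal time.
assert rfc822date(time.gmtime(0), local=0) == 'Thu, 01 Jan 1970 00:00:00 +0000'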
def get_error_point(view, error):
"""Get the error text point.
lxml uses 1-based line and column numbers but ST wants them
0-based, so subtract 1 from both."""
return view.text_point(error.line - 1, error.column - 1) | a2284225f7d3db4d3d0e6a9470f6db53698ac1d8 | 99,795 |
def get_filename_pair(filename):
"""
Given one *.spar/*.sdat filename, returns tuple with both filenames
It doesn't matter if the filename is a fully qualified path or not.
- one assumption: the extensions are either all caps or all lowercase
"""
spar_filename = sdat_filename = filename[:-3]
if filename[-1:].isupper():
sdat_filename += 'DAT'
spar_filename += 'PAR'
else:
sdat_filename += 'dat'
spar_filename += 'par'
return (spar_filename, sdat_filename) | 1d3a33ba68e14d9217e5e2a7833258d9c94f1667 | 100,026 |
def write_vec_string(vec, prec):
"""Write data vector into string."""
o = "["
for (n, v) in enumerate(vec):
o += f"{v:.{prec}f}"
if n < len(vec) - 1:
o += ","
return o + "]" | 195b8a458362b61b689568f71f87b7c03dc911a8 | 355,022 |
def dEds(s, P, Q):
"""
we have: E = P/Q
therefore: dEds = E' = (P'Q - Q'P) / Q^2
input
P, Q: two polynomials that depend on L
s: the independent (scaling) variable
"""
Pp = P.deriv(1)(s)
Qp = Q.deriv(1)(s)
return (Pp*Q(s) - Qp*P(s)) / Q(s)**2 | 1a36fa62ec91969bc8925780061a5d195db9a8f8 | 559,896 |
def get_tokens(node, index):
"""
Get the tokens from the leaf node
Args:
node: leaf node
index: 0-token, 1-final, 2-sentence, 3-beat, 4-pos
"""
sen = []
while node:
words = node.wordid[index][0].tolist()
for w in words[::-1]:
sen.append(w)
node = node.prevNode
return sen[::-1] | b33a4d9ff2dcdd66e4c6ef8e9477e57ce0a7278e | 423,693 |
import math
def exp_pdf(value, rate):
"""
Returns the probability density for an exponential distribution
with an intensity of rate, evaluated at value.
"""
return float(rate) * math.exp(-1.0 * rate * value) | a50625b216fdbd492c7aff725f965b9085a3201a | 114,172 |
import math
def negative_binomial(x, k, p):
"""
Evaluate the negative binomial distribution b*(x; k, p) as defined in the textbook.
Equivalent to finding the probability of coin flip x to be the k-th head when the
probability of getting a head on any one coin flip is p.
Found with the equation (x-1)C(k-1) * p^k * (1 - p)^(x - k)
"""
return math.comb(x - 1, k - 1) * (p ** k) * ((1 - p) ** (x - k)) | c4b2d923cafdb95142cafe40c1f4248278f06323 | 666,073 |
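# Worked example (illustrative, not part of the dataset entry above): the probability
# that flip 5 is the 2nd head of a fair coin is C(4,1) * 0.5**2 * 0.5**3 = 0.125.
assert negative_binomial(5, 2, 0.5) == 0.125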
def _get_noise_id(tr):
"""
Get trace id for noise type data. e.g., 'starttime-endtime'
:param tr: an obspy trace
:return str event_id: noise id
"""
starttime = tr.stats.starttime
endtime = tr.stats.endtime
event_id = "-".join([starttime.strftime("%Y%m%d%H%M%S"), endtime.strftime("%Y%m%d%H%M%S")])
return event_id | 16960810091e2ae6daa512eb8ccfe8f79c15e960 | 568,949 |
def root_app(expr):
"""Returns the pair (r, args)
such that expr = r(*args)
Arguments:
- `expr`: an expression
"""
root = expr
args = []
while root.is_app():
args.append(root.arg)
root = root.fun
# The arguments were collected in reverse order
args.reverse()
return (root, args) | 500c3626cb777af28c4e58832d3ceb98751c873b | 652,046 |
def zeropad(splitver, idx, padlen):
"""
Zero-pad an element of an OS/radio version to a certain length.
:param splitver: OS/radio version, but split into quarters.
:type splitver: list(str)
:param idx: Index of splitver which must be checked.
:type idx: int
:param padlen: Length to pad to.
:type padlen: int
"""
if len(splitver[idx]) < padlen:
splitver[idx] = splitver[idx].rjust(padlen, "0")
return splitver | ab8358a753096501471986ed90a1cf76a2c811df | 262,396 |
def get_ssl_subject_alt_names(ssl_info):
""" Return the Subject Alt Names """
altNames = ''
subjectAltNames = ssl_info['subjectAltName']
index = 0
for item in subjectAltNames:
altNames += item[1]
index += 1
if index < len(subjectAltNames):
altNames += ', '
return altNames | ce38761c452d9704bdbe3309bdfa1bb46eb9a92a | 663,441 |
def has_no_e(w):
"""
Check if 'e' char is present in a given string.
Return True if it is not, or False otherwise.
"""
for l in w:
if l == 'e':
return False
return True | 2bfa3257e2b2616026dd1a5770da3ef51e93ec09 | 330,711 |
def circumcircle(u, v, w):
"""find the center/radius of circumcircle of triangle uvw"""
vu, wv, uw = (u - v), (v - w), (w - u)
cross = vu.crs(wv)
d = 2 * cross.dot(cross)
a = wv.dot(wv) * vu.dot(u - w) / d
b = uw.dot(uw) * (v - u).dot(wv) / d
c = vu.dot(vu) * uw.dot(w - v) / d
o = u * a + v * b + w * c
r = (u - o).mag()
return o, r | ee824a1ccc96663d1e347057938b1a6fc4da80ac | 42,010 |
def jefferson(votes, seats):
"""Apportion seats using the Jefferson method.
Known also as the D'Hondt method or Hagenbach-Bischoff method.
:param list votes: a list of vote counts
:param int seats: the number of seats to apportion
"""
allocated = [0] * len(votes)
while sum(allocated) < seats:
quotients = [1.0 * vote / (allocated[idx] + 1) for idx, vote in enumerate(votes)]
idx_max = quotients.index(max(quotients))
allocated[idx_max] += 1
return allocated | acc249637b0f244a0f1ff08c8c2091a713de46bb | 406,220 |
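# Worked example (illustrative, not part of the dataset entry above): 5 seats among
# vote counts [100, 80, 30]; the highest quotients go A, B, A, B, A, giving [3, 2, 0].
assert jefferson([100, 80, 30], 5) == [3, 2, 0]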
def MAVS(data1, data2):
"""
Mean Absolute Value Slope: the difference between MAVs in adjacent
segments.
Parameters
----------
data1: array-like
2D matrix of shape (time, data)
data2: array-like
2D matrix of shape (time, data) of subsequent segment to x1
Returns
-------
MAVSlope: 1D numpy array containing MAV for adjacent signals
Reference
---------
Hudgins, B., Parker, P., & Scott, R. N. (1993). A new strategy
for multifunction myoelectric control. IEEE Transactions on
Bio-Medical Engineering, 40(1), 82–94.
"""
MAV1Data = sum(abs(data1))/len(data1)
MAV2Data = sum(abs(data2))/len(data2)
MAVSlope = MAV2Data - MAV1Data
return MAVSlope | c721d1d1cf0bdf633307d80a52c8497c58e0bd8d | 306,345 |
def getImageOffset(img, pos, box=(156, 156)):
""" Determine the required image offset
Params:
img -- The PIL Image object
pos -- The (x,y) position
box -- The bounding box (x,y) position
Returns:
A tuple containing the offset coordinates (j,k)
"""
x_offset = max((box[0] - img.size[0]) / 2, 0)
y_offset = max((box[1] - img.size[1]) / 2, 0)
j = pos[0] + x_offset
k = pos[1] + y_offset
return (j, k) | 972e6929bab13c25aad1c26d7e21511439b7b479 | 417,770 |
import time
def elapsed(since):
"""Return the number of minutes elapsed since a time."""
return (time.time() - since) / 60 | 6e2b131e66fc153b96feea0ec8b0f8193d98b5cd | 484,582 |
def node_restore_all(self, flag=0) :
"""
Restores all devices from the configuration in ISY
args :
flag 0 or 1
Flag :
0 = All shall be restored from the configuration files in ISY
1 = Does not regenerate groups/scenes - good for testing
raise:
IsyResponseError : if Error in ISY response
"""
return self.soapcomm("RestoreDevices", flag=flag) | ef7f63ee26b9e08e23e3ac26971729e008322858 | 636,108 |
def convert_list(in_list):
"""Converts a list of strings to a printable list of object names"""
return ''.join(('[', ','.join(in_list), ']')) | 2d13f19baf2966df024aba0a212b2047e78626e0 | 492,110 |
import string
import random
def gen_random_string(n=24):
"""Returns a random n-length string, suitable for a password or random secret"""
# RFC 3986 section 2.3. unreserved characters (no special escapes required)
char_set = string.ascii_letters + string.digits + "-._~"
return "".join(random.choices(char_set, k=n)) | b3bd49ed5b89d886aefa7431b62d85b6f5ff1c60 | 688,889 |
def idn(x):
"""
Identity function (for use with getMapParams)
"""
return x | 5cd80ee1531dc28434cd4ea5ba3d43e350e338fc | 425,827 |
def job_as_parameter(f):
"""A decorator for job handlers that take the job as a parameter."""
f.job_as_parameter = True
return f | 5cb27b65cde8054c0ea8c192cb105afb6f6b6f33 | 337,206 |
def load_quotes(quotes_file) -> tuple:
"""Load quotes from file, return tuple of quotes."""
with open(quotes_file, 'r') as f:
build_quote = []
quotes = []
for line in f:
if line.isspace():
quotes.append(''.join(build_quote).strip())
build_quote = []
build_quote.append(line.replace("\n", "").strip() + " ")
# flush the final quote in case the file does not end with a blank line
final_quote = ''.join(build_quote).strip()
if final_quote:
quotes.append(final_quote)
return tuple(quotes) | e649806b6adc861c05f787e719d442bea54686cf | 145,550 |
import re
def replace_uuid(msg):
""" Replace UUID in `msg` with ``UUID``. """
pattern = '[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}'
return re.sub(pattern, 'UUID', msg) | f5dbf51133aa28ef74d98e0d98c85faa71dcc080 | 305,090 |
def union_boxes(box_a, box_b):
"""
Merges two rectangles.
Args:
box_a <tuple> : First rectangle (x, y, w, h)
box_b <tuple> : Second rectangle (x, y, w, h)
Returns:
merged_box <tuple> : Merged rectangle (x, y, w, h)
"""
ax1, ay1, aw, ah = box_a
ax2, ay2 = ax1+aw-1, ay1+ah-1
bx1, by1, bw, bh = box_b
bx2, by2 = bx1+bw-1, by1+bh-1
x1 = min(ax1, bx1)
y1 = min(ay1, by1)
x2 = max(ax2, bx2)
y2 = max(ay2, by2)
merged_box = [x1, y1, x2-x1+1, y2-y1+1]
return merged_box | cfe2ab4d604eded90b905a4b837afe8156b2484f | 529,805 |
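# Illustrative usage sketch (not part of the dataset entry above): merging an
# overlapping 2x2 box and 3x3 box.
assert union_boxes((0, 0, 2, 2), (1, 1, 3, 3)) == [0, 0, 4, 4]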
def add_base_args(parser):
"""Add the base nerf arguments to the parser
Args:
parser (ArgumentParser): the current ArgumentParser.
Returns:
ArgumentParser: the modified ArgumentParser for call chaining
"""
parser.add_argument(
"--perturb",
type=float,
default=1.0,
help="set to 0. for no jitter, 1. for jitter",
)
parser.add_argument(
"--raw_noise_std",
type=float,
default=0.0,
help="std dev of noise added to regularize sigma_a output, 1e0 recommended",
)
return parser | 490bae7816769b6c7970eea7f1a8f054fac049ed | 558,461 |
def mulmatvec3x3(m, vect):
"""
This function returns a 3D vector which consists of the 3D input
vector multiplied by a 3x3 matrix.
:rtype: double iterable
:return: 3D vector
:type m: double iterable
:param m: The matrix to multiply
:type vect: double iterable
:param vect: The vector - in the format [x,y,z]
(or [x,y,z,0] for affine transformations in an homogeneous space)
"""
r = [0.0, 0.0, 0.0]
r[0] = vect[0] * m[0][0] + vect[1] * m[1][0] + vect[2] * m[2][0]
r[1] = vect[0] * m[0][1] + vect[1] * m[1][1] + vect[2] * m[2][1]
r[2] = vect[0] * m[0][2] + vect[1] * m[1][2] + vect[2] * m[2][2]
return r | 9385eb0d14c2ee1bb6e80870c2c8f4a82b8d9ce7 | 523,169 |
def pysolr_formatter(credentials):
"""
Returns formatted Solr credentials for a pysolr-Solr connection.
Args:
credentials (dict):
The credentials dictionary from the relationships.
Returns:
(string) A formatted pysolr credential.
"""
return "http://{0}:{1}/{2}".format(credentials['ip'],
credentials['port'],
credentials['path']) | f36aa9c56998f6d0e8c800182e1a3dfae40f441c | 171,653 |
def setCopy( set ):
"""(shallow) copy a list that is used as a set"""
return set[:] | 5ccfcf3729d7bd77c173d9ba2aa90911e7c3fe25 | 594,226 |
def rgb255(r, g, b):
"""Define an RGB color with three values 0..255."""
return (r/255, g/255, b/255) | 505c012f53f99258c9ae19ff9afb398c436b8262 | 219,947 |
def equivalent(list1, list2):
""" Checks if two (possibly nested) lists are equivalent. Recursive.
Args:
list1, list2 (lists): The lists to compare
Returns:
bool: Whether the lists are equivalent
"""
# Base case
if not isinstance(list1, list) and not isinstance(list2, list):
return type(list1) == type(list2) and list1 == list2
# Exit case
elif not isinstance(list1, list) or not isinstance(list2, list) or len(list1) != len(list2):
return False
# Recursive case
else:
for i in range(len(list1)):
if not equivalent(list1[i], list2[i]):
return False # Looks redundant, but it isn't. We need to check for each.
# If we got through everything else. Will only happen in list cases.
return True | 136f2b63d35d473c9f53bd142045d8faf6b890f1 | 146,926 |
import torch
def get_IoU(predictions: torch.Tensor, labels: torch.Tensor, threshold: float = 0.5):
"""Code inspired by
https://www.kaggle.com/iezepov/fast-iou-scoring-metric-in-pytorch-and-numpy
"""
eps = 1e-6
# assert N x H x W
if len(predictions.shape) != 3:
predictions.unsqueeze_(0)
if len(labels.shape) != 3:
labels.unsqueeze_(0)
outputs = (predictions > threshold).int()
labels = labels.int()
# Will be zero if Truth=0 or Prediction=0
intersection = (outputs & labels).float().sum((1, 2))
# Will be zero if both are 0
union = (outputs | labels).float().sum((1, 2))
# We smooth our division to avoid 0/0
iou = (intersection + eps) / (union + eps)
return iou.mean() | e0189c98a09033f4ff80cedf9b638432c272edc8 | 648,534 |
def split_into_formatters(compound):
"""Split a possibly compound format string into segments.
>>> split_into_formatters('bold_underline_bright_blue_on_red')
['bold', 'underline', 'bright_blue', 'on_red']
"""
merged_segs = []
# These occur only as prefixes, so they can always be merged:
mergeable_prefixes = ['on', 'bright', 'on_bright']
for s in compound.split('_'):
if merged_segs and merged_segs[-1] in mergeable_prefixes:
merged_segs[-1] += '_' + s
else:
merged_segs.append(s)
return merged_segs | 00c3a5d314638047b4b53ed72bc27c3f1b8a00f1 | 290,053 |
def get_idx(prefix, itf):
"""
Gets the index of an interface string
>>> get_idx('et', 'et12')
12
>>> get_idx('ap', 'ap32')
32
"""
return int(itf[len(prefix) :]) | 0a4b1e49ad6c0a7e9a2a9ae1903480a3bf73d70e | 32,042 |
def restrict_available_forage(available_forage, management_threshold):
"""Reduce forage available for grazing.
The management threshold specifies the biomass that must be left
after herbivore offtake. Calculate the forage available for offtake by
herbivores according to its relative availability, restricting it to be
below the management threshold if necessary.
Parameters:
available_forage (list): list of class FeedType, where each FeedType
is characterized by its standing biomass and biomass available
to herbivores for offtake
management_threshold (float): biomass (kg/ha) required to be left
standing after offtake by herbivores
Returns:
modified available_forage, a list of class FeedType
"""
sum_biomass = 0.
for feed_type in available_forage:
sum_biomass += feed_type.biomass
actually_available = max(sum_biomass - management_threshold, 0)
for feed_type in available_forage:
feed_type.biomass_avail = (
feed_type.rel_availability * actually_available)
return available_forage | dbac8561dd00b8a477117580c151e7fd73e15dee | 170,051 |
def _get_roi_from_rect(rect):
"""
Returns the ROI from a rectangle, the rectangle can have the top and bottom flipped.
:param rect: Rect to get roi from
:return: x_min, y_min, x_max, y_max of the ROI
"""
x_min = min(rect.topLeft().x(), rect.bottomRight().x())
y_min = min(rect.topLeft().y(), rect.bottomRight().y())
x_max = max(rect.topLeft().x(), rect.bottomRight().x())
y_max = max(rect.topLeft().y(), rect.bottomRight().y())
return x_min, y_min, x_max, y_max | fba34bcedb68bfa90ae9700df5b23f3b7450b9b3 | 396,657 |
def serialize_forma(analysis, type):
"""Convert the output of the forma250 analysis to json"""
return {
'id': None,
'type': type,
'attributes': {
'areaHa': analysis.get('area_ha', None),
'areaHaLoss': analysis.get('area_ha_loss', None),
'alertCounts': analysis.get('alert_counts', None)
}
} | bc3b9d835e9477823554cfef1deb95d552a01741 | 277,460 |
def compute_runtime(length, buffer):
"""
Takes a length as a time delta and a buffer in seconds
Returns the number of milliseconds equal to length - buffer
"""
return (length.total_seconds() - buffer)*1000 | 4e139340ff48904a737936dcd54d087f60755bea | 120,681 |
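# Illustrative usage sketch (not part of the dataset entry above), assuming the
# length comes in as a datetime.timedelta:
from datetime import timedelta
assert compute_runtime(timedelta(seconds=90), 5) == 85000.0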
def is_ignored_file(f):
"""Check if this file has to be ignored.
Ignored files includes:
- .gitignore
- OS generated files
- text editor backup files
- XCF image files (Gimp)
"""
return f == '.gitignore' or f == '.DS_Store' or f == '.DS_Store?' or \
f == '.Spotlight-V100' or f == '.Trashes' or f == 'ehthumbs.db' or f == 'Thumbs.db' or \
f.startswith("._") or f.endswith(".swp") or f.endswith(".bak") or f.endswith("~") or \
f.endswith(".xcf") | 9ccea4940cce382d2166c86f40c45f6ce1c892b7 | 441,332 |
def item_path(item, program):
"""
More recent versions of Sublime Merge use a recent list that contains state
for the repository instead of just a path; this determines, based on an
item and the program in use, what the path inside it is.
"""
if program == "merge" and isinstance(item, dict):
return item.get("path")
return item | becb47d451d834b410b6ad4a85ba7f76df8b24ba | 416,480 |
def time_diff(t0, t1):
"""
Args:
:t0: start time in seconds
:t1: end time in seconds
Returns: string with time difference (i.e. t1-t0)
"""
minutes, seconds = divmod(t1 - t0, 60)
hours, minutes = divmod(minutes, 60)
return "%d hours, %d minutes, %d seconds" % (hours, minutes, seconds) | 9b1c179fbec8fa0b9dc5143cf3316b061bf5d5c8 | 50,490 |
import math
def floor(theNumber):
"""Returns math.floor(theNumber)."""
return math.floor(theNumber) | 69119eac9bcc21ed0cdb78a8f90b2441093a4e03 | 232,134 |
async def redirect(request):
"""Handler to do redirects using HTTP status code 307.
The url to redirect to must be given with a query parameter:
http://localhost/redirect?url=http://example.com
"""
url = request.querydict.get("url", "")
if url:
return 307, {"location": url}, "Redirecting"
else:
return 500, {}, "specify the URL using a query param" | f87fa1e67765e3d4ffcfdc77609d658c274128af | 386,753 |
def trim_hidden(inputs, hidden):
"""
In the case where the last batch is smaller than batch_size,
we will need to keep only the first N hidden states,
where N is the number of samples in the last batch
Args:
inputs: the inputs in the last batch size
hidden: the hidden state produced by the penultimate batch
Returns: hidden
the trimmed hidden state
"""
batch_size = inputs.size(0)
if isinstance(hidden, tuple):
hidden_size = hidden[0].size(1)
else:
hidden_size = hidden.size(1)
# do nothing
if batch_size == hidden_size:
return hidden
# trim the hidden state to the remaining samples in the batch
if isinstance(hidden, tuple):
hidden = (hidden[0][:, :batch_size, :].contiguous(),
hidden[1][:, :batch_size, :].contiguous())
else:
hidden = hidden[:, :batch_size, :].contiguous()
return hidden | c7230872edf3f0c65aa0c166bd6692cbe9567d29 | 564,996 |
def has_number(input_str):
"""Returns true if input_str contains any numbers."""
return any(char.isdigit() for char in input_str) | 9e01cd258dc648da3a554a444f2b7aaee33691f3 | 252,542 |
import shlex
def repr_command(argv):
"""Represent an argument array as a string.
Args:
argv: The arguments of the command.
Returns:
A string which could be pasted into a shell for execution.
"""
return ' '.join(shlex.quote(str(arg)) for arg in argv) | 23406785a208d35db8730345e8fa4074e854d3ad | 589,102 |
def get_url_for_alert(alert, alerts):
"""
Gets the url slug for an alert (given the global alerts object obtained from `Alerts.load()`.)
Note: The alert must be public to have a url slug.
Will return a date string in the style '3-jun-2021', unless there were already alerts that day, in which case it'll
append a 1-indexed counter on to the end eg '3-jun-2021-2'. Note that the first alert of the day never has a count.
"""
alerts_on_this_day = sorted([
other_alert
for other_alert in alerts.public
if other_alert.starts_at_date.as_local_date == alert.starts_at_date.as_local_date
])
if not alerts_on_this_day:
raise ValueError(f'Alert {alert.id} is not public so does not have a URL')
# if this is the first alert of the day (or the only alert of the day), then don't include a counter
if alert == alerts_on_this_day[0]:
return alert.starts_at_date.as_url
# count through the alerts that day until we find this one, so we know which counter to append
for i, other_alert in enumerate(alerts_on_this_day[1:], start=2):
if alert == other_alert:
return f'{alert.starts_at_date.as_url}-{i}'
raise ValueError(f"Couldn't find alert {alert.id} in public alerts for day") | 7f3468f8f07dcdcb4f7c23d76b37c519f5fa9639 | 528,139 |
def interpret(block: str):
"""
Interprets an element block, breaking it into element and number of that element.
:param block: string block describing an element
:return: composition dictionary
:rtype: dict
"""
if block[0].isdigit(): # if isotope number is encountered
return {block: 1}
else:
ele = block[0]
i = 0
num = ''
while i < len(block) - 1:
i += 1
if block[i].isdigit(): # add digits
num += block[i]
else:
ele += block[i]
if num == '':
num = 1
else:
num = int(num)
return {ele: num} | 24a8ee49488099ce86be3518565510778429170a | 474,543 |
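# Illustrative usage sketches (not part of the dataset entry above): an element block
# with and without an explicit count.
assert interpret("H2") == {'H': 2}
assert interpret("Fe") == {'Fe': 1}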
def get_tuple_fragmentation_fn(pegasus_graph):
"""
Returns a fragmentation function that is specific to pegasus_graph. This fragmentation function,
fragment_tuple(..), takes in a list of Pegasus qubit coordinates and returns their corresponding
K2,2 Chimera fragment coordinates.
Details on the returned function, fragment_tuple(list_of_pegasus_coordinates):
Each Pegasus qubit is split into six fragments. If edges are drawn between adjacent
fragments and drawn between fragments that are connected by an existing Pegasus coupler, we
can see that a K2,2 Chimera graph is formed.
The K2,2 Chimera graph uses a coordinate system with an origin at the upper left corner of
the graph.
y: number of vertical fragments from the top-most row
x: number of horizontal fragments from the left-most column
u: 1 if it belongs to a horizontal qubit, 0 otherwise
r: fragment index on the K2,2 shore
Parameters
----------
pegasus_graph: networkx.graph
A pegasus graph
Returns
-------
fragment_tuple(pegasus_coordinates): a function
A function that accepts a list of pegasus coordinates and returns a list of their
corresponding K2,2 Chimera coordinates.
"""
horizontal_offsets = pegasus_graph.graph['horizontal_offsets']
vertical_offsets = pegasus_graph.graph['vertical_offsets']
# Note: we are returning a fragmentation function rather than fragmenting the pegasus
# coordinates ourselves because:
# (1) We don't want the user to have to deal with Pegasus horizontal/vertical offsets directly.
# (i.e. Don't want fragment_tuple(pegasus_coord, vertical_offset, horizontal_offset))
# (2) We don't want the user to have to pass entire Pegasus graph each time they want to
# fragment some pegasus coordinates.
# (i.e. Don't want fragment_tuple(pegasus_coord, pegasus_graph))
def fragment_tuple(pegasus_coords):
fragments = []
for u, w, k, z in pegasus_coords:
# Determine offset
offset = horizontal_offsets if u else vertical_offsets
offset = offset[k]
# Find the base (i.e. zeroth) Chimera fragment of this pegasus coordinate
fz0 = (z*12 + offset) // 2 # first fragment's z-coordinate
fw = (w*12 + k) // 2 # fragment w-coordinate
fk = k&1 # fragment k-index
base = [fw, 0, u, fk] if u else [0, fw, u, fk]
# Generate the six fragments associated with this pegasus coordinate
for fz in range(fz0, fz0 + 6):
base[u] = fz
fragments.append(tuple(base))
return fragments
return fragment_tuple | eba885f8e87289f46f91c7b23831b209d88fd61a | 149,014 |
def price_table_to_price_mapping(table):
"""Convert price table to a dict mapping from region to instance type
to instance info
"""
region_price_mapping = {}
for region_table in table['config']['regions']:
types = {}
for type_category in region_table['instanceTypes']:
for size in type_category['sizes']:
types[size['size']] = size
region_price_mapping[region_table['region']] = types
return region_price_mapping | d39887b82be8ae37a20d73c830e7ef724553600e | 23,413 |
def find_x_overlap(rect1, rect2):
"""
Return left_x and width of overlapping x of two rects
"""
r1left = rect1['left_x']
r1right = r1left + rect1['width']
r2left = rect2['left_x']
r2right = r2left + rect2['width']
highest_start_point = r1left if r1left >= r2left else r2left
lowest_end_point = r1right if r1right <= r2right else r2right
if highest_start_point < lowest_end_point:
return highest_start_point, lowest_end_point - highest_start_point
else:
return None | 8773615aa2316defd63bf479bc5b9efdfee0974b | 473,364 |
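# Illustrative usage sketch (not part of the dataset entry above): two 5-wide rects
# starting at x=0 and x=3 overlap on [3, 5), i.e. left_x 3 with width 2.
assert find_x_overlap({'left_x': 0, 'width': 5}, {'left_x': 3, 'width': 5}) == (3, 2)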
import requests
import json
def isdisambiguation(pagename):
"""Given a wikipedia page name, returns True if that page is a disambiguation page and False otherwise
:param pagename: The name of a wikipedia page
:type pagename: String
:return: True if pagename is the name of a wikipedia page in the 'All disambiguation pages'
category, otherwise False
:rtype: Boolean
"""
pagenamehyphen = pagename.replace(' ', '_')
query = requests.get(f'https://en.wikipedia.org/w/api.php?action=query&format=json&titles={pagenamehyphen}&prop=categories')
data = json.loads(query.text)
for category in data['query']['pages'][next(iter(data['query']['pages'].keys()))]['categories']:
catname = category['title']
if catname == "Category:All disambiguation pages":
return True
return False | 466d481523b4e86a4562644798047a9a4544cab9 | 642,330 |
def __get_inline_formfield_for_dbfield(inline, field, widget):
"""
Gets the new formfield_for_dbfield function for an inline
"""
old_formfield_for_dbfield = inline.formfield_for_dbfield
def formfield_for_dbfield(db_field, **kwargs):
if db_field.name == field:
kwargs['widget'] = widget
return old_formfield_for_dbfield(db_field, **kwargs)
return formfield_for_dbfield | e07808c19409e178301ba5758b765c1d39c068f6 | 340,241 |
def cuadrado(num):
"""
(num) -> num
Computes the square of the number
>>> cuadrado(2)
4
>>> cuadrado(-2)
4
>>> cuadrado(0)
0
:param num: int or float, the number to square
:return: num, the result of squaring num
"""
return num ** 2 | a4f16640f01cf8fb225f031229dff07f33832fc5 | 334,988 |
import re
def valid_height_cm(height_string):
"""
Check if height is valid in cm
"""
match = re.match(r"^(\d*)(cm)$", height_string)
if match:
height_cm = int(match.group(1))
if 150 <= height_cm and height_cm <= 193:
return True
return False | 23a7d35ad7fa1153134339df80c06093226f5562 | 302,631 |
def is_eof(line):
"""readline only ever returns an empty line at EOF"""
return line == "" | 0e9d8524a4ce08d8810d72d9d18c24f40a5f6257 | 226,108 |
import torch
def init_net(net, init_type='normal', init_gain=0.02, gpu_ids=[]):
"""Initialize a network: 1. register CPU/GPU device (with multi-GPU support); 2. initialize the network weights
Parameters:
net (network) -- the network to be initialized
init_type (str) -- the name of an initialization method: normal | xavier | kaiming | orthogonal
gain (float) -- scaling factor for normal, xavier and orthogonal.
gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2
Return an initialized network.
"""
if len(gpu_ids) > 0:
assert(torch.cuda.is_available())
net.to(gpu_ids[0])
net = torch.nn.DataParallel(net, gpu_ids) # multi-GPUs
# init_weights(net, init_type, init_gain=init_gain)
return net | dc6fdba712c2f31aa19f6ba75338a6ad58bfc642 | 330,156 |
def get_id(record):
"""Get the ID from a record.
Args:
record
A record returned by AWS.
Returns:
The ID of the subnet.
"""
return record["SubnetId"] | a977b55441b4d707bbf221aeae722a3d84809fa4 | 160,229 |
import math
def sph2cart(theta, phi, r=1):
"""Converts spherical coords to cartesian"""
x = r * math.sin(theta) * math.cos(phi)
y = r * math.sin(theta) * math.sin(phi)
z = r * math.cos(theta)
vect = [x, y, z]
return vect | cf793f0a81baefa80c96863d1b765fdd8d30b637 | 673,643 |
def tapisize(fieldKeyName):
"""Transforms a string into a Tapis query parameter
"""
return fieldKeyName.lower() | cc8032a6cc9e822193430134bb33da8aef74cf06 | 6,963 |
def less_equals(x):
"""Less than or equal operator.
>>> less_equals(1)(1)
True
>>> less_equals(1)(3)
False
"""
def less_equals(y):
return y <= x
return less_equals | 52b6caef10b9e9edbcb54bf17c82afdc8ef26aad | 290,030 |
def seq_sign_changes(arr, i):
"""
Checks whether a sequence's i'th element has different sign from the i+1'th
element
"""
return arr[i]*arr[i+1] < 0 | 08190728804232e4abe2042267fb18b29bee6eac | 351,099 |
def remove_empty(s):
"""\
Remove empty strings from a list.
>>> a = ['a', 2, '', 'b', '']
>>> remove_empty(a)
['a', 2, 'b']
"""
while True:
try:
s.remove('')
except ValueError:
break
return s | 98778e4cc90f11b9b74ac6d26b203cbfc958fd7b | 708,388 |
def find_multiple(x, n):
"""Takes integers x and n. Returns the smallest multiple of n, m where
m >= x.
"""
while n < x:
n = n << 1
return n | 5c4c7291b591aac8154ab4fee97e375c582376ad | 436,189 |
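# Illustrative usage sketch (not part of the dataset entry above): n is doubled, so for
# x=5, n=2 the result is 8 (2 -> 4 -> 8), not the smallest plain multiple 6.
assert find_multiple(5, 2) == 8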
import torch
def _get_sfm_calibration_matrix(
N,
device,
focal_length,
principal_point,
orthographic: bool = False,
image_size=None,
) -> torch.Tensor:
"""
Returns a calibration matrix of a perspective/orthographic camera.
Args:
N: Number of cameras.
focal_length: Focal length of the camera in world units.
principal_point: xy coordinates of the center of
the principal point of the camera in pixels.
orthographic: Boolean specifying if the camera is orthographic or not
image_size: (Optional) Specifying the image_size = (imwidth, imheight).
If not None, the camera parameters are assumed to be in screen space
and are transformed to NDC space.
The calibration matrix `K` is set up as follows:
.. code-block:: python
fx = focal_length[:,0]
fy = focal_length[:,1]
px = principal_point[:,0]
py = principal_point[:,1]
for orthographic==True:
K = [
[fx, 0, 0, px],
[0, fy, 0, py],
[0, 0, 1, 0],
[0, 0, 0, 1],
]
else:
K = [
[fx, 0, px, 0],
[0, fy, py, 0],
[0, 0, 0, 1],
[0, 0, 1, 0],
]
Returns:
A calibration matrix `K` of the SfM-conventioned camera
of shape (N, 4, 4).
"""
if not torch.is_tensor(focal_length):
focal_length = torch.tensor(focal_length, device=device)
if focal_length.ndim in (0, 1) or focal_length.shape[1] == 1:
fx = fy = focal_length
else:
fx, fy = focal_length.unbind(1)
if not torch.is_tensor(principal_point):
principal_point = torch.tensor(principal_point, device=device)
px, py = principal_point.unbind(1)
if image_size is not None:
if not torch.is_tensor(image_size):
image_size = torch.tensor(image_size, device=device)
imwidth, imheight = image_size.unbind(1)
# make sure imwidth, imheight are valid (>0)
if (imwidth < 1).any() or (imheight < 1).any():
raise ValueError(
"Camera parameters provided in screen space. Image width or height invalid."
)
half_imwidth = imwidth / 2.0
half_imheight = imheight / 2.0
fx = fx / half_imwidth
fy = fy / half_imheight
px = -(px - half_imwidth) / half_imwidth
py = -(py - half_imheight) / half_imheight
K = fx.new_zeros(N, 4, 4)
K[:, 0, 0] = fx
K[:, 1, 1] = fy
if orthographic:
K[:, 0, 3] = px
K[:, 1, 3] = py
K[:, 2, 2] = 1.0
K[:, 3, 3] = 1.0
else:
K[:, 0, 2] = px
K[:, 1, 2] = py
K[:, 3, 2] = 1.0
K[:, 2, 3] = 1.0
return K | b741cb888038fcc71ceafd86b18a203682ca21d7 | 502,880 |
from collections import OrderedDict
import json
def load(infile):
"""Return a dictionary of settings from the given file."""
with open(infile, 'rb') as file:
settings = OrderedDict(json.load(file))
return settings | a901df13bf70511617723e9cb2a65eaf24a14297 | 194,383 |
def str_to_regex(string):
"""Convert a string to a regex pattern with special treatment of an empty string."""
if string:
return string
return "a^" # This pattern should never match | 6f7868d7a1b47d1e0a6b50cd6a53d0e398170af9 | 527,351 |
def is_subset_of_dicts(src_dict, dest_dict):
"""Check is all items of one dictionary 'src_dict' is subset of items of
another dictionary 'dest_dict', where 'src_dict' should has same or less
length then 'dest_dict'.
Examples:
({"b": 2, "a": 1}, {"a": 1, "b": 2, "c": 4}) = True
({"a": 1, "b": 2, "c": 4}, {"b": 2, "a": 1}) = False
"""
return all(item in dest_dict.items() for item in src_dict.items()) | 993991d1322bf7da0ae0972aa454992f6f9e7ce6 | 610,996 |
import re
def simple_preproc(text):
"""
replace digits with 0 and lowercase text
"""
return re.sub(r"\d", "0", text.lower()) | d6e400ac50059c85976f5dc4ff3c62a2defc4f5e | 358,458 |
def tile_to_quadkey(z, x, y):
"""Transform tile coordinates to a Bing quadkey.
Slippy map tiles cache numeration starts from 0 level with one tile. On 1 level four tiles etc
Bing uses quadkey tile coordinates, so minimal cache level is 1 (four tiles). Single tile at zero level not addressed.
https://docs.microsoft.com/en-us/bingmaps/articles/bing-maps-tile-system
https://github.com/buckhx/QuadKey/blob/master/quadkey/tile_system.py
Examples
--------
>>> tile_to_quadkey(1,0,0)
'0'
>>> tile_to_quadkey(4, 9, 5)
'1203'
>>> tile_to_quadkey(16, 38354, 20861)
'1203010313232212'
Parameters
----------
:param int z: zoom level, starts from zero
:param int x: tile x coordinate
:param int y: tile y coordinate
:return: Quadkey string
:rtype: str
"""
quadkey = ""
for i in range(z):
bit = z - i
digit = ord('0')
mask = 1 << (bit - 1)
if (x & mask) != 0:
digit += 1
if (y & mask) != 0:
digit += 2
quadkey += chr(digit)
return quadkey | d438360e986bdf317f2e8672a16ebbe40ce12d38 | 589,243 |
import click
def format(text):
"""Format all OK,FAIL,PR,REPO words
:param text: word
:type text: string
:raises Exception: Unknown text
:return: formated click style text
:rtype: string
"""
text = text.lower()
if text == "ok":
return click.style('OK', fg='green', bold=True)
elif text == "fail":
return click.style('FAIL', fg='red', bold=True)
elif text == "pr":
return click.style('PR', bold=True)
elif text == "repo":
return click.style('REPO', bold=True)
else:
raise Exception("Unkown text") | 2ac991685f3b70615ba9ef253724f200f4c8e79e | 630,813 |
import torch
def sparse2tensor(sparse_data):
"""Convert sparse csr matrix to pytorch tensor."""
return torch.FloatTensor(sparse_data.toarray()) | 1688204cf6b2da4424c5c00dc4dae056c2625208 | 528,134 |
def tobin(deci_num, len=32):
"""
Given a decimal number, returns a string bitfield of length = len
Example: given deci_num = 1 and len = 10, it return 0000000001
"""
bitstr = "".join(map(lambda y: str((deci_num >> y) & 1), range(len - 1, -1, -1)))
return bitstr | d505a3ad247e4c5508a97f71898617cfcfd8cca5 | 114,542 |
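# Illustrative usage sketch (not part of the dataset entry above): 5 as an 8-bit field.
assert tobin(5, 8) == "00000101"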
def format_byte(size: int, decimal_places: int = 3):
"""
Formats a given size and outputs a string equivalent to B, KB, MB, or GB
"""
if size < 1e03:
return f"{round(size, decimal_places)} B"
if size < 1e06:
return f"{round(size / 1e3, decimal_places)} KB"
if size < 1e09:
return f"{round(size / 1e6, decimal_places)} MB"
return f"{round(size / 1e9, decimal_places)} GB" | 3265fe250e6e7b0ce4178356b4ff079a7a1609ef | 555,273 |
def get_params_autogen(term):
"""Sets the parameters for the API call for the initial user-entered term"""
params = {
"action": "parse",
"prop": "links",
"page": term,
"format": "json",
}
# Parameter set to query the Wikipedia page for a given term and retrieve up to 250 links to other articles -
# namespace 0 - from that page in JSON format.
return params | c5a49a114b153d129e9427db59c479c2b2341333 | 61,760 |
def inplace_buffer_merge(buffer, data, timesteps, model):
"""
Merges the new text from the current frame with the previous text contained in the buffer.
The alignment is based on a Longest Common Subsequence algorithm, with some additional heuristics leveraging
the notion that the chunk size is >= the context window. In case this assumption is violated, the results of the merge
will be incorrect (or at least yield a worse WER overall).
"""
# If delay timesteps is 0, that means no future context was used. Simply concatenate the buffer with new data.
if timesteps < 1:
buffer += data
return buffer
# If buffer is empty, simply concatenate the buffer and data.
if len(buffer) == 0:
buffer += data
return buffer
# Concat data to buffer
buffer += data
return buffer | f5ef530c5e9c1b0bc3bc3f92ac41d157ef834b4b | 489,644 |
import decimal
def convert_decimal_to_int(obj):
"""
Amazon DynamoDB returns numbers in Python Decimal format, which are converted to
float by the JSON serializer. This function instead converts them to int.
Pass this function to the `json.dumps` function for custom conversion.
:param obj: The current object being deserialized.
:return: When obj is a Decimal, return it as an int.
"""
if isinstance(obj, decimal.Decimal):
return int(obj) | bf28e3538bbf0a95c5492492169dd4f5df43d9fd | 252,043 |
def get_current_connections(session):
"""Retrieves open connections using the the given session"""
# Use Show process list to count the open sesions.
res = session.sql("SHOW PROCESSLIST").execute()
rows = res.fetch_all()
connections = {}
for row in rows:
if row.get_string('User') not in connections:
connections[row.get_string('User')] = [row.get_string('Host')]
else:
connections[row.get_string('User')].append(row.get_string('Host'))
return connections | 13db61d46cff29c757316b5f577aa7f8df60eb82 | 456,365 |
def checkCriteria(criteria, failed = False):
"""
Prompts the user whether a criterion should be enabled and returns True or False
"""
if failed == False:
# gathers the input from the user
resp = input('Would you like ' + criteria + ' to be enabled: ').lower()
# returns the necessary end result until specifications are met
if resp == 'yes' or resp == 'y':
return True
elif resp == 'no' or resp == 'n':
return False
else:
return checkCriteria(criteria, True)
else:
# gathers the input from the user
resp = input('Please put only yes/y or no/n: ').lower()
# returns the necessary end result until specifications are met
if resp == 'yes' or resp == 'y':
return True
elif resp == 'no' or resp == 'n':
return False
else:
return checkCriteria(criteria, True) | 1bd22460796b6581a6a334977cd4dedaf2da80fa | 277,098 |
def find_water_volume_blocked_in_towers(towers):
"""
Solution for finding water volume occupied in towers of different heights.
Assumes that each tower is 1 unit wide.
:param towers: list of tower heights
:return: unit water occupied
"""
res = 0
n = len(towers)
for_idx = 0
# traverse forward
while (for_idx < n):
j = for_idx + 1
sel_towers = []
while (j < n and towers[for_idx] >= towers[j]):
sel_towers.append(towers[j])
j += 1
if j < n:
for t_h in sel_towers:
res += abs(towers[for_idx] - t_h)
for_idx = j
back_idx = n - 1
# traverse backward
while(back_idx > -1):
j = back_idx - 1
sel_towers = []
while (j > -1 and towers[back_idx] >= towers[j]):
sel_towers.append(towers[j])
j -= 1
if j > -1:
for t_h in sel_towers:
res += abs(towers[back_idx] - t_h)
back_idx = j
return res | fbfdc0fe4745a9e7e163e0b7152d6a143147a7de | 350,974 |
def strip_integer_in_string(name):
"""Strip interger from string"""
i = 0
for i in range(len(name) - 1, - 1, -1):
if not name[i].isdigit():
i += 1
break
return name[0:i] | 8cc8cf784dd231e40844d1087f0681ae53fe9af8 | 274,254 |
import torch
def svm_loss(x, y):
"""
Computes the loss and gradient using for multiclass SVM classification.
Inputs:
- x: Input data, of shape (N, C) where x[i, j] is the score for the jth
class for the ith input.
- y: Vector of labels, of shape (N,) where y[i] is the label for x[i] and
0 <= y[i] < C
Returns a tuple of:
- loss: Scalar giving the loss
- dx: Gradient of the loss with respect to x
"""
N = x.shape[0]
correct_class_scores = x[torch.arange(N), y]
margins = (x - correct_class_scores[:, None] + 1.0).clamp(min=0.)
margins[torch.arange(N), y] = 0.
loss = margins.sum() / N
num_pos = (margins > 0).sum(dim=1)
dx = torch.zeros_like(x)
dx[margins > 0] = 1.
dx[torch.arange(N), y] -= num_pos.to(dx.dtype)
dx /= N
return loss, dx | 5f36dc353c4f6e26b7ae89e5996018ae0f5effb3 | 572,105 |
def count_sheeps(arrayOfSheeps: list) -> int:
"""
Consider an array of sheep where some sheep
may be missing from their place. We need a
function that counts the number of sheep
present in the array (true means present).
Hint: Don't forget to check for bad values
like null/undefined
:param arrayOfSheeps:
:return:
"""
return 0 if arrayOfSheeps is None \
else arrayOfSheeps.count(True) | b229315fd9bcc6e7ec566d926d86893e9501b03a | 450,250 |
import string
def str_to_key(str1:str) -> str:
"""Convert a string to a string usable as a key"""
# Remove any trailing white space
str1 = str1.strip()
# Add underscore if 1st character is a number
if str1[0].isdigit():
str1 = '_' + str1
# Replace any non-letters with '_'
_strings = [t if t in string.ascii_letters + string.digits else '_' for t in str1]
ans = "".join(_strings)
return ans | 6bf67c7d8856126d34af9898ab0d29f34bd57b88 | 540,549 |
def get_params(model):
"""Retrieves list of all the named parameters in a model.
Arguments:
model::torch.nn.Module- Ideally the deep learning model, but can be a set of layers/layer too.
Returns:
param_list::list- List of all the named parameters in the model.
"""
return [(name, params) for name, params in model.named_parameters() if params.requires_grad] | 0ed9b604e7215c82f813ee962018fee7c0d0b450 | 388,113 |
def get_os_data_from_box_title(box_title: str) -> tuple:
"""
Gets OS name and version from a properly formatted box title.
Returns:
(str, str): OS name, OS version
"""
# Split box title into [os name, os version]
box_title_elements = box_title.split(" v")
return box_title_elements[0], box_title_elements[1] | a103a4c5690d8a7c544741671a8e6221618bda94 | 623,780 |
def biopdbresid_to_pdbresseq(biopdb_residueid,ignore_insertion_codes=False):
"""
Given a Bio.PDB Residue id tuple (hetatm, resseqnum, icode), return
the PDB residue sequence number string consisting of the sequence
number and the insertion code, if not blank.
Parameters:
biopdb_residueid - tuple (hetatm, resseqnum, icode) from Residue.get_id()
ignore_insertion_codes - If True, a hack to make it work with
PMML (only) which does not report insertion codes
unlike DSSP and STRIDE
Return value:
string residue PDB sequence number e.g. '60' or '60A'.
"""
# Residue.get_id() gives tuple (hetatm, resseqnum, icode)
res_seq = str(biopdb_residueid[1])
if not ignore_insertion_codes:
if biopdb_residueid[2] != ' ':
res_seq += biopdb_residueid[2]
return res_seq | 4bb023feb7bbca24f514e041a657752f34d533e0 | 51,036 |