content
stringlengths 39
9.28k
| sha1
stringlengths 40
40
| id
int64 8
710k
|
---|---|---|
def _GetCurrentCommand(trace=None, include_separators=True):
    """Return the current command string for generating help text.

    Args:
        trace: Optional trace object exposing GetCommand(); when absent the
            command is unknown.
        include_separators: Forwarded to trace.GetCommand().

    Returns:
        The current command string, or '' when no trace is available.
    """
    if not trace:
        return ''
    return trace.GetCommand(include_separators=include_separators)
import hashlib
def activity_hash(dataset):
    """Return the MD5 hex digest that uniquely identifies an activity.

    The digest is computed over the lower-cased concatenation of these
    fields, in order: name, reference product, unit, location, start date,
    end date. A missing field contributes an empty string.
    """
    parts = []
    for field in ("name", "reference product", "unit", "location",
                  "start date", "end date"):
        parts.append(dataset.get(field, '').lower())
    digest_input = "".join(parts)
    return hashlib.md5(digest_input.encode('utf-8')).hexdigest()
import math
def read_file(input_file: str) -> dict:
    """
    Read the file and extract relevant data
    Parameters
    ----------
    input_file : string
        HiTrack filename
    Returns
    -------
    data: dict of lists of lists
        {'gps': list of lists, 'alti': list of lists, 'hr': list of lists, 'cad': list of lists}
    """
    def _normalize_timestamp(timestamp: float) -> float:
        """ Normalize the timestamp
        Timestamps taken from different devices can have different values. Most common are seconds
        (i.e. t=1.543646826E9) or microseconds (i.e. t=1.55173212E12).
        This method implements a generic normalization function that transform all values to valid
        unix timestamps (integer with 10 digits).
        """
        # Order of magnitude of the timestamp; 9 corresponds to a 10-digit unix time.
        oom = int(math.log10(timestamp))
        if oom == 9:
            return timestamp
        # Scale by the power of ten needed to reach 10 digits (handles both
        # microsecond-style values, oom > 9, and too-small values, oom < 9).
        divisor = 10 ** (oom - 9) if oom > 9 else 0.1 ** (9 - oom)
        return timestamp / divisor
    print('---- Input File ----')
    print('reading: ', end='')
    try:
        """ The lap list will contain lap data will contain start-stop times between pauzes identified in
        the location records. These are required to generate the laps in the output TCX file later.
        """
        # Each record becomes a 7-slot list: time, lat, long, alti, dist, hr, cad;
        # slots not present in the record type are filled with ''.
        data = {'gps': [], 'alti': [], 'hr': [], 'cad': [], 'lap': []}
        with open(input_file) as f:
            lap_start_stop = []
            lap_start_stop.append(0)  # Start time of lap
            lap_start_stop.append(0)  # Stop time of lap
            for line in f:
                # Loop over file line by line
                holding_list = []
                if line[0:6] == 'tp=lbs':  # Location lines
                    # x is the index of the '='-separated token holding the wanted
                    # value; 0 marks a slot this record type does not provide.
                    for x in [6,3,4,0,0,0,0]:  # time, lat, long, [alti], [dist], [hr], [cad]
                        if x == 0:
                            holding_list.append('')  # Fill in blanks with blank
                        else:
                            holding_list.append(float(line.split('=')[x].split(';')[0]))
                    """ Do not try to normalize time for 'Pauze' records.
                    E.g. tp=lbs;k=<a number>;lat=90.0;lon=-80.0;alt=0.0;t=0.0;
                    Recognized by time = 0, lat = 90, long = -80
                    """
                    if holding_list[0] != 0 and holding_list[1] != 90 and holding_list[2] != -80:
                        holding_list[0] = _normalize_timestamp(holding_list[0])
                        data['gps'].append(holding_list)
                        if lap_start_stop[0] == 0:
                            # First valid time for new lap. Store it in start time (index 0).
                            lap_start_stop[0] = holding_list[0]
                        if lap_start_stop[1] < holding_list[0]:
                            # Later stop time for current lap. Store it in stop time (index 1).
                            lap_start_stop[1] = holding_list[0]
                    else:
                        """ Pauze record detected.
                        E.g. tp=lbs;k=<a number>;lat=90.0;lon=-80.0;alt=0.0;t=0.0;
                        Recognized by time = 0, lat = 90, long = -80
                        Add the record to the gps data list. When generating the TCX XML this record can be
                        used to create a new 'lap' with start time the time of the next record (if any, e.g.
                        when workout was first pauzed and then stopped without resuming.)
                        Store lap record in data and create a new one.
                        """
                        data['lap'].append(lap_start_stop)
                        lap_start_stop = []
                        lap_start_stop.append(0)
                        lap_start_stop.append(0)
                elif line[0:6] == 'tp=h-r':  # Heart-rate lines
                    for x in [2,0,0,0,0,3,0]:  #time, [lat], [long], [alti], [dist], hr, [cad]
                        if x == 0:
                            holding_list.append('')
                        elif x == 2:
                            # Timestamp field: parse via float first (may be in
                            # scientific notation), then truncate to int.
                            holding_list.append(int(float(line.split('=')[x].split(';')[0])))
                        else:
                            holding_list.append(int(line.split('=')[x].split(';')[0]))
                    holding_list[0] = _normalize_timestamp(holding_list[0])
                    data['hr'].append(holding_list)
                elif line[0:6] == 'tp=s-r':  # Cadence lines
                    for x in [2,0,0,0,0,0,3]:  #time, [lat], [long], [alti], [dist], [hr], cad
                        if x == 0:
                            holding_list.append('')
                        elif x == 2:
                            holding_list.append(int(float(line.split('=')[x].split(';')[0])))
                        else:
                            holding_list.append(int(line.split('=')[x].split(';')[0]))
                    holding_list[0] = _normalize_timestamp(holding_list[0])
                    data['cad'].append(holding_list)
                elif line[0:7] == 'tp=alti':  # Altitude lines
                    for x in [2,0,0,3,0,0,0]:  #time, [lat], [long], alti, [dist], [hr], [cad]
                        if x == 0:
                            holding_list.append('')
                        elif x == 2:
                            holding_list.append(int(float(line.split('=')[x].split(';')[0])))
                        else:
                            # Altitude values are kept as float, unlike hr/cad.
                            holding_list.append(float(line.split('=')[x].split(';')[0]))
                    holding_list[0] = _normalize_timestamp(holding_list[0])
                    data['alti'].append(holding_list)
        """ Save (last) lap data. When the exercise wasn't pauzed and/or no pauze/stop record is generated as the last
        location record, store the single lap record here.
        """
        if lap_start_stop[0] != 0:
            data['lap'].append(lap_start_stop)
        # Sort GPS data by date for distance computation
        data['gps'] = sorted(data['gps'], key=lambda x : x[0])
    except:
        # NOTE(review): bare except also swallows KeyboardInterrupt and hides the
        # actual parse error; consider `except Exception` plus logging the cause.
        print('FAILED')
        exit()
    print('OKAY')
    return data
import re
def strip_html_tags(text):
    """
    Strip HTML tags from text generated from Markdown script.

    Handles opening tags with attributes (e.g. ``<a href="...">``) and
    self-closing tags, which the previous ``<\\w+>|</\\w+>`` pattern missed.

    :param text: text in HTML
    :return: text without HTML tags
    """
    # </? — optional closing slash; \w+ — tag name; [^>]* — any attributes.
    html_pattern = re.compile(r'</?\w+[^>]*>')
    return html_pattern.sub('', text)
def labelwell(platemap, labelby, iterrange, **kwargs):
    """Return the formatted label at row `iterrange` of column `labelby`.

    :param platemap: Platemap that contains the required labels
    :type platemap: pandas dataframe
    :param labelby: Column to label by, e.g. 'Compound', 'Protein', 'Type'
    :type labelby: str
    :param iterrange: Row index to read, typically within the platemap size
    :type iterrange: int
    :param **kwargs: `str_format` (str) selects scientific notation
        ('scinot') or one-decimal formatting ('percent')
    :return: formatted label string
    :rtype: str
    """
    value = platemap[labelby].iloc[iterrange]
    str_format = kwargs.get('str_format')
    if str_format == 'scinot':
        # Scientific notation with two decimals.
        return "%.2E" % value
    if str_format == 'percent':
        # One decimal place (percentage-style display).
        return "%.1f" % value
    # Default: stringify and break multi-word labels across lines.
    return str(value).replace(" ", "\n")
def sum_abs_of_all(sequence):
    """
    Return the sum of the absolute values of the numbers in `sequence`.

    Side effects: None.
    Examples:
        sum_abs_of_all([5, -1, 10, 4, -33]) returns 53
        sum_abs_of_all([10, -30, -20]) returns 60
    Type hints:
        :type sequence: list or tuple (of numbers)
    """
    return sum(abs(number) for number in sequence)
def to_vintagestory_axis(ax):
    """Convert a Blender-space axis name to Vintage Story space:
    X -> Z, Y -> X, Z -> Y.

    Returns None when `ax` names none of the three axes.
    """
    for blender_axis, vs_axis in (("X", "Z"), ("Y", "X"), ("Z", "Y")):
        if blender_axis in ax:
            return vs_axis
    return None
def palette_val(palette):
    """Convert a 0-255 palette to matplotlib's 0-1 float palette.

    Args:
        palette List[tuple]: A list of color tuples.
    Returns:
        List[tuple[float]]: A list of RGB matplotlib color tuples.
    """
    return [tuple(channel / 255 for channel in color) for color in palette]
def _get_persons(event):
    """Return a comma-separated string of public names of the event's persons."""
    return ', '.join(person['public_name'] for person in event['persons'])
def repr_long_list(seq):
    """Return repr(seq), abbreviated with an ellipsis when len(seq) >= 8.

    >>> repr_long_list(list(range(100)))
    '[0, 1, 2, ..., 98, 99]'
    """
    if len(seq) >= 8:
        # First three elements without the closing bracket...
        head = repr(seq[:3])[:-1]
        # ...last two elements without the opening bracket.
        tail = repr(seq[-2:])[1:]
        return head + ', ..., ' + tail
    return repr(seq)
def _rnl_daily(tmax, tmin, ea, fcd):
    """Daily net long-wave radiation (Eq. 17).

    Parameters
    ----------
    tmax : ee.Image or ee.Number
        Daily maximum air temperature [C].
    tmin : ee.Image or ee.Number
        Daily minimum air temperature [C].
    ea : ee.Image or ee.Number
        Actual vapor pressure [kPa].
    fcd : ee.Image or ee.Number
        Cloudiness fraction.

    Returns
    -------
    ee.Image or ee.Number
        Daily net long-wave radiation [MJ m-2 d-1], same type as "tmax".

    Notes
    -----
    rnl = 4.901E-9 * fcd * (0.34 - 0.14 * sqrt(ea)) *
          0.5 * ((tmax + 273.16) ** 4 + (tmin + 273.16) ** 4))
    """
    # Mean of the fourth powers of the absolute max/min temperatures.
    temperature_term = (
        tmax.add(273.16).pow(4)
        .add(tmin.add(273.16).pow(4))
        .multiply(0.5)
    )
    # Net emissivity term: 0.34 - 0.14 * sqrt(ea).
    emissivity_term = ea.sqrt().multiply(-0.14).add(0.34)
    return temperature_term.multiply(emissivity_term).multiply(fcd).multiply(4.901E-9)
def c_terminal_proline(amino_acids):
    """Return True when the right-most (C-terminal) residue is proline ('P')."""
    last_residue = amino_acids[-1]
    return last_residue == "P"
def del_char_idx(inline, idx):
    """Return `inline` with the character at position `idx` removed.

    Args:
        inline(str): Input string.
        idx(int): An index position.
    """
    head = inline[:idx]
    tail = inline[idx + 1:]
    return head + tail
def _modified_dict_keys_and_values(adict, func):
    """Return a new dict with `func` applied to every key and every value.

    Fixed for Python 3: `dict.iteritems()` no longer exists; use `items()`.
    """
    return {func(key): func(value) for key, value in adict.items()}
import torch
def prepare_batch(sample, use_cuda=False):
    """Prepare a batch for training/validation, optionally moving it to GPU.

    Args:
        sample (dict | :obj:`torch.Tensor` | list): The batch to prepare
        use_cuda (bool): Move to cuda
    Returns:
        (dict | :obj:`torch.Tensor` | list): The batch, on GPU or CPU
    """
    if not use_cuda:
        return sample

    def _to_cuda(item):
        # Recurse through dicts and lists; move tensors, pass anything
        # else through unchanged.
        if torch.is_tensor(item):
            return item.cuda()
        if isinstance(item, dict):
            return {key: _to_cuda(value) for key, value in item.items()}
        if isinstance(item, list):
            return [_to_cuda(element) for element in item]
        return item

    return _to_cuda(sample)
def append_tag(image_tag, append_str):
    """Append `append_str` to `image_tag` with a hyphen separator.

    :param image_tag: str, original image tag
    :param append_str: str, string to be appended
    """
    return "{}-{}".format(image_tag, append_str)
from datetime import datetime
def get_file_age(file):
    """Return the age of `file` in whole days, based on its mtime.

    Args:
        file: A PosixPath object representing a file.
    Returns: An int representing the age in days.
    """
    modified = datetime.fromtimestamp(file.stat().st_mtime)
    age = datetime.today() - modified
    return age.days
def _set_parameter(config, parameter):
    """Return the value of `parameter` from `config` when present, else ''.

    :param config: bit configuration mapping
    :param parameter: the configuration item to look up
    :return: the parameter's value, or an empty string when absent
    """
    return config[parameter] if parameter in config else ""
def get_content_iter_with_chunk_size(content, chunk_size=1024):
    """Return an iterator over `content` yielding chunks of `chunk_size`.

    Larger chunks speed up tests with very large content, since each
    iteration may result in a TCP send.
    """
    # Ensure we still exercise multiple chunks and iterations.
    assert (len(content) / chunk_size) >= 10
    chunks = [content[offset:offset + chunk_size]
              for offset in range(0, len(content), chunk_size)]
    return iter(chunks)
def get_L_DHW_d_t(L_dashdash_k_d_t, L_dashdash_w_d_t, L_dashdash_s_d_t, L_dashdash_b1_d_t, L_dashdash_b2_d_t,
                  L_dashdash_ba1_d_t):
    """Hourly hot-water heat load, excluding bathtub reheating by the
    generation unit (Eq. 30).

    Args:
        L_dashdash_k_d_t(ndarray): hourly solar-corrected load, kitchen faucet (MJ/h)
        L_dashdash_w_d_t(ndarray): hourly solar-corrected load, washbasin faucet (MJ/h)
        L_dashdash_s_d_t(ndarray): hourly solar-corrected load, bathroom shower faucet (MJ/h)
        L_dashdash_b1_d_t(ndarray): hourly solar-corrected load, bathtub filling (MJ/h)
        L_dashdash_b2_d_t(ndarray): hourly solar-corrected load, automatic bathtub reheating (MJ/h)
        L_dashdash_ba1_d_t(ndarray): hourly solar-corrected load, bathtub hot-water addition (MJ/h)
    Returns:
        ndarray: hourly hot-water heat load excluding bathtub reheating (MJ/h)
    """
    loads = (L_dashdash_k_d_t, L_dashdash_w_d_t, L_dashdash_s_d_t,
             L_dashdash_b1_d_t, L_dashdash_b2_d_t, L_dashdash_ba1_d_t)
    return sum(loads)
def mean(seq):
    """Return the arithmetic mean of `seq` (ZeroDivisionError on empty input)."""
    total = sum(seq)
    return total / len(seq)
def k8s_url(namespace, kind, name=None):
    """
    Construct a URL referring to a set of kubernetes resources.

    Only supports the subset of URLs needed by kubespawner:
    - All resources of a specific kind in a namespace
    - A resource with a specific name of a specific kind
    """
    parts = ['api', 'v1', 'namespaces', namespace, kind]
    if name is not None:
        parts.append(name)
    return '/' + '/'.join(parts)
def as_twos_comp(value: int) -> int:
    """Interpret `value` as a signed 32-bit two's-complement integer."""
    if value & 0x8000_0000:
        # Sign bit set: subtract 2**32 to recover the negative value
        # (equivalent to flipping the bits and negating).
        return value - 0x1_0000_0000
    return value
def _has_exclude_patterns(name, exclude_patterns):
    """Return True when `name` contains any substring in `exclude_patterns`."""
    return any(pattern in name for pattern in exclude_patterns)
def get_volumetric_scene(self, data_key="total", isolvl=0.5, step_size=3, **kwargs):
    """Return a Scene combining the structure with an isosurface component.

    Args:
        data_key (str, optional): Use the volumetric data from
            self.data[data_key]. Defaults to 'total'.
        isolvl (float, optional): Isosurface cutoff; presumably in VESTA-style
            e/bohr units, grid-size independent — confirm against callers.
        step_size (int, optional): step_size parameter for
            marching_cubes_lewiner. Defaults to 3.
        **kwargs: forwarded to Structure.get_scene

    Returns:
        The structure scene with the isosurface appended to its contents.
    """
    scene = self.structure.get_scene(**kwargs)
    scene.contents.append(
        self.get_isosurface_scene(
            data_key=data_key,
            isolvl=isolvl,
            step_size=step_size,
            origin=scene.origin,
        )
    )
    return scene
import glob
import csv
def write_colocated_data_time_avg(coloc_data, fname):
    """
    Writes the time averaged data of gates colocated with two radars.

    The new-file and append branches of the original were near-identical
    copies; they are merged here. A new file gets the comment header and the
    CSV header, an existing file is appended to.

    Parameters
    ----------
    coloc_data : dict
        dictionary containing the colocated data parameters
    fname : str
        file name where to store the data

    Returns
    -------
    fname : str
        the name of the file where data has written
    """
    fieldnames = [
        'rad1_time', 'rad1_ray_ind', 'rad1_rng_ind', 'rad1_ele',
        'rad1_azi', 'rad1_rng', 'rad1_dBZavg', 'rad1_PhiDPavg',
        'rad1_Flagavg', 'rad2_time', 'rad2_ray_ind', 'rad2_rng_ind',
        'rad2_ele', 'rad2_azi', 'rad2_rng', 'rad2_dBZavg',
        'rad2_PhiDPavg', 'rad2_Flagavg']
    # File existence check kept glob-based to preserve original semantics
    # (fname may contain wildcard characters).
    new_file = not glob.glob(fname)
    with open(fname, 'w' if new_file else 'a', newline='') as csvfile:
        if new_file:
            csvfile.write('# Colocated radar gates data file\n')
            csvfile.write('# Comment lines are preceded by "#"\n')
            csvfile.write('#\n')
        writer = csv.DictWriter(csvfile, fieldnames)
        if new_file:
            writer.writeheader()
        for i, rad1_time in enumerate(coloc_data['rad1_time']):
            writer.writerow(_colocated_row(coloc_data, i, rad1_time))
    return fname

def _colocated_row(coloc_data, i, rad1_time):
    """Build the CSV row dict for sample `i` of `coloc_data`."""
    row = {
        # Times are serialized as compact timestamps.
        'rad1_time': rad1_time.strftime('%Y%m%d%H%M%S'),
        'rad2_time': coloc_data['rad2_time'][i].strftime('%Y%m%d%H%M%S'),
    }
    for key in ('rad1_ray_ind', 'rad1_rng_ind', 'rad1_ele', 'rad1_azi',
                'rad1_rng', 'rad1_dBZavg', 'rad1_PhiDPavg', 'rad1_Flagavg',
                'rad2_ray_ind', 'rad2_rng_ind', 'rad2_ele', 'rad2_azi',
                'rad2_rng', 'rad2_dBZavg', 'rad2_PhiDPavg', 'rad2_Flagavg'):
        row[key] = coloc_data[key][i]
    return row
def encode_file(body, filename):
    """
    Encode file data as a multipart/form-data payload.

    :param body: data to encode
    :param filename: name of file with data to encode
    :return: content_type, body
    """
    boundary = '--------------MIME_Content_Boundary---------------'
    parts = [
        '--' + boundary,
        'Content-Disposition: form-data; name="input_file"; '
        'filename="{0}"'.format(filename),
        'Content-Type: application/octet-stream',
        '',
        body,
        '--' + boundary + '--',
        '',
    ]
    content_type = 'multipart/form-data; boundary=%s' % boundary
    return content_type, "\r\n".join(parts)
def _format_config(file_value, effective_value, name=None, envvar_name=None):
    """
    Build the terminal display string for a config value, indicating the
    value and, when relevant, the setting name and whether an environment
    variable overrode the config-file value.

    >>> _format_config('x', 'x')
    'x'
    >>> _format_config('x', 'x', 'setting.name')
    'setting.name x'
    >>> _format_config('x', 'y', envvar_name='ENVVAR')
    'y                                   # overwritten by environment var ENVVAR; config file value: x'

    :param file_value: config value present in the toml file
    :type file_value: str
    :param effective_value: config value either from file or as overridden
        from the environment
    :type effective_value: str
    :param name: config key (not used for single value show)
    :type name: str|None
    :param envvar_name: name of environment variable that overrode the value
    :type envvar_name: str|None
    :returns: formatted string for terminal
    :rtype: str
    """
    # Column where the "overwritten by ..." note starts (unless the
    # name/value prefix is longer than this).
    pad_column = 35
    # Never print the ACS token itself.
    shown = "*" * 8 if name == "core.dcos_acs_token" else effective_value

    if file_value == effective_value:
        return '%s %s' % (name, shown) if name else effective_value

    if not envvar_name:
        envvar_name = "N/A"  # this should never happen
    prefix = '%s %s' % (name, shown) if name else effective_value
    padded = ('%-{}s'.format(pad_column)) % prefix
    if shown != effective_value:
        # The effective token is obscured, so don't report the file value
        # either.
        return padded + ' # overwritten by environment var %s; config file value differs' % envvar_name
    return padded + ' # overwritten by environment var %s; config file value: %s' % (envvar_name, file_value)
def split_ver(v):
    """Convert a dotted version string like '1.2.3' into the list [1, 2, 3]."""
    return list(map(int, v.split('.')))
def polynomial_redshift(d):
    """
    Polynomial approximation of the redshift corresponding to a given
    luminosity distance.

    Parameters
    ----------
    d: float or int
        A luminosity distance, in Mpc. Must be non-negative.

    Returns
    -------
    z: float
        The redshift corresponding to the input distance.

    Raises
    ------
    TypeError
        If d is not a number.
    ValueError
        If d is negative.
    """
    # Validate with real exceptions instead of `assert`, which is stripped
    # when Python runs with -O; also accept ints, which are valid distances.
    if isinstance(d, bool) or not isinstance(d, (int, float)):
        raise TypeError('d should be a number, got %s.' % type(d).__name__)
    if d < 0:
        raise ValueError('The distance should be a non-negative number.')
    # Polynomial approximation of the redshift conversion.
    return 1.0832e-12 * d**3 - 1.7022e-8 * d**2 + 0.00021614 * d
import json
def parse_file(filepath: str):
    """Load a JSON file and return the parsed Python object.

    Args:
        filepath (str): path to the JSON file.

    Returns:
        The decoded Python object (dict, list, etc.).
    """
    # `with` guarantees the handle is closed even if json.load raises,
    # unlike the previous open()/close() pair.
    with open(filepath) as f:
        return json.load(f)
def is_alive(thread):
    """Return whether `thread` is alive; safely treats None as not alive."""
    return thread.is_alive() if thread else False
import base64
def b64e(s):
    """b64e(s) -> bytes

    Base64-encode `s`. Accepts bytes or str; str input is UTF-8 encoded
    first, since base64.b64encode only accepts bytes on Python 3 (the
    original str-only docstring example raised TypeError).

    Example:
        >>> b64e(b"test")
        b'dGVzdA=='
    """
    if isinstance(s, str):
        s = s.encode('utf-8')
    return base64.b64encode(s)
from typing import Any
from typing import Tuple
def exec_command(node, **kwargs: Any) -> Tuple:
    """
    Return the results of the command executed on the given node.

    Args:
        node: The node to be used for executing the given command.
        kwargs: The supported keys are
            sudo - Default false, root privilege to be used or not
            cmd - The command to be executed
            check_ec - Evaluate the error code returned.
    Return:
        out, err String values
    Raises:
        CommandFailed when there is an execution failure
    """
    stdout, stderr = node.exec_command(**kwargs)
    return stdout, stderr
import torch
def all_params(model: torch.nn.Module) -> torch.Tensor:
    """Return a single 1-D tensor of all parameters in model.state_dict().

    The "word_embeddings.position_ids" buffer is skipped (it is an index
    buffer, not a learned parameter).
    """
    flattened = [
        tensor.detach().view(-1)
        for key, tensor in model.state_dict().items()
        if key != "word_embeddings.position_ids"
    ]
    return torch.cat(flattened)
def get_items(request, client):
    """Run a quick search with `request` and return every item from every
    result page, flattened into a single list."""
    result = client.quick_search(request)
    items = []
    for page in result.iter(None):
        items.extend(page.get()['features'])
    return items
def compress(word):
    """
    Run-length encode `word`: each run of identical adjacent characters is
    replaced by the character followed by its count.

    >>> compress("aaabbc")
    'a3b2c1'
    """
    if not word:
        return ""
    result = ""
    count = 1
    for i in range(1, len(word)):
        # Use == (value equality). The original used `is`, which only worked
        # because CPython interns one-character strings — not guaranteed.
        if word[i] == word[i - 1]:
            count += 1
        else:
            result += word[i - 1] + str(count)
            count = 1
    return result + word[-1] + str(count)
import math
def water_saturation_vapour_pressure_iapws(t):
    """Return the water saturation vapour pressure per W. Wagner and A. Pruss
    (1992) J. Phys. Chem. Reference Data, 22, 783-787.

    See http://www.kayelaby.npl.co.uk/chemistry/3_4/3_4_2.html
    Valid only above the triple point. The IAPWS formulation 1995 (Wagner and
    Pruss, 2002) is valid in the range 273.16 K < T < 647.096 K.
    See http://cires1.colorado.edu/~voemel/vp.html

    :param t: water temperature (degrees C)
    :return: water vapour pressure in Pascal (Pa)
    """
    tc = 647.096  # critical temperature, K
    pc = 22064000  # critical pressure, Pa
    # (coefficient, exponent) pairs of the Wagner-Pruss series in tau.
    coefficients = (
        (-7.85951783, 1),
        (1.84408259, 1.5),
        (-11.7866497, 3),
        (22.6807411, 3.5),
        (-15.9618719, 4),
        (1.80122502, 7.5),
    )
    t_kelvin = t + 273.15
    tau = 1 - t_kelvin / tc
    series = sum(a * tau ** e for a, e in coefficients)
    return pc * math.exp(series * tc / t_kelvin)
def _capitalize(word: str) -> str:
    """Capitalize the first letter of `word`, preserving the rest unchanged
    (any other uppercase characters are kept as-is).

    :param word: the word to capitalize
    :return: the word with its first letter upper-cased
    """
    # word[:1] is '' for an empty string, so no length check is needed.
    return word[:1].upper() + word[1:]
def format_trec_results(qid: str, doc: str, rank: int, score: float, run_id='RunId'):
    """
    Produce a TREC-formatted result line.

    :param qid: Query Id
    :param doc: Document
    :param rank: Rank position
    :param score: Ranking score (printed with 4 decimals)
    :param run_id: Name for this run
    :return: String in TREC format
    """
    return f"{qid}\t0\t{doc}\t{rank}\t{float(score):.4f}\t{run_id}"
import requests
def GetStockPriceJson(symbol, size, stock_key):
    """
    Summary: Get the JSON response with stock data from the AlphaVantage API.

    Inputs: symbol - string, stock ticker symbol
            size - string, "compact" returns 100 days' data, "full" the full range
            stock_key - string, AlphaVantage API key
    Return Value: parsed JSON object with the AlphaVantage API response
    """
    url = (
        'https://www.alphavantage.co/query'
        '?function=TIME_SERIES_DAILY_ADJUSTED'
        f'&symbol={symbol}&outputsize={size}&apikey={stock_key}'
    )
    response = requests.get(url)
    return response.json()
def read_file(path):
    """Return the entire contents of the file at `path` as a string."""
    with open(path) as handle:
        contents = handle.read()
    return contents
def add_relationship_to_entities(rel_map):
    """
    Purpose:
        Build a Graql `define` query declaring the roles played by both
        entities of a relationship.
    Args:
        rel_map: the relationship map with 'rel1'/'rel2' entries, each
            containing 'entity' and 'role' keys
    Returns:
        graql_insert_query - graql query string
    """
    first = rel_map["rel1"]
    second = rel_map["rel2"]
    return (
        "define "
        + first["entity"] + " plays " + first["role"] + "; "
        + second["entity"] + " plays " + second["role"] + ";"
    )
def import_from(module: str, name: str):
    """Dynamically import `name` from `module` (equivalent of
    `from module import name`).

    Args:
        module (str): The namespace of the module
        name (str): The name to be imported
    Returns:
        The imported class, function, etc.
    """
    imported_module = __import__(module, fromlist=[name])
    return getattr(imported_module, name)
import math
def _(element: float):
    """
    Fortran NINT-style rounding of a floating point value: the integer
    portion is increased in magnitude (away from zero) when the fractional
    portion is >= 0.5, regardless of sign.
    """
    fractional = math.modf(abs(element))[0]
    round_away_from_zero = fractional >= 0.5
    if element >= 0:
        return math.ceil(element) if round_away_from_zero else math.floor(element)
    return math.floor(element) if round_away_from_zero else math.ceil(element)
def caps_from_underscores(string):
    """Convert words_with_underscores to CapWords."""
    return ''.join(part.title() for part in string.split('_'))
from typing import Callable
def pushcall(call: Callable, from_other_thread: bool = False) -> None:
    """pushcall(call: Callable, from_other_thread: bool = False) -> None

    Push a call onto the event loop to be run during the next cycle.

    Category: General Utility Functions

    Handy for calls that are disallowed from within other callbacks, etc.
    Expects to be used in the game thread, and automatically saves and
    restores the ba.Context. To push a call from outside the game thread,
    pass `from_other_thread=True`; the call then always runs in the UI
    context on the game thread.
    """
    # Binding stub: the real implementation is provided by the engine.
    return None
import json
def RenderValue(value):
    """Return the JavaScript (JSON) representation of a Python value."""
    # sort_keys makes dict output deterministic.
    return json.dumps(value, sort_keys=True)
def parse_keywords(strings=None):
    """Parse keyword specifications from the given list of strings.

    The default is now None instead of a mutable [] (best practice; the
    behavior for callers is unchanged — no specs yields an empty dict).

    >>> kw = sorted(parse_keywords(['_', 'dgettext:2', 'dngettext:2,3', 'pgettext:1c,2']).items())
    >>> for keyword, indices in kw:
    ...     print((keyword, indices))
    ('_', None)
    ('dgettext', (2,))
    ('dngettext', (2, 3))
    ('pgettext', ((1, 'c'), 2))
    """
    keywords = {}
    for string in strings or ():
        if ':' in string:
            funcname, indices = string.split(':')
        else:
            funcname, indices = string, None
        if funcname in keywords:
            # First specification for a function wins, as before.
            continue
        if indices:
            inds = []
            for x in indices.split(','):
                if x[-1] == 'c':
                    # Trailing 'c' marks a (position, 'c') context index.
                    inds.append((int(x[:-1]), 'c'))
                else:
                    inds.append(int(x))
            indices = tuple(inds)
        keywords[funcname] = indices
    return keywords
def lookup_values_count(lookup_table):
    """
    Return the number of value columns in a lookup table.

    The first column (lookup time) is ignored; the count is the length of
    the 'value' field's sub-array.
    """
    value_dtype = lookup_table.dtype['value']
    return value_dtype.shape[0]
import time
def get_rfc3339_time(secs=None):
    """Return `secs` in RFC 3339 date/time format (secs=None means now, UTC).

    RFC 3339 is a subset of ISO 8601, used for '{DAV:}creationdate'.
    See http://tools.ietf.org/html/rfc3339
    """
    rfc3339_format = "%Y-%m-%dT%H:%M:%SZ"
    return time.strftime(rfc3339_format, time.gmtime(secs))
def get_requires(filename):
    """Read a requirements file (e.g. 'requirements.txt') and return its
    lines with trailing whitespace/newlines stripped."""
    with open(filename, "rt") as req_file:
        return [line.rstrip() for line in req_file]
from typing import Counter
def sum_all_dates(counters):
    """Sum up all the counters for each court across all dates.

    :param counters: A dict of name-counter pairs.
    :return: A Counter with total counts per court across all dates.
    """
    totals = Counter()
    for court, date_counter in counters.items():
        totals[court] += sum(date_counter.values())
    return totals
def filter_stories(stories, triggerlist):
    """
    Take a list of NewsStory instances and return only those for which a
    trigger in triggerlist fires.

    Note: a story appears once per trigger that fires on it (matching the
    original accumulation behavior).
    """
    return [
        story
        for story in stories
        for trigger in triggerlist
        if trigger.evaluate(story) == True
    ]
import math
def number_of_divisors(input_number):
    """Return the number of divisors of input_number (a positive integer).

    Uses math.isqrt, which is exact for arbitrarily large ints, instead of
    float sqrt (which loses precision past 2**53).
    """
    root = math.isqrt(input_number)
    count = 0
    # Every divisor d <= sqrt(n) pairs with n // d >= sqrt(n).
    for num in range(1, root + 1):
        if input_number % num == 0:
            count += 2
    # A perfect square's root was counted twice above; undo one.
    if root * root == input_number:
        count -= 1
    return count
def node_cmp(n1, n2):
    """Comparator ordering nodes by ascending cost.

    Equal costs compare as 0 so stable sorts keep the original relative
    order, which is both consistent and slightly faster.
    """
    if n1.cost == n2.cost:
        return 0
    return -1 if n1.cost < n2.cost else 1
def cleanly_separate_key_values(line):
    """Split *line* on the FIRST ':' into (key, value).

    A plain .split(':') would also break on colons that occur inside the
    value, so only the first occurrence is treated as the delimiter.
    """
    split_at = line.find(':')
    return line[:split_at], line[split_at + 1:]
from datetime import datetime
def int_to_date(d):
    """Convert a YYYYMMDD integer to a date object, or None when d == 0."""
    if d == 0:
        return None
    parsed = datetime.strptime(str(d), '%Y%m%d')
    return parsed.date()
import string
def parse_letternumber(st):
    """
    Parse CDMS's two-letter QNs

    From the CDMS docs:
    "Exactly two characters are available for each quantum number. Therefore, half
    integer quanta are rounded up ! In addition, capital letters are used to
    indicate quantum numbers larger than 99. E. g. A0 is 100, Z9 is 359. Small
    types are used to signal corresponding negative quantum numbers."
    """
    pieces = []
    for ch in st:
        if ch in string.ascii_lowercase:
            # lowercase letter -> negative quantum number
            pieces.append('-' + str(string.ascii_lowercase.index(ch) + 10))
        elif ch in string.ascii_uppercase:
            pieces.append(str(string.ascii_uppercase.index(ch) + 10))
        else:
            pieces.append(ch)
    return int(''.join(pieces))
def _unsigned_zero(dtype):
"""
Given a numpy dtype, finds its "zero" point, which is exactly in the
middle of its range.
"""
assert dtype.kind == 'u'
return 1 << (dtype.itemsize * 8 - 1) | 6fcbc9ec4c563beb9a7fbf50a29fd5960912498f | 124,449 |
def MinPositiveValue(data):
    """
    Determine the minimum strictly-positive value in a data list.

    Input:
     - *data*
    Output:
     - *minimum positive value*
    """
    positives = (elem for elem in data if elem > 0)
    return min(positives)
def freq_in_kmskpc(vo, ro):
    """
    NAME:
       freq_in_kmskpc
    PURPOSE:
       convert a frequency to km/s/kpc
    INPUT:
       vo - velocity unit in km/s
       ro - length unit in kpc
    OUTPUT:
       conversion from units where vo=1. at ro=1.
    HISTORY:
       2013-09-01 - Written - Bovy (IAS)
    """
    conversion = vo / ro
    return conversion
import io
import json
def read_json_object(json_filename, encoding="utf-8"):
    """Read a single JSON object (a dict) from a text file.

    '//' line comments are stripped before parsing; blank lines are kept so
    json error positions still line up with the file.  Raises ValueError if
    the file cannot be parsed or the parsed document is not a dict.
    """
    cleaned = []
    with io.open(json_filename, "r", encoding=encoding) as f_in:
        for raw in f_in:
            text = raw.rstrip()
            marker = text.find("//")
            if marker > -1:
                text = text[:marker]
            cleaned.append(text)
    obj = json.loads("".join(part + "\n" for part in cleaned))
    if not isinstance(obj, dict):
        raise ValueError("File %s contains an invalid JSON object." % json_filename)
    return obj
def rivers_with_station(stations):
    """Return an alphabetically sorted list of names of rivers that have
    at least one monitoring station."""
    # A set comprehension de-duplicates; stations without a river are skipped.
    named_rivers = {s.river for s in stations if s.river is not None}
    return sorted(named_rivers)
import json
def load_json_data(response):
    """Decode the JSON body of *response* (UTF-8 bytes in .data)."""
    body = response.data.decode('utf8')
    return json.loads(body)
def trim(d, prepended_msg):
    """Remove the prepended-msg from the keys of dictionary d."""
    # split()[1] keeps whatever follows the first occurrence of the prefix.
    return {key.split(prepended_msg)[1]: value for key, value in d.items()}
def diplay_img_info(img, divisor, use_RGB):
    """Print the image dimensions and return them.

    Parameters:
        img: array-like with at least 2 leading axes; axis 0 is read as
            time, axis 1 as z-slices, and the trailing axes as spatial
            (plus a channel axis when use_RGB is set).
        divisor: integer used to floor-divide the spatial dimensions.
        use_RGB: when truthy, the last axis is reported as the channel
            count (but the returned nr_channels is still 1 — see below).

    Returns:
        (nr_z_slices, nr_channels, nr_timepoints, x_dim, y_dim, x_div, y_div)
    """
    nr_z_slices = img.shape[1]
    nr_timepoints = img.shape[0]
    x_dim = img.shape[-2]
    # NOTE(review): y_dim reads shape[-2] as well — identical to x_dim.
    # Looks like it should be shape[-1]; only correct for square images.
    # TODO confirm against callers before changing.
    y_dim = img.shape[-2]
    x_div = x_dim//divisor
    y_div = y_dim//divisor
    print(img.shape)
    print("The Resolution is: " + str(x_dim))
    print("The number of z-slizes is: " + str(nr_z_slices))
    print("The number of timepoints: " + str(nr_timepoints))

    if use_RGB:
        # Channel count is printed but deliberately reset to 1 afterwards,
        # so the return value always reports a single channel.
        nr_channels = img.shape[-1]
        print("The number of channels: " + str(nr_channels))
        nr_channels = 1
    else:
        nr_channels = 1

    return nr_z_slices, nr_channels, nr_timepoints, x_dim, y_dim, x_div, y_div
def RIGHT(string, num_chars=1):
    """
    Returns a substring of length num_chars from the end of a specified string. If num_chars is
    omitted, it is assumed to be 1. Same as `string[-num_chars:]` for positive num_chars.

    >>> RIGHT("Sale Price", 5)
    'Price'
    >>> RIGHT('Stock Number')
    'r'
    >>> RIGHT('Text', 100)
    'Text'
    >>> RIGHT('Text', 0)
    ''
    >>> RIGHT('Text', -1)
    Traceback (most recent call last):
    ...
    ValueError: num_chars invalid
    """
    if num_chars < 0:
        raise ValueError("num_chars invalid")
    # string[-0:] is string[0:], i.e. the whole string, so num_chars == 0
    # needs its own branch to correctly return the empty string.
    return string[-num_chars:] if num_chars else ""
def build_texts_from_movies(path_to_movie_dat):
    """
    Extracts genre text from movies.dat to create semantic embeddings

    :param path_to_movie_dat:
    :return: dict of text list keyed by movie_id
    """
    texts = {}
    with open(path_to_movie_dat, "r", encoding="ISO-8859-1") as f:
        for raw_line in f:
            movie_id, title_and_year, genres = raw_line.strip("\n").split("::")
            # Drop the trailing " (YYYY)" (7 characters) to get the bare title.
            bare_title = title_and_year[:-7]
            texts[movie_id] = [bare_title] + sorted(genres.split("|"))
    return texts
def perm(n, k):
    """Return P(n, k), the number of permutations of length k drawn from n
    choices.

    P(n, 0) == 1: there is exactly one empty permutation.  (The original
    rejected k == 0 even though it is a well-defined input.)
    """
    assert k >= 0
    result = 1
    # Multiply n * (n-1) * ... * (n-k+1); an empty product is 1.
    for offset in range(k):
        result *= n - offset
    return result
import re
def find_email(text):
    """
    Extract email from text.

    Parameters
    ----------
    text: str
        Text selected to apply transformation

    Examples:
    ---------
    ```python
    sentence="My gmail is abc99@gmail.com"
    find_email(sentence)
    >>> 'abc99@gmail.com'
    ```
    """
    # All matches are joined with commas; no match yields ''.
    matches = re.findall(r'[\w\.-]+@[\w\.-]+', str(text))
    return ",".join(matches)
def get_iface_speed(iface):
    """ Converts iface speed from bps to DNS-friendly string """
    raw = iface["iface_speed"]
    if not raw:
        return None
    speed = int(raw)
    if speed < 1536000:
        # Below T1 rate: nothing to report.
        return None
    if speed in (1536000, 1544000):
        return "t1"
    # Scale down by thousands until the magnitude fits in two digits.
    magnitude = speed
    units = ["k", "m", "g", "t", "p"]
    steps = 0
    while magnitude > 100:
        magnitude = magnitude / 1000
        steps += 1
    return "{}{}".format(int(magnitude), units[steps - 1])
import re
def is_valid_id(id):
    """Check whether *id* is a valid note id (exactly nine lowercase hex
    characters).

    Args:
        id (str): The id of a note
    Returns:
        bool: True if the id is valid
    """
    return re.match(r'^[0-9a-f]{9}$', id) is not None
import random
def _sample(iterable, sample_count):
"""
Choose a random sampling of items from an iterator.
(you will get Nones if sample_count is less than iterable length)
"""
rand = random.SystemRandom()
result = [None] * sample_count
for i, item in enumerate(iterable):
if i < sample_count:
result[i] = item
else:
j = int(rand.random() * (i + 1))
if j < sample_count:
result[j] = item
return result | 8897c7acd7ddd6bce41c563861be95922fdcac7e | 384,209 |
def nearest_index(index_array, value):
"""Find the position in an increasing array nearest a given value."""
# find the index of the last data point that is smaller than the 'value'
# we are looking for ....
ind1 = len(index_array.compress(index_array < value))
# if we are at the very end of the array then this is our best estimate ...
if ind1 == len(index_array)-1:
ind = ind1
# otherwise, which of the two points is closer?
else:
val1 = index_array[ind1]
val2 = index_array[ind1+1]
if val2-value > value-val1:
ind = ind1
else:
ind = ind1+1
return ind | 185c5b898a49663b97f6dc690c053b8acf342d9a | 351,775 |
def build_template(cfg: dict) -> dict:
    """
    Build string's template dictionary

    :rtype: dict
    :param cfg: representation of the config
    :return: dictionary to be used with Template().substutute() function
    """
    task = cfg['main']
    # Later sources override earlier ones on duplicate keys
    # (miner < pool < rig).
    merged = {}
    for part in (task['miner'], task['pool'], cfg['rig']):
        merged.update(part)
    return merged
import torch
def extract_kpt_vectors(tensor, kpts, rand_batch=False):
    """
    Pick channel vectors from 2D location in tensor.
    E.g. tensor[b, :, y1, x1]

    :param tensor: Tensor to extract from [b, c, h, w]
    :param kpts: Tensor with 'n' keypoints (x, y) as [b, n, 2]
    :param rand_batch: Randomize tensor in batch the vector is extracted from
    :return: Tensor entries as [b, n, c]
    """
    batch_size, num_kpts = kpts.shape[:-1]  # [b, n]

    # Flatten all keypoints into one [b*n, 2] integer index list.
    tmp_idx = kpts.contiguous().view(-1, 2).long()

    # Batch index for each flattened keypoint -> [b*n],
    # e.g. [0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2] after the sort below.
    b_num = torch.arange(batch_size)
    b_num = b_num.repeat((num_kpts, 1)).view(-1)
    # sort groups indices per batch so each keypoint reads its own image;
    # with rand_batch the assignment is shuffled so a vector may be taken
    # from any image in the batch.
    b_num = torch.sort(b_num)[0] if not rand_batch else b_num[torch.randperm(len(b_num))]

    # Advanced indexing: kpts are (x, y), so column 1 indexes h and
    # column 0 indexes w.  Result reshaped to [b, n, c].
    return tensor[b_num, :, tmp_idx[:, 1], tmp_idx[:, 0]].reshape([batch_size, num_kpts, -1])
def extract_episode_details(season, episode_response):
    """Clean and extract episode details response.

    Take an episode details response, clean the data and pull out the
    fields needed to construct an Episode object.

    Args:
        season: The season number
        episode_response: An episode_details response

    Returns:
        Dictionary with relevant episode information
    """
    try:
        imdb_rating = float(episode_response['imdbRating'])
    except ValueError:
        # Rating may come through as 'N/A' if episode has not aired
        imdb_rating = None
    details = {
        'title': episode_response['Title'],
        'episode': int(episode_response['Episode']),
        'season': season,
        'ratings': {'imdb': imdb_rating},
    }
    return details
def find_anychar(string, chars, index=None, negate=0):
    """find_anychar(string, chars[, index]) -> index of a character or -1

    Find a character in string. chars is a list of characters to look
    for. Return the index of the first occurrence of any of the
    characters (or, with negate set, the first character NOT in chars),
    or -1 if not found. index is the index where the search should
    start; by default the search begins at the start of the string.
    """
    pos = 0 if index is None else index
    while pos < len(string):
        in_set = string[pos] in chars
        # A position matches when membership agrees with what we want:
        # plain search wants in_set, negated search wants not in_set.
        if in_set != bool(negate):
            break
        pos += 1
    # Note: a start index beyond the string is returned unchanged, as in
    # the original implementation (only pos == len means "not found").
    return -1 if pos == len(string) else pos
def datetime_to_estudent(date):
    """Format a datetime as an eStudent-compatible string.

    (2017, 5, 4) -> 04-May-2017
    """
    return '{:%d-%b-%Y}'.format(date)
def get_columns(api, config):
    """
    Get the trello column ids for a trello board given a api handle and the id
    of the board.

    :param api: A trollo Boards api handle.
    :param dict config: The configuration data (see note below for details).
    :return: A tuple of column trello ids

    .. note::

       Required configuration keys:

       * board_id      The trello board unique id
       * new_column    The name(str) of the column where new cards are added,
                       as seen in the Trello webui
       * close_column  The name(str) of the column where closed/inactive
                       cards are moved, as seen in the Trello webui
    """
    required = ('board_id', 'new_column', 'close_column')
    for key in required:
        if key not in config:
            raise KeyError(f"A required key '{key}' is missing in the config")
    name_to_id = {col['name']: col['id']
                  for col in api.get_list(config['board_id'])}
    return (name_to_id[config['new_column']],
            name_to_id[config['close_column']])
import csv
def get_header_csv(csv_file, cols_delimiter):
    """
    Get header of a csv

    :param csv_file: path to the csv file
    :param cols_delimiter: delimiter between columns
    :return: header of csv as a list of strings
    """
    with open(csv_file, "r") as f:
        # The first row produced by the reader is the header.
        first_row = next(csv.reader(f, delimiter=cols_delimiter))
    return first_row
def estimate_norm(X):
    """Estimate per-feature normalization parameters from a data set.

    Parameters:

      X : numpy.ndarray
          2D array: rows are examples, columns are features.

    Returns:

      numpy.ndarray
          Per-column mean of X.

      numpy.ndarray
          Per-column unbiased (ddof=1) standard deviation of X.
    """
    column_means = X.mean(axis=0)
    column_stds = X.std(axis=0, ddof=1)
    return column_means, column_stds
def format_pval(pval, significant_figures=2):
    """
    Format a p-value to the given number of significant figures.
    """
    # Round via the 'g' presentation, then reformat so trailing zeros and
    # scientific notation are normalized.
    rounded = float('{:.{p}g}'.format(pval, p=significant_figures))
    return '{:g}'.format(rounded)
def find_null_net_stations(db, collection="arrival"):
    """
    Return a set of sta fields for documents with a null net
    code (key=net).  Scans the collection named by the collection argument.
    """
    missing = set()
    for doc in db[collection].find():
        if 'net' not in doc:
            missing.add(doc['sta'])
    return missing
from pathlib import Path
def coffee_layout_dir(testdata: Path) -> Path:
    """
    Returns the layout directory containing:
    - has-multiple-sizes -- coffee-inspired sizes
    - has-only-one-size
    """
    return testdata.joinpath("layouts")
def maximum_toys(prices, k):
    """https://www.hackerrank.com/challenges/mark-and-toys/problem

    Greedy: buying the cheapest toys first maximizes the count that fits
    in the budget.

    Args:
        prices (list): Array of prices of toys
        k (int): Mark's budget

    Returns:
        int: The total number of toys Mark can purchase
    """
    count = 0
    remaining = k
    # sorted() leaves the caller's list untouched; the original sorted the
    # input in place, a surprising side effect for a pure query function.
    for price in sorted(prices):
        if remaining < price:
            break
        remaining -= price
        count += 1
    return count
def harm(x, y):
    """Return x*y/(x+y) for x and y (half the harmonic mean, e.g. the
    combined value of two parallel resistances)."""
    product = x * y
    return product / (x + y)
from typing import Type
from typing import Iterable
from typing import Optional
import inspect
def get_closest_parent(target: Type, classes: Iterable[Type]) -> Optional[Type]:
    """
    Find which class in given list is the closest parent to
    a target class.

    :param target: class which we are comparing of
    :param classes:
    :return: the closest parent or None if not found
    """
    mro = inspect.getmro(target)
    best = None
    best_distance = float('inf')
    for candidate in classes:
        try:
            distance = mro.index(candidate)
        except ValueError:
            # Not an ancestor of target at all.
            continue
        if distance == 0:
            # candidate is target itself, not a parent.
            continue
        if distance < best_distance:
            best, best_distance = candidate, distance
    return best
def extract(num):
    """
    Given a number string of at most 3 characters, return its
    (hundredth, tenth, one) digits; missing places are 0.

    Parameters
    ----------
    num: string
        Number in string

    Return
    ----------
    hundredth: int
    tenth: int
    one: int
    """
    h = t = o = 0
    if len(num) == 3:
        h, t, o = int(num[0]), int(num[1]), int(num[2])
    elif len(num) == 2:
        t, o = int(num[0]), int(num[1])
    elif len(num) == 1:
        o = int(num)
    return h, t, o
def bb_frontprint(bb):
    """ Returns a rectangle that defines the front face of a bounding box. """
    (x1, y1, z1), (x2, y2, z2) = bb
    # The front face lies in the x-z plane; the y coordinates are dropped.
    return (x1, z1), (x2, z2)
import re
def get_template_keys_from_string(template_string):
    """
    Retrieve all the keys from the template string which are identified by
    '{{ <key_name> }}'.

    :param template_string: the template in a string
    :type template_string: string
    :return: the list of the template keys corresponding to secret parameters
    :rtype: list[string]
    """
    raw_keys = re.findall(r"{{([a-z0-9_ ]+)", template_string)
    return [raw.strip() for raw in raw_keys]
from typing import List
from typing import Tuple
def count_stats(errors: List[str]) -> Tuple[int, int]:
    """Count total number of errors and files in error list."""
    real_errors = [line for line in errors if ': error:' in line]
    # The file name is everything before the first ':' of each error line.
    distinct_files = {line.split(':')[0] for line in real_errors}
    return len(real_errors), len(distinct_files)
import uuid
def generate_job_id(group_uuid):
    """
    Generate a job ID of the form '<group_uuid>.<random uuid4>'.
    """
    suffix = uuid.uuid4()
    return "{}.{}".format(group_uuid, suffix)
def factorial_iter(n: int):
    """Iteratively compute n! (the factorial of n); n < 1 yields 1."""
    product = 1
    factor = 2  # multiplying by 1 is a no-op, so start at 2
    while factor <= n:
        product *= factor
        factor += 1
    return product
def keys_to_ints(d):
    """
    Return a copy of dict d with every key converted to int.
    """
    return dict((int(key), value) for key, value in d.items())
import copy
def expand_list(list_1d: list) -> list:
    """Expand a 1d list to 2d.

    Parameters
    ----------
    list_1d : list
        input list

    Returns
    -------
    list_2d: list
        output 2d list (a deep copy; already-2d input is returned as-is)
    """
    duplicated = copy.deepcopy(list_1d)
    if isinstance(list_1d[0], list):
        return duplicated
    return [duplicated]
from typing import Union
def _prepare_positions(position_start: Union[int, None],
position_end: Union[int, None],
aln_len: int) -> dict:
"""
Prepare a dictionary with start and end position for sequence trimming.
Arguments:
position_start (int, None): leftmost (5') position; 1-based indexing
position_end (int, None): rightmost (3') position; 1-based indexing
aln_len (int): length of the alignment
Returns:
(dict): dictionary containing trimming positions
using 0-based indexing
"""
if not position_start and not position_end:
raise ValueError("Neither primers nor sequence positions were "
"provided - nothing to be trimmed.")
if position_start:
if position_end and position_start >= position_end:
raise ValueError("Start position should be smaller than end "
"position while trimming.")
elif position_start >= aln_len:
raise ValueError("Start position should be smaller than "
"alignment length.")
else:
position_start -= 1
if position_end and position_end > aln_len:
position_end = aln_len
return {'start': position_start, 'end': position_end} | b299ae7e5d4b57cd9ba8174a39ab7d3057aa850f | 464,005 |
def axprops(dct):
    """Filters `dct` for properties associated with a plot axes.

    Matching keys are POPPED from `dct` (it is mutated).

    Example:
    >>> # Note how kwargs gets split into axes/line properties.
    >>> def myplotter(ax, x, y, **kwargs)
    >>>     ax.set(**axprops(kwargs))
    >>>     ax.plot(x, y, kwargs)
    """
    # Axis-independent properties, then the x/y/z-specific ones.
    recognized = ["title", "facecolor", "aspect"]
    recognized += [axis + attr
                   for axis in "xyz"
                   for attr in ["label", "ticks", "scale", "lim"]]
    return {prop: dct.pop(prop) for prop in recognized if prop in dct}
def series_is_type(series, type):
    """Checks whether the given column is of the provided python type

    Args:
        series (pd.Series):
        type: A python type

    Returns:
        bool
    """
    idx = series.first_valid_index()
    # `is None` rather than `== None`: identity is the correct (and
    # idiomatic) test here, and labels overriding __eq__ (e.g. numpy
    # scalars/arrays) would make the equality form unreliable.
    if idx is None:
        return False
    return isinstance(series.loc[idx], type)