hexsha
stringlengths 40
40
| size
int64 5
2.06M
| ext
stringclasses 10
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 3
248
| max_stars_repo_name
stringlengths 5
125
| max_stars_repo_head_hexsha
stringlengths 40
78
| max_stars_repo_licenses
sequencelengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 3
248
| max_issues_repo_name
stringlengths 5
125
| max_issues_repo_head_hexsha
stringlengths 40
78
| max_issues_repo_licenses
sequencelengths 1
10
| max_issues_count
int64 1
67k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 3
248
| max_forks_repo_name
stringlengths 5
125
| max_forks_repo_head_hexsha
stringlengths 40
78
| max_forks_repo_licenses
sequencelengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 5
2.06M
| avg_line_length
float64 1
1.02M
| max_line_length
int64 3
1.03M
| alphanum_fraction
float64 0
1
| count_classes
int64 0
1.6M
| score_classes
float64 0
1
| count_generators
int64 0
651k
| score_generators
float64 0
1
| count_decorators
int64 0
990k
| score_decorators
float64 0
1
| count_async_functions
int64 0
235k
| score_async_functions
float64 0
1
| count_documentation
int64 0
1.04M
| score_documentation
float64 0
1
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
844ee290c97366006e042d8ac5ba0899c883ac56 | 1,903 | py | Python | kge/core/component.py | Fredkiss3/kge | 389d5ab21ecb6dc1a25dd9f98245ba5938a5d253 | [
"CC0-1.0"
] | 4 | 2020-03-17T02:15:10.000Z | 2021-06-29T13:34:40.000Z | kge/core/component.py | Fredkiss3/kge | 389d5ab21ecb6dc1a25dd9f98245ba5938a5d253 | [
"CC0-1.0"
] | 4 | 2020-05-23T05:47:30.000Z | 2022-01-13T02:15:35.000Z | kge/core/component.py | Fredkiss3/kge | 389d5ab21ecb6dc1a25dd9f98245ba5938a5d253 | [
"CC0-1.0"
] | null | null | null | from typing import Callable
import kge
from kge.core import events
from kge.core.eventlib import EventMixin
from kge.core.events import Event
class BaseComponent(EventMixin):
    """
    A component represents an element that can be added to an entity
    to add a functionality
    """

    def __fire_event__(self, event: Event, dispatch: Callable[[Event], None]):
        """
        Initialize the component before everything
        """
        # Lazy initialization: the first time any event reaches this component
        # while the engine is running, synthesize an Init event for it first.
        # SceneStopped and Init themselves must not trigger that, otherwise
        # initialization would recurse or fire during teardown.
        if event.scene is not None:
            if event.scene.engine.running:
                if not self._initialized and not isinstance(event, events.SceneStopped) and \
                        not isinstance(event, events.Init):
                    # Initialize the component
                    super(BaseComponent, self).__fire_event__(events.Init(scene=event.scene), dispatch)
                    self._initialized = True
        # fire event
        super(BaseComponent, self).__fire_event__(event, dispatch)
        # An Init dispatched from outside also marks the component initialized.
        if isinstance(event, events.Init) and not self._initialized:
            self._initialized = True

    def on_scene_stopped(self, ev, dispatch):
        # Reset so the component re-initializes when the scene runs again.
        self._initialized = False

    # Class-level counter shared by every instance of a given component type;
    # only used to build the default `name` below.
    nbItems = 0

    def __init__(self, entity=None):
        if entity is not None:
            if not isinstance(entity, kge.Entity):
                raise TypeError("entity should be of type 'kge.Entity' or a subclass of 'kge.Entity'")
        self.entity = entity  # type: kge.Entity
        # NOTE(review): the counter advances by 2 per instance (name suggests a
        # plain count), and the default name embeds the full class repr rather
        # than type(self).__name__ -- confirm both are intended.
        type(self).nbItems += 2
        self.name = f"new {type(self)} {type(self).nbItems}"
        # Used to Initialize component
        self._initialized = False
        # Used to tell if the component is active
        self.is_active = True

    def __repr__(self):
        return f"component {type(self).__name__} of entity '{self.entity}'"


# Public alias for the component base class.
Component = BaseComponent
| 33.982143 | 104 | 0.603783 | 1,717 | 0.90226 | 0 | 0 | 0 | 0 | 0 | 0 | 479 | 0.251708 |
844f9857dd2ca03aee9ac58b1348e52e4bc8e0ee | 766 | py | Python | src/870. Advantage Shuffle.py | rajshrivastava/LeetCode | dfe6342fe22b324429b0be3e5c0fef46c7e6b3b0 | [
"MIT"
] | 1 | 2019-12-16T08:18:25.000Z | 2019-12-16T08:18:25.000Z | src/870. Advantage Shuffle.py | rajshrivastava/LeetCode | dfe6342fe22b324429b0be3e5c0fef46c7e6b3b0 | [
"MIT"
] | null | null | null | src/870. Advantage Shuffle.py | rajshrivastava/LeetCode | dfe6342fe22b324429b0be3e5c0fef46c7e6b3b0 | [
"MIT"
] | null | null | null | class Solution:
def advantageCount(self, A: List[int], B: List[int]) -> List[int]:
n=len(A)
A.sort()
B_sorted_idxs = sorted(list(range(0,n)), key = lambda x: B[x])
permuted_A = [-1]*n
j = 0 #for A -index
remainingA = []
for idx in B_sorted_idxs:
while(j<n and A[j] <= B[idx]):
remainingA.append(A[j])
j += 1
if j == n:
break
else:
permuted_A[idx] = A[j]
A[j] = None
j += 1
j = 0
for val in remainingA:
while permuted_A[j] != -1:
j+=1
permuted_A[j] = val
j += 1
return permuted_A
| 27.357143 | 70 | 0.399478 | 765 | 0.998695 | 0 | 0 | 0 | 0 | 0 | 0 | 13 | 0.016971 |
84508cc0743106693c25a4c91852516182d10958 | 11,162 | py | Python | generate_population_dataset.py | p-enel/stable-and-dynamic-value | 3f78e24f5bef9b12b8cc43d075d2e66b8a603325 | [
"CC0-1.0"
] | 1 | 2020-07-29T09:18:00.000Z | 2020-07-29T09:18:00.000Z | generate_population_dataset.py | p-enel/stable-and-dynamic-value | 3f78e24f5bef9b12b8cc43d075d2e66b8a603325 | [
"CC0-1.0"
] | null | null | null | generate_population_dataset.py | p-enel/stable-and-dynamic-value | 3f78e24f5bef9b12b8cc43d075d2e66b8a603325 | [
"CC0-1.0"
] | 3 | 2020-07-27T03:12:19.000Z | 2021-11-02T20:03:00.000Z | from pathlib import Path
import numpy as np
import pickle as pk
from itertools import chain, product
from collections import OrderedDict
from structure import Struct
# Monkey identifiers present in the unit data set.
MONKEYS = ['M', 'N']
# Brain regions recorded in the data set.
REGIONS = ['OFC', 'ACC']
# Task variables the population data sets are built to decode.
TASKVARS = ['value', 'type']
# NOTE(review): unused in this module -- presumably consumed by downstream
# analysis code; confirm before removing.
SUBSPACES = [True, False]
# Analysis window (ms, relative to event time) kept around each task event,
# in within-trial order: cues ON -> response cue -> reward.
EVT_WINS = OrderedDict((('cues ON', (-500, 1500)),
                        ('response cue', (-500, 500)),
                        ('rwd', (-400, 400))))
def pp_from_filename(filename):
    '''Get the preprocessing parameters from a unit data set filename

    Arguments:
    filename - str or Path: name or full path of unit data set file

    Returns a dict with keys 'align', 'smooth' (str) and 'binsize',
    'smoothsize', 'step' (int), parsed from the '<key>.<value>' tokens
    that follow the 'unit_dataset' prefix.
    '''
    if isinstance(filename, str):
        name = filename
    else:
        name = filename.name
    # Tokens look like "align.center", "binsize.100", ... after the
    # leading "unit"/"dataset" parts are dropped.
    tokens = name.split('_')[2:]
    values = [token.split('.')[1] for token in tokens]
    return {'align': values[0],
            'binsize': int(values[1]),
            'smooth': values[2],
            'smoothsize': int(values[3]),
            'step': int(values[4])}
def get_dataset_fname(dataseed, pp):
    '''Generate the file name of a population data set given data seed and preprocessing parameters

    Arguments:
    dataseed - int: the seed of the data set that will be included in the file name
    pp - dict: the pre-processing parameters of the data set'''
    template = ("population_dataset"
                "_align.{align}_binsize.{binsize}"
                "_smooth.{smooth}_smoothsize.{smoothsize}"
                "_step.{step}_seed.{seed}.pk")
    return template.format(seed=dataseed, **pp)
def generate_dataset(dataseed, unit_folder, unit_file, save_folder=None):
    '''Generate a pseudo-population by combining data from monkeys and sessions

    Arguments:
    dataseed - int: the seed for pseudo-random selection of the trials to be
      part of the data set
    unit_file - str: the path to the file containing the unit data set
    save_folder - str or Path: optional, a folder to save the generated data
      set. After being saved once, if the same folder is specified, it will be
      loaded instead of being generated.

    Returns:
    X - Structure: A structure that contains the pseudo-population firing rate
      data. The structure contains 3 levels:
      - monkey: which can take values 'M' or 'N' for individual monkey data,
        or 'both' for the data of both monkeys combined
      - region: which can take value 'OFC' or 'ACC'
      - task variable: which can take value 'value' or 'type' for data sets
        targeted to decoding these variables
      The elements of the structure are numpy arrays of the shape:
        trials x bins x neurons
      Example:
        X['N', 'ACC', 'value'] contains a matrix of the pseudo-population
        firing rate of monkey N for region ACC meant to decode value
    y - Structure: A structure of numpy vectors with the same map as 'X' that
      contains the ground truth of the related variable for each trial.
      Example:
        y['N', 'ACC', 'value'] contains the value of each trials of monkey N
        for ACC population.
    delaymask - numpy vector of booleans: A boolean mask for the time bin
      dimension to select time bins that are part of the delay activity
    bins - numpy vector of ints: The time of each bin of the firing rate data
      in the structure X, with events ordered this way:
      'cues ON' -> 'response cue' -> 'rwd'
    '''
    events = list(EVT_WINS.keys())
    pp = pp_from_filename(unit_file)
    # Fast path: if this (seed, preprocessing) combination was already
    # generated and saved, load it instead of regenerating.
    if save_folder is not None:
        dataset_fname = get_dataset_fname(dataseed, pp)
        dataset_fullpath = Path(save_folder)/dataset_fname
        if dataset_fullpath.exists():
            print("Data set already generated, loading...")
            with open(dataset_fullpath, 'rb') as f:
                X, y, delaymask, bins = pk.load(f)
            return X, y, delaymask, bins
    with open(Path(unit_folder)/unit_file, 'rb') as f:
        data = pk.load(f)
    # Bin timestamps per event; assumed identical across monkeys/regions
    # (taken from the first session of M/OFC).
    evtxs = data['M']['OFC'][0]['bins']

    #### Format the data for decoding
    #################################
    keymap = [MONKEYS, REGIONS, TASKVARS]
    act = Struct.new_empty(keymap)
    minntrials = Struct.new_empty(keymap)
    for monkey, region in product(MONKEYS, REGIONS):
        # One sub-list per condition: 4 value levels, 2 trial types.
        act[monkey, region, 'value'] = [[] for _ in range(4)]
        act[monkey, region, 'type'] = [[], []]
        minntrials[monkey, region, 'value'] = [[] for _ in range(4)]
        minntrials[monkey, region, 'type'] = [[], []]
        datamr = data[monkey][region]
        ## Select bins that are within the window of interest for each event
        ## then concatenate the activity of the different events in a single tensor
        catepochs = []
        for sessdata in datamr:
            if sessdata['fr'] is not None:
                cattmp = []
                for evt in events:
                    included_bins = (evtxs[evt] >= EVT_WINS[evt][0]) & (evtxs[evt] <= EVT_WINS[evt][1])
                    cattmp.append(sessdata['fr'][evt][included_bins])
                catepochs.append(np.concatenate(cattmp))
            else:
                catepochs.append(None)
        ## Separate trials by value and type
        for sessfr, sessdata in zip(catepochs, datamr):
            if sessfr is not None:
                if sessdata['fr'] is not None:
                    sessvars = sessdata['vars']
                    for val in range(1, 5):
                        trialbool = (sessvars.value == val)
                        act[monkey, region, 'value'][val-1].append(sessfr[:, :, trialbool])
                    for itype, type_ in enumerate(['juice', 'bar']):
                        trialbool = (sessvars.type == type_)
                        act[monkey, region, 'type'][itype].append(sessfr[:, :, trialbool])
        ## Get the minimum number of trials across all sessions for each value/type
        minntrials[monkey, region, 'value'] = [np.nanmin([sessfr.shape[2] for sessfr in valdata])
                                               for valdata in act[monkey, region, 'value']]
        minntrials[monkey, region, 'type'] = [np.nanmin([sessfr.shape[2] for sessfr in typedata])
                                              for typedata in act[monkey, region, 'type']]
    ## Get the minimum number of trials for pooled data across monkeys
    # Temporarily reorder the structure levels so the per-condition minimum
    # can be taken across the two monkeys, then restore the original order.
    minntrials.move_level_(0, 2)
    mintogether = minntrials.apply(lambda x: [min(valmin) for valmin in zip(*x.values())], depth=2)
    mintogether = Struct.from_nested_dict({'both': mintogether.ndict}, n_layers=3)
    minntrials.move_level_(2, 0)
    minntrials = minntrials.combine(mintogether)
    # extra trials are discarded after trials are shuffled
    np.random.seed(dataseed)
    # Per-monkey activity truncated to the *pooled* ('both') trial minimum,
    # so the two monkeys can later be concatenated along the neuron axis.
    catactboth = Struct.empty_like(act, values=list)
    # taskvar, monkey, region = next(product(TASKVARS, MONKEYS, REGIONS))
    for taskvar, monkey, region in product(TASKVARS, MONKEYS, REGIONS):
        keymap = [monkey, region, taskvar]
        minns = minntrials['both', region, taskvar]
        # minn, acttmp = next(zip(minns, act[keymap]))
        for minn, acttmp in zip(minns, act[keymap]):
            tocat = []
            for sessdata in acttmp:
                ntrials = sessdata.shape[2]
                trialind = np.arange(ntrials)
                np.random.shuffle(trialind)
                tmp = sessdata[:, :, trialind]
                tocat.append(tmp[:, :, :minn])
            catactboth[keymap].append(np.concatenate(tocat, 1))
    # Same truncation using each monkey's own per-monkey trial minimum
    # (for the single-monkey data sets).
    catact = Struct.empty_like(act, values=list)
    for taskvar, monkey, region in product(TASKVARS, MONKEYS, REGIONS):
        keymap = [monkey, region, taskvar]
        minns = minntrials[keymap]
        for minn, acttmp in zip(minns, act[keymap]):
            tocat = []
            for sessdata in acttmp:
                ntrials = sessdata.shape[2]
                trialind = np.arange(ntrials)
                np.random.shuffle(trialind)
                tmp = sessdata[:, :, trialind]
                tocat.append(tmp[:, :, :minn])
            catact[keymap].append(np.concatenate(tocat, 1))
    catactboth.move_level_(0, 2)
    def cat_monkeys(x):
        '''x: {monkey}[4 (values)] np.array<nbins*nneurons*ntrials>'''
        return [np.concatenate([x['M'][ival], x['N'][ival]], axis=1) for ival in range(len(x['M']))]
    catactboth.apply_agg_(cat_monkeys, depth=2)
    catactboth = Struct.from_nested_dict({'both': catactboth.ndict}, n_layers=3)
    catact = catact.combine(catactboth)

    #### Moving data from arrays to a list ####
    def get_actvallist(x):
        # Pair each single-trial slice with its condition index, then
        # interleave conditions so labels stay balanced across the list.
        tmp = [[(trial, ival) for trial in np.moveaxis(x[ival], 2, 0)] for ival in range(len(x))]
        return list(zip(*chain(*zip(*tmp))))
    actvallist = catact.apply(get_actvallist)
    X, y = actvallist.apply(lambda x: x[0]), actvallist.apply(lambda x: x[1])
    X.apply_(np.stack)
    y.apply_(np.array)
    del(catact, act)

    #### Defining a boolean mask to get only the bins between cue ON and rwd
    ########################################################################
    cuesON_bins_mask = (evtxs['cues ON'] >= EVT_WINS['cues ON'][0]) & (evtxs['cues ON'] <= EVT_WINS['cues ON'][1])
    cuesON_bins = evtxs['cues ON'][cuesON_bins_mask]
    resp_bins_mask = (evtxs['response cue'] >= EVT_WINS['response cue'][0]) &\
        (evtxs['response cue'] <= EVT_WINS['response cue'][1])
    resp_bins = evtxs['response cue'][resp_bins_mask]
    rwd_bins_mask = (evtxs['rwd'] >= EVT_WINS['rwd'][0]) & (evtxs['rwd'] <= EVT_WINS['rwd'][1])
    rwd_bins = evtxs['rwd'][rwd_bins_mask]
    # Delay = after cues ON, through the whole response-cue window, up to rwd.
    delaymask = np.concatenate((cuesON_bins >= 0, np.ones(resp_bins.shape, dtype=bool), rwd_bins <= 0))
    # Bin timestamps actually kept for each event window.
    bins = {}
    for evt, (start, end) in EVT_WINS.items():
        xs = evtxs[evt]
        bins[evt] = xs[(xs >= start) & (xs <= end)]
    if save_folder is not None:
        with open(dataset_fullpath, 'wb') as f:
            pk.dump((X, y, delaymask, bins), f)
        print(f'data set created and saved in {unit_folder}')
    return X, y, delaymask, bins
# The following is an example. Replace the right hand side of the first three
# statements to get a specific data set
if __name__ == '__main__':
    # Data seeds used to generate the pseudo population data for decoding are
    # listed below:
    # dataseeds = [634564236, 9453241, 70010207, 43661999, 60410205]
    dataseed = 634564236
    # The following folder path must contain the unit data set file specified
    # below
    # NOTE(review): machine-specific absolute path -- adjust before running.
    unit_folder = Path("/home/john/datasets")
    # The following statement specifies which unit data set (with which
    # preprocessing parameters) is to be used to generate the population data
    # set
    unit_file = "unit_dataset_align.center_binsize.100_smooth.gaussian_smoothsize.100_step.25.pk"
    # The last argument of the function allows you to save the data set in a
    # specified folder, or to load an already generated population data set if
    # it already exists in this folder. In this example the population data set
    # is saved in the same folder as the unit data set.
    X, y, delaymask, bins = generate_dataset(dataseed, unit_folder, unit_file,
                                             save_folder=unit_folder)
| 43.601563 | 114 | 0.614854 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,472 | 0.400645 |
8450d07e5cec286e40f858637377c3e87f1ab9e5 | 634 | py | Python | setup.py | joepatmckenna/ohmlr | 2f3e63243758b995596f37897814634fc432f337 | [
"MIT"
] | null | null | null | setup.py | joepatmckenna/ohmlr | 2f3e63243758b995596f37897814634fc432f337 | [
"MIT"
] | null | null | null | setup.py | joepatmckenna/ohmlr | 2f3e63243758b995596f37897814634fc432f337 | [
"MIT"
] | null | null | null | import setuptools
# Read the long description and the version string from their files so they
# can be passed to setuptools.setup() below.
with open('README.rst', 'r') as f:
    readme = f.read()

with open('version', 'r') as f:
    version = f.read()

if __name__ == '__main__':
    setuptools.setup(
        name='ohmlr',
        version=version,
        # "logistic" was previously misspelled ("logisitc") in the
        # user-facing package description.
        description='One-hot multinomial logistic regression',
        long_description=readme,
        author='Joseph P. McKenna',
        author_email='joepatmckenna@gmail.com',
        url='http://joepatmckenna.github.io/ohmlr',
        download_url='https://pypi.org/project/ohmlr',
        packages=['ohmlr'],
        license='MIT',
        keywords=['inference', 'statistics', 'machine learning'])
| 27.565217 | 65 | 0.615142 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 252 | 0.397476 |
8450ee0e08874b8a26468c905f5abfbc7260c448 | 1,301 | py | Python | commands/climber/holdcimbersposition.py | 1757WestwoodRobotics/2022-RapidReact | b6d9cf203fd35e93dc5d26ba2d6889e2a9edb137 | [
"MIT"
] | 1 | 2022-01-21T22:00:24.000Z | 2022-01-21T22:00:24.000Z | commands/climber/holdcimbersposition.py | 1757WestwoodRobotics/2022-RapidReact | b6d9cf203fd35e93dc5d26ba2d6889e2a9edb137 | [
"MIT"
] | 40 | 2022-01-18T21:20:54.000Z | 2022-03-31T20:56:44.000Z | commands/climber/holdcimbersposition.py | 1757WestwoodRobotics/2022-RapidReact | b6d9cf203fd35e93dc5d26ba2d6889e2a9edb137 | [
"MIT"
] | 1 | 2022-01-28T02:46:38.000Z | 2022-01-28T02:46:38.000Z | from commands2 import CommandBase, ParallelCommandGroup
from subsystems.climbers.leftclimbersubsystem import LeftClimber
from subsystems.climbers.rightclimbersubsystem import RightClimber
class HoldLeftClimberPosition(CommandBase):
    """Hold the left climber in place: cut motor output and engage its brake."""

    def __init__(self, climber: LeftClimber) -> None:
        super().__init__()
        self.setName(__class__.__name__)
        self.climber = climber
        self.addRequirements([self.climber])

    def initialize(self) -> None:
        # Stop driving the motor, then clamp the mechanism with the brake.
        left = self.climber.leftClimber
        left.climberMotor.neutralOutput()
        left.activateBrake()
class HoldRightClimberPosition(CommandBase):
    """Hold the right climber in place: cut motor output and engage its brake."""

    def __init__(self, climber: RightClimber) -> None:
        super().__init__()
        self.setName(__class__.__name__)
        self.climber = climber
        self.addRequirements([self.climber])

    def initialize(self) -> None:
        # Stop driving the motor, then clamp the mechanism with the brake.
        right = self.climber.rightClimber
        right.climberMotor.neutralOutput()
        right.activateBrake()
class HoldBothClimbersPosition(ParallelCommandGroup):
    """Hold both climbers at once by running the two hold commands in parallel."""

    def __init__(self, leftClimber: LeftClimber, rightClimber: RightClimber):
        hold_commands = (
            HoldLeftClimberPosition(leftClimber),
            HoldRightClimberPosition(rightClimber),
        )
        super().__init__(*hold_commands)
        self.setName(__class__.__name__)
84513ebf1e835ed2f032b1060dd720580d3e05a2 | 668 | py | Python | practicer_flask/app.py | DominikPott/practicer-flask | c8e523095bdd5912dadb7357d16a4e76229a04da | [
"MIT"
] | null | null | null | practicer_flask/app.py | DominikPott/practicer-flask | c8e523095bdd5912dadb7357d16a4e76229a04da | [
"MIT"
] | null | null | null | practicer_flask/app.py | DominikPott/practicer-flask | c8e523095bdd5912dadb7357d16a4e76229a04da | [
"MIT"
] | null | null | null | import os
from flask import Flask
import practicer_flask.auth
import practicer_flask.exercises
import practicer_flask.dashboard
import practicer_flask.topic
import practicer_flask.model_viewer
def create_app(test_config=None):
    """Application factory for the practicer Flask app.

    Args:
        test_config: optional mapping of configuration overrides (used by
            tests). When None, only the defaults below apply -- identical
            to the previous behavior.

    Returns:
        The configured Flask application with all blueprints registered.
    """
    app = Flask(__name__)
    app.config.from_mapping(SECRET_KEY=os.environ.get("SECRET_KEY", 'local'))
    if test_config is not None:
        # The parameter was previously accepted but silently ignored; apply
        # it so tests can actually override configuration (standard Flask
        # application-factory pattern).
        app.config.from_mapping(test_config)
    app.register_blueprint(practicer_flask.auth.bp)
    app.register_blueprint(practicer_flask.exercises.bp)
    app.register_blueprint(practicer_flask.dashboard.bp)
    app.register_blueprint(practicer_flask.topic.bp)
    # NOTE(review): practicer_flask.model_viewer is imported at module level
    # but its blueprint is not registered here -- confirm whether intended.
    return app
# Module-level app instance for WSGI servers (e.g. gunicorn practicer_flask.app:app).
app = create_app()

if __name__ == "__main__":
    # NOTE(review): os.environ.get returns a string, so ANY non-empty DEV
    # value (including "0" or "false") enables debug mode -- confirm intended.
    app.run(debug=os.environ.get("DEV", False))
| 23.857143 | 77 | 0.784431 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 34 | 0.050898 |
84533ec2f7f2ad9597755a4499563c795ed9f246 | 737 | py | Python | algo/visualizations/temporalchart.py | alexeyev/visartm | d19e193b3c084d7f355a45b966c8bb2ebb6fa366 | [
"BSD-3-Clause"
] | 1 | 2020-10-01T10:11:21.000Z | 2020-10-01T10:11:21.000Z | algo/visualizations/temporalchart.py | alexeyev/visartm | d19e193b3c084d7f355a45b966c8bb2ebb6fa366 | [
"BSD-3-Clause"
] | null | null | null | algo/visualizations/temporalchart.py | alexeyev/visartm | d19e193b3c084d7f355a45b966c8bb2ebb6fa366 | [
"BSD-3-Clause"
] | null | null | null | from models.models import Topic, TopicInTopic
import json
def visual(vis, params):
    """Build the JS data payload for a temporal chart of topic sizes.

    Returns a string assigning two JS variables: `charts`, one row per
    leaf-layer topic ([title, count-per-date...]), and `dates`, the date
    labels prefixed with 'date'.
    """
    model = vis.model
    group_by = params[1]  # one of: year, month, week, day
    topic_qs = Topic.objects.filter(
        model=model,
        layer=model.layers_count).order_by("spectrum_index")
    titles = [topic.title for topic in topic_qs]
    cells, dates = model.group_matrix(group_by=group_by, named_groups=False)
    charts = []
    for y, title in enumerate(titles):
        row = [title]
        for x in range(len(dates)):
            row.append(len(cells[x][y]))
        charts.append(row)
    date_labels = [str(date.date()) for date in dates]
    return ("charts=" + json.dumps(charts) + ";\n"
            + "dates=" + json.dumps(['date'] + date_labels) + ";\n")
| 32.043478 | 76 | 0.639077 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 70 | 0.09498 |
84580bc22605d3bb58c5f232f6e1f847342e88fa | 3,596 | py | Python | submissions-api/app/main/model/submissions_manifest.py | sanger-tol/tol-submissions | 8dbbfaa98b1dfa09a09cb54cf1b2eb9d1dca5331 | [
"MIT"
] | null | null | null | submissions-api/app/main/model/submissions_manifest.py | sanger-tol/tol-submissions | 8dbbfaa98b1dfa09a09cb54cf1b2eb9d1dca5331 | [
"MIT"
] | null | null | null | submissions-api/app/main/model/submissions_manifest.py | sanger-tol/tol-submissions | 8dbbfaa98b1dfa09a09cb54cf1b2eb9d1dca5331 | [
"MIT"
] | null | null | null | # SPDX-FileCopyrightText: 2021 Genome Research Ltd.
#
# SPDX-License-Identifier: MIT
from .base import Base, db
class SubmissionsManifest(Base):
    """SQLAlchemy model for a sample-manifest submission.

    Besides the mapped columns, instances carry in-memory "tracker"
    collections (rebuilt by reset_trackers()) used to detect duplicate
    rack/plate+tube/well ids, specimen/taxon pairs and whole organisms
    among the manifest's samples.
    """

    __tablename__ = "manifest"
    manifest_id = db.Column(db.Integer, primary_key=True)
    # Samples are eagerly loaded and kept in manifest row order.
    samples = db.relationship('SubmissionsSample', back_populates="manifest",
                              lazy=False, order_by='SubmissionsSample.row')
    created_at = db.Column(db.DateTime, nullable=False, default=db.func.now())
    created_by = db.Column(db.Integer, db.ForeignKey('user.user_id'))
    user = db.relationship("SubmissionsUser", uselist=False, foreign_keys=[created_by])
    submission_status = db.Column(db.Boolean, nullable=True)
    project_name = db.Column(db.String(), nullable=False, default="ToL")
    sts_manifest_id = db.Column(db.String(), nullable=True)
    excel_file = db.Column(db.String(), nullable=True)

    # In-memory validation trackers (not persisted). NOTE(review): these
    # class-level mutable defaults are shared by all instances until
    # reset_trackers() rebinds them per instance -- call reset_trackers()
    # before relying on them for a given manifest.
    target_rack_plate_tube_wells = set()
    duplicate_rack_plate_tube_wells = []
    target_specimen_taxons = {}
    whole_organisms = set()
    duplicate_whole_organisms = []

    def reset_trackers(self):
        """Rebuild all tracker collections from the current self.samples."""
        # Target rack/plate and tube/well ids
        all = []  # NOTE(review): shadows the builtin `all` inside this method
        for sample in self.samples:
            if not sample.is_symbiont() and sample.rack_or_plate_id is not None \
                    and sample.tube_or_well_id is not None:
                concatenated = sample.rack_or_plate_id + '/' + sample.tube_or_well_id
                all.append(concatenated)
        self.target_rack_plate_tube_wells = set()
        seen_add = self.target_rack_plate_tube_wells.add
        # adds all elements it doesn't know yet to seen and all other to seen_twice
        self.duplicate_rack_plate_tube_wells = set(x for x in all if x in
                                                   self.target_rack_plate_tube_wells
                                                   or seen_add(x))
        # Target specimen/taxons
        self.target_specimen_taxons = {}
        for sample in self.samples:
            if not sample.is_symbiont() and sample.specimen_id is not None \
                    and sample.taxonomy_id is not None:
                # Only add the first one
                if sample.specimen_id not in self.target_specimen_taxons:
                    self.target_specimen_taxons[sample.specimen_id] = sample.taxonomy_id
        # Whole organisms
        all = []
        for sample in self.samples:
            if sample.organism_part == "WHOLE_ORGANISM":
                all.append(sample.specimen_id)
        self.whole_organisms = set()
        seen_add = self.whole_organisms.add
        # adds all elements it doesn't know yet to seen and all other to seen_twice
        self.duplicate_whole_organisms = set(x for x in all if x in
                                             self.whole_organisms
                                             or seen_add(x))

    # NOTE(review): the three methods below are instance methods whose
    # parameter is named `cls` but actually receives the instance.
    def unique_taxonomy_ids(cls):
        """Return the set of distinct taxonomy ids across all samples."""
        return set([x.taxonomy_id for x in cls.samples])

    def to_dict(cls):
        """Full serialization, including the sample objects themselves."""
        return {'manifestId': cls.manifest_id,
                'projectName': cls.project_name,
                'stsManifestId': cls.sts_manifest_id,
                'samples': cls.samples,
                'submissionStatus': cls.submission_status}

    def to_dict_short(cls):
        """Summary serialization: metadata plus a sample count (no samples)."""
        return {'manifestId': cls.manifest_id,
                'projectName': cls.project_name,
                'stsManifestId': cls.sts_manifest_id,
                'submissionStatus': cls.submission_status,
                'createdAt': cls.created_at,
                'numberOfSamples': len(cls.samples),
                'user': cls.user}
| 44.95 | 88 | 0.614294 | 3,480 | 0.967742 | 0 | 0 | 0 | 0 | 0 | 0 | 610 | 0.169633 |
8458ddef5330c4ed60d249ea5883464e063cf5ba | 6,411 | py | Python | eden/integration/hg/histedit_test.py | jmswen/eden | 5e0b051703fa946cc77fc43004435ae6b20599a1 | [
"BSD-3-Clause"
] | null | null | null | eden/integration/hg/histedit_test.py | jmswen/eden | 5e0b051703fa946cc77fc43004435ae6b20599a1 | [
"BSD-3-Clause"
] | null | null | null | eden/integration/hg/histedit_test.py | jmswen/eden | 5e0b051703fa946cc77fc43004435ae6b20599a1 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
#
# Copyright (c) 2016-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
import os
from eden.integration.lib import hgrepo
from .lib.hg_extension_test_base import EdenHgTestCase, hg_test
from .lib.histedit_command import HisteditCommand
@hg_test
class HisteditTest(EdenHgTestCase):
    """Integration tests for `hg histedit` inside an Eden checkout.

    populate_backing_repo() creates three single-file commits; each test
    then drives histedit through HisteditCommand and verifies both the
    resulting commit stack and the working-copy state.
    """

    _commit1: str
    _commit2: str
    _commit3: str

    def populate_backing_repo(self, repo: hgrepo.HgRepository) -> None:
        # Three commits, each adding one empty file: first, second, third.
        repo.write_file("first", "")
        self._commit1 = repo.commit("first commit")
        repo.write_file("second", "")
        self._commit2 = repo.commit("second commit")
        repo.write_file("third", "")
        self._commit3 = repo.commit("third commit")

    def test_stop_at_earlier_commit_in_the_stack_without_reordering(self) -> None:
        """'stop' pauses histedit mid-stack; '--continue' finishes it."""
        commits = self.repo.log()
        self.assertEqual([self._commit1, self._commit2, self._commit3], commits)
        # histedit, stopping in the middle of the stack.
        histedit = HisteditCommand()
        histedit.pick(self._commit1)
        histedit.stop(self._commit2)
        histedit.pick(self._commit3)
        # We expect histedit to terminate with a nonzero exit code in this case.
        with self.assertRaises(hgrepo.HgError) as context:
            histedit.run(self)
        head = self.repo.log(revset=".")[0]
        expected_msg = (
            "Changes committed as %s. " "You may amend the changeset now." % head[:12]
        )
        self.assertIn(expected_msg, str(context.exception))
        # Verify the new commit stack and the histedit termination state.
        # Note that the hash of commit[0] is unpredictable because Hg gives it a
        # new hash in anticipation of the user amending it.
        parent = self.repo.log(revset=".^")[0]
        self.assertEqual(self._commit1, parent)
        self.assertEqual(["first commit", "second commit"], self.repo.log("{desc}"))
        # Make sure the working copy is in the expected state.
        self.assert_status_empty(op="histedit")
        self.assertSetEqual(
            {".eden", ".hg", "first", "second"},
            set(os.listdir(self.repo.get_canonical_root())),
        )
        self.hg("histedit", "--continue")
        self.assertEqual(
            ["first commit", "second commit", "third commit"], self.repo.log("{desc}")
        )
        self.assert_status_empty()
        self.assertSetEqual(
            {".eden", ".hg", "first", "second", "third"},
            set(os.listdir(self.repo.get_canonical_root())),
        )

    def test_reordering_commits_without_merge_conflicts(self) -> None:
        """Reordering disjoint-file commits succeeds with no conflicts."""
        self.assertEqual(
            ["first commit", "second commit", "third commit"], self.repo.log("{desc}")
        )
        # histedit, reordering the stack in a conflict-free way.
        histedit = HisteditCommand()
        histedit.pick(self._commit2)
        histedit.pick(self._commit3)
        histedit.pick(self._commit1)
        histedit.run(self)
        self.assertEqual(
            ["second commit", "third commit", "first commit"], self.repo.log("{desc}")
        )
        self.assert_status_empty()
        self.assertSetEqual(
            {".eden", ".hg", "first", "second", "third"},
            set(os.listdir(self.repo.get_canonical_root())),
        )

    def test_drop_commit_without_merge_conflicts(self) -> None:
        """'drop' removes a commit and its file from the working copy."""
        self.assertEqual(
            ["first commit", "second commit", "third commit"], self.repo.log("{desc}")
        )
        # histedit, reordering the stack in a conflict-free way.
        histedit = HisteditCommand()
        histedit.pick(self._commit1)
        histedit.drop(self._commit2)
        histedit.pick(self._commit3)
        histedit.run(self)
        self.assertEqual(["first commit", "third commit"], self.repo.log("{desc}"))
        self.assert_status_empty()
        self.assertSetEqual(
            {".eden", ".hg", "first", "third"},
            set(os.listdir(self.repo.get_canonical_root())),
        )

    def test_roll_two_commits_into_parent(self) -> None:
        """'roll' folds commits into their parent, keeping their file changes."""
        self.assertEqual(
            ["first commit", "second commit", "third commit"], self.repo.log("{desc}")
        )
        # histedit, reordering the stack in a conflict-free way.
        histedit = HisteditCommand()
        histedit.pick(self._commit1)
        histedit.roll(self._commit2)
        histedit.roll(self._commit3)
        histedit.run(self)
        self.assertEqual(["first commit"], self.repo.log("{desc}"))
        self.assert_status_empty()
        self.assertSetEqual(
            {".eden", ".hg", "first", "second", "third"},
            set(os.listdir(self.repo.get_canonical_root())),
        )

    def test_abort_after_merge_conflict(self) -> None:
        """'--abort' after a conflict restores the original commit stack."""
        # Three conflicting commits that rewrite the same file.
        self.write_file("will_have_confict.txt", "original\n")
        self.hg("add", "will_have_confict.txt")
        commit4 = self.repo.commit("commit4")
        self.write_file("will_have_confict.txt", "1\n")
        commit5 = self.repo.commit("commit5")
        self.write_file("will_have_confict.txt", "2\n")
        commit6 = self.repo.commit("commit6")
        # Swapping commit5/commit6 forces a merge conflict on the file.
        histedit = HisteditCommand()
        histedit.pick(commit4)
        histedit.pick(commit6)
        histedit.pick(commit5)
        original_commits = self.repo.log()
        with self.assertRaises(hgrepo.HgError) as context:
            histedit.run(self, ancestor=commit4)
        expected_msg = (
            "Fix up the change (pick %s)\n" % commit6[:12]
        ) + " (hg histedit --continue to resume)"
        self.assertIn(expected_msg, str(context.exception))
        self.assert_status({"will_have_confict.txt": "M"}, op="histedit")
        self.assert_file_regex(
            "will_have_confict.txt",
            """\
<<<<<<< local: .*
original
=======
2
>>>>>>> histedit: .*
""",
        )
        self.hg("histedit", "--abort")
        self.assertEqual("2\n", self.read_file("will_have_confict.txt"))
        self.assertListEqual(
            original_commits,
            self.repo.log(),
            msg="The original commit hashes should be restored by the abort.",
        )
        self.assert_status_empty()
| 36.220339 | 86 | 0.608641 | 5,912 | 0.922165 | 0 | 0 | 5,921 | 0.923569 | 0 | 0 | 2,038 | 0.317891 |
8459ea4275ad26f2fdfb1430948999a41ff39caf | 408 | py | Python | dailypy/__init__.py | HuangJiaLian/dailypy | b838a4f6743fca8ccc5c4fa73142d0f7095dbbc3 | [
"CC0-1.0"
] | null | null | null | dailypy/__init__.py | HuangJiaLian/dailypy | b838a4f6743fca8ccc5c4fa73142d0f7095dbbc3 | [
"CC0-1.0"
] | 1 | 2020-08-19T13:42:52.000Z | 2020-08-19T14:32:31.000Z | dailypy/__init__.py | HuangJiaLian/dailypy | b838a4f6743fca8ccc5c4fa73142d0f7095dbbc3 | [
"CC0-1.0"
] | null | null | null | import numpy as np
import os
# Data manipulate
class dm:
    """Small helpers for dumping numpy arrays to disk ("data manipulate")."""

    def __init__(self):
        # No state needed; the class only groups the save helpers.
        pass

    def saveNp(self, a, name, path='.'):
        """Save array ``a`` in numpy binary format at ``<path>/<name>.npy``."""
        np.save(os.path.join(path, name), a)

    def saveTxt(self, a, name, path='.'):
        """Save array ``a`` as plain text at ``<path>/<name>.txt``."""
        target = os.path.join(path, name) + '.txt'
        np.savetxt(target, a)

    def saveArrows(self, a1, a2, name='cols'):
        """Stack ``a1`` and ``a2`` as two columns and save them as text."""
        stacked = np.stack((a1, a2), axis=1)
        self.saveTxt(stacked, name)
| 20.4 | 53 | 0.568627 | 355 | 0.870098 | 0 | 0 | 0 | 0 | 0 | 0 | 35 | 0.085784 |
845a911380b7475214d4489c0d02b5872a85aa00 | 310 | py | Python | Leetcode/0713. Subarray Product Less Than K/0713.py | Next-Gen-UI/Code-Dynamics | a9b9d5e3f27e870b3e030c75a1060d88292de01c | [
"MIT"
] | null | null | null | Leetcode/0713. Subarray Product Less Than K/0713.py | Next-Gen-UI/Code-Dynamics | a9b9d5e3f27e870b3e030c75a1060d88292de01c | [
"MIT"
] | null | null | null | Leetcode/0713. Subarray Product Less Than K/0713.py | Next-Gen-UI/Code-Dynamics | a9b9d5e3f27e870b3e030c75a1060d88292de01c | [
"MIT"
] | null | null | null | class Solution:
def numSubarrayProductLessThanK(self, nums: List[int], k: int) -> int:
if k <= 1:
return 0
ans = 0
prod = 1
j = 0
for i, num in enumerate(nums):
prod *= num
while prod >= k:
prod /= nums[j]
j += 1
ans += i - j + 1
return ans
| 17.222222 | 72 | 0.490323 | 309 | 0.996774 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
845c29a7df8a071ea4d00366b934a8a0a5899a8f | 2,832 | py | Python | vbb_backend/session/migrations/0002_auto_20210215_1509.py | patrickb42/backend-vbb-portal | 88362bc5b4d5cab95aa67e12694f98371604b65a | [
"MIT"
] | 3 | 2021-04-14T02:59:09.000Z | 2021-06-08T00:17:27.000Z | vbb_backend/session/migrations/0002_auto_20210215_1509.py | patrickb42/backend-vbb-portal | 88362bc5b4d5cab95aa67e12694f98371604b65a | [
"MIT"
] | 81 | 2020-12-08T00:11:52.000Z | 2021-08-09T18:13:32.000Z | vbb_backend/session/migrations/0002_auto_20210215_1509.py | patrickb42/backend-vbb-portal | 88362bc5b4d5cab95aa67e12694f98371604b65a | [
"MIT"
] | 5 | 2021-01-12T04:50:26.000Z | 2021-06-04T02:00:03.000Z | # Generated by Django 3.0.10 on 2021-02-15 15:09
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
    """Auto-generated Django migration (2021-02-15).

    Creates MentorSessionAssociation (mirroring the student association,
    with an `attended` flag), replaces StudentSessionAssociation's
    is_absent/notes/wont_attend fields with a single `attended` boolean,
    relaxes its FKs to SET_NULL, drops two Session fields, and deletes
    the SessionRule model.
    """
    dependencies = [
        ('users', '0006_auto_20210209_0849'),
        ('session', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='MentorSessionAssociation',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_date', models.DateTimeField(auto_now_add=True, null=True)),
                ('modified_date', models.DateTimeField(auto_now=True, null=True)),
                ('deleted', models.BooleanField(default=False)),
                ('external_id', models.UUIDField(db_index=True, default=uuid.uuid4, unique=True)),
                ('attended', models.BooleanField(default=False)),
                ('mentor', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='mentor_session', to='users.Mentor')),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.RemoveField(
            model_name='session',
            name='derived_from',
        ),
        migrations.RemoveField(
            model_name='session',
            name='is_mentor_confirmed',
        ),
        migrations.RemoveField(
            model_name='studentsessionassociation',
            name='is_absent',
        ),
        migrations.RemoveField(
            model_name='studentsessionassociation',
            name='notes',
        ),
        migrations.RemoveField(
            model_name='studentsessionassociation',
            name='wont_attend',
        ),
        migrations.AddField(
            model_name='studentsessionassociation',
            name='attended',
            field=models.BooleanField(default=False),
        ),
        migrations.AlterField(
            model_name='studentsessionassociation',
            name='session',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='session_student', to='session.Session'),
        ),
        migrations.AlterField(
            model_name='studentsessionassociation',
            name='student',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='student_session', to='users.Student'),
        ),
        migrations.DeleteModel(
            name='SessionRule',
        ),
        migrations.AddField(
            model_name='mentorsessionassociation',
            name='session',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='session_mentor', to='session.Session'),
        ),
    ]
| 37.76 | 153 | 0.598517 | 2,693 | 0.950918 | 0 | 0 | 0 | 0 | 0 | 0 | 667 | 0.235523 |
845d03992ff2924ffdc957b51de3c6b486a7c0ea | 880 | py | Python | src/las_util/serializers.py | dcslagel/las-util-django | cea8437813969b3b22182de6b9553b9e6694c548 | [
"BSD-3-Clause"
] | 3 | 2020-05-15T05:40:43.000Z | 2021-11-09T06:19:41.000Z | src/las_util/serializers.py | dcslagel/las-util-django | cea8437813969b3b22182de6b9553b9e6694c548 | [
"BSD-3-Clause"
] | 26 | 2020-05-20T13:03:02.000Z | 2021-09-23T19:36:39.000Z | src/las_util/serializers.py | dcslagel/las-util-django | cea8437813969b3b22182de6b9553b9e6694c548 | [
"BSD-3-Clause"
] | 1 | 2021-11-09T01:40:05.000Z | 2021-11-09T01:40:05.000Z | """
File-Name: [app]/serializers.py
File-Desc: Rest API serializers for las_util
App-Name: las_util
Project-Name: Las-Util-Django
Copyright: Copyright (c) 2019, DC Slagel
License-Identifier: BSD-3-Clause
"""
from rest_framework import serializers
from las_util.models import SectionInfo
class DocSerializer(serializers.ModelSerializer):
    """ModelSerializer exposing every field of a SectionInfo record."""
    class Meta:
        model = SectionInfo
        fields = '__all__'
class ListSerializer(serializers.ModelSerializer):
    """ModelSerializer exposing only SectionInfo.filename (for listings)."""
    class Meta:
        model = SectionInfo
        fields = ['filename']
# TODO: replace view.api_upload so it uses this serializer
# class UploadSerializer(serializer.ModelSerializer):
# """Link ModelSerializer to the Upload model"""
# class Meta:
# model = Upload
# fields = ['filename',]
| 29.333333 | 55 | 0.7125 | 356 | 0.404545 | 0 | 0 | 0 | 0 | 0 | 0 | 554 | 0.629545 |
845d9d3e1de64db798d6f4d7e46d76bf4c2959c6 | 3,965 | py | Python | UI/python/runtext.py | maxxscholten/nyc-train-sign | 7da32c413270f3bf4629969bcf16f7def4ddb372 | [
"MIT"
] | 8 | 2020-02-19T21:17:04.000Z | 2022-01-04T03:52:56.000Z | UI/python/runtext.py | maxxscholten/nyc-train-sign | 7da32c413270f3bf4629969bcf16f7def4ddb372 | [
"MIT"
] | 1 | 2021-09-20T02:13:41.000Z | 2021-09-21T07:01:14.000Z | UI/python/runtext.py | maxxscholten/nyc-train-sign | 7da32c413270f3bf4629969bcf16f7def4ddb372 | [
"MIT"
] | 4 | 2021-03-11T17:11:40.000Z | 2021-11-10T01:20:33.000Z | #!/usr/bin/env python
# Display a runtext with double-buffering.
from samplebase import SampleBase
from rgbmatrix import graphics
import time
import requests
import transitfeed
import datetime
import arrow
import schedule
# Module-level display/config state shared by RunText below.
today = datetime.date.today()  # NOTE(review): unused in the visible code — confirm before removing
starttime = time.time()  # NOTE(review): unused in the visible code — confirm before removing
schedule = transitfeed.Schedule()  # NOTE(review): shadows the imported `schedule` module above — confirm intended
url = "http://localhost:5000/by-id/077e"  # local feed proxy; presumably keyed by station id 077e — TODO confirm
font = graphics.Font()
font.LoadFont("../fonts/tom-thumb.bdf")
textColor = graphics.Color(0, 110, 0)
circleColor = graphics.Color(110, 0, 0)
circleNumberColor = graphics.Color(0, 0, 0)
class RunText(SampleBase):
    """Shows minutes-until-arrival for the next two trains on an RGB matrix.

    Polls a local realtime-feed proxy once per minute and redraws two rows,
    each with a route badge, destination name, and minutes remaining.
    """

    def __init__(self, *args, **kwargs):
        super(RunText, self).__init__(*args, **kwargs)
        self.parser.add_argument("-t", "--text", help="The text to scroll on the RGB LED panel", default="6 Wall Street")

    def _minutes_until(self, arrival_iso, now):
        """Return whole minutes from *now* until *arrival_iso* as a string.

        Non-negative single-digit values get a leading space so the minutes
        column stays right-aligned on the panel.  Extracted so getData does
        not duplicate the same computation once per train.
        """
        arrival = arrow.get(arrival_iso).to('utc').datetime
        delta = arrival - now
        whole, seconds = divmod(delta.total_seconds(), 60)
        minutes = int(round(whole + seconds / 60))
        if 0 <= minutes < 10:
            return " " + str(minutes)
        return str(minutes)

    def getData(self):
        """Fetch the next two northbound arrivals from the feed proxy.

        Returns a two-element list of minutes-to-arrival display strings.
        The response body is parsed once (the original called r.json()
        three times, re-parsing the payload each time).
        """
        r = requests.get(url=url)
        arrivals = r.json()['data'][0]['N']
        print(arrivals)
        now = arrow.utcnow().datetime
        return [self._minutes_until(arrivals[0]['time'], now),
                self._minutes_until(arrivals[1]['time'], now)]

    def drawCircle(self, canvas, x, y, color):
        """Draw a filled 9x9 circle (route badge) with top-left at (x, y)."""
        graphics.DrawLine(canvas, x+2, y+0, x+6, y+0, color)
        graphics.DrawLine(canvas, x+1, y+1, x+7, y+1, color)
        graphics.DrawLine(canvas, x+0, y+2, x+8, y+2, color)
        graphics.DrawLine(canvas, x+0, y+3, x+8, y+3, color)
        graphics.DrawLine(canvas, x+0, y+4, x+8, y+4, color)
        graphics.DrawLine(canvas, x+0, y+5, x+8, y+5, color)
        graphics.DrawLine(canvas, x+0, y+6, x+8, y+6, color)
        graphics.DrawLine(canvas, x+1, y+7, x+7, y+7, color)
        graphics.DrawLine(canvas, x+2, y+8, x+6, y+8, color)

    def drawRows(self, canvas, minsTrain1, minsTrain2):
        """Render both arrival rows (badge, destination, minutes) on canvas."""
        canvas.Clear()
        # Top line
        self.drawCircle(canvas, 2, 4, circleColor)
        graphics.DrawText(canvas, font, 5, 11, circleNumberColor, "3")
        graphics.DrawText(canvas, font, 14, 11, textColor, "Kingston")
        graphics.DrawText(canvas, font, 47, 11, textColor, minsTrain1)
        graphics.DrawText(canvas, font, 54, 11, textColor, "min")
        # Bottom line
        self.drawCircle(canvas, 2, 20, circleColor)
        graphics.DrawText(canvas, font, 5, 27, circleNumberColor, "3")
        graphics.DrawText(canvas, font, 14, 27, textColor, "Kingston")
        graphics.DrawText(canvas, font, 47, 27, textColor, minsTrain2)
        graphics.DrawText(canvas, font, 54, 27, textColor, "min")

    def timeDrawing(self):
        """Fetch fresh arrival data and push a newly drawn frame."""
        minsArr = self.getData()
        print(minsArr)
        minsTrain1 = minsArr[0]
        minsTrain2 = minsArr[1]
        canvas = self.matrix.CreateFrameCanvas()
        self.drawRows(canvas, minsTrain1, minsTrain2)
        # draw to the canvas
        canvas = self.matrix.SwapOnVSync(canvas)

    def run(self):
        """Draw immediately, then refresh at the top of every minute."""
        self.timeDrawing()
        i = 0
        while True:
            # Sleep until the next minute boundary so updates align to :00.
            time.sleep(60 - time.time() % 60)
            print(i)
            self.timeDrawing()
            i = i + 1
# Entry point: build the app; if it fails to start, show its usage help.
if __name__ == "__main__":
    app = RunText()
    if not app.process():
        app.print_help()
| 37.056075 | 121 | 0.640858 | 3,293 | 0.830517 | 0 | 0 | 0 | 0 | 0 | 0 | 372 | 0.093821 |
8460d12d7847a84d2047777b7d49fa013dbd6421 | 92 | py | Python | back/apps/base_user/apps.py | pylvin/nuxt-drf-template | e3f32cf98a2bdf393f88fbda2afedf2c0bf99dc7 | [
"MIT"
] | 2 | 2021-11-16T03:47:06.000Z | 2021-12-21T20:19:30.000Z | back/apps/base_user/apps.py | pylvin/nuxt-drf-template | e3f32cf98a2bdf393f88fbda2afedf2c0bf99dc7 | [
"MIT"
] | null | null | null | back/apps/base_user/apps.py | pylvin/nuxt-drf-template | e3f32cf98a2bdf393f88fbda2afedf2c0bf99dc7 | [
"MIT"
] | 2 | 2021-12-21T20:19:39.000Z | 2022-01-03T11:27:04.000Z | from django.apps import AppConfig
class BaseUserConfig(AppConfig):
    """Django AppConfig registering this package under the label 'base_user'."""
    name = 'base_user'
| 15.333333 | 33 | 0.76087 | 55 | 0.597826 | 0 | 0 | 0 | 0 | 0 | 0 | 11 | 0.119565 |
8462591fa4b3c8c3275d239bf45765f52bee1b94 | 1,188 | py | Python | model/board_generator.py | myrmarachne/minesweeper | 777170b7a31f1feed0bdf7aca31aaa9916c9b915 | [
"AFL-1.1"
] | null | null | null | model/board_generator.py | myrmarachne/minesweeper | 777170b7a31f1feed0bdf7aca31aaa9916c9b915 | [
"AFL-1.1"
] | null | null | null | model/board_generator.py | myrmarachne/minesweeper | 777170b7a31f1feed0bdf7aca31aaa9916c9b915 | [
"AFL-1.1"
] | null | null | null | from random import sample
from tile import Tile
from utils import neighbours
class BoardGenerator:
    """Builds a size x size Minesweeper board with numMines random mines."""

    def __init__(self, size, numMines):
        self.numMines = numMines
        self.size = size
        self.board = []
        self.generate_board()

    def generate_board(self):
        """Populate self.board with Tiles, place mines, fill neighbour counts."""
        # Generate a board for Minesweeper
        self.board = [[Tile(j, i) for i in range(0, self.size)] for j in range(0, self.size)]
        # select self.numMines distinct fields from 0 to self.size*self.size - 1
        fields_with_mines_ids = sample(range(0, self.size * self.size), self.numMines)
        # Map flat index n to (row, col).  divmod keeps both indices ints:
        # the original (n - n % size) / size yields a float on Python 3,
        # which raises TypeError when used as a list index.
        fields_with_mines = [divmod(n, self.size) for n in fields_with_mines_ids]
        for i, j in fields_with_mines:
            self.board[i][j].mine = True
            # add 1 to all neighbours of that field, except fields that
            # already contain a mine (order-dependent, as in the original)
            for (x, y) in neighbours(i, j, self.size):
                if not self.board[x][y].mine:
                    self.board[x][y].neighbours_with_mines += 1
| 38.322581 | 115 | 0.616162 | 1,108 | 0.93266 | 0 | 0 | 0 | 0 | 0 | 0 | 284 | 0.239057 |
8463673ccc7d5d8251d46b1bed4eb08caa70dd68 | 1,054 | py | Python | src/the_tale/the_tale/game/pvp/objects.py | al-arz/the-tale | 542770257eb6ebd56a5ac44ea1ef93ff4ab19eb5 | [
"BSD-3-Clause"
] | 85 | 2017-11-21T12:22:02.000Z | 2022-03-27T23:07:17.000Z | src/the_tale/the_tale/game/pvp/objects.py | al-arz/the-tale | 542770257eb6ebd56a5ac44ea1ef93ff4ab19eb5 | [
"BSD-3-Clause"
] | 545 | 2017-11-04T14:15:04.000Z | 2022-03-27T14:19:27.000Z | src/the_tale/the_tale/game/pvp/objects.py | al-arz/the-tale | 542770257eb6ebd56a5ac44ea1ef93ff4ab19eb5 | [
"BSD-3-Clause"
] | 45 | 2017-11-11T12:36:30.000Z | 2022-02-25T06:10:44.000Z |
import smart_imports
smart_imports.all()
class BattleRequest:
    """A pending PvP battle request created by one account (the initiator)."""

    __slots__ = ('id', 'initiator_id', 'matchmaker_type', 'created_at', 'updated_at')

    def __init__(self, id, initiator_id, matchmaker_type, created_at, updated_at):
        # Bind each constructor argument to the slot of the same name.
        for slot, value in zip(self.__slots__,
                               (id, initiator_id, matchmaker_type, created_at, updated_at)):
            setattr(self, slot, value)

    def ui_info(self):
        """Serialize the request for the client UI (timestamps as unix time)."""
        created_ts = time.mktime(self.created_at.timetuple())
        updated_ts = time.mktime(self.updated_at.timetuple())
        return {'id': self.id,
                'initiator_id': self.initiator_id,
                'matchmaker_type': self.matchmaker_type.value,
                'created_at': created_ts,
                'updated_at': updated_ts}
class Battle:
    """An active PvP battle between the given participants."""

    __slots__ = ('id', 'matchmaker_type', 'participants_ids', 'created_at')

    def __init__(self, id, matchmaker_type, participants_ids, created_at):
        # Bind each constructor argument to the slot of the same name.
        for slot, value in zip(self.__slots__,
                               (id, matchmaker_type, participants_ids, created_at)):
            setattr(self, slot, value)
| 31.939394 | 85 | 0.66129 | 1,005 | 0.95351 | 0 | 0 | 0 | 0 | 0 | 0 | 169 | 0.160342 |
8463c9fd49aa8b29fdde20c5d6a8fdfd2fb75f46 | 300 | py | Python | src/gencoef/test.py | bwasti/sleef | 4d260ae7f5d0e76a3c5424149deb838373e1894b | [
"BSL-1.0"
] | null | null | null | src/gencoef/test.py | bwasti/sleef | 4d260ae7f5d0e76a3c5424149deb838373e1894b | [
"BSL-1.0"
] | null | null | null | src/gencoef/test.py | bwasti/sleef | 4d260ae7f5d0e76a3c5424149deb838373e1894b | [
"BSL-1.0"
] | null | null | null | import numpy as np
import math
# Degree-6 polynomial (coefficients highest power first) evaluated next to
# sigmoid(k) in the loop below — presumably a generated approximation
# candidate whose output is compared by eye; TODO confirm against the
# coefficient generator's output.
p = np.poly1d([
+0.1429511242e-53,
+0.1561712123e-44,
-0.2259472298e-35,
-0.2669710222e-26,
+0.9784247973e-18,
+0.1655572013e-8,
+0.3991098106e+0,
])
def sigmoid(x):
  """Numerically stable logistic function 1 / (1 + exp(-x)).

  Branching on the sign of x keeps math.exp's argument non-positive,
  avoiding the OverflowError the naive form raises for large negative x.
  """
  if x >= 0:
    return 1 / (1 + math.exp(-x))
  z = math.exp(x)
  return z / (1 + z)
# Print the exact sigmoid next to the polynomial approximation on [0, 10).
for i in range(1000):
  k = i / 100.0
  print(sigmoid(k), p(k))
| 15.789474 | 31 | 0.656667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
846536aeea05536d64f4f59f9d2196f85d857b4d | 19,035 | py | Python | forever/Database.py | dss285/4ever | bd6f70f92d76d43342da401562f2c504adaf3867 | [
"MIT"
] | null | null | null | forever/Database.py | dss285/4ever | bd6f70f92d76d43342da401562f2c504adaf3867 | [
"MIT"
] | null | null | null | forever/Database.py | dss285/4ever | bd6f70f92d76d43342da401562f2c504adaf3867 | [
"MIT"
] | null | null | null | import psycopg2
import psycopg2.extras
import discord
from models.BotMention import BotMention
from models.UpdatedMessage import UpdatedMessage
from forever.Steam import Steam_API, Dota_Match, Dota_Match_Player
from forever.Utilities import run_in_executor, log
from forever.Warframe import CetusMessage, FissureMessage, SortieMessage, NightwaveMessage, InvasionMessage, SolSystem
from forever.Newswire import NewswireMessage
from models.Server import Server
from forever.Arknights import Formula, Item, Stage
from forever.GFL import Doll, Fairy
class Database:
	"""Synchronous wrapper around a single psycopg2 PostgreSQL connection.

	Tracks the two schemas used by the bot ("forever" for Discord state,
	"shared" for game data), the tables in each, and a few SQL templates
	that callers fill in with str.format.

	NOTE(review): the templates build SQL by plain string interpolation, so
	every interpolated value must be trusted; also, Postgres treats double
	quotes as identifiers rather than string literals, so callers that
	interpolate \"quoted\" values should be double-checked.
	"""
	def __init__(self, host : str, user : str, password : str, database : str, client : discord.Client=None) -> None:
		# Connection parameters.  `client` is accepted but not stored here;
		# DB_API keeps its own reference.
		self.host = host
		self.user = user
		self.password = password
		self.database = database
		# Schema names used when formatting the SQL templates below.
		self.shared = "shared"
		self.forever = "forever"
		# Tables fetched wholesale by get_data(), grouped by schema.
		self.tables = {
			"forever" : {
				'discord_images',
				'discord_servers',
				'discord_notifications',
				'discord_joinable_roles',
				'discord_role_messages',
				'discord_updated_messages',
			},
			"shared" : {
				"arknights_items",
				"arknights_stages",
				"arknights_formulas",
				"dota_heroes",
				"dota_matches",
				"dota_matches_players",
				'gfl_dolls',
				'gfl_equipment',
				'wf_builds',
				'wf_builds_images',
				'wf_items',
				'wf_missions',
				'wf_nightwave',
				'wf_solsystem_nodes',
				'wf_solsystem_planets',
				'wf_sorties'
			}
		}
		# SQL templates; placeholders are filled via str.format by callers.
		self.query_formats = {
			"delete_where" : 'DELETE FROM \"{schema}\".{table} WHERE {column}={value}',
			"delete_where_and" : 'DELETE FROM \"{schema}\".{table} WHERE {column_1}={value_1} AND {column_2}={value_2}',
			"delete_where_custom" : 'DELETE FROM \"{schema}\".{table} WHERE {custom}',
			"insert_into" : "INSERT INTO \"{schema}\".{table} ({columns}) VALUES ({values})"
		}
		self.connection = psycopg2.connect(host=self.host,
									user=self.user,
									password=self.password,
									database=self.database,
									port=5432)
	def query(self, sql : str) -> None:
		"""Execute *sql*, committing on success; returns rows for SELECTs.

		NOTE(review): a SELECT with zero rows (falsy []) also returns None,
		and any error is swallowed after printing and rolling back — callers
		cannot distinguish failure from an empty result.
		"""
		try:
			data = None
			with self.connection.cursor(cursor_factory=psycopg2.extras.RealDictCursor) as cursor:
				cursor.execute(sql)
				if "SELECT" in sql:
					data = cursor.fetchall()
				self.connection.commit()
			if data:
				return data
		except Exception as e:
			print(e)
			self.connection.rollback()
	def get_data(self,) -> dict[str, dict]:
		"""Fetch every configured table as {table_name: list_of_row_dicts}."""
		results = {}
		for i, j in self.tables.items():
			for x in j:
				results[x] = self.get_table_rows(f'\"{i}\".{x}')
		return results
	def get_table_rows(self, tabletype : str) -> dict:
		"""Return all rows of *tabletype* (a schema-qualified table name)."""
		results = None
		with self.connection.cursor(cursor_factory=psycopg2.extras.RealDictCursor) as cursor:
			cursor.execute(f"SELECT * FROM {tabletype}")
			results = cursor.fetchall()
			self.connection.commit()
		return results
class DB_API(Database):
	"""Async-facing layer over Database.

	Caches game/Discord state in the nested `runtime` dict and runs the
	blocking psycopg2 calls through run_in_executor-wrapped methods.
	"""
	def __init__(self, host :str, user:str, password:str, database:str, client) -> None:
		super().__init__(host, user, password, database)
		self.client = client
		self.runtime = {}
		self.saved_messages = set()
		self.mentions = []
		self.init_done = False
	def __getitem__(self, item):
		# Dict-style access into the runtime cache (e.g. db["warframe"]).
		return self.runtime[item]
	def structure(self,) -> None:
		"""Initialise the empty skeleton of the nested runtime cache."""
		self.runtime["warframe"] = {}
		self.runtime["warframe"]["nightwave"] = []
		self.runtime["warframe"]["invasions"] = []
		self.runtime["warframe"]["sorties"] = None
		self.runtime["warframe"]["translate"] = {}
		self.runtime["warframe"]["translate"]["missions"] = {}
		self.runtime["warframe"]["translate"]["nightwave"] = {}
		self.runtime["warframe"]["translate"]["sorties"] = {}
		self.runtime["warframe"]["translate"]["items"] = {}
		self.runtime["warframe"]["translate"]["solsystem"] = {}
		self.runtime["warframe"]["translate"]["solsystem"]["planets"] = []
		self.runtime["warframe"]["translate"]["solsystem"]["nodes"] = []
		self.runtime["arknights"] = {}
		self.runtime["arknights"]["formulas"] = {}
		self.runtime["arknights"]["items"] = {}
		self.runtime["arknights"]["stages"] = {}
		self.runtime["arknights"]["items"]["ids"] = {}
		self.runtime["arknights"]["items"]["names"] = {}
		self.runtime["arknights"]["stages"]["ids"] = {}
		self.runtime["arknights"]["stages"]["codes"] = {}
		self.runtime["gfl"] = {}
		self.runtime["gfl"]["dolls"] = {}
		self.runtime["gfl"]["dolls"]["aliases"] = {}
		self.runtime["gfl"]["dolls"]["names"] = {}
		self.runtime["gfl"]["equipment"] = {}
		self.runtime["dota"] = {}
		self.runtime["droptables"] = {}
		self.runtime["servers"] = {}
	@run_in_executor
	def query(self, sql : str) -> None:
		# Executor-wrapped version of Database.query; presumably awaitable
		# via the run_in_executor decorator — confirm its contract.
		return super().query(sql)
	@run_in_executor
	def get_data(self,) -> dict[str, dict]:
		# Executor-wrapped version of Database.get_data (see query above).
		return super().get_data()
	async def get_server(self, server_id, data : dict[str, dict]) -> None:
		"""Build a Server object for *server_id* from the fetched rows.

		Rows whose Discord objects no longer exist (deleted messages or
		roles) are pruned from the database as a side effect.
		"""
		log_id = next((i["logchannel_id"] for i in data["discord_servers"] if i["server_id"] == server_id), None)
		discord_server = self.client.get_guild(server_id)
		logchannel = self.client.get_channel(log_id) if log_id else None
		updated_messages = {}
		joinable_roles = set()
		role_messages = {}
		notifications = []
		for x in data["discord_role_messages"]:
			if x["server_id"] == server_id:
				channel = self.client.get_channel(x["channel_id"])
				message = None
				try:
					message = await channel.fetch_message(x["message_id"])
				except discord.NotFound:
					# Message was deleted on Discord: drop its DB rows.
					await self.delete_role_message(x["message_id"])
					await self.delete_updated_message(x["message_id"])
					continue
				if message:
					role_messages[message.id] = {
						"message" : message,
						"emoji" : x["emoji"],
						"role_id" : x["role_id"]
					}
		for x in data["discord_joinable_roles"]:
			if x["server_id"] == server_id:
				role = discord_server.get_role(x["role_id"])
				if role:
					joinable_roles.add(role)
				else:
					# NOTE(review): delete_joinable_role is a plain def that
					# returns None, so awaiting it should raise TypeError —
					# confirm run_in_executor semantics / fix the method.
					await self.delete_joinable_role(x["role_id"])
		for x in data["discord_notifications"]:
			if x["server_id"] == server_id:
				role = discord_server.get_role(x["role_id"])
				if role:
					bot_mention = BotMention(x["notification_name"], role)
					notifications.append(bot_mention)
				else:
					await self.delete_notification(x["notification_name"], x["server_id"])
		for x in data["discord_updated_messages"]:
			if x["server_id"] == server_id:
				channel = self.client.get_channel(x["channel_id"])
				if channel:
					message = None
					try:
						message = await channel.fetch_message(x["message_id"])
					except discord.NotFound:
						await self.delete_role_message(x["message_id"])
						await self.delete_updated_message(x["message_id"])
						message = None
					if message:
						# Wrap the Discord message in the matching updater type.
						message_type = x["message_type"]
						if message_type == "nightwave":
							updated_messages[message_type] = NightwaveMessage(message)
						elif message_type == "invasions":
							updated_messages[message_type] = InvasionMessage(message, [])
						elif message_type == "fissures":
							updated_messages[message_type] = FissureMessage(message, [])
						elif message_type == "sorties":
							updated_messages[message_type] = SortieMessage(message)
						elif message_type == "poe":
							mention = next((i for i in notifications if i.name == "poe_night"), None)
							updated_messages[message_type] = CetusMessage(message, mention, self.client)
						elif message_type == "gtanw":
							updated_messages[message_type] = NewswireMessage(message)
		server = Server(server_id, discord_server, logchannel, updated_messages, notifications, joinable_roles, role_messages)
		self.runtime["servers"][server_id] = server
	async def update_runtime(self,) -> None:
		"""Refresh the already-initialised parts of the runtime cache.

		NOTE(review): unlike init_runtime, self.get_data() is NOT awaited
		here, so `data` looks like an awaitable rather than a dict — verify
		against run_in_executor before relying on this method.
		"""
		data = self.get_data()
		if "gfl" in self.runtime:
			self.gfl(data)
		if "warframe" in self.runtime:
			self.warframe(data)
		if "droptables" in self.runtime:
			self.droptables(data)
	def arknights(self, data : dict[str, dict]) -> None:
		"""Hydrate Arknights Items, Formulas and Stages into the runtime cache."""
		formulas = data.get("arknights_formulas")
		stages = data.get("arknights_stages")
		items = data.get("arknights_items")
		for i in items:
			tmp = Item(i["id"], i["name"], i["description"], i["rarity"], i["icon_id"], i["usage"])
			# Raw "stageid|occurence ..." string; resolved in the last loop.
			tmp._stage_drop_list_str = i["stage_drop_list"]
			self.runtime["arknights"]["items"]["ids"][i["id"]] = tmp
			self.runtime["arknights"]["items"]["names"][tmp.name] = tmp
		for f in formulas:
			costs = []
			if f["costs"] != "":
				# costs format: space-separated "item_id|amount" pairs.
				tmp = f["costs"].split(" ")
				for c in tmp:
					splitted = c.split("|")
					item_id = splitted[0]
					amount = splitted[1]
					costs.append({
						"item" : self.runtime["arknights"]["items"]["ids"][item_id],
						"amount" : amount
					})
			tmp = Formula(f["id"], self.runtime["arknights"]["items"]["ids"][f["item_id"]], f["count"], costs, f["room"])
			self.runtime["arknights"]["items"]["ids"][f["item_id"]].set_formula(tmp)
			self.runtime["arknights"]["formulas"][f"{f['id']}_{f['room']}"] = tmp
		for s in stages:
			drops = []
			if s["drops"] != "":
				# drops format: space-separated "itemid|droptype|occurence".
				tmp = s["drops"].split(" ")
				for x in tmp:
					splitted = x.split("|")
					itemid = splitted[0]
					droptype = splitted[1]
					occurence = splitted[2]
					# NOTE(review): looks like this should be
					# ["items"]["ids"].get(itemid); ["items"] only holds the
					# "ids"/"names" sub-dicts, so this .get always misses and
					# the raw id string is stored instead — confirm.
					item = self.runtime["arknights"]["items"].get(itemid)
					if item is None:
						item = itemid
					drops.append({
						"item" : item,
						"drop_type" : droptype,
						"occurence" : occurence
					})
			sta = Stage(s["id"], s["code"], s["name"], s["description"], s["sanity_cost"], drops)
			self.runtime["arknights"]["stages"]["ids"][s["id"]] = sta
			self.runtime["arknights"]["stages"]["codes"][sta.code] = sta
		for itemid, item in self.runtime["arknights"]["items"]["ids"].items():
			stage_drop_list = []
			if item._stage_drop_list_str not in ["", "-"]:
				tmp = item._stage_drop_list_str.split(" ")
				for i in tmp:
					splitted = i.split("|")
					stageid = splitted[0]
					occurence = splitted[1]
					stage = self.runtime["arknights"]["stages"]["ids"][stageid]
					stage_drop_list.append({
						"stage" : stage,
						"occurence" : occurence
					})
			item.set_stage_drop_list(stage_drop_list)
	def gfl(self, data : dict[str, dict]) -> None:
		"""Hydrate GFL dolls into name- and alias-keyed lookup tables."""
		for d in data["gfl_dolls"]:
			# aliases column is a "|"-separated list (may be empty/None).
			aliases = d["aliases"].split("|") if d["aliases"] else []
			doll = Doll(d["id"], d["name"],
						d["type"],
						d["rarity"],
						d["formation_bonus"],
						d["formation_tiles"],
						d["skill"],
						aliases,
						d["production_timer"])
			self.runtime["gfl"]["dolls"]["names"][d["name"].lower()] = doll
			for x in aliases:
				self.runtime["gfl"]["dolls"]["aliases"][x.lower()] = doll
	def warframe(self, data : dict[str, dict]) -> None:
		"""Rebuild the Warframe code-name -> display-name translation tables."""
		self.runtime["warframe"]["translate"]["solsystem"]["planets"].clear()
		self.runtime["warframe"]["translate"]["solsystem"]["nodes"].clear()
		for item in data["wf_missions"]:
			self.runtime["warframe"]["translate"]["missions"][item["code_name"]] = item["name"]
		for item in data["wf_nightwave"]:
			self.runtime["warframe"]["translate"]["nightwave"][item["code_name"]] = item["name"]
		for item in data["wf_sorties"]:
			self.runtime["warframe"]["translate"]["sorties"][item["code_name"]] = item["name"]
		for item in data["wf_items"]:
			self.runtime["warframe"]["translate"]["items"][item["code_name"]] = item["name"]
		for item in data["wf_solsystem_planets"]:
			self.runtime["warframe"]["translate"]["solsystem"]["planets"].append(SolSystem.SolPlanet(item["planet_id"], item["name"]))
		for item in data["wf_solsystem_nodes"]:
			# Each node links back to its (already loaded) planet object.
			self.runtime["warframe"]["translate"]["solsystem"]["nodes"].append(SolSystem.SolNode(item["node_id"], item["name"],
			next(planet for planet in self.runtime["warframe"]["translate"]["solsystem"]["planets"] if planet.id == item["planet_id"])))
	def dota(self, data : dict[str, dict]) -> None:
		"""Hydrate stored Dota matches/players into the Steam_API cache."""
		match_players = {}
		dota_heroes = {"id" : {}, "name" : {}}
		for i in data["dota_heroes"]:
			dota_heroes["id"][i["id"]] = i["name"]
			dota_heroes["name"][i["name"]] = i["id"]
		for i in data["dota_matches_players"]:
			if i["match_id"] not in match_players:
				match_players[i["match_id"]] = {"players" : {"dire" : {}, "radiant" : {}}, "radiant_team_ids" : set(), "dire_team_ids" : set()}
			player_slot = i["player_slot"]
			if i["team"] == "dire":
				# Dire slots are stored offset by 128 (Valve slot encoding).
				player_slot -= 128
				match_players[i["match_id"]]["dire_team_ids"].add(i["id"])
			elif i["team"] == "radiant":
				match_players[i["match_id"]]["radiant_team_ids"].add(i["id"])
			match_players[i["match_id"]]["players"][i["team"]][player_slot] = Dota_Match_Player(
				i["id"],
				i["player_slot"],
				i["hero_id"],
				i["kills"],
				i["deaths"],
				i["assists"],
				i["last_hits"],
				i["denies"],
				i["gpm"],
				i["xpm"],
				i["level"],
				i["hero_dmg"],
				i["building_dmg"],
				i["healing"],
				i["networth"]
			)
		for i in data["dota_matches"]:
			dire_team_ids = match_players[i["id"]]["dire_team_ids"]
			radiant_team_ids = match_players[i["id"]]["radiant_team_ids"]
			players = match_players[i["id"]]["players"]
			dota_match = Dota_Match(
				i["id"],
				players,
				i["game_mode"],
				i["duration"],
				i["start_time"],
				i["radiant_win"],
				i["radiant_kills"],
				i["dire_kills"],
				radiant_team_ids,
				dire_team_ids
			)
			Steam_API.cache.add(f"match_details_{dota_match.id}", dota_match)
		self.runtime["dota"]["heroes"] = dota_heroes
	def droptables(self, data : dict[str, dict]) -> None:
		# Intentionally disabled: the loading code below is kept for reference.
		return
		# for i in data['droptables']:
		# 	if i['droptable_name'] not in self.runtime["droptables"]:
		# 		self.runtime["droptables"][i["droptable_name"]] = DropTable()
		# 	self.runtime["droptables"][i["droptable_name"]].add(i["weight"], i["item_name"])
	async def init_runtime(self,) -> None:
		"""Fetch everything from the DB and build the full runtime cache."""
		self.structure()
		data = await self.get_data()
		#Server Translation
		for i in data["discord_servers"]:
			await self.get_server(i["server_id"], data)
		#GFL Translation
		self.gfl(data)
		#WF Translation
		self.warframe(data)
		#dota matches
		self.dota(data)
		#AK Translation
		self.arknights(data)
		self.init_done = True
	def delete_joinable_role(self, role_id : int) -> None:
		"""Delete the joinable-role row for *role_id*.

		NOTE(review): unlike its siblings this is not async and the
		executor-wrapped self.query(...) result is discarded un-awaited,
		so the DELETE likely never runs — confirm and align with the
		other delete_* methods.
		"""
		self.query(self.query_formats["delete_where"].format(
			schema=self.forever,
			table="discord_joinable_roles",
			column="role_id",
			value=role_id
		))
	async def delete_updated_message(self, message_id : int) -> None:
		"""Delete the updated-message row for *message_id*."""
		await self.query(self.query_formats["delete_where"].format(
			schema=self.forever,
			table="discord_updated_messages",
			column="message_id",
			value=message_id
		))
	async def delete_role_message(self, message_id : int=None, role_id : int=None) -> None:
		"""Delete role-message rows by message id, role id, or both."""
		query = None
		if message_id and role_id:
			query = self.query_formats["delete_where_and"].format(
				schema=self.forever,
				table="discord_role_messages",
				column_1="message_id",
				value_1=message_id,
				column_2="role_id",
				value_2=role_id
			)
		elif message_id:
			query = self.query_formats["delete_where"].format(
				schema=self.forever,
				table="discord_role_messages",
				column="message_id",
				value=message_id
			)
		elif role_id:
			query = self.query_formats["delete_where"].format(
				schema=self.forever,
				table="discord_role_messages",
				column="role_id",
				value=role_id
			)
		if query:
			await self.query(query)
	async def delete_notification(self, notification_name : str, server_id : int) -> None:
		"""Delete a notification row by name and server.

		NOTE(review): column_1="name" does not match the column created by
		create_notification ("notification_name"), and the \"...\" double
		quotes form a Postgres identifier, not a string literal — confirm.
		"""
		await self.query(self.query_formats["delete_where_and"].format(
			schema=self.forever,
			table="discord_notifications",
			column_1="name",
			value_1=f"\"{notification_name}\"",
			column_2="server_id",
			value_2=server_id
		))
	async def delete_server(self, server_id : int) -> None:
		"""Delete the server row for *server_id*."""
		await self.query(self.query_formats["delete_where"].format(
			schema=self.forever,
			table="discord_servers",
			column="server_id",
			value=server_id
		))
	async def create_joinable_role(self, role_id : int, server_id : int) -> None:
		"""Insert a joinable-role row."""
		await self.query(self.query_formats["insert_into"].format(
			schema=self.forever,
			table="discord_joinable_roles",
			columns="role_id, server_id",
			values=f"{role_id}, {server_id}"
		))
	async def create_updated_message(self, server_id : int, message_type : str, channel_id : int, message_id : int) -> None:
		"""Insert an updated-message row.

		NOTE(review): the \"{message_type}\" double quotes become a Postgres
		identifier, not a string literal — confirm these inserts succeed.
		"""
		await self.query(self.query_formats["insert_into"].format(
			schema=self.forever,
			table="discord_updated_messages",
			columns="server_id, message_type, channel_id, message_id",
			values=f"{server_id}, \"{message_type}\", {channel_id}, {message_id}"
		))
	async def create_role_message(self, role_id : int, message_id : int, channel_id : int, emoji, server_id : int) -> None:
		"""Insert a role-message row (emoji quoted — see NOTE above)."""
		await self.query(self.query_formats["insert_into"].format(
			schema=self.forever,
			table="discord_role_messages",
			columns="role_id, message_id, channel_id, emoji, server_id",
			values=f"{role_id}, {message_id}, {channel_id}, \"{emoji}\", {server_id}"
		))
	async def create_notification(self, notification_name : str, role_id : int, server_id : int) -> None:
		"""Insert a notification row (name quoted — see NOTE above)."""
		await self.query(self.query_formats["insert_into"].format(
			schema=self.forever,
			table="discord_notifications",
			columns="notification_name, role_id, server_id",
			values=f"\"{notification_name}\", {role_id}, {server_id}"
		))
	async def create_server(self, server_id : int) -> None:
		"""Insert a server row."""
		await self.query(self.query_formats["insert_into"].format(
			schema=self.forever,
			table="discord_servers",
			columns="server_id",
			values=f"{server_id}"
		))
	async def create_dota_match(self, dota_match : Dota_Match) -> None:
		"""Persist a Dota_Match and all of its players.

		Missing numeric stats are written as SQL null via the `or "null"`
		fallbacks (which also nulls legitimate zeros — inherited behaviour).
		"""
		query_match = self.query_formats["insert_into"]
		query_player = self.query_formats["insert_into"]
		query_match = query_match.format(
			schema=self.shared,
			table="dota_matches",
			columns="id, game_mode, start_time, radiant_win, radiant_kills, dire_kills, duration",
			values=f"{dota_match.id}, {dota_match.game_mode}, {dota_match.start_time}, {dota_match.radiant_win}, {dota_match.radiant_kills}, {dota_match.dire_kills}, {dota_match.duration}"
		)
		await self.query(query_match)
		for team, players in dota_match.players.items():
			for player_slot, player in players.items():
				await self.query(
					query_player.format(
						schema=self.shared,
						table="dota_matches_players",
						columns="id, match_id, player_slot, hero_id, kills, deaths, assists, last_hits, denies, gpm, xpm, level, hero_dmg, building_dmg, healing, networth, team",
						values="{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, '{}'".format(
							player.id or "null",
							dota_match.id,
							player.player_slot,
							player.hero_id,
							player.kills,
							player.deaths,
							player.assists,
							player.last_hits,
							player.denies,
							player.gpm,
							player.xpm,
							player.level,
							player.hero_dmg or "null",
							player.building_dmg or "null",
							player.healing or "null",
							player.networth or "null",
							team
						)
					)
				)
8465f309612202475ac3cb61d22a9dcf1509182e | 822 | py | Python | Week06/q_cifar10_cnn.py | HowardNTUST/HackNTU_Data_2017 | ad8e753a16719b6f9396d88b313a5757f5ed4794 | [
"MIT"
] | null | null | null | Week06/q_cifar10_cnn.py | HowardNTUST/HackNTU_Data_2017 | ad8e753a16719b6f9396d88b313a5757f5ed4794 | [
"MIT"
] | null | null | null | Week06/q_cifar10_cnn.py | HowardNTUST/HackNTU_Data_2017 | ad8e753a16719b6f9396d88b313a5757f5ed4794 | [
"MIT"
# CIFAR-10 CNN: two conv/pool stages followed by a dense classifier.
import keras
from keras.models import Sequential  # fix: Sequential was used below but never imported
from keras.layers import Dense, Activation, Conv2D, MaxPool2D, Reshape

model = Sequential()
# Input arrives flattened (3*32*32); restore the channels-first image shape.
model.add(Reshape((3, 32, 32), input_shape=(3*32*32,) ))
model.add(Conv2D(filters=32, kernel_size=(3,3), padding='same', activation="relu", data_format='channels_first'))
model.add(MaxPool2D())
model.add(Conv2D(filters=64, kernel_size=(3,3), padding='same', activation="relu", data_format='channels_first'))
model.add(MaxPool2D())
# Flatten the feature maps for the fully connected layers.
model.add(Reshape((-1,)))
model.add(Dense(units=1024, activation="relu"))
model.add(Dense(units=10, activation="softmax"))
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
# NOTE(review): train_X/train_Y/test_X/test_Y must already be defined by the
# surrounding notebook/session before this script runs — confirm.
model.fit(train_X, train_Y, validation_split=0.02, batch_size=128, epochs=30)
rtn = model.evaluate(test_X, test_Y)
print("\ntest accuracy=", rtn[1])
8465fe705e2203a309cb2f80aab7f362306bc341 | 1,111 | py | Python | testesDuranteAulas/aula019.py | Igor3550/Exercicios-de-python | e0f6e043df4f0770ac15968485fbb19698b4ac6b | [
"MIT"
] | null | null | null | testesDuranteAulas/aula019.py | Igor3550/Exercicios-de-python | e0f6e043df4f0770ac15968485fbb19698b4ac6b | [
"MIT"
] | null | null | null | testesDuranteAulas/aula019.py | Igor3550/Exercicios-de-python | e0f6e043df4f0770ac15968485fbb19698b4ac6b | [
"MIT"
] | null | null | null | # Dicionarios
pessoas = {'nome': 'Igor', 'sexo': 'M', 'idade': 20}
print(f'O {pessoas["nome"]} tem {pessoas["idade"]} anos.')
print(pessoas.keys()) #chaves do dicionario
print(pessoas.values())#valores das chaves
print(pessoas.items())#mostra os itens do dicionario
print()
for k in pessoas.keys():
print(k)
for v in pessoas.values():
print(v)
for k, v in pessoas.items():
print(k, v)
print()
for k, v in pessoas.items():
print(f'{k} = {v}')
print()
del pessoas['sexo']# deleta uma chave
pessoas['peso'] = 72# adiciona uma nova chave
for k, v in pessoas.items():
print(f'{k} = {v}')
print()
# Dicionario dentro de uma lista
brasil = []
estado1 = {'uf': 'Rio de Janeiro', 'sigla': 'RJ'}
estado2 = {'uf': 'São Paulo', 'sigla': 'SP'}
brasil.append(estado1)
brasil.append(estado2)
print(brasil[0]['uf'])
print()
brasil = list()
estado = dict()
for c in range(0, 3):
estado['uf'] = str(input('Unidade federativa: '))
estado['sigla'] = str(input('Sigla: '))
brasil.append(estado.copy())# cópia de um dicionario
for e in brasil:
for k, v in e.items():
print(f'{k} = {v}')
| 26.452381 | 58 | 0.629163 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 414 | 0.371968 |
84664082e1511f1729add08f835b69444a8edf67 | 9,697 | py | Python | polyanalyst6api/api.py | Megaputer/polyanalyst6api-python | c6626a8a5f8f926b1f32285e18457ed70dfba73a | [
"MIT"
] | 2 | 2021-01-30T19:04:12.000Z | 2021-06-18T09:41:15.000Z | polyanalyst6api/api.py | Megaputer/polyanalyst6api-python | c6626a8a5f8f926b1f32285e18457ed70dfba73a | [
"MIT"
] | null | null | null | polyanalyst6api/api.py | Megaputer/polyanalyst6api-python | c6626a8a5f8f926b1f32285e18457ed70dfba73a | [
"MIT"
] | 1 | 2021-04-19T09:57:14.000Z | 2021-04-19T09:57:14.000Z | """
polyanalyst6api.api
~~~~~~~~~~~~~~~~~~~
This module contains functionality for access to PolyAnalyst API.
"""
import configparser
import contextlib
import pathlib
import warnings
from typing import Any, Dict, List, Tuple, Union, Optional
from urllib.parse import urljoin, urlparse
import requests
import urllib3
from . import __version__
from .drive import Drive
from .project import Parameters, Project
from .exceptions import APIException, ClientException, _WrapperNotFound
__all__ = ['API']
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
warnings.simplefilter(
'always', UserWarning
) # without this set_parameters will show warnings only once
NodeTypes = [
"CSV Exporter/",
"DataSource/CSV",
"DataSource/EXCEL",
"DataSource/FILES",
"DataSource/INET",
"DataSource/ODBC",
"DataSource/RSS",
"DataSource/XML",
"Dataset/Biased",
"Dataset/ExtractTerms",
"Dataset/Python",
"Dataset/R",
"Dataset/ReplaceTerms",
"ODBC Exporter/",
"PA6TaxonomyResult/TaxonomyResult",
"SRLRuleSet/Filter Rows",
"SRLRuleSet/SRL Rule",
"TmlEntityExtractor/FEX",
"Sentiment Analysis",
"TmlLinkTerms/",
]
class API:
"""PolyAnalyst API
:param url: (optional) The scheme, host and port(if exists) of a PolyAnalyst server \
(e.g. ``https://localhost:5043/``, ``http://example.polyanalyst.com``)
:param username: (optional) The username to login with
:param password: (optional) The password for specified username
:param ldap_server: (optional) LDAP Server address
:param version: (optional) Choose which PolyAnalyst API version to use. Default: ``1.0``
If ldap_server is provided, then login will be performed via LDAP Server.
Usage::
>>> with API(POLYANALYST_URL, YOUR_USERNAME, YOUR_PASSWORD) as api:
... print(api.get_server_info())
or if you're using configuration file (New in version 0.23.0):
>>> with API() as api:
... print(api.get_server_info())
"""
_api_path = '/polyanalyst/api/'
_valid_api_versions = ['1.0']
user_agent = f'PolyAnalyst6API python client v{__version__}'
def __enter__(self) -> 'API':
self.login()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.logout()
self._s.__exit__()
def __init__(
self,
url: Optional[str] = None,
username: Optional[str] = None,
password: Optional[str] = None,
ldap_server: Optional[str] = None,
version: str = '1.0',
) -> None:
if version not in self._valid_api_versions:
raise ClientException('Valid api versions are ' + ', '.join(self._valid_api_versions))
if url is None or username is None:
try:
cfg_path = pathlib.Path.home() / '.polyanalyst6api' / 'config'
parser = configparser.ConfigParser(allow_no_value=True)
with open(cfg_path, encoding='utf8') as f:
parser.read_file(f)
default = dict(parser['DEFAULT'])
url = default['url']
username = default['username']
password = default['password']
ldap_server = default.get(ldap_server)
except FileNotFoundError:
raise ClientException(f"The credentials file doesn't exist. Nor credentials passed as arguments")
except KeyError as exc:
raise ClientException(f"The credentials file doesn't contain required key: {exc}")
if not url:
raise ClientException(f'Invalid url: "{url}".')
self.base_url = urljoin(url, self._api_path)
self.url = urljoin(self.base_url, f'v{version}/')
self.username = username
self.password = password or ''
self.ldap_server = ldap_server
self._s = requests.Session()
self._s.headers.update({'User-Agent': self.user_agent})
self.sid = None # session identity
# path to certificate file. by default ignore insecure connection warnings
self.certfile = False
self.drive = Drive(self)
@property
def fs(self):
warnings.warn('"fs" attribute has been renamed "drive"', DeprecationWarning, 2)
return self.drive
def get_versions(self) -> List[str]:
"""Returns api versions supported by PolyAnalyst server."""
# the 'versions' endpoint was added in the 2191 polyanalyst's version
try:
return self.request(urljoin(self.base_url, 'versions'), method='get')[1]
except APIException:
return ['1.0']
def get_server_info(self) -> Optional[Dict[str, Union[int, str, Dict[str, str]]]]:
"""Returns general server information including build number, version and commit hashes."""
_, data = self.request(urljoin(self.url, 'server/info'), method='get')
return data
def get_parameters(self) -> List[Dict[str, Union[str, List]]]:
"""
Returns list of nodes with parameters supported by ``Parameters`` node.
.. deprecated:: 0.18.0
Use :meth:`Parameters.get` instead.
"""
warnings.warn(
'API.get_parameters() is deprecated, use Parameters.get() instead.',
DeprecationWarning,
stacklevel=2,
)
class ProjectStub:
api = self
return Parameters(ProjectStub(), None).get()
def login(self) -> None:
"""Logs in to PolyAnalyst Server with user credentials."""
credentials = {'uname': self.username, 'pwd': self.password}
if self.ldap_server:
credentials['useLDAP'] = '1'
credentials['svr'] = self.ldap_server
resp, _ = self.request('login', method='post', params=credentials)
try:
self.sid = resp.cookies['sid']
except KeyError:
self._s.headers['Authorization'] = f"Bearer {resp.headers['x-session-id']}"
def logout(self) -> None:
"""Logs out current user from PolyAnalyst server."""
self.get('logout')
def run_task(self, id: int) -> None:
"""Initiates scheduler task execution.
:param id: the task ID
"""
self.post('scheduler/run-task', json={'taskId': id})
def project(self, uuid: str) -> Project:
"""Returns :class:`Project <Project>` instance with given uuid.
:param uuid: The project uuid
"""
prj = Project(self, uuid)
prj._update_node_list() # check that the project with given uuid exists
return prj
def get(self, endpoint: str, **kwargs) -> Any:
"""Shortcut for GET requests via :meth:`request <API.request>`
:param endpoint: PolyAnalyst API endpoint
:param kwargs: :func:`requests.request` keyword arguments
"""
return self.request(endpoint, method='get', **kwargs)[1]
def post(self, endpoint: str, **kwargs) -> Any:
"""Shortcut for POST requests via :meth:`request <API.request>`
:param endpoint: PolyAnalyst API endpoint
:param kwargs: :func:`requests.request` keyword arguments
"""
return self.request(endpoint, method='post', **kwargs)[1]
def request(self, url: str, method: str, **kwargs) -> Tuple[requests.Response, Any]:
"""Sends ``method`` request to ``endpoint`` and returns tuple of
:class:`requests.Response` and json-encoded content of a response.
:param url: url or PolyAnalyst API endpoint
:param method: request method (e.g. GET, POST)
:param kwargs: :func:`requests.request` keyword arguments
"""
if not urlparse(url).netloc:
url = urljoin(self.url, url)
kwargs['verify'] = self.certfile
try:
resp = self._s.request(method, url, **kwargs)
except requests.RequestException as exc:
raise ClientException(exc)
else:
return self._handle_response(resp)
@staticmethod
def _handle_response(response: requests.Response) -> Tuple[requests.Response, Any]:
try:
json = response.json()
except ValueError:
json = None
if response.status_code in (200, 202):
return response, json
if isinstance(json, dict) and json.get('error'):
with contextlib.suppress(KeyError):
error = json['error']
if 'The wrapper with the given GUID is not found on the server' == error['message']:
raise _WrapperNotFound
if error['title']:
error_msg = f"{error['title']}. Message: '{error['message']}'"
else:
error_msg = error['message']
# the old error response format handling
elif response.status_code == 403:
if 'are not logged in' in response.text:
error_msg = 'You are not logged in to PolyAnalyst Server'
elif 'operation is limited ' in response.text:
error_msg = (
'Access to this operation is limited to project owners and administrator'
)
elif response.status_code == 500:
with contextlib.suppress(IndexError, TypeError):
if json[0] == 'Error':
error_msg = json[1]
else:
try:
response.raise_for_status()
except requests.HTTPError as exc:
error_msg = str(exc)
with contextlib.suppress(NameError):
raise APIException(error_msg, response.url, response.status_code)
return response, None
| 35.00722 | 113 | 0.608642 | 8,501 | 0.876663 | 0 | 0 | 1,815 | 0.187171 | 0 | 0 | 3,948 | 0.407136 |
ffbcc28e993823f93d8f7e3809b6abd49a5cc187 | 1,998 | py | Python | froide/publicbody/admin.py | rufuspollock/froide | 8ef4dbdd54a74f8c986d59e90348dfdbd85c5da4 | [
"MIT"
] | null | null | null | froide/publicbody/admin.py | rufuspollock/froide | 8ef4dbdd54a74f8c986d59e90348dfdbd85c5da4 | [
"MIT"
] | null | null | null | froide/publicbody/admin.py | rufuspollock/froide | 8ef4dbdd54a74f8c986d59e90348dfdbd85c5da4 | [
"MIT"
] | null | null | null | from django.contrib import admin
from django.http import HttpResponse
from django.utils.translation import ugettext as _
from froide.publicbody.models import (PublicBody, FoiLaw, PublicBodyTopic,
Jurisdiction)
class PublicBodyAdmin(admin.ModelAdmin):
prepopulated_fields = {
"slug": ("name",),
'classification_slug': ('classification',)
}
list_display = ('name', 'email', 'url', 'classification', 'topic', 'jurisdiction',)
list_filter = ('topic', 'jurisdiction', 'classification')
list_max_show_all = 5000
search_fields = ['name', "description", 'classification']
exclude = ('confirmed',)
raw_id_fields = ('parent', 'root', '_created_by', '_updated_by')
actions = ['export_csv', 'remove_from_index']
def export_csv(self, request, queryset):
return HttpResponse(PublicBody.export_csv(queryset),
content_type='text/csv')
export_csv.short_description = _("Export to CSV")
def remove_from_index(self, request, queryset):
from haystack import connections as haystack_connections
for obj in queryset:
for using in haystack_connections.connections_info.keys():
backend = haystack_connections[using].get_backend()
backend.remove(obj)
self.message_user(request, _("Removed from search index"))
remove_from_index.short_description = _("Remove from search index")
class FoiLawAdmin(admin.ModelAdmin):
prepopulated_fields = {"slug": ("name",)}
list_display = ('name', 'meta', 'jurisdiction',)
raw_id_fields = ('mediator',)
class JurisdictionAdmin(admin.ModelAdmin):
prepopulated_fields = {"slug": ("name",)}
class PublicBodyTopicAdmin(admin.ModelAdmin):
prepopulated_fields = {
"slug": ("name",)
}
admin.site.register(PublicBody, PublicBodyAdmin)
admin.site.register(FoiLaw, FoiLawAdmin)
admin.site.register(Jurisdiction, JurisdictionAdmin)
admin.site.register(PublicBodyTopic, PublicBodyTopicAdmin)
| 34.448276 | 87 | 0.699199 | 1,564 | 0.782783 | 0 | 0 | 0 | 0 | 0 | 0 | 408 | 0.204204 |
ffbf148e7df59ebdd237d38695723231b7824b44 | 462 | py | Python | src/abc/106/106_b.py | ryuichi1208/atcoder_stack | 19ec81fb9a3edb44be422b79e98b23e8ff17ef60 | [
"MIT"
] | null | null | null | src/abc/106/106_b.py | ryuichi1208/atcoder_stack | 19ec81fb9a3edb44be422b79e98b23e8ff17ef60 | [
"MIT"
] | null | null | null | src/abc/106/106_b.py | ryuichi1208/atcoder_stack | 19ec81fb9a3edb44be422b79e98b23e8ff17ef60 | [
"MIT"
] | null | null | null | n = int(input())
# @return [0]:約数の個数 [1]:約数リスト
def divisor(num):
ret=[]
L=[]
for i in range(1,num+1):
if (num%i==0):
L.append(i)
ret.append(len(L))
ret.append(L)
return ret
L=[]
ans=0
for i in range(1,n+1):
if(i%2==0):
continue
else:
for j in range(1,n+1):
if(i%j==0):
L.append(j)
if (len(L)==8):
ans+=1
L.clear()
print(ans)
print(divisor(15))
| 14.4375 | 30 | 0.452381 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 49 | 0.10166 |
ffbfef0bf96a36ae6a5da2b1bf0bdc5756f1e309 | 3,931 | py | Python | poc.py | evinr/basis-scraper | cb20574aadc469c6d1527ddb5a3cc69df531cbfd | [
"MIT"
] | null | null | null | poc.py | evinr/basis-scraper | cb20574aadc469c6d1527ddb5a3cc69df531cbfd | [
"MIT"
] | null | null | null | poc.py | evinr/basis-scraper | cb20574aadc469c6d1527ddb5a3cc69df531cbfd | [
"MIT"
] | null | null | null | import serial
def setup():
ser = serial.Serial('/dev/ttyUSB0', timeout=2)
ser.setRTS(True)
ser.setRTS(False)
if ser.isOpen():
ser.close()
ser.open()
ser.isOpen()
print "USB connection established"
def read():
rawString = ser.readline()
print rawString
return (str(rawString))
def write(stringVariable):
ser.write(stringVariable.encode())
def handshake():
write('AA 02 00 00 04 06 0A 00 AB')
#Expect
#01 60 AA 07 00 00 04 07 02 3D 02 03 02 51 00 AB
write('AA 02 00 00 05 06 0B 00 AB')
#Expect
#01 60 AA 0B 00 00 05 07 02 1A 0D A0 66 00 00 00 00 3B 01 AB
write('AA 02 00 00 0A 06 10 00 AB')
#Expect
#01 60 AA 0F 00 00 0A 07 02 30 30 30 34 33 65 30 32 65 64 64 65 63 03 AB
write('AA 02 00 00 09 06 0F 00 AB')
#This is assumed to be the manifest of data, ie what is currently contained on the device
#When no data is present, ie the watch has just been sitting there. Expect
#01 60 AA 05 00 00 09 07 02 1C 0B 39 00 AB
#TODO: Determine what this string is and how it is used
#this is based on quick and constant syncs, verify as normal behavior
write('AA 02 00 00 07 06 0D 00 AB')
#Same A
#Assumed to be tied to the 'firmware update', as when that gets pushed the contents of this change in the same spot.
# Three char sets change on these over the course of the contant syncs
# Lots of padding on this one
#TODO: Determine what this string is and how it is used
write('AA 23 00 00 05 04 00 52 BC 52 B9 3C 09 12 1B 64 12 CD 9B FF FF FF FF FF FF FF FF FF FF FF FF FF FF FF FF FF FF FF FF 5E 18 AB')
#TODO: Determine if this string is consistant
#Expect
#01 60 AA 03 00 00 05 05 01 0B 00 AB
write('AA 02 00 00 06 04 0A 00 AB')
#Expect
#01 60 AA 03 00 00 06 05 01 0C 00 AB
write('AA 02 00 00 07 06 0D 00 AB')
#Same A
write('AA 08 00 00 04 04 1F 01 AC 2A 00 03 03 D8 00 AB')
#expect
#01 60 AA 03 00 00 04 05 01 0A 00 AB
# Current time gets sent here
#dynamic
# TODO: Determine how to send specific date times
write('AA 08 00 00 00 04 45 9B 05 09 5C FE 4C 02 AB') #201510181406
#expect
#01 60 AA 03 00 00 00 05 01 06 00 AB
write('AA 07 00 00 0C 04 00 10 27 00 00 47 00 AB')
#expect
#01 60 AA 03 00 00 0C 05 01 12 00 AB
write('AA 02 00 00 10 04 14 00 AB')
#expect
#01 60 AA 03 00 00 10 05 01 16 00 AB
write('AA 02 00 00 01 06 07 00 AB')
#Expect
#01 60 AA 07 00 00 01 07 02 7E 0B 00 00 93 00 AB
#01 60 AA 07 00 00 01 07 02 0A 00 00 00 14 00 AB
#01 60 AA 07 00 00 01 07 02 14 00 00 00 1E 00 AB
#01 60 AA 07 00 00 01 07 02 0A 00 00 00 14 00 AB
#01 60 AA 07 00 00 01 07 02 0A 00 00 00 14 00 AB
#01 60 AA 07 00 00 01 07 02 0A 00 00 00 14 00 AB
#01 60 AA 07 00 00 01 07 02 0A 00 00 00 14 00 AB
write('AA 02 00 00 02 06 08 00 AB')
#expect
#01 60 AA 05 00 00 02 07 02 01 00 0C 00 AB
write('AA 04 00 00 03 06 00 00 09 00 AB')
#expect
#real data here, with what appears to be aggregates in the header
write('AA 02 00 00 01 04 05 00 AB')
#expect
#01 60 AA 03 00 00 01 05 01 07 00 AB
write('')
def chilling():
isChilling = read()
if isChilling == '01 60 AA 07 00 00 00 03 01 3D 02 06 00 49 00 AB':
print "device is ready for data transfer"
def deletingData():
write('AA 02 00 00 08 06 0E 00 AB')
print "are we done transfering data?"
isDeletingData = read()
if isDeletingData == '01 60 AA 04 00 00 08 07 02 01 12 00 AB':
print "device is still deleting data from memory"
elif isDeletingData == '01 60 AA 04 00 00 08 07 02 00 11 00 AB':
print "device is done deleting data from memory"
else:
print "something unexpected happened"
#at this point steady chilling is what happens every so many seconds
#TODO: define the gathering of all of the possible data sets being extracted
#Biometrics
# Heart Rate
# STEPS
# CALORIES
# SKIN TEMP
# PERSPIRATION
#Activity
# Walking
# Running
# Biking
#Sleep
# REM
# Mind Refresh
# Light
# Deep
# Body Refresh
# Interruptions
# Toss & Turn
| 27.110345 | 135 | 0.675401 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,067 | 0.780209 |
ffc0fbef26aaac232d9390250ce89e31f64b7ad8 | 31 | py | Python | multinet/db/models/__init__.py | multinet-app/multinet-girder | f34c87849d92c7fe2f8589760f97bebbe04bd4af | [
"Apache-2.0"
] | 3 | 2019-10-22T15:21:10.000Z | 2020-02-13T17:40:07.000Z | multinet/db/models/__init__.py | multinet-app/multinet | f34c87849d92c7fe2f8589760f97bebbe04bd4af | [
"Apache-2.0"
] | 183 | 2019-08-01T14:27:00.000Z | 2020-03-04T17:47:49.000Z | multinet/db/models/__init__.py | multinet-app/multinet-girder | f34c87849d92c7fe2f8589760f97bebbe04bd4af | [
"Apache-2.0"
] | 2 | 2020-08-20T11:57:17.000Z | 2020-11-10T22:54:19.000Z | """ORM models for multinet."""
| 15.5 | 30 | 0.645161 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 30 | 0.967742 |
ffc1536722c6684539bdbe4eaba7de45c07a8edb | 6,296 | py | Python | dataPipelines/gc_crawler/nato_stanag/models.py | ekmixon/gamechanger-crawlers | 60a0cf20338fb3dc134eec117bccd519cede9288 | [
"MIT"
] | null | null | null | dataPipelines/gc_crawler/nato_stanag/models.py | ekmixon/gamechanger-crawlers | 60a0cf20338fb3dc134eec117bccd519cede9288 | [
"MIT"
] | 4 | 2021-07-27T21:44:51.000Z | 2022-03-04T01:38:48.000Z | dataPipelines/gc_crawler/nato_stanag/models.py | ekmixon/gamechanger-crawlers | 60a0cf20338fb3dc134eec117bccd519cede9288 | [
"MIT"
] | null | null | null | import bs4
import os
import re
from typing import Iterable
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait # for implicit and explict waits
from selenium.webdriver.support import expected_conditions as ec
from selenium.webdriver.common.by import By
from dataPipelines.gc_crawler.requestors import MapBasedPseudoRequestor
from dataPipelines.gc_crawler.exec_model import Crawler, Parser, Pager
from dataPipelines.gc_crawler.data_model import Document, DownloadableItem
from dataPipelines.gc_crawler.utils import abs_url, close_driver_windows_and_quit
from . import SOURCE_SAMPLE_DIR, BASE_SOURCE_URL
class STANAGPager(Pager):
"""Pager for Nato Stanag crawler"""
def iter_page_links(self) -> Iterable[str]:
"""Iterator for page links"""
base_url = 'https://nso.nato.int/nso/nsdd/'
starting_url = base_url + 'ListPromulg.html'
global driver
options = webdriver.ChromeOptions()
options.add_argument('--headless')
options.add_argument("--no-sandbox")
options.add_argument("--disable-gpu")
options.add_argument("--start-maximized")
options.add_argument("--disable-dev-shm-usage")
options.add_argument("--disable-setuid-sandbox")
driver = webdriver.Chrome(options=options)
yield starting_url
class STANAGParser(Parser):
"""Parser for Nato Stanag crawler"""
def parse_docs_from_page(self, page_url: str, page_text: str) -> Iterable[Document]:
"""Parse document objects from page of text"""
# parse html response
pdf_prefix = 'https://nso.nato.int/nso/'
driver.get(page_url)
WebDriverWait(driver, 10).until(ec.presence_of_element_located((By.XPATH, "//*[@id='headerSO']")))
html = driver.execute_script("return document.documentElement.outerHTML")
soup = bs4.BeautifulSoup(html, features="html.parser")
parsed_docs = []
table = soup.find('table', attrs={'id': 'dataSearchResult'})
rows = table.find_all('tr')
for row in rows[1:]:
data = row.find_all('td')
if "No" not in data[1].text:
doc_title = data[4].text.splitlines()[1].strip()
doc_helper = data[2].text.split("Ed:")[0].strip()
if "STANAG" in doc_helper or"STANREC" in doc_helper:
doc_num = doc_helper.split("\n")[1].strip().replace(" ","_")
doc_type = doc_helper.split("\n")[0].strip().replace(" ","_")
else:
doc_ = doc_helper.split("\n")[0].strip()
doc_num = doc_.split('-',1)[1].strip().replace(" ","_")
doc_type = doc_.split('-',1)[0].strip().replace(" ","_")
if len(doc_helper.split())>1:
if re.match("^M{0,4}(CM|CD|D?C{0,3})(XC|XL|L?X{0,3})(IX|IV|V?I{0,3})$", doc_helper.split()[1].strip()):
doc_num = doc_num + "_VOL" + doc_helper.split()[1].strip()
if re.match("^\d$",doc_helper.split()[1].strip()):
doc_num = doc_num + "_PART" + doc_helper.split()[1].strip()
if len(data[2].text.split("VOL")) > 1:
volume = data[2].text.split("VOL")[1].split()[0].strip()
doc_num = doc_num + "_VOL" + volume
if len(data[2].text.split("PART")) > 1:
volume = data[2].text.split("PART")[1].split()[0].strip()
doc_num = doc_num + "_PART" + volume
doc_name = doc_type + " " + doc_num
if doc_name in (o.doc_name for o in parsed_docs) and doc_title in (t.doc_title for t in parsed_docs):
#getting rid of duplicates
continue
if len(data[2].text.split("Ed:")) > 1:
edition = data[2].text.split("Ed:")[1].strip()
else:
edition = ""
publication_date = data[5].text.splitlines()[1].strip()
pdf_suffix = data[4].find('a')
if pdf_suffix is None:
continue
if "../classDoc.htm" in pdf_suffix['href']:
cac_login_required = True
else:
cac_login_required = False
di = DownloadableItem(
doc_type='pdf',
web_url=pdf_prefix + pdf_suffix['href'].replace('../', '').replace(" ", "%20")
)
crawler_used = "nato_stanag"
version_hash_fields = {
"editions_and_volume": edition,
"type": data[1].text
}
doc = Document(
doc_name=doc_name,
doc_title=doc_title,
doc_num=doc_num,
doc_type=doc_type,
publication_date=publication_date,
cac_login_required=cac_login_required,
crawler_used=crawler_used,
source_page_url=page_url.strip(),
version_hash_raw_data=version_hash_fields,
downloadable_items=[di]
)
parsed_docs.append(doc)
close_driver_windows_and_quit(driver)
return parsed_docs
class STANAGCrawler(Crawler):
"""Crawler for the example web scraper"""
def __init__(self, *args, **kwargs):
super().__init__(
*args,
**kwargs,
pager=STANAGPager(
starting_url=BASE_SOURCE_URL
),
parser=STANAGParser()
)
class FakeSTANAGCrawler(Crawler):
"""Nato Stanag crawler that just uses stubs and local source files"""
def __init__(self, *args, **kwargs):
with open(os.path.join(SOURCE_SAMPLE_DIR, 'dod_issuances.html')) as f:
default_text = f.read()
super().__init__(
*args,
**kwargs,
pager=DoDPager(
requestor=MapBasedPseudoRequestor(
default_text=default_text
),
starting_url=BASE_SOURCE_URL
),
parser=STANAGParser()
)
| 38.625767 | 127 | 0.550191 | 5,641 | 0.895966 | 628 | 0.099746 | 0 | 0 | 0 | 0 | 944 | 0.149936 |
ffc168320dcc3879d9935e0c48e2582d2d304fa1 | 3,938 | py | Python | app/signals.py | MakuZo/bloggy | 550e5285728b285e0d5243670d6aa0f40c414777 | [
"MIT"
] | 7 | 2018-11-12T20:52:53.000Z | 2021-12-17T23:04:41.000Z | app/signals.py | MakuZo/bloggy | 550e5285728b285e0d5243670d6aa0f40c414777 | [
"MIT"
] | 2 | 2019-12-24T08:53:51.000Z | 2019-12-26T19:26:51.000Z | app/signals.py | MakuZo/bloggy | 550e5285728b285e0d5243670d6aa0f40c414777 | [
"MIT"
] | 8 | 2018-12-28T12:31:51.000Z | 2020-01-25T09:07:52.000Z | import re
from django.db.models.signals import m2m_changed, post_save, pre_delete
from django.dispatch import receiver
from django.urls import reverse
from .models import Entry, Notification, User
@receiver(post_save, sender=Entry)
def entry_notification(sender, instance, created, **kwargs):
"""
Signal used to create notification(s) when an entry is created
This function notifies an user if this entry is a reply to him.
This function notifies an user if he's mentioned (by @username) in one's entry
"""
if created:
# First find usernames mentioned (by @ tag)
p = re.compile(r"^(@)(\w+)$")
usernames = set(
[
p.match(c).group(2).lower()
for c in instance.content.split()
if p.match(c)
]
)
# Remove the author of an entry from users to notify
if instance.user.username in usernames:
usernames.remove(instance.user.username)
# If entry has a parent and it's parent is not the same author then notify about a reply
# and delete from usernames if being notified
if instance.parent and instance.parent.user.username != instance.user.username:
if instance.parent.user.username in usernames:
usernames.remove(instance.parent.user.username)
Notification.objects.create(
type="user_replied",
sender=instance.user,
target=instance.parent.user,
object=instance,
)
# Notify mentioned users without the author of an entry
for name in usernames:
if name == instance.user.username:
continue
try:
target = User.objects.get(username=name)
except Exception:
continue
Notification.objects.create(
type="user_mentioned",
sender=instance.user,
target=target,
object=instance,
)
@receiver(m2m_changed, sender=Entry.tags.through)
def entry_tag_notification(instance, action, **kwargs):
"""
Notifies users if one of the tags in entry is observed by them.
"""
if not instance.modified_date and "post" in action:
already_notified = set()
reversed_user = reverse(
"user-detail-view", kwargs={"username": instance.user.username}
)
reversed_entry = reverse("entry-detail-view", kwargs={"pk": instance.pk})
all_tags = instance.tags.all().prefetch_related("observers", "blacklisters")
all_blacklisters = [
blacklister for tag in all_tags for blacklister in tag.blacklisters.all()
]
to_create = []
for tag in all_tags:
for observer in tag.observers.all():
# If user blacklisted one of the tags in an entry, don't notify him.
if observer in all_blacklisters:
continue
if (
observer.username == instance.user.username
or observer in already_notified
):
continue
reversed_tag = reverse("tag", kwargs={"tag": tag.name})
content = (
f'<a href="{reversed_user}">{instance.user.username}</a> used tag <a href="{reversed_tag}">#{tag.name}</a>'
f' in <a href="{reversed_entry}">"{instance.content:.25}..."</a>'
)
to_create.append(
Notification(
type="tag_used",
sender=instance.user,
target=observer,
object=instance,
content=content,
)
)
already_notified.add(observer)
Notification.objects.bulk_create(to_create)
| 39.777778 | 127 | 0.561199 | 0 | 0 | 0 | 0 | 3,733 | 0.947943 | 0 | 0 | 976 | 0.247842 |
ffc1d0bbd0644054a0b22502249482b17c06c941 | 2,532 | py | Python | tests/utils/test_commons.py | jajomi/flow | c984be6f7de1a34192601c129dbc19f2ce45f135 | [
"Apache-2.0"
] | null | null | null | tests/utils/test_commons.py | jajomi/flow | c984be6f7de1a34192601c129dbc19f2ce45f135 | [
"Apache-2.0"
] | 6 | 2021-03-05T16:39:42.000Z | 2021-06-11T01:04:57.000Z | tests/utils/test_commons.py | jajomi/flow | c984be6f7de1a34192601c129dbc19f2ce45f135 | [
"Apache-2.0"
] | null | null | null | from unittest.mock import mock_open
from unittest.mock import patch
import flow.utils.commons as commons
def test_extract_story_id_with_empty_list():
story_list = commons.extract_story_id_from_commit_messages([])
assert len(story_list) == 0
commit_example = [
"223342f Adding ability to specify artifactory user [#134082057]",
"4326d00 Adding slack channel option for errors [#130798449]",
"09c1983 Merge pull request #25 from ci-cd/revert-18-github-version-fix",
"445fd02 Revert \"GitHub version fix\""
]
def test_extract_story_id_with_two_stories():
story_list = commons.extract_story_id_from_commit_messages(commit_example)
assert len(story_list) == 2
commit_example_nested_brackets = [
"223342f Adding ability to specify artifactory user [#134082057, [bubba]]",
"4326d00 Adding slack channel option for errors [#130798449]",
"09c1983 Merge pull request #25 from ci-cd/revert-18-github-version-fix",
"445fd02 Revert \"GitHub version fix\""
]
def test_extract_story_id_with_nested_brackets():
story_list = commons.extract_story_id_from_commit_messages(commit_example_nested_brackets)
print(str(story_list))
assert len(story_list) == 1
commit_example_multiple_per_brackets = [
"223342f Adding ability to specify artifactory user [#134082057,#134082058]",
"4326d00 Adding slack channel option for errors [#130798449,123456]",
"09c1983 Merge pull request #25 from ci-cd/revert-18-github-version-fix",
"445fd02 Revert \"GitHub version fix\""
]
def test_extract_story_id_with_multiple_per_brackets():
story_list = commons.extract_story_id_from_commit_messages(commit_example_multiple_per_brackets)
print(str(story_list))
assert len(story_list) == 4
commit_example_dedup = [
"223342f Adding ability to specify artifactory user [#134082057,#134082057]",
"4326d00 Adding slack channel option for errors [#134082057,134082057]",
"09c1983 Merge pull request #25 from ci-cd/revert-18-github-version-fix",
"445fd02 Revert \"GitHub version fix\""
]
def test_extract_story_id_with_dedup():
story_list = commons.extract_story_id_from_commit_messages(commit_example_dedup)
print(str(story_list))
assert len(story_list) == 1
def test_write_to_file():
open_mock = mock_open()
with patch('__main__.open', open_mock, create=True):
commons.write_to_file("somefilepath", "test_write_to_file", open_func=open_mock)
open_mock.assert_called_once_with("somefilepath", "a")
file_mock = open_mock()
file_mock.write.assert_called_once_with("test_write_to_file")
| 36.171429 | 100 | 0.781991 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,082 | 0.42733 |
ffc234c8fa1382a81cd3f2b1ea5e202da915c840 | 28,482 | py | Python | swagger_client/models/client_configuration.py | chbndrhnns/finapi-client | 259beda8b05e912c49d2dc4c3ed71205134e5d8a | [
"MIT"
] | 2 | 2019-04-15T05:58:21.000Z | 2021-11-15T18:26:37.000Z | swagger_client/models/client_configuration.py | chbndrhnns/finapi-client | 259beda8b05e912c49d2dc4c3ed71205134e5d8a | [
"MIT"
] | 1 | 2021-06-18T09:46:25.000Z | 2021-06-18T20:12:41.000Z | swagger_client/models/client_configuration.py | chbndrhnns/finapi-client | 259beda8b05e912c49d2dc4c3ed71205134e5d8a | [
"MIT"
] | 2 | 2019-07-08T13:41:09.000Z | 2020-12-07T12:10:04.000Z | # coding: utf-8
"""
finAPI RESTful Services
finAPI RESTful Services # noqa: E501
OpenAPI spec version: v1.42.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class ClientConfiguration(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'is_automatic_batch_update_enabled': 'bool',
'user_notification_callback_url': 'str',
'user_synchronization_callback_url': 'str',
'refresh_tokens_validity_period': 'int',
'user_access_tokens_validity_period': 'int',
'client_access_tokens_validity_period': 'int',
'max_user_login_attempts': 'int',
'is_user_auto_verification_enabled': 'bool',
'is_mandator_admin': 'bool',
'is_web_scraping_enabled': 'bool',
'available_bank_groups': 'list[str]'
}
attribute_map = {
'is_automatic_batch_update_enabled': 'isAutomaticBatchUpdateEnabled',
'user_notification_callback_url': 'userNotificationCallbackUrl',
'user_synchronization_callback_url': 'userSynchronizationCallbackUrl',
'refresh_tokens_validity_period': 'refreshTokensValidityPeriod',
'user_access_tokens_validity_period': 'userAccessTokensValidityPeriod',
'client_access_tokens_validity_period': 'clientAccessTokensValidityPeriod',
'max_user_login_attempts': 'maxUserLoginAttempts',
'is_user_auto_verification_enabled': 'isUserAutoVerificationEnabled',
'is_mandator_admin': 'isMandatorAdmin',
'is_web_scraping_enabled': 'isWebScrapingEnabled',
'available_bank_groups': 'availableBankGroups'
}
def __init__(self, is_automatic_batch_update_enabled=False, user_notification_callback_url=None, user_synchronization_callback_url=None, refresh_tokens_validity_period=None, user_access_tokens_validity_period=None, client_access_tokens_validity_period=None, max_user_login_attempts=None, is_user_auto_verification_enabled=False, is_mandator_admin=False, is_web_scraping_enabled=False, available_bank_groups=None): # noqa: E501
"""ClientConfiguration - a model defined in Swagger""" # noqa: E501
self._is_automatic_batch_update_enabled = None
self._user_notification_callback_url = None
self._user_synchronization_callback_url = None
self._refresh_tokens_validity_period = None
self._user_access_tokens_validity_period = None
self._client_access_tokens_validity_period = None
self._max_user_login_attempts = None
self._is_user_auto_verification_enabled = None
self._is_mandator_admin = None
self._is_web_scraping_enabled = None
self._available_bank_groups = None
self.discriminator = None
self.is_automatic_batch_update_enabled = is_automatic_batch_update_enabled
if user_notification_callback_url is not None:
self.user_notification_callback_url = user_notification_callback_url
if user_synchronization_callback_url is not None:
self.user_synchronization_callback_url = user_synchronization_callback_url
if refresh_tokens_validity_period is not None:
self.refresh_tokens_validity_period = refresh_tokens_validity_period
if user_access_tokens_validity_period is not None:
self.user_access_tokens_validity_period = user_access_tokens_validity_period
if client_access_tokens_validity_period is not None:
self.client_access_tokens_validity_period = client_access_tokens_validity_period
self.max_user_login_attempts = max_user_login_attempts
self.is_user_auto_verification_enabled = is_user_auto_verification_enabled
self.is_mandator_admin = is_mandator_admin
self.is_web_scraping_enabled = is_web_scraping_enabled
self.available_bank_groups = available_bank_groups
@property
def is_automatic_batch_update_enabled(self):
"""Gets the is_automatic_batch_update_enabled of this ClientConfiguration. # noqa: E501
Whether finAPI performs a regular automatic update of your users' bank connections. To find out how the automatic batch update is configured for your client, i.e. which bank connections get updated, and at which time and interval, please contact your Sys-Admin. Note that even if the automatic batch update is enabled for your client, individual users can still disable the feature for their own bank connections. # noqa: E501
:return: The is_automatic_batch_update_enabled of this ClientConfiguration. # noqa: E501
:rtype: bool
"""
return self._is_automatic_batch_update_enabled
@is_automatic_batch_update_enabled.setter
def is_automatic_batch_update_enabled(self, is_automatic_batch_update_enabled):
"""Sets the is_automatic_batch_update_enabled of this ClientConfiguration.
Whether finAPI performs a regular automatic update of your users' bank connections. To find out how the automatic batch update is configured for your client, i.e. which bank connections get updated, and at which time and interval, please contact your Sys-Admin. Note that even if the automatic batch update is enabled for your client, individual users can still disable the feature for their own bank connections. # noqa: E501
:param is_automatic_batch_update_enabled: The is_automatic_batch_update_enabled of this ClientConfiguration. # noqa: E501
:type: bool
"""
if is_automatic_batch_update_enabled is None:
raise ValueError("Invalid value for `is_automatic_batch_update_enabled`, must not be `None`") # noqa: E501
self._is_automatic_batch_update_enabled = is_automatic_batch_update_enabled
@property
def user_notification_callback_url(self):
"""Gets the user_notification_callback_url of this ClientConfiguration. # noqa: E501
Callback URL to which finAPI sends the notification messages that are triggered from the automatic batch update of the users' bank connections. This field is only relevant if the automatic batch update is enabled for your client. For details about what the notification messages look like, please see the documentation in the 'Notification Rules' section. finAPI will call this URL with HTTP method POST. Note that the response of the call is not processed by finAPI. Also note that while the callback URL may be a non-secured (http) URL on the finAPI sandbox or alpha environment, it MUST be a SSL-secured (https) URL on the finAPI live system. # noqa: E501
:return: The user_notification_callback_url of this ClientConfiguration. # noqa: E501
:rtype: str
"""
return self._user_notification_callback_url
@user_notification_callback_url.setter
def user_notification_callback_url(self, user_notification_callback_url):
"""Sets the user_notification_callback_url of this ClientConfiguration.
Callback URL to which finAPI sends the notification messages that are triggered from the automatic batch update of the users' bank connections. This field is only relevant if the automatic batch update is enabled for your client. For details about what the notification messages look like, please see the documentation in the 'Notification Rules' section. finAPI will call this URL with HTTP method POST. Note that the response of the call is not processed by finAPI. Also note that while the callback URL may be a non-secured (http) URL on the finAPI sandbox or alpha environment, it MUST be a SSL-secured (https) URL on the finAPI live system. # noqa: E501
:param user_notification_callback_url: The user_notification_callback_url of this ClientConfiguration. # noqa: E501
:type: str
"""
self._user_notification_callback_url = user_notification_callback_url
@property
def user_synchronization_callback_url(self):
"""Gets the user_synchronization_callback_url of this ClientConfiguration. # noqa: E501
Callback URL for user synchronization. This field should be set if you - as a finAPI customer - have multiple clients using finAPI. In such case, all of your clients will share the same user base, making it possible for a user to be created in one client, but then deleted in another. To keep the client-side user data consistent in all clients, you should set a callback URL for each client. finAPI will send a notification to the callback URL of each client whenever a user of your user base gets deleted. Note that finAPI will send a deletion notification to ALL clients, including the one that made the user deletion request to finAPI. So when deleting a user in finAPI, a client should rely on the callback to delete the user on its own side. <p>The notification that finAPI sends to the clients' callback URLs will be a POST request, with this body: <pre>{ \"userId\" : string // contains the identifier of the deleted user \"event\" : string // this will always be \"DELETED\" }</pre><br/>Note that finAPI does not process the response of this call. Also note that while the callback URL may be a non-secured (http) URL on the finAPI sandbox or alpha environment, it MUST be a SSL-secured (https) URL on the finAPI live system.</p>As long as you have just one client, you can ignore this field and let it be null. However keep in mind that in this case your client will not receive any callback when a user gets deleted - so the deletion of the user on the client-side must not be forgotten. Of course you may still use the callback URL even for just one client, if you want to implement the deletion of the user on the client-side via the callback from finAPI. # noqa: E501
:return: The user_synchronization_callback_url of this ClientConfiguration. # noqa: E501
:rtype: str
"""
return self._user_synchronization_callback_url
@user_synchronization_callback_url.setter
def user_synchronization_callback_url(self, user_synchronization_callback_url):
"""Sets the user_synchronization_callback_url of this ClientConfiguration.
Callback URL for user synchronization. This field should be set if you - as a finAPI customer - have multiple clients using finAPI. In such case, all of your clients will share the same user base, making it possible for a user to be created in one client, but then deleted in another. To keep the client-side user data consistent in all clients, you should set a callback URL for each client. finAPI will send a notification to the callback URL of each client whenever a user of your user base gets deleted. Note that finAPI will send a deletion notification to ALL clients, including the one that made the user deletion request to finAPI. So when deleting a user in finAPI, a client should rely on the callback to delete the user on its own side. <p>The notification that finAPI sends to the clients' callback URLs will be a POST request, with this body: <pre>{ \"userId\" : string // contains the identifier of the deleted user \"event\" : string // this will always be \"DELETED\" }</pre><br/>Note that finAPI does not process the response of this call. Also note that while the callback URL may be a non-secured (http) URL on the finAPI sandbox or alpha environment, it MUST be a SSL-secured (https) URL on the finAPI live system.</p>As long as you have just one client, you can ignore this field and let it be null. However keep in mind that in this case your client will not receive any callback when a user gets deleted - so the deletion of the user on the client-side must not be forgotten. Of course you may still use the callback URL even for just one client, if you want to implement the deletion of the user on the client-side via the callback from finAPI. # noqa: E501
:param user_synchronization_callback_url: The user_synchronization_callback_url of this ClientConfiguration. # noqa: E501
:type: str
"""
self._user_synchronization_callback_url = user_synchronization_callback_url
@property
def refresh_tokens_validity_period(self):
"""Gets the refresh_tokens_validity_period of this ClientConfiguration. # noqa: E501
The validity period that newly requested refresh tokens initially have (in seconds). A value of 0 means that the tokens never expire (Unless explicitly invalidated, e.g. by revocation, or when a user gets locked, or when the password is reset for a user). # noqa: E501
:return: The refresh_tokens_validity_period of this ClientConfiguration. # noqa: E501
:rtype: int
"""
return self._refresh_tokens_validity_period
@refresh_tokens_validity_period.setter
def refresh_tokens_validity_period(self, refresh_tokens_validity_period):
"""Sets the refresh_tokens_validity_period of this ClientConfiguration.
The validity period that newly requested refresh tokens initially have (in seconds). A value of 0 means that the tokens never expire (Unless explicitly invalidated, e.g. by revocation, or when a user gets locked, or when the password is reset for a user). # noqa: E501
:param refresh_tokens_validity_period: The refresh_tokens_validity_period of this ClientConfiguration. # noqa: E501
:type: int
"""
self._refresh_tokens_validity_period = refresh_tokens_validity_period
@property
def user_access_tokens_validity_period(self):
"""Gets the user_access_tokens_validity_period of this ClientConfiguration. # noqa: E501
The validity period that newly requested access tokens for users initially have (in seconds). A value of 0 means that the tokens never expire (Unless explicitly invalidated, e.g. by revocation , or when a user gets locked, or when the password is reset for a user). # noqa: E501
:return: The user_access_tokens_validity_period of this ClientConfiguration. # noqa: E501
:rtype: int
"""
return self._user_access_tokens_validity_period
@user_access_tokens_validity_period.setter
def user_access_tokens_validity_period(self, user_access_tokens_validity_period):
"""Sets the user_access_tokens_validity_period of this ClientConfiguration.
The validity period that newly requested access tokens for users initially have (in seconds). A value of 0 means that the tokens never expire (Unless explicitly invalidated, e.g. by revocation , or when a user gets locked, or when the password is reset for a user). # noqa: E501
:param user_access_tokens_validity_period: The user_access_tokens_validity_period of this ClientConfiguration. # noqa: E501
:type: int
"""
self._user_access_tokens_validity_period = user_access_tokens_validity_period
@property
def client_access_tokens_validity_period(self):
"""Gets the client_access_tokens_validity_period of this ClientConfiguration. # noqa: E501
The validity period that newly requested access tokens for clients initially have (in seconds). A value of 0 means that the tokens never expire (Unless explicitly invalidated, e.g. by revocation). # noqa: E501
:return: The client_access_tokens_validity_period of this ClientConfiguration. # noqa: E501
:rtype: int
"""
return self._client_access_tokens_validity_period
@client_access_tokens_validity_period.setter
def client_access_tokens_validity_period(self, client_access_tokens_validity_period):
"""Sets the client_access_tokens_validity_period of this ClientConfiguration.
The validity period that newly requested access tokens for clients initially have (in seconds). A value of 0 means that the tokens never expire (Unless explicitly invalidated, e.g. by revocation). # noqa: E501
:param client_access_tokens_validity_period: The client_access_tokens_validity_period of this ClientConfiguration. # noqa: E501
:type: int
"""
self._client_access_tokens_validity_period = client_access_tokens_validity_period
@property
def max_user_login_attempts(self):
"""Gets the max_user_login_attempts of this ClientConfiguration. # noqa: E501
Number of consecutive failed login attempts of a user into his finAPI account that is allowed before finAPI locks the user's account. When a user's account is locked, finAPI will invalidate all user's tokens and it will deny any service call in the context of this user (i.e. any call to a service using one of the user's authorization tokens, as well as the service for requesting a new token for this user). To unlock a user's account, a new password must be set for the account by the client (see the services /users/requestPasswordChange and /users/executePasswordChange). Once a new password has been set, all services will be available again for this user and the user's failed login attempts counter is reset to 0. The user's failed login attempts counter is also reset whenever a new authorization token has been successfully retrieved, or whenever the user himself changes his password.<br/><br/>Note that when this field has a value of 0, it means that there is no limit for user login attempts, i.e. finAPI will never lock user accounts. # noqa: E501
:return: The max_user_login_attempts of this ClientConfiguration. # noqa: E501
:rtype: int
"""
return self._max_user_login_attempts
@max_user_login_attempts.setter
def max_user_login_attempts(self, max_user_login_attempts):
"""Sets the max_user_login_attempts of this ClientConfiguration.
Number of consecutive failed login attempts of a user into his finAPI account that is allowed before finAPI locks the user's account. When a user's account is locked, finAPI will invalidate all user's tokens and it will deny any service call in the context of this user (i.e. any call to a service using one of the user's authorization tokens, as well as the service for requesting a new token for this user). To unlock a user's account, a new password must be set for the account by the client (see the services /users/requestPasswordChange and /users/executePasswordChange). Once a new password has been set, all services will be available again for this user and the user's failed login attempts counter is reset to 0. The user's failed login attempts counter is also reset whenever a new authorization token has been successfully retrieved, or whenever the user himself changes his password.<br/><br/>Note that when this field has a value of 0, it means that there is no limit for user login attempts, i.e. finAPI will never lock user accounts. # noqa: E501
:param max_user_login_attempts: The max_user_login_attempts of this ClientConfiguration. # noqa: E501
:type: int
"""
if max_user_login_attempts is None:
raise ValueError("Invalid value for `max_user_login_attempts`, must not be `None`") # noqa: E501
self._max_user_login_attempts = max_user_login_attempts
@property
def is_user_auto_verification_enabled(self):
"""Gets the is_user_auto_verification_enabled of this ClientConfiguration. # noqa: E501
Whether users that are created with this client are automatically verified on creation. If this field is set to 'false', then any user that is created with this client must first be verified with the \"Verify a user\" service before he can be authorized. If the field is 'true', then no verification is required by the client and the user can be authorized immediately after creation. # noqa: E501
:return: The is_user_auto_verification_enabled of this ClientConfiguration. # noqa: E501
:rtype: bool
"""
return self._is_user_auto_verification_enabled
@is_user_auto_verification_enabled.setter
def is_user_auto_verification_enabled(self, is_user_auto_verification_enabled):
"""Sets the is_user_auto_verification_enabled of this ClientConfiguration.
Whether users that are created with this client are automatically verified on creation. If this field is set to 'false', then any user that is created with this client must first be verified with the \"Verify a user\" service before he can be authorized. If the field is 'true', then no verification is required by the client and the user can be authorized immediately after creation. # noqa: E501
:param is_user_auto_verification_enabled: The is_user_auto_verification_enabled of this ClientConfiguration. # noqa: E501
:type: bool
"""
if is_user_auto_verification_enabled is None:
raise ValueError("Invalid value for `is_user_auto_verification_enabled`, must not be `None`") # noqa: E501
self._is_user_auto_verification_enabled = is_user_auto_verification_enabled
@property
def is_mandator_admin(self):
"""Gets the is_mandator_admin of this ClientConfiguration. # noqa: E501
Whether this client is a 'Mandator Admin'. Mandator Admins are special clients that can access the 'Mandator Administration' section of finAPI. If you do not yet have credentials for a Mandator Admin, please contact us at support@finapi.io. For further information, please refer to <a href='https://finapi.zendesk.com/hc/en-us/articles/115003661827-Difference-between-app-clients-and-mandator-admin-client'>this article</a> on our Dev Portal. # noqa: E501
:return: The is_mandator_admin of this ClientConfiguration. # noqa: E501
:rtype: bool
"""
return self._is_mandator_admin
@is_mandator_admin.setter
def is_mandator_admin(self, is_mandator_admin):
"""Sets the is_mandator_admin of this ClientConfiguration.
Whether this client is a 'Mandator Admin'. Mandator Admins are special clients that can access the 'Mandator Administration' section of finAPI. If you do not yet have credentials for a Mandator Admin, please contact us at support@finapi.io. For further information, please refer to <a href='https://finapi.zendesk.com/hc/en-us/articles/115003661827-Difference-between-app-clients-and-mandator-admin-client'>this article</a> on our Dev Portal. # noqa: E501
:param is_mandator_admin: The is_mandator_admin of this ClientConfiguration. # noqa: E501
:type: bool
"""
if is_mandator_admin is None:
raise ValueError("Invalid value for `is_mandator_admin`, must not be `None`") # noqa: E501
self._is_mandator_admin = is_mandator_admin
@property
def is_web_scraping_enabled(self):
"""Gets the is_web_scraping_enabled of this ClientConfiguration. # noqa: E501
Whether finAPI is allowed to use web scrapers for data download. If this field is set to 'true', then finAPI might download data from the online banking websites of banks (either in addition to using the FinTS interface, or as the sole data source for the download). If this field is set to 'false', then finAPI will not use any web scrapers and instead download data only from FinTS servers. For banks where no FinTS interface is available, finAPI will not allow any data download at all if web scraping is disabled for your client. Please contact your Sys-Admin if you want to change this setting. # noqa: E501
:return: The is_web_scraping_enabled of this ClientConfiguration. # noqa: E501
:rtype: bool
"""
return self._is_web_scraping_enabled
@is_web_scraping_enabled.setter
def is_web_scraping_enabled(self, is_web_scraping_enabled):
"""Sets the is_web_scraping_enabled of this ClientConfiguration.
Whether finAPI is allowed to use web scrapers for data download. If this field is set to 'true', then finAPI might download data from the online banking websites of banks (either in addition to using the FinTS interface, or as the sole data source for the download). If this field is set to 'false', then finAPI will not use any web scrapers and instead download data only from FinTS servers. For banks where no FinTS interface is available, finAPI will not allow any data download at all if web scraping is disabled for your client. Please contact your Sys-Admin if you want to change this setting. # noqa: E501
:param is_web_scraping_enabled: The is_web_scraping_enabled of this ClientConfiguration. # noqa: E501
:type: bool
"""
if is_web_scraping_enabled is None:
raise ValueError("Invalid value for `is_web_scraping_enabled`, must not be `None`") # noqa: E501
self._is_web_scraping_enabled = is_web_scraping_enabled
@property
def available_bank_groups(self):
"""Gets the available_bank_groups of this ClientConfiguration. # noqa: E501
List of bank groups that are available to this client. A bank group is a collection of all banks that are located in a certain country, and is defined by the country's ISO 3166 ALPHA-2 code (see also field 'location' of Bank resource). If you want to extend or limit the available bank groups for your client, please contact your Sys-Admin.<br/><br/>Note: There is no bank group for international institutes (i.e. institutes that are not bound to any specific country). Instead, those institutes are always available. If this list is empty, it means that ONLY international institutes are available. # noqa: E501
:return: The available_bank_groups of this ClientConfiguration. # noqa: E501
:rtype: list[str]
"""
return self._available_bank_groups
@available_bank_groups.setter
def available_bank_groups(self, available_bank_groups):
"""Sets the available_bank_groups of this ClientConfiguration.
List of bank groups that are available to this client. A bank group is a collection of all banks that are located in a certain country, and is defined by the country's ISO 3166 ALPHA-2 code (see also field 'location' of Bank resource). If you want to extend or limit the available bank groups for your client, please contact your Sys-Admin.<br/><br/>Note: There is no bank group for international institutes (i.e. institutes that are not bound to any specific country). Instead, those institutes are always available. If this list is empty, it means that ONLY international institutes are available. # noqa: E501
:param available_bank_groups: The available_bank_groups of this ClientConfiguration. # noqa: E501
:type: list[str]
"""
if available_bank_groups is None:
raise ValueError("Invalid value for `available_bank_groups`, must not be `None`") # noqa: E501
self._available_bank_groups = available_bank_groups
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ClientConfiguration):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 71.027431 | 1,696 | 0.743066 | 28,222 | 0.990871 | 0 | 0 | 22,703 | 0.7971 | 0 | 0 | 20,786 | 0.729794 |
ffc35164c1764ae381a92d8e3682d0250a4793ea | 912 | py | Python | utils/jwt_custom_decorator.py | w0rm1995/face-comparison-backend | 9e231aabcf129e887e25a8ffdb5ae9617fee3e00 | [
"MIT"
] | null | null | null | utils/jwt_custom_decorator.py | w0rm1995/face-comparison-backend | 9e231aabcf129e887e25a8ffdb5ae9617fee3e00 | [
"MIT"
] | 3 | 2021-06-08T22:05:30.000Z | 2022-01-13T03:04:03.000Z | utils/jwt_custom_decorator.py | w0rm1995/face-comparison-backend | 9e231aabcf129e887e25a8ffdb5ae9617fee3e00 | [
"MIT"
] | null | null | null | from functools import wraps
from flask_jwt_extended import verify_jwt_in_request, get_jwt_claims, exceptions
from jwt import exceptions as jwt_exception
from utils.custom_response import bad_request
def admin_required(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
try:
verify_jwt_in_request()
claims = get_jwt_claims()
if claims['roles'] != 'admin':
return bad_request('Admins only', 403)
else:
return fn(*args, **kwargs)
except jwt_exception.DecodeError as e:
return bad_request(str(e), 401)
# except jwt_exception.DecodeError as e:
# return bad_request(str(e), 401)
except jwt_exception.PyJWTError as e:
return bad_request(str(e), 401)
except exceptions.JWTExtendedException as e:
return bad_request(str(e), 403)
return wrapper
| 35.076923 | 80 | 0.638158 | 0 | 0 | 0 | 0 | 663 | 0.726974 | 0 | 0 | 104 | 0.114035 |
ffc40ad7630c4587dcf4487c052a523769c15b4a | 1,254 | py | Python | packages/M2Crypto-0.21.1/demo/smime/unsmime.py | RaphaelPrevost/Back2Shops | 5f2d369e82fe2a7b9b3a6c55782319b23d142dfd | [
"CECILL-B"
] | null | null | null | packages/M2Crypto-0.21.1/demo/smime/unsmime.py | RaphaelPrevost/Back2Shops | 5f2d369e82fe2a7b9b3a6c55782319b23d142dfd | [
"CECILL-B"
] | 6 | 2021-03-31T19:21:50.000Z | 2022-01-13T01:46:09.000Z | packages/M2Crypto-0.21.1/demo/smime/unsmime.py | RaphaelPrevost/Back2Shops | 5f2d369e82fe2a7b9b3a6c55782319b23d142dfd | [
"CECILL-B"
] | null | null | null | #!/usr/bin/env python
"""S/MIME demo.
Copyright (c) 2000 Ng Pheng Siong. All rights reserved."""
from M2Crypto import BIO, Rand, SMIME, X509
import sys
def decrypt_verify(p7file, recip_key, signer_cert, ca_cert):
s = SMIME.SMIME()
# Load decryption private key.
s.load_key(recip_key)
# Extract PKCS#7 blob from input.
p7, bio = SMIME.smime_load_pkcs7_bio(p7file)
# Decrypt.
data = s.decrypt(p7)
# Because we passed in a SignAndEnveloped blob, the output
# of our decryption is a Signed blob. We now verify it.
# Load the signer's cert.
sk = X509.X509_Stack()
s.set_x509_stack(sk)
# Load the CA cert.
st = X509.X509_Store()
st.load_info(ca_cert)
s.set_x509_store(st)
# Verify.
p7, bio = SMIME.smime_load_pkcs7_bio(BIO.MemoryBuffer(data))
if bio is not None:
# Netscape Messenger clear-signs, when also encrypting.
data = s.verify(p7, bio)
else:
# M2Crypto's sendsmime.py opaque-signs, when also encrypting.
data = s.verify(p7)
print data
if __name__ == '__main__':
Rand.load_file('../randpool.dat', -1)
decrypt_verify(BIO.File(sys.stdin), 'client.pem', 'client2.pem','ca.pem')
Rand.save_file('../randpool.dat')
| 24.588235 | 77 | 0.6563 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 528 | 0.421053 |
ffc4351a518b97d5c4916014accd51d41d76de87 | 14,867 | py | Python | skybright/skybright.py | ehneilsen/skybright | b0e2d7e6e25131393ee76ce334ce1df1521e3659 | [
"MIT"
] | 1 | 2019-09-24T21:06:45.000Z | 2019-09-24T21:06:45.000Z | skybright/skybright.py | ehneilsen/skybright | b0e2d7e6e25131393ee76ce334ce1df1521e3659 | [
"MIT"
] | null | null | null | skybright/skybright.py | ehneilsen/skybright | b0e2d7e6e25131393ee76ce334ce1df1521e3659 | [
"MIT"
] | 1 | 2019-09-24T21:14:35.000Z | 2019-09-24T21:14:35.000Z | #!/usr/bin/env python
"""A model for the sky brightness
"""
from functools import partial
from math import pi, cos, acos, sin, sqrt, log10
from datetime import datetime, tzinfo, timedelta
from time import strptime
from calendar import timegm
from copy import deepcopy
from sys import argv
from collections import namedtuple, OrderedDict
from argparse import ArgumentParser
try:
from ConfigParser import ConfigParser
except:
from configparser import ConfigParser
import numexpr
from numexpr import NumExpr
import warnings
from warnings import warn
import numpy as np
try:
from palpy import rdplan as rdplan_not_vectorized
from palpy import gmst as gmst_not_vectorized
from palpy import dmoon
from palpy import evp
except ImportError:
from pyslalib.slalib import sla_rdplan as rdplan_not_vectorized
from pyslalib.slalib import sla_gmst as gmst_not_vectorized
from pyslalib.slalib import sla_dmoon as dmoon
from pyslalib.slalib import sla_evp as evp
# Body identifiers accepted by palpy/pyslalib rdplan (0 = Sun, 3 = Moon).
palpy_body = {'sun': 0,
              'moon': 3}

# Magnitude zero point used when converting between magnitudes and linear
# flux units (23.9 is the AB magnitude of 1 microJansky — presumably fluxes
# here are in microJanskys; confirm against callers).
MAG0 = 23.9

# warnings.simplefilter("always")

# rdplan computes the apparent RA/Dec of a solar-system body; vectorize it
# so it accepts array-valued MJDs and coordinates.
rdplan = np.vectorize(rdplan_not_vectorized)
def gmst(mjd):
    """Greenwich mean sidereal time, in radians, for the given MJD(s).

    Follows the polynomial expression of Meeus, chapter 12.

    :param mjd: modified Julian date (scalar or array)
    :return: GMST in radians, in [0, 2*pi)
    """
    # Julian centuries since J2000.0 (MJD 51544.5).
    century = numexpr.evaluate("(mjd - 51544.5)/36525")
    gmst_deg = numexpr.evaluate(
        "280.46061837 + 360.98564736629*(mjd-51544.5)"
        " + 0.000387933*century*century"
        " - century*century*century/38710000")
    return np.radians(np.mod(gmst_deg, 360))
def ang_sep(ra1, decl1, ra2, decl2):
    """Angular separation, in radians, between two points on the sphere.

    Inputs are RA/declination pairs in radians.  Uses the haversine
    formula, which stays numerically stable for small separations.
    """
    haversine = ("2*arcsin(sqrt(cos(decl1)*cos(decl2)*(sin(((ra1-ra2)/2))**2)"
                 " + (sin((decl1-decl2)/2))**2))")
    return numexpr.evaluate(haversine)
## Works and is trivially faster, but less flexible w.r.t. data types
#
# ang_sep = NumExpr("2*arcsin(sqrt(cos(decl1)*cos(decl2)*(sin(((ra1-ra2)/2))**2) + (sin((decl1-decl2)/2))**2))",
# (('ra1', np.float64), ('decl1', np.float64), ('ra2', np.float64), ('decl2', np.float64)))
def calc_zd(latitude, ha, decl):
    """Zenith distance, in radians, of a pointing at the given hour angle.

    The zenith is by definition at hour angle 0 and declination equal to
    the site latitude, so the zenith distance is simply the angular
    separation from that point.

    :param latitude: site latitude, radians
    :param ha: hour angle of the pointing, radians
    :param decl: declination of the pointing, radians
    """
    zenith_ha = 0
    zenith_decl = latitude
    return ang_sep(ha, decl, zenith_ha, zenith_decl)
def calc_airmass(cos_zd):
    """Airmass for a given cosine of the zenith distance.

    Uses a rational fit in cos(zd); provenance of the fit coefficients is
    not recorded here (TODO: confirm the reference).  Pointings below the
    horizon (cos_zd < 0) are returned as NaN.

    :param cos_zd: cosine of the zenith distance (scalar or array)
    :return: airmass array, NaN below the horizon
    """
    coeff = numexpr.evaluate("462.46 + 2.8121/(cos_zd**2 + 0.22*cos_zd + 0.01)")
    airmass = numexpr.evaluate("sqrt((coeff*cos_zd)**2 + 2*coeff + 1) - coeff*cos_zd")
    airmass[cos_zd < 0] = np.nan
    return airmass
def calc_airglow(r0, h, m_zen, k, sin_zd, airmass):
    """Airglow flux (linear units, zero point MAG0) for the given pointing.

    :param r0: presumably the planetary radius, same units as h -- confirm
    :param h: presumably the height of the emitting airglow layer -- confirm
    :param m_zen: zenith airglow surface brightness, in magnitudes
    :param k: extinction coefficient, magnitudes per airmass
    :param sin_zd: sine of the zenith distance
    :param airmass: airmass of the pointing
    """
    expression = ("10**(-0.4*(m_zen"
                  " + 1.25*log10(1.0 - (r0/(h+r0))*(sin_zd**2))"
                  " + k*(airmass-1) - MAG0))")
    return numexpr.evaluate(expression)
def calc_scat_extinction(k, x0, x):
    """Effective extinction factor for light scattered from airmass x0 to x.

    NaN entries in x0 (scattering body below the horizon) fall back to the
    airmass at the horizon (cos(zd) = 0).

    :param k: extinction coefficient, magnitudes per airmass
    :param x0: airmass of the scattering body (scalar or array, may be NaN)
    :param x: airmass of the pointing
    """
    horizon_airmass = calc_airmass(0)
    if np.ndim(x0) == 0:
        x0p = horizon_airmass if np.isnan(x0) else x0
    else:
        x0p = np.where(np.isnan(x0), horizon_airmass, x0)
    return numexpr.evaluate("(10**(-0.4*k*x) - 10**(-0.4*k*x0p))/(-0.4*k*(x-x0p))")
def elongation_not_vectorized(mjd):
    """Elongation of the moon in *degrees* at MJD *mjd*.

    (The result passes through np.degrees, and callers such as
    calc_moon_brightness use it in degrees via alpha = 180 - elongation.)
    """
    # Geocentric moon position/velocity from (pal|sla) dmoon.
    pv = dmoon(mjd)
    moon_distance = (sum([x**2 for x in pv[:3]]))**0.5
    # evp returns barycentric/heliocentric earth position-velocity; dph is
    # presumably the heliocentric earth position -- confirm against palpy docs.
    dvb, dpb, dvh, dph = evp(mjd,-1)
    sun_distance = (sum([x**2 for x in dph[:3]]))**0.5
    # Angle between the moon direction and the sun direction (-dph).
    a = np.degrees(np.arccos(
        (-pv[0]*dph[0] - pv[1]*dph[1] - pv[2]*dph[2])/
        (moon_distance*sun_distance)))
    return a
# Vectorized form so arrays of MJDs can be passed.
elongation = np.vectorize(elongation_not_vectorized)
def calc_moon_brightness(mjd, moon_elongation=None):
    """The brightness of the moon relative to full (dimensionless, 0-1).

    *moon_elongation* is in degrees; if omitted it is computed from *mjd*.
    The value matches roughly what Astrophysical Quantities gives for the
    elongation reported by http://ssd.jpl.nasa.gov/horizons.cgi; e.g.
    calc_moon_brightness(51778.47) is about 0.10.
    """
    if moon_elongation is None:
        moon_elongation = elongation(mjd)
    # alpha is the lunar phase angle in degrees (0 = full moon).
    alpha = 180.0-moon_elongation
    # Allen's _Astrophysical Quantities_, 3rd ed., p. 144
    return 10**(-0.4*(0.026*abs(alpha) + 4E-9*(alpha**4)))
def one_calc_twilight_fract(z, twi1=-2.52333, twi2=0.01111):
    """Scalar twilight sky-brightness fraction for sun zenith distance *z* (deg).

    Returns 1.0 in daylight (z < 90) and 0.0 in full night (z > 108);
    in between, a log10-quadratic interpolation is used, switching to the
    twi1/twi2-parameterized branch past z = 100.  The result is clamped
    to [0, 1].
    """
    if z < 90:
        return 1.0
    if z > 108:
        return 0.0
    if z > 100:
        # Offset chosen so the parameterized quadratic vanishes at z = 90.
        offset = -(twi1 * 90 + twi2 * 90 * 90)
        logfrac = offset + twi1 * z + twi2 * z * z
    else:
        logfrac = 137.11 - 2.52333 * z + 0.01111 * z * z
    return min(1.0, max(0.0, 10 ** logfrac))
def calc_twilight_fract(zd, twi1=-2.52333, twi2=0.01111):
    """Vectorized twilight fraction for sun zenith distance(s) *zd* in degrees.

    Same piecewise log10-quadratic as one_calc_twilight_fract: 1.0 for
    z < 90, 0.0 for z > 108, parameterized branch for z > 100, clamped to
    [0, 1].  A scalar input is wrapped in a 0-d array; the boolean-mask
    assignment on a 0-d array relies on numpy fancy-indexing corner cases
    -- TODO confirm the scalar path is exercised/correct.
    """
    z = zd if len(np.shape(zd)) > 0 else np.array(zd)
    logfrac = numexpr.evaluate("137.11-2.52333*z+0.01111*z*z")
    # Past z=100, use the parameterized quadratic with an offset chosen so
    # it vanishes at z=90.
    logfrac[z>100] = numexpr.evaluate("twi1*z + twi2*z*z - (twi1*90 + twi2*90*90)")[z>100]
    frac = 10**logfrac
    frac = np.where(z<90, 1.0, frac)
    frac = np.where(z>108, 0.0, frac)
    frac = np.where(frac>1.0, 1.0, frac)
    frac = np.where(frac<0.0, 0.0, frac)
    return frac
def calc_body_scattering(brightness, body_zd_deg, cos_zd, body_ra, body_decl, ra, decl,
                         twi1, twi2, k, airmass, body_airmass, rayl_m, mie_m, g,
                         rayleigh=True, mie=True):
    """Flux scattered into the pointing from a bright body (sun or moon).

    *brightness* is the body's brightness (relative units), *body_zd_deg*
    its zenith distance in degrees, (body_ra, body_decl) and (ra, decl) the
    body and pointing coordinates in radians.  *rayl_m*/*mie_m* normalize
    the Rayleigh and Mie scattering terms (mag), *g* is the Mie asymmetry
    parameter, and twi1/twi2 parameterize the twilight dimming of the body.
    Returns linear flux relative to MAG0.
    """
    if len(np.shape(brightness)) == 0:
        brightness = np.array(brightness)
    # The body contributes nothing once it is far below the horizon.
    brightness = np.where(body_zd_deg > 107.8, 0, brightness)
    # While the body itself is in twilight (zd > 90 deg), dim it smoothly.
    body_twi = body_zd_deg > 90
    brightness[body_twi] = brightness[body_twi]*calc_twilight_fract(body_zd_deg[body_twi], twi1, twi2)
    extinct = calc_scat_extinction(k, body_airmass, airmass)
    # cos of the angular separation (scattering angle) body -> pointing.
    cos_rho = numexpr.evaluate("cos(2*arcsin(sqrt(cos(decl)*cos(body_decl)*(sin(((ra-body_ra)/2))**2) + (sin((decl-body_decl)/2))**2)))")
    rayleigh_frho = numexpr.evaluate("0.75*(1.0+cos_rho**2)") if rayleigh else np.zeros_like(cos_rho)
    mie_frho = numexpr.evaluate("1.5*((1.0-g**2)/(2.0+g**2)) * (1.0 + cos_rho) * (1.0 + g**2 - 2.0*g*cos_rho*cos_rho)**(-1.5)") if mie else np.zeros_like(cos_rho)
    # Fitter sometimes explores values of g resulting in mie_frho being
    # negative; clamp to force a physical (non-negative) phase function.
    # (A single application suffices -- the clamp is idempotent.)
    mie_frho = np.where(mie_frho<0, 0.0, mie_frho)
    rayl_c = 10**(-0.4*(rayl_m-MAG0))
    mie_c = 10**(-0.4*(mie_m-MAG0))
    flux = brightness*extinct*(rayl_c*rayleigh_frho + mie_c*mie_frho)
    return flux
class MoonSkyModel(object):
    """Night-sky brightness model: airglow plus Rayleigh/Mie scattering of
    moonlight and (twilight) sunlight.

    Configured from a ConfigParser with an "Observatory Position" section
    (longitude/latitude in decimal degrees) and a "sky" section whose
    options each hold one whitespace-separated value per filter, in the
    order given by the "filters" option.  Calling the instance returns sky
    brightness in magnitudes (NaN during twilight when twilight_nan is set).
    """

    def __init__(self, model_config):
        self.longitude = model_config.getfloat("Observatory Position",
                                               "longitude")
        self.latitude = model_config.getfloat("Observatory Position",
                                              "latitude")
        # Per-band model coefficients, keyed by filter name.
        self.k = OrderedDict()        # extinction coefficient
        self.m_inf = OrderedDict()    # "flux from infinity" (mag)
        self.m_zen = OrderedDict()    # zenith airglow brightness (mag)
        self.h = OrderedDict()        # airglow layer height
        self.rayl_m = OrderedDict()   # Rayleigh scattering normalization (mag)
        self.g = OrderedDict()        # Mie asymmetry parameter
        self.mie_m = OrderedDict()    # Mie scattering normalization (mag)
        self.offset = OrderedDict()
        self.sun_dm = OrderedDict()   # sun brightness offset (mag)
        self.twi1 = OrderedDict()     # twilight quadratic coefficients
        self.twi2 = OrderedDict()

        def column(option):
            # One whitespace-separated entry per filter, in "filters" order.
            return model_config.get("sky", option).split()

        # enumerate() already supplies the column index for each band;
        # assumes filter names in "filters" are unique.
        for i, band in enumerate(column("filters")):
            self.k[band] = float(column("k")[i])
            self.m_inf[band] = float(column("m_inf")[i])
            self.m_zen[band] = float(column("m_zen")[i])
            self.h[band] = float(column("h")[i])
            self.rayl_m[band] = float(column("rayl_m")[i])
            self.g[band] = float(column("g")[i])
            self.mie_m[band] = float(column("mie_m")[i])
            self.offset[band] = 0.0
            self.sun_dm[band] = float(column("sun_dm")[i])
            self.twi1[band] = float(column("twi1")[i])
            self.twi2[band] = float(column("twi2")[i])

        # Zenith-distance helper with the site latitude baked in.
        self.calc_zd = partial(calc_zd, np.radians(self.latitude))
        self.r0 = 6375.0          # Earth radius used by the airglow model
        self.twilight_nan = True  # report NaN when sun zd < 98 degrees

    def __call__(self, mjd, ra_deg, decl_deg, band, sun=True, moon=True,
                 moon_crds=None, moon_elongation=None, sun_crds=None, lst=None):
        """Sky brightness (mag) at the given pointing(s).

        *band* may be a scalar filter name or an array of names matching
        *ra_deg*/*decl_deg*; array input is dispatched band by band.
        """
        if len(np.shape(band)) < 1:
            return self.single_band_call(
                mjd, ra_deg, decl_deg, band, sun=sun, moon=moon,
                moon_crds=moon_crds, moon_elongation=moon_elongation, sun_crds=sun_crds,
                lst=lst)

        mags = np.empty_like(ra_deg, dtype=np.float64)
        mags.fill(np.nan)
        for this_band in np.unique(band):
            these = band == this_band
            mjd_arg = mjd if len(np.shape(mjd))==0 else mjd[these]
            mags[these] = self.single_band_call(
                mjd_arg, ra_deg[these], decl_deg[these], this_band, sun=sun, moon=moon,
                moon_crds=moon_crds, moon_elongation=moon_elongation, sun_crds=sun_crds,
                lst=lst
            )
        return mags

    def single_band_call(self, mjd, ra_deg, decl_deg, band, sun=True, moon=True,
                         moon_crds=None, moon_elongation=None, sun_crds=None, lst=None):
        """Sky brightness for a single filter; see __call__ for arguments.

        *sun_crds*/*moon_crds* are optional precomputed coordinates
        (objects with .ra.rad/.dec.rad, astropy-style); *lst* overrides the
        computed local sidereal time and is given in degrees.
        """
        longitude = np.radians(self.longitude)
        latitude = np.radians(self.latitude)
        ra = np.radians(ra_deg)
        decl = np.radians(decl_deg)
        k = self.k[band]
        twi1 = self.twi1[band]
        twi2 = self.twi2[band]
        m_inf = self.m_inf[band]
        lst = gmst(mjd) + longitude if lst is None else np.radians(lst)
        ha = lst - ra
        # Sun position and airmass.
        if sun_crds is None:
            sun_ra, sun_decl, diam = rdplan(mjd, 0, longitude, latitude)
        else:
            sun_ra = sun_crds.ra.rad
            sun_decl = sun_crds.dec.rad
        sun_ha = lst - sun_ra
        sun_zd = self.calc_zd(sun_ha, sun_decl)
        sun_zd_deg = np.degrees(sun_zd)
        if len(np.shape(sun_zd_deg)) == 0 and self.twilight_nan:
            if sun_zd_deg < 98:
                # Scalar twilight short-circuit: the model is not valid.
                m = np.empty_like(ra)
                m.fill(np.nan)
                return m
        sun_cos_zd = np.cos(sun_zd)
        sun_airmass = calc_airmass(sun_cos_zd)
        # Moon position and airmass.
        if moon_crds is None:
            moon_ra, moon_decl, diam = rdplan(mjd, 3, longitude, latitude)
        else:
            moon_ra = moon_crds.ra.rad
            moon_decl = moon_crds.dec.rad
        moon_ha = lst - moon_ra
        moon_zd = self.calc_zd(moon_ha, moon_decl)
        moon_cos_zd = np.cos(moon_zd)
        moon_airmass = calc_airmass(moon_cos_zd)
        moon_zd_deg = np.degrees(moon_zd)
        # Flux from infinity
        sky_flux = np.empty_like(ra)
        sky_flux.fill(10**(-0.4*(m_inf-MAG0)))
        # Airglow
        zd = self.calc_zd(ha, decl)
        sin_zd = np.sin(zd)
        cos_zd = np.cos(zd)
        airmass = calc_airmass(cos_zd)
        airglow_flux = calc_airglow(self.r0, self.h[band], self.m_zen[band], k, sin_zd, airmass)
        sky_flux += airglow_flux
        # Needed for both scattering calculations
        zd_deg = np.degrees(zd)
        # Add scattering of moonlight
        if moon:
            moon_flux = calc_body_scattering(
                calc_moon_brightness(mjd, moon_elongation),
                moon_zd_deg, cos_zd, moon_ra, moon_decl, ra, decl, twi1, twi2, k, airmass, moon_airmass,
                self.rayl_m[band], self.mie_m[band], self.g[band])
            sky_flux += moon_flux
        # Add scattering of sunlight
        if sun:
            sun_flux = calc_body_scattering(
                10**(-0.4*(self.sun_dm[band])),
                sun_zd_deg, cos_zd, sun_ra, sun_decl, ra, decl, twi1, twi2, k, airmass, sun_airmass,
                self.rayl_m[band], self.mie_m[band], self.g[band])
            sky_flux += sun_flux
        m = MAG0 - 2.5*np.log10(sky_flux)
        if len(np.shape(m)) > 0 and self.twilight_nan:
            # Array path: blank out pointings taken during twilight.
            m[sun_zd_deg < 98] = np.nan
        return m
#
# Included for backward compatibility with the previous implementation
#
def skymag(m_inf, m_zen, h, g, mie_m, rayl_m, ra, decl, mjd, k, latitude, longitude, offset=0.0,
           sun_dm=-14.0, twi1=-2.52333, twi2=0.01111):
    """Backward-compatible wrapper: build a one-filter MoonSkyModel and call it.

    All model coefficients are for the single synthetic filter 'x'.
    *offset* is accepted for API compatibility but is not used by the model.
    """
    config = ConfigParser()
    sect = "Observatory Position"
    config.add_section(sect)
    # ConfigParser stores option values as strings (set() raises TypeError
    # on non-string input in py3 and with interpolating parsers), and
    # MoonSkyModel reads them back via getfloat()/float(), so stringify
    # the numeric parameters explicitly.
    config.set(sect, 'longitude', str(longitude))
    config.set(sect, 'latitude', str(latitude))
    sect = "sky"
    config.add_section(sect)
    config.set(sect, 'filters', 'x')
    config.set(sect, 'k', str(k))
    config.set(sect, 'm_inf', str(m_inf))
    config.set(sect, 'm_zen', str(m_zen))
    config.set(sect, 'h', str(h))
    config.set(sect, 'rayl_m', str(rayl_m))
    config.set(sect, 'g', str(g))
    config.set(sect, 'mie_m', str(mie_m))
    config.set(sect, 'sun_dm', str(sun_dm))
    config.set(sect, 'twi1', str(twi1))
    config.set(sect, 'twi2', str(twi2))
    calc_sky = MoonSkyModel(config)
    sky = calc_sky(mjd, ra, decl, 'x')
    return sky
if __name__=='__main__':
    # Command-line diagnostic: report sky geometry and brightness at a pointing.
    parser = ArgumentParser('Estimate the sky brightness')
    parser.add_argument("-m", "--mjd", type=float,
                        help="Modified Julian Date (float) (UTC)")
    parser.add_argument("-r", "--ra", type=float,
                        help="the RA (decimal degrees)")
    parser.add_argument("-d", "--dec", type=float,
                        help="the declination (decimal degrees)")
    parser.add_argument("-f", "--filter",
                        help="the filter")
    parser.add_argument("-c", "--config",
                        help="the configuration file")
    args = parser.parse_args()
    model_config = ConfigParser()
    model_config.read(args.config)
    longitude = model_config.getfloat("Observatory Position",
                                      "longitude")
    latitude = model_config.getfloat("Observatory Position",
                                     "latitude")
    lst = gmst(args.mjd) + np.radians(longitude)
    print("GMST: %f" % np.degrees(gmst(args.mjd)))
    print("LST: %f" % np.degrees(lst))
    sun_ra, sun_decl, diam = rdplan(args.mjd, 0, np.radians(longitude), np.radians(latitude))
    sun_ha = lst - sun_ra
    sun_zd = np.degrees(calc_zd(np.radians(latitude), sun_ha, sun_decl))
    print("Sun zenith distance: %f" % sun_zd)
    # rdplan expects radians, matching the sun call above and
    # MoonSkyModel.single_band_call.
    moon_ra, moon_decl, diam = rdplan(args.mjd, 3, np.radians(longitude), np.radians(latitude))
    moon_ha = lst - moon_ra
    moon_zd = np.degrees(calc_zd(np.radians(latitude), moon_ha, moon_decl))
    print("Moon zenith distance: %f" % moon_zd)
    print("Elongation of the moon: %f" % elongation(args.mjd))
    print("Moon brightness: %f" % calc_moon_brightness(args.mjd))
    sep = ang_sep(moon_ra, moon_decl, np.radians(args.ra), np.radians(args.dec))
    print("Pointing angle with moon: %f" % sep)
    ha = lst - np.radians(args.ra)
    print("Hour angle: %f" % np.degrees(ha))
    z = calc_zd(np.radians(latitude), ha, np.radians(args.dec))
    print("Pointing zenith distance: %f" % np.degrees(z))
    print("Airmass: %f" % calc_airmass(np.cos(z)))
    sky_model = MoonSkyModel(model_config)
    print("Sky brightness at pointing: %f" % sky_model(args.mjd, args.ra, args.dec, args.filter))
| 37.1675 | 163 | 0.609807 | 5,700 | 0.383399 | 0 | 0 | 0 | 0 | 0 | 0 | 2,804 | 0.188606 |
ffc57756064cdbfdff55d925646e8ab713a50ba6 | 1,675 | py | Python | timeseries/test.py | zoobree/MachineLearning | 67fd35e67469d9f03afd5c090f2ca23f514bebfd | [
"Apache-2.0"
] | null | null | null | timeseries/test.py | zoobree/MachineLearning | 67fd35e67469d9f03afd5c090f2ca23f514bebfd | [
"Apache-2.0"
] | 1 | 2018-04-07T05:24:40.000Z | 2018-04-07T05:24:40.000Z | timeseries/test.py | joybree/MachineLearning | 69a381efa35436a6d211005c320576db966eea11 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import unittest
import arima
import os
import pandas as pd
class Arima_Test(unittest.TestCase):
    """Accuracy tests for the project-local arima.prediction() routine."""
    def set_data_dir(self):
        # Not setUp(): tests must call this explicitly before using self.dir.
        # NOTE(review): hard-coded Windows path -- these tests only run on a
        # machine with this exact layout.
        print("set_data_dir")
        self.dir = "E:/code/python/MachineLearning/data/test_data/"
        self.error = 0.001
        self.num_percent = 0.9
    def test_result_one_point(self):
        """One-step-ahead forecasts should fall within self.error (relative)
        of the actual last value for at least self.num_percent of fixtures."""
        true_num = 0
        false_num = 0
        print("****test_result_compare****")
        self.set_data_dir()
        filelist = os.listdir(self.dir)
        list_ts_data = []
        for file_name in filelist:
            # Each fixture is a CSV with a 'date' index and a 'value' column.
            df_data = pd.read_csv(self.dir+file_name, encoding='utf-8', index_col='date')
            df_data.index = pd.to_datetime(df_data.index)
            ts_data = df_data['value']
            list_ts_data.append(ts_data)
            # arima.prediction presumably returns (values, variances,
            # confidences) -- TODO confirm against the arima module.
            prediction_value, prediction_var, prediction_con = arima.prediction(ts_data, pre_num=1)
            print(prediction_value[0])
            print(ts_data[-1])
            if abs(prediction_value[0] - ts_data[-1])/ts_data[-1] <= self.error:
                true_num = true_num + 1
            else:
                false_num = false_num + 1
        print(true_num)
        print(false_num)
        self.assertGreaterEqual(true_num / (true_num + false_num), self.num_percent)
    def test_result_two_point(self):
        # Placeholder: two-step-ahead accuracy not yet implemented.
        pass
    def test_result_three_point(self):
        # Placeholder: three-step-ahead accuracy not yet implemented.
        pass
    def test_trend(self):
        """
        increase or decrease
        """
        pass
    def test_obj_number(self):
        # Placeholder.
        pass
    def test_run_time(self):
        # Placeholder.
        pass
    def test_write_result(self):
        # Placeholder.
        pass
if __name__ == "__main__":
    # Run the Arima_Test suite when this file is executed directly.
    unittest.main()
| 25.769231 | 99 | 0.587463 | 1,541 | 0.92 | 0 | 0 | 0 | 0 | 0 | 0 | 188 | 0.112239 |
ffc7043d4112113fd11d3bba2367bfc4002daece | 8,004 | py | Python | pynetstation_send_tags/pynetstation_send_tags.py | mattmoo/Pynetstation-Plug-In | aba2d312e5543cc5c2100793805acfeff075c59c | [
"MIT"
] | null | null | null | pynetstation_send_tags/pynetstation_send_tags.py | mattmoo/Pynetstation-Plug-In | aba2d312e5543cc5c2100793805acfeff075c59c | [
"MIT"
] | null | null | null | pynetstation_send_tags/pynetstation_send_tags.py | mattmoo/Pynetstation-Plug-In | aba2d312e5543cc5c2100793805acfeff075c59c | [
"MIT"
] | null | null | null | # -*- coding:utf-8 -*-
"""
This file is part of OpenSesame.
OpenSesame is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OpenSesame is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with OpenSesame. If not, see <http://www.gnu.org/licenses/>.
"""
from libopensesame.item import item
from libqtopensesame.items.qtautoplugin import qtautoplugin
from openexp.canvas import canvas
# Placeholder shown in the GUI when a tag's variable name has not been set.
blankText = u'Enter Variable Name Here'
# Placeholder for an unset 4-character tag ID.
blankID = u'****'
def make_fit(k):
    """Return *k* forced to exactly four characters.

    Longer strings are truncated to their first four characters; shorter
    ones are right-padded with spaces.  Used to normalize NetStation
    event/tag identifiers, which the protocol fixes at four characters.
    """
    return k[:4].ljust(4)
class pynetstation_send_tags(item):
    """
    This class (the class with the same name as the module) handles the basic
    functionality of the item. It does not deal with GUI stuff.
    """
    # Provide an informative description for your plug-in.
    description = u'Send event tags to Netstation'
    def reset(self):
        """
        desc:
            Resets plug-in to initial values.
        """
        # Here we provide default values for the variables that are specified
        # in info.json. If you do not provide default values, the plug-in will
        # work, but the variables will be undefined when they are not explicitly
        # set in the GUI.
        self.eventTag = u'evt-'
        self.labelCheck = u'yes'
        self.labelText = u'Description of events or somesuch'
        self.descriptionCheck = u'yes'
        self.descriptionText = u'Description of events or somesuch'
        # Up to five optional (variable-name, 4-char ID) tag pairs; only
        # tag 1 is enabled by default.
        self.tag1check = u'yes'
        self.tagText1 = blankText
        self.tagID1 = blankID
        self.tag2check = u'no'
        self.tagText2 = blankText
        self.tagID2 = blankID
        self.tag3check = u'no'
        self.tagText3 = blankText
        self.tagID3 = blankID
        self.tag4check = u'no'
        self.tagText4 = blankText
        self.tagID4 = blankID
        self.tag5check = u'no'
        self.tagText5 = blankText
        self.tagID5 = blankID
    def prepare(self):
        """The preparation phase of the plug-in goes here."""
        # Call the parent constructor.
        item.prepare(self)
    def run(self):
        """The run phase of the plug-in: assemble the enabled tags into a
        table and send a timestamped event to NetStation (if enabled)."""
        # self.set_item_onset() sets the time_[item name] variable. Optionally,
        # you can pass a timestamp, such as returned by canvas.show().
        self.set_item_onset(self.time())
        # nsOnOff is presumably an experiment-level switch that enables the
        # NetStation connection -- confirm in the companion init plug-in.
        if self.get(u'nsOnOff') == u'yes':
            tagTable = {}
            if self.get(u'labelCheck') != u'yes':
                self.labelText = ''
            if self.get(u'descriptionCheck') != u'yes':
                self.descriptionText = ''
            for i in range(1, 6):
                if self.get(u'tag%dcheck' % i) == u'yes':
                    #
                    # Force all keys to become a utf-8 string, regardless of whether they're an int or string.
                    # keyI = ('%s' % self.get(u'tagID%d' % i)).encode('utf-8')
                    keyI = str(self.get(u'tagID%d' % i))
                    #
                    # check if variable exists. If not, use the literal.
                    try:
                        valueI = self.get(self.get(u'tagText%d' % i))
                    except:
                        valueI = self.get(u'tagText%d' % i)
                    #
                    # Differentiate between integers and strings while encoding strings in utf-8 for pynetstation.
                    if type(valueI) == int or type(valueI) == long or type(valueI) == float:
                        tagTable[keyI] = (valueI)
                    else:
                        tagTable[keyI] = str(valueI)
            '''
            for i in tagTable:
                print "\nKey %s is type: %s" % (i, type(i))
                print "\nValue %s is type: %s" % (tagTable[i], type(tagTable[i]))
            print tagTable
            '''
            #
            # Encode everything to 'utf-8' before sending the message to NetStation.
            # event = ('%s' % self.experiment.get(u'eventTag')).encode('utf-8')
            # event = ('%s' % self.get(u'eventTag')).encode('utf-8')
            # label = ('%s' % self.get(u'labelText')).encode('utf-8')
            # description = ('%s' % self.get(u'descriptionText')).encode('utf-8')
            event = str(self.get(u'eventTag'))
            label = str(self.get(u'labelText'))
            description = str(self.get(u'descriptionText'))
            timestamp = self.experiment.egi.ms_localtime()
            table = tagTable
            # NOTE(review): the event appears to be sent twice -- once queued
            # for the next window flip and once immediately with id 'evtT';
            # confirm this duplication is intentional.
            self.experiment.window.callOnFlip(self.experiment.ns.send_timestamped_event, event, label, description,
                                              table, pad=True)
            self.experiment.ns.send_event('evtT', timestamp, label, description, table, pad=True)
class qtpynetstation_send_tags(pynetstation_send_tags, qtautoplugin):
    """
    This class handles the GUI aspect of the plug-in. By using qtautoplugin, we
    usually need to do hardly anything, because the GUI is defined in info.json.
    """
    def __init__(self, name, experiment, script=None):
        """
        Constructor.

        Arguments:
        name -- The name of the plug-in.
        experiment -- The experiment object.

        Keyword arguments:
        script -- A definition script. (default=None)
        """
        # We don't need to do anything here, except call the parent
        # constructors.
        pynetstation_send_tags.__init__(self, name, experiment, script)
        qtautoplugin.__init__(self, __file__)
    def apply_edit_changes(self):
        """
        desc:
            Applies the controls.
        """
        if not qtautoplugin.apply_edit_changes(self) or self.lock:
            return False
        self.custom_interactions()
        return True
    def edit_widget(self):
        """
        Refreshes the controls.

        Returns:
        The QWidget containing the controls
        """
        if self.lock:
            return
        self.lock = True
        w = qtautoplugin.edit_widget(self)
        self.custom_interactions()
        self.lock = False
        return w
    def custom_interactions(self):
        """
        desc:
            Normalizes the 4-character event/tag IDs and enables or disables
            each line-edit widget according to its yes/no checkbox.
        """
        # Event tags and tag IDs are fixed at four characters.
        self.eventTag = make_fit(str(self.eventTag))
        self.event_line_edit_widget.setEnabled(True)
        for i in range(1, 6):
            self.set(u'tagID%d' % i, make_fit(str(self.get(u'tagID%d' % i))))
        onOffLabel = self.get(u'labelCheck') == u'yes'
        self.label_line_edit_widget.setEnabled(onOffLabel)
        onOffDesc = self.get(u'descriptionCheck') == u'yes'
        self.description_line_edit_widget.setEnabled(onOffDesc)
        # Enable each tag's text/ID widgets only when its checkbox is set
        # (one loop instead of five copy-pasted stanzas).
        for i in range(1, 6):
            enabled = self.get(u'tag%dcheck' % i) == u'yes'
            getattr(self, 'tag%d_line_edit_widget' % i).setEnabled(enabled)
            getattr(self, 'tagid%d_line_edit_widget' % i).setEnabled(enabled)
| 35.415929 | 115 | 0.594328 | 7,010 | 0.875812 | 0 | 0 | 0 | 0 | 0 | 0 | 3,536 | 0.441779 |
ffc7fe1be16dc65b683b9d6a05ef9740a31e195b | 42,563 | py | Python | ion/simulators/SBE37_SMP_simulator.py | ooici/coi-services | 43246f46a82e597345507afd7dfc7373cb346afa | [
"BSD-2-Clause"
] | 3 | 2016-09-20T09:50:06.000Z | 2018-08-10T01:41:38.000Z | ion/simulators/SBE37_SMP_simulator.py | ooici/coi-services | 43246f46a82e597345507afd7dfc7373cb346afa | [
"BSD-2-Clause"
] | null | null | null | ion/simulators/SBE37_SMP_simulator.py | ooici/coi-services | 43246f46a82e597345507afd7dfc7373cb346afa | [
"BSD-2-Clause"
] | 2 | 2016-03-16T22:25:49.000Z | 2016-11-26T14:54:21.000Z | #!/usr/bin/env python
__author__ = 'Roger Unwin'
import socket
import time
from time import gmtime, strftime
import datetime
import string
import sys
import random
import asyncore
import thread
import getopt
import select
import os
### default values defined below (b/c class is not yet defined)
#default_port = 4001 # TCP port to run on.
#default_message_rate = 5 # 5 sec between messages when streaming
#default_sim=SBE37_random
########### BASE class here handles SBE37 behaviors
########### see below for subclasses that provide different data values
class SBE37(asyncore.dispatcher_with_send):
buf = ""
next_send = None
time_set_at = time.time()
out_buffer = ""
allowable_baud_rates = ['600', '1200', '2400', '4800', '9600', '19200', '38400']
baud_rate = '9600'
date = "010201" # MMDDYY
time = "010100" # HHMMSS
output_salinity = False
output_sound_velocity = False
format = 1
reference_preassure = 0.0
pump_installed = True
sample_number = 0
sleep_state = True
interval = random.randrange(5, 32767)
navg = 0
store_time = False
tx_real_time = True
start_mmddyy = "010201"
start_time = "010101"
sync_wait = 0
serial_sync_mode = False
logging = False
locked = False
start_later = False
tcaldate = "08-nov-05"
ta0 = -2.572242e-04
ta1 = 3.138936e-04
ta2 = -9.717158e-06
ta3 = 2.138735e-07
ccaldate = "08-nov-05"
cg = -9.870930e-01
ch = 1.417895e-01
ci = 1.334915e-04
cj = 3.339261e-05
wbotc = 1.202400e-05
ctcor = 3.250000e-06
cpcor = 9.570000e-08
pcaldate = "12-aug-05"
pa0 = 5.916199e+00
pa1 = 4.851819e-01
pa2 = 4.596432e-07
ptca0 = 2.762492e+02
ptca1 = 6.603433e-01
ptca2 = 5.756490e-03
ptcb0 = 2.461450e+01
ptcb1 = -9.000000e-04
ptcb2 = 0.000000e+00
poffset = 0.000000e+00
rcaldate = "08-nov-05"
rtca0 = 9.999862e-01
rtca1 = 1.686132e-06
rtca2 = -3.022745e-08
knock_count = 0
months = ['BAD PROGRAMMER MONTH', 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
save = ""
def __init__(self, socket, thread, streaming_rate, connection_id):
self.socket = socket
self.socket.settimeout(0.0)
self.thread = thread
self.streaming_rate = streaming_rate
# causes error in ion.agents.instrument.test.test_gateway_to_instrument_agent:TestInstrumentAgentViaGateway.test_autosample
#self.max_sleep = streaming_rate/2. if streaming_rate<4 else 2.
self.max_sleep = 0.1
self.connection_id = connection_id
self.handle_read()
def handle_error(self, request, client_address):
print "%3d *** dispatcher reports error: %s %s" % (self.connection_id,client_address,request)
def get_current_time_startlater(self):
#current_time = datetime.datetime.strptime(self.date + " " + self.time, "%m%d%y %H%M%S") + datetime.timedelta( seconds=( int(time.time()) - self.time_set_at) )
format = "%d %b %Y, %H:%M:%S"
return strftime(format, gmtime())
#return current_time.strftime(format)
def get_current_time_startnow(self):
current_time = datetime.datetime.strptime(self.date + " " + self.time, "%m%d%y %H%M%S") + datetime.timedelta( seconds=( int(time.time()) - self.time_set_at) )
format = "%m-%d-%Y, %H:%M:%S"
return strftime(format, gmtime())
#return current_time.strftime(format)
    def read_a_char(self):
        """Return the next buffered input character, or None after a refill.

        When the local buffer is empty this pulls up to 8192 bytes from the
        socket and echoes every received character back followed by a NUL
        (mimicking the instrument's character echo).  In that refill case it
        deliberately returns None so the caller retries on its next pass.
        """
        c = None
        if len(self.buf) > 0:
            # Consume one character from the front of the buffer.
            c = self.buf[0:1]
            self.buf = self.buf[1:]
        else:
            # Refill from the client; on the non-blocking socket recv() can
            # raise when no data is pending, which the caller handles.
            self.buf = self.recv(8192)
            for x in self.buf:
                self.socket.send(x + '\0')
        return c
    def get_data(self):
        """Return one complete, lowercased input line, or "" if none is ready.

        Characters accumulate across calls in self.save; a line is complete
        when a newline (or the empty-string sentinel from read_a_char)
        arrives.  A bare 'except' treats any read error -- typically the
        non-blocking socket having no data -- as "no line yet" and stashes
        the partial input.  An AttributeError (presumably the socket/peer
        having gone away mid-read) tears the connection down and exits this
        service thread.
        """
        data = ""
        # Resume any partial line left over from a previous call.
        ret = self.save
        try:
            while True:
                c = self.read_a_char()
                if c == None:
                    # Buffer refill in progress -- no complete line yet.
                    break
                if c == '\n' or c == '':
                    # End of line: hand back the accumulated text and reset.
                    self.save = ""
                    ret += c
                    data = ret
                    break
                else:
                    ret += c
        except AttributeError:
            print "%3d *** closing connection" % self.connection_id
            # log_file.close()
            self.socket.close()
            self.thread.exit()
        except:
            # Keep the partial line for next time and report no data.
            self.save = ret
            data = ""
        if data:
            # The simulated command parser is case-insensitive.
            data = data.lower()
            print "%3d <-- %s"%(self.connection_id,data.strip())
            # if log_file.closed == False:
            #     log_file.write("IN [" + repr(data) + "]\n")
        return data
def send_data(self, data, debug):
try:
print "%3d --> %s"%(self.connection_id,data.strip())
self.socket.send(data)
# if log_file.closed == False:
# log_file.write("OUT [" + repr(data) + "]\n")
except Exception,e:
print "%3d *** send_data FAILED [%s] had an exception sending [%s]: %s" % (self.connection_id,debug,data,e)
def handle_read(self):
while True:
self.date = strftime("%m%d%y", gmtime())
self.time = strftime("%H%M%S", gmtime())
time.sleep(0.01)
start_time = datetime.datetime.strptime(self.start_mmddyy + " " + self.start_time, "%m%d%y %H%M%S")
current_time = datetime.datetime.strptime(self.date + " " + self.time, "%m%d%y %H%M%S") + \
datetime.timedelta( seconds=( int(time.time()) - self.time_set_at) )
if self.start_later == True:
if current_time > start_time:
self.start_later = False # only trigger once
self.logging = True
#------------------------------------------------------------------#
data = self.get_data()
if self.logging == True:
if not self.next_send:
time.sleep(0.1)
else:
# sleep longer to use less CPU time when multiple simulators are running until it is about time to transmit
remaining = self.next_send - time.time()
send_now = False
if remaining>self.max_sleep:
time.sleep(self.max_sleep) # worst case: 2sec latency handling command while in streaming mode
elif remaining>0.1:
time.sleep(remaining - 0.1) # sleep off most of remaining time (< max_sleep)
else:
if remaining>0:
time.sleep(remaining)
self.next_send += self.streaming_rate
send_now = True
if send_now and self.tx_real_time:
a,b,c,d,e = self.generate_data_values()
t = self.get_current_time_startlater()
msg = '\r\n#{:.4f},{:.5f}, {:.3f}, {:.4f}, {:.3f}, {}\r\n'.format(a,b,c,d,e,t)
self.send_data(msg, 'MAIN LOGGING LOOP')
# Need to handle commands that are not in the blessed list #
if data:
command_args = string.splitfields(data.rstrip('\r\n'), "=")
if data[0] == '\r' or data[0] == '\n':
locked = False
self.knock_count += 1
if self.knock_count >= 5:
self.send_data('\r\nS>\r\n', 'NEW')
if self.knock_count == 4:
self.send_data('\r\nS>\r\n', 'NEW')
if self.knock_count == 3:
self.send_data('\x00SBE 37-SM\r\n', 'NEW')
self.send_data('S>', 'NEW')
elif command_args[0] not in ['ds', 'dc', 'ts', 'tsr', 'slt', 'sltr', 'qs', 'stop', '\r\n', '\n\r']:
self.send_data('cmd not allowed while logging\n', 'non-permitted command')
data = None
if data:
handled = True
command_args = string.splitfields(data.rstrip('\r\n'), "=")
if command_args[0] == 'baud':
if command_args[1] in self.allowable_baud_rates:
self.baud_rate = command_args[1]
else:
self.send_data("***BAUD ERROR MESSAGE***", 'BAUD ERROR MESSAGE')
elif command_args[0] == 'ds':
self.send_data("SBE37-SMP V 2.6 SERIAL NO. 2165 " + self.date[2:4] + ' ' + self.months[int(self.date[0:2])] + ' 20' + self.date[4:6] + ' ' + self.time[0:2] + ':' + self.time[2:4] + ':' + self.time[4:6] + '\r\n', 'DS line 1')
if self.logging:
self.send_data("logging data\r\n", 'DS line 2')
else:
self.send_data("not logging: received stop command\r\n", 'DS line 2')
self.send_data("sample interval = " + str(self.interval) + " seconds\r\n", 'DS line 3')
self.send_data("samplenumber = " + str(self.sample_number) + ", free = " + str(200000 - self.sample_number * 8) + "\r\n", 'DS line 4') # likely more complex than i have...
if self.tx_real_time:
self.send_data("transmit real-time data\r\n", 'DS line 5')
else:
self.send_data("do not transmit real-time data\r\n", 'DS line 5')
if self.output_salinity:
self.send_data("output salinity with each sample\r\n", 'DS line 6')
else:
self.send_data("do not output salinity with each sample\r\n", 'DS line 6')
if self.output_sound_velocity:
self.send_data("output sound velocity with each sample\r\n", 'DS line 7')
else:
self.send_data("do not output sound velocity with each sample\r\n", 'DS line 7')
if self.store_time:
self.send_data("store time with each sample\r\n", 'DS line 8')
else:
self.send_data("do not store time with each sample\r\n", 'DS line 8')
self.send_data("number of samples to average = " + str(self.navg) + "\r\n", 'DS line 9')
self.send_data("reference pressure = " + str(self.reference_preassure) + " db\r\n", 'DS line 10')
if self.serial_sync_mode:
self.send_data("serial sync mode enabled\r\n", 'DS line 11')
else:
self.send_data("serial sync mode disabled\r\n", 'DS line 11')
self.send_data("wait time after serial sync sampling = " + str(self.sync_wait) + " seconds\r\n", 'DS line 12')
if self.pump_installed:
self.send_data("internal pump is installed\r\n", 'DS line 13')
else:
self.send_data("internal pump is not installed\r\n", 'DS line 13')
self.send_data("temperature = " + str(7.54) + " deg C\r\n", 'DS line 14')
self.send_data("WARNING: LOW BATTERY VOLTAGE!!\r\n", 'DS line 15')
elif command_args[0] == 'mmddyy':
try:
if ((int(command_args[1][0:2]) > 0) and
(int(command_args[1][0:2]) < 13) and
(int(command_args[1][2:4]) > 0) and
(int(command_args[1][2:4]) < 32)):
self.date=command_args[1][0:6]
else:
self.send_data("***DATE RANGE ERROR***" + command_args[1] + "\r\n", 'mmddyy line 1')
except ValueError:
self.send_data("ERROR expected NUMERIC INPUT", 'mmddyy line 2')
elif command_args[0] == 'ddmmyy':
try:
if ((int(command_args[1][2:4]) > 0) and
(int(command_args[1][2:4]) < 13) and
(int(command_args[1][0:2]) > 0) and
(int(command_args[1][0:2]) < 32)):
self.date=command_args[1][2:4] + command_args[1][0:2] + command_args[1][4:6]
else:
self.send_data("***DATE RANGE ERROR***" + command_args[1] + "\r\n", 'ddmmyy line 1')
except ValueError:
self.send_data("ERROR expected NUMERIC INPUT", 'ddmmyy line 2')
elif command_args[0] == 'hhmmss':
try:
if ((int(command_args[1][0:2]) >= 0) and
(int(command_args[1][0:2]) < 24) and
(int(command_args[1][2:4]) >= 0) and
(int(command_args[1][2:4]) < 60) and
(int(command_args[1][4:6]) >= 0) and
(int(command_args[1][4:6]) < 60)):
self.time=command_args[1][0:6]
self.time_set_at = int(time.time())
else:
self.send_data("***TIME RANGE ERROR***" + command_args[1] + "\r\n", 'hhmmss line 1')
except ValueError:
self.send_data("ERROR expected NUMERIC INPUT", 'hhmmss line 2')
elif command_args[0] == 'outputsal':
if command_args[1] == 'y':
self.output_salinity = True
elif command_args[1] == 'n':
self.output_salinity = False
else:
self.send_data("***ERROR IT WAS A Y/N QUESTION*** " + command_args[1] + "\r\n", 'outputsal line 1')
elif command_args[0] == 'outputsv':
if command_args[1] == 'y':
self.output_sound_velocity = True
elif command_args[1] == 'n':
self.output_sound_velocity = False
else:
self.send_data("***ERROR IT WAS A Y/N QUESTION*** " + command_args[1] + "\r\n", 'outputsv line 1')
elif command_args[0] == 'format':
if command_args[1] == '0':
self.format = 0;
elif command_args[1] == '1':
self.format = 1;
elif command_args[1] == '2':
self.format = 2;
else:
self.send_data("***ERROR VALID SETTINGS ARE 0,1,2*** " + command_args[1] + "\r\n", 'format line 1')
elif command_args[0] == 'refpress':
self.reference_preassure = command_args[1]
elif command_args[0] == 'pumpinstalled':
if command_args[1] == 'y':
self.pump_installed = True
elif command_args[1] == 'n':
self.pump_installed = False
else:
self.send_data("***ERROR IT WAS A Y/N QUESTION*** " + command_args[1] + "\r\n", 'pumpinstalled line 1')
elif command_args[0] == 'samplenum':
try:
self.sample_number = int(command_args[1])
except ValueError:
self.send_data("ERROR expected INTEGER", 'samplenum line 1')
elif command_args[0] == 'qs':
self.sleep_state = True # will need to work out how to get out of sleep state later.
elif command_args[0] == 'interval':
try:
self.interval = int(command_args[1])
except ValueError:
self.send_data("ERROR expected INTEGER", 'interval line 1')
elif command_args[0] == 'navg':
try:
self.navg = int(command_args[1])
except ValueError:
self.send_data("ERROR expected INTEGER", 'navg line 1')
elif command_args[0] == 'storetime':
if command_args[1] == 'y':
self.store_time = True
elif command_args[1] == 'n':
self.store_time = False
else:
self.send_data("***ERROR IT WAS A Y/N QUESTION*** " + command_args[1] + "\r\n", 'storetime line 1')
elif command_args[0] == 'txrealtime':
if command_args[1] == 'y':
self.tx_real_time = True
# self.next_send = time.time() + self.streaming_rate
elif command_args[1] == 'n':
self.tx_real_time = False
self.next_send = None
else:
self.send_data("***ERROR IT WAS A Y/N QUESTION*** " + command_args[1] + "\r\n", 'txrealtime line 1')
elif command_args[0] == 'startnow':
self.send_data('start now\r\n', 'startnow line 1')
self.logging = True
self.locked = True
self.knock_count = 0
handled = False
self.next_send = time.time() + self.streaming_rate
elif data[0] == '\r':
#self.send_data('SBE 37-SMP\r\n', '\\ x1b line 1')
handled = False
if self.logging == False:
self.send_data('\r\nS>', '\\ r line 1')
self.locked = False
data = ""
elif data[:1] == '\r\n':
#self.send_data('SBE 37-SMP\r\n', '\\ x1b line 1')
handled = False
self.send_data('S> ', '\\ r \\ n line 1')
self.locked = False
elif command_args[0] == '\x1b':
#self.send_data('SBE 37-SMP\r\n', '\\ x1b line 1')
handled = False
self.send_data('S> ', '\\ x1b line 1')
self.locked = False
elif command_args[0] == 'startmmddyy':
try:
if ((int(command_args[1][0:2]) > 0) and
(int(command_args[1][0:2]) < 13) and
(int(command_args[1][2:4]) > 0) and
(int(command_args[1][2:4]) < 32)):
self.start_mmddyy=command_args[1][0:6]
else:
self.send_data("***DATE RANGE ERROR***" + command_args[1] + "\r\n", 'startmmddyy line 1')
except ValueError:
self.send_data("ERROR expected NUMERIC INPUT", 'startmmddyy line 2')
elif command_args[0] == 'startddmmyy':
try:
if ((int(command_args[1][2:4]) > 0) and
(int(command_args[1][2:4]) < 13) and
(int(command_args[1][0:2]) > 0) and
(int(command_args[1][0:2]) < 32)):
self.start_mmddyy=command_args[1][2:4] + command_args[1][0:2] + command_args[1][4:6]
else:
self.send_data("***DATE RANGE ERROR***" + command_args[1] + "\r\n", 'startddmmyy line 1')
except ValueError:
self.send_data("ERROR expected NUMERIC INPUT", 'startddmmyy line 2')
elif command_args[0] == 'starthhmmss':
try:
if ((int(command_args[1][0:2]) > 0) and
(int(command_args[1][0:2]) < 24) and
(int(command_args[1][2:4]) > 0) and
(int(command_args[1][2:4]) < 60) and
(int(command_args[1][4:6]) >= 0) and
(int(command_args[1][4:6]) < 60)):
self.start_time=command_args[1][0:6]
else:
self.send_data("***START TIME RANGE ERROR***" + command_args[1] + "\r\n", 'starthhmmss line 1')
except ValueError:
self.send_data("ERROR expected NUMERIC INPUT", 'starthhmmss line 2')
elif command_args[0] == 'startlater':
self.start_later = True
self.send_data('start time = ' + self.date[0:2] + ' ' + self.months[int(self.date[2:4])] + ' 20' + self.date[4:6] + ', ' + self.time[0:2] + ':' + self.time[2:4] + ':' + self.time[4:6] + '\r\n', 'startlater line 1')
elif command_args[0] == 'stop':
self.start_later = False
self.logging = False
self.send_data('S>\r\n', 'SPECIAL STOP PROMPT')
handled = False
elif command_args[0] in ('ts', 'tss', 'tsson', 'slt', 'sl'):
a,b,c,d,e = self.generate_data_values()
t = self.date[2:4] + ' ' + self.months[int(self.date[0:2])] + ' 20' + self.date[4:6] + ', ' + self.time[0:2] + ':' + self.time[2:4] + ':' + self.time[4:6]
self.send_data('\r\n{:.4f},{:.5f}, {:.3f}, {:.4f}, {:.3f}, %s\r\n'.format(a,b,c,d,e,t), command_args[0] + ' line 1')
elif command_args[0] in ('tsr','stlr'):
self.send_data('{:9.1f}, {:9.3f}, {:7.1f}\r\n'.format(random.uniform(200000, 500000), random.uniform(2000, 3000), random.uniform(-200, -300)), command_args[0] + ' line 1')
elif command_args[0] == 'syncmode':
if command_args[1] == 'y':
self.serial_sync_mode = True
elif command_args[1] == 'n':
self.serial_sync_mode = False
else:
self.send_data("***ERROR IT WAS A Y/N QUESTION*** " + command_args[1] + "\r\n", 'syncmode line 1')
elif command_args[0] == 'syncwait':
try:
if int(command_args[1]) >= 0 and int(command_args[1]) < 121:
self.sync_wait = int(command_args[1])
else:
self.send_data("*** ERROR INTEGER OUT OF RANGE (0 - 120)", 'syncwait line 1')
except ValueError:
self.send_data("*** ERROR expected INTEGER", 'syncwait line 2')
elif data[0:2] == "dd":
data = data[2:].rstrip('\r\n\r')
command_args = string.splitfields(data, ",")
try:
begin = int(command_args[0])
except ValueError:
self.send_data("*** begin ERROR expected INTEGER", 'dd line 1')
try:
end = int(command_args[1])
except ValueError:
self.send_data("*** end ERROR expected INTEGER", 'dd line 2')
self.send_data('start time = ' + self.date[0:2] + ' ' + self.months[int(self.date[2:4])] + ' 20' + self.date[4:6] + ' ' + self.time[0:2] + ':' + self.time[2:4] + ':' + self.time[4:6] + '\r\n', 'dd line 3')
self.send_data('sample interval = ' + str(self.interval) + ' seconds\r\n', 'dd line 4')
self.send_data('start sample number = ' + str(self.sample_number) + '\r\n\r\n', 'dd line 5')
for sample in range(begin, end):
self.send_data('{:8.4f},{:8.5f},{:9.3f},{:9.4f},{:9.3f}'.format(random.uniform(15, 25), random.uniform(0.001, 0.01), random.uniform(0.2, 0.9), random.uniform(0.01, 0.02), random.uniform(1000, 2000)) + ', ' + self.date[0:2] + ' ' + self.months[int(self.date[2:4])] + ' 20' + self.date[4:6] + ', ' + self.time[0:2] + ':' + self.time[2:4] + ':' + self.time[4:6] + '\r\n', 'dd line 6')
elif command_args[0] == "tt":
count = 100
while count > 0:
count -= 1
self.send_data('{:8.4f}\r\n'.format(random.uniform(15, 25)), 'tt line 1')
time.sleep(1)
data = self.get_data()
if data:
if data[0] == '\x1b':
count = 0
elif command_args[0] == "tc":
count = 100
while count > 0:
count -= 1
self.send_data('{:8.5f}\r\n'.format(random.uniform(0.001, 0.1)), 'tc line 1')
time.sleep(1)
data = self.get_data()
if data:
if data[0] == '\x1b':
count = 0
elif command_args[0] == "tp":
count = 100
while count > 0:
count -= 1
self.send_data('{:8.3f}\r\n'.format(random.uniform(-6.5, -8.2)), 'tp line 1')
time.sleep(1)
data = self.get_data()
if data:
if data[0] == '\x1b':
count = 0
elif command_args[0] == "ttr":
count = 100
while count > 0:
count -= 1
self.send_data('{:9.1f}\r\n'.format(random.uniform(361215, 361219)), 'ttr line 1')
time.sleep(1)
data = self.get_data()
if data:
if data[0] == '\x1b':
count = 0
elif command_args[0] == "tcr":
count = 100
while count > 0:
count -= 1
self.send_data('{:9.3f}\r\n'.format(random.uniform(2600, 2700)), 'tcr line 1')
time.sleep(1)
data = self.get_data()
if data:
if data[0] == '\x1b':
count = 0
elif command_args[0] == "tpr":
count = 100
while count > 0:
count -= 1
self.send_data('{:7.1f},{:6.1f}\r\n'.format(random.uniform(-250, -290),random.uniform(18.1,20.2)), 'tpr line 1')
time.sleep(1)
data = self.get_data()
if data:
if data[0] == '\x1b':
count = 0
elif command_args[0] == "tr":
count = 30
while count > 0:
count -= 1
self.send_data('rtcf = {:9.7f}\r\n'.format(random.uniform(1.0, 1.1)), 'tr line 1')
time.sleep(1)
data = self.get_data()
if data:
if data[0] == '\x1b':
count = 0
elif command_args[0] == "pumpon":
"""
NOP
"""
elif command_args[0] == "pumpoff":
"""
NOP
"""
elif command_args[0] == 'dc':
self.send_data("SBE37-SM V 2.6b 3464\r\n", 'dc line 1')
self.send_data("temperature: " + self.tcaldate + "\r\n", 'dc line 2')
self.send_data(" TA0 = " + '{0:.6e}'.format(self.ta0) + "\r\n", 'dc line 3')
self.send_data(" TA1 = " + '{0:.6e}'.format(self.ta1) + "\r\n", 'dc line 4')
self.send_data(" TA2 = " + '{0:.6e}'.format(self.ta2) + "\r\n", 'dc line 5')
self.send_data(" TA3 = " + '{0:.6e}'.format(self.ta3) + "\r\n", 'dc line 6')
self.send_data("conductivity: " + self.ccaldate + "\r\n", 'dc line 7')
self.send_data(" G = " + '{0:.6e}'.format(self.cg) + "\r\n", 'dc line 8')
self.send_data(" H = " + '{0:.6e}'.format(self.ch) + "\r\n", 'dc line 9')
self.send_data(" I = " + '{0:.6e}'.format(self.ci) + "\r\n", 'dc line 10')
self.send_data(" J = " + '{0:.6e}'.format(self.cj) + "\r\n", 'dc line 11')
self.send_data(" CPCOR = " + '{0:.6e}'.format(self.cpcor) + "\r\n", 'dc line 12')
self.send_data(" CTCOR = " + '{0:.6e}'.format(self.ctcor) + "\r\n", 'dc line 13')
self.send_data(" WBOTC = " + '{0:.6e}'.format(self.wbotc) + "\r\n", 'dc line 14')
self.send_data("pressure S/N 4955, range = " + str(random.uniform(10000, 11000)) + " psia: " + self.pcaldate + "\r\n", 'dc line 15')
self.send_data(" PA0 = " + '{0:.6e}'.format(self.pa0) + "\r\n", 'dc line 16')
self.send_data(" PA1 = " + '{0:.6e}'.format(self.pa1) + "\r\n", 'dc line 17')
self.send_data(" PA2 = " + '{0:.6e}'.format(self.pa2) + "\r\n", 'dc line 18')
self.send_data(" PTCA0 = " + '{0:.6e}'.format(self.ptca0) + "\r\n", 'dc line 19')
self.send_data(" PTCA1 = " + '{0:.6e}'.format(self.ptca1) + "\r\n", 'dc line 20')
self.send_data(" PTCA2 = " + '{0:.6e}'.format(self.ptca2) + "\r\n", 'dc line 21')
self.send_data(" PTCSB0 = " + '{0:.6e}'.format(self.ptcb0) + "\r\n", 'dc line 22')
self.send_data(" PTCSB1 = " + '{0:.6e}'.format(self.ptcb1) + "\r\n", 'dc line 23')
self.send_data(" PTCSB2 = " + '{0:.6e}'.format(self.ptcb2) + "\r\n", 'dc line 24')
self.send_data(" POFFSET = " + '{0:.6e}'.format(self.poffset) + "\r\n", 'dc line 25')
self.send_data("rtc: " + self.rcaldate + "\r\n", 'dc line 26')
self.send_data(" RTCA0 = " + '{0:.6e}'.format(self.rtca0) + "\r\n", 'dc line 27')
self.send_data(" RTCA1 = " + '{0:.6e}'.format(self.rtca1) + "\r\n", 'dc line 28')
self.send_data(" RTCA2 = " + '{0:.6e}'.format(self.rtca2) + "\r\n", 'dc line 29')
################################
# now the coefficient Commands #
################################
elif command_args[0] == 'tcaldate':
self.tcaldate=command_args[1] #take it on faith
elif command_args[0] == 'ta0':
try:
self.ta0 = float(command_args[1])
except:
self.send_data("? CMD\r\n", 'ta0 line 1')
elif command_args[0] == 'ta1':
try:
self.ta1 = float(command_args[1])
except:
self.send_data("? CMD\r\n", 'ta1 line 1')
elif command_args[0] == 'ta2':
try:
self.ta2 = float(command_args[1])
except:
self.send_data("? CMD\r\n", 'ta2 line 1')
elif command_args[0] == 'ta3':
try:
self.ta3 = float(command_args[1])
except:
self.send_data("? CMD\r\n", 'ta3 line 1')
elif command_args[0] == 'ccaldate':
self.ccaldate=command_args[1] #take it on faith
elif command_args[0] == 'cg':
try:
self.cg = float(command_args[1])
except:
self.send_data("? CMD\r\n", 'cg line 1')
elif command_args[0] == 'ch':
try:
self.ch = float(command_args[1])
except:
self.send_data("? CMD\r\n", 'ch line 1')
elif command_args[0] == 'ci':
try:
self.ci = float(command_args[1])
except:
self.send_data("? CMD\r\n", 'ci line 1')
elif command_args[0] == 'cj':
try:
self.cj = float(command_args[1])
except:
self.send_data("? CMD\r\n", 'cj line 1')
elif command_args[0] == 'wbotc':
try:
self.wbotc = float(command_args[1])
except:
self.send_data("? CMD\r\n", 'wbotc line 1')
elif command_args[0] == 'ctcor':
try:
self.ctcor = float(command_args[1])
except:
self.send_data("? CMD\r\n", 'ctcor line 1')
elif command_args[0] == 'cpcor':
try:
self.cpcor = float(command_args[1])
except:
self.send_data("? CMD\r\n", 'cpcor line 1')
elif command_args[0] == 'pcaldate':
self.pcaldate=command_args[1] #take it on faith
elif command_args[0] == 'pa0':
try:
self.pa0 = float(command_args[1])
except:
self.send_data("? CMD\r\n", 'pa0 line 1')
elif command_args[0] == 'pa1':
try:
self.pa1 = float(command_args[1])
except:
self.send_data("? CMD\r\n", 'pa1 line 1')
elif command_args[0] == 'pa2':
try:
self.pa2 = float(command_args[1])
except:
self.send_data("? CMD\r\n", 'pa2 line 1')
elif command_args[0] == 'ptca0':
try:
self.ptca0 = float(command_args[1])
except:
self.send_data("? CMD\r\n", 'ptca0 line 1')
elif command_args[0] == 'ptca1':
try:
self.ptca1 = float(command_args[1])
except:
self.send_data("? CMD\r\n", 'ptca1 line 1')
elif command_args[0] == 'ptca2':
try:
self.ptca2 = float(command_args[1])
except:
self.send_data("? CMD\r\n", 'ptca2 line 1')
elif command_args[0] == 'ptcb0':
try:
self.ptcb0 = float(command_args[1])
except:
self.send_data("? CMD\r\n", 'ptcb0 line 1')
elif command_args[0] == 'ptcb1':
try:
self.ptcb1 = float(command_args[1])
except:
self.send_data("? CMD\r\n", 'ptcb1 line 1')
elif command_args[0] == 'ptcb2':
try:
self.ptcb2 = float(command_args[1])
except:
self.send_data("? CMD\r\n", 'ptcb2 line 1')
elif command_args[0] == 'poffset':
try:
self.poffset = float(command_args[1])
except:
self.send_data("? CMD\r\n", 'poffset line 1')
elif command_args[0] == 'rcaldate':
self.rcaldate=command_args[1] #take it on faith
elif command_args[0] == 'rtca0':
try:
self.rtca0 = float(command_args[1])
except:
self.send_data("? CMD\r\n", 'rtca0 line 1')
elif command_args[0] == 'rtca1':
try:
self.rtca1 = float(command_args[1])
except:
self.send_data("? CMD\r\n", 'rtca1 line 1')
elif command_args[0] == 'rtca2':
try:
self.rtca2 = float(command_args[1])
except:
self.send_data("? CMD\r\n", 'rtca2 line 1')
else:
handled = False
self.send_data("? CMD\r\n", 'else line 1 RESPONSE TO ' + data)
if handled == True:
self.send_data("\r\nS>", 'default command prompt')
#------------------------------------------------------------------#
class SBE37_server(asyncore.dispatcher):
    """Listening TCP server (Python 2 / asyncore).

    Accepts client connections and hands each one to a fresh instance of
    ``sim_class`` running on its own thread, so many simulated sensors can
    be served concurrently.
    """
    def __init__(self, sim_class, host, port, rate):
        # sim_class: simulator class (e.g. SBE37_random); instantiating it
        #            runs the whole client session (see handle_accept).
        # rate:      seconds between messages when the simulator streams data.
        asyncore.dispatcher.__init__(self)
        self.connection_count = 0
        self.sim_class = sim_class
        self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
        self.set_reuse_addr()
        self.bind((host, port))
        self.listen(5)
        self.message_rate = rate
    def handle_accept(self):
        """Called by asyncore when a client connects; spawn a simulator thread."""
        pair = self.accept()
        if pair is None:
            # accept() can legitimately return None (spurious readiness).
            pass
        else:
            sock, addr = pair
            self.connection_count += 1 # not threadsafe -- could wind up with two threads and same count value
            print '%3d *** new connection from %r' % (self.connection_count,addr)
            try:
                # The simulator's constructor receives the socket, the thread
                # module, the streaming rate and a connection id.
                thread.start_new_thread(self.sim_class, (sock, thread, self.message_rate, self.connection_count))
            except Exception, e:
                print "%3d *** exception starting thread: %s"%(self.connection_count,e)
def usage():
    """Print command-line help for the simulator (Python 2 print statements)."""
    # NOTE(review): the help text omits the -c/--class and -r/--rate options
    # that get_opts() accepts -- consider documenting them here as well.
    print "SBE37-SMP Simulator:\n"
    print "This program simulates a SBE37-SMP sensor deployed by \nbeing connected to a MOXA NPort 5410 Serial Device Server."
    print "Available options are:"
    print "  -h, --help : Displays this message"
    print "  -p, --port= : Sets the port to listen on (>1024, default = %s)." % default_port
def get_opts():
    """Parse command-line options.

    Returns a dict with keys 'rate', 'port' and 'simulator', pre-filled
    with module-level defaults.

    Fixes relative to the original:
      * 'r:' was missing from the short-option string, so "-r" raised
        getopt.GetoptError instead of being parsed.
      * the rate was stored under the unused key 'message_rate' while
        main() reads args['rate'], so --rate was silently ignored.
      * the "-r" test was a second independent "if" with its own else, so
        a valid "-c" option also printed "unknown option".
    """
    opts, args = getopt.getopt(sys.argv[1:], "c:p:r:h", ["class=", "port=", "rate="])
    out = {'rate': default_message_rate, 'port': default_port, 'simulator': SBE37_random}
    for o, a in opts:
        if o in ("-c", "--class"):
            # Look the simulator class up by name in this module.
            out['simulator'] = getattr(sys.modules[__name__], a)
        elif o in ("-r", "--rate"):
            out['rate'] = int(a)
        elif o in ("-p", "--port"):
            out['port'] = int(a)
        else:
            print('unknown option: ' + o)
    return out
def main():
    """Parse options, start the TCP server and run the asyncore event loop."""
    try:
        args = get_opts()
    except Exception as e:
        # Bad/unparseable options: show the error plus usage, then exit.
        print 'Exception: %s'%e
        usage()
        sys.exit()
    print 'using args: %r'%args
    # Instantiating the dispatcher registers it with asyncore's socket map;
    # no reference needs to be kept.
    SBE37_server(sim_class=args['simulator'], host='', port=args['port'], rate=args['rate'])
    try:
        asyncore.loop()
    except:
        sys.exit() # Be silent when ^c pressed
################################################################################################
##
## THESE CLASSES generate different sample values for the simulator
#
# return tuple of: temperature, conductivity, pressure, salinity, sound velocity
class SBE37_random(SBE37):
    """Simulator that draws every channel uniformly over its full range."""
    def generate_data_values(self):
        # Five channels, in the documented order:
        # temperature, conductivity, pressure, salinity, sound velocity.
        temperature = random.uniform(-10.0, 100.0)
        conductivity = random.uniform(0.0, 100.0)
        pressure = random.uniform(0.0, 1000.0)
        salinity = random.uniform(0.1, 40.0)
        sound_velocity = random.uniform(1505, 1507)
        return (temperature, conductivity, pressure, salinity, sound_velocity)
class SBE37_High(SBE37):
    """Simulator confined to the upper half of each channel's range."""
    def generate_data_values(self):
        # Order matters for reproducibility of the RNG stream:
        # temperature, conductivity, pressure, salinity, sound velocity.
        temperature = random.uniform(45.0, 100.0)
        conductivity = random.uniform(50.0, 100.0)
        pressure = random.uniform(500.0, 1000.0)
        salinity = random.uniform(20.05, 40.0)
        sound_velocity = random.uniform(1506.0, 1507.0)
        return (temperature, conductivity, pressure, salinity, sound_velocity)
class SBE37_Low(SBE37):
    """Simulator confined to the lower half of each channel's range."""
    def generate_data_values(self):
        # Order matters for reproducibility of the RNG stream:
        # temperature, conductivity, pressure, salinity, sound velocity.
        temperature = random.uniform(-10.0, 45.0)
        conductivity = random.uniform(0.0, 50.0)
        pressure = random.uniform(0.0, 500.0)
        salinity = random.uniform(0.1, 20.05)
        sound_velocity = random.uniform(1505.0, 1506.0)
        return (temperature, conductivity, pressure, salinity, sound_velocity)
import math
def my_sin(time, Amin, Amax):
    """Map sin(time) from its natural [-1, 1] range onto [Amin, Amax].

    time  -- phase in radians (parameter name kept for interface
             compatibility; it shadows the stdlib ``time`` module only
             inside this function)
    Amin  -- value returned when sin(time) == -1
    Amax  -- value returned when sin(time) == +1
    """
    sin_val = math.sin(time)
    span = Amax - Amin  # renamed from 'range', which shadowed the builtin
    return (sin_val + 1.0) * span / 2.0 + Amin
# vary as sine wave over time
class SBE37_sine(SBE37):
    """Simulator whose five channels all follow one shared sine wave."""
    sinwave_time = 0.0  # phase; the += below creates an instance attribute on first call
    def generate_data_values(self):
        self.sinwave_time += 0.2
        t = self.sinwave_time
        # (lo, hi) bounds per channel: temperature, conductivity, pressure,
        # salinity, sound velocity.
        bounds = ((-10.0, 100.0), (0.0, 100.0), (0.0, 1000.0), (0.1, 40.0), (1505, 1507))
        return tuple(my_sin(t, lo, hi) for lo, hi in bounds)
# narrower, valid range to help ensure density can be calculated
class SBE37_midrange(SBE37):
    """Sine-wave simulator with narrower, mid-range channel bounds."""
    sinwave_time = 0.0  # phase; the += below creates an instance attribute on first call
    def generate_data_values(self):
        self.sinwave_time += 0.2
        t = self.sinwave_time
        # (lo, hi) bounds per channel: temperature, conductivity, pressure,
        # salinity, sound velocity.
        bounds = ((5.0, 15.0), (2.5, 4.5), (2000.0, 4000.0), (0.1, 40.0), (1505, 1507))
        return tuple(my_sin(t, lo, hi) for lo, hi in bounds)
#> Valid ranges for conductivity are 0-7 S/m. Typical values we've seen off the Oregon coast are ~35 mS/cm, which converts to ~3.5 S/m.
#>
#> Valid ranges for temperature are -2-40 deg_C. Typical values we've seen off the Oregon coast are between 5 and 20 deg_C. 12 deg_C would be absolutely reasonable.
#>
#> Valid ranges for pressure are 0-7000 dbar. Really, just choose a depth.
#>
#> I would recommend the simulator produce at C of 3.5 S/m, a T of 12 deg_C and a depth of 10 dbar. Apply sine wave functions with some small fraction of random white noise and let it rip.
#>
################################################################################################
# Module-level defaults consumed by get_opts()/main().
default_port = 4001 # TCP port to run on.
default_message_rate = 5 # 5 sec between messages when streaming
# NOTE(review): default_sim appears unused -- get_opts() hard-codes
# SBE37_random as its default simulator.
default_sim=SBE37_random
if __name__ == '__main__':
    main()
| 44.94509 | 405 | 0.453398 | 39,378 | 0.92517 | 0 | 0 | 0 | 0 | 0 | 0 | 10,079 | 0.236802 |
ffc857a75ba7aa5ef44304f6675fe0e78e0727a5 | 976 | py | Python | experiments/centralisation/centralisation.py | MichaelAllen1966/2105_london_acute_stroke_unit | 56b710c58b5b6bdf5c03e3fb9ec65c53cd5336ff | [
"MIT"
] | null | null | null | experiments/centralisation/centralisation.py | MichaelAllen1966/2105_london_acute_stroke_unit | 56b710c58b5b6bdf5c03e3fb9ec65c53cd5336ff | [
"MIT"
] | null | null | null | experiments/centralisation/centralisation.py | MichaelAllen1966/2105_london_acute_stroke_unit | 56b710c58b5b6bdf5c03e3fb9ec65c53cd5336ff | [
"MIT"
] | null | null | null | import matplotlib.pyplot as plt
# Plot two side-by-side bar charts from the simulation results CSV:
# (1) average number of patients waiting for an acute stroke unit (ASU) bed
# and (2) average waiting time, for 1-4 ASUs per region.
import numpy as np
import pandas as pd
data = pd.read_csv('results.csv')
labels = [1,2,3,4]  # ASUs per region (x-axis categories)
width = 0.75
x = np.arange(len(labels))  # the label locations
fig = plt.figure(figsize=(9,6))
# Number people waiting
ax1 = fig.add_subplot(121)
y1 = data['av_waiting'].values.flatten()
waiting = ax1.bar(x, y1, width, color='b')
ax1.set_ylabel('Average number of patients waiting for ASU bed')
ax1.set_xlabel('ASUs per region')
ax1.set_title('Average number of patients waiting\nfor ASU bed')
ax1.set_xticks(x)
ax1.set_xticklabels(labels)
# Average waiting time (days) for patients who had to wait.
ax2 = fig.add_subplot(122)
y2 = data['av_waiting_days'].values.flatten()
days = ax2.bar(x, y2, width, color='r')
ax2.set_ylabel('Average waiting time (days)')
ax2.set_xlabel('ASUs per region')
ax2.set_title('Average waiting time\n(days, for patients who have to wait)')
ax2.set_xticks(x)
ax2.set_xticklabels(labels)
plt.tight_layout(pad=2)
plt.savefig('centralisation.png', dpi=300)
plt.show()
| 21.217391 | 76 | 0.731557 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 334 | 0.342213 |
ffc9b886976a36f8168389759472ba04ff485037 | 10,360 | py | Python | Case_Study_1.py | Amritha29/Stout_DDA_FULL_STACK_21.github.io | 89be2324468dfba2ba9afb378881c6e9e460696b | [
"CC-BY-4.0"
] | null | null | null | Case_Study_1.py | Amritha29/Stout_DDA_FULL_STACK_21.github.io | 89be2324468dfba2ba9afb378881c6e9e460696b | [
"CC-BY-4.0"
] | null | null | null | Case_Study_1.py | Amritha29/Stout_DDA_FULL_STACK_21.github.io | 89be2324468dfba2ba9afb378881c6e9e460696b | [
"CC-BY-4.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Spyder Editor
Amritha Subburayan code for STOUT DDA FULL STACK CASE STUDIES
"""
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
from sklearn import preprocessing
import sklearn.metrics as sm
data = pd.read_csv(r'//Users//amrithasubburayan//Downloads//loans_full_schema.csv')
data.info()
data.describe()
#Checking missing values
data.isna().sum()
#removing emp_title, state , num_accounts_120d_past_due , num_accounts_30d_past_due, tax_liens, public_record_bankrupt,
# paid_late_fees , total_collection_amount_ever , current_accounts_delinq , num_historical_failed_to_pay
# num_collections_last_12m, delinq_2y
# check corr and remove this num_mort_accounts
#storing data to other temp
# NOTE(review): this is an alias, not a copy -- later inplace drops on data2
# also mutate data. Use data.copy() if the original must stay intact.
data2 = data
# DATA DESCRIPTION AND ISSUES :
#There are two issues in this dataset :
#1) Missing values 2) Multi-collinearity
#Missing values can be found in the following rows:
#1) emp_title 2) emp_length 3) annual_income_joint 4) verification_income_joint
# 5) debt_to_income_joint 6) months_since_last_delinq 7) months_since_90d_late
#8) months_since_last_credit_inquiry 9) num_accounts_120d_past_due
#Multicollinearity can be found between these columns :
#1) installment and loan amount - 0.94 2) balance and loan amount - 0.93
# 3) annula income joint and total credit limit - 0.54
#4) Inquires last 12 m and months since last credit inq - 0.51
#5) total credit lines and open credit lines - 0.76 6)
#num satisfactory acc and total credit lines - 0.75
#7) total credit lines and num total cc accounts - 0.77 8)
#total credit lines and num open cc accounts - 0.62
#Visualizations
# Correlation heatmap over all numeric columns.
plt.figure(figsize=(40,35))
sns.heatmap(data2.corr(), annot = True, cmap = "RdYlGn")
plt.show()
data2['loan_purpose'].value_counts().plot(kind='bar',color=['gray','red','blue','green','purple','yellow','black']).set_title('Loan Purpose')
data2.groupby('homeownership').verified_income.value_counts().unstack(0).plot.bar()
data2.groupby('homeownership').application_type.value_counts().unstack(0).plot(kind="pie",subplots=True, shadow = True,startangle=90,figsize=(15,10),autopct='%1.1f%%')
plt.scatter(data2['installment'],data2['loan_amount'])
# Mean interest rate per employment length; key 7.0 is skipped -- presumably
# an unwanted/imputed bucket, TODO confirm against the data.
d = data2.groupby('emp_length')
s=[]
for key,item in d:
    if(key!=7.0):
        s.append(d.get_group(key)['interest_rate'].mean())
dict1={"emp_length":[0,1,2,3,4,5,6,8,9,10],"int_rate":s}
plt.plot(dict1['emp_length'],s)
df= data2['application_type']
data2.groupby('application_type').loan_purpose.value_counts()
data2.groupby('application_type').loan_purpose.value_counts().unstack(0).plot(kind="pie",subplots=True, shadow = True,startangle=90,figsize=(25,20),autopct='%1.1f%%')
#Replacing missing rows
# Columns are filled in the same order as before, so debt_to_income is
# zero-filled *before* it is used to fill debt_to_income_joint.
d = data2.groupby('application_type').loan_purpose.value_counts()
#data2["verification_income_joint"] = data2['verification_income_joint'].fillna('Not Verified')
# BUGFIX/cleanup: the original filled these columns with per-row Python
# loops using chained assignment (data2['col'][i] = ...), which triggers
# pandas' SettingWithCopyWarning and is O(n) interpreter work. A vectorised
# fillna(other_column) does the same fill (aligned on the default
# RangeIndex produced by read_csv) in one pass.
data2['verification_income_joint'] = data2['verification_income_joint'].fillna(data2['verified_income'])
data2["debt_to_income"] = data2['debt_to_income'].fillna(0)
#combining annual income with joint annual income
data2['annual_income_joint'] = data2['annual_income_joint'].fillna(data2['annual_income'])
#combining debt income with joint debt income
data2['debt_to_income_joint'] = data2['debt_to_income_joint'].fillna(data2['debt_to_income'])
## Replacing with mean values
data2["months_since_last_credit_inquiry"] = data2['months_since_last_credit_inquiry'].fillna(np.mean(data2["months_since_last_credit_inquiry"]))
data2["emp_length"] = data2['emp_length'].fillna(np.mean(data2["emp_length"]))
#Removing unwanted columns because it has more 0 values which will not impact on building a model
# Cleanup: the original issued 34 separate inplace drop calls; the same
# columns are removed here with two vectorised drop(columns=...) calls.
# NOTE(review): data2 is the same object as `data` (data2 = data above),
# so these inplace drops mutate the original frame as well.
low_signal_cols = [
    "emp_title", "state", "num_accounts_120d_past_due",
    "num_accounts_30d_past_due", "tax_liens", "public_record_bankrupt",
    "paid_late_fees", "total_collection_amount_ever",
    "current_accounts_delinq", "num_historical_failed_to_pay",
    "num_collections_last_12m", "delinq_2y", "verified_income",
    "annual_income", "debt_to_income", "months_since_90d_late",
    "months_since_last_delinq", "issue_month", "initial_listing_status",
    "disbursement_method", "grade",
]
data2.drop(columns=low_signal_cols, inplace=True)
#removing columns based on correlation
correlated_cols = [
    "total_credit_limit", "current_installment_accounts",
    "accounts_opened_24m", "open_credit_lines", "loan_amount", "balance",
    "paid_principal", "num_satisfactory_accounts", "total_credit_lines",
    "num_active_debit_accounts", "num_open_cc_accounts", "installment",
    "num_total_cc_accounts",
]
data2.drop(columns=correlated_cols, inplace=True)
#Removing Outliers based on its Quartile and Max Value
# data5 starts as an alias of data2; each .loc filter below rebinds data5 to
# a new, smaller frame, so data2 itself is left unfiltered.
data5 = data2
sns.boxplot(data5['paid_interest'])
data5 = data5.loc[data5["inquiries_last_12m"] < 15]
data5 = data5.loc[data5["total_credit_utilized"] < 400000]
data5 = data5.loc[data5["months_since_last_credit_inquiry"] < 20]
data5 = data5.loc[data5["total_debit_limit"] < 220000]
data5 = data5.loc[data5["num_cc_carrying_balance"] < 20]
data5 = data5.loc[data5["num_mort_accounts"] < 10]
data5 = data5.loc[data5["paid_total"] < 35000]
data5 = data5.loc[data5["paid_interest"] < 3000]
# Encoding Categorical Data using LabelEncoder
# NOTE(review): one LabelEncoder instance is refit per column; each column's
# transform happens via its own fit_transform call, so the encodings are
# valid, but `le` only retains the mapping of the last column fitted.
le = preprocessing.LabelEncoder()
data5['sub_grade'] = le.fit_transform(data5['sub_grade'].values)
data5['verification_income_joint'] = le.fit_transform(data5['verification_income_joint'].values)
data5['loan_status'] = le.fit_transform(data5['loan_status'].values)
data5['loan_purpose'] = le.fit_transform(data5['loan_purpose'].values)
data5['application_type'] = le.fit_transform(data5['application_type'].values)
data5['homeownership'] = le.fit_transform(data5['homeownership'].values)
# Reorder columns so the target (interest_rate) ends up last.
data5 = data5.reindex(columns=['emp_length', 'homeownership', 'annual_income_joint',
       'verification_income_joint', 'debt_to_income_joint',
       'earliest_credit_line', 'inquiries_last_12m', 'total_credit_utilized',
       'months_since_last_credit_inquiry', 'total_debit_limit',
       'num_cc_carrying_balance', 'num_mort_accounts',
       'account_never_delinq_percent', 'loan_purpose', 'application_type',
       'term', 'sub_grade', 'loan_status', 'paid_total',
       'paid_interest', 'interest_rate'])
# Features = every column except the last; target y = interest_rate.
X = data5.iloc[:, :-1].values
y = data5.iloc[:, -1].values
y = y.reshape(len(y),1)
#Feature Scaling
from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
sc_y = StandardScaler()
X = sc_X.fit_transform(X)
y = sc_y.fit_transform(y)
#Train Test Split
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)
#Modelling the Data
#Support Vector Regression
from sklearn.svm import SVR
regressor_SVM = SVR(kernel = 'rbf')
regressor_SVM.fit(X_train, y_train)
#For Training Data
SVR_train_pred = regressor_SVM.predict(X_train)
# BUGFIX: r2_score was called without ever being imported (only
# `import sklearn.metrics as sm` exists at the top of the file), so running
# the script top-to-bottom raised NameError. All R^2 computations now go
# through the existing sm alias.
score2 = sm.r2_score(y_train, SVR_train_pred)
score2
print("Mean absolute error =", round(sm.mean_absolute_error(y_train, SVR_train_pred), 2))
print("Mean squared error =", round(sm.mean_squared_error(y_train, SVR_train_pred), 2))
print("Median absolute error =", round(sm.median_absolute_error(y_train, SVR_train_pred), 2))
print("Explain variance score =", round(sm.explained_variance_score(y_train, SVR_train_pred), 2))
#For Testing data
SVR_test_pred = regressor_SVM.predict(X_test)
score3 = sm.r2_score(y_test, SVR_test_pred)
score3
print("Mean absolute error =", round(sm.mean_absolute_error(y_test, SVR_test_pred), 2))
print("Mean squared error =", round(sm.mean_squared_error(y_test, SVR_test_pred), 2))
print("Median absolute error =", round(sm.median_absolute_error(y_test, SVR_test_pred), 2))
print("Explain variance score =", round(sm.explained_variance_score(y_test, SVR_test_pred), 2))
#Random Forest Model
from sklearn.ensemble import RandomForestRegressor
regressor1 = RandomForestRegressor(n_estimators = 10, random_state = 0)
regressor1.fit(X_train, y_train)
#For Training Data
random_train_pred = regressor1.predict(X_train)
score1 = sm.r2_score(y_train, random_train_pred)
score1
print("Mean absolute error =", round(sm.mean_absolute_error(y_train, random_train_pred), 2))
print("Mean squared error =", round(sm.mean_squared_error(y_train, random_train_pred), 2))
print("Median absolute error =", round(sm.median_absolute_error(y_train, random_train_pred), 2))
print("Explain variance score =", round(sm.explained_variance_score(y_train, random_train_pred), 2))
#For Testing Data
random_test_pred = regressor1.predict(X_test)
score = sm.r2_score(y_test, random_test_pred)
score
print("Mean absolute error =", round(sm.mean_absolute_error(y_test, random_test_pred), 2))
print("Mean squared error =", round(sm.mean_squared_error(y_test, random_test_pred), 2))
print("Median absolute error =", round(sm.median_absolute_error(y_test, random_test_pred), 2))
print("Explain variance score =", round(sm.explained_variance_score(y_test, random_test_pred), 2))
| 32.888889 | 167 | 0.754826 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,598 | 0.443822 |
ffcbaba69ba29dbe70293f1d332c038a6aaf91b9 | 1,373 | py | Python | datastore/__init__.py | Swixx/py-datastore | dfa1f9dcc3cc3beac3c3e79d085cb6e89da97a1c | [
"MIT"
] | 6 | 2019-08-04T04:11:36.000Z | 2020-02-20T17:10:26.000Z | datastore/__init__.py | Swixx/py-datastore | dfa1f9dcc3cc3beac3c3e79d085cb6e89da97a1c | [
"MIT"
] | 23 | 2019-09-17T11:35:06.000Z | 2020-04-07T16:18:15.000Z | datastore/__init__.py | Swixx/py-datastore | dfa1f9dcc3cc3beac3c3e79d085cb6e89da97a1c | [
"MIT"
] | 6 | 2019-08-04T02:02:25.000Z | 2020-03-01T15:43:41.000Z | """
Datastore is a generic layer of abstraction for data store and database access.
It is a **simple** API with the aim to enable application development in a
datastore-agnostic way, allowing datastores to be swapped seamlessly without
changing application code. Thus, one can leverage different datastores with
different strengths without committing the application to one datastore
throughout its lifetime.
"""
__version__ = "0.3.6"
__author__ = "Juan Batiz-Benet, Alexander Schlarb"
__email__ = "juan@benet.ai, alexander@ninetailed.ninja"
__all__ = (
"Key", "Namespace",
"BinaryNullDatastore", "BinaryDictDatastore",
"ObjectNullDatastore", "ObjectDictDatastore",
"Query", "Cursor",
"SerializerAdapter",
"abc", "typing", "util"
)
# import core.key
from .core.key import Key
from .core.key import Namespace
# import core.binarystore, core.objectstore
from .core.binarystore import NullDatastore as BinaryNullDatastore
from .core.binarystore import DictDatastore as BinaryDictDatastore
from .core.objectstore import NullDatastore as ObjectNullDatastore
from .core.objectstore import DictDatastore as ObjectDictDatastore
# import core.query
from .core.query import Query
from .core.query import Cursor
# import core.serialize
from .core.serialize import SerializerAdapter
### Exposed submodules ###
from . import abc
from . import typing
from . import util
| 29.212766 | 79 | 0.79024 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 780 | 0.568099 |
ffce3c914809fe508a87b5cc18e2cdab125e42d4 | 402 | py | Python | public_goods_str_nonoise/tests.py | bocchan/costly | ba52f82e36e28012a63a78805963bdf384679955 | [
"BSD-3-Clause"
] | null | null | null | public_goods_str_nonoise/tests.py | bocchan/costly | ba52f82e36e28012a63a78805963bdf384679955 | [
"BSD-3-Clause"
] | null | null | null | public_goods_str_nonoise/tests.py | bocchan/costly | ba52f82e36e28012a63a78805963bdf384679955 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import division
import random
from otree.common import Currency as c, currency_range
from . import views
from ._builtin import Bot
from .models import Constants
class PlayerBot(Bot):
    """Bot that plays one round"""
    def play_round(self):
        # Submit the app's pages in the same order a human participant
        # sees them, with default form values.
        self.submit(views.MyPage)
        self.submit(views.Results)
    def validate_play(self):
        # No custom post-play validation for this app.
        pass
| 18.272727 | 54 | 0.691542 | 195 | 0.485075 | 0 | 0 | 0 | 0 | 0 | 0 | 53 | 0.131841 |
ffcf7b955b11391d80d86773ca0338d0d81e1b2c | 709 | py | Python | Dataset/Leetcode/test/56/463.py | kkcookies99/UAST | fff81885aa07901786141a71e5600a08d7cb4868 | [
"MIT"
] | null | null | null | Dataset/Leetcode/test/56/463.py | kkcookies99/UAST | fff81885aa07901786141a71e5600a08d7cb4868 | [
"MIT"
] | null | null | null | Dataset/Leetcode/test/56/463.py | kkcookies99/UAST | fff81885aa07901786141a71e5600a08d7cb4868 | [
"MIT"
class Solution:
    def XXX(self, intervals: List[List[int]]) -> List[List[int]]:
        """Merge overlapping intervals and return the merged list.

        Sorts `intervals` in place by start point, then folds each
        interval into the running result (LeetCode 56 "Merge Intervals").
        """
        if len(intervals) == 1:
            return intervals
        intervals.sort()
        merged = [intervals[0]]
        for current in intervals[1:]:
            last = merged.pop()
            if current[0] <= last[1] <= current[1]:
                # Partial overlap: extend the previous interval to
                # current's end.
                merged.append([last[0], current[1]])
            elif last[1] > current[1]:
                # `current` lies entirely inside `last`: keep only `last`.
                merged.append(last)
            else:
                # Disjoint: keep both intervals.
                merged.extend([last, current])
        return merged
ffd1926ccd96f4b70e990d54bad23c4b64c849e9 | 2,531 | py | Python | cloudianapi/tools/statistics.py | romerojunior/cloudian-api | f17b45653a0e3e27a78d0d6bdc094ec6ab521550 | [
"Apache-2.0"
] | 11 | 2017-11-01T17:48:10.000Z | 2020-08-25T04:29:17.000Z | cloudianapi/tools/statistics.py | romerojunior/cloudian-api | f17b45653a0e3e27a78d0d6bdc094ec6ab521550 | [
"Apache-2.0"
] | 5 | 2017-11-10T12:46:44.000Z | 2019-09-18T07:18:19.000Z | cloudianapi/tools/statistics.py | romerojunior/cloudian-api | f17b45653a0e3e27a78d0d6bdc094ec6ab521550 | [
"Apache-2.0"
] | 7 | 2018-01-26T20:08:37.000Z | 2021-05-26T14:32:06.000Z | #!/usr/bin/env python
# -*- coding:utf8 -*-
# Copyright 2017, Schuberg Philis BV
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Romero Galiza Jr. - rgaliza@schubergphilis.com
""" This is not part of the Admin API, but it incorporates additional tooling
to support statistical analysis of monitored data within a cluster, data center
or node """
def get_hs_used_kb(node):
    """ Receives a node monitor dict and returns a list containing the
    used disk space in KB for each hyperstore disk.

    Usage is reported as a signed string, so it is normalized to a
    non-negative integer.

    :param node: an iterable object
    :type node: dict
    :rtype: list
    """
    if 'disksInfo' not in node:
        raise TypeError('Unsupported input.')

    # Keep only HyperStore ('HS') disks; a comprehension replaces the
    # original filter(lambda ...) over a generator.
    return [abs(int(disk['diskUsedKb']))
            for disk in node['disksInfo']['disks']
            if 'HS' in disk['storageUse']]


def disk_avg_abs_deviation(node):
    """ Returns the average absolute deviation for a given set of disks of a
    given node based entirely on used capacity (expressed in KB).

    Particularly useful if you want to visualize the average difference
    between all disks in a given node. The closer the result is to zero the
    better (less deviation = balanced usage).

    Returns 0 for malformed input or when the node reports no HyperStore
    disks (the original raised ZeroDivisionError in the empty case).

    :param node: an iterable object
    :type node: dict
    :rtype: number (0 when no data is available)
    """
    try:
        disk_usage = get_hs_used_kb(node)
    except TypeError:
        return 0

    if not disk_usage:
        # No HyperStore disks reported: nothing to average.
        return 0

    mean = (sum(disk_usage) / len(disk_usage))
    deviation = [abs(kb_used - mean) for kb_used in disk_usage]
    return sum(deviation)/len(deviation)
| 34.202703 | 79 | 0.66772 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,971 | 0.778744 |
ffd4de322115b22ae4e36e0be2d07a40743376b4 | 1,340 | py | Python | users/models.py | connorgannaway/dockmate | 040d44cac896aabc1488f3ed9d59b417e20719d8 | [
"MIT"
] | null | null | null | users/models.py | connorgannaway/dockmate | 040d44cac896aabc1488f3ed9d59b417e20719d8 | [
"MIT"
] | null | null | null | users/models.py | connorgannaway/dockmate | 040d44cac896aabc1488f3ed9d59b417e20719d8 | [
"MIT"
] | null | null | null | from os import name
from django.db import models
from django.contrib.auth.models import User
from PIL import Image
#Model classes are tables objects in a database.
#each variable is a column and its datatype.
#__str__ method defines the name of a object (row) in a database table
#profile model is meant to be used as an extension to the User model
#this is so users can have a profile picture and be connected to a company
class Company(models.Model):
    """A company that user profiles can be linked to."""
    # Display name; unique so companies can be looked up by name.
    name = models.CharField(max_length=50, unique=True)
    # Short unique code -- presumably entered by users to join the
    # company; NOTE(review): confirm against the signup flow.
    key = models.CharField(max_length=12, unique=True)
    def __str__(self):
        return self.name
class Profile(models.Model):
    """Extension of the built-in User: avatar picture and company link."""
    # One profile per user; deleting the user deletes the profile.
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    # Avatar image; falls back to a bundled default.
    picture = models.ImageField(default='default.jpg', upload_to='profile_pics')
    # Optional company membership (blank for users without one).
    company = models.OneToOneField(Company, on_delete=models.CASCADE, null=True, blank=True)
    def __str__(self):
        return f"{self.user.username}'s Profile"
    #overriding save method to resize image before saving.
    #used for local file systems, does not work with AWS S3
    """ def save(self, *args, **kwargs):
        super().save(*args, **kwargs)
        image = Image.open(self.picture.path)
        if image.width > 300 or image.height > 300:
            image.thumbnail((300, 300))
            image.save(self.picture.path) """
| 37.222222 | 92 | 0.709701 | 647 | 0.482836 | 0 | 0 | 0 | 0 | 0 | 0 | 735 | 0.548507 |
ffd544a103259a41233ed3e0af2e2d453a43568d | 1,446 | py | Python | E_ledproject.py | randomstring/raspberrypi | fe226ce33f116480bfea8f258fdffa1fd96e379c | [
"MIT"
] | null | null | null | E_ledproject.py | randomstring/raspberrypi | fe226ce33f116480bfea8f258fdffa1fd96e379c | [
"MIT"
] | null | null | null | E_ledproject.py | randomstring/raspberrypi | fe226ce33f116480bfea8f258fdffa1fd96e379c | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import RPi.GPIO as GPIO
GPIO.setwarnings(False)
# LED color name -> logical GPIO number; these values are translated to
# BCM pin numbers through gpio_to_bcm below.
led_color_gpio = {
    'yellow': 0,
    'orange': 2,
    'red': 3,
    'green': 4,
    'blue': 5,
    'white': 6
}
# Push-button color -> logical GPIO number (declared but not used by the
# interactive loop in this script).
buttons_gpio = {
    'red': 28,
    'blue': 29,
}
# Translation table from the logical numbering above to BCM pin numbers,
# matching GPIO.setmode(GPIO.BCM) used later in the script.
gpio_to_bcm = {
    0: 17,
    1: 18,
    2: 27,
    3: 22,
    4: 23,
    5: 24,
    6: 25,
    21: 5,
    22: 6,
    23: 13,
    24: 19,
    25: 26,
    26: 12,
    27: 16,
    28: 20,
    29: 21,
}
def led_color(color, on):
    """Switch the named LED: truthy `on` lights it, falsy turns it off."""
    if color not in led_color_gpio:
        print('No LEDs of color {0}'.format(color))
        return
    bcm_pin = gpio_to_bcm[led_color_gpio[color]]
    # A low output level lights the LED, so the pin level is the inverse
    # of `on` -- the LEDs appear to be wired active-low (TODO confirm).
    GPIO.output(bcm_pin, not on)
# Use Broadcom (BCM) pin numbering for all GPIO calls.
GPIO.setmode(GPIO.BCM)
# Configure every LED pin as an output and drive it high.
# NOTE(review): led_color() writes False to turn an LED on, so True here
# appears to mean "all LEDs off" (active-low wiring) -- confirm.
for gpio in led_color_gpio.values():
    bcm_pin = gpio_to_bcm[gpio]
    GPIO.setup(bcm_pin, GPIO.OUT)
    GPIO.output(bcm_pin, True)
print("Type 'quit' to quit")
# Interactive loop: read "<color> [on|off|<int>]" commands until 'quit'.
# NOTE(review): raw_input is Python 2 only; under Python 3 this raises
# NameError despite the version-agnostic shebang.
while True:
    user_input = raw_input("Enter Color and on/off: ")
    tokens = user_input.split()
    if len(tokens) < 1:
        continue
    color = tokens[0]
    if color == "quit":
        break
    # Default to "on" when no second token is supplied.
    onoff = 1
    if len(tokens) > 1:
        onoff = tokens[1]
        if onoff == "on":
            onoff = 1
        elif onoff == "off":
            onoff = 0
        else:
            # Anything else must parse as an integer (raises otherwise).
            onoff = int(onoff)
    led_color(color, onoff)
# Drive all LED pins high again before exiting (LEDs off per the wiring
# noted above).
for gpio in led_color_gpio.values():
    bcm_pin = gpio_to_bcm[gpio]
    GPIO.output(bcm_pin, True)
| 18.075 | 54 | 0.538728 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 157 | 0.108575 |
ffd73066eb937a59b32d4daec9ba6f8807fa09da | 5,551 | py | Python | utils/StartMOOS.py | ianfixes/MOOS-python-utils | 1c34f3b8cde4fdcee48a8ee128a3c160eb17d722 | [
"WTFPL"
] | 3 | 2015-07-09T17:51:20.000Z | 2016-04-14T23:06:04.000Z | utils/StartMOOS.py | ifreecarve/MOOS-python-utils | 1c34f3b8cde4fdcee48a8ee128a3c160eb17d722 | [
"WTFPL"
] | null | null | null | utils/StartMOOS.py | ifreecarve/MOOS-python-utils | 1c34f3b8cde4fdcee48a8ee128a3c160eb17d722 | [
"WTFPL"
] | 3 | 2015-03-31T04:18:21.000Z | 2016-10-22T04:55:16.000Z | #!/usr/bin/env python
###########################################################################
#
# Written in 2009 by Ian Katz <ijk5@mit.edu>
# Terms: WTFPL (http://sam.zoy.org/wtfpl/)
# See COPYING and WARRANTY files included in this distribution
#
###########################################################################
# this program launches MOOS processes and verifies that they're up.
# this sequential launch method is gentler to low-horsepower CPUs.
#
# It takes 2 command line arguments:
# 1. the MOOS config file to be used
# 2. OPTIONALLY the working directory that all apps should launch from
import os
import sys
import time
#MAKE ANY CHANGES HERE
def desired_MOOS_procs():
    """Return the MOOS apps to launch, in start order.

    Each entry is a tuple of either (app executable name,) or
    (app executable name, ID string passed to the app as an extra arg).
    """
    #The app name, and -- optionally -- its ID string.
    # use a comma in the tuple either way
    return [
        ("pMOOSBridge",),
        ("iBatterySG", "Battery"),
        ("iDepth",),
        ("pSystemHealth", "pSystemHealth[oiv]"),
        ("iDVL_SG","iDVL"),
        ("iINS_SG","iINS",),
        ("iGPS_SG", "iGPS"),
        ("iRange",),
        ("iMultisonde", "CTD"),
        ("iActuationSG", "Thrusters"),
        ("iMotor", "RTU"),
#        ("pLogMySQL",),
        ("pNav",),
        ("pHelmSG","pHelm"),
    ]
def tick():
    """Print a progress dot (no newline) and sleep briefly."""
    sys.stdout.write(".")
    sys.stdout.flush()
    time.sleep(0.2)
def start_MOOS_process_in_new_screen(app_name, config_file, app_id_string=None):
    """Launch `app_name config_file [app_id_string]` in a detached screen.

    The screen session is named after app_id_string when given, otherwise
    after the app itself.  Returns os.system()'s exit status.
    """
    session_name = app_name if app_id_string is None else app_id_string
    command_line = "screen -dmS " + session_name
    command_line += " " + app_name + " " + config_file
    if app_id_string is not None:
        command_line += " " + app_id_string
    return os.system(command_line)
def start_all_MOOSProcesses(process_list, config_file, time_between_starts=2.0):
import time for p in process_list:
appname = p[0]
args = (appname, config_file)
if len(p) > 1:
appname = p[1]
args = args + (p[1],)
print "Starting", appname.ljust(20), "in new screen...",
start_MOOS_process_in_new_screen(*args)
print "OK"
time.sleep(time_between_starts)
def start_MOOS_processes_sequentially(process_list, config_file, moosComms):
    """Start each MOOS app and block until it shows up in DB_CLIENTS.

    Uses the already-connected `moosComms` client to poll the MOOSDB's
    DB_CLIENTS variable, so each app is confirmed up before the next is
    launched (gentler on low-horsepower CPUs).
    """
    #get mail from the server manually
    def FetchClients():
        # Returns the DB_CLIENTS string, 0 if no such message was in the
        # inbox, or None if the fetch itself failed.
        inbox = pyMOOS.MOOSMSG_LIST()
        if not moosComms.Fetch(inbox):
            return None
        #go through all messages and put them in the local cache
        iter = inbox.iterator()
        try:
            while 1:
                msg = iter.next()
                varname = msg.GetKey()
                if varname == "DB_CLIENTS":
                    return msg.GetString()
        except StopIteration:
            return 0
    #find out if we successfully fetched
    def FetchSuccess(result):
        if result == None: #fetch error
            return False
        if result == 0: #message DNE
            return False
        return True
    print "Registering for DB_CLIENTS...",
    moosComms.Register("DB_CLIENTS", 0.2)
    #wait for registration confirmation
    while not FetchSuccess(FetchClients()):
        tick()
    print "Done!"
    for p in process_list:
        appname = p[0]
        args = (appname, config_file)
        if len(p) > 1:
            # The ID string is both the screen name and the name the app
            # registers with the DB under.
            appname = p[1]
            args = args + (p[1],)
        print "Starting", appname.ljust(20, "."),
        start_MOOS_process_in_new_screen(*args)
        # Poll until the app's name appears in the comma-separated
        # DB_CLIENTS list.
        while True:
            tick()
            clientstring = FetchClients()
            if FetchSuccess(clientstring):
                clientset = set(clientstring.split(","))
                if appname in clientset:
                    break
        print "Done!"
    print "Unregistering...",
    moosComms.UnRegister("DB_CLIENTS")
    print "Done!"
if __name__ == "__main__":
    if len(sys.argv) < 2:
        print "Usage: " + sys.argv[0] + "<MOOS config file name> [working directory]"
        exit(1)
    #The app name, and -- optionally -- its ID string
    moosProcList = desired_MOOS_procs()
    moosConfigFile = sys.argv[1]
    if len(sys.argv) == 3:
        #we want to run all processes in this directory
        os.chdir(sys.argv[2])
    print "Starting MOOSDB...",
    start_MOOS_process_in_new_screen("MOOSDB", moosConfigFile)
    #see if we can use pyMOOS to intelligently launch processes
    try:
        import pyMOOS
        pi = pyMOOS.PI # force an error
    except:
        #fall back on basic implementation (no pyMOOS: launch on a fixed
        #5-second timer instead of confirming DB connections)
        print "Done"
        print "\nNo pyMOOS detected... falling back on timed launch sequence\n"
        start_all_MOOSProcesses(moosProcList, moosConfigFile, 5.0)
        exit(0)
    #wait for connect; the client name embeds this hostname
    myComms = pyMOOS.CMOOSCommClient()
    if myComms.Run("localhost", 9000, "StartMOOS.py[" + os.uname()[1] + "]"):
        print "Done!"
        print "\n\nStarting MOOS processes the SCHMANCY way!\n"
    else:
        print "Failed to connect to local MOOSDB."
        print "You may want to 'killall screen' and try again."
        exit(1)
    print "Connecting to MOOSDB...",
    while not myComms.IsConnected():
        tick()
    print "Done!"
    #start each process and wait for it to connect
    start_MOOS_processes_sequentially(moosProcList, moosConfigFile, myComms)
    print "\nAll MOOS processes successfully launched!"
| 26.816425 | 85 | 0.572149 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,091 | 0.376689 |
ffd92d23d660d2a840a6dec51a3209da982b029c | 1,172 | py | Python | word_vectorizer/tests/unittest/model_downloading/test_gensimModelDownloader.py | RodSernaPerez/WordVectorizer | 097b2ccfc284b39ad43f56047ee25e393b7525ec | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | word_vectorizer/tests/unittest/model_downloading/test_gensimModelDownloader.py | RodSernaPerez/WordVectorizer | 097b2ccfc284b39ad43f56047ee25e393b7525ec | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | word_vectorizer/tests/unittest/model_downloading/test_gensimModelDownloader.py | RodSernaPerez/WordVectorizer | 097b2ccfc284b39ad43f56047ee25e393b7525ec | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | from unittest import TestCase
from unittest.mock import patch
from word_vectorizer.constants import Constants
from word_vectorizer.model_downloading.gensim_model_downloader import \
GensimModelDownloader
class TestGensimModelDownloader(TestCase):
    """Unit tests for GensimModelDownloader.download_from_url."""
    # Fixture constants shared by the test below.
    NAME_MODEL = "name_model"
    URL = "gensim"
    # Path gensim's downloader API pretends to have downloaded to.
    PATH_WHERE_GENSIM_DOWNLOADS_MODEL = "this/is/a/path/to/the/" + NAME_MODEL
    PATH_TO_FOLDER_WHERE_GENSIM_DOWNLOADS = "this/is/a/path"
    # Patch shutil and the gensim downloader api *as imported by the
    # module under test*, so no real download or file move happens.
    @patch(GensimModelDownloader.__module__ + ".shutil", spec=True)
    @patch(GensimModelDownloader.__module__ + ".api")
    def test_download_from_url(self, mock_api, mock_shutil):
        mock_api.load.return_value = self.PATH_WHERE_GENSIM_DOWNLOADS_MODEL
        path = GensimModelDownloader.download_from_url(self.URL,
                                                       self.NAME_MODEL)
        # The model must be moved into the destination folder and
        # gensim's temporary download folder removed afterwards.
        mock_shutil.move.assert_called_once_with(
            self.PATH_WHERE_GENSIM_DOWNLOADS_MODEL,
            Constants.DESTINATION_FOLDER + "/" + self.NAME_MODEL)
        mock_shutil.rmtree.assert_called_once_with(
            self.PATH_TO_FOLDER_WHERE_GENSIM_DOWNLOADS)
        self.assertTrue(path.endswith(self.NAME_MODEL))
| 41.857143 | 77 | 0.728669 | 960 | 0.819113 | 0 | 0 | 723 | 0.616894 | 0 | 0 | 78 | 0.066553 |
ffd92f6660bddf66dfe789ef939a022a436eddba | 26,840 | py | Python | results/generate_result.py | riscv-android-src/platform-test-mlts-benchmark | fc22878823896b81eb8b7e63e952a13f9675edcb | [
"Apache-2.0"
] | null | null | null | results/generate_result.py | riscv-android-src/platform-test-mlts-benchmark | fc22878823896b81eb8b7e63e952a13f9675edcb | [
"Apache-2.0"
] | null | null | null | results/generate_result.py | riscv-android-src/platform-test-mlts-benchmark | fc22878823896b81eb8b7e63e952a13f9675edcb | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python3
#
# Copyright 2018, The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MLTS benchmark result generator.
Reads a CSV produced by MLTS benchmark and generates
an HTML page with results summary.
Usage:
generate_result [csv input file] [html output file]
"""
import argparse
import collections
import csv
import os
import re
import math
class ScoreException(Exception):
  """Base exception raised when benchmark results cannot be scored."""
  pass
# Latency measurement: iteration count, total wall time, and a latency
# histogram (start time, bucket width, per-bucket counts), all in seconds.
LatencyResult = collections.namedtuple(
    'LatencyResult',
    ['iterations', 'total_time_sec', 'time_freq_start_sec', 'time_freq_step_sec', 'time_freq_sec'])
# The three compilation scenarios a benchmark may report, in CSV order.
COMPILATION_TYPES = ['compile_without_cache', 'save_to_cache', 'prepare_from_cache']
BASELINE_COMPILATION_TYPE = COMPILATION_TYPES[0]
# Per-scenario compilation latencies plus the on-disk cache size.
CompilationResult = collections.namedtuple(
    'CompilationResult',
    ['cache_size_bytes'] + COMPILATION_TYPES)
# One benchmark run of a model on one backend.
BenchmarkResult = collections.namedtuple(
    'BenchmarkResult',
    ['name', 'backend_type', 'inference_latency', 'max_single_error',
     'testset_size', 'evaluator_keys', 'evaluator_values', 'validation_errors',
     'compilation_results'])
# A baseline result paired with the other backends' results for the
# same model.
ResultsWithBaseline = collections.namedtuple(
    'ResultsWithBaseline',
    ['baseline', 'other'])
# Backend every other backend is compared against.
BASELINE_BACKEND = 'TFLite_CPU'
# (pattern, display name) pairs; first match wins, so quantized patterns
# must precede the broader float patterns.
KNOWN_GROUPS = [
    (re.compile('mobilenet_v1.*quant.*'), 'MobileNet v1 Quantized'),
    (re.compile('mobilenet_v1.*'), 'MobileNet v1 Float'),
    (re.compile('mobilenet_v2.*quant.*'), 'MobileNet v2 Quantized'),
    (re.compile('mobilenet_v2.*'), 'MobileNet v2 Float'),
    (re.compile('mobilenet_v3.*uint8.*'), 'MobileNet v3 Quantized'),
    (re.compile('mobilenet_v3.*'), 'MobileNet v3 Float'),
    (re.compile('tts.*'), 'LSTM Text-to-speech'),
    (re.compile('asr.*'), 'LSTM Automatic Speech Recognition'),
]
class BenchmarkResultParser:
  """A helper class to parse the input CSV file.

  Cells are consumed left-to-right within the current row via an index
  cursor; next() advances to the following row and resets the cursor.
  """
  def __init__(self, csvfile):
    # Drop comment lines (first char '#') before handing to csv.reader.
    self.csv_reader = csv.reader(filter(lambda row: row[0] != '#', csvfile))
    self.row = None
    self.index = 0
  def next(self):
    """Advance to the next row, returns the current row or None if reaches the end."""
    try:
      self.row = next(self.csv_reader)
    except StopIteration:
      self.row = None
    finally:
      # Reset the cell cursor regardless of success.
      self.index = 0
    return self.row
  def read_boolean(self):
    """Read the next CSV cell as a boolean (case-insensitive true/false)."""
    s = self.read_typed(str).lower()
    if s == 'true':
      return True
    elif s == 'false':
      return False
    else:
      raise ValueError('Cannot convert \'%s\' to a boolean' % s)
  def read_typed(self, Type):
    """Read the next CSV cell as the given type."""
    # bool('false') would be True, so booleans need special parsing.
    if Type is bool:
      return self.read_boolean()
    entry = self.row[self.index]
    self.index += 1
    return Type(entry)
  def read_typed_array(self, Type, length):
    """Read the next `length` CSV cells as a typed array."""
    return [self.read_typed(Type) for _ in range(length)]
  def read_latency_result(self):
    """Read the next CSV cells as a LatencyResult."""
    result = {}
    result['iterations'] = self.read_typed(int)
    result['total_time_sec'] = self.read_typed(float)
    result['time_freq_start_sec'] = self.read_typed(float)
    result['time_freq_step_sec'] = self.read_typed(float)
    # A count cell precedes the histogram buckets themselves.
    time_freq_sec_count = self.read_typed(int)
    result['time_freq_sec'] = self.read_typed_array(float, time_freq_sec_count)
    return LatencyResult(**result)
  def read_compilation_result(self):
    """Read the next CSV cells as a CompilationResult."""
    result = {}
    for compilation_type in COMPILATION_TYPES:
      # Each scenario is prefixed by a has-results flag.
      has_results = self.read_typed(bool)
      result[compilation_type] = self.read_latency_result() if has_results else None
    result['cache_size_bytes'] = self.read_typed(int)
    return CompilationResult(**result)
  def read_benchmark_result(self):
    """Read the next CSV cells as a BenchmarkResult."""
    result = {}
    result['name'] = self.read_typed(str)
    result['backend_type'] = self.read_typed(str)
    result['inference_latency'] = self.read_latency_result()
    result['max_single_error'] = self.read_typed(float)
    result['testset_size'] = self.read_typed(int)
    # Both counts come before the corresponding variable-length arrays.
    evaluator_keys_count = self.read_typed(int)
    validation_error_count = self.read_typed(int)
    result['evaluator_keys'] = self.read_typed_array(str, evaluator_keys_count)
    result['evaluator_values'] = self.read_typed_array(float, evaluator_keys_count)
    result['validation_errors'] = self.read_typed_array(str, validation_error_count)
    result['compilation_results'] = self.read_compilation_result()
    return BenchmarkResult(**result)
def parse_csv_input(input_filename):
  """Parse the benchmark CSV, returns (benchmark_info, [BenchmarkResult])."""
  with open(input_filename, 'r') as csvfile:
    csv_parser = BenchmarkResultParser(csvfile)
    # The first non-comment row describes the device and the run itself.
    benchmark_info = csv_parser.next()
    # Every following row holds one full benchmark result.
    benchmark_results = []
    while csv_parser.next():
      benchmark_results.append(csv_parser.read_benchmark_result())
    return (benchmark_info, benchmark_results)
def group_results(results):
  """Group list of results by their name/backend, returns list of lists."""
  # Group by name
  groupings = collections.defaultdict(list)
  for result in results:
    groupings[result.name].append(result)
  # Find baseline for each group, make ResultsWithBaseline for each name.
  # NOTE(review): next() raises StopIteration if a model has no
  # TFLite_CPU result -- presumably the baseline is always present.
  groupings_baseline = {}
  for name, results in groupings.items():
    baseline = next(filter(lambda x: x.backend_type == BASELINE_BACKEND,
                           results))
    # All non-baseline results, ordered by backend name.
    other = sorted(filter(lambda x: x is not baseline, results),
                   key=lambda x: x.backend_type)
    groupings_baseline[name] = ResultsWithBaseline(
        baseline=baseline,
        other=other)
  # Merge ResultsWithBaseline for known groups (first matching pattern
  # in KNOWN_GROUPS wins; unmatched models keep their own name).
  known_groupings_baseline = collections.defaultdict(list)
  for name, results_with_bl in sorted(groupings_baseline.items()):
    group_name = name
    for known_group in KNOWN_GROUPS:
      if known_group[0].match(results_with_bl.baseline.name):
        group_name = known_group[1]
        break
    known_groupings_baseline[group_name].append(results_with_bl)
  # Turn into a list sorted by name
  groupings_list = []
  for name, results_wbl in sorted(known_groupings_baseline.items()):
    groupings_list.append((name, results_wbl))
  return groupings_list
def get_frequency_graph_min_max(latencies):
  """Return (earliest start, latest end) over all latency histograms."""
  starts = [latency.time_freq_start_sec for latency in latencies]
  ends = [latency.time_freq_start_sec +
          len(latency.time_freq_sec) * latency.time_freq_step_sec
          for latency in latencies]
  return min(starts), max(ends)
def get_frequency_graph(time_freq_start_sec, time_freq_step_sec, time_freq_sec,
                        start_sec, end_sec):
  """Generate input x/y data for latency frequency graph.

  Pads the histogram with zero-count buckets on both sides so graphs in
  the same group share the [start_sec, end_sec] x-axis.  Padding is
  skipped when it would add 64+ buckets (and when the bucket width is
  zero, which makes the pad counts infinite).
  """
  if time_freq_step_sec != 0:
    pad_before = int((time_freq_start_sec - start_sec) / time_freq_step_sec)
  else:
    pad_before = math.inf
  end_time = time_freq_start_sec + len(time_freq_sec) * time_freq_step_sec
  if time_freq_step_sec != 0:
    pad_after = int((end_sec - end_time) / time_freq_step_sec)
  else:
    pad_after = math.inf
  # After padding more than 64 values, graphs start to look messy --
  # bail out on the padding in that case.
  do_pad = (pad_before + pad_after) < 64
  labels = []
  values = []
  if do_pad:
    for i in range(pad_before):
      labels.append('{:.2f}ms'.format((start_sec + i * time_freq_step_sec) * 1000.0))
      values.append(0)
  for i, count in enumerate(time_freq_sec):
    labels.append('{:.2f}ms'.format(
        (time_freq_start_sec + i * time_freq_step_sec) * 1000.0))
    values.append(count)
  if do_pad:
    for i in range(pad_after):
      labels.append('{:.2f}ms'.format((end_time + i * time_freq_step_sec) * 1000.0))
      values.append(0)
  return (labels, values)
def is_topk_evaluator(evaluator_keys):
  """Are these evaluator keys from TopK evaluator?"""
  expected = ['top_1', 'top_2', 'top_3', 'top_4', 'top_5']
  return (len(evaluator_keys) == 5 and
          list(evaluator_keys) == expected)
def is_melceplogf0_evaluator(evaluator_keys):
  """Are these evaluator keys from MelCepLogF0 evaluator?"""
  expected = ['max_mel_cep_distortion', 'max_log_f0_error']
  return (len(evaluator_keys) == 2 and
          list(evaluator_keys) == expected)
def is_phone_error_rate_evaluator(evaluator_keys):
  """Are these evaluator keys from PhoneErrorRate evaluator?"""
  return list(evaluator_keys) == ['max_phone_error_rate']
def generate_accuracy_headers(result):
  """Accuracy-related headers for result table.

  Picks the header template matching the evaluator that produced the
  result, falling back to the basic header when no evaluator matches.
  (The original ended with a `raise ScoreException` placed after the
  catch-all `else` return, making it unreachable; that dead statement
  has been removed.)
  """
  if is_topk_evaluator(result.evaluator_keys):
    return ACCURACY_HEADERS_TOPK_TEMPLATE
  if is_melceplogf0_evaluator(result.evaluator_keys):
    return ACCURACY_HEADERS_MELCEPLOGF0_TEMPLATE
  if is_phone_error_rate_evaluator(result.evaluator_keys):
    return ACCURACY_HEADERS_PHONE_ERROR_RATE_TEMPLATE
  return ACCURACY_HEADERS_BASIC_TEMPLATE
def get_diff_span(value, same_delta, positive_is_better):
  """Map a delta to a CSS class name: 'same', 'better' or 'worse'."""
  if abs(value) < same_delta:
    return 'same'
  if positive_is_better:
    return 'better' if value > 0 else 'worse'
  return 'better' if value < 0 else 'worse'
def generate_accuracy_values(baseline, result):
  """Accuracy-related data for result table.

  Renders the accuracy cells using the template family matching the
  result's evaluator.  When `result is baseline` a baseline template is
  used; otherwise a diff template showing the delta vs. the baseline.
  """
  if is_topk_evaluator(result.evaluator_keys):
    # Top-k accuracies are fractions; convert to percent for display.
    val = [float(x) * 100.0 for x in result.evaluator_values]
    if result is baseline:
      topk = [TOPK_BASELINE_TEMPLATE.format(val=x) for x in val]
      return ACCURACY_VALUES_TOPK_TEMPLATE.format(
          top1=topk[0], top2=topk[1], top3=topk[2], top4=topk[3],
          top5=topk[4]
      )
    else:
      base = [float(x) * 100.0 for x in baseline.evaluator_values]
      # Positive delta means better accuracy than baseline.
      diff = [a - b for a, b in zip(val, base)]
      topk = [TOPK_DIFF_TEMPLATE.format(
          val=v, diff=d, span=get_diff_span(d, 1.0, positive_is_better=True))
          for v, d in zip(val, diff)]
      return ACCURACY_VALUES_TOPK_TEMPLATE.format(
          top1=topk[0], top2=topk[1], top3=topk[2], top4=topk[3],
          top5=topk[4]
      )
  elif is_melceplogf0_evaluator(result.evaluator_keys):
    # Error metrics plus the overall max single error; lower is better.
    val = [float(x) for x in
           result.evaluator_values + [result.max_single_error]]
    if result is baseline:
      return ACCURACY_VALUES_MELCEPLOGF0_TEMPLATE.format(
          max_log_f0=MELCEPLOGF0_BASELINE_TEMPLATE.format(
              val=val[0]),
          max_mel_cep_distortion=MELCEPLOGF0_BASELINE_TEMPLATE.format(
              val=val[1]),
          max_single_error=MELCEPLOGF0_BASELINE_TEMPLATE.format(
              val=val[2]),
      )
    else:
      base = [float(x) for x in
              baseline.evaluator_values + [baseline.max_single_error]]
      diff = [a - b for a, b in zip(val, base)]
      v = [MELCEPLOGF0_DIFF_TEMPLATE.format(
          val=v, diff=d, span=get_diff_span(d, 1.0, positive_is_better=False))
          for v, d in zip(val, diff)]
      return ACCURACY_VALUES_MELCEPLOGF0_TEMPLATE.format(
          max_log_f0=v[0],
          max_mel_cep_distortion=v[1],
          max_single_error=v[2],
      )
  elif is_phone_error_rate_evaluator(result.evaluator_keys):
    val = [float(x) for x in
           result.evaluator_values + [result.max_single_error]]
    if result is baseline:
      return ACCURACY_VALUES_PHONE_ERROR_RATE_TEMPLATE.format(
          max_phone_error_rate=PHONE_ERROR_RATE_BASELINE_TEMPLATE.format(
              val=val[0]),
          max_single_error=PHONE_ERROR_RATE_BASELINE_TEMPLATE.format(
              val=val[1]),
      )
    else:
      base = [float(x) for x in
              baseline.evaluator_values + [baseline.max_single_error]]
      diff = [a - b for a, b in zip(val, base)]
      v = [PHONE_ERROR_RATE_DIFF_TEMPLATE.format(
          val=v, diff=d, span=get_diff_span(d, 1.0, positive_is_better=False))
          for v, d in zip(val, diff)]
      return ACCURACY_VALUES_PHONE_ERROR_RATE_TEMPLATE.format(
          max_phone_error_rate=v[0],
          max_single_error=v[1],
      )
  else:
    # Fallback for unrecognized evaluators: show only the max error.
    return ACCURACY_VALUES_BASIC_TEMPLATE.format(
        max_single_error=result.max_single_error,
    )
  # NOTE(review): unreachable -- every branch above returns.
  raise ScoreException('Unknown accuracy values for: ' + str(result))
def getchartjs_source():
  """Return the bundled Chart.js source as a string.

  Resolves the file relative to this script so the tool works from any
  working directory.
  """
  path = os.path.dirname(os.path.abspath(__file__)) + '/' + CHART_JS_FILE
  # Use a context manager so the file handle is closed promptly; the
  # original leaked the handle (bare open(...).read()).
  with open(path) as chartjs_file:
    return chartjs_file.read()
def generate_avg_ms(baseline, latency):
  """Render the average-latency cell; latency=None renders the baseline."""
  if latency is None:
    latency = baseline
  result_avg_ms = (latency.total_time_sec / latency.iterations)*1000.0
  if latency is baseline:
    # The baseline row shows only its own average.
    return LATENCY_BASELINE_TEMPLATE.format(val=result_avg_ms)
  baseline_avg_ms = (baseline.total_time_sec / baseline.iterations)*1000.0
  # Relative difference in percent; for latency, lower is better.
  diff_percent = (result_avg_ms/baseline_avg_ms - 1.0) * 100.0
  return LATENCY_DIFF_TEMPLATE.format(
      val=result_avg_ms,
      diff=diff_percent,
      diff_val=result_avg_ms - baseline_avg_ms,
      span=get_diff_span(diff_percent, same_delta=1.0, positive_is_better=False))
def generate_result_entry(baseline, result):
  """Render one result row; result=None renders the baseline itself."""
  if result is None:
    result = baseline
  # Rows with validation errors get the 'failed' CSS class highlight.
  return RESULT_ENTRY_TEMPLATE.format(
      row_class='failed' if result.validation_errors else 'normal',
      name=result.name,
      backend=result.backend_type,
      iterations=result.inference_latency.iterations,
      testset_size=result.testset_size,
      accuracy_values=generate_accuracy_values(baseline, result),
      avg_ms=generate_avg_ms(baseline.inference_latency, result.inference_latency))
def generate_latency_graph_entry(tag, latency, tmin, tmax):
  """Generate a single latency graph."""
  # id(latency) serves as a unique HTML element id for the chart.
  return LATENCY_GRAPH_ENTRY_TEMPLATE.format(
      tag=tag,
      i=id(latency),
      freq_data=get_frequency_graph(latency.time_freq_start_sec,
                                    latency.time_freq_step_sec,
                                    latency.time_freq_sec,
                                    tmin, tmax))
def generate_latency_graphs_group(tags, latencies):
  """Generate a group of latency graphs sharing one x-axis range."""
  # All graphs in the group use the same tmin/tmax so they line up.
  tmin, tmax = get_frequency_graph_min_max(latencies)
  entries = []
  for tag, latency in zip(tags, latencies):
    entries.append(generate_latency_graph_entry(tag, latency, tmin, tmax))
  return ''.join(entries)
def snake_case_to_title(string):
  """Convert 'snake_case' text to 'Snake Case' display form."""
  # title() treats '_' as a word boundary, so capitalization is the
  # same whether the underscores are replaced before or after.
  return string.title().replace('_', ' ')
def generate_inference_latency_graph_entry(results_with_bl):
  """Generate a group of latency graphs for inference latencies."""
  # Baseline first, then the other backends in their stored order.
  tags = []
  latencies = []
  for result in [results_with_bl.baseline] + results_with_bl.other:
    tags.append(result.backend_type)
    latencies.append(result.inference_latency)
  return generate_latency_graphs_group(tags, latencies)
def generate_compilation_latency_graph_entry(results_with_bl):
  """Generate a group of latency graphs for compilation latencies."""
  # One graph per (backend, compilation scenario) pair that actually
  # reported results; the baseline backend itself is not included.
  tags = []
  latencies = []
  for result in results_with_bl.other:
    for compilation_type in COMPILATION_TYPES:
      latency = getattr(result.compilation_results, compilation_type)
      if latency:
        tags.append(result.backend_type + ', ' +
                    snake_case_to_title(compilation_type))
        latencies.append(latency)
  return generate_latency_graphs_group(tags, latencies)
def generate_validation_errors(entries_group):
  """Generate validation errors table; empty string when there are none."""
  rows = []
  for result_and_bl in entries_group:
    for result in [result_and_bl.baseline] + result_and_bl.other:
      for error in result.validation_errors:
        rows.append(VALIDATION_ERRORS_ENTRY_TEMPLATE.format(
            name=result.name,
            backend=result.backend_type,
            error=error))
  if not rows:
    return ''
  return VALIDATION_ERRORS_TEMPLATE.format(results=''.join(rows))
def generate_compilation_result_entry(result):
  """Render one compilation-results row for the given BenchmarkResult."""
  format_args = {
      'row_class':
          'failed' if result.validation_errors else 'normal',
      'name':
          result.name,
      'backend':
          result.backend_type,
      # Thousands-separated byte count, or '-' when no cache was written.
      'cache_size':
          f'{result.compilation_results.cache_size_bytes:,}'
          if result.compilation_results.cache_size_bytes > 0 else '-'
  }
  for compilation_type in COMPILATION_TYPES:
    latency = getattr(result.compilation_results, compilation_type)
    if latency:
      format_args[compilation_type + '_iterations'] = f'{latency.iterations}'
      # Averages are shown relative to the compile-without-cache baseline.
      format_args[compilation_type + '_avg_ms'] = generate_avg_ms(
          result.compilation_results.compile_without_cache, latency)
    else:
      # Scenario not measured: show placeholders.
      format_args[compilation_type + '_iterations'] = '-'
      format_args[compilation_type + '_avg_ms'] = '-'
  return COMPILATION_RESULT_ENTRY_TEMPLATE.format(**format_args)
def generate_result(benchmark_info, data):
  """Turn list of results into HTML.

  benchmark_info is the CSV header row: [0] is the run timestamp and
  [1] the device description.  One RESULT_GROUP section is emitted per
  model group from group_results().
  """
  return MAIN_TEMPLATE.format(
      jsdeps=getchartjs_source(),
      device_info=DEVICE_INFO_TEMPLATE.format(
          benchmark_time=benchmark_info[0],
          device_info=benchmark_info[1],
      ),
      results_list=''.join((
          RESULT_GROUP_TEMPLATE.format(
              group_name=entries_name,
              # All entries in a group share one evaluator, so headers
              # come from the first baseline.
              accuracy_headers=generate_accuracy_headers(
                  entries_group[0].baseline),
              results=''.join(
                  RESULT_ENTRY_WITH_BASELINE_TEMPLATE.format(
                      baseline=generate_result_entry(
                          result_and_bl.baseline, None),
                      other=''.join(
                          generate_result_entry(
                              result_and_bl.baseline, x)
                          for x in result_and_bl.other)
                  ) for result_and_bl in entries_group),
              validation_errors=generate_validation_errors(entries_group),
              latency_graphs=LATENCY_GRAPHS_TEMPLATE.format(
                  results=''.join(
                      LATENCY_GRAPH_ENTRY_GROUP_TEMPLATE.format(
                          name=result_and_bl.baseline.name,
                          results=generate_inference_latency_graph_entry(result_and_bl)
                      ) for result_and_bl in entries_group)
              ),
              compilation_results=''.join(
                  COMPILATION_RESULT_ENTRIES_TEMPLATE.format(
                      entries=''.join(
                          generate_compilation_result_entry(x) for x in result_and_bl.other)
                  ) for result_and_bl in entries_group),
              compilation_latency_graphs=LATENCY_GRAPHS_TEMPLATE.format(
                  results=''.join(
                      LATENCY_GRAPH_ENTRY_GROUP_TEMPLATE.format(
                          name=result_and_bl.baseline.name,
                          results=generate_compilation_latency_graph_entry(result_and_bl)
                      ) for result_and_bl in entries_group)
              ),
          ) for entries_name, entries_group in group_results(data))
      ))
def main():
  """Parse CLI arguments, read the results CSV, and write the HTML report.

  Usage: <script> input.csv output.html
  """
  parser = argparse.ArgumentParser()
  parser.add_argument('input', help='input csv filename')
  parser.add_argument('output', help='output html filename')
  args = parser.parse_args()
  benchmark_info, data = parse_csv_input(args.input)
  # The generated page declares charset=utf-8 in its <meta> tag, so write it
  # with an explicit utf-8 encoding rather than the platform default (which
  # could fail or mis-encode non-ASCII benchmark names on some systems).
  with open(args.output, 'w', encoding='utf-8') as htmlfile:
    htmlfile.write(generate_result(benchmark_info, data))
# -----------------
# Templates below
MAIN_TEMPLATE = """<!doctype html>
<html lang='en-US'>
<head>
<meta http-equiv='Content-Type' content='text/html; charset=utf-8'>
<script src='https://ajax.googleapis.com/ajax/libs/jquery/3.3.1/jquery.min.js'></script>
<script>{jsdeps}</script>
<title>MLTS results</title>
<style>
.results {{
border-collapse: collapse;
width: 100%;
}}
.results td, .results th {{
border: 1px solid #ddd;
padding: 6px;
}}
.results tbody.values {{
border-bottom: 8px solid #333;
}}
span.better {{
color: #070;
}}
span.worse {{
color: #700;
}}
span.same {{
color: #000;
}}
.results tr:nth-child(even) {{background-color: #eee;}}
.results tr:hover {{background-color: #ddd;}}
.results th {{
padding: 10px;
font-weight: bold;
text-align: left;
background-color: #333;
color: white;
}}
.results tr.failed {{
background-color: #ffc4ca;
}}
.group {{
padding-top: 25px;
}}
.group_name {{
padding-left: 10px;
font-size: 140%;
font-weight: bold;
}}
.section_name {{
padding: 10px;
font-size: 120%;
font-weight: bold;
}}
.latency_results {{
padding: 10px;
border: 1px solid #ddd;
overflow: hidden;
}}
.latency_with_baseline {{
padding: 10px;
border: 1px solid #ddd;
overflow: hidden;
}}
</style>
</head>
<body>
{device_info}
{results_list}
</body>
</html>"""
DEVICE_INFO_TEMPLATE = """<div id='device_info'>
Benchmark for {device_info}, started at {benchmark_time}
</div>"""
RESULT_GROUP_TEMPLATE = """<div class="group">
<div class="group_name">{group_name}</div>
<div class="section_name">Inference results</div>
<table class="results">
<tr>
<th>Name</th>
<th>Backend</th>
<th>Iterations</th>
<th>Test set size</th>
<th>Average latency ms</th>
{accuracy_headers}
</tr>
{results}
</table>
{validation_errors}
{latency_graphs}
<div class="section_name">Compilation results</div>
<table class="results">
<tr>
<th rowspan="2">Name</th>
<th rowspan="2">Backend</th>
<th colspan="2">Compile Without Cache</th>
<th colspan="2">Save To Cache</th>
<th colspan="2">Prepare From Cache</th>
<th rowspan="2">Cache size bytes</th>
</tr>
<tr>
<th>Iterations</th>
<th>Average latency ms</th>
<th>Iterations</th>
<th>Average latency ms</th>
<th>Iterations</th>
<th>Average latency ms</th>
</tr>
{compilation_results}
</table>
{compilation_latency_graphs}
</div>"""
VALIDATION_ERRORS_TEMPLATE = """
<table class="results">
<tr>
<th>Name</th>
<th>Backend</th>
<th>Error</th>
</tr>
{results}
</table>"""
VALIDATION_ERRORS_ENTRY_TEMPLATE = """
<tr class="failed">
<td>{name}</td>
<td>{backend}</td>
<td>{error}</td>
</tr>
"""
LATENCY_GRAPHS_TEMPLATE = """
<div class="latency_results">
{results}
</div>
<div style="clear: left;"></div>
"""
LATENCY_GRAPH_ENTRY_GROUP_TEMPLATE = """
<div class="latency_with_baseline" style="float: left;">
<b>{name}</b>
{results}
</div>
"""
LATENCY_GRAPH_ENTRY_TEMPLATE = """
<div class="latency_result" style='width: 350px;'>
{tag}
<canvas id='latency_chart{i}' class='latency_chart'></canvas>
<script>
$(function() {{
var freqData = {{
labels: {freq_data[0]},
datasets: [{{
data: {freq_data[1]},
backgroundColor: 'rgba(255, 99, 132, 0.6)',
borderColor: 'rgba(255, 0, 0, 0.6)',
borderWidth: 1,
}}]
}};
var ctx = $('#latency_chart{i}')[0].getContext('2d');
window.latency_chart{i} = new Chart(ctx,
{{
type: 'bar',
data: freqData,
options: {{
responsive: true,
title: {{
display: false,
text: 'Latency frequency'
}},
legend: {{
display: false
}},
scales: {{
xAxes: [ {{
barPercentage: 1.0,
categoryPercentage: 0.9,
}}],
yAxes: [{{
scaleLabel: {{
display: false,
labelString: 'Iterations Count'
}}
}}]
}}
}}
}});
}});
</script>
</div>
"""
RESULT_ENTRY_WITH_BASELINE_TEMPLATE = """
<tbody class="values">
{baseline}
{other}
</tbody>
"""
RESULT_ENTRY_TEMPLATE = """
<tr class={row_class}>
<td>{name}</td>
<td>{backend}</td>
<td>{iterations:d}</td>
<td>{testset_size:d}</td>
<td>{avg_ms}</td>
{accuracy_values}
</tr>"""
COMPILATION_RESULT_ENTRIES_TEMPLATE = """
<tbody class="values">
{entries}
</tbody>
"""
COMPILATION_RESULT_ENTRY_TEMPLATE = """
<tr class={row_class}>
<td>{name}</td>
<td>{backend}</td>
<td>{compile_without_cache_iterations}</td>
<td>{compile_without_cache_avg_ms}</td>
<td>{save_to_cache_iterations}</td>
<td>{save_to_cache_avg_ms}</td>
<td>{prepare_from_cache_iterations}</td>
<td>{prepare_from_cache_avg_ms}</td>
<td>{cache_size}</td>
</tr>"""
LATENCY_BASELINE_TEMPLATE = """{val:.2f}ms"""
LATENCY_DIFF_TEMPLATE = """{val:.2f}ms <span class='{span}'>
({diff_val:.2f}ms, {diff:.1f}%)</span>"""
ACCURACY_HEADERS_TOPK_TEMPLATE = """
<th>Top 1</th>
<th>Top 2</th>
<th>Top 3</th>
<th>Top 4</th>
<th>Top 5</th>
"""
ACCURACY_VALUES_TOPK_TEMPLATE = """
<td>{top1}</td>
<td>{top2}</td>
<td>{top3}</td>
<td>{top4}</td>
<td>{top5}</td>
"""
TOPK_BASELINE_TEMPLATE = """{val:.3f}%"""
TOPK_DIFF_TEMPLATE = """{val:.3f}% <span class='{span}'>({diff:.1f}%)</span>"""
ACCURACY_HEADERS_MELCEPLOGF0_TEMPLATE = """
<th>Max log(F0) error</th>
<th>Max Mel Cep distortion</th>
<th>Max scalar error</th>
"""
ACCURACY_VALUES_MELCEPLOGF0_TEMPLATE = """
<td>{max_log_f0}</td>
<td>{max_mel_cep_distortion}</td>
<td>{max_single_error}</td>
"""
MELCEPLOGF0_BASELINE_TEMPLATE = """{val:.2E}"""
MELCEPLOGF0_DIFF_TEMPLATE = \
"""{val:.2E} <span class='{span}'>({diff:.1f}%)</span>"""
ACCURACY_HEADERS_PHONE_ERROR_RATE_TEMPLATE = """
<th>Max phone error rate</th>
<th>Max scalar error</th>
"""
ACCURACY_VALUES_PHONE_ERROR_RATE_TEMPLATE = """
<td>{max_phone_error_rate}</td>
<td>{max_single_error}</td>
"""
PHONE_ERROR_RATE_BASELINE_TEMPLATE = """{val:.3f}"""
PHONE_ERROR_RATE_DIFF_TEMPLATE = \
"""{val:.3f} <span class='{span}'>({diff:.1f}%)</span>"""
ACCURACY_HEADERS_BASIC_TEMPLATE = """
<th>Max single scalar error</th>
"""
ACCURACY_VALUES_BASIC_TEMPLATE = """
<td>{max_single_error:.2f}</td>
"""
CHART_JS_FILE = 'Chart.bundle.min.js'
if __name__ == '__main__':
main()
| 31.613663 | 99 | 0.656222 | 2,881 | 0.10734 | 0 | 0 | 0 | 0 | 0 | 0 | 9,619 | 0.358383 |
ffda91245aed33f9125784b3f0d5a73c6224af00 | 6,975 | py | Python | ampel/ztf/dev/DevSkyPortalClient.py | AmpelProject/Ampel-ZTF | 7f9736a7be3aa526571004716160cae2a800e410 | [
"BSD-3-Clause"
] | 1 | 2021-03-11T15:39:28.000Z | 2021-03-11T15:39:28.000Z | ampel/ztf/dev/DevSkyPortalClient.py | AmpelProject/Ampel-ZTF | 7f9736a7be3aa526571004716160cae2a800e410 | [
"BSD-3-Clause"
] | 18 | 2021-08-02T17:11:25.000Z | 2022-01-11T16:20:04.000Z | ampel/ztf/dev/DevSkyPortalClient.py | AmpelProject/Ampel-ZTF | 7f9736a7be3aa526571004716160cae2a800e410 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: Ampel-ZTF/ampel/ztf/dev/DevSkyPortalClient.py
# Author: Jakob van Santen <jakob.van.santen@desy.de>
# Date: 16.09.2020
# Last Modified Date: 16.09.2020
# Last Modified By: Jakob van Santen <jakob.van.santen@desy.de>
import gzip
import io
from collections import defaultdict
from datetime import datetime
from typing import Any
from collections.abc import Sequence, Generator
import numpy as np
import requests
from ampel.protocol.AmpelAlertProtocol import AmpelAlertProtocol
from astropy.io import fits
from astropy.time import Time
from matplotlib.colors import Normalize
from matplotlib.figure import Figure
def render_thumbnail(cutout_data: bytes) -> bytes:
    """Convert a gzip-compressed FITS cutout into PNG bytes."""
    # Decompress and load the single-HDU FITS image; the pixel data is read
    # while the file is still open, flipped vertically for display.
    with gzip.open(io.BytesIO(cutout_data), "rb") as stream:
        with fits.open(stream) as hdu:
            header = hdu[0].header
            pixels = np.flipud(hdu[0].data)
    finite = np.isfinite(pixels)
    # Contrast-stretch between the 0.5th and 99.5th percentiles of the
    # finite pixel values (clips outliers at both ends).
    lo, hi = np.percentile(pixels[finite], [0.5, 99.5])
    fig = Figure(figsize=(1, 1))
    axes = fig.add_axes([0.0, 0.0, 1.0, 1.0])
    axes.set_axis_off()
    axes.imshow(
        pixels,
        norm=Normalize(lo, hi),
        aspect="auto",
        origin="lower",
    )
    buffer = io.BytesIO()
    # One dot per pixel row keeps the PNG at the cutout's native resolution.
    fig.savefig(buffer, dpi=pixels.shape[0])
    return buffer.getvalue()
class DevSkyPortalClient:
    """
    Post PhotoAlerts to [a local, test instance of] SkyPortal.

    On construction, the client seeds the instance with the minimal objects
    (telescope, instrument, stream, filter, group memberships) needed to
    accept ZTF candidates and photometry.
    """
    def __init__(self, root_token, base_url="http://localhost:9000/api"):
        """
        :param root_token: INITIAL_ADMIN from .tokens.yaml in the SkyPortal container
        :param base_url: base URL of the SkyPortal API (defaults to a local
            test instance)
        """
        self.base_url = base_url
        # Authorization header sent with every request.
        self.kwargs = {"headers": {"Authorization": f"token {root_token}"}}
        self.session = requests.Session()
        # Set up seed data ourselves: look up (or create) the P48 telescope...
        p48 = self.get_id(
            "/telescope",
            {"name": "P48"},
            {
                "diameter": 1.2,
                "elevation": 1870.0,
                "lat": 33.3633675,
                "lon": -116.8361345,
                "nickname": "Palomar 1.2m Oschin",
                "name": "P48",
                "skycam_link": "http://bianca.palomar.caltech.edu/images/allsky/AllSkyCurrentImage.JPG",
                "robotic": True,
            },
        )
        # ...then the ZTF instrument attached to it, and the alert stream.
        source = {
            "instrument": self.get_id(
                "/instrument",
                {"name": "ZTF"},
                {
                    "filters": ["ztfg", "ztfr", "ztfi"],
                    "type": "imager",
                    "band": "optical",
                    "telescope_id": p48,
                    "name": "ZTF",
                },
            ),
            "stream": self.get_id("/streams", {"name": "ztf_partnership"}),
            "group": 1,  # root group
        }
        # Grant the root group access to the stream before creating a filter
        # that links the two.
        self.post(
            f"/groups/{source['group']}/streams", json={"stream_id": source["stream"]}
        )
        source["filter"] = self.get_id(
            "/filters",
            {"name": "highlander"},
            {
                "name": "highlander",
                "stream_id": source["stream"],
                "group_id": source["group"],
            },
        )
        self.source = source
        # ensure that all users are in the root group
        for user in self.get("/user")["data"]:
            self.post(
                f"/groups/{self.source['group']}/users",
                json={"username": user["username"]},
            )
    def get_id(self, endpoint, params, default=None):
        """Query for an object by id, inserting it if not found.

        :param endpoint: API endpoint to query
        :param params: query parameters identifying the object
        :param default: payload to POST if the object does not exist
            (falls back to ``params`` when None)
        :return: id of the first matching (or newly created) object
        """
        if not (response := self.get(endpoint, params=params))["data"]:
            response = self.post(endpoint, json=default or params, raise_exc=True)
        if isinstance(response["data"], list):
            return response["data"][0]["id"]
        else:
            return response["data"]["id"]
    def request(self, verb, endpoint, raise_exc=False, **kwargs):
        """Issue an HTTP request and return the decoded JSON response.

        :param verb: HTTP method ("GET", "POST", ...)
        :param endpoint: path appended to the base URL
        :param raise_exc: when True, raise RuntimeError unless the response
            reports ``status == "success"``
        """
        response = self.session.request(
            verb, self.base_url + endpoint, **{**self.kwargs, **kwargs}
        ).json()
        if raise_exc and response["status"] != "success":
            raise RuntimeError(response["message"])
        return response
    def get(self, endpoint, **kwargs):
        """Shorthand for a GET request."""
        return self.request("GET", endpoint, **kwargs)
    def post(self, endpoint, **kwargs):
        """Shorthand for a POST request."""
        return self.request("POST", endpoint, **kwargs)
    def make_photometry(self, alert: AmpelAlertProtocol, after=-float("inf")):
        """Build a SkyPortal photometry payload from an alert.

        Datapoints at or before Julian date ``after`` are skipped; the
        remaining per-point dicts are transposed into parallel lists
        (column-oriented), as expected by the /photometry endpoint.
        """
        base = {
            "obj_id": alert.id,
            "alert_id": alert.datapoints[0]["candid"],
            "group_ids": [self.source["group"]],
            "instrument_id": self.source["instrument"],
            "magsys": "ab",
        }
        content = defaultdict(list)
        for doc in self._transform_datapoints(alert.datapoints, after):
            for k, v in doc.items():
                content[k].append(v)
        return {**base, **content}
    def _transform_datapoints(self, dps: Sequence[dict[str,Any]], after=-float("inf")) -> Generator[dict[str,Any],None,None]:
        """Yield one photometry dict per datapoint newer than ``after``.

        Non-detections (``magpsf`` is None) yield None-valued mag/position
        fields but still carry the limiting magnitude.
        """
        ztf_filters = {1: "ztfg", 2: "ztfr", 3: "ztfi"}
        for dp in dps:
            if dp["jd"] <= after:
                continue
            base = {
                "filter": ztf_filters[dp["fid"]],
                # JD -> MJD conversion
                "mjd": dp["jd"] - 2400000.5,
                "limiting_mag": dp["diffmaglim"],
            }
            if dp["magpsf"] is not None:
                content = {
                    "mag": dp["magpsf"],
                    "magerr": dp["sigmapsf"],
                    "ra": dp["ra"],
                    "dec": dp["dec"],
                }
            else:
                content = {k: None for k in ("mag", "magerr", "ra", "dec")}
            yield {**base, **content}
    def post_alert(self, alert: AmpelAlertProtocol):
        """Post an alert as a candidate, then upload its new photometry.

        If the candidate does not exist yet it is created from the latest
        datapoint; otherwise only datapoints after its last detection are
        uploaded.
        """
        # cribbed from https://github.com/dmitryduev/kowalski-dev/blob/882a7fa7e292676dd4864212efa696fb99668b4c/kowalski/alert_watcher_ztf.py#L801-L937
        after = -float("inf")
        if (candidate := self.get(f"/candidates/{alert.id}"))["status"] != "success":
            # Candidate unknown to SkyPortal: create it from the most recent
            # datapoint (datapoints[0]).
            candidate = alert.datapoints[0]
            alert_thin = {
                "id": alert.id,
                "ra": candidate.get("ra"),
                "dec": candidate.get("dec"),
                "score": candidate.get("drb", candidate.get("rb")),
                "passing_alert_id": candidate["candid"],
                "filter_ids": [self.source["filter"]],
            }
            self.post("/candidates", json=alert_thin, raise_exc=True)
        elif candidate["data"]["last_detected"]:
            # NOTE(review): assumes last_detected is an ISO timestamp parseable
            # by datetime.fromisoformat -- confirm against the SkyPortal API.
            after = Time(datetime.fromisoformat(candidate["data"]["last_detected"])).jd
        # post only if there are new photopoints
        if "mjd" in (photometry := self.make_photometry(alert, after=after)):
            response = self.post("/photometry", json=photometry, raise_exc=True)
| 36.139896 | 151 | 0.531326 | 5,571 | 0.79871 | 819 | 0.117419 | 0 | 0 | 0 | 0 | 2,064 | 0.295914 |
ffddb9df1f192b673556f7659d2310d13ba94e89 | 3,806 | py | Python | tools/test_detection_features_converter.py | jialinwu17/caption_vqa | 9bbbb580d031a20ba4f18ef14fcd3599b62a482a | [
"MIT"
] | 139 | 2018-03-21T09:39:39.000Z | 2021-07-07T14:19:26.000Z | tools/test_detection_features_converter.py | VincentYing/Attention-on-Attention-for-VQA | cbc767541667e9bb32760ac7cd2e822eff232ff5 | [
"MIT"
] | 4 | 2018-05-25T05:15:20.000Z | 2018-10-11T00:52:14.000Z | tools/test_detection_features_converter.py | VincentYing/Attention-on-Attention-for-VQA | cbc767541667e9bb32760ac7cd2e822eff232ff5 | [
"MIT"
] | 23 | 2018-03-22T10:12:35.000Z | 2021-02-20T06:18:00.000Z | """
Reads in a tsv file with pre-trained bottom up attention features and
stores it in HDF5 format. Also store {image_id: feature_idx}
as a pickle file.
Hierarchy of HDF5 file:
{ 'image_features': num_images x num_boxes x 2048 array of features
'image_bb': num_images x num_boxes x 4 array of bounding boxes }
"""
from __future__ import print_function
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import base64
import csv
import h5py
import cPickle
import numpy as np
import utils
csv.field_size_limit(sys.maxsize)
FIELDNAMES = ['image_id', 'image_w', 'image_h', 'num_boxes', 'boxes', 'features']
infile = 'data/test2015_36/test2015_resnet101_faster_rcnn_genome_36.tsv'
test_data_file = 'data/test36.hdf5'
test_indices_file = 'data/test36_imgid2idx.pkl'
test_ids_file = 'data/test_ids.pkl'
feature_length = 2048
num_fixed_boxes = 36
if __name__ == '__main__':
    # Output HDF5 file holding per-image features, bounding boxes and
    # spatial features for the test2015 split.
    h_test = h5py.File(test_data_file, "w")
    # Load (or build and cache) the set of expected test image ids.
    if os.path.exists(test_ids_file):
        test_imgids = cPickle.load(open(test_ids_file))
    else:
        test_imgids = utils.load_imageid('data/test2015')
        cPickle.dump(test_imgids, open(test_ids_file, 'wb'))
    # Maps image_id -> row index in the HDF5 datasets.
    test_indices = {}
    test_img_features = h_test.create_dataset(
        'image_features', (len(test_imgids), num_fixed_boxes, feature_length), 'f')
    test_img_bb = h_test.create_dataset(
        'image_bb', (len(test_imgids), num_fixed_boxes, 4), 'f')
    test_spatial_img_features = h_test.create_dataset(
        'spatial_features', (len(test_imgids), num_fixed_boxes, 6), 'f')
    test_counter = 0
    print("reading tsv...")
    # NOTE(review): this is Python-2-era code (cPickle, base64.decodestring);
    # on Python 3 decodestring/decodebytes expects bytes, hence the binary
    # "r+b" mode for the TSV.
    with open(infile, "r+b") as tsv_in_file:
        reader = csv.DictReader(tsv_in_file, delimiter='\t', fieldnames=FIELDNAMES)
        for item in reader:
            item['num_boxes'] = int(item['num_boxes'])
            image_id = int(item['image_id'])
            image_w = float(item['image_w'])
            image_h = float(item['image_h'])
            # Boxes come base64-encoded as float32 (num_boxes, 4) in
            # (x1, y1, x2, y2) pixel coordinates.
            bboxes = np.frombuffer(
                base64.decodestring(item['boxes']),
                dtype=np.float32).reshape((item['num_boxes'], -1))
            box_width = bboxes[:, 2] - bboxes[:, 0]
            box_height = bboxes[:, 3] - bboxes[:, 1]
            # Normalize box geometry by the image size.
            scaled_width = box_width / image_w
            scaled_height = box_height / image_h
            scaled_x = bboxes[:, 0] / image_w
            scaled_y = bboxes[:, 1] / image_h
            # Add a trailing axis so the pieces concatenate column-wise.
            box_width = box_width[..., np.newaxis]
            box_height = box_height[..., np.newaxis]
            scaled_width = scaled_width[..., np.newaxis]
            scaled_height = scaled_height[..., np.newaxis]
            scaled_x = scaled_x[..., np.newaxis]
            scaled_y = scaled_y[..., np.newaxis]
            # 6-dim spatial feature: normalized (x1, y1, x2, y2, w, h).
            spatial_features = np.concatenate(
                (scaled_x,
                 scaled_y,
                 scaled_x + scaled_width,
                 scaled_y + scaled_height,
                 scaled_width,
                 scaled_height),
                axis=1)
            if image_id in test_imgids:
                # Each id must appear exactly once; remove it so leftovers
                # can be detected after the loop.
                test_imgids.remove(image_id)
                test_indices[image_id] = test_counter
                test_img_bb[test_counter, :, :] = bboxes
                test_img_features[test_counter, :, :] = np.frombuffer(
                    base64.decodestring(item['features']),
                    dtype=np.float32).reshape((item['num_boxes'], -1))
                test_spatial_img_features[test_counter, :, :] = spatial_features
                test_counter += 1
            else:
                assert False, 'Unknown image id: %d' % image_id
    # Any ids left over were expected but missing from the TSV.
    if len(test_imgids) != 0:
        print('Warning: test_image_ids is not empty')
    cPickle.dump(test_indices, open(test_indices_file, 'wb'))
    h_test.close()
    print("done!")
| 34.6 | 83 | 0.618497 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 772 | 0.202838 |
ffde4731dad77ca75123679807fabb3875a76017 | 176 | py | Python | src/registration/urls.py | jtrussell/swindle | 914f9ddc7b155cf895fc233b9f3f0c1804bf23e3 | [
"MIT"
] | 1 | 2021-04-07T20:14:43.000Z | 2021-04-07T20:14:43.000Z | src/registration/urls.py | jtrussell/swindle | 914f9ddc7b155cf895fc233b9f3f0c1804bf23e3 | [
"MIT"
] | null | null | null | src/registration/urls.py | jtrussell/swindle | 914f9ddc7b155cf895fc233b9f3f0c1804bf23e3 | [
"MIT"
] | null | null | null | from . import views
from django.urls import path
# URL routes for the registration app; order matters for resolution.
urlpatterns = [
    path('', views.profile, name='profile'),
    path('sign-up', views.sign_up, name='show_sign_up_form')
]
| 17.6 | 60 | 0.681818 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 39 | 0.221591 |
ffde4e382f893654ea15768c8c27165eee09e3a4 | 3,720 | py | Python | src/Control/Sign.py | hieuhdh/Multi-tasking-program | 2f064a554f647247c84979b7a27f0797d1e1b5af | [
"MIT"
] | null | null | null | src/Control/Sign.py | hieuhdh/Multi-tasking-program | 2f064a554f647247c84979b7a27f0797d1e1b5af | [
"MIT"
] | null | null | null | src/Control/Sign.py | hieuhdh/Multi-tasking-program | 2f064a554f647247c84979b7a27f0797d1e1b5af | [
"MIT"
] | null | null | null | from tkinter.font import BOLD
from PIL import ImageTk
from tkinter import*
from PIL import Image
from tkinter import messagebox
from Tools.log_db import*
class Sign:
    """Sign-up form rendered inside *root*.

    Collects full name, username, password and password confirmation, and
    on submit appends a new account record to src/Documents/log_sign.txt
    (after checking the account does not already exist).
    """
    def __init__(self, root):
        self.root = root
        ## Init frame and button
        Frame_sign = Frame(self.root, bg="#120b26")
        Frame_sign.place(x = 300,y = 0, height = 540, width=660)
        # Keep a module-level reference to the image: tkinter does not hold
        # a strong reference, so a local would be garbage-collected and the
        # background would disappear.
        global image_default_signup
        image_default_signup = ImageTk.PhotoImage(file = 'images/interfaces/signup.png')
        logo_default = Label(Frame_sign, image = image_default_signup )
        logo_default.place( x = 0, y = 0, relheight = 1, relwidth = 1 )
        # Input fields: full name, username, password, password confirmation.
        self.txt_name = Entry(Frame_sign, font=("Times New Roman",15), fg = "#8078c4", bg = "#120b26", cursor="hand2", bd = 0, width = 10)
        self.txt_name.place(x = 180, y = 175, height= 34, width= 326)
        self.txt_username = Entry(Frame_sign, font=("Times New Roman", 15), fg = "#8078c4", bg = "#120b26", cursor = "hand2", bd = 0)
        self.txt_username.place(x = 180, y = 248, height= 34, width= 326)
        self.txt_password = Entry(Frame_sign, font=("Times New Roman",15), fg = "#8078c4",bg = "#120b26", cursor = "hand2", show = "*", bd = 0, highlightbackground = "#b0bde0")
        self.txt_password.place(x = 180, y = 321, height= 34, width= 326)
        self.txt_password_comfirm = Entry(Frame_sign, font = ("Times New Roman",15), fg = "#8078c4",bg = "#120b26", cursor = "hand2", show = "*", bd = 0)
        self.txt_password_comfirm.place(x = 180, y = 394, height= 34, width= 326)
        ## Make sign in button
        self.sign_btn = Button(Frame_sign, activebackground="#823af7", activeforeground="white",command=self.sign, text = "Submit", font = ("Times New Roman",12,"bold"), fg = "#211c49", bg = "#823af7", relief = "flat", cursor = "hand2", borderwidth = 0, width = 38)
        self.sign_btn.place(x = 156, y = 470)
    ## Action for Sign in
    def sign(self):
        """Validate the form and register the account.

        Shows an error dialog when a field is empty, when the two password
        fields differ, or when the account already exists; otherwise appends
        the record to the log file and confirms success.
        """
        # BUGFIX: compare the *value* of the confirm field -- the original
        # compared the Entry widget itself to "", which is always truthy.
        if self.txt_name.get() != "" and self.txt_username.get() != "" and self.txt_password.get() != "" and self.txt_password_comfirm.get() != "":
            if self.txt_password.get() != self.txt_password_comfirm.get():
                messagebox.showerror("Error","Your password didn't get match!", parent = self.root)
            else:
                ## Register the account only when it is not already in the database
                username = self.txt_username.get()
                password = encode(self.txt_password.get())
                arr = [username, password]
                if not checkDB_Sign(arr):
                    # Context manager guarantees the log file is closed even
                    # if showing the dialog raises.
                    with open("src/Documents/log_sign.txt", "a", encoding="utf-8") as file:
                        file.write(f"name-username-password: {self.txt_name.get()}; {username}; {password}\n")
                        messagebox.showinfo("Welcome","You are registered successfully!", parent = self.root)
                else:
                    messagebox.showerror("Error","Account already exists!", parent = self.root)
        else:
            # Report the first empty field, top to bottom.
            if self.txt_name.get() == "":
                messagebox.showerror("Error","Please, enter your full name!", parent = self.root)
            elif self.txt_username.get() == "":
                messagebox.showerror("Error","Please, enter your username!", parent = self.root)
            elif self.txt_password.get() == "":
                messagebox.showerror("Error","Please, enter your password!", parent = self.root)
            elif self.txt_password_comfirm.get() == "":
                messagebox.showerror("Error","Please, enter your password comfirm!", parent = self.root)
ffdf3cdd0117fb616bc6eff58d4c3d502c8bf807 | 6,301 | py | Python | aydin/it/classic_denoisers/bilateral.py | AhmetCanSolak/aydin | e8bc81ee88c96e0f34986df30a63c96468a45f70 | [
"BSD-3-Clause"
] | 78 | 2021-11-08T16:11:23.000Z | 2022-03-27T17:51:04.000Z | aydin/it/classic_denoisers/bilateral.py | AhmetCanSolak/aydin | e8bc81ee88c96e0f34986df30a63c96468a45f70 | [
"BSD-3-Clause"
] | 19 | 2021-11-08T17:15:40.000Z | 2022-03-30T17:46:55.000Z | aydin/it/classic_denoisers/bilateral.py | AhmetCanSolak/aydin | e8bc81ee88c96e0f34986df30a63c96468a45f70 | [
"BSD-3-Clause"
] | 7 | 2021-11-09T17:42:32.000Z | 2022-03-09T00:37:57.000Z | from functools import partial
from typing import Optional, List, Tuple
import numpy
from numpy.typing import ArrayLike
from skimage.restoration import denoise_bilateral as skimage_denoise_bilateral
from aydin.it.classic_denoisers import _defaults
from aydin.util.crop.rep_crop import representative_crop
from aydin.util.denoise_nd.denoise_nd import extend_nd
from aydin.util.j_invariance.j_invariance import calibrate_denoiser
def calibrate_denoise_bilateral(
    image: ArrayLike,
    bins: int = 10000,
    crop_size_in_voxels: Optional[int] = _defaults.default_crop_size_normal.value,
    optimiser: str = _defaults.default_optimiser.value,
    max_num_evaluations: int = _defaults.default_max_evals_normal.value,
    blind_spots: Optional[List[Tuple[int]]] = _defaults.default_blind_spots.value,
    jinv_interpolation_mode: str = _defaults.default_jinv_interpolation_mode.value,
    display_images: bool = False,
    display_crop: bool = False,
    **other_fixed_parameters,
):
    """
    Calibrates the bilateral denoiser for the given image and returns the optimal
    parameters obtained using the N2S loss.

    Note: it seems that the bilateral filter of scikit-image
    is broken!

    Parameters
    ----------
    image: ArrayLike
        Image to calibrate denoiser for.

    bins: int
        Number of discrete values for Gaussian weights of
        color filtering. A larger value results in improved
        accuracy.
        (advanced)

    crop_size_in_voxels: int or None for default
        Number of voxels for crop used to calibrate denoiser.
        Increase this number by factors of two if denoising quality is
        unsatisfactory -- this can be important for very noisy images.
        Values to try are: 65000, 128000, 256000, 320000.
        We do not recommend values higher than 512000.

    optimiser: str
        Optimiser to use for finding the best denoising
        parameters. Can be: 'smart' (default), or 'fast' for a mix of SHGO
        followed by L-BFGS-B.
        (advanced)

    max_num_evaluations: int
        Maximum number of evaluations for finding the optimal parameters.
        Increase this number by factors of two if denoising quality is
        unsatisfactory.

    blind_spots: Optional[List[Tuple[int]]]
        List of voxel coordinates (relative to receptive field center) to
        be included in the blind-spot. For example, you can give a list of
        3 tuples: [(0,0,0), (0,1,0), (0,-1,0)] to extend the blind spot
        to cover voxels of relative coordinates: (0,0,0),(0,1,0), and (0,-1,0)
        (advanced) (hidden)

    jinv_interpolation_mode: str
        J-invariance interpolation mode for masking. Can be: 'median' or
        'gaussian'.
        (advanced)

    display_images: bool
        When True the denoised images encountered during
        optimisation are shown
        (advanced) (hidden)

    display_crop: bool
        Displays crop, for debugging purposes...
        (advanced) (hidden)

    other_fixed_parameters: dict
        Any other fixed parameters

    Returns
    -------
    Denoising function, dictionary containing optimal parameters,
    and free memory needed in bytes for computation.
    """
    # Convert image to float if needed:
    image = image.astype(dtype=numpy.float32, copy=False)
    # obtain representative crop, to speed things up...
    crop = representative_crop(
        image, crop_size=crop_size_in_voxels, display_crop=display_crop
    )
    # Parameters to test when calibrating the denoising algorithm
    # (both sigmas are searched over (0.01, 1]):
    parameter_ranges = {'sigma_spatial': (0.01, 1), 'sigma_color': (0.01, 1)}
    # Combine fixed parameters ('bins' is fixed, not optimised):
    other_fixed_parameters = other_fixed_parameters | {'bins': bins}
    # Partial function with the fixed parameters baked in:
    _denoise_bilateral = partial(denoise_bilateral, **other_fixed_parameters)
    # Calibrate denoiser; merge the optimal free parameters with the fixed
    # ones so the returned dict can be passed straight to denoise_bilateral:
    best_parameters = (
        calibrate_denoiser(
            crop,
            _denoise_bilateral,
            mode=optimiser,
            denoise_parameters=parameter_ranges,
            interpolation_mode=jinv_interpolation_mode,
            max_num_evaluations=max_num_evaluations,
            blind_spots=blind_spots,
            display_images=display_images,
        )
        | other_fixed_parameters
    )
    # Memory needed: input plus one same-sized output buffer.
    memory_needed = 2 * image.nbytes
    return denoise_bilateral, best_parameters, memory_needed
def denoise_bilateral(
    image: ArrayLike,
    sigma_color: Optional[float] = None,
    sigma_spatial: float = 1,
    bins: int = 10000,
    **kwargs,
):
    """
    Denoise an image with an edge-preserving bilateral filter.

    Each pixel is replaced by a weighted average of nearby pixels, where the
    weights fall off both with spatial distance and with difference in
    intensity -- smoothing noise while keeping edges sharp.

    Parameters
    ----------
    image : ArrayLike
        Image to denoise.
    sigma_color : float
        Standard deviation of the intensity (radiometric) kernel, expressed
        relative to the ``[0, 1]`` range the image is converted to. Larger
        values average pixels with larger intensity differences. When
        ``None``, the standard deviation of ``image`` is used.
    sigma_spatial : float
        Standard deviation of the spatial kernel; larger values average
        pixels that are further apart.
    bins : int
        Number of discrete values used for the Gaussian colour weights;
        more bins gives better accuracy.
    kwargs : dict
        Extra keyword arguments forwarded to the underlying filter.

    Returns
    -------
    Denoised image
    """
    # Work in single precision, avoiding a copy when already float32:
    image = image.astype(dtype=numpy.float32, copy=False)
    # Lift scikit-image's 2D-only bilateral filter to n dimensions:
    filter_nd = extend_nd(available_dims=[2])(skimage_denoise_bilateral)
    return filter_nd(
        image,
        sigma_color=sigma_color,
        sigma_spatial=sigma_spatial,
        bins=bins,
        mode='reflect',
        **kwargs,
    )
| 32.989529 | 83 | 0.690525 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,878 | 0.615458 |
ffe13b312ebb3748c1aadfdca895d3557dc9d9a9 | 1,889 | py | Python | pymon/pymon.py | crest42/PyMon | 96494cc37f906e6a07388af29b04c559ec72f116 | [
"MIT"
] | null | null | null | pymon/pymon.py | crest42/PyMon | 96494cc37f906e6a07388af29b04c559ec72f116 | [
"MIT"
] | null | null | null | pymon/pymon.py | crest42/PyMon | 96494cc37f906e6a07388af29b04c559ec72f116 | [
"MIT"
] | null | null | null | import logging
import time
from .exceptions import HostEntryNotValid
from .check import CheckFactory
from .alert import AlertFactory
from .host import Host
from .logging import logger
class PyMon:
    """Monitoring driver.

    Builds Host, Check and Alert objects from configuration lists; ``run``
    executes every host's checks once and dispatches alerts for non-empty
    result sets.
    """
    def __init__(self, host_list, check_list, alert_list, daemonize=False):
        """
        :param host_list: list of host config dicts; each needs a 'name' key
        :param check_list: list of check configuration dicts
        :param alert_list: list of alert configuration dicts
        :param daemonize: when True, immediately enter the (blocking) run loop
        :raises HostEntryNotValid: if a host entry lacks a 'name' key
        """
        self.hosts = {}
        self.checks = []
        self.alerts = []
        self.logger = logger
        for host in host_list:
            if 'name' not in host:
                raise HostEntryNotValid(host)
            name = host['name']
            self.hosts[name] = Host(name, host)
        for check in check_list:
            self.checks.append(CheckFactory(check).create())
            self.add_check(self.checks[-1])
        for alert in alert_list:
            self.alerts.append(AlertFactory(alert).create())
        if daemonize:
            self.runloop()
    def runloop(self):
        """Run all checks forever, once per second. Never returns."""
        run = 0
        while True:
            # Lazy %-style args avoid formatting when the level is disabled.
            self.logger.info("Start Run %d", run)
            self.run()
            run += 1
            time.sleep(1)
    def add_check(self, check):
        """Attach *check* to every host it declares.

        Unknown hosts are logged and skipped rather than aborting the
        remaining hosts of the check.
        """
        for host in check.hosts:
            try:
                self.add_check_to_host(host, check)
            except HostEntryNotValid:
                # logger.warning: warn() is a deprecated alias.
                self.logger.warning("Host entry %s unknown", host)
    def add_check_to_host(self, check_host, check):
        """Attach *check* to the host named *check_host*.

        :raises HostEntryNotValid: if no such host is configured
        """
        if check_host not in self.hosts:
            raise HostEntryNotValid(check_host)
        self.hosts[check_host].add(check)
    def print_hosts(self):
        """Print a human-readable list of the configured hosts."""
        print("Hostlist:")
        for host in self.hosts.values():
            print(host)
        print()
    def run(self):
        """Execute every host's checks once; alert on non-empty results."""
        for host in self.hosts.values():
            result = host.run()
            if result is not None and len(result['RESULTS'].list) > 0:
                for alert in self.alerts:
                    alert.send(result)
| 28.19403 | 75 | 0.564849 | 1,703 | 0.901535 | 0 | 0 | 0 | 0 | 0 | 0 | 84 | 0.044468 |
ffe516953bedc8e02aa7624b4a14d347ba8dad15 | 52,397 | py | Python | ambulance/tests/test_calls.py | aschrist/WebServerAndClient | 3aa0af2c444acac88a1b51b4cfd4bb8d0c36e640 | [
"BSD-3-Clause"
] | null | null | null | ambulance/tests/test_calls.py | aschrist/WebServerAndClient | 3aa0af2c444acac88a1b51b4cfd4bb8d0c36e640 | [
"BSD-3-Clause"
] | null | null | null | ambulance/tests/test_calls.py | aschrist/WebServerAndClient | 3aa0af2c444acac88a1b51b4cfd4bb8d0c36e640 | [
"BSD-3-Clause"
] | null | null | null | import logging
import time
from django.test import Client
from django.conf import settings
from django.urls import reverse
from django.db import IntegrityError
from django.utils import timezone
from rest_framework.parsers import JSONParser
from rest_framework import serializers
from io import BytesIO
import json
from ambulance.models import Call, Patient, AmbulanceCall, CallStatus, CallPriority, \
AmbulanceUpdate, AmbulanceStatus, Waypoint, Location, LocationType, WaypointStatus, AmbulanceCallStatus
from ambulance.serializers import CallSerializer, AmbulanceCallSerializer, PatientSerializer, \
AmbulanceUpdateSerializer, WaypointSerializer, LocationSerializer
from emstrack.tests.util import date2iso, point2str
from login.tests.setup_data import TestSetup
logger = logging.getLogger(__name__)
class TestCall(TestSetup):
    def test_patient_serializer(self):
        """PatientSerializer round-trip: serialize defaults, then create
        patients via deserialization with and without an 'age' field."""
        # Parent call the patients will be attached to
        c1 = Call.objects.create(updated_by=self.u1)
        # serialization: a bare Patient has empty name and no age
        p1 = Patient.objects.create(call=c1)
        serializer = PatientSerializer(p1)
        result = {
            'id': p1.id,
            'name': '',
            'age': None
        }
        self.assertDictEqual(serializer.data, result)
        # deserialization: name and age both provided
        data = {
            'name': 'Jose',
            'age': 3
        }
        serializer = PatientSerializer(data=data)
        self.assertTrue(serializer.is_valid())
        serializer.save(call_id=c1.id)
        p1 = Patient.objects.get(name='Jose')
        serializer = PatientSerializer(p1)
        result = {
            'id': p1.id,
            'name': 'Jose',
            'age': 3
        }
        self.assertDictEqual(serializer.data, result)
        # deserialization: age omitted, defaults to None
        data = {
            'name': 'Maria',
        }
        serializer = PatientSerializer(data=data)
        self.assertTrue(serializer.is_valid())
        serializer.save(call_id=c1.id)
        p1 = Patient.objects.get(name='Maria')
        serializer = PatientSerializer(p1)
        result = {
            'id': p1.id,
            'name': 'Maria',
            'age': None
        }
        self.assertDictEqual(serializer.data, result)
    def test_location_serializer(self):
        """LocationSerializer output for a default incident location and for
        a hospital location with explicit number/street."""
        # default location of type 'incident'
        wpl_1 = Location.objects.create(type=LocationType.i.name, updated_by=self.u1)
        serializer = LocationSerializer(wpl_1)
        result = {
            'id': wpl_1.id,
            'type': LocationType.i.name,
            'location': point2str(wpl_1.location),
            'number': wpl_1.number,
            'street': wpl_1.street,
            'unit': wpl_1.unit,
            'neighborhood': wpl_1.neighborhood,
            'city': wpl_1.city,
            'state': wpl_1.state,
            'zipcode': wpl_1.zipcode,
            'country': wpl_1.country,
            'name': wpl_1.name,
            'comment': wpl_1.comment,
            'updated_by': wpl_1.updated_by.id,
            'updated_on': date2iso(wpl_1.updated_on)
        }
        self.assertDictEqual(serializer.data, result)
        # 'hospital' location with number and street set explicitly
        wpl_2 = Location.objects.create(type=LocationType.h.name, number='123', street='adsasd', updated_by=self.u1)
        serializer = LocationSerializer(wpl_2)
        result = {
            'id': wpl_2.id,
            'type': LocationType.h.name,
            'location': point2str(wpl_2.location),
            'number': '123',
            'street': 'adsasd',
            'unit': wpl_2.unit,
            'neighborhood': wpl_2.neighborhood,
            'city': wpl_2.city,
            'state': wpl_2.state,
            'zipcode': wpl_2.zipcode,
            'country': wpl_2.country,
            'name': wpl_2.name,
            'comment': wpl_2.comment,
            'updated_by': wpl_2.updated_by.id,
            'updated_on': date2iso(wpl_2.updated_on)
        }
        self.assertDictEqual(serializer.data, result)
    def test_waypoint_serializer(self):
        """Serialize existing waypoints and check both the waypoint fields and
        the nested location representation produced by WaypointSerializer."""
        # create call
        c_1 = Call.objects.create(updated_by=self.u1)
        # create ambulance call
        ac_1 = AmbulanceCall.objects.create(call=c_1, ambulance=self.a1, updated_by=self.u1)
        # serialization
        # waypoint 1: bare incident location, first stop (order 0), status "created"
        wpl_1 = Location.objects.create(type=LocationType.i.name, updated_by=self.u1)
        wpl_1_serializer = LocationSerializer(wpl_1)
        wp_1 = Waypoint.objects.create(ambulance_call=ac_1, order=0, status=WaypointStatus.C.name,
                                       location=wpl_1, updated_by=self.u1)
        serializer = WaypointSerializer(wp_1)
        result = {
            'id': wp_1.id,
            'ambulance_call_id': ac_1.id,
            'order': 0,
            'status': WaypointStatus.C.name,
            'location': wpl_1_serializer.data,
            'comment': wp_1.comment,
            'updated_by': wp_1.updated_by.id,
            'updated_on': date2iso(wp_1.updated_on)
        }
        self.assertDictEqual(serializer.data, result)
        # the nested 'location' entry must match LocationSerializer field-by-field
        result = {
            'id': wpl_1.id,
            'type': LocationType.i.name,
            'location': point2str(wpl_1.location),
            'number': wpl_1.number,
            'street': wpl_1.street,
            'unit': wpl_1.unit,
            'neighborhood': wpl_1.neighborhood,
            'city': wpl_1.city,
            'state': wpl_1.state,
            'zipcode': wpl_1.zipcode,
            'country': wpl_1.country,
            'name': wpl_1.name,
            'comment': wpl_1.comment,
            'updated_by': wpl_1.updated_by.id,
            'updated_on': date2iso(wpl_1.updated_on)
        }
        self.assertDictEqual(serializer.data['location'], result)
        # serialization
        # waypoint 2: hospital-type location with an address, second stop, status "done"
        wpl_2 = Location.objects.create(type=LocationType.h.name, number='123', street='adsasd', updated_by=self.u1)
        wpl_2_serializer = LocationSerializer(wpl_2)
        wp_2 = Waypoint.objects.create(ambulance_call=ac_1, order=1, status=WaypointStatus.D.name,
                                       location=wpl_2, updated_by=self.u1)
        serializer = WaypointSerializer(wp_2)
        result = {
            'id': wp_2.id,
            'ambulance_call_id': ac_1.id,
            'order': 1,
            'status': WaypointStatus.D.name,
            'location': wpl_2_serializer.data,
            'comment': wp_2.comment,
            'updated_by': wp_2.updated_by.id,
            'updated_on': date2iso(wp_2.updated_on)
        }
        self.assertDictEqual(serializer.data, result)
        # nested location must echo the literal address values used at creation
        result = {
            'id': wpl_2.id,
            'type': LocationType.h.name,
            'location': point2str(wpl_2.location),
            'number': '123',
            'street': 'adsasd',
            'unit': wpl_2.unit,
            'neighborhood': wpl_2.neighborhood,
            'city': wpl_2.city,
            'state': wpl_2.state,
            'zipcode': wpl_2.zipcode,
            'country': wpl_2.country,
            'name': wpl_2.name,
            'comment': wpl_2.comment,
            'updated_by': wpl_2.updated_by.id,
            'updated_on': date2iso(wpl_2.updated_on)
        }
        self.assertDictEqual(serializer.data['location'], result)
    def test_waypoint_serializer_create(self):
        """Create waypoints through WaypointSerializer (deserialization path).

        Covers: creating a fresh incident location inline, attaching an
        existing hospital location by id, and the two failure modes —
        hospital-type location without an id, and missing location entirely.
        """
        # create call
        c_1 = Call.objects.create(updated_by=self.u1)
        # create ambulance call
        ac_1 = AmbulanceCall.objects.create(call=c_1, ambulance=self.a1, updated_by=self.u1)
        # serialization
        # incident waypoint: the serializer should create the Location on save
        data = {
            'order': 0,
            'status': WaypointStatus.C.name,
            'location': {
                'type': LocationType.i.name
            }
        }
        serializer = WaypointSerializer(data=data)
        serializer.is_valid()
        wp_1 = serializer.save(updated_by=self.u1, ambulance_call_id=ac_1.id)
        wpl_1 = wp_1.location
        wpl_1_serializer = LocationSerializer(wpl_1)
        serializer = WaypointSerializer(wp_1)
        result = {
            'id': wp_1.id,
            'ambulance_call_id': ac_1.id,
            'order': 0,
            'status': WaypointStatus.C.name,
            'location': wpl_1_serializer.data,
            'comment': wp_1.comment,
            'updated_by': wp_1.updated_by.id,
            'updated_on': date2iso(wp_1.updated_on)
        }
        self.assertDictEqual(serializer.data, result)
        # nested location must match LocationSerializer output field-by-field
        result = {
            'id': wpl_1.id,
            'type': LocationType.i.name,
            'location': point2str(wpl_1.location),
            'number': wpl_1.number,
            'street': wpl_1.street,
            'unit': wpl_1.unit,
            'neighborhood': wpl_1.neighborhood,
            'city': wpl_1.city,
            'state': wpl_1.state,
            'zipcode': wpl_1.zipcode,
            'country': wpl_1.country,
            'name': wpl_1.name,
            'comment': wpl_1.comment,
            'updated_by': wpl_1.updated_by.id,
            'updated_on': date2iso(wpl_1.updated_on)
        }
        self.assertDictEqual(serializer.data['location'], result)
        # serialization
        # hospital waypoint: references the existing hospital h1 by id
        data = {
            'order': 1,
            'status': WaypointStatus.V.name,
            'location': {
                'id': self.h1.id,
                'type': LocationType.h.name
            }
        }
        serializer = WaypointSerializer(data=data)
        serializer.is_valid()
        wp_2 = serializer.save(updated_by=self.u1,
                               ambulance_call_id=ac_1.id, publish=True)
        # the waypoint must point at the hospital's underlying Location row
        wpl_2 = self.h1.location_ptr
        wpl_2_serializer = LocationSerializer(wpl_2)
        logger.debug(wpl_2_serializer.data)
        serializer = WaypointSerializer(wp_2)
        logger.debug(serializer.data['location'])
        result = {
            'id': wp_2.id,
            'ambulance_call_id': ac_1.id,
            'order': 1,
            'status': WaypointStatus.V.name,
            'location': wpl_2_serializer.data,
            'comment': wp_2.comment,
            'updated_by': wp_2.updated_by.id,
            'updated_on': date2iso(wp_2.updated_on)
        }
        # show full diffs on failure for these large dictionaries
        self.maxDiff = None
        self.assertDictEqual(serializer.data, result)
        result = {
            'id': wpl_2.id,
            'type': LocationType.h.name,
            'location': point2str(wpl_2.location),
            'number': wpl_2.number,
            'street': wpl_2.street,
            'unit': wpl_2.unit,
            'neighborhood': wpl_2.neighborhood,
            'city': wpl_2.city,
            'state': wpl_2.state,
            'zipcode': wpl_2.zipcode,
            'country': wpl_2.country,
            'name': wpl_2.name,
            'comment': wpl_2.comment,
            'updated_by': wpl_2.updated_by.id,
            'updated_on': date2iso(wpl_2.updated_on)
        }
        self.assertDictEqual(serializer.data['location'], result)
        # try to create hospital waypoint
        # hospital-type locations require an id; saving without one must fail
        data = {
            'order': 1,
            'status': WaypointStatus.V.name,
            'location': {
                'type': LocationType.h.name
            }
        }
        serializer = WaypointSerializer(data=data)
        serializer.is_valid()
        self.assertRaises(serializers.ValidationError, serializer.save, updated_by=self.u1, ambulance_call_id=ac_1.id)
        # try to create waypoint without location
        data = {
            'order': 1,
            'status': WaypointStatus.V.name,
        }
        serializer = WaypointSerializer(data=data)
        serializer.is_valid()
        self.assertRaises(serializers.ValidationError, serializer.save, updated_by=self.u1, ambulance_call_id=ac_1.id)
def test_waypoint_serializer_update(self):
# create call
c_1 = Call.objects.create(updated_by=self.u1)
# create ambulance call
ac_1 = AmbulanceCall.objects.create(call=c_1, ambulance=self.a1, updated_by=self.u1)
# waypoint creation
wpl_1 = Location.objects.create(type=LocationType.i.name, updated_by=self.u1)
wp_1 = Waypoint.objects.create(ambulance_call=ac_1, order=0, status=WaypointStatus.C.name,
location=wpl_1, updated_by=self.u1)
wpl_2 = Location.objects.create(type=LocationType.w.name, number='123', street='adsasd', updated_by=self.u1)
wp_2 = Waypoint.objects.create(ambulance_call=ac_1, order=1, status=WaypointStatus.D.name,
location=wpl_2, updated_by=self.u1)
wpl_3 = self.h1.location_ptr
wp_3 = Waypoint.objects.create(ambulance_call=ac_1, order=1, status=WaypointStatus.V.name,
location=wpl_3, updated_by=self.u1)
wpl_1_serializer = LocationSerializer(wpl_1)
serializer = WaypointSerializer(wp_1)
result = {
'id': wp_1.id,
'ambulance_call_id': ac_1.id,
'order': 0,
'status': WaypointStatus.C.name,
'location': wpl_1_serializer.data,
'comment': wp_1.comment,
'updated_by': wp_1.updated_by.id,
'updated_on': date2iso(wp_1.updated_on)
}
self.assertDictEqual(serializer.data, result)
result = {
'id': wpl_1.id,
'type': LocationType.i.name,
'location': point2str(wpl_1.location),
'number': wpl_1.number,
'street': wpl_1.street,
'unit': wpl_1.unit,
'neighborhood': wpl_1.neighborhood,
'city': wpl_1.city,
'state': wpl_1.state,
'zipcode': wpl_1.zipcode,
'country': wpl_1.country,
'name': wpl_1.name,
'comment': wpl_1.comment,
'updated_by': wpl_1.updated_by.id,
'updated_on': date2iso(wpl_1.updated_on)
}
self.assertDictEqual(serializer.data['location'], result)
# update waypoint 1
data = {
'order': 1,
'status': WaypointStatus.V.name
}
serializer = WaypointSerializer(wp_1, data=data)
serializer.is_valid()
logger.debug(serializer.errors)
wp_1 = serializer.save(updated_by=self.u1)
wpl_1_serializer = LocationSerializer(wpl_1)
serializer = WaypointSerializer(wp_1)
result = {
'id': wp_1.id,
'ambulance_call_id': ac_1.id,
'order': 1,
'status': WaypointStatus.V.name,
'location': wpl_1_serializer.data,
'comment': wp_1.comment,
'updated_by': wp_1.updated_by.id,
'updated_on': date2iso(wp_1.updated_on)
}
self.assertDictEqual(serializer.data, result)
result = {
'id': wpl_1.id,
'type': LocationType.i.name,
'location': point2str(wpl_1.location),
'number': wpl_1.number,
'street': wpl_1.street,
'unit': wpl_1.unit,
'neighborhood': wpl_1.neighborhood,
'city': wpl_1.city,
'state': wpl_1.state,
'zipcode': wpl_1.zipcode,
'country': wpl_1.country,
'name': wpl_1.name,
'comment': wpl_1.comment,
'updated_by': wpl_1.updated_by.id,
'updated_on': date2iso(wpl_1.updated_on)
}
self.assertDictEqual(serializer.data['location'], result)
# update waypoint 2
data = {
'order': 2,
'status': WaypointStatus.C.name
}
serializer = WaypointSerializer(wp_2, data=data)
serializer.is_valid()
logger.debug(serializer.errors)
wp_2 = serializer.save(updated_by=self.u1)
wpl_2_serializer = LocationSerializer(wpl_2)
serializer = WaypointSerializer(wp_2)
result = {
'id': wp_2.id,
'ambulance_call_id': ac_1.id,
'order': 2,
'status': WaypointStatus.C.name,
'location': wpl_2_serializer.data,
'comment': wp_2.comment,
'updated_by': wp_2.updated_by.id,
'updated_on': date2iso(wp_2.updated_on)
}
self.assertDictEqual(serializer.data, result)
result = {
'id': wpl_2.id,
'type': LocationType.w.name,
'location': point2str(wpl_2.location),
'number': wpl_2.number,
'street': wpl_2.street,
'unit': wpl_2.unit,
'neighborhood': wpl_2.neighborhood,
'city': wpl_2.city,
'state': wpl_2.state,
'zipcode': wpl_2.zipcode,
'country': wpl_2.country,
'name': wpl_2.name,
'comment': wpl_2.comment,
'updated_by': wpl_2.updated_by.id,
'updated_on': date2iso(wpl_2.updated_on)
}
self.assertDictEqual(dict(serializer.data['location']), result)
# update waypoint 3
data = {
'order': 2,
'status': WaypointStatus.C.name,
'location': {
'id': 20,
'type': LocationType.h.name
}
}
serializer = WaypointSerializer(wp_3, data=data)
serializer.is_valid()
logger.debug(serializer.errors)
self.assertRaises(serializers.ValidationError, serializer.save, updated_by=self.u1)
def test_call_serializer(self):
# create call
c1 = Call.objects.create(updated_by=self.u1)
# it is fine to have no ambulances because it is pending
serializer = CallSerializer(c1)
expected = {
'id': c1.id,
'status': c1.status,
'details': c1.details,
'priority': c1.priority,
'created_at': date2iso(c1.created_at),
'pending_at': date2iso(c1.pending_at),
'started_at': date2iso(c1.started_at),
'ended_at': date2iso(c1.ended_at),
'comment': c1.comment,
'updated_by': c1.updated_by.id,
'updated_on': date2iso(c1.updated_on),
'ambulancecall_set': [],
'patient_set': []
}
self.assertDictEqual(serializer.data, expected)
# create first ambulance call
ambulance_call_1 = AmbulanceCall.objects.create(call=c1, ambulance=self.a1, updated_by=self.u1)
ambulance_call = ambulance_call_1
serializer = AmbulanceCallSerializer(ambulance_call)
expected = {
'id': ambulance_call.id,
'ambulance_id': ambulance_call.ambulance.id,
'comment': ambulance_call.comment,
'updated_by': ambulance_call.updated_by.id,
'updated_on': date2iso(ambulance_call.updated_on),
'status': ambulance_call.status,
'waypoint_set': []
}
self.assertDictEqual(serializer.data, expected)
serializer = CallSerializer(c1)
ambulance_call_serializer_1 = AmbulanceCallSerializer(ambulance_call_1)
expected = {
'id': c1.id,
'status': c1.status,
'details': c1.details,
'priority': c1.priority,
'created_at': date2iso(c1.created_at),
'pending_at': date2iso(c1.pending_at),
'started_at': date2iso(c1.started_at),
'ended_at': date2iso(c1.ended_at),
'comment': c1.comment,
'updated_by': c1.updated_by.id,
'updated_on': date2iso(c1.updated_on),
'ambulancecall_set': [],
'patient_set': []
}
self.assertCountEqual(serializer.data['ambulancecall_set'], [ambulance_call_serializer_1.data])
result = serializer.data
result['ambulancecall_set'] = []
self.assertDictEqual(result, expected)
# set accepted
ambulance_call_1.status = AmbulanceCallStatus.A.name
ambulance_call_1.save()
ambulance_call_serializer_1 = AmbulanceCallSerializer(ambulance_call_1)
expected = {
'id': ambulance_call.id,
'ambulance_id': ambulance_call.ambulance.id,
'comment': ambulance_call.comment,
'updated_by': ambulance_call.updated_by.id,
'updated_on': date2iso(ambulance_call.updated_on),
'status': AmbulanceCallStatus.A.name,
'waypoint_set': []
}
self.assertDictEqual(ambulance_call_serializer_1.data, expected)
# create second ambulance call
ambulance_call_2 = AmbulanceCall.objects.create(call=c1, ambulance=self.a3, updated_by=self.u1)
ambulance_call = ambulance_call_2
serializer = AmbulanceCallSerializer(ambulance_call)
expected = {
'id': ambulance_call.id,
'ambulance_id': ambulance_call.ambulance.id,
'comment': ambulance_call.comment,
'updated_by': ambulance_call.updated_by.id,
'updated_on': date2iso(ambulance_call.updated_on),
'status': ambulance_call.status,
'waypoint_set': []
}
self.assertDictEqual(serializer.data, expected)
serializer = CallSerializer(c1)
ambulance_call_serializer_2 = AmbulanceCallSerializer(ambulance_call_2)
expected = {
'id': c1.id,
'status': c1.status,
'details': c1.details,
'priority': c1.priority,
'created_at': date2iso(c1.created_at),
'pending_at': date2iso(c1.pending_at),
'started_at': date2iso(c1.started_at),
'ended_at': date2iso(c1.ended_at),
'comment': c1.comment,
'updated_by': c1.updated_by.id,
'updated_on': date2iso(c1.updated_on),
'ambulancecall_set': [],
'patient_set': []
}
self.assertCountEqual(serializer.data['ambulancecall_set'],
[ambulance_call_serializer_2.data, ambulance_call_serializer_1.data])
result = serializer.data
result['ambulancecall_set'] = []
self.assertDictEqual(result, expected)
# set accepted
ambulance_call_2.status = AmbulanceCallStatus.A.name
ambulance_call_2.save()
ambulance_call_serializer_2 = AmbulanceCallSerializer(ambulance_call_2)
expected = {
'id': ambulance_call.id,
'ambulance_id': ambulance_call.ambulance.id,
'comment': ambulance_call.comment,
'updated_by': ambulance_call.updated_by.id,
'updated_on': date2iso(ambulance_call.updated_on),
'status': AmbulanceCallStatus.A.name,
'waypoint_set': []
}
self.assertDictEqual(ambulance_call_serializer_2.data, expected)
# Add waypoints to ambulancecalls
wpl_1 = Location.objects.create(type=LocationType.i.name, updated_by=self.u1)
wp_1 = Waypoint.objects.create(ambulance_call=ambulance_call_1, order=0, status=WaypointStatus.C.name,
location=wpl_1, updated_by=self.u1)
wpl_2 = Location.objects.create(type=LocationType.h.name, number='123', street='adsasd', updated_by=self.u2)
wp_2 = Waypoint.objects.create(ambulance_call=ambulance_call_2, order=1, status=WaypointStatus.D.name,
location=wpl_2, updated_by=self.u2)
wp_3 = Waypoint.objects.create(ambulance_call=ambulance_call_2, order=2, status=WaypointStatus.V.name,
location=self.h1, updated_by=self.u2)
# create ambulance update to use in event
self.a1.status = AmbulanceStatus.PB.name
self.a1.timestamp = timezone.now()
self.a1.save()
ambulance_update_1 = AmbulanceUpdate.objects.get(status=AmbulanceStatus.PB.name)
# set suspended
ambulance_call_1.status = AmbulanceCallStatus.S.name
ambulance_call_1.save()
self.a1.status = AmbulanceStatus.AP.name
self.a1.timestamp = timezone.now()
self.a1.save()
ambulance_update_2 = AmbulanceUpdate.objects.get(status=AmbulanceStatus.AP.name)
# set accepted
ambulance_call_1.status = AmbulanceCallStatus.A.name
ambulance_call_1.save()
self.a1status = AmbulanceStatus.HB.name
self.a1.timestamp = timezone.now()
self.a1.save()
serializer = CallSerializer(c1)
ambulance_call_serializer_1 = AmbulanceCallSerializer(ambulance_call_1)
ambulance_call_serializer_2 = AmbulanceCallSerializer(ambulance_call_2)
expected = {
'id': c1.id,
'status': c1.status,
'details': c1.details,
'priority': c1.priority,
'created_at': date2iso(c1.created_at),
'pending_at': date2iso(c1.pending_at),
'started_at': date2iso(c1.started_at),
'ended_at': date2iso(c1.ended_at),
'comment': c1.comment,
'updated_by': c1.updated_by.id,
'updated_on': date2iso(c1.updated_on),
'ambulancecall_set': [],
'patient_set': []
}
self.assertCountEqual(serializer.data['ambulancecall_set'],
[ambulance_call_serializer_2.data, ambulance_call_serializer_1.data])
result = serializer.data
result['ambulancecall_set'] = []
self.assertDictEqual(result, expected)
wp_1_serializer = WaypointSerializer(wp_1)
result = {
'id': wp_1.id,
'ambulance_call_id': ambulance_call_1.id,
'order': 0,
'status': WaypointStatus.C.name,
'location': LocationSerializer(wpl_1).data,
'comment': wp_1.comment,
'updated_by': wp_1.updated_by.id,
'updated_on': date2iso(wp_1.updated_on)
}
self.assertDictEqual(wp_1_serializer.data, result)
result = {
'id': wpl_1.id,
'type': LocationType.i.name,
'location': point2str(wpl_1.location),
'number': wpl_1.street,
'street': wpl_1.street,
'unit': wpl_1.unit,
'neighborhood': wpl_1.neighborhood,
'city': wpl_1.city,
'state': wpl_1.state,
'zipcode': wpl_1.zipcode,
'country': wpl_1.country,
'name': wpl_1.name,
'comment': wpl_1.comment,
'updated_by': wpl_1.updated_by.id,
'updated_on': date2iso(wpl_1.updated_on)
}
self.assertDictEqual(wp_1_serializer.data['location'], result)
wp_2_serializer = WaypointSerializer(wp_2)
result = {
'id': wp_2.id,
'ambulance_call_id': ambulance_call_2.id,
'order': 1,
'status': WaypointStatus.D.name,
'location': LocationSerializer(wpl_2).data,
'comment': wp_2.comment,
'updated_by': wp_2.updated_by.id,
'updated_on': date2iso(wp_2.updated_on)
}
self.assertDictEqual(wp_2_serializer.data, result)
result = {
'id': wpl_2.id,
'type': LocationType.h.name,
'location': point2str(wpl_2.location),
'number': '123',
'street': 'adsasd',
'unit': wpl_2.unit,
'neighborhood': wpl_2.neighborhood,
'city': wpl_2.city,
'state': wpl_2.state,
'zipcode': wpl_2.zipcode,
'country': wpl_2.country,
'name': wpl_2.name,
'comment': wpl_2.comment,
'updated_by': wpl_2.updated_by.id,
'updated_on': date2iso(wpl_2.updated_on)
}
self.assertDictEqual(wp_2_serializer.data['location'], result)
wp_3_serializer = WaypointSerializer(wp_3)
result = {
'id': wp_3.id,
'ambulance_call_id': ambulance_call_2.id,
'order': 2,
'status': WaypointStatus.V.name,
'location': LocationSerializer(self.h1).data,
'comment': wp_3.comment,
'updated_by': wp_3.updated_by.id,
'updated_on': date2iso(wp_3.updated_on)
}
self.assertDictEqual(wp_3_serializer.data, result)
result = {
'id': self.h1.id,
'type': LocationType.h.name,
'location': point2str(self.h1.location),
'number': self.h1.number,
'street': self.h1.street,
'unit': self.h1.unit,
'neighborhood': self.h1.neighborhood,
'city': self.h1.city,
'state': self.h1.state,
'zipcode': self.h1.zipcode,
'country': self.h1.country,
'name': self.h1.name,
'comment': self.h1.comment,
'updated_by': self.h1.updated_by.id,
'updated_on': date2iso(self.h1.updated_on)
}
self.assertDictEqual(wp_3_serializer.data['location'], result)
# add patients
p1 = Patient.objects.create(call=c1, name='Jose', age=3)
p2 = Patient.objects.create(call=c1, name='Maria', age=4)
patient_serializer_1 = PatientSerializer(p1)
patient_serializer_2 = PatientSerializer(p2)
serializer = CallSerializer(c1)
expected = {
'id': c1.id,
'status': c1.status,
'details': c1.details,
'priority': c1.priority,
'created_at': date2iso(c1.created_at),
'pending_at': date2iso(c1.pending_at),
'started_at': date2iso(c1.started_at),
'ended_at': date2iso(c1.ended_at),
'comment': c1.comment,
'updated_by': c1.updated_by.id,
'updated_on': date2iso(c1.updated_on),
'ambulancecall_set': [],
'patient_set': []
}
self.assertCountEqual(serializer.data['ambulancecall_set'],
[ambulance_call_serializer_2.data, ambulance_call_serializer_1.data])
self.assertCountEqual(serializer.data['patient_set'],
[patient_serializer_2.data, patient_serializer_1.data])
result = serializer.data
result['ambulancecall_set'] = []
result['patient_set'] = []
self.assertDictEqual(result, expected)
# retrieve ambulance updates
queryset = AmbulanceUpdate\
.objects.filter(ambulance=self.a1.id)\
.filter(timestamp__gte=ambulance_update_1.timestamp)\
.exclude(id=ambulance_update_2.id)
answer1 = []
for u in queryset:
serializer = AmbulanceUpdateSerializer(u)
result = {
'id': u.id,
'ambulance_id': u.ambulance.id,
'ambulance_identifier': u.ambulance.identifier,
'comment': u.comment,
'status': u.status,
'orientation': u.orientation,
'location': point2str(u.location),
'timestamp': date2iso(u.timestamp),
'updated_by_username': u.updated_by.username,
'updated_on': date2iso(u.updated_on)
}
answer1.append(serializer.data)
logger.debug(answer1)
self.assertEqual(len(answer1), 2)
# instantiate client
client = Client()
# login as admin
client.login(username=settings.MQTT['USERNAME'], password=settings.MQTT['PASSWORD'])
# retrieve ambulances updates
response = client.get('/api/ambulance/{}/updates/?call_id={}'.format(self.a1.id, c1.id),
follow=True)
self.assertEqual(response.status_code, 200)
result = JSONParser().parse(BytesIO(response.content))
logger.debug(result)
logger.debug(answer1)
self.assertCountEqual(result, answer1)
# logout
client.logout()
# cannot have duplicate
# This must be last
self.assertRaises(IntegrityError, AmbulanceCall.objects.create, call=c1, ambulance=self.a1, updated_by=self.u1)
    def test_call_serializer_create(self):
        """Create calls through CallSerializer (deserialization path).

        Covers: a bare pending call, the rejection of a started call without
        ambulances, nested creation of ambulance calls with waypoint sets,
        nested patient creation, and rejection of duplicate ambulance ids.
        """
        call = {
            'status': CallStatus.P.name,
            'priority': CallPriority.B.name,
            'ambulancecall_set': [],
            'patient_set': []
        }
        serializer = CallSerializer(data=call)
        serializer.is_valid()
        call = serializer.save(updated_by=self.u1)
        # test CallSerializer
        c1 = Call.objects.get(id=call.id)
        serializer = CallSerializer(c1)
        result = {
            'id': c1.id,
            'status': c1.status,
            'details': c1.details,
            'priority': c1.priority,
            'created_at': date2iso(c1.created_at),
            'pending_at': date2iso(c1.pending_at),
            'started_at': date2iso(c1.started_at),
            'ended_at': date2iso(c1.ended_at),
            'comment': c1.comment,
            'updated_by': c1.updated_by.id,
            'updated_on': date2iso(c1.updated_on),
            'ambulancecall_set': [],
            'patient_set': []
        }
        self.assertDictEqual(serializer.data, result)
        # accepted Call without Ambulancecall_Set fails
        call = {
            'status': CallStatus.S.name,
            'priority': CallPriority.B.name,
            'patient_set': []
        }
        serializer = CallSerializer(data=call)
        self.assertFalse(serializer.is_valid())
        # Pending Call with Ambulancecall_Set will create ambulancecalls
        # (nested waypoints are created along with each ambulance call)
        call = {
            'status': CallStatus.P.name,
            'priority': CallPriority.B.name,
            'ambulancecall_set': [
                {
                    'ambulance_id': self.a1.id,
                    'waypoint_set': [
                        {
                            'order': 0,
                            'location': {
                                'type': LocationType.i.name,
                                'number': '123',
                                'street': 'some street'
                            }
                        },
                        {
                            'order': 1,
                            'status': WaypointStatus.D.name,
                            'location': {
                                'type': LocationType.w.name,
                                'location': {
                                    'longitude': -110.54,
                                    'latitude': 35.75
                                }
                            }
                        }
                    ]
                },
                {
                    'ambulance_id': self.a2.id,
                    'waypoint_set': [
                        {
                            'order': 0,
                            'location': {
                                'type': LocationType.i.name,
                                'number': '321',
                                'street': 'another street'
                            }
                        }
                    ]
                }
            ],
            'patient_set': []
        }
        serializer = CallSerializer(data=call)
        serializer.is_valid()
        call = serializer.save(updated_by=self.u1)
        # test CallSerializer
        c1 = Call.objects.get(id=call.id)
        serializer = CallSerializer(c1)
        # one AmbulanceCall row per ambulance must have been created
        expected_ambulancecall_set = [
            AmbulanceCallSerializer(
                AmbulanceCall.objects.get(call_id=c1.id,
                                          ambulance_id=self.a1.id)).data,
            AmbulanceCallSerializer(
                AmbulanceCall.objects.get(call_id=c1.id,
                                          ambulance_id=self.a2.id)).data
        ]
        expected = {
            'id': c1.id,
            'status': c1.status,
            'details': c1.details,
            'priority': c1.priority,
            'created_at': date2iso(c1.created_at),
            'pending_at': date2iso(c1.pending_at),
            'started_at': date2iso(c1.started_at),
            'ended_at': date2iso(c1.ended_at),
            'comment': c1.comment,
            'updated_by': c1.updated_by.id,
            'updated_on': date2iso(c1.updated_on),
            'ambulancecall_set': expected_ambulancecall_set,
            'patient_set': []
        }
        result = serializer.data
        # logger.debug(result['ambulancecall_set'])
        # logger.debug(expected['ambulancecall_set'])
        # order-insensitive comparison of the set, then exact comparison of the rest
        self.assertCountEqual(result['ambulancecall_set'],
                              expected['ambulancecall_set'])
        expected['ambulancecall_set'] = []
        result['ambulancecall_set'] = []
        self.assertDictEqual(result, expected)
        # logger.debug(expected_ambulancecall_set[0])
        # logger.debug(expected_ambulancecall_set[1])
        self.assertEqual(len(expected_ambulancecall_set[0]['waypoint_set']), 2)
        self.assertEqual(len(expected_ambulancecall_set[1]['waypoint_set']), 1)
        # Pending Call with ambulancecall_set and patient_set
        call = {
            'status': CallStatus.P.name,
            'priority': CallPriority.B.name,
            'ambulancecall_set': [{'ambulance_id': self.a1.id}, {'ambulance_id': self.a2.id}],
            'patient_set': [{'name': 'Jose', 'age': 3}, {'name': 'Maria', 'age': 10}]
        }
        serializer = CallSerializer(data=call)
        serializer.is_valid()
        call = serializer.save(updated_by=self.u1)
        # test CallSerializer
        c1 = Call.objects.get(id=call.id)
        serializer = CallSerializer(c1)
        expected_patient_set = PatientSerializer(Patient.objects.filter(call_id=c1.id), many=True).data
        expected_ambulancecall_set = AmbulanceCallSerializer(AmbulanceCall.objects.filter(call_id=c1.id), many=True).data
        expected = {
            'id': c1.id,
            'status': c1.status,
            'details': c1.details,
            'priority': c1.priority,
            'created_at': date2iso(c1.created_at),
            'pending_at': date2iso(c1.pending_at),
            'started_at': date2iso(c1.started_at),
            'ended_at': date2iso(c1.ended_at),
            'comment': c1.comment,
            'updated_by': c1.updated_by.id,
            'updated_on': date2iso(c1.updated_on),
            'ambulancecall_set': expected_ambulancecall_set,
            'patient_set': expected_patient_set
        }
        result = serializer.data
        self.assertCountEqual(result['ambulancecall_set'],
                              expected['ambulancecall_set'])
        self.assertCountEqual(result['patient_set'],
                              expected['patient_set'])
        expected['ambulancecall_set'] = []
        result['ambulancecall_set'] = []
        expected['patient_set'] = []
        result['patient_set'] = []
        self.assertDictEqual(result, expected)
        # Should fail because ambulance id's are repeated
        call = {
            'status': CallStatus.S.name,
            'priority': CallPriority.B.name,
            'ambulancecall_set': [{'ambulance_id': self.a1.id}, {'ambulance_id': self.a1.id}],
            'patient_set': []
        }
        serializer = CallSerializer(data=call)
        serializer.is_valid()
        self.assertRaises(IntegrityError, serializer.save, updated_by=self.u1)
        # make sure no call was created
        self.assertRaises(Call.DoesNotExist, Call.objects.get, status=CallStatus.S.name, priority=CallPriority.B.name)
    # NOTE: the call-update serializer test below is currently failing and is
    # disabled by the leading underscore in its method name.
    def _test_call_update_serializer(self):
        """Partially update a call's status through CallSerializer.

        Disabled (leading underscore): the expected dictionaries below do not
        currently match the serializer output — TODO confirm and re-enable.
        """
        # superuser first
        # Update call status
        c = Call.objects.create(updated_by=self.u1)
        user = self.u1
        status = CallStatus.S.name
        serializer = CallSerializer(c,
                                    data={
                                        'status': status
                                    }, partial=True)
        serializer.is_valid()
        serializer.save(updated_by=user)
        # test
        serializer = CallSerializer(c)
        result = {
            'id': c.id,
            'status': status,
            'details': c.details,
            'priority': c.priority,
            'created_at': date2iso(c.created_at),
            'pending_at': date2iso(c.pending_at),
            'started_at': date2iso(c.started_at),
            'ended_at': date2iso(c.ended_at),
            'comment': c.comment,
            'updated_by': c.updated_by.id,
            'updated_on': date2iso(c.updated_on),
            'ambulancecall_set': AmbulanceCallSerializer(many=True).data,
            'patient_set': PatientSerializer(many=True).data
        }
        self.assertDictEqual(serializer.data, result)
        # The blocks below date from when Call carried its own address/location
        # fields; they no longer match the current model. Kept for reference.
        # # Update call street
        # street = 'new street'
        #
        # serializer = CallSerializer(c,
        #                             data={
        #                                 'street': street,
        #                             }, partial=True)
        # serializer.is_valid()
        # serializer.save(updated_by=user)
        #
        # # test
        # serializer = CallSerializer(c)
        # result = {
        #     'id': c.id,
        #     'status': c.status,
        #     'details': c.details,
        #     'priority': c.priority,
        #     'number': c.number,
        #     'street': street,
        #     'unit': c.unit,
        #     'neighborhood': c.neighborhood,
        #     'city': c.city,
        #     'state': c.state,
        #     'zipcode': c.zipcode,
        #     'country': c.country,
        #     'location': point2str(c.location),
        #     'created_at': date2iso(c.created_at),
        #     'pending_at': date2iso(c.pending_at),
        #     'started_at': date2iso(c.started_at),
        #     'ended_at': date2iso(c.ended_at),
        #     'comment': c.comment,
        #     'updated_by': c.updated_by.id,
        #     'updated_on': date2iso(c.updated_on),
        #     'ambulancecall_set': AmbulanceCallSerializer(many=True).data,
        #     'patient_set': PatientSerializer(many=True).data
        # }
        # self.assertDictEqual(serializer.data, result)
        #
        # # Update call location
        # location = {'latitude': -2., 'longitude': 7.}
        #
        # serializer = CallSerializer(c,
        #                             data={
        #                                 'location': location,
        #                             }, partial=True)
        # serializer.is_valid()
        # serializer.save(updated_by=user)
        #
        # # test
        # serializer = CallSerializer(c)
        # result = {
        #     'id': c.id,
        #     'status': c.status,
        #     'details': c.details,
        #     'priority': c.priority,
        #     'number': c.number,
        #     'street': c.street,
        #     'unit': c.unit,
        #     'neighborhood': c.neighborhood,
        #     'city': c.city,
        #     'state': c.state,
        #     'zipcode': c.zipcode,
        #     'country': c.country,
        #     'location': point2str(location),
        #     'created_at': date2iso(c.created_at),
        #     'pending_at': date2iso(c.pending_at),
        #     'started_at': date2iso(c.started_at),
        #     'ended_at': date2iso(c.ended_at),
        #     'comment': c.comment,
        #     'updated_by': c.updated_by.id,
        #     'updated_on': date2iso(c.updated_on),
        #     'ambulancecall_set': AmbulanceCallSerializer(many=True).data,
        #     'patient_set': PatientSerializer(many=True).data
        # }
        # self.assertDictEqual(serializer.data, result)
    # TODO: add call-update tests exercising a regular (non-superuser) authorized user
    def test_call_create_viewset(self):
        """POST a nested call through /api/call/.

        A superuser may create a call with nested ambulance calls, waypoints
        and patients; a regular user must receive 403.
        """
        # instantiate client
        client = Client()
        # login as admin (credentials shared with the MQTT broker settings)
        client.login(username=settings.MQTT['USERNAME'], password=settings.MQTT['PASSWORD'])
        data = {
            'status': CallStatus.P.name,
            'priority': CallPriority.B.name,
            'ambulancecall_set': [
                {
                    'ambulance_id': self.a1.id,
                    'waypoint_set': [
                        {
                            'order': 0,
                            'location': {
                                'type': LocationType.i.name,
                                'number': '123',
                                'street': 'some street'
                            }
                        },
                        {
                            'order': 1,
                            'status': WaypointStatus.D.name,
                            'location': {
                                'type': LocationType.w.name,
                                'location': {
                                    'longitude': -110.54,
                                    'latitude': 35.75
                                }
                            }
                        }
                    ]
                },
                {
                    'ambulance_id': self.a2.id,
                    'waypoint_set': [
                        {
                            'order': 0,
                            'location': {
                                'type': LocationType.i.name,
                                'number': '321',
                                'street': 'another street'
                            }
                        }
                    ]
                }
            ],
            'patient_set': [{'name': 'Jose', 'age': 3}, {'name': 'Maria', 'age': 10}]
        }
        response = client.post('/api/call/', data, content_type='application/json')
        # 201 Created: the nested payload was accepted
        self.assertEqual(response.status_code, 201)
        c1 = Call.objects.get(status=CallStatus.P.name)
        serializer = CallSerializer(c1)
        # the nested patients, ambulance calls and waypoints must all exist
        expected_patient_set = PatientSerializer(Patient.objects.filter(call_id=c1.id), many=True).data
        expected_ambulancecall_set = AmbulanceCallSerializer(AmbulanceCall.objects.filter(call_id=c1.id), many=True).data
        self.assertEqual(len(expected_patient_set), 2)
        self.assertEqual(len(expected_ambulancecall_set[0]['waypoint_set']), 2)
        self.assertEqual(len(expected_ambulancecall_set[1]['waypoint_set']), 1)
        expected = {
            'id': c1.id,
            'status': c1.status,
            'details': c1.details,
            'priority': c1.priority,
            'created_at': date2iso(c1.created_at),
            'pending_at': date2iso(c1.pending_at),
            'started_at': date2iso(c1.started_at),
            'ended_at': date2iso(c1.ended_at),
            'comment': c1.comment,
            'updated_by': c1.updated_by.id,
            'updated_on': date2iso(c1.updated_on),
            'ambulancecall_set': expected_ambulancecall_set,
            'patient_set': expected_patient_set
        }
        result = serializer.data
        # order-insensitive comparison of the nested sets, then the flat fields
        self.assertCountEqual(result['ambulancecall_set'],
                              expected['ambulancecall_set'])
        self.assertCountEqual(result['patient_set'],
                              expected['patient_set'])
        expected['ambulancecall_set'] = []
        result['ambulancecall_set'] = []
        expected['patient_set'] = []
        result['patient_set'] = []
        self.assertDictEqual(result, expected)
        # logout
        client.logout()
        # login as testuser2
        client.login(username='testuser2', password='very_secret')
        # Will fail for anyone not superuser
        data = {
            'status': CallStatus.P.name,
            'priority': CallPriority.B.name,
            'ambulancecall_set': [{'ambulance_id': self.a1.id}, {'ambulance_id': self.a2.id}],
            'patient_set': [{'name': 'Jose', 'age': 3}, {'name': 'Maria', 'age': 10}]
        }
        response = client.post('/api/call/', data, content_type='application/json')
        self.assertEqual(response.status_code, 403)
        # logout
        client.logout()
def test_call_list_viewset(self):
# instantiate client
client = Client()
client.login(username=settings.MQTT['USERNAME'], password=settings.MQTT['PASSWORD'])
response = client.get('/api/call/', follow=True)
self.assertEquals(response.status_code, 200)
result = JSONParser().parse(BytesIO(response.content))
answer = CallSerializer(Call.objects.all(), many=True).data
self.assertCountEqual(result, answer)
# test_call_list_viewset_one_entry
c1 = Call.objects.create(details='nani', updated_by=self.u1)
response = client.get('/api/call/', follow=True)
self.assertEquals(response.status_code, 200)
result = JSONParser().parse(BytesIO(response.content))
answer = CallSerializer(Call.objects.all(), many=True).data
self.assertCountEqual(result, answer)
# test_call_list_viewset_two_entries:
c2 = Call.objects.create(details='suhmuh', updated_by=self.u1)
response = client.get('/api/call/', follow=True)
self.assertEquals(response.status_code, 200)
result = JSONParser().parse(BytesIO(response.content))
answer = CallSerializer(Call.objects.all(), many=True).data
self.assertCountEqual(result, answer)
# logout
client.logout()
# login as testuser2
client.login(username='testuser2', password='very_secret')
response = client.get('/api/call/', follow=True)
self.assertEquals(response.status_code, 200)
result = JSONParser().parse(BytesIO(response.content))
answer = CallSerializer([], many=True).data
self.assertCountEqual(result, answer)
# add ambulances to calls, can only read a3
AmbulanceCall.objects.create(call=c1, ambulance=self.a3, updated_by=self.u1)
AmbulanceCall.objects.create(call=c2, ambulance=self.a2, updated_by=self.u1)
response = client.get('/api/call/', follow=True)
self.assertEquals(response.status_code, 200)
result = JSONParser().parse(BytesIO(response.content))
answer = CallSerializer([c1], many=True).data
self.assertCountEqual(result, answer)
# logout
client.logout()
def test_call_list_view(self):
# instantiate client
client = Client()
client.login(username=settings.MQTT['USERNAME'], password=settings.MQTT['PASSWORD'])
response = client.get(reverse('ambulance:call_list'))
self.assertEquals(response.status_code, 200)
self.assertTemplateUsed(response, 'ambulance/call_list.html')
# test_call_list_view_one_entry
c1 = Call.objects.create(details='nani', updated_by=self.u1)
response = client.get(reverse('ambulance:call_list'))
self.assertContains(response, 'nani')
# test_call_list_view_two_entries:
c2 = Call.objects.create(details='suhmuh', updated_by=self.u1)
response = client.get(reverse('ambulance:call_list'))
self.assertContains(response, 'nani')
self.assertContains(response, 'suhmuh')
# logout
client.logout()
# login as testuser2
client.login(username='testuser2', password='very_secret')
response = client.get(reverse('ambulance:call_list'))
self.assertEquals(response.status_code, 200)
self.assertNotContains(response, 'nani')
self.assertNotContains(response, 'suhmuh')
# add ambulances to calls, can only read a3
AmbulanceCall.objects.create(call=c1, ambulance=self.a3, updated_by=self.u1)
AmbulanceCall.objects.create(call=c2, ambulance=self.a2, updated_by=self.u1)
response = client.get(reverse('ambulance:call_list'))
self.assertEquals(response.status_code, 200)
self.assertContains(response, 'nani')
self.assertNotContains(response, 'suhmuh')
# logout
client.logout()
def test_call_detail_view(self):
# instantiate client
client = Client()
client.login(username=settings.MQTT['USERNAME'], password=settings.MQTT['PASSWORD'])
c1 = Call.objects.create(details="Test1", updated_by=self.u1)
response = client.get(reverse('ambulance:call_detail', kwargs={'pk': c1.id}))
self.assertEquals(response.status_code, 200)
self.assertTemplateUsed(response, 'ambulance/call_detail.html')
# test_call_detail_view_entry
c1 = Call.objects.create(details="Test1", updated_by=self.u1)
response = client.get(reverse('ambulance:call_detail', kwargs={'pk': c1.id}))
self.assertContains(response, 'Test1')
# TODO: Tests for unprivileged user
# logout
client.logout()
| 37.533668 | 121 | 0.55444 | 51,579 | 0.984388 | 0 | 0 | 0 | 0 | 0 | 0 | 11,033 | 0.210565 |
ffe63e2dda8d22501b711fdd07b98a6cfff2ea5a | 2,484 | py | Python | bot/PythonProject/Commands.py | RamaDev09/CrateBot | 34b9f50b88da42cc1c449466402897340ec142df | [
"MIT"
] | null | null | null | bot/PythonProject/Commands.py | RamaDev09/CrateBot | 34b9f50b88da42cc1c449466402897340ec142df | [
"MIT"
] | null | null | null | bot/PythonProject/Commands.py | RamaDev09/CrateBot | 34b9f50b88da42cc1c449466402897340ec142df | [
"MIT"
] | null | null | null | import os
from bot.TextInput import TextInput
from bot.prompt import color_msg
def PythonCommands(file, name, category, description, slash):
here = os.getcwd()
# Writing a new import line
cogs = file['config']['commands'] = []
cogs.append(name)
with open(here + "/main.py", "r") as f :
lines = f.readlines()
line = 0
for i in lines :
line += 1
if lines[line - 1] == "\n" : break
lines[line - 1] = f"from cogs.commands.{category}.{name} import {category}\n"
with open(here + "/main.py", "w") as f :
f.writelines(lines)
f.close()
if not slash['slash-command'] :
try :
dir = os.path.join(here + "/cogs/commands", category)
os.mkdir(dir)
try :
with open(here + "/cogs/commands/" + category + "/" + name + ".py", "x") as f :
f.write(
TextInput.CommandPy(self=TextInput(), name=name, category=category, description=description))
color_msg("#00FF00", "Command Created")
except FileExistsError :
color_msg("#ff0000", "Command Already Exits")
except FileNotFoundError :
color_msg("#ff0000", "Make sure you are in CrateBot Project")
except FileExistsError :
try :
with open(here + "/cogs/commands/" + category + "/" + name + ".py", "x") as f :
f.write(
TextInput.CommandPy(self=TextInput(), name=name, category=category, description=description))
color_msg("#00FF00", "Command Created")
except FileExistsError :
color_msg("#ff0000", "Command Already Exits")
else :
try :
dir = os.path.join(here + "/cogs/commands", category)
os.mkdir(dir)
try :
with open(here + "/cogs/commands/" + category + "/" + name + ".py", "x") as f :
f.write(
TextInput.CommandSlashPy(self=TextInput(), name=name, category=category,
description=description))
color_msg("#00FF00", "Command Created")
except FileExistsError :
color_msg("#ff0000", "Command Already Exits")
except FileNotFoundError :
color_msg("#ff0000", "Make sure you are in CrateBot Project") | 45.163636 | 118 | 0.517311 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 536 | 0.215781 |
ffe68f15e3bf96bdad0cec4870fd34ce0d8fbf6c | 223 | py | Python | src/methods/Addition.py | svanschooten/Flow | c7c158f986f7b108a255cbaa67ec7fff3518b637 | [
"MIT"
] | null | null | null | src/methods/Addition.py | svanschooten/Flow | c7c158f986f7b108a255cbaa67ec7fff3518b637 | [
"MIT"
] | null | null | null | src/methods/Addition.py | svanschooten/Flow | c7c158f986f7b108a255cbaa67ec7fff3518b637 | [
"MIT"
] | null | null | null | from methods.AbstactMethod import AbstractMethod
class Addition(AbstractMethod):
name = 'Addition'
def apply(self, args: dict) -> dict:
return {
'res': args.get('x') + args.get('y')
}
| 20.272727 | 48 | 0.591928 | 171 | 0.766816 | 0 | 0 | 0 | 0 | 0 | 0 | 21 | 0.09417 |
ffe7a09ec4555bf2573c09777fdb5c2946647fc9 | 3,914 | py | Python | submissions_comments.py | jbell1991/reddit-scraping | 73d88501ed0205e78000b9c30780a33186154fda | [
"MIT"
] | null | null | null | submissions_comments.py | jbell1991/reddit-scraping | 73d88501ed0205e78000b9c30780a33186154fda | [
"MIT"
] | null | null | null | submissions_comments.py | jbell1991/reddit-scraping | 73d88501ed0205e78000b9c30780a33186154fda | [
"MIT"
] | null | null | null | # imports
from decouple import config
import pandas as pd
import praw
import psycopg2
import schedule
from sqlalchemy import create_engine
import time
def job():
current_day = time.strftime("%m/%d/%Y")
print(f"Performing job on {current_day}")
startTime = time.time()
# connecting to reddit API
reddit = praw.Reddit(
client_id=config("CLIENT_ID"),
client_secret=config("SECRET"),
user_agent=config("USER"),
username=config("USERNAME"),
password=config("PASSWORD")
)
subreddit = reddit.subreddit("wallstreetbets")
hot_wsb = subreddit.hot(limit=150)
# storing submission data in a dictionary
submissions = {
"title": [],
"subreddit": [],
"submission_author": [],
"submission_score": [],
"submission_id": [],
"url": [],
"num_comments": [],
"submission_created": [],
"submission_body": []
}
# iterate over each submission and store data in the submissions dictionary
for submission in hot_wsb:
submissions["title"].append(submission.title)
submissions["subreddit"].append(submission.subreddit)
submissions["submission_author"].append(submission.author)
submissions["submission_score"].append(submission.score)
submissions["submission_id"].append(submission.id)
submissions["url"].append(submission.url)
submissions["num_comments"].append(submission.num_comments)
submissions["submission_created"].append(submission.created)
submissions["submission_body"].append(submission.selftext)
# transform the submissions dictionary into a pandas dataframe
df = pd.DataFrame(submissions)
# convert created to date
df['submission_created'] = pd.to_datetime(df['submission_created'], unit='s')
# convert subreddit column to string
df['subreddit'] = df['subreddit'].astype(str)
# convert author column to string
df['submission_author'] = df['submission_author'].astype(str)
# connect to postgresql database
db_pass = config("PASSWORD")
engine = create_engine(
f'postgresql://postgres:{db_pass}@localhost:5432/postgres')
# store pandas dataframe in sql database
df.to_sql('submissions', engine, if_exists='append')
# create dictionary to store comments
comments = {
"submission_id": [],
"comment_id": [],
"comment_score": [],
"comment_author": [],
"comment_created": [],
"comment_body": []
}
# iterating over each submission and collecting relevent comment data
for id in df['submission_id']:
submission = reddit.submission(id=id)
submission.comments.replace_more(limit=None)
for comment in submission.comments.list():
comments["submission_id"].append(id)
comments["comment_id"].append(comment.id)
comments["comment_score"].append(comment.score)
comments["comment_author"].append(comment.author)
comments["comment_created"].append(comment.created)
comments["comment_body"].append(comment.body)
# converting comments dictionary to a pandas dataframe
comments_df = pd.DataFrame(comments)
# convert created to date
comments_df["comment_created"] = pd.to_datetime(comments_df["comment_created"], unit='s')
# convert author to string
comments_df["comment_author"] = comments_df["comment_author"].astype(str)
# store comments_df in sql table
comments_df.to_sql('comments', engine, if_exists='append', index=False)
# calculate time it takes for script to run
executionTime = (time.time() - startTime)
print('Execution time in minutes: ' + str(executionTime/60))
# automate script to run at the same time everyday
schedule.every().day.at("09:07").do(job)
while True:
schedule.run_pending()
time.sleep(1)
| 32.890756 | 93 | 0.667092 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,582 | 0.40419 |
ffe7fe43c53e89a050ea85e42fd101e3306b2423 | 9,139 | py | Python | vision_proc/proc_frame.py | SMS-Raiders/First2016 | a08eb1fa195bd869f8e7de7761d791e3fcf23d22 | [
"BSD-3-Clause"
] | 1 | 2016-03-08T14:39:52.000Z | 2016-03-08T14:39:52.000Z | vision_proc/proc_frame.py | SMS-Raiders/First2016 | a08eb1fa195bd869f8e7de7761d791e3fcf23d22 | [
"BSD-3-Clause"
] | null | null | null | vision_proc/proc_frame.py | SMS-Raiders/First2016 | a08eb1fa195bd869f8e7de7761d791e3fcf23d22 | [
"BSD-3-Clause"
] | null | null | null | #!/bin/python
#Frame processing and distance estimation for
#goal
#-------------------------------------------------------------------------------
# IMPORTS
#-------------------------------------------------------------------------------
import cv2
import math
import numpy
import sys
#-------------------------------------------------------------------------------
# VARIABLES
#-------------------------------------------------------------------------------
def cvClr( R, G, B ):
"""
Color array macro
"""
return( numpy.array( [R,G,B], numpy.uint8 ) )
#=====================================================================
# Approx. The green color range
#=====================================================================
MASK_LOW = cvClr( 0, 0, 245 )
MASK_HIGH = cvClr( 255, 70, 255 )
#=====================================================================
# Approximate Areas for the goal (Pixels)
#=====================================================================
#MIN_AREA = 250
MIN_AREA = 1600
#MAX_AREA = 4000
MAX_AREA = 5000
#=================================================================
# Numbers Determined from experiment apart from 0 and 20
# Straight on to Goal
# width and height and area are in pixel area
# THIS IS THE COUNTOUR AREA NOT THE CONVEX HULL AREA!
#=================================================================
goal_lkup = [
{ 'dist ft' : 0, 'width' : 200, 'height' : 90, 'area' : 9000, 'ratio w_h' : 1.80 }, #0ft not tested needs to be large
{ 'dist ft' : 7, 'width' : 151, 'height' : 88, 'area' : 4828, 'ratio w_h' : 1.71 },
{ 'dist ft' : 8, 'width' : 141, 'height' : 85, 'area' : 4700, 'ratio w_h' : 1.65 },
{ 'dist ft' : 9, 'width' : 132, 'height' : 81, 'area' : 4300, 'ratio w_h' : 1.62 },
{ 'dist ft' : 10, 'width' : 123, 'height' : 78, 'area' : 3860, 'ratio w_h' : 1.57 },
{ 'dist ft' : 11, 'width' : 114, 'height' : 75, 'area' : 3420, 'ratio w_h' : 1.52 },
{ 'dist ft' : 12, 'width' : 108, 'height' : 73, 'area' : 3120, 'ratio w_h' : 1.47 },
{ 'dist ft' : 13, 'width' : 102, 'height' : 70, 'area' : 2770, 'ratio w_h' : 1.45 },
{ 'dist ft' : 14, 'width' : 96 , 'height' : 68, 'area' : 2357, 'ratio w_h' : 1.41 },
{ 'dist ft' : 20, 'width' : 60 , 'height' : 35, 'area' : 1000, 'ratio w_h' : 1.30 } ] #20 ft not tested needs to be small
#-------------------------------------------------------------------------------
# CLASSES
#-------------------------------------------------------------------------------
class Point:
"""Simple Class for XY point"""
x = 0
y = 0
#-------------------------------------------------------------------------------
# PROCEDURES
#-------------------------------------------------------------------------------
def find_squares( contours, debug=False ):
"""
Find square shaped objects
"""
#=================================================================
# The Minimum and Maximum rations for width vs height for the goal
# based on experimental results goal is approx 1.5:1
#=================================================================
MIN_RATIO = 1.3
MAX_RATIO = 1.8
ret = []
for shape in contours:
x, y, w, h = cv2.boundingRect( shape )
w_h_ratio = float( w ) / float( h )
if debug:
print "Area", (w * h)
print "Width ", w
print "Height", h
if MIN_RATIO < w_h_ratio and w_h_ratio < MAX_RATIO:
ret.append( shape )
return( ret )
def filter_area( contours, debug=False ):
"""
Filter out contours based on area
"""
ret = []
for x in contours:
area = cv2.contourArea( x )
if area > MIN_AREA and area < MAX_AREA:
if debug:
print "Area", area
ret.append( x )
return( ret )
def find_center( contours ):
"""
Find the center of a contour based on moments
"""
ret = []
for x in contours:
M = cv2.moments( x )
pt = Point()
pt.x = int( M['m10']/M['m00'] )
pt.y = int( M['m01']/M['m00'] )
ret.append( pt )
return( ret );
def convex_hull_area( contours, debug= False ):
"""
Find the Area of convex Hulls
"""
ret_areas = []
ret_hulls = []
for c in contours:
hull = cv2.convexHull( c )
area = cv2.contourArea( hull )
ret_areas.append( area )
ret_hulls.append( hull )
if( debug ):
print( "Hull area: {0}".format( area ) )
return ( ret_areas, ret_hulls )
def angle_from_point( x, img_width=640, fov_angle=44 ):
"""
Calculate the angle from a point
"""
return( -( ( img_width / 2 ) - x ) * fov_angle )
def lin_scale( val, x1, y1, x2, y2 ):
"""
Linearly scale Val to y1 and y2 from x1 and x2 range
x1 and y1 are low values
"""
x_range = (x2 - x1)
new_val = 0
if x_range is 0:
new_val = y1
else:
y_range = ( y2 - y1 )
new_val = ( ( ( val - x1 ) * y_range ) / x_range ) + y1
return new_val
def dist_from_goal( area ):
"""
Calculates the distance to the Goal based on area, x, y
Args:
area: the area in pixels of the target
Returns:
Feet from goal
"""
dist = 99
prev = goal_lkup[ 0 ]
for cur in goal_lkup:
#=============================================================
# If the area is less than the currently selected area, but
# greater then the previous area, then the distance is some
# where in between. Then do linear interpolation
#=============================================================
if area > cur[ 'area' ] and area < prev[ 'area' ]:
dist = lin_scale( area, cur[ 'area' ], cur[ 'dist ft' ], prev[ 'area' ], prev[ 'dist ft' ] )
return dist
prev = cur
return dist
def proc_frame( frame, debug=False ):
"""
Process a frame
"""
#=================================================================
# Convert to HSV so we can mask more easily
#=================================================================
hsv_frame = cv2.cvtColor( frame, cv2.COLOR_BGR2HSV )
#=================================================================
# Apply the color mask defined at the top of file
#=================================================================
if( debug ):
hlo = cv2.getTrackbarPos( "H low", "Mask" )
hhi = cv2.getTrackbarPos( "H hi", "Mask" )
slo = cv2.getTrackbarPos( "S low", "Mask" )
shi = cv2.getTrackbarPos( "S hi", "Mask" )
vlo = cv2.getTrackbarPos( "V low", "Mask" )
vhi = cv2.getTrackbarPos( "V hi", "Mask" )
lo = numpy.array( [ hlo, slo, vlo ], numpy.uint8 )
hi = numpy.array( [ hhi, shi, vhi ], numpy.uint8 )
color_mask = cv2.inRange( hsv_frame, lo, hi )
else:
color_mask = cv2.inRange( hsv_frame, MASK_LOW, MASK_HIGH )
#=================================================================
# Apply our color mask
#=================================================================
masked_frame = cv2.bitwise_and( hsv_frame, hsv_frame, mask = color_mask )
#=================================================================
# Contours stuff
# First convert to Gray and find the contours
#=================================================================
bw_frame = cv2.cvtColor( masked_frame, cv2.COLOR_BGR2GRAY )
contours, hierarchy = cv2.findContours( bw_frame, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE )
#=================================================================
# Filter the contours based on area, convex hull area etc...
#=================================================================
draw = filter_area( contours )
hull_areas, hulls = convex_hull_area( draw )
squares = find_squares( hulls )
centers = find_center( squares )
#=================================================================
# If debug mode, show the result of the line finding in a GUI
#=================================================================
if( debug ):
#contours
cv2.drawContours( frame, draw, -1, ( 0, 255, 0 ), 3 )
cv2.drawContours( frame, squares, -1, ( 255, 255, 0 ), 3 )
for i in centers:
cv2.circle( frame, ( i.x, i.y ), 3, ( 0, 255, 255 ), )
#print "X = {0} Y = {1}".format( i.x, i.y )
cv2.imshow( "Goal", frame )
#cv2.imshow( "Mask", masked_frame )
return dist_from_goal( squares ), angle_from_point( centers[0].x, len( frame[0] ) )
| 37.454918 | 135 | 0.402889 | 71 | 0.007769 | 0 | 0 | 0 | 0 | 0 | 0 | 4,636 | 0.507277 |
ffeabfb85c362b4fd5f28c9b1e056f66d191fed5 | 100 | py | Python | 9.py | sarika228/React-Projects | 24c342f71f839c257150f4b5e096c127b51d525c | [
"MIT"
] | null | null | null | 9.py | sarika228/React-Projects | 24c342f71f839c257150f4b5e096c127b51d525c | [
"MIT"
] | null | null | null | 9.py | sarika228/React-Projects | 24c342f71f839c257150f4b5e096c127b51d525c | [
"MIT"
] | null | null | null | i=1
while i<=4:
j=16
while j>=i:
print(i,end="")
j=j-1
print()
i=i+1 | 12.5 | 23 | 0.39 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 | 0.02 |
ffeb87db7651191ea5cf19f49a0c7c9aa356f87d | 8,539 | py | Python | site-packages/playhouse/sqliteq.py | lego-cloud/MDMPy | dc676a5d2245a14b9b98a2ac2dba64ff0bf61800 | [
"Python-2.0",
"OLDAP-2.7"
] | 674 | 2015-11-06T04:22:47.000Z | 2022-02-26T17:31:43.000Z | site-packages/playhouse/sqliteq.py | lego-cloud/MDMPy | dc676a5d2245a14b9b98a2ac2dba64ff0bf61800 | [
"Python-2.0",
"OLDAP-2.7"
] | 713 | 2015-11-06T10:48:58.000Z | 2018-11-27T16:32:18.000Z | site-packages/playhouse/sqliteq.py | lego-cloud/MDMPy | dc676a5d2245a14b9b98a2ac2dba64ff0bf61800 | [
"Python-2.0",
"OLDAP-2.7"
] | 106 | 2015-12-07T11:21:06.000Z | 2022-03-11T10:58:41.000Z | import logging
import weakref
from threading import Event
from threading import Thread
try:
from Queue import Queue
except ImportError:
from queue import Queue
try:
import gevent
from gevent import Greenlet as GThread
from gevent.event import Event as GEvent
from gevent.queue import Queue as GQueue
except ImportError:
GThread = GQueue = GEvent = None
from playhouse.sqlite_ext import SqliteExtDatabase
logger = logging.getLogger('peewee.sqliteq')
class ResultTimeout(Exception):
pass
class AsyncCursor(object):
__slots__ = ('sql', 'params', 'commit', 'timeout',
'_event', '_cursor', '_exc', '_idx', '_rows')
def __init__(self, event, sql, params, commit, timeout):
self._event = event
self.sql = sql
self.params = params
self.commit = commit
self.timeout = timeout
self._cursor = self._exc = self._idx = self._rows = None
def set_result(self, cursor, exc=None):
self._cursor = cursor
self._exc = exc
self._idx = 0
self._rows = cursor.fetchall() if exc is None else []
self._event.set()
return self
def _wait(self, timeout=None):
timeout = timeout if timeout is not None else self.timeout
if not self._event.wait(timeout=timeout) and timeout:
raise ResultTimeout('results not ready, timed out.')
if self._exc is not None:
raise self._exc
def __iter__(self):
self._wait()
if self._exc is not None:
raise self._exec
return self
def next(self):
try:
obj = self._rows[self._idx]
except IndexError:
raise StopIteration
else:
self._idx += 1
return obj
__next__ = next
@property
def lastrowid(self):
self._wait()
return self._cursor.lastrowid
@property
def rowcount(self):
self._wait()
return self._cursor.rowcount
@property
def description(self):
return self._cursor.description
def close(self):
self._cursor.close()
def fetchall(self):
return list(self) # Iterating implies waiting until populated.
def fetchone(self):
self._wait()
try:
return next(self)
except StopIteration:
return None
THREADLOCAL_ERROR_MESSAGE = ('threadlocals cannot be set to True when using '
'the Sqlite thread / queue database. All queries '
'are serialized through a single connection, so '
'allowing multiple threads to connect defeats '
'the purpose of this database.')
WAL_MODE_ERROR_MESSAGE = ('SQLite must be configured to use the WAL journal '
'mode when using this feature. WAL mode allows '
'one or more readers to continue reading while '
'another connection writes to the database.')
class SqliteQueueDatabase(SqliteExtDatabase):
def __init__(self, database, use_gevent=False, autostart=False, readers=1,
queue_max_size=None, results_timeout=None, *args, **kwargs):
if kwargs.get('threadlocals'):
raise ValueError(THREADLOCAL_ERROR_MESSAGE)
kwargs['threadlocals'] = False
kwargs['check_same_thread'] = False
# Ensure that journal_mode is WAL. This value is passed to the parent
# class constructor below.
pragmas = self._validate_journal_mode(
kwargs.pop('journal_mode', None),
kwargs.pop('pragmas', None))
# Reference to execute_sql on the parent class. Since we've overridden
# execute_sql(), this is just a handy way to reference the real
# implementation.
Parent = super(SqliteQueueDatabase, self)
self.__execute_sql = Parent.execute_sql
# Call the parent class constructor with our modified pragmas.
Parent.__init__(database, pragmas=pragmas, *args, **kwargs)
self._autostart = autostart
self._results_timeout = results_timeout
self._num_readers = readers
self._is_stopped = True
self._thread_helper = self.get_thread_impl(use_gevent)(queue_max_size)
self._create_queues_and_workers()
if self._autostart:
self.start()
def get_thread_impl(self, use_gevent):
return GreenletHelper if use_gevent else ThreadHelper
def _validate_journal_mode(self, journal_mode=None, pragmas=None):
if journal_mode and journal_mode.lower() != 'wal':
raise ValueError(WAL_MODE_ERROR_MESSAGE)
if pragmas:
pdict = dict((k.lower(), v) for (k, v) in pragmas)
if pdict.get('journal_mode', 'wal').lower() != 'wal':
raise ValueError(WAL_MODE_ERROR_MESSAGE)
return [(k, v) for (k, v) in pragmas
if k != 'journal_mode'] + [('journal_mode', 'wal')]
else:
return [('journal_mode', 'wal')]
def _create_queues_and_workers(self):
self._write_queue = self._thread_helper.queue()
self._read_queue = self._thread_helper.queue()
target = self._run_worker_loop
self._writer = self._thread_helper.thread(target, self._write_queue)
self._readers = [self._thread_helper.thread(target, self._read_queue)
for _ in range(self._num_readers)]
def _run_worker_loop(self, queue):
while True:
async_cursor = queue.get()
if async_cursor is StopIteration:
logger.info('worker shutting down.')
return
logger.debug('received query %s', async_cursor.sql)
self._process_execution(async_cursor)
def _process_execution(self, async_cursor):
try:
cursor = self.__execute_sql(async_cursor.sql, async_cursor.params,
async_cursor.commit)
except Exception as exc:
cursor = None
else:
exc = None
return async_cursor.set_result(cursor, exc)
def queue_size(self):
return (self._write_queue.qsize(), self._read_queue.qsize())
def execute_sql(self, sql, params=None, require_commit=True, timeout=None):
cursor = AsyncCursor(
event=self._thread_helper.event(),
sql=sql,
params=params,
commit=require_commit,
timeout=self._results_timeout if timeout is None else timeout)
queue = self._write_queue if require_commit else self._read_queue
queue.put(cursor)
return cursor
def start(self):
with self._conn_lock:
if not self._is_stopped:
return False
self._writer.start()
for reader in self._readers:
reader.start()
logger.info('workers started.')
self._is_stopped = False
return True
def stop(self):
logger.debug('environment stop requested.')
with self._conn_lock:
if self._is_stopped:
return False
self._write_queue.put(StopIteration)
for _ in self._readers:
self._read_queue.put(StopIteration)
self._writer.join()
for reader in self._readers:
reader.join()
return True
def is_stopped(self):
with self._conn_lock:
return self._is_stopped
class ThreadHelper(object):
__slots__ = ('queue_max_size',)
def __init__(self, queue_max_size=None):
self.queue_max_size = queue_max_size
def event(self): return Event()
def queue(self, max_size=None):
max_size = max_size if max_size is not None else self.queue_max_size
return Queue(maxsize=max_size or 0)
def thread(self, fn, *args, **kwargs):
thread = Thread(target=fn, args=args, kwargs=kwargs)
thread.daemon = True
return thread
class GreenletHelper(ThreadHelper):
__slots__ = ('queue_max_size',)
def event(self): return GEvent()
def queue(self, max_size=None):
max_size = max_size if max_size is not None else self.queue_max_size
return GQueue(maxsize=max_size or 0)
def thread(self, fn, *args, **kwargs):
def wrap(*a, **k):
gevent.sleep()
return fn(*a, **k)
return GThread(wrap, *args, **kwargs)
| 32.222642 | 79 | 0.610025 | 7,365 | 0.862513 | 0 | 0 | 260 | 0.030449 | 0 | 0 | 1,152 | 0.13491 |
ffed6941b3c99947e3e5d93c80fbd2e963b7ad51 | 9,056 | py | Python | Common/Db.py | StrawberryTeam/pi_robot | c1b8ce2ad49c64173673df0eb59e0941624556e7 | [
"MIT"
] | 2 | 2018-08-30T14:38:53.000Z | 2019-12-12T09:33:42.000Z | Common/Db.py | StrawberryTeam/pi_robot | c1b8ce2ad49c64173673df0eb59e0941624556e7 | [
"MIT"
] | 1 | 2018-12-10T05:15:48.000Z | 2018-12-10T05:15:48.000Z | Common/Db.py | StrawberryTeam/pi_robot | c1b8ce2ad49c64173673df0eb59e0941624556e7 | [
"MIT"
] | 2 | 2019-06-28T06:05:17.000Z | 2019-10-28T08:34:50.000Z | #!/usr/bin/python3
from Common.Straw import Straw
import pymongo
from pymongo import MongoClient
from bson.objectid import ObjectId
import os
class Db(Straw):
    """MongoDB data-access helper for video sets, videos, tasks and settings."""

    # Task record field specification (field name -> stored type name).
    _taskFields = {
        'videoIds': 'string', #videos to operate on
        'setId': 'objectid', #id of the video set to operate on
        'fromDevice': 'string', #source device uid
        'toDevice': 'string', #target device uid
        'type': 'string', #copy: copy  zip: pack  addset: add video set  addvideo: add video  transfer: transfer
        'link': 'string', #link for add-video / add-video-set tasks
        'platform': 'int', #platform of the video set / video
        'created_at': 'int',
        'sort': 'int', #sorting method
        'status': 'int', #task status
        'transfer_status': 'int', #transfer status
        'file_md5': 'string', #md5 of the transferred file
        'file_path': 'string', #path of the transferred file
    }
    # Cache of connected collections, keyed by collection name.
    _collection = {
        # video sets
        'video_set': {},
        # video list
        'video_list': {},
        # tasks
        'task': {},
        # settings
        'setting': {},
    }
    # Connected database handle (empty dict until the first connect()).
    _db = {}
    def __init__(self):
        # Connections are created lazily in connect(); nothing to set up here.
        pass
    # Connect to a collection
def connect(self, table):
# 已连接过的表
if self._collection[table]:
return self._collection[table]
config = self.getConfig('DB')
client = MongoClient(config['mongoClient'])
if not self._db:
self._db = client[config['dbName']] # 连接库
self._collection[table] = self._db[table] # 选择表
return self._collection[table]
    # Fetch video sets that have no pinyin title yet
def getNonpySetList(self, count = 10):
_collection = self.connect('video_set')
dataList = _collection.find({"title_py": {"$exists": False}, 'non_py': {"$ne": True}}).sort("play_num", pymongo.DESCENDING).limit(count)
return dataList if dataList.count() > 0 else False
    # Save pinyin fields for a video set
def saveSetPy(self, data, _id):
_collection = self.connect('video_set')
avaiableFileds = ['title_py', 'title_pyshow', 'title_sp', 'tags']
saveData = common.removeUnsafeFields(data, avaiableFileds, self._videoSetFields)
# saveData = dict(filter(lambda k: k[0] in avaiableFileds, data.items()))
return _collection.update_one({"_id": _id}, {"$set": saveData})
    # Fetch videos that have no pinyin name yet
def getNonpyVideoList(self, count = 10):
_collection = self.connect('video_list')
dataList = _collection.find({"name_py": {"$exists": False}, 'non_py': {"$ne": True}}).sort("plays", pymongo.DESCENDING).limit(count)
return dataList if dataList.count() > 0 else False
    # Save pinyin fields for a video
def saveVideoPy(self, data, _id):
_collection = self.connect('video_list')
avaiableFileds = ['name_py', 'name_pyshow', 'name_sp', 'tags']
saveData = common.removeUnsafeFields(data, avaiableFileds, self._videoListFields)
# saveData = dict(filter(lambda k: k[0] in avaiableFileds, data.items()))
return _collection.update_one({"_id": _id}, {"$set": saveData})
    # Get video-set info
def getSetInfo(self, setId):
_collection = self.connect('video_set')
item = _collection.find_one({"_id": ObjectId(setId)})
return item if item else False
    # Get all videos of a video set
def getVideoListBySetId(self, setId):
_collection = self.connect('video_list')
dataList = _collection.find({"setId": common.conv2(setId, self._videoListFields['setId'])}).sort("_id", pymongo.ASCENDING)
return dataList if dataList.count() > 0 else False
    # Get videos of a set whose cover image is not downloaded yet
def getVideoListByDlImg(self, uid, setId):
_collection = self.connect('video_list')
dataList = _collection.find({"setId": common.conv2(setId, self._videoListFields['setId']), "img." + str(uid): {'$exists': False}}).sort("_id", pymongo.ASCENDING)
return dataList if dataList.count() > 0 else False
    # Get one video set whose cover image needs downloading
def getVideoSetByDlImg(self, uid, platforms = [1]):
'''
platform 1 爱奇艺
'''
_collection = self.connect('video_set')
dataList = _collection.find_one({"platform": {'$in': platforms}, "imgs." + str(uid): {'$exists': False}, "play_num." + str(uid): {'$exists': True}})
return dataList if dataList else False
    # Update the video-set image to the locally-downloaded copy
def modifySetImg(self, setId, data, uid):
if not data['img']:
return False
_collection = self.connect('video_set')
modify = _collection.update_one({"_id": setId}, {"$set": {"imgs." + str(uid): data['img']}})
return True if modify else False
    # Update a video's image
def modifyVideoImg(self, _id, data, uid):
if not data['img']:
return False
_collection = self.connect('video_list')
modify = _collection.update_one({"_id": _id}, {"$set": {"imgs." + str(uid): data['img']}})
return True if modify else False
# # 修复用 start
# def fixGetSet(_id):
# table = 'video_set'
# _collection = connect(table)
# return _collection.find_one({"_id": _id})
# def fixGetVideo(_id):
# table = 'video_list'
# _collection = connect(table)
# return _collection.find_one({"_id": _id})
# # 更新影片集图片
# def fixModifySetImg(setId, data):
# if not data['img']:
# return False
# table = 'video_set'
# _collection = connect(table)
# modify = _collection.update_one({"_id": setId}, {"$set": {"img": data['img']}})
# modify2 = _collection.update_one({"_id": setId}, {"$unset": {"img_status": ""}})
# return True if modify and modify2 else False
# # 更新影片内容图片
# def fixModifyVideoImg(_id, data):
# if not data['img']:
# return False
# table = 'video_list'
# _collection = connect(table)
# modify = _collection.update_one({"_id": _id}, {"$set": {"img": data['img']}})
# modify2 = _collection.update_one({"_id": _id}, {"$unset": {"img_status": ""}})
# return True if modify and modify2 else False
# # 修复用 end
_TASK_READY = 1 #未执行的
_TASK_FAILD = 2 #已完成的未成功的
_TASK_SUCCESS = 3 #明确成功的任务
# 获取下一个需要执行的任务
def getTask(self, taskTypes, deviceId):
_collection = self.connect('task')
deviceId = str(deviceId)
taskInfo = _collection.find_one({"toDevice": deviceId, "type": {'$in':taskTypes}, 'status': self._TASK_READY})
return taskInfo if taskInfo else False
_TRANSFER_FAILD = -1 # 操作中断或失败 不重新尝试
_TRANSFER_READY = 1 # 等待打包文件
# _TRANSFER_PACK = 2 # 完成打包等待传送
_TRANSFER_COMPLETE = 2 # 传送完成,等待接收
_TRANSFER_SUCCESS = 3 # 任务完成,等待删除原始文件
_TRANSFER_CLERA = 4 # 任务完成,原始文件清除完成
# 下载影片集
def set2Dl(self, setId, deviceId):
# print("set {} to dl".format(setId))
_collection = self.connect('video_set')
modify = _collection.update_one({"_id": setId}, {"$push": {"dl": str(deviceId)}})
return True if modify else False
# 传送完成
def taskTransferComplete(self, taskId, fileMd5, filePath):
self.taskDoing(taskId)
_collection = self.connect('task')
saveData = dict()
saveData['transfer_status'] = self._TRANSFER_COMPLETE
saveData['file_md5'] = fileMd5
saveData['file_path'] = filePath
saveData = common.removeUnsafeFields(saveData, self._taskFields.keys(), self._taskFields)
modify = _collection.update_one({"_id": ObjectId(taskId)}, {"$set": saveData})
return True if modify else False
# 传送失败
def taskTransferFaild(self, taskId):
self.taskDoing(taskId)
_collection = self.connect('task')
modify = _collection.update_one({"_id": ObjectId(taskId)}, {"$set": {"transfer_status": self._TRANSFER_FAILD}})
return True if modify else False
# 默认任务为失败
def taskDoing(self, _id):
_collection = self.connect('task')
modify = _collection.update_one({"_id": ObjectId(_id)}, {"$set": {"status": self._TASK_FAILD}})
return True if modify else False
# 任务成功
def taskSuccess(self, _id):
_collection = self.connect('task')
modify = _collection.update_one({"_id": ObjectId(_id)}, {"$set": {"status": self._TASK_SUCCESS}})
return True if modify else False
# 查询已下载完成的集
def getDledRes(self, uid):
# 查 list
_collection = self.connect('video_list')
# 找一个未下载的单集
listItem = _collection.find({"plays." + str(uid): {'$exists': True}})
return listItem if listItem else False
# 设置为下载中
def setVSetOnDl(self, setId, uid):
_collection = self.connect('video_set')
uid = str(uid)
upMap = {"_id": ObjectId(setId)}
# 已全部下载完成
# 移出下载完成
_collection.update(upMap, {"$pull": {"dled": uid}})
# 添加已完成
_collection.update(upMap, {"$addToSet": {"dl": uid}})
# 需要重新更新 play_num -1
_collection.update_one(upMap, {"$inc": {"play_num." + uid : -1}})
return True
# 移出已下载完成的影片
def rmVideo(self, _id, uid):
_collection = self.connect('video_list')
_collection.update({"_id": _id}, {"$unset": {"plays." + str(uid): ""}})
return True
if __name__ == "__main__":
db()
| 35.100775 | 169 | 0.597946 | 9,660 | 0.981508 | 0 | 0 | 0 | 0 | 0 | 0 | 3,741 | 0.380106 |
ffed95a551ec4c75f989589df7d781a9f4387728 | 1,251 | py | Python | baya/tests/test_templatetags.py | kreneskyp/baya | 5cf04b6873927124b4a3f24c113c08699dd61315 | [
"MIT"
] | 4 | 2016-05-24T13:57:37.000Z | 2020-02-27T05:22:56.000Z | baya/tests/test_templatetags.py | kreneskyp/baya | 5cf04b6873927124b4a3f24c113c08699dd61315 | [
"MIT"
] | 29 | 2016-02-05T01:31:51.000Z | 2022-02-23T18:50:58.000Z | baya/tests/test_templatetags.py | hrichards/baya | f319cef5e95cd6a166265d51ae0ea236b6f65be3 | [
"MIT"
] | 6 | 2016-05-20T22:22:45.000Z | 2019-09-03T17:57:59.000Z | from django.template import Context
from django.template import Template
from .test_base import LDAPGroupAuthTestBase
from django.contrib.auth.models import AnonymousUser
class CanUserPerformActionTagTest(LDAPGroupAuthTestBase):
BASIC_TEMPLATE = Template(
"{% load baya_tags %}"
"{% can_user_perform_action action as can_perform_action %}"
"{% if can_perform_action %}"
"True"
"{% else %}"
"False"
"{% endif %}"
)
def test_anonymous_user_has_permission_false(self):
context = Context({
'action': 'index',
'user': AnonymousUser(),
})
rendered = self.BASIC_TEMPLATE.render(context)
self.assertIn('False', rendered)
def test_has_permission_false(self):
context = Context({
'action': 'index',
'user': self.login('has_nothing'),
})
rendered = self.BASIC_TEMPLATE.render(context)
self.assertIn('False', rendered)
def test_has_permission_true(self):
context = Context({
'action': 'index',
'user': self.login('has_all'),
})
rendered = self.BASIC_TEMPLATE.render(context)
self.assertIn('True', rendered)
| 29.785714 | 69 | 0.608313 | 1,077 | 0.860911 | 0 | 0 | 0 | 0 | 0 | 0 | 260 | 0.207834 |
fff185192df2e58db961f6b323cfb8259a7a9f46 | 2,611 | py | Python | egg/zoo/sum_game/architectures.py | CorentinKervadec/EGG | 5ccd49c4a493514b1194699954d41940f5e2a5c6 | [
"MIT"
] | null | null | null | egg/zoo/sum_game/architectures.py | CorentinKervadec/EGG | 5ccd49c4a493514b1194699954d41940f5e2a5c6 | [
"MIT"
] | null | null | null | egg/zoo/sum_game/architectures.py | CorentinKervadec/EGG | 5ccd49c4a493514b1194699954d41940f5e2a5c6 | [
"MIT"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
from torch.nn import functional as F
# In EGG, the game designer must implement the core functionality of the Sender and Receiver agents. These are then
# embedded in wrappers that are used to train them to play Gumbel-Softmax- or Reinforce-optimized games. The core
# Sender must take the input and produce a hidden representation that is then used by the wrapper to initialize
# the RNN or other module that will generate the message. The core Receiver expects a hidden representation
# generated by the message-processing wrapper, plus possibly other game-specific input, and it must generate the
# game-specific output.
# The RecoReceiver class implements the core Receiver agent for the reconstruction game. This is simply a linear layer
# that takes as input the vector generated by the message-decoding RNN in the wrapper (x in the forward method) and
# produces an output of n_features dimensionality, to be interpreted as a one-hot representation of the reconstructed
# attribute-value vector
class RecoReceiver(nn.Module):
def __init__(self, n_features, n_hidden):
super(RecoReceiver, self).__init__()
self.output = nn.Linear(n_hidden, n_features)
def forward(self, x, _input, _aux_input):
return self.output(x)
# The Sender class implements the core Sender agent common to both games: it gets the input target vector and produces a hidden layer
# that will initialize the message producing RNN
class Sender(nn.Module):
def __init__(self, n_hidden, n_features, log_sftmx=False):
super(Sender, self).__init__()
self.fc1 = nn.Linear(n_features, n_hidden)
self.log_sftmx = log_sftmx
if log_sftmx:
self.logsoft = nn.LogSoftmax(dim=1)
def forward(self, x, _aux_input):
out = self.fc1(x)
if self.log_sftmx:
out = self.logsoft(out)
return out
class SenderOracle(nn.Module):
def __init__(self, n_hidden, n_features):
super(SenderOracle, self).__init__()
def forward(self, x, _aux_input):
n = x.size(-1)/2
ar = torch.arange(n).to(x.device)
ar = torch.cat([ar, ar])
ar = torch.stack([ar]*x.size(0), dim=0)
decoded = (x*ar).sum(-1).long().unsqueeze(-1)
out = torch.zeros_like(x)
out.scatter_(1, decoded, 1e6)
return out
# here, it might make sense to add a non-linearity, such as tanh | 44.254237 | 133 | 0.711222 | 1,207 | 0.462275 | 0 | 0 | 0 | 0 | 0 | 0 | 1,372 | 0.525469 |
fff18656fd42956b8ef43e1d1fc5a06b2aa15f66 | 2,757 | py | Python | utils/random_training_splits.py | suvarnak/GenerativeFSLCovid | 0bdeb4ed444c5c9d59697c71d0733fc3a100944c | [
"MIT"
] | null | null | null | utils/random_training_splits.py | suvarnak/GenerativeFSLCovid | 0bdeb4ed444c5c9d59697c71d0733fc3a100944c | [
"MIT"
] | null | null | null | utils/random_training_splits.py | suvarnak/GenerativeFSLCovid | 0bdeb4ed444c5c9d59697c71d0733fc3a100944c | [
"MIT"
] | null | null | null | import os
import shutil
import random
def copy_random_k_files(src_dir, k, dst_dir):
file_list = os.listdir(src_dir)
if k == -1:
k=len(file_list)
for i in range(k):
random_file=random.choice(file_list)
print(random_file)
src1 = os.path.join(src_dir, random_file)
dst1 = os.path.join(dst_dir, random_file)
shutil.copyfile(src1, dst1)
def copytree(src, dst, symlinks=False, ignore=None):
for item in os.listdir(src):
s = os.path.join(src, item)
d = os.path.join(dst, item)
if os.path.isdir(s):
shutil.copytree(s, d, symlinks, ignore)
else:
shutil.copy2(s, d)
def main():
shots_per_run = 84
no_of_runs =10
image_dir = "./data/DeepCovid"
split_names = os.listdir(image_dir)
target_splits_dir = "./data"
print("createing directory structure")
for i in range(no_of_runs):
random_run_path = os.path.join(target_splits_dir, "DeepCovid_"+str(shots_per_run) + "_" + str(i))
print(random_run_path)
os.mkdir(random_run_path)
train_split = "train" #split_names[1]
test_split = "test" #split_names[0]
class_names = ['0_non','1_covid']
base_path_split = os.path.join(random_run_path,train_split)
os.makedirs(os.path.join(base_path_split,class_names[0]))
os.makedirs(os.path.join(base_path_split,class_names[1]))
base_path_split = os.path.join(random_run_path,test_split)
os.makedirs(os.path.join(base_path_split,class_names[0]))
os.makedirs(os.path.join(base_path_split,class_names[1]))
print("Directory '% s' created" % random_run_path)
src_train_dir = os.path.join(image_dir,"train")
src_train_dir_non = os.path.join(src_train_dir,"0_non")
src_train_dir_covid = os.path.join(src_train_dir,"1_covid")
dst_train_dir = os.path.join(random_run_path,"train")
dst_train_dir_non = os.path.join(dst_train_dir,"0_non")
dst_train_dir_covid = os.path.join(dst_train_dir,"1_covid")
copy_random_k_files(src_train_dir_non, shots_per_run, dst_train_dir_non)
copy_random_k_files(src_train_dir_covid, shots_per_run, dst_train_dir_covid)
src_test_dir = os.path.join(image_dir,"test")
src_test_dir_non = os.path.join(src_test_dir,"0_non")
src_test_dir_covid = os.path.join(src_test_dir,"1_covid")
dst_test_dir = os.path.join(random_run_path,"test")
dst_test_dir_non = os.path.join(dst_test_dir,"0_non")
dst_test_dir_covid = os.path.join(dst_test_dir,"1_covid")
copytree(src_test_dir_non, dst_test_dir_non)
copytree(src_test_dir_covid, dst_test_dir_covid)
if __name__ == '__main__':
main()
| 39.385714 | 105 | 0.673921 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 258 | 0.09358 |
fff197ae68beb5dbb26583494df00c1fc7732948 | 1,285 | py | Python | tools/gen_bbox_ac.py | vincentzhang/faster-rcnn-fcn | 7118d715a430f0ec2697e5f7a9a39c9752b466da | [
"BSD-2-Clause"
] | 7 | 2019-07-19T21:30:26.000Z | 2021-06-17T03:57:22.000Z | tools/gen_bbox_ac.py | vincentzhang/faster-rcnn-fcn | 7118d715a430f0ec2697e5f7a9a39c9752b466da | [
"BSD-2-Clause"
] | null | null | null | tools/gen_bbox_ac.py | vincentzhang/faster-rcnn-fcn | 7118d715a430f0ec2697e5f7a9a39c9752b466da | [
"BSD-2-Clause"
] | 1 | 2021-06-17T03:57:23.000Z | 2021-06-17T03:57:23.000Z | # generated bbox ground truth from pixel-wise segmentation
# it currently only generate one bbox
from __future__ import print_function
import numpy as np
import h5py
import os
import pdb
mask_path = '../data/acce'
f = h5py.File(os.path.join(mask_path, "resized_label_ac_2d.h5"), 'r')
bbox_path = '../data/acce/bbox'
if not os.path.exists(bbox_path):
os.mkdir(bbox_path)
# dim: shape (256, 367, 342), slices, height, width
count = 0
for k in f.keys():
#pdb.set_trace()
count += 1
print("processing {}-th vol".format(count))
data = f[k][...] # convert to numpy
k = k.rsplit('_',1)[0] # strip the '_label' from the vol name
with open( os.path.join(bbox_path, k)+'_bbox.txt', 'w') as bbox_file:
# iterate through each slice
for idx in range(data.shape[0]):
mask = data[idx, :, :] # get the mask
i,j = np.where(mask) # find positive mask
if not i.size: # no positive mask
print("{}_{},{}".format(k, idx, 0), file=bbox_file)
else:
h_min,w_min = np.min(zip(i,j), axis=0)
h_max,w_max = np.max(zip(i,j), axis=0)
print("{}_{},{},{},{},{},{}".format(k, idx, 1, w_min, h_min, w_max,
h_max), file=bbox_file)
f.close()
| 34.72973 | 83 | 0.585214 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 430 | 0.33463 |
fff2144edf1dc7c96f337289635ef5af44b23625 | 8,510 | py | Python | testscript/imputation_algorithms.py | zshufan/Tattle-Tale | f9d93051efb523f1bda0cead023c2f001e18cc85 | [
"BSD-3-Clause"
] | null | null | null | testscript/imputation_algorithms.py | zshufan/Tattle-Tale | f9d93051efb523f1bda0cead023c2f001e18cc85 | [
"BSD-3-Clause"
] | null | null | null | testscript/imputation_algorithms.py | zshufan/Tattle-Tale | f9d93051efb523f1bda0cead023c2f001e18cc85 | [
"BSD-3-Clause"
] | null | null | null | # some codes refer to Holoclean evaluation function
# https://github.com/HoloClean/holoclean
import pandas as pd
import numpy as np
import logging
import random
import argparse
parser = argparse.ArgumentParser(description='Predict on many examples')
parser.add_argument("--dataset", type=str, help="dataset path")
parser.add_argument("--ground_truth", type=str, help="ground truth path")
parser.add_argument("--ground_truth_2", type=str, help="ground truth path")
args = parser.parse_args()
NULL_REPR = '_nan_'
exclude_attr = ['_tid_', 'FName', 'LName']
class DataCleaningAsAdv:
def __init__(self, csv_fpath) -> None:
# load dataset with missing values
self.load_dataset(csv_fpath)
# associate with domain
self.get_domain_knowledge()
def load_dataset(self, fpath, na_values=None) -> None:
try:
# Do not include TID and source column as trainable attributes
exclude_attr_cols = ['_tid_']
self.df = pd.read_csv(fpath, dtype=str, na_values=na_values, encoding='utf-8')
# Normalize the dataframe: drop null columns, convert to lowercase strings, and strip whitespaces.
for attr in self.df.columns.values:
if self.df[attr].isnull().all():
logging.warning("Dropping the following null column from the dataset: '%s'", attr)
self.df.drop(labels=[attr], axis=1, inplace=True)
continue
if attr not in exclude_attr_cols:
self.df[attr] = self.df[attr].str.strip().str.lower()
# Add _tid_ column to dataset that uniquely identifies an entity.
self.df.insert(0, '_tid_', range(0,len(self.df)))
# Use NULL_REPR to represent NULL values
self.df.fillna(NULL_REPR, inplace=True)
# print(self.df.head())
logging.info("Loaded %d rows with %d cells", self.df.shape[0], self.df.shape[0] * self.df.shape[1])
except Exception:
logging.error('loading data for missing data table %s', fpath)
raise
def load_ground_truth(self, fpath, tid_col, attr_col, val_col, na_values=None) -> None:
try:
self.gt_data = pd.read_csv(fpath, na_values=na_values, encoding='utf-8')
# We drop any ground truth values that are NULLs since we follow
# the closed-world assumption (if it's not there it's wrong).
# TODO: revisit this once we allow users to specify which
# attributes may be NULL.
self.gt_data.dropna(subset=[val_col], inplace=True)
self.gt_data.fillna(NULL_REPR, inplace=True)
self.gt_data.rename({tid_col: '_tid_',
attr_col: '_attribute_',
val_col: '_value_'},
axis='columns',
inplace=True)
self.gt_data = self.gt_data[['_tid_', '_attribute_', '_value_']]
# Normalize string to whitespaces.
self.gt_data['_value_'] = self.gt_data['_value_'].str.strip().str.lower()
except Exception:
logging.error('load_data for ground truth table %s', fpath)
raise
def get_domain_knowledge(self) -> None:
# get the domain of each column
# and the frequency of each value in the domain
self.domain = {}
self.weight = {}
for attr in self.df.columns.values:
if attr in exclude_attr:
continue
domain = self.df[attr].unique()
if NULL_REPR in domain:
domain = domain[domain != NULL_REPR]
self.domain[attr] = domain
attr_gb_count_df = self.df.groupby([attr])[attr].count()
# print(attr_gb_count_df)
self.weight[attr] = [attr_gb_count_df[val] for val in domain]
# print(self.weight[attr])
def fill_in_random_value(self) -> None:
self.random_repair = self.df.copy()
for attr in self.df.columns.values:
if attr in exclude_attr:
continue
# fill in the missing values
indices = self.random_repair[self.random_repair[attr]==NULL_REPR].index.tolist()
# print(indices)
for index in indices:
if self.random_repair.loc[index][attr] is not NULL_REPR:
logging.error("index not match")
raise
self.random_repair.at[index, attr] = np.random.choice(self.domain[attr])
# print(self.random_repair.loc[index][attr], self.df.loc[index][attr])
def fill_in_popular_value(self) -> None:
self.popular_repair = self.df.copy()
for attr in self.df.columns.values:
if attr in exclude_attr:
continue
# sort the zipped list to get the most popular item
# in each column in the ascending order
zipped = zip(self.domain[attr], self.weight[attr])
sorted_zip = sorted(zipped, key=lambda x: x[1])
# print(sorted_zip[-1])
# fill in the missing values
indices = self.popular_repair[self.popular_repair[attr]==NULL_REPR].index.tolist()
for index in indices:
if self.popular_repair.loc[index][attr] is not NULL_REPR:
logging.error("index not match")
raise
self.popular_repair.at[index, attr] = sorted_zip[-1][0]
# print(self.popular_repair.loc[index][attr], self.df.loc[index][attr])
def fill_in_by_weighted_sampling(self) -> None:
self.weighted_repair = self.df.copy()
for attr in self.df.columns.values:
if attr in exclude_attr:
continue
# fill in the missing values
indices = self.weighted_repair[self.weighted_repair[attr]==NULL_REPR].index.tolist()
# print(indices)
for index in indices:
if self.weighted_repair.loc[index][attr] is not NULL_REPR:
logging.error("index not match")
raise
self.weighted_repair.at[index, attr] = random.choices(self.domain[attr], weights=self.weight[attr], k=1)[0]
# print(self.weighted_repair.loc[index][attr], self.df.loc[index][attr])
def evaluate(self, gt_fpath, tid_col, attr_col, val_col, file) -> None:
self.load_ground_truth(gt_fpath, tid_col, attr_col, val_col)
total_repairs = self.gt_data.shape[0]
def _evaluate(df) -> int:
correct_repair = 0
for _, row in self.gt_data.iterrows():
if df.loc[row['_tid_']][row['_attribute_']] == row['_value_']:
if self.df.loc[row['_tid_']][row['_attribute_']] is not NULL_REPR:
logging.error("index not match when evaluating")
raise
correct_repair += 1
return correct_repair
# evaluate random filling
self.fill_in_random_value()
correct_repair = _evaluate(self.random_repair)
print("Precision of random filling: {}, correct_repairs: {}, total_repairs: {}".format(correct_repair/total_repairs, correct_repair, total_repairs), file=file)
# evaluate popular filling
self.fill_in_popular_value()
correct_repair = _evaluate(self.popular_repair)
print("Precision of popular filling: {}, correct_repairs: {}, total_repairs: {}".format(correct_repair/total_repairs, correct_repair, total_repairs), file=file)
# evaluate weighted filling
self.fill_in_by_weighted_sampling()
correct_repair = _evaluate(self.weighted_repair)
print("Precision of weighted filling: {}, correct_repairs: {}, total_repairs: {}".format(correct_repair/total_repairs, correct_repair, total_repairs), file=file)
if __name__ == "__main__":
# load dataset
adv = DataCleaningAsAdv(args.dataset)
f = open("baseline_cleaning_report_1", "a")
print(args.dataset, file=f)
# evaluate
adv.evaluate(gt_fpath=args.ground_truth,
tid_col='tid',
attr_col='attribute',
val_col='correct_val', file=f)
if args.ground_truth_2 is not None:
adv.evaluate(gt_fpath=args.ground_truth_2,
tid_col='tid',
attr_col='attribute',
val_col='correct_val', file=f)
| 42.338308 | 169 | 0.602115 | 7,412 | 0.870975 | 0 | 0 | 0 | 0 | 0 | 0 | 2,235 | 0.262632 |
fff27be5ec642c73c3bac5ec2ecda165d7fe17c5 | 145 | py | Python | Hackerrank_python/15.numpy/10.Min and Max.py | manish1822510059/Hackerrank | 7c6e4553f033f067e04dc6c756ef90cb43f3c4a8 | [
"MIT"
] | 39 | 2020-09-27T05:32:05.000Z | 2022-01-08T18:04:05.000Z | Hackerrank_python/15.numpy/10.Min and Max.py | manish1822510059/Hackerrank | 7c6e4553f033f067e04dc6c756ef90cb43f3c4a8 | [
"MIT"
] | 5 | 2020-10-02T13:33:00.000Z | 2021-03-01T14:06:08.000Z | Hackerrank_python/15.numpy/10.Min and Max.py | manish1822510059/Hackerrank | 7c6e4553f033f067e04dc6c756ef90cb43f3c4a8 | [
"MIT"
] | 6 | 2020-10-03T04:04:55.000Z | 2021-10-18T04:07:53.000Z | import numpy as arr
n,m=map(int,input().split())
ar=([list(map(int,input().split()))for _ in range(n)])
arr1=arr.min(ar,axis=1)
print(max(arr1))
| 24.166667 | 54 | 0.668966 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
fff3557fd7e005babefb16e3b6b117ef8a3354ec | 918 | py | Python | file_automation.py | FlightDev/YSPA | 5226712ebf305e7a3c686c43c996517a617f748b | [
"MIT"
] | null | null | null | file_automation.py | FlightDev/YSPA | 5226712ebf305e7a3c686c43c996517a617f748b | [
"MIT"
] | null | null | null | file_automation.py | FlightDev/YSPA | 5226712ebf305e7a3c686c43c996517a617f748b | [
"MIT"
] | null | null | null | import os
import glob
from astropy.io import fits
#/home/student/Desktop/Images/iTelescope/20180716-California-T24-GOOD
# Yo Neal. When you use this program, you have to change a few things between iTelescope and LFOP
# FIRST, remember to change the file path or you'll be a dummy. Also for LFOP -13 and -12 while
# for iTelescope it should be -9 and -8. Hopefully you know what to do with those numbers...
#/home/student/Desktop/Images/LFOP
dir = '20180726-LFOP-GOOD'
path = '/home/student/Desktop/Images/LFOP/' + dir + '/'
dict = {}
date = ""
for filename in os.listdir(path):
if filename.endswith(".fit"):
file = path + str(filename)
image = fits.open(file)
s = image[0].header.get("DATE-OBS")
date = s[:len(s) - 13]
dict.update({s[len(s) - 12:]: filename})
for key, value in sorted(dict.items()):
print value + "\t\t" + str(key)
print date
print len(dict)
| 32.785714 | 99 | 0.667756 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 476 | 0.518519 |
fff3dd07c2f6cdec73bcd25788a20c7594c2652d | 959 | py | Python | streamlit/main.py | prakhar134/clean-or-messy | 0b9080363c48ca9cff0449875dfcbd169ef64321 | [
"MIT"
] | 13 | 2020-10-08T13:52:21.000Z | 2022-03-11T07:02:35.000Z | streamlit/main.py | architsharmaa/clean-or-messy | b40028cb4c4c8bbefb91a4b016096953b445c146 | [
"MIT"
] | null | null | null | streamlit/main.py | architsharmaa/clean-or-messy | b40028cb4c4c8bbefb91a4b016096953b445c146 | [
"MIT"
] | 9 | 2020-10-08T12:02:50.000Z | 2022-01-25T23:38:46.000Z | from fastai.vision.all import *
from PIL import Image
import streamlit as st
import numpy as np
from io import BytesIO
from .config import imgWidth, imgHeight
st.title("CleanvsMessy")
st.markdown('''
## Upload the image''',True)
st.set_option('deprecation.showfileUploaderEncoding', False)
file = st.file_uploader(" ")
model = load_learner('model/model_v0.pkl')
st.markdown('''
## Preview of the Image''',True)
if file != None:
st.image(file, width = imgWidth, height = imgHeight)
if file != None:
def upload(file):
image = Image.open(file)
image_np = np.array(image)
image_without_alpha = image_np[:, :, :3]
is_clean, _, probs = model.predict(image_without_alpha)
prob = float(list(probs.numpy())[1])
return {"is_image_clean": is_clean, "predictedVal": prob}
result = upload(file)
st.write("Is Image Clean? "+result["is_image_clean"])
st.write("Confidence "+str(result["predictedVal"])) | 30.935484 | 65 | 0.683003 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 222 | 0.231491 |
fff46233cd9fc6a4821a3755e7bb2b8fd09e058e | 1,030 | py | Python | read_trials.py | Volkarl/P10-ExoskeletonTransferLearning | 311daf3791c65838ff9c496eeb6526b096b41d4c | [
"MIT"
] | null | null | null | read_trials.py | Volkarl/P10-ExoskeletonTransferLearning | 311daf3791c65838ff9c496eeb6526b096b41d4c | [
"MIT"
] | 2 | 2020-11-13T18:39:27.000Z | 2021-08-25T15:59:36.000Z | read_trials.py | Volkarl/P10-ExoskeletonTransferLearning | 311daf3791c65838ff9c496eeb6526b096b41d4c | [
"MIT"
] | null | null | null | import pickle
import matplotlib.pyplot as plt
import pandas as pd
trials = pickle.load(open("trials.p", "rb"))
print("Set breakpoint here")
#for item in trials.trials:
# args = item["vals"]
# res = item["result"]["loss"]
#itemtuples = [(item["misc"]["vals"]["dilation_group"], item["misc"]["vals"]["use_ref_points"], item["result"]["loss"]) for item in trials.trials]
#(dil, ref, loss) = zip(*itemtuples)
#plt.figure()
#plt.plot(dil, 'ro')
#plt.title('Use_dilation (1 is true, 0 is false)')
#plt.plot(loss)
#plt.plot(pd.DataFrame(loss).ewm(span=1).mean())
#plt.title('MAE')
#plt.plot(ref, 'g^')
#plt.legend()
#plt.show()
print("Set breakpoint here")
print("PRINT BEST TRIALS")
myitems = [(trial["result"]["loss"], str(trial["misc"]["vals"])) for trial in trials.trials if trial["result"]["status"] == "ok"]
myitems.sort(key=lambda tup: tup[0])
for item in myitems[:10]:
print("--------------------------\n")
print(item)
print("\n\n")
# If you want to print training times use attemptid["book_time"]
| 24.52381 | 146 | 0.635922 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 694 | 0.673786 |
fff5f55a4eee57bae636a577f32adbde97ba453e | 3,151 | py | Python | e3/provisioning/AtlassianAwsSecurity.py | sguillory6/e3 | 1505e6ea389157b9645155b9da13d6d316235f1a | [
"Apache-2.0"
] | null | null | null | e3/provisioning/AtlassianAwsSecurity.py | sguillory6/e3 | 1505e6ea389157b9645155b9da13d6d316235f1a | [
"Apache-2.0"
] | null | null | null | e3/provisioning/AtlassianAwsSecurity.py | sguillory6/e3 | 1505e6ea389157b9645155b9da13d6d316235f1a | [
"Apache-2.0"
] | null | null | null | import logging
import logging.config
import os
import subprocess
from datetime import datetime, timedelta
from botocore.credentials import CredentialProvider, RefreshableCredentials
from dateutil.tz import tzlocal
from common.E3 import e3
class AtlassianAwsSecurity(CredentialProvider):
"""
This class is only used internally by Atlassian to make use of our SAML implementation for AWS authentication.
It is included in the E3 distribution to serve as an example of how to integrate 3rd party authentication
tools with E3
"""
METHOD = "awstoken"
AWS_ACCESS_KEY_ID_KEY = 'AWS_ACCESS_KEY_ID'
AWS_SECRET_ACCESS_KEY_KEY = 'AWS_SECRET_ACCESS_KEY'
AWS_SECURITY_TOKEN_KEY = 'AWS_SECURITY_TOKEN'
def __init__(self, environ=None, mapping=None):
super(AtlassianAwsSecurity, self).__init__()
conf = e3.get_auth_config()
logging.debug("Atlassian AWS config: %s" % conf)
self._script = os.path.expanduser(conf.get('script', None))
self._token_file = os.path.expanduser(conf.get('tokens', None))
self._token_valid_for = long(conf.get('valid_for', 3600))
def load(self):
return RefreshableCredentials.create_from_metadata(
metadata=self.refresh(),
refresh_using=self.refresh,
method=self.METHOD)
def refresh(self):
if not (self._script and self._token_file):
logging.error("Unable to refresh tokens because configuration is missing")
return None
self._run_script()
return self._parse_tokens()
def _parse_tokens(self):
if not os.path.exists(self._token_file):
logging.error("Unable to locate '%s' unable to load AWS credentials, trying to proceed without them.",
self._token_file)
else:
with open(self._token_file) as tokens:
expiry = datetime.now(tzlocal()) + timedelta(minutes=55)
metadata = {
"expiry_time": str(expiry)
}
lines = tokens.readlines()
for line in lines:
line_tokens = line[7:-1]
eq_pos = line_tokens.find("=")
token_key = line_tokens[0:eq_pos]
token_value = line_tokens[eq_pos + 1:]
if token_key == self.AWS_ACCESS_KEY_ID_KEY:
metadata["access_key"] = token_value
if token_key == self.AWS_SECRET_ACCESS_KEY_KEY:
metadata["secret_key"] = token_value
self._aws_secret_access_key = token_value
if token_key == self.AWS_SECURITY_TOKEN_KEY:
metadata["token"] = token_value
self._aws_security_token = token_value
return metadata
return None
def _run_script(self):
environ = os.environ.copy().update({
'PATH': '/usr/local/bin:/usr/local/sbin:/usr/bin:/bin:/usr/sbin:/sbin',
'SHELL': '/bin/bash'
})
subprocess.call(self._script, shell=True, env=environ)
| 39.886076 | 114 | 0.614408 | 2,906 | 0.922247 | 0 | 0 | 0 | 0 | 0 | 0 | 658 | 0.208823 |
fff5fae09ca4ba6758cfde4e7471355a0e7af098 | 3,506 | py | Python | RecRoomAnimatedProfilePicture.py | zigzatuzoo/Rec-Room-Animated-Profile-Picture | b8eeabf478613f47d3bdb9195ad2f5051e7aaaad | [
"Apache-2.0"
] | 4 | 2021-08-17T01:13:03.000Z | 2022-03-19T04:03:01.000Z | RecRoomAnimatedProfilePicture.py | zigzatuzoo/Rec-Room-Animated-Profile-Picture | b8eeabf478613f47d3bdb9195ad2f5051e7aaaad | [
"Apache-2.0"
] | null | null | null | RecRoomAnimatedProfilePicture.py | zigzatuzoo/Rec-Room-Animated-Profile-Picture | b8eeabf478613f47d3bdb9195ad2f5051e7aaaad | [
"Apache-2.0"
] | null | null | null | ''' Stuff you need to update for this to work '''
'Enter your username here'
user = ''
'Enter your password here'
passwd = ''
image1 = '2d83af05944d49c69fa9565fb238a91b.jpg'
image2 = '49b2788b672e4088a25eb0a9eff35c17.jpg'
image3 = '355c2c7e87f0489bb5f0308cdec108f6.jpg'
" ^ You need to change EACH of these to whatever you want the 3 pics to be (Currently set to a waving red zigzag)"
''' Stuff that will change how the program works '''
speed = 0.2
"^ As you can probably guess, this changes how long the PFP stays on each image"
import time
try:
import requests
except:
print('''You do not have the requests library installed, you need to install it via the following command:
pip install requests
Thank you!''')
try:
import recnetlogin as rnl
except:
print('''You do not have the RecNetLogin package installed, you need to install it via the following command:
python -m pip install git+https://github.com/Jegarde/RecNet-Login.git#egg=recnetlogin
Thank you!''')
''' Just Initializing some values '''
login = rnl.login_to_recnet(username=user,password=passwd)
x = 0
BToken = ''
''' Making the strings into the format read by the rec.net image api '''
imageName1 = 'imageName=' + image1
imageName2 = 'imageName=' + image2
imageName3 = 'imageName=' + image3
''' Initial token request '''
BToken = login.access_token
print(BToken)
# The loop program that actually makes the picture move.
def _put_profile_image(image_data):
    """Send one profile-image change to rec.net and pace the animation.

    ``image_data`` is a pre-formatted ``imageName=...`` form payload.  The
    module-level ``Headers`` dict (rebuilt each loop pass so it always
    carries the current bearer token) and the request counter ``x`` are
    read at call time.  Prints the HTTP status alongside the running
    request count, then sleeps ``speed`` seconds so the frame is visible.

    The original script defined three identical copies of this function
    (i1/i2/i3) inside the loop body, re-creating them on every iteration;
    one parameterized helper defined once does the same work.
    """
    r = requests.put('https://accounts.rec.net/account/me/profileImage', headers = Headers, data = image_data)
    print(str(r) + " num of requests: " + str(x))
    time.sleep(speed)

while True:
    ''' The HTTP header for changing your In-Game pfp '''
    # Rebuilt on every pass so a token refreshed at the bottom of the loop
    # is picked up by the next round of requests.
    Headers = {'sec-ch-ua':'";Not A Brand";v="99", "Chromium";v="88"',
               'Accept' : '*/*',
               'sec-ch-ua-mobile' : '?0',
               'Authorization' : BToken,
               'Content-Type' : 'application/x-www-form-urlencoded; charset=UTF-8',
               'Origin' : 'https://rec.net',
               'Sec-Fetch-Site' : 'same-site',
               'Sec-Fetch-Mode' : 'cors',
               'Sec-Fetch-Dest' : 'empty',
               'Referer' : 'https://rec.net/',
               'Accept-Encoding' : 'gzip, deflate',
               'Accept-Language' : 'en-US,en;q=0.9',
               }
    # Frame order 1 -> 2 -> 3 -> 2: the trailing 2 makes the sequence wrap
    # back to 1 smoothly when the loop repeats.  ``x`` counts every request
    # so the printed counter keeps working.
    for frame in (imageName1, imageName2, imageName3, imageName2):
        x = x + 1
        _put_profile_image(frame)
    ''' Requests a new auth token when that one is no longer valid '''
    # Probe request with no body: a 401 means the bearer token expired,
    # so log in again and store the fresh token for the next iteration.
    r = requests.put('https://accounts.rec.net/account/me/profileImage', headers = Headers)
    if r.status_code == 401:
        print('Invalid Token')
        login = rnl.login_to_recnet(username=user,password=passwd)
        BToken = login.access_token
        print(BToken)
| 35.77551 | 287 | 0.634341 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,150 | 0.613234 |
fff7d77cd5951c966e8c3d645997399fd6e953c2 | 14,121 | py | Python | rcommander/src/rcommander/graph_view.py | rummanwaqar/rcommander-core | 7106d5868db76c47dea6ad11118a54351a8bd390 | [
"BSD-3-Clause"
] | 4 | 2015-04-08T09:57:43.000Z | 2021-08-12T01:44:37.000Z | rcommander/src/rcommander/graph_view.py | jhu-lcsr-forks/rcommander-core | 1a0350e9b93687eff6a4407f72b5250be5f56919 | [
"BSD-3-Clause"
] | 1 | 2015-03-12T09:10:27.000Z | 2015-03-12T09:10:27.000Z | rcommander/src/rcommander/graph_view.py | jhu-lcsr-forks/rcommander-core | 1a0350e9b93687eff6a4407f72b5250be5f56919 | [
"BSD-3-Clause"
] | 3 | 2015-03-12T10:59:17.000Z | 2021-06-21T02:13:57.000Z | #import roslib; roslib.load_manifest('rcommander_core')
import copy
import math
import time

import numpy as np

import graph
import graph.layout as gl
import graph.style as gs
import graph_model as gm
import tool_utils as tu
def copy_style(astyle, bstyle):
    """Copy every visual attribute of ``astyle`` onto ``bstyle``.

    Used to clone one of the graph library's stock styles into a freshly
    created style object before tweaking it.
    """
    for attr_name in ('background', 'fill', 'stroke', 'strokewidth',
                      'text', 'font', 'fontsize', 'textwidth',
                      'align', 'depth'):
        setattr(bstyle, attr_name, getattr(astyle, attr_name))
class GraphView:
def __init__(self, context, graph_model):
self.graph_model = graph_model
g = self.graph_model.gve
self.gve = g
self.context = context
node_outlines = self.context.color(0.4, 0.4, 0.4, 1.)
text_color = self.context.color(0.3, 0.3, 0.3, 1.)
node_font_size = 14
#Customizations
g.styles.default.depth = True
g.styles.default.background = self.context.color(1., 1., 1., 1.)
g.styles.default.stroke = node_outlines
g.styles.default.text = text_color
g.styles.default.fontsize = node_font_size
g.styles.root.text = self.context.color(255/255., 153/255., 51/255., 1.)
g.styles.important.fontsize = node_font_size
g.styles.important.text = text_color
g.styles.important.stroke = node_outlines
g.styles.marked.fontsize = node_font_size
g.styles.marked.text = text_color
g.styles.marked.stroke = node_outlines
#g.styles.default.fontsize = 12
#g.styles.light.fontsize = 12
#g.styles.back.fontsize = 12
#g.styles.marked.fontsize = 12
#g.styles.dark.fontsize = 12
#g.styles.highlight.fontsize = 12
#g.styles.root.fontsize = 12
self.refresh = self.gve.layout.refresh
old_outcome_style = g.styles.create('old_outcome')
active_node_style = g.styles.create('active_node')
selected_style = g.styles.create('selected')
normal_style = g.styles.create('normal')
normal_edge_style = g.styles.create('normal_edge')
selected_edge_style = g.styles.create('selected_edge')
graph_circle = g.styles.create('graph_circle')
container = g.styles.create('container')
container_selected = g.styles.create('container_selected')
copy_style(g.styles.important, old_outcome_style)
copy_style(g.styles.important, active_node_style)
copy_style(g.styles.important, selected_style)
copy_style(g.styles.default, normal_style)
copy_style(g.styles.default, normal_edge_style)
copy_style(g.styles.default, selected_edge_style)
copy_style(g.styles.default, graph_circle)
copy_style(g.styles.default, container)
copy_style(g.styles.default, container_selected)
graph_circle.fill = self.context.color(.96, .96, .96, .96)
graph_circle.stroke = self.context.color(.8, .8, .8, 1.)
graph_circle.strokewidth = 3
graph_circle.fontsize = 24
graph_circle.textwidth = 800
graph_circle.text = self.context.color(.5, .5, .5, 1.)
container.fill = self.context.color(255./255, 204./255, 102./255., .4)
container.node = g.styles.important.node
container_selected.fill = self.context.color(255./255, 204./255, 102./255., 1.)
container_selected.node = g.styles.important.node
selected_style.text = text_color
selected_edge_style.stroke = self.context.color(0.80, 0.00, 0.00, 0.75)
selected_edge_style.strokewidth = 1.0
active_node_style.text = text_color
active_node_style.fill = self.context.color(153./255, 255./255, 51/255, .75)
active_node_style.strokewidth = 3
old_outcome_style.text = text_color
old_outcome_style.fill = self.context.color(153./255, 255./255, 51/255, .4)
self.radii_increment = 150
self.fsm_start_color = 1.
self.fsm_end_color = .96
self.fsm_stroke_color = .85
self.fsm_current_context_node = None
self.fsm_dclick_cb = None
self.right_clicked = None
self.dx = 0.
self.dy = 0.
self.tx = 0.
self.ty = 0.
#g.node('start').style = 'marked'
def set_node_style(self, node_name, style):
self.gve.node(node_name).style = style
self.gve.layout.refresh()
def get_node_style(self, node_name):
return self.gve.node(node_name).style
#def drag_background_cb(self, s, e):
# #print start_click.x, start_click.y
# #print curr_pos.x, curr_pos.y
# #transform.scale(self.zoom, self.zoom)
# self.dx = e.x - s.x
# self.dy = e.y - s.y
# #print dx, dy
# #transform = QTransform()
# ##transform.scale(abs(dx), abs(dy))
# #transform.translate(dx, dy)
# #self.graphicsView.superView.setTransform(transform)
def _background_drag(self, properties_dict):
mouse_pose = properties_dict['MOUSEX'], properties_dict['MOUSEY']
if properties_dict['rightdown']:
if not self.right_clicked:
self.right_clicked = mouse_pose
else:
self.tx = mouse_pose[0] - self.right_clicked[0]
self.ty = mouse_pose[1] - self.right_clicked[1]
else:
#Commit transform
self.right_clicked = None
self.dx += self.tx
self.dy += self.ty
self.ty = 0.
self.tx = 0.
#if self._ctx._ns["rightdown"]:
# #Make sure we're not in any nodes
# in_nodes = False
# for n in self.graph.nodes:
# if self.mouse in n:
# in_nodes = True
# break
# #Set pose first time
# if not in_nodes and not self.right_clicked:
# self.right_clicked = self.mouse
# else:
# self.right_drag(self.right_clicked, self.mouse)
#else:
# self.right_clicked = None
def setup(self):
self.times = {}
self.times['draw'] = 0.
self.times['check'] = 0.
self.times['iter'] = 0
def draw(self, properties_dict):
START_TIME = time.time()
self.context.size(properties_dict['width'], properties_dict['height'])
cx = self.context
g = self.gve
for n in g.nodes:
if properties_dict['selected_node'] == n.id:
self.set_node_style(n.id, 'selected')
else:
self.set_node_style(n.id, 'normal')
if self.graph_model.get_start_state() == n.id:
if self.get_node_style(n.id) == 'selected':
self.set_node_style(n.id, 'important')
else:
self.set_node_style(n.id, 'marked')
if hasattr(self.graph_model.get_state(n.id), 'get_child'):
if self.get_node_style(n.id) == 'selected':
self.set_node_style(n.id, 'container_selected')
else:
self.set_node_style(n.id, 'container')
if self.graph_model.is_running():
if not self.graph_model.sm_thread.has_key('current_states'):
print 'KEYS!', self.graph_model.sm_thread.keys()
if self.graph_model.sm_thread['current_states'] != None and \
(len(set(self.graph_model.sm_thread['current_states']).intersection(set([n.id]))) > 0):
self.set_node_style(n.id, 'active_node')
if self.graph_model.get_last_outcome() != None:
outcome, t = self.graph_model.get_last_outcome()
if outcome == n.id:
if time.time() - t < 10.:
self.set_node_style(n.id, 'old_outcome')
#self.set_node_style(tu.InfoStateBase.GLOBAL_NAME, 'root')
draw_func = None
#if properties_dict['selected_edge'] != None:
def draw_selected():
if properties_dict['selected_edge'] == None:
return
cx = self.context
g = self.gve
#edge = self.selected_edge
edge = properties_dict['selected_edge']
x0, y0 = edge.node1.x, edge.node1.y
x1, y1 = edge.node2.x, edge.node2.y
coordinates = lambda x, y, d, a: (x+math.cos(math.radians(a))*d, y+math.sin(math.radians(a))*d)
# Find the edge's angle based on node1 and node2 position.
a = math.degrees(math.atan2(y1-y0, x1-x0))
# draw line from node's edge instead of it's center.
r = edge.node2.r
d = math.sqrt(pow(x1-x0, 2) + pow(y1-y0, 2))
x00, y00 = coordinates(x0, y0, r+1, a)
x01, y01 = coordinates(x0, y0, d-r-1, a)
# draw
p1 = [x00, y00]
p2 = [x01, y01]
cx.fill()
cx.strokewidth(1.0)
cx.stroke(1., 153./255., 0, .75)
cx.beginpath(p1[0], p1[1])
cx.lineto(p2[0], p2[1])
path = cx.endpath(False)
gs.edge_arrow(g.styles[edge.node1.style], path, edge, radius=10)
cx.drawpath(path)
def draw_fsm_circles():
g = self.gve
#figure out where centroids should be
coords = []
[coords.append([n.x, n.y]) for n in g.nodes]
coords = np.matrix(coords).T
centroid = np.median(coords, 1)
if len(coords) == 0:
return
#calculate where radii should be
radius = np.max(np.power(np.sum(np.power((coords - centroid), 2), 0), .5)) + gm.GraphModel.NODE_RADIUS*2
radius = max(radius, 200.)
container_style = g.styles.graph_circle
container_stroke = container_style.stroke
##
#Draw fsm_stack
stack = copy.copy(properties_dict['fsm_stack'])
#stack.reverse()
#smallest_radii = radius
largest_radii = radius + len(stack) * self.radii_increment
color = self.fsm_start_color
if len(stack) > 0:
color_incre = (self.fsm_start_color - self.fsm_end_color) / len(stack)
#draw stack
for el in stack:
#smallest_radii = smallest_radii + self.radii_increment
name = el.model.document.get_name()#el.document.get_name()
#Draw node
stack_node = graph.node(g, radius = largest_radii, id = name)
stack_node.x, stack_node.y = centroid[0,0], centroid[1,0]
el.graph_node = stack_node
container_style.fill = self.context.color(color, color, color, 1.)
container_style.stroke = self.context.color(self.fsm_stroke_color, self.fsm_stroke_color, 1.)
gs.node(container_style, stack_node, g.alpha)
#Draw label
node_label_node_ = graph.node(g, radius = largest_radii, id = name)
node_label_node_.x, node_label_node_.y = centroid[0,0], centroid[1,0] - largest_radii
gs.node_label(container_style, node_label_node_, g.alpha)
color -= color_incre
largest_radii -= self.radii_increment
##
#Draw node
#Draw node circle
graph_name_node = graph.node(g, radius=radius, id = properties_dict['name'])
graph_name_node.x, graph_name_node.y = centroid[0,0], centroid[1,0]
self.fsm_current_context_node = graph_name_node
container_style.fill = self.context.color(self.fsm_end_color, self.fsm_end_color, self.fsm_end_color, 1.)
container_style.stroke = container_stroke
gs.node(container_style, graph_name_node, g.alpha)
#draw node label
node_label_node = graph.node(g, radius=radius, id = properties_dict['name'])
node_label_node.x, node_label_node.y = centroid[0,0], centroid[1,0] - radius
gs.node_label(container_style, node_label_node, g.alpha)
def detect_fsm_click():
def in_node(x, y, n):
return (abs(x - n.x) < n.r) and (abs(y - n.y) < n.r)
mousex_g = self.context._ns['MOUSEX'] - self.gve.x
mousey_g = self.context._ns['MOUSEY'] - self.gve.y
if self.context._ns['mousedoubleclick'] and len(properties_dict['fsm_stack']) > 0:
if not in_node(mousex_g, mousey_g, self.fsm_current_context_node):
stack = copy.copy(properties_dict['fsm_stack'])
stack.reverse()
selected_el = None
for el in stack:
if in_node(mousex_g, mousey_g, el.graph_node):
#if p in el.graph_node:
selected_el = el
break
#selected something so load it
if selected_el != None and self.fsm_dclick_cb != None:
self.fsm_dclick_cb(selected_el)
def final_func():
draw_selected()
detect_fsm_click()
CHECK_TIME = time.time()
self._background_drag(properties_dict)
properties_dict['MOUSEX'] -= self.dx+self.tx
properties_dict['MOUSEY'] -= self.dy+self.ty
g.draw(dx=self.dx+self.tx, dy=self.dy+self.ty, directed=True, traffic=False, user_draw_start=draw_fsm_circles, user_draw_final=final_func)
DRAW_TIME = time.time()
total_draw = DRAW_TIME - CHECK_TIME
total_check = CHECK_TIME - START_TIME
self.times['draw'] += total_draw
self.times['check'] +- total_check
self.times['iter'] += 1
#print 'draw', (1000.* self.times['draw'] / self.times['iter']), 'check', (1000.* self.times['check'] / self.times['iter'])
| 39.116343 | 146 | 0.578217 | 13,430 | 0.951066 | 0 | 0 | 0 | 0 | 0 | 0 | 2,336 | 0.165427 |
fff91c879216ac70a7559f58214c7d1b3892a9ea | 3,264 | py | Python | django_input_collection/api/restframework/collection.py | pivotal-energy-solutions/django-input-collection | cc2ce3e0a7104ba9c524eaba5706da94ddb04a5f | [
"Apache-2.0"
] | null | null | null | django_input_collection/api/restframework/collection.py | pivotal-energy-solutions/django-input-collection | cc2ce3e0a7104ba9c524eaba5706da94ddb04a5f | [
"Apache-2.0"
] | 4 | 2019-08-25T15:47:24.000Z | 2022-03-24T19:35:09.000Z | django_input_collection/api/restframework/collection.py | pivotal-energy-solutions/django-input-collection | cc2ce3e0a7104ba9c524eaba5706da94ddb04a5f | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from django.urls import reverse
from rest_framework.response import Response
from rest_framework import status
from ...collection import BaseAPICollector, BaseAPISpecification
from ... import models
from . import serializers
class RestFrameworkSpecification(BaseAPISpecification):
content_type = "application/json"
def get_api_info(self):
info = super(RestFrameworkSpecification, self).get_api_info()
input_list = reverse("collection-api:input-list")
input_detail = reverse("collection-api:input-detail", kwargs={"pk": "__id__"})
instrument_list = reverse("collection-api:instrument-list")
instrument_detail = reverse("collection-api:instrument-detail", kwargs={"pk": "__id__"})
info["endpoints"] = {
"input": {
"list": {"url": input_list, "method": "GET"},
"add": {"url": input_list, "method": "POST"},
"get": {"url": input_detail, "method": "GET"},
"delete": {"url": input_detail, "method": "DELETE"},
},
"instrument": {
"list": {"url": instrument_list, "method": "GET"},
"get": {"url": instrument_detail, "method": "GET"},
},
}
return info
class RestFrameworkCollector(BaseAPICollector):
specification_class = RestFrameworkSpecification
model_codenames = {
models.Measure: "measure",
models.CollectionRequest: "request",
models.CollectionGroup: "segment",
models.CollectionGroup: "group",
models.CollectionInstrument: "instrument",
models.get_input_model(): "input",
}
# dynamic rest_framework overrides per model (use codename strings)
serializer_classes = {}
pagination_classes = {}
default_serializer_classes = {
"measure": serializers.MeasureSerializer,
"request": serializers.CollectionRequestSerializer,
"segment": serializers.CollectionGroupSerializer,
"group": serializers.CollectionGroupSerializer,
"instrument": serializers.CollectionInstrumentSerializer,
"input": serializers.CollectedInputSerializer,
}
def get_pagination_class(self, model):
"""
Returns a rest_framework pagination class for the model's viewset. Returning ``None`` will
be taken directly (disabling pagination), and ``False`` will ensure rest_framework still
applies whatever default pagination policy is in effect.
"""
codename = self.model_codenames.get(model, model)
return self.pagination_classes.get(codename, False)
def get_serializer_class(self, model):
"""Returns a rest_framework serializer class for the model's viewset."""
codename = self.model_codenames.get(model, model)
return self.serializer_classes.get(codename, self.default_serializer_classes[codename])
def get_destroy_response(self, instrument):
"""Returns a rest_framework Response when an input is deleted from this instrument."""
return Response(status=status.HTTP_204_NO_CONTENT)
def validate(self, instrument, data):
"""Raises any validation errors in the serializer's ``data``."""
return data
| 38.857143 | 99 | 0.662377 | 3,007 | 0.921262 | 0 | 0 | 0 | 0 | 0 | 0 | 1,036 | 0.317402 |
fffc90bcd5aabe8c07f5b2517e1c835715addf0e | 770 | py | Python | DFS/depth_first_search.py | Quanta-Algorithm-Design/graphs | 3a5b6362bf60a1e2fb06d2fadab46e72124d637d | [
"MIT"
] | null | null | null | DFS/depth_first_search.py | Quanta-Algorithm-Design/graphs | 3a5b6362bf60a1e2fb06d2fadab46e72124d637d | [
"MIT"
] | null | null | null | DFS/depth_first_search.py | Quanta-Algorithm-Design/graphs | 3a5b6362bf60a1e2fb06d2fadab46e72124d637d | [
"MIT"
] | 1 | 2020-10-05T06:46:13.000Z | 2020-10-05T06:46:13.000Z | #!/usr/bin/env python3
"""
This module defines functions for depth-first-search in a graph with a given adjacency list
"""
def dfs_visit(node_list, adj_list, root_node, parent):
"""
Takes the graph node list, its adj list, and a node s,
and visits all the nodes reachable from s recursively.
"""
for node in adj_list[root_node]:
if node not in parent:
parent[node] = root_node
dfs_visit(node_list, adj_list, node, parent)
def dfs(node_list, adj_list):
"""
Iterate over possible root_nodes to explore the whole graph
"""
parent = {}
for root_node in node_list:
if root_node not in parent:
parent[root_node] = None
dfs_visit(node_list, adj_list, root_node, parent)
| 29.615385 | 91 | 0.654545 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 325 | 0.422078 |
ffff1e4cd8bc9bad42ca402b2c639f4b45a16abe | 791 | py | Python | pirates/quest/QuestHolderBase.py | itsyaboyrocket/pirates | 6ca1e7d571c670b0d976f65e608235707b5737e3 | [
"BSD-3-Clause"
] | 3 | 2021-02-25T06:38:13.000Z | 2022-03-22T07:00:15.000Z | pirates/quest/QuestHolderBase.py | itsyaboyrocket/pirates | 6ca1e7d571c670b0d976f65e608235707b5737e3 | [
"BSD-3-Clause"
] | null | null | null | pirates/quest/QuestHolderBase.py | itsyaboyrocket/pirates | 6ca1e7d571c670b0d976f65e608235707b5737e3 | [
"BSD-3-Clause"
] | 1 | 2021-02-25T06:38:17.000Z | 2021-02-25T06:38:17.000Z | # uncompyle6 version 3.2.0
# Python bytecode 2.4 (62061)
# Decompiled from: Python 2.7.14 (v2.7.14:84471935ed, Sep 16 2017, 20:19:30) [MSC v.1500 32 bit (Intel)]
# Embedded file name: pirates.quest.QuestHolderBase
class QuestHolderBase:
__module__ = __name__
def __init__(self):
self._rewardCollectors = {}
def getQuests(self):
raise 'derived must implement'
def _addQuestRewardCollector(self, collector):
cId = collector._serialNum
self._rewardCollectors[cId] = collector
def _removeQuestRewardCollector(self, collector):
cId = collector._serialNum
del self._rewardCollectors[cId]
def _trackRewards(self, trade):
for collector in self._rewardCollectors.itervalues():
collector.collect(trade) | 30.423077 | 104 | 0.694058 | 575 | 0.726928 | 0 | 0 | 0 | 0 | 0 | 0 | 234 | 0.295828 |
0803020bd1e3c35bd9b149aea49e7ac12f9623a3 | 933 | py | Python | setup.py | yihong0618/-nbnhhsh-cli | 3c8241dbc772b4b693e06b350c4351e75572596a | [
"Apache-2.0"
] | 33 | 2021-07-09T05:40:00.000Z | 2022-02-07T12:49:34.000Z | setup.py | yihong0618/-nbnhhsh-cli | 3c8241dbc772b4b693e06b350c4351e75572596a | [
"Apache-2.0"
] | 1 | 2021-07-09T05:37:02.000Z | 2021-07-09T05:37:02.000Z | setup.py | yihong0618/-nbnhhsh-cli | 3c8241dbc772b4b693e06b350c4351e75572596a | [
"Apache-2.0"
] | 2 | 2021-07-10T10:25:08.000Z | 2021-07-11T03:16:38.000Z | from setuptools import setup, find_packages
VERSION = "0.1.1"
setup(
name="hhsh",
version=VERSION,
description="能不能好好说话? cli",
long_description="能不能好好说话? cli",
keywords="python hhsh cli terminal",
author="itorr,yihong0618",
author_email="zouzou0208@gmail.com",
url="https://github.com/yihong0618/hhsh",
packages=find_packages(),
include_package_data=True,
zip_safe=True,
install_requires=["requests", "rich"],
classifiers=[
"Development Status :: 4 - Beta",
"Environment :: Console",
"Intended Audience :: Developers",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Topic :: Software Development :: Libraries",
],
entry_points={
"console_scripts": ["hhsh = hhsh.hhsh:main"],
},
)
| 29.15625 | 53 | 0.621651 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 517 | 0.535751 |
08058658e2bf102d2ac28a2a02f1701e1eb02d65 | 937 | py | Python | container/base/src/cache.py | hmrc/devops-tooling-build | 03d62df3a45d5dcce306cd6cad6c95a24a4b34ab | [
"Apache-2.0"
] | 1 | 2021-11-10T16:09:43.000Z | 2021-11-10T16:09:43.000Z | container/base/src/cache.py | hmrc/devops-tooling-build | 03d62df3a45d5dcce306cd6cad6c95a24a4b34ab | [
"Apache-2.0"
] | 6 | 2021-07-02T14:15:25.000Z | 2022-02-03T12:57:36.000Z | container/base/src/cache.py | hmrc/devops-tooling-build | 03d62df3a45d5dcce306cd6cad6c95a24a4b34ab | [
"Apache-2.0"
] | null | null | null | import datetime
import hashlib
import os
import pathlib
from typing import Optional
import yaml
def key(name):
return hashlib.sha1(name.encode()).hexdigest()
def path(name):
return pathlib.Path(os.environ['CACHE_AWS_MNT']) / key(name)
def aws_get(name) -> Optional[dict]:
try:
with path(name).open('r') as fp:
response = yaml.safe_load(fp.read())
now = datetime.datetime.now(tz=datetime.timezone.utc)
min_session_time = datetime.timedelta(hours=1)
if now + min_session_time < response['Credentials']['Expiration']:
return response
except FileNotFoundError:
pass
except (yaml.scanner.ScannerError, TypeError, KeyError):
# we somehow ended up with bad yaml, ≈ no cache; invalidate
path(name).unlink()
def aws_set(name, response) -> None:
with path(name).open('w') as fp:
fp.write(yaml.dump(response))
| 25.324324 | 78 | 0.649947 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 107 | 0.113951 |
08075a784b23b26531f0e2fcf4a1653e8cbbe078 | 1,118 | py | Python | tests/test_blender.py | dumpmemory/lassl | dfe56f09cc2ade6c777ad8561b24f23d83a34188 | [
"Apache-2.0"
] | null | null | null | tests/test_blender.py | dumpmemory/lassl | dfe56f09cc2ade6c777ad8561b24f23d83a34188 | [
"Apache-2.0"
] | null | null | null | tests/test_blender.py | dumpmemory/lassl | dfe56f09cc2ade6c777ad8561b24f23d83a34188 | [
"Apache-2.0"
] | null | null | null | from collections import Counter
import pytest
from datasets import load_dataset
from lassl.blender import DatasetBlender
def test_blending():
try:
from langid import classify
except ImportError as _:
raise ImportError(
"To test dataset blending, you need to install langid. "
"Please install langid using `pip install langid`."
)
en = load_dataset("squad").data["train"]["context"]
ko = load_dataset("oscar", "unshuffled_deduplicated_ko").data["train"]["text"]
ja = load_dataset("amazon_reviews_multi", "ja").data["train"]["review_body"]
weights = {"en": 0.2, "ko": 0.5, "ja": 0.3}
datasets = {"en": en, "ko": ko, "ja": ja}
blend = DatasetBlender(
datasets=list(datasets.values()),
weights=list(weights.values()),
)
langs = [classify(str(blend[i]))[0] for i in range(10)]
counts = Counter(langs)
assert int(counts["ko"]) == int(weights["ko"] * 10)
assert int(counts["en"]) == int(weights["en"] * 10)
assert int(counts["ja"]) == int(weights["ja"] * 10)
print("All tests are passed ;)")
| 30.216216 | 82 | 0.620751 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 297 | 0.265653 |
080796109f90dd5533115b48ae3a4657f5fb4224 | 4,542 | py | Python | wisps/data_analysis/path_parser.py | caganze/WISPS | 81b91f8b49c7345ab68b7c4eb480716985e8905c | [
"MIT"
] | null | null | null | wisps/data_analysis/path_parser.py | caganze/WISPS | 81b91f8b49c7345ab68b7c4eb480716985e8905c | [
"MIT"
] | 7 | 2021-02-02T21:51:56.000Z | 2022-01-13T00:57:45.000Z | wisps/data_analysis/path_parser.py | caganze/wisps | 6572201f94a6af6d1c0a306f2f447215d4330bd7 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
After the introduction of version 6.2, all wisp data and hst-3d are now on MAST
3D-HST has not added any new data nor changed their directory structure,
but that's not the case for WISP
Aim: parse new directories to make them compatible with v5.0
"""
import os
import glob
from ..utils import memoize_func
REMOTE_FOLDER=os.environ['WISP_SURVEY_DATA']
@memoize_func
def get_image_path(name, spectrum_path):
#print (name)
##returns the image path without going through the whole thing again
if name.lower().startswith('par') or name.startswith('hlsp'):
survey='wisps'
elif name.startswith('goo') or name.startswith('ud') or name.startswith('aeg') or name.startswith('cos'):
survey='hst3d'
if survey=='wisps':
folder=name.split('wfc3_')[-1].split('wfc3_')[-1].split('-')[0]
if '_wfc3' in name:
name=(name.split('wfc3_')[-1]).split('_g141')[0]
#print (name)
#print (REMOTE_FOLDER+'/wisps/archive.stsci.edu/missions/hlsp/wisp/v6.2/'+folder+'*/2dstamp/hlsp_wisp_hst_wfc3*'+name+'*stamp2d.fits')
stamp_image_path=glob.glob(REMOTE_FOLDER+'/wisps/archive.stsci.edu/missions/hlsp/wisp/v6.2/'+folder+'*/2dstamp/hlsp_wisp_hst_wfc3*'+name+'*stamp2d.fits')[0]
if survey=='hst3d':
#print (spectrum_path.split('/1D/ASCII/')[0]+'/2D/'+'FITS/'+name.split('1D')[0]+'*2D.fits')
stamp_image_path=glob.glob(spectrum_path.split('/1D/ASCII/')[0]+'/2D/'+'FITS/'+name.split('1D')[0]+'*2D.fits')[0]
#print ('stamp image',stamp_image_path )
#print (survey, spectrum_path, stamp_image_path)
return survey, stamp_image_path
@memoize_func
def parse_path(name, version):
"""
Parse a filename and retrieve all the survey info at once
"""
survey=None
spectrum_path=None
stamp_image_path=None
if name.startswith('Par') or name.startswith('par') or name.startswith('hlsp'):
survey='wisps'
elif name.startswith('goo') or name.startswith('ud') or name.startswith('aeg') or name.startswith('cos'):
survey='hst3d'
else:
survey=None
if survey=='wisps':
spectrum_path=_run_search(name)
folder=name.split('wfc3_')[-1].split('wfc3_')[-1].split('-')[0]
name=name.split('_wfc3_')[-1].split('a_g102')[0]
stamp_image_path=glob.glob(REMOTE_FOLDER+'/wisps/archive.stsci.edu/missions/hlsp/wisp/v6.2/'+folder+'*/2dstamp/hlsp_wisp_hst_wfc3*'+name+'*a_g141_v6.2_stamp2d.fits')[0]
if survey=='hst3d':
spectrum_path=_run_search(name)
s= spectrum_path.split('/1D/ASCII/')[0]+'/2D/'+'FITS/'+name.split('1D')[0]+'*2D.fits'
stamp_image_path=glob.glob(s.replace('g141', 'G141') )[0]
#print ('stamp image',stamp_image_path )
#print (survey, spectrum_path, stamp_image_path)
#blah
return survey, spectrum_path, stamp_image_path
@memoize_func
def _run_search(name):
#internal function used to search path given spectrum name
path=''
prefix= name[:3]
if name.startswith('Par') or name.startswith('par') or name.startswith('hlsp'):
#search version 6
if name.endswith('.dat'):
n=name.split('.dat')[0]
folder=name.split('wfc3_')[-1].split('wfc3_')[-1].split('-')[0]
else:
folder=name.split('-')[0]
n=name
path1=REMOTE_FOLDER+'wisps/archive.stsci.edu/missions/hlsp/wisp/v6.2/'+folder+'/1dspectra/*'+n+'*a_g141_*'
path2=REMOTE_FOLDER+'wisps/archive.stsci.edu/missions/hlsp/wisp/v6.2/'+folder+'/1dspectra/*'+n+'*a_g102-g141_*'
path=glob.glob(path1)[0]
if len(glob.glob(path2)) > 0:
path=glob.glob(path2)[0]
#except:
# #search version 5
# folder=name.split('_')[0]
# path=REMOTE_FOLDER+'wisps/'+folder+'*/Spectra/*'+name+'.dat'
# #print (path)
# path=glob.glob(path)[0]
if prefix in ['aeg', 'cos', 'uds', 'goo']:
syls= (name.split('-'))
str_= REMOTE_FOLDER+'*'+prefix+'*'+'/*'+prefix+ '*'+syls[1]+'*'+'/1D/ASCII/'+prefix+'*'+ syls[1]+ '*'+syls[2]+'*'
#print (str_)
path=glob.glob(str_.replace('g141', 'G141'))[0]
return path
@memoize_func
def return_path(name):
#print(name)wisps
if type(name) is list:
paths=[]
for p in name:
paths.append( _run_search(p))
return paths
if type(name) is str:
return _run_search(name)
@memoize_func
def return_spectrum_name(path):
""" returns name given path in the wisp folder"""
name=''
if path.endswith('.dat'):
name= path.split('.dat')[0].split('/')[-1]
else:
name=path.split('.ascii')[0].split('/')[-1].split('.')[0]
return name | 34.409091 | 170 | 0.647952 | 0 | 0 | 0 | 0 | 4,142 | 0.911933 | 0 | 0 | 1,986 | 0.437252 |
08092e15e7923e75bbc9274300846c3ee3fbd2d9 | 158 | py | Python | tests/utils/TestTransaction.py | Shaid3r/reservations | 43e17ae88eed74593879f9f8c5a9bed7252888f7 | [
"MIT"
] | null | null | null | tests/utils/TestTransaction.py | Shaid3r/reservations | 43e17ae88eed74593879f9f8c5a9bed7252888f7 | [
"MIT"
] | null | null | null | tests/utils/TestTransaction.py | Shaid3r/reservations | 43e17ae88eed74593879f9f8c5a9bed7252888f7 | [
"MIT"
] | null | null | null | import storage
import pytest
class TestTransaction:
@pytest.fixture(autouse=True)
def transact(self):
yield
storage.conn.rollback()
| 15.8 | 33 | 0.683544 | 126 | 0.797468 | 65 | 0.411392 | 99 | 0.626582 | 0 | 0 | 0 | 0 |
080b1f9b578c418d65d4a8c4119d27d86ab70fa5 | 2,451 | py | Python | aldryn_redirects/migrations/0003_auto_20171206_1150.py | compoundpartners/aldryn-redirects | ed1b1e90a7774a4bead771e158e30d5846e64e60 | [
"BSD-3-Clause"
] | 1 | 2020-05-14T06:41:50.000Z | 2020-05-14T06:41:50.000Z | aldryn_redirects/migrations/0003_auto_20171206_1150.py | compoundpartners/aldryn-redirects | ed1b1e90a7774a4bead771e158e30d5846e64e60 | [
"BSD-3-Clause"
] | 11 | 2016-01-11T11:42:58.000Z | 2018-11-05T16:13:27.000Z | aldryn_redirects/migrations/0003_auto_20171206_1150.py | compoundpartners/aldryn-redirects | ed1b1e90a7774a4bead771e158e30d5846e64e60 | [
"BSD-3-Clause"
] | 6 | 2016-11-22T04:53:37.000Z | 2018-11-15T13:56:39.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.11.8 on 2017-12-06 13:50
from __future__ import unicode_literals
import aldryn_redirects.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('sites', '0001_initial'),
('aldryn_redirects', '0002_on_delete_and_verbose_names'),
]
operations = [
migrations.CreateModel(
name='StaticRedirect',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('inbound_route', models.CharField(db_index=True, help_text='Redirect origin. Do not provide the domain. Always add a leading slash here.', max_length=255, validators=[aldryn_redirects.validators.validate_inbound_route], verbose_name='Redirect from')),
('outbound_route', models.CharField(help_text='Redirect destination. Domain is not required (defaults to inbound route domain).', max_length=255, validators=[aldryn_redirects.validators.validate_outbound_route], verbose_name='Redirect to')),
('sites', models.ManyToManyField(related_name='_staticredirect_sites_+', to='sites.Site')),
],
options={
'verbose_name_plural': 'Static Redirects',
'verbose_name': 'Static Redirect',
},
),
migrations.CreateModel(
name='StaticRedirectInboundRouteQueryParam',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('key', models.CharField(max_length=255, verbose_name='Key')),
('value', models.CharField(blank=True, max_length=255, verbose_name='Value')),
('static_redirect', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='query_params', to='aldryn_redirects.StaticRedirect')),
],
),
migrations.AlterModelOptions(
name='redirect',
options={'ordering': ('old_path',), 'verbose_name': 'Multilanguage Redirect', 'verbose_name_plural': 'Multilanguage Redirects'},
),
migrations.AlterModelOptions(
name='redirecttranslation',
options={'default_permissions': (), 'managed': True, 'verbose_name': 'Multilanguage Redirect Translation'},
),
]
| 50.020408 | 268 | 0.651571 | 2,225 | 0.907793 | 0 | 0 | 0 | 0 | 0 | 0 | 850 | 0.346797 |
0811b7481588bc53cfde102ac50bffe1f9e0e41c | 161 | py | Python | velocity/constants.py | aisthesis/mfstockmkt | d442ec4cb3b379f6984397926b4466420236c032 | [
"MIT"
] | null | null | null | velocity/constants.py | aisthesis/mfstockmkt | d442ec4cb3b379f6984397926b4466420236c032 | [
"MIT"
] | 1 | 2015-12-27T17:37:54.000Z | 2015-12-31T05:06:06.000Z | velocity/constants.py | aisthesis/mfstockmkt | d442ec4cb3b379f6984397926b4466420236c032 | [
"MIT"
] | 1 | 2020-05-02T08:25:35.000Z | 2020-05-02T08:25:35.000Z | """
.. Copyright (c) 2015 Marshall Farrier
license http://opensource.org/licenses/MIT
Constants
=========
"""
UPVEL_COL = 'Up Vel'
DOWNVEL_COL = 'Down Vel'
| 14.636364 | 45 | 0.652174 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 131 | 0.813665 |
0811dfdcb7e741d544fe728950a10ae174c04263 | 3,284 | py | Python | fileForRepair/src/parking.py | ChangSeonKim/5G_UWC_project | 0504a1b1ed30787f30e18a178897978de55660ef | [
"Apache-2.0"
] | null | null | null | fileForRepair/src/parking.py | ChangSeonKim/5G_UWC_project | 0504a1b1ed30787f30e18a178897978de55660ef | [
"Apache-2.0"
] | null | null | null | fileForRepair/src/parking.py | ChangSeonKim/5G_UWC_project | 0504a1b1ed30787f30e18a178897978de55660ef | [
"Apache-2.0"
] | null | null | null | #! /usr/bin/env python3
import rospy
from geometry_msgs.msg import Twist
from sensor_msgs.msg import LaserScan
import numpy as np
import math
from std_msgs.msg import String
def callback(data):
    """React to a laser scan: drive straight while the front is clear,
    otherwise stop and turn ~95 degrees towards the open side.

    :param data: sensor_msgs/LaserScan message from the '/scan' topic.
    """
    # Mean distance straight ahead, to the left (~90 deg beams) and to the
    # right (~270 deg beams) of the scanner.
    laser_arr_f = np.array(data.ranges[0:10])
    laser_arr_l= np.array(data.ranges[85:95])
    laser_arr_r = np.array(data.ranges[265:275])
    block_f = laser_arr_f.mean()
    block_r = laser_arr_r.mean()
    block_l = laser_arr_l.mean()
    print(block_f, block_r, block_l)
    msg = Twist()
    # 0.225 is the clearance threshold (units are whatever the scanner
    # reports, presumably meters -- TODO confirm).
    if block_f > 0.225: # and block_f < 0.3:
        #go straight
        msg.linear.x = 1
        pub.publish(msg)
    # elif block_f > 0.45:
    #     #go straight
    #     msg.linear.x = 1
    #     if ( block_l - block_r) > 0.05:
    #         msg.linear.x = 1
    #         msg.angular.z = -0.5
    #     elif ( block_l - block_r) < -0.05:
    #         msg.linear.x = 1
    #         msg.angular.z = -0.5
    #     else:
    #         msg.linear.x = 1
    #         msg.angular.z = 0.0
    #     pub.publish(msg)
    else:
        #stop
        msg.linear.x = 0
        pub.publish(msg)
        if block_f < 0.225 and block_r > 0.30:
            # right-turn: open space to the right.  Spin at angular_speed
            # for the time needed to cover ~95 degrees (assumes angular.z
            # is rad/s -- TODO confirm), publishing continuously.
            relative_angle = math.radians(95)
            angular_speed = -1.0
            duration = relative_angle/abs(angular_speed)
            msg.angular.z = angular_speed
            time2end = rospy.Time.now() + rospy.Duration(duration)
            while rospy.Time.now() < time2end:
                pub.publish(msg)
            # new = 0
            # Come to a full stop after the rotation.
            msg.linear.x = 0
            msg.angular.z = 0
            pub.publish(msg)
            # rospy.sleep(.2)
        elif block_f < 0.225 and block_l > 0.30:
            # left-turn: mirror image of the right-turn branch above.
            relative_angle = math.radians(95)
            angular_speed = 1.0
            duration = relative_angle/abs(angular_speed)
            msg.angular.z = angular_speed
            time2end = rospy.Time.now() + rospy.Duration(duration)
            while rospy.Time.now() < time2end:
                pub.publish(msg)
            # new = 0
            msg.linear.x = 0
            msg.angular.z = 0
            pub.publish(msg)
            # rospy.sleep(.2)
        # elif block_f < 0.225 and block_l < 0.3 and block_r < 0.3:
        #     # U-turn
        #     relative_angle = math.radians(190)
        #     angular_speed = 1.0
        #     duration = relative_angle/abs(angular_speed)
        #     msg.angular.z = angular_speed
        #     time2end = rospy.Time.now() + rospy.Duration(duration)
        #     while rospy.Time.now() < time2end:
        #         pub.publish(msg)
        #     msg.linear.x = 0
        #     msg.angular.z = 0
        #     pub.publish(msg)
        #     rospy.sleep(.2)
        # elif block_f < 0.225 and block_l > 0.3 and block_r > 0.3:
        #     # stop
        #     msg.linear.x = 0
        #     msg.angular.z = 0
        #     pub.publish(msg)
        #     # rospy.sleep(.2)
        else:
            # Blocked in front but no clear side: keep standing still.
            pass
    return
def stop(msg):
    """Halt the robot when the command topic says so.

    :param msg: std_msgs/String message from the 'helloworld03' topic;
                only the exact payload 'stop here' triggers a halt.
    """
    if msg.data == 'stop here':
        # Use a separate Twist instead of rebinding ``msg`` (the original
        # shadowed the incoming message with the outgoing command).
        halt = Twist()
        halt.linear.x = 0
        halt.angular.z = 0
        pub.publish(halt)
if __name__ =='__main__':
    # Node wiring: publish drive commands on /cmd_vel, react to laser
    # scans via callback() and to textual stop commands via stop().
    rospy.init_node('parking')
    pub = rospy.Publisher('/cmd_vel',Twist, queue_size=10)
    rospy.Subscriber('/scan',LaserScan, queue_size = 1, callback = callback)
    rospy.Subscriber('helloworld03', String, callback=stop)
    # Block here, processing subscriber callbacks until shutdown.
    rospy.spin()
pass | 29.061947 | 76 | 0.546894 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,268 | 0.386114 |
08138545899e44b68cb9f2c6902d9d5be0b380f7 | 2,622 | py | Python | opennsa/provreg.py | jmacauley/opennsa | 853c0fc8e065e74815cbc3f769939f64ac6aadeb | [
"BSD-3-Clause"
] | null | null | null | opennsa/provreg.py | jmacauley/opennsa | 853c0fc8e065e74815cbc3f769939f64ac6aadeb | [
"BSD-3-Clause"
] | null | null | null | opennsa/provreg.py | jmacauley/opennsa | 853c0fc8e065e74815cbc3f769939f64ac6aadeb | [
"BSD-3-Clause"
] | null | null | null | """
Registry for tracking providers dynamically in OpenNSA.
Keeping track of providers in a dynamical way in an NSI implementation is a
huge pain in the ass. This is a combination of things, such as seperate
identities and endpoints, callbacks, and the combination of local providers.
The class ProviderRegistry tries to keep it a bit sane.
"""
from twisted.python import log
from opennsa import error
LOG_SYSTEM = 'providerregistry'
class ProviderRegistry(object):
    """Keeps track of NSI providers and the networks they serve.

    Providers are indexed by their agent urn; each provider is also
    associated with the list of network ids it is responsible for.
    New providers can be spawned on demand from an NSI agent through
    the registered per-service-type factories.
    """

    def __init__(self, providers, provider_factories):
        # Copy so the caller's dict is never mutated; usually seeded with
        # the local providers.
        self.providers = providers.copy()
        # { provider_type : provider_spawn_func }
        self.provider_factories = provider_factories
        # { provider_urn : [ network ] }
        self.provider_networks = {}

    def getProvider(self, nsi_agent_urn):
        """Return the provider registered for the given NSI agent urn."""
        if nsi_agent_urn not in self.providers:
            raise error.STPResolutionError('Could not resolve a provider for %s' % nsi_agent_urn)
        return self.providers[nsi_agent_urn]

    def getProviderByNetwork(self, network_id):
        """Return the urn of the provider serving the given network id."""
        for provider_urn, networks in self.provider_networks.items():
            if network_id in networks:
                return provider_urn
        raise error.STPResolutionError('Could not resolve a provider for %s' % network_id)

    def addProvider(self, nsi_agent_urn, provider, network_ids):
        """Register a provider directly. Probably only needed by setup.py"""
        if nsi_agent_urn not in self.providers:
            log.msg('Creating new provider for %s' % nsi_agent_urn, system=LOG_SYSTEM)
        self.providers[nsi_agent_urn] = provider
        self.provider_networks[nsi_agent_urn] = network_ids

    def spawnProvider(self, nsi_agent, network_ids):
        """Create (or reuse) a provider for the given NSI agent.

        The agent must expose a service type for which a factory was
        registered.  If a provider for the agent already exists with the
        same network list, it is returned unchanged.
        """
        agent_urn = nsi_agent.urn()
        if agent_urn in self.providers and self.provider_networks[agent_urn] == network_ids:
            log.msg('Skipping provider spawn for %s (no change)' % nsi_agent, debug=True, system=LOG_SYSTEM)
            return self.providers[agent_urn]

        spawn = self.provider_factories[nsi_agent.getServiceType()]
        provider = spawn(nsi_agent)
        self.addProvider(agent_urn, provider, network_ids)
        log.msg('Spawned new provider for %s' % nsi_agent, system=LOG_SYSTEM)
        return provider
| 34.051948 | 108 | 0.672006 | 2,179 | 0.831045 | 0 | 0 | 0 | 0 | 0 | 0 | 1,025 | 0.390923 |
081559dc3ab661ae3a1df9c2d52bc8d2ba1f2ae4 | 997 | py | Python | tests/test_task_tracker.py | jmchilton/shedclient-beta | 50041b488652f8bf40555b0c1ef001290f1c3f6a | [
"CC-BY-3.0"
] | 2 | 2015-12-21T02:18:54.000Z | 2016-09-08T13:56:36.000Z | tests/test_task_tracker.py | jmchilton/shedclient-beta | 50041b488652f8bf40555b0c1ef001290f1c3f6a | [
"CC-BY-3.0"
] | 1 | 2015-12-21T19:26:21.000Z | 2015-12-21T19:26:21.000Z | tests/test_task_tracker.py | jmchilton/shedclient-beta | 50041b488652f8bf40555b0c1ef001290f1c3f6a | [
"CC-BY-3.0"
] | null | null | null | from test_utils import TempDirectoryContext
from shedclient import task_tracker
def test_task_tracker():
    """Exercise the register / read / update / delete cycle of the tracker."""
    with TempDirectoryContext() as ctx:
        tracker = task_tracker.build_task_tracker(
            dict(task_tracking_directory=ctx.temp_directory)
        )
        # Fresh tracker starts empty.
        assert len(tracker.list_active_tasks()) == 0

        # Register, read back, then delete a first task.
        first_id = tracker.register_task({"state": "new"})
        assert len(tracker.list_active_tasks()) == 1
        assert tracker.read_task(first_id)["state"] == "new"
        tracker.delete_task(first_id)
        assert len(tracker.list_active_tasks()) == 0

        # Register a second task and update its state in place.
        second_id = tracker.register_task({"state": "new"})
        assert len(tracker.list_active_tasks()) == 1
        tracker.update_task(second_id, {"state": "queued", "name": "task 1"})
        updated = tracker.read_task(second_id)
        assert updated["state"] == "queued"
        assert updated["name"] == "task 1"
| 33.233333 | 76 | 0.657974 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 94 | 0.094283 |
081691097da1b52252fbbf22f08b3e7856a39982 | 5,825 | py | Python | gonder_rc.py | TarikCinar/python-sesli-asistan | 1a29a8d3081b67ff352cf03f7b01ac01b7118deb | [
"MIT"
] | 1 | 2021-05-28T17:27:50.000Z | 2021-05-28T17:27:50.000Z | gonder_rc.py | TarikCinar/python-sesli-asistan | 1a29a8d3081b67ff352cf03f7b01ac01b7118deb | [
"MIT"
] | null | null | null | gonder_rc.py | TarikCinar/python-sesli-asistan | 1a29a8d3081b67ff352cf03f7b01ac01b7118deb | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Resource object code
#
# Created by: The Resource Compiler for PyQt5 (Qt v5.13.0)
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore
qt_resource_data = b"\
\x00\x00\x04\x1a\
\x00\
\x01\x08\x3e\x78\x9c\xed\x9c\x4d\x6e\xd3\x50\x14\x85\x1d\x65\x10\
\x66\x61\xc4\x2c\x75\x87\xdd\x85\x59\x4a\x77\x92\xcc\xd2\x59\x97\
\xc2\x12\x40\x62\x01\x2c\xa1\x48\xb4\x62\x58\x86\x0c\x10\xc1\x76\
\x52\xc7\x3e\x21\x8e\xe3\xdf\x77\xdf\xfb\xbe\xea\x22\x1d\x17\x61\
\xc7\xe7\xbe\x73\x18\x54\x8d\xa2\x59\xfa\xb5\xd9\x44\xe9\x9f\xb7\
\xd1\xdd\xbb\x59\xf4\x21\x8a\xa2\xbb\x74\xd2\x4b\xd9\xc5\xfc\x7a\
\x4e\xfa\x3d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x08\x97\x97\xed\x6a\xf3\xfc\xb0\x7a\x7a\xde\xae\xee\x9f\
\x1e\xe3\xf7\x53\x3f\x0f\x8c\x4b\xe6\xff\xcb\xc3\xcd\x2e\x9f\xed\
\xea\xf5\x79\x7b\xf3\xf8\x73\x1b\xdf\x4e\xfd\x5c\x30\x0e\x15\xff\
\x4b\x93\xee\xc1\xa7\x1f\xdb\xf8\xe3\xd4\xcf\x07\xc3\x72\xce\xff\
\x62\x0f\xe8\x06\xaf\xb9\xe4\x3f\xdd\xe0\x37\x8d\xfd\xa7\x1b\xbc\
\xa4\x8d\xff\x74\x83\x3f\x74\xf1\x9f\x6e\xb0\x4f\x2f\xfe\xd3\x0d\
\x66\xe9\xdb\x7f\xba\xc1\x16\x43\xf9\x4f\x37\xd8\x60\x70\xff\xe9\
\x06\xa7\x19\xd3\x7f\xba\xc1\x3d\xa6\xf0\x9f\x6e\x70\x87\x49\xfd\
\xa7\x1b\x26\xc7\x15\xff\xe9\x86\x69\x70\xcd\x7f\xba\x61\x5c\x9c\
\xf5\x9f\x6e\x18\x05\x0b\xfe\xd3\x0d\xc3\x61\xc9\x7f\xba\xa1\x7f\
\x4c\xfa\x5f\xce\x04\xba\xa1\x13\xd6\xfd\x2f\xf6\x80\x6e\x68\x85\
\x2f\xfe\x17\x43\x37\x5c\x85\x77\xfe\x97\x33\x81\x6e\xb8\x88\xcf\
\xfe\x17\x7b\x40\x37\x9c\x25\x04\xff\x8b\xa1\x1b\x4e\x08\xca\xff\
\x72\x26\xd0\x0d\x39\xa1\xfa\x5f\xec\x41\xe0\xdd\x10\xba\xff\xc5\
\x04\xda\x0d\xf8\x7f\x3a\x21\x75\x03\xfe\xd7\xec\x41\x00\xdd\x80\
\xff\x0d\xc6\xe3\x6e\xc0\xff\xeb\xc6\xb7\x6e\xc0\xff\x96\x7b\xe0\
\x49\x37\xe0\x7f\xc7\x31\xde\x0d\xf8\xdf\xdf\x58\xec\x06\xfc\x1f\
\x60\x0f\x0c\x75\x43\xb6\xaf\x59\x7e\xbd\x3c\xac\x3e\xa7\xbb\xf0\
\x6d\xea\x77\xe7\xd5\x18\xed\x86\xec\x79\xf7\x7b\xb1\xba\xcf\x7f\
\x3f\x58\x9a\x6b\x87\xfd\x78\x9d\xfc\x9d\x1a\x1d\x8b\xdd\x70\x8e\
\xec\x73\x64\x73\xd8\x0d\xb2\xe3\x9a\x3d\x30\xd4\x0d\x6d\x20\x3b\
\x1a\x8e\xd1\x6e\xe8\x0a\xd9\x71\x3a\x3e\x75\x43\x17\x42\xcf\x0e\
\xdf\xbb\xa1\x2b\xc1\x64\x47\xa0\xdd\xd0\x05\x5f\xb3\x83\x6e\xe8\
\x07\xeb\xd9\x41\x37\x0c\x87\xa9\xec\xa0\x1b\x46\xc7\xd5\xec\xa0\
\x1b\xc6\x01\xff\xfd\x86\xfc\xf7\x1f\x57\xcf\x70\xe3\xb3\xce\xff\
\xff\x6a\x31\x75\x86\xc9\xf8\x56\x58\x3f\xc3\x8d\x27\xd0\x8c\xf7\
\xf5\x0c\x37\x3e\xeb\x01\x64\x7c\x30\x67\xf8\x1a\xdf\x3d\xca\xf8\
\xd0\xcf\x70\xe3\x31\x9c\xf1\x9c\xe1\xf6\x63\x21\xe3\x39\xc3\x03\
\xf8\xee\x68\xc6\xf3\xf3\x9f\x03\x8e\x81\x8c\xe7\xe7\xbf\xfb\x1f\
\x0b\x19\xff\x06\xfe\xf7\xe8\xbb\xa3\x19\x5f\x07\xfe\x77\x1c\x03\
\x19\x5f\x07\xfe\xb7\x1b\x4b\x19\x5f\x07\xfe\x5f\xe9\xbb\xc1\x8c\
\xaf\x03\xff\x1b\x8c\xf1\x8c\xaf\x03\xff\xcf\x8f\x2f\x19\x5f\x07\
\xfe\xff\xc7\x77\xcf\x32\xbe\x0e\xfc\x3f\x8c\xc7\x19\x5f\x47\xe8\
\xfe\x87\x90\xf1\x75\x84\xea\x7f\x48\x19\x5f\x47\x50\xfe\x07\x9a\
\xf1\x75\x84\xe0\x7f\xe8\x19\x5f\x87\xcf\xfe\x93\xf1\x97\xf1\xce\
\x7f\x32\xfe\x2a\x7c\xf1\x9f\x8c\x6f\x87\x75\xff\xc9\xf8\x6e\x98\
\xf4\x9f\x8c\xef\x0d\x4b\xfe\x93\xf1\xfd\x63\xc1\x7f\x32\x7e\x38\
\x9c\xf5\x9f\x8c\x1f\x05\xd7\xfc\x27\xe3\xc7\xc5\x15\xff\xc9\xf8\
\x69\x98\xd4\x7f\x32\x7e\x72\xa6\xf0\x9f\x8c\x77\x87\x31\xfd\x27\
\xe3\xdd\x63\x70\xff\xc9\x78\xa7\x19\xca\x7f\x32\xde\x06\x7d\xfb\
\x4f\xc6\xdb\xa2\x17\xff\xc9\x78\xb3\x74\xf1\x9f\x8c\xb7\x4f\x1b\
\xff\xc9\x78\x7f\x68\xec\x3f\x19\xef\x25\x97\xfc\x27\xe3\xfd\xe6\
\x9c\xff\x64\x7c\x18\x54\xfc\x27\xe3\x83\x23\xff\xfd\x5e\x64\x3c\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x80\
\x73\xec\x42\xe7\xab\xe8\x2f\xaa\x13\xd1\x0b\xd1\x33\xd1\xd1\x5a\
\xf4\x52\xf4\x5c\x74\xa4\x3a\x16\xbd\x10\x3d\x13\x1d\x25\xa2\x97\
\xa2\xe7\xa2\xcb\x8f\x98\xeb\x58\xf4\x42\x74\xa4\x3a\x11\xbd\x14\
\x3d\x13\x7d\xbc\xe3\x41\xc7\xa2\xe7\xa2\x23\xd5\x89\xe8\x85\xe8\
\x99\xe8\xb7\x3b\x16\x7a\x29\x7a\x2e\x3a\x52\x1d\x8b\x5e\x88\x9e\
\x89\xde\x3f\x62\x49\xe7\x77\xfc\x7b\xd4\xfb\x3b\x96\x2e\xec\x1f\
\xf1\x8f\xdc\xf1\x78\xe1\xed\x33\xfe\xae\x3e\xe2\xf1\x42\x61\xc3\
\xaf\xca\x67\x2c\x2e\x94\x36\xe5\x7b\xf9\xa5\x14\x17\xe2\xb3\x5a\
\xfe\xbe\xfc\x7b\x72\x3f\x79\x1e\x79\x5e\xf9\x3c\xf2\x79\xe5\x7d\
\xe8\xfb\xd2\xf7\x59\xd2\xf2\xbe\xd5\x0f\xf5\x2b\x16\xbd\xab\x6a\
\xdd\x07\xdd\x97\x75\x55\xeb\xbe\xe9\x3e\x26\xa2\x77\x55\xad\xfb\
\x1e\x8b\x5e\x57\xb5\x9e\x27\x3d\x6f\x89\xe8\x5d\x55\xeb\x79\x8e\
\x45\xaf\xab\x5a\xf3\x42\xf3\x24\xa9\x6a\xcd\x23\xcd\xab\x58\xf4\
\xae\xaa\x35\x0f\x35\x2f\xd7\x55\xad\x79\xab\x79\x9c\x88\xae\xca\
\x93\xbc\x0f\x9c\x7f\x31\x73\xbc\x32\
"
qt_resource_name = b"\
\x00\x09\
\x0c\x78\x54\x88\
\x00\x6e\
\x00\x65\x00\x77\x00\x50\x00\x72\x00\x65\x00\x66\x00\x69\x00\x78\
\x00\x0a\
\x0a\xc8\x83\x1f\
\x00\x67\
\x00\x6f\x00\x6e\x00\x64\x00\x65\x00\x72\x00\x2e\x00\x69\x00\x63\x00\x6f\
"
qt_resource_struct_v1 = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\
\x00\x00\x00\x18\x00\x01\x00\x00\x00\x01\x00\x00\x00\x00\
"
qt_resource_struct_v2 = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x18\x00\x01\x00\x00\x00\x01\x00\x00\x00\x00\
\x00\x00\x01\x6e\xda\x88\xff\x59\
"
# Pick the resource-format version understood by the running Qt:
# Qt < 5.8 only supports rcc format 1, newer Qt uses format 2.
qt_version = [int(v) for v in QtCore.qVersion().split('.')]
if qt_version < [5, 8, 0]:
    rcc_version = 1
    qt_resource_struct = qt_resource_struct_v1
else:
    rcc_version = 2
    qt_resource_struct = qt_resource_struct_v2

def qInitResources():
    # Register the embedded resource data with Qt's resource system.
    QtCore.qRegisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data)

def qCleanupResources():
    # Unregister the embedded resource data again.
    QtCore.qUnregisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data)

# Resources are registered as soon as this (generated) module is imported.
qInitResources()
| 47.357724 | 103 | 0.72721 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5,200 | 0.892704 |
081745d5d369bb388f32e0870139795f7243852c | 213 | py | Python | tuples-and-sets/3_unique_names.py | Minkov/python-advanced-2020-01 | f2ee26f1325d943529673457a1cbba5657ae5905 | [
"MIT"
] | 5 | 2020-01-16T18:17:08.000Z | 2020-04-12T06:42:47.000Z | tuples-and-sets/3_unique_names.py | Minkov/python-advanced-2020-01 | f2ee26f1325d943529673457a1cbba5657ae5905 | [
"MIT"
] | null | null | null | tuples-and-sets/3_unique_names.py | Minkov/python-advanced-2020-01 | f2ee26f1325d943529673457a1cbba5657ae5905 | [
"MIT"
] | null | null | null | n = int(input())
# Read the n names, keeping input order.
names = [input() for _ in range(n)]

# dict.fromkeys() de-duplicates while preserving first-occurrence order,
# replacing the original O(n^2) list(set(...)) + sort-by-names.index
# approach (and the side-effect-only list comprehension around print).
for name in dict.fromkeys(names):
    print(name)
| 15.214286 | 42 | 0.643192 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 37 | 0.173709 |
081c3a4e5b548789411fc11be988031444e552dd | 233 | py | Python | dialogue/tensorflow/task/common/common.py | ishine/nlp-dialogue | d47c1438cb5c45c2c2aebfb82fea92bef4c3d65c | [
"Apache-2.0"
] | 478 | 2020-10-28T01:30:30.000Z | 2022-03-30T06:34:07.000Z | paper-code/tensorflow_src/models/task/common/common.py | HengYongChao/nlp-paper | fcf985e3c9bfd6944d07c4c36afbaee3384d040d | [
"Apache-2.0"
] | 1 | 2021-08-29T11:55:09.000Z | 2021-11-04T09:25:19.000Z | paper-code/tensorflow_src/models/task/common/common.py | HengYongChao/nlp-paper | fcf985e3c9bfd6944d07c4c36afbaee3384d040d | [
"Apache-2.0"
] | 89 | 2021-01-05T06:11:55.000Z | 2022-03-24T12:51:57.000Z | from optparse import OptionParser
class CmdParser(OptionParser):
    """OptionParser variant that shows the usage help whenever parsing fails."""

    def exit(self, status=0, msg=None):
        # Leave via the builtin exit() (raises SystemExit); the optional
        # message argument is intentionally ignored.
        exit(status)

    def error(self, msg):
        # On any parse error: print a notice plus the generated help text,
        # then terminate through exit() above.
        print('Error!提示信息如下:')
        self.print_help()
        self.exit(0)
| 19.416667 | 39 | 0.622318 | 210 | 0.850202 | 0 | 0 | 0 | 0 | 0 | 0 | 29 | 0.117409 |
081da69448bb7e8d65c5e3d690d670101f274a22 | 587 | py | Python | posts/migrations/0003_auto_20200522_0446.py | NotBlizzard/lark | b348f8d2b532ce20581030397cbba7f6565d1c56 | [
"MIT"
] | 2 | 2020-12-10T06:13:36.000Z | 2021-01-15T09:32:41.000Z | posts/migrations/0003_auto_20200522_0446.py | NotBlizzard/lark | b348f8d2b532ce20581030397cbba7f6565d1c56 | [
"MIT"
] | 7 | 2021-03-10T21:21:55.000Z | 2021-09-22T19:20:03.000Z | posts/migrations/0003_auto_20200522_0446.py | NotBlizzard/lark | b348f8d2b532ce20581030397cbba7f6565d1c56 | [
"MIT"
] | null | null | null | # Generated by Django 3.0.6 on 2020-05-22 04:46
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: adds Post.subtitle and turns
    # title_slug into a 200-character SlugField.

    dependencies = [
        ('posts', '0002_auto_20200520_0536'),
    ]

    operations = [
        # New non-null subtitle column; default='' backfills existing rows,
        # and preserve_default=False drops that default afterwards.
        migrations.AddField(
            model_name='post',
            name='subtitle',
            field=models.CharField(default='', max_length=200),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='post',
            name='title_slug',
            field=models.SlugField(max_length=200),
        ),
    ]
| 23.48 | 63 | 0.574106 | 494 | 0.841567 | 0 | 0 | 0 | 0 | 0 | 0 | 115 | 0.195911 |
081ea6e893da219c4f8a085a91c4473a4ef03166 | 191 | py | Python | tasking/utils/models.py | cmanallen/tasking | 0a613357148afdb4b55078c58429ec45bb60a436 | [
"MIT"
] | null | null | null | tasking/utils/models.py | cmanallen/tasking | 0a613357148afdb4b55078c58429ec45bb60a436 | [
"MIT"
] | null | null | null | tasking/utils/models.py | cmanallen/tasking | 0a613357148afdb4b55078c58429ec45bb60a436 | [
"MIT"
] | null | null | null | from django.db import models
class TimeStamp(models.Model):
    """Abstract base model providing created/updated timestamps."""

    # Set once, when the row is first inserted.
    created = models.DateTimeField(auto_now_add=True)
    # Refreshed automatically on every save().
    updated = models.DateTimeField(auto_now=True)

    class Meta:
        # Abstract: contributes fields to subclasses, no table of its own.
        abstract = True
08212ae6445b938c3145af03c666f1c2c0d5163b | 439 | py | Python | {{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/conftest.py | SirTelemak/cookiecutter-python-template | d7d8c4493250654a4ee3badb36c4c4da1ccb8d3d | [
"MIT"
] | 2 | 2020-06-04T19:17:13.000Z | 2020-06-05T08:05:16.000Z | {{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/conftest.py | SirTelemak/cookiecutter-python-template | d7d8c4493250654a4ee3badb36c4c4da1ccb8d3d | [
"MIT"
] | 1 | 2020-08-06T15:01:47.000Z | 2020-08-06T15:01:47.000Z | {{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/conftest.py | SirTelemak/cookiecutter-python-template | d7d8c4493250654a4ee3badb36c4c4da1ccb8d3d | [
"MIT"
] | 2 | 2020-06-15T19:26:33.000Z | 2020-11-20T20:24:03.000Z | import logging
import pytest
from loguru import logger
@pytest.fixture(name='caplog', autouse=True)
def loguru_caplog(caplog):
    """Bridge loguru records into pytest's ``caplog`` fixture.

    Loguru does not emit through the stdlib logging machinery, so its
    records would otherwise never reach pytest's capture handler.  This
    fixture installs a loguru sink that re-emits every record through the
    stdlib logger of the same name, and removes the sink on teardown.
    """
    class PropogateHandler(logging.Handler):
        # Re-dispatch each record through the stdlib logger named after it.
        def emit(self, record):
            logging.getLogger(record.name).handle(record)

    # Drop loguru's default sink, install the propagating one.
    logger.remove()
    handler_id = logger.add(PropogateHandler(), format='{message}', backtrace=False)
    caplog.clear()
    yield caplog
    # Teardown: detach the propagating sink again.
    logger.remove(handler_id)
| 21.95 | 84 | 0.708428 | 131 | 0.298405 | 335 | 0.763098 | 380 | 0.865604 | 0 | 0 | 19 | 0.04328 |
08217e660e94837e28763173bb72fbc25fe9ee5e | 216 | py | Python | locale/pot/api/plotting/_autosummary/pyvista-Plotter-enable_lightkit-1.py | tkoyama010/pyvista-doc-translations | 23bb813387b7f8bfe17e86c2244d5dd2243990db | [
"MIT"
] | 4 | 2020-08-07T08:19:19.000Z | 2020-12-04T09:51:11.000Z | locale/pot/api/plotting/_autosummary/pyvista-Plotter-enable_lightkit-1.py | tkoyama010/pyvista-doc-translations | 23bb813387b7f8bfe17e86c2244d5dd2243990db | [
"MIT"
] | 19 | 2020-08-06T00:24:30.000Z | 2022-03-30T19:22:24.000Z | locale/pot/api/plotting/_autosummary/pyvista-Plotter-enable_lightkit-1.py | tkoyama010/pyvista-doc-translations | 23bb813387b7f8bfe17e86c2244d5dd2243990db | [
"MIT"
] | 1 | 2021-03-09T07:50:40.000Z | 2021-03-09T07:50:40.000Z | # Create a plotter without any lights and then enable the
# default light kit.
#
import pyvista

# Plotter created with lighting disabled, then the default light kit is
# re-enabled explicitly.
pl = pyvista.Plotter(lighting=None)
pl.enable_lightkit()
# Cube with edges drawn, so the effect of the lighting is easy to judge.
actor = pl.add_mesh(pyvista.Cube(), show_edges=True)
pl.show()
| 24 | 57 | 0.763889 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 78 | 0.361111 |
0822f1091f07394bee07ab3fa63b7142aa217e7c | 1,353 | py | Python | sphinx/environment/managers/__init__.py | rweickelt/sphinx | 1a4c41a7691e8f78d42e2db221192962c53b27df | [
"BSD-2-Clause"
] | null | null | null | sphinx/environment/managers/__init__.py | rweickelt/sphinx | 1a4c41a7691e8f78d42e2db221192962c53b27df | [
"BSD-2-Clause"
] | null | null | null | sphinx/environment/managers/__init__.py | rweickelt/sphinx | 1a4c41a7691e8f78d42e2db221192962c53b27df | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
sphinx.environment.managers
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Manager components for sphinx.environment.
:copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
if False:
# For type annotation
from typing import Any # NOQA
from docutils import nodes # NOQA
from sphinx.environment import BuildEnvironment # NOQA
class EnvironmentManager(object):
    """Base class for sphinx.environment managers."""

    # Attribute name under which the manager registers itself on the build
    # environment (None/empty means "do not register").
    name = None  # type: unicode
    # The build environment this manager is currently bound to.
    env = None  # type: BuildEnvironment

    def __init__(self, env):
        # type: (BuildEnvironment) -> None
        self.env = env

    def attach(self, env):
        # type: (BuildEnvironment) -> None
        # Bind to *env* and, if named, expose this manager as an attribute
        # on the environment.
        self.env = env
        if self.name:
            setattr(env, self.name, self)

    def detach(self, env):
        # type: (BuildEnvironment) -> None
        # Drop the binding and remove the attribute added by attach().
        self.env = None
        if self.name:
            delattr(env, self.name)

    def clear_doc(self, docname):
        # type: (unicode) -> None
        # Forget all data recorded for *docname*; must be overridden.
        raise NotImplementedError

    def merge_other(self, docnames, other):
        # type: (List[unicode], Any) -> None
        # Merge data for *docnames* from another environment; must be
        # overridden (used for parallel reads).
        raise NotImplementedError

    def process_doc(self, docname, doctree):
        # type: (unicode, nodes.Node) -> None
        # Record data from a freshly-read document; must be overridden.
        raise NotImplementedError
| 26.529412 | 68 | 0.604582 | 921 | 0.68071 | 0 | 0 | 0 | 0 | 0 | 0 | 583 | 0.430894 |
0822f39156313d04e61ff6ddaaed66e14edc3a38 | 3,692 | py | Python | scripts/convert_queries.py | galuscakova/podcasts | 967cc04e2b0f7cf963a189ac5270cfa69f81a540 | [
"BSD-4-Clause-UC"
] | null | null | null | scripts/convert_queries.py | galuscakova/podcasts | 967cc04e2b0f7cf963a189ac5270cfa69f81a540 | [
"BSD-4-Clause-UC"
] | null | null | null | scripts/convert_queries.py | galuscakova/podcasts | 967cc04e2b0f7cf963a189ac5270cfa69f81a540 | [
"BSD-4-Clause-UC"
] | 1 | 2021-05-27T07:44:51.000Z | 2021-05-27T07:44:51.000Z | import getopt
import sys
import os
import re
import string
import xml.etree.ElementTree as ET
# Defaults for the -i/-e/-t command line options.
input_filename = ""
expansion_filename = ""
output_type = "combine"
# NOTE(review): 'exclude' appears unused below; punctuation stripping is
# done via str.maketrans/str.translate instead.
exclude = set(string.punctuation)
options, remainder = getopt.getopt(sys.argv[1:], 'i:e:t:', ['inputfile=', 'expansionfile=', 'type='])
for opt, arg in options:
    if opt in ('-i', '--inputfile'):
        # Topics XML file (required downstream); must exist.
        input_filename = arg
        if (not os.path.exists(input_filename)):
            sys.exit("Error: Inputfile does not exists")
    if opt in ('-e', '--expansionfile'):
        # Optional per-topic expansion-term file; must exist when given.
        expansion_filename = arg
        if (not os.path.exists(expansion_filename)):
            sys.exit("Error: Expansion file does not exists")
    if opt in ('-t', '--type'):
        # Output flavour: combine | weights | terms | sdm
        output_type = arg
def get_sdm_query(query, lambda_t=0.8, lambda_o=0.1, lambda_u=0.1):
    """Render a query as an Indri sequential-dependence-model expression.

    Combines three weighted components: the single terms, the exact-order
    bigrams (#1) and the unordered-window bigrams (#uw8).  A one-word
    query collapses to the weighted term component only.
    """
    words = query.split()
    if len(words) == 1:
        return f"{lambda_t} #combine( {query} )"

    bigrams = list(zip(words, words[1:]))
    terms = " ".join(words)
    ordered = "".join(f" #1({first} {second}) " for first, second in bigrams)
    unordered = "".join(f" #uw8({first} {second}) " for first, second in bigrams)
    return (f"{lambda_t} #combine( {terms} ) "
            f"{lambda_o} #combine({ordered}) "
            f"{lambda_u} #combine({unordered})")
# Optional per-topic expansion terms: one line per topic, in topic order.
expansion_terms = []
if (expansion_filename != ""):
    with open(expansion_filename) as expandfile:
        expansion_terms = expandfile.readlines()

# Emit an Indri <parameters> block with one <query> element per topic.
xml_root = ET.parse(input_filename)
print("<parameters>")
order = 0
for topic in xml_root.findall('.//topic'):
    num = topic.find('num').text
    query = topic.find('query').text
    description = topic.find('description').text
    # Normalize the text: hyphens and newlines become spaces, then all
    # punctuation is stripped.
    query = query.replace('-', ' ')
    query = query.replace('\n', ' ')
    description = description.replace('-', ' ')
    description = description.replace('\n', ' ')
    query = query.translate(str.maketrans('', '', string.punctuation))
    description = description.translate(str.maketrans('', '', string.punctuation))
    print("<query>")
    print("<number>" + str(num) + "</number>")
    expansion = ""
    if ( expansion_filename != ""):
        # The expansion line for this topic looks like a bracketed list of
        # quoted "term:score" entries; strip brackets/quotes/newline and
        # split on commas.
        line_expansion_term = expansion_terms[order]
        line_expansion_term = line_expansion_term.replace("[", "")
        line_expansion_term = line_expansion_term.replace("]", "")
        line_expansion_term = line_expansion_term.replace('"', "")
        line_expansion_term = line_expansion_term.replace('\n',"")
        line_expansion_terms = line_expansion_term.split(',')
        expansion = " "
        # Use at most the top 10 expansion terms.
        max_expansion_terms = 10
        for i in range (min(max_expansion_terms, len(line_expansion_terms))):
            if (':' in line_expansion_terms[i]):
                term,score = line_expansion_terms[i].split(':')
                score = score.replace("\n", "")
                if (output_type == "weights"):
                    # Weighted form: each term carries its own score.
                    expansion = expansion + str(score) + " #combine(" + term + ") "
                else:
                    expansion = expansion + term
                expansion = expansion + " "
    # Render the query text in the flavour selected with -t.
    if (output_type == "combine"):
        print("<text>#combine(" + query + " " + expansion + description + ")</text>")
    if (output_type == "weights"):
        print("<text>#weight( 1.0 #combine(" + query + ") " + expansion + " 0.5 #combine(" + description + "))</text>")
    if (output_type == "terms"):
        print("<text>" + query + " " + expansion + description + "</text>")
    if (output_type == "sdm"):
        query_sdm = get_sdm_query(query)
        description_sdm = get_sdm_query(description)
        print("<text>#weight(" + query_sdm + " " + description_sdm + ")</text>")
    print("</query>")
    order += 1
print("</parameters>")
| 34.830189 | 119 | 0.600488 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 725 | 0.196371 |
08235f7faf3865296eebd91470431d320d7b228e | 370 | py | Python | createGlobalMap.py | abhi20sc/autoClim | b131a19e935e8ba7778a2c73107a183df37e92da | [
"MIT"
] | 2 | 2021-07-28T05:58:20.000Z | 2021-08-16T18:27:27.000Z | createGlobalMap.py | abhi20sc/autoClim | b131a19e935e8ba7778a2c73107a183df37e92da | [
"MIT"
] | null | null | null | createGlobalMap.py | abhi20sc/autoClim | b131a19e935e8ba7778a2c73107a183df37e92da | [
"MIT"
] | 3 | 2021-08-05T15:21:05.000Z | 2021-10-04T03:42:16.000Z | import cartopy.crs as ccrs
import cartopy.feature as cf
from matplotlib import pyplot as plt
from matplotlib import image as img
def createMap():
fig = plt.figure()
ax = plt.axes(projection=ccrs.PlateCarree())
ax.coastlines(linewidth=1)
ax.add_feature(cf.BORDERS,linestyle='-',linewidth=1)
fig.savefig('globalMap.png', bbox_inches='tight', pad_inches=0)
return 0. | 30.833333 | 64 | 0.767568 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 25 | 0.067568 |
0823b5eeb8c1036e06aae43d61945a3ec0226291 | 2,124 | py | Python | tests/decloud_unittest.py | CNES/decloud | 6b06ae98bfe68821b4ebd0e7ba06723809cb9b42 | [
"Apache-2.0"
] | 8 | 2022-02-25T13:15:07.000Z | 2022-03-20T18:29:49.000Z | tests/decloud_unittest.py | CNES/decloud | 6b06ae98bfe68821b4ebd0e7ba06723809cb9b42 | [
"Apache-2.0"
] | 1 | 2022-02-25T13:21:33.000Z | 2022-02-25T13:21:33.000Z | tests/decloud_unittest.py | CNES/decloud | 6b06ae98bfe68821b4ebd0e7ba06723809cb9b42 | [
"Apache-2.0"
] | 1 | 2022-03-31T23:43:12.000Z | 2022-03-31T23:43:12.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import subprocess
import unittest
import filecmp
import gdal
import otbApplication as otb
from abc import ABC
from decloud.core.system import get_env_var, pathify, basename
class DecloudTest(ABC, unittest.TestCase):
    """Base class for decloud test cases.

    Provides helpers to locate test data and to compare produced rasters
    and files against baselines.
    """

    # Root directory of the test data, taken from the environment.
    DECLOUD_DATA_DIR = get_env_var("DECLOUD_DATA_DIR")

    def get_path(self, path):
        """Return *path* appended to the decloud data directory."""
        return pathify(self.DECLOUD_DATA_DIR) + path

    def compare_images(self, image, reference, mae_threshold=0.01):
        """Assert that *image* matches *reference* band by band.

        Both rasters must have the same band count, and the mean absolute
        error (MAE) of every band must stay below *mae_threshold*.
        """
        nbchannels_reconstruct = gdal.Open(image).RasterCount
        nbchannels_baseline = gdal.Open(reference).RasterCount
        self.assertTrue(nbchannels_reconstruct == nbchannels_baseline)
        for i in range(1, 1 + nbchannels_baseline):
            comp = otb.Registry.CreateApplication('CompareImages')
            comp.SetParameterString('ref.in', reference)
            comp.SetParameterInt('ref.channel', i)
            comp.SetParameterString('meas.in', image)
            comp.SetParameterInt('meas.channel', i)
            comp.Execute()
            mae = comp.GetParameterFloat('mae')
            self.assertTrue(mae < mae_threshold)

    def compare_file(self, file, reference):
        """Assert that *file* and *reference* have identical content."""
        self.assertTrue(filecmp.cmp(file, reference))

    def compare_raster_metadata(self, image, reference):
        """Assert that the gdalinfo output of *image* matches *reference*.

        Volatile lines (file names, metadata type, OTB version, nodata
        values) are filtered out before the comparison.
        """
        baseline_gdalinfo_path = '/tmp/baseline_{}_gdalinfo'.format(basename(reference))
        subprocess.call('gdalinfo {} | grep --invert-match -e "Files:" -e "METADATATYPE" -e "OTB_VERSION" '
                        '-e "NoData Value" > {}'.format(reference, baseline_gdalinfo_path), shell=True)
        image_gdalinfo_path = '/tmp/image_{}_gdalinfo'.format(basename(image))
        subprocess.call('gdalinfo {} | grep --invert-match -e "Files:" -e "METADATATYPE" -e "OTB_VERSION" '
                        '-e "NoData Value" > {}'.format(image, image_gdalinfo_path), shell=True)
        with open(baseline_gdalinfo_path) as f:
            baseline_gdalinfo = f.read()
        with open(image_gdalinfo_path) as f:
            # Fixed: the original rebound image_gdalinfo_path to the file
            # *contents*, shadowing the path variable it had just used.
            image_gdalinfo = f.read()
        self.assertEqual(baseline_gdalinfo, image_gdalinfo)
08248ac0b1e2686f247d443d0208fc7018480282 | 1,327 | py | Python | test/test_merge.py | tawiesn/sclblonnx | 0cf73112db5df13009cd2ddb5d49744689096209 | [
"MIT"
] | null | null | null | test/test_merge.py | tawiesn/sclblonnx | 0cf73112db5df13009cd2ddb5d49744689096209 | [
"MIT"
] | null | null | null | test/test_merge.py | tawiesn/sclblonnx | 0cf73112db5df13009cd2ddb5d49744689096209 | [
"MIT"
] | null | null | null | from sclblonnx import add_output, add_input, add_node, node, empty_graph, add_constant, display, merge, run
import numpy as np
def test_merge():
    """Merging joins sg1's 'sum' output onto sg2's 'sum' input."""
    # Subgraph 1: sum = x1 + x2 (two scalar FLOAT inputs, one output).
    sg1 = empty_graph("Graph 1")
    n1 = node('Add', inputs=['x1', 'x2'], outputs=['sum'])
    sg1 = add_node(sg1, n1)
    sg1 = add_input(sg1, 'x1', "FLOAT", [1])
    sg1 = add_input(sg1, 'x2', "FLOAT", [1])
    sg1 = add_output(sg1, 'sum', "FLOAT", [1])

    # Subgraph 2: equal = (sum == 7), with 7 baked in as a constant.
    sg2 = empty_graph("Graph 2")
    sg2 = add_constant(sg2, "const", np.array([7]), "FLOAT")
    n2 = node("Equal", inputs=['sum', 'const'], outputs=['equal'])
    sg2 = add_node(sg2, n2)
    sg2 = add_input(sg2, 'sum', "FLOAT", [1])
    sg2 = add_output(sg2, 'equal', "BOOL", [1])

    # Stitch the graphs together on the shared 'sum' edge.
    g = merge(sg1, sg2, outputs=["sum"], inputs=["sum"])

    # 2 + 5 == 7 -> True; 4 + 5 != 7 -> False.
    in1 = {"x1": np.array([2]).astype(np.float32), "x2": np.array([5]).astype(np.float32)}
    result = run(g, inputs=in1, outputs=["equal"])
    assert result[0], "Sum of 2 and 5 should be equal to constant 7."
    in2 = {"x1": np.array([4]).astype(np.float32), "x2": np.array([5]).astype(np.float32)}
    result = run(g, inputs=in2, outputs=["equal"])
    assert not result[0], "Sum of 4 and 5 should not be equal to constant 7."
# todo(McK): Add tests for multiple inputs-outputs
# todo(McK): Add tests for graphs containing If
| 34.025641 | 107 | 0.605124 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 394 | 0.29691 |
08248cc60a1189c226093e9c782fd70e1acdd43e | 2,609 | py | Python | src/cameraCalibrator.py | mdaros2016/CarND-Advanced-Lane-Lines | b27d57f1c6730f302f18fb6b8cbbfcb9361d57bf | [
"MIT"
] | null | null | null | src/cameraCalibrator.py | mdaros2016/CarND-Advanced-Lane-Lines | b27d57f1c6730f302f18fb6b8cbbfcb9361d57bf | [
"MIT"
] | null | null | null | src/cameraCalibrator.py | mdaros2016/CarND-Advanced-Lane-Lines | b27d57f1c6730f302f18fb6b8cbbfcb9361d57bf | [
"MIT"
] | null | null | null | import glob
import cv2
import numpy as np
class CameraCalibrator:
'''
Class for correcting the distortion of the pictures taken from the camera.
'''
def __init__(self, calibration_pictures_path_pattern='../camera_cal/calibration*.jpg'):
'''
:param calibration_pictures_path_pattern: File system path of a set of 9x6 chessboard pictures that will be used for camera calibration
'''
# store mtx and dist in the status of the object, so we don't have to compute them at every iteration
self.mtx = None
self.dist = None
self.calibration_pictures_path_pattern = calibration_pictures_path_pattern
def undistort(self, img):
'''
Corrects the distortion of an image.
The first invocation of thi method will take long, since it will lazily initialize the transformation matrix
:param img: distorted picture to be corrected
:return: the corrected picture
'''
if self.mtx is None:
self.initialize_transformation_matrix()
dst = cv2.undistort(img, self.mtx, self.dist, None, self.mtx)
return dst
def initialize_transformation_matrix(self):
'''
Initializes the transformation matrix, using the pictures contained in the path specified above
:return: Nothing, it just changes the internal status of the object
'''
# prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
objp = np.zeros((6 * 9, 3), np.float32)
objp[:, :2] = np.mgrid[0:9, 0:6].T.reshape(-1, 2)
# Arrays to store object points and image points from all the images.
objpoints = [] # 3d points in real world space
imgpoints = [] # 2d points in image plane.
img_size = []
# Make a list of calibration images
images = glob.glob(self.calibration_pictures_path_pattern)
# Step through the list and search for chessboard corners
for idx, fname in enumerate(images):
img = cv2.imread(fname)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Find the chessboard corners
ret, corners = cv2.findChessboardCorners(gray, (9, 6), None)
# If found, add object points, image points
if ret == True:
objpoints.append(objp)
append = imgpoints.append(corners)
img_size = (img.shape[1], img.shape[0])
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, img_size, None, None)
self.mtx = mtx
self.dist = dist
| 38.367647 | 143 | 0.63166 | 2,563 | 0.982369 | 0 | 0 | 0 | 0 | 0 | 0 | 1,206 | 0.462246 |
0827c8ec658edf16eba00017e1a771b5d2f84def | 591 | py | Python | nicos_ess/dream/setups/beam_monitor.py | jkrueger1/nicos | 5f4ce66c312dedd78995f9d91e8a6e3c891b262b | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | 12 | 2019-11-06T15:40:36.000Z | 2022-01-01T16:23:00.000Z | nicos_ess/dream/setups/beam_monitor.py | jkrueger1/nicos | 5f4ce66c312dedd78995f9d91e8a6e3c891b262b | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | 91 | 2020-08-18T09:20:26.000Z | 2022-02-01T11:07:14.000Z | nicos_ess/dream/setups/beam_monitor.py | jkrueger1/nicos | 5f4ce66c312dedd78995f9d91e8a6e3c891b262b | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | 6 | 2020-01-11T10:52:30.000Z | 2022-02-25T12:35:23.000Z | description = 'Instrument shutter'
# EPICS IOC prefix used to build the motor PV name below.
prefix = "IOC"

# NICOS device declarations for this setup; `device(...)` is provided by the
# NICOS setup-file machinery, with the first argument naming the device class.
devices = dict(
    # EPICS motor moving the beam monitor; gives continuous position feedback.
    beam_monitor_1=device(
        'nicos_ess.devices.epics.motor.EpicsMotor',
        description="Beam monitor continuous position feedback",
        motorpv=f'{prefix}:m8',
        abslimits=(-10, 10),  # software travel limits, in mm
        unit='mm',
        speed=5.,
    ),
    # High-level IN/OUT switch layered on top of the motor: maps the two
    # named states to target positions on beam_monitor_1.
    beam_monitor_switch=device(
        'nicos.devices.generic.Switcher',
        description="Toggles between in and out of the beam",
        moveable="beam_monitor_1",
        # Target positions — presumably mm on beam_monitor_1; 0 puts the
        # monitor in the beam, 5 retracts it. TODO confirm with instrument team.
        mapping={
            'IN': 0,
            'OUT': 5,
        },
        precision=0.01,  # position tolerance for considering a state reached
    )
)
| 24.625 | 64 | 0.566836 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 225 | 0.380711 |