# File: biosteam/_system.py (repo: tylerhuntington222/biosteam, license: MIT)
# -*- coding: utf-8 -*-
# BioSTEAM: The Biorefinery Simulation and Techno-Economic Analysis Modules
# Copyright (C) 2020, Yoel Cortes-Pena <yoelcortes@gmail.com>
#
# This module is under the UIUC open-source license. See
# github.com/BioSTEAMDevelopmentGroup/biosteam/blob/master/LICENSE.txt
# for license details.
"""
"""
import flexsolve as flx
from .digraph import (digraph_from_units_and_streams,
minimal_digraph,
surface_digraph,
finalize_digraph)
from thermosteam import Stream
from thermosteam.utils import registered
from .exceptions import try_method_with_object_stamp
from ._network import Network
from ._facility import Facility
from ._unit import Unit
from .report import save_report
from .exceptions import InfeasibleRegion
from .utils import colors, strtuple
import biosteam as bst
__all__ = ('System',)
# %% Functions for taking care of numerical specifications within a system path
def run_unit_in_path(unit):
specification = unit._specification
if specification:
method = specification
else:
method = unit._run
try_method_with_object_stamp(unit, method)
def converge_system_in_path(system):
specification = system._specification
if specification:
method = specification
else:
method = system._converge
try_method_with_object_stamp(system, method)
def simulate_unit_in_path(unit):
specification = unit._specification
if specification:
try_method_with_object_stamp(unit, unit._load_stream_links)
try_method_with_object_stamp(unit, unit._setup)
try_method_with_object_stamp(unit, specification)
try_method_with_object_stamp(unit, unit._summary)
else:
try_method_with_object_stamp(unit, unit.simulate)
def simulate_system_in_path(system):
specification = system._specification
if specification:
method = specification
else:
method = system.simulate
try_method_with_object_stamp(system, method)
# %% Debugging and exception handling
def _evaluate(self, command=None):
"""
Evaluate a command and request user input for next command.
If no command, return. This function is used for debugging a System object.
"""
# Done evaluating if no command, exit debugger if 'exit'
if command is None:
Next = colors.next('Next: ') + f'{repr(self)}\n'
info = colors.info("Enter to continue or type to evaluate:\n")
command = input(Next + info + ">>> ")
if command == 'exit': raise KeyboardInterrupt()
if command:
# Build locals dictionary for evaluating command
F = bst.main_flowsheet
lcs = {self.ID: self, 'bst': bst,
**F.system.__dict__,
**F.stream.__dict__,
**F.unit.__dict__,
**F.flowsheet.__dict__
}
try:
out = eval(command, {}, lcs)
except Exception as err:
# Print exception and ask to raise error or continue evaluating
            msg = colors.exception(f'{type(err).__name__}:') + f' {str(err)}\n\n'
            info = colors.info("Enter to raise error or type to evaluate:\n")
            command = input(msg + info + ">>> ")
            if command == '': raise err
_evaluate(self, command)
else:
# If successful, continue evaluating
if out is None: pass
elif (not hasattr(out, '_ipython_display_')
or isinstance(out, type)): print(out)
else: out._ipython_display_()
command = input(">>> ")
_evaluate(self, command)
def _method_debug(self, func):
"""Method decorator for debugging system."""
def wrapper(*args, **kwargs):
# Run method and ask to evaluate
_evaluate(self)
func(*args, **kwargs)
wrapper.__name__ = func.__name__
wrapper.__doc__ = func.__doc__
wrapper._original = func
return wrapper
def _notify_run_wrapper(self, func):
"""Decorate a System run method to notify you after each loop"""
def wrapper(*args, **kwargs):
if self.recycle:
func(*args, **kwargs)
input(f' Finished loop #{self._iter}\n')
else:
func(*args, **kwargs)
wrapper.__name__ = func.__name__
wrapper.__doc__ = func.__doc__
wrapper._original = func
return wrapper
# %% Process flow
class system(type):
@property
def converge_method(self):
"""Iterative convergence method ('wegstein', 'aitken', or 'fixed point')."""
return self._converge_method.__name__[1:]
@converge_method.setter
def converge_method(self, method):
method = method.lower().replace('-', '').replace(' ', '')
if 'wegstein' == method:
self._converge_method = self._wegstein
elif 'fixedpoint' == method:
self._converge_method = self._fixed_point
elif 'aitken' == method:
self._converge_method = self._aitken
else:
raise ValueError(f"only 'wegstein', 'aitken', and 'fixed point' methods are valid, not '{method}'")
@registered('SYS')
class System(metaclass=system):
"""
Create a System object that can iteratively run each element in a path
    of BioSTEAM objects until the recycle stream is converged. A path can
have function, Unit and/or System objects. When the path contains an
inner System object, it converges/solves it in each loop/iteration.
Parameters
----------
ID : str
A unique identification. If ID is None, instance will not be
registered in flowsheet.
path : tuple[Unit, function and/or System]
A path that is run element by element until the recycle converges.
recycle=None : :class:`~thermosteam.Stream`, optional
A tear stream for the recycle loop.
facilities=() : tuple[Unit, function, and/or System], optional
Offsite facilities that are simulated only after
completing the path simulation.
"""
### Class attributes ###
#: Maximum number of iterations
maxiter = 200
#: Molar tolerance (kmol/hr)
molar_tolerance = 0.50
#: Temperature tolerance (K)
temperature_tolerance = 0.10
    # [dict] Cached downstream systems by (system, unit) keys
_cached_downstream_systems = {}
@classmethod
def from_feedstock(cls, ID, feedstock, feeds=None, facilities=(),
ends=None, facility_recycle=None):
"""
Create a System object from a feedstock.
Parameters
----------
ID : str
Name of system.
feedstock : :class:`~thermosteam.Stream`
Main feedstock of the process.
feeds : Iterable[:class:`~thermosteam.Stream`]
Additional feeds to the process.
facilities : Iterable[Facility]
Offsite facilities that are simulated only after
completing the path simulation.
ends : Iterable[:class:`~thermosteam.Stream`]
            Streams that are not products, but are ultimately specified through
            process requirements and not by their unit source.
facility_recycle : [:class:`~thermosteam.Stream`], optional
Recycle stream between facilities and system path.
"""
network = Network.from_feedstock(feedstock, feeds, ends)
return cls.from_network(ID, network, facilities, facility_recycle)
@classmethod
def from_network(cls, ID, network, facilities=(), facility_recycle=None):
"""
Create a System object from a network.
Parameters
----------
ID : str
Name of system.
network : Network
Network that defines the simulation path.
facilities : Iterable[Facility]
Offsite facilities that are simulated only after
completing the path simulation.
facility_recycle : [:class:`~thermosteam.Stream`], optional
Recycle stream between facilities and system path.
"""
facilities = Facility.ordered_facilities(facilities)
isa = isinstance
path = tuple([(cls.from_network('', i) if isa(i, Network) else i)
for i in network.path])
self = cls.__new__(cls)
self.units = network.units
self.streams = streams = network.streams
self.feeds = feeds = network.feeds
self.products = products = network.products
self._specification = None
self._set_recycle(network.recycle)
self._reset_errors()
self._set_path(path)
self._set_facilities(facilities)
self._set_facility_recycle(facility_recycle)
self._register(ID)
if facilities:
f_streams = bst.utils.streams_from_path(facilities)
f_feeds = bst.utils.feeds(f_streams)
f_products = bst.utils.products(f_streams)
streams.update(f_streams)
feeds.update(f_feeds)
products.update(f_products)
self._finalize_streams()
return self
def __init__(self, ID, path, recycle=None, facilities=(), facility_recycle=None):
self._specification = None
self._set_recycle(recycle)
self._load_flowsheet()
self._reset_errors()
self._set_path(path)
self._load_units()
self._set_facilities(facilities)
self._set_facility_recycle(facility_recycle)
self._load_streams()
self._finalize_streams()
self._register(ID)
specification = Unit.specification
save_report = save_report
def _load_flowsheet(self):
self.flowsheet = flowsheet_module.main_flowsheet.get_flowsheet()
def _set_recycle(self, recycle):
assert recycle is None or isinstance(recycle, Stream), (
"recycle must be a Stream instance or None, not "
f"{type(recycle).__name__}"
)
self._recycle = recycle
def _set_path(self, path):
#: tuple[Unit, function and/or System] A path that is run element
#: by element until the recycle converges.
self.path = path
#: set[System] All subsystems in the system
self.subsystems = subsystems = set()
#: list[Unit] Network of only unit operations
self._unit_path = unit_path = []
#: set[Unit] All units that have costs.
self._costunits = costunits = set()
isa = isinstance
for i in path:
if i in unit_path: continue
if isa(i, Unit):
unit_path.append(i)
elif isa(i, System):
unit_path.extend(i._unit_path)
subsystems.add(i)
costunits.update(i._costunits)
#: set[Unit] All units in the path that have costs
self._path_costunits = path_costunits = {i for i in unit_path
if i._design or i._cost}
costunits.update(path_costunits)
def _load_units(self):
#: set[Unit] All units within the system
self.units = set(self._unit_path) | self._costunits
def _set_facilities(self, facilities):
#: tuple[Unit, function, and/or System] Offsite facilities that are simulated only after completing the path simulation.
self._facilities = facilities = tuple(facilities)
subsystems = self.subsystems
costunits = self._costunits
units = self.units
isa = isinstance
for i in facilities:
if isa(i, Unit):
i._load_stream_links()
units.add(i)
if i._cost: costunits.add(i)
if isa(i, Facility) and not i._system: i._system = self
elif isa(i, System):
units.update(i.units)
subsystems.add(i)
costunits.update(i._costunits)
def _set_facility_recycle(self, recycle):
if recycle:
system = self._downstream_system(recycle.sink)
#: [FacilityLoop] Recycle loop for converging facilities
self._facility_loop = FacilityLoop(system, recycle)
else:
self._facility_loop = None
def _load_streams(self):
#: set[:class:`~thermosteam.Stream`] All streams within the system
self.streams = streams = set()
for u in self.units:
streams.update(u._ins + u._outs)
for sys in self.subsystems:
streams.update(sys.streams)
#: set[:class:`~thermosteam.Stream`] All feed streams in the system.
self.feeds = bst.utils.feeds(streams)
#: set[:class:`~thermosteam.Stream`] All product streams in the system.
self.products = bst.utils.products(streams)
def _load_stream_links(self):
for u in self._unit_path: u._load_stream_links()
def _filter_out_missing_streams(self):
for stream_set in (self.streams, self.feeds, self.products):
bst.utils.filter_out_missing_streams(stream_set)
def _finalize_streams(self):
self._load_stream_links()
self._filter_out_missing_streams()
@property
def TEA(self):
"""[TEA] Object for Techno-Economic Analysis."""
try: return self._TEA
except AttributeError: return None
@property
def facilities(self):
"""tuple[Facility] All system facilities."""
return self._facilities
@property
def recycle(self):
"""[:class:`~thermosteam.Stream`] A tear stream for the recycle loop"""
return self._recycle
@property
def converge_method(self):
"""Iterative convergence method ('wegstein', 'aitken', or 'fixed point')."""
return self._converge_method.__name__[1:]
@converge_method.setter
def converge_method(self, method):
if self.recycle is None:
            raise ValueError(
                "cannot set converge method when no recycle is specified")
method = method.lower().replace('-', '').replace(' ', '')
if 'wegstein' == method:
self._converge_method = self._wegstein
elif 'fixedpoint' == method:
self._converge_method = self._fixed_point
elif 'aitken' == method:
self._converge_method = self._aitken
else:
raise ValueError(
f"only 'wegstein', 'aitken', and 'fixed point' methods "
f"are valid, not '{method}'")
def _downstream_path(self, unit):
"""Return a list composed of the `unit` and everything downstream."""
if unit not in self.units: return []
elif self._recycle: return self.path
unit_found = False
downstream_units = unit._downstream_units
path = []
isa = isinstance
for i in self.path:
if unit_found:
if isa(i, System):
for u in i.units:
if u in downstream_units:
path.append(i)
break
elif i in downstream_units or not isa(i, Unit):
path.append(i)
else:
if unit is i:
unit_found = True
path.append(unit)
elif isa(i, System) and unit in i.units:
unit_found = True
path.append(i)
return path
def _downstream_system(self, unit):
"""Return a system with a path composed of the `unit` and
everything downstream (facilities included)."""
if unit is self.path[0]: return self
system = self._cached_downstream_systems.get((self, unit))
if system: return system
path = self._downstream_path(unit)
if path:
downstream_facilities = self._facilities
else:
unit_found = False
isa = isinstance
for pos, i in enumerate(self._facilities):
if unit is i or (isa(i, System) and unit in i.units):
downstream_facilities = self._facilities[pos:]
unit_found = True
break
assert unit_found, f'{unit} not found in system'
system = System(None, path,
facilities=downstream_facilities)
system._ID = f'{type(unit).__name__}-{unit} and downstream'
        self._cached_downstream_systems[self, unit] = system
return system
def _minimal_digraph(self, **graph_attrs):
"""Return digraph of the path as a box."""
return minimal_digraph(self.ID, self.units, self.streams, **graph_attrs)
def _surface_digraph(self, **graph_attrs):
return surface_digraph(self.path)
def _thorough_digraph(self, **graph_attrs):
return digraph_from_units_and_streams(self.units, self.streams,
**graph_attrs)
def diagram(self, kind='surface', file=None, format='png', **graph_attrs):
"""Display a `Graphviz <https://pypi.org/project/graphviz/>`__ diagram of the system.
Parameters
----------
kind='surface' : {'thorough', 'surface', 'minimal'}:
* **'thorough':** Display every unit within the path.
* **'surface':** Display only elements listed in the path.
* **'minimal':** Display path as a box.
file=None : str, display in console by default
File name to save diagram.
format='png' : str
File format (e.g. "png", "svg").
"""
if kind == 'thorough':
f = self._thorough_digraph(format=format, **graph_attrs)
elif kind == 'surface':
f = self._surface_digraph(format=format, **graph_attrs)
elif kind == 'minimal':
f = self._minimal_digraph(format=format, **graph_attrs)
else:
            raise ValueError("kind must be either 'thorough', 'surface', or 'minimal'")
finalize_digraph(f, file, format)
# Methods for running one iteration of a loop
def _iter_run(self, mol):
"""
Run the system at specified recycle molar flow rate.
Parameters
----------
mol : numpy.ndarray
Recycle molar flow rates.
Returns
-------
rmol : numpy.ndarray
New recycle molar flow rates.
unconverged : bool
True if recycle has not converged.
"""
if (mol < 0.).any():
raise InfeasibleRegion('material flow')
recycle = self.recycle
rmol = recycle.mol
rmol[:] = mol
T = recycle.T
self._run()
self._mol_error = mol_error = abs(mol - recycle.mol).sum()
self._T_error = T_error = abs(T - recycle.T)
self._iter += 1
if mol_error < self.molar_tolerance and T_error < self.temperature_tolerance:
unconverged = False
elif self._iter == self.maxiter:
raise RuntimeError(f'{repr(self)} could not converge' + self._error_info())
else:
unconverged = True
return rmol.copy(), unconverged
def _setup(self):
"""Setup each element of the system."""
isa = isinstance
for i in self.path:
if isa(i, (Unit, System)): i._setup()
    def _run(self):
        """Rigorously run each element of the system."""
isa = isinstance
for i in self.path:
if isa(i, Unit):
run_unit_in_path(i)
elif isa(i, System):
converge_system_in_path(i)
else: i() # Assume it is a function
    # Methods for converging the recycle stream
def _fixed_point(self):
"""Converge system recycle iteratively using fixed-point iteration."""
self._reset_iter()
flx.conditional_fixed_point(self._iter_run, self.recycle.mol.copy())
    def _wegstein(self):
        """Converge the system recycle iteratively using Wegstein's method."""
self._reset_iter()
flx.conditional_wegstein(self._iter_run, self.recycle.mol.copy())
def _aitken(self):
"""Converge the system recycle iteratively using Aitken's method."""
self._reset_iter()
flx.conditional_aitken(self._iter_run, self.recycle.mol.copy())
# Default converge method
_converge_method = _aitken
def _converge(self):
return self._converge_method() if self._recycle else self._run()
def _design_and_cost(self):
for i in self._path_costunits:
try_method_with_object_stamp(i, i._summary)
isa = isinstance
for i in self._facilities:
if isa(i, Unit):
simulate_unit_in_path(i)
elif isa(i, System):
simulate_system_in_path(i)
else:
i() # Assume it is a function
def _reset_iter(self):
self._iter = 0
for system in self.subsystems: system._reset_iter()
def reset_names(self, unit_format=None, stream_format=None):
"""Reset names of all streams and units according to the path order."""
Unit._default_ID = unit_format if unit_format else ['U', 0]
Stream._default_ID = stream_format if stream_format else ['d', 0]
streams = set()
units = set()
for i in self._unit_path:
if i in units: continue
try: i.ID = ''
except: continue
for s in (i._ins + i._outs):
if (s and s._sink and s._source
and s not in streams):
s.ID = ''
streams.add(s)
units.add(i)
def _reset_errors(self):
#: Molar flow rate error (kmol/hr)
self._mol_error = 0
#: Temperature error (K)
self._T_error = 0
#: Number of iterations
self._iter = 0
def reset_flows(self):
"""Reset all process streams to zero flow."""
from warnings import warn
        warn(DeprecationWarning("'reset_flows' will be deprecated; please use 'empty_process_streams'"))
self.empty_process_streams()
def empty_process_streams(self):
"""Reset all process streams to zero flow."""
self._reset_errors()
feeds = self.feeds
for stream in self.streams:
if stream not in feeds: stream.empty()
def empty_recycles(self):
"""Reset all recycle streams to zero flow."""
self._reset_errors()
if self.recycle: self.recycle.empty()
for system in self.subsystems:
system.empty_recycles()
def reset_cache(self):
"""Reset cache of all unit operations."""
for unit in self.units: unit.reset_cache()
def simulate(self):
"""Converge the path and simulate all units."""
self._setup()
self._converge()
self._design_and_cost()
if self._facility_loop: self._facility_loop()
# Debugging
def _debug_on(self):
"""Turn on debug mode."""
self._run = _notify_run_wrapper(self, self._run)
self.path = path = list(self.path)
for i, item in enumerate(path):
if isinstance(item, Unit):
item._run = _method_debug(item, item._run)
elif isinstance(item, System):
item._converge = _method_debug(item, item._converge)
elif callable(item):
path[i] = _method_debug(item, item)
def _debug_off(self):
"""Turn off debug mode."""
self._run = self._run._original
path = self.path
for i, item in enumerate(path):
if isinstance(item, Unit):
item._run = item._run._original
elif isinstance(item, System):
item._converge = item._converge._original
elif callable(item):
path[i] = item._original
self.path = tuple(path)
def debug(self):
"""Converge in debug mode. Just try it!"""
self._debug_on()
try: self._converge()
finally: self._debug_off()
end = self._error_info()
if end:
print(f'\nFinished debugging{end}')
else:
            print('\nFinished debugging')
# Representation
def __str__(self):
if self.ID: return self.ID
else: return type(self).__name__
def __repr__(self):
if self.ID: return f'<{type(self).__name__}: {self.ID}>'
else: return f'<{type(self).__name__}>'
    def show(self):
        """Print information on the system."""
print(self._info())
def to_network(self):
"""Return network that defines the system path."""
isa = isinstance
path = [(i.to_network() if isa(i, System) else i) for i in self.path]
network = Network.__new__(Network)
network.path = path
network.recycle = self.recycle
network.units = self.units
network.subnetworks = [i for i in path if isa(i, Network)]
network.feeds = self.feeds
network.products = self.products
return network
def _ipython_display_(self):
try: self.diagram('minimal')
except: pass
self.show()
def _error_info(self):
"""Return information on convergence."""
if self.recycle:
return (f"\n convergence error: Flow rate {self._mol_error:.2e} kmol/hr"
f"\n Temperature {self._T_error:.2e} K"
f"\n iterations: {self._iter}")
else:
return ""
def _info(self):
"""Return string with all specifications."""
if self.recycle is None:
recycle = ''
else:
recycle = f"\n recycle: {self.recycle}"
error = self._error_info()
path = strtuple(self.path)
i = 1; last_i = 0
while True:
i += 2
i = path.find(', ', i)
i_next = path.find(', ', i+2)
if (i_next-last_i) > 35:
path = (path[:i] + '%' + path[i:])
last_i = i
elif i == -1: break
path = path.replace('%, ', ',\n' + ' '*8)
if self.facilities:
facilities = strtuple(self.facilities)
i = 1; last_i = 0
while True:
i += 2
i = facilities.find(', ', i)
if (i - last_i) > 35:
facilities = (facilities[:i] + '%' + facilities[i:])
last_i = i
elif i == -1: break
facilities = facilities.replace('%, ', ',\n'+' '*14)
facilities = f"\n facilities: {facilities}"
else:
facilities = ''
return (f"System: {self.ID}"
+ recycle
+ f"\n path: {path}"
+ facilities
+ error)
class FacilityLoop(metaclass=system):
__slots__ = ('system', 'recycle',
'_mol_error', '_T_error', '_iter')
#: Maximum number of iterations to solve facilities
maxiter = 50
#: Molar tolerance (kmol/hr)
molar_tolerance = 0.50
#: Temperature tolerance (K)
temperature_tolerance = 0.10
def __init__(self, system, recycle):
self.system = system
self.recycle = recycle
self._reset_errors()
_reset_errors = System._reset_errors
_error_info = System._error_info
_iter_run = System._iter_run
_fixed_point = System._fixed_point
_aitken = System._aitken
_wegstein = System._wegstein
_converge_method = System._converge_method
converge_method = System.converge_method
def _reset_iter(self):
self.system._reset_iter()
self._iter = 0
def _run(self): self.system.simulate()
def __call__(self): self._converge_method()
def __repr__(self):
return f"<{type(self).__name__}: {self.system.ID}>"
from biosteam import _flowsheet as flowsheet_module
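
# --- Editor's addition: a minimal, hedged usage sketch of the System class
# above. It assumes biosteam's public API (settings.set_thermo, units.Mixer,
# units.Splitter, and the `unit-index` pipe notation); illustrative only, not
# part of the original module.
if __name__ == '__main__':
    bst.settings.set_thermo(['Water', 'Ethanol'])
    feed = Stream('feed', Water=100, Ethanol=10)        # fresh feed
    recycle = Stream('recycle')                         # tear stream for the loop
    M1 = bst.units.Mixer('M1', ins=(feed, recycle))
    S1 = bst.units.Splitter('S1', ins=M1-0, outs=('product', recycle),
                            split=0.5)                  # half the flow recycles
    demo = System('demo', path=(M1, S1), recycle=recycle)
    demo.converge_method = 'wegstein'                   # or 'aitken' / 'fixed point'
    demo.simulate()
    demo.show()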

# File: modules/AI/research/findContour.py (repo: killax-d/Counter-Coins-API, license: MIT)
import cv2
import numpy as np
image = cv2.imread('original.png')
gray = cv2.cvtColor(image.copy(), cv2.COLOR_BGR2GRAY)
gray = cv2.equalizeHist(gray)
blur = cv2.GaussianBlur(gray, (19, 19), 0)
# Application d'un seuil pour obtenir une image binaire
thresh = cv2.adaptiveThreshold(blur, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 11, 1)
kernel = np.ones((3, 3), np.uint8)
# Application d'érosion et d'ouverture pour supprimer les contours de petites pièces
closing = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel, iterations=1)
contours, hierarchy = cv2.findContours(closing.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
for contour in contours:
area = cv2.contourArea(contour)
if area < 10000 or area > 50000:
continue
print(area)
if len(contour) < 5:
continue
try:
ellipse = cv2.fitEllipse(contour)
cv2.ellipse(image, ellipse, (0,255,0), 2)
except:
pass
# Write the result image
cv2.imwrite('result.png', image)

# File: python/src/ties/cli/test/ties_convert_tests.py (repo: Noblis/ties-lib, license: ECL-2.0/Apache-2.0)
################################################################################
# Copyright 2019 Noblis, Inc #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
################################################################################
import json
import os
import unittest
from stat import S_IRUSR
from tempfile import mkstemp
from unittest import TestCase
from ties.cli.ties_convert import main
from ties.util.testing import cli_test
short_usage = """\
usage: ties-convert [-h] [--classification-level SECURITY_TAG]
[--output-file OUTPUT_FILE | --in-place] [--version]
EXPORT_PATH"""
long_usage = """\
{}
Converts TIES export.json files from older versions of the schema (0.1.8, 0.2,
0.3, 0.4, 0.5, 0.6, 0.7, 0.8) to the current version (0.9).
positional arguments:
EXPORT_PATH the path to the TIES JSON file or - to read from stdin
optional arguments:
-h, --help show this help message and exit
--classification-level SECURITY_TAG, -c SECURITY_TAG
the classification level of the TIES JSON, required
for TIES JSON from pre-0.3 versions of the schema
--output-file OUTPUT_FILE, -f OUTPUT_FILE
the output file path for the converted TIES JSON
--in-place, -i modifies the input file in-place, overwriting it with
the converted JSON data
--version prints version information
""".format(short_usage)
test_input = """\
{
"version": "0.1.8",
"objectItem": [
{
"sha256Hash": "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"md5Hash": "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
}
]
}"""
test_output = """\
{
"version": "0.9",
"securityTag": "UNCLASSIFIED",
"objectItems": [
{
"objectId": "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"sha256Hash": "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"md5Hash": "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"authorityInformation": {
"securityTag": "UNCLASSIFIED"
}
}
]
}"""
class TiesConvertTests(TestCase):
def setUp(self):
self._default_args = ['--classification-level', 'UNCLASSIFIED']
fd, self._input_file_path = mkstemp()
with os.fdopen(fd, 'w') as f:
f.write(test_input)
fd, self._output_file_path = mkstemp()
with os.fdopen(fd, 'w') as f:
f.write(test_output)
def tearDown(self):
try:
os.remove(self._input_file_path)
except Exception: # pylint: disable=broad-except
pass
try:
os.remove(self._output_file_path)
except Exception: # pylint: disable=broad-except
pass
def _check_input_file_json(self, expected_json):
with open(self._input_file_path, 'r', encoding='utf-8') as f:
self.assertEqual(json.load(f), json.loads(expected_json))
def _check_output_file_json(self, expected_json):
with open(self._output_file_path, 'r', encoding='utf-8') as f:
self.assertEqual(json.load(f), json.loads(expected_json))
def test_no_args(self):
with cli_test(self, main) as t:
t.args([])
t.return_code(2)
t.stdout_text()
t.stderr(short_usage)
t.stderr('ties-convert: error: the following arguments are required: EXPORT_PATH')
t.stderr()
def test_help_short(self):
with cli_test(self, main) as t:
t.args(['-h'])
t.return_code(0)
t.stdout_text(long_usage)
t.stderr()
def test_help_long(self):
with cli_test(self, main) as t:
t.args(['--help'])
t.return_code(0)
t.stdout_text(long_usage)
t.stderr()
def test_stdin_stdout(self):
with cli_test(self, main) as t:
t.args(self._default_args + ['-'])
t.stdin(test_input)
t.return_code(0)
t.stdout_json(test_output)
t.stderr()
def test_infile_stdout(self):
with cli_test(self, main) as t:
t.args(self._default_args + [self._input_file_path])
t.return_code(0)
t.stdout_json(test_output)
t.stderr()
self._check_input_file_json(test_input)
def test_stdin_outfile(self):
with cli_test(self, main) as t:
t.args(self._default_args + ['-f', self._output_file_path, '-'])
t.stdin(test_input)
t.return_code(0)
t.stdout_text()
t.stderr()
self._check_output_file_json(test_output)
def test_infile_outfile(self):
with cli_test(self, main) as t:
t.args(self._default_args + ['-f', self._output_file_path, self._input_file_path])
t.return_code(0)
t.stdout_text()
t.stderr()
self._check_input_file_json(test_input)
self._check_output_file_json(test_output)
def test_inplace(self):
with cli_test(self, main) as t:
t.args(self._default_args + ['-i', self._input_file_path])
t.return_code(0)
t.stdout_text()
t.stderr()
self._check_input_file_json(test_output)
def test_inplace_stdin(self):
with cli_test(self, main) as t:
t.args(self._default_args + ['-i', '-'])
t.stdin(test_input)
t.return_code(0)
t.stdout_json(test_output)
t.stderr()
def test_inplace_outfile_error(self):
with cli_test(self, main) as t:
t.args(self._default_args + ['-i', '-f', self._output_file_path, self._input_file_path])
t.return_code(2)
t.stdout_text()
t.stderr(short_usage)
t.stderr('ties-convert: error: argument --output-file/-f: not allowed with argument --in-place/-i')
def test_inplace_write_error(self):
os.chmod(self._input_file_path, S_IRUSR)
with cli_test(self, main) as t:
t.args(self._default_args + ['-i', self._input_file_path])
t.return_code(1)
t.stdout_text()
t.stderr("error: could not write to file: {}".format(self._input_file_path))
def test_stdin_parse_exception(self):
with cli_test(self, main) as t:
t.args(self._default_args + ['-'])
t.return_code(1)
t.stdout_text()
t.stderr('error: could not parse JSON from stdin')
def test_infile_fnf(self):
with cli_test(self, main) as t:
t.args(self._default_args + ['/file/not/found'])
t.return_code(1)
t.stdout_text()
t.stderr('error: could not read from file: /file/not/found')
def test_infile_parse_exception(self):
with cli_test(self, main) as t:
t.args(self._default_args + ['/dev/null'])
t.return_code(1)
t.stdout_text()
t.stderr('error: could not read from file: /dev/null')
def test_outfile_fnf(self):
with cli_test(self, main) as t:
t.args(self._default_args + ['-f', '/dev/full', self._input_file_path])
t.return_code(1)
t.stdout_text()
t.stderr('error: could not write to file: /dev/full')
if __name__ == '__main__':
unittest.main()
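
# Editor's note (hedged): the CLI exercised by these tests converts an older
# TIES export to schema 0.9, along the lines of:
#   ties-convert --classification-level UNCLASSIFIED -f converted.json export.json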

# File: Edge Detection.py (repo: paulmtree/Lung-Segmentation-Project, license: MIT)
from PIL import Image, ImageFilter
import numpy as np
import glob
from numpy import array
import matplotlib.pyplot as plt
from skimage import morphology
import scipy.ndimage
import scipy.misc  # used by scipy.misc.imsave below (available in SciPy < 1.2)
def sample_stack(stack, rows=2, cols=2, start_with=0, show_every=1, display1 = True):
if (display1):
new_list = []
new_list.append(stack)
new_list.append(stack)
new_list.append(stack)
new_list.append(stack)
sample_stack(new_list, 2, 2, 0, 1, False)
else:
fig,ax = plt.subplots(rows,cols,figsize=[12,12])
for i in range((rows*cols)):
ind = start_with + i*show_every
ax[int(i/rows),int(i % rows)].set_title('slice %d' % ind)
ax[int(i/rows),int(i % rows)].imshow(stack[ind],cmap='gray')
ax[int(i/rows),int(i % rows)].axis('off')
plt.show()
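
# Editor's note (hedged): with the default display1=True, sample_stack tiles a
# single 2-D image into the 2x2 grid; pass display1=False to walk a stack, e.g.
#   sample_stack(imgs[0])                                    # one slice, tiled
#   sample_stack(imgs, start_with=0, show_every=10, display1=False)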
"""
datapath = "jpg_images/"
img0 = Image.open("jpg_images/maskedimage" + str(0) + ".jpg")
counter = 0
img1 = []
for f in glob.glob('/Users/paulmccabe/Desktop/jpg images/*.jpg'):
path = "jpg_images/maskedimage" + str(counter) + ".jpg"
img0 = Image.open(path).convert('L')
img1.append(array(img0))
counter += 1
print("Counter: " + str(counter))
imgs_to_process_orig = np.stack([s for s in img1])
"""
id = 2
imgs = np.load("/Users/paulmccabe/Desktop/Segmentation Project/" + "justmask_%d.npy" % (id))
counter = 0
print("Saving as jpg Images...")
for img in imgs:
scipy.misc.imsave('/Users/paulmccabe/Desktop/Segmentation Project' + '/jpg mask images/justmask{}.jpg'.format(counter), img)
counter += 1
counter = 0
#print("Re-Importing jpg Images...")
#for f in glob.glob('/Users/paulmccabe/Desktop/Segmentation Project/jpg mask images/*.jpg'):
# path = "jpg_images/maskedimage" + str(counter) + ".jpg"
# img0 = Image.open(path).convert('L')
# img1.append(array(img0))
# counter += 1
imgs[imgs == 1] = 255
list = []
for img in imgs:
PIL_img = Image.fromarray(img.astype('uint8'))
PIL_edge = PIL_img.filter(ImageFilter.FIND_EDGES)
np_img = array(PIL_edge)
dilation = morphology.dilation(np_img, np.ones([4,4]))
list.append(dilation)
imgs_after_processing = np.stack([s for s in list])
np.save("/Users/paulmccabe/Desktop/Segmentation Project" + "/justedge_%d.npy" % (id), imgs_after_processing[:284])
#sample_stack(np_img)

# File: benchmark.py (repo: Umass-ITS/Open3D-PointNet2-Semantic3D, license: MIT)
import json
import numpy as np
import tensorflow as tf
import time
from predict import Predictor
if __name__ == "__main__":
checkpoint = "logs/semantic_backup_full_submit_dec_10/best_model_epoch_275.ckpt"
hyper_params = json.loads(open("semantic.json").read())
predictor = Predictor(
checkpoint_path=checkpoint, num_classes=9, hyper_params=hyper_params
)
batch_size = 64
# Init data
points_with_colors = np.random.randn(batch_size, hyper_params["num_point"], 6)
# Warm up
pd_labels = predictor.predict(points_with_colors)
# Benchmark
s = time.time()
profiler = tf.profiler.Profiler(predictor.sess.graph)
run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
run_metadata = tf.RunMetadata()
_ = predictor.predict(
points_with_colors, run_options=run_options, run_metadata=run_metadata
)
profiler.add_step(0, run_metadata)
batch_time = time.time() - s
sample_time = batch_time / batch_size
print(
"Batch size: {}, batch_time: {}, sample_time: {}".format(
batch_size, batch_time, sample_time
)
)
option_builder = tf.profiler.ProfileOptionBuilder
opts = (
option_builder(option_builder.time_and_memory())
.with_step(-1) # with -1, should compute the average of all registered steps.
.with_file_output("tf-profile.txt")
.select(["micros", "bytes", "occurrence"])
.order_by("micros")
.build()
)
# Profiling info about ops are saved in 'test-%s.txt' % FLAGS.out
profiler.profile_operations(options=opts)
for batch_size in [2 ** n for n in range(8)]:
# Init data
points_with_colors = np.random.randn(batch_size, hyper_params["num_point"], 6)
# Warm up
pd_labels = predictor.predict(points_with_colors)
# Benchmark
s = time.time()
_ = predictor.predict(points_with_colors)
batch_time = time.time() - s
sample_time = batch_time / batch_size
print(
"Batch size: {}, batch_time: {}, sample_time: {}".format(
batch_size, batch_time, sample_time
)
)

# File: tests/my_select_group.py (repo: oldjun/PyMyORM, license: MIT)
from pymyorm.database import Database
from config import db
from models.user import User
if __name__ == '__main__':
Database.connect(**db)
# # case 1
# all = User.find().select('count(*) as count', 'money').group('money').order('count asc').all()
# for one in all:
# print(one)
all = User.find().select('gender', 'count(*) as count', 'avg(money) as avg').group('gender').all()
for one in all:
print(one)
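
    # Editor's note (hedged): the chained query above maps roughly to
    #   SELECT gender, count(*) AS count, avg(money) AS avg FROM user GROUP BY gender
    # select()/group() build the statement lazily and all() executes it.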

# File: framework/codejam/extract/cyclomatic_complexity.py (repo: neizod/coding-analysis, license: MIT)
import os
import json
import logging
from framework._utils import FunctionHook
class CodeJamExtractCyclomaticComplexity(FunctionHook):
''' This method will extract cyclomatic complexity from submitted code.
Need to run `extract language` first, since not every language has
implement with the extractor (only C, C++, Python). '''
@staticmethod
def use_cmetrics(pid, pio, uname):
''' cmetrics is a tool for analysing cyclomatic complexity for
code written in C, C++. '''
from subprocess import getoutput
from framework._utils.misc import datapath
directory = datapath('codejam', 'source', pid, pio, uname)
data = getoutput('mccabe -n {}/*'.format(directory))
if not data:
return
for line in data.split('\n'):
*_, complexity, _ = line.split('\t')
yield int(complexity)
@staticmethod
def use_radon(pid, pio, uname):
''' radon is a tool for analysing cyclomatic complexity for
code written in Python. '''
from subprocess import getoutput
from framework._utils.misc import datapath
directory = datapath('codejam', 'source', pid, pio, uname)
data = json.loads(getoutput('radon cc -sj {}'.format(directory)))
for extracted_file in data.values():
if 'error' in extracted_file:
return
for extracted_func in extracted_file:
yield extracted_func['complexity']
def main(self, year, force=False, **_):
from framework._utils import write
from framework._utils.misc import datapath, make_ext
os.makedirs(datapath('codejam', 'extract'), exist_ok=True)
usepath = datapath('codejam', 'extract',
make_ext('language', year, 'json'))
outpath = datapath('codejam', 'extract',
make_ext('cyclomatic-complexity', year, 'json'))
if not force and os.path.isfile(outpath):
return logging.warn('output file already exists, aborting.')
extracted_data = json.load(open(usepath))
for submission in extracted_data:
pid = submission['pid']
pio = submission['io']
uname = submission['uname']
logging.info('extracting: %i %i %s', pid, pio, uname)
languages_set = set(submission.pop('languages'))
complexity = []
if {'Python'} & languages_set:
complexity += self.use_radon(pid, pio, uname)
if {'C', 'C++'} & languages_set:
complexity += self.use_cmetrics(pid, pio, uname)
submission['cyclomatic-complexity'] = sorted(complexity)
write.json(extracted_data, open(outpath, 'w'))
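
# --- Editor's addition: a hedged, standalone sketch mirroring `use_radon`
# above. It shells out to the radon CLI (assumed installed) and prints
# per-function cyclomatic complexity; not part of the original module.
if __name__ == '__main__':
    import sys
    from subprocess import getoutput
    target = sys.argv[1] if len(sys.argv) > 1 else '.'
    report = json.loads(getoutput('radon cc -sj {}'.format(target)))
    for path, blocks in report.items():
        if isinstance(blocks, dict) and 'error' in blocks:
            continue  # radon reports per-file errors as {'error': ...}
        for block in blocks:
            print('{}:{} {} -> complexity {}'.format(
                path, block['lineno'], block['name'], block['complexity']))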

# File: tests/basic/numeric.py (repo: MoonStarCZW/py2rb, license: MIT)
print(int(2.0))
print(float(2))
print(abs(-2.0))
print(abs(-2))

# File: tests/symmetry/test_point_group.py (repo: kijanac/Materia, license: MIT)
# import materia as mtr
# import numpy as np
# def test_point_group_C1():
# ctable = mtr.symmetry.C1().cayley_table()
# assert (ctable == np.array([[0]])).all()
# def test_point_group_Ci():
# ctable = mtr.symmetry.Ci().cayley_table()
# assert (ctable == np.array([[0, 1], [1, 0]])).all()

# File: extrator/test/test_pipeline.py (repo: MinisterioPublicoRJ/robotj, license: MIT)
from unittest.mock import patch, MagicMock
from unittest import TestCase
from ..crawler.pipeliner import pipeline
from ..settings import URL_PROCESSO
class Pipeline(TestCase):
@patch('robotj.extrator.crawler.pipeliner.parse_itens',
return_value={'d': 4})
@patch('robotj.extrator.crawler.pipeliner.parse_metadados',
return_value={'a': 1})
@patch('robotj.extrator.crawler.pipeliner.area_dos_metadados',
return_value=(0, 1))
@patch('robotj.extrator.crawler.pipeliner.BeautifulSoup')
@patch('robotj.extrator.crawler.pipeliner.cria_hash_do_processo')
@patch('robotj.extrator.crawler.pipeliner.requests')
@patch('robotj.extrator.crawler.pipeliner.formata_numero_processo')
def test_pipeline_do_parsing_dos_processos(self, _fnp, _req, _chdp, _bs,
_am, _pm, _pi):
processo = '1234'
numero_formatado = '1.2.3.4'
html = '{"a": 1}'
_resp_mock = MagicMock()
_resp_mock.content = html
_soup_mock = MagicMock()
_soup_mock.find_all.return_value = 'rows_mock'
_fnp.return_value = numero_formatado
_req.get.return_value = _resp_mock
_chdp.return_value = 'ab12'
_bs.return_value = _soup_mock
processos = pipeline(processo)
_fnp.assert_called_once_with(processo)
_req.get.assert_called_once_with(URL_PROCESSO.format(
doc_number=numero_formatado),
headers={'X-Forwarded-For': '10.0.250.15'},
timeout=10)
_chdp.assert_called_once_with(html)
_bs.assert_called_once_with(html, 'lxml')
_soup_mock.find_all.assert_called_once_with('tr')
_am.assert_called_once_with('rows_mock')
_pm.assert_called_once_with('rows_mock', '1.2.3.4', 0, 1)
_pi.assert_called_once_with(_soup_mock, '1234', 1)
self.assertEqual(processos, {'a': 1, 'd': 4, 'hash': 'ab12'})

# File: tests/test_step.py (repo: arup-group/mc, license: MIT)
from pathlib import Path
import pytest
from copy import deepcopy
import os
from mc.base import BaseConfig
from mc import step
@pytest.fixture()
def config():
in_file = Path("tests/test_data/test_config.xml")
return BaseConfig(in_file)
def test_set_write_path(config):
step.set_write_path(config, {'outputDirectory': 'testing'})
assert config['controler']['outputDirectory'] == 'testing'
def test_set_input_paths(config):
step.set_input_paths(config, {'matsim_source': 'test/ing'})
assert config['network']['inputNetworkFile'] == 'test/ing/network.xml'
assert config['plans']['inputPlansFile'] == 'test/ing/population.xml.gz'
assert config['plans']['inputPersonAttributesFile'] == 'test/ing/population_attributes.xml.gz'
assert config['transit']['transitScheduleFile'] == 'test/ing/schedule-merged.xml'
assert config['transit']['vehiclesFile'] == 'test/ing/vehicles.xml'
assert config['transit']['transitLinesAttributesFile'] == 'null'
def test_set_step(config):
step.set_last_iteration(config, {'step': '999'})
assert config['controler']['lastIteration'] == '999'
def test_find_and_set_param(config):
step.find_and_set_overrides(
config,
{"modeParams:car/constant": "-1.0"}
)
assert config['planCalcScore']['scoringParameters:default']['modeParams:car']["constant"] == "-1.0"
assert config['planCalcScore']['scoringParameters:unknown']['modeParams:car']["constant"] == "-1.0"
assert config['planCalcScore']['scoringParameters:unknown']['modeParams:bus']["constant"] == "0.0"
def test_find_and_set_params(config):
step.find_and_set_overrides(
config,
{
"modeParams:car/constant": "-1.0",
"scoringParameters:unknown/modeParams:bus/constant": "-1.0"
}
)
assert config['planCalcScore']['scoringParameters:default']['modeParams:car']["constant"] == "-1.0"
assert config['planCalcScore']['scoringParameters:unknown']['modeParams:car']["constant"] == "-1.0"
assert config['planCalcScore']['scoringParameters:unknown']['modeParams:bus']["constant"] == "-1.0"
def test_find_and_set_bad_param(config):
cnfg = deepcopy(config)
step.find_and_set_overrides(
config,
{"modeParams:*/horseback": "-1.0"}
)
assert cnfg == config
def test_construct_overrides_map_from_tuple():
assert step.construct_override_map_from_tuple(
('a','b','c','d')
) == {'a':'b', 'c':'d'}
def test_step_config(tmp_path):
in_file = "tests/test_data/test_config.xml"
out_file = os.path.join(tmp_path, "test_config.xml")
step.step_config(
input_file=in_file,
output_file=out_file,
overrides=(
'matsim_source', 'test/ing',
'outputDirectory', 'testing',
'step', '999',
"modeParams:car/constant", "-1.0",
"scoringParameters:unknown/modeParams:bus/constant", "-1.0"
)
)
assert os.path.exists(out_file)
config = BaseConfig(out_file)
assert config['controler']['lastIteration'] == '999'
assert config['controler']['outputDirectory'] == 'testing'
assert config['network']['inputNetworkFile'] == 'test/ing/network.xml'
assert config['plans']['inputPlansFile'] == 'test/ing/population.xml.gz'
assert config['plans']['inputPersonAttributesFile'] == 'test/ing/population_attributes.xml.gz'
assert config['transit']['transitScheduleFile'] == 'test/ing/schedule-merged.xml'
assert config['transit']['vehiclesFile'] == 'test/ing/vehicles.xml'
assert config['transit']['transitLinesAttributesFile'] == 'null'
assert config['planCalcScore']['scoringParameters:default']['modeParams:car']["constant"] == "-1.0"
assert config['planCalcScore']['scoringParameters:unknown']['modeParams:car']["constant"] == "-1.0"
assert config['planCalcScore']['scoringParameters:unknown']['modeParams:bus']["constant"] == "-1.0"
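
# Editor's note (hedged): as the tests above show, overrides are passed as a
# flat tuple of alternating key/value pairs; nested MATSim parameters are
# addressed with ':'-qualified names and '/'-separated paths, e.g.
#   ('step', '999', 'modeParams:car/constant', '-1.0')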

# File: lambda_functions.py (repo: intirix/serverless-secrets-manager, license: Apache-2.0)
#!/usr/bin/python
import system
import db
import client
import server
import logging
import json
import base64
import os
from aws_xray_sdk.core import patch_all
if "AWS_REGION" in os.environ:
patch_all()
class LambdaCommon:
def __init__(self, ddb_client=None):
self.log = logging.getLogger("Lambda")
self.system = system.System()
userTable = "secrets-users"
if "USERS_TABLE" in os.environ:
userTable = os.environ["USERS_TABLE"]
secretsTable = "secrets-secrets"
if "SECRETS_TABLE" in os.environ:
secretsTable = os.environ["SECRETS_TABLE"]
self.db = db.CacheDB(db.DynamoDB(userTable, secretsTable, ddb_client))
self.system.setDB(self.db)
self.system.init()
self.client = client.Client(client.ClientSystemInterface(self.system))
self.server = server.Server(self.system)
self.resp = None
self.ctx = None
self.mockUser = None
if "MOCK_USER" in os.environ and len(os.environ["MOCK_USER"]) > 0:
self.mockUser = os.environ["MOCK_USER"]
def _response401(self):
self.resp = {"statusCode": 401}
def authenticate(self, event):
if self.mockUser != None:
self.ctx = self.server.mockAuthentication(self.mockUser)
return
if (
event == None
or not "headers" in event
or event["headers"] == None
or not "Authorization" in event["headers"]
):
self._response401()
return
self.ctx = self.server.validateAuthenticationHeader(
event["headers"]["Authorization"]
)
if self.ctx == None:
self._response401()
return
def getResponse(self):
return self.resp
def get_body(event):
if not "body" in event:
return None
if event["body"] == None:
return None
if "isBase64Encoded" in event and event["isBase64Encoded"] == True:
return base64.b64decode(event["body"])
return event["body"]
def matches(event, meth, path):
log = logging.getLogger("Lambda")
if event == None:
return False
if not "httpMethod" in event or meth != event["httpMethod"]:
return False
if "requestContext" in event and "resourcePath" in event["requestContext"]:
if path == event["requestContext"]["resourcePath"]:
log.info("Matched " + meth + " to " + path)
return True
return False
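
# Editor's note (hedged): `matches` and `single_func` below dispatch on the
# API Gateway proxy event shape, roughly:
#   {
#       "httpMethod": "GET",
#       "headers": {"Authorization": "Basic ..."},
#       "requestContext": {"resourcePath": "/v1/users"},
#       "pathParameters": {"username": "alice"},
#   }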
_singleton = None
def get_lambda_common():
global _singleton
if _singleton is None:
_singleton = LambdaCommon()
return _singleton
def single_func(event, context):
# print(json.dumps(event,indent=2))
if matches(event, "GET", "/v1/users"):
return list_users(event, context)
if matches(event, "GET", "/v1/users/{username}"):
return get_user(event, context)
if matches(event, "PUT", "/v1/users/{username}"):
return update_user(event, context)
if matches(event, "POST", "/v1/users/{username}"):
return create_user(event, context)
if matches(event, "GET", "/v1/users/{username}/keys/public"):
return get_user_public_key(event, context)
if matches(event, "PUT", "/v1/users/{username}/keys/public"):
return set_user_public_key(event, context)
if matches(event, "POST", "/v1/users/{username}/keys/public"):
return set_user_public_key(event, context)
if matches(event, "POST", "/v1/users/{username}/keys"):
return generate_user_keys(event, context)
if matches(event, "GET", "/v1/users/{username}/keys/private/encrypted"):
return get_user_private_key_encrypted(event, context)
if matches(event, "PUT", "/v1/users/{username}/keys/private/encrypted"):
return set_user_private_key_encrypted(event, context)
if matches(event, "POST", "/v1/users/{username}/keys/private/encrypted"):
return set_user_private_key_encrypted(event, context)
if matches(event, "GET", "/v1/users/{username}/secrets"):
return get_user_secrets(event, context)
if matches(event, "GET", "/v1/secrets/{sid}"):
return get_secret(event, context)
if matches(event, "PUT", "/v1/secrets/{sid}"):
return update_secret(event, context)
if matches(event, "POST", "/v1/secrets"):
return add_secret(event, context)
if matches(event, "PUT", "/v1/secrets/{sid}/users/{username}"):
return share_secret(event, context)
if matches(event, "DELETE", "/v1/secrets/{sid}/users/{username}"):
return unshare_secret(event, context)
print("Did not match the event")
return {"statusCode": 404}
def list_users(event, context):
obj = get_lambda_common()
obj.authenticate(event)
if obj.getResponse() != None:
return obj.getResponse()
try:
return {
"statusCode": 200,
"body": json.dumps(obj.server.listUsers(obj.ctx), indent=2),
}
except server.AccessDeniedException:
obj.log.exception("Access Denied")
return {"statusCode": 403}
except:
obj.log.exception("Error")
return {"statusCode": 500}
return {"statusCode": 404}
def get_user(event, context):
obj = get_lambda_common()
obj.authenticate(event)
if obj.getResponse() != None:
return obj.getResponse()
try:
user = event["pathParameters"]["username"]
return {
"statusCode": 200,
"body": json.dumps(obj.server.getUser(obj.ctx, user), indent=2),
}
except server.AccessDeniedException:
obj.log.exception("Access Denied")
return {"statusCode": 403}
except:
obj.log.exception("Error")
return {"statusCode": 500}
return {"statusCode": 404}
def update_user(event, context):
obj = get_lambda_common()
obj.authenticate(event)
if obj.getResponse() != None:
return obj.getResponse()
try:
user = event["pathParameters"]["username"]
body = get_body(event)
if obj.server.updateUser(obj.ctx, user, body):
return {
"statusCode": 200,
"body": json.dumps(obj.server.getUser(obj.ctx, user), indent=2),
}
return {"statusCode": 404}
except server.AccessDeniedException:
obj.log.exception("Access Denied")
return {"statusCode": 403}
except:
obj.log.exception("Error")
return {"statusCode": 500}
return {"statusCode": 404}
def set_user_public_key(event, context):
obj = get_lambda_common()
obj.authenticate(event)
if obj.getResponse() != None:
return obj.getResponse()
try:
user = event["pathParameters"]["username"]
body = get_body(event)
keyType = obj.server.getPublicKeyType(body)
if obj.server.setUserPublicKey(obj.ctx, user, body, keyType):
return {
"statusCode": 200,
"body": json.dumps(obj.server.getUser(obj.ctx, user), indent=2),
}
return {"statusCode": 404}
except server.AccessDeniedException:
obj.log.exception("Access Denied")
return {"statusCode": 403}
except:
obj.log.exception("Error")
return {"statusCode": 500}
return {"statusCode": 404}
def create_user(event, context):
obj = get_lambda_common()
obj.authenticate(event)
if obj.getResponse() != None:
return obj.getResponse()
try:
user = event["pathParameters"]["username"]
body = get_body(event)
        if obj.server.addUser(obj.ctx, user, body):
            return {
                "statusCode": 201,
                "body": json.dumps(obj.server.getUser(obj.ctx, user), indent=2),
            }
return {"statusCode": 404}
except server.AccessDeniedException:
obj.log.exception("Access Denied")
return {"statusCode": 403}
except:
obj.log.exception("Error")
return {"statusCode": 500}
return {"statusCode": 404}
def get_user_public_key(event, context):
obj = get_lambda_common()
obj.authenticate(event)
if obj.getResponse() != None:
return obj.getResponse()
try:
user = event["pathParameters"]["username"]
pem = obj.server.getUserPublicKey(obj.ctx, user)
if pem == None:
return {"statusCode": 404}
return {
"statusCode": 200,
"body": pem,
"headers": {"Content-Type": "application/x-pem-file"},
}
except server.AccessDeniedException:
obj.log.exception("Access Denied")
return {"statusCode": 403}
except:
obj.log.exception("Error")
return {"statusCode": 500}
return {"statusCode": 404}
def get_user_private_key_encrypted(event, context):
obj = get_lambda_common()
obj.authenticate(event)
if obj.getResponse() != None:
return obj.getResponse()
try:
user = event["pathParameters"]["username"]
data = obj.server.getUserEncryptedPrivateKey(obj.ctx, user)
if isinstance(data, str):
data = data.encode("UTF-8")
b64 = base64.b64encode(data).decode("UTF-8")
return {
"statusCode": 200,
"body": b64,
"headers": {"Content-Type": "application/octet-stream"},
"isBase64Encoded": True,
}
except server.AccessDeniedException:
obj.log.exception("Access Denied")
return {"statusCode": 403}
except:
obj.log.exception("Error")
return {"statusCode": 500}
return {"statusCode": 404}
def generate_user_keys(event, context):
obj = get_lambda_common()
obj.authenticate(event)
if obj.getResponse() != None:
return obj.getResponse()
try:
user = event["pathParameters"]["username"]
body = get_body(event)
if body is None:
obj.log.exception("Password not provided in body")
return {"statusCode": 400}
body = body.strip()
generate = False
if (
"queryStringParameters" in event
and "generate" in event["queryStringParameters"]
):
generate = "true" == event["queryStringParameters"]["generate"]
if generate:
pem = obj.server.generateKeysForUser(obj.ctx, user, body)
return {
"statusCode": 200,
"body": pem,
"headers": {"Content-Type": "application/x-pem-file"},
}
return {"statusCode": 404}
except server.AccessDeniedException:
obj.log.exception("Access Denied")
return {"statusCode": 403}
except:
obj.log.exception("Error")
return {"statusCode": 500}
return {"statusCode": 404}
def set_user_private_key_encrypted(event, context):
obj = get_lambda_common()
obj.authenticate(event)
if obj.getResponse() != None:
return obj.getResponse()
try:
user = event["pathParameters"]["username"]
body = get_body(event)
if obj.server.setUserEncryptedPrivateKey(obj.ctx, user, body):
return {
"statusCode": 200,
"body": json.dumps(obj.server.getUser(obj.ctx, user), indent=2),
}
return {"statusCode": 404}
except server.AccessDeniedException:
obj.log.exception("Access Denied")
return {"statusCode": 403}
except:
obj.log.exception("Error")
return {"statusCode": 500}
return {"statusCode": 404}
def get_user_secrets(event, context):
obj = get_lambda_common()
obj.authenticate(event)
if obj.getResponse() != None:
return obj.getResponse()
try:
user = event["pathParameters"]["username"]
return {
"statusCode": 200,
"body": json.dumps(obj.server.getMySecrets(obj.ctx, user), indent=2),
}
except server.AccessDeniedException:
obj.log.exception("Access Denied")
return {"statusCode": 403}
except:
obj.log.exception("Error")
return {"statusCode": 500}
return {"statusCode": 404}
def get_secret(event, context):
obj = get_lambda_common()
obj.authenticate(event)
if obj.getResponse() != None:
return obj.getResponse()
try:
sid = event["pathParameters"]["sid"]
return {
"statusCode": 200,
"body": json.dumps(obj.server.getSecret(obj.ctx, sid), indent=2),
}
except server.AccessDeniedException:
obj.log.exception("Access Denied")
return {"statusCode": 403}
except:
obj.log.exception("Error")
return {"statusCode": 500}
return {"statusCode": 404}
def update_secret(event, context):
obj = get_lambda_common()
obj.authenticate(event)
if obj.getResponse() != None:
return obj.getResponse()
try:
sid = event["pathParameters"]["sid"]
body = get_body(event)
if obj.server.updateSecret(obj.ctx, sid, body):
return {
"statusCode": 200,
"body": json.dumps(obj.server.getSecret(obj.ctx, sid), indent=2),
}
return {"statusCode": 404}
except server.AccessDeniedException:
obj.log.exception("Access Denied")
return {"statusCode": 403}
except:
obj.log.exception("Error")
return {"statusCode": 500}
return {"statusCode": 404}
def add_secret(event, context):
obj = get_lambda_common()
obj.authenticate(event)
if obj.getResponse() != None:
return obj.getResponse()
try:
body = get_body(event)
sid = obj.server.addSecret(obj.ctx, body)
return {
"statusCode": 201,
"body": json.dumps(obj.server.getSecret(obj.ctx, sid), indent=2),
}
except server.AccessDeniedException:
obj.log.exception("Access Denied")
return {"statusCode": 403}
except:
obj.log.exception("Error")
return {"statusCode": 500}
return {"statusCode": 404}
def share_secret(event, context):
obj = get_lambda_common()
obj.authenticate(event)
if obj.getResponse() != None:
return obj.getResponse()
try:
sid = event["pathParameters"]["sid"]
user = event["pathParameters"]["username"]
body = get_body(event)
ret = obj.server.shareSecret(obj.ctx, sid, user, body)
return {"statusCode": 200, "body": json.dumps(ret, indent=2)}
except server.AccessDeniedException:
obj.log.exception("Access Denied")
return {"statusCode": 403}
except:
obj.log.exception("Error")
return {"statusCode": 500}
return {"statusCode": 404}
def unshare_secret(event, context):
obj = get_lambda_common()
obj.authenticate(event)
if obj.getResponse() != None:
return obj.getResponse()
try:
sid = event["pathParameters"]["sid"]
user = event["pathParameters"]["username"]
ret = obj.server.unshareSecret(obj.ctx, sid, user)
return {"statusCode": 200, "body": json.dumps(ret, indent=2)}
except server.AccessDeniedException:
obj.log.exception("Access Denied")
return {"statusCode": 403}
except:
obj.log.exception("Error")
return {"statusCode": 500}
return {"statusCode": 404}
FORMAT = "%(asctime)-15s %(message)s"
logging.basicConfig(format=FORMAT)
| 29.622857 | 84 | 0.600244 | 1,598 | 0.102752 | 0 | 0 | 0 | 0 | 0 | 0 | 3,011 | 0.193609 |
08418a8370fcf775a2fd7e29466ecc715efe0e4f | 2,575 | py | Python | tests/utils_test.py | asrashley/dash-live | 1ffbc57896e4e46855a42af6ef79a1865ebfce55 | [
"Apache-2.0"
] | 2 | 2019-11-02T06:26:29.000Z | 2020-05-15T16:54:20.000Z | tests/utils_test.py | asrashley/dash-live | 1ffbc57896e4e46855a42af6ef79a1865ebfce55 | [
"Apache-2.0"
] | 1 | 2020-01-20T17:20:54.000Z | 2020-01-21T08:38:30.000Z | tests/utils_test.py | asrashley/dash-live | 1ffbc57896e4e46855a42af6ef79a1865ebfce55 | [
"Apache-2.0"
] | null | null | null |
try:
import cStringIO as StringIO
except ImportError:
import StringIO
import datetime
import os
import sys
import unittest
_src = os.path.join(os.path.dirname(__file__),"..", "src")
if not _src in sys.path:
sys.path.append(_src)
import utils
class DateTimeTests(unittest.TestCase):
def test_isoformat(self):
tests = [
('2009-02-27T10:00:00Z', datetime.datetime(2009,2,27,10,0,0, tzinfo=utils.UTC()) ),
('2013-07-25T09:57:31Z', datetime.datetime(2013,7,25,9,57,31, tzinfo=utils.UTC()) ),
('PT14H00M00S', datetime.timedelta(hours=14) ),
('PT26H00M00S', datetime.timedelta(hours=26) ),
('PT14H', datetime.timedelta(hours=14) ),
('PT1M00S', datetime.timedelta(minutes=1) ),
('PT2M', datetime.timedelta(minutes=2) ),
('PT1M0.00S', datetime.timedelta(minutes=1) ),
('PT45S', datetime.timedelta(seconds=45) ),
('PT4.5S', datetime.timedelta(seconds=4.5) ),
('PT01:45:19', datetime.timedelta(hours=1,minutes=45,seconds=19) ),
]
for test in tests:
tc = utils.from_isodatetime(test[0])
self.failUnlessEqual(tc,test[1])
date_str = "2013-07-25T09:57:31Z"
date_val = utils.from_isodatetime(date_str)
# Don't check for the 'Z' because Python doesn't put the timezone in the isoformat string
isoformat = date_val.isoformat().replace('+00:00','Z')
self.assertEqual(isoformat,date_str)
date_str = "2013-07-25T09:57:31.123Z"
date_val = utils.from_isodatetime(date_str)
self.assertEqual(date_val.microsecond, 123000)
self.assertTrue(date_val.isoformat().startswith(date_str[:-1]))
class BufferedReaderTests(unittest.TestCase):
def test_buffer_reader(self):
r = bytearray('t'*65536)
#mem = memoryview(r)
for i in range(len(r)):
r[i] = i & 0xFF
br = utils.BufferedReader(StringIO.StringIO(r), buffersize=1024)
p = br.peek(8)
self.assertTrue(len(p) >= 8)
for i in range(8):
self.assertEqual(ord(p[i]), i)
self.assertEqual(br.tell(), 0)
p = br.read(8)
self.assertEqual(br.tell(), 8)
self.assertEqual(len(p), 8)
for i in range(8):
self.assertEqual(ord(p[i]), i)
p = br.read(8)
self.assertEqual(br.tell(), 16)
self.assertEqual(len(p), 8)
for i in range(8):
self.assertEqual(ord(p[i]), i+8)
if __name__ == "__main__":
unittest.main()
| 35.763889 | 97 | 0.597282 | 2,256 | 0.876117 | 0 | 0 | 0 | 0 | 0 | 0 | 320 | 0.124272 |
08432f03ae4911f91726c50919d96811876b71c7 | 9,364 | py | Python | midv500/download_dataset.py | fcakyon/midv500-to-coco | 2f1cd74e0bb8da2301a96e3fb0cd9f17005ed08c | [
"MIT"
] | 39 | 2020-05-15T17:34:32.000Z | 2022-03-25T08:22:47.000Z | midv500/download_dataset.py | fcakyon/midv500-to-coco | 2f1cd74e0bb8da2301a96e3fb0cd9f17005ed08c | [
"MIT"
] | 1 | 2020-08-04T09:04:06.000Z | 2020-08-19T12:50:15.000Z | midv500/download_dataset.py | fcakyon/midv500-to-coco | 2f1cd74e0bb8da2301a96e3fb0cd9f17005ed08c | [
"MIT"
] | 6 | 2020-04-23T19:40:16.000Z | 2021-12-19T17:52:42.000Z | import os
import argparse
from midv500.utils import download, unzip
midv500_links = [
"ftp://smartengines.com/midv-500/dataset/01_alb_id.zip",
"ftp://smartengines.com/midv-500/dataset/02_aut_drvlic_new.zip",
"ftp://smartengines.com/midv-500/dataset/03_aut_id_old.zip",
"ftp://smartengines.com/midv-500/dataset/04_aut_id.zip",
"ftp://smartengines.com/midv-500/dataset/05_aze_passport.zip",
"ftp://smartengines.com/midv-500/dataset/06_bra_passport.zip",
"ftp://smartengines.com/midv-500/dataset/07_chl_id.zip",
"ftp://smartengines.com/midv-500/dataset/08_chn_homereturn.zip",
"ftp://smartengines.com/midv-500/dataset/09_chn_id.zip",
"ftp://smartengines.com/midv-500/dataset/10_cze_id.zip",
"ftp://smartengines.com/midv-500/dataset/11_cze_passport.zip",
"ftp://smartengines.com/midv-500/dataset/12_deu_drvlic_new.zip",
"ftp://smartengines.com/midv-500/dataset/13_deu_drvlic_old.zip",
"ftp://smartengines.com/midv-500/dataset/14_deu_id_new.zip",
"ftp://smartengines.com/midv-500/dataset/15_deu_id_old.zip",
"ftp://smartengines.com/midv-500/dataset/16_deu_passport_new.zip",
"ftp://smartengines.com/midv-500/dataset/17_deu_passport_old.zip",
"ftp://smartengines.com/midv-500/dataset/18_dza_passport.zip",
"ftp://smartengines.com/midv-500/dataset/19_esp_drvlic.zip",
"ftp://smartengines.com/midv-500/dataset/20_esp_id_new.zip",
"ftp://smartengines.com/midv-500/dataset/21_esp_id_old.zip",
"ftp://smartengines.com/midv-500/dataset/22_est_id.zip",
"ftp://smartengines.com/midv-500/dataset/23_fin_drvlic.zip",
"ftp://smartengines.com/midv-500/dataset/24_fin_id.zip",
"ftp://smartengines.com/midv-500/dataset/25_grc_passport.zip",
"ftp://smartengines.com/midv-500/dataset/26_hrv_drvlic.zip",
"ftp://smartengines.com/midv-500/dataset/27_hrv_passport.zip",
"ftp://smartengines.com/midv-500/dataset/28_hun_passport.zip",
"ftp://smartengines.com/midv-500/dataset/29_irn_drvlic.zip",
"ftp://smartengines.com/midv-500/dataset/30_ita_drvlic.zip",
"ftp://smartengines.com/midv-500/dataset/31_jpn_drvlic.zip",
"ftp://smartengines.com/midv-500/dataset/32_lva_passport.zip",
"ftp://smartengines.com/midv-500/dataset/33_mac_id.zip",
"ftp://smartengines.com/midv-500/dataset/34_mda_passport.zip",
"ftp://smartengines.com/midv-500/dataset/35_nor_drvlic.zip",
"ftp://smartengines.com/midv-500/dataset/36_pol_drvlic.zip",
"ftp://smartengines.com/midv-500/dataset/37_prt_id.zip",
"ftp://smartengines.com/midv-500/dataset/38_rou_drvlic.zip",
"ftp://smartengines.com/midv-500/dataset/39_rus_internalpassport.zip",
"ftp://smartengines.com/midv-500/dataset/40_srb_id.zip",
"ftp://smartengines.com/midv-500/dataset/41_srb_passport.zip",
"ftp://smartengines.com/midv-500/dataset/42_svk_id.zip",
"ftp://smartengines.com/midv-500/dataset/43_tur_id.zip",
"ftp://smartengines.com/midv-500/dataset/44_ukr_id.zip",
"ftp://smartengines.com/midv-500/dataset/45_ukr_passport.zip",
"ftp://smartengines.com/midv-500/dataset/46_ury_passport.zip",
"ftp://smartengines.com/midv-500/dataset/47_usa_bordercrossing.zip",
"ftp://smartengines.com/midv-500/dataset/48_usa_passportcard.zip",
"ftp://smartengines.com/midv-500/dataset/49_usa_ssn82.zip",
"ftp://smartengines.com/midv-500/dataset/50_xpo_id.zip",
]
midv2019_links = [
"ftp://smartengines.com/midv-500/extra/midv-2019/dataset/01_alb_id.zip",
"ftp://smartengines.com/midv-500/extra/midv-2019/dataset/02_aut_drvlic_new.zip",
"ftp://smartengines.com/midv-500/extra/midv-2019/dataset/03_aut_id_old.zip",
"ftp://smartengines.com/midv-500/extra/midv-2019/dataset/04_aut_id.zip",
"ftp://smartengines.com/midv-500/extra/midv-2019/dataset/05_aze_passport.zip",
"ftp://smartengines.com/midv-500/extra/midv-2019/dataset/06_bra_passport.zip",
"ftp://smartengines.com/midv-500/extra/midv-2019/dataset/07_chl_id.zip",
"ftp://smartengines.com/midv-500/extra/midv-2019/dataset/08_chn_homereturn.zip",
"ftp://smartengines.com/midv-500/extra/midv-2019/dataset/09_chn_id.zip",
"ftp://smartengines.com/midv-500/extra/midv-2019/dataset/10_cze_id.zip",
"ftp://smartengines.com/midv-500/extra/midv-2019/dataset/11_cze_passport.zip",
"ftp://smartengines.com/midv-500/extra/midv-2019/dataset/12_deu_drvlic_new.zip",
"ftp://smartengines.com/midv-500/extra/midv-2019/dataset/13_deu_drvlic_old.zip",
"ftp://smartengines.com/midv-500/extra/midv-2019/dataset/14_deu_id_new.zip",
"ftp://smartengines.com/midv-500/extra/midv-2019/dataset/15_deu_id_old.zip",
"ftp://smartengines.com/midv-500/extra/midv-2019/dataset/16_deu_passport_new.zip",
"ftp://smartengines.com/midv-500/extra/midv-2019/dataset/17_deu_passport_old.zip",
"ftp://smartengines.com/midv-500/extra/midv-2019/dataset/18_dza_passport.zip",
"ftp://smartengines.com/midv-500/extra/midv-2019/dataset/19_esp_drvlic.zip",
"ftp://smartengines.com/midv-500/extra/midv-2019/dataset/20_esp_id_new.zip",
"ftp://smartengines.com/midv-500/extra/midv-2019/dataset/21_esp_id_old.zip",
"ftp://smartengines.com/midv-500/extra/midv-2019/dataset/22_est_id.zip",
"ftp://smartengines.com/midv-500/extra/midv-2019/dataset/23_fin_drvlic.zip",
"ftp://smartengines.com/midv-500/extra/midv-2019/dataset/24_fin_id.zip",
"ftp://smartengines.com/midv-500/extra/midv-2019/dataset/25_grc_passport.zip",
"ftp://smartengines.com/midv-500/extra/midv-2019/dataset/26_hrv_drvlic.zip",
"ftp://smartengines.com/midv-500/extra/midv-2019/dataset/27_hrv_passport.zip",
"ftp://smartengines.com/midv-500/extra/midv-2019/dataset/28_hun_passport.zip",
"ftp://smartengines.com/midv-500/extra/midv-2019/dataset/29_irn_drvlic.zip",
"ftp://smartengines.com/midv-500/extra/midv-2019/dataset/30_ita_drvlic.zip",
"ftp://smartengines.com/midv-500/extra/midv-2019/dataset/31_jpn_drvlic.zip",
"ftp://smartengines.com/midv-500/extra/midv-2019/dataset/32_lva_passport.zip",
"ftp://smartengines.com/midv-500/extra/midv-2019/dataset/33_mac_id.zip",
"ftp://smartengines.com/midv-500/extra/midv-2019/dataset/34_mda_passport.zip",
"ftp://smartengines.com/midv-500/extra/midv-2019/dataset/35_nor_drvlic.zip",
"ftp://smartengines.com/midv-500/extra/midv-2019/dataset/36_pol_drvlic.zip",
"ftp://smartengines.com/midv-500/extra/midv-2019/dataset/37_prt_id.zip",
"ftp://smartengines.com/midv-500/extra/midv-2019/dataset/38_rou_drvlic.zip",
"ftp://smartengines.com/midv-500/extra/midv-2019/dataset/39_rus_internalpassport.zip",
"ftp://smartengines.com/midv-500/extra/midv-2019/dataset/40_srb_id.zip",
"ftp://smartengines.com/midv-500/extra/midv-2019/dataset/41_srb_passport.zip",
"ftp://smartengines.com/midv-500/extra/midv-2019/dataset/42_svk_id.zip",
"ftp://smartengines.com/midv-500/extra/midv-2019/dataset/43_tur_id.zip",
"ftp://smartengines.com/midv-500/extra/midv-2019/dataset/44_ukr_id.zip",
"ftp://smartengines.com/midv-500/extra/midv-2019/dataset/45_ukr_passport.zip",
"ftp://smartengines.com/midv-500/extra/midv-2019/dataset/46_ury_passport.zip",
"ftp://smartengines.com/midv-500/extra/midv-2019/dataset/47_usa_bordercrossing.zip",
"ftp://smartengines.com/midv-500/extra/midv-2019/dataset/48_usa_passportcard.zip",
"ftp://smartengines.com/midv-500/extra/midv-2019/dataset/49_usa_ssn82.zip",
"ftp://smartengines.com/midv-500/extra/midv-2019/dataset/50_xpo_id.zip",
]
def download_dataset(download_dir: str, dataset_name: str = "midv500"):
"""
This script downloads the MIDV-500 dataset with extra files and unzips the folders.
dataset_name: str
"midv500": https://doi.org/10.18287/2412-6179-2019-43-5-818-824
"midv2019": https://doi.org/10.1117/12.2558438
"all": midv500 + midv2019
"""
if dataset_name == "midv500":
links_set = {
"midv500": midv500_links,
}
elif dataset_name == "midv2019":
links_set = {
"midv2019": midv2019_links,
}
elif dataset_name == "all":
links_set = {
"midv500": midv500_links,
"midv2019": midv2019_links,
}
else:
        raise Exception('Invalid dataset_name, try one of "midv500", "midv2019" or "all".')
for k, v in links_set.items():
dst = os.path.join(download_dir, k)
for link in v:
print("--------------------------------------------------------------")
# download zip file
link = link.replace("\\", "/") # for windows
filename = link.split("/")[-1]
print("\nDownloading:", filename)
download(link, dst)
print("Downloaded:", filename)
# unzip zip file
print("Unzipping:", filename)
zip_path = os.path.join(dst, filename)
unzip(zip_path, dst)
print("Unzipped:", filename.replace(".zip", ""))
# remove zip file
os.remove(zip_path)
if __name__ == "__main__":
# construct the argument parser
ap = argparse.ArgumentParser()
# add the arguments to the parser
ap.add_argument(
"download_dir",
default="data/",
help="Directory for MIDV-500 dataset to be downloaded.",
)
args = vars(ap.parse_args())
# download dataset
download_dataset(args["download_dir"])
| 54.127168 | 90 | 0.705788 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7,533 | 0.804464 |
0845053b64f5370f1498b8e4729e90a827f0c839 | 6,329 | py | Python | erpnext_taxjar/api.py | DigiThinkIT/erpnext_taxjar | 5313dbdd931745e9655d3f5fd53c830abb0d7ee7 | [
"MIT"
] | null | null | null | erpnext_taxjar/api.py | DigiThinkIT/erpnext_taxjar | 5313dbdd931745e9655d3f5fd53c830abb0d7ee7 | [
"MIT"
] | 8 | 2017-07-01T11:13:14.000Z | 2020-11-19T13:26:29.000Z | erpnext_taxjar/api.py | DigiThinkIT/erpnext_taxjar | 5313dbdd931745e9655d3f5fd53c830abb0d7ee7 | [
"MIT"
] | 13 | 2017-06-30T15:47:00.000Z | 2022-02-22T16:24:41.000Z | import traceback
import pycountry
import taxjar
import frappe
from erpnext import get_default_company
from frappe import _
from frappe.contacts.doctype.address.address import get_company_address
TAX_ACCOUNT_HEAD = frappe.db.get_single_value("TaxJar Settings", "tax_account_head")
SHIP_ACCOUNT_HEAD = frappe.db.get_single_value("TaxJar Settings", "shipping_account_head")
def create_transaction(doc, method):
    # Allow skipping creation of transactions in dev environments: if
    # taxjar_create_transactions isn't defined in site_config, we assume we DO
    # NOT want to create transactions (the flag is expected to be set in
    # production).
if not frappe.local.conf.get("taxjar_create_transactions", 0):
return
sales_tax = 0
for tax in doc.taxes:
if tax.account_head == TAX_ACCOUNT_HEAD:
sales_tax = tax.tax_amount
if not sales_tax:
return
tax_dict = get_tax_data(doc)
if not tax_dict:
return
tax_dict['transaction_id'] = doc.name
tax_dict['transaction_date'] = frappe.utils.today()
tax_dict['sales_tax'] = sales_tax
tax_dict['amount'] = doc.total + tax_dict['shipping']
client = get_client()
try:
client.create_order(tax_dict)
except taxjar.exceptions.TaxJarResponseError as err:
frappe.throw(_(sanitize_error_response(err)))
except Exception as ex:
print(traceback.format_exc(ex))
def delete_transaction(doc, method):
client = get_client()
client.delete_order(doc.name)
def get_client():
taxjar_settings = frappe.get_single("TaxJar Settings")
if not taxjar_settings.api_key:
frappe.throw(_("The TaxJar API key is missing."), frappe.AuthenticationError)
api_key = taxjar_settings.get_password("api_key")
return taxjar.Client(api_key=api_key)
def get_shipping_address(doc):
company_address = get_company_address(get_default_company()).company_address
company_address = frappe.get_doc("Address", company_address)
shipping_address = None
if company_address:
if doc.shipping_address_name:
shipping_address = frappe.get_doc("Address", doc.shipping_address_name)
else:
shipping_address = company_address
return shipping_address
def get_tax_data(doc):
shipping_address = get_shipping_address(doc)
if not shipping_address:
return
if shipping_address.country:
country_code = frappe.db.get_value("Country", shipping_address.country, "code")
country_code = country_code.upper()
else:
frappe.throw(_("Please select a country!"))
if country_code != "US":
return
shipping = 0
for tax in doc.taxes:
if tax.account_head == SHIP_ACCOUNT_HEAD:
shipping += tax.tax_amount
shipping_state = shipping_address.get("state")
if shipping_state is not None:
# Handle shipments to military addresses
if shipping_state.upper() in ("AE", "AA", "AP"):
frappe.throw(_("""For shipping to overseas US bases, please
contact us with your order details."""))
else:
shipping_state = validate_state(shipping_address)
tax_dict = {
'to_country': country_code,
'to_zip': shipping_address.pincode,
'to_city': shipping_address.city,
'to_state': shipping_state,
'shipping': shipping,
'amount': doc.net_total
}
return tax_dict
def sanitize_error_response(response):
response = response.full_response.get("detail")
response = response.replace("_", " ")
sanitized_responses = {
"to zip": "Zipcode",
"to city": "City",
"to state": "State",
"to country": "Country"
}
for k, v in sanitized_responses.items():
response = response.replace(k, v)
return response
def set_sales_tax(doc, method):
if not doc.items:
return
# Allow skipping calculation of tax for dev environment
# if taxjar_calculate_tax isn't defined in site_config we assume
# we DO want to calculate tax all the time.
if not frappe.local.conf.get("taxjar_calculate_tax", 1):
return
if doc.exempt_from_sales_tax or frappe.db.get_value("Customer", doc.customer, "exempt_from_sales_tax"):
for tax in doc.taxes:
if tax.account_head == TAX_ACCOUNT_HEAD:
tax.tax_amount = 0
break
doc.run_method("calculate_taxes_and_totals")
return
tax_dict = get_tax_data(doc)
if not tax_dict:
# Remove existing tax rows if address is changed from a taxable state/country
setattr(doc, "taxes", [tax for tax in doc.taxes if tax.account_head != TAX_ACCOUNT_HEAD])
return
tax_data = validate_tax_request(tax_dict)
if tax_data is not None:
if not tax_data.amount_to_collect:
setattr(doc, "taxes", [tax for tax in doc.taxes if tax.account_head != TAX_ACCOUNT_HEAD])
elif tax_data.amount_to_collect > 0:
# Loop through tax rows for existing Sales Tax entry
# If none are found, add a row with the tax amount
for tax in doc.taxes:
if tax.account_head == TAX_ACCOUNT_HEAD:
tax.tax_amount = tax_data.amount_to_collect
doc.run_method("calculate_taxes_and_totals")
break
else:
doc.append("taxes", {
"charge_type": "Actual",
"description": "Sales Tax",
"account_head": TAX_ACCOUNT_HEAD,
"tax_amount": tax_data.amount_to_collect
})
doc.run_method("calculate_taxes_and_totals")
def validate_address(doc, address):
# Validate address using PyCountry
tax_dict = get_tax_data(doc)
if tax_dict:
# Validate address using TaxJar
validate_tax_request(tax_dict)
def validate_tax_request(tax_dict):
client = get_client()
try:
tax_data = client.tax_for_order(tax_dict)
except taxjar.exceptions.TaxJarResponseError as err:
frappe.throw(_(sanitize_error_response(err)))
else:
return tax_data
def validate_state(address):
country_code = frappe.db.get_value("Country", address.get("country"), "code")
error_message = _("""{} is not a valid state! Check for typos or enter the ISO code for your state.""".format(address.get("state")))
state = address.get("state").upper().strip()
# The max length for ISO state codes is 3, excluding the country code
if len(state) <= 3:
address_state = (country_code + "-" + state).upper() # PyCountry returns state code as {country_code}-{state-code} (e.g. US-FL)
states = pycountry.subdivisions.get(country_code=country_code.upper())
states = [pystate.code for pystate in states]
if address_state in states:
return state
frappe.throw(error_message)
else:
try:
lookup_state = pycountry.subdivisions.lookup(state)
except LookupError:
frappe.throw(error_message)
else:
return lookup_state.code.split('-')[1]
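# Minimal usage sketch (added for illustration; values are examples): pycountry
# supports both branches validate_state takes above:
#   pycountry.subdivisions.lookup("Florida").code                 # "US-FL"
#   [s.code for s in pycountry.subdivisions.get(country_code="US")]
#                                                  # ["US-AL", "US-AK", ...]
# so a short input like "FL" is checked against the ISO code list, while longer
# inputs go through the name lookup.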
| 26.931915 | 133 | 0.746721 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,690 | 0.267025 |
08453ede8c646dbf40688a3665092cf3d4f4e359 | 3,543 | py | Python | tests/lib_test.py | grundrauschen/center-points | 5a12f68ac012a0a2bf52d8a8381d0272e309ac18 | [
"MIT"
] | null | null | null | tests/lib_test.py | grundrauschen/center-points | 5a12f68ac012a0a2bf52d8a8381d0272e309ac18 | [
"MIT"
] | 2 | 2015-06-03T10:57:13.000Z | 2015-09-15T12:43:22.000Z | tests/lib_test.py | fu-berlin-swp-2014/center-points | 0fa523314a3168d4d229b6f61d0d05d314a8b35a | [
"MIT"
] | null | null | null | import unittest
import numpy as np
import numpy.testing as nptest
import centerpoints.lib as lib
class TestLibrary(unittest.TestCase):
def setUp(self):
# { dimension -> points }
self.d_plus_2_points = {}
for d in [3, 5, 10, 100]:
# we need d+2 points, first take all bases
bases = np.eye(d)
self.d_plus_2_points[d] = \
np.concatenate((bases,
[bases[0] + bases[1],
bases[1] + bases[2]]))
def test_find_alphas(self):
for points in self.d_plus_2_points.values():
alphas = lib._find_alphas(points)
self.assertEqual(type(alphas), type(np.array([])))
self.assertEqual(len(alphas), len(points))
greater_idx = alphas > 0
smaller_idx = ~ greater_idx
smaller_sum = np.sum(alphas[smaller_idx])
greater_sum = np.sum(alphas[greater_idx])
# make sure it is not the trivial solution
self.assertNotAlmostEqual(smaller_sum, 0)
self.assertAlmostEqual(greater_sum + smaller_sum, 0)
def test_radon_point(self):
for points in self.d_plus_2_points.values():
alphas = lib._find_alphas(points)
radon_tuple = lib.radon_point(points)
self.assertEqual(type(radon_tuple), np.ndarray)
radon = np.asmatrix(radon_tuple)
greater_idx = alphas > 0
greater_alphas = np.asmatrix(alphas[greater_idx])
greater_points = np.asmatrix(points[greater_idx])
sum_greater = np.sum(greater_alphas)
nptest.assert_allclose(radon / sum_greater, radon * sum_greater)
nptest.assert_allclose(radon / sum_greater,
greater_alphas * greater_points)
smaller_alphas = np.asmatrix(alphas[~ greater_idx])
smaller_points = np.asmatrix(points[~ greater_idx])
nptest.assert_allclose(smaller_alphas * smaller_points,
radon / np.sum(smaller_alphas),
atol=1e-15)
def test_solve_homogeneous(self):
M = np.array([[1, 0, 0, 0, 2],
[0, 0, 3, 0, 0],
[0, 0, 0, 0, 0],
[0, 4, 0, 0, 0]])
null = lib.solve_homogeneous(M)
nptest.assert_allclose(np.dot(M, null), np.zeros(4), atol=1e-10)
def test_null_space(self):
# simple example with a one dimensional null space ()
a = np.array([[2, 3, 5], [-4, 2, 3], [0, 0, 0]])
null_space_a = lib.null_space(a)
        x = np.dot(a, null_space_a)
        nptest.assert_allclose(x, np.zeros_like(x), atol=1e-10)
nptest.assert_allclose(np.dot(a, (2*null_space_a)),
np.zeros_like(null_space_a),
atol=1e-10)
nptest.assert_allclose(np.dot(a, (10*null_space_a)),
np.zeros_like(null_space_a),
atol=1e-10)
# advanced example with a 3 dimensional null space ()
b = np.array([[1, 1, 1, 2, 3],
[1, 0, 1, 2, 3],
[1, 0, 1, 2, 3],
[1, 0, 1, 2, 3],
[1, 0, 1, 2, 3]])
null_space_b = lib.null_space(b)
null_vec = 2*null_space_b[:, 0] + 4*null_space_b[:, 1]
nptest.assert_allclose(np.dot(b, null_vec),
np.zeros_like(null_vec),
atol=1e-10)
| 36.90625 | 76 | 0.519616 | 3,441 | 0.971211 | 0 | 0 | 0 | 0 | 0 | 0 | 215 | 0.060683 |
084547589496d6e3bddafc72879279f994ed30e1 | 711 | py | Python | genome-experimentation/cleaning-genome-data.py | shivamsyal/summer21 | 68cdcae1524e720066e57baa190f15477b69515a | [
"MIT"
] | null | null | null | genome-experimentation/cleaning-genome-data.py | shivamsyal/summer21 | 68cdcae1524e720066e57baa190f15477b69515a | [
"MIT"
] | null | null | null | genome-experimentation/cleaning-genome-data.py | shivamsyal/summer21 | 68cdcae1524e720066e57baa190f15477b69515a | [
"MIT"
] | 2 | 2022-01-10T18:16:18.000Z | 2022-03-20T01:17:28.000Z | # test comment
import os
filename = input("File to format: ")
os.system("gunzip "+filename)
n = int(input("What number genome is this? "))
os.system("mv "+filename[:-3]+" genome"+str(n)+".fna")
original = "genome"+str(n)+".fna"
copy = "genome"+str(n)+"_copy.fna"
filtered = "genome"+str(n)+"_filtered.fna"
rem = ['>']
with open(original) as old, open(copy,'w') as new:
for line in old:
if not any(bad in line for bad in rem):
new.write(line)
with open(copy) as f, open(filtered,'a') as f2:
f2.write("".join(line.strip() for line in f))
with open(filtered, 'r+') as inp:
y = inp.read().upper()
inp.truncate(0)
with open(filtered, 'a') as out:
out.write(y)
os.remove(copy)
| 30.913043 | 54 | 0.624473 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 165 | 0.232068 |
084592c05031adcf4e22889393a72a2880d58eb8 | 758 | py | Python | villas/controller/components/managers/generic.py | VILLASframework/VILLAScontroller | e672439797f209afdd5bc62078f7d49c60269aa4 | [
"Apache-2.0"
] | null | null | null | villas/controller/components/managers/generic.py | VILLASframework/VILLAScontroller | e672439797f209afdd5bc62078f7d49c60269aa4 | [
"Apache-2.0"
] | null | null | null | villas/controller/components/managers/generic.py | VILLASframework/VILLAScontroller | e672439797f209afdd5bc62078f7d49c60269aa4 | [
"Apache-2.0"
] | null | null | null | from villas.controller.components.manager import Manager
from villas.controller.component import Component
class GenericManager(Manager):
def create(self, payload):
component = Component.from_dict(payload.get('parameters'))
try:
self.add_component(component)
except KeyError:
self.logger.error('A component with the UUID %s already exists',
component.uuid)
def delete(self, payload):
parameters = payload.get('parameters')
uuid = parameters.get('uuid')
try:
comp = self.components[uuid]
self.remove_component(comp)
except KeyError:
            self.logger.error('There is no component with UUID: %s', uuid)
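# Illustrative payload sketch (field names are assumptions; the real schema is
# defined by Component.from_dict elsewhere in VILLAScontroller):
# create_payload = {"parameters": {"type": "simulator", "uuid": "1111-..."}}
# delete_payload = {"parameters": {"uuid": "1111-..."}}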
| 28.074074 | 76 | 0.62533 | 648 | 0.854881 | 0 | 0 | 0 | 0 | 0 | 0 | 113 | 0.149077 |
0845d2588d5c55abf24f9ab405009bd284d758d8 | 833 | py | Python | tests/test_composition.py | gregorynicholas/proto-pigeon | 65a5d961e7a8506f3a968b21aaf68f625fd13190 | [
"Apache-2.0"
] | null | null | null | tests/test_composition.py | gregorynicholas/proto-pigeon | 65a5d961e7a8506f3a968b21aaf68f625fd13190 | [
"Apache-2.0"
] | null | null | null | tests/test_composition.py | gregorynicholas/proto-pigeon | 65a5d961e7a8506f3a968b21aaf68f625fd13190 | [
"Apache-2.0"
] | null | null | null | from protorpc.messages import Message, IntegerField, StringField
import protopigeon
class MessageOne(Message):
one = IntegerField(1)
two = IntegerField(2)
class MessageTwo(Message):
three = StringField(1)
four = StringField(2)
def test():
ComposedMessage = protopigeon.compose(MessageOne, MessageTwo)
assert hasattr(ComposedMessage, 'one')
assert hasattr(ComposedMessage, 'two')
assert hasattr(ComposedMessage, 'three')
assert hasattr(ComposedMessage, 'four')
# Make sure these fields weren't modified
assert MessageOne.one.number == 1
assert MessageOne.two.number == 2
assert MessageTwo.three.number == 1
assert MessageTwo.four.number == 2
instance = ComposedMessage(
one=1,
two=2,
three='three',
four='four')
assert instance
| 23.138889 | 65 | 0.686675 | 157 | 0.188475 | 0 | 0 | 0 | 0 | 0 | 0 | 77 | 0.092437 |
0846011f39bb03a7af3bf569426365af42543fe1 | 1,503 | py | Python | udacity-program_self_driving_car_engineer_v2.0/module02-computer vision/exercise02-data acquisiton and visualization/visualization.py | linksdl/futuretec-project-self_driving_cars_projects | 38e8f14543132ec86a8bada8d708eefaef23fee8 | [
"MIT"
] | null | null | null | udacity-program_self_driving_car_engineer_v2.0/module02-computer vision/exercise02-data acquisiton and visualization/visualization.py | linksdl/futuretec-project-self_driving_cars_projects | 38e8f14543132ec86a8bada8d708eefaef23fee8 | [
"MIT"
] | null | null | null | udacity-program_self_driving_car_engineer_v2.0/module02-computer vision/exercise02-data acquisiton and visualization/visualization.py | linksdl/futuretec-project-self_driving_cars_projects | 38e8f14543132ec86a8bada8d708eefaef23fee8 | [
"MIT"
] | null | null | null | """
# !/usr/bin/env python
# -*- coding: utf-8 -*-
@Time : 2022/2/23 19:35
@Author : shengdl999links@gmail.com
@ProjectName : udacity-program_self_driving_car_engineer_v1.0_source.0
@File : visualization.py
"""
import glob
import os.path
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
from PIL import Image
from utils import get_data
def viz(ground_truth):
"""
create a grid visualization of images with color coded bboxes
args:
- ground_truth [list[dict]]: ground truth data
"""
# IMPLEMENT THIS FUNCTION
paths = glob.glob('../data/images/*')
gt_dic = {}
# mapping to access data faster
for gt in ground_truth:
gt_dic[gt['filename']] = gt
# color mapping of classes
color_map = {1: [1, 0, 0], 2: [0, 1, 0], 4: [0, 0, 1]}
f, ax = plt.subplots(4, 5, figsize=(20, 10))
for i in range(20):
        # gcd(4, 5) == 1, so (i % 4, i % 5) visits each of the 20 grid cells exactly once
        x = i % 4
        y = i % 5
filename = os.path.basename(paths[i])
img = Image.open(paths[i])
ax[x, y].imshow(img)
bboxes = gt_dic[filename]['boxes']
classes = gt_dic[filename]['classes']
for cl, bb in zip(classes, bboxes):
y1, x1, y2, x2 = bb
rec = Rectangle((x1, y1), x2 - x1, y2 - y1, facecolor='none', edgecolor=color_map[cl])
ax[x, y].add_patch(rec)
ax[x, y].axis('off')
plt.tight_layout()
plt.show()
if __name__ == "__main__":
ground_truth, _ = get_data()
viz(ground_truth)
| 25.05 | 98 | 0.594145 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 514 | 0.341983 |
084746dfc5f458e9131b1743d5567db36da8ab9c | 898 | py | Python | setup.py | georgenicolaou/python-fakeports | 24eecf879e0d2d2a100be06952fb3677019457e2 | [
"MIT"
] | 3 | 2020-02-03T08:25:10.000Z | 2021-09-29T15:59:01.000Z | setup.py | georgenicolaou/python-fakeports | 24eecf879e0d2d2a100be06952fb3677019457e2 | [
"MIT"
] | 2 | 2021-01-18T19:27:44.000Z | 2021-01-18T19:27:44.000Z | setup.py | georgenicolaou/python-fakeports | 24eecf879e0d2d2a100be06952fb3677019457e2 | [
"MIT"
] | null | null | null | from setuptools import setup
long_description = 'TODO'
# with open("README.md", "r") as rfd:
# long_description = rfd.read()
REQUIREMENTS = [r.strip() for r in open("requirements.txt").readlines()]
setup(
name='python-fakeports',
version="0.1",
packages=['python_fakeports'],
url='',
license='GPL',
author='George Nicolaou',
author_email='george@silensec.com',
description='Python clone of portspoof',
long_description=long_description,
install_requires=REQUIREMENTS,
data_files=[
('/etc/fakeports/', ['fakeports.yml.sample']),
('/usr/local/bin/', ['bin/fakeports.tac'])
],
scripts=['bin/fakeportsctl', 'bin/fakeportsd'],
platforms='any',
classifiers = [line.strip() for line in '''\
Development Status :: 4 - Beta
Intended Audience :: System Administrators
Operating System :: POSIX :: Linux
''']
)
| 28.967742 | 72 | 0.644766 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 456 | 0.507795 |
08483d8175b8bd82d7534d3c981f5d1467884db2 | 195 | py | Python | darzalib/Incoming/__init__.py | swrlly/Midnight | b4375002761a13a09a6c3085e9b34384b28227ba | [
"MIT"
] | 2 | 2021-11-18T13:38:52.000Z | 2021-11-19T04:15:24.000Z | darzalib/Incoming/__init__.py | swrlly/Midnight | b4375002761a13a09a6c3085e9b34384b28227ba | [
"MIT"
] | null | null | null | darzalib/Incoming/__init__.py | swrlly/Midnight | b4375002761a13a09a6c3085e9b34384b28227ba | [
"MIT"
] | null | null | null | from .BiomeDisplay import BiomeDisplay
from .Chats import Chats
from .PlayEffect import PlayEffect
from .Reconnect import Reconnect
from .SwapAck import SwapAck
from .UseItemAck import UseItemAck | 32.5 | 38 | 0.851282 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
08486cbf36ba6ba189128910a8b98a815a664466 | 938 | py | Python | python/17_letter_combinations_of_a_phone_number.py | dchapp/blind75 | aaa409cf2db4ef6d0f86177f4217eceeb391caa8 | [
"MIT"
] | null | null | null | python/17_letter_combinations_of_a_phone_number.py | dchapp/blind75 | aaa409cf2db4ef6d0f86177f4217eceeb391caa8 | [
"MIT"
] | null | null | null | python/17_letter_combinations_of_a_phone_number.py | dchapp/blind75 | aaa409cf2db4ef6d0f86177f4217eceeb391caa8 | [
"MIT"
] | null | null | null | from typing import List

num_to_letters = {
'2': ['a', 'b', 'c'],
'3': ['d', 'e', 'f'],
'4': ['g', 'h', 'i'],
'5': ['j', 'k', 'l'],
'6': ['m', 'n', 'o'],
'7': ['p', 'q', 'r', 's'],
'8': ['t', 'u', 'v'],
'9': ['w', 'x', 'y', 'z'],
}
class Solution:
def letterCombinations(self, digits: str) -> List[str]:
if len(digits) == 0:
return []
return self.recursive(digits)
def recursive(self, digits):
words = set()
digit_idx = 0
def worker(digits, digit_idx, current_word):
candidates = num_to_letters[digits[digit_idx]]
for c in candidates:
if digit_idx == len(digits)-1:
words.add(current_word + c)
else:
worker(digits, digit_idx+1, current_word + c)
worker(digits, 0, "")
return list(words)
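# Example (added for illustration):
# Solution().letterCombinations("23") yields the nine words
# ad, ae, af, bd, be, bf, cd, ce, cf (in set order); worker() expands one
# digit per recursion level and only adds complete words on the last digit.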
| 28.424242 | 65 | 0.410448 | 657 | 0.700426 | 0 | 0 | 0 | 0 | 0 | 0 | 104 | 0.110874 |
084d1fd01b6f648a85848dd0310b96b0d8966a0e | 651 | py | Python | yotta/options/registry.py | microbit-foundation/yotta | 82d854b43d391abb5a006b05e7beffe7d0d6ffbf | [
"Apache-2.0"
] | 176 | 2015-01-02T07:31:59.000Z | 2022-03-21T12:40:02.000Z | yotta/options/registry.py | microbit-foundation/yotta | 82d854b43d391abb5a006b05e7beffe7d0d6ffbf | [
"Apache-2.0"
] | 549 | 2015-01-05T16:19:54.000Z | 2021-01-15T13:46:42.000Z | yotta/options/registry.py | microbit-foundation/yotta | 82d854b43d391abb5a006b05e7beffe7d0d6ffbf | [
"Apache-2.0"
] | 84 | 2015-01-10T21:01:00.000Z | 2022-03-24T16:04:42.000Z | # Copyright 2014-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0
# See LICENSE file for details.
# standard library options
from argparse import Action, SUPPRESS
class RegistryAction(Action):
def __init__(self, *args, **kwargs):
kwargs['nargs'] = 1
self.dest = kwargs['dest']
super(RegistryAction, self).__init__(*args, **kwargs)
def __call__(self, parser, namespace, values, option_string=None):
setattr(namespace, self.dest, values[0])
def addTo(parser):
parser.add_argument(
'--registry', default=None, dest='registry', help=SUPPRESS,
action=RegistryAction
)
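# Minimal usage sketch (added for illustration):
# import argparse
# parser = argparse.ArgumentParser()
# addTo(parser)
# args = parser.parse_args(['--registry', 'https://registry.example.com'])
# assert args.registry == 'https://registry.example.com'  # nargs=1, unwrapped in __call__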
| 27.125 | 70 | 0.680492 | 316 | 0.485407 | 0 | 0 | 0 | 0 | 0 | 0 | 174 | 0.267281 |
084edafd90972abf12ce9cf828ac494b0afdd467 | 4,453 | py | Python | src/pybraingym/environment.py | anetczuk/pybraingym | 4f930021d7802e88c75a1a0aed135dd4de66cc1b | [
"MIT"
] | null | null | null | src/pybraingym/environment.py | anetczuk/pybraingym | 4f930021d7802e88c75a1a0aed135dd4de66cc1b | [
"MIT"
] | null | null | null | src/pybraingym/environment.py | anetczuk/pybraingym | 4f930021d7802e88c75a1a0aed135dd4de66cc1b | [
"MIT"
] | null | null | null | # MIT License
#
# Copyright (c) 2019 Arkadiusz Netczuk <dev.arnet@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
from pybrain.rl.environments.environment import Environment
from gym.spaces.discrete import Discrete
class GymEnvironment(Environment):
def __init__(self, gymRawEnv):
Environment.__init__(self)
observationSpace = gymRawEnv.observation_space
if type(observationSpace) == Discrete:
self.outdim = 1
self.discreteStates = True
self.numStates = observationSpace.n
actionSpace = gymRawEnv.action_space
if type(actionSpace) == Discrete:
self.indim = 1
self.discreteActions = True
self.numActions = actionSpace.n
self.env = gymRawEnv
self.observation = None
self.reward = 0
self.cumReward = 0
self.done = True
self.info = None
self.transform = None
self.doCumulative = False
self.doRender = False
def setRendering(self, render=True):
self.doRender = render
def getCumulativeRewardMode(self):
return self.doCumulative
def setCumulativeRewardMode(self, cumulativeReward=True):
self.doCumulative = cumulativeReward
def setTransformation(self, transformation):
self.transform = transformation
self.transform.env = self
# ==========================================================================
def getSensors(self):
return self.observation
def performAction(self, action):
if self.transform is not None:
action = self.transform.action(action)
self.observation, self.reward, self.done, self.info = self.env.step(action)
if self.transform is not None:
self.observation = self.transform.observation(self.observation)
self.reward = self.transform.reward(self.reward)
self.cumReward += self.reward
def reset(self):
self.done = False
self.reward = 0
self.cumReward = 0
self.info = None
self.observation = self.env.reset()
if self.transform is not None:
self.observation = self.transform.observation(self.observation)
# ==========================================================================
def getReward(self):
if self.doCumulative:
return self.cumReward
else:
return self.reward
def sampleAction(self):
return self.env.action_space.sample()
def render(self):
self.env.render()
def close(self):
self.env.close()
class Transformation:
def __init__(self):
self._env = None
@property
def env(self):
return self._env
@env.setter
def env(self, new_env):
self._env = new_env
def observation(self, observationValue):
"""Transform observation value received from OpenAi Gym. Transformed value is passed to PyBrain.
For discrete observations Gym often returns single value, but PyBrain always requires array.
"""
return observationValue
def action(self, actionValue):
"""Transform action value received from PyBrain and pass result to OpenAi Gym."""
return actionValue
def reward(self, rewardValue):
"""Transform reward value received from OpenAi Gym and pass result to PyBrain."""
return rewardValue
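# Minimal sketch of a concrete Transformation (added for illustration): PyBrain
# expects array-like observations, so a discrete Gym observation (a bare int)
# could be wrapped like this before it reaches the agent:
# class DiscreteObservationTransformation(Transformation):
#     def observation(self, observationValue):
#         return [observationValue]  # wrap the scalar state in a list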
| 32.50365 | 104 | 0.651246 | 3,207 | 0.720189 | 0 | 0 | 120 | 0.026948 | 0 | 0 | 1,640 | 0.368291 |
084eddbd29309d0a8c29e8b0baeae41ed4f83c9f | 7,420 | py | Python | logicscen.py | exposit/pythia-oracle | 60e4e806c9ed1627f2649822ab1901d28933daac | [
"MIT"
] | 32 | 2016-08-27T01:31:42.000Z | 2022-03-21T08:59:28.000Z | logicscen.py | exposit/pythia-oracle | 60e4e806c9ed1627f2649822ab1901d28933daac | [
"MIT"
] | 3 | 2016-08-27T00:51:47.000Z | 2019-08-26T13:23:04.000Z | logicscen.py | exposit/pythia-oracle | 60e4e806c9ed1627f2649822ab1901d28933daac | [
"MIT"
] | 10 | 2016-08-28T14:14:41.000Z | 2021-03-18T03:24:22.000Z | #!/usr/bin/env python
#-*- coding: utf-8 -*-
#---------------------------------------------------------------------------------------------------
# --> Logic to handle scenarios
#---------------------------------------------------------------------------------------------------
import imports
from imports import *
import config
import logic
from logic import *
def parseRefs(source):
start_sep='[['
end_sep=']]'
result=[]
tmp=source.split(start_sep)
for par in tmp:
if end_sep in par:
result.append(par.split(end_sep)[0])
for clause in result:
action, text, link = clause.split('|')
new = "[ref=" + action + "_" + link + "][color=" + config.formats['link_color'] + "]" + text + "[/color][/ref]"
source = source.replace("[[" + clause + "]]", new, 1)
return source
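# Example (added for illustration; the actual color comes from
# config.formats['link_color']): parseRefs("Go [[jump|north|cave]].") returns
# "Go [ref=jump_cave][color=...]north[/color][/ref]." so the action and target
# are fused into the Kivy ref id that refPress() later splits on "_".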
def parseTextVariables(self, source):
start_sep='<<'
end_sep='>>'
result=[]
tmp=source.split(start_sep)
try:
mod = config.curr_game_dir + "scenlogic.py"
filename = mod.split('/')[-1]
pyfile = filename.split('.')[0]
scenlogic = imp.load_source( pyfile, mod)
except:
pass
for par in tmp:
if end_sep in par:
result.append(par.split(end_sep)[0])
for clause in result:
try:
a = clause.split("if ")[0]
except:
a = clause
try:
if a.split('.')[0] == 'var':
a = config.scenario[ a.split('.')[1] ]
else:
a = eval("scenlogic." + a)(self)
except:
pass
try:
b = clause.split(" else ")[-1]
except:
b = ""
try:
if b.split('.')[0] == 'var':
b = config.scenario[ b.split('.')[1] ]
else:
b = eval("scenlogic." + b)(self)
except:
pass
try:
condition = clause.split("if ")[1]
condition = condition.split(" else ")[0]
except:
condition = ""
try:
condition = config.scenario[ condition ]
except:
pass
try:
condition = eval("scenlogic." + condition)(self)
except:
pass
try:
if condition == True:
new = a
else:
new = b
except:
new = a
source = source.replace("<<" + clause + ">>", new, 1)
return source
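# Example (added for illustration; keys are hypothetical): "<<var.name>>"
# substitutes config.scenario['name'], while "<<var.day if isDay else var.night>>"
# picks one of two scenario values after resolving the condition through
# config.scenario or scenlogic.<func>(self).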
def clearOldLinks(self, ref):
for i in range(len(config.textLabelArray)):
newtext = config.textLabelArray[i].text
colorList = re.findall('(?:[0-9a-fA-F]{3}){2}', newtext)
for color in colorList:
newtext = newtext.replace(color, "")
newtext = newtext.replace("[ref=" + ref + "]", "")
newtext = newtext.replace("[/ref]", "")
newtext = newtext.replace("[/color]", "")
config.textLabelArray[i].text = newtext
config.textArray[config.textLabelArray[i].index] = config.textLabelArray[i].text
def refPress(*args):
self = args[0].self
label = args[0]
subtype, text = args[1].split('_')
subtype = subtype[:1]
ref = args[1]
print(label.index)
try:
mod = config.curr_game_dir + "scenlogic.py"
filename = mod.split('/')[-1]
pyfile = filename.split('.')[0]
scenlogic = imp.load_source( pyfile, mod)
except:
pass
if subtype == "d":
block = config.scenario['block']
#try:
# base = config.advDict[block][text]
#except:
base = config.scenario['descRefs'][text]
try:
eval("scenlogic." + base[3])(self)
except:
pass
display = parseTextVariables(self, base[0])
display = parseRefs(display)
logic.updateCenterDisplay(self, display, base[1])
if base[2] == 'repeatable':
newtext = label.text
colorList = re.findall('(?:[0-9a-fA-F]{3}){2}', newtext)
for color in colorList:
newtext = newtext.replace(color, config.formats['visited_link_color'])
label.text = newtext
config.textArray[label.index] = label.text
else:
newtext = label.text
colorList = re.findall('(?:[0-9a-fA-F]{3}){2}', newtext)
for color in colorList:
newtext = newtext.replace(color, "")
newtext = newtext.replace("[ref=" + ref + "]", "")
newtext = newtext.replace("[/ref]", "")
newtext = newtext.replace("[/color]", "")
label.text = newtext
config.textArray[label.index] = label.text
elif subtype == "t":
block = config.scenario['block']
base = config.scenario['toggleRefs'][text]
label.text = base[0]
config.textArray[label.index] = label.text
elif subtype == "j":
block = config.scenario['block']
try:
base = config.advDict[block][text]
except:
base = config.scenario['jumpRefs'][text]
destination = base['jump']
try:
exitmsg = base['exitmsg']
except:
exitmsg = "..."
try:
exitformat = base['exitformat']
except:
exitformat = "result"
try:
repeatable = base['repeatable']
except:
repeatable = "yes"
try:
pause = base['pause']
except:
pause = False
config.scenario['block'] = destination
# this was a jump; clear all older links
clearOldLinks(self, ref)
exitmsg = parseTextVariables(self, exitmsg)
exitmsg = parseRefs(exitmsg)
logic.updateCenterDisplay(self, exitmsg, exitformat)
if pause == False:
showCurrentBlock(self)
else:
more = "[ref=f_showCurrentBlock][color=" + config.formats['link_color'] + "]continue" + "[/color][/ref]"
logic.updateCenterDisplay(self, more, 'italic')
else:
# this is a function; clear all older links
clearOldLinks(self, ref)
try:
eval("scenlogic." + text)(self)
except:
pass
def showCurrentBlock(self, *args):
block = config.scenario['block']
result = ""
count = 0
for item in config.advDict[block]['text']:
count = count + 1
display = parseTextVariables(self, item[0])
display = parseRefs(display)
logic.updateCenterDisplay(self, display, item[1])
self.scenarioTitleLabel.text = config.advDict[block]['title']
showCurrentExits(self)
def showCurrentExits(self, *args):
block = config.scenario['block']
result = ""
try:
for item in config.advDict[block]['exits']:
display = parseTextVariables(self, item[0])
display = parseRefs(display)
logic.updateCenterDisplay(self, display, item[1])
except:
try:
for item in config.advDict[block]['exitlist']:
display = '[[jump|' + config.advDict[block][item]['display'] + '|' + item + ']]'
display = parseTextVariables(self, display)
display = parseRefs(display)
logic.updateCenterDisplay(self, display, config.advDict[block][item]['exitmsg'])
except:
pass
| 27.279412 | 119 | 0.508491 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,118 | 0.150674 |
0850f9781ec228546bf41eccc932a22fd036e4a8 | 7,980 | py | Python | datyy/views/projects.py | VladimirSiv/datyy | 4f3b54557850212ca3ce4c0d16cd56eb9989d7c4 | [
"MIT"
] | null | null | null | datyy/views/projects.py | VladimirSiv/datyy | 4f3b54557850212ca3ce4c0d16cd56eb9989d7c4 | [
"MIT"
] | null | null | null | datyy/views/projects.py | VladimirSiv/datyy | 4f3b54557850212ca3ce4c0d16cd56eb9989d7c4 | [
"MIT"
] | null | null | null | import dash
import dash_html_components as html
import dash_bootstrap_components as dbc
import numpy as np
from server import app
from dash.dependencies import Input, Output, State
from dash.exceptions import PreventUpdate
from components.cards import simple_info_card
from components.dropdowns import dropdown_single
from components.cards import project_info_card
from components.tables import simple_table
from components.gantts import simple_gantt_graph
from logic.dropdowns import dropdown_single_logic
from logic.tables import generate_project_tasks_data
from logic.pie_charts import sunburst_chart_logic
from logic.gantts import simple_gantt_logic
layout = html.Div(
children=[
html.Div(id="project-temp", style={"display": "none"}),
dbc.Row(
className="main-row",
children=[
dbc.Col(
dropdown_single(
id_="project-select",
placeholder="Select Project",
text="Project:",
),
width=3,
),
],
),
dbc.Row(
className="main-row",
children=[
dbc.Col(
simple_info_card(
id_="project-card-planning",
title="Planning",
)
),
dbc.Col(
simple_info_card(
id_="project-card-design",
title="Design",
)
),
dbc.Col(
simple_info_card(
id_="project-card-development",
title="Development",
)
),
dbc.Col(
simple_info_card(
id_="project-card-testing",
title="Testing",
)
),
dbc.Col(
simple_info_card(
id_="project-card-cost",
title="Cost",
)
),
dbc.Col(
simple_info_card(
id_="project-card-duration",
title="Duration",
)
),
],
),
dbc.Row(
className="main-row",
children=[
dbc.Col(
project_info_card(
id_="budget-graph",
title="Budget spending",
subcomponents={
"project-budget": "Budget",
"project-remaining": "Remaining",
"project-currently": "Currently",
},
),
width=6,
),
dbc.Col(
simple_table(
id_="project-tasks-table",
title="Overdue tasks",
columns=[
"Overdue (days)",
"Task",
"Deadline",
"Employee",
],
),
width=6,
),
],
),
html.Div(
className="main-row", children=[html.H4("Milestones", className="title-bold")]
),
dbc.Row(
className="main-row",
children=[dbc.Col(simple_gantt_graph(id_="project-gantt-graph"))],
),
]
)
@app.callback(
[Output("project-select", "options"), Output("project-temp", "children")],
Input("url", "pathname"),
State("project-item", "data"),
)
def set_project_select_options(pathname, project_stored):
"""Sets project select options
Args:
pathname (str): Url pathname
project_stored (str): State of project value
Returns:
list: List of options
str: Project hidden value
Raises:
PreventUpdate: if arguments are not valid
"""
if pathname == "/datyy/projects":
project = project_stored
if project_stored is None:
project = 0
return dropdown_single_logic(), project
raise PreventUpdate
@app.callback(
[Output("project-item", "data"), Output("project-select", "value")],
[Input("project-temp", "children"), Input("project-select", "value")],
)
def set_hidden_project_item(hidden, dropdown_value):
"""Set state and selected project value
Args:
hidden (str): Hidden project value
dropdown_value (str): Selected project value
Returns:
str: State of project value
str: Selected project value
Raises:
PreventUpdate: if arguments are not valid
"""
ctx = dash.callback_context
if not ctx.triggered:
input_id = None
else:
input_id = ctx.triggered[0]["prop_id"].split(".")[0]
if input_id == "project-temp" and hidden is not None:
return hidden, int(hidden)
if input_id == "project-select" and dropdown_value is not None:
return dropdown_value, dropdown_value
raise PreventUpdate
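# Note (added for illustration): dash.callback_context reports which Input
# fired; triggered[0]["prop_id"] looks like "project-select.value", so the
# split(".")[0] above recovers the component id and lets one callback serve
# both the hidden value and the dropdown.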
@app.callback(
[
Output("project-card-" + card_type, "children")
for card_type in [
"planning",
"design",
"development",
"testing",
"cost",
"duration",
]
],
Input("project-select", "value"),
)
def set_project_card_info_values(value):
"""Sets project information values
Args:
value (str): Selected project value
Returns:
str: Project planning value
str: Project design value
str: Project development value
str: Project testing value
str: Project cost value
str: Project duration value
Raises:
PreventUpdate: if arguments are not valid
"""
if value is not None:
result = [str(x) + "%" for x in np.random.randint(100, size=4)]
result.append("$" + str(np.random.randint(100, 1000)))
result.append(str(np.random.randint(10, 20)) + " days")
return result
raise PreventUpdate
@app.callback(Output("project-tasks-table", "data"), Input("project-select", "value"))
def set_project_tasks_table(value):
"""Sets project tasks table data
Args:
value (str): Select project value
Returns:
obj: Table data
Raises:
PreventUpdate: if arguments are not valid
"""
if value is not None:
return generate_project_tasks_data()
raise PreventUpdate
@app.callback(
[
Output("project-budget", "children"),
Output("project-remaining", "children"),
Output("project-currently", "children"),
Output("budget-graph", "figure"),
],
Input("project-select", "value"),
)
def set_project_budget_info(value):
"""Sets project budget information
Args:
value (str): Selected project value
Returns:
str: Project budget value
str: Project remaining value
str: Project currently value
obj: Project Budget graph figure
Raises:
PreventUpdate: if arguments are not valid
"""
if value is not None:
result = list(np.random.randint(0, 1000, size=3))
result.append(sunburst_chart_logic())
return result
raise PreventUpdate
@app.callback(Output("project-gantt-graph", "figure"), Input("project-select", "value"))
def display_gantt_graph(value):
"""Displays gantt graph figure
Args:
value (str): Selected project value
Returns:
obj: Project gantt graph figure
Raises:
PreventUpdate: if arguments are not valid
"""
if value is not None:
return simple_gantt_logic()
raise PreventUpdate
| 28.098592 | 90 | 0.52193 | 0 | 0 | 0 | 0 | 4,253 | 0.532957 | 0 | 0 | 2,832 | 0.354887 |
08540bf2ac8cadc1cf3900bd14a8f79f2ba8831e | 146 | py | Python | week-02/appendA.py | norbertbodo91/pythonExercises | 9cd773c5d6ce3280d19a84ef12b8fd478ff09613 | [
"MIT"
] | null | null | null | week-02/appendA.py | norbertbodo91/pythonExercises | 9cd773c5d6ce3280d19a84ef12b8fd478ff09613 | [
"MIT"
] | null | null | null | week-02/appendA.py | norbertbodo91/pythonExercises | 9cd773c5d6ce3280d19a84ef12b8fd478ff09613 | [
"MIT"
] | null | null | null | def appendA(toAppend):
newWord = toAppend + 'a'
return newWord
print(appendA(toAppend = raw_input("Enter a word to add an A letter: ")))
| 24.333333 | 73 | 0.684932 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 38 | 0.260274 |
085550c02672da4291f033dfdf10337c089c2aa8 | 16,119 | py | Python | multiacctcf.py | DonMills/multiacct-CF-orchestrate | 4acce3c984c1801ff66cf9d210e3a0d1a6f9246b | [
"MIT"
] | 11 | 2017-07-19T07:05:44.000Z | 2022-02-07T19:35:51.000Z | multiacctcf.py | DonMills/multiacct-CF-orchestrate | 4acce3c984c1801ff66cf9d210e3a0d1a6f9246b | [
"MIT"
] | null | null | null | multiacctcf.py | DonMills/multiacct-CF-orchestrate | 4acce3c984c1801ff66cf9d210e3a0d1a6f9246b | [
"MIT"
] | 2 | 2017-07-19T15:01:52.000Z | 2022-02-07T19:35:53.000Z | #!/usr/bin/python
from __future__ import print_function
import threading
import boto3
import botocore
import argparse
from time import ctime
###############
# Some Global Vars
##############
lock = threading.Lock()
awsaccts = [{'acct': 'acct1ID',
'name': 'master',
'cffile': 'location of cloudformation file in S3'},
{'acct': 'acct2ID',
'name': 'dev',
'cffile': 'location of cloudformation file in S3'},
{'acct': 'acct3ID',
'name': 'staging',
'cffile': 'location of cloudformation file in S3'},
{'acct': 'acct4ID',
'name': 'test',
'cffile': 'location of cloudformation file in S3'},
{'acct': 'acct5ID',
'name': 'QA',
'cffile': 'location of cloudformation file in S3'}]
###################################
# This results dict is prepopulated with the info for the master vpc in a region. It will be overwritten
# if the master cloudform is run
###################################
results = {
'master': {
'CIDRblock': '172.0.1.0/22',
'RTBint': [
'rtb-xxxxxxxx',
'rtb-xxxxxxxx'],
'VPCID': 'vpc-xxxxxxxx'}}
threads = []
#######################
# The function that does CloudFormation and peering requests
#######################
def run_cloudform(acct, acctname, region, cffile, nopeer, results):
################
# Don't like these, but necessary due to scoping
###############
cfgood = None
ismaster = None
cidrblock = None
vpcid = None
rtbid = None
    rtbid_inta = None
    rtbid_intb = None
threadname = threading.current_thread().name
if acctname == "master":
ismaster = True
###################
# If we are running in master, we don't need sts creds
###################
if ismaster:
try:
cf = boto3.client('cloudformation',
region_name=region)
validate = cf.validate_template(
TemplateURL=cffile
)
cfgood = True
print(
"[%s] %s CloudFormation file %s validated successfully for account %s" %
(ctime(), threadname, cffile, acctname))
except botocore.exceptions.ClientError as e:
print(
"[%s] %s CloudFormation file %s validation failed for account %s with error: %s" %
(ctime(), threadname, cffile, acctname, e))
cfgood = False
###################
# Otherwise, we do.
###################
else:
with lock:
print(
"[%s] %s is assuming STS role for account %s" %
(ctime(), threadname, acctname))
try:
with lock:
sts = boto3.client('sts')
role = sts.assume_role(
RoleArn='arn:aws:iam::' + acct + ':role/MasterAcctRole',
RoleSessionName='STSTest',
DurationSeconds=900
)
accesskey = role["Credentials"]["AccessKeyId"]
secretkey = role["Credentials"]["SecretAccessKey"]
sessiontoken = role["Credentials"]["SessionToken"]
print(
"[%s] %s successfully assumed STS role for account %s" %
(ctime(), threadname, acctname))
        except botocore.exceptions.ClientError as e:
            with lock:
                print(
                    "[%s] %s failed to assume role for account %s with error: %s" %
                    (ctime(), threadname, acctname, e))
            return  # no credentials were obtained; cannot continue for this account
with lock:
print(
"[%s] %s is verifying CloudFormation file %s for account %s" %
(ctime(), threadname, cffile, acctname))
try:
cf = boto3.client('cloudformation',
aws_access_key_id=accesskey,
aws_secret_access_key=secretkey,
aws_session_token=sessiontoken,
region_name=region)
validate = cf.validate_template(
TemplateURL=cffile
)
cfgood = True
with lock:
print(
"[%s] %s CloudFormation file %s validated successfully for account %s" %
(ctime(), threadname, cffile, acctname))
except botocore.exceptions.ClientError as e:
with lock:
print(
"[%s] %s CloudFormation file %s validation failed for account %s with error: %s" %
(ctime(), threadname, cffile, acctname, e))
cfgood = False
##########################
# Ok the CF should be validated (cfgood=True), so let's run it.
#########################
if cfgood:
with lock:
print(
"[%s] %s Preparing to run CloudFormation file %s in account %s" %
(ctime(), threadname, cffile, acctname))
stackid = cf.create_stack(
StackName=region + "-" + acctname,
TemplateURL=cffile,
Parameters=[
{
},
],
Tags=[
{
'Key': 'Purpose',
'Value': 'Infrastructure'
},
]
)['StackId']
with lock:
print("[%s] %s StackID %s is running in account %s" %
(ctime(), threadname, stackid, acctname))
waiter = cf.get_waiter('stack_create_complete')
waiter.wait(StackName=stackid)
with lock:
print(
"[%s] %s StackID %s completed creation in account %s" %
(ctime(), threadname, stackid, acctname))
stack = cf.describe_stacks(StackName=stackid)
for item in stack['Stacks'][0]['Outputs']:
if item['OutputKey'] == "VPCId":
vpcid = item["OutputValue"]
elif item['OutputKey'] == "VPCCIDRBlock":
cidrblock = item["OutputValue"]
elif item['OutputKey'] == "RouteTableId":
rtbid = item["OutputValue"]
elif item['OutputKey'] == "InternalRouteTableA":
rtbid_inta = item["OutputValue"]
elif item['OutputKey'] == "InternalRouteTableB":
rtbid_intb = item["OutputValue"]
pcxid = "None"
###########################
# Don't do peering if we are master vpc or if nopeer is set via cli
# otherwise, this is the peering code
##########################
if not ismaster and not nopeer:
with lock:
print(
"[%s] %s Preparing to request peering with Master vpc in account %s" %
(ctime(), threadname, acctname))
try:
ec2 = boto3.client('ec2',
aws_access_key_id=accesskey,
aws_secret_access_key=secretkey,
aws_session_token=sessiontoken,
region_name=region)
pcx = ec2.create_vpc_peering_connection(
VpcId=vpcid,
PeerVpcId=results['master']['VPCID'],
PeerOwnerId='masteracctID'
)
pcxid = pcx['VpcPeeringConnection']['VpcPeeringConnectionId']
with lock:
print(
"[%s] %s Peering Connection request ID %s sent from account %s" %
(ctime(), threadname, pcxid, acctname))
print(
"[%s] %s Preparing to add route to table %s to Peer Connection ID %s in account %s" %
(ctime(), threadname, rtbid, pcxid, acctname))
route = ec2.create_route(
DestinationCidrBlock=results['master']['CIDRblock'],
VpcPeeringConnectionId=pcxid,
RouteTableId=rtbid
)
if route['Return']:
print(
"[%s] Route added to route table %s for network %s to peer connection %s in account %s" %
(ctime(), rtbid, results['master']['CIDRblock'], pcxid, acctname))
else:
print(
"[%s] Failed adding to route table %s for network %s to peer connection %s in account %s" %
(ctime(), rtbid, results['master']['CIDRblock'], pcxid, acctname))
except botocore.exceptions.ClientError as e:
with lock:
print(
"[%s] %s Peering Connection request failed for account %s with error: %s" %
(ctime(), threadname, acctname, e))
results[acctname] = {
"CIDRblock": cidrblock,
"VPCID": vpcid,
"PCXID": pcxid}
############################
# master results need the route table ids of both internal tables to add routes to both
###########################
if ismaster:
results[acctname].update({'RTBint': [rtbid_inta, rtbid_intb]})
def printdata(results, acctname):
print(
"The CIDRBlock for VPC %s in account %s is %s. The VPC peering id is %s" %
(results[acctname]['VPCID'],
acctname,
results[acctname]['CIDRblock'],
results[acctname]['PCXID']))
def printdatamaster(results):
print(
"The CIDRBlock for VPC %s in master account is %s. The internal route table ids are %s and %s" %
(results['master']['VPCID'],
results['master']['CIDRblock'],
results['master']['RTBint'][0],
results['master']['RTBint'][1]))
def main():
#############################
# Parse CLI options - setup the parser
############################
parser = argparse.ArgumentParser(
description='An orchestration script that runs multi-account CloudFormation and can set up peering relationships between the VPCs created')
parser.add_argument(
"region",
type=str,
choices=[
"us-west-2",
"us-east-1"],
help="The AWS Region you would like to operate in")
parser.add_argument(
"-sa",
"--single_account",
action='append',
help="Provide a single account name(dev,hdp,test,beps) and only operate on that account. You can perform this action multiple times to operate on more than one account.")
parser.add_argument(
"-np",
"--no_peering",
action='store_true',
dest='no_peering',
help="Run the CloudFormation, but don't do the inter-VPC peering")
#################################
# Parse CLI options - read the parser
#################################
nopeer = None
args = parser.parse_args()
region = args.region
acct = args.single_account
if args.no_peering:
nopeer = True
############################
# Do single account or multiple single account runs
############################
if acct:
for line in acct:
foundacct = None
print(
"[%s] Single account selected: Preparing to run CloudFormation on %s account" %
(ctime(), line))
print("[%s] Preparing to spawn thread" % ctime())
for entry in awsaccts:
if entry['name'] == line:
t = threading.Thread(
target=run_cloudform,
args=(
entry['acct'],
entry['name'],
region,
entry['cffile'],
nopeer,
results))
threads.append(t)
t.start()
foundacct = True
if not foundacct:
print("[%s] No matching account name found!" % ctime())
print("[%s] Current configured accounts are:" % ctime())
for entry in awsaccts:
print(
"[%s] Account ID: %s Account Name: %s" %
(ctime(), entry['acct'], entry['name']))
for i in range(len(threads)):
threads[i].join()
#############################
# Or run the whole shebang
#############################
else:
print(
"[%s] Preparing to run CloudFormation across all AWS accounts" %
ctime())
print("[%s] Preparing to run Master account CloudFormation" % ctime())
masteracct = list(
(entry for entry in awsaccts if entry['name'] == 'master'))[0]
run_cloudform(
masteracct['acct'],
masteracct['name'],
region,
masteracct['cffile'],
nopeer,
results)
printdatamaster(results)
print("[%s] Preparing to spawn threads" % ctime())
subaccts = (entry for entry in awsaccts if entry['name'] != 'master')
##############################
# do the threading for subaccts
#############################
for entry in subaccts:
t = threading.Thread(
target=run_cloudform,
args=(
entry['acct'],
entry['name'],
region,
entry['cffile'],
nopeer,
results))
threads.append(t)
t.start()
for i in range(len(threads)):
threads[i].join()
print("[%s] All CloudFormations run!" % ctime())
if len(results) > 1:
print("[%s] Printing outputs:" % ctime())
for entry in (entry for entry in results if entry != 'master'):
printdata(results, entry)
###############################
# Accept peering and add final routes to peering vpcs
##############################
if not nopeer and len(results) > 1:
print(
"[%s] Attempting to accept peering requests in Master" %
ctime())
try:
master = boto3.client('ec2',
region_name=region)
subaccts = (entry for entry in results if entry != "master")
for entry in subaccts:
pcx = master.accept_vpc_peering_connection(
VpcPeeringConnectionId=results[entry]['PCXID']
)
print(
"[%s] VPC Peering connection from %s with ID %s is status: %s" %
(ctime(),
entry,
results[entry]['PCXID'],
pcx['VpcPeeringConnection']['Status']['Code']))
for table in results['master']['RTBint']:
route = master.create_route(
DestinationCidrBlock=results[entry]['CIDRblock'],
VpcPeeringConnectionId=results[entry]['PCXID'],
RouteTableId=table
)
if route['Return']:
print(
"[%s] Route added to Master route table %s for network %s to peer connection %s" %
(ctime(), table, results[entry]['CIDRblock'], results[entry]['PCXID']))
else:
print(
"[%s] Adding route to Master route table %s for network %s to peer connection %s failed!" %
(ctime(), table, results[entry]['CIDRblock'], results[entry]['PCXID']))
except botocore.exceptions.ClientError as e:
print(
"[%s] Failed to manipulate account %s with error: %s" %
(ctime(), "Master", e))
print("[%s] Finished" % ctime())
if __name__ == '__main__':
main()
| 39.70197 | 179 | 0.470749 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5,673 | 0.351945 |
085769a397608c592ac48390d3b4d6b67aae08eb | 882 | py | Python | NIM/tests/woa_test.py | buctlab/source-seeking-multi-robot-team-simulator | a68c214b9bd19006a94c0adc832681bbaf0d6dc8 | [
"Apache-2.0"
] | null | null | null | NIM/tests/woa_test.py | buctlab/source-seeking-multi-robot-team-simulator | a68c214b9bd19006a94c0adc832681bbaf0d6dc8 | [
"Apache-2.0"
] | null | null | null | NIM/tests/woa_test.py | buctlab/source-seeking-multi-robot-team-simulator | a68c214b9bd19006a94c0adc832681bbaf0d6dc8 | [
"Apache-2.0"
] | null | null | null | import os
from Config import Config
from NIM.algorithms import WhaleOptimizationAlgorithm
from NIM.algorithms.algorithm import logger
if __name__ == '__main__':
with open(Config.default_saved_scene_path, 'r') as f:
data = f.read()
m2d = eval(data)
seed = 5
woa = WhaleOptimizationAlgorithm(m2d, Config.rasterized_cell_size, func=Config.func, iterations=Config.iterations,
debug=True, population=Config.number_of_robots, robot_size=Config.size, seed=seed,
k=Config.leakage_sources)
best_sol, best_val = woa.run()
logger.info("best sol:{sol}, best val:{val}".format(sol=best_sol, val=best_val))
func_name = type(woa.func).__name__
woa.iter_swarm_pos.to_csv(
os.path.join(Config.project_root, "data/csv_file/woa_MultiSourceFunction_" + str(seed) + ".csv"))
| 36.75 | 119 | 0.675737 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 91 | 0.103175 |
085893c679735b22d323d01a1e71583ba759cc3a | 6,242 | py | Python | src/COVIDZejunDatagraphs.py | luisflores0330/ista131final | 168ac6afe666e945ae717387b50420804b33c4f3 | [
"Apache-2.0"
] | null | null | null | src/COVIDZejunDatagraphs.py | luisflores0330/ista131final | 168ac6afe666e945ae717387b50420804b33c4f3 | [
"Apache-2.0"
] | null | null | null | src/COVIDZejunDatagraphs.py | luisflores0330/ista131final | 168ac6afe666e945ae717387b50420804b33c4f3 | [
"Apache-2.0"
] | 4 | 2021-12-07T21:44:31.000Z | 2021-12-07T23:20:04.000Z | '''
File: COVIDZejunDatagraphs.py
Author: Zejun Li
Purpose: This file contains 12 different functions to make 5 different graphs about the COVID 19 in Idaho
'''
import pandas as pd, numpy as np, matplotlib.pyplot as plt
import matplotlib.dates as mdates
import datetime
import datetime as dt
def get_df():
'''
This function is to get the dataframe from the csv file : data_table_for_daily_death_trends__idaho.csv
'''
fname = "data_table_for_daily_death_trends__idaho.csv"
df = pd.read_csv(fname,sep=',', skiprows = 2, engine='python')
del df["State"]
df["Dates"] = np.nan
def date_convert(date_to_convert):
return datetime.datetime.strptime(date_to_convert, '%b %d %Y').strftime('%m/%d/%Y')
df['Dates'] = df['Date'].apply(date_convert)
del df["Date"]
return df
def get_date_lst():
'''This function is to get all of the dates from the Dates column
'''
df = get_df()
lst_dates = []
for i in df['Dates']:
lst_dates.append(i)
return lst_dates
def fig1():
'''This function is to make a line graph with x axis of Dates and y axis of Current Hospitalized COVID-19 Patients.
'''
df = get_df()
lst_dates = get_date_lst()
x = [dt.datetime.strptime(d,'%m/%d/%Y').date() for d in lst_dates]
plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%m/%d/%Y'))
plt.gca().xaxis.set_major_locator(mdates.DayLocator(interval=50))
plt.plot(x,df['Current Hospitalized COVID-19 Patients'])
plt.gcf().autofmt_xdate()
plt.xlabel("Dates")
plt.ylabel("Current Hospitalized COVID-19 Patients")
plt.suptitle('Figure 1', fontsize=16)
def fig2():
'''This function is to make a bar chart with x axis of Dates and y axis of New Deaths
'''
df = get_df()
lst_dates = get_date_lst()
plt.figure(figsize=(10,10))
plt.style.use('ggplot')
lst_dates = []
for i in df['Dates']:
lst_dates.append(i)
x = [dt.datetime.strptime(d,'%m/%d/%Y').date() for d in lst_dates]
lst = []
for i in df['New Deaths']:
lst.append(i)
x_pos = [i for i, _ in enumerate(x)]
plt.bar(x,lst,width=0.8, color='darkviolet')
plt.xlabel("Dates")
plt.ylabel("New Deaths")
plt.suptitle('Figure 2', fontsize=16)
def fig3():
'''This function is to make a scatter plot with x axis of Dates and y axis of 7-Day Moving Avg
'''
df = get_df()
plt.figure(figsize=(16,10), dpi= 80)
lst_dates = get_date_lst()
lst = []
for i in df["7-Day Moving Avg"]:
lst.append(i)
int_lst = []
for i in range(len(lst_dates)):
int_lst.append(i)
x = np.array(lst_dates)
y = np.array(lst)
x1 = np.array(int_lst)
m, b = np.polyfit(x1, y, 1)
plt.plot(x, m*x1 + b)
plt.scatter(x, y)
plt.gca().xaxis.set_major_locator(mdates.DayLocator(interval=50))
plt.xlabel("Dates")
plt.ylabel("7-Day Moving Avg")
plt.gca().invert_xaxis()
plt.suptitle('Figure 3', fontsize=16)
def main():
fig1()
fig2()
fig3()
plt.show()
main()
def csv(file):
'''
This function is to get two dataframes from the csv file; df: data_table_for_daily_case_trends__idaho1.csv; df2:data_table_for_daily_death_trends__idaho2.csv
'''
df = pd.read_csv(file, sep = ",", skiprows = 2)
df2 = pd.read_csv("data_table_for_daily_death_trends__idaho2.csv", sep = "," , skiprows = 2)
df["New Deaths"] = df2["New Deaths"]
df["Doses Per Day"] = 0
df["Dates"] = df["Date"].replace({"Jan":"01", "Feb":"02","Mar":"03","Apr":"04","May":"05","Jun":"06","Jul":"07","Aug":"08","Sep":"09","Oct":"10","Nov":"11","Dec":"12"}, regex = True)
df["Total Doses Administered"] = df["Total Doses Administered"].fillna(0)
    a = pd.to_numeric(df["Total Doses Administered"])
    for i in range(1, len(df["Total Doses Administered"])-1):
        df.loc[i-1,"Doses Per Day"] = abs((int(a.iloc[i-1]) - int(a.iloc[i])))
df.drop([0, 1, 2], axis = 0,inplace = True)
del df["7-Day Moving Avg"]
del df["State"]
return df
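def doses_per_day_vectorized(df):
    """A vectorized sketch of the per-day dose loop in csv() above: an
    illustrative alternative (not called anywhere by default), assuming
    "Total Doses Administered" is present and coercible to numeric."""
    doses = pd.to_numeric(df["Total Doses Administered"])
    # diff(-1) gives a[i] - a[i+1]; the absolute value matches the loop above
    return doses.diff(-1).abs().fillna(0).astype(int)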
def clean_dose():
'''This function is to delete the dates that don't have dose
'''
df = csv("data_table_for_daily_case_trends__idaho1.csv")
for i in range(626,670):
df = df.drop(index=i)
return df
def fig4():
'''This function is to make a line graph with x axis of Dates and y axis of New cases
'''
df = csv("data_table_for_daily_case_trends__idaho1.csv")
x = [dt.datetime.strptime(d,'%m %d %Y').date() for d in df["Dates"]]
plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%m %d %Y'))
plt.gca().xaxis.set_major_locator(mdates.DayLocator(interval=50))
plt.plot(x,df['New Cases'])
plt.gcf().autofmt_xdate()
plt.xlabel("Dates")
plt.ylabel("New Cases")
plt.suptitle('Figure 4', fontsize=16)
'''
def fig5():
df = csv("data_table_for_daily_case_trends__idaho1.csv")
plt.figure(figsize=(10,10))
plt.style.use('ggplot')
lst_dates = []
for i in df['Dates']:
lst_dates.append(i)
x = [dt.datetime.strptime(d,'%m %d %Y').date() for d in df["Dates"]]
lst = []
for i in df['New Deaths']:
lst.append(i)
x_pos = [i for i, _ in enumerate(x)]
plt.bar(x,lst,width=0.8, color='black')
plt.xlabel("Dates")
plt.ylabel("New Deaths")
plt.suptitle('Figure 5', fontsize=16)
'''
def fig5():
'''This function is to make a bar chart with x axis of Dates and y axis of Doses Per Day
'''
df = clean_dose()
plt.figure(figsize=(16,10), dpi= 80)
lst = []
for i in df["Doses Per Day"]:
lst.append(i)
x = np.array(df["Dates"])
y = np.array(lst)
plt.gca().xaxis.set_major_locator(mdates.DayLocator(interval=50))
plt.bar(x,lst,width=0.8, color='navy')
plt.xlabel("Dates")
plt.ylabel("Doses Per Day")
plt.gca().invert_xaxis()
plt.suptitle('Figure 5', fontsize=16)
def main2():
fig4()
#fig5()
fig5()
plt.show()
main2()
| 33.026455 | 187 | 0.603172 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,634 | 0.42198 |
0858b5bc59305248e9f97a28c217e52f4157d9b4 | 1,118 | py | Python | tests/test_pipeline_disk_deduplication.py | kingking888/skyscraper | d710202f9581c3791d2cf7ee3ae33e950e46c0b7 | [
"MIT"
] | 1 | 2021-03-21T07:25:43.000Z | 2021-03-21T07:25:43.000Z | tests/test_pipeline_disk_deduplication.py | kingking888/skyscraper | d710202f9581c3791d2cf7ee3ae33e950e46c0b7 | [
"MIT"
] | null | null | null | tests/test_pipeline_disk_deduplication.py | kingking888/skyscraper | d710202f9581c3791d2cf7ee3ae33e950e46c0b7 | [
"MIT"
] | 1 | 2021-04-24T11:38:18.000Z | 2021-04-24T11:38:18.000Z | import pytest
from scrapy.spiders import Spider
from skyscraper.items import BasicItem
from scrapy.exceptions import DropItem
from skyscraper.pipelines.filesystem import DiskDeduplicationPipeline
class MockDeduplication():
def __init__(self):
self.s = set()
def add_word(self, word):
self.s.add(word)
def has_word(self, word):
return word in self.s
def test_filters_duplicate_item():
pipeline = DiskDeduplicationPipeline(MockDeduplication(), 'namespace')
spider = Spider(name='spider')
item = BasicItem()
item['id'] = 'my-unique-id'
item['url'] = 'http://example.com/'
item['source'] = 'dummy source'
# one time it should work
pipeline.process_item(item, spider)
# afterwards it should throw
with pytest.raises(DropItem):
pipeline.process_item(item, spider)
# for different ID it should work
item = BasicItem()
item['id'] = 'my-unique-id-2'
item['url'] = 'http://example.com/'
item['source'] = 'dummy source'
pipeline.process_item(item, spider)
| 24.304348 | 74 | 0.686047 | 190 | 0.169946 | 0 | 0 | 0 | 0 | 0 | 0 | 239 | 0.213775 |
08597f4873dfad388e0eb75f921c519b7373d12a | 180 | py | Python | windscribe/__init__.py | Dayzpd/Python-Windscribe | eaaca7b39286434ec8588c967076f0b5b9961d91 | [
"MIT"
] | 9 | 2020-09-17T19:42:18.000Z | 2022-01-04T07:14:37.000Z | windscribe/__init__.py | Dayzpd/Python-Windscribe | eaaca7b39286434ec8588c967076f0b5b9961d91 | [
"MIT"
] | 4 | 2020-10-28T16:22:54.000Z | 2022-01-04T07:13:18.000Z | windscribe/__init__.py | Dayzpd/Python-Windscribe | eaaca7b39286434ec8588c967076f0b5b9961d91 | [
"MIT"
] | 4 | 2020-12-11T11:13:27.000Z | 2022-01-16T02:40:55.000Z | __all__ = [
'account',
'connect',
'locations',
'login',
'logout',
]
from .windscribe import (
account,
connect,
locations,
login,
logout,
) | 12 | 25 | 0.527778 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 44 | 0.244444 |
085a588c3443a2133c8229f5612a92a5ee522cad | 335 | py | Python | src/videos/migrations/0009_rename_updated_timestamp_video_updated.py | imsubhamsingh/vibeon | 5ea67bb8dae0a0c28d36f81374eb4f046d842cf5 | [
"Apache-2.0"
] | null | null | null | src/videos/migrations/0009_rename_updated_timestamp_video_updated.py | imsubhamsingh/vibeon | 5ea67bb8dae0a0c28d36f81374eb4f046d842cf5 | [
"Apache-2.0"
] | 2 | 2021-07-19T18:41:46.000Z | 2022-02-10T11:43:07.000Z | src/videos/migrations/0009_rename_updated_timestamp_video_updated.py | imsubhamsingh/vibeon | 5ea67bb8dae0a0c28d36f81374eb4f046d842cf5 | [
"Apache-2.0"
] | null | null | null | # Generated by Django 3.2 on 2021-04-20 19:34
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [("videos", "0008_video_updated_timestamp")]
operations = [
migrations.RenameField(
model_name="video", old_name="updated_timestamp", new_name="updated"
)
]
| 22.333333 | 80 | 0.674627 | 252 | 0.752239 | 0 | 0 | 0 | 0 | 0 | 0 | 118 | 0.352239 |
085b597e5e9aaf7c138a4db4c8f8739331aa2a66 | 2,342 | py | Python | SVM/SVM_Regression/Sklearn_SVM_Regression.py | Jojoxiao/Machine-Learning-for-Beginner-by-Python3 | 71b91c9cba5803bd78d4d31be6dabb1d3989e968 | [
"MIT"
] | 397 | 2018-05-28T02:07:32.000Z | 2022-03-30T09:53:37.000Z | SVM/SVM_Regression/Sklearn_SVM_Regression.py | 976634681/Machine-Learning-for-Beginner-by-Python3 | d9effcbb1b390dc608a0f4c0a28f0ad03892047a | [
"MIT"
] | 4 | 2019-01-14T16:41:02.000Z | 2021-03-11T13:23:06.000Z | SVM/SVM_Regression/Sklearn_SVM_Regression.py | 976634681/Machine-Learning-for-Beginner-by-Python3 | d9effcbb1b390dc608a0f4c0a28f0ad03892047a | [
"MIT"
] | 235 | 2018-06-28T05:31:40.000Z | 2022-03-11T03:20:07.000Z | # -*- coding:utf-8 -*-
# &Author AnFany
# Support vector regression with kernel functions, implemented with sklearn
"""
Part 1: Imports
"""
# Import a subset of the Beijing PM2.5 data
import SVM_Regression_Data as rdata
# Import packages
from sklearn import svm
import numpy as np
import matplotlib.pyplot as plt
from pylab import mpl
mpl.rcParams['font.sans-serif'] = ['FangSong']  # a font that can render CJK text
mpl.rcParams['axes.unicode_minus'] = False  # render the minus sign correctly
"""
第二部分:构建函数
"""
# 核函数
def sk_svm_train(intr, labeltr, inte, kener):
clf = svm.SVR(kernel=kener)
    # Fit the model
    clf.fit(intr, labeltr)
    # Predictions on the training set
    tr = clf.predict(intr)
    # Predictions on the test set
    pr = clf.predict(inte)
return tr, pr
# Result output function
'''
Available kernels: 'linear', 'poly', 'rbf', 'sigmoid'
'''
# Dataset
def result(data, he='rbf'):
    # Training data
xd = data[0]
yd = data[1].T[0]
    # Test data
    texd = data[2]
    teyd = data[3].T[0]
    # Train, predict, then rescale outputs back to the original units
resu = sk_svm_train(xd, yd, texd, he)
tra = resu[0] * (data[4][1] - data[4][0]) + data[4][0]
pre = resu[1] * (data[4][1] - data[4][0]) + data[4][0]
ydd = data[1].T[0] * (data[4][1] - data[4][0]) + data[4][0]
teyd = data[3].T[0] * (data[4][1] - data[4][0]) + data[4][0]
return ydd, tra, teyd, pre
# Plotting function
def huitu(suout, shiout, c=['b', 'k'], sign='Train', cudu=3):
    # Plot the actual data against the model output
    plt.subplot(2, 1, 1)
    plt.plot(list(range(len(suout))), suout, c=c[0], linewidth=cudu, label='%s: model output' % sign)
    plt.plot(list(range(len(shiout))), shiout, c=c[1], linewidth=cudu, label='%s: actual' % sign)
    plt.legend(loc='best')
    plt.title('Actual data vs. SVM output')
    # Plot the error against zero
    plt.subplot(2, 2, 3)
    plt.plot(list(range(len(suout))), suout - shiout, c='r', linewidth=cudu, label='%s: error' % sign)
    plt.plot(list(range(len(suout))), list(np.zeros(len(suout))), c='k', linewidth=cudu, label='zero')
    plt.legend(loc='best')
    plt.title('Error vs. zero')
    # Error distribution histogram
    plt.subplot(2, 2, 4)
    plt.hist(suout - shiout, 50, facecolor='g', alpha=0.75)
    plt.title('Error histogram')
    # Show the figure
    plt.show()
'''Part 4: Main program'''
if __name__ == "__main__":
datasvr = rdata.model_data
realtr, outtri, realpre, poupre = result(datasvr, he='rbf')
    huitu(realtr, outtri, c=['b', 'k'], sign='Train', cudu=1.5)
    huitu(realpre, poupre, c=['b', 'k'], sign='Test', cudu=1.5)
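    # A minimal extra sketch (not part of the original run): compare the
    # kernels listed in Part 2 by mean absolute training error. Uncomment to try.
    # for kernel in ('linear', 'poly', 'rbf', 'sigmoid'):
    #     ydd_k, tra_k, _, _ = result(datasvr, he=kernel)
    #     print(kernel, 'mean abs train error:', np.mean(np.abs(tra_k - ydd_k)))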
| 22.519231 | 101 | 0.557643 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 916 | 0.333576 |
085b8a0758f970cf513eb9555d20e921de2dbc2f | 1,655 | py | Python | tests/test_history.py | dfroger/conda | c0f99ff46b217d081501e66f4dcd7bcdb5d9c6aa | [
"BSD-3-Clause"
] | null | null | null | tests/test_history.py | dfroger/conda | c0f99ff46b217d081501e66f4dcd7bcdb5d9c6aa | [
"BSD-3-Clause"
] | null | null | null | tests/test_history.py | dfroger/conda | c0f99ff46b217d081501e66f4dcd7bcdb5d9c6aa | [
"BSD-3-Clause"
] | null | null | null | from os.path import dirname
import unittest
from .decorators import skip_if_no_mock
from .helpers import mock
from conda import history
class HistoryTestCase(unittest.TestCase):
def test_works_as_context_manager(self):
h = history.History("/path/to/prefix")
        self.assertTrue(hasattr(h, '__enter__'))
        self.assertTrue(hasattr(h, '__exit__'))
@skip_if_no_mock
def test_calls_update_on_enter_and_exit(self):
h = history.History("/path/to/prefix")
with mock.patch.object(h, 'update') as update:
with h:
self.assertEqual(1, update.call_count)
pass
self.assertEqual(2, update.call_count)
@skip_if_no_mock
def test_returns_history_object_as_context_object(self):
h = history.History("/path/to/prefix")
with mock.patch.object(h, 'update'):
with h as h2:
self.assertEqual(h, h2)
class UserRequestsTestCase(unittest.TestCase):
h = history.History(dirname(__file__))
user_requests = h.get_user_requests()
def test_len(self):
self.assertEqual(len(self.user_requests), 6)
def test_0(self):
self.assertEqual(self.user_requests[0],
{'cmd': ['conda', 'update', 'conda'],
'date': '2016-02-16 13:31:33'})
def test_last(self):
self.assertEqual(self.user_requests[-1],
{'action': 'install',
'cmd': ['conda', 'install', 'pyflakes'],
'date': '2016-02-18 22:53:20',
'specs': ['pyflakes', 'conda', 'python 2.7*']})
| 31.826923 | 73 | 0.590937 | 1,511 | 0.912991 | 0 | 0 | 547 | 0.330514 | 0 | 0 | 254 | 0.153474 |
085dd2a204aa1776b398919049eef21372b1d7e4 | 107 | py | Python | src/zojax/catalog/tests/__init__.py | Zojax/zojax.catalog | 1be9ef2cd4516d6c1dcfe7da52c4d438852e7ea6 | [
"ZPL-2.1"
] | null | null | null | src/zojax/catalog/tests/__init__.py | Zojax/zojax.catalog | 1be9ef2cd4516d6c1dcfe7da52c4d438852e7ea6 | [
"ZPL-2.1"
] | null | null | null | src/zojax/catalog/tests/__init__.py | Zojax/zojax.catalog | 1be9ef2cd4516d6c1dcfe7da52c4d438852e7ea6 | [
"ZPL-2.1"
] | null | null | null | # This file is necessary to make this directory a package.
from zojax.catalog.catalog import queryCatalog
| 26.75 | 58 | 0.813084 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 58 | 0.542056 |
085e0152d8a979274c20816965dae9f9c36f8c65 | 6,066 | py | Python | src/bpp/views/raporty/ranking_autorow.py | iplweb/django-bpp | 85f183a99d8d5027ae4772efac1e4a9f21675849 | [
"BSD-3-Clause"
] | 1 | 2017-04-27T19:50:02.000Z | 2017-04-27T19:50:02.000Z | src/bpp/views/raporty/ranking_autorow.py | mpasternak/django-bpp | 434338821d5ad1aaee598f6327151aba0af66f5e | [
"BSD-3-Clause"
] | 41 | 2019-11-07T00:07:02.000Z | 2022-02-27T22:09:39.000Z | src/bpp/views/raporty/ranking_autorow.py | iplweb/bpp | f027415cc3faf1ca79082bf7bacd4be35b1a6fdf | [
"BSD-3-Clause"
] | null | null | null | # -*- encoding: utf-8 -*-
import itertools
try:
from django.core.urlresolvers import reverse
except ImportError:
from django.urls import reverse
from django.db.models.aggregates import Sum
from django.template.defaultfilters import safe
from django.utils.functional import cached_property
from django_tables2 import Column
from django_tables2.export.views import ExportMixin
from django_tables2.tables import Table
from django_tables2.views import SingleTableView
from bpp.models import Autor, Sumy, OpcjaWyswietlaniaField, Uczelnia
from bpp.models.struktura import Wydzial
class RankingAutorowTable(Table):
class Meta:
attrs = {"class": "bpp-table"}
model = Autor
order_by = ("-impact_factor_sum", "autor__nazwisko")
fields = (
"lp",
"autor",
"impact_factor_sum",
"liczba_cytowan_sum",
"punkty_kbn_sum",
)
lp = Column(
empty_values=(),
orderable=False,
attrs={"td": {"class": "bpp-lp-column"}},
exclude_from_export=True,
)
autor = Column(order_by=("autor__nazwisko", "autor__imiona"))
punkty_kbn_sum = Column("Punkty PK", "punkty_kbn_sum")
impact_factor_sum = Column("Impact Factor", "impact_factor_sum")
liczba_cytowan_sum = Column("Liczba cytowań", "liczba_cytowan_sum")
def render_lp(self):
self.lp_counter = getattr(
self, "lp_counter", itertools.count(self.page.start_index())
)
return "%i." % next(self.lp_counter)
def render_autor(self, record):
return safe(
'<a href="%s">%s</a>'
% (
reverse("bpp:browse_autor", args=(record.autor.slug,)),
str(record.autor),
)
)
def value_autor(self, record):
return str(record.autor)
class RankingAutorowJednostkaWydzialTable(RankingAutorowTable):
class Meta:
fields = (
"lp",
"autor",
"jednostka",
"wydzial",
"impact_factor_sum",
"liczba_cytowan_sum",
"punkty_kbn_sum",
)
order_by = ("-impact_factor_sum", "autor__nazwisko")
jednostka = Column(accessor="jednostka.nazwa")
wydzial = Column(accessor="jednostka.wydzial.nazwa")
class RankingAutorow(ExportMixin, SingleTableView):
template_name = "raporty/ranking-autorow.html"
def get_table_class(self):
if self.rozbij_na_wydzialy:
return RankingAutorowJednostkaWydzialTable
return RankingAutorowTable
@cached_property
def rozbij_na_wydzialy(self):
return self.request.GET.get("rozbij_na_jednostki", "True") == "True"
@cached_property
def tylko_afiliowane(self):
return self.request.GET.get("tylko_afiliowane", "False") == "True"
def get_queryset(self):
qset = Sumy.objects.all()
qset = qset.filter(
rok__gte=self.kwargs["od_roku"], rok__lte=self.kwargs["do_roku"]
)
wydzialy = self.get_wydzialy()
if wydzialy:
qset = qset.filter(jednostka__wydzial__in=wydzialy)
if self.tylko_afiliowane:
qset = qset.filter(jednostka__skupia_pracownikow=True)
qset = qset.filter(afiliuje=True)
if self.rozbij_na_wydzialy:
qset = qset.prefetch_related("jednostka__wydzial").select_related(
"autor", "jednostka"
)
qset = qset.group_by("autor", "jednostka")
else:
qset = qset.select_related("autor")
qset = qset.group_by("autor")
qset = qset.annotate(
impact_factor_sum=Sum("impact_factor"),
liczba_cytowan_sum=Sum("liczba_cytowan"),
punkty_kbn_sum=Sum("punkty_kbn"),
)
qset = qset.exclude(impact_factor_sum=0, liczba_cytowan_sum=0, punkty_kbn_sum=0)
qset = qset.exclude(autor__pokazuj=False)
uczelnia = Uczelnia.objects.get_default()
if uczelnia is not None:
ukryte_statusy = uczelnia.ukryte_statusy("rankingi")
if ukryte_statusy:
qset = qset.exclude(status_korekty_id__in=ukryte_statusy)
return qset
def get_dostepne_wydzialy(self):
return Wydzial.objects.filter(zezwalaj_na_ranking_autorow=True)
def get_wydzialy(self):
base_query = self.get_dostepne_wydzialy()
wydzialy = self.request.GET.getlist("wydzialy[]")
if wydzialy:
try:
wydzialy = base_query.filter(pk__in=[int(x) for x in wydzialy])
return wydzialy
except (TypeError, ValueError):
pass
return base_query
def get_context_data(self, **kwargs):
context = super(SingleTableView, self).get_context_data(**kwargs)
context["od_roku"] = self.kwargs["od_roku"]
context["do_roku"] = self.kwargs["do_roku"]
jeden_rok = False
if self.kwargs["od_roku"] == self.kwargs["do_roku"]:
context["rok"] = self.kwargs["od_roku"]
jeden_rok = True
wydzialy = self.get_wydzialy()
context["wydzialy"] = wydzialy
if jeden_rok:
context["table_title"] = "Ranking autorów za rok %s" % context["rok"]
else:
context["table_title"] = "Ranking autorów za lata %s - %s" % (
context["od_roku"],
context["do_roku"],
)
context["tab_subtitle"] = ""
if len(wydzialy) != len(self.get_dostepne_wydzialy()):
context["table_subtitle"] = ", ".join([x.nazwa for x in wydzialy])
return context
def get_table_kwargs(self):
uczelnia = Uczelnia.objects.all().first()
pokazuj = uczelnia.pokazuj_liczbe_cytowan_w_rankingu
if pokazuj == OpcjaWyswietlaniaField.POKAZUJ_NIGDY or (
pokazuj == OpcjaWyswietlaniaField.POKAZUJ_ZALOGOWANYM
and self.request.user.is_anonymous
):
return {"exclude": ("liczba_cytowan_sum",)}
return {}
| 32.612903 | 88 | 0.616716 | 5,476 | 0.90229 | 0 | 0 | 250 | 0.041193 | 0 | 0 | 1,024 | 0.168726 |
f22aabe1afa4a1593594ef47c8110872cb757c3c | 16,701 | py | Python | client-lib/pypi/nsrr/nsrr.py | nsrr/nsrr-cloud | a1e33bc3ba3220600e8b1973882d2ed76a7277c6 | [
"MIT"
] | null | null | null | client-lib/pypi/nsrr/nsrr.py | nsrr/nsrr-cloud | a1e33bc3ba3220600e8b1973882d2ed76a7277c6 | [
"MIT"
] | null | null | null | client-lib/pypi/nsrr/nsrr.py | nsrr/nsrr-cloud | a1e33bc3ba3220600e8b1973882d2ed76a7277c6 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
import requests
from requests.structures import CaseInsensitiveDict
import json
import getpass
from pathlib import Path
import hashlib
import pandas as pd
import gzip
from multiprocessing import Process
# Global variables
#API_SERVER='https://dev-cloud.sleepdata.org/api/v1'
API_SERVER='https://cloud.sleepdata.org/api/v1'
#API_SERVER='http://localhost:9002/api/v1'
procs=[]
all_decompress_edfz=[]
def get_input_token():
enter_pass_text="""
Get your token here: https://sleepdata.org/token
Your input is hidden while entering token.
Enter your token:
"""
return getpass.getpass(enter_pass_text)
def read_token_from_file(file_name):
try:
f=open(file_name,'r')
user_token=f.readline().strip()
f.close()
return user_token
except Exception as e:
print("ERROR: the following error occured while reading token from input file")
print(e)
def get_user_access(user_token):
headers = CaseInsensitiveDict()
headers= {'token': user_token}
try:
resp = requests.get(API_SERVER+'/list/access', headers=headers)
if(resp.ok and resp.status_code == 200):
user_access_json=json.loads(resp.content)
if(user_access_json["datasets"]):
df=pd.DataFrame(user_access_json["datasets"], columns=["Dataset", "Full Name", "URL","Access"])
print(df.to_string(index=False))
else:
print("ERROR: Unable to list user access, please verify input token, approved DUA and try again")
except Exception as e:
print("ERROR: Unable to process request at this time, try again later")
def get_auth_token(user_token, dataset_name):
headers = CaseInsensitiveDict()
headers={'token': user_token}
payload = {'dataset_name': dataset_name}
try:
resp = requests.get(API_SERVER+'/auth-token', params=payload, headers=headers)
if(resp.ok and resp.status_code == 200):
auth_token=json.loads(resp.content)["auth_token"]
else:
auth_token=False
return auth_token
except Exception as e:
return False
def get_download_url(auth_token=None, file_name=None):
payload = {'file_name': file_name}
try:
if(auth_token):
auth_headers = CaseInsensitiveDict()
auth_headers = {'Authorization': 'Bearer %s' %auth_token}
resp = requests.get(API_SERVER+'/download/url/controlled', params=payload, headers=auth_headers)
else:
resp = requests.get(API_SERVER+'/download/url/open', params=payload)
if(resp.ok and resp.status_code == 200):
return resp.content
else:
return False
except Exception as e:
return False
def download_file(url, download_file_name, no_md5,decompress, metadata):
global procs, all_decompress_edfz
try:
file_name_split=download_file_name.split("/")
file_name=file_name_split[-1]
if(decompress and file_name.split(".")[-1]=='idx'):
print("Skipping download of file: ",download_file_name)
return True
file_download_path="/".join(file_name_split[:-1])
path = Path(str(Path.cwd())+"/"+file_download_path)
if not path.exists():
path.mkdir(parents= True, exist_ok= True)
response=requests.get(url, stream=True)
f_download=path / file_name
with f_download.open("wb+") as f:
for chunk in response.iter_content(chunk_size=1024):
f.write(chunk)
f.close()
if no_md5:
if not f_download.stat().st_size == metadata["size"]:
delete_file_path=Path(str(Path.cwd())+"/"+download_file_name)
delete_file_path.unlink()
return False
else:
print("Downloaded file: ",download_file_name," ",metadata["size"],"bytes")
else:
md5_object = hashlib.md5()
block_size = 128 * md5_object.block_size
md5_file = open(f_download, 'rb')
chunk = md5_file.read(block_size)
while chunk:
md5_object.update(chunk)
chunk = md5_file.read(block_size)
md5_hash = md5_object.hexdigest()
md5_file.close()
if not md5_hash == metadata["md5"]:
delete_file_path=Path(str(Path.cwd())+"/"+download_file_name)
                delete_file_path.unlink()
return False
else:
print("Downloaded file: ",download_file_name," ", metadata["size"],"bytes")
# call decompress fn
if(decompress and file_name.split(".")[-1]=="edfz"):
decompress_proc = Process(target=decompress_edf, args=(download_file_name,))
decompress_proc.start()
procs.append(decompress_proc)
all_decompress_edfz.append({"name": f_download, "size":f_download.stat().st_size})
return True
except Exception as e:
return False
def get_all_files_list(dataset_name):
payload = {'dataset_name': dataset_name}
try:
resp = requests.get(API_SERVER+'/list/all-files', params=payload)
if(resp.ok and resp.status_code == 200):
return resp.content
else:
return False
except Exception as e:
return False
def download_wrapper(all_files,user_token, dataset_name,download_path, force, no_md5, decompress):
if(decompress):
global procs, all_decompress_edfz
all_download_size=0
all_files=json.loads(all_files)
for f in all_files["open_files"]:
if not download_path in f:
continue
if not force:
file_path=""
if decompress and f.split(".")[-1]=="edfz":
file_path=Path(str(Path.cwd())+"/"+".".join(f.split(".")[:-1])+".edf")
if file_path.is_file():
print("Skipping download of existing file: {0}".format(f))
continue
else:
file_path=Path(str(Path.cwd())+"/"+f)
if file_path.is_file():
if file_path.stat().st_size == all_files["open_files"][f]['size']:
print("Skipping download of existing file: {0}".format(f))
continue
url=get_download_url(file_name=f)
if(url):
download_success=download_file(url,f,no_md5,decompress,all_files["open_files"][f])
if not download_success:
print("ERROR: Unable to download file {0}".format(f))
else:
                    if not (decompress and f.split(".")[-1] == "idx"):
all_download_size+=all_files["open_files"][f]["size"]
else:
print("ERROR: Unable to get download URL for file {0}, try again later".format(f))
if(all_files["controlled_files"]):
if "/" in download_path:
download_path="/".join(download_path.split("/")[1:])
for f in list(all_files["controlled_files"]):
if not download_path in f:
del all_files["controlled_files"][f]
controlled_files_count=len(all_files["controlled_files"])
if controlled_files_count == 0:
if all_download_size != 0:
print("Total size of downloaded file(s) is ",all_download_size, "bytes")
return
if not user_token:
print("Error: Input token is empty, skipping {0} controlled file(s) download".format(controlled_files_count))
if all_download_size != 0:
print("Total size of downloaded file(s) is ",all_download_size, "bytes")
return
for f in all_files["controlled_files"]:
f_with_dataset=dataset_name+"/"+f
if not force:
file_path=""
if decompress and f_with_dataset.split(".")[-1]=="edfz":
file_path=Path(str(Path.cwd())+"/"+".".join(f_with_dataset.split(".")[:-1])+".edf")
if file_path.is_file():
print("Skipping download of existing file: {0}".format(f))
controlled_files_count-=1
continue
else:
file_path=Path(str(Path.cwd())+"/"+f_with_dataset)
if file_path.is_file():
if file_path.stat().st_size == all_files["controlled_files"][f]['size']:
print("Skipping download of existing file: {0}".format(f))
controlled_files_count-=1
continue
# get bearer token
auth_token=get_auth_token(user_token, dataset_name)
if(auth_token):
url=get_download_url(auth_token=auth_token,file_name=f)
if(url):
download_success=download_file(url,f_with_dataset,no_md5,decompress,all_files["controlled_files"][f])
if not download_success:
print("ERROR: Unable to download file {0}".format(f))
else:
controlled_files_count-=1
                        if not (decompress and f.split(".")[-1] == "idx"):
all_download_size+=all_files["controlled_files"][f]["size"]
else:
print("ERROR: Unable to get download URL for file {0}, try again later".format(f))
else:
print("ERROR: Unable to (re)download {0} controlled files as token verification failed, try again later".format(controlled_files_count))
break
sum_=0
try:
if decompress:
for proc in procs:
proc.join()
for f in all_decompress_edfz:
sum_+=Path('.'.join(str(f["name"]).split(".")[:-1])+".edf").stat().st_size -f["size"]
except Exception as e:
print("ERROR: Calculation failed for additional space used by decompressed files")
return
if all_download_size != 0:
print("Total size of downloaded file(s) is ",all_download_size, "bytes")
if sum_ !=0:
print("Total additional space consumed by decompression is ", sum_, "bytes")
def download_all_files(user_token, dataset_name, force, no_md5, decompress):
try:
download_path=''
if "/" in dataset_name:
download_path=dataset_name
dataset_name=dataset_name.split("/")[0]
all_files=get_all_files_list(dataset_name)
if(all_files):
download_wrapper(all_files,user_token, dataset_name, download_path, force, no_md5, decompress)
else:
print("ERROR: Unable to retrieve files list of dataset {0}, check list of cloud hosted datasets and try again".format(dataset_name))
except Exception as e:
print("ERROR: Unable to complete the download of files")
def get_subject_files_list(dataset_name,subject):
payload = {'dataset_name': dataset_name, 'subject': subject}
try:
resp = requests.get(API_SERVER+'/list/subject-files', params=payload)
if(resp.ok and resp.status_code == 200):
return resp.content
else:
return False
except Exception as e:
return False
def download_subject_files(user_token,dataset_name,subject, force, no_md5, decompress):
download_path=''
if "/" in dataset_name:
download_path=dataset_name
dataset_name=dataset_name.split("/")[0]
all_files=get_subject_files_list(dataset_name,subject)
if(all_files):
download_wrapper(all_files,user_token, dataset_name, download_path, force, no_md5, decompress)
else:
print("ERROR: Unable to retrieve files list of subject {0} of dataset {1}, check list of cloud hosted datasets and try again".format(subject,dataset_name))
def list_all_subjects(dataset_name):
payload = {'dataset_name': dataset_name}
try:
resp = requests.get(API_SERVER+'/list/all-subjects', params=payload)
if(resp.ok and resp.status_code == 200):
all_subjects_json=json.loads(resp.content)
if(all_subjects_json["subjects"]):
all_subjects="\n".join(list(all_subjects_json["subjects"]))
print(all_subjects)
else:
print("ERROR: Unable to list all subject of {0} dataset, check list of cloud hosted datasets and try again".format(dataset_name))
except Exception as e:
print("ERROR: Unable to process request at this time, try again later")
def list_all_files(dataset_name):
download_path=''
if "/" in dataset_name:
download_path=dataset_name
dataset_name=dataset_name.split("/")[0]
try:
all_files=get_all_files_list(dataset_name)
if not all_files:
print("ERROR: Unable to retrieve files list of dataset {0}, check list of cloud hosted datasets and try again".format(dataset_name))
return
all_files=json.loads(all_files)
if(all_files):
print_files=[]
for f in all_files["open_files"]:
if not download_path in f:
continue
print_files.append(["/".join(f.split("/")[1:]),all_files["open_files"][f]["size"]])
if download_path:
download_path='/'.join(download_path.split("/")[1:])
for f in all_files["controlled_files"]:
if not download_path in f:
continue
print_files.append([f,all_files["controlled_files"][f]["size"]])
print_files=sorted(print_files,key= lambda x:x[0])
df=pd.DataFrame(print_files, columns=["File Name", "Size(Bytes)"])
if df.empty:
print("ERROR: No files found for given input dataset (path): ",dataset_name+"/"+download_path)
else:
print(df.to_string(index=False))
except Exception as e:
print("ERROR: Unable to process request at this time, try again later")
def generate_nested_dirs(directories_list):
try:
nested_dirs={}
for d in directories_list:
temp=nested_dirs
for sub_dir in d.split("/"):
if temp.get(sub_dir) is None:
temp[sub_dir]={}
temp=temp[sub_dir]
return nested_dirs
except Exception as e:
return False
def print_tree_structure(nested_dirs_dict, indent, parent):
try:
for d in list(nested_dirs_dict):
if indent == 0:
print('{0: <50}{1}'.format(d,parent+"/"+d))
else:
print('{0: <50}{1}'.format(' '*indent+'+--'+d,parent+"/"+d))
if nested_dirs_dict[d]:
print_tree_structure(nested_dirs_dict[d], indent+1, parent+"/"+d)
return True
except Exception as e:
return False
def list_all_directories(dataset_name):
try:
all_files=get_all_files_list(dataset_name)
if not all_files:
print("ERROR: Unable to retrieve files list of dataset {0}, check list of cloud hosted datasets and try again".format(dataset_name))
return
all_files=json.loads(all_files)
if(all_files):
print_dirs=[]
for f in all_files["open_files"]:
print_dirs.append("/".join(f.split("/")[1:-1]))
for f in all_files["controlled_files"]:
print_dirs.append("/".join(f.split("/")[:-1]))
print_dirs=sorted(set(print_dirs))
nested_dirs_dict=generate_nested_dirs(print_dirs)
if nested_dirs_dict:
printed=print_tree_structure(nested_dirs_dict,0,dataset_name)
if not printed:
print("ERROR: Unable to show directory structure of dataset {0}, try again later".format(dataset_name))
except Exception as e:
print("ERROR: Unable to process request at this time, try again later")
def decompress_edf(edfz_file_name):
full_edfz_file_name = Path(str(Path.cwd())+"/"+edfz_file_name)
try:
edf_data=''
with gzip.open(full_edfz_file_name, 'rb') as f:
edf_data = f.read()
        edf_to_write=Path('.'.join(str(full_edfz_file_name).split(".")[:-1])+".edf")
with open(edf_to_write,'wb') as f:
f.write(edf_data)
full_edfz_file_name.unlink()
print("Decompressed file: ",edfz_file_name, "to",'.'.join(edfz_file_name.split(".")[:-1])+".edf","and deleted original")
except Exception as e:
print("ERROR: Unable to decompress EDFZ file: ",edfz_file_name) | 42.496183 | 163 | 0.598946 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,482 | 0.208491 |
f22b087ab319568e891a7406ef151ad2f4d6b818 | 509 | py | Python | assignment2.py | talsperre/random-walk | 5c810f571c9de28926850e1ad70ff4c29df9c0f4 | [
"MIT"
] | null | null | null | assignment2.py | talsperre/random-walk | 5c810f571c9de28926850e1ad70ff4c29df9c0f4 | [
"MIT"
] | null | null | null | assignment2.py | talsperre/random-walk | 5c810f571c9de28926850e1ad70ff4c29df9c0f4 | [
"MIT"
] | null | null | null | import numpy as np
N = 100
R = 10000
R_range = range(R)
size = (N, 3)
C = np.zeros((N, 3))
k = 1
print ("100")
print ("STEP: ", k)
for i in range(N):
print ("He ", C[i, 0], " ", C[i, 1], " ", C[i, 2])
k += 1
for j in range(R):
    # Draw a random step for every walker and normalize each row to unit
    # length so every step has length 1. (Note: normalizing uniform samples
    # from a cube slightly biases directions toward the corners; Gaussian
    # samples would give uniform directions.)
    A = np.random.uniform(-1, 1, size)
    B = np.sum(np.multiply(A, A), axis=1)
    B = np.sqrt(B)
    B = B.reshape(N, 1)
    Norm_A = A / B
    C += Norm_A
if j % 10 == 0:
print ("100")
print ("STEP: ", k)
for i in range(N):
print ("He ", C[i, 0], " ", C[i, 1], " ", C[i, 2])
k += 1 | 18.851852 | 53 | 0.489194 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 48 | 0.094303 |
f22fac0a3ced91e4e4e5768a9d363783d0f24bd3 | 1,462 | py | Python | parallel/images_common.py | minrk/ipython-cse17 | 16a9059c7054a8bd4977a3cb8b09c100ea779069 | [
"BSD-3-Clause"
] | 3 | 2017-03-02T07:11:37.000Z | 2017-03-03T06:13:32.000Z | parallel/images_common.py | minrk/ipython-cse17 | 16a9059c7054a8bd4977a3cb8b09c100ea779069 | [
"BSD-3-Clause"
] | null | null | null | parallel/images_common.py | minrk/ipython-cse17 | 16a9059c7054a8bd4977a3cb8b09c100ea779069 | [
"BSD-3-Clause"
] | null | null | null | import os
import matplotlib.pyplot as plt
from skimage.io import imread
def plot_corners(img, corners, show=True):
"""Display the image and plot all contours found"""
plt.imshow(img, cmap='gray')
plt.plot(corners[:,1], corners[:,0], 'r+', markeredgewidth=1.5, markersize=8) # Plot corners
plt.axis('image')
plt.xticks([])
plt.yticks([])
if show:
plt.show()
def find_corners(path, min_distance=5):
"""Find corners in an image at path
Returns the image and the corner lists.
"""
from skimage.feature import corner_harris, corner_peaks
    img = imread(path, as_gray=True)  # skimage.io.imread takes as_gray, not flatten
corners = corner_peaks(corner_harris(img), min_distance=min_distance)
return img, corners
def get_corners_image(path):
"""Given a path, return a PNG of the image with contour lines
Calls both find_contours and plot_contours
"""
from IPython.core.pylabtools import print_figure
img, corners = find_corners(path)
plot_corners(img, corners, show=False)
fig = plt.gcf()
pngdata = print_figure(fig)
plt.close(fig)
return pngdata
def get_pictures(pictures_dir):
"""Return a list of picture files found in pictures_dir"""
pictures = []
for directory, subdirs, files in os.walk(pictures_dir):
for fname in files:
if fname.lower().endswith(('.jpg', '.png')):
pictures.append(os.path.join(directory, fname))
return pictures
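if __name__ == '__main__':
    # Minimal demo (a sketch): 'pictures' is a hypothetical directory of
    # .jpg/.png files; annotate the corners found in each one.
    for pic in get_pictures('pictures'):
        img, corners = find_corners(pic)
        plot_corners(img, corners)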
| 29.24 | 96 | 0.666211 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 365 | 0.249658 |
f23153ff9da39e77238d222d2874c0c47b3effe7 | 1,765 | py | Python | tests/test_copies.py | mschmidtkorth/shallow-backup | 6629fed7d5a3a13eb068c7ef0168cfa8ffbd3bbf | [
"MIT"
] | 1 | 2021-07-25T19:26:47.000Z | 2021-07-25T19:26:47.000Z | tests/test_copies.py | mschmidtkorth/shallow-backup | 6629fed7d5a3a13eb068c7ef0168cfa8ffbd3bbf | [
"MIT"
] | null | null | null | tests/test_copies.py | mschmidtkorth/shallow-backup | 6629fed7d5a3a13eb068c7ef0168cfa8ffbd3bbf | [
"MIT"
] | null | null | null | import os
import sys
import pytest
import shutil
from .test_utils import setup_env_vars, unset_env_vars, BACKUP_DEST_DIR, FAKE_HOME_DIR, DIRS
sys.path.insert(0, "../shallow_backup")
from shallow_backup.utils import copy_dir_if_valid
TEST_TEXT_FILE = os.path.join(FAKE_HOME_DIR, 'test-file.txt')
class TestCopyMethods:
"""
Test the functionality of copying
"""
@staticmethod
def setup_method():
setup_env_vars()
try:
os.mkdir(FAKE_HOME_DIR)
except FileExistsError:
shutil.rmtree(FAKE_HOME_DIR)
os.mkdir(FAKE_HOME_DIR)
print(f"Created {TEST_TEXT_FILE}")
open(TEST_TEXT_FILE, "w+").close()
@staticmethod
def teardown_method():
for directory in DIRS:
if os.path.isdir(directory):
shutil.rmtree(directory)
unset_env_vars()
def test_copy_dir(self):
"""
Test that copying a directory works as expected
"""
# TODO: Test that all subfiles and folders are copied.
test_dir = 'subdir-to-copy'
test_path = os.path.join(FAKE_HOME_DIR, test_dir)
os.mkdir(test_path)
copy_dir_if_valid(FAKE_HOME_DIR, BACKUP_DEST_DIR)
assert os.path.isdir(test_path)
assert os.path.isfile(os.path.join(BACKUP_DEST_DIR, os.path.split(TEST_TEXT_FILE)[1]))
assert os.path.isdir(os.path.join(BACKUP_DEST_DIR, test_dir))
    @pytest.mark.parametrize('invalid', [".Trash", ".npm", ".cache", ".rvm"])  # a list keeps the parametrized order deterministic
def test_copy_dir_invalid(self, invalid):
"""
Test that attempting to copy an invalid directory fails
"""
copy_dir_if_valid(invalid, FAKE_HOME_DIR)
        assert not os.path.isdir(os.path.join(FAKE_HOME_DIR, invalid))  # FAKE_HOME_DIR is the copy destination above
| 32.090909 | 94 | 0.65779 | 1,466 | 0.830595 | 0 | 0 | 814 | 0.46119 | 0 | 0 | 371 | 0.210198 |
f2320f768e412bebfaa5c2e31eeb4a3c480eacaf | 1,395 | py | Python | loan/killeragent.py | Casper-Smet/LOAN | 3aabf80cf4314bcba33779329fc6e4971b85e742 | [
"MIT"
] | null | null | null | loan/killeragent.py | Casper-Smet/LOAN | 3aabf80cf4314bcba33779329fc6e4971b85e742 | [
"MIT"
] | null | null | null | loan/killeragent.py | Casper-Smet/LOAN | 3aabf80cf4314bcba33779329fc6e4971b85e742 | [
"MIT"
] | null | null | null | from collections import namedtuple
import networkx as nx
from mesa import Agent, Model
class KillerAgent(Agent):
def __init__(self, unique_id: int, model: Model, creator, pos: int, target_location: int, target_disease: str) -> None:
super().__init__(unique_id, model)
self.creator = creator
self.pos = pos
self.target_location = target_location
self.target_disease = target_disease
self.arrived_on_location = False
self.shortest_path_to_target_node = []
def perceive(self) -> None:
self.arrived_on_location = self.pos == self.target_location
self.shortest_path_to_target_node = nx.shortest_path(G=self.model.network, source=self.pos, target=self.target_location)
def act(self) -> None:
...
def update(self) -> None:
if self.arrived_on_location:
if self.pos in self.model.ill_vertices:
self.model.restore_vertex(self.pos)
self.model.grid._remove_agent(self, self.pos)
self.model.schedule.remove(self)
else:
self.model.grid.move_agent(self, self.shortest_path_to_target_node[1])
def __repr__(self) -> str:
return f"{self.__class__.__name__} {self.model}/{self.unique_id}: Position {self.pos}"
def __str__(self) -> str:
return self.__repr__()
def emojify(self):
return " 💉" | 34.875 | 128 | 0.658065 | 1,308 | 0.935622 | 0 | 0 | 0 | 0 | 0 | 0 | 86 | 0.061516 |
f23235dddab2a9fffc993f7fe1be533663c51d2b | 290 | py | Python | src/calc.py | ceIery/epic7-speed-calculator | 2f91e57117e2b6873772e6a703e47241570ab75f | [
"MIT"
] | null | null | null | src/calc.py | ceIery/epic7-speed-calculator | 2f91e57117e2b6873772e6a703e47241570ab75f | [
"MIT"
] | null | null | null | src/calc.py | ceIery/epic7-speed-calculator | 2f91e57117e2b6873772e6a703e47241570ab75f | [
"MIT"
] | null | null | null | """
Given a base speed value and a list of percentages, calculates the speed value
for each percentage
"""
def get_speeds(percents, base):
speeds = []
for percent in percents:
speeds.append(round(((int)(base) * ((int)(percent) / 100))))
print(speeds)
return speeds
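if __name__ == "__main__":
    # Example (a sketch): a base speed of 100 at 150% and 200%.
    print(get_speeds([150, 200], 100))  # -> [150, 200]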
| 24.166667 | 78 | 0.662069 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 106 | 0.365517 |
f2330e7134a6c2ae1cacee5b851dbdfec9f5f1d4 | 11,762 | py | Python | src/magi/actions/base.py | personalrobotics/magipy | 6f86d6938168f580f667cfc093cf7e9f218e2853 | [
"BSD-3-Clause"
] | null | null | null | src/magi/actions/base.py | personalrobotics/magipy | 6f86d6938168f580f667cfc093cf7e9f218e2853 | [
"BSD-3-Clause"
] | 1 | 2018-01-06T00:24:06.000Z | 2018-01-06T00:24:06.000Z | src/magi/actions/base.py | personalrobotics/magipy | 6f86d6938168f580f667cfc093cf7e9f218e2853 | [
"BSD-3-Clause"
] | null | null | null | """Base classes, context managers, and exceptions for MAGI actions."""
from abc import ABCMeta, abstractmethod
import logging
from openravepy import KinBody, Robot
LOGGER = logging.getLogger(__name__)
LOGGER.setLevel(logging.INFO)
class SaveAndJump(object):
"""
Save the state of the environment and jump the environment to the result of
a solution when entering. Jump back to the original state when exiting.
"""
def __init__(self, solution, env):
"""
@param solution: a Solution object
@param env: the OpenRAVE environment to call save and jump on
"""
self.solution = solution
self.env = env
def __enter__(self):
"""First call save on the solution, then jump."""
LOGGER.debug('Begin SaveAndJump: %s', self.solution.action.get_name())
self.cm = self.solution.save(self.env)
self.cm.__enter__()
self.solution.jump(self.env)
def __exit__(self, exc_type, exc_value, traceback):
"""Exit the context manager created when this context manager was entered."""
        LOGGER.debug('End SaveAndJump: %s', self.solution.action.get_name())
retval = self.cm.__exit__(exc_type, exc_value, traceback)
return retval
class Validate(object):
"""Check a precondition when entering and a postcondition when exiting."""
def __init__(self,
env,
precondition=None,
postcondition=None,
detector=None):
"""
@param env: OpenRAVE environment
@param precondition: Validator that validates preconditions
@param postcondition: Validator that validates postconditions
@param detector: object detector (implements DetectObjects, Update)
"""
self.env = env
self.precondition = precondition
self.postcondition = postcondition
self.detector = detector
def __enter__(self):
"""Validate precondition."""
LOGGER.info('Validate precondition: %s', self.precondition)
if self.precondition is not None:
self.precondition.validate(self.env, self.detector)
def __exit__(self, exc_type, exc_value, traceback):
"""Validate postcondition."""
LOGGER.info('Validate postcondition: %s', self.postcondition)
if self.postcondition is not None:
self.postcondition.validate(self.env, self.detector)
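# A minimal usage sketch for Validate (illustrative only; `env`, `pre`, and
# `post` are hypothetical stand-ins for an OpenRAVE environment and two
# Validator objects):
#   with Validate(env, precondition=pre, postcondition=post):
#       ...  # mutate the environment; both conditions are checked around it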
class ActionError(Exception):
"""Base exception class for actions."""
KNOWN_KWARGS = {'deterministic'}
def __init__(self, *args, **kwargs):
super(ActionError, self).__init__(*args)
assert self.KNOWN_KWARGS.issuperset(kwargs.keys())
self.deterministic = kwargs.get('deterministic', None)
class CheckpointError(ActionError):
"""Exception class for checkpoints."""
pass
class ExecutionError(Exception):
"""Exception class for executing solutions."""
def __init__(self, message='', solution=None):
super(ExecutionError, self).__init__(message)
self.failed_solution = solution
class ValidationError(Exception):
"""Exception class for validating solutions."""
def __init__(self, message='', validator=None):
super(ValidationError, self).__init__(message)
self.failed_validator = validator
class Action(object):
"""Abstract base class for actions."""
__metaclass__ = ABCMeta
def __init__(self,
name=None,
precondition=None,
postcondition=None,
checkpoint=False):
"""
@param name: name of the action
@param precondition: Validator that validates preconditions
@param postcondition: Validator that validates postconditions
@param checkpoint: True if this action is a checkpoint - once a Solution
is achieved, neither the plan method of this action nor any of its
predecessors will be called again
"""
self._name = name
self.precondition = precondition
self.postcondition = postcondition
self.checkpoint = checkpoint
def get_name(self):
"""Return the name of the action."""
return self._name
@abstractmethod
def plan(self, env):
"""
Return a Solution that realizes this action.
This method attempts to realize this action in the input environment, if
possible. It MUST restore the environment to its original state before
returning. If successful, this method returns a Solution object.
Otherwise, it raises an ActionError.
The implementation of this method MAY be stochastic. If so, the method
may return a different solution each time it is called.
The environment MUST be locked when calling this method.
Ideally, planners should "with Validate(env, self.precondition)" when
calling this.
@param env: OpenRAVE environment
@return Solution object
"""
pass
def execute(self, env, simulate):
"""
Plan, postprocess, and execute this action.
This is a helper method that wraps the plan() method.
The environment MUST NOT be locked while calling this method.
@param env: OpenRAVE environment
@param simulate: flag to run in simulation
@return result of executing the action
"""
with env:
solution = self.plan(env)
executable_solution = solution.postprocess(env)
return executable_solution.execute(env, simulate)
class Solution(object):
"""Abstract base class for solutions."""
__metaclass__ = ABCMeta
def __init__(self,
action,
deterministic,
precondition=None,
postcondition=None):
"""
@param action: Action that generated this Solution
@param deterministic: True if calling the plan method on the action
multiple times will give the exact same solution
@param precondition: Validator. Can be more specific than action's
precondition.
@param postcondition: Validator. Can be more specific than action's
postcondition.
"""
self.action = action
self.deterministic = deterministic
self.precondition = precondition if precondition else action.precondition
self.postcondition = postcondition if postcondition else action.postcondition
def save_and_jump(self, env):
"""
Return a context manager that preserves the state of the environment,
then jumps the environment to the result of this solution.
This context manager MUST restore the environment to its original state
before returning.
@param env: OpenRAVE environment
@return context manager
"""
return SaveAndJump(self, env)
@abstractmethod
def save(self, env):
"""
Return a context manager that preserves the state of the environment.
This method returns a context manager that preserves the state of the
robot that is changed by the jump() method or by executing the
solution. This context manager MUST restore the environment to its
original state before returning.
@param env: OpenRAVE environment
@return context manager
"""
pass
@abstractmethod
def jump(self, env):
"""
Set the state of the environment to the result of this solution.
The input environment to this method MUST be in the same state that was
used to plan this action. The environment MUST be modified to match the
result of executing the action. This method SHOULD perform the minimal
computation necessary to achieve this result.
The environment MUST be locked while calling this method.
@param env: OpenRAVE environment
"""
pass
@abstractmethod
def postprocess(self, env):
"""
Return an ExecutableSolution that can be executed.
Post-process this solution to prepare for execution. The input
environment to this method MUST be in the same state that was used to
plan this action. The environment MUST be restored to this state
before returning.
This operation MUST NOT be capable of failing and MUST NOT change the
state of the environment after executing the action. As long as these
two properties are satisfied, the result MAY be stochastic.
The environment MUST be locked while calling this method.
@param env: OpenRAVE environment
@return ExecutableSolution object
"""
pass
def execute(self, env, simulate):
"""
Postprocess and execute this solution.
This is a helper method that wraps the postprocess() method.
The environment MUST NOT be locked while calling this method.
@param env: OpenRAVE environment
@param simulate: flag to run in simulation
@return result of executing the solution
"""
with env:
executable_solution = self.postprocess(env)
return executable_solution.execute(env, simulate)
class ExecutableSolution(object):
"""Abstract base class for executing post-processed solutions."""
__metaclass__ = ABCMeta
def __init__(self, solution):
"""
@param solution: Solution that generated this ExecutableSolution
"""
self.solution = solution
self.precondition = solution.precondition
self.postcondition = solution.postcondition
@abstractmethod
def execute(self, env, simulate):
"""
Execute this solution.
If execution fails, this method should raise an ExecutionError.
The environment MUST NOT be locked while calling this method.
@param env: OpenRAVE environment
@param simulate: flag to run in simulation
@return result of executing the solution
"""
pass
def to_key(obj):
"""
Return a tuple that uniquely identifies an object in an Environment.
The output of this function can be passed to from_key to find the
equivalent object in, potentially, a different OpenRAVE environment.
@param obj: object in an OpenRAVE environment
@return tuple that uniquely identifies the object
"""
if obj is None:
return None
elif isinstance(obj, (KinBody, Robot)):
key = obj.GetName(),
elif isinstance(obj, (KinBody.Joint, KinBody.Link)):
key = obj.GetParent().GetName(), obj.GetName()
elif isinstance(obj, Robot.Manipulator):
key = obj.GetRobot().GetName(), obj.GetName()
else:
raise TypeError('Unknown type "{!s}".'.format(type(obj)))
return (type(obj), ) + key
def from_key(env, key):
"""
Return the object identified by the input key in an Environment.
The input of this function is constructed by the to_key function.
@param env: an OpenRAVE environment
@param key: tuple that uniquely identifies the object
@return object in the input OpenRAVE environment
"""
if key is None:
return None
obj_type = key[0]
if issubclass(obj_type, (KinBody, Robot)):
return env.GetKinBody(key[1])
elif issubclass(obj_type, KinBody.Joint):
return env.GetKinBody(key[1]).GetJoint(key[2])
elif issubclass(obj_type, KinBody.Link):
return env.GetKinBody(key[1]).GetLink(key[2])
elif issubclass(obj_type, Robot.Manipulator):
return env.GetRobot(key[1]).GetManipulator(key[2])
else:
raise TypeError('Unknown type "{!s}".'.format(obj_type))
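# Illustrative sketch (not part of the original module): to_key/from_key give
# a picklable handle that can be resolved in another environment. Assumes an
# OpenRAVE environment `env` whose scene contains a robot named 'herb':
#
#     robot = env.GetRobot('herb')
#     key = to_key(robot)               # (Robot, 'herb')
#     assert from_key(env, key) is robot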
| 32.672222 | 85 | 0.655841 | 9,845 | 0.837018 | 0 | 0 | 3,017 | 0.256504 | 0 | 0 | 6,659 | 0.566145 |
f233b62fa43bf27f7df361b2d0940e083df21551 | 6,471 | py | Python | src/core/python/core/io/od.py | railtoolkit/OpenLinTim | 27eba8b6038946ce162e9f7bbc0bd23045029d51 | [
"MIT"
] | null | null | null | src/core/python/core/io/od.py | railtoolkit/OpenLinTim | 27eba8b6038946ce162e9f7bbc0bd23045029d51 | [
"MIT"
] | null | null | null | src/core/python/core/io/od.py | railtoolkit/OpenLinTim | 27eba8b6038946ce162e9f7bbc0bd23045029d51 | [
"MIT"
] | null | null | null | from typing import List
from core.exceptions.input_exceptions import (InputFormatException,
InputTypeInconsistencyException)
from core.model.graph import Graph
from core.model.impl.fullOD import FullOD
from core.model.impl.mapOD import MapOD
from core.model.infrastructure import InfrastructureNode
from core.model.od import OD, ODPair
from core.io.csv import CsvReader, CsvWriter
from core.model.ptn import Stop, Link
from core.util.config import Config, default_config
class ODReader:
"""
Class to read files of od matrices.
"""
def __init__(self, source_file_name: str, od: OD):
"""
Constructor of an ODReader for a demand collection and a given file
name. The given name does not determine which file is read; it is only
used in error messages, so be sure to use the same name here and in
the CsvReader!
"""
self.sourceFileName = source_file_name
self.od = od
def process_od_line(self, args: List[str], lineNumber: int) -> None:
"""
Process the contents of an od matrix line.
:param args: the content of the line
:param lineNumber: the line number, used for error handling
:raise: exceptions if the line does not contain exactly 3 entries or
if the specific types of the entries do not match
the expectations.
"""
if len(args) != 3:
raise InputFormatException(self.sourceFileName, len(args), 3)
try:
origin = int(args[0])
except ValueError:
raise InputTypeInconsistencyException(self.sourceFileName, 1,
lineNumber, "int", args[0])
try:
destination = int(args[1])
except ValueError:
raise InputTypeInconsistencyException(self.sourceFileName, 2,
lineNumber, "int", args[1])
try:
passengers = float(args[2])
except ValueError:
raise InputTypeInconsistencyException(self.sourceFileName, 3,
lineNumber, "float", args[2])
self.od.setValue(origin, destination, passengers)
@staticmethod
def read(od: OD, size: int = None, file_name: str = "", config: Config = Config.getDefaultConfig()) -> OD:
"""
Read the given file into an od object. If parameters are not given but needed,
the respective values will be read from the given config.
:param od: the od to fill. If not given, an empty MapOD will be used. If a size is given, a FullOD of the
corresponding size will be used
:param size: the size of the FullOD to use (if no od is given directly)
:param file_name: the file name to read the od matrix from
:param config: the config to read the parameters from that are not given
:return: the read od matrix
"""
if not od and size:
od = FullOD(size)
if not od:
od = MapOD()
if not file_name:
file_name = config.getStringValue("default_od_file")
reader = ODReader(file_name, od)
CsvReader.readCsv(file_name, reader.process_od_line)
return od
@staticmethod
def readNodeOd(od: OD, size: int = None, file_name: str = "", config: Config = Config.getDefaultConfig()) -> OD:
"""
Read the given file into an od object. If parameters are not given but needed,
the respective values will be read from the given config.
:param od: the od to fill. If not given, an empty MapOD will be used. If a size is given, a FullOD of the
corresponding size will be used
:param size: the size of the FullOD to use (if no od is given directly)
:param file_name: the file name to read the od matrix from
:param config: the config to read the parameters from that are not given
:return: the read od matrix
"""
if not file_name:
file_name = config.getStringValue("filename_od_nodes_file")
return ODReader.read(od, size, file_name, config)
class ODWriter:
"""
Class implementing the writing of an od matrix as a static method. Just
call write(Graph, OD, Config) to write the od matrix to the file
specified in the config.
"""
@staticmethod
def write(ptn: Graph[Stop, Link], od: OD, file_name: str = "", header: str = "",
config: Config = Config.getDefaultConfig()):
"""
Write the given od matrix to the file specified in the config by
default_od_file. Will write all od pairs, including those with weight
0.
:param ptn the ptn the od matrix is based on
:param od the od matrix to write
:param config Used for reading the values of default_od_file and
od_header
:param file_name the file name to write the od matrix to
:param header the header to write in the od file
"""
od_pairs = []
if not file_name:
file_name = config.getStringValue("default_od_file")
if not header:
header = config.getStringValue("od_header")
for origin in ptn.getNodes():
for destination in ptn.getNodes():
od_pairs.append(ODPair(origin.getId(), destination.getId(), od.getValue(origin.getId(), destination.getId())))
CsvWriter.writeListStatic(file_name, od_pairs, ODPair.toCsvStrings, header=header)
@staticmethod
def writeNodeOd(od: OD, file_name: str = "", header: str = "",
config: Config = Config.getDefaultConfig()):
"""
Write the given od matrix to the file specified or the corresponding file name from the config. Will write
only the od pairs with positive demand
:param od: the od object to write
:param file_name: the file to write the od data to
:param header: the header to use
:param config: the config to read parameters from that are needed but not given
"""
if not file_name:
file_name = config.getStringValue("filename_od_nodes_file")
if not header:
header = config.getStringValue("od_nodes_header")
od_pairs = od.getODPairs()
CsvWriter.writeListStatic(file_name, od_pairs, ODPair.toCsvStrings, header=header)
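# Illustrative round trip (hypothetical file names; assumes a Config providing
# the usual LinTim keys and a PTN graph `ptn` built elsewhere):
#
#     od = ODReader.read(MapOD(), file_name="basis/OD.giv")
#     ODWriter.write(ptn, od, file_name="basis/OD-copy.giv", header="# od data")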
| 44.020408 | 126 | 0.626642 | 5,947 | 0.919023 | 0 | 0 | 3,936 | 0.608252 | 0 | 0 | 3,099 | 0.478906 |
f23575bb8b4e289c914a5be32dd736b94767c391 | 4,395 | py | Python | kriging/_kriging.py | ERSSLE/ordinary_kriging | f983081e4f12b0bae03bd042a6f451c65dcb2759 | [
"MIT"
] | 3 | 2020-09-08T16:55:44.000Z | 2021-12-04T15:35:07.000Z | kriging/_kriging.py | ERSSLE/ordinary_kriging | f983081e4f12b0bae03bd042a6f451c65dcb2759 | [
"MIT"
] | null | null | null | kriging/_kriging.py | ERSSLE/ordinary_kriging | f983081e4f12b0bae03bd042a6f451c65dcb2759 | [
"MIT"
] | 2 | 2021-08-25T09:35:50.000Z | 2021-12-07T08:19:11.000Z | # encoding: utf-8
"""
Ordinary Kriging interpolation is a linear estimator of regionalized variables.
It assumes that the data follow a normal distribution and treats the expected
value of the regionalized variable Z as unknown. The interpolation resembles a
weighted moving average whose weights are determined by spatial data analysis
(here, a semivariance-versus-distance relation fitted to the samples).
"""
import numpy as np
from shapely.geometry import Polygon,Point,shape
from shapely.geometry.multipolygon import MultiPolygon
from shapely.prepared import prep
class Kriging():
"""Ordinary Kriging interpolation class"""
def _distance(self,xy1,xy2):
xdmat = (xy1[:,[0]] - xy2[:,0])**2
ydmat = (xy1[:,[1]] - xy2[:,1])**2
return np.sqrt(xdmat + ydmat)
def _rh(self,z):
return 1/2 * (z - z.reshape(-1,1))**2
def _proportional(self,x,y):
""" x*y / x**2 """
return (x*y).sum()/(x ** 2).sum()
def fit(self,xy=None,z=None):
"""
The training process mainly involves computing the semivariance and distance matrices.
"""
self.xy = xy.copy()
self.z = z.copy()
h = self._distance(xy,xy)
r = self._rh(z)
hh_f = np.triu(h+1,0)
rr_f = np.triu(r+1,0)
hh=np.triu(h,0)
rr=np.triu(r,0)
self.k = self._proportional(hh[(hh!=0) | (hh_f!=0)],rr[(rr!=0) | (rr_f!=0)])
self.hnew=h*self.k
self.hnew = np.r_[self.hnew,np.ones((1,self.hnew.shape[1]))]
self.hnew = np.c_[self.hnew,np.ones((self.hnew.shape[0],1))]
self.hnew[self.hnew.shape[0]-1,self.hnew.shape[1]-1] = 0
def predict(self,xy):
"""
The interpolating weights are calculated and the interpolating results are obtained.
"""
oh = self._distance(self.xy,xy)
oh = self.k * oh
oh = np.r_[oh,np.ones((1,oh.shape[1]))]
self.w = np.dot(np.linalg.inv(self.hnew),oh)
res = (self.z.reshape(-1,1) * self.w[:-1,:]).sum(0)
return res
def shape_shadow(xgrid,ygrid,mapdata):
"""
Mask processing.
Parameters
----------
xgrid: grid coordinates of longitude.
ygrid: grid coordinates of latitude.
mapdata: array of map data.
Return
------
np.ndarray: An array of Boolean types.
"""
newshp = Polygon()
for shap in mapdata:
newshp = newshp.union(shape({'type':'Polygon','coordinates':[shap]}))
points = []
for xi,yi in zip(xgrid.ravel(),ygrid.ravel()):
points.append(Point([xi,yi]))
prep_newshp = prep(newshp)
mask = []
for p in points:
mask.append(bool(prep_newshp.contains(p)-1))
mask = np.array(mask).reshape(xgrid.shape)
return mask
def interpolate(xy,z,extension=1.2,point_counts=(100,100)):
"""
Interpolate through the Kriging class, and return the grid points
of the longitude and latitude interpolation results
Parameters
----------
xy: The longitude and latitude coordinates of the spatial data points.
z: The observed values at the spatial data points.
extension: The interpolating region is expanded to cover a wider area.
point_counts: How many data points to interpolate, default is 100 * 100.
"""
kri = Kriging()
kri.fit(xy,z)
x_max,x_min,y_max,y_min = xy[:,0].max(),xy[:,0].min(),xy[:,1].max(),xy[:,1].min()
p = (extension - 1.0)/2
x_s = x_min - (x_max-x_min)*p
x_e = x_max + (x_max-x_min)*p
y_s = y_min - (y_max-y_min)*p
y_e = y_max + (y_max-y_min)*p
xls = np.linspace(x_s,x_e,point_counts[0])
yls = np.linspace(y_s,y_e,point_counts[1])
xgrid,ygrid = np.meshgrid(xls,yls)
xgridls,ygridls = xgrid.ravel(),ygrid.ravel()
if len(xgridls) > 100000: # Consider memory limit loop handling.
zgridls = np.array([])
for s,e in zip(np.arange(0,len(xgridls),100000)[:-1],np.arange(0,len(xgridls),100000)[1:]):
zgridls = np.concatenate([zgridls,kri.predict(np.c_[xgridls[s:e],ygridls[s:e]])])
if e < len(xgridls):
zgridls = np.concatenate([zgridls,kri.predict(np.c_[xgridls[e:],ygridls[e:]])])
else:
zgridls = kri.predict(np.c_[xgridls,ygridls])
zgrid = zgridls.reshape(xgrid.shape)
return xgrid,ygrid,zgrid
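# Minimal usage sketch on synthetic data (not part of the original module):
#
#     import numpy as np
#     xy = np.random.rand(100, 2) * 10           # 100 sample locations
#     z = np.sin(xy[:, 0]) + np.cos(xy[:, 1])    # observed values
#     xgrid, ygrid, zgrid = interpolate(xy, z, point_counts=(50, 50))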
| 35.731707 | 100 | 0.597952 | 1,504 | 0.342207 | 0 | 0 | 0 | 0 | 0 | 0 | 1,467 | 0.333788 |
f236ea30d7814e6e5f7e36351bc7667f7fad4f04 | 125 | py | Python | steam/utils/__init__.py | ivicel/steamkit-python | 0a3f250e432cf890965db5e7245841aa512bca22 | [
"Apache-2.0"
] | 5 | 2018-11-16T08:59:41.000Z | 2021-04-03T05:32:18.000Z | steam/utils/__init__.py | ivicel/steamkit-python | 0a3f250e432cf890965db5e7245841aa512bca22 | [
"Apache-2.0"
] | null | null | null | steam/utils/__init__.py | ivicel/steamkit-python | 0a3f250e432cf890965db5e7245841aa512bca22 | [
"Apache-2.0"
] | null | null | null | from .util import clear_proto_mask, is_proto_msg, add_proto_mask
__all__ = ['clear_proto_mask', 'is_proto_msg', 'add_proto_mask'] | 31.25 | 64 | 0.84 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
f2377bda1f457053d6b4f86097a8d1ba0041422b | 260 | py | Python | src/arm/src/iksolverservicetester.py | Busboombot/ros_idx6dof | 63b3a49393ab2c619b6b56c634cd440ab9b464ef | [
"MIT"
] | 1 | 2020-03-15T15:30:43.000Z | 2020-03-15T15:30:43.000Z | src/arm/src/iksolverservicetester.py | Busboombot/ros_idx6dof | 63b3a49393ab2c619b6b56c634cd440ab9b464ef | [
"MIT"
] | null | null | null | src/arm/src/iksolverservicetester.py | Busboombot/ros_idx6dof | 63b3a49393ab2c619b6b56c634cd440ab9b464ef | [
"MIT"
] | null | null | null | #!/usr/bin/python
import rospy
from arm.srv import IKService, IKServiceResponse
rospy.init_node("asdf", anonymous=True)
rospy.wait_for_service('IKService')
srv = rospy.ServiceProxy('IKService', IKService)
resp = srv([5, 16, 8, 0, 0, 0], None)
print(resp)
| 17.333333 | 48 | 0.734615 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 45 | 0.173077 |
f23806bdb5c4b2e6ddeae98b2f41f0141fe5c5b9 | 1,410 | py | Python | crypto-scrapers/scrapers/spiders/coin_market_cap.py | chnsh/crypto-index-fund | 6c4122b868372ba99aba4f703e85d8ee12af07de | [
"MIT"
] | 14 | 2018-05-27T19:34:59.000Z | 2022-02-09T12:02:38.000Z | crypto-scrapers/scrapers/spiders/coin_market_cap.py | chnsh/crypto-index-fund | 6c4122b868372ba99aba4f703e85d8ee12af07de | [
"MIT"
] | 4 | 2018-05-28T02:44:07.000Z | 2022-03-02T14:55:20.000Z | crypto-scrapers/scrapers/spiders/coin_market_cap.py | chnsh/crypto-index-fund | 6c4122b868372ba99aba4f703e85d8ee12af07de | [
"MIT"
] | 1 | 2022-03-07T05:26:47.000Z | 2022-03-07T05:26:47.000Z | from datetime import datetime
from locale import *
import scrapy
from injector import Injector
from scrapers.items import CoinMarketCapItem
from scrapers.utils import UrlListGenerator
setlocale(LC_NUMERIC, '')  # locale-aware atof() below can parse grouped numbers like "1,234.56"
class CoinMarketCapSpider(scrapy.Spider):
name = "cmc"
custom_settings = {
'ITEM_PIPELINES': {
'scrapers.pipelines.CMCPipeline': 100,
}
}
def start_requests(self):
for url in Injector().get(UrlListGenerator).generate_cmc_url_list():
yield scrapy.Request(url=url, callback=self.parse)
def parse(self, response):
coin = response.css('h1.text-large small::text') \
.extract_first() \
.replace('(', '') \
.replace(')', '')
for row in response.css('table tbody tr'):
data = row.css('td::text').extract()
yield CoinMarketCapItem(
date=datetime.strptime(data[0], '%b %d, %Y').date(),
open_price=atof(data[1]) if data[1] != '-' else None,
high_price=atof(data[2]) if data[2] != '-' else None,
low_price=atof(data[3]) if data[3] != '-' else None,
close_price=atof(data[4]) if data[4] != '-' else None,
volume=atof(data[5]) if data[5] != '-' else None,
market_cap=atof(data[6]) if data[6] != '-' else None,
coin=coin
)
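# The spider registers itself under the name "cmc", so it can be run from the
# Scrapy project root with:
#
#     scrapy crawl cmc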
| 33.571429 | 76 | 0.565248 | 1,194 | 0.846809 | 1,005 | 0.712766 | 0 | 0 | 0 | 0 | 147 | 0.104255 |
f23af2303a08de830f84db88bf6e00cef4e25589 | 4,361 | py | Python | crawler/cli.py | NicolasLM/crawler | 15ed6441fef3b68bfadc970f597271191fe66cf8 | [
"MIT"
] | null | null | null | crawler/cli.py | NicolasLM/crawler | 15ed6441fef3b68bfadc970f597271191fe66cf8 | [
"MIT"
] | null | null | null | crawler/cli.py | NicolasLM/crawler | 15ed6441fef3b68bfadc970f597271191fe66cf8 | [
"MIT"
] | null | null | null | from collections import OrderedDict
from urllib.parse import urlparse
import click
import rethinkdb as r
import redis
import crawler.conf as conf
# cli does not need to be thread-safe
conn = r.connect(host=conf.RethinkDBConf.HOST,
db=conf.RethinkDBConf.DB)
domains = r.table('domains')
@click.group()
@click.version_option()
def cli():
"""Crawler command line tool."""
@cli.command('as', short_help='most popular AS')
@click.option('--count', default=15, help='number of AS to show')
def top_as(count):
"""Show which Autonomous Systems are the most popular."""
data = domains.filter(r.row['success'] == True).\
group(r.row['asn']).count().run(conn)
top('Autonomous Systems', count, data)
@cli.command('countries', short_help='most popular countries')
@click.option('--count', default=15, help='number of countries to show')
def top_countries(count):
"""Show which countries are the most popular."""
data = domains.filter(r.row['success'] == True).\
group(r.row['country']).count().run(conn)
top('countries', count, data)
def top(kind, count, data):
"""Print the `count` most frequent values from a {value: occurrences} mapping."""
top = OrderedDict(sorted(data.items(), key=lambda t: -t[1]))
i = 1
click.secho('Top {} {}'.format(count, kind), bold=True)
for value, occurences in top.items():
if not value:
continue
click.echo('{:>15} {}'.format(value, occurences))
i += 1
if i > count:
break
@cli.command('stats', short_help='statistics about domains')
def stats():
"""Show statistics about domains."""
success = domains.filter(r.row['success'] == True).count().run(conn)
failure = domains.filter(r.row['success'] == False).count().run(conn)
redis_url = urlparse(conf.CeleryConf.BROKER_URL)
redis_conn = redis.StrictRedis(redis_url.hostname,
port=redis_url.port,
db=redis_url.path[1:])
pending = redis_conn.llen('celery')
try:
percent_failure = failure*100/success
except ZeroDivisionError:
percent_failure = 0.0
click.secho('Domain statistics', bold=True)
click.secho('Success: {}'.format(success), fg='green')
click.secho('Pending: {}'.format(pending), fg='yellow')
click.secho('Failed: {} ({:.2f}%)'.format(failure, percent_failure),
fg='red')
@cli.command('domain', short_help='information about a domain')
@click.argument('name')
def domain(name):
"""Show information about a domain."""
import pprint
domain_name = name.lower()
try:
pprint.pprint(domains.filter({'name': domain_name}).run(conn).next())
except r.net.DefaultCursorEmpty:
click.echo('No information on {}'.format(domain_name))
@cli.command('insert', short_help='insert a domain in the list to crawl')
@click.argument('name')
def insert(name):
"""Insert a domain in the list of domains to crawl."""
from .crawler import crawl_domain
name = name.lower()
crawl_domain.delay(name)
click.secho('Domain {} added to Celery tasks'.format(name),
fg='yellow')
@cli.command('rethinkdb', short_help='prepare RethinkDB')
def rethinkdb():
"""Prepare database and table in RethinkDB"""
from rethinkdb.errors import ReqlOpFailedError, ReqlRuntimeError
conn = r.connect(host=conf.RethinkDBConf.HOST)
# Create database
try:
r.db_create(conf.RethinkDBConf.DB).run(conn)
click.secho('Created database {}'.format(conf.RethinkDBConf.DB),
fg='yellow')
except ReqlOpFailedError:
click.secho('Database {} already exists'.format(conf.RethinkDBConf.DB),
fg='green')
# Create table 'domains'
conn = r.connect(host=conf.RethinkDBConf.HOST,
db=conf.RethinkDBConf.DB)
try:
r.table_create('domains', durability=conf.RethinkDBConf.DURABILITY).\
run(conn)
click.secho('Created table domains', fg='yellow')
except ReqlOpFailedError:
click.secho('Table domains already exists', fg='green')
# Create index on domains.name
try:
r.table('domains').index_create('name').run(conn)
click.secho('Created index domains.name', fg='yellow')
except ReqlRuntimeError:
click.secho('Index domains.name already exists', fg='green')
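# Example invocations (assuming the click group above is exposed as a
# `crawler` console script; adjust to however the entry point is wired up):
#
#     $ crawler rethinkdb          # prepare database, table and index
#     $ crawler insert example.com
#     $ crawler stats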
| 33.806202 | 79 | 0.64022 | 0 | 0 | 0 | 0 | 3,670 | 0.84155 | 0 | 0 | 1,211 | 0.277689 |
f23b010b735f63cc59ac899de4d7a1e041082294 | 9,667 | py | Python | run.py | keyunluo/Pytorch-DDP | ff91affdd2c4cebe1719e9a46f118405c308fd1f | [
"Apache-2.0"
] | null | null | null | run.py | keyunluo/Pytorch-DDP | ff91affdd2c4cebe1719e9a46f118405c308fd1f | [
"Apache-2.0"
] | null | null | null | run.py | keyunluo/Pytorch-DDP | ff91affdd2c4cebe1719e9a46f118405c308fd1f | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import torch
import torch.nn as nn
import torch.optim as optim
import torch.multiprocessing as mp
import torch.backends.cudnn as cudnn
from torch.utils.data import DataLoader, Dataset
from torch.nn.parallel import DistributedDataParallel
from torch.utils.data.distributed import DistributedSampler
import argparse, random, time, os
import numpy as np
class MyDataset(Dataset):
def __init__(self):
super().__init__()
self.docs = torch.randn((1024, 32, 16))
def __len__(self):
return len(self.docs)
def __getitem__(self, index) :
return self.docs[index]
class MyModel(nn.Module):
def __init__(self, max_seq_len=32, emb_dim=16):
super().__init__()
self.max_seq_len = max_seq_len
self.position_layer = nn.Embedding(max_seq_len, emb_dim)
self.encoder_layer = nn.TransformerEncoderLayer(d_model=emb_dim, nhead=2, dropout=0.2, batch_first=True)
self.encoder = nn.TransformerEncoder(self.encoder_layer, num_layers=2)
self.fc = nn.Linear(emb_dim * 2, 4)  # mean- and max-pooled features are concatenated in forward()
def forward(self, imgs, mask):
postions = self.position_layer(torch.arange(self.max_seq_len).repeat((imgs.shape[0], 1)).to(imgs).long())
imgs = imgs + postions
feature = self.encoder(imgs, src_key_padding_mask=~mask)
pooling1 = torch.sum((feature * mask.unsqueeze(-1)), axis=1) / mask.sum(axis=1, keepdim=True)
pooling2 = torch.max((feature * mask.unsqueeze(-1)), axis=1)[0]
pooling = torch.cat([pooling1, pooling2], dim=1)
output = self.fc(pooling)
return output
class Trainer():
def __init__(self, model, dataloader, datasampler, device, rank, args):
self.model = model
self.dataloader = dataloader
self.datasampler = datasampler
self.device = device
self.rank = rank
self.args = args
def _data_to_gpu(self, data, device):
for k in data:
data[k] = torch.tensor(data[k]).to(device)
return data
def predict(self, dataloader=None, is_valid=False):
y_true, y_pred = [], []
self.model.eval()
if dataloader is None:
dataloader = self.dataloader
with torch.no_grad():
for batch in dataloader:
input = [self._data_to_gpu(data, self.device) for data in batch]
if is_valid:
feature, label = input[:-1], input[-1]
else:
feature, label = input[:-1], None
output = self.model(*feature)  # forward() takes the inputs (imgs, mask) as separate arguments
predicted_label = torch.argmax(output, dim=1).detach().cpu().numpy().tolist()
y_pred += predicted_label
y_true += [0] * len(predicted_label) if not is_valid else label.detach().cpu().numpy().tolist()
self.model.eval()
return y_true, y_pred
def fit(self, epoch, optimizer, criterion, saved_model, scheduler=None, validloader=None):
for epoch in range(1, epoch+1):
time1 = time.time()
self.model.train(True)
self.datasampler.set_epoch(epoch)
total_loss = []
for batch in self.dataloader:
optimizer.zero_grad()
input = [self._data_to_gpu(data, self.device) for data in batch]
feature, label = input[:-1], input[-1]
output = self.model(*feature)
loss = criterion(output, label)
loss.backward()
torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.args.max_norm)
optimizer.step()
if self.rank == 0:
total_loss.append(loss.item())
if self.rank == 0:
epoch_avg_loss = np.mean(total_loss)
print("Epoch {:02d}, Time {:.02f}s, AvgLoss {:.06f}".format(epoch, time.time()-time1, epoch_avg_loss))
state_dict = self.model.module.state_dict()
os.makedirs(os.path.dirname(saved_model) or ".", exist_ok=True)
torch.save(state_dict, saved_model)
if validloader:
y_true, y_pred = self.predict(validloader, True)
# all_reduce only operates on tensors; gather the per-rank python
# lists instead and flatten them on rank 0
gathered = [None] * torch.distributed.get_world_size()
torch.distributed.all_gather_object(gathered, (y_true, y_pred))
if self.rank == 0:
y_true = [v for t, _ in gathered for v in t]
y_pred = [v for _, p in gathered for v in p]
torch.cuda.empty_cache()
if scheduler is not None:
scheduler.step()
def parameter_parser():
parser = argparse.ArgumentParser(description="Run Model")
parser.add_argument("--seq_len",
type=int,
default=512,
help="max sequence length")
parser.add_argument("--ip",
type=str,
default="localhost",
help="ip address")
parser.add_argument("--port",
type=str,
default=str(random.randint(20000, 30000)),
help="port num")
parser.add_argument("--cuda_devices",
type=int,
nargs='+',
default=list(range(torch.cuda.device_count())),
help="cuda devices")
parser.add_argument("--mode",
type=str,
choices=["train", "eval"],
help="train or eval")
parser.add_argument("--num_worker",
type=int,
default=8,
help="number of data loader worker")
parser.add_argument("--batch_size",
type=int,
default=32,
help="batch size")
parser.add_argument("--epoch",
type=int,
default=5,
help="num epoch")
parser.add_argument("--max_norm",
type=int,
default=30,
help="max norm value")
return parser.parse_args()
def set_manual_seed(seed):
np.random.seed(seed)
torch.manual_seed(seed)
random.seed(seed)
cudnn.benchmark = False
cudnn.deterministic = True
def dist_init(ip, rank, local_rank, world_size, port):
"""
initialize data distributed
"""
host_addr_full = 'tcp://' + ip + ':' + str(port)
torch.distributed.init_process_group("nccl", init_method=host_addr_full, rank=rank, world_size=world_size)
torch.cuda.set_device(local_rank)
assert torch.distributed.is_initialized()
def init_weights(module):
if isinstance(module, nn.Linear):
nn.init.xavier_uniform_(module.weight.data)
nn.init.constant_(module.bias.data, 0.0)
elif isinstance(module, nn.LSTM):
nn.init.xavier_uniform_(module.weight_ih_l0.data)
nn.init.orthogonal_(module.weight_hh_l0.data)
nn.init.constant_(module.bias_ih_l0.data, 0.0)
nn.init.constant_(module.bias_hh_l0.data, 0.0)
hidden_size = module.bias_hh_l0.data.shape[0] // 4
module.bias_hh_l0.data[hidden_size:(2*hidden_size)] = 1.0
if module.bidirectional:
nn.init.xavier_uniform_(module.weight_ih_l0_reverse.data)
nn.init.orthogonal_(module.weight_hh_l0_reverse.data)
nn.init.constant_(module.bias_ih_l0_reverse.data, 0.0)
nn.init.constant_(module.bias_hh_l0_reverse.data, 0.0)
module.bias_hh_l0_reverse.data[hidden_size:(
2*hidden_size)] = 1.0
def train_worker(rank, args, world_size):
model_file = "model.torch"
device = args.cuda_devices[rank]
dist_init(args.ip, rank, device, world_size, args.port)
model = prepare_model(model_file, args, need_load=False, is_train=True, distributed=True)
criterion = nn.CrossEntropyLoss()
train_dataset = MyDataset()
train_datasampler = DistributedSampler(train_dataset)
train_dataloader = DataLoader(train_dataset, pin_memory=True, num_workers=args.num_worker, batch_size=args.batch_size, sampler=train_datasampler)
optimizer = optim.Adam(model.parameters(), lr=1e-5)
scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=32, eta_min=1e-6)
trainer = Trainer(model, train_dataloader, train_datasampler, device, rank, args)
valid_dataset = MyDataset()
valid_datasampler = DistributedSampler(valid_dataset)
valid_dataloader = DataLoader(valid_dataset, pin_memory=True, num_workers=args.num_worker, batch_size=args.batch_size, sampler=valid_datasampler)
trainer.fit(args.epoch, optimizer, criterion, saved_model=model_file, scheduler=scheduler,
validloader=valid_dataloader)
def prepare_model(model_file, args, need_load=False, is_train=True, distributed=True):
if distributed:
rank, device = torch.distributed.get_rank(), torch.cuda.current_device()
else:
rank, device = 0, torch.cuda.current_device()
model = MyModel()
model = model.to(device)
if need_load:
model.load_state_dict(torch.load(model_file, map_location='cuda:{}'.format(device)))
if rank == 0:
print("[*] load model {}".format(model_file))
else:
model.apply(init_weights)
if is_train and distributed:
model = DistributedDataParallel(model, device_ids=[device])
print("[*] rank:{}, device:{}".format(rank, device))
return model
def trainer():
world_size = len(args.cuda_devices)
mp.spawn(train_worker, args=(args, world_size), nprocs=world_size)
if __name__ == '__main__':
args = parameter_parser()
if args.mode == "train":
trainer()
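# Example launch (argument values are illustrative): mp.spawn() starts one
# worker per listed GPU, and each worker joins the NCCL process group through
# the tcp://ip:port rendezvous built in dist_init().
#
#     $ python run.py --mode train --cuda_devices 0 1 --batch_size 32 --epoch 5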
| 39.618852 | 150 | 0.602359 | 4,058 | 0.419779 | 0 | 0 | 0 | 0 | 0 | 0 | 493 | 0.050998 |
f23c95d3f1d786e4a9f7ff9ea7ec7de8d8f85605 | 373 | py | Python | newsletter/urls.py | vallka/djellifique | fb84fba6be413f9d38276d89ae84aeaff761218f | [
"MIT"
] | null | null | null | newsletter/urls.py | vallka/djellifique | fb84fba6be413f9d38276d89ae84aeaff761218f | [
"MIT"
] | null | null | null | newsletter/urls.py | vallka/djellifique | fb84fba6be413f9d38276d89ae84aeaff761218f | [
"MIT"
] | null | null | null | from django.urls import path
from .views import *
app_name = 'newsletter'
urlpatterns = [
path('pixel/', my_image, name='pixel'),
path('click/<str:uuid>/', click_redirect, name='click'),
path('notification/', notification, name='notification'),
path('sendtest/<str:slug>', sendtest, name='sendtest'),
path('stats/<str:slug>', stats, name='stats'),
]
| 26.642857 | 61 | 0.659517 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 138 | 0.369973 |
f23e9e3046350977154c8ce79c350de302fd2dce | 197 | py | Python | 04_While/Step03/gamjapark.py | StudyForCoding/BEAKJOON | 84e1c5e463255e919ccf6b6a782978c205420dbf | [
"MIT"
] | null | null | null | 04_While/Step03/gamjapark.py | StudyForCoding/BEAKJOON | 84e1c5e463255e919ccf6b6a782978c205420dbf | [
"MIT"
] | 3 | 2020-11-04T05:38:53.000Z | 2021-03-02T02:15:19.000Z | 04_While/Step03/gamjapark.py | StudyForCoding/BEAKJOON | 84e1c5e463255e919ccf6b6a782978c205420dbf | [
"MIT"
] | null | null | null | n = int(input())
temp_n = n
k = 0  # number of steps taken in the cycle
while True:
a = int(temp_n / 10)  # tens digit of the current number
b = temp_n % 10  # ones digit
c = (a + b) % 10  # ones digit of the digit sum
new = b*10 + c  # next number: old ones digit followed by c
k += 1
if new == n:  # the sequence has cycled back to the starting number
break
temp_n = new
print(k)
| 14.071429 | 24 | 0.446701 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
f23ec17cf55792ab6ef9150b36b5c3e6f5471fbb | 6,491 | py | Python | vesc_driver/src/mathdir/cubic_spline_planner.py | Taek-16/vesc_study | c4f8e56a2530b17622ca73e9eba57830a1b51ad9 | [
"Apache-2.0"
] | 1 | 2021-02-13T10:48:13.000Z | 2021-02-13T10:48:13.000Z | vesc_driver/src/mathdir/cubic_spline_planner.py | Taek-16/vesc_study | c4f8e56a2530b17622ca73e9eba57830a1b51ad9 | [
"Apache-2.0"
] | null | null | null | vesc_driver/src/mathdir/cubic_spline_planner.py | Taek-16/vesc_study | c4f8e56a2530b17622ca73e9eba57830a1b51ad9 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
"""
cubic spline planner
Author: Atsushi Sakai
"""
import math
import numpy as np
import bisect
from scipy.spatial import distance
class Spline:
"""
Cubic Spline class
"""
def __init__(self, x, y):
self.b, self.c, self.d, self.w = [], [], [], []
self.x = x
self.y = y
self.nx = len(x) # dimension of x
h = np.diff(x)
# calc coefficient a
self.a = [iy for iy in y]
# calc coefficient c
A = self.__calc_A(h)
B = self.__calc_B(h)
self.c = np.linalg.solve(A, B)
# print(self.c1)
# calc spline coefficient b and d
for i in range(self.nx - 1):
self.d.append((self.c[i + 1] - self.c[i]) / (3.0 * h[i]))
tb = (self.a[i + 1] - self.a[i]) / h[i] - h[i] * \
(self.c[i + 1] + 2.0 * self.c[i]) / 3.0
self.b.append(tb)
def calc(self, t):
"""
Calc position
if t is outside of the input x, return None
"""
if t < self.x[0]:
return None
elif t > self.x[-1]:
return None
i = self.__search_index(t)
dx = t - self.x[i]
result = self.a[i] + self.b[i] * dx + \
self.c[i] * dx ** 2.0 + self.d[i] * dx ** 3.0
return result
def calcd(self, t):
"""
Calc first derivative
if t is outside of the input x, return None
"""
if t < self.x[0]:
return None
elif t > self.x[-1]:
return None
i = self.__search_index(t)
dx = t - self.x[i]
result = self.b[i] + 2.0 * self.c[i] * dx + 3.0 * self.d[i] * dx ** 2.0
return result
def calcdd(self, t):
"""
Calc second derivative
"""
if t < self.x[0]:
return None
elif t > self.x[-1]:
return None
i = self.__search_index(t)
dx = t - self.x[i]
result = 2.0 * self.c[i] + 6.0 * self.d[i] * dx
return result
def calcddd(self, t):
if t < self.x[0]:
return None
elif t > self.x[-1]:
return None
i = self.__search_index(t)
result = 6.0 * self.d[i]
return result
def __search_index(self, x):
"""
search data segment index
"""
return bisect.bisect(self.x, x) - 1
def __calc_A(self, h):
"""
calc matrix A for spline coefficient c
"""
A = np.zeros((self.nx, self.nx))
A[0, 0] = 1.0
for i in range(self.nx - 1):
if i != (self.nx - 2):
A[i + 1, i + 1] = 2.0 * (h[i] + h[i + 1])
A[i + 1, i] = h[i]
A[i, i + 1] = h[i]
A[0, 1] = 0.0
A[self.nx - 1, self.nx - 2] = 0.0
A[self.nx - 1, self.nx - 1] = 1.0
# print(A)
return A
def __calc_B(self, h):
"""
calc matrix B for spline coefficient c
"""
B = np.zeros(self.nx)
for i in range(self.nx - 2):
B[i + 1] = 3.0 * (self.a[i + 2] - self.a[i + 1]) / \
h[i + 1] - 3.0 * (self.a[i + 1] - self.a[i]) / h[i]
# print(B)
return B
class Spline2D:
"""
2D Cubic Spline class
"""
def __init__(self, x, y):
self.s = self.__calc_s(x, y)
self.sx = Spline(self.s, x)
self.sy = Spline(self.s, y)
def __calc_s(self, x, y):
dx = np.diff(x)
dy = np.diff(y)
self.ds = [math.sqrt(idx ** 2 + idy ** 2)
for (idx, idy) in zip(dx, dy)]
s = [0]
s.extend(np.cumsum(self.ds))
return s
def calc_position(self, s):
"""
calc position
"""
x = self.sx.calc(s)
y = self.sy.calc(s)
return x, y
def calc_curvature(self, s):
"""
calc curvature
"""
dx = self.sx.calcd(s)
ddx = self.sx.calcdd(s)
dy = self.sy.calcd(s)
ddy = self.sy.calcdd(s)
k = (ddy * dx - ddx * dy) / (dx ** 2 + dy ** 2)
return k
def calc_d_curvature(self, s):
"""
calc d_curvature which is derivative of curvature by s
"""
dx = self.sx.calcd(s)
ddx = self.sx.calcdd(s)
dddx = self.sx.calcddd(s)
dy = self.sy.calcd(s)
ddy = self.sy.calcdd(s)
dddy = self.sy.calcddd(s)
squareterm = dx * dx + dy * dy
dk = ((dddy * dx - dddx * dy) * squareterm - 3 * (ddy * dx - ddx * dy) * (dx * ddx + dy * ddy)) / (squareterm * squareterm)
return dk
def calc_yaw(self, s):
"""
calc yaw
"""
dx = self.sx.calcd(s)
dy = self.sy.calcd(s)
yaw = math.atan2(dy, dx)
return yaw
def calc_spline_course(x, y, ds=0.1):
sp = Spline2D(x, y)
s = list(np.arange(0, sp.s[-1], ds))
rx, ry, ryaw, rk, rdk = [], [], [], [], []
for i_s in s:
ix, iy = sp.calc_position(i_s)
rx.append(ix)
ry.append(iy)
ryaw.append(sp.calc_yaw(i_s))
rk.append(sp.calc_curvature(i_s))
rdk.append(sp.calc_d_curvature(i_s))
return rx, ry, ryaw, rk, rdk, s
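# Minimal usage sketch (synthetic waypoints, not from the original module):
#
#     x = [0.0, 10.0, 20.5, 35.0]
#     y = [0.0, -6.0, 5.0, 6.5]
#     rx, ry, ryaw, rk, rdk, s = calc_spline_course(x, y, ds=0.1)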
def main():
print("Spline 2D test")
import matplotlib.pyplot as plt
import numpy as np
manhae1 = np.load(file='/home/menguiin/catkin_ws/src/macaron_2/path/K-CITY-garage-1m.npy')
x = manhae1[0:manhae1.shape[0]-1, 0]
y = manhae1[0:manhae1.shape[0]-1, 1]
rx, ry, ryaw, rk, rdk, s = calc_spline_course(x, y)
s = np.array(s)
flg, ax = plt.subplots(1)
plt.plot(range(-s.shape[0],s.shape[0],2),s, "s", label="s-value")
plt.grid(True)
plt.axis("equal")
plt.xlabel("index")
plt.ylabel("sval")
plt.legend()
flg, ax = plt.subplots(1)
plt.plot(x, y, "xb", label="input")
plt.plot(rx, ry, "-r", label="spline")
plt.grid(True)
plt.axis("equal")
plt.xlabel("x[m]")
plt.ylabel("y[m]")
plt.legend()
flg, ax = plt.subplots(1)
plt.plot(s, [math.degrees(iyaw) for iyaw in ryaw], "or", label="yaw")
plt.grid(True)
plt.legend()
plt.xlabel("line length[m]")
plt.ylabel("yaw angle[deg]")
flg, ax = plt.subplots(1)
plt.plot(s, rk, "-r", label="curvature")
plt.grid(True)
plt.legend()
plt.xlabel("line length[m]")
plt.ylabel("curvature [1/m]")
plt.show()
if __name__ == '__main__':
main()
| 24.130112 | 131 | 0.475582 | 4,687 | 0.722077 | 0 | 0 | 0 | 0 | 0 | 0 | 1,121 | 0.172701 |
f23ec9a0fbd46e6d9b5f8659349c47ab52aec354 | 333 | py | Python | bbtest/steps/appliance_steps.py | jancajthaml-openbank/e2e | a2ef84b6564022e95de76438fc795e2ef927aa2b | [
"Apache-2.0"
] | null | null | null | bbtest/steps/appliance_steps.py | jancajthaml-openbank/e2e | a2ef84b6564022e95de76438fc795e2ef927aa2b | [
"Apache-2.0"
] | 30 | 2018-03-18T05:58:32.000Z | 2022-01-19T23:21:31.000Z | bbtest/steps/appliance_steps.py | jancajthaml-openbank/e2e | a2ef84b6564022e95de76438fc795e2ef927aa2b | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from behave import *
from helpers.eventually import eventually
@given('appliance is running')
def appliance_running(context):
@eventually(5)
def wait_for_appliance_up():
assert context.appliance.running(), 'appliance did not start within 5 seconds'
wait_for_appliance_up()
| 23.785714 | 82 | 0.744745 | 0 | 0 | 0 | 0 | 219 | 0.657658 | 0 | 0 | 109 | 0.327327 |
f23fb929e898694417f38446747b98726264f0e7 | 1,211 | py | Python | irkshop/urls.py | Beomi/irkshop | c109a62216cb6550add64fbf402883debc5011d1 | [
"Apache-2.0"
] | 19 | 2016-11-06T10:28:14.000Z | 2020-11-01T02:04:51.000Z | irkshop/urls.py | Beomi/irkshop | c109a62216cb6550add64fbf402883debc5011d1 | [
"Apache-2.0"
] | 17 | 2016-10-19T11:58:48.000Z | 2022-01-13T00:32:34.000Z | irkshop/urls.py | Beomi/irkshop | c109a62216cb6550add64fbf402883debc5011d1 | [
"Apache-2.0"
] | 4 | 2016-11-06T10:54:26.000Z | 2019-08-31T16:08:56.000Z | from django.conf.urls import url, include
from django.contrib import admin
from django.contrib.auth.views import login, logout
from django.conf import settings
from django.views.static import serve
from django.views.generic import TemplateView
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^accounts/login/$', login,
{'template_name': 'login_page/login.html'}, name='login'),
url(r'^accounts/logout/$', logout, name='logout'),
url('', include('social_django.urls', namespace='social')), # you just need to register this line
url(r'^ckeditor/', include('ckeditor_uploader.urls')),
url(r'^ht/', include('health_check.urls')),
url(r'^paypal/', include('paypal.standard.ipn.urls')),
url(r'^shop/', include('goods.urls', namespace='shop')),
url(r'^$', TemplateView.as_view(template_name='index.html')),
]
if settings.DEBUG:
import debug_toolbar
from django.conf.urls.static import static
urlpatterns += [
url(r'^__debug__/', include(debug_toolbar.urls)),
url(r'^uploads/(?P<path>.*)$', serve, {'document_root': settings.MEDIA_ROOT}),
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
| 41.758621 | 101 | 0.663088 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 371 | 0.300892 |
f240eb401196f0b66c32fe422e4a7253f5e5528f | 1,469 | py | Python | mojave_setup/fonts.py | RuchirChawdhry/macOS-Mojave-Setup | 5e61fe8c20abc42e63fcbd1c7e310aab8cc02a1c | [
"MIT"
] | null | null | null | mojave_setup/fonts.py | RuchirChawdhry/macOS-Mojave-Setup | 5e61fe8c20abc42e63fcbd1c7e310aab8cc02a1c | [
"MIT"
] | null | null | null | mojave_setup/fonts.py | RuchirChawdhry/macOS-Mojave-Setup | 5e61fe8c20abc42e63fcbd1c7e310aab8cc02a1c | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import subprocess as sp
class Fonts:
FONTS = [
"source-code-pro",
"source-sans-pro",
"source-serif-pro",
"roboto",
"roboto-mono",
"roboto-slab",
"open-sans",
"open-sans-condensed",
"lato",
"ibm-plex",
"ibm-plex-mono",
"ibm-plex-sans",
"georgia",
"ibm-plex-sans-condensed",
"fira-mono",
"fira-sans",
"fira-code",
"times-new-roman",
"great-vibes",
"grand-hotel",
"montserrat",
"hack",
"simple-line-icons",
"old-standard-tt",
"ibm-plex-serif",
"inconsolata",
"impact",
"bebas-neue",
"arial",
"arial-black",
"alex-brush",
"alegreya",
"alegreya-sans",
"aguafina-script",
"libre-baskerville",
"lobster",
"material-icons",
"raleway",
"rajdhani",
"raleway-dots",
"merriweather",
"merriweather-sans",
"redhat",
"pacifico",
]
def get_noto_casks(self):
cmd = ["brew", "search", "font-noto", "--casks"]
noto = sp.run(cmd, capture_output=True).stdout.decode().splitlines()[1:]
return noto
def install(self):
self.FONTS += self.get_noto_casks()
for font in self.FONTS:
sp.run(["brew", "cask", "install", font])
| 22.953125 | 80 | 0.479238 | 1,394 | 0.948945 | 0 | 0 | 0 | 0 | 0 | 0 | 679 | 0.462219 |
f2414f0188cf0460b22148b0732eea50d4b58390 | 5,142 | py | Python | plag/urls.py | neetu6860/plagiarism-detection-software | 7f05210aafdccf33a2bc732a40348eff43f46fba | [
"MIT"
] | 19 | 2018-09-03T09:10:20.000Z | 2021-12-24T13:52:18.000Z | plag/urls.py | neetu6860/plagiarism-detection-software | 7f05210aafdccf33a2bc732a40348eff43f46fba | [
"MIT"
] | 3 | 2019-10-31T18:42:38.000Z | 2021-06-10T21:37:23.000Z | plag/urls.py | neetu6860/plagiarism-detection-software | 7f05210aafdccf33a2bc732a40348eff43f46fba | [
"MIT"
] | 16 | 2018-06-06T15:04:59.000Z | 2022-03-29T04:53:07.000Z | from django.conf.urls import patterns, url
from django.contrib import admin
from django.contrib.auth.decorators import login_required
from django.views.generic import TemplateView
admin.autodiscover()
from plag import views, const
urlpatterns = patterns('',
url(r'^$', views.IndexView.as_view(), name='index'),
url(r'^index-trial/$', views.IndexTrialView.as_view(), name='index_trial'),
url(r'^download/(?P<prot_res_id>\d+)$', views.download_file, name='download'),
url(r'^products/$', TemplateView.as_view(template_name='plag/static/products.html'),
name='products'),
url(r'^features-screenshots/$',
TemplateView.as_view(template_name='plag/static/features_and_screenshots.html'),
name='features'),
url(r'^url-protection/$', TemplateView.as_view(template_name='plag/static/url_protection.html'),
name='url_prot'),
url(r'^document-protection/$',
TemplateView.as_view(template_name='plag/static/doc_protection.html'), name='doc_prot'),
url(r'^pricing/$', TemplateView.as_view(template_name='plag/static/pricing.html'),
name='pricing'),
url(r'^risks-of-plagiarism/$',
TemplateView.as_view(template_name='plag/static/risks_of_plagiarism.html'),
name='risks_plag'),
url(r'^about-us/$', TemplateView.as_view(template_name='plag/static/about.html'), name='about'),
url(r'^our-customers/$', TemplateView.as_view(template_name='plag/static/our_customers.html'),
name='our_customers'),
url(r'^contact-us/$', TemplateView.as_view(template_name='plag/static/contact_us.html'),
name='contact'),
url(r'^order/$', views.OrderView.as_view(), name='order'),
url(r'^ajax/username-check/$', views.username_unique, name='ajax_username_unique'),
url(r'^account/$', views.account, name='account'),
url(r'^account/profile/$', login_required(views.ProfileView.as_view()), name='profile'),
url(r'^account/invoice/(?P<pk>\d+)$', views.invoice, name='invoice'),
url(r'^account/invoice/pay/(?P<pk>\d+)$', views.pay_invoice, name='pay_invoice'),
url(r'^account/invoice/subscribe/(?P<pk>\d+)$', views.subscribe_invoice,
name='subscribe_invoice'),
url(r'^ipn-endpoint/$', views.ipn, name='ipn'),
url(r'^account/recent-scans/$', views.recent_scans, name='recent_scans_default'),
url(r'^account/recent-scans/(?P<num_days>\d+)$', views.recent_scans,
name='recent_scans'),
url(r'^account/recent-scans/(?P<num_days>\d+)/(?P<hide_zero>hide-zero)$',
views.recent_scans, name='recent_scans_hide_zero'),
url(r'^account/scan-history/$', views.scan_history, name='scan_history'),
url(r'^account/scan-history/(?P<hide_zero>hide-zero)$', views.scan_history,
name='scan_history_hide_zero'),
url(r'^ajax/plag-results/$', views.plagiarism_results,
name='ajax_plag_results_default'),
url(r'^ajax/plag-results/(?P<scan_id>\d+)$', views.plagiarism_results,
name='plag_results'),
url(r'^ajax/sitemap/$', views.sitemap_to_urls, name='ajax_urls'),
url(r'^account/protected-resources/$',
login_required(views.ProtectedResources.as_view()), name='protected_resources'),
url(r'^sitemap/$', TemplateView.as_view(template_name='plag/static/sitemap.html'),
name='sitemap'),
url(r'^terms-of-service/$',
TemplateView.as_view(template_name='plag/static/terms_of_service.html'),
name='terms_of_service'),
url(r'^privacy-policy/$', TemplateView.as_view(template_name='plag/static/privacy_policy.html'),
name='privacy_policy'),
# TODO Remove
url(r'^data-cleanse/$', views.data_cleanse, name='data_cleanse'),
url(r'^copyright/$', TemplateView.as_view(template_name='plag/static/copyright.html'),
name='copyright'),
url(r'^login/$', 'django.contrib.auth.views.login',
{'template_name': 'plag/static/login_error.html'}),
url(r'^logout/$', 'django.contrib.auth.views.logout', {'next_page': 'index'}, name='logout'),
)
| 57.775281 | 119 | 0.532283 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,877 | 0.365033 |
f242795159bdd7a9675c51d4615c5d8382e738be | 2,419 | py | Python | coinut.py | DanHenry4/AutoCoinut | f7c79f19a9193bc7c1193712088ca836b030f523 | [
"MIT"
] | 4 | 2016-05-08T02:15:50.000Z | 2020-07-01T08:16:45.000Z | coinut.py | DanHenry4/AutoCoinut | f7c79f19a9193bc7c1193712088ca836b030f523 | [
"MIT"
] | null | null | null | coinut.py | DanHenry4/AutoCoinut | f7c79f19a9193bc7c1193712088ca836b030f523 | [
"MIT"
] | null | null | null | import hmac
import hashlib
import json
import uuid
import httplib2
COINUT_URL = 'https://coinut.com/api/'
class Coinut():
def __init__(self, user=None, api_key=None):
self.user = user
self.api_key = api_key
self.http = httplib2.Http()
def request(self, api, content = {}):
url = COINUT_URL + api
headers = {}
content["nonce"] = uuid.uuid4().get_hex()
content = json.dumps(content)
if self.api_key is not None and self.user is not None:
sig = hmac.new(self.api_key, msg=content,
digestmod=hashlib.sha256).hexdigest()
headers = {'X-USER': self.user, "X-SIGNATURE": sig}
response, content = self.http.request(url, 'POST',
headers=headers, body=content)
return json.loads(content)
def tick(self, asset):
return self.request("tick/" + asset)
def balance(self):
return self.request("balance")
def assets(self, deriv_type):
return self.request("assets", {'deriv_type' : deriv_type})
def expiry_time(self, deriv_type, asset):
return self.request("expiry_time",
{'deriv_type' : deriv_type,
'asset': asset})
def strike_prices(self, deriv_type, asset, expiry_time):
m = {
'deriv_type' : deriv_type,
'asset': asset,
'expiry_time': expiry_time
}
return self.request("strike_prices", m)
def orderbook(self, deriv_type, asset, expiry_time, strike, put_call):
m = {
'deriv_type' : deriv_type,
'asset': asset,
'expiry_time': expiry_time,
'strike': strike,
'put_call': put_call
}
return self.request('orderbook', m)
def new_orders(self, orders):
return self.request("new_orders", {'orders': orders})
def orders(self):
return self.request("orders")
def cancel_orders(self, order_ids):
return self.request("cancel_orders", {'order_ids': order_ids})
def positions(self):
return self.request("positions")
def history_positions(self, start_timestamp, end_timestamp):
m = {'start_timestamp': start_timestamp,
'end_timestamp': end_timestamp}
return self.request("history_positions", m)
| 27.488636 | 76 | 0.574618 | 2,309 | 0.954527 | 0 | 0 | 0 | 0 | 0 | 0 | 351 | 0.145101 |
f2430c615c25842a6a15c7289e5e98e1e77f49ce | 1,817 | py | Python | src/neighborly/core/residence.py | ShiJbey/neighborly | 5af1e3211f1ef0e25803790850e7cd3d3a49be69 | [
"MIT"
] | null | null | null | src/neighborly/core/residence.py | ShiJbey/neighborly | 5af1e3211f1ef0e25803790850e7cd3d3a49be69 | [
"MIT"
] | null | null | null | src/neighborly/core/residence.py | ShiJbey/neighborly | 5af1e3211f1ef0e25803790850e7cd3d3a49be69 | [
"MIT"
] | null | null | null | from typing import Any, Dict
from ordered_set import OrderedSet
from neighborly.core.ecs import Component
from neighborly.core.engine import AbstractFactory, ComponentDefinition
class Residence(Component):
__slots__ = "owners", "former_owners", "residents", "former_residents", "_vacant"
def __init__(self) -> None:
super().__init__()
self.owners: OrderedSet[int] = OrderedSet([])
self.former_owners: OrderedSet[int] = OrderedSet([])
self.residents: OrderedSet[int] = OrderedSet([])
self.former_residents: OrderedSet[int] = OrderedSet([])
self._vacant: bool = True
def to_dict(self) -> Dict[str, Any]:
return {
**super().to_dict(),
"owners": list(self.owners),
"former_owners": list(self.former_owners),
"residents": list(self.residents),
"former_residents": list(self.former_residents),
"vacant": self._vacant,
}
def add_tenant(self, person: int, is_owner: bool = False) -> None:
"""Add a tenant to this residence"""
self.residents.add(person)
if is_owner:
self.owners.add(person)
self._vacant = False
def remove_tenant(self, person: int) -> None:
"""Remove a tenant rom this residence"""
self.residents.remove(person)
self.former_residents.add(person)
if person in self.owners:
self.owners.remove(person)
self.former_owners.add(person)
self._vacant = len(self.residents) == 0
def is_vacant(self) -> bool:
return self._vacant
class ResidenceFactory(AbstractFactory):
def __init__(self):
super().__init__("Residence")
def create(self, spec: ComponentDefinition, **kwargs) -> Residence:
return Residence()
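# Illustrative usage (the integer tenant ids below are made-up GameObject ids):
#
#     home = Residence()
#     home.add_tenant(101, is_owner=True)
#     home.add_tenant(102)
#     assert not home.is_vacant()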
| 32.446429 | 85 | 0.63071 | 1,631 | 0.897633 | 0 | 0 | 0 | 0 | 0 | 0 | 208 | 0.114474 |
f2439cb603c2e5bb9b0700a3b097f6415267d55a | 15,518 | py | Python | tests/SBHRun_Environment.py | SD2E/synbiohub_adapter | 492f9ef1054b17d790654310b895bb7ad155808e | [
"MIT"
] | 1 | 2019-10-08T20:31:16.000Z | 2019-10-08T20:31:16.000Z | tests/SBHRun_Environment.py | SD2E/synbiohub_adapter | 492f9ef1054b17d790654310b895bb7ad155808e | [
"MIT"
] | 84 | 2018-03-06T16:02:30.000Z | 2020-09-01T18:17:54.000Z | tests/SBHRun_Environment.py | SD2E/synbiohub_adapter | 492f9ef1054b17d790654310b895bb7ad155808e | [
"MIT"
] | 1 | 2019-02-06T17:17:54.000Z | 2019-02-06T17:17:54.000Z |
import threading
import time
import pandas as pd
import numpy as np
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import os
import fnmatch
import random
import re
import getpass
import sys
from rdflib import Graph
from synbiohub_adapter.SynBioHubUtil import *
from sbol import *
"""
This module performs tests that query information from SynBioHub instances.
Installation Requirement(s):
- This test environment requires two third-party packages to display plots:
1. pip install pandas
2. python -m pip install -U matplotlib
To run this python file, enter the following command from the synbiohub_adapter directory:
python -m tests.SBHRun_Environment
author(s): Tramy Nguyen
"""
class myThread (threading.Thread):
"""
An instance of this class allows a user to execute N pushes to a SynBioHub instance.
sbolTriples: A list of SBOL Triples that stores SBOL documents
sbh_connector: An instance of pySBOL's PartShop needed to perform login
for pushing and pulling data to and from SynBioHub
"""
def __init__(self, sbolTriples, sbh_connector):
threading.Thread.__init__(self)
self.sbolTriples_list = sbolTriples
self.sbh_connector = sbh_connector
self.thread_start = self.thread_end = 0
self.tupTime_List = []
self.pushPull_List = []
"""
A default run method that will run after a thread is created and started
"""
def run(self):
self.thread_start = time.perf_counter()
for sbolTriple in self.sbolTriples_list:
push_time = push_sbh(sbolTriple.sbolDoc(), self.sbh_connector)
self.tupTime_List.append((push_time, sbolTriple))
# TODO: currently pull will not work on current pySBOL build so set to 0
self.pushPull_List.append((push_time, 0))
self.thread_end = time.perf_counter()
"""
Returns the time (seconds) it took to run an instance of this thread
"""
def thread_duration(self):
return self.thread_end - self.thread_start
"""
Returns a list of python triples where each Triples are structured as (t1, t2).
t1 = Time it took for each push
t2 = An instance of the SBOLTriple class that holds information about the given SBOL file.
"""
def tripleTime_List(self):
return self.tupTime_List
def pushPull_Times(self):
return self.pushPull_List
class SBOLTriple():
"""
An instance of this class will allow a user to access 3 types of information about an SBOLDocument.
1. the number of SBOL triples found in a SBOL document,
2. the SBOL document object generated from pySBOL, and
3. the full path of the XML file used to generate the SBOL document.
xmlFile: the full path of the SBOL File used to create the SBOL document
"""
def __init__(self, xmlFile, uid):
xmlGraph = Graph()
xmlGraph.parse(xmlFile)
total_obj = []
for sbol_subj, sbol_pred, sbol_obj in xmlGraph:
total_obj.append(sbol_obj)
self.__tripleSize = len(total_obj)
self.__sbolDoc = self.create_sbolDoc(xmlFile, uid)
self.__sbolFile = xmlFile
"""
Returns a new SBOL document created from the given SBOL file and an instance of an SBOLTriple
"""
def create_sbolDoc(self, sbolFile, uid):
sbolDoc = Document()
sbolDoc.read(sbolFile)
sbolDoc.displayId = uid
sbolDoc.name = uid + "_name"
sbolDoc.description = uid + "_description"
sbolDoc.version = str("1")
return sbolDoc
# Returns this objects SBOL document
def sbolDoc(self):
return self.__sbolDoc
# Returns a string value of the SBOL file that was assigned to this triple object
def get_xmlFile(self):
return self.__sbolFile
# Returns the total number of SBOL triples found in the given SBOL file
def totalTriples(self):
return self.__tripleSize
def get_uniqueID(idPrefix):
"""Generates a unique id
"""
t = time.ctime()
uid = '_'.join([idPrefix, t])
return re.sub(r'[: ]', '_', uid)
def create_sbolDocs(numDocs, idPrefix, sbolFile):
"""Returns a list of SBOL Documents
numDocs: An integer value to indicate how many SBOL documents this method should create
idPrefix: A unique id prefix to set each SBOL document
sbolFile: the SBOL file to create an SBOL document from
"""
sbolDoc_List = []
sbolTriples = []
u_counter = 0
for i in range(0, numDocs):
uid = get_uniqueID(idPrefix + "_d" + str(i))
trip_obj = SBOLTriple(sbolFile, uid)
sbolTriples.append(trip_obj)
sbolDoc_List.append(trip_obj.sbolDoc())
print("created doc%s" % i)
return sbolDoc_List, sbolTriples
def get_randomFile(sbolFiles):
"""Returns the full path of a randomly selected SBOL file found in the given directory
dirLocation: The directory to select a random SBOL file from
"""
selectedFile = random.choice(sbolFiles)
return selectedFile
def get_sbolList(dirLocation):
"""Returns a list of xml file found in the given directory
"""
for root, dir, files in os.walk(dirLocation):
sbolFiles = [os.path.abspath(os.path.join(root, fileName)) for fileName in files]
return sbolFiles
def push_sbh(sbolDoc, sbh_connector):
"""Returns the time (seconds) it takes to make a push to a new Collection on SynBioHub
sbh_connector: An instance of pySBOL's PartShop needed to perform login
for pushing and pulling data to and from SynBioHub
sbolURI: The URI of the SynBioHub collection or the specific part to be fetched
"""
start = time.clock()
result = sbh_connector.submit(sbolDoc)
end = time.clock()
print(result)
if result != 'Successfully uploaded':
sys.exit()
return end - start
def pull_sbh(sbh_connector, sbolURI):
"""Returns the time (seconds) it takes to make a pull from an existing SynBioHub Collection
sbh_connector: An instance of pySBOL's PartShop needed to perform login
for pushing and pulling data to and from SynBioHub
sbolURI: The URI of the SynBioHub collection or the specific part to be fetched
"""
sbolDoc = Document()
setHomespace("https://bbn.com")
    start = time.perf_counter()
    sbh_connector.pull(sbolURI, sbolDoc)
    end = time.perf_counter()
if sbolDoc is None:
print("Found nothing and caused no error.")
else:
experimentalData_tl = []
        for tl in sbolDoc:
            if tl.type == 'http://sd2e.org#ExperimentalData':
                experimentalData_tl.append(tl)
if len(experimentalData_tl) != 74:
print("Found the wrong SynBioHub Part with this uri: %s" % sbolURI)
return end - start
def createThreads(threadNum, sbh_connector, sbolDoc_size, idPrefix, sbolFile):
threads = []
for t in range(threadNum):
time.sleep(1)
_, sbolTriples = create_sbolDocs(sbolDoc_size, idPrefix + "_t" + str(t), sbolFile)
threads.append(myThread(sbolTriples, sbh_connector))
return threads
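# Each thread created above receives its own fresh batch of documents whose
# display ids embed the thread index ("_t<N>") and a timestamp, so concurrent
# submissions should not collide on SynBioHub collection ids.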
def generate_speedData(sbolFile, sbh_connector, sbolDoc_size, idPrefix):
pushTimes = []
pullTimes = []
currTotal = []
threads = createThreads(1, sbh_connector, sbolDoc_size, idPrefix + "ST_Coll_", sbolFile)
for t in threads:
t.start()
for t in threads:
t.join()
for t in threads:
sum = 0
for r1, r2 in t.pushPull_Times():
pushTimes.append(r1)
pullTimes.append(r2)
sum += r1
currTotal.append(sum)
df = pd.DataFrame({"Pull_Time": pullTimes,
"Push_Time": pushTimes,
"Total_Time": currTotal})
# df.loc['Total'] = df.sum()
return df
def run_triples(sbh_connector, collPrefix, sbolFiles):
triples_list = []
doc = 0
for s in sbolFiles:
print(s)
uid = get_uniqueID(collPrefix + "_t" + str(1) + "_d" + str(doc))
trip_obj = SBOLTriple(s, uid)
triples_list.append(trip_obj)
doc += 1
t = myThread(triples_list, sbh_connector)
t.start()
t.join()
pushTimes = []
sbol_tripleSizes = []
for v1, v2 in t.tripleTime_List():
pushTimes.append(v1)
sbol_tripleSizes.append(v2.totalTriples())
return sbol_tripleSizes, pushTimes
def run_setThreads(sbh_connector, set_size, t_growthRate, sbolFile, sbolDoc_size, collPrefix):
setId_List = []
threadId_List = []
threadDur_List = []
threadSize = t_growthRate
for i in range(1, set_size + 1):
curr_set = createThreads(threadSize, sbh_connector, sbolDoc_size, collPrefix, sbolFile)
for t in curr_set:
t.start()
for t in curr_set:
t.join()
for t in curr_set:
t_dur = t.thread_duration()
threadId_List.append(t.getName())
threadDur_List.append(t_dur)
setId_List.extend(["set_t" + str(threadSize)] * len(curr_set))
threadSize += t_growthRate
return setId_List, threadId_List, threadDur_List
def generate_setData(sbh_connector, iterations, set_size, t_growthRate, sbolFile, sbolDoc_size, collPrefix):
runId_List = []
setId_List = []
threadId_List = []
threadDur_List = []
for i in range(1, iterations + 1):
r1, r2, r3 = run_setThreads(sbh_connector, set_size, t_growthRate, sbolFile, sbolDoc_size, collPrefix)
runId_List.extend(['run' + str(i)] * len(r1))
setId_List.extend(r1)
threadId_List.extend(r2)
threadDur_List.extend(r3)
df = pd.DataFrame({"Run_ID": runId_List,
"Set_ID": setId_List,
"Thread_ID": threadId_List,
"Time/Thread": threadDur_List},
columns=['Run_ID', 'Set_ID', 'Thread_ID', 'Time/Thread'])
return df
def generate_tripleData(sbh_connector, iterations, collPrefix, sbolFiles):
runId_List = []
tripeSize_List = []
pushTime_List = []
for i in range(1, iterations + 1):
sbol_tripleSizes, pushTimes = run_triples(sbh_connector, collPrefix + str(i), sbolFiles)
runId_List.extend(['Run' + str(i)] * len(pushTimes))
tripeSize_List.extend(sbol_tripleSizes)
pushTime_List.extend(pushTimes)
df = pd.DataFrame({"Run_ID": runId_List,
"Triple_Size": tripeSize_List,
"Push_Time": pushTime_List},
columns=['Run_ID', 'Triple_Size', 'Push_Time'])
return df
def get_fileName(filePath):
file_ext = os.path.basename(filePath)
file_name, f_ext = os.path.splitext(file_ext)
return file_name
def br_speed(sbh_connector, sbolDoc_size, sbolFiles):
for f in sbolFiles:
print(f)
df = generate_speedData(f, sbh_connector, sbolDoc_size, "RS_")
fileName = get_fileName(f)
trip_obj = SBOLTriple(f, "temp_id")
triple_size = trip_obj.totalTriples()
create_SpeedLinePlot(df, f, sbolDoc_size, triple_size)
create_SpeedLine2Plot(df, f, sbolDoc_size, triple_size)
df.to_csv("outputs/SpeedResult_f%s_d%s.csv" % (fileName, sbolDoc_size))
def br_setThread(sbh_connector, iterations, set_size, t_growthRate, sbolDoc_size, sbolFiles):
for f in sbolFiles:
df = generate_setData(sbh_connector, iterations, set_size, t_growthRate, f, sbolDoc_size, "RST_")
trip_obj = SBOLTriple(f, "temp_id")
fileName = get_fileName(f)
create_SetBarPlot(df, iterations, set_size, f, trip_obj.totalTriples(), sbolDoc_size)
df.to_csv("outputs/Set_f%s_iter%s_s%s_d%s.csv" % (fileName, iterations, set_size, sbolDoc_size))
def br_triples(sbh_connector, iterations, sbolFiles):
df = generate_tripleData(sbh_connector, iterations, "RT", sbolFiles)
create_TripleScatterPlot(df, iterations)
df.to_csv("outputs/Triples_iter%s.csv" % (iterations))
def create_SpeedLinePlot(df, f, sbolDoc_size, trip_size):
y_max = 20
fig, ax = plt.subplots()
plt.ylim((0, y_max))
ax.set_title("Time to Push %s Triples to SynBioHub" % trip_size)
ax.set_ylabel("Time to Push (sec)")
ax.set_xlabel("Push Index")
df.plot(x=df.index + 1, y='Push_Time', ax=ax)
fileName = get_fileName(f)
fig.savefig('outputs/SpeedResult_f%s_d%s.pdf' % (fileName, sbolDoc_size))
def create_SpeedLine2Plot(df, f, sbolDoc_size, trip_size):
fig, ax = plt.subplots()
ax.set_title("Time to Push %s Triples to SynBioHub" % trip_size)
ax.set_ylabel("Time to Push (sec)")
ax.set_xlabel("Push Index")
df.plot(x=df.index + 1, y='Total_Time', ax=ax)
fileName = get_fileName(f)
fig.savefig('outputs/SpeedResult2_f%s_d%s.pdf' % (fileName, sbolDoc_size))
def create_SetBarPlot(df, iterations, set_size, f, trip_size, doc_size):
fig, ax = plt.subplots()
# max_index = df.groupby(['Run_ID', 'Set_ID'])['Time/Thread'].transform(max) == df['Time/Thread']
# max_df = df[max_index]
grouped_max = df.groupby(['Set_ID'])
means = grouped_max.mean()
errors = grouped_max.std()
g = plt.get_cmap('Dark2')
means.plot.barh(xerr=errors, ax=ax, legend=False, colormap=g)
ax.set_title("Average Time to Push %s Triples per Thread" % (trip_size))
ax.set_xlabel("Time to Push (sec)")
ax.set_ylabel("Thread Group")
fileName = get_fileName(f)
fig.savefig('outputs/Set_f%s_iter%s_s%s_d%s.pdf' % (fileName, iterations, set_size, doc_size))
def create_TripleScatterPlot(df, iterations):
fig, ax = plt.subplots()
plt.ylim((0, 20))
grouped_runs = df.groupby('Run_ID')
for name, group in grouped_runs:
fit = np.polyfit(group['Triple_Size'], group['Push_Time'], deg=1)
ax.plot(group['Triple_Size'], fit[0] * group['Triple_Size'] + fit[1], color='black')
ax.scatter(data=group, x='Triple_Size', y='Push_Time', marker='o', c='orange')
ax.set_title("Time to Push SBOL Documents with Varying Size")
ax.set_ylabel("Time to Push (sec)")
ax.set_xlabel("Document Size (# of Triples)")
fig.savefig('outputs/Triples_iter%s.pdf' % (iterations))
def backup_sequentialLoad():
# At one point, update pushing to SBH to do something like this so performance doesn't suffer.
sbolDoc = Document()
sbolDoc.read("./examples/c_trips10000.xml")
for i in range(1):
print(i)
uid = get_uniqueID("ex_")
sbolDoc.displayId = uid
sbolDoc.name = uid + "_name"
sbolDoc.description = uid + "_description"
sbolDoc.version = str("1")
push_sbh(sbolDoc, sbh_connector)
if __name__ == '__main__':
server_name = "https://synbiohub.bbn.com"
print("Logging into: " + server_name)
sbh_connector = PartShop(server_name)
sbh_user = input('Enter Username: ')
sbh_connector.login(sbh_user, getpass.getpass(prompt='Enter SynBioHub Password: ', stream=sys.stderr))
# Config.setOption("verbose", True)
# sbolFiles = get_sbolList("./examples/workingFiles")
sbolFiles = ["./examples/c_trips40000.xml"]
iterations = 1
sbolDoc_size = 1
br_speed(sbh_connector, sbolDoc_size, sbolFiles)
# br_triples(sbh_connector, iterations, sbolFiles)
# iterations, set_size=10, t_growthRate=5, sbolDoc_size=100
# TODO: MAKE SURE TO CHANGE COLOR OF BAR GRAPH TO MAKE IT LOOK COOL...
# br_setThread(sbh_connector, 3, 5, 3, 50, sbolFiles)
| 33.661605 | 110 | 0.660974 | 3,251 | 0.209499 | 0 | 0 | 0 | 0 | 0 | 0 | 4,996 | 0.321949 |
f245528c941762eda827c561627c5aa634c97c9f | 2,842 | py | Python | setup.py | Unidata/drilsdown | 55aca7168fb390f31c36729605401564e9b82c56 | ["MIT"] | 3 | 2018-05-25T00:19:12.000Z | 2021-01-08T15:54:36.000Z | setup.py | suvarchal/drilsdown | e82f58396f640fef847353caf1bd4b2bf016c7a6 | ["MIT"] | 11 | 2017-10-31T20:15:24.000Z | 2019-12-16T21:01:55.000Z | setup.py | suvarchal/drilsdown | e82f58396f640fef847353caf1bd4b2bf016c7a6 | ["MIT"] | 10 | 2018-02-08T22:23:28.000Z | 2019-09-29T23:25:19.000Z |
import os
from six import iteritems
from setuptools import setup
from setuptools.command.develop import develop
from setuptools.command.install import install
import subprocess
PACKAGE_NAME = 'drilsdown'
SOURCES = {
'ipython_IDV': 'projects/ipython_IDV',
'idv_teleport': 'projects/IDV_teleport',
'ramadda_publish': 'projects/RAMADDA_publish',
}
VERSION = '2.4.91'
def install_drilsdown_projects(sources, develop=False):
""" Use pip to install all drilsdown projects. """
print("installing all drilsdown projects in {} mode".format(
"development" if develop else "normal"))
wd = os.getcwd()
for k, v in iteritems(sources):
try:
os.chdir(os.path.join(wd, v))
if develop:
subprocess.check_call(['pip', 'install', '-e', '.']) # could be pip3 on certain platforms
else:
subprocess.check_call(['pip', 'install', '.']) # could be pip3 on certain platforms
except Exception as e:
print("Oops, something went wrong installing", k)
print(e)
finally:
os.chdir(wd)
class DevelopCmd(develop):
""" Add custom steps for the develop command """
def run(self):
install_drilsdown_projects(SOURCES, develop=True)
develop.run(self)
class InstallCmd(install):
""" Add custom steps for the install command """
def run(self):
install_drilsdown_projects(SOURCES, develop=False)
install.run(self)
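# With the hooks above, `python setup.py develop` runs DevelopCmd and installs
# every sub-project in editable mode. InstallCmd is defined for symmetry but,
# per the note inside cmdclass below, deliberately left unregistered so that
# `pip install` and `python setup.py install` behave the same.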
setup(
name=PACKAGE_NAME,
version=VERSION,
author="Drilsdown team",
author_email="drilsdown@unidata.ucar.edu",
description="A collection of tools for jupyter notebooks",
long_description_content_type='text/markdown',
long_description=open('README.md').read(),
url="https://github.com/Unidata/drilsdown",
license="MIT",
classifiers=[
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
install_requires=[
'future',
'six',
'requests',
'ipython',
'ipywidgets>=7.1.0rc',
'jupyter-client',
# 'ipython_IDV>=' + VERSION + "'", # cannot be source and a dependency??
'ipython-IDV', # from pypi
'ramadda_publish', #from pypi
'idv_teleport', #from pypi
],
cmdclass={
#'install': InstallCmd, # do not overwrite for now to make
# pip install and python setup.py install do same.
# note in class pip might be called pip3 on certain platforms
'develop': DevelopCmd,
},
extras_require={
'addons': ['numpy','netcdf4','xarray','metpy'],
'visual': ['pyviz','geoviews'],
}
)
| 32.295455 | 105 | 0.60943 | 365 | 0.128431 | 0 | 0 | 0 | 0 | 0 | 0 | 1,277 | 0.449331 |
f24567e433386b2908e8d4a58f10fb0b2a6b3b98 | 2,129 | py | Python | ejercicios/Ejercicio6.py | Xavitheforce/Ejercicios_Iteracion | e840439e1277b5946592128d5c771d895c2fac2c | ["Apache-2.0"] | null | null | null | ejercicios/Ejercicio6.py | Xavitheforce/Ejercicios_Iteracion | e840439e1277b5946592128d5c771d895c2fac2c | ["Apache-2.0"] | null | null | null | ejercicios/Ejercicio6.py | Xavitheforce/Ejercicios_Iteracion | e840439e1277b5946592128d5c771d895c2fac2c | ["Apache-2.0"] | null | null | null |
from datetime import datetime
class Banco():
def __init__(self):
self.cuentas = {
'1':{
'nombre': 'Marcos Martinez',
'balance': 173735,
'tipo': 1,
'movimientos': []
},'2':{
'nombre': 'Alejandro Sanchez',
'balance': 1342,
'tipo': 0,
'movimientos': []
},'3':{
'nombre': 'Claudia Plaza',
'balance': 120984,
'tipo': 1,
'movimientos': []
},
}
    def movimiento(self):
        cuenta, cuenta_destino, cantidad = input('Enter the source account number: '), input('Enter the destination account number: '), input('Enter the amount to transfer: ')
        if cuenta not in self.cuentas:
            return print('The source account is not registered.')
        if cuenta_destino not in self.cuentas:
            return print('The destination account is not registered.')
        balance_cuenta = self.cuentas[str(cuenta)]['balance']
        if balance_cuenta < int(cantidad):
            return print('The requested amount exceeds the balance available in account no. ' + str(cuenta) + '.')
movimientos_cuenta_origen = self.cuentas[str(cuenta)]['movimientos']
movimientos_cuenta_origen.append({
'cuenta_destino': str(cuenta_destino),
'cantidad': '-' + str(cantidad),
'hora': str(datetime.now())
})
self.cuentas[str(cuenta)]['balance'] -= int(cantidad)
movimientos_cuenta_destino = self.cuentas[str(cuenta_destino)]['movimientos']
movimientos_cuenta_destino.append({
'cuenta_origen': str(cuenta),
'cantidad': '+' + str(cantidad),
'hora': str(datetime.now())
})
self.cuentas[str(cuenta_destino)]['balance'] += int(cantidad)
print(self.cuentas[cuenta]['movimientos'])
print(self.cuentas[cuenta]['balance'], self.cuentas[cuenta_destino]['balance'])
    def iniciar(self):
        decision = input('Welcome to Bancos Ramirez. Would you like to perform an operation? (Y/N): ')
        while decision.lower() == 'y':
            # reuse this instance so balances persist between operations
            self.movimiento()
            decision = input('Would you like to keep performing operations? (Y/N): ')
f2470b57f1baf4a7e69d418b396753a2d81c5b04 | 752 | py | Python | authentik/sources/saml/migrations/0011_auto_20210324_0736.py | BeryJu/passbook | 350f0d836580f4411524614f361a76c4f27b8a2d | ["MIT"] | 15 | 2020-01-05T09:09:57.000Z | 2020-11-28T05:27:39.000Z | authentik/sources/saml/migrations/0011_auto_20210324_0736.py | BeryJu/passbook | 350f0d836580f4411524614f361a76c4f27b8a2d | ["MIT"] | 302 | 2020-01-21T08:03:59.000Z | 2020-12-04T05:04:57.000Z | authentik/sources/saml/migrations/0011_auto_20210324_0736.py | BeryJu/passbook | 350f0d836580f4411524614f361a76c4f27b8a2d | ["MIT"] | 3 | 2020-03-04T08:21:59.000Z | 2020-08-01T20:37:18.000Z |
# Generated by Django 3.1.7 on 2021-03-24 07:36
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("authentik_flows", "0016_auto_20201202_1307"),
("authentik_sources_saml", "0010_samlsource_pre_authentication_flow"),
]
operations = [
migrations.AlterField(
model_name="samlsource",
name="pre_authentication_flow",
field=models.ForeignKey(
help_text="Flow used before authentication.",
on_delete=django.db.models.deletion.CASCADE,
related_name="source_pre_authentication",
to="authentik_flows.flow",
),
),
]
| 28.923077 | 78 | 0.62633 | 626 | 0.832447 | 0 | 0 | 0 | 0 | 0 | 0 | 274 | 0.364362 |
f248957a375715c7681a4295ca66a47a10ee7ea3 | 6,891 | py | Python | tempest/tests/common/test_service_clients.py | xavpaice/tempest | 958bd694df27511e0346d799876fe49331b8145c | ["Apache-2.0"] | null | null | null | tempest/tests/common/test_service_clients.py | xavpaice/tempest | 958bd694df27511e0346d799876fe49331b8145c | ["Apache-2.0"] | null | null | null | tempest/tests/common/test_service_clients.py | xavpaice/tempest | 958bd694df27511e0346d799876fe49331b8145c | ["Apache-2.0"] | null | null | null |
# Copyright 2015 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import random
import six
from tempest.services.baremetal.v1.json import baremetal_client
from tempest.services.data_processing.v1_1 import data_processing_client
from tempest.services.database.json import flavors_client as db_flavor_client
from tempest.services.database.json import versions_client as db_version_client
from tempest.services.identity.v2.json import identity_client as \
identity_v2_identity_client
from tempest.services.identity.v3.json import credentials_client
from tempest.services.identity.v3.json import endpoints_client
from tempest.services.identity.v3.json import identity_client as \
identity_v3_identity_client
from tempest.services.identity.v3.json import policies_client
from tempest.services.identity.v3.json import regions_client
from tempest.services.identity.v3.json import services_client
from tempest.services.image.v1.json import images_client
from tempest.services.image.v2.json import images_client as images_v2_client
from tempest.services.messaging.json import messaging_client
from tempest.services.network.json import network_client
from tempest.services.object_storage import account_client
from tempest.services.object_storage import container_client
from tempest.services.object_storage import object_client
from tempest.services.orchestration.json import orchestration_client
from tempest.services.telemetry.json import alarming_client
from tempest.services.telemetry.json import telemetry_client
from tempest.services.volume.v1.json.admin import hosts_client \
as volume_hosts_client
from tempest.services.volume.v1.json.admin import quotas_client \
as volume_quotas_client
from tempest.services.volume.v1.json.admin import services_client \
as volume_services_client
from tempest.services.volume.v1.json.admin import types_client \
as volume_types_client
from tempest.services.volume.v1.json import availability_zone_client \
as volume_az_client
from tempest.services.volume.v1.json import backups_client
from tempest.services.volume.v1.json import extensions_client \
as volume_extensions_client
from tempest.services.volume.v1.json import qos_client
from tempest.services.volume.v1.json import snapshots_client
from tempest.services.volume.v1.json import volumes_client
from tempest.services.volume.v2.json.admin import hosts_client \
as volume_v2_hosts_client
from tempest.services.volume.v2.json.admin import quotas_client \
as volume_v2_quotas_client
from tempest.services.volume.v2.json.admin import services_client \
as volume_v2_services_client
from tempest.services.volume.v2.json.admin import types_client \
as volume_v2_types_client
from tempest.services.volume.v2.json import availability_zone_client \
as volume_v2_az_client
from tempest.services.volume.v2.json import backups_client \
as volume_v2_backups_client
from tempest.services.volume.v2.json import extensions_client \
as volume_v2_extensions_client
from tempest.services.volume.v2.json import qos_client as volume_v2_qos_client
from tempest.services.volume.v2.json import snapshots_client \
as volume_v2_snapshots_client
from tempest.services.volume.v2.json import volumes_client as \
volume_v2_volumes_client
from tempest.tests import base
class TestServiceClient(base.TestCase):
@mock.patch('tempest_lib.common.rest_client.RestClient.__init__')
def test_service_client_creations_with_specified_args(self, mock_init):
test_clients = [
baremetal_client.BaremetalClient,
data_processing_client.DataProcessingClient,
db_flavor_client.DatabaseFlavorsClient,
db_version_client.DatabaseVersionsClient,
messaging_client.MessagingClient,
network_client.NetworkClient,
account_client.AccountClient,
container_client.ContainerClient,
object_client.ObjectClient,
orchestration_client.OrchestrationClient,
telemetry_client.TelemetryClient,
alarming_client.AlarmingClient,
qos_client.QosSpecsClient,
volume_hosts_client.HostsClient,
volume_quotas_client.QuotasClient,
volume_services_client.ServicesClient,
volume_types_client.TypesClient,
volume_az_client.AvailabilityZoneClient,
backups_client.BackupsClient,
volume_extensions_client.ExtensionsClient,
snapshots_client.SnapshotsClient,
volumes_client.VolumesClient,
volume_v2_hosts_client.HostsClient,
volume_v2_quotas_client.QuotasClient,
volume_v2_services_client.ServicesClient,
volume_v2_types_client.TypesClient,
volume_v2_az_client.AvailabilityZoneClient,
volume_v2_backups_client.BackupsClient,
volume_v2_extensions_client.ExtensionsClient,
volume_v2_qos_client.QosSpecsClient,
volume_v2_snapshots_client.SnapshotsClient,
volume_v2_volumes_client.VolumesClient,
identity_v2_identity_client.IdentityClient,
credentials_client.CredentialsClient,
endpoints_client.EndPointClient,
identity_v3_identity_client.IdentityV3Client,
policies_client.PoliciesClient,
regions_client.RegionsClient,
services_client.ServicesClient,
images_client.ImagesClient,
images_v2_client.ImagesClientV2
]
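        # Each service client is expected to pass its constructor arguments
        # straight through to RestClient.__init__ (mocked above), so we build a
        # randomized but well-formed parameter set and assert that the base
        # initializer received exactly those values, once per client.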
for client in test_clients:
fake_string = six.text_type(random.randint(1, 0x7fffffff))
auth = 'auth' + fake_string
service = 'service' + fake_string
region = 'region' + fake_string
params = {
'endpoint_type': 'URL' + fake_string,
'build_interval': random.randint(1, 100),
'build_timeout': random.randint(1, 100),
'disable_ssl_certificate_validation':
True if random.randint(0, 1) else False,
'ca_certs': None,
'trace_requests': 'foo' + fake_string
}
client(auth, service, region, **params)
mock_init.assert_called_once_with(auth, service, region, **params)
mock_init.reset_mock()
| 47.524138 | 79 | 0.753882 | 3,014 | 0.437382 | 0 | 0 | 2,969 | 0.430852 | 0 | 0 | 810 | 0.117545 |
f2490fc27568d943c3ececc3e75fce355b5da3ff | 3,497 | py | Python | advent/days/day17/day.py | RuedigerLudwig/advent2021 | ce069d485bb34b4752ec4e89f195f7cc8cf084cc | ["Unlicense"] | null | null | null | advent/days/day17/day.py | RuedigerLudwig/advent2021 | ce069d485bb34b4752ec4e89f195f7cc8cf084cc | ["Unlicense"] | null | null | null | advent/days/day17/day.py | RuedigerLudwig/advent2021 | ce069d485bb34b4752ec4e89f195f7cc8cf084cc | ["Unlicense"] | null | null | null |
from __future__ import annotations
from itertools import product
from typing import Iterator
day_num = 17
def part1(lines: Iterator[str]) -> int:
probe = Target.from_str(next(lines))
mx = max(y for _, y in probe.get_possible())
return mx * (mx + 1) >> 1
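# part1 uses the triangular-number identity: a probe launched upward with
# velocity v climbs v + (v - 1) + ... + 1 before stalling, so its peak height
# is v * (v + 1) / 2, which the shift above computes.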
def part2(lines: Iterator[str]) -> int:
probe = Target.from_str(next(lines))
return probe.count_possible()
Range = tuple[int, int]
XStepRange = tuple[int, int | None]
YStepRange = tuple[int, int]
Pos = tuple[int, int]
class Target:
@staticmethod
def from_str(line: str) -> Target:
def get_range(text: str) -> Range:
match text.split(".."):
case [start, end]:
return int(start.strip()), int(end.strip())
case _:
raise NotImplementedError
match line.split(","):
case [x, y]:
range_x = get_range(x.split("=")[1])
range_y = get_range(y.split("=")[1])
return Target(range_x, range_y)
case _:
raise NotImplementedError
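    # Example (illustrative puzzle input):
    #   Target.from_str("target area: x=20..30, y=-10..-5")
    # yields range_x == (20, 30) and range_y == (-10, -5).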
def __init__(self, range_x: Range, range_y: Range) -> None:
self.range_x = range_x
self.range_y = range_y
def __eq__(self, other: object) -> bool:
if isinstance(other, Target):
return self.range_x == other.range_x and self.range_y == other.range_y
raise NotImplementedError
def possible_x(self) -> Iterator[tuple[int, XStepRange]]:
for x_start in range(1, self.range_x[1] + 1):
min_steps: int | None = None
steps = 1
x_pos = x_start
x_vel = x_start - 1
done = False
while not done:
if x_pos > self.range_x[1]:
if min_steps is not None:
yield x_start, (min_steps, steps - 1)
done = True
elif x_pos >= self.range_x[0] and min_steps is None:
min_steps = steps
elif x_vel == 0:
if min_steps is not None:
yield x_start, (min_steps, None)
done = True
steps += 1
x_pos += x_vel
x_vel -= 1
def possible_y(self) -> Iterator[tuple[int, YStepRange]]:
for y_start in range(self.range_y[0], -self.range_y[0] + 1):
if y_start <= 0:
steps = 1
y_vel = y_start - 1
else:
steps = y_start * 2 + 2
y_vel = -y_start - 2
min_steps = None
y_pos = y_vel + 1
done = False
while not done:
if y_pos < self.range_y[0]:
if min_steps is not None:
yield y_start, (min_steps, steps - 1)
done = True
elif y_pos <= self.range_y[1] and min_steps is None:
min_steps = steps
steps += 1
y_pos += y_vel
y_vel -= 1
def get_possible(self) -> Iterator[Pos]:
posx = self.possible_x()
posy = self.possible_y()
for (x, (min_x, max_x)), (y, (min_y, max_y)) in product(posx, posy):
mn = max(min_x, min_y)
mx = max_y if max_x is None else min(max_x, max_y)
if mn <= mx:
yield x, y
def count_possible(self) -> int:
return sum(1 for _ in self.get_possible())
| 31.223214 | 82 | 0.501859 | 2,994 | 0.856162 | 1,968 | 0.562768 | 573 | 0.163855 | 0 | 0 | 13 | 0.003717 |
f249ee34b1745d4a243c396362c75f872d9af531 | 687 | py | Python | Software_Carpentry/Conway/test_conway.py | dgasmith/SICM2-Software-Summer-School-2014 | af97770cbade3bf4a246f21e607e8be66c9df7da | ["MIT"] | 2 | 2015-07-16T14:00:27.000Z | 2016-01-10T20:21:48.000Z | Software_Carpentry/Conway/test_conway.py | dgasmith/SICM2-Software-Summer-School-2014 | af97770cbade3bf4a246f21e607e8be66c9df7da | ["MIT"] | null | null | null | Software_Carpentry/Conway/test_conway.py | dgasmith/SICM2-Software-Summer-School-2014 | af97770cbade3bf4a246f21e607e8be66c9df7da | ["MIT"] | null | null | null |
from conway import *
def test_neighbors_at_origin():
result = [(1,1), (-1,-1), (0,1), (1,0), (-1,1), (1,-1), (-1,0), (0,-1)]
nb = neighbors((0,0))
assert( set(result) == set(nb) )
def test_neighbors_at_negative_quadrant():
result = [(0, -1), (-2, -1), (-1, 0), (-1, -2), (0, 0), (0, -2), (-2, 0), (-2, -2)]
nb = neighbors((-1,-1))
assert( set(result) == set(nb) )
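# Two classic Life patterns follow: the blinker is a period-2 oscillator and
# the block is a still life, so running either for exactly two generations
# must reproduce its starting set of live cells.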
def test_blinker():
blinker = [(-1,0), (0,0), (1,0)]
result = conway(blinker, generations=2)
assert( set(result) == set(blinker) )
def test_block():
block = [(0,0), (0,1), (1,0), (1,1)]
result = conway(block, generations=2)
assert( set(result) == set(block) )
| 28.625 | 87 | 0.513828 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
f24a5dc578f63a0c2e113a798ce9969cd7ed080c | 5,426 | py | Python | app_backend/__init__.py | zhanghe06/bearing_project | 78a20fc321f72d3ae05c7ab7e52e01d02904e3fc | ["MIT"] | 1 | 2020-06-21T04:08:26.000Z | 2020-06-21T04:08:26.000Z | app_backend/__init__.py | zhanghe06/bearing_project | 78a20fc321f72d3ae05c7ab7e52e01d02904e3fc | ["MIT"] | 13 | 2019-10-18T17:19:32.000Z | 2022-01-13T00:44:43.000Z | app_backend/__init__.py | zhanghe06/bearing_project | 78a20fc321f72d3ae05c7ab7e52e01d02904e3fc | ["MIT"] | 5 | 2019-02-07T03:15:16.000Z | 2021-09-04T14:06:28.000Z |
#!/usr/bin/env python3
# encoding: utf-8
"""
@author: zhanghe
@software: PyCharm
@file: __init__.py
@time: 2018-03-06 00:00
"""
from __future__ import unicode_literals
import eventlet
eventlet.monkey_patch()
from logging.config import dictConfig
from config import current_config
from flask import Flask
from flask_wtf.csrf import CSRFProtect
from flask_login import LoginManager
from flask_moment import Moment
from flask_oauthlib.client import OAuth
from flask_mail import Mail
from flask_principal import Principal
import flask_excel as excel
# from flask_socketio import SocketIO
from flask_sqlalchemy import SQLAlchemy
from flask_babel import Babel, gettext as _
from app_common.libs.redis_session import RedisSessionInterface
from app_backend.clients.client_redis import redis_client
app = Flask(__name__)
app.config.from_object(current_config)
app.config['REMEMBER_COOKIE_NAME'] = app.config['REMEMBER_COOKIE_NAME_BACKEND']
app.session_cookie_name = app.config['SESSION_COOKIE_NAME_BACKEND']
app.session_interface = RedisSessionInterface(
redis=redis_client,
prefix=app.config['REDIS_SESSION_PREFIX_BACKEND'],
)
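# Backend sessions live in Redis under their own key prefix and cookie name,
# so this app and any sibling site sharing the same Redis instance keep their
# sessions separate.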
# CSRF Protection AJAX requests
csrf = CSRFProtect(app)
login_manager = LoginManager()
login_manager.init_app(app)  # the setup_app method is deprecated
login_manager.login_view = 'auth.index'
login_manager.login_message = _('Please log in to access this page.')
login_manager.login_message_category = 'warning'  # set the flash message category
login_manager.localize_callback = _  # set the translation callback
login_manager.session_protection = 'basic'  # set the protection level (basic, strong, None)
# The client identifier is essentially an MD5 hash of the IP address and User Agent.
# In basic mode, if the identifier does not match, the session is simply marked
# as non-fresh, and anything that requires a fresh login forces the user to
# re-authenticate.
# In strong mode, if the identifier does not match, the whole session (and the
# remember-me token, if present) is deleted.
# Moment date/time plugin
moment = Moment(app)
# permission management plugin
principals = Principal(app, skip_static=True)
# internationalization / localization
babel = Babel(app)
excel.init_excel(app)
# SocketIO
# socketio = SocketIO()
# socketio.init_app(app, async_mode='eventlet', message_queue=app.config['REDIS_URL'])
# third-party OAuth login
oauth = OAuth(app)
# mail
mail = Mail(app)
# GitHub
oauth_github = oauth.remote_app(
'github',
**app.config['GITHUB_OAUTH']
)
# QQ
oauth_qq = oauth.remote_app(
'qq',
**app.config['QQ_OAUTH']
)
# WeiBo
oauth_weibo = oauth.remote_app(
'weibo',
**app.config['WEIBO_OAUTH']
)
# Google
# (skipped: requires a paid account)
# configure logging
dictConfig(app.config['LOG_CONFIG'])
# these import statements live down here to keep views and models from importing circularly
from app_backend import views
from app_backend.views.permissions import bp_permissions
from app_backend.views.captcha import bp_captcha
from app_backend.views.customer import bp_customer
from app_backend.views.customer_contact import bp_customer_contact
from app_backend.views.customer_invoice import bp_customer_invoice
from app_backend.views.supplier import bp_supplier
from app_backend.views.supplier_contact import bp_supplier_contact
from app_backend.views.supplier_invoice import bp_supplier_invoice
from app_backend.views.user import bp_user
from app_backend.views.user_auth import bp_auth
from app_backend.views.production import bp_production
from app_backend.views.production_sensitive import bp_production_sensitive
from app_backend.views.quotation import bp_quotation
from app_backend.views.quotation_items import bp_quotation_items
from app_backend.views.enquiry import bp_enquiry
from app_backend.views.enquiry_items import bp_enquiry_items
from app_backend.views.buyer_order import bp_buyer_order
from app_backend.views.purchase import bp_purchase
from app_backend.views.sales_order import bp_sales_order
from app_backend.views.delivery import bp_delivery
from app_backend.views.warehouse import bp_warehouse
from app_backend.views.rack import bp_rack
from app_backend.views.inventory import bp_inventory
from app_backend.views.futures import bp_futures
from app_backend.views.system import bp_system
# from app_backend.views.socket_io import bp_socket_io
from app_backend.views.price import bp_price
from app_backend.views.bank import bp_bank
from app_backend.views.cash import bp_cash
from app_backend.views.bank_account import bp_bank_account
# register blueprints
app.register_blueprint(bp_permissions)
app.register_blueprint(bp_captcha)
app.register_blueprint(bp_customer)
app.register_blueprint(bp_customer_contact)
app.register_blueprint(bp_customer_invoice)
app.register_blueprint(bp_supplier)
app.register_blueprint(bp_supplier_contact)
app.register_blueprint(bp_supplier_invoice)
app.register_blueprint(bp_user)
app.register_blueprint(bp_auth)
app.register_blueprint(bp_production)
app.register_blueprint(bp_production_sensitive)
app.register_blueprint(bp_quotation)
app.register_blueprint(bp_quotation_items)
app.register_blueprint(bp_enquiry)
app.register_blueprint(bp_enquiry_items)
app.register_blueprint(bp_buyer_order)
app.register_blueprint(bp_purchase)
app.register_blueprint(bp_sales_order)
app.register_blueprint(bp_delivery)
app.register_blueprint(bp_warehouse)
app.register_blueprint(bp_rack)
app.register_blueprint(bp_inventory)
app.register_blueprint(bp_futures)
app.register_blueprint(bp_system)
# app.register_blueprint(bp_socket_io)
app.register_blueprint(bp_price)
app.register_blueprint(bp_bank)
app.register_blueprint(bp_cash)
app.register_blueprint(bp_bank_account)
# import custom filters
from app_backend import filters
| 30.483146 | 86 | 0.838555 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,397 | 0.240282 |
f24ac29d015f11200dad8879234dd7ab9c174313 | 2,003 | py | Python | N50.py | kstatebioinfo/stanford_swc | daa3f37bcbbe4a8a3cbe59a48b380603b9794634 | ["CC0-1.0"] | null | null | null | N50.py | kstatebioinfo/stanford_swc | daa3f37bcbbe4a8a3cbe59a48b380603b9794634 | ["CC0-1.0"] | null | null | null | N50.py | kstatebioinfo/stanford_swc | daa3f37bcbbe4a8a3cbe59a48b380603b9794634 | ["CC0-1.0"] | null | null | null |
#!/usr/bin/env python3
##########################################################################
# USAGE: python3 N50.py ~/stanford_swc/fasta-o-matic/fasta/normal.fa
#        or: import N50 ; help(N50)
#        (main() reads the FASTA path from sys.argv[1])
# DESCRIPTION: Function that calculates N50 for a FASTA file
# Created by Jennifer M Shelton
##########################################################################
import sys
import re
def n50(lengths):
'''
Reverse sort list of lengths and return N50
'''
lengths = sorted(lengths, reverse = True) # reverse sort lengths large
# to small
cumulative_length = sum(lengths) # get total length
fraction = cumulative_length # set fraction of total to 100%
my_n50 = 0 # initialize n50
    for seq_length in lengths:
        if fraction > (cumulative_length/2.0):
            fraction = fraction - seq_length
            my_n50 = seq_length
        else: # when the fraction has passed 50% total length get N50
            return(my_n50)
    return(my_n50) # the largest contig can itself be the N50 (e.g. a single-sequence file)
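# Worked example (illustrative lengths): [10, 4, 3, 2] sum to 19; the largest
# contig alone already covers more than half of 19, so n50() returns 10.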
def main():
'''
calculates N50 for a FASTA file
'''
script = sys.argv[0]
filename = sys.argv[1]
fasta = open(filename, 'r')
header_pattern = re.compile('^>.*') # pattern for a header line
## Initialize strings for headers and sequences and a list for lengths
lengths = []
dna = ''
header = ''
for line in fasta:
line = line.rstrip()
if header_pattern.match(line):
if not dna == '': # skip the first (empty record)
lengths.append(len(dna))
dna = ''
else:
dna = dna + line
    else: # for-else: record the final sequence once the loop finishes
        if dna != '':
            lengths.append(len(dna))
my_n50 = n50(lengths)
print(my_n50)
##########################################################################
##### Execute main unless script is simply imported ############
##### for individual functions ############
##########################################################################
if __name__ == '__main__':
main()
| 34.534483 | 74 | 0.495756 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,074 | 0.536196 |
f24b0ee4bbb24e050ab403a0d1e6bf087f8143ee | 34,017 | py | Python | ColDoc/latex.py | mennucc/ColDoc_project | 947a79592b689f57e59652b37868cc22e520f724 | ["BSD-3-Clause"] | null | null | null | ColDoc/latex.py | mennucc/ColDoc_project | 947a79592b689f57e59652b37868cc22e520f724 | ["BSD-3-Clause"] | null | null | null | ColDoc/latex.py | mennucc/ColDoc_project | 947a79592b689f57e59652b37868cc22e520f724 | ["BSD-3-Clause"] | null | null | null |
#!/usr/bin/env python3
__all__ = ('main_by_args','latex_main','latex_uuid','latex_tree')
cmd_help="""
Command help:
blob
compile the blob(s) with --uuid=UUID,
tree
compile all the blobs starting from --uuid=UUID
main_public
compile the whole document, for the general public
main_private
compile the whole document, including protected material, visible to the editors
all
all of the above
"""
import os, sys, shutil, subprocess, json, argparse, pathlib, tempfile, hashlib, pickle, base64, re, json, dbm
from os.path import join as osjoin
if __name__ == '__main__':
for j in ('','.'):
if j in sys.path:
            sys.stderr.write('Warning: deleting %r from sys.path\n' % (j,))
del sys.path[sys.path.index(j)]
#
a = os.path.realpath(sys.argv[0])
a = os.path.dirname(a)
a = os.path.dirname(a)
assert os.path.isdir(a), a
if a not in sys.path:
sys.path.insert(0, a)
del a
#
from ColDoc import loggin
import logging
logger = logging.getLogger(__name__)
############## ColDoc stuff
#
ColDoc_latex_engines=[
('pdflatex','LaTeX'),
('xelatex','XeLaTeX'),
('lualatex','LuaLaTeX'),
]
#from ColDoc import config, utils
import ColDoc, ColDoc.utils, ColDoc.config, ColDoc.transform
import plasTeX
import plasTeX.TeX, plasTeX.Base.LaTeX, plasTeX.Context , plasTeX.Tokenizer , plasTeX.Base
from plasTeX.TeX import TeX
from plasTeX import TeXDocument, Command
import plasTeX.Base as Base
from plasTeX.Packages import amsthm , graphicx
# the package ColDocUUID.sty defines a LaTeX command \uuid , that can be overriden in the preamble
environments_we_wont_latex = ColDoc.config.ColDoc_environments_we_wont_latex
standalone_template=r"""\documentclass[varwidth=%(width)s]{standalone}
%(latex_macros)s
\def\uuidbaseurl{%(url_UUID)s}
\input{preamble.tex}
\usepackage{ColDocUUID}
\begin{document}
%(begin)s
\input{%(input)s}
%(end)s
\end{document}
"""
preview_template=r"""\documentclass %(documentclass_options)s {%(documentclass)s}
%(latex_macros)s
\def\uuidbaseurl{%(url_UUID)s}
\input{preamble.tex}
\usepackage{hyperref}
\usepackage{ColDocUUID}
\begin{document}
%(begin)s
\input{%(input)s}
%(end)s
\end{document}
"""
## TODO investigate, this generates an empty PDF
##\setlength\PreviewBorder{5pt}
##%\usepackage[active]{preview}
plastex_template=r"""\documentclass{article}
%(latex_macros)s
\def\uuidbaseurl{%(url_UUID)s}
\input{preamble.tex}
\usepackage{hyperref}
\usepackage{ColDocUUID}
\begin{document}
%(begin)s
\input{%(input)s}
%(end)s
\end{document}
"""
def latex_uuid(blobs_dir, uuid, lang=None, metadata=None, warn=True, options = {}):
" `latex` the blob identified `uuid`; if `lang` is None, `latex` all languages; ( `metadata` are courtesy , to avoid recomputing )"
log_level = logging.WARNING if warn else logging.DEBUG
if metadata is None:
uuid_, uuid_dir, metadata = ColDoc.utils.resolve_uuid(uuid=uuid, uuid_dir=None,
blobs_dir = blobs_dir,
coldoc = options.get('coldoc'),
metadata_class= options['metadata_class'])
else:
uuid_dir = None
#
if metadata.environ in environments_we_wont_latex :
## 'include_preamble' is maybe illegal LaTeX; 'usepackage' is not yet implemented
        logger.log(log_level, 'Cannot `pdflatex` environ=%r', metadata.environ)
return True
#
if metadata.environ == 'main_file':
logger.log(log_level, 'Do not need to `pdflatex` the main_file')
return True
#
if lang is not None:
langs=[lang]
else:
langs=metadata.get('lang')
if not langs:
logger.debug('No languages for blob %r in blobs_dir %r',uuid,blobs_dir)
return True
#
res = True
for l in langs:
rh, rp = latex_blob(blobs_dir, metadata=metadata, lang=l,
uuid_dir=uuid_dir, options = options)
res = res and rh and rp
if lang is None:
# update only if all languages were recomputed
metadata.latex_time_update()
metadata.save()
return res
def latex_blob(blobs_dir, metadata, lang, uuid_dir=None, options = {}, squash = True):
""" `latex` the blob identified by the `metadata`, for the given language `lang`.
( `uuid` and `uuid_dir` are courtesy , to avoid recomputing )
Optionally squashes all sublevels, replacing with \\uuidplaceholder """
uuid = metadata.uuid
if uuid_dir is None:
uuid_dir = ColDoc.utils.uuid_to_dir(uuid, blobs_dir=blobs_dir)
#
if lang is None or lang == '':
_lang=''
else:
_lang = '_' + lang
#
if squash is None:
squash = options.get('squash')
# note that extensions are missing
save_name = os.path.join(uuid_dir, 'view' + _lang)
save_abs_name = os.path.join(blobs_dir, save_name)
fake_texfile = tempfile.NamedTemporaryFile(prefix='fakelatex' + _lang + '_' + uuid + '_',
suffix='.tex', dir = blobs_dir , mode='w+', delete=False)
fake_abs_name = fake_texfile.name[:-4]
fake_name = os.path.basename(fake_abs_name)
#
D = {'uuiddir':uuid_dir, 'lang':lang, 'uuid':uuid,
'_lang':_lang,
'width':'4in',
'begin':'','end':'',
'url_UUID' : options['url_UUID'],
'latex_macros' : options.get('latex_macros',metadata.coldoc.latex_macros_uuid),
}
#
b = os.path.join(uuid_dir,'blob'+_lang+'.tex')
s = os.path.join(uuid_dir,'squash'+_lang+'.tex')
if squash:
ColDoc.transform.squash_latex(b, s, blobs_dir, options,
helper = options.get('squash_helper')(blobs_dir, metadata, options))
D['input'] = s
else:
D['input'] = b
#
environ = metadata.environ
if environ[:2] == 'E_' and environ not in ( 'E_document', ):
env = environ[2:]
D['begin'] = r'\begin{'+env+'}'
D['end'] = r'\end{'+env+'}'
if 'split_list' in options and env in options['split_list']:
D['begin'] += r'\item'
##
## create pdf
logger.debug('create pdf for %r',save_abs_name)
env = metadata.environ
if env == 'main_file':
# never used, the main_file is compiled with the latex_main() function
logger.error("should never reach this line")
fake_texfile.write(open(os.path.join(blobs_dir, uuid_dir, 'blob'+_lang+'.tex')).read())
fake_texfile.close()
else:
#
ltclsch = metadata.get('latex_documentclass_choice')
ltclsch = ltclsch[0] if ltclsch else 'auto'
ltcls = options.get('documentclass')
if ltclsch == 'auto':
if env in ColDoc.config.ColDoc_environments_sectioning or env == 'E_document':
ltclsch = 'main'
else:
ltclsch = 'standalone'
if ltclsch == 'main' and not ltcls:
logger.warning('When LaTeXing uuid %r, could not use latex_documentclass_choice = "main"', uuid)
ltclsch = 'standalone'
if ltclsch == 'main':
latextemplate = preview_template
D['documentclass'] = ltcls
elif ltclsch == 'standalone':
latextemplate = standalone_template
elif ltclsch in ('article','book'):
latextemplate = preview_template
D['documentclass'] = ltclsch
else:
        raise RuntimeError("unimplemented latex_documentclass_choice = %r" % (ltclsch,))
# from metadata or from coldoc
ltclsopt = metadata.get('documentclassoptions')
if ltclsopt:
ltclsopt = ltclsopt[0]
else:
ltclsopt = options.get('documentclassoptions')
ltclsopt = ColDoc.utils.parenthesizes(ltclsopt, '[]')
D['documentclass_options'] = ltclsopt
#
fake_texfile.write(latextemplate % D)
fake_texfile.close()
rp = pdflatex_engine(blobs_dir, fake_name, save_name, environ, options)
##
# rewrite log to replace temporary file name with final file name
for ext in '.log','.fls':
try:
a = open(save_abs_name+ext).read()
b = a.replace(fake_name,save_name)
open(save_abs_name+ext,'w').write(b)
except Exception as e:
logger.warning(e)
## create html
logger.debug('create html for %r',save_abs_name)
main_file = open(fake_abs_name+'.tex', 'w')
D['url_UUID'] = ColDoc.config.ColDoc_url_placeholder
main_file.write(plastex_template % D)
main_file.close()
rh = plastex_engine(blobs_dir, fake_name, save_name, environ, options)
# paux is quite large and it will not be used after this line
if os.path.isfile(save_abs_name+'_plastex.paux'):
os.unlink(save_abs_name+'_plastex.paux')
# TODO there is a fundamental mistake here. This function may be called to
# update the PDF/HTML view of only one language. This timestamp
# does not record which language was updated. We should have different timestamps
# for different languages.
if len(metadata.get('lang')) == 1:
metadata.latex_time_update()
#
retcodes = ColDoc.utils.json_to_dict(metadata.latex_return_codes)
j = (':'+lang) if (isinstance(lang,str) and lang) else ''
ColDoc.utils.dict_save_or_del( retcodes, 'latex'+j, rp)
ColDoc.utils.dict_save_or_del( retcodes, 'plastex'+j, rh)
metadata.latex_return_codes = ColDoc.utils.dict_to_json(retcodes)
#
metadata.save()
return rh, rp
def latex_anon(coldoc_dir, uuid='001', lang=None, options = {}, access='public', verbose_name=None, email_to=None):
#
assert access=='public'
#
if isinstance(options, (str,bytes) ):
# base64 accepts both bytes and str
options = pickle.loads(base64.b64decode(options))
#
metadata_class = options.get('metadata_class')
assert coldoc_dir == options.get('coldoc_dir',coldoc_dir)
coldoc = options.get('coldoc')
warn = options.get('warn')
#
n, anon_dir = ColDoc.utils.prepare_anon_tree(coldoc_dir, uuid=uuid, lang=lang,
metadata_class=metadata_class, coldoc=coldoc)
if anon_dir is not None:
assert isinstance(anon_dir, (str, pathlib.Path)), anon_dir
return latex_main(anon_dir, uuid=uuid, lang=lang, options = options, access='public')
else:
return False
def latex_main(blobs_dir, uuid='001', lang=None, options = {}, access=None, verbose_name=None, email_to=None):
"latex the main document, as the authors intended it ; save all results in UUID dir, as main.* "
#
assert access in ('public','private')
assert isinstance(blobs_dir, (str, pathlib.Path)), blobs_dir
assert os.path.isdir(blobs_dir)
#
if isinstance(options, (str,bytes) ):
# base64 accepts both bytes and str
options = pickle.loads(base64.b64decode(options))
#
metadata_class = options.get('metadata_class')
coldoc_dir = options.get('coldoc_dir')
coldoc = options.get('coldoc')
#
if coldoc_dir is not None:
options = prepare_options_for_latex(coldoc_dir, blobs_dir, metadata_class, coldoc, options)
#
uuid_, uuid_dir, metadata = ColDoc.utils.resolve_uuid(uuid=uuid, uuid_dir=None,
blobs_dir = blobs_dir,
coldoc = coldoc,
metadata_class = metadata_class)
environ = metadata.environ
#
if access =='public':
options['plastex_theme'] = 'blue'
latex_macros = metadata.coldoc.latex_macros_public
else:
options['plastex_theme'] = 'green'
latex_macros = metadata.coldoc.latex_macros_private
if lang is not None:
langs=[lang]
else:
langs=metadata.get('lang')
#
ret = True
    coldoc = options.get('coldoc')
    retcodes = {}
    if coldoc is not None:
        retcodes = ColDoc.utils.json_to_dict(coldoc.latex_return_codes)
#
for lang in langs:
#
_lang = ('_'+lang) if (isinstance(lang,str) and lang) else ''
lang_ = (':'+lang) if (isinstance(lang,str) and lang) else ''
#
uuid_dir = ColDoc.utils.uuid_to_dir(uuid, blobs_dir=blobs_dir)
# note that extensions are missing
save_name = os.path.join(uuid_dir, 'main' + _lang)
save_abs_name = os.path.join(blobs_dir, save_name)
fake_name = 'fakemain' + _lang
fake_abs_name = os.path.join(blobs_dir, fake_name)
#
a = os.path.join(blobs_dir, uuid_dir, 'blob'+_lang+'.tex')
prologue, preamble, body, epilogue = ColDoc.utils.split_blob(open(a))
if not(preamble):
logger.warning(r" cannot locate '\begin{document}' ")
if True:
preamble = [latex_macros] + preamble
import re
r = re.compile(r'\\usepackage{ColDocUUID}')
if not any(r.match(a) for a in preamble):
preamble += ['\\usepackage{ColDocUUID}\n']
logger.debug(r" adding \usepackage{ColDocUUID}")
a = (r'\def\uuidbaseurl{%s}'%(options['url_UUID'],)+'\n')
f_pdf = ''.join(prologue + preamble + [a] + body + epilogue)
a = (r'\def\uuidbaseurl{%s}'%(ColDoc.config.ColDoc_url_placeholder,)+'\n')
f_html = ''.join(prologue + preamble + [a] + body + epilogue)
#
open(fake_abs_name+'.tex','w').write(f_pdf)
rp = pdflatex_engine(blobs_dir, fake_name, save_name, environ, options)
ColDoc.utils.dict_save_or_del(retcodes, 'latex'+lang_+':'+access, rp)
try:
ColDoc.utils.os_rel_symlink(save_name+'.pdf','main'+_lang+'.pdf',
blobs_dir, False, True)
except:
logger.exception('while symlinking')
open(fake_abs_name+'.tex','w').write(f_html)
rh = plastex_engine(blobs_dir, fake_name, save_name, environ, options,
levels = True, tok = True, strip_head = False)
parse_plastex_html(blobs_dir, osjoin(blobs_dir, save_name+'_html'), save_abs_name+'_plastex.paux')
# paux is quite large and it will not be used after this line
os.unlink(save_abs_name+'_plastex.paux')
ColDoc.utils.dict_save_or_del(retcodes, 'plastex'+lang_+':'+access, rh)
try:
ColDoc.utils.os_rel_symlink(save_name+'_html','main'+_lang+'_html',
blobs_dir, True, True)
except:
logger.exception('while symlinking')
#
for e in ('.aux','.bbl','_plastex.paux'):
# keep a copy of the aux file
# TODO should encode by language
a,b = osjoin(blobs_dir,save_name+e), osjoin(blobs_dir,'main'+e)
if os.path.isfile(a):
logger.debug('Copy %r to %r',a,b)
shutil.copy(a,b)
#
ret = ret and rh and rp
#
if coldoc is not None:
if lang is None:
# update only if all languages were updated
coldoc.latex_time_update()
coldoc.latex_return_codes = ColDoc.utils.dict_to_json(retcodes)
coldoc.save()
return ret
def parse_plastex_paux(blobs_dir, paux):
if isinstance(paux,str):
if not os.path.isabs(paux):
paux = osjoin(blobs_dir, paux)
try:
paux = open(paux,'rb')
except OSError as e:
logger.error('Cannot open %r : %r',paux,e)
return {}
a = pickle.load(paux)
a = a['HTML5']
D = {}
for n in a:
try:
if n.startswith('UUID:'):
uuid = n[5:]
url = a[n]['url']
if '#' in url:
S,name = url.split('#')
D[uuid] = (S, '#' + name)
else:
D[uuid] = (url, '')
except:
logger.exception('vv')
return D
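# The mapping built above is uuid -> (html file, optional fragment), e.g.
# (illustrative values): {'0A3': ('index.html', '#UUID:0A3'), '0B1': ('sec2.html', '')}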
def parse_plastex_html(blobs_dir, html_dir, paux):
try:
from bs4 import BeautifulSoup
except ImportError:
logger.error('Please install BeautifulSoup4: pip3 install BeautifulSoup4')
return
D = parse_plastex_paux(blobs_dir, paux)
P = ColDoc.config.ColDoc_url_placeholder
for S in os.listdir(html_dir):
if S.endswith('html'):
name = href = uuid = None
soup = BeautifulSoup(open(osjoin(html_dir,S)).read(), 'html.parser')
for link in soup.find_all('a'):
h = link.get('href')
n = link.get('name')
if n:
if n.startswith('UUID:'):
uuid = n[5:]
D[uuid] = (S, n)
else:
name = n
if h and h.startswith(P):
uuid = h[len(P):]
if uuid not in D and name:
D[uuid] = (S, '#' + name)
#pickle.dump(D,open(osjoin(blobs_dir,'.UUID_html_mapping.pickle'),'wb'))
db = dbm.open(osjoin(blobs_dir,'.UUID_html_mapping.dbm'),'c')
for k,v in D.items():
db[k] = json.dumps(v)
db.close()
json.dump(D,open(osjoin(blobs_dir,'.UUID_html_mapping.json'),'w'),indent=1)
def get_specific_html_for_UUID(blobs_dir,UUID):
try:
db = dbm.open(osjoin(blobs_dir,'.UUID_html_mapping.dbm'))
return json.loads(db[UUID])
except KeyError:
logger.info('Cannot resolve uuid=%r in %r',UUID,blobs_dir)
return '',''
except:
logger.exception('Cannot resolve uuid=%r in %r',UUID,blobs_dir)
return '',''
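# Typical lookup (the uuid is illustrative):
#   page, anchor = get_specific_html_for_UUID(blobs_dir, '001')
# returns something like ('index.html', '#UUID:001') once parse_plastex_html
# has filled the dbm cache; unresolved uuids fall back to ('', '').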
def dedup_html(src, options):
replacements = []
dedup_root = options.get('dedup_root')
dedup_url = options.get('dedup_url')
if dedup_root is not None:
coldoc_site_root = options['coldoc_site_root']
for k in 'js', 'styles', 'symbol-defs.svg' :
k_ = osjoin(src,k)
if os.path.exists(k_):
dedup = ColDoc.utils.replace_with_hash_symlink(coldoc_site_root, src, dedup_root, k)
if os.path.isfile(k_):
replacements.append( (k, dedup_url + '/' + dedup) )
elif os.path.isdir(k_):
for dirpath, dirnames, filenames in os.walk(k_):
for f in filenames:
a = osjoin(dirpath,f)
o = a[(len(src)+1):]
r = a[(len(src)+len(k)+2):]
replacements.append( ( o, (dedup_url + '/' + dedup + '/' + r) ) )
return replacements
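# The (original path, deduplicated url) pairs returned here are used by
# plastex_engine below to rewrite href/src attributes, so every compiled
# document shares a single copy of the bulky js/styles/svg assets.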
def plastex_engine(blobs_dir, fake_name, save_name, environ, options,
levels = False, tok = False, strip_head = True, plastex_theme=None):
" compiles the `fake_name` latex, and generates the `save_name` result ; note that extensions are missing "
save_abs_name = os.path.join(blobs_dir, save_name)
fake_abs_name = os.path.join(blobs_dir, fake_name)
#
plastex_theme = options.get('plastex_theme','green')
#
fake_support=[]
for es,ed in ColDoc.config.ColDoc_plastex_fakemain_reuse_extensions:
a = osjoin(blobs_dir,'main'+es)
if os.path.exists(a):
logger.debug("Re-using %r as %r",a,fake_abs_name+ed)
shutil.copy2(a,fake_abs_name+ed)
fake_support.append((a,fake_abs_name+ed))
elif os.path.exists(save_abs_name+es):
logger.debug("Re-using %r as %r",save_abs_name+es,fake_abs_name+ed)
shutil.copy(save_abs_name+es,fake_abs_name+ed)
fake_support.append((save_abs_name+es,fake_abs_name+ed))
#
F = fake_name+'.tex'
d = os.path.dirname(F)
#assert os.path.isfile(F),F
if d :
logger.warning("The argument of `plastex` is not in the blobs directory: %r", F)
#
a,b = os.path.split(save_abs_name+'_html')
save_name_tmp = tempfile.mkdtemp(dir=a,prefix=b)
#
argv = ['-d',save_name_tmp,"--renderer=HTML5", '--theme-css', plastex_theme]
if not levels :
argv += [ '--split-level', '-3']
if tok is False or (environ[:2] == 'E_' and tok == 'auto'):
argv.append( '--no-display-toc' )
#n = osjoin(blobs_dir,save_name+'_paux')
#if not os.path.isdir(n): os.mkdir(n)
## do not use ['--paux-dirs',save_name+'_paux'] until we understand what it does
argv += ['--log',F]
stdout_ = osjoin(blobs_dir,save_name+'_plastex.stdout')
ret = ColDoc.utils.plastex_invoke(cwd_ = blobs_dir ,
stdout_ = stdout_,
argv_ = argv,
logfile = fake_name+'.log')
if os.path.exists(save_abs_name+'_html') :
shutil.rmtree(save_abs_name+'_html')
os.rename(save_name_tmp, save_abs_name+'_html')
extensions = '.log','.paux','.tex','.bbl'
if ret :
logger.warning('Failed: cd %r ; plastex %s',blobs_dir,' '.join(argv))
for e in extensions:
if os.path.exists(save_abs_name+'_plastex'+e):
os.rename(save_abs_name+'_plastex'+e,save_abs_name+'_plastex'+e+'~')
if os.path.exists(fake_abs_name+e):
s,d = fake_abs_name+e,save_abs_name+'_plastex'+e
os.rename(s,d)
if ret: logger.warning(' rename %r to %r',s,d)
if os.path.isfile(osjoin(blobs_dir, save_name+'_html','index.html')):
logger.info('created html version of %r ',save_abs_name)
else:
logger.warning('no "index.html" in %r',save_name+'_html')
return False
#
replacements = dedup_html(osjoin(blobs_dir, save_name+'_html'), options)
# replace urls in html to point to dedup-ed stuff
for f in os.listdir(osjoin(blobs_dir, save_name+'_html')):
f = osjoin(blobs_dir, save_name+'_html', f)
if f[-5:]=='.html':
L = O = open(f).read()
# ok, regular expressions may be cooler
for p in 'href="' , 'src="' :
for e in '"', '#':
for o,r in replacements:
L = L.replace(p+o+e , p+r+e)
if L != O:
os.rename(f,f+'~')
open(f,'w').write(L)
#
if strip_head:
for f in os.listdir(osjoin(blobs_dir, save_name+'_html')):
f = osjoin(blobs_dir, save_name+'_html', f)
if f[-5:]=='.html':
logger.debug('stripping <head> of %r ',f)
os.rename(f,f+'~~')
L=open(f+'~~').readlines()
try:
ns, ne = None,None
for n,s in enumerate(L):
s = s.strip()
if s == '<body>': ns = n
if s == '</body>': ne = n
                    assert ns is not None and ne is not None
L = L[ns+1:ne]
F = open(f,'w')
for l in L:
if l[:7] != '<script':
F.write(l)
except:
logger.exception('ARGH')
return ret == 0
def pdflatex_engine(blobs_dir, fake_name, save_name, environ, options, repeat = None):
" If repeat is None, it will be run twice if bib data or aux data changed"
save_abs_name = os.path.join(blobs_dir, save_name)
fake_abs_name = os.path.join(blobs_dir, fake_name)
# 'main.aux' and 'main.bbl' are saved latex_main()
for e in ColDoc.config.ColDoc_pdflatex_fakemain_reuse_extensions:
a = os.path.join(blobs_dir,'main'+e)
if os.path.exists(save_abs_name+e):
logger.debug("Re-using %r for %r",save_abs_name+e,fake_abs_name+e)
shutil.copy2(save_abs_name+e, fake_abs_name+e)
elif os.path.exists(a):
logger.debug("Re-using %r for %r (hoping for the best)",a,fake_abs_name+e)
shutil.copy2(a,fake_abs_name+e)
else:
logger.debug("No %r file for this job",e)
#
extensions = ColDoc.config.ColDoc_pdflatex_fakemain_preserve_extensions
#
## dunno what this may be useful for
#for e in extensions:
# if e not in ('.tex','.aux','.bbl') and os.path.exists(fake_abs_name+e):
# logger.warning('Overwriting: %r',fake_abs_name+e)
#
engine = options.get('latex_engine','pdflatex')
logger.debug('Using engine %r',engine)
args = [engine,'-file-line-error','-interaction','batchmode',
'-recorder','-no-shell-escape','-no-parse-first-line',
##TODO may use -output-directory directory
## TODO TEST THIS
##( r"\def\uuidbaseurl{%s}" % (options['url_UUID'],)), r"\input",
## TODO for luatex may add --nosocket --safer
fake_name+'.tex']
#
p = subprocess.Popen(args,cwd=blobs_dir,stdin=open(os.devnull),
stdout=open(os.devnull,'w'),stderr=subprocess.STDOUT)
r=p.wait()
logger.debug('Engine result %r',r)
#
if r != 0:
logger.debug('LaTeX failed %r will not run BiBTeX',r)
elif environ in ( 'main_file', 'E_document') and \
os.path.isfile(fake_abs_name+'.aux') and \
'\\bibdata' in open(fake_abs_name+'.aux').read():
logger.debug('Running BiBTeX')
if os.path.isfile(fake_abs_name+'.bbl'):
file_md5 = hashlib.md5(open(fake_abs_name+'.bbl','rb').read()).hexdigest()
else:
file_md5 = None
p = subprocess.Popen(['bibtex',fake_name],
cwd=blobs_dir,stdin=open(os.devnull),
stdout=subprocess.PIPE ,stderr=subprocess.STDOUT)
a = p.stdout.read()
if p.wait() != 0:
logger.warning('bibtex fails, see %r'%(save_abs_name+'.blg',))
logger.warning('bibtex output: %r',a)
else:
if os.path.isfile(fake_abs_name+'.bbl'):
if file_md5 is None or file_md5 != hashlib.md5(open(fake_abs_name+'.bbl','rb').read()).hexdigest():
if repeat is None:
logger.debug('BibTeX changed the .bbl file, will rerun')
repeat = True
else:
logger.debug('BibTeX changed the .bbl file')
else:
logger.debug('BibTeX did not change the .bbl file')
else:
logger.warning('BiBTeX did not generate %r',fake_abs_name+'.bbl')
#
a = 'Rerun to get cross-references right'
if r == 0:
if repeat is None and a in open(fake_abs_name+'.log').read():
logger.debug('%r reports %r in log, will rerun',engine,a)
repeat = True
elif repeat is None:
logger.debug('%r does not report %r in log, will not rerun',engine,a)
#
if r == 0 and repeat:
logger.debug('Rerunning engine %r',engine)
p = subprocess.Popen(args,cwd=blobs_dir,stdin=open(os.devnull),
stdout=open(os.devnull,'w'),stderr=subprocess.STDOUT)
r = p.wait()
logger.debug('Engine result %r',r)
#
res = r == 0
if not res:
logger.warning('%r fails, see %r'%(engine,save_abs_name+'.log'))
#
for e in extensions:
if os.path.exists(save_abs_name+e):
os.rename(save_abs_name+e,save_abs_name+e+'~')
if os.path.exists(fake_abs_name+e):
if e == '.pdf':
siz=os.path.getsize(fake_abs_name+e)
if siz :
logger.info("Created pdf %r size %d"%(save_abs_name+e,siz))
else:
logger.warning("Created empty pdf %r "%(save_abs_name+e,))
a,b=fake_abs_name+e,save_abs_name+e
logger.debug('Rename %r to %r',a,b)
os.rename(a,b)
else:
if e not in ( '.pdf', '.aux' ) :
logger.debug("Missing :%r"%(fake_abs_name+e,))
else:
logger.warning("Missing :%r"%(fake_abs_name+e,))
if e=='.pdf': res=False
return res
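# Call sketch (not part of the original module; paths are illustrative):
# compile 'fakemain.tex' inside the blobs directory and store the products
# under the 'main' basename; with repeat left as None, the rerun decision
# falls to the .bbl/.log heuristics above.
#   ok = pdflatex_engine('/tmp/blobs', 'fakemain', 'main',
#                        environ='main_file', options={'latex_engine': 'pdflatex'})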
def latex_tree(blobs_dir, uuid=None, lang=None, warn=False, options={}, verbose_name=None, email_to=None):
" latex the whole tree, starting from `uuid` "
log_level = logging.WARNING if warn else logging.DEBUG
#
if isinstance(options, (str,bytes) ):
# base64 accepts both bytes and str
options = pickle.loads(base64.b64decode(options))
#
metadata_class = options.get('metadata_class')
coldoc_dir = options.get('coldoc_dir')
coldoc = options.get('coldoc')
#
if coldoc_dir is not None:
options = prepare_options_for_latex(coldoc_dir, blobs_dir, metadata_class, coldoc, options)
#
if uuid is None:
logger.warning('Assuming root_uuid = 001')
uuid = '001'
uuid_, uuid_dir, metadata = ColDoc.utils.resolve_uuid(uuid=uuid, uuid_dir=None,
blobs_dir = blobs_dir,
coldoc = coldoc,
metadata_class=metadata_class)
#
ret = True
if metadata.environ in environments_we_wont_latex:
logger.log(log_level, 'Cannot `latex` environ %r , UUID = %r'%(metadata.environ, uuid,))
else:
r = latex_uuid(blobs_dir, uuid=uuid, metadata=metadata, lang=lang, warn=warn, options=options)
ret = ret and r
for u in metadata.get('child_uuid'):
logger.debug('moving down from node %r to node %r',uuid,u)
r = latex_tree(blobs_dir, uuid=u, lang=lang, warn=warn, options=options)
ret = ret and r
return ret
def prepare_options_for_latex(coldoc_dir, blobs_dir, metadata_class, coldoc=None, options = None):
if options is None:
options = {}
### get and set some options
if coldoc is None:
coldoc = options.get('coldoc')
else:
options['coldoc'] = coldoc
options['coldoc_dir'] = coldoc_dir
#
try:
blobinator_args = ColDoc.utils.get_blobinator_args(blobs_dir)
options.update(blobinator_args)
except:
logger.exception('No blobinator_args')
#
a = osjoin(coldoc_dir, 'coldoc.json')
if os.path.isfile( a ):
coldoc_args = json.load(open(a))
options.update(coldoc_args['fields'])
#
coldoc_root_uuid = options.get('root_uuid')
if isinstance(coldoc_root_uuid,int):
coldoc_root_uuid = ColDoc.utils.int_to_uuid(coldoc_root_uuid)
options['root_uuid'] = coldoc_root_uuid
#
root_metadata = metadata_class.load_by_uuid(uuid=coldoc_root_uuid, coldoc=coldoc, basepath=blobs_dir)
for a in ('documentclass', 'documentclassoptions'):
b = root_metadata.get(a)
if b:
options[a] = b[0]
logger.debug('In root uuid %r = %r',a,b)
else:
logger.warning('In root uuid no value for %r',a)
#
logger.debug('From %r options %r',a,options)
else:
logger.error('No %r',a)
#
return options
def prepare_parser(cmd_help=cmd_help):
# parse arguments
COLDOC_SITE_ROOT = os.environ.get('COLDOC_SITE_ROOT')
parser = argparse.ArgumentParser(description='Compile coldoc material, using `latex` and `plastex` ',
epilog=cmd_help,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('--verbose','-v',action='count',default=0)
parser.add_argument('--uuid',help='UUID to work on/start from')
parser.add_argument('command', help='specific command',nargs='+')
return parser
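# Usage sketch: only the base flags defined above exist at this point;
# main() below adds '--blobs-dir' and '--url-UUID' before parsing.
#   parser = prepare_parser()
#   args = parser.parse_args(['-vv', '--uuid', '001', 'tree'])
#   # args.verbose == 2, args.uuid == '001', args.command == ['tree']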
def main(argv):
parser = prepare_parser()
parser.add_argument('--blobs-dir',type=str,\
help='directory where the blob_ized output is saved',
required=True)
parser.add_argument('--url-UUID',type=str,\
help='URL of the website that will show the UUIDs, used by my \\uuid macro in PDF',
required=True)
args = parser.parse_args(argv[1:])
#
blobs_dir = args.blobs_dir
assert os.path.isdir(blobs_dir), blobs_dir
#
args.coldoc_dir = coldoc_dir = os.path.dirname(os.path.dirname(blobs_dir))
from ColDoc.utils import FMetadata
options = prepare_options_for_latex(coldoc_dir, blobs_dir, FMetadata)
options['url_UUID'] = args.url_UUID
#
options["squash_helper"] = ColDoc.transform.squash_input_uuid
options['metadata_class'] = ColDoc.utils.FMetadata
return main_by_args(args,options)
def main_by_args(args,options):
argv = args.command
blobs_dir = args.blobs_dir
coldoc_dir = args.coldoc_dir
logger.setLevel(logging.WARNING)
if args.verbose > 1 :
logger.setLevel(logging.DEBUG)
elif args.verbose > 0 :
logger.setLevel(logging.INFO)
#
if args.uuid is not None:
UUID = args.uuid
elif 'root_uuid' in options:
UUID = options['root_uuid']
else:
UUID = '001'
#
ret = True
if argv[0] == 'blob':
lang = None
if len(argv)>2:
lang = argv[2]
ret = latex_uuid(blobs_dir,UUID,lang=lang, options=options)
elif argv[0] == 'tree':
ret = latex_tree(blobs_dir,UUID, options=options)
elif argv[0] == 'main_private':
ret = latex_main(blobs_dir, uuid=UUID, options=options, access='private')
elif argv[0] == 'main_public':
ret = latex_anon(coldoc_dir, uuid=UUID, options=options, access='public')
elif argv[0] == 'all':
ret = latex_main(blobs_dir, uuid=UUID, options=options, access='private')
ret &= latex_anon(coldoc_dir, uuid=UUID, options=options, access='public')
ret &= latex_tree(blobs_dir,UUID, options=options)
else:
        sys.stderr.write('Unknown command, see --help\n')
return False
return ret
if __name__ == '__main__':
ret = main(sys.argv)
sys.exit(0 if ret else 13)
| 39.010321 | 135 | 0.587559 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8,370 | 0.246053 |
f24b88cb32a898b91b261cd705b2ad3fcd5d1287 | 2,950 | py | Python | extension/visualizer/generate_visualizer_header.py | AldoMyrtaj/duckdb | 3aa4978a2ceab8df25e4b20c388bcd7629de73ed | [
"MIT"
] | 2,816 | 2018-06-26T18:52:52.000Z | 2021-04-06T10:39:15.000Z | extension/visualizer/generate_visualizer_header.py | AldoMyrtaj/duckdb | 3aa4978a2ceab8df25e4b20c388bcd7629de73ed | [
"MIT"
] | 1,310 | 2021-04-06T16:04:52.000Z | 2022-03-31T13:52:53.000Z | extension/visualizer/generate_visualizer_header.py | AldoMyrtaj/duckdb | 3aa4978a2ceab8df25e4b20c388bcd7629de73ed | [
"MIT"
] | 270 | 2021-04-09T06:18:28.000Z | 2022-03-31T11:55:37.000Z | # this script generates visualizer header
import os
visualizer_dir = 'extension/visualizer'
visualizer_css = os.path.join(visualizer_dir, 'visualizer.css')
visualizer_d3 = os.path.join(visualizer_dir, 'd3.js')
visualizer_script = os.path.join(visualizer_dir, 'script.js')
visualizer_header = os.path.join(visualizer_dir, 'include', 'visualizer_constants.hpp')
def open_utf8(fpath, flags):
import sys
if sys.version_info[0] < 3:
return open(fpath, flags)
else:
return open(fpath, flags, encoding="utf8")
def get_byte_array(fpath, add_null_terminator = True):
with open(fpath, 'rb') as f:
text = bytearray(f.read())
result_text = ""
first = True
for byte in text:
if first:
result_text += str(byte)
else:
result_text += ", " + str(byte)
first = False
if add_null_terminator:
result_text += ", 0"
return result_text
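# Quick check (scratch file, not part of the generated header): a file
# containing b'ab' serializes to "97, 98, 0" with the null terminator.
#   with open('scratch.bin', 'wb') as f:
#       f.write(b'ab')
#   assert get_byte_array('scratch.bin') == "97, 98, 0"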
def write_file(fname, varname):
result = "const uint8_t %s[] = {" % (varname,) + get_byte_array(fname) + "};\n"
return result
def create_visualizer_header():
result = """/* THIS FILE WAS AUTOMATICALLY GENERATED BY generate_visualizer_header.py */
/*
Copyright 2010-2020 Mike Bostock
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the author nor the names of contributors may be used to
endorse or promote products derived from this software without specific prior
written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#pragma once
"""
result += write_file(visualizer_css, "css")
result += write_file(visualizer_d3, "d3")
result += write_file(visualizer_script, "script")
with open_utf8(visualizer_header, 'w+') as f:
f.write(result)
create_visualizer_header() | 36.419753 | 92 | 0.737627 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,788 | 0.606102 |
f24c7bebfc50062402e4f3d020937fffe8042def | 1,945 | py | Python | kivyx/uix/aspectratio.py | gottadiveintopython/kivyx.uix.aspectratio | e8b049fe76c9350b8c167ff1fb32299b8feceba7 | [
"MIT"
] | null | null | null | kivyx/uix/aspectratio.py | gottadiveintopython/kivyx.uix.aspectratio | e8b049fe76c9350b8c167ff1fb32299b8feceba7 | [
"MIT"
] | null | null | null | kivyx/uix/aspectratio.py | gottadiveintopython/kivyx.uix.aspectratio | e8b049fe76c9350b8c167ff1fb32299b8feceba7 | [
"MIT"
] | null | null | null | __all__ = ('KXAspectRatio', )
from kivy.uix.layout import Layout
from kivy.properties import BoundedNumericProperty, OptionProperty
HALIGN_TO_ATTR = {
'center': 'center_x',
'middle': 'center_x',
'left': 'x',
'right': 'right',
}
VALIGN_TO_ATTR = {
'center': 'center_y',
'middle': 'center_y',
'bottom': 'y',
'top': 'top',
}
class KXAspectRatio(Layout):
aspect_ratio = BoundedNumericProperty(1, min=0)
halign = OptionProperty(
'center', options=('center', 'middle', 'left', 'right', ))
valign = OptionProperty(
'center', options=('center', 'middle', 'bottom', 'top', ))
def __init__(self, **kwargs):
super().__init__(**kwargs)
tl = self._trigger_layout
self.bind(
parent=tl, children=tl, size=tl, pos=tl,
aspect_ratio=tl, halign=tl, valign=tl)
def add_widget(self, *args, **kwargs):
if self.children:
raise Exception('KXAspectRatio can only have one child')
return super().add_widget(*args, **kwargs)
def do_layout(self, *args):
if not self.children:
return
c = self.children[0]
c_aspect_ratio = self.aspect_ratio
w = self.width
h = self.height
x_attr = HALIGN_TO_ATTR[self.halign]
y_attr = VALIGN_TO_ATTR[self.valign]
if c_aspect_ratio == 0 or w <= 0 or h <= 0:
c.width = 0
c.height = 0
setattr(c, x_attr, getattr(self, x_attr))
setattr(c, y_attr, getattr(self, y_attr))
else:
if (w / h) < c_aspect_ratio:
c.width = w
c.height = w / c_aspect_ratio
c.x = self.x
setattr(c, y_attr, getattr(self, y_attr))
else:
c.width = h * c_aspect_ratio
c.height = h
setattr(c, x_attr, getattr(self, x_attr))
c.y = self.y
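# Usage sketch (not part of the module; 'picture.png' is a placeholder):
# the widget letterboxes its single child into a 16:9 box, honoring the
# halign/valign options declared above.
#   from kivy.uix.image import Image
#   root = KXAspectRatio(aspect_ratio=16 / 9, halign='center', valign='top')
#   root.add_widget(Image(source='picture.png'))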
| 29.469697 | 68 | 0.5491 | 1,585 | 0.81491 | 0 | 0 | 0 | 0 | 0 | 0 | 244 | 0.12545 |
f24e4b499348b1e6839320b71759fce8e46d5cc8 | 4,006 | py | Python | src/analyze_img.py | IW276/IW276SS21-P13 | 851e220c34d55caa91f0967e02dc86c34deee2fa | [
"MIT"
] | null | null | null | src/analyze_img.py | IW276/IW276SS21-P13 | 851e220c34d55caa91f0967e02dc86c34deee2fa | [
"MIT"
] | null | null | null | src/analyze_img.py | IW276/IW276SS21-P13 | 851e220c34d55caa91f0967e02dc86c34deee2fa | [
"MIT"
] | null | null | null | import cv2
import numpy as np
from matplotlib import pyplot as plt
brightness = {"DARK": 0,
"NORMAL": 1,
"LIGHT": 2}
contrast = {"HIGH": 2,
"NORMAL": 1,
"LOW": 0}
class ImageSetup:
def __init__(self):
self.brightness = None
self.contrast = None
self.gamma = 1
# grayscale values
self.average = -1
self.std_deviation = -1
self.threshold = -1
# saturation values
self.sat_average = -1
self.sat_std_deviation = -1
self.sat_threshold = -1
def average(img2d):
rows, cols = img2d.shape
m = np.mean(img2d[0:rows, 0:cols])
return m
def variance_std_deviation(img2d):
# variance
v = np.var(img2d)
# standard deviation
s = np.sqrt(v)
return v, s
def histogram(img2d, name=None, plot=False):
hist = cv2.calcHist([img2d], [0], None, [256], [0, 256])
if plot:
plt.hist(img2d.ravel(), 256, [0, 256])
plt.xlabel(name)
plt.show()
hist_norm = hist.ravel() / hist.sum()
return hist, hist_norm
def threshold(img2d):
    # cv2.threshold returns the threshold value followed by the result image
thr, o1 = cv2.threshold(img2d, 0, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C + cv2.THRESH_OTSU)
return thr
class Configuration:
def __init__(self, image):
self.img = image
self.imgGray = cv2.cvtColor(self.img, cv2.COLOR_BGR2GRAY)
self.imgHSV = cv2.cvtColor(self.img, cv2.COLOR_BGR2HSV)
self.rows, self.cols, self.cha = self.img.shape
self.pixels = self.cols * self.rows
self.imgSetup = ImageSetup()
def get_brightness(self):
m = average(self.imgGray)
if m < 100:
self.imgSetup.brightness = brightness["DARK"]
        elif m < 150:  # 100 <= m < 150
self.imgSetup.brightness = brightness["NORMAL"]
else:
self.imgSetup.brightness = brightness["LIGHT"]
self.imgSetup.average = m
def get_saturation(self):
m_sat = average(self.imgHSV[:, :, 1])
s2, s = variance_std_deviation(self.imgHSV[:, :, 1])
self.imgSetup.sat_average = m_sat
self.imgSetup.sat_std_deviation = s
def get_contrast(self):
s2, s = variance_std_deviation(self.imgGray)
if s >= 70:
self.imgSetup.contrast = contrast["HIGH"]
elif s >= 40:
self.imgSetup.contrast = contrast["NORMAL"]
else:
self.imgSetup.contrast = contrast["LOW"]
self.imgSetup.std_deviation = s
def get_thresholds(self):
gray_thresh = threshold(self.imgGray)
sat_thresh = threshold(self.imgHSV[:, :, 1])
self.imgSetup.threshold = gray_thresh
self.imgSetup.sat_threshold = sat_thresh
def print_values(self, do_print=True):
if do_print:
print("Average brightness: " + str(self.imgSetup.average))
print("Standard deviation: " + str(self.imgSetup.std_deviation))
print("Average saturation: " + str(self.imgSetup.sat_average))
print("Std. deviation sat: " + str(self.imgSetup.sat_std_deviation))
print("Threshold gray: " + str(self.imgSetup.threshold))
print("Threshold sat: " + str(self.imgSetup.sat_threshold))
print("Brightness: " + str(self.imgSetup.brightness))
print("Contrast: " + str(self.imgSetup.contrast))
def show(self, show=True):
if show:
cv2.imshow("Color", self.img)
cv2.waitKey(0)
cv2.imshow("Gray", self.imgGray)
cv2.waitKey(0)
cv2.imshow("Saturation", self.imgHSV[:, :, 1])
cv2.waitKey(0)
cv2.destroyAllWindows()
def evaluate(img):
c = Configuration(img)
c.get_brightness()
c.get_contrast()
histogram(c.imgGray, "gray")
histogram(c.imgHSV[:, :, 1], "saturation")
c.get_saturation()
c.get_thresholds()
c.print_values(False)
c.show(False)
return c.imgSetup
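# Usage sketch ('frame.jpg' is a placeholder; requires OpenCV):
#   img = cv2.imread('frame.jpg')
#   setup = evaluate(img)
#   print(setup.brightness, setup.contrast, setup.threshold)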
| 30.120301 | 92 | 0.595856 | 2,784 | 0.694958 | 0 | 0 | 0 | 0 | 0 | 0 | 399 | 0.099601 |
f2503cce75279fee15a3fc46cd4a46df58314fef | 3,799 | py | Python | models/game/bots/RandoMaxBot.py | zachdj/ultimate-tic-tac-toe | b8e6128d9d19628f6f889a3958d30854527a8645 | [
"MIT"
] | null | null | null | models/game/bots/RandoMaxBot.py | zachdj/ultimate-tic-tac-toe | b8e6128d9d19628f6f889a3958d30854527a8645 | [
"MIT"
] | null | null | null | models/game/bots/RandoMaxBot.py | zachdj/ultimate-tic-tac-toe | b8e6128d9d19628f6f889a3958d30854527a8645 | [
"MIT"
] | null | null | null | import random
from models.game.bots.Bot import Bot
from models.game.Board import Board
class RandoMaxBot(Bot):
""" Semi-random bot
This is a minimax bot that scores moves randomly unless the end of the game is seen within a 2-ply lookahead
"""
def __init__(self, number, name=None):
if name is None:
name = "Rando-Max Bot"
Bot.__init__(self, number, name=name)
self.player_type = 'randomax'
random.seed()
def compute_next_move(self, board, valid_moves):
score, selected_move = self._max(board, valid_moves,-float('inf'), float('inf'), 2)
return selected_move
def _max(self, board, valid_moves, alpha, beta, max_depth):
"""
Private function which computes the move that a rational maximizing player would choose
:param board: GlobalBoard object representing the current state
:param valid_moves: list of valid moves that can be made on the board object
:param alpha: the current value of alpha (the best score that MAX can guarantee so far)
:param beta: the current value of beta (the best score that MIN can guarantee so far)
:return: the value (score) of the best move and the move object itself
"""
if board.board_completed: # termination test
if board.winner == Board.EMPTY or board.winner == Board.CAT:
return 0, None
elif board.winner == self.number:
return 10000000, None
else:
return -10000000, None
elif max_depth == 0:
# scores are computed from the perspective of the 'X' player, so they need to be flipped if our bot is 'O'
if self.number == Board.X:
return self.compute_score(board), None
else:
return -self.compute_score(board), None
a, b = alpha, beta
value = -float('inf')
best_move = None
for move in valid_moves:
child_board = board.clone()
child_board.make_move(move)
move_value, minimizing_move = self._min(child_board, child_board.get_valid_moves(move), a, b, max_depth-1)
if move_value > value:
value = move_value
best_move = move
if value >= b:
return value, best_move
a = max(a, move_value)
return value, best_move
def _min(self, board, valid_moves, alpha, beta, max_depth):
# test for stopping condition
if board.board_completed:
if board.winner == Board.EMPTY or board.winner == Board.CAT:
return 0, None
elif board.winner == self.number:
return 10000000, None
else:
return -10000000, None
elif max_depth == 0:
# scores are computed from the perspective of the 'X' player, so they need to be flipped if our bot is 'O'
if self.number == Board.X:
return self.compute_score(board), None
else:
return -self.compute_score(board), None
a, b = alpha, beta
value = float('inf')
best_move = None
for move in valid_moves:
child_board = board.clone()
child_board.make_move(move)
move_value, maximizing_move = self._max(child_board, child_board.get_valid_moves(move), a, b, max_depth - 1)
if move_value < value:
value = move_value
best_move = move
if value <= a:
return value, best_move
b = min(b, move_value)
return value, best_move
def compute_score(self, board):
return random.uniform(-1, 1)
def setup_bot(self, game):
pass
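# Instantiation sketch (hypothetical game loop; Board.X is the player
# constant imported above):
#   bot = RandoMaxBot(Board.X)
#   move = bot.compute_next_move(board, valid_moves)
# compute_next_move runs the 2-ply alpha-beta search in _max/_min and
# returns the chosen move object.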
| 36.528846 | 120 | 0.589102 | 3,709 | 0.97631 | 0 | 0 | 0 | 0 | 0 | 0 | 982 | 0.258489 |
f251f3a1ac391e245be08c921c85c8b349b00732 | 1,924 | py | Python | fineDinner.py | SMartQi/whose-treat | 85f1d27dfb2b728a33cf8b6fcd73213ca24edb0b | [
"MIT"
] | 1 | 2020-01-30T11:09:31.000Z | 2020-01-30T11:09:31.000Z | fineDinner.py | SMartQi/whose-treat | 85f1d27dfb2b728a33cf8b6fcd73213ca24edb0b | [
"MIT"
] | null | null | null | fineDinner.py | SMartQi/whose-treat | 85f1d27dfb2b728a33cf8b6fcd73213ca24edb0b | [
"MIT"
] | null | null | null | #!/usr/bin/env python
#-*- encoding:UTF-8 -*-
"""
Background:
JJ and MM want to have a fine dinner, celebrating their annual bonuses. They make this rule:
This dinner is on the person who gets more annual bonus. And the cost of the dinner is the diff of money they make mod 300, per capita.
Requirement:
Decide the money amount and the money provider, without letting one know how much the other's annual bonus is.
Method:
Hide the input.
Use the method "Best two out of three" in case of any typo, since the input strings are hidden.
"""
import getpass
def cal():
"""
Decide the money amount and the money provider.
"""
incomejj = validInput("JJ: ")
incomemm = validInput("MM: ")
diff = incomejj - incomemm
onWhom = "JJ"
if diff < 0:
onWhom = "MM"
result = int(round(abs(diff) % 300))
return result, onWhom
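# Worked example of the rule: if JJ enters 4200 and MM enters 4100, then
# diff = 100, abs(100) % 300 == 100, and JJ (the higher earner) pays 100
# per person; with 5000 vs 4100, diff = 900 and 900 % 300 == 0, no dinner.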
def validInput(prompt):
"""
Get a valid input and convert it to a float number.
"""
while 1:
inputStr = getpass.getpass(prompt)
try:
inputFloat = float(inputStr)
return inputFloat
except ValueError:
print("Invalid input. Try again.")
pass
if __name__ == "__main__":
"""
Use the method "Best two out of three" in case of any typo, since the input strings are hidden.
"""
(result1, onWhom1) = cal()
print("Let's double check.")
(result2, onWhom2) = cal()
if result1 == result2 and onWhom1 == onWhom2:
if result1 == 0:
print("No dinner at all. But go to buy some lottery~")
else :
print("OK. Let's have dinner. " + str(result1) + " yuan per person on " + onWhom1 + ".")
else :
print("Something's wrong. Let's triple check.")
(result3, onWhom3) = cal()
if (result1 == result3 and onWhom1 == onWhom3) or (result2 == result3 and onWhom2 == onWhom3):
if result3 == 0:
print("No dinner at all. But go to buy some lottery~")
else :
print("OK. " + str(result3) + " it is. It's on " + onWhom3 + ".")
else:
print("Are you kidding me? I quit!")
| 29.6 | 135 | 0.670478 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,081 | 0.56185 |
f25714dd2e5fb95e7b87e1b330afecfe7458cf18 | 49 | py | Python | libs/pytvmaze/__init__.py | Sparklingx/nzbhydra | e2433e1155255ba37341cc79750b104e7dd8889a | [
"Apache-2.0"
] | 674 | 2015-11-06T04:22:47.000Z | 2022-02-26T17:31:43.000Z | libs/pytvmaze/__init__.py | Sparklingx/nzbhydra | e2433e1155255ba37341cc79750b104e7dd8889a | [
"Apache-2.0"
] | 713 | 2015-11-06T10:48:58.000Z | 2018-11-27T16:32:18.000Z | libs/pytvmaze/__init__.py | Sparklingx/nzbhydra | e2433e1155255ba37341cc79750b104e7dd8889a | [
"Apache-2.0"
] | 106 | 2015-12-07T11:21:06.000Z | 2022-03-11T10:58:41.000Z | #!/usr/bin/python
from pytvmaze.tvmaze import *
| 12.25 | 29 | 0.734694 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 17 | 0.346939 |
f25ce39acdbb3d945528b6cb2be68ac5895f77bb | 1,241 | py | Python | backend/server.py | mugeshk97/billing-api | 3bf6899f62bee6db7870c3b6008a10c887eb3aa3 | [
"MIT"
] | null | null | null | backend/server.py | mugeshk97/billing-api | 3bf6899f62bee6db7870c3b6008a10c887eb3aa3 | [
"MIT"
] | null | null | null | backend/server.py | mugeshk97/billing-api | 3bf6899f62bee6db7870c3b6008a10c887eb3aa3 | [
"MIT"
] | null | null | null | from flask import Flask, request, jsonify
from connection import get_sql_connection
from product import get_all_products, insert_product, delete_product
import json
from flask_cors import CORS
app = Flask(__name__)
CORS(app)
cnx = get_sql_connection()
@app.route('/getProducts', methods=['GET'])
def get_products():
products = get_all_products(cnx)
response = jsonify(products)
response.headers.add('Access-Control-Allow-Origin', '*')
return response
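# Request sketch (the app listens on port 5050, per app.run at the bottom):
#   curl http://localhost:5050/getProducts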
@app.route('/insertProduct', methods=['POST'])
def insert_prod():
request_payload = json.loads(request.form['data'])
print(request_payload)
product_id = insert_product(cnx, request_payload)
response = jsonify(
{'product_id': product_id}
)
response.headers.add('Access-Control-Allow-Origin', '*')
return response
@app.route('/deleteProduct', methods=['POST'])
def delete_prod():
request_payload = json.loads(request.form['product_id'])
return_id = delete_product(cnx, request_payload['product_id'])
response = jsonify(
{'product_id': return_id}
)
response.headers.add('Access-Control-Allow-Origin', '*')
return response
if __name__ == '__main__':
app.run(host= '0.0.0.0', port=5050, debug= True) | 29.547619 | 68 | 0.709106 | 0 | 0 | 0 | 0 | 900 | 0.725222 | 0 | 0 | 232 | 0.186946 |
f25fca280607b95bdb378b87fdab5966ef3e46d2 | 555 | py | Python | api/restaurant_helper_functions.py | daniellespencer/stfu-and-eat | cb82b364ba226dd61f11547720a20a132c1562f6 | [
"MIT"
] | 1 | 2020-05-15T01:36:59.000Z | 2020-05-15T01:36:59.000Z | api/restaurant_helper_functions.py | daniellespencer/stfu-and-eat | cb82b364ba226dd61f11547720a20a132c1562f6 | [
"MIT"
] | null | null | null | api/restaurant_helper_functions.py | daniellespencer/stfu-and-eat | cb82b364ba226dd61f11547720a20a132c1562f6 | [
"MIT"
] | 2 | 2020-05-15T01:31:37.000Z | 2020-05-20T00:04:41.000Z | import random
from api.config import restaurant_collection as restaurants
def organize_restaurant_output():
output = []
for q in restaurants.find():
output.append({
"id" : str(q['_id']),
'name' : q['name'],
'neighborhood' : q['neighborhood'],
'cuisine' : q['cuisine'],
'address' : q['address'],
'website' : q['website']
})
return output
def select_random_restaurant(options):
value = random.randint(0, len(options)-1)
return options[value] | 26.428571 | 59 | 0.567568 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 103 | 0.185586 |
f26261a417ed0c023adbf8199f1af50c5d35454f | 351 | py | Python | source/exceptions.py | corradiniste/Paneka-discord-bot | 43672b2720c88635266e0962f446bd36bd7ced7d | [
"MIT"
] | 12 | 2020-08-03T05:47:20.000Z | 2021-10-06T06:20:19.000Z | source/exceptions.py | Shivanirudh/Paneka-discord-bot | 0ec257e92e40baf23233711c9cf9889e8c56ab53 | [
"MIT"
] | 5 | 2020-10-03T08:27:35.000Z | 2021-06-02T04:45:57.000Z | source/exceptions.py | Shivanirudh/Paneka-discord-bot | 0ec257e92e40baf23233711c9cf9889e8c56ab53 | [
"MIT"
] | 6 | 2020-08-06T10:41:49.000Z | 2022-02-14T17:26:07.000Z | class InvalidLimitException(Exception):
"""
Invalid number of matches requested
"""
pass
class InvalidLeagueCodeException(Exception):
"""
The League code requested is either invalid or not supported
"""
pass
class InvalidTeamCodeException(Exception):
"""
The Team Code requested is invalid
"""
pass
| 17.55 | 64 | 0.672365 | 344 | 0.980057 | 0 | 0 | 0 | 0 | 0 | 0 | 177 | 0.504274 |
f26337b1b3af5eb32cdd87718a2212d8a63d5996 | 6,187 | py | Python | nz_snow_tools/eval/brewster_calibration_TF.py | jonoconway/nz_snow_tools | 7002fb401fb48225260fada6fd5b5b7ca5ad1184 | [
"MIT"
] | 3 | 2020-09-01T07:53:05.000Z | 2021-02-02T20:28:37.000Z | nz_snow_tools/eval/brewster_calibration_TF.py | jonoconway/nz_snow_tools | 7002fb401fb48225260fada6fd5b5b7ca5ad1184 | [
"MIT"
] | null | null | null | nz_snow_tools/eval/brewster_calibration_TF.py | jonoconway/nz_snow_tools | 7002fb401fb48225260fada6fd5b5b7ca5ad1184 | [
"MIT"
] | null | null | null | """
code to call the snow model for a simple test case using brewster glacier data
"""
from __future__ import division
import numpy as np
import matplotlib.pylab as plt
import datetime as dt
from nz_snow_tools.util.utils import resample_to_fsca, nash_sut, mean_bias, rmsd, mean_absolute_error, coef_determ
seb_dat = np.genfromtxt(
'S:\Scratch\Jono\Final Brewster Datasets\SEB_output\cdf - code2p0_MC_meas_noQPS_single_fixed output_fixed_B\modelOUT_br1_headings.txt', skip_header=3)
sw_net = seb_dat[:, 14 - 1]
lw_net = seb_dat[:, 17 - 1]
qs = seb_dat[:, 19 - 1]
ql = seb_dat[:, 20 - 1]
qc = seb_dat[:, 21 - 1]
qprc = seb_dat[:, 22 - 1]
qst = seb_dat[:, 24 - 1]
qm = seb_dat[:, 25 - 1]
t_dep_flux = lw_net + qs + ql + qc + qst
qm_wo_sw_prc = qm - sw_net - qprc
qm_wo_sw_prc[(qm == 0)] = 0
ta = seb_dat[:, 8 - 1]
ea = seb_dat[:, 10 - 1]
ws = seb_dat[:, 7 - 1]
r2_ea = coef_determ(qm_wo_sw_prc, ea)
r2_ta = coef_determ(qm_wo_sw_prc, ta)
r2_ea_ws = coef_determ(qm_wo_sw_prc, ea*ws)
r2_ea_pos = coef_determ(qm_wo_sw_prc[(qm_wo_sw_prc > 0)], ea[(qm_wo_sw_prc > 0)])
r2_ta_pos = coef_determ(qm_wo_sw_prc[(qm_wo_sw_prc > 0)], ta[(qm_wo_sw_prc > 0)])
r2_ea_ws_pos = coef_determ(qm_wo_sw_prc[(qm_wo_sw_prc > 0)], ea[(qm_wo_sw_prc > 0)]*ws[(qm_wo_sw_prc > 0)])
print(r2_ea)
print(r2_ta)
print (r2_ea_ws)
print(r2_ea_pos)
print(r2_ta_pos)
print (r2_ea_ws_pos)
print(
np.sum(ta>0),
np.sum(np.logical_and(ta>0,qm_wo_sw_prc > 0)),
np.sum(qm_wo_sw_prc > 0),
np.sum(np.logical_and(ta>0,qm_wo_sw_prc > 0))/np.sum(ta>0),
)
print(
np.sum(ea>6.112),
np.sum(np.logical_and(ea>6.1120,qm_wo_sw_prc > 0)),
np.sum(qm_wo_sw_prc > 0),
np.sum(np.logical_and(ea>6.1120,qm_wo_sw_prc > 0))/np.sum(ea>6.112),
)
plt.figure()
plt.hexbin(qm_wo_sw_prc[(qm_wo_sw_prc > 0)], ta[(qm_wo_sw_prc > 0)], cmap=plt.cm.inferno_r)
plt.plot(range(200), np.arange(200) / 14.7,'k')
plt.plot(range(100), np.arange(100) / 8.7,'r')
plt.xlabel('QM - SWnet - Qprecip')
plt.ylabel('Air temperature (C)')
plt.savefig(r'D:\Snow project\Oct2018 Results\qm_wo_sw_prc vs ta posQM.png')
plt.figure()
plt.hexbin(qm_wo_sw_prc[(qm_wo_sw_prc > 0)], ea[(qm_wo_sw_prc > 0)], cmap=plt.cm.inferno_r)
plt.plot(range(200), 6.112 + np.arange(200) / 42.0,'k')
plt.xlabel('QM - SWnet - Qprecip')
plt.ylabel('Vapour pressure (hPa)')
plt.savefig(r'D:\Snow project\Oct2018 Results\qm_wo_sw_prc vs ea posQM.png')
plt.figure()
plt.hexbin(qm_wo_sw_prc[~(qm_wo_sw_prc == 0)], ta[~(qm_wo_sw_prc == 0)], cmap=plt.cm.inferno_r)
plt.plot(range(200), np.arange(200) / 14.7,'k')
plt.plot(range(100), np.arange(100) / 8.7,'r')
plt.xlabel('QM - SWnet - Qprecip')
plt.ylabel('Air temperature (C)')
plt.savefig(r'D:\Snow project\Oct2018 Results\qm_wo_sw_prc vs ta.png')
plt.figure()
plt.hexbin(qm_wo_sw_prc[~(qm_wo_sw_prc == 0)], ea[~(qm_wo_sw_prc == 0)], cmap=plt.cm.inferno_r)
plt.plot(range(200), 6.112 + np.arange(200) / 42.0,'k')
plt.xlabel('QM - SWnet - Qprecip')
plt.ylabel('Vapour pressure (hPa)')
plt.savefig(r'D:\Snow project\Oct2018 Results\qm_wo_sw_prc vs ea.png')
#plt.show()
print(
np.sum(qm_wo_sw_prc[qm>0])/sw_net.shape,# average positive melt energy from temp dep fluxes
np.sum(sw_net[qm>0])/sw_net.shape, # average melt energy from sw_net
np.sum(qprc[qm>0])/sw_net.shape # average melt energy from precipitation
)
qm_wo_sw_prc[qm_wo_sw_prc<0] = 0 # set all negative melt energy to zero
# find optimal parameters for ea and ta
from scipy.optimize import curve_fit
def f(x, A): # this is your 'straight line' y=f(x)
return A*x
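# curve_fit(f, x, y) below fits y = A*x through the origin; the optimal A
# comes back as a one-element array, hence the [0] indexing at each call.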
# sum melt energy from ea and ta
# melt factor was 0.025 mm w.e. per hour per hPa
ea_pos = ea-6.112
ea_pos[ea_pos<0] = 0
A = curve_fit(f,ea_pos, qm_wo_sw_prc)[0] # find optimal ea_q factor = 41.9
np.median(qm_wo_sw_prc[qm_wo_sw_prc>0]/ea_pos[qm_wo_sw_prc>0]) # median Wm^-2 per K = 41.7
ea_q = ea_pos * 42
# Wm^-2 per K (melt rate of 0.05 mm w.e. per hour per K = 4.6 Wm^-2 per K)
ta_pos = ta - 0.
ta_pos[ta_pos<0] = 0
A = curve_fit(f,ta_pos, qm_wo_sw_prc)[0]# find optimal ta_q factor = 8.7
np.median(qm_wo_sw_prc[qm_wo_sw_prc>0]/ta_pos[qm_wo_sw_prc>0]) # median Wm^-2 per K = 14.7
ta_q = ta_pos * 8.7
#K * / (mm w.e. W) *
print(
np.sum(qm_wo_sw_prc[qm>0])/sw_net.shape,# average positive melt energy from temp dep fluxes
np.sum(ea_q)/sw_net.shape, # average calculated melt energy from temp dep fluxes using ea
np.sum(ta_q)/sw_net.shape, # average calculated melt energy from temp dep fluxes using ta
np.sum(sw_net[qm>0])/sw_net.shape, # average melt energy from sw_net
np.sum(sw_net[np.logical_and(qm>0,ta<0)])/sw_net.shape, # average melt energy from sw_net when temperature below 0
np.sum(sw_net[np.logical_and(qm>0,ta>0)])/sw_net.shape, # average melt energy from sw_net when temperature above 0
np.sum(qprc[qm>0])/sw_net.shape # average melt energy from precipitation
)
plt.figure()
plt.hexbin(qm_wo_sw_prc[np.logical_and(ta_q>0,qm_wo_sw_prc>0)],ta_q[np.logical_and(ta_q>0,qm_wo_sw_prc>0)])
plt.plot(range(300),range(300),'b--')
plt.ylabel('mod'),plt.xlabel('obs'),plt.title('ta_q vs qm_wo_sw_prc')
plt.savefig(r'D:\Snow project\Oct2018 Results\qm_wo_sw_prc vs ta_q.png')
plt.figure()
plt.hexbin(qm_wo_sw_prc[np.logical_and(ea_q>0,qm_wo_sw_prc>0)],ea_q[np.logical_and(ea_q>0,qm_wo_sw_prc>0)])
plt.ylabel('mod'),plt.xlabel('obs'),plt.title('ea_q vs qm_wo_sw_prc')
plt.plot(range(300),range(300),'b--')
plt.savefig(r'D:\Snow project\Oct2018 Results\qm_wo_sw_prc vs ea_q.png')
plt.figure()
plt.hist(qm_wo_sw_prc[np.logical_and(ta_pos>0.5,qm_wo_sw_prc>0)]/ta_pos[np.logical_and(ta_pos>0.5,qm_wo_sw_prc>0)],20)
plt.xlabel('ta_q_factor (W m-2 K-1)')
plt.savefig(r'D:\Snow project\Oct2018 Results\ta_q_factor_hist.png')
#plt.show()
print(
rmsd(qm_wo_sw_prc,ta_q),
rmsd(qm_wo_sw_prc,ea_q)
)
es = 6.1121 * np.exp(17.502*ta/(240.97+ta))
rh = (ea/es) * 100
plt.scatter(rh[np.logical_and(ta_pos>0.5,qm_wo_sw_prc>0)]*ws[np.logical_and(ta_pos>0.5,qm_wo_sw_prc>0)]/10.,qm_wo_sw_prc[np.logical_and(ta_pos>0.5,qm_wo_sw_prc>0)]/ta_pos[np.logical_and(ta_pos>0.5,qm_wo_sw_prc>0)],3)
plt.scatter(rh[np.logical_and(ta_pos>0.5,qm_wo_sw_prc>0)],qm_wo_sw_prc[np.logical_and(ta_pos>0.5,qm_wo_sw_prc>0)]/ta_pos[np.logical_and(ta_pos>0.5,qm_wo_sw_prc>0)])
plt.scatter(ql,qm_wo_sw_prc-ta_q)
plt.scatter(ta,qm_wo_sw_prc-ta_q) | 38.66875 | 216 | 0.725715 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,843 | 0.297883 |
f263d10e4b0315d66a52d4a47d9ce8cba72ce9a2 | 336 | py | Python | Task1F.py | momopmoXZ/1a-flood-coding | 13d2f6387e136f046b07a045eadfe654e9c2c27f | [
"MIT"
] | null | null | null | Task1F.py | momopmoXZ/1a-flood-coding | 13d2f6387e136f046b07a045eadfe654e9c2c27f | [
"MIT"
] | null | null | null | Task1F.py | momopmoXZ/1a-flood-coding | 13d2f6387e136f046b07a045eadfe654e9c2c27f | [
"MIT"
] | 1 | 2022-02-07T17:04:41.000Z | 2022-02-07T17:04:41.000Z | from floodsystem.stationdata import build_station_list
from floodsystem.station import inconsistent_typical_range_stations
stations = build_station_list()
incon_station=inconsistent_typical_range_stations(stations)
incon_names=[]
for station in incon_station:
incon_names.append(station.name)
incon_names.sort()
print (incon_names)
| 33.6 | 67 | 0.863095 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
f263dc6e6df0ca9888bd8e9bcfdb5d8ed564b445 | 507 | py | Python | yaga_ga/evolutionary_algorithm/operators/base.py | alessandrolenzi/yaga | 872503ad04a2831135143750bc309188e5685284 | [
"MIT"
] | null | null | null | yaga_ga/evolutionary_algorithm/operators/base.py | alessandrolenzi/yaga | 872503ad04a2831135143750bc309188e5685284 | [
"MIT"
] | null | null | null | yaga_ga/evolutionary_algorithm/operators/base.py | alessandrolenzi/yaga | 872503ad04a2831135143750bc309188e5685284 | [
"MIT"
] | null | null | null | from typing import Generic, TypeVar
from typing_extensions import Final
from yaga_ga.evolutionary_algorithm.individuals import IndividualStructure
class InvalidOperatorError(ValueError):
pass
IndividualType = TypeVar("IndividualType")
GeneType = TypeVar("GeneType")
class GeneticOperator(Generic[IndividualType, GeneType]):
def __init__(
self, individual_structure: IndividualStructure[IndividualType, GeneType]
):
self.individual_structure: Final = individual_structure
| 24.142857 | 81 | 0.792899 | 276 | 0.544379 | 0 | 0 | 0 | 0 | 0 | 0 | 26 | 0.051282 |
f2647ec6e2d3b985a5cc52948c24f37ae5751457 | 3,973 | py | Python | stimuli.py | lieke2020/workmate_match | 803f4e3b1fa62280cc0d6a7cd61eb80929dae918 | [
"MIT"
] | null | null | null | stimuli.py | lieke2020/workmate_match | 803f4e3b1fa62280cc0d6a7cd61eb80929dae918 | [
"MIT"
] | null | null | null | stimuli.py | lieke2020/workmate_match | 803f4e3b1fa62280cc0d6a7cd61eb80929dae918 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Wed Dec 1 13:21:44 2021
This file holds the stimuli that are used in the world to represent cues.
obs_time --> Stimulus representing time
match_cifar --> Natural scenes for phase 1 learning
obs_cifar --> Natural scenes for phase 2 learning
match_alpha --> Alphabetic letters for phase 1 learning
obs_alpha --> Alphabetic letters for phase 2 learning
Detailed information on the stimuli can be found in README.txt
@author: Lieke Ceton
"""
#%% Dependencies
import numpy as np
import string
from random import sample
import csv
from sklearn.preprocessing import normalize
#%% Time cell coding
maxtime = 10
# Time vectors are created by convolving a response vector
# with an identity matrix, yielding [maxtime] rows of time cell responses,
# each peaking at a unique, consecutive time.
z = [0.1, 0.25, 0.5, 1, 0.5, 0.25, 0.1]
crop = int((len(z)-1)/2) # the '3'-cropping here removes edge artefacts from convolution;
# Time cell 0 (at row 0) peaks at the first moment in time (column 0).
tmat = np.vstack([np.convolve(z, t)[crop:maxtime + crop] for t in np.eye(maxtime)])
def obs_time(t=0):
"""Vector that represents time"""
return tmat[t]
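# e.g. obs_time(0) == [1, 0.5, 0.25, 0.1, 0, 0, 0, 0, 0, 0]: the response
# peaks at column 0, and each later time cell peaks one column further on.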
#%% CIFAR-10 observations for both learning phases
#CIFAR-10 features are extracted from a pre-trained CNN (Caley Woy, see README)
#They are the activity vectors of the second fully connected layer.
#load .csv file
with open("CIFAR_10_kaggle_feature_2.csv", 'r') as f:
csv_features = list(csv.reader(f, delimiter=","))
all_feat = np.array(csv_features[1:], dtype=np.float) #get the first row out
match_dict = normalize(all_feat[:,1:-2]) #normalize
feat_sample = all_feat[0:500,1:-2] #Sample the first 500 features/images
cifar_dict = normalize(feat_sample) #normalise
def match_cifar():
"""Stimuli for phase 1 learning, random natural scenes from CIFAR-10 dataset"""
a = np.random.choice(match_dict.shape[1])
return match_dict[a]
def obs_cifar(obs=1):
"""Stimuli for phase 2 learning, a specific set of CIFAR-10 stimuli is selected"""
return cifar_dict[obs]
#%% Alpha observations for both learning phases
#Construct stimulus dictionary
stimbits = 10 #length of stimuli
#Construct binary stim_repres
binstr = '0{}b'.format(stimbits)
binstrings = [format(i, binstr) for i in range(2**stimbits)]
tobinarr = lambda s : np.array([float(c) for c in s])
Dx = np.vstack([tobinarr(i) for i in binstrings]) #--> a
shuffle = sample(range(len(Dx)),len(Dx)) #shuffle the rows randomly
Dx = Dx[shuffle,:]
# Dx now is a matrix of 128 x 7 bits. 'stimbits' is a dict that will order the
# first 52 of these in a lookup table, #why not choose 2**6 when you only use the first 52? (LJC)
chars = string.ascii_lowercase + string.ascii_uppercase
stimdict = dict(list(zip( chars, Dx )))
# Stimuli with these 5 letters are used in prosaccade/antisaccade, and here made
# linearly separable, cf. Rombouts et al., 2015
stimdict['g'] = np.zeros(stimbits)
stimdict['p'] = np.eye(stimbits)[0]
stimdict['a'] = np.eye(stimbits)[1]
stimdict['l'] = np.eye(stimbits)[2]
stimdict['r'] = np.eye(stimbits)[3] #why? this ruins the neat dictionary that you just made.. (LJC)
# digits, used in 12-AX, are added to the stimdict in a similar manner
digdict = dict(
[(d,Dx[i + 2**(stimbits-1) ]) for i,d in enumerate(string.digits) ])
stimdict.update(digdict)
len_Dx = Dx.shape[0]
def match_alpha():
"""Stimuli for phase 1 learning, random vector selected from binary stimuli"""
rand_int = np.random.choice(len_Dx)
return Dx[rand_int,:]
def obs_alpha(obs='A'):
"""Stimuli for phase 2 learning, all lower and uppercase letters (52 stimuli)"""
# return the row of activity from the selected stimdict index as the observation
return stimdict[obs]
| 37.838095 | 100 | 0.683614 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,280 | 0.573874 |
f2661fcc769c20d3c2e052ada4cb40f950039d1a | 675 | py | Python | tallerestructurasselectivas/14.py | juandab07/Algoritmos-y-programacion | f3c10f5c4620b15432ecfe2b9f5831437a49ace9 | [
"MIT"
] | null | null | null | tallerestructurasselectivas/14.py | juandab07/Algoritmos-y-programacion | f3c10f5c4620b15432ecfe2b9f5831437a49ace9 | [
"MIT"
] | null | null | null | tallerestructurasselectivas/14.py | juandab07/Algoritmos-y-programacion | f3c10f5c4620b15432ecfe2b9f5831437a49ace9 | [
"MIT"
] | null | null | null | print('enter the amount to pay for urban waste collection')
aseo=float(input())
print('enter the previous month meter reading')
ant=float(input())
print('enter the current month meter reading')
act=float(input())
cons=act-ant
if 0<cons<=100:
    pago=cons*4600
    print('you must pay $',pago,'for electricity and',aseo,'for urban waste collection')
elif 100<cons<=300:
    pago=cons*80000
    print('you must pay $',pago,'for electricity and',aseo,'for urban waste collection')
elif 300<cons<=500:
    pago=cons*100000
    print('you must pay $',pago,'for electricity and',aseo,'for urban waste collection')
elif cons>500:
    pago=cons*120000
    print('you must pay $',pago,'for electricity and',aseo,'for urban waste collection') | 35.526316 | 75 | 0.694815 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 339 | 0.502222 |
f2684fd08fdc8ebf74875458af9886f1554c5e7c | 1,040 | py | Python | meilisearch/tests/test_synonyms_meilisearch.py | jtmiclat/meilisearch-python | b6a48a62bb64ae58181550a0ddc793dcdc0a2b06 | [
"MIT"
] | null | null | null | meilisearch/tests/test_synonyms_meilisearch.py | jtmiclat/meilisearch-python | b6a48a62bb64ae58181550a0ddc793dcdc0a2b06 | [
"MIT"
] | null | null | null | meilisearch/tests/test_synonyms_meilisearch.py | jtmiclat/meilisearch-python | b6a48a62bb64ae58181550a0ddc793dcdc0a2b06 | [
"MIT"
] | null | null | null | import time
import meilisearch
from meilisearch.tests import BASE_URL, MASTER_KEY
class TestSynonyms:
client = meilisearch.Client(BASE_URL, MASTER_KEY)
index = None
new_synonyms = {
'hp': ['harry potter']
}
default_synonyms = {}
def setup_class(self):
self.index = self.client.create_index(uid='indexUID')
def teardown_class(self):
self.index.delete()
def test_update_synonyms(self):
response = self.index.update_synonyms(self.new_synonyms)
assert isinstance(response, object)
assert 'updateId' in response
def test_get_synonyms(self):
response = self.index.get_synonyms()
assert isinstance(response, object)
assert response == self.new_synonyms
def test_reset_synonyms(self):
response = self.index.reset_synonyms()
assert isinstance(response, object)
assert 'updateId' in response
time.sleep(2)
response = self.index.get_synonyms()
assert response == self.default_synonyms
| 28.888889 | 64 | 0.674038 | 956 | 0.919231 | 0 | 0 | 0 | 0 | 0 | 0 | 48 | 0.046154 |
f2689ab69abc970864477a6211da1d0af11f1927 | 168 | py | Python | main.py | dansoliveira/pasc-compiler | 642f2745395dcc5b4ebbdd1fa83169362f863e61 | [
"MIT"
] | null | null | null | main.py | dansoliveira/pasc-compiler | 642f2745395dcc5b4ebbdd1fa83169362f863e61 | [
"MIT"
] | 1 | 2018-05-10T13:03:04.000Z | 2018-05-10T13:03:04.000Z | main.py | dansoliveira/pasc-compiler | 642f2745395dcc5b4ebbdd1fa83169362f863e61 | [
"MIT"
] | null | null | null | from lexer import Lexer
from parser import Parser
if __name__ == "__main__":
lexer = Lexer("exemplos/teste2.pasc")
parser = Parser(lexer)
parser.executa() | 21 | 41 | 0.702381 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 32 | 0.190476 |
f26a5c6b870f2f9eb67aa2735878c21021be7143 | 324 | py | Python | leetcode/easy/plus-one.py | vtemian/interviews-prep | ddef96b5ecc699a590376a892a804c143fe18034 | [
"Apache-2.0"
] | 8 | 2019-05-14T12:50:29.000Z | 2022-03-01T09:08:27.000Z | leetcode/easy/plus-one.py | vtemian/interviews-prep | ddef96b5ecc699a590376a892a804c143fe18034 | [
"Apache-2.0"
] | 46 | 2019-03-24T20:59:29.000Z | 2019-04-09T16:28:43.000Z | leetcode/easy/plus-one.py | vtemian/interviews-prep | ddef96b5ecc699a590376a892a804c143fe18034 | [
"Apache-2.0"
] | 1 | 2022-01-28T12:46:29.000Z | 2022-01-28T12:46:29.000Z | from typing import List  # LeetCode provides List implicitly; imported here so the snippet runs standalone
class Solution:
def plusOne(self, digits: List[int]) -> List[int]:
carry = 1
result = []
for digit in digits[::-1]:
digit += carry
result.append(digit % 10)
carry = digit // 10
if carry:
result.append(carry)
return result[::-1]
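# Quick sanity check (not part of the original solution):
#   Solution().plusOne([9, 9]) == [1, 0, 0]
#   Solution().plusOne([1, 2, 9]) == [1, 3, 0]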
| 20.25 | 54 | 0.475309 | 323 | 0.996914 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
f26a6e2aee87a0b97e130dc33aaab4654d6c6049 | 69 | py | Python | microdevices/connector/__init__.py | lmokto/microdevices | 75a129d1c32f64afe9027338c4be304322ded857 | [
"MIT"
] | null | null | null | microdevices/connector/__init__.py | lmokto/microdevices | 75a129d1c32f64afe9027338c4be304322ded857 | [
"MIT"
] | 1 | 2021-06-02T00:01:14.000Z | 2021-06-02T00:01:14.000Z | microdevices/connector/__init__.py | lmokto/microdevices | 75a129d1c32f64afe9027338c4be304322ded857 | [
"MIT"
] | null | null | null | from .mqtt import MQTTClient
from .sql import *
from .redis import *
| 17.25 | 28 | 0.753623 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
f26a8afeac7319e72d66512791f4976ac936a01f | 1,275 | py | Python | examples/1-marshmallow/server/resources/user/schema.py | FlyingBird95/openapi_generator | df4649b9723eb89fa370b02220356b7596794069 | [
"MIT"
] | 3 | 2022-01-10T12:43:36.000Z | 2022-01-13T18:08:15.000Z | examples/1-marshmallow/server/resources/user/schema.py | FlyingBird95/openapi_generator | df4649b9723eb89fa370b02220356b7596794069 | [
"MIT"
] | 6 | 2022-02-06T19:00:05.000Z | 2022-03-22T14:22:21.000Z | examples/1-marshmallow/server/resources/user/schema.py | FlyingBird95/openapi-builder | df4649b9723eb89fa370b02220356b7596794069 | [
"MIT"
] | 2 | 2021-12-17T17:26:06.000Z | 2021-12-17T17:39:00.000Z | from marshmallow import Schema, fields
class RegisterUser(Schema):
"""Deserialize register user schema."""
email = fields.Email(required=True)
"""Email."""
first_name = fields.String(required=True)
"""First name."""
last_name = fields.String(required=True)
"""Last name."""
password = fields.String(required=True)
"""Password."""
class UpdateUser(Schema):
"""Deserialize update user schema."""
first_name = fields.String(required=False)
"""First name."""
last_name = fields.String(required=False)
"""Last name."""
password = fields.String(required=False)
"""Password."""
class UserSchema(Schema):
"""User response schema."""
id = fields.Integer()
"""ID."""
email = fields.Email()
"""Email."""
first_name = fields.String()
"""First name."""
last_name = fields.String()
"""Last name."""
register_date = fields.DateTime()
"""Register date."""
class ErrorSchema(Schema):
"""Error response schema."""
message = fields.String()
"""The error message."""
class FunctionSchema(Schema):
"""Test schema for showing how a custom field can be serialized."""
@fields.Function
def list_of_strings(self):
return ["abc", "def"]
| 19.615385 | 71 | 0.620392 | 1,221 | 0.957647 | 0 | 0 | 77 | 0.060392 | 0 | 0 | 414 | 0.324706 |
f26cee0b9842c7bd2fa3f00e76d7e1a08850c951 | 450 | py | Python | coloredterm/__init__.py | hostedposted/coloredterm | 72d07a0bd12eb797e4b2772dfe45aca5234d27b6 | [
"MIT"
] | 1 | 2021-02-12T01:21:44.000Z | 2021-02-12T01:21:44.000Z | coloredterm/__init__.py | hostedposted/coloredterm | 72d07a0bd12eb797e4b2772dfe45aca5234d27b6 | [
"MIT"
] | 4 | 2021-07-07T04:09:58.000Z | 2022-02-03T04:05:30.000Z | coloredterm/__init__.py | hostedposted/coloredterm | 72d07a0bd12eb797e4b2772dfe45aca5234d27b6 | [
"MIT"
] | 1 | 2021-02-20T22:58:31.000Z | 2021-02-20T22:58:31.000Z | """Collection of tools for changing the text of your terminal."""
from coloredterm.coloredterm import (
Back,
bg,
colored,
colors,
cprint,
fg,
Fore,
names,
pattern_input,
pattern_print,
rand,
Style
)
__version__ = "0.1.9"
__all__ = [
'Back',
'bg',
'colored',
'colors',
'cprint',
'fg',
'Fore',
'names',
'pattern_input',
'pattern_print',
'rand',
'Style'
] | 14.516129 | 65 | 0.542222 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 167 | 0.371111 |
f26e13939dbd7efae31817537aae9cd55a260550 | 1,706 | py | Python | src/export_as_csv.py | mustilica/tt-history | 1bb60cb81e97ef1abecf657cfa078798bb29cace | [
"MIT"
] | 26 | 2015-02-12T20:33:01.000Z | 2018-04-25T05:29:52.000Z | src/export_as_csv.py | mustilica/tt-history | 1bb60cb81e97ef1abecf657cfa078798bb29cace | [
"MIT"
] | 3 | 2019-11-27T18:19:23.000Z | 2020-11-26T08:53:13.000Z | src/export_as_csv.py | mustilica/tt-history | 1bb60cb81e97ef1abecf657cfa078798bb29cace | [
"MIT"
] | 8 | 2015-01-11T00:12:40.000Z | 2018-04-01T22:34:45.000Z | # Run from GAE remote API:
# {GAE Path}\remote_api_shell.py -s {YourAPPName}.appspot.com
# import export_as_csv
import csv
from google.appengine.ext import db
from google.appengine.ext.db import GqlQuery
def exportToCsv(query, csvFileName, delimiter):
with open(csvFileName, 'wb') as csvFile:
csvWriter = csv.writer(csvFile, delimiter=delimiter,
quotechar='|', quoting=csv.QUOTE_MINIMAL)
writeHeader(csvWriter)
rowsPerQuery = 1000
totalRowsSaved = 0
cursor = None
areMoreRows = True
while areMoreRows:
if cursor is not None:
query.with_cursor(cursor)
items = query.fetch(rowsPerQuery)
cursor = query.cursor()
currentRows = 0
for item in items:
saveItem(csvWriter, item)
currentRows += 1
totalRowsSaved += currentRows
areMoreRows = currentRows >= rowsPerQuery
print 'Saved ' + str(totalRowsSaved) + ' rows'
print 'Finished saving all rows.'
def writeHeader(csvWriter):
# Output csv header
csvWriter.writerow(['hashtag', 'region', 'timestamp',
'duration (in minutes)'])
def saveItem(csvWriter, item):
# Save items in preferred format
csvWriter.writerow([item.name, item.woeid, item.timestamp, item.time])
class Trend(db.Model):
name = db.StringProperty()
woeid = db.IntegerProperty()
timestamp = db.IntegerProperty()
time = db.IntegerProperty()
# Query for items
query = GqlQuery("SELECT * FROM Trend WHERE name = '#JeSuisCharlie'")
exportToCsv(query, '/home/mustilica/remote.csv', ',')
| 28.433333 | 74 | 0.622509 | 155 | 0.090856 | 0 | 0 | 0 | 0 | 0 | 0 | 361 | 0.211606 |
f26f15c108eabe8ae9328cc4ea34ff13c08d0947 | 950 | py | Python | main.py | AbhigyanRanjan0505/dvigyuoixfhybiocthgnkfi | db1b5198f1a0902aff21c74c58578dcb1feda39d | [
"MIT"
] | null | null | null | main.py | AbhigyanRanjan0505/dvigyuoixfhybiocthgnkfi | db1b5198f1a0902aff21c74c58578dcb1feda39d | [
"MIT"
] | null | null | null | main.py | AbhigyanRanjan0505/dvigyuoixfhybiocthgnkfi | db1b5198f1a0902aff21c74c58578dcb1feda39d | [
"MIT"
] | null | null | null | import plotly.figure_factory as figure_factory
import statistics
import random
import pandas
df = pandas.read_csv("data.csv")
data = df["reading_time"].tolist()
population_mean = statistics.mean(data)
print("Population mean :", population_mean)
def show_fig(mean_list):
df = mean_list
fig = figure_factory.create_distplot(
[df], ["reading_time"], show_hist=False)
fig.show()
def random_set_of_mean(counter):
dataset = []
for i in range(0, counter):
        random_index = random.randint(0, len(data) - 1)  # randint is inclusive; len(data) would index out of range
value = data[random_index]
dataset.append(value)
mean = statistics.mean(dataset)
return mean
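# random_set_of_mean draws `counter` readings at random and returns their
# mean; setup() below collects 100 such means, approximating the sampling
# distribution of the mean, which is why the sampling mean printed at the
# end tracks the population mean printed above.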
def setup():
mean_list = []
for i in range(0, 100):
set_of_means = random_set_of_mean(30)
mean_list.append(set_of_means)
show_fig(mean_list)
mean = statistics.mean(mean_list)
print("Sampling mean :", mean)
setup()
| 22.093023 | 52 | 0.648421 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 74 | 0.077895 |
f26f70f686db6ff49ef92baf12b70818b5613277 | 209 | py | Python | ddtrace/contrib/sqlite3/connection.py | sharov/dd-trace-py | d0995b49cf7147ab463d0a67a38779fad3f539b4 | [
"BSD-3-Clause"
] | 1 | 2019-11-24T23:09:29.000Z | 2019-11-24T23:09:29.000Z | ddtrace/contrib/sqlite3/connection.py | sharov/dd-trace-py | d0995b49cf7147ab463d0a67a38779fad3f539b4 | [
"BSD-3-Clause"
] | null | null | null | ddtrace/contrib/sqlite3/connection.py | sharov/dd-trace-py | d0995b49cf7147ab463d0a67a38779fad3f539b4 | [
"BSD-3-Clause"
] | 2 | 2017-05-27T05:58:36.000Z | 2019-02-07T13:38:53.000Z | from sqlite3 import Connection
from ddtrace.util import deprecated
@deprecated(message='Use patching instead (see the docs).', version='0.6.0')
def connection_factory(*args, **kwargs):
return Connection
| 26.125 | 76 | 0.76555 | 0 | 0 | 0 | 0 | 139 | 0.665072 | 0 | 0 | 45 | 0.215311 |
f27341117d08bd618bf3ac5014feb6d7ff7d069e | 801 | py | Python | kafka_client_decorators/util/logging_helper.py | cdsedson/kafka-decorator | f2c958df88c5698148aae4c5314dd39e31e995c3 | [
"MIT"
] | null | null | null | kafka_client_decorators/util/logging_helper.py | cdsedson/kafka-decorator | f2c958df88c5698148aae4c5314dd39e31e995c3 | [
"MIT"
] | null | null | null | kafka_client_decorators/util/logging_helper.py | cdsedson/kafka-decorator | f2c958df88c5698148aae4c5314dd39e31e995c3 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
# -*- coding: utf-8 -*-
"""Define function used on logging."""
import logging
__KAFKA_DECORATOR_DEBUG__ = None
def set_debug_level(level):
"""Set the level of log.
Set logging level for all loggers create by get_logger function
Parameters
----------
level: log level define in logging module
"""
global __KAFKA_DECORATOR_DEBUG__
__KAFKA_DECORATOR_DEBUG__ = level
def get_logger(name):
"""Create and return a logger.
Parameters
----------
name: str
Logger name
Returns
-------
logging.Logger
A standard python logger
"""
logger = logging.getLogger(name)
if __KAFKA_DECORATOR_DEBUG__ is not None:
logger.setLevel(__KAFKA_DECORATOR_DEBUG__)
return logger
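# Usage sketch: call set_debug_level() before creating loggers, since
# get_logger() reads the module-level flag at creation time.
#   set_debug_level(logging.DEBUG)
#   log = get_logger(__name__)
#   log.debug('visible because the level was set first')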
| 19.536585 | 67 | 0.636704 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 456 | 0.569288 |
f2738d7e2edb6f5a98849ea7773345dc1a404833 | 1,409 | py | Python | hseling_lib_diachrony_webvectors/hseling_lib_diachrony_webvectors/strings_reader.py | wadimiusz/hseling-repo-diachrony-webvectors | 5488d74141df360a6a721637ae7c7577136172d7 | [
"MIT"
] | null | null | null | hseling_lib_diachrony_webvectors/hseling_lib_diachrony_webvectors/strings_reader.py | wadimiusz/hseling-repo-diachrony-webvectors | 5488d74141df360a6a721637ae7c7577136172d7 | [
"MIT"
] | null | null | null | hseling_lib_diachrony_webvectors/hseling_lib_diachrony_webvectors/strings_reader.py | wadimiusz/hseling-repo-diachrony-webvectors | 5488d74141df360a6a721637ae7c7577136172d7 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding:utf8
"""
this module reads strings.csv, which contains all
the strings, and lets the main app use it
"""
import sys
import csv
import os
from flask import Markup
import configparser
config = configparser.RawConfigParser()
path = '../hseling_api_diachrony_webvectors/hseling_api_diachrony_webvectors/webvectors.cfg'
assert os.path.isfile(path), "Current path: {}".format(os.getcwd())
config.read(path)
root = config.get('Files and directories', 'root')
l10nfile = config.get('Files and directories', 'l10n')
# open the strings database:
csvfile = open("../hseling_lib_diachrony_webvectors/hseling_lib_diachrony_webvectors/" + l10nfile, 'rU')
acrobat = csv.reader(csvfile, dialect='excel', delimiter=',')
# initialize a dictionary for each language:
language_dicts = {}
langnames = config.get('Languages', 'interface_languages').split(',')
header = next(acrobat)
included_columns = []
for langname in langnames:
language_dicts[langname] = {}
included_columns.append(header.index(langname))
# read the csvfile, populate language_dicts:
for row in acrobat:
for i in included_columns: # range(1, len(row)):
# Markup() is used to prevent autoescaping in templates
if sys.version_info[0] < 3:
language_dicts[header[i]][row[0]] = Markup(row[i].decode('utf-8'))
else:
language_dicts[header[i]][row[0]] = Markup(row[i])
| 32.022727 | 104 | 0.721079 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 613 | 0.43506 |
f274273a939d4c8377fbaeb7efafd00e9604432e | 1,077 | py | Python | day 5&6/linked list.py | yogeshkhola/100daysofDSA | 93f0d30d718795e4e3eb5d8e677b87baebd0df7c | [
"MIT"
] | 3 | 2021-03-01T17:04:33.000Z | 2021-03-01T17:44:23.000Z | day 5&6/linked list.py | yogeshkhola/100daysofDSA | 93f0d30d718795e4e3eb5d8e677b87baebd0df7c | [
"MIT"
] | null | null | null | day 5&6/linked list.py | yogeshkhola/100daysofDSA | 93f0d30d718795e4e3eb5d8e677b87baebd0df7c | [
"MIT"
] | null | null | null | class node:
def __init__(self,data):
self.data=data
self.next=None
class LinkedList:
def __init__(self):
self.start=None #(self/head)
    def viewList(self):  # this function prints the whole list
if self.start==None:
print("list is empty")
else:
temp=self.start
while temp!=None:
print(temp.data,end=" ")
temp=temp.next
def deleteFirst(self):
if self.start==None:
print("Linked list is empty")
else:
# temp=self.start
self.start=self.start.next
def insertLast(self,value):
newNode=node(value)
if(self.start==None):
self.start=newNode
else:
temp=self.start
while temp.next!=None:
temp=temp.next
temp.next=newNode
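# Expected output of the driver below (given the inserts shown):
#   10 20 17 18 60    <- full list
#   20 17 18 60       <- after deleteFirst() removes the head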
mylist=LinkedList()
mylist.insertLast(10)
mylist.insertLast(20)
mylist.insertLast(17)
mylist.insertLast(18)
mylist.insertLast(60)
mylist.viewList()
print()
mylist.deleteFirst()
mylist.viewList() | 21.54 | 58 | 0.571959 | 876 | 0.81337 | 0 | 0 | 0 | 0 | 0 | 0 | 104 | 0.096565 |
f2765c1d1962f66a204431e4dc547e6e1d4a52be | 40,603 | py | Python | detex/getdata.py | d-chambers/Detex | 46602eb8e05e080a23111c8f2716065a016613c2 | [
"BSD-3-Clause"
] | 39 | 2015-08-15T20:10:14.000Z | 2022-03-17T00:41:57.000Z | detex/getdata.py | d-chambers/Detex | 46602eb8e05e080a23111c8f2716065a016613c2 | [
"BSD-3-Clause"
] | 39 | 2015-09-28T23:50:59.000Z | 2019-07-16T20:38:31.000Z | detex/getdata.py | d-chambers/Detex | 46602eb8e05e080a23111c8f2716065a016613c2 | [
"BSD-3-Clause"
] | 8 | 2015-10-08T20:43:40.000Z | 2020-08-05T22:47:45.000Z | # -*- coding: utf-8 -*-
"""
Created on Thu Nov 10 20:21:46 2015
@author: derrick
"""
from __future__ import print_function, absolute_import, unicode_literals, division
import glob
import itertools
import json
import os
import random
import numpy as np
import obspy
import pandas as pd
from six import string_types
import detex
# client imports
import obspy.clients.fdsn
import obspy.clients.neic
import obspy.clients.earthworm
conDirDefault = 'ContinuousWaveForms'
eveDirDefault = 'EventWaveForms'
# extension key to map obspy output type to extension. Add more here
formatKey = {'mseed': 'msd', 'pickle': 'pkl', 'sac': 'sac', 'Q': 'Q'}
def read(path):
"""
function to read a file from a path. If IOError or TypeError simply try
appending os.set to start
"""
try:
st = obspy.read(path)
except (IOError, TypeError):
try:
st = obspy.read(os.path.join(os.path.sep, path))
except (IOError, TypeError):
msg = 'Cannot read %s, the file may be corrupt, skipping it' % path
detex.log(__name__, msg, level='warn', pri=True)
return None
return st
def quickFetch(fetch_arg, **kwargs):
"""
Instantiate a DataFetcher using as little information as possible.
Parameters
----------
fetch_arg : str or DataFetcher instance
fetch_arg can be one of three things:
1. An instance of DataFetcher
2. A valid DataFetcher Method other than dir
3. A path to a directory containing waveform data
fetch_arg is checked in that order, so if you are trying to use a
data directory make sure it does not share names with a valid
DataFetcher method
kwargs are passed to the DataFetcher class, see DataFetcher
docs for details
Returns
-------
An instance of DataFetcher
Notes
--------
For client methods (eg 'uuss', 'iris') remove response is assumed True
with the default prelim. filter. If you don't want this make a custom
instance of DataFetcher.
"""
if isinstance(fetch_arg, DataFetcher):
dat_fet = fetch_arg
elif isinstance(fetch_arg, string_types):
if fetch_arg in DataFetcher.supMethods:
if fetch_arg == 'dir':
msg = 'If using method dir you must pass a path to directory'
detex.log(__name__, msg, level='error')
dat_fet = DataFetcher(fetch_arg, removeResponse=True, **kwargs)
else:
if not os.path.exists(fetch_arg):
msg = 'Directory %s does not exist' % fetch_arg
detex.log(__name__, msg, level='error')
dat_fet = DataFetcher('dir', directoryName=fetch_arg, **kwargs)
else:
msg = 'Input not understood, read docs and try again'
detex.log(__name__, msg, level='error')
return dat_fet
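# Illustrative calls (a sketch, not executed on import; 'iris' is one of the
# supported method names and 'ContinuousWaveForms' is a hypothetical local
# data directory):
#
#     fetcher = quickFetch('iris')                  # client-based fetcher
#     fetcher = quickFetch('ContinuousWaveForms')   # directory-based fetcher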
def makeDataDirectories(templateKey='TemplateKey.csv',
stationKey='StationKey.csv',
fetch='IRIS',
formatOut='mseed',
templateDir=eveDirDefault,
timeBeforeOrigin=1 * 60,
timeAfterOrigin=4 * 60,
conDir=conDirDefault,
secBuf=120,
conDatDuration=3600,
multiPro=False,
getContinuous=True,
getTemplates=True,
removeResponse=True,
opType='VEL',
prefilt=[.05, .1, 15, 20]):
"""
Function designed to fetch data needed for detex and store them in local
directories. StationKey.csv and TemplateKey.csv indicate which events to
download and for which stations. Organizes ContinuousWaveForms and
EventWaveForms directories.
Parameters
------------
template_key : str or pd DataFrame
The path to the TemplateKey csv
station_key : str or pd DataFrame
The path to the station key
fetch : str or FetchData instance
String for method argument of FetchData class or FetchData instance
    formatOut : str
        Seismic data file format. Options supported by the formatKey check
        are: 'mseed', 'sac', 'pickle', and 'Q'
    templateDir : str
The name of the template directory. Using the default is recommended
else the templateDir parameter will have to be set in calling most
other detex functions
    timeBeforeOrigin : real number (int, float, etc.)
        The time in seconds before the reported origin of each template at
        which the downloaded waveform begins.
    timeAfterOrigin : real number (int, float, etc.)
        The time in seconds to download after the origin time of each template.
conDir : str
The name of the continuous waveform directory. Using the default is
recommended
secBuf : real number (int, float, etc.)
The number of seconds to download after each hour of continuous data.
This might be non-zero in order to capture some detections that would
normally be overlooked if data did not overlap somewhat.
conDatDuration : real number (int, float, etc.)
The duration of the continuous data to download in seconds.
multiPro : bool
If True fork several processes to get data at once, potentially much
faster but a bit inconsiderate on the server hosting the data
getContinuous : bool
If True fetch continuous data with station and date ranges listed in
the station key
getTemplates : bool
If True get template data with stations listed in the station key
and events listed in the template key
removeResponse : bool
If true remove instrument response
opType : str
Output type after removing instrument response. Choices are:
"DISP" (m), "VEL" (m/s), or "ACC" (m/s**2)
prefilt : list 4 real numbers
Pre-filter parameters for removing instrument response, response is
flat from corners 2 to 3.
"""
temkey = detex.util.readKey(templateKey, 'template')
stakey = detex.util.readKey(stationKey, 'station')
# Check output type
if formatOut not in formatKey.keys():
msg = ('%s is not an acceptable format, choices are %s' %
(formatOut, formatKey.keys()))
detex.log(__name__, msg, level='error')
# Configure data fetcher
if isinstance(fetch, detex.getdata.DataFetcher):
fetcher = fetch
# Make sure DataFetcher is on same page as function inputs
fetcher.opType = opType
fetcher.removeResponse = removeResponse
fetcher.prefilt = prefilt
else:
fetcher = detex.getdata.DataFetcher(fetch,
removeResponse=removeResponse,
opType=opType,
prefilt=prefilt)
## Get templates
if getTemplates:
msg = 'Getting template waveforms'
detex.log(__name__, msg, level='info', pri=True)
_getTemData(temkey, stakey, templateDir, formatOut,
fetcher, timeBeforeOrigin, timeAfterOrigin)
## Get continuous data
if getContinuous:
msg = 'Getting continuous data'
detex.log(__name__, msg, level='info', pri=True)
_getConData(fetcher, stakey, conDir, secBuf, opType, formatOut,
duration=conDatDuration)
## Log finish
msg = "finished makeDataDirectories call"
detex.log(__name__, msg, level='info', close=True, pri=True)
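# Illustrative call (a sketch; assumes the two key files exist in the
# working directory and that a network connection is available):
#
#     makeDataDirectories(templateKey='TemplateKey.csv',
#                         stationKey='StationKey.csv',
#                         fetch='iris', formatOut='mseed')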
def _getTemData(temkey, stakey, temDir, formatOut, fetcher, tb4, taft):
streamGenerator = fetcher.getTemData(temkey, stakey, tb4, taft,
returnName=True, temDir=temDir,
skipIfExists=True)
for st, name in streamGenerator:
netsta = st[0].stats.network + '.' + st[0].stats.station
fname = netsta + '.' + name + '.' + formatKey[formatOut]
fdir = os.path.join(temDir, name)
if not os.path.exists(fdir):
os.makedirs(fdir)
st.write(os.path.join(fdir, fname), formatOut)
if not os.path.exists(os.path.join(temDir, '.index.db')):
indexDirectory(temDir)
def _getConData(fetcher, stakey, conDir, secBuf, opType, formatOut,
duration=3600):
streamGenerator = fetcher.getConData(stakey,
secBuf,
returnName=True,
conDir=conDir,
skipIfExists=True,
duration=duration)
for st, path, fname in streamGenerator:
if st is not None: # if data were returned
if not os.path.exists(path):
os.makedirs(path)
fname = fname + '.' + formatKey[formatOut]
st.write(os.path.join(path, fname), formatOut)
if not os.path.exists(os.path.join(conDir, '.index.db')):
indexDirectory(conDir)
class DataFetcher(object):
"""
\n
Class to handle data acquisition
Parameters
----------
method : str or int
One of the approved methods for getting data as supported by detex
Options are:
"dir" : A data directory as created by makeDataDirectories
"client" : an obspy client can be passed to get data
useful if using an in-network database
"iris" : an iris client is initiated, also uses IRIS for inventory
"uuss" : A client attached to the university of utah
seismograph stations is initiated using CWB for waveforms
and IRIS is used for station inventories
client : An obspy client object
Client object used to get data, from obspy.clients
removeResponse : bool
If True remove response before returning stream.
inventoryArg : None, obspy client object, or obspy Inventory object
        A separate client for station inventories, only used if
removeResponse == True, also supports keyword "iris" for iris client
directoryName : str
A path to the continuous waveforms directory or event waveforms
directory. If None is passed default names are used
(ContinuousWaveForms and EventWaveForms)
opType : str
Output type after removing instrument response. Choices are:
"DISP" (m), "VEL" (m/s), or "ACC" (m/s**2)
prefilt : list of real numbers
Pre-filter parameters for removing instrument response.
conDatDuration : int or float
Duration for continuous data in seconds
conBuff : int or float
The amount of data, in seconds, to download at the end of the
conDatDuration. Ideally should be equal to template length, important
in order to avoid missing potential events at the end of a stream
timeBeforeOrigin : int or float
Seconds before origin of each event to fetch (used in getTemData)
timeAfterOrigin : int or float
Seconds after origin of each event to fetch (used in getTemData)
checkData : bool
If True apply some data checks before returning streams, can be useful
for older data sets.
fillZeros : bool
If True fill data that are not available with 0s (provided some data are
available)
"""
supMethods = ['dir', 'client', 'uuss', 'iris']
def __init__(self, method, client=None, removeResponse=True,
inventoryArg=None, directoryName=None, opType='VEL',
prefilt=[.05, .1, 15, 20], conDatDuration=3600, conBuff=120,
timeBeforeOrigin=1 * 60, timeAfterOrigin=4 * 60, checkData=True,
fillZeros=False):
self.__dict__.update(locals()) # Instantiate all inputs
self.inventory = _getInventory(inventoryArg)
self._checkInputs()
if self.removeResponse and self.inventory is None:
if self.method == 'dir':
msg = ('Cannot remove response without a valid inventoryArg, '
'setting removeResponse to False')
detex.log(__name__, msg, level='warning', pri=True)
self.removeResponse = False
def _checkInputs(self):
if not isinstance(self.method, string_types):
msg = 'method must be a string. options:\n %s' % self.supMethods
detex.log(__name__, msg, level='error', e=TypeError)
self.method = self.method.lower() # parameter to lowercase
        if self.method not in DataFetcher.supMethods:
msg = ('method %s not supported. Options are:\n %s' %
(self.method, self.supMethods))
detex.log(__name__, msg, level='error', e=ValueError)
if self.method == 'dir':
if self.directoryName is None:
self.directoryName = conDirDefault
dirPath = glob.glob(self.directoryName)
if len(dirPath) < 1:
msg = ('directory %s not found make sure path is correct' %
self.directoryName)
detex.log(__name__, msg, level='error', e=IOError)
else:
self.directory = dirPath[0]
self._getStream = _loadDirectoryData
elif self.method == "client":
if self.client is None:
msg = 'Method %s requires a valid obspy client' % self.method
detex.log(__name__, msg, level='error', e=ValueError)
self._getStream = _assignClientFunction(self.client)
elif self.method == "iris":
self.client = obspy.clients.fdsn.Client("IRIS")
self._getStream = _assignClientFunction(self.client)
elif self.method == 'uuss': # uuss setting
self.client = obspy.clients.neic.Client('128.110.129.227')
self._getStream = _assignClientFunction(self.client)
self.inventory = obspy.clients.fdsn.Client('iris') # use iris for resps
def getTemData(self, temkey, stakey, tb4=None, taft=None, returnName=True,
temDir=None, skipIfExists=False, skipDict=None,
returnTimes=False, phases=None):
"""
Take detex station keys and template keys and yield stream objects of
all possible combinations
Parameters
----------
temkey : pd DataFrame
Detex template key
stakey : pd DataFrame
Detex station key
tb4 : None, or real number
Time before origin (or first phase pick if phases is not None)
taft : None or real number
Time after origin (or first phase pick if phases is not None)
        returnName : bool
            If True also yield the name of each event as found in the
            template key
temDir : str or None
Name of template directory, used to check if exists
skipIfExists : bool
If True dont return if file is in temDir
skipDict : dict
Dictionary of stations (keys, net.sta) and events (values)
to skip
returnTimes : bool
If True return times of data
phases : None, str, or DataFrame
If not None must be a path to a phasePick file, in the same format
as detex.util.pickPhases, or a path to a saved csv of the same.
tb4 and taft will be referenced to the first arrival for each
event and station, or the origin if none are available.
Yields
--------
Stream objects of possible combination if data are fetchable and event
names if returnName == True or times of data if returnTimes == True
"""
if tb4 is None:
tb4 = self.timeBeforeOrigin
if taft is None:
taft = self.timeAfterOrigin
if skipDict is not None and len(skipDict.keys()) < 1:
skipDict = None
stakey = detex.util.readKey(stakey, key_type='station')
temkey = detex.util.readKey(temkey, key_type='template')
if phases is not None:
phases = detex.util.readKey(phases, "phases")
indexiter = itertools.product(stakey.index, temkey.index)
# iter through each station/event pair and fetch data
for stain, temin in indexiter:
ser = temkey.loc[temin].combine_first(stakey.loc[stain])
netsta = ser.NETWORK + '.' + ser.STATION
# Skip event/station combos in skipDict
if skipDict is not None and netsta in skipDict.keys():
vals = skipDict[netsta]
if ser.NAME in vals:
continue
# skip events that already have files
if skipIfExists:
pfile = glob.glob(os.path.join(temDir, ser.NAME, netsta + '*'))
if len(pfile) > 0:
continue
if isinstance(ser.TIME, string_types) and 'T' in ser.TIME:
time = ser.TIME
else:
time = float(ser.TIME)
net = ser.NETWORK
sta = ser.STATION
chan = ser.CHANNELS.split('-')
# if phases option is used then find first phase and use it
if phases is not None:
con1 = (phases.Event == ser.NAME)
con2 = (phases.Station == '%s.%s' % (net, sta))
curEve = phases[con1 & con2]
if len(curEve) < 1: # if event station pair not in phases
msg = (('%s on %s was not in phase file, using origin')
% (ser.NAME, sta))
detex.log(__name__, msg, level='info')
t = obspy.UTCDateTime(time)
else:
utcs = [obspy.UTCDateTime(x) for x in curEve.TimeStamp]
t = min(utcs)
else:
t = obspy.UTCDateTime(time)
start = t - tb4
end = t + taft
st = self.getStream(start, end, net, sta, chan, '??')
if st is None: # skip if returns nothing
continue
if returnName:
yield st, ser.NAME
elif returnTimes:
yield st, start, end
else:
yield st
def getConData(self, stakey, secBuff=None, returnName=False,
returnTimes=False, conDir=None, skipIfExists=False,
utcstart=None, utcend=None, duration=None, randSamps=None):
"""
Get continuous data defined by the stations and time range in
the station key
Parameters
-----------
stakey : str or pd.DataFrame
A path to the stationkey or a loaded DF of the stationkey
secBuff : int
A buffer in seconds to add to end of continuous data chunk
so that consecutive files overlap by secBuf
returnName : bool
If True return the name of the file and expected path
CondDir : str
Path to Continuous data directory if it exists. Used to check
if a file already exists so it can be skipped if skipIfExists
skipIfExists : bool
If True files already exists wont be downloaded again
utcstart : None, int, str or obspy.UTCDateTime instance
An object readable by obspy.UTCDateTime class which is the start
time of continuous data to fetch. If None use time in station key
utcend : None, int or str, or obspy.UTCDateTime instance
An object readable by obspy.UTCDateTime class which is the end
time of continuous data to fetch. If None use time in station key
duration : None, int, or float
The duration of each continuous data chunk to fetch, if None
use conDataDuration attribute of DataFetcher instance
randSamps : None or int
If not None, return random number of traces rather than whole
range
Yields
--------
Obspy trace and other requested parameters
"""
stakey = detex.util.readKey(stakey, 'station')
if secBuff is None:
secBuff = self.conBuff
if duration is None:
duration = self.conDatDuration
for num, ser in stakey.iterrows():
netsta = ser.NETWORK + '.' + ser.STATION
if utcstart is None:
ts1 = obspy.UTCDateTime(ser.STARTTIME)
else:
ts1 = utcstart
if utcend is None:
ts2 = obspy.UTCDateTime(ser.ENDTIME)
else:
ts2 = utcend
utcs = _divideIntoChunks(ts1, ts2, duration, randSamps)
for utc in utcs:
if conDir is not None:
path, fil = _makePathFile(conDir, netsta, utc)
if skipIfExists:
pfile = glob.glob(os.path.join(path, fil + '*'))
if len(pfile) > 0: # if already exists then skip
continue
start = utc
end = utc + self.conDatDuration + secBuff
net = ser.NETWORK
sta = ser.STATION
chan = ser.CHANNELS.split('-')
st = self.getStream(start, end, net, sta, chan, '*')
if st is None or len(st) < 1:
continue
                if utcend is not None:
if utcend.timestamp < st[0].stats.endtime.timestamp: # trim if needed
st.trim(endtime=utcend)
if len(st) < 1:
continue
if returnName and returnTimes:
path, fname = _makePathFile(conDir, netsta, utc)
yield st, path, fname, start, end
elif returnName:
path, fname = _makePathFile(conDir, netsta, utc)
yield st, path, fname
elif returnTimes:
yield st, start, end
else:
yield st
def getStream(self, start, end, net, sta, chan='???', loc='??'):
"""
        Function for getting data.
Parameters
----------
start : obspy.UTCDateTime object
Start time to fetch
end : obspy.UTCDateTime object
End time to fetch
net : str
Network code, usually 2 letters
sta : str
Station code
chan : str or list of str (should support wildcard)
Channels to fetch
loc : str
Location code for station
Returns
---------
An instance of obspy.Stream populated with requested data, or None if
not available.
"""
# make sure start and end are UTCDateTimes
start = obspy.UTCDateTime(start)
end = obspy.UTCDateTime(end)
# check that chan input is ok
if not isinstance(chan, (list, tuple)):
if not isinstance(chan, string_types):
msg = 'chan must be a string or list of strings'
detex.log(__name__, msg, level='error')
chan = [chan]
# fetch stream
st = self._getStream(self, start, end, net, sta, chan, loc)
# perform checks if required
if self.checkData:
st = _dataCheck(st, start, end)
# if no data return None
if st is None or len(st) < 1:
return None
# attach response
if self.removeResponse and self.inventory is not None:
if not _hasResponse(st):
st = _attachResponse(self, st, start, end, net, sta, loc, chan)
# remove response
if self.removeResponse:
st = _removeInstrumentResponse(self, st)
if st is None: # return None if response removal failed
return None
# trims and zero fills
st.trim(starttime=start, endtime=end)
st.merge(1) # merge and split to overwrite overlaps
st = st.split()
st.detrend('linear')
if self.fillZeros:
st.trim(starttime=start, endtime=end, pad=True, fill_value=0.0)
st.merge(1, fill_value=0.0)
return st
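# Illustrative use of DataFetcher.getStream (a sketch; the network/station
# codes and the time window below are hypothetical):
#
#     fetcher = DataFetcher('iris', removeResponse=False)
#     st = fetcher.getStream(obspy.UTCDateTime('2015-01-01T00:00:00'),
#                            obspy.UTCDateTime('2015-01-01T01:00:00'),
#                            'UU', 'SRU', chan=['HHZ'], loc='*')
#     # st is an obspy Stream, or None if the data could not be fetched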
########## Functions for loading data based on selected methods ###########
def _loadDirectoryData(fet, start, end, net, sta, chan, loc):
"""
Function to load continuous data from the detex directory structure
"""
# get times with slight buffer
t1 = obspy.UTCDateTime(start).timestamp
t2 = obspy.UTCDateTime(end).timestamp
buf = 3 * fet.conDatDuration
dfind = _loadIndexDb(fet.directoryName, net + '.' + sta, t1 - buf, t2 + buf)
if dfind is None:
t1p = obspy.UTCDateTime(t1)
t2p = obspy.UTCDateTime(t2)
msg = 'data from %s to %s on %s not found in %s' % (t1p, t2p, sta,
fet.directoryName)
detex.log(__name__, msg, level='warning', pri=False)
return None
# define conditions in which condata should not be loaded
# con1 and con2 - No overlap (other than 10%)
tra = t2 - t1 # time range
con1 = ((dfind.Starttime <= t1) & (dfind.Endtime - tra * .1 < t1) &
(dfind.Starttime < t2) & (dfind.Endtime < t2))
con2 = ((dfind.Starttime > t1) & (dfind.Endtime > t1) &
(dfind.Starttime + tra * .1 > t2) & (dfind.Endtime >= t2))
df = dfind[~(con1 | con2)]
if len(df) < 1:
t1p = obspy.UTCDateTime(t1)
t2p = obspy.UTCDateTime(t2)
msg = 'data from %s to %s on %s not found in %s' % (t1p, t2p, sta,
fet.directoryName)
detex.log(__name__, msg, level='warning', pri=False)
return None
st = obspy.core.Stream()
if len(df.Path) < 1: # if no event fits description
return None
for path, fname in zip(df.Path, df.FileName):
fil = os.path.join(path, fname)
st1 = read(fil)
        if st1 is not None:
st += st1
# st.trim(starttime=start, endtime=end)
# check if chan variable is string else iterate
if isinstance(chan, string_types):
stout = st.select(channel=chan)
else:
stout = obspy.core.Stream()
for cha in chan:
stout += st.select(channel=cha)
loc = '*' if loc in ['???', '??'] else loc # convert ? to *
stout = stout.select(location=loc)
return stout
def _assignClientFunction(client):
"""
    function to take an obspy client (FDSN, NEIC, EW, etc.) and return the
    correct loadFromClient function for getting data.
"""
if isinstance(client, obspy.clients.fdsn.Client):
return _loadFromFDSN
elif isinstance(client, obspy.clients.neic.Client):
return _loadFromNEIC
elif isinstance(client, obspy.clients.earthworm.Client):
return _loadFromEarthworm
else:
msg = 'Client type not supported'
detex.log(__name__, msg, level='error', e=TypeError)
## load from client functions, this is needed because the APIs are not the same
def _loadFromNEIC(fet, start, end, net, sta, chan, loc):
"""
Use obspy.neic.Client to fetch waveforms
"""
client = fet.client
# str reps of utc objects for error messages
startstr = str(start)
endstr = str(end)
st = obspy.Stream()
for cha in chan:
try: # try neic client
st += client.get_waveforms(net, sta, loc, cha, start, end)
        except Exception:
msg = ('Could not fetch data on %s from %s to %s' %
(net + '.' + sta, startstr, endstr))
detex.log(__name__, msg, level='warning', pri=False)
st = None
return st
def _loadFromEarthworm(fet, start, end, net, sta, chan, loc):
client = fet.client
startstr = str(start)
endstr = str(end)
st = obspy.Stream()
if '*' in loc or '?' in loc: # adjust for earthworm loc codes
loc = '--'
for cha in chan:
        try:  # try earthworm client
            st += client.get_waveforms(net, sta, loc, cha, start, end)
        except Exception:
msg = ('Could not fetch data on %s from %s to %s' %
(net + '.' + sta, startstr, endstr))
detex.log(__name__, msg, level='warning', pri=False)
st = None
return st
def _loadFromFDSN(fet, start, end, net, sta, chan, loc):
"""
Use obspy.clients.fdsn.Client to fetch waveforms
"""
client = fet.client
# str reps of utc objects for error messages
startstr = str(start)
endstr = str(end)
# convert channels to correct format (list seperated by ,)
if not isinstance(chan, string_types):
chan = ','.join(chan)
else:
if '-' in chan:
chan = ','.join(chan.split('-'))
# try to get waveforms, else return None
try:
st = client.get_waveforms(net, sta, loc, chan, start, end, attach_response=fet.removeResponse)
    except Exception:
msg = ('Could not fetch data on %s from %s to %s' %
(net + '.' + sta, startstr, endstr))
detex.log(__name__, msg, level='warning', pri=False)
st = None
return st
########## MISC functions #############
def _attachResponse(fet, st, start, end, net, sta, loc, chan):
"""
Function to attach response from inventory or client
"""
if not fet.removeResponse or fet.inventory is None:
return st
if isinstance(fet.inventory, obspy.core.inventory.Inventory):
st.attach_response(fet.inventory)
else:
inv = obspy.core.inventory.Inventory([], 'detex')
for cha in chan:
inv += fet.inventory.get_stations(starttime=start,
endtime=end,
network=net,
station=sta,
                                              location=loc,
channel=cha,
level="response")
st.attach_response(inv)
return st
def _getInventory(invArg):
"""
Take a string, Obspy client, or inventory object and return inventory
object used to attach responses to stream objects for response removal
"""
if isinstance(invArg, string_types):
        if invArg.lower() == 'iris':
            return obspy.clients.fdsn.Client('IRIS')
elif not os.path.exists(invArg):
msg = ('if inventoryArg is str then it must be a client name, ie '
'IRIS, or a path to a station xml')
detex.log(__name__, msg, level='error')
else:
return obspy.read_inventory(invArg)
    elif isinstance(invArg, obspy.core.inventory.Inventory):
return invArg
elif isinstance(invArg, obspy.clients.fdsn.Client):
return invArg
elif invArg is None:
return None
def _dataCheck(st, start, end):
# if none or empty return None
if st is None or len(st) < 1:
return None
netsta = st[0].stats.network + '.' + st[0].stats.station
time = str(st[0].stats.starttime).split('.')[0]
# check if data range is way off what was requested
utcmin = min([x.stats.starttime for x in st])
utcmax = max([x.stats.endtime for x in st])
    if (end - start) - (utcmax - utcmin) > 60 * 10:  # give 10 minutes tolerance
msg = '%s starting on %s is shorter than expected' % (netsta, time)
detex.log(__name__, msg, pri=True)
# Check sample rates
if any([tr.stats.sampling_rate % 1 != 0 for tr in st]):
for tr in st:
tr.stats.sampling_rate = np.round(tr.stats.sampling_rate)
        msg = ('Found non-int sampling_rates, rounded to nearest '
               'int on %s around %s' % (netsta, time))
detex.log(__name__, msg, level='warning')
if any([not np.any(x.data) for x in st]):
msg = ('At least one channel is all 0s on %s around %s, skipping' %
(netsta, time))
detex.log(__name__, msg, level='warn', pri=True)
return None
return st
def _hasResponse(st):
"""
Test if all channels have responses of a stream, return bool
"""
return all([hasattr(tr.stats, 'response') for tr in st])
def _removeInstrumentResponse(fet, st):
if not fet.removeResponse: # pass stream back if no response removal
return st
st.detrend('linear') # detrend
st = _fftprep(st)
try:
st.remove_response(output=fet.opType, pre_filt=fet.prefilt)
except:
utc1 = str(st[0].stats.starttime).split('.')[0]
utc2 = str(st[0].stats.endtime).split('.')[0]
msg = 'RemoveResponse Failed for %s,%s, from %s to %s, skipping' % (
st[0].stats.network, st[0].stats.station, utc1, utc2)
detex.log(__name__, msg, level='warning', pri=True)
st = None
return st
def _fftprep(st):
    """Make sure the data vector is not of odd length or the fft drags."""
    data = st[0].data
if len(data) % 2 != 0 and len(data) % 100 > 50:
data = np.insert(data, 0, data[0])
st[0].data = data
st[0].stats.starttime = st[0].stats.starttime - st[0].stats.delta
elif len(data) % 2 != 0 and len(data) % 100 < 50:
data = data[1:]
st[0].data = data
st[0].stats.starttime = st[0].stats.starttime + st[0].stats.delta
return st
def _divideIntoChunks(utc1, utc2, duration, randSamps):
"""
    Function to take two utc date time objects and create a generator to yield
    all times in between by intervals of duration. If randSamps is not None
    a random subsample of the chunks is returned instead; when randSamps
    exceeds a quarter of the available chunks, all chunks are used.
    Inputs can be any obspy readable format.
"""
utc1 = obspy.UTCDateTime(utc1)
utc2 = obspy.UTCDateTime(utc2)
# convert to time stamps (epoch time)
ts1 = utc1.timestamp - utc1.timestamp % duration
ts2 = utc2.timestamp - utc2.timestamp % duration
if randSamps is None:
t = ts1
while t <= ts2:
yield obspy.UTCDateTime(t) # yield a value
            t += duration  # advance by one chunk duration
else:
utcList = np.arange(utc1.timestamp, utc2.timestamp, duration)
if randSamps > len(utcList) / 4:
msg = ('Population too small for %d random samples, taking %d' % (
randSamps, len(utcList)))
detex.log(__name__, msg, level='info')
randSamps = len(utcList)
        ranutc = random.sample(list(utcList), randSamps)  # list() so random.sample accepts it
rsamps = [obspy.UTCDateTime(x) for x in ranutc]
for samp in rsamps:
yield samp
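# Worked example (illustrative; start and end are floored to the nearest
# duration boundary, so a 00:30-02:30 window with 3600 s chunks yields
# three chunk start times):
#
#     >>> list(_divideIntoChunks('2015-01-01T00:30:00',
#     ...                        '2015-01-01T02:30:00', 3600, None))
#     [UTCDateTime(2015, 1, 1, 0, 0), UTCDateTime(2015, 1, 1, 1, 0),
#      UTCDateTime(2015, 1, 1, 2, 0)]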
def _makePathFile(conDir, netsta, utc):
"""
Make the expected filename to see if continuous data chunk exists
"""
utc = obspy.UTCDateTime(utc)
year = '%04d' % utc.year
jd = '%03d' % utc.julday
hr = '%02d' % utc.hour
mi = '%02d' % utc.minute
se = '%02d' % utc.second
path = os.path.join(conDir, netsta, year, jd)
fname = netsta + '.' + year + '-' + jd + 'T' + '-'.join([hr, mi, se])
return path, fname
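# Example of the naming scheme (illustrative; 'UU.SRU' is a hypothetical
# net.sta code, posix-style path separators shown):
#
#     _makePathFile('ContinuousWaveForms', 'UU.SRU', '2015-01-01T01:02:03')
#     -> ('ContinuousWaveForms/UU.SRU/2015/001', 'UU.SRU.2015-001T01-02-03')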
###### Index directory functions ##########
def indexDirectory(dirPath):
"""
Create an index (.index.db) for a directory with stored waveform files
which also contains quality info of each file
Parameters
    ----------
dirPath : str
The path to the directory containing waveform data (any structure)
"""
columns = ['Path', 'FileName', 'Starttime', 'Endtime', 'Gaps', 'Nc', 'Nt',
'Duration', 'Station']
df = pd.DataFrame(columns=columns) # DataFrame for indexing
msg = 'indexing, or updating index for %s' % dirPath
detex.log(__name__, msg, level='info', pri=True)
# Create a list of possible path permutations to save space in database
pathList = [] # A list of lists with different path permutations
for dirpath, dirname, filenames in os.walk(dirPath):
dirList = os.path.abspath(dirpath).split(os.path.sep)
# Expand pathList if needed
while len(dirList) > len(pathList):
pathList.append([])
        # loop and put info in pathList that isn't already there
for ind, value in enumerate(dirList):
if not isinstance(value, list):
value = [[value]]
for val in value:
for va in val:
if va not in pathList[ind]:
pathList[ind].append(va)
# Loop over file names perform quality checks
for fname in filenames:
if fname[0] == '.':
continue
fpath = os.path.join(*dirList)
fullpath = os.path.join(fpath, fname)
qualDict = _checkQuality(fullpath)
if qualDict is None: # If file is not obspy readable
msg = 'obspy failed to read %s , skipping' % fullpath
detex.log(__name__, msg, level='warning', pri=True)
continue # skip to next file
pathInts = [pathList[num].index(x) for num,
x in enumerate(dirList)]
df.loc[len(df), 'Path'] = json.dumps(pathInts)
            for key, value in qualDict.items():
df.loc[len(df) - 1, key] = value
df.loc[len(df) - 1, 'FileName'] = fname
# Create path index key
if len(pathList) < 1:
msg = 'No obspy readable files found in %s' % dirPath
detex.log(__name__, msg, level='error')
dfInd = _createIndexDF(pathList)
detex.util.saveSQLite(df, os.path.join(dirPath, '.index.db'), 'ind')
detex.util.saveSQLite(dfInd, os.path.join(dirPath, '.index.db'), 'indkey')
def _createIndexDF(pathList):
indLength = len(pathList)
colLength = max([len(x) for x in pathList])
ind = [x for x in range(indLength)]
cols = ['col_' + str(x) for x in range(colLength)]
df = pd.DataFrame(index=ind, columns=cols)
df.fillna(value='', inplace=True)
for ind1, pl in enumerate(pathList):
for ind2, item in enumerate(pl):
df.loc[ind1, 'col_' + str(ind2)] = item
return df
def _checkQuality(stPath):
"""
load a path to an obspy trace and check quality
"""
st = read(stPath)
if st is None:
return None
lengthStream = len(st)
gaps = st.get_gaps()
gapsum = np.sum([x[-2] for x in gaps])
starttime = min([x.stats.starttime.timestamp for x in st])
endtime = max([x.stats.endtime.timestamp for x in st])
duration = endtime - starttime
nc = len(list(set([x.stats.channel for x in st])))
netsta = st[0].stats.network + '.' + st[0].stats.station
outDict = {'Gaps': gapsum, 'Starttime': starttime, 'Endtime': endtime,
'Duration': duration, 'Nc': nc, 'Nt': lengthStream,
'Station': netsta}
return outDict
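# The returned dictionary has this shape (values here are illustrative):
#
#     {'Gaps': 0.0, 'Starttime': 1420070400.0, 'Endtime': 1420074120.0,
#      'Duration': 3720.0, 'Nc': 3, 'Nt': 3, 'Station': 'UU.SRU'}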
def _loadIndexDb(dirPath, station, t1, t2):
indexFile = glob.glob(os.path.join(dirPath, '.index.db'))
if len(indexFile) < 1:
msg = '%s is not currently indexed, indexing now' % dirPath
detex.log(__name__, msg, level='info', pri=True)
indexDirectory(dirPath)
indexFile = glob.glob(os.path.join(dirPath, '.index.db'))
sql = (('SELECT %s FROM %s WHERE Starttime>=%f AND ' +
'Endtime<=%f AND Station="%s"') %
('*', 'ind', t1, t2, station))
df = detex.util.loadSQLite(indexFile[0], 'ind', sql=sql, silent=False)
if df is None or len(df) < 1: # if not in database
return None
dfin = detex.util.loadSQLite(indexFile[0], 'indkey', convertNumeric=False)
dfin.columns = [int(x.split('_')[1]) for x in dfin.columns]
dfin.index = [int(x) for x in dfin.index]
# reconstruct path
df['Path'] = [_associatePathList(x, dfin) for x in df['Path']]
df.sort_values(by='FileName', inplace=True)
df.reset_index(drop=True, inplace=True)
return df
def _associatePathList(pathList, dfin):
pl = json.loads(pathList)
pat = []
for num, p in enumerate(pl):
pat.append(dfin.loc[num, p])
return os.path.join(*pat)
getAllData = makeDataDirectories
| 38.929051 | 102 | 0.582174 | 15,476 | 0.381154 | 9,473 | 0.233308 | 0 | 0 | 0 | 0 | 16,142 | 0.397557 |
f2766a9a2df58d6c9fe0fc41dab441157d2a7a7d | 4,850 | py | Python | HouseHunter/core.py | JGBMichalski/House-Hunter | 7ad1e866907545b8e2302c1a775cadbd8f807ad9 | [
"MIT"
] | null | null | null | HouseHunter/core.py | JGBMichalski/House-Hunter | 7ad1e866907545b8e2302c1a775cadbd8f807ad9 | [
"MIT"
] | null | null | null | HouseHunter/core.py | JGBMichalski/House-Hunter | 7ad1e866907545b8e2302c1a775cadbd8f807ad9 | [
"MIT"
] | null | null | null | from tarfile import SUPPORTED_TYPES
import requests
import re
from bs4 import BeautifulSoup
import json
import HouseHunter.globals as Globals
from HouseHunter.ad import *
from pathlib import Path
class Core():
def __init__(self, filename="ads.json"):
self.filepath = Path().absolute().joinpath(filename) if filename else None
self.all_ads = {}
self.new_ads = {}
self.third_party_ads = []
self.load_ads()
# Reads given file and creates a dict of ads in file
def load_ads(self):
# If filepath is None, just skip local file
if self.filepath:
# If the file doesn't exist create it
if not self.filepath.exists():
ads_file = self.filepath.open(mode='w')
ads_file.write("{}")
ads_file.close()
return
with self.filepath.open(mode="r") as ads_file:
self.all_ads = json.load(ads_file)
# Save ads to file
def save_ads(self):
# If filepath is None, just skip local file
if self.filepath:
with self.filepath.open(mode="w") as ads_file:
json.dump(self.all_ads, ads_file)
def validate_origin(self, url):
for origin in Globals.SUPPORTED_ORIGINS:
if origin in url:
return Globals.SUPPORTED_ORIGINS.index(origin)
return -1
# Pulls page data from a given url and finds all ads on each page
def scrape_url_for_ads(self, url):
self.new_ads = {}
email_title = None
origin = self.validate_origin(url)
if origin < 0:
print("Site not supported: {}".format(url))
return self.new_ads, email_title
while url:
# Get the html data from the URL
page = requests.get(url)
soup = BeautifulSoup(page.content, "html.parser")
# If the email title doesnt exist pull it from the html data
if email_title is None:
email_title = self.get_email_title(origin, soup)
# Find ads on the page
self.find_ads(soup, origin)
# Set url for next page of ads
# Depending on supported origins, this may not apply to all
url = soup.find("a", string="Next")
if not url:
url = soup.find("a", href=True, rel="next")
if url:
url = Globals.SUPPORTED_ORIGINS[origin] + url['href']
return self.new_ads, email_title
def find_ads(self, soup, origin):
# Finds all ad trees in page html.
ad_regex = re.compile('.*{}.*'.format(Globals.AD_ROOT_CLASS_NAMES[origin][Globals.PRIMARY]))
ads = soup.find_all(Globals.AD_ROOT_ELEMENT_TYPE[origin], {"class": ad_regex})
        # If no ads were found, retry with the secondary class name
if not ads:
ad_regex = re.compile('.*{}.*'.format(Globals.AD_ROOT_CLASS_NAMES[origin][Globals.SECONDARY]))
ads = soup.find_all(Globals.AD_ROOT_ELEMENT_TYPE[origin], {"class": ad_regex})
# Create a dictionary of all ads with ad id being the key
for ad in ads:
if origin == 0:
current_ad = WFPAd(origin, ad)
elif origin == 1:
current_ad = RewAd(origin, ad)
else:
return
            # Skip ads that have already been found
if (current_ad.id not in self.all_ads):
self.new_ads[current_ad.id] = current_ad.info
self.all_ads[current_ad.id] = current_ad.info
def get_email_title(self, origin, soup):
if origin != 0:
# Used for origins that do not give any details about the search options
return Globals.SUPPORTED_FULL_NAMES[origin]
else:
# Depending on supported origins, this may not apply to all
email_title_location = soup.find('div', {"class": "results-info"}).find('h1')
if email_title_location:
# Depending on supported origins, this may not apply to all
return Globals.SUPPORTED_FULL_NAMES[origin] + " - " + self.format_title(email_title_location.text.split(' in ')[1].strip('"'))
else:
return Globals.SUPPORTED_FULL_NAMES[origin]
# Makes the first letter of every word upper-case
def format_title(self, title):
new_title = []
title = title.split()
for word in title:
new_word = ''
new_word += word[0].upper()
if len(word) > 1:
new_word += word[1:]
new_title.append(new_word)
return ' '.join(new_title)
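    # e.g. self.format_title('houses for rent') -> 'Houses For Rent'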
# Returns a given list of words to lower-case words
def words_to_lower(self, words):
return [word.lower() for word in words]
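# Illustrative usage (a sketch; the URL is hypothetical and must contain one
# of Globals.SUPPORTED_ORIGINS for scrape_url_for_ads to accept it):
#
#     hunter = Core(filename='ads.json')
#     new_ads, email_title = hunter.scrape_url_for_ads('https://example.org/rentals')
#     hunter.save_ads()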
| 34.15493 | 142 | 0.583711 | 4,652 | 0.959175 | 0 | 0 | 0 | 0 | 0 | 0 | 1,086 | 0.223918 |
f27a87d9305d94ef4ecc93fe8c501738b9c6465e | 582 | py | Python | recipes/Python/474122_neat/recipe-474122.py | tdiprima/code | 61a74f5f93da087d27c70b2efe779ac6bd2a3b4f | [
"MIT"
] | 2,023 | 2017-07-29T09:34:46.000Z | 2022-03-24T08:00:45.000Z | recipes/Python/474122_neat/recipe-474122.py | unhacker/code | 73b09edc1b9850c557a79296655f140ce5e853db | [
"MIT"
] | 32 | 2017-09-02T17:20:08.000Z | 2022-02-11T17:49:37.000Z | recipes/Python/474122_neat/recipe-474122.py | unhacker/code | 73b09edc1b9850c557a79296655f140ce5e853db | [
"MIT"
] | 780 | 2017-07-28T19:23:28.000Z | 2022-03-25T20:39:41.000Z | # nice and clean closure notation
def get_counter_neat():
def f():
f.x += 1
return f.x
f.x = 0
return f
# traditional, not_so_neat closure notation
def get_counter_traditional():
x = [0]
def f():
x[0] += 1
return x[0]
return f
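# a third option (added sketch, Python 3 only): ``nonlocal`` rebinds the
# enclosing variable directly, avoiding both the function attribute and the
# one-element-list workaround
def get_counter_nonlocal():
    x = 0
    def f():
        nonlocal x
        x += 1
        return x
    return f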
#### EXAMPLE ###########################################################
cnt_a = get_counter_neat()
cnt_b = get_counter_neat()
print(cnt_a())  # >>> 1
print(cnt_a())  # >>> 2
print(cnt_a())  # >>> 3
print(cnt_b())  # >>> 1
print(cnt_a())  # >>> 4
print(cnt_b())  # >>> 2
print(cnt_b())  # >>> 3
| 20.068966 | 72 | 0.487973 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 197 | 0.338488 |