"""Python implementation of 'Mountain Car' environment.
An underpowered car must drive up a hill; to succeed, you must first go back and forth to build momentum.
This is a classic environment in RL research, first described by:
A Moore, Efficient Memory-Based Learning for Robot Control,
PhD thesis, University of Cambridge, 1990.
"""
from typing import Optional
from bsuite.environments import base
from bsuite.experiments.mountain_car import sweep
import dm_env
from dm_env import specs
import numpy as np
class MountainCar(base.Environment):
"""Mountain Car, an underpowered car must power up a hill."""
def __init__(self,
max_steps: int = 1000,
seed: Optional[int] = None):
"""Mountain Car, an underpowered car must power up a hill.
Args:
max_steps : maximum number of steps to perform per episode
seed : randomization seed
"""
super().__init__()
self._min_pos = -1.2
self._max_pos = 0.6
self._max_speed = 0.07
self._goal_pos = 0.5
self._force = 0.001
self._gravity = 0.0025
self._max_steps = max_steps
self._rng = np.random.RandomState(seed)
self._timestep = 0
self._raw_return = 0.
self._position = 0.
self._velocity = 0.
self.bsuite_num_episodes = sweep.NUM_EPISODES
def _get_observation(self):
obs = [self._position, self._velocity, self._timestep / self._max_steps]
return np.array([obs], dtype=np.float32)
def _reset(self) -> dm_env.TimeStep:
"""Random initialize in [-0.6, -0.4] and zero velocity."""
self._timestep = 0
self._position = self._rng.uniform(-0.6, -0.4)
self._velocity = 0
return dm_env.restart(self._get_observation())
def _step(self, action: int) -> dm_env.TimeStep:
self._timestep += 1
reward = -1.
self._raw_return += reward
# Step the environment
self._velocity += (action - 1) * self._force + np.cos(
3 * self._position) * -self._gravity
self._velocity = np.clip(self._velocity, -self._max_speed, self._max_speed)
self._position += self._velocity
self._position = np.clip(self._position, self._min_pos, self._max_pos)
if self._position == self._min_pos:
self._velocity = np.clip(self._velocity, 0, self._max_speed)
observation = self._get_observation()
if self._position >= self._goal_pos or self._timestep >= self._max_steps:
return dm_env.termination(reward=reward, observation=observation)
return dm_env.transition(reward=reward, observation=observation)
def observation_spec(self):
return specs.Array(shape=(1, 3), dtype=np.float32, name='observation')
def action_spec(self):
"""Actions [0,1,2] -> [Left, Stay, Right]."""
return specs.DiscreteArray(3, name='action')
def bsuite_info(self):
return dict(raw_return=self._raw_return)
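# A minimal usage sketch (not part of the original file): run one episode with a
# uniformly random policy, using the reset()/step() interface provided by
# bsuite's base.Environment / dm_env.
if __name__ == "__main__":
    env = MountainCar(max_steps=200, seed=42)
    rng = np.random.RandomState(0)
    timestep = env.reset()
    episode_return = 0.
    while not timestep.last():
        action = rng.randint(env.action_spec().num_values)  # 0=left, 1=stay, 2=right
        timestep = env.step(action)
        episode_return += timestep.reward
    print('return:', episode_return, 'info:', env.bsuite_info())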
| {
"content_hash": "f09e8d4e6db95ff1838cea0f1a24930a",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 79,
"avg_line_length": 32.14942528735632,
"alnum_prop": 0.6571326421165534,
"repo_name": "deepmind/bsuite",
"id": "4eb3e7d91f269285ea0cda25940aaf7f91f8e780",
"size": "3527",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "bsuite/environments/mountain_car.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "106470"
},
{
"name": "Python",
"bytes": "448602"
},
{
"name": "Shell",
"bytes": "2425"
},
{
"name": "TeX",
"bytes": "233184"
}
],
"symlink_target": ""
} |
'''
A demo of Kivy Properties and why they rule
'''
from kivy.app import App
from kivy.properties import ObjectProperty
from kivy.uix.boxlayout import BoxLayout
from kivy.lang import Builder
from kivy.animation import Animation, AnimationTransition
Builder.load_string('''
<ButtonBox>:
b1: b1
b2: b2
b3: b3
orientation: 'horizontal'
spacing: '10sp'
padding: 10, 0, 10, 0
Button:
id: b1
text: "one"
Button:
id: b2
text: "two"
Button:
id: b3
text: "three"
''')
class ButtonBox(BoxLayout):
'''
Illustrate animation
'''
b1 = ObjectProperty()
b2 = ObjectProperty()
b3 = ObjectProperty()
def start(self):
'''
Create and start the animations
Note: in production code, you would probably want a different
'on_complete' callback for each animation. We use the same one
here for simplicity's sake (a sketch follows this class).
'''
anim = Animation(size_hint_y=0 if self.b1.size_hint_y > 0.5 else 1,
duration=1)
anim.bind(on_complete=lambda x, y: self.start())
anim.start(self.b1)
anim2 = Animation(size_hint_y=0 if self.b2.size_hint_y > 0.5 else 1,
duration=1,
transition=AnimationTransition.in_bounce)
anim2.start(self.b2)
anim3 = Animation(size_hint_y=0 if self.b3.size_hint_y > 0.5 else 1,
duration=1,
transition=AnimationTransition.in_circ)
anim3.start(self.b3)
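# Hedged sketch (not part of the original file): as noted in ButtonBox.start(),
# production code would typically give each animation its own 'on_complete'
# callback, e.g. one that only restarts the widget whose animation finished.
# Kivy dispatches 'on_complete' with (animation, widget):
#
#     def restart_one(animation, widget):
#         Animation(size_hint_y=0 if widget.size_hint_y > 0.5 else 1,
#                   duration=1).start(widget)
#
#     anim = Animation(size_hint_y=0, duration=1)
#     anim.bind(on_complete=restart_one)
#     anim.start(self.b1)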
class AnimDemo(App):
def build(self):
bb = ButtonBox()
bb.start()
return bb
if __name__ == '__main__':
AnimDemo().run()
| {
"content_hash": "eb496164ccf7b8ee4f0b3fd6c730b9a8",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 76,
"avg_line_length": 25.028985507246375,
"alnum_prop": 0.5715112912565142,
"repo_name": "Zen-CODE/kivybits",
"id": "0e9b8f0d255d874b4f07878fb8418ad5b3bad471",
"size": "1727",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Examples/Properties/main.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "758604"
},
{
"name": "Jupyter Notebook",
"bytes": "48393"
},
{
"name": "Python",
"bytes": "6343"
}
],
"symlink_target": ""
} |
from conure.libs.db import db
from .user import UserAccesser
# user <-> sites
class Sub(db.Document,UserAccesser):
feedsite = db.ReferenceField("FeedSite")
#counter = db.IntField(default=0)
unread_counter = db.IntField(default=0)
start_date = db.DateTimeField()
meta = {
'allow_inheritance': False,
'index_types': False,
'indexes': [
{'fields': ['userid','feedsite'],'unique': True},
]
}
@classmethod
def get_sub_by_userid_feedsite(cls,userid=None,feedsite=None):
return cls.objects(userid=userid,feedsite=feedsite).first()
@classmethod
def get_unread_counter_by_userid_feedsite(cls,userid=None,feedsite=None):
return cls.objects(userid=userid,feedsite=feedsite).only("unread_counter").first().unread_counter
@classmethod
def add_sub(cls,userid,feedsite):
from feed import Feed
self = cls(userid=userid,feedsite=feedsite)
self.userid = userid
self.feedsite = feedsite
temp = feedsite.feed_item_counter
self.unread_counter = temp if temp <=15 else 15
self.start_date = feedsite.get_last_feed(skip=self.unread_counter-1).create_date
self.save()
feeds = Feed.get_feed_items_by_siteid(siteid=feedsite.id,
limit=temp)
for feed in feeds:
ReadFeed.add(feed,userid)
return self
@classmethod
def exist_sub(cls,userid=None,feedsite=None):
return cls.objects(userid=userid,feedsite=feedsite).first() is not None
# all user subscriptions live in the "uncategorized" folder by default
# user <-> folder <- sites <- feed
# only used for the view layer; can be ignored at first
class FeedFolder(db.Document,UserAccesser):
name = db.StringField(required=True)
site_list = db.ListField(db.ReferenceField("FeedSite"))
has_open = db.BooleanField(default=False)
meta = {
'allow_inheritance': False,
'index_types': False,
'indexes': [
{'fields': ['userid']},
]
}
@classmethod
def get_folders_by_userid(cls,userid):
return cls.objects(userid=userid)
def safe_save(self):
self.save()
def safe_delete(self):
self.delete()
@property
def unread_feeds(self):
pass
@property
def unread_feeds_counter(self):
# cap the displayed count at "100+" (show "100+", not e.g. "1000+")
sum_counter = 0
for fs in self.site_list:
t = self.user.get_unread_feeds_on_feedsite(fs)
sum_counter += t
return sum_counter if sum_counter <= 100 else "100+"
# user <-> feeds: the user starred a feed;
# it is possible for a user to star a feed they have not read yet
class StarFeed(db.Document,UserAccesser):
pass
#user <-> feeds, means user has read the feed
class ReadFeed(db.Document,UserAccesser):
feed = db.ReferenceField("Feed")
unread = db.BooleanField(default=True)
meta = {
'allow_inheritance': False,
'index_types': False,
'indexes': [
{'fields': ['feed','userid'], 'unique': True},
]
}
@classmethod
def add(cls,feed,userid):
return cls(feed=feed,userid=userid).save()
def safe_save(self):
self.save()
@classmethod
def get_readfeed_by_feed_and_userid(cls,feed=None, userid=None):
return ReadFeed.objects(feed=feed,userid=userid).first()
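# Hedged usage sketch (not part of the original module; `uid` and `site` stand in
# for an existing user id and FeedSite document, and a MongoDB connection is
# assumed to be configured):
#
#     if not Sub.exist_sub(userid=uid, feedsite=site):
#         Sub.add_sub(uid, site)
#     unread = Sub.get_unread_counter_by_userid_feedsite(userid=uid, feedsite=site)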
| {
"content_hash": "6fd60b00e0586e2f42a0dac9a6918b56",
"timestamp": "",
"source": "github",
"line_count": 127,
"max_line_length": 105,
"avg_line_length": 28.165354330708663,
"alnum_prop": 0.586525020967291,
"repo_name": "zhy0216/google-read-clone",
"id": "26f44d8c5b749a3bc3992490bf677f0cca94e548",
"size": "3579",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "conure/model/user_feed.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "405167"
},
{
"name": "JavaScript",
"bytes": "103219"
},
{
"name": "Python",
"bytes": "17153"
}
],
"symlink_target": ""
} |
import json
import logging
import ssl
import threading
import websocket
from ..compat import queue
from .event_factory import SlackEventDict
def send_messages(q, websocket, keep_alive=True, donotdie=True):
logger = logging.getLogger(__name__)
ping_msg = json.dumps({'type': 'ping'}).encode('utf-8')
while True:
try:
msg = q.get(timeout=3)
websocket.send(msg)
except queue.Empty:
if keep_alive:
websocket.send(ping_msg)
except Exception as e:
if donotdie:
logger.debug("Something went wrong in the message sender: {}".format(e), exc_info=True)
continue
else:
logger.exception(e)
raise e
def receive_messages(q, websocket, ignore_pong=True, donotdie=True):
logger = logging.getLogger(__name__)
while True:
try:
msg = json.loads(websocket.recv())
if msg['type'] == 'pong' and ignore_pong:
continue
else:
q.put(msg)
except ssl.SSLError as e:
if e.errno == 2:
logger.debug("Something went wrong in the message receiver: {}".format(e), exc_info=True)
continue
except Exception as e:
if donotdie:
logger.debug("Something went wrong in the message receiver: {}".format(e), exc_info=True)
continue
else:
logger.exception(e)
raise e
class SlackRTMClient(object):
"""The RTM Client
"""
def __init__(self, token, url, event_factory=SlackEventDict, client=None, keep_alive=True, ignore_pong=True, donotdie=True):
"""
:param token: A :class:`str` token
:param url: A :class:`str` the base url for slack
:param event_factory: A class to process events coming off the real time messaging api
:param client: A :class:`slackly.SlackClient` or None.
:param keep_alive: The option to have the websocket automatically written to at least every 3 seconds
:param ignore_pong: Whether to emit or squash the response to pings
"""
if client is None:
from .api_client import SlackClient
client = SlackClient(token=token)
self.client = client
self.token = token
self.url = url
self.event_factory = event_factory
self.donotdie = donotdie
self.websocket = None
self.send_queue = queue.Queue()
self.send_daemon = None
self.keep_alive = keep_alive
self.receive_queue = queue.Queue()
self.receive_daemon = None
self.ignore_pong = ignore_pong
self.connected = False
@classmethod
def from_response(cls, endpoint, token, response):
url = response['url']
rtm_client = cls(token=token, url=url)
rtm_client.connect()
return rtm_client
@classmethod
def from_token(cls, token):
from .api_client import SlackClient
from ..api import SlackAPI
client = SlackClient(token=token)
api = SlackAPI()
api.bind = client
return api.rtm.connect()
@classmethod
def from_client(cls, client):
from ..api import SlackAPI
api = SlackAPI()
api.bind = client
rtm_client = api.rtm.connect()
rtm_client.client = client
return rtm_client
def connect(self):
self.websocket = websocket.create_connection(self.url)
self.websocket.sock.setblocking(True)
self.send_daemon = threading.Thread(target=send_messages, args=(self.send_queue, self.websocket, self.keep_alive, self.donotdie), daemon=True)
self.send_daemon.start()
self.receive_daemon = threading.Thread(target=receive_messages, args=(self.receive_queue, self.websocket, self.ignore_pong, self.donotdie), daemon=True)
self.receive_daemon.start()
self.connected = True
def get_event(self, timeout=None):
try:
return self.event_factory(self.receive_queue.get(timeout=timeout))
except queue.Empty:
pass
def get_events(self):
while True:
try:
yield self.event_factory(self.receive_queue.get(block=False))
except queue.Empty:
break
def get_events_forever(self):
while True:
yield self.get_event()
def send_event(self, event):
event_bytes = json.dumps(event).encode('utf-8')
self.send_queue.put(event_bytes)
def __repr__(self):
inbox = self.receive_queue.qsize()
outbox = self.send_queue.qsize()
token = "...{}".format(self.token[-5:])
return "{cls.__class__.__name__}(token='{token}' | " \
"connected: {cls.connected} | " \
"inbox: {inbox} | " \
"outbox: {outbox})".format(cls=self, token=token, inbox=inbox, outbox=outbox)
| {
"content_hash": "960a0ab267253933fd633130ae18ee0c",
"timestamp": "",
"source": "github",
"line_count": 157,
"max_line_length": 150,
"avg_line_length": 32.21656050955414,
"alnum_prop": 0.582839066824832,
"repo_name": "huntcsg/slackly",
"id": "18e7d631360ffe470d42fc5ab4ecbf832ad66379",
"size": "5058",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/slackly/client/rtm_client.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "362475"
},
{
"name": "Shell",
"bytes": "1497"
}
],
"symlink_target": ""
} |
from math import sqrt, acos, pi
from decimal import Decimal, getcontext
getcontext().prec = 30
class Vector(object):
CANNOT_NORMALIZE_ZERO_VECTOR_MESG = 'Cannot normalize the zero vector'
def __init__(self, coordinates):
try:
if not coordinates:
raise ValueError
self.coordinates = tuple([Decimal(x) for x in coordinates])
self.dimension = len(self.coordinates)
except ValueError:
raise ValueError('The coordinates must be nonempty')
except TypeError:
raise TypeError('The coordinates must be an iterable')
def __str__(self):
return 'Vector: {}'.format(self.coordinates)
def __eq__(self, v):
return self.coordinates == v.coordinates
def plus(self, v):
new_coordinates = [x+y for x,y in zip(self.coordinates, v.coordinates)]
return Vector(new_coordinates)
def minus(self, v):
new_coordinates = [x-y for x,y in zip(self.coordinates, v.coordinates)]
return Vector(new_coordinates)
def times_scalar(self, c):
new_coordinates = [x*c for x in self.coordinates]
return Vector(new_coordinates)
def magnitude(self):
coordinates_squared = [x**2 for x in self.coordinates]
return sqrt(sum(coordinates_squared))
def normalized(self):
try:
magnitude = self.magnitude()
return self.times_scalar(Decimal('1.0')/Decimal(magnitude))
except ZeroDivisionError:
raise Exception('Cannot normalize the zero vector')
def dot(self, v):
return sum([x*y for x,y in zip(self.coordinates, v.coordinates)])
# angle_with returns radians by default, or degrees if in_degrees=True
def angle_with(self, v, in_degrees=False):
try:
u1 = self.normalized()
u2 = v.normalized()
angle_in_radians = acos(u1.dot(u2))
if in_degrees:
degrees_per_radian = 180. / pi
return angle_in_radians * degrees_per_radian
else:
return angle_in_radians
except Exception as e:
if str(e) == self.CANNOT_NORMALIZE_ZERO_VECTOR_MESG:
raise Exception('Cannot compute an angle with the zero vector')
else:
raise e
v = Vector(['7.887', '4.138'])
w = Vector(['-8.802', '6.776'])
print(v.dot(w))
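# Hedged extra examples (not in the original): the angle between v and w via
# angle_with() defined above, in radians and in degrees.
print(v.angle_with(w))
print(v.angle_with(w, in_degrees=True))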
| {
"content_hash": "5b045545b8a24a5dcaecb733f0bbdb27",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 79,
"avg_line_length": 29.51851851851852,
"alnum_prop": 0.5943120033458804,
"repo_name": "famunity/deep-learning-term1",
"id": "4067d024ba4b34e20928db9e26b05f314c05b5e4",
"size": "2391",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "3LinearAlgebra/lesson1/vector.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "903"
},
{
"name": "HTML",
"bytes": "375105"
},
{
"name": "Jupyter Notebook",
"bytes": "42842"
},
{
"name": "Python",
"bytes": "60143"
}
],
"symlink_target": ""
} |
"""
This module defines the FeffInputSet abstract base class and a concrete
implementation for the Materials Project. The basic concept behind an input
set is to specify a scheme to generate a consistent set of Feff inputs from a
structure without further user intervention. This ensures comparability across
runs.
"""
import abc
import logging
import os
import sys
from copy import deepcopy
import numpy as np
from monty.json import MSONable
from monty.os.path import zpath
from monty.serialization import loadfn
from pymatgen.io.feff.inputs import Atoms, Header, Potential, Tags
__author__ = "Kiran Mathew"
__credits__ = "Alan Dozier, Anubhav Jain, Shyue Ping Ong"
__version__ = "1.1"
__maintainer__ = "Kiran Mathew"
__email__ = "kmathew@lbl.gov"
__date__ = "Sept 10, 2016"
MODULE_DIR = os.path.dirname(os.path.abspath(__file__))
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter("%(asctime)s: %(levelname)s: %(name)s: %(message)s")
sh = logging.StreamHandler(stream=sys.stdout)
sh.setFormatter(formatter)
logger.addHandler(sh)
class AbstractFeffInputSet(MSONable, metaclass=abc.ABCMeta):
"""
Abstract base class representing a set of Feff input parameters.
The idea is that using a FeffInputSet, a complete set of input files
(feffPOT, feffXANES, feffEXAFS, ATOMS, feff.inp)
can be generated in an automated fashion for any structure.
"""
@abc.abstractmethod
def header(self):
"""
Returns header to be used in feff.inp file from a pymatgen structure
"""
@property
@abc.abstractmethod
def atoms(self):
"""
Returns Atoms string from a structure that goes in feff.inp file.
Returns:
Atoms object.
"""
@property
@abc.abstractmethod
def tags(self):
"""
Returns standard calculation parameters.
"""
return
@property
@abc.abstractmethod
def potential(self):
"""
Returns POTENTIAL section used in feff.inp from a structure.
"""
def all_input(self):
"""
Returns all input files as a dict of {filename: feffio object}
"""
d = {"HEADER": self.header(), "PARAMETERS": self.tags}
if "RECIPROCAL" not in self.tags:
d.update({"POTENTIALS": self.potential, "ATOMS": self.atoms})
return d
def write_input(self, output_dir=".", make_dir_if_not_present=True):
"""
Writes a set of FEFF input to a directory.
Args:
output_dir: Directory to output the FEFF input files
make_dir_if_not_present: Set to True if you want the directory (
and the whole path) to be created if it is not present.
"""
if make_dir_if_not_present and not os.path.exists(output_dir):
os.makedirs(output_dir)
feff = self.all_input()
feff_input = "\n\n".join(str(feff[k]) for k in ["HEADER", "PARAMETERS", "POTENTIALS", "ATOMS"] if k in feff)
for k, v in feff.items():
with open(os.path.join(output_dir, k), "w") as f:
f.write(str(v))
with open(os.path.join(output_dir, "feff.inp"), "w") as f:
f.write(feff_input)
# write the structure to cif file
if "ATOMS" not in feff:
self.atoms.struct.to(fmt="cif", filename=os.path.join(output_dir, feff["PARAMETERS"]["CIF"]))
class FEFFDictSet(AbstractFeffInputSet):
"""
Standard implementation of FeffInputSet, which can be extended by specific
implementations.
"""
def __init__(
self,
absorbing_atom,
structure,
radius,
config_dict,
edge="K",
spectrum="EXAFS",
nkpts=1000,
user_tag_settings=None,
):
"""
Args:
absorbing_atom (str/int): absorbing atom symbol or site index
structure (Structure): input structure
radius (float): cluster radius
config_dict (dict): control tag settings dict
edge (str): absorption edge
spectrum (str): type of spectrum to calculate, available options :
EXAFS, XANES, DANES, XMCD, ELNES, EXELFS, FPRIME, NRIXS, XES.
The default is EXAFS.
nkpts (int): Total number of kpoints in the brillouin zone. Used
only when feff is run in the reciprocal space mode.
user_tag_settings (dict): override default tag settings. To delete
tags, set the key '_del' in the user_tag_settings.
eg: user_tag_settings={"_del": ["COREHOLE", "EXCHANGE"]}
"""
self.absorbing_atom = absorbing_atom
self.structure = structure
self.radius = radius
self.config_dict = deepcopy(config_dict)
self.edge = edge
self.spectrum = spectrum
self.nkpts = nkpts
self.user_tag_settings = user_tag_settings or {}
self.config_dict["EDGE"] = self.edge
self.config_dict.update(self.user_tag_settings)
if "_del" in self.user_tag_settings:
for tag in self.user_tag_settings["_del"]:
if tag in self.config_dict:
del self.config_dict[tag]
del self.config_dict["_del"]
# k-space feff only for small systems. The hardcoded system size in
# feff is around 14 atoms.
self.small_system = len(self.structure) < 14 and "EXAFS" not in self.config_dict
def header(self, source="", comment=""):
"""
Creates header string from structure object
Args:
source: Source identifier used to create structure, can be defined
however user wants to organize structures, calculations, etc.
example would be Materials Project material ID number.
comment: comment to include in header
Returns:
Header
"""
return Header(self.structure, source, comment)
@property
def tags(self):
"""
FEFF job parameters.
Returns:
Tags
"""
if "RECIPROCAL" in self.config_dict:
if self.small_system:
self.config_dict["CIF"] = f"{self.structure.formula.replace(' ', '')}.cif"
self.config_dict["TARGET"] = self.atoms.center_index + 1
self.config_dict["COREHOLE"] = "RPA"
logger.warning("Setting COREHOLE = RPA for K-space calculation")
if not self.config_dict.get("KMESH", None):
abc = self.structure.lattice.abc
mult = (self.nkpts * abc[0] * abc[1] * abc[2]) ** (1 / 3)
self.config_dict["KMESH"] = [int(round(mult / l)) for l in abc]
else:
logger.warning(
"Large system(>=14 atoms) or EXAFS calculation, \
removing K-space settings"
)
del self.config_dict["RECIPROCAL"]
self.config_dict.pop("CIF", None)
self.config_dict.pop("TARGET", None)
self.config_dict.pop("KMESH", None)
self.config_dict.pop("STRFAC", None)
return Tags(self.config_dict)
@property
def potential(self):
"""
FEFF potential
Returns:
Potential
"""
return Potential(self.structure, self.absorbing_atom)
@property
def atoms(self):
"""
absorber + the rest
Returns:
Atoms
"""
return Atoms(self.structure, self.absorbing_atom, self.radius)
def __str__(self):
output = [self.spectrum]
output.extend([f"{k} = {v}" for k, v in self.config_dict.items()])
output.append("")
return "\n".join(output)
@staticmethod
def from_directory(input_dir):
"""
Read in a set of FEFF input files from a directory, which is
useful when existing FEFF input needs some adjustment.
"""
sub_d = {}
for fname, ftype in [("HEADER", Header), ("PARAMETERS", Tags)]:
fullzpath = zpath(os.path.join(input_dir, fname))
sub_d[fname.lower()] = ftype.from_file(fullzpath)
# Generation of FEFFDict set requires absorbing atom, need to search
# the index of absorption atom in the structure according to the
# distance matrix and shell species information contained in feff.inp
absorber_index = []
radius = None
feffinp = zpath(os.path.join(input_dir, "feff.inp"))
if "RECIPROCAL" not in sub_d["parameters"]:
input_atoms = Atoms.cluster_from_file(feffinp)
shell_species = np.array([x.species_string for x in input_atoms])
# The first row of the distance matrix holds the distances from the absorber
# to the rest of the atoms
distance_matrix = input_atoms.distance_matrix[0, :]
# Get radius value
from math import ceil
radius = int(
ceil(
input_atoms.get_distance(
input_atoms.index(input_atoms[0]),
input_atoms.index(input_atoms[-1]),
)
)
)
for site_index, site in enumerate(sub_d["header"].struct):
if site.specie == input_atoms[0].specie:
site_atoms = Atoms(sub_d["header"].struct, absorbing_atom=site_index, radius=radius)
site_distance = np.array(site_atoms.get_lines())[:, 5].astype(np.float64)
site_shell_species = np.array(site_atoms.get_lines())[:, 4]
shell_overlap = min(shell_species.shape[0], site_shell_species.shape[0])
if np.allclose(distance_matrix[:shell_overlap], site_distance[:shell_overlap]) and np.all(
site_shell_species[:shell_overlap] == shell_species[:shell_overlap]
):
absorber_index.append(site_index)
if "RECIPROCAL" in sub_d["parameters"]:
absorber_index = sub_d["parameters"]["TARGET"]
absorber_index[0] = int(absorber_index[0]) - 1
# Generate the input set
if "XANES" in sub_d["parameters"]:
CONFIG = loadfn(os.path.join(MODULE_DIR, "MPXANESSet.yaml"))
if radius is None:
radius = 10
return FEFFDictSet(
absorber_index[0],
sub_d["header"].struct,
radius=radius,
config_dict=CONFIG,
edge=sub_d["parameters"]["EDGE"],
nkpts=1000,
user_tag_settings=sub_d["parameters"],
)
raise ValueError("Bad input directory.")
class MPXANESSet(FEFFDictSet):
"""
FeffDictSet for XANES spectroscopy.
"""
CONFIG = loadfn(os.path.join(MODULE_DIR, "MPXANESSet.yaml"))
def __init__(
self,
absorbing_atom,
structure,
edge="K",
radius=10.0,
nkpts=1000,
user_tag_settings=None,
):
"""
Args:
absorbing_atom (str/int): absorbing atom symbol or site index
structure (Structure): input
edge (str): absorption edge
radius (float): cluster radius in Angstroms.
nkpts (int): Total number of kpoints in the brillouin zone. Used
only when feff is run in the reciprocal space mode.
user_tag_settings (dict): override default tag settings
"""
super().__init__(
absorbing_atom,
structure,
radius,
MPXANESSet.CONFIG,
edge=edge,
spectrum="XANES",
nkpts=nkpts,
user_tag_settings=user_tag_settings,
)
class MPEXAFSSet(FEFFDictSet):
"""
FeffDictSet for EXAFS spectroscopy.
"""
CONFIG = loadfn(os.path.join(MODULE_DIR, "MPEXAFSSet.yaml"))
def __init__(
self,
absorbing_atom,
structure,
edge="K",
radius=10.0,
nkpts=1000,
user_tag_settings=None,
):
"""
Args:
absorbing_atom (str/int): absorbing atom symbol or site index
structure (Structure): input structure
edge (str): absorption edge
radius (float): cluster radius in Angstroms.
nkpts (int): Total number of kpoints in the brillouin zone. Used
only when feff is run in the reciprocal space mode.
user_tag_settings (dict): override default tag settings
"""
super().__init__(
absorbing_atom,
structure,
radius,
MPEXAFSSet.CONFIG,
edge=edge,
spectrum="EXAFS",
nkpts=nkpts,
user_tag_settings=user_tag_settings,
)
class MPEELSDictSet(FEFFDictSet):
"""
FeffDictSet for ELNES spectroscopy.
"""
def __init__(
self,
absorbing_atom,
structure,
edge,
spectrum,
radius,
beam_energy,
beam_direction,
collection_angle,
convergence_angle,
config_dict,
user_eels_settings=None,
nkpts=1000,
user_tag_settings=None,
):
"""
Args:
absorbing_atom (str/int): absorbing atom symbol or site index
structure (Structure): input structure
edge (str): absorption edge
spectrum (str): ELNES or EXELFS
radius (float): cluster radius in Angstroms.
beam_energy (float): Incident beam energy in keV
beam_direction (list): Incident beam direction. If None, the
cross section will be averaged.
collection_angle (float): Detector collection angle in mrad.
convergence_angle (float): Beam convergence angle in mrad.
user_eels_settings (dict): override default EELS config.
See MPELNESSet.yaml for supported keys.
nkpts (int): Total number of kpoints in the brillouin zone. Used
only when feff is run in the reciprocal space mode.
user_tag_settings (dict): override default tag settings
"""
self.beam_energy = beam_energy
self.beam_direction = beam_direction
self.collection_angle = collection_angle
self.convergence_angle = convergence_angle
self.user_eels_settings = user_eels_settings
eels_config_dict = deepcopy(config_dict)
if beam_direction:
beam_energy_list = [beam_energy, 0, 1, 1]
eels_config_dict[spectrum]["BEAM_DIRECTION"] = beam_direction
else:
beam_energy_list = [beam_energy, 1, 0, 1]
del eels_config_dict[spectrum]["BEAM_DIRECTION"]
eels_config_dict[spectrum]["BEAM_ENERGY"] = beam_energy_list
eels_config_dict[spectrum]["ANGLES"] = [collection_angle, convergence_angle]
if user_eels_settings:
eels_config_dict[spectrum].update(user_eels_settings)
super().__init__(
absorbing_atom,
structure,
radius,
eels_config_dict,
edge=edge,
spectrum=spectrum,
nkpts=nkpts,
user_tag_settings=user_tag_settings,
)
class MPELNESSet(MPEELSDictSet):
"""
FeffDictSet for ELNES spectroscopy.
"""
CONFIG = loadfn(os.path.join(MODULE_DIR, "MPELNESSet.yaml"))
def __init__(
self,
absorbing_atom,
structure,
edge="K",
radius=10.0,
beam_energy=100,
beam_direction=None,
collection_angle=1,
convergence_angle=1,
user_eels_settings=None,
nkpts=1000,
user_tag_settings=None,
):
"""
Args:
absorbing_atom (str/int): absorbing atom symbol or site index
structure (Structure): input structure
edge (str): absorption edge
radius (float): cluster radius in Angstroms.
beam_energy (float): Incident beam energy in keV
beam_direction (list): Incident beam direction. If None, the
cross section will be averaged.
collection_angle (float): Detector collection angle in mrad.
convergence_angle (float): Beam convergence angle in mrad.
user_eels_settings (dict): override default EELS config.
See MPELNESSet.yaml for supported keys.
nkpts (int): Total number of kpoints in the brillouin zone. Used
only when feff is run in the reciprocal space mode.
user_tag_settings (dict): override default tag settings
"""
super().__init__(
absorbing_atom,
structure,
edge,
"ELNES",
radius,
beam_energy,
beam_direction,
collection_angle,
convergence_angle,
MPELNESSet.CONFIG,
user_eels_settings=user_eels_settings,
nkpts=nkpts,
user_tag_settings=user_tag_settings,
)
class MPEXELFSSet(MPEELSDictSet):
"""
FeffDictSet for EXELFS spectroscopy.
"""
CONFIG = loadfn(os.path.join(MODULE_DIR, "MPEXELFSSet.yaml"))
def __init__(
self,
absorbing_atom,
structure,
edge="K",
radius=10.0,
beam_energy=100,
beam_direction=None,
collection_angle=1,
convergence_angle=1,
user_eels_settings=None,
nkpts=1000,
user_tag_settings=None,
):
"""
Args:
absorbing_atom (str/int): absorbing atom symbol or site index
structure (Structure): input structure
edge (str): absorption edge
radius (float): cluster radius in Angstroms.
beam_energy (float): Incident beam energy in keV
beam_direction (list): Incident beam direction. If None, the
cross section will be averaged.
collection_angle (float): Detector collection angle in mrad.
convergence_angle (float): Beam convergence angle in mrad.
user_eels_settings (dict): override default EELS config.
See MPEXELFSSet.yaml for supported keys.
nkpts (int): Total number of kpoints in the brillouin zone. Used
only when feff is run in the reciprocal space mode.
user_tag_settings (dict): override default tag settings
"""
super().__init__(
absorbing_atom,
structure,
edge,
"EXELFS",
radius,
beam_energy,
beam_direction,
collection_angle,
convergence_angle,
MPEXELFSSet.CONFIG,
user_eels_settings=user_eels_settings,
nkpts=nkpts,
user_tag_settings=user_tag_settings,
)
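# A hedged usage sketch (not part of the original module): generate XANES input
# files for the first site of a structure read from a CIF file. The file and
# output directory names are placeholders.
if __name__ == "__main__":
    from pymatgen.core import Structure
    structure = Structure.from_file("my_structure.cif")  # placeholder path
    xanes = MPXANESSet(absorbing_atom=0, structure=structure, edge="K", radius=10.0)
    xanes.write_input(output_dir="feff_xanes_run")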
| {
"content_hash": "efd27d0cebe957badd8a7188c402bf24",
"timestamp": "",
"source": "github",
"line_count": 571,
"max_line_length": 116,
"avg_line_length": 33.486865148861646,
"alnum_prop": 0.5665498666387742,
"repo_name": "fraricci/pymatgen",
"id": "d4c36025dd65b4e702c6c405cd520d7ee39b131f",
"size": "19215",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pymatgen/io/feff/sets.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "87"
},
{
"name": "CSS",
"bytes": "7572"
},
{
"name": "Cython",
"bytes": "38793"
},
{
"name": "HTML",
"bytes": "12642493"
},
{
"name": "OpenEdge ABL",
"bytes": "312"
},
{
"name": "Python",
"bytes": "9195124"
},
{
"name": "Roff",
"bytes": "1407429"
},
{
"name": "Shell",
"bytes": "12027"
}
],
"symlink_target": ""
} |
import copy as cp
import numpy as np
from scipy import linalg
from .mixin import TransformerMixin
from ..cov import _regularized_covariance
class CSP(TransformerMixin):
"""M/EEG signal decomposition using the Common Spatial Patterns (CSP).
This object can be used as a supervised decomposition to estimate
spatial filters for feature extraction in a 2 class decoding problem.
CSP in the context of EEG was first described in [1]; a comprehensive
tutorial on CSP can be found in [2].
Parameters
----------
n_components : int (default 4)
The number of components to decompose M/EEG signals.
This number should be set by cross-validation.
reg : float | str | None (default None)
if not None, allow regularization for covariance estimation
if float, shrinkage covariance is used (0 <= shrinkage <= 1).
if str, optimal shrinkage using Ledoit-Wolf Shrinkage ('ledoit_wolf')
or Oracle Approximating Shrinkage ('oas').
log : bool (default True)
If true, apply log to standardize the features.
If false, features are just z-scored.
Attributes
----------
filters_ : ndarray, shape (n_channels, n_channels)
If fit, the CSP components used to decompose the data, else None.
patterns_ : ndarray, shape (n_channels, n_channels)
If fit, the CSP patterns used to restore M/EEG signals, else None.
mean_ : ndarray, shape (n_channels,)
If fit, the mean squared power for each component.
std_ : ndarray, shape (n_channels,)
If fit, the std squared power for each component.
References
----------
[1] Zoltan J. Koles, Michael S. Lazar, Steven Z. Zhou. Spatial Patterns
Underlying Population Differences in the Background EEG. Brain
Topography 2(4), 275-284, 1990.
[2] Benjamin Blankertz, Ryota Tomioka, Steven Lemm, Motoaki Kawanabe,
Klaus-Robert Müller. Optimizing Spatial Filters for Robust EEG
Single-Trial Analysis. IEEE Signal Processing Magazine 25(1), 41-56,
2008.
"""
def __init__(self, n_components=4, reg=None, log=True):
"""Init of CSP."""
self.n_components = n_components
self.reg = reg
self.log = log
self.filters_ = None
self.patterns_ = None
self.mean_ = None
self.std_ = None
def fit(self, epochs_data, y):
"""Estimate the CSP decomposition on epochs.
Parameters
----------
epochs_data : ndarray, shape (n_epochs, n_channels, n_times)
The data to estimate the CSP on.
y : array, shape (n_epochs,)
The class for each epoch.
Returns
-------
self : instance of CSP
Returns the modified instance.
"""
if not isinstance(epochs_data, np.ndarray):
raise ValueError("epochs_data should be of type ndarray (got %s)."
% type(epochs_data))
epochs_data = np.atleast_3d(epochs_data)
# check number of epochs
if epochs_data.shape[0] != len(y):
raise ValueError("n_epochs must be the same for epochs_data and y")
classes = np.unique(y)
if len(classes) != 2:
raise ValueError("More than two different classes in the data.")
# concatenate epochs
class_1 = np.transpose(epochs_data[y == classes[0]],
[1, 0, 2]).reshape(epochs_data.shape[1], -1)
class_2 = np.transpose(epochs_data[y == classes[1]],
[1, 0, 2]).reshape(epochs_data.shape[1], -1)
cov_1 = _regularized_covariance(class_1, reg=self.reg)
cov_2 = _regularized_covariance(class_2, reg=self.reg)
cov_1 /= np.trace(cov_1)
cov_2 /= np.trace(cov_2)
e, w = linalg.eigh(cov_1, cov_1 + cov_2)
n_vals = len(e)
# Rearrange vectors
ind = np.empty(n_vals, dtype=int)
ind[::2] = np.arange(n_vals - 1, n_vals // 2 - 1, -1)
ind[1::2] = np.arange(0, n_vals // 2)
w = w[:, ind] # first, last, second, second last, third, ...
self.filters_ = w.T
self.patterns_ = linalg.pinv(w)
pick_filters = self.filters_[:self.n_components]
X = np.asarray([np.dot(pick_filters, epoch) for epoch in epochs_data])
# compute features (mean band power)
X = (X ** 2).mean(axis=-1)
# To standardize features
self.mean_ = X.mean(axis=0)
self.std_ = X.std(axis=0)
return self
def transform(self, epochs_data, y=None):
"""Estimate epochs sources given the CSP filters.
Parameters
----------
epochs_data : array, shape (n_epochs, n_channels, n_times)
The data.
y : None
Not used.
Returns
-------
X : ndarray of shape (n_epochs, n_sources)
The CSP features averaged over time.
"""
if not isinstance(epochs_data, np.ndarray):
raise ValueError("epochs_data should be of type ndarray (got %s)."
% type(epochs_data))
if self.filters_ is None:
raise RuntimeError('No filters available. Please first fit CSP '
'decomposition.')
pick_filters = self.filters_[:self.n_components]
X = np.asarray([np.dot(pick_filters, epoch) for epoch in epochs_data])
# compute features (mean band power)
X = (X ** 2).mean(axis=-1)
if self.log:
X = np.log(X)
else:
X -= self.mean_
X /= self.std_
return X
def plot_patterns(self, info, components=None, ch_type=None, layout=None,
vmin=None, vmax=None, cmap='RdBu_r', sensors=True,
colorbar=True, scale=None, scale_time=1, unit=None,
res=64, size=1, cbar_fmt='%3.1f',
name_format='CSP%01d', proj=False, show=True,
show_names=False, title=None, mask=None,
mask_params=None, outlines='head', contours=6,
image_interp='bilinear', average=None, head_pos=None):
"""Plot topographic patterns of CSP components.
The CSP patterns explain how the measured data was generated
from the neural sources (a.k.a. the forward model).
Parameters
----------
info : instance of Info
Info dictionary of the epochs used to fit CSP.
If not possible, consider using ``create_info``.
components : float | array of floats | None.
The CSP patterns to plot. If None, n_components will be shown.
ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg' | None
The channel type to plot. For 'grad', the gradiometers are
collected in pairs and the RMS for each pair is plotted.
If None, then channels are chosen in the order given above.
layout : None | Layout
Layout instance specifying sensor positions (does not need to be
specified for Neuromag data). If possible, the correct layout file
is inferred from the data; if no appropriate layout file was found
the layout is automatically generated from the sensor locations.
vmin : float | callable
The value specifying the lower bound of the color range.
If None, and vmax is None, -vmax is used. Else np.min(data).
If callable, the output equals vmin(data).
vmax : float | callable
The value specifying the upper bound of the color range.
If None, the maximum absolute value is used. If vmin is None,
but vmax is not, defaults to np.min(data).
If callable, the output equals vmax(data).
cmap : matplotlib colormap
Colormap. For magnetometers and eeg defaults to 'RdBu_r', else
'Reds'.
sensors : bool | str
Add markers for sensor locations to the plot. Accepts matplotlib
plot format string (e.g., 'r+' for red plusses). If True,
a circle will be used (via .add_artist). Defaults to True.
colorbar : bool
Plot a colorbar.
scale : dict | float | None
Scale the data for plotting. If None, defaults to 1e6 for eeg, 1e13
for grad and 1e15 for mag.
scale_time : float | None
Scale the time labels. Defaults to 1.
unit : dict | str | None
The unit of the channel type used for colorbar label. If
scale is None the unit is automatically determined.
res : int
The resolution of the topomap image (n pixels along each side).
size : float
Side length per topomap in inches.
cbar_fmt : str
String format for colorbar values.
name_format : str
String format for topomap values. Defaults to "CSP%01d"
proj : bool | 'interactive'
If true SSP projections are applied before display.
If 'interactive', a check box for reversible selection
of SSP projection vectors will be show.
show : bool
Show figure if True.
show_names : bool | callable
If True, show channel names on top of the map. If a callable is
passed, channel names will be formatted using the callable; e.g.,
to delete the prefix 'MEG ' from all channel names, pass the
function lambda x: x.replace('MEG ', ''). If `mask` is not None,
only significant sensors will be shown.
title : str | None
Title. If None (default), no title is displayed.
mask : ndarray of bool, shape (n_channels, n_times) | None
The channels to be marked as significant at a given time point.
Indices set to `True` will be considered. Defaults to None.
mask_params : dict | None
Additional plotting parameters for plotting significant sensors.
Default (None) equals::
dict(marker='o', markerfacecolor='w', markeredgecolor='k',
linewidth=0, markersize=4)
outlines : 'head' | 'skirt' | dict | None
The outlines to be drawn. If 'head', the default head scheme will
be drawn. If 'skirt' the head scheme will be drawn, but sensors are
allowed to be plotted outside of the head circle. If dict, each key
refers to a tuple of x and y positions, the values in 'mask_pos'
will serve as image mask, and the 'autoshrink' (bool) field will
trigger automated shrinking of the positions due to points outside
the outline. Alternatively, a matplotlib patch object can be passed
for advanced masking options, either directly or as a function that
returns patches (required for multi-axis plots). If None, nothing
will be drawn. Defaults to 'head'.
contours : int | False | None
The number of contour lines to draw.
If 0, no contours will be drawn.
image_interp : str
The image interpolation to be used.
All matplotlib options are accepted.
average : float | None
The time window around a given time to be used for averaging
(seconds). For example, 0.01 would translate into window that
starts 5 ms before and ends 5 ms after a given time point.
Defaults to None, which means no averaging.
head_pos : dict | None
If None (default), the sensors are positioned such that they span
the head circle. If dict, can have entries 'center' (tuple) and
'scale' (tuple) for what the center and scale of the head
should be relative to the electrode locations.
Returns
-------
fig : instance of matplotlib.figure.Figure
The figure.
"""
from .. import EvokedArray
if components is None:
components = np.arange(self.n_components)
# set sampling frequency to have 1 component per time point
info = cp.deepcopy(info)
info['sfreq'] = 1.
# create an evoked
patterns = EvokedArray(self.patterns_.T, info, tmin=0)
# then call plot_topomap
return patterns.plot_topomap(times=components, ch_type=ch_type,
layout=layout, vmin=vmin, vmax=vmax,
cmap=cmap, colorbar=colorbar, res=res,
cbar_fmt=cbar_fmt, sensors=sensors,
scale=1, scale_time=1, unit='a.u.',
time_format=name_format, size=size,
show_names=show_names,
mask_params=mask_params,
mask=mask, outlines=outlines,
contours=contours,
image_interp=image_interp, show=show,
head_pos=head_pos)
def plot_filters(self, info, components=None, ch_type=None, layout=None,
vmin=None, vmax=None, cmap='RdBu_r', sensors=True,
colorbar=True, scale=None, scale_time=1, unit=None,
res=64, size=1, cbar_fmt='%3.1f',
name_format='CSP%01d', proj=False, show=True,
show_names=False, title=None, mask=None,
mask_params=None, outlines='head', contours=6,
image_interp='bilinear', average=None, head_pos=None):
"""Plot topographic filters of CSP components.
The CSP filters are used to extract discriminant neural sources from
the measured data (a.k.a. the backward model).
Parameters
----------
info : instance of Info
Info dictionary of the epochs used to fit CSP.
If not possible, consider using ``create_info``.
components : float | array of floats | None.
The CSP patterns to plot. If None, n_components will be shown.
ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg' | None
The channel type to plot. For 'grad', the gradiometers are
collected in pairs and the RMS for each pair is plotted.
If None, then channels are chosen in the order given above.
layout : None | Layout
Layout instance specifying sensor positions (does not need to be
specified for Neuromag data). If possible, the correct layout file
is inferred from the data; if no appropriate layout file was found
the layout is automatically generated from the sensor locations.
vmin : float | callable
The value specifying the lower bound of the color range.
If None, and vmax is None, -vmax is used. Else np.min(data).
If callable, the output equals vmin(data).
vmax : float | callable
The value specifying the upper bound of the color range.
If None, the maximum absolute value is used. If vmin is None,
but vmax is not, defaults to np.min(data).
If callable, the output equals vmax(data).
cmap : matplotlib colormap
Colormap. For magnetometers and eeg defaults to 'RdBu_r', else
'Reds'.
sensors : bool | str
Add markers for sensor locations to the plot. Accepts matplotlib
plot format string (e.g., 'r+' for red plusses). If True,
a circle will be used (via .add_artist). Defaults to True.
colorbar : bool
Plot a colorbar.
scale : dict | float | None
Scale the data for plotting. If None, defaults to 1e6 for eeg, 1e13
for grad and 1e15 for mag.
scale_time : float | None
Scale the time labels. Defaults to 1.
unit : dict | str | None
The unit of the channel type used for colorbar label. If
scale is None the unit is automatically determined.
res : int
The resolution of the topomap image (n pixels along each side).
size : float
Side length per topomap in inches.
cbar_fmt : str
String format for colorbar values.
name_format : str
String format for topomap values. Defaults to "CSP%01d"
proj : bool | 'interactive'
If true SSP projections are applied before display.
If 'interactive', a check box for reversible selection
of SSP projection vectors will be show.
show : bool
Show figure if True.
show_names : bool | callable
If True, show channel names on top of the map. If a callable is
passed, channel names will be formatted using the callable; e.g.,
to delete the prefix 'MEG ' from all channel names, pass the
function lambda x: x.replace('MEG ', ''). If `mask` is not None,
only significant sensors will be shown.
title : str | None
Title. If None (default), no title is displayed.
mask : ndarray of bool, shape (n_channels, n_times) | None
The channels to be marked as significant at a given time point.
Indices set to `True` will be considered. Defaults to None.
mask_params : dict | None
Additional plotting parameters for plotting significant sensors.
Default (None) equals::
dict(marker='o', markerfacecolor='w', markeredgecolor='k',
linewidth=0, markersize=4)
outlines : 'head' | 'skirt' | dict | None
The outlines to be drawn. If 'head', the default head scheme will
be drawn. If 'skirt' the head scheme will be drawn, but sensors are
allowed to be plotted outside of the head circle. If dict, each key
refers to a tuple of x and y positions, the values in 'mask_pos'
will serve as image mask, and the 'autoshrink' (bool) field will
trigger automated shrinking of the positions due to points outside
the outline. Alternatively, a matplotlib patch object can be passed
for advanced masking options, either directly or as a function that
returns patches (required for multi-axis plots). If None, nothing
will be drawn. Defaults to 'head'.
contours : int | False | None
The number of contour lines to draw.
If 0, no contours will be drawn.
image_interp : str
The image interpolation to be used.
All matplotlib options are accepted.
average : float | None
The time window around a given time to be used for averaging
(seconds). For example, 0.01 would translate into window that
starts 5 ms before and ends 5 ms after a given time point.
Defaults to None, which means no averaging.
head_pos : dict | None
If None (default), the sensors are positioned such that they span
the head circle. If dict, can have entries 'center' (tuple) and
'scale' (tuple) for what the center and scale of the head
should be relative to the electrode locations.
Returns
-------
fig : instance of matplotlib.figure.Figure
The figure.
"""
from .. import EvokedArray
if components is None:
components = np.arange(self.n_components)
# set sampling frequency to have 1 component per time point
info = cp.deepcopy(info)
info['sfreq'] = 1.
# create an evoked
filters = EvokedArray(self.filters_, info, tmin=0)
# then call plot_topomap
return filters.plot_topomap(times=components, ch_type=ch_type,
layout=layout, vmin=vmin, vmax=vmax,
cmap=cmap, colorbar=colorbar, res=res,
cbar_fmt=cbar_fmt, sensors=sensors,
scale=1, scale_time=1, unit='a.u.',
time_format=name_format, size=size,
show_names=show_names,
mask_params=mask_params,
mask=mask, outlines=outlines,
contours=contours,
image_interp=image_interp, show=show,
head_pos=head_pos)
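# A hedged usage sketch (not part of the original module): fit CSP on synthetic
# two-class epochs and extract the log band-power features.
if __name__ == '__main__':
    rng = np.random.RandomState(42)
    n_epochs, n_channels, n_times = 40, 7, 200
    epochs_data = rng.randn(n_epochs, n_channels, n_times)
    y = np.repeat([0, 1], n_epochs // 2)
    # make class 1 stronger in the first channel so CSP has something to separate
    epochs_data[y == 1, 0, :] *= 3.0
    csp = CSP(n_components=2, reg=None, log=True)
    features = csp.fit(epochs_data, y).transform(epochs_data)
    print(features.shape)  # (40, 2)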
| {
"content_hash": "c3188da5342b9e260c7745eab337e67e",
"timestamp": "",
"source": "github",
"line_count": 442,
"max_line_length": 79,
"avg_line_length": 47.17420814479638,
"alnum_prop": 0.5776221763944175,
"repo_name": "yousrabk/mne-python",
"id": "e1dd3f803bab4a9fda86aa09705045edd9f6897e",
"size": "21142",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mne/decoding/csp.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "3171"
},
{
"name": "PowerShell",
"bytes": "2988"
},
{
"name": "Python",
"bytes": "4489354"
},
{
"name": "Shell",
"bytes": "936"
}
],
"symlink_target": ""
} |
import os
from django.core.management.base import BaseCommand
from parkmap.models import Park, Neighborhood
class Command(BaseCommand):
help = 'Assigns intersecting neighborhoods to parks.'
def handle(self, *args, **options):
parks = Park.objects.all()
for park in parks:
neighborhoods = Neighborhood.objects.filter(geometry__intersects=park.geometry)
park.neighborhoods.add(*neighborhoods)
self.stdout.write('Updated "%s"\n' % park.name)
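# Invoked like any Django management command (standard manage.py workflow,
# assumed rather than shown in this repo):
#
#     python manage.py parks2neighborhoods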
| {
"content_hash": "72a45a1aa9394f210e3ceedf7ba542fc",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 91,
"avg_line_length": 31.5,
"alnum_prop": 0.6904761904761905,
"repo_name": "MAPC/bostonparks",
"id": "37395a9927499aeb165254e33507bdcd90f6e901",
"size": "504",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "parkmap/management/commands/parks2neighborhoods.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "116604"
},
{
"name": "Python",
"bytes": "61825"
}
],
"symlink_target": ""
} |
"""
State Space Representation, Kalman Filter, Smoother, and Simulation Smoother
Author: Chad Fulton
License: Simplified-BSD
"""
import numpy as np
from .kalman_smoother import KalmanSmoother
from .cfa_simulation_smoother import CFASimulationSmoother
from . import tools
SIMULATION_STATE = 0x01
SIMULATION_DISTURBANCE = 0x04
SIMULATION_ALL = (
SIMULATION_STATE | SIMULATION_DISTURBANCE
)
class SimulationSmoother(KalmanSmoother):
r"""
State space representation of a time series process, with Kalman filter
and smoother, and with simulation smoother.
Parameters
----------
k_endog : {array_like, int}
The observed time-series process :math:`y` if array like or the
number of variables in the process if an integer.
k_states : int
The dimension of the unobserved state process.
k_posdef : int, optional
The dimension of a guaranteed positive definite covariance matrix
describing the shocks in the measurement equation. Must be less than
or equal to `k_states`. Default is `k_states`.
simulation_smooth_results_class : class, optional
Default results class to use to save output of simulation smoothing.
Default is `SimulationSmoothResults`. If specified, class must extend
from `SimulationSmoothResults`.
simulation_smoother_classes : dict, optional
Dictionary with BLAS prefixes as keys and classes as values.
**kwargs
Keyword arguments may be used to provide default values for state space
matrices, for Kalman filtering options, for Kalman smoothing
options, or for Simulation smoothing options.
See `Representation`, `KalmanFilter`, and `KalmanSmoother` for more
details.
"""
simulation_outputs = [
'simulate_state', 'simulate_disturbance', 'simulate_all'
]
def __init__(self, k_endog, k_states, k_posdef=None,
simulation_smooth_results_class=None,
simulation_smoother_classes=None, **kwargs):
super(SimulationSmoother, self).__init__(
k_endog, k_states, k_posdef, **kwargs
)
if simulation_smooth_results_class is None:
simulation_smooth_results_class = SimulationSmoothResults
self.simulation_smooth_results_class = simulation_smooth_results_class
self.prefix_simulation_smoother_map = (
simulation_smoother_classes
if simulation_smoother_classes is not None
else tools.prefix_simulation_smoother_map.copy())
# Holder for an model-level simulation smoother objects, to use in
# simulating new time series.
self._simulators = {}
def get_simulation_output(self, simulation_output=None,
simulate_state=None, simulate_disturbance=None,
simulate_all=None, **kwargs):
r"""
Get simulation output bitmask
Helper method to get final simulation output bitmask from a set of
optional arguments including the bitmask itself and possibly boolean
flags.
Parameters
----------
simulation_output : int, optional
Simulation output bitmask. If this is specified, it is simply
returned and the other arguments are ignored.
simulate_state : bool, optional
Whether or not to include the state in the simulation output.
simulate_disturbance : bool, optional
Whether or not to include the state and observation disturbances
in the simulation output.
simulate_all : bool, optional
Whether or not to include all simulation output.
\*\*kwargs
Additional keyword arguments. Present so that calls to this method
can use \*\*kwargs without clearing out additional arguments.
"""
# If we do not explicitly have simulation_output, try to get it from
# kwargs
if simulation_output is None:
simulation_output = 0
if simulate_state:
simulation_output |= SIMULATION_STATE
if simulate_disturbance:
simulation_output |= SIMULATION_DISTURBANCE
if simulate_all:
simulation_output |= SIMULATION_ALL
# Handle case of no information in kwargs
if simulation_output == 0:
# If some arguments were passed, but we still do not have any
# simulation output, raise an exception
argument_set = not all([
simulate_state is None, simulate_disturbance is None,
simulate_all is None
])
if argument_set:
raise ValueError("Invalid simulation output options:"
" given options would result in no"
" output.")
# Otherwise set simulation output to be the same as smoother
# output
simulation_output = self.smoother_output
return simulation_output
def _simulate(self, nsimulations, measurement_shocks, state_shocks,
initial_state):
# Initialize the filter and representation
prefix, dtype, create_smoother, create_filter, create_statespace = (
self._initialize_smoother())
# Initialize the state
self._initialize_state(prefix=prefix)
# Create the simulator if necessary
if (prefix not in self._simulators or
not nsimulations == self._simulators[prefix].nobs):
simulation_output = 0
# Kalman smoother parameters
smoother_output = -1
# Kalman filter parameters
filter_method = self.filter_method
inversion_method = self.inversion_method
stability_method = self.stability_method
conserve_memory = self.conserve_memory
filter_timing = self.filter_timing
loglikelihood_burn = self.loglikelihood_burn
tolerance = self.tolerance
# Create a new simulation smoother object
cls = self.prefix_simulation_smoother_map[prefix]
self._simulators[prefix] = cls(
self._statespaces[prefix],
filter_method, inversion_method, stability_method,
conserve_memory, filter_timing, tolerance, loglikelihood_burn,
smoother_output, simulation_output, nsimulations
)
simulator = self._simulators[prefix]
# Set the disturbance variates
if measurement_shocks is not None and state_shocks is not None:
disturbance_variates = np.atleast_1d(np.array(
np.r_[measurement_shocks.ravel(), state_shocks.ravel()],
dtype=self.dtype
).squeeze())
simulator.set_disturbance_variates(disturbance_variates,
pretransformed=True)
elif measurement_shocks is None and state_shocks is None:
pass
elif measurement_shocks is not None:
raise ValueError('Must set `state_shocks` if `measurement_shocks`'
' is set.')
elif state_shocks is not None:
raise ValueError('Must set `measurement_shocks` if `state_shocks`'
' is set.')
# Set the initial state vector
initial_state = np.atleast_1d(np.array(
initial_state, dtype=self.dtype
).squeeze())
simulator.set_initial_state(initial_state)
# Perform simulation smoothing
# Note: simulation_output=-1 corresponds to whatever was setup when
# the simulation smoother was constructed
simulator.simulate(-1)
simulated_obs = np.array(simulator.generated_obs, copy=True)
simulated_state = np.array(simulator.generated_state, copy=True)
return (
simulated_obs[:, :nsimulations].T,
simulated_state[:, :nsimulations].T
)
def simulation_smoother(self, simulation_output=None, method='kfs',
results_class=None, prefix=None, **kwargs):
r"""
Retrieve a simulation smoother for the statespace model.
Parameters
----------
simulation_output : int, optional
Determines which simulation smoother output is calculated.
Default is all (including state and disturbances).
method : {'kfs', 'cfa'}, optional
Method for simulation smoothing. If `method='kfs'`, then the
simulation smoother is based on Kalman filtering and smoothing
recursions. If `method='cfa'`, then the simulation smoother is
based on the Cholesky Factor Algorithm (CFA) approach. The CFA
approach is not applicable to all state space models, but can be
faster for the cases in which it is supported.
simulation_smooth_results_class : class, optional
Default results class to use to save output of simulation
smoothing. Default is `SimulationSmoothResults`. If specified,
class must extend from `SimulationSmoothResults`.
prefix : str
The prefix of the datatype. Usually only used internally.
**kwargs
Additional keyword arguments, used to set the simulation output.
See `set_simulation_output` for more details.
Returns
-------
SimulationSmoothResults
"""
method = method.lower()
# Short-circuit for CFA
if method == 'cfa':
if simulation_output not in [None, 1, -1]:
raise ValueError('Can only retrieve simulations of the state'
' vector using the CFA simulation smoother.')
return CFASimulationSmoother(self)
elif method != 'kfs':
raise ValueError('Invalid simulation smoother method "%s". Valid'
' methods are "kfs" or "cfa".' % method)
# Set the class to be the default results class, if None provided
if results_class is None:
results_class = self.simulation_smooth_results_class
# Instantiate a new results object
if not issubclass(results_class, SimulationSmoothResults):
raise ValueError('Invalid results class provided.')
# Make sure we have the required Statespace representation
prefix, dtype, create_smoother, create_filter, create_statespace = (
self._initialize_smoother())
# Simulation smoother parameters
simulation_output = self.get_simulation_output(simulation_output,
**kwargs)
# Kalman smoother parameters
smoother_output = kwargs.get('smoother_output', simulation_output)
# Kalman filter parameters
filter_method = kwargs.get('filter_method', self.filter_method)
inversion_method = kwargs.get('inversion_method',
self.inversion_method)
stability_method = kwargs.get('stability_method',
self.stability_method)
conserve_memory = kwargs.get('conserve_memory',
self.conserve_memory)
filter_timing = kwargs.get('filter_timing',
self.filter_timing)
loglikelihood_burn = kwargs.get('loglikelihood_burn',
self.loglikelihood_burn)
tolerance = kwargs.get('tolerance', self.tolerance)
# Create a new simulation smoother object
cls = self.prefix_simulation_smoother_map[prefix]
simulation_smoother = cls(
self._statespaces[prefix],
filter_method, inversion_method, stability_method, conserve_memory,
filter_timing, tolerance, loglikelihood_burn, smoother_output,
simulation_output
)
# Create results object
results = results_class(self, simulation_smoother)
return results
class SimulationSmoothResults(object):
r"""
Results from applying the Kalman smoother and/or filter to a state space
model.
Parameters
----------
model : Representation
A Statespace representation
simulation_smoother : {{prefix}}SimulationSmoother object
The Cython simulation smoother object with which to simulation smooth.
Attributes
----------
model : Representation
A Statespace representation
dtype : dtype
Datatype of representation matrices
prefix : str
BLAS prefix of representation matrices
simulation_output : int
Bitmask controlling simulation output.
simulate_state : bool
Flag for if the state is included in simulation output.
simulate_disturbance : bool
Flag for if the state and observation disturbances are included in
simulation output.
simulate_all : bool
Flag for if simulation output should include everything.
generated_measurement_disturbance : ndarray
        Measurement disturbance variates used to generate the observation
vector.
generated_state_disturbance : ndarray
        State disturbance variates used to generate the state and
observation vectors.
generated_obs : ndarray
Generated observation vector produced as a byproduct of simulation
smoothing.
generated_state : ndarray
Generated state vector produced as a byproduct of simulation smoothing.
simulated_state : ndarray
Simulated state.
simulated_measurement_disturbance : ndarray
Simulated measurement disturbance.
simulated_state_disturbance : ndarray
Simulated state disturbance.
"""
def __init__(self, model, simulation_smoother):
self.model = model
self.prefix = model.prefix
self.dtype = model.dtype
self._simulation_smoother = simulation_smoother
# Output
self._generated_measurement_disturbance = None
self._generated_state_disturbance = None
self._generated_obs = None
self._generated_state = None
self._simulated_state = None
self._simulated_measurement_disturbance = None
self._simulated_state_disturbance = None
@property
def simulation_output(self):
return self._simulation_smoother.simulation_output
@simulation_output.setter
def simulation_output(self, value):
self._simulation_smoother.simulation_output = value
@property
def simulate_state(self):
return bool(self.simulation_output & SIMULATION_STATE)
@simulate_state.setter
def simulate_state(self, value):
if bool(value):
self.simulation_output = self.simulation_output | SIMULATION_STATE
else:
self.simulation_output = self.simulation_output & ~SIMULATION_STATE
@property
def simulate_disturbance(self):
return bool(self.simulation_output & SIMULATION_DISTURBANCE)
@simulate_disturbance.setter
def simulate_disturbance(self, value):
if bool(value):
self.simulation_output = (
self.simulation_output | SIMULATION_DISTURBANCE)
else:
self.simulation_output = (
self.simulation_output & ~SIMULATION_DISTURBANCE)
@property
def simulate_all(self):
return bool(self.simulation_output & SIMULATION_ALL)
@simulate_all.setter
def simulate_all(self, value):
if bool(value):
self.simulation_output = self.simulation_output | SIMULATION_ALL
else:
self.simulation_output = self.simulation_output & ~SIMULATION_ALL
@property
def generated_measurement_disturbance(self):
r"""
Randomly drawn measurement disturbance variates
Used to construct `generated_obs`.
Notes
-----
.. math::
           \varepsilon_t^+ \sim N(0, H_t)
If `disturbance_variates` were provided to the `simulate()` method,
then this returns those variates (which were N(0,1)) transformed to the
distribution above.
"""
if self._generated_measurement_disturbance is None:
end = self.model.nobs * self.model.k_endog
self._generated_measurement_disturbance = np.array(
self._simulation_smoother.disturbance_variates[:end],
copy=True).reshape(self.model.nobs, self.model.k_endog)
return self._generated_measurement_disturbance
@property
def generated_state_disturbance(self):
r"""
Randomly drawn state disturbance variates, used to construct
`generated_state` and `generated_obs`.
Notes
-----
.. math::
           \eta_t^+ \sim N(0, Q_t)
If `disturbance_variates` were provided to the `simulate()` method,
then this returns those variates (which were N(0,1)) transformed to the
distribution above.
"""
if self._generated_state_disturbance is None:
start = self.model.nobs * self.model.k_endog
self._generated_state_disturbance = np.array(
self._simulation_smoother.disturbance_variates[start:],
copy=True).reshape(self.model.nobs, self.model.k_posdef)
return self._generated_state_disturbance
@property
def generated_obs(self):
r"""
Generated vector of observations by iterating on the observation and
transition equations, given a random initial state draw and random
disturbance draws.
Notes
-----
.. math::
y_t^+ = d_t + Z_t \alpha_t^+ + \varepsilon_t^+
"""
if self._generated_obs is None:
self._generated_obs = np.array(
self._simulation_smoother.generated_obs, copy=True
)
return self._generated_obs
@property
def generated_state(self):
r"""
Generated vector of states by iterating on the transition equation,
given a random initial state draw and random disturbance draws.
Notes
-----
.. math::
\alpha_{t+1}^+ = c_t + T_t \alpha_t^+ + \eta_t^+
"""
if self._generated_state is None:
self._generated_state = np.array(
self._simulation_smoother.generated_state, copy=True
)
return self._generated_state
@property
def simulated_state(self):
r"""
Random draw of the state vector from its conditional distribution.
Notes
-----
.. math::
           \alpha \sim p(\alpha \mid Y_n)
"""
if self._simulated_state is None:
self._simulated_state = np.array(
self._simulation_smoother.simulated_state, copy=True
)
return self._simulated_state
@property
def simulated_measurement_disturbance(self):
r"""
Random draw of the measurement disturbance vector from its conditional
distribution.
Notes
-----
.. math::
           \varepsilon \sim N(\hat \varepsilon, Var(\hat \varepsilon \mid Y_n))
"""
if self._simulated_measurement_disturbance is None:
self._simulated_measurement_disturbance = np.array(
self._simulation_smoother.simulated_measurement_disturbance,
copy=True
)
return self._simulated_measurement_disturbance
@property
def simulated_state_disturbance(self):
r"""
Random draw of the state disturbance vector from its conditional
distribution.
Notes
-----
.. math::
           \eta \sim N(\hat \eta, Var(\hat \eta \mid Y_n))
"""
if self._simulated_state_disturbance is None:
self._simulated_state_disturbance = np.array(
self._simulation_smoother.simulated_state_disturbance,
copy=True
)
return self._simulated_state_disturbance
def simulate(self, simulation_output=-1, disturbance_variates=None,
initial_state_variates=None, pretransformed_variates=False):
r"""
Perform simulation smoothing
Does not return anything, but populates the object's `simulated_*`
attributes, as specified by simulation output.
Parameters
----------
simulation_output : int, optional
Bitmask controlling simulation output. Default is to use the
simulation output defined in object initialization.
disturbance_variates : array_likes, optional
Random values to use as disturbance variates, distributed standard
Normal. Usually only specified if results are to be replicated
(e.g. to enforce a seed) or for testing. If not specified, random
variates are drawn.
initial_state_variates : array_likes, optional
Random values to use as initial state variates. Usually only
specified if results are to be replicated (e.g. to enforce a seed)
or for testing. If not specified, random variates are drawn.
"""
# Clear any previous output
self._generated_measurement_disturbance = None
self._generated_state_disturbance = None
        self._generated_state = None
        self._generated_obs = None
self._simulated_state = None
self._simulated_measurement_disturbance = None
self._simulated_state_disturbance = None
# Re-initialize the _statespace representation
prefix, dtype, create_smoother, create_filter, create_statespace = (
self.model._initialize_smoother())
# Initialize the state
self.model._initialize_state(prefix=prefix)
# Draw the (independent) random variates for disturbances in the
# simulation
if disturbance_variates is not None:
self._simulation_smoother.set_disturbance_variates(
np.array(disturbance_variates, dtype=self.dtype),
pretransformed=pretransformed_variates
)
else:
self._simulation_smoother.draw_disturbance_variates()
# Draw the (independent) random variates for the initial states in the
# simulation
if initial_state_variates is not None:
self._simulation_smoother.set_initial_state_variates(
np.array(initial_state_variates, dtype=self.dtype),
pretransformed=pretransformed_variates
)
else:
self._simulation_smoother.draw_initial_state_variates()
# Perform simulation smoothing
        # Note: simulation_output=-1 corresponds to whatever was set up when
# the simulation smoother was constructed
self._simulation_smoother.simulate(simulation_output)
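# Illustrative usage sketch (not part of this module's code): given a
# statsmodels state space model `mod` that exposes its Kalman filter/smoother
# representation as `mod.ssm` (the concrete model construction is an assumed
# example), posterior draws of the state vector can be obtained with the
# classes defined above:
#
#     sim = mod.ssm.simulation_smoother()   # returns SimulationSmoothResults
#     sim.simulate()                        # one simulation smoothing pass
#     draw = sim.simulated_state            # draw from p(alpha | Y_n)
#
# Repeated calls to sim.simulate() yield independent draws, which is the basic
# building block for Gibbs sampling with state space models.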
| {
"content_hash": "0143181d88eda17478cb9cb55fa2acae",
"timestamp": "",
"source": "github",
"line_count": 609,
"max_line_length": 79,
"avg_line_length": 38.05911330049261,
"alnum_prop": 0.6167917853136595,
"repo_name": "jseabold/statsmodels",
"id": "c79db92ce8df975ae8e5e89a41ffb1595b67535f",
"size": "23178",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "statsmodels/tsa/statespace/simulation_smoother.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AGS Script",
"bytes": "457842"
},
{
"name": "Assembly",
"bytes": "10509"
},
{
"name": "Batchfile",
"bytes": "351"
},
{
"name": "C",
"bytes": "12088"
},
{
"name": "HTML",
"bytes": "148470"
},
{
"name": "Matlab",
"bytes": "1383"
},
{
"name": "Python",
"bytes": "8609450"
},
{
"name": "R",
"bytes": "34228"
},
{
"name": "Stata",
"bytes": "41179"
}
],
"symlink_target": ""
} |
import asyncio
import datetime
import time
from enum import IntEnum, Enum
from typing import List, Optional, Union
import discord
from necrobot.util.necrodancer import level as necrolevel
from necrobot.config import Config
from necrobot.race.raceconfig import RaceConfig
from necrobot.race.raceinfo import RaceInfo
from necrobot.race.racer import Racer
from necrobot.util import console, racetime
from necrobot.util.ordinal import ordinal
from necrobot.util.necrodancer import seedgen
# from necrobot.util import ratelimit
# CHECK_RATE_LIMITS = False
# RaceEvent ---------------------------------------------
class RaceEvent(object):
class EventType(Enum):
RACER_ENTER = 0
RACER_UNENTER = 1
RACER_READY = 2
RACER_UNREADY = 3
RACER_FINISH = 4
RACER_UNFINISH = 5
RACER_FORFEIT = 6
RACER_UNFORFEIT = 7
RACE_BEGIN_COUNTDOWN = 101
RACE_CANCEL_COUNTDOWN = 102
RACE_BEGIN = 103
RACE_END = 104
RACE_CANCEL_FINALIZE = 105
RACE_FINALIZE = 106
RACE_CANCEL = 107
RACE_PAUSE = 108
RACE_UNPAUSE = 109
ADD_EXTRANEOUS = 201
CHANGE_RULES = 202
def __init__(self, race, event: EventType, **kwargs):
self.race = race
self.event = event
self._kwargs = kwargs
def __getattr__(self, item):
return self._kwargs[item]
# RaceStatus enum ---------------------------------------------------------
class RaceStatus(IntEnum):
"""An Enum describing the current "phase" of the race.
Values
------
uninitialized
initialize() should be called on this object (not called in __init__ because coroutine).
entry_open
The race is open to new entrants.
counting_down
The bot is counting down to race start.
If people .unready during this time, race reverts to the entry_open state.
racing
The race has begun, and at least one player is still racing.
race_completed
All players have either finished or forfeited.
If players .undone during this time, race reverts to the racing state.
race_finalized
All players have finished or forfeited, and the race results are marked as final and can be
recorded. No further changes possible.
canceled
The race has been canceled. No further changes possible.
"""
uninitialized = 0
entry_open = 1
counting_down = 2
racing = 3
paused = 4
completed = 5
finalized = 6
canceled = 7
def __str__(self):
status_strs = {
RaceStatus.uninitialized: 'Not initialized.',
RaceStatus.entry_open: 'Entry open!',
RaceStatus.counting_down: 'Starting!',
RaceStatus.racing: 'In progress!',
RaceStatus.paused: 'Paused!',
RaceStatus.completed: 'Complete.',
RaceStatus.finalized: 'Results finalized.',
RaceStatus.canceled: 'Race canceled.'
}
return status_strs[self]
# Race class --------------------------------------------------------------
class Race(object):
# NB: Call the coroutine initialize() to set up the room
def __init__(self, parent, race_info: RaceInfo, race_config: RaceConfig = RaceConfig()):
self.race_id = None # After recording, the ID of the race in the DB
self.parent = parent # The parent managing this race. Must implement write() and process().
self.race_info = RaceInfo.copy(race_info)
self.racers = [] # A list of Racer
self._status = RaceStatus.uninitialized # The status of this race
self._config = race_config # The RaceConfig to use (determines some race behavior)
self._countdown = int(0) # The current countdown
self._start_datetime = None # UTC time for the beginning of the race
self._adj_start_time = float(0) # System clock time for the beginning of the race (modified by pause)
self._last_pause_time = float(0) # System clock time for last time we called pause()
self._last_no_entrants_time = None # System clock time for the last time the race had zero entrants
self._delay_record = False # If true, delay an extra config.FINALIZE_TIME_SEC before recording
self._countdown_future = None # The Future object for the race countdown
self._finalize_future = None # The Future object for the finalization countdown
# Race data
# Returns the status string
@property
def status_str(self) -> str:
return str(self._status)
    # Returns time elapsed in the race in hundredths of a second
@property
def current_time(self) -> int or None:
if self._status == RaceStatus.paused:
return int(100 * (self._last_pause_time - self._adj_start_time))
elif self._status == RaceStatus.racing or self._status == RaceStatus.completed:
return int(100 * (time.monotonic() - self._adj_start_time))
else:
return None
# Returns the current time elapsed as a string "[m]m:ss.hh"
@property
def current_time_str(self) -> str:
current_time_ = self.current_time
if current_time_ is not None:
return racetime.to_str(current_time_)
else:
return ''
# Returns the UTC time for the beginning of the race
@property
def start_datetime(self) -> datetime.datetime:
return self._start_datetime
# True if the race has not started
@property
def before_race(self) -> bool:
return self._status < RaceStatus.racing
# True if the race is currently running
@property
def during_race(self) -> bool:
return self._status == RaceStatus.racing or self._status == RaceStatus.paused
# True if the race is finalized or canceled
@property
def complete(self) -> bool:
return self._status >= RaceStatus.completed
# True if racers can enter the race
@property
def entry_open(self) -> bool:
return self._status == RaceStatus.entry_open
# True if the race can no longer be modified (finalized or canceled)
@property
def final(self) -> bool:
return self._status >= RaceStatus.finalized
# True if we've passed the "no entrants" warning
@property
def passed_no_entrants_warning_time(self) -> bool:
time_since = datetime.timedelta(seconds=(time.monotonic() - self._last_no_entrants_time))
return self._status != RaceStatus.uninitialized and time_since > Config.NO_ENTRANTS_CLEANUP_WARNING
# True if we've passed the "no entrants" clear time
@property
def passed_no_entrants_cleanup_time(self) -> bool:
time_since = datetime.timedelta(seconds=(time.monotonic() - self._last_no_entrants_time))
return self._status != RaceStatus.uninitialized and time_since > Config.NO_ENTRANTS_CLEANUP
# True if the race has any entrants
@property
def any_entrants(self) -> bool:
return bool(self.racers)
# True if the race is paused
@property
def paused(self) -> bool:
return self._status == RaceStatus.paused
@property
def race_config(self) -> RaceConfig:
return self._config
# Racer data
# Returns true if all racers are ready and there's enough racers
@property
def all_racers_ready(self) -> bool:
min_length = 1 if self.race_info.can_be_solo else 2
return self.num_not_ready == 0 and len(self.racers) >= min_length
# Returns the number of racers not in the 'ready' state
@property
def num_not_ready(self) -> int:
num = 0
for racer in self.racers:
if not racer.is_ready:
num += 1
return num
# Return the number of racers in the 'finished' state
@property
def num_finished(self) -> int:
num = 0
for racer in self.racers:
if racer.is_finished:
num += 1
return num
# Returns a list of racers and their statuses.
@property
def leaderboard_text(self) -> str:
return self._leaderboard_text(False)
def _leaderboard_text(self, shortened) -> str:
char_limit = int(1900) # The character limit on discord messages
racer_list = []
max_name_len = 0
max_time = 0
for racer in self.racers:
max_name_len = max(max_name_len, len(racer.name))
racer_list.append(racer)
if racer.is_finished:
max_time = max(racer.time, max_time)
max_time += 1
# Sort racers: (1) Finished racers, by time; (2) Forfeit racers; (3) Racers still racing
racer_list.sort(key=lambda r: r.time if r.is_finished else (max_time if r.is_forfeit else max_time+1))
text = ''
rank = int(0)
for racer in racer_list:
rank += 1
rank_str = '{0: >4} '.format(str(rank) + '.' if racer.is_finished else ' ')
stat_str = racer.short_status_str if shortened else racer.status_str
text += (rank_str + racer.name + (' ' * (max_name_len - len(racer.name))) + ' --- ' + stat_str + '\n')
if len(text) > char_limit and not shortened:
return self._leaderboard_text(shortened=True)
else:
return text
@property
    def winner(self) -> Racer or None:
if not self._status == RaceStatus.finalized or not self.racers:
return None
lead_racer = self.racers[0]
return lead_racer if lead_racer.is_finished else None
# True if the given discord.User is entered in the race
def has_racer(self, racer_usr: Union[discord.User, discord.Member]) -> bool:
for racer in self.racers:
if racer.member.id == racer_usr.id:
return True
return False
# Returns the given discord.User as a Racer, if possible
def get_racer(self, racer_usr: Union[discord.User, discord.Member]) -> Racer:
for racer in self.racers:
if racer.member.id == racer_usr.id:
return racer
# Public methods (all coroutines)
# Sets up the leaderboard, etc., for the race
async def initialize(self):
if self._status != RaceStatus.uninitialized:
return
self._status = RaceStatus.entry_open
self._last_no_entrants_time = time.monotonic()
# Begins the race if ready. (Writes a message if all racers are ready but an admin is not.)
# Returns true on success
async def begin_if_ready(self):
if self.all_racers_ready:
await self.begin_race_countdown()
return True
# Begin the race countdown and transition race state from 'entry_open' to 'counting_down'
async def begin_race_countdown(self):
if self._status == RaceStatus.entry_open:
self._status = RaceStatus.counting_down
self._countdown_future = asyncio.ensure_future(self._race_countdown())
await self._process(RaceEvent.EventType.RACE_BEGIN_COUNTDOWN)
# Pause the race timer.
async def pause(self, mute=False):
if self._status == RaceStatus.racing:
self._status = RaceStatus.paused
self._last_pause_time = time.monotonic()
mention_str = ''
for racer in self.racers:
mention_str += '{}, '.format(racer.member.mention)
mention_str = mention_str[:-2]
await self._write(mute=mute, text='Race paused. (Alerting {0}.)'.format(mention_str))
await self._process(RaceEvent.EventType.RACE_PAUSE)
# Unpause the race timer.
async def unpause(self, mute=False):
if self.paused:
await self._unpause_countdown(mute=mute)
# Enters the given discord Member in the race
async def enter_member(self, racer_member: discord.Member, mute=False):
if self.has_racer(racer_member):
await self._write(mute=mute, text='{0} is already entered.'.format(racer_member.mention))
return
if not self.before_race:
await self._write(
mute=mute,
text='{0}: Cannot enter; the race has already started.'.format(racer_member.mention))
return
if self._status == RaceStatus.counting_down:
await self._cancel_countdown()
await self._do_enter_racer(racer_member)
await self._write(
mute=mute,
text='{0} has entered the race. {1} entrants.'.format(racer_member.mention, len(self.racers)))
await self._process(RaceEvent.EventType.RACER_ENTER, racer_member=racer_member)
# Unenters the given discord Member in the race
async def unenter_member(self, racer_member: discord.Member, mute=False):
if not self.before_race:
await self.forfeit_member(racer_member)
return
if self.has_racer(racer_member):
self.racers = [r for r in self.racers if int(r.member.id) != int(racer_member.id)]
if not self.racers:
self._last_no_entrants_time = time.monotonic()
if (len(self.racers) < 2 and not self.race_info.can_be_solo) or len(self.racers) < 1:
await self._cancel_countdown()
await self._write(mute=mute, text='{0} is no longer entered.'.format(racer_member.mention))
await self.begin_if_ready()
await self._process(RaceEvent.EventType.RACER_UNENTER, racer_member=racer_member)
else:
await self._write(mute=mute, text='{0} is not entered.'.format(racer_member.mention))
# Enters the racer if not entered, and puts that racer in the 'ready' state
async def enter_and_ready_member(self, racer_member: discord.Member, mute=False):
already_entered = self.has_racer(racer_member)
if not already_entered and not self.before_race:
await self._write(mute=mute, text='{0}: The race has already started!'.format(racer_member.mention))
return
if not already_entered:
await self._do_enter_racer(racer_member)
racer = self.get_racer(racer_member)
if racer is None:
await self._write(mute=mute, text='Unexpected error.')
console.warning("Unexpected error in race.race.Race.enter_and_ready_member: "
"Couldn't find a Racer for the discord Member {0}.".format(racer_member.name))
return
if racer.is_ready:
await self._write(mute=mute, text='{0} is already ready!'.format(racer_member.mention))
return
racer.ready()
if self._status == RaceStatus.counting_down:
await self._cancel_countdown()
if len(self.racers) == 1 and not self.race_info.can_be_solo:
await self._write(mute=mute, text='Waiting on at least one other person to join the race.')
elif not already_entered:
await self._write(
mute=mute,
text='{0} has entered and is ready! {1} remaining.'.format(racer_member.mention, self.num_not_ready))
else:
await self._write(
mute=mute,
text='{0} is ready! {1} remaining.'.format(racer_member.mention, self.num_not_ready))
await self.begin_if_ready()
if not already_entered:
await self._process(RaceEvent.EventType.RACER_ENTER, racer_member=racer_member)
await self._process(RaceEvent.EventType.RACER_READY, racer_member=racer_member)
# Attempt to put the given Racer in the 'unready' state if they were ready
async def unready_member(self, racer_member: discord.Member, mute=False):
if not self.before_race:
return
racer = self.get_racer(racer_member)
if racer is None:
await self._write(
mute=mute,
text='{0}: Warning: You have not yet entered the race.'.format(racer_member.mention))
return
# See if we can cancel a countdown. If cancel_countdown() returns False,
# then there is a countdown and we failed to cancel it, so racer cannot be made unready.
success = await self._cancel_countdown()
if success and racer.unready():
await self._write(mute=mute, text='{0} is no longer ready.'.format(racer_member.mention))
await self._process(RaceEvent.EventType.RACER_UNREADY, racer_member=racer_member)
else:
await self._write(mute=mute, text="Can't unready!")
# Puts the given Racer in the 'finished' state and gets their time
async def finish_member(self, racer_member: discord.Member, mute=False):
if not (self._status == RaceStatus.racing or self._status == RaceStatus.completed):
return
racer = self.get_racer(racer_member)
if racer is None:
return
if racer.finish(self.current_time):
await self._write(
mute=mute,
text='{0} has finished in {1} place with a time of {2}.'.format(
racer_member.mention,
ordinal(self.num_finished),
racer.time_str))
if self._status == RaceStatus.racing:
await self._check_for_race_end()
await self._process(RaceEvent.EventType.RACER_FINISH, racer_member=racer_member)
# Attempt to put the given Racer in the 'racing' state if they were finished
async def unfinish_member(self, racer_member: discord.Member, mute=False):
if self.before_race or self.final:
return
racer = self.get_racer(racer_member)
if racer is None:
return
if not racer.is_finished:
await self._write(mute=mute, text='{0} is still racing!'.format(racer_member.mention))
# See if we can cancel a (possible) finalization. If cancel_finalization() returns False,
# then there is a finalization and we failed to cancel it, so racer cannot be made unready.
success = await self._cancel_finalization()
if success and racer.unfinish():
await self._write(mute=mute, text='{0} continues to race!'.format(racer_member.mention))
await self._process(RaceEvent.EventType.RACER_UNFINISH, racer_member=racer_member)
async def forfeit_racer(self, racer: Racer, mute=False):
if self.before_race or self.final:
return
await self._do_forfeit_racer(racer)
await self._write(mute=mute, text='{0} has forfeit the race.'.format(racer.member.mention))
# Puts the given Racer in the 'forfeit' state
async def forfeit_member(self, racer_member: discord.Member, mute=False):
racer = self.get_racer(racer_member)
if racer is not None:
await self.forfeit_racer(racer, mute)
await self._process(RaceEvent.EventType.RACER_FORFEIT, racer_member=racer_member)
# Attempt to put the given Racer in the 'racing' state if they had forfeit
async def unforfeit_member(self, racer_member: discord.Member, mute=False):
if self.before_race or self.final:
return False
racer = self.get_racer(racer_member)
if racer is None:
return
if not racer.is_forfeit:
return
# See if we can cancel a (possible) finalization. If cancel_finalization() returns False,
# then there is a finalization and we failed to cancel it, so racer cannot be made unready.
success = await self._cancel_finalization()
if success and racer.unforfeit():
await self._write(
mute=mute,
text='{0} is no longer forfeit and continues to race!'.format(racer_member.mention))
await self._process(RaceEvent.EventType.RACER_UNFORFEIT, racer_member=racer_member)
# Forfeits all racers that have not yet finished
async def forfeit_all_remaining(self, mute=False):
if not self.before_race:
forfeit_any = False
for racer in self.racers:
if racer.is_racing:
forfeit_any = True
await self._do_forfeit_racer(racer)
if forfeit_any:
await self._write(mute=mute, text='All remaining racers forfeit.')
await self._end_race()
# Adds the given string as a comment
async def add_comment_for_member(self, racer_member: discord.Member, comment_str: str):
if self.before_race or self.final:
return
racer = self.get_racer(racer_member)
if racer is None:
return
racer.add_comment(comment_str[:255])
await self._process(RaceEvent.EventType.ADD_EXTRANEOUS)
# Adds a death for the given member at the given level and causes them to forfeit
async def set_death_for_member(self, racer_member: discord.Member, level: int, mute=False):
if self.before_race or self.final:
return
racer = self.get_racer(racer_member)
if racer is None:
return
await self._do_forfeit_racer(racer)
await self._write(mute=mute, text='{0} has forfeit the race.'.format(racer_member.mention))
if not level == necrolevel.LEVEL_NOS:
racer.level = level
await self._process(RaceEvent.EventType.RACER_FORFEIT, racer_member=racer_member)
# Adds an in-game time for the given member
async def set_igt_for_member(self, racer_member: discord.Member, igt: int):
if self.before_race or self.final:
return
racer = self.get_racer(racer_member)
if racer is None:
return
if igt != -1 and racer.is_done_racing:
racer.igt = int(igt)
await self._process(RaceEvent.EventType.ADD_EXTRANEOUS)
# Kicks the specified racers from the race (they can re-enter)
async def kick_racers(self, names_to_kick: list, mute=False):
for racer in self.racers:
if racer.name.lower() in names_to_kick:
await self.unenter_member(racer.member, mute=mute)
# Cancel the race.
async def cancel(self):
self._status = RaceStatus.canceled
await self._cancel_countdown()
await self._cancel_finalization()
await self._process(RaceEvent.EventType.RACE_CANCEL)
# Reseed the race
async def reseed(self, mute=False):
if not self.race_info.seeded:
await self._write(mute=mute, text='This is not a seeded race. Use `.changerules` to change this.')
elif self.race_info.seed_fixed:
await self._write(
mute=mute,
text='The seed for this race was fixed by its rules. Use `.changerules` to change this.')
return
else:
self.race_info.seed = seedgen.get_new_seed()
await self._write(mute=mute, text='Changed seed to {0}.'.format(self.race_info.seed))
await self._process(RaceEvent.EventType.CHANGE_RULES)
# Private methods
# Sort racer list
def _sort_racers(self):
max_time = 0
for racer in self.racers:
if racer.is_finished:
max_time = max(racer.time, max_time)
max_time += 1
self.racers.sort(key=lambda r: r.time if r.is_finished else max_time)
# Process an event
async def _process(self, event_type: RaceEvent.EventType, **kwargs):
await self.parent.process(RaceEvent(self, event_type, **kwargs))
# Actually enter the racer
async def _do_enter_racer(self, racer_member):
racer = Racer(racer_member)
await racer.initialize()
if racer in self.racers:
return
self.racers.append(racer)
# Begins the race. Called by the countdown.
async def _begin_race(self, mute=False):
for racer in self.racers:
if not racer.begin_race():
console.warning("{} isn't ready while calling race._begin_race -- unexpected error.".format(
racer.name))
self._status = RaceStatus.racing
self._adj_start_time = time.monotonic()
self._start_datetime = datetime.datetime.utcnow()
await self._write(mute=mute, text='GO!')
await self._process(RaceEvent.EventType.RACE_BEGIN)
# Checks to see if all racers have either finished or forfeited. If so, ends the race.
# Return True if race was ended.
async def _check_for_race_end(self):
num_still_racing = 0
for racer in self.racers:
if not racer.is_done_racing:
num_still_racing += 1
if num_still_racing <= self._config.auto_forfeit:
await self.forfeit_all_remaining(mute=True)
await self._end_race()
# Ends the race, and begins a countdown until the results are 'finalized'
async def _end_race(self):
if self._status == RaceStatus.racing:
self._status = RaceStatus.completed
self._finalize_future = asyncio.ensure_future(self._finalization_countdown())
await self._process(RaceEvent.EventType.RACE_END)
# Countdown coroutine to be wrapped in self._countdown_future.
# Warning: Do not call this -- use begin_countdown instead.
async def _race_countdown(self, mute=False):
        # TODO: The warnings = [5] is a hardcoded hack for managing current Discord rate limits.
await self._do_countdown(
length=self._config.countdown_length,
incremental_start=self._config.incremental_countdown_start,
warnings=[5],
mute=mute
)
await self._begin_race()
async def _do_countdown(
self, length: int, warnings: Optional[List[int]] = None, incremental_start: int = None, mute=False
):
if warnings is None:
warnings = []
fudge = 0.6
countdown_systemtime_begin = time.monotonic()
countdown_timer = length
if incremental_start is not None:
await self._write(mute=mute, text='The race will begin in {0} seconds.'.format(countdown_timer))
while countdown_timer > 0:
sleep_time = float(countdown_systemtime_begin + length - countdown_timer + 1 - time.monotonic())
if countdown_timer in warnings:
await self._write(mute=mute, text='{} seconds...'.format(countdown_timer))
if incremental_start is None or countdown_timer <= incremental_start:
await self._write(mute=mute, text='{}'.format(countdown_timer))
if sleep_time < fudge:
countdown_systemtime_begin += fudge - sleep_time
sleep_time = fudge
# print('Countdown cycle: Timer = {0}, Sleep Time = {1}'.format(countdown_timer, sleep_time))
if sleep_time > 0:
await asyncio.sleep(sleep_time) # sleep until the next tick
countdown_timer -= 1
# Countdown for an unpause
async def _unpause_countdown(self, mute=False):
await self._do_countdown(
length=self._config.unpause_countdown_length,
mute=mute
)
await self._do_unpause_race()
# Actually unpause the race
async def _do_unpause_race(self, mute=False):
if self._status == RaceStatus.paused:
await self._write(mute=mute, text='GO!')
self._status = RaceStatus.racing
self._adj_start_time += time.monotonic() - self._last_pause_time
await self._process(RaceEvent.EventType.RACE_UNPAUSE)
return True
return False
# Countdown coroutine to be wrapped in self._finalize_future.
# Warning: Do not call this -- use end_race instead.
async def _finalization_countdown(self):
self.delay_record = True
while self.delay_record:
self.delay_record = False
await asyncio.sleep(self._config.finalize_time_sec)
# Perform the finalization and record the race. At this point, the finalization cannot be canceled.
self._status = RaceStatus.finalized
await self.forfeit_all_remaining(mute=True)
self._sort_racers()
await self._process(RaceEvent.EventType.RACE_FINALIZE)
# Attempt to cancel the race countdown -- transition race state from 'counting_down' to 'entry_open'
# Returns False only if there IS a countdown, AND we failed to cancel it
async def _cancel_countdown(self, mute=False):
if self._status == RaceStatus.counting_down:
if self._countdown_future:
if self._countdown_future.cancel():
self._countdown_future = None
self._status = RaceStatus.entry_open
await self._process(RaceEvent.EventType.RACE_CANCEL_COUNTDOWN)
await self._write(mute=mute, text='Countdown canceled.')
return True
else:
return False
return True
# Attempt to cancel finalization and restart race -- transition race state from 'completed' to 'racing'
# Returns False only if race IS completed, AND we failed to restart it
async def _cancel_finalization(self, mute=False):
if self._status == RaceStatus.completed:
if self._finalize_future:
if self._finalize_future.cancel():
self._finalize_future = None
self._status = RaceStatus.racing
await self._process(RaceEvent.EventType.RACE_CANCEL_FINALIZE)
await self._write(mute=mute, text='Race end canceled -- unfinished racers may continue!')
return True
else:
return False
return True
# Causes the racer to forfeit
async def _do_forfeit_racer(self, racer: Racer):
if racer.forfeit(self.current_time):
await self._check_for_race_end()
# Write text
async def _write(self, text: str, mute=False):
if not mute:
await self.parent.write(text)
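# Illustrative usage sketch (hedged; `RaceRoom`, `race_info` and the members
# are assumed stand-ins -- in necrobot the parent is normally a race room
# object that already implements write() and process()):
#
#     class RaceRoom:
#         async def write(self, text):
#             print(text)
#         async def process(self, event):
#             pass
#
#     async def run(race_info, member_a, member_b):
#         race = Race(RaceRoom(), race_info)
#         await race.initialize()
#         await race.enter_and_ready_member(member_a)
#         await race.enter_and_ready_member(member_b)  # countdown starts once all are ready
#         ...
#         await race.finish_member(member_a)           # records a finish time and place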
| {
"content_hash": "1b4e1caf342422c37dc3c3f1cc9f7e38",
"timestamp": "",
"source": "github",
"line_count": 757,
"max_line_length": 120,
"avg_line_length": 40.038309114927344,
"alnum_prop": 0.6104457421887888,
"repo_name": "incnone/necrobot",
"id": "c3601bca0809f4c4aace8f911faed05b3caf147f",
"size": "30471",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "necrobot/race/race.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "637785"
}
],
"symlink_target": ""
} |
from mock import ANY, patch
from cell.actors import Actor
from cell.agents import dAgent
from cell.exceptions import CellError, NoReplyError
from cell.results import AsyncResult
from cell.tests.utils import Case, Mock, with_in_memory_connection
from kombu.utils import uuid
__author__ = 'rumi'
class A(Actor):
pass
class Ag(dAgent):
def get_default_scatter_limit(self):
return 5
class test_AsyncResults(Case):
def get_async_result(self):
ticket = uuid()
actor = Mock()
ares = AsyncResult(ticket, actor)
return ares
def test_init(self):
ticket = uuid()
actor = Mock()
ares = AsyncResult(ticket, actor)
self.assertEquals(ares.ticket, ticket)
self.assertEqual(ares.actor, actor)
self.assertIsNone(ares._result)
self.assertEqual(ares.Error, CellError)
self.assertEqual(ares.NoReplyError, NoReplyError)
with self.assertRaises(TypeError):
AsyncResult(ticket)
def test_result_when_result_is_set(self):
val = 'the quick brown fox'
ares = self.get_async_result()
ares.get = Mock()
ares._result = val
res = ares.result()
self.assertEqual(res, val)
self.assertEqual(ares.get.call_count, 0)
def test_result_when_result_is_not_set(self):
val = 'the quick brown fox'
ares = self.get_async_result()
ares.get = Mock(return_value=val)
res = ares.result()
self.assertEqual(res, val)
self.assertEqual(ares._result, res)
ares.get.assert_called_once_with()
def test_to_python(self):
ok_message = {'ok': 'the quick_brown_fox'}
ares = self.get_async_result()
# ------------------------------
# reply is a successful message
# ------------------------------
# correct format
res = ares.to_python(ok_message)
self.assertEqual(res, ok_message['ok'])
# correct format with multiple keys in the reply dict
ok_message = {'ok': 'the quick_brown_fox',
'foo': 'the quick_brown_fox'}
res = ares.to_python(ok_message)
self.assertEqual(res, ok_message['ok'])
# contains both ok and nok
ok_message = {'ok': 'the quick_brown_fox',
'nok': 'the quick_brown_fox'}
res = ares.to_python(ok_message)
self.assertEqual(res, ok_message['ok'])
# ---------------------------
# reply is an error message
# ---------------------------
# correct error format with to propagate param set
error_message = {'nok': [Exception('jump over')]}
with self.assertRaises(ares.Error):
ares.to_python(error_message)
# correct error format with to propagate set to True
with self.assertRaises(ares.Error):
ares.to_python(error_message, propagate=True)
# correct error format with to propagate set to False
error_message = {'nok': ['jump over', None]}
res = ares.to_python(error_message, propagate=False)
self.assertEquals(res.__dict__,
ares.Error(*error_message.get('nok')).__dict__)
# neither nok or ok message given
error_message = {'foo': ['jump over']}
with self.assertRaises(ares.Error):
ares.to_python(error_message)
        # multiple keys in the reply dict given, one of the keys is nok
error_message = {'foo': 'the quick_brown_fox',
'nok': ['jump over']}
res = ares.to_python(error_message, propagate=False)
self.assertEqual(res.__dict__,
ares.Error(*error_message['nok']).__dict__)
def test_get(self):
id1, id2 = uuid(), uuid()
def gather():
yield id1
yield id2
# test that it calls gather with limit = 1 and kwargs
ares = self.get_async_result()
ares.gather = Mock(return_value=['1'])
ares.get()
ares.gather.assert_called_once_with(limit=1)
ares.gather.reset_mock()
kwargs = {'timeout': 100, 'ignore_timeout': False,
'foo': 'bar', 'propaget': True}
ares.get(**kwargs)
ares.gather.assert_called_once_with(**dict(kwargs, limit=1))
ares.gather.reset_mock()
kwargs = {'timeout': 100, 'ignore_timeout': False, 'limit': 10}
ares.get(**kwargs)
ares.gather.assert_called_once_with(**kwargs)
ares.gather.reset_mock()
# it returns the first value of whatever gather returns
ares.gather = Mock(return_value=gather())
res = ares.get()
self.assertEqual(res, id1)
# if gather does not return result:
# self.NoReplyError('No reply received within time constraint')
ares.gather = Mock(return_value=None)
with self.assertRaises(ares.NoReplyError):
ares.get()
ares.gather.reset_mock()
ares.gather = Mock(return_value={})
with self.assertRaises(ares.NoReplyError):
ares.get()
@with_in_memory_connection
def test_gather(self, conn):
def collect_replies():
yield 1
yield 2
yield 3
ticket = uuid()
actor = Actor(conn)
actor._collect_replies = Mock(return_value=collect_replies())
ares = AsyncResult(ticket, actor)
ares.to_python = Mock()
all = ares.gather()
list(all)
        actor._collect_replies.assert_called_once_with(conn, ANY, ticket)
self.assertEqual(ares.to_python.call_count,
len(list(collect_replies())))
# test that the to_python is applied to all results
actor._collect_replies.reset_mock()
actor._collect_replies = Mock(return_value=collect_replies())
prev_to_python = ares.to_python
new_to_python = lambda x, propagate = True: 'called_%s' % x
ares.to_python = new_to_python
all = ares.gather()
vals = list(all)
expected_vals = [new_to_python(i) for i in collect_replies()]
        actor._collect_replies.assert_called_once_with(conn, ANY, ticket)
self.assertEqual(vals, expected_vals)
ares.to_python = prev_to_python
# test kwargs
@patch('cell.actors.collect_replies')
@with_in_memory_connection
def test_gather_kwargs(self, conn, collect):
actor = Actor(conn)
ares = AsyncResult(uuid(), actor)
prev_to_python = ares.to_python
new_to_python = lambda x, propagate = True: x
ares.to_python = new_to_python
# Test default kwargs,
# nothing is passed, the actor does not have agent assigned
self.assert_gather_kwargs(
ares, collect, {},
timeout=actor.default_timeout, ignore_timeout=False)
# limit - set the default agent limit if NONE is set
# Test default kwargs, nothing is passed,
# the actor does have default agent assigned
actor.agent = dAgent(conn)
self.assert_gather_kwargs(
ares, collect, {},
timeout=actor.default_timeout, limit=None, ignore_timeout=False)
# limit - set the default agent limit if NONE is set
# Test default kwargs, nothing is passed,
# the actor does have agent with custom scatter limit assigned
ag = Ag(conn)
actor.agent = ag
self.assert_gather_kwargs(
ares, collect, {}, timeout=actor.default_timeout,
limit=ag.get_default_scatter_limit())
# pass all args
actor.agent = Ag(conn)
timeout, ignore_timeout, limit = 200.0, False, uuid()
self.assert_gather_kwargs(
ares, collect,
{'timeout': timeout, 'ignore_timeout': ignore_timeout,
'limit': limit},
timeout=timeout, limit=limit, ignore_timeout=ignore_timeout)
        # if ignore_timeout is passed,
        # the custom logic for limit is not applied
actor.agent = None
timeout, ignore_timeout = 200.0, True
self.assert_gather_kwargs(
ares, collect,
{'timeout': timeout, 'ignore_timeout': ignore_timeout},
timeout=timeout, ignore_timeout=ignore_timeout)
ares.to_python = prev_to_python
def assert_gather_kwargs(self, ares, collect, args, **kwargs):
def drain():
yield 1
yield 2
collect.return_value = drain()
all = ares.gather(**args)
self.assertEqual(list(all), list(drain()))
collect.assert_called_once_with(ANY, ANY, ANY, **kwargs)
collect.reset_mock()
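# Illustrative pattern exercised by the tests above (hedged -- how the ticket
# and actor are obtained depends on the surrounding cell application code):
#
#     ares = AsyncResult(ticket, actor)
#     first_reply = ares.get(timeout=5)          # first reply, or NoReplyError
#     all_replies = list(ares.gather(limit=3))   # up to `limit` replies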
| {
"content_hash": "072c8b3b8d8d7942fd441f7ccf0f5cb1",
"timestamp": "",
"source": "github",
"line_count": 265,
"max_line_length": 76,
"avg_line_length": 32.845283018867924,
"alnum_prop": 0.5807674632352942,
"repo_name": "celery/cell",
"id": "e3e5382492cebd16f8fbdccf8583aa6d0a68baeb",
"size": "8704",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cell/tests/actors/test_results.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "1495"
},
{
"name": "Python",
"bytes": "158962"
},
{
"name": "Shell",
"bytes": "2154"
}
],
"symlink_target": ""
} |
import unittest
from PIL import Image
from ric_encoder import ric_encode, ric_decode
class TestRicEncoder(unittest.TestCase):
def testEncoder(self):
img = open("samples/crop.jpg","rb").read()
options = [ { "imgwidth": 200, "crop": [210, 120, 510, 420]}, { "imgwidth": 400, "crop": [180, 90, 720, 480] } ]
output = ric_encode(img, options)
f = open("/tmp/test.ric", "wb")
f.write(output)
f.close()
def testDecoder(self):
img = open("/tmp/test.ric","rb").read()
output = ric_decode(img)
f = open("/tmp/test.webp", "wb")
f.write(output)
f.close()
def main():
unittest.main()
if __name__ == '__main__':
main()
| {
"content_hash": "e3fcf520ae09a31f59ae31c139faa52c",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 120,
"avg_line_length": 28.56,
"alnum_prop": 0.5574229691876751,
"repo_name": "yoavweiss/Responsive-Image-Container",
"id": "869a4852213ec515488eab4eb9c8620da9681726",
"size": "733",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test_ric_encoder.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "15443"
}
],
"symlink_target": ""
} |
from setuptools import setup # , find_packages
import os
import sys
def use_package(package):
if not package:
return False
if package.startswith(('#', 'git+')):
return False
if sys.version_info.major > 2 and 'python_version <' in package:
return False
if sys.version_info.major == 2 and 'python_version >' in package:
return False
return True
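# Example of the filtering behaviour above (illustrative, assuming Python 3):
#
#     use_package("redis>=2.10")                         -> True   (kept)
#     use_package("# a comment")                         -> False  (dropped)
#     use_package("git+https://github.com/foo/bar.git")  -> False  (dropped)
#     use_package("futures; python_version < '3.0'")     -> False  (dropped on py3)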
def get_requirements():
reqs = []
for filename in ["requirements-base.txt", "requirements-dashboard.txt"]:
with open(filename, "r") as f:
reqs += [x.strip().split(";")[0] for x in f.readlines() if use_package(x.strip())]
return reqs
def get_version():
basedir = os.path.dirname(__file__)
with open(os.path.join(basedir, 'mrq/version.py')) as f:
locals = {}
exec(f.read(), locals)
return locals['VERSION']
raise RuntimeError('No version info found.')
setup(
name="mrq",
include_package_data=True,
packages=['mrq', 'mrq.basetasks', 'mrq.bin', 'mrq.dashboard'], # find_packages(exclude=['tests', 'tests.tasks']),
version=get_version(),
description="A simple yet powerful distributed worker task queue in Python",
author="Pricing Assistant",
license='MIT',
author_email="contact@pricingassistant.com",
url="http://github.com/pricingassistant/mrq",
# download_url="http://chardet.feedparser.org/download/python3-chardet-1.0.1.tgz",
keywords=["worker", "task", "distributed", "queue", "asynchronous", "redis", "mongodb", "job", "processing", "gevent"],
platforms='any',
entry_points={
'console_scripts': [
'mrq-worker = mrq.bin.mrq_worker:main',
'mrq-run = mrq.bin.mrq_run:main',
'mrq-agent = mrq.bin.mrq_agent:main',
'mrq-dashboard = mrq.dashboard.app:main'
]
},
# dependency_links=[
# "http://github.com/mongodb/mongo-python-driver/archive/cb4adb2193a83413bc5545d89b7bbde4d6087761.zip#egg=pymongo-2.7rc1"
# ],
zip_safe=False,
install_requires=get_requirements(),
classifiers=[
"Programming Language :: Python",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
#'Development Status :: 1 - Planning',
#'Development Status :: 2 - Pre-Alpha',
#'Development Status :: 3 - Alpha',
#'Development Status :: 4 - Beta',
'Development Status :: 5 - Production/Stable',
#'Development Status :: 6 - Mature',
#'Development Status :: 7 - Inactive',
"Environment :: Other Environment",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Topic :: Utilities"
],
long_description=open("README.md").read()
)
| {
"content_hash": "e6fbfadbb9d7ef69d25eefe2e494df35",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 129,
"avg_line_length": 36.148148148148145,
"alnum_prop": 0.6082650273224044,
"repo_name": "pricingassistant/mrq",
"id": "7f90e2519f7e2e37402b64656b6e56b76de4e2bf",
"size": "2928",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5514"
},
{
"name": "Dockerfile",
"bytes": "2722"
},
{
"name": "HTML",
"bytes": "60608"
},
{
"name": "JavaScript",
"bytes": "78540"
},
{
"name": "Makefile",
"bytes": "2765"
},
{
"name": "Perl",
"bytes": "1374"
},
{
"name": "Python",
"bytes": "931744"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django import template
from django.test import TestCase
from django.test.client import RequestFactory
from django.test.utils import override_settings
from labJS.base import Labjs
from labJS.templatetags.labjs import LabjsNode, Wait
class FakeNode(object):
def render(self, context):
return 'some content'
class TestLabjs(TestCase):
def test_split_contents_empty_content(self):
lab = Labjs('')
self.assertFalse(lab.split_contents())
def test_split_contents_non_js_content(self):
lab = Labjs('<p class="test">I am not JS</p>')
self.assertFalse(lab.split_contents())
def test_split_contents_inline(self):
lab = Labjs('<script>document.write("Hello world");</script>')
self.assertEqual(
lab.split_contents(),
[{'data': 'document.write("Hello world");', 'type': 'inline'}]
)
def test_split_contents_script(self):
lab = Labjs('<script src="/static/script.js"></script>')
self.assertEqual(
lab.split_contents(),
[{'data': '/static/script.js', 'type': 'script'}]
)
def test_render_output_inline_contains_script(self):
lab = Labjs('<script>document.write("Hello world");</script>')
self.assertIn('document.write("Hello world");', lab.render_output())
def test_render_output_script_contains_src(self):
lab = Labjs('<script src="/static/script.js"></script>')
self.assertIn('/static/script.js', lab.render_output())
class TestLabjsNode(TestCase):
@override_settings(LABJS_DEBUG_TOGGLE='labjs')
def test_debug_mode_no_request_context(self):
node = LabjsNode(None)
context = {}
self.assertFalse(node.debug_mode(context))
@override_settings(LABJS_DEBUG_TOGGLE='labjs')
def test_debug_mode_no_toggle(self):
node = LabjsNode(None)
context = {
'request': RequestFactory().get('/'),
}
self.assertFalse(node.debug_mode(context))
@override_settings(LABJS_DEBUG_TOGGLE='labjs')
def test_debug_mode_with_toggle(self):
node = LabjsNode(None)
context = {
'request': RequestFactory().get('/?labjs=1'),
}
self.assertTrue(node.debug_mode(context))
@override_settings(LABJS_DEBUG_TOGGLE=None)
def test_debug_mode_setting_undefined(self):
node = LabjsNode(None)
context = {
'request': RequestFactory().get('/?labjs='),
}
self.assertFalse(node.debug_mode(context))
@override_settings(LABJS_ENABLED=False)
def test_disabled_leaves_content_as_original(self):
node = LabjsNode(FakeNode())
context = {
'request': RequestFactory().get('/?labjs='),
}
self.assertEqual(node.render(context), 'some content')
class TestWaitNode(TestCase):
def test_wait_node_renders_as_empty_script(self):
self.assertHTMLEqual(
Wait().render(template.Context({})),
'<script type="text/javascript"></script>'
)
class TestTemplateTags(TestCase):
def test_runlabjs_output_includes_runQueue(self):
t = template.Template('{% load labjs %}{% runlabjs %}')
self.assertIn('runQueue', t.render(template.Context({})))
| {
"content_hash": "cb60e7e12d33cba0da09cdafd2bbd03d",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 76,
"avg_line_length": 31.64761904761905,
"alnum_prop": 0.626241348179356,
"repo_name": "ashwoods/django-labjs",
"id": "b837f9a44e043facc5a89b356b712f558634ae87",
"size": "3347",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "labJS/tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "19398"
},
{
"name": "Python",
"bytes": "12909"
}
],
"symlink_target": ""
} |
from enum import Enum
from azure.core import CaseInsensitiveEnumMeta
class AccessPolicyUpdateKind(str, Enum, metaclass=CaseInsensitiveEnumMeta):
ADD = "add"
REPLACE = "replace"
REMOVE = "remove"
class CertificatePermissions(str, Enum, metaclass=CaseInsensitiveEnumMeta):
GET = "get"
LIST = "list"
DELETE = "delete"
CREATE = "create"
IMPORT_ENUM = "import"
UPDATE = "update"
MANAGECONTACTS = "managecontacts"
GETISSUERS = "getissuers"
LISTISSUERS = "listissuers"
SETISSUERS = "setissuers"
DELETEISSUERS = "deleteissuers"
MANAGEISSUERS = "manageissuers"
RECOVER = "recover"
PURGE = "purge"
BACKUP = "backup"
RESTORE = "restore"
class CreateMode(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""The vault's create mode to indicate whether the vault need to be recovered or not.
"""
RECOVER = "recover"
DEFAULT = "default"
class KeyPermissions(str, Enum, metaclass=CaseInsensitiveEnumMeta):
ENCRYPT = "encrypt"
DECRYPT = "decrypt"
WRAP_KEY = "wrapKey"
UNWRAP_KEY = "unwrapKey"
SIGN = "sign"
VERIFY = "verify"
GET = "get"
LIST = "list"
CREATE = "create"
UPDATE = "update"
IMPORT_ENUM = "import"
DELETE = "delete"
BACKUP = "backup"
RESTORE = "restore"
RECOVER = "recover"
PURGE = "purge"
class NetworkRuleAction(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""The default action when no rule from ipRules and from virtualNetworkRules match. This is only
used after the bypass property has been evaluated.
"""
ALLOW = "Allow"
DENY = "Deny"
class NetworkRuleBypassOptions(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""Tells what traffic can bypass network rules. This can be 'AzureServices' or 'None'. If not
specified the default is 'AzureServices'.
"""
AZURE_SERVICES = "AzureServices"
NONE = "None"
class PrivateEndpointConnectionProvisioningState(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""The current provisioning state.
"""
SUCCEEDED = "Succeeded"
CREATING = "Creating"
UPDATING = "Updating"
DELETING = "Deleting"
FAILED = "Failed"
DISCONNECTED = "Disconnected"
class PrivateEndpointServiceConnectionStatus(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""The private endpoint connection status.
"""
PENDING = "Pending"
APPROVED = "Approved"
REJECTED = "Rejected"
DISCONNECTED = "Disconnected"
class Reason(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""The reason that a vault name could not be used. The Reason element is only returned if
NameAvailable is false.
"""
ACCOUNT_NAME_INVALID = "AccountNameInvalid"
ALREADY_EXISTS = "AlreadyExists"
class SecretPermissions(str, Enum, metaclass=CaseInsensitiveEnumMeta):
GET = "get"
LIST = "list"
SET = "set"
DELETE = "delete"
BACKUP = "backup"
RESTORE = "restore"
RECOVER = "recover"
PURGE = "purge"
class SkuFamily(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""SKU family name
"""
A = "A"
class SkuName(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""SKU name to specify whether the key vault is a standard vault or a premium vault.
"""
STANDARD = "standard"
PREMIUM = "premium"
class StoragePermissions(str, Enum, metaclass=CaseInsensitiveEnumMeta):
GET = "get"
LIST = "list"
DELETE = "delete"
SET = "set"
UPDATE = "update"
REGENERATEKEY = "regeneratekey"
RECOVER = "recover"
PURGE = "purge"
BACKUP = "backup"
RESTORE = "restore"
SETSAS = "setsas"
LISTSAS = "listsas"
GETSAS = "getsas"
DELETESAS = "deletesas"
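# Illustrative note (not part of the generated enums above): because these
# enums use azure.core's CaseInsensitiveEnumMeta and also subclass str, member
# lookup by name is case-insensitive and values compare equal to plain strings,
# e.g. (assuming azure-core's documented behaviour):
#
#     SkuName["standard"] is SkuName.STANDARD     # name lookup ignores case
#     SkuName.STANDARD == "standard"              # str-valued enum member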
| {
"content_hash": "85c6af05bb8962b1e79fdb051c40934e",
"timestamp": "",
"source": "github",
"line_count": 139,
"max_line_length": 100,
"avg_line_length": 26.597122302158272,
"alnum_prop": 0.67243711117122,
"repo_name": "Azure/azure-sdk-for-python",
"id": "c32d9677a80bb0f2d7f838c16edb9af927017e1c",
"size": "4165",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/keyvault/azure-mgmt-keyvault/azure/mgmt/keyvault/v2018_02_14/models/_key_vault_management_client_enums.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
VERSION = '0.1.0.dev1'
def ensure_str(arg):
return type(arg) == Str and arg or Str(arg)
def return_str(func):
""" ensure to return Str """
def method(*args, **kwargs):
return ensure_str(func(*args, **kwargs))
return method
class Str(str):
@classmethod
def is_str(cls, s):
"""
is str or Str ?
:rtype: boolean
"""
return isinstance(s, str) or type(s) == cls
@return_str
def camelise(self):
"""
'foo-bar_is back' => 'FooBarIsBack'
:rtype: Str
"""
if len(self) == 0:
return self
parts = self.replace('-', '_').replace(' ', '_').split("_")
return ''.join([p[0].upper() + p[1:].lower() for p in parts])
@return_str
def remove_start(self, start):
"""
remove_start('foo-bar', 'foo-') => 'bar'
:type start: str/Str
:rtype: Str
"""
assert self.__class__.is_str(start)
assert len(start) <= len(self)
if len(self) == 0 or len(start) == 0:
return self
return self[len(start):]
@return_str
def remove_end(self, end):
"""
remove_end('foo-bar', '-bar') => 'foo'
:type end: str/Str
:rtype: Str
"""
assert self.__class__.is_str(end)
assert len(end) <= len(self)
if len(self) == 0 or len(end) == 0:
return self
return self[:len(end) * -1]
def slug_validate(self, special_chars='_-'):
"""
check slug validity (only alpha or digits) + special chars
:type special_chars: str/Str
:rtype: bool
"""
assert self.__class__.is_str(special_chars)
if len(self) == 0:
return False
import string
letters = string.ascii_letters + string.digits + special_chars
return all([c in letters for c in self])
# WRAPPERS
@staticmethod
@return_str
def uuid(hex=True):
"""
get uuid
deps: pip install uuid
:param hex: True will return uuid without dashes '-'
:type hex: bool
:rtype: Str
"""
assert isinstance(hex, bool)
import uuid
res = uuid.uuid4()
res = hex and res.hex or str(res)
assert isinstance(res, str)
return res
def uuid_validate(self, hex=True):
"""
is uuid valid ? (with dash or not)
:param hex: True will not have dashes '-'
:type hex: bool
:rtype : bool
"""
assert isinstance(hex, bool)
import re
        pattern = hex and r'[0-9a-f]{32}\Z' or r'^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}\Z'
return bool(re.compile(pattern, re.I).match(self))
@staticmethod
@return_str
def json_encode(data):
"""
convert data to json
:param data: data to serialize in json
:type data: dict/list
:rtype Str
"""
assert isinstance(data, (dict, list))
import json
serialised = json.dumps(data)
assert isinstance(serialised, str)
return serialised
def json_parse(self):
"""
parse json
:rtype dict
"""
import json
data = json.loads(self)
assert isinstance(data, (dict, list))
return data
@return_str
def parse_template(self, context=None):
"""
parse python string template
:type context: dict/None
:rtype Str
"""
import string
if context is not None:
assert isinstance(context, dict)
res = string.Template(self).safe_substitute(context)
assert isinstance(res, str)
return res
@return_str
def parse_jinja(self, context=None):
"""
parse jinja2 template
deps: pip install jinja2
:type context: dict/None
:rtype Str
"""
import jinja2
if context is not None:
assert isinstance(context, dict)
res = jinja2.Template(self).render(context)
assert isinstance(res, str)
return res
@return_str
def parse_markdown(self):
"""
parse markdown
deps: pip install markdown
:rtype Str
"""
import markdown
res = markdown.markdown(self)
assert isinstance(res, str)
return res
@return_str
def linkify(self):
"""
'http://example.com' => '<a href="http://example.com" rel="nofollow">http://example.com</a>'
deps: pip install bleach
:rtype Str
"""
import bleach
res = bleach.linkify(self)
assert isinstance(res, str)
return res
@return_str
def html_clean(self):
"""
        '<script>evil()</script>' => '&lt;script&gt;evil()&lt;/script&gt;'
deps: pip install bleach
:rtype Str
"""
import bleach
res = bleach.clean(self)
assert isinstance(res, str)
return res
def s(s):
"""Str alias"""
return ensure_str(s)
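# ---------------------------------------------------------------------------
# Editor's addition: a minimal, hedged smoke test for the pure-stdlib helpers
# above (camelise, remove_start, slug_validate and the json wrappers).  It is
# illustrative only and not part of the published package.
if __name__ == '__main__':
    assert s('foo-bar_is back').camelise() == 'FooBarIsBack'
    assert s('foo-bar').remove_start('foo-') == 'bar'
    assert s('foo_bar-1').slug_validate()
    payload = Str.json_encode({'answer': 42})
    assert s(payload).json_parse() == {'answer': 42}
    print('overstr smoke test passed')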
| {
"content_hash": "387042bb58d2dcf8d88a2c4aca6ec40e",
"timestamp": "",
"source": "github",
"line_count": 208,
"max_line_length": 111,
"avg_line_length": 24.60096153846154,
"alnum_prop": 0.5194449872972445,
"repo_name": "pyseed/overstr",
"id": "85edf47bd1e91c89277ce155f2fe3aa73f822b9c",
"size": "5164",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "overstr/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "13293"
}
],
"symlink_target": ""
} |
import unittest
import logging, sys
from time import sleep
# Need a serial communication component for this test
from SerialCommunication import SerialCommunication
from CmdResponseDefinitions import *
import comm_packet_pb2
# Define a list of way-points for the purpose of testing
test_way_point_1 = comm_packet_pb2.CommandPacket()
test_way_point_1.WayPointCmd.Heading = 45
test_way_point_1.WayPointCmd.Distance = 0.5
test_way_point_1.WayPointCmd.Name = "WayPoint A"
test_way_point_2 = comm_packet_pb2.CommandPacket()
test_way_point_2.WayPointCmd.Heading = 90
test_way_point_2.WayPointCmd.Distance = 2.0
test_way_point_2.WayPointCmd.Name = "WayPoint B"
test_way_point_3 = comm_packet_pb2.CommandPacket()
test_way_point_3.WayPointCmd.Heading = 180
test_way_point_3.WayPointCmd.Distance = 3.0
test_way_point_3.WayPointCmd.Name = "WayPoint C"
test_route = [test_way_point_1, test_way_point_2, test_way_point_3]
class UtSerialCommunication(unittest.TestCase):
def setUp(self):
self.testArticle = SerialCommunication("/dev/ttyUSB0")
def test_sendCmdBadType(self):
logging.info("Sending an invalid type way point command")
cmd_packet = None
self.assertRaises(TypeError, self.testArticle.commandArduino, cmd_packet)
cmd_packet = 3
self.assertRaises(TypeError, self.testArticle.commandArduino, cmd_packet)
def test_sendNoNameWayPointCmd(self):
logging.info("Sending no name way point command")
cmd_packet = comm_packet_pb2.CommandPacket()
cmd_packet.WayPointCmd.Heading = 45.0
cmd_packet.WayPointCmd.Distance = 0.5
self.assertRaises(IOError, self.testArticle.commandArduino, cmd_packet)
def test_sendNoHeadingWayPointCmd(self):
logging.info("Sending no heading way point command")
cmd_packet = comm_packet_pb2.CommandPacket()
cmd_packet.WayPointCmd.Distance = 0.5
cmd_packet.WayPointCmd.Name = "WayPoint A"
self.assertRaises(IOError, self.testArticle.commandArduino, cmd_packet)
def test_sendNoDistanceWayPointCmd(self):
logging.info("Sending no distance way point command")
cmd_packet = comm_packet_pb2.CommandPacket()
cmd_packet.WayPointCmd.Heading = 45.0
cmd_packet.WayPointCmd.Name = "WayPoint A"
self.assertRaises(IOError, self.testArticle.commandArduino, cmd_packet)
def test_sendEmptyWayPointCmd(self):
logging.info("Sending empty command")
cmd_packet = comm_packet_pb2.CommandPacket()
# OK to send an empty command as long as it's of type CommandPacket
# Just don't expect a response
response = self.testArticle.commandArduino(cmd_packet)
        print(response)
def test_commandOneWayPoint(self):
response = self.helper_SendOneWayPoint(test_route[0])
self.helper_checkResponse(response)
def test_commandRoute(self):
for test_way_point in test_route:
response = self.helper_SendOneCmdPacket(test_way_point)
self.helper_checkResponse(response)
sleep(7)
def test_getActiveWayPoint(self):
logging.info("Sending get active waypoint command")
cmd_packet = comm_packet_pb2.CommandPacket()
control_signal_cmd = cmd_packet.RoverCmds.add()
control_signal_cmd.Id = WP_GET_ACTIVE
response = self.helper_SendOneCmdPacket(cmd_packet)
self.helper_checkResponse(response)
#logging.info("The active waypoint is : " + response.ActiveWayPoint)
def test_commandTestDrive(self):
logging.info("Sending test drive command")
cmd_packet = comm_packet_pb2.CommandPacket()
control_signal_cmd = cmd_packet.RoverCmds.add()
control_signal_cmd.Id = DO_TEST_DRIVE
response = self.helper_SendOneCmdPacket(cmd_packet)
self.helper_checkResponse(response)
def helper_SendOneWayPoint(self, cmd_packet):
logging.info("Sending way point command : " + cmd_packet.WayPointCmd.Name)
return self.helper_SendOneCmdPacket(cmd_packet)
def helper_SendOneCmdPacket(self, cmd_packet):
return self.testArticle.commandArduino(cmd_packet)
def helper_checkResponse(self, response):
if response:
logging.info("Success Packet # : " + str(self.testArticle.NumReceivedPackets))
logging.info("Dumping received packet : \n" + str(response))
self.assertIsInstance(response, comm_packet_pb2.TelemetryPacket)
else:
logging.info("Failed Packet # : " + str(self.testArticle.NumFailedPackets))
self.assertIsNone(response)
self.assertTrue(self.testArticle.NumFailedPackets >= 1)
if __name__ == '__main__':
logging.basicConfig(stream=sys.stderr, level=logging.DEBUG, format='%(levelname)s:%(message)s')
# Run the unit-tests
suite = unittest.TestLoader().loadTestsFromTestCase(UtSerialCommunication)
unittest.TextTestRunner(verbosity=2).run(suite)
| {
"content_hash": "e1eb4b59ec6d0f3b0352044a901f729c",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 99,
"avg_line_length": 42.39316239316239,
"alnum_prop": 0.7030241935483871,
"repo_name": "mike-moore/wheel_bot",
"id": "6b333ff072eec57855b5617e71b6cbffef145856",
"size": "4979",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "utils/UtSerialCommunication.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Arduino",
"bytes": "5464"
},
{
"name": "Batchfile",
"bytes": "435"
},
{
"name": "C",
"bytes": "109940"
},
{
"name": "C++",
"bytes": "48037"
},
{
"name": "CMake",
"bytes": "10024"
},
{
"name": "Makefile",
"bytes": "1702"
},
{
"name": "Protocol Buffer",
"bytes": "41161"
},
{
"name": "Python",
"bytes": "76688"
},
{
"name": "Shell",
"bytes": "456"
}
],
"symlink_target": ""
} |
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'RevisionDate.day'
db.add_column(u'parsr_revisiondate', 'day',
self.gf('django.db.models.fields.IntegerField')(default=1),
keep_default=False)
# Adding field 'RevisionDate.weekday'
db.add_column(u'parsr_revisiondate', 'weekday',
self.gf('django.db.models.fields.IntegerField')(default=0),
keep_default=False)
def backwards(self, orm):
# Deleting field 'RevisionDate.day'
db.delete_column(u'parsr_revisiondate', 'day')
# Deleting field 'RevisionDate.weekday'
db.delete_column(u'parsr_revisiondate', 'weekday')
models = {
u'parsr.author': {
'Meta': {'object_name': 'Author'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'repo': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['parsr.Repo']", 'null': 'True', 'blank': 'True'})
},
u'parsr.file': {
'Meta': {'object_name': 'File'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'revision': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['parsr.Revision']"})
},
u'parsr.repo': {
'Meta': {'object_name': 'Repo'},
'analyzed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'analyzed_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'analyzing': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kind': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'user': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
u'parsr.revision': {
'Meta': {'object_name': 'Revision'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['parsr.Author']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identifier': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'repo': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['parsr.Repo']"}),
'revision_date': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['parsr.RevisionDate']", 'null': 'True', 'blank': 'True'})
},
u'parsr.revisiondate': {
'Meta': {'object_name': 'RevisionDate'},
'day': ('django.db.models.fields.IntegerField', [], {}),
'day_of_month': ('django.db.models.fields.IntegerField', [], {}),
'day_of_week': ('django.db.models.fields.IntegerField', [], {}),
'hour': ('django.db.models.fields.IntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'minute': ('django.db.models.fields.IntegerField', [], {}),
'month': ('django.db.models.fields.IntegerField', [], {}),
'weekday': ('django.db.models.fields.IntegerField', [], {}),
'year': ('django.db.models.fields.IntegerField', [], {})
}
}
    complete_apps = ['parsr']
| {
"content_hash": "362489237d1c41af1ac45cc41062b6ed",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 150,
"avg_line_length": 53.3421052631579,
"alnum_prop": 0.5421805624074988,
"repo_name": "frontendphil/analyzr",
"id": "4a98a1c02c1a97fc01d974beb0ee5cde58066fc8",
"size": "4078",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "parsr/migrations/0012_auto__add_field_revisiondate_day__add_field_revisiondate_weekday.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "28706"
},
{
"name": "HTML",
"bytes": "21250"
},
{
"name": "JavaScript",
"bytes": "177454"
},
{
"name": "Python",
"bytes": "535628"
}
],
"symlink_target": ""
} |
'''Example settings/local.py file.
These settings override what's in website/settings/defaults.py
NOTE: local.py will not be added to source control.
'''
from . import defaults
DB_PORT = 27017
DEV_MODE = True
DEBUG_MODE = True # Sets app to debug mode, turns off template caching, etc.
SEARCH_ENGINE = 'elastic'
USE_EMAIL = False
USE_CELERY = False
USE_GNUPG = False
# Email
MAIL_SERVER = 'localhost:1025' # For local testing
MAIL_USERNAME = 'osf-smtp'
MAIL_PASSWORD = 'CHANGEME'
# Session
COOKIE_NAME = 'osf'
SECRET_KEY = "CHANGEME"
##### Celery #####
## Default RabbitMQ broker
BROKER_URL = 'amqp://'
# Default RabbitMQ backend
CELERY_RESULT_BACKEND = 'amqp://'
USE_CDN_FOR_CLIENT_LIBS = False
SENTRY_DSN = None
| {
"content_hash": "cf3c05c896ff41837854a49762db5f40",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 77,
"avg_line_length": 19.64864864864865,
"alnum_prop": 0.7097661623108665,
"repo_name": "AndrewSallans/osf.io",
"id": "a6d1f23710f892cf839bc89d459f2860bbfb4786",
"size": "751",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "website/settings/local-travis.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "70439"
},
{
"name": "JavaScript",
"bytes": "2555546"
},
{
"name": "Python",
"bytes": "2159449"
}
],
"symlink_target": ""
} |
import os
from emuvim.api.openstack.resources.net import Net
from emuvim.api.osm.kafka import Kafka
from emuvim.api.osm.lcm import LCM
from emuvim.api.osm.mongo import Mongo
from emuvim.api.osm.mysql import Mysql
from emuvim.api.osm.nbi import NBI
from emuvim.api.osm.ro import RO
from emuvim.api.osm.zookeeper import Zookeeper
class OSM:
def __init__(self, net,
switch,
name='osm',
vca_host=os.environ.get('VCA_HOST'),
vca_secret=os.environ.get('VCA_SECRET'),
osm_version='releasefive-daily',
ip_start='10.0.0.100'):
ip_int = Net.ip_2_int(ip_start)
zookeeper_ip = ip_start
kafka_ip = Net.int_2_ip(ip_int + 1)
mongo_ip = Net.int_2_ip(ip_int + 2)
nbi_ip = Net.int_2_ip(ip_int + 3)
ro_db_ip = Net.int_2_ip(ip_int + 4)
ro_ip = Net.int_2_ip(ip_int + 5)
lcm_ip = Net.int_2_ip(ip_int + 6)
name_prefix = '%s-' % name
self.zookeeper = Zookeeper(net, '%s/16' % zookeeper_ip, name_prefix=name_prefix)
self.kafka = Kafka(net, '%s/16' % kafka_ip, zookeeper_ip, name_prefix=name_prefix)
self.mongo = Mongo(net, '%s/16' % mongo_ip, name_prefix=name_prefix)
self.nbi = NBI(net, '%s/16' % nbi_ip, mongo_ip, kafka_ip, version=osm_version, name_prefix=name_prefix)
self.ro_db = Mysql(net, '%s/16' % ro_db_ip, name_prefix=name_prefix)
self.ro = RO(net, '%s/16' % ro_ip, ro_db_ip, version=osm_version, name_prefix=name_prefix)
self.lcm = LCM(net, '%s/16' % lcm_ip, ro_ip, mongo_ip, kafka_ip,
vca_host, vca_secret, version=osm_version, name_prefix=name_prefix)
net.addLink(self.zookeeper.instance, switch)
net.addLink(self.kafka.instance, switch)
net.addLink(self.mongo.instance, switch)
net.addLink(self.nbi.instance, switch)
net.addLink(self.ro_db.instance, switch)
net.addLink(self.ro.instance, switch)
net.addLink(self.lcm.instance, switch)
def start(self):
self.zookeeper.start()
self.kafka.start()
self.mongo.start()
self.nbi.start()
self.ro_db.start()
self.ro.start()
self.lcm.start()
# forward api related calls
def onboard_vnfd(self, *args, **kwargs):
return self.nbi.onboard_vnfd(*args, **kwargs)
def onboard_nsd(self, *args, **kwargs):
return self.nbi.onboard_nsd(*args, **kwargs)
def register_emulated_api(self, *args, **kwargs):
return self.nbi.register_emulated_api(*args, **kwargs)
def ns_list(self):
return self.nbi.ns_list()
def ns_create(self, *args, **kwargs):
return self.nbi.ns_create(*args, **kwargs)
def ns_delete(self, *args, **kwargs):
return self.nbi.ns_delete(*args, **kwargs)
def ns_get(self, *args, **kwargs):
return self.nbi.ns_get(*args, **kwargs)
def ns_action(self, *args, **kwargs):
return self.nbi.ns_action(*args, **kwargs)
def ns_wait_until_all_in_status(self, *args, **kwargs):
return self.nbi.ns_wait_until_all_in_status(*args, **kwargs)
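# Editor's addition: hedged wiring sketch, not part of the original module.
# Class names outside this file (DCNetwork and its addSwitch/start/stop
# methods, and the constructor keywords) are assumptions based on son-emu's
# public examples; check the emulator docs before relying on them.
if __name__ == '__main__':
    from emuvim.dcemulator.net import DCNetwork
    net = DCNetwork(monitor=False, enable_learning=True)
    s1 = net.addSwitch('s1')
    osm = OSM(net, s1, name='osm')
    net.start()
    osm.start()
    # ... osm.onboard_vnfd(...), osm.ns_create(...), etc. ...
    net.stop()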
| {
"content_hash": "c88b81f8e3956264157d91792e98f703",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 111,
"avg_line_length": 38.024096385542165,
"alnum_prop": 0.6023447401774398,
"repo_name": "mpeuster/son-emu",
"id": "f4de8b9121240ada25fa21dcd3a62a1e98f02308",
"size": "3784",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/emuvim/api/osm/osm.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1766"
},
{
"name": "Dockerfile",
"bytes": "2767"
},
{
"name": "HTML",
"bytes": "4641"
},
{
"name": "JavaScript",
"bytes": "9195"
},
{
"name": "Python",
"bytes": "832407"
},
{
"name": "Shell",
"bytes": "6423"
}
],
"symlink_target": ""
} |
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('accounts', '0038_profile_theme'),
]
operations = [
migrations.RemoveField(
model_name='profile',
name='reports_allowed',
),
]
| {
"content_hash": "2b12d0902180dd1e82a02e5097f99894",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 43,
"avg_line_length": 19,
"alnum_prop": 0.5684210526315789,
"repo_name": "healthchecks/healthchecks",
"id": "7165c48020fd790050be4824e8c9b2522a90e3e6",
"size": "334",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "hc/accounts/migrations/0039_remove_profile_reports_allowed.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "65959"
},
{
"name": "Dockerfile",
"bytes": "1088"
},
{
"name": "HTML",
"bytes": "716643"
},
{
"name": "JavaScript",
"bytes": "50869"
},
{
"name": "Less",
"bytes": "211300"
},
{
"name": "Python",
"bytes": "1043149"
},
{
"name": "Shell",
"bytes": "1655"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, print_function, division
import warnings
import numpy as np
import astropy.units as u
__all__ = ["_get_x_in_wavenumbers", "_test_valid_x_range"]
def _get_x_in_wavenumbers(in_x):
"""
Convert input x to wavenumber given x has units.
    Otherwise, assume x is in wavenumbers and issue a warning to this effect.
Parameters
----------
in_x : astropy.quantity or simple floats
x values
Returns
-------
x : floats
input x values in wavenumbers w/o units
"""
    # handles the case where x is a scalar
in_x = np.atleast_1d(in_x)
# check if in_x is an astropy quantity, if not issue a warning
if not isinstance(in_x, u.Quantity):
warnings.warn(
"x has no units, assuming x units are inverse microns", UserWarning
)
# convert to wavenumbers (1/micron) if x input in units
# otherwise, assume x in appropriate wavenumber units
with u.add_enabled_equivalencies(u.spectral()):
x_quant = u.Quantity(in_x, 1.0 / u.micron, dtype=np.float64)
# strip the quantity to avoid needing to add units to all the
    # polynomial coefficients
return x_quant.value
def _test_valid_x_range(x, x_range, outname):
"""
Test if any of the x values are outside of the valid range
Parameters
----------
x : float array
wavenumbers in inverse microns
x_range: 2 floats
allowed min/max of x
outname: str
name of curve for error message
"""
if np.logical_or(np.any(x < x_range[0]), np.any(x > x_range[1])):
raise ValueError(
"Input x outside of range defined for "
+ outname
+ " ["
+ str(x_range[0])
+ " <= x <= "
+ str(x_range[1])
+ ", x has units 1/micron]"
)
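# Editor's addition: a short, hedged usage sketch for the two helpers above.
# It relies only on numpy and astropy.units, which this module already imports.
if __name__ == "__main__":
    # 0.5 micron converts to 2.0 inverse microns via the u.spectral() equivalency
    x = _get_x_in_wavenumbers(0.5 * u.micron)
    print(x)  # -> [2.]
    # values outside the supported range raise a ValueError
    try:
        _test_valid_x_range(x, [3.0, 10.0], "ExampleCurve")
    except ValueError as exc:
        print(exc)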
| {
"content_hash": "babfa0cece9cd249ee9a1a7fc165d50d",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 79,
"avg_line_length": 27,
"alnum_prop": 0.595276435856146,
"repo_name": "karllark/dust_extinction",
"id": "d6f51717f6d85bbd094c6a0b9c312827f6d8db70",
"size": "1863",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dust_extinction/helpers.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "162641"
}
],
"symlink_target": ""
} |
import pprint
import webob.dec
@webob.dec.wsgify
def echo_app(request):
"""A WSGI application that echoes the CGI environment to the user."""
return webob.Response(content_type='application/json',
body=pprint.pformat(request.environ, indent=4))
def echo_app_factory(global_conf, **local_conf):
return echo_app
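# Editor's addition: hedged sketch of serving the echo app locally with the
# standard library's wsgiref server; nothing below is part of the package API.
if __name__ == '__main__':
    from wsgiref.simple_server import make_server
    # webob.dec.wsgify turns echo_app into a plain WSGI callable
    httpd = make_server('127.0.0.1', 8000, echo_app)
    print('Echoing the CGI environment on http://127.0.0.1:8000/')
    httpd.serve_forever()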
| {
"content_hash": "2992068e82b07b9530f6ece99e4c385a",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 73,
"avg_line_length": 27.076923076923077,
"alnum_prop": 0.6846590909090909,
"repo_name": "jamielennox/keystonemiddleware-echo",
"id": "6488ce39db8d647c175f882502671ab10e69b2bb",
"size": "898",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "keystonemiddleware_echo/app.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "6423"
}
],
"symlink_target": ""
} |
import contextlib
import xcffib
import xcffib.xproto
from . import command
from . import hook
from . import window
from . import utils
from .log_utils import logger
class _Group(command.CommandObject):
"""A container for a bunch of windows
Analogous to workspaces in other window managers. Each client window
managed by the window manager belongs to exactly one group.
"""
def __init__(self, name, layout=None):
self.name = name
self.customLayout = layout # will be set on _configure
self.windows = set()
self.qtile = None
self.layouts = []
self.floating_layout = None
# self.focusHistory lists the group's windows in the order they
# received focus, from the oldest (first item) to the currently
# focused window (last item); NB the list does *not* contain any
# windows that never received focus; refer to self.windows for the
# complete set
self.focusHistory = []
self.screen = None
self.currentLayout = None
def _configure(self, layouts, floating_layout, qtile):
self.screen = None
self.currentLayout = 0
self.focusHistory = []
self.windows = set()
self.qtile = qtile
self.layouts = [i.clone(self) for i in layouts]
self.floating_layout = floating_layout
if self.customLayout is not None:
self.layout = self.customLayout
self.customLayout = None
@property
def currentWindow(self):
try:
return self.focusHistory[-1]
except IndexError:
# no window has focus
return None
@currentWindow.setter
def currentWindow(self, win):
try:
self.focusHistory.remove(win)
except ValueError:
# win has never received focus before
pass
self.focusHistory.append(win)
def _remove_from_focus_history(self, win):
try:
index = self.focusHistory.index(win)
except ValueError:
# win has never received focus
return False
else:
del self.focusHistory[index]
# return True if win was the last item (i.e. it was currentWindow)
return index == len(self.focusHistory)
@property
def layout(self):
return self.layouts[self.currentLayout]
@layout.setter
def layout(self, layout):
"""
Parameters
==========
layout :
a string with matching the name of a Layout object.
"""
for index, obj in enumerate(self.layouts):
if obj.name == layout:
self.currentLayout = index
hook.fire(
"layout_change",
self.layouts[self.currentLayout],
self
)
self.layoutAll()
return
raise ValueError("No such layout: %s" % layout)
def toLayoutIndex(self, index):
assert 0 <= index < len(self.layouts), "layout index out of bounds"
self.layout.hide()
self.currentLayout = index
hook.fire("layout_change", self.layouts[self.currentLayout], self)
self.layoutAll()
screen = self.screen.get_rect()
self.layout.show(screen)
def nextLayout(self):
self.toLayoutIndex((self.currentLayout + 1) % (len(self.layouts)))
def prevLayout(self):
self.toLayoutIndex((self.currentLayout - 1) % (len(self.layouts)))
def layoutAll(self, warp=False):
"""Layout the floating layer, then the current layout.
        If we have a currentWindow, give it focus, optionally warping the
        pointer to it.
"""
if self.screen and len(self.windows):
with self.disableMask(xcffib.xproto.EventMask.EnterWindow):
normal = [x for x in self.windows if not x.floating]
floating = [
x for x in self.windows
if x.floating and not x.minimized
]
screen = self.screen.get_rect()
if normal:
try:
self.layout.layout(normal, screen)
                    except Exception:
logger.exception("Exception in layout %s",
self.layout.name)
if floating:
self.floating_layout.layout(floating, screen)
if self.currentWindow and \
self.screen == self.qtile.currentScreen:
self.currentWindow.focus(warp)
def _setScreen(self, screen):
"""Set this group's screen to new_screen"""
if screen == self.screen:
return
self.screen = screen
if self.screen:
# move all floating guys offset to new screen
self.floating_layout.to_screen(self, self.screen)
self.layoutAll()
rect = self.screen.get_rect()
self.floating_layout.show(rect)
self.layout.show(rect)
else:
self.hide()
def hide(self):
self.screen = None
with self.disableMask(xcffib.xproto.EventMask.EnterWindow |
xcffib.xproto.EventMask.FocusChange |
xcffib.xproto.EventMask.LeaveWindow):
for i in self.windows:
i.hide()
self.layout.hide()
@contextlib.contextmanager
def disableMask(self, mask):
for i in self.windows:
i._disableMask(mask)
yield
for i in self.windows:
i._resetMask()
def focus(self, win, warp=True, force=False):
"""Focus the given window
If win is in the group, blur any windows and call ``focus`` on the
layout (in case it wants to track anything), fire focus_change hook and
invoke layoutAll.
Parameters
==========
win :
Window to focus
warp :
Warp pointer to win. This should basically always be True, unless
the focus event is coming from something like EnterNotify, where
the user is actively using the mouse, or on full screen layouts
where only one window is "maximized" at a time, and it doesn't make
sense for the mouse to automatically move.
"""
if self.qtile._drag and not force:
# don't change focus while dragging windows (unless forced)
return
if win:
if win not in self.windows:
return
self.currentWindow = win
if win.floating:
for l in self.layouts:
l.blur()
self.floating_layout.focus(win)
else:
self.floating_layout.blur()
for l in self.layouts:
l.focus(win)
hook.fire("focus_change")
# !!! note that warp isn't hooked up now
self.layoutAll(warp)
def info(self):
return dict(
name=self.name,
focus=self.currentWindow.name if self.currentWindow else None,
windows=[i.name for i in self.windows],
focusHistory=[i.name for i in self.focusHistory],
layout=self.layout.name,
layouts=[l.name for l in self.layouts],
floating_info=self.floating_layout.info(),
screen=self.screen.index if self.screen else None
)
def add(self, win, focus=True, force=False):
hook.fire("group_window_add")
self.windows.add(win)
win.group = self
try:
if 'fullscreen' in win.window.get_net_wm_state() and \
self.qtile.config.auto_fullscreen:
win._float_state = window.FULLSCREEN
elif self.floating_layout.match(win):
# !!! tell it to float, can't set floating
# because it's too early
# so just set the flag underneath
win._float_state = window.FLOATING
except (xcffib.xproto.WindowError, xcffib.xproto.AccessError):
pass # doesn't matter
if win.floating:
self.floating_layout.add(win)
else:
for i in self.layouts:
i.add(win)
if focus:
self.focus(win, warp=True, force=force)
def remove(self, win, force=False):
self.windows.remove(win)
hadfocus = self._remove_from_focus_history(win)
win.group = None
if win.floating:
nextfocus = self.floating_layout.remove(win)
nextfocus = nextfocus or \
self.currentWindow or \
self.layout.focus_first() or \
self.floating_layout.focus_first(group=self)
else:
for i in self.layouts:
if i is self.layout:
nextfocus = i.remove(win)
else:
i.remove(win)
nextfocus = nextfocus or \
self.floating_layout.focus_first(group=self) or \
self.currentWindow or \
self.layout.focus_first()
# a notification may not have focus
if hadfocus:
self.focus(nextfocus, warp=True, force=force)
# no next focus window means focus changed to nothing
if not nextfocus:
hook.fire("focus_change")
elif self.screen:
self.layoutAll()
def mark_floating(self, win, floating):
if floating:
if win in self.floating_layout.find_clients(self):
# already floating
pass
else:
for i in self.layouts:
i.remove(win)
if win is self.currentWindow:
i.blur()
self.floating_layout.add(win)
if win is self.currentWindow:
self.floating_layout.focus(win)
else:
self.floating_layout.remove(win)
self.floating_layout.blur()
for i in self.layouts:
i.add(win)
if win is self.currentWindow:
i.focus(win)
self.layoutAll()
def _items(self, name):
if name == "layout":
return (True, list(range(len(self.layouts))))
elif name == "window":
return (True, [i.window.wid for i in self.windows])
elif name == "screen":
return (True, None)
def _select(self, name, sel):
if name == "layout":
if sel is None:
return self.layout
else:
return utils.lget(self.layouts, sel)
elif name == "window":
if sel is None:
return self.currentWindow
else:
for i in self.windows:
if i.window.wid == sel:
return i
elif name == "screen":
return self.screen
def cmd_setlayout(self, layout):
self.layout = layout
def cmd_info(self):
"""Returns a dictionary of info for this group"""
return self.info()
def cmd_toscreen(self, screen=None):
"""Pull a group to a specified screen.
Parameters
==========
screen :
Screen offset. If not specified, we assume the current screen.
Examples
========
Pull group to the current screen::
toscreen()
Pull group to screen 0::
toscreen(0)
"""
if screen is None:
screen = self.qtile.currentScreen
else:
screen = self.qtile.screens[screen]
screen.setGroup(self)
def _dirGroup(self, direction, skip_empty=False, skip_managed=False):
"""Find a group walking the groups list in the specified direction
Parameters
==========
skip_empty :
skips the empty groups
skip_managed :
skips the groups that have a screen
"""
def match(group):
if group is self:
return True
if skip_empty and not group.windows:
return False
if skip_managed and group.screen:
return False
return True
groups = [group for group in self.qtile.groups if match(group)]
index = (groups.index(self) + direction) % len(groups)
return groups[index]
def prevGroup(self, skip_empty=False, skip_managed=False):
return self._dirGroup(-1, skip_empty, skip_managed)
def nextGroup(self, skip_empty=False, skip_managed=False):
return self._dirGroup(1, skip_empty, skip_managed)
def cmd_unminimize_all(self):
"""Unminimise all windows in this group"""
for w in self.windows:
w.minimized = False
self.layoutAll()
def cmd_next_window(self):
if not self.windows:
return
if self.currentWindow.floating:
nxt = self.floating_layout.focus_next(self.currentWindow) or \
self.layout.focus_first() or \
self.floating_layout.focus_first(group=self)
else:
nxt = self.layout.focus_next(self.currentWindow) or \
self.floating_layout.focus_first(group=self) or \
self.layout.focus_first()
self.focus(nxt, True)
def cmd_prev_window(self):
if not self.windows:
return
if self.currentWindow.floating:
nxt = self.floating_layout.focus_previous(self.currentWindow) or \
self.layout.focus_last() or \
self.floating_layout.focus_last(group=self)
else:
nxt = self.layout.focus_previous(self.currentWindow) or \
self.floating_layout.focus_last(group=self) or \
self.layout.focus_last()
self.focus(nxt, True)
def cmd_switch_groups(self, name):
"""Switch position of current group with name"""
self.qtile.cmd_switch_groups(self.name, name)
def __repr__(self):
return "<group.Group (%r)>" % self.name
| {
"content_hash": "2863ec58ffce78fa0df46091ee9fac8f",
"timestamp": "",
"source": "github",
"line_count": 420,
"max_line_length": 79,
"avg_line_length": 33.99761904761905,
"alnum_prop": 0.5457665102598221,
"repo_name": "de-vri-es/qtile",
"id": "116d9377b891c61ca946fe5438ed7428ee69e011",
"size": "15631",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "libqtile/group.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Groff",
"bytes": "3605"
},
{
"name": "Makefile",
"bytes": "1147"
},
{
"name": "Python",
"bytes": "998773"
},
{
"name": "Shell",
"bytes": "4084"
}
],
"symlink_target": ""
} |
import collections
import gc
import re
import traceback
from operator import itemgetter
import diesel
address_stripper = re.compile(r' at 0x[0-9a-f]+')
def print_greenlet_stacks():
"""Prints the stacks of greenlets from running loops.
The number of greenlets at the same position in the stack is displayed
on the line before the stack dump along with a simplified label for the
loop callable.
"""
stacks = collections.defaultdict(int)
loops = {}
for obj in gc.get_objects():
if not isinstance(obj, diesel.Loop) or not obj.running:
continue
if obj.id == diesel.core.current_loop.id:
continue
fr = obj.coroutine.gr_frame
stack = ''.join(traceback.format_stack(fr))
stacks[stack] += 1
loops[stack] = obj
for stack, count in sorted(stacks.iteritems(), key=itemgetter(1)):
loop = loops[stack]
loop_id = address_stripper.sub('', str(loop.loop_callable))
        print('[%d] === %s ===' % (count, loop_id))
        print(stack)
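# Editor's addition: hedged usage sketch.  diesel's ``quickstart`` and
# ``sleep`` are assumed from the project's examples; the idea is simply to
# run this helper from its own loop so the other loops can be inspected.
if __name__ == '__main__':
    from diesel import quickstart, sleep
    def dump_stacks_periodically():
        while True:
            sleep(10)
            print_greenlet_stacks()
    quickstart(dump_stacks_periodically)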
| {
"content_hash": "343564a6b2b2222c7c6e008eec07da52",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 75,
"avg_line_length": 29.166666666666668,
"alnum_prop": 0.638095238095238,
"repo_name": "dieseldev/diesel",
"id": "b4c720ce8e97ef050c5aae03f7826988817ccdb7",
"size": "1050",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "diesel/util/debugtools.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "252"
},
{
"name": "Protocol Buffer",
"bytes": "9279"
},
{
"name": "Python",
"bytes": "345729"
}
],
"symlink_target": ""
} |
"""Module for numerical treatment of piece-wise continuous reservoir models.
An abstract
:class:`~.smooth_reservoir_model.SmoothReservoirModel` is
filled with life by giving initial values, a parameter set, a time grid,
and potentially additional involved functions to it.
The model can then be run and as long as the model is linear,
based on the state transition operator age and transit time
distributions can be computed.
Nonlinear models can be linearized along a solution trajectory.
Counting of compartment/pool/reservoir numbers start at zero and the
total number of pools is :math:`d`.
"""
from numbers import Number
from copy import copy, deepcopy
from matplotlib import cm
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
import numpy as np
from numpy.linalg import matrix_power
import plotly.graph_objs as go
import base64
import hashlib
import mpmath
from frozendict import frozendict
from sympy import lambdify, flatten, latex, Function, sympify, sstr, solve, \
ones, Matrix, ImmutableMatrix
from sympy.core.function import UndefinedFunction
from sympy.abc import _clash
from sympy.printing import pprint
import scipy.linalg
from scipy.linalg import inv
from numpy.linalg import pinv
from scipy.special import factorial
from scipy.integrate import odeint, quad
from scipy.interpolate import interp1d, UnivariateSpline
from scipy.optimize import newton, brentq, minimize
from tqdm import tqdm
#from testinfrastructure.helpers import pe
from .smooth_reservoir_model import SmoothReservoirModel
from .model_run import ModelRun
from .helpers_reservoir import (
deprecation_warning
,warning
,make_cut_func_set
,has_pw
,numsol_symbolic_system_old
,numsol_symbolical_system
,arrange_subplots
,melt
,generalized_inverse_CDF
,draw_rv
,stochastic_collocation_transform
,numerical_rhs
,numerical_rhs_old
,MH_sampling
,save_csv
,load_csv
,stride
,f_of_t_maker
,const_of_t_maker
,numerical_function_from_expression
,x_phi_ode
,phi_tmax
,x_tmax
,print_quantile_error_statisctics
,custom_lru_cache_wrapper
,net_Us_from_discrete_Bs_and_xs
,net_Fs_from_discrete_Bs_and_xs
,net_Rs_from_discrete_Bs_and_xs
,check_parameter_dict_complete
)
from .BlockIvp import BlockIvp
from .myOdeResult import solve_ivp_pwc
from .Cache import Cache
class Error(Exception):
"""Generic error occurring in this module."""
pass
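# ----------------------------------------------------------------------------
# Editor's addition: a hedged, minimal construction sketch.  It only uses the
# SmoothModelRun constructor defined below and SmoothReservoirModel.from_B_u
# (the same classmethod that `linearize` relies on); the one-pool model and
# the parameter values are purely illustrative and not part of the library.
def _example_one_pool_run():
    from sympy import symbols, Matrix
    x, k, u_in, t = symbols('x k u_in t')
    srm = SmoothReservoirModel.from_B_u(
        Matrix([x]), t, Matrix([[-k]]), Matrix([u_in]))
    smr = SmoothModelRun(
        srm,
        parameter_dict={k: 0.5, u_in: 1.0},
        start_values=np.array([1.0]),
        times=np.linspace(0, 10, 101),
    )
    return smr.solve()  # len(times) x nr_pools solution grid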
class SmoothModelRun(ModelRun):
"""Class for a model run based on a
:class:`~.smooth_reservoir_model.SmoothReservoirModel`.
Attributes:
model (:class:`~.smooth_reservoir_model.SmoothReservoirModel`):
The reservoir model on which the model run bases.
parameter_dict (dict): ``{x: y}`` with ``x`` being a SymPy symbol
and ``y`` being a numerical value.
start_values (numpy.array): The vector of start values.
times (numpy.array): The time grid used for the simulation.
Typically created by ``numpy.linspace``.
func_set (dict): ``{f: func}`` with ``f`` being a SymPy symbol and
``func`` being a Python function. Defaults to ``dict()``.
Pool counting starts with ``0``. In combined structures for pools and
system, the system is at the position of a ``(d+1)`` st pool.
"""
def __init__(self, model, parameter_dict,
start_values, times, func_set=None ):
"""Return a SmoothModelRun instance.
Args:
model (:class:`~.smooth_reservoir_model.SmoothReservoirModel`):
The reservoir model on which the model run bases.
parameter_dict (dict): ``{x: y}`` with ``x`` being a SymPy symbol
and ``y`` being a numerical value.
start_values (numpy.array): The vector of start values.
times (numpy.array): The time grid used for the simulation.
Typically created by ``numpy.linspace``.
func_set (dict): ``{f: func}`` with ``f`` being a SymPy symbol and
``func`` being a Python function. Defaults to ``dict()``.
Raises:
Error: If ``start_values`` is not a ``numpy.array``.
"""
# we cannot use dict() as default because the test suite makes weird
# things with it! But that is bad style anyways
if parameter_dict is None: parameter_dict = dict()
if func_set is None: func_set = dict()
# check parameter_dict + func_set for completeness
free_symbols = check_parameter_dict_complete(
model,
parameter_dict,
func_set)
if free_symbols != set():
raise(Error('Missing parameter values for ' + str(free_symbols)))
self.model = model
self.parameter_dict = frozendict(parameter_dict)
self.times = times
# make sure that start_values are an array,
# even a one-dimensional one
self.start_values = np.array(start_values).reshape(model.nr_pools,)
if not(isinstance(start_values, np.ndarray)):
raise(Error("start_values should be a numpy array"))
# fixme mm:
#func_set = {str(key): val for key, val in func_set.items()}
# The conversion to string is not desirable here
# should rather implement a stricter check (which fails at the moment because some tests use the old syntax
#for f in func_set.keys():
# if not isinstance(f,UndefinedFunction):
# raise(Error("The keys of the func_set should be of type: sympy.core.function.UndefinedFunction"))
self.func_set = frozendict(func_set)
def __str__(self):
return str(
[ 'id(self)'+str(id(self)), 'id(model)'+str(id(self.model))]
+["id "+str(key)+" "+str(id(val)) for key,val in self.func_set.items()]
+["id "+str(key)+" "+str(id(val)) for key,val in self.parameter_dict.items()]
)
@property
def dts(self):
"""
The lengths of the time intervals.
"""
return np.diff(self.times).astype(np.float64)
def B_func(self, vec_sol_func=None):
# Design comment:
# Note that the vec_sol_func argument is necessary because
# we have slight differences in solutions coming from different
# numerical solutions. If we intended to apply B_func to
# values not belonging to the solution, we would not do so
# in an instace method but a separate function.
        if vec_sol_func is None:
vec_sol_func = self.solve_func()
# we inject the solution into B to get the linearized version
srm = self.model
tup = (srm.time_symbol,) + tuple(srm.state_vector)
numfun = numerical_function_from_expression(
srm.compartmental_matrix,
tup,
self.parameter_dict,
self.func_set
)
# we want a function that accepts a vector argument for x
def B_func(t):
x = vec_sol_func(t)
return numfun(t,*x)
return B_func
def linearize_old(self):
"""Return a linearized SmoothModelRun instance.
Linearization happens along the solution trajectory. Only for linear
systems all functionality is guaranteed,
this is why nonlinear systems should be linearized first.
Returns:
:class:`SmoothModelRun`: A linearized version of the original
:class:`SmoothModelRun`, with the solutions now being part
of ``func_set``.
"""
sol_funcs = self.sol_funcs_old()
srm = self.model
xi, T, N, C, u = deepcopy(srm.xi_T_N_u_representation())
svec = srm.state_vector
symbolic_sol_funcs = {sv: Function(sv.name + '_sol')(srm.time_symbol)
for sv in svec}
# need to define a function_factory to create the function we need to
# avoid late binding
# with late binding pool will always be nr_pools and always the last
# function will be used!
def func_maker(pool):
def func(t):
return sol_funcs[pool](t)
return(func)
sol_dict = {}
for pool in range(self.nr_pools):
key = sstr(symbolic_sol_funcs[svec[pool]])
sol_dict[key] = func_maker(pool)
linearized_B = (xi*T*N).subs(symbolic_sol_funcs)
linearized_u = u.subs(symbolic_sol_funcs)
func_set=frozendict({key:val for mydict in [self.func_set,sol_dict]
for key,val in mydict.items()})
cl=srm.__class__
linearized_srm = cl.from_B_u(
srm.state_vector,
srm.time_symbol,
linearized_B,
linearized_u
)
linearized_smr = self.__class__(
linearized_srm,
self.parameter_dict,
self.start_values,
self.times,
func_set=func_set
)
return linearized_smr
def linearize(self):
"""Return a linearized SmoothModelRun instance.
Linearization happens along the solution trajectory. Only for linear
systems all functionality is guaranteed,
this is why nonlinear systems should be linearized first.
Returns:
:class:`SmoothModelRun`: A linearized version of the original
:class:`SmoothModelRun`, with the solutions now being part
of ``func_set``.
"""
#sol_funcs = self.sol_funcs()
sol_funcs = self.sol_funcs()
srm = self.model
xi, T, N, C, u = srm.xi_T_N_u_representation()
svec = srm.state_vector
symbolic_sol_funcs = {sv: Function(sv.name + '_sol')(srm.time_symbol)
for sv in svec}
# need to define a function_factory to create the function we need to
# avoid late binding
# with late binding pool will always be nr_pools and always the last
# function will be used!
def func_maker(pool):
def func(t):
return sol_funcs[pool](t)
return(func)
sol_dict = {}
for pool in range(self.nr_pools):
key = sstr(symbolic_sol_funcs[svec[pool]])
sol_dict[key] = func_maker(pool)
linearized_B = (xi*T*N).subs(symbolic_sol_funcs)
linearized_u = u.subs(symbolic_sol_funcs)
func_set=frozendict({key:val for mydict in [self.func_set,sol_dict] for key,val in mydict.items()})
#func_set = self.func_set
#func_set.update(sol_dict)
cl = srm.__class__
linearized_srm = cl.from_B_u(
srm.state_vector,
srm.time_symbol,
linearized_B,
linearized_u
)
linearized_smr = self.__class__(
linearized_srm,
self.parameter_dict,
self.start_values,
self.times,
func_set=func_set
)
return linearized_smr
@staticmethod
#fixme mm 2018-9-5:
    # Why is this a method of class SmoothModelRun?
# It does not rely on the class definition in any
# way.
# Is it because the helper module is not exposed in the API?
def moments_from_densities(max_order, densities):
"""Compute the moments up to max_order of the given densities.
Args:
max_order (int): The highest order up to which moments are
to be computed.
densities (numpy.array): Each entry is a Python function of one
variable (age) that represents a probability density function.
Returns:
numpy.ndarray: moments x pools, containing the moments of the given
densities.
"""
n = densities(0).shape[0]
def kth_moment(k):
def kth_moment_pool(k, pool):
                norm = quad(lambda a: densities(a)[pool], 0, np.inf)[0]
                if norm == 0: return np.nan
                return (quad(lambda a: a**k*densities(a)[pool], 0, np.inf)[0]
                        / norm)
return np.array([kth_moment_pool(k,pool) for pool in range(n)])
return np.array([kth_moment(k) for k in range(1, max_order+1)])
########## public methods and properties ##########
@property
def nr_pools(self):
"""int: Return the number of pools involved in the model."""
return self.model.nr_pools
def solve_single_value_old(self, alternative_start_values=None):
"""Solve the model and return a function of time.
Args:
alternative_start_values (numpy.array, optional): If not given, the
original ``start_values`` are used.
Returns:
            Python function ``f``: ``f(t)`` is a numpy.array that contains the
pool contents at time ``t``.
"""
return self._solve_age_moment_system_single_value_old(0, None,
alternative_start_values)
def solve_func(self, alternative_start_values=None):
"""Solve the model and return a function of time.
Args:
alternative_start_values (numpy.array, optional): If not given, the
original ``start_values`` are used.
Returns:
            Python function ``f``: ``f(t)`` is a numpy.array that contains the
pool contents at time ``t``.
"""
return self._solve_age_moment_system_func(
0,
None,
alternative_start_values
)
def solve_old(self, alternative_times = None, alternative_start_values=None):
"""Solve the model and return a solution grid.
Args:
alternative_times (numpy.array): If not given, the original time
grid is used.
alternative_start_values (numpy.array): If not given,
the original start_values are used.
Returns:
numpy.ndarray: len(times) x nr_pools, contains the pool contents
at the times given in the time grid.
"""
return self._solve_age_moment_system_old(0, None, alternative_times,
alternative_start_values)
def solve(self, alternative_start_values=None):
"""Solve the model and return a solution grid. If the solution has been computed previously (even by other methods) the cached result will be returned.
Args:
alternative_start_values (numpy.array): If not given,
the original start_values are used.
Returns:
numpy.ndarray: len(times) x nr_pools, contains the pool contents
at the times given in the time grid.
"""
soln, sol_func = self._solve_age_moment_system(
0,
None,
alternative_start_values
)
return soln
##### fluxes as functions #####
def sol_funcs_old(self):
"""Return linearly interpolated solution functions.
Returns:
Python function ``f``: ``f(t)`` returns a numpy.array containing the
pool contents at time ``t``.
"""
times = self.times
sol = self.solve_old(times)
sol_funcs = []
for i in range(self.nr_pools):
sol_inter = interp1d(times, sol[:,i])
sol_funcs.append(sol_inter)
return sol_funcs
#fixme: test
def sol_funcs(self):#->List[Callable[float,float]]:
"""Returns list of linearly interpolated solution functions per pool.
Returns:
List of Python functions ``[f[i]]``, where ``f[i](t)`` returns
pool i's content at time ``t``.
"""
vec_sol_func = self.solve_func()
# the factory is necessary to avoid unstrict evaluation
def func_maker(pool):
def func(t):
return vec_sol_func(t)[pool]
return(func)
return [func_maker(i) for i in range(self.nr_pools)]
def sol_funcs_dict_by_symbol(self):
"""
Return linearly interpolated solution functions as a dictionary
indexed by the symbols of the state variables
"""
#sol_funcs=self.sol_funcs()
sol_funcs=self.sol_funcs()
state_vector=self.model.state_vector
n=len(state_vector)
        sol_dict_by_symbol = {state_vector[i]: sol_funcs[i] for i in range(n)}
        return sol_dict_by_symbol
def sol_funcs_dict_by_name(self):
"""Return linearly interpolated solution functions. as a dictionary indexed by the name (string) of the
state variables"""
sol_dict_by_name={k.name:v for k,v in self.sol_funcs_dict_by_symbol().items()}
return sol_dict_by_name
def external_input_flux_funcs(self):
"""Return a dictionary of the external input fluxes.
        The resulting functions are based on sol_funcs and are linear interpolations.
Returns:
dict: ``{key: func}`` with ``key`` representing the pool which
receives the input and ``func`` a function of time that returns
a ``float``.
"""
return self._flux_funcs(self.model.input_fluxes)
def internal_flux_funcs(self):
"""Return a dictionary of the internal fluxes.
Returns:
dict: ``{key: func}`` with ``key=(pool_from, pool_to)`` representing
the pools involved and ``func`` a function of time that returns
a ``float``.
"""
return self._flux_funcs(self.model.internal_fluxes)
def external_output_flux_funcs(self):
"""Return a dictionary of the external output fluxes.
Returns:
dict: ``{key: func}`` with ``key`` representing the pool from which
the output comes and ``func`` a function of time that returns a
``float``.
"""
return self._flux_funcs(self.model.output_fluxes)
def acc_gross_external_output_vector(self, data_times=None):
"""Return the vectors of accumulated external outputs.
Returns:
numpy.ndarray: len(times)-1 x nr_pools
"""
times = self.times if data_times is None else data_times
nt = len(times)-1
res = np.zeros((nt,self.nr_pools))
for k in range(nt):
for pool_nr, func in self.external_output_flux_funcs().items():
res[k,pool_nr] = quad(func,times[k],times[k+1])[0]
return res
#fixme: here _func indicated that this here is already a function of t
# on other occasions _func indicated that a function is returned
def output_vector_func(self, t):
"""Return a vector of the external output fluxes at time ``t``.
Returns:
numpy.array: The ``i`` th entry is the output from pool ``i`` at
time ``t``.
"""
res = np.zeros((self.nr_pools,))
for key, value in self.external_output_flux_funcs().items():
res[key] = value(t)
return res
##### fluxes as vector-valued functions #####
# Note: This function could be rewritten using a vector valued input function
def external_input_vector_func(self, cut_off = True):
"""Return a vector valued function for the external inputs.
Returns:
Python function ``u``: ``u(t)`` is a ``numpy.array`` containing the
external inputs at time ``t``.
Note:
If the required (future) values for the input exceed the maximum of
times they are assumed to be zero if ``cut_off`` is ``True``.
If ``cut_off`` is ``False`` then the input function is assumed to
be valid everywhere which might be dangerous if they are
extrapolated from data.
"""
if not hasattr(self, '_external_input_vector_func'):
t0 = self.times[0]
# cut off inputs until t0 (exclusive)
if cut_off:
t_valid = lambda t: True if ((t0<=t) and
(t<=self.times[-1])) else False
else:
t_valid = lambda t: True
input_fluxes = []
for i in range(self.nr_pools):
if i in self.external_input_flux_funcs().keys():
input_fluxes.append(self.external_input_flux_funcs()[i])
else:
input_fluxes.append(lambda t: 0)
u = lambda t: (np.array([f(t) for f in input_fluxes],
                                    dtype=float)
if t_valid(t) else np.zeros((self.nr_pools,)))
self._external_input_vector_func = u
return self._external_input_vector_func
def output_rate_vector_at_t(self, t):
"""Return a vector of output rates at time ``t``.
Args:
t (float): The time at which the output rates are computed.
Returns:
numpy.array: The ith entry contains the output rate of pool ``i``
at time ``t``.
"""
n = self.nr_pools
#sol_funcs = self.sol_funcs()
vec_sol_func = self.solve_func()
output_vec_at_t = self.output_vector_func(t)
# rate_vec = np.zeros((n,))
x = vec_sol_func(t)
# for pool in range(n):
# #x = sol_funcs[pool](t)
# if x != 0:
# rate_vec[pool] = output_vec_at_t[pool] / x
return np.nan_to_num(output_vec_at_t / x)
##### fluxes as vector over self.times #####
def acc_gross_external_input_vector(self, data_times=None):
"""Return the grid of accumulated external input vectors.
Returns:
numpy.ndarray: len(times) x nr_pools
"""
times = self.times if data_times is None else data_times
nt = len(times)-1
res = np.zeros((nt, self.nr_pools))
for k in range(nt):
for pool_nr, func in self.external_input_flux_funcs().items():
res[k,pool_nr] = quad(func,times[k],times[k+1])[0]
return res
@property
    #this function should be rewritten using the vector valued solution
def external_input_vector(self):
"""Return the grid of external input vectors.
Returns:
numpy.ndarray: len(times) x nr_pools
"""
res = self._flux_vector(self.model.external_inputs)
# no inputs at t0 (only >t0)
#res[0,:] = np.zeros((self.nr_pools,))
return res
@property
#this function should be rewritten using the vector valued solution
def external_output_vector(self):
"""Return the grid of external output vectors.
Returns:
numpy.ndarray: len(times) x nr_pools
"""
return(self._flux_vector(self.model.external_outputs))
@property
def output_rate_vector(self):
"""Return the grid of output rate vectors.
Returns:
            numpy.ndarray: len(times) x nr_pools, ``output_vector/solution``
"""
soln = self.solve()
output_vec = self.external_output_vector
# take care of possible division by zero
output_vec[soln==0] = 0
        soln[soln==0] = 1
return output_vec/soln
#fixme hm: test
def acc_gross_internal_flux_matrix(self, data_times=None):
"""Return the grid of flux matrices.
Returns:
numpy.ndarray: len(times) x nr_pools x nr_pools
"""
times = self.times if data_times is None else data_times
nt = len(times)-1
res = np.zeros((nt, self.nr_pools, self.nr_pools))
for k in range(nt):
for key, func in self.internal_flux_funcs().items():
j, i = key
res[k,i,j] = quad(func,times[k],times[k+1])[0]
return res
##### age density methods #####
def pool_age_densities_single_value(self, start_age_densities=None):
"""Return a function for the pool age densities.
Args:
start_age_densities (Python function, optional): A function of age
that returns a numpy.array containing the masses with the given
age at time :math:`t_0`. Defaults to None, meaning that all
initial mass is considered to have zero age.
Returns:
Python function ``p_sv``: ``p_sv(a, t)`` returns ``a numpy.array``
containing the pool contents with age ``a`` at time ``t``.
"""
p1_sv = self._age_densities_1_single_value(start_age_densities)
p2_sv = self._age_densities_2_single_value()
p_sv = lambda a, t: p1_sv(a,t) + p2_sv(a,t)
return p_sv
# returns a function p that takes an age array "ages" as argument
# and gives back a three-dimensional ndarray (ages x times x pools)
# start_age_densities is a array-valued function of age
def pool_age_densities_func(self, start_age_densities=None):
"""Return a function that takes an array of ages and returns the
pool age densities.
Args:
start_age_densities (Python function, optional): A function of age
that returns a numpy.array containing the masses with the given
age at time :math:`t_0` for every pool.
Defaults to None, meaning that all initial mass is considered
to have zero age.
Returns:
Python function ``p``: ``p(ages)`` returns a ``numpy.ndarray``
len(ages) x len(times) x nr_pools containing the pool contents
with the respective ages at the respective times, where ``ages``
is a ``numpy.array``.
"""
p1 = self._age_densities_1(start_age_densities)
p2 = self._age_densities_2()
def p(ages):
if hasattr(self, '_computed_age_density_fields'):
if ((start_age_densities, tuple(ages)) in
self._computed_age_density_fields.keys()):
#print('using cached result')
return self._computed_age_density_fields[
(start_age_densities, tuple(ages))]
else:
self._computed_age_density_fields = {}
field_list = []
for a in tqdm(ages):
field_list.append(p1(np.array([a])) + p2(np.array([a])))
field = np.array(field_list)[:,0,:,:]
self._computed_age_density_fields[
(start_age_densities, tuple(ages))] = field
return field
return p
def system_age_density_single_value(self, start_age_densities=None):
"""Return a function for the system age density.
Args:
start_age_densities (Python function, optional): A function of age
that returns a numpy.array containing the masses with the given
age at time :math:`t_0`.
Defaults to None, meaning that all initial mass is considered
to have zero age.
Returns:
Python function ``sys_p_sv``: ``sys_p_sv(a, t)`` returns the system
content with age ``a`` at time ``t``.
"""
p_sv = self.pool_age_densities_single_value(start_age_densities)
sys_p_sv = lambda a, t: sum(p_sv(a,t))
return sys_p_sv
# return array ages x times with ages based on pool_age_densities
def system_age_density(self, pool_age_densities):
"""Return the system age density based on the given pool age densities.
Args:
pool_age_densites (numpy.ndarray len(ages) x len(times) x nr_pools):
The pool age density values.
Returns:
numpy.ndarray: (len(ages) x len(times)) The sum of the pool age
contents over all pools.
"""
return pool_age_densities.sum(2)
# combine pool and system age densities to one numpy array
def age_densities(self, pool_age_densities, system_age_density):
"""Combine pool and system age densities to one numpy.array.
Args:
pool_age_densites (numpy.ndarray len(ages) x len(times) x nr_pools):
The pool age density values.
system_age_density (numpy.ndarray len(ages) x len(times)):
The system age density values.
Returns:
numpy.ndarray: (len(ages) x len(times) x (nr_pools+1)).
The system age density values are appended to the end of the
pool density values (system = pool ``d+1`` with ``d = nr_pools``).
"""
n = self.nr_pools
nr_ages = pool_age_densities.shape[0]
nr_times = pool_age_densities.shape[1]
_age_densities = np.zeros((nr_ages, nr_times, n+1))
_age_densities[:,:,:n] = pool_age_densities
_age_densities[:,:,n] = system_age_density
return _age_densities
##### age moment methods #####
def age_moment_vector_from_densities(self, order, start_age_densities):
"""Compute the ``order`` th moment of the pool ages by integration.
This function is extremely slow, since for each pool the integral over
        the density is computed based on the single-valued functions. It is
implemented only for the sake of completeness and to test the results
obtained by faster methods.
Args:
order (int): The order of the moment to be computed.
start_age_densities (Python function, optional):
A function of age that returns a numpy.array containing the
masses with the given age at time :math:`t_0`.
Returns:
numpy.ndarray: len(times) x nr_pools.
Contains the ``order`` th moment
of the pool ages over the time grid.
"""
p_sv = self.pool_age_densities_single_value(start_age_densities)
times = self.times
#x = self.solve_old()
x = self.solve()
n = self.nr_pools
k = order
def am_at_time_index_for_pool(ti, pool):
def integrand(a):
return (a**k) * p_sv(a, times[ti])[pool]
return x[ti, pool]**(-1) * quad(integrand, 0, np.inf)[0]
def age_moment_at_time_index(ti):
return np.array([am_at_time_index_for_pool(ti, pool)
for pool in range(n)])
am_arr = np.array([age_moment_at_time_index(ti)
for ti in range(len(times))])
am = np.ndarray((len(times), n), float, am_arr)
return am
def age_moment_vector_semi_explicit(self, order,
start_age_moments=None, times=None):
"""Compute the ``order`` th moment of the pool ages by a semi-explicit
formula.
This function is based on a semi-explicit formula such that no improper
integrals need to be computed.
Args:
order (int): The order of the age moment to be computed.
start_age_moments (numpy.ndarray order x nr_pools, optional):
Given initial age moments up to the order of interest.
Can possibly be computed by :func:`moments_from_densities`.
Defaults to ``None`` assuming zero initial ages.
times (numpy.array, optional): Time grid.
Defaults to ``None`` and the original time grid is used.
Returns:
numpy.ndarray: len(times) x nr_pools.
The ``order`` th pool age moments over the time grid.
"""
if times is None: times = self.times
t0 = times[0]
n = self.nr_pools
k = order
if start_age_moments is None:
start_age_moments = np.zeros((order, n))
start_age_moments[np.isnan(start_age_moments)] = 0
p2_sv = self._age_densities_2_single_value()
def binomial(n, k):
return 1 if k==0 else (0 if n==0
else binomial(n-1, k) + binomial(n-1, k-1))
Phi = lambda t, t0, x: self._state_transition_operator(t, t0, x)
def x0_a0_bar(j):
if j == 0:
return self.start_values
return np.array(self.start_values) * start_age_moments[j-1,:]
def both_parts_at_time(t):
def part2_time(t):
def part2_time_index_pool(ti, pool):
return quad(lambda a: a**k * p2_sv(a, t)[pool], 0, t-t0)[0]
return np.array([part2_time_index_pool(t, pool)
for pool in range(n)])
def part1_time(t):
def summand(j):
return binomial(k, j)*(t-t0)**(k-j)*Phi(t, t0, x0_a0_bar(j))
return sum([summand(j) for j in range(k+1)])
return part1_time(t) + part2_time(t)
#soln = self.solve_old()
soln = self.solve()
def both_parts_normalized_at_time_index(ti):
t = times[ti]
bp = both_parts_at_time(t)
diag_values = np.array([x if x>0 else np.nan for x in soln[ti,:]])
X_inv = np.diag(diag_values**(-1))
#return (np.mat(X_inv) * np.mat(bp).transpose()).A1
return (np.matmul(X_inv, bp).transpose()).flatten()
return np.array([both_parts_normalized_at_time_index(ti)
for ti in range(len(times))])
def age_moment_vector(self, order, start_age_moments = None):
"""Compute the ``order`` th pool age moment vector over the time grid
by an ODE system.
This function solves an ODE system to obtain the pool age moments very
fast. If the system has empty pools at the beginning, the semi-explicit
formula is used until all pools are non-empty. Then the ODE system
starts.
Args:
order (int): The order of the pool age moments to be computed.
start_age_moments (numpy.ndarray order x nr_pools, optional):
Given initial age moments up to the order of interest.
Can possibly be computed by :func:`moments_from_densities`.
Defaults to None assuming zero initial ages.
Returns:
numpy.ndarray: len(times) x nr_pools.
The ``order`` th pool age moments over the time grid.
"""
n = self.nr_pools
times = self.times
if start_age_moments is None:
start_age_moments = np.zeros((order, n))
max_order=start_age_moments.shape[0]
if order>max_order:
raise Error("""
To solve the moment system with order {0}
start_age_moments up to (at least) the same order have to be
provided. But the start_age_moments.shape was
{1}""".format(order,start_age_moments.shape)
)
if order<max_order:
warning("""
Start_age_moments contained higher order values than needed.
start_age_moments order was {0} while the requested order was
{1}. This is no problem but possibly unintended. The higher
order moments will be clipped """.format(max_order,order)
)
# make sure that the start age moments are clipped to the order
# (We do not need start values for higher moments and the clipping
# avoids problems with recasting if higher order moments are given
# by the user)
start_age_moments=start_age_moments[0:order,:]
if not (0 in self.start_values):
#ams = self._solve_age_moment_system_old(order, start_age_moments)
ams,_ = self._solve_age_moment_system(order, start_age_moments)
return ams[:,n*order:]
else:
# try to start adapted mean_age_system once no pool
# has np.nan as mean_age (empty pool)
# find last time index that contains an empty pool --> ti
#soln = self.solve_old()
soln = self.solve()
ti = len(times)-1
content = soln[ti,:]
while not (0 in content) and (ti>0):
ti = ti-1
content = soln[ti,:]
# not forever an empty pool there?
if ti+1 < len(times):
# compute moment with semi-explicit formula
# as long as there is an empty pool
amv1_list = []
amv1 = np.zeros((ti+2, order*n))
for k in range(1, order+1):
amv1_k = self.age_moment_vector_semi_explicit(
k, start_age_moments, times[:ti+2])
amv1[:,(k-1)*n:k*n] = amv1_k
# use last values as start values for moment system
# with nonzero start values
new_start_age_moments = amv1[-1,:].reshape((n, order))
start_values = soln[ti+1]
#ams = self._solve_age_moment_system_old(
# order, new_start_age_moments, times[ti+1:], start_values)
ams,_ = self._solve_age_moment_system(
order, new_start_age_moments, start_values, times[ti+1:])
amv2 = ams[:,n*order:]
# put the two parts together
part1 = amv1[:,(order-1)*n:order*n][:-1]
amv = np.ndarray((len(times), n))
amv[:part1.shape[0], :part1.shape[1]] = part1
amv[part1.shape[0]:, :amv2.shape[1]] = amv2
return amv
else:
# always an empty pool there
return self.age_moment_vector_semi_explicit(
order, start_age_moments)
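# Usage sketch (illustrative only; `smr` is a hypothetical SmoothModelRun
# instance and `p0` a hypothetical initial age density function): the mean
# pool ages over the time grid are the first-order moments:
#
#     start_age_moments = smr.moments_from_densities(1, p0)    # 1 x nr_pools
#     mean_ages = smr.age_moment_vector(1, start_age_moments)  # len(times) x nr_pools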
# requires start moments <= order
def system_age_moment(self, order, start_age_moments=None):
"""Compute the ``order`` th system age moment vector over the time grid
by an ODE system.
The pool age moments are computed by :func:`age_moment_vector` and then
weighted corresponding to the pool contents.
Args:
order (int): The order of the pool age moments to be computed.
start_age_moments (numpy.ndarray order x nr_pools, optional):
Given initial age moments up to the order of interest.
Can possibly be computed by :func:`moments_from_densities`.
Defaults to None assuming zero initial ages.
Returns:
numpy.array: The ``order`` th system age moment over the time grid.
"""
n = self.nr_pools
age_moment_vector = self.age_moment_vector(order, start_age_moments)
age_moment_vector[np.isnan(age_moment_vector)] = 0
#soln = self.solve_old()
soln = self.solve()
total_mass = soln.sum(1) # row sum
total_mass[total_mass==0] = np.nan
system_age_moment = (age_moment_vector*soln).sum(1)/total_mass
return system_age_moment
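# Usage sketch (illustrative only; hypothetical `smr` instance): the mean
# system age is the pool-content-weighted mean of the pool mean ages:
#
#     mean_system_age = smr.system_age_moment(1)   # zero initial ages assumed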
##### transit time density methods #####
def backward_transit_time_density_single_value_func(
self, start_age_densities
):
"""Return a function that returns a single value for the
backward transit time density.
Args:
start_age_densities (Python function):
A function of age that returns a numpy.array containing the
masses with the given age at time :math:`t_0`.
Returns:
Python function ``p_sv``: ``p_sv(a, t)`` returns the mass that
leaves the system at time ``t`` with age ``a``.
"""
n = self.nr_pools
p_age_sv = self.pool_age_densities_single_value(start_age_densities)
def p_sv(a, t):
p = p_age_sv(a, t)
r = self.output_rate_vector_at_t(t)
return (r*p).sum()
return p_sv
# return an array ages x times with ages based on pool_age_densities
def backward_transit_time_density(self, pool_age_densities):
"""Compute the backward transit time based on given pool age densities.
The result is obtained by computing a weighted sum of the pool age
densities according to output rates.
Args:
pool_age_densities (numpy.ndarray len(ages) x len(times) x nr_pools):
The pool age density values.
Returns:
numpy.ndarray: len(ages) x len(times). Mass leaving the system with
the respective age at the respective time.
"""
r = self.output_rate_vector
return (pool_age_densities*r).sum(2)
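# Usage sketch (illustrative only; hypothetical `smr`, `ages` and `pool_dens`
# as in the age density examples above):
#
#     btt_dens = smr.backward_transit_time_density(pool_dens)  # len(ages) x len(times)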
def forward_transit_time_density_single_value_func(self, cut_off=True, my_B_func=None):
"""Return a function that returns a single value for the
forward transit time density.
Args:
cut_off (bool, optional): If ``True``, no density values are going to
be computed after the end of the time grid, instead
``numpy.nan`` will be returned.
Defaults to ``True``; ``False`` might lead to unexpected behavior.
Returns:
Python function ``p_sv``: ``p_sv(a, t)`` returns how much mass will
leave the system with age ``a`` when it came in at time ``t``.
"""
if my_B_func is None:
my_B_func = self.B_func(self.x_solve_func_skew())
n = self.nr_pools
times = self.times
Phi = self._state_transition_operator
input_func = self.external_input_vector_func()
t0 = times[0]
t_max = times[-1]
def p_ftt_sv(a, t):
# nothing leaves before t0
if (t+a < t0): return 0.0
#fixme: for Metropolis-Hastings we might need the density
#very far away...
# we cannot compute the density if t+a is out of bounds
if cut_off and (t+a > t_max): return np.nan
u = input_func(t)
if sum(u) == 0: return np.nan
if (a < 0): return 0.0
return -np.matmul(my_B_func(t+a),Phi(t+a, t, u)).sum()
return p_ftt_sv
#fixme: return value not consistent with backward_transit_time_density
# not that easy to resolve, since here we do not use age_densities,
# instead ages is really needed to be able to make the shift or call
# the state_transition_operator
def forward_transit_time_density_func(self, cut_off=True, times=None):
"""Return a function based on an age array for the forward transit time
density.
Args:
cut_off (bool, optional): If True, no density values are going to
be computed after the end of the time grid, instead
``numpy.nan`` will be returned.
Defaults to True; False might lead to unexpected behavior.
times (numpy.array, optional): Time grid.
Defaults to ``None`` and the original time grid is used.
Returns:
Python function ``p``: ``p(ages)`` is a ``numpy.ndarray``
len(ages) x len(times) that gives the mass that will leave the
system with the respective age when it came in at time ``t``,
where ``ages`` is a ``numpy.array``.
"""
wrapper = custom_lru_cache_wrapper(maxsize=len(self.times))
cached_B_func = wrapper(self.B_func(self.x_solve_func_skew()))
if times is None:
times = self.times
p_sv = self.forward_transit_time_density_single_value_func(
cut_off,
my_B_func=cached_B_func
)
pp = lambda a: np.array([p_sv(a,t) for t in times], float)
#p = lambda ages: np.array([pp(a) for a in ages], np.float)
def p(ages):
field_list = []
for a in tqdm(ages):
field_list.append(pp(a))
field = np.array(field_list)
return field
return p
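# Usage sketch (illustrative only; hypothetical `smr` and `ages` as above):
# forward transit time densities are computed from an age grid directly,
# not from pool age densities:
#
#     p_ftt = smr.forward_transit_time_density_func(cut_off=True)
#     ftt_dens = p_ftt(ages)   # len(ages) x len(times), np.nan beyond the time grid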
##### transit time moment methods #####
def backward_transit_time_moment_from_density(self,
order, start_age_densities):
"""Compute the ``order`` th backward transit time moment based on an
improper integral over the density.
This function is extremely slow and implemented only for the sake of
completeness and for testing results from faster approaches.
Args:
order (int): The order of the backward transit time moment that is
to be computed.
start_age_densities (Python function): A function of age
that returns a numpy.array containing the masses with the given
age at time :math:`t_0`.
Returns:
numpy.array: The ``order`` th backward transit time moment over the
time grid.
"""
p_sv = self.backward_transit_time_density_single_value_func(
start_age_densities)
times = self.times
k = order
ext_outp_vec = self.external_output_vector
ext_outp = ext_outp_vec.sum(1)
def btt_moment_at_time_index(ti):
def integrand(a):
return (a**k) * p_sv(a, times[ti])
return ext_outp[ti]**(-1) * quad(integrand, 0, np.inf)[0]
bttm = np.array([btt_moment_at_time_index(ti)
for ti in range(len(times))])
return bttm
def backward_transit_time_moment(self, order, start_age_moments=None):
"""Compute the ``order`` th backward transit time moment based on the
:func:`age_moment_vector`.
Args:
order (int): The order of the backward transit time moment that is
to be computed.
start_age_moments (numpy.ndarray order x nr_pools, optional):
Given initial age moments up to the order of interest.
Can possibly be computed by :func:`moments_from_densities`.
Defaults to None assuming zero initial ages.
Returns:
numpy.array: The ``order`` th backward transit time moment over the
time grid.
"""
age_moment_vector = self.age_moment_vector(order, start_age_moments)
r = self.external_output_vector
pool_axis=1
return (
(r*age_moment_vector).sum(axis=pool_axis)/
r.sum(axis=pool_axis)
)
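# Usage sketch (illustrative only; hypothetical `smr` instance): the mean
# backward transit time is the first moment; with zero initial ages:
#
#     mean_btt = smr.backward_transit_time_moment(1)   # numpy.array over the time grid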
# def forward_transit_time_moment(self, order, epsrel=1e-2):
# """Compute the ``order`` th forward transit time moment.
#
# Attention! This function integrates over the state transition operator
# until infinite time.
# The results are going to be weird, since at the end of the time grid
# some cut-off will happen which biases the result.
# Be also aware that additionally - to avoid convergence issues in quad -
# the relative tolerance is set to 1e-2 by default.
#
# Args:
# order (int): The order of the forward transit time moment to be
# computed.
#
# Returns:
# numpy.array: The ``order`` th forward transit time moment over the
# time grid.
# """
# k = order
# times = self.times
# Phi = self._state_transition_operator
# input_vector = self.external_input_vector
#
# #import warnings
# #from scipy.integrate import IntegrationWarning
# #warnings.simplefilter("error", IntegrationWarning)
# def moment_at_ti(ti):
# u = input_vector[ti]
#
# # if we have no inputs, there cannot be a transit(time)
# if u.sum() == 0:
# return np.nan
#
# def integrand(a):
# res = (k*a**(k-1)*Phi(times[ti]+a, times[ti], u).sum())/u.sum()
# #print(a, Phi(times[ti]+a, times[ti], u), res)
# return res
#
# return quad(integrand, 0, np.infty,epsrel=epsrel)[0]
#
# # Remark:
# # We want to compute an improper integral
# # instead of calling res=quad(integrand, 0, np.infty)[0]
# # we could apply a variable transformation z=a/(c+a) # with an arbitrary c (possibly c=1 but we can optimize the choice for better performance)
# # so we have \int_0^\infty f(a) dx= \int_0^z(a=\infty) f(a(z))*da/dz *dz =\int_0^1 f(a(z)) c/(1-z**2) dz
# # to do:
# # To have the best accuracy we try to find c so that the peak of the integrand is projected to the middle of the new integration interval [0,1]
# # 1.) find the maximum of the integrand
# # 2.) find the c that projects this x to z=1/2
# #c =1000
# #def a(z):
# # return c*z/(1-z)
# #def transformed_integrand(z):
# # res = integrand(a(z))*c/(1-z**2)
# # return res
# #
# #return quad(transformed_integrand, 0, 1)[0]
#
# #res = np.array([moment_at_ti(ti) for ti in range(len(times))])
# res = []
# for ti in tqdm(range(len(times))):
# res.append(moment_at_ti(ti))
# res = np.array(res)
#
# return res
#fixme: split into two functions for SCCS and MH
# do not use dict as default value
def apply_to_forward_transit_time_simulation(self,
f_dict={'mean': np.mean}, N=10000, M=2, k=5, MH=False):
"""This is just a tentative approach.
To be honest, the problem of an unknown infinite future cannot be solved
this way, since the densities used to simulate forward transit time
random variables are biased by the cut-off at the end of the time grid.
"""
# f is a Python function, for the mean, take f = np.mean
# N is the number of simulations per each time step
# M is the number of collocation points for
# stochastic collocation sampling
# allowed values for M are 2, 3, 4, ..., 11
# other values lead to inverse transform sampling (slow)
# k is the order of the smoothing and interpolating spline
# 'smoothing_spline' is best used for inverse transform sampling,
# because of additional smoothing for low
# number of random variates
# for SCMCS (M in [2,...,11]), 'interpolation' is better,
# because the higher number of random variates
# (because of faster sampling) makes their mean already quite precise
# (in the framework of what is possible with SCMCS)
times = self.times
Phi = self._state_transition_operator
input_func = self.external_input_vector_func()
if not MH:
self.n = 0
def F_FTT(a, t):
u = input_func(t)
if u.sum() == 0:
return np.nan
if (a <= 0): return 0.0
self.n += 1
return 1 - Phi(t+a, t, u).sum()/u.sum()
def simulate(n, CDF):
# compute lagrange polynomial p if M is in [2, ..., 11]
g = stochastic_collocation_transform(M, CDF)
if g is None:
# inverse transform sampling
print('inverse transform sampling')
rvs = np.array([draw_rv(CDF) for _ in range(n)])
else:
norms = np.random.normal(size = n)
rvs = g(norms)
return rvs
else:
self.m = 0
p_sv = self.forward_transit_time_density_single_value_func(
cut_off=False
)
def f_FTT(a, t):
self.m += 1
return p_sv(a, t)
res = {f_name: {'values': [],
'smoothing_spline': None,
'interpolation': None} for f_name in f_dict.keys()}
for t in times:
print('time', t)
# no input means no forward transit time
u = input_func(t)
if u.sum() == 0:
rvs = np.nan
else:
if not MH:
rvs = simulate(N, lambda a: F_FTT(a, t))
print(self.n, 'calls of state transition operator')
else:
rvs = MH_sampling(N, lambda a: f_FTT(a, t))
print(self.m, 'calls of forward transit time density')
for f_name, f in f_dict.items():
value = f(rvs)
res[f_name]['values'].append(value)
print(f_name, value)
for f_name in res.keys():
y = np.array(res[f_name]['values'])
z = y.copy()
res[f_name]['values'] = y.copy()
# give weight zero to nan values for computing the spline
w = np.isnan(y)
y[w] = 0.
res[f_name]['smoothing_spline'] = UnivariateSpline(
times, y, w=~w, k=k, check_finite=True)
res[f_name]['interpolation'] = interp1d(times[~w], z[~w], kind=k)
return res
# use inverse transform sampling
def apply_to_forward_transit_time_simulation_its(self,
f_dict, times, N=1000, k=5):
"""This is just a tentative approach.
To be honest, the problem of an unknown infinite future cannot be solved
this way, since the densities used to simulate forward transit time
random variables are biased by the cut-off at the end of the time grid.
"""
# f is a Python function, for the mean, take f = np.mean
# N is the number of simulations per each time step
# times is an np.array of interpolation points
# k is the order of the smoothing and interpolating spline
# 'smoothing_spline' is best used for inverse transform sampling,
# because of additional smoothing for low
# number of random variates
Phi = self._state_transition_operator
input_func = self.external_input_vector_func()
def F_FTT(a, t):
u = input_func(t)
if u.sum() == 0:
return np.nan
if (a <= 0): return 0.0
return 1 - Phi(t+a, t, u).sum()/u.sum()
res = {f_name: {'values': [],
'smoothing_spline': None,
'interpolation': None} for f_name in f_dict.keys()}
for t in times:
print('time', t)
# no input means no forward transit time
u = input_func(t)
if u.sum() == 0:
rvs = np.nan
else:
CDF = lambda a: F_FTT(a, t)
rvs = np.array([draw_rv(CDF) for _ in range(N)])
for f_name, f in f_dict.items():
value = f(rvs)
res[f_name]['values'].append(value)
print(f_name, value)
def compute_splines(res, times):
for f_name in res.keys():
y = np.array(res[f_name]['values'])
z = y.copy()
res[f_name]['values'] = y.copy()
# give weight zero to nan values for computing the spline
w = np.isnan(y)
y[w] = 0.
res[f_name]['smoothing_spline'] = UnivariateSpline(
times, y, w=~w, k=k, check_finite=True)
res[f_name]['interpolation'] = interp1d(times[~w],z[~w],kind=k)
return res
return compute_splines(res, times)
##### comma separated values output methods #####
def save_pools_and_system_density_csv(self, filename, pool_age_densities,
system_age_density, ages):
"""Save the pool and system age densities to a csv file.
The system value will be coded into pool number -1.
Args:
filename (str): The name of the csv file to be written.
pool_age_densities (numpy.ndarray len(ages) x len(times) x nr_pools):
The pool age density values.
system_age_density (numpy.ndarray len(ages) x len(times)):
The system age density values.
ages (numpy.array): The ages that correspond to the indices in the
zeroth dimension of the density arrays.
Returns:
None
"""
n = self.nr_pools
times = self.times
ndarr = np.zeros((system_age_density.shape[0], len(times), n+1))
ndarr[:,:,:n] = pool_age_densities
ndarr[:,:,n] = system_age_density
pool_entries = [i for i in range(n)] + [-1]
melted = melt(ndarr, [ages, times, pool_entries])
header = '"age", "time", "pool", "value"'
save_csv(filename, melted, header)
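# Usage sketch (illustrative only; hypothetical `smr`, `ages`, `pool_dens` and
# `sys_dens` as in the age density examples above; the file name is arbitrary):
#
#     smr.save_pools_and_system_density_csv(
#         'age_densities.csv', pool_dens, sys_dens, ages)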
def save_pools_and_system_value_csv(self, filename, pools_ndarr,
system_arr):
"""Save pool and system values to a csv file.
Values could be the mean age, for example. One dimension less than a
density.
The system value will be coded into pool number -1.
Args:
filename (str): The name of the csv file to be written.
pools_ndarr (numpy.ndarray len(times) x nr_pools): The values to be
saved over the time-pool grid.
system_arr (numpy.array): The values over the time grid
corresponding to the system.
Returns:
None
"""
"""
n = self.nr_pools
times = self.times
ndarr = np.concatenate(
(pools_ndarr, system_arr.reshape((len(times), 1))), axis=1)
pool_entries = [i for i in range(n)] + [-1]
melted = melt(ndarr, [times, pool_entries])
header = '"time", "pool", "value"'
save_csv(filename, melted, header)
## helping methods ##
def density_values_for_pools(self, pool_densities_sv, pool_age_values):
"""Compute the pool age densities over the time grid at ages given in
pool_age_values.
This function can be used to obtain the density values at mean or median
values to draw a curve on top of the density surface. But actually this
is now implemented in a much faster way based on the surface itself.
Args:
pool_densities_sv (Python function): A function that takes
``a``, ``t`` as arguments and returns a vector of pool contents
with age ``a`` at time ``t``. Potentially coming from
:func:`pool_age_densities_single_value`.
pool_age_values (numpy.ndarray len(times) x nr_pools): The ages over
the time-pool grid at which the density values are to be
computed.
Returns:
numpy.ndarray: (len(times) x nr_pools) The pool density values over
the time-pool grid based on the given age values.
"""
n = self.nr_pools
times = self.times
# for each pool we have a different age value
z = []
for pool in range(n):
val = pool_age_values[:,pool]
#z.append(np.array([pool_densities_sv(val[i], times[i])[pool]
# for i in range(len(times))]))
new_z_list = []
for i in tqdm(range(len(times))):
new_z_list.append(pool_densities_sv(val[i], times[i])[pool])
z.append(np.array(new_z_list))
z = np.array(z).T
return z
# return density values for mean, median, etc.
#fixme: test
def density_values(self, density_sv, values):
"""Compute the density value over the time grid at ages given in values.
This function can be used to obtain the density values at mean or median
values to draw a curve on top of the density surface. But actually this
is now implemented in a much faster way based on the surface itself.
Args:
density_sv (Python function): A function that takes ``a``, ``t``
as arguments and returns density value with age a at time ``t``.
Potentially coming from :func:`system_age_density_single_value`.
values (numpy.array): The ages over the time grid at which the
density values are to be computed.
Returns:
numpy.array: The density values over the time grid based
on the given age values.
"""
times = self.times
def f(i):
if np.isnan(values[i]): return np.nan
return density_sv(values[i], times[i])
#dv_list = [f(i) for i in range(len(times))]
dv_list = []
for i in tqdm(range(len(times))):
dv_list.append(f(i))
return np.array(dv_list)
def save_value_csv(self, filename, arr, times=None):
"""Save values over the time grid to a csv file.
Args:
filename (str): The name of the csv file to be written.
arr (numpy.array): The values to be saved over the time grid.
times (np.array, optional): The time grid to be used.
Defaults to ``None`` in which case the original time grid
is used.
Returns:
None
"""
if times is None:
times = self.times
melted = melt(arr, [times])
header = '"time", "value"'
save_csv(filename, melted, header)
def save_density_csv(self, filename, density, ages, times=None):
"""Save density values over age-time grid to csv file.
Args:
filename (str): The name of the csv file to be written.
density (numpy.ndarray len(ages) x len(times)): The density values
to be saved over the age-time grid.
ages (numpy.array): The ages corresponding to the indices in the
zeroth dimension of the density ndarray.
times (numpy.array, optional): An alternative time grid to be used.
Defaults to ``None`` which means that the original time grid is
going to be used.
Returns:
None
"""
if times is None: times = self.times
melted = melt(density, [ages, times])
header = '"age", "time", "value"'
save_csv(filename, melted, header)
##### comma separated values input methods #####
def load_value_csv(self, filename):
melted = load_csv(filename)
return (melted[:,1]).copy()
## combining pool and system structures ##
def combine_pools_and_system_values(self, pools_values, system_values):
"""Append the system values to the pool values as if they belonged to
another pool.
Args:
pools_values (numpy.ndarray len(times) x nr_pools): The values to be
saved over the time-pool grid.
system_values (numpy.array): The system values to be saved over the
time grid.
Returns:
numpy.ndarray: len(times) x (nr_pools+1) The pool and system values
over the time-pool grid with the system added at the end as another
pool.
"""
n = self.nr_pools
times = self.times
values = np.zeros((len(times), n+1))
values[:,:n] = pools_values
values[:, n] = system_values
return values
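# Usage sketch (illustrative only; hypothetical `smr` instance): mean pool ages
# and the mean system age merged into one array, the system treated as an
# additional pool:
#
#     mean_ages = smr.age_moment_vector(1)      # len(times) x nr_pools
#     mean_sys_age = smr.system_age_moment(1)   # len(times)
#     combined = smr.combine_pools_and_system_values(mean_ages, mean_sys_age)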
## age ##
def load_pools_and_system_densities_csv(self, filename, ages):
"""Load pool and system age densities from a csv file.
Attention: It is assumed that the data were saved before with the very
same ages, times, and pools.
Furthermore, it is assumed that the system value always follows the
pool values.
Args:
filename (str): The csv file from which the data are to be read.
ages (numpy.array): The ages corresponding to the age indices.
What is needed here is in fact only the length of the age grid.
Returns:
numpy.ndarray: len(ages) x len(times) x (nr_pools+1) The density
values for the pools and the system over the
ages-times-(pools+system) grid.
"""
melted = load_csv(filename)
n = self.nr_pools
return np.ndarray((len(ages), len(self.times), n+1),
buffer=(melted[:,3]).copy())
def load_density_csv(self, filename, ages, times=None):
"""Load density values from a csv file.
Attention: It is assumed that the data were saved before with the very
same ages, times, and pools.
Args:
filename (str): The csv file from which the data are to be read.
ages (numpy.array): The ages corresponding to the age indices.
What is needed here is in fact only the length of the age grid.
times (numpy.array, optional): The times corresponding to the time
indices.
What is needed here is in fact only the length of the time grid.
Defaults to ``None`` and the original times are being used.
Returns:
numpy.ndarray: len(ages) x len(times) The density values over the
ages-times grid.
"""
if times is None:
times = self.times
melted = load_csv(filename)
return np.ndarray((len(ages), len(times)),
buffer=(melted[:,2]).copy())
def load_pools_and_system_value_csv(self, filename):
"""Load pool and system values from a csv file.
Values could be the mean/median age, for example. One dimension less
than a density.
Attention: It is assumed that the data were saved before with the very
same ages, times, and pools.
Furthermore, it is assumed that the system value always follows the
pool values.
Args:
filename (str): The csv file from which the data are to be read.
Returns:
numpy.ndarray: len(times) x (nr_pools+1) The values for the pools
and the system over the times-(pools+system) grid.
"""
melted = load_csv(filename)
n = self.nr_pools
values_lst = []
for pool in range(n):
indices = melted[:,1] == pool
values_lst.append(melted[np.ix_(indices),2][0])
pool_values = np.array(values_lst).transpose()
indices = melted[:,1] == -1
system_values = melted[np.ix_(indices),2][0]
return (pool_values, system_values)
##### plotting methods #####
## solutions ##
def plot_solutions(self, fig, fontsize = 10):
"""Plot the solution trajectories.
For each trajectory (nr_pools+1) a new subplot is created.
Args:
fig (Matplotlib figure): The fig to which the subplots are added.
fontsize (float, optional): Defaults to 10.
Returns:
None.
Instead ``fig`` is changed in place.
"""
#fixme:
# since time units and units are related to those
# of the other fluxes it would be more consistent
# to make them a property of SmoothModelRun and use
# them in the other plots as well
times = self.times
n = self.nr_pools
#soln = self.solve_old()
soln = self.solve()
def make_ax_nice(ax, title):
ax.set_title(title, fontsize = fontsize)
ax.set_xlabel(self._add_time_unit(latex(self.model.time_symbol)),
fontsize=fontsize)
ax.set_ylabel(self._add_content_unit('content'), fontsize=fontsize)
ax.set_xlim(times[0], times[-1])
ax.set_ylim(ax.get_ylim()[0]*0.9, ax.get_ylim()[1]*1.1)
ax = fig.add_subplot(n+1, 1, 1)
ax.plot(times, soln.sum(1))
make_ax_nice(ax, 'System')
for pool in range(n):
ax = fig.add_subplot(n+1, 1, 2+pool)
ax.plot(times, soln[:,pool])
make_ax_nice(
ax, "\$" + latex(self.model.state_variables[pool]) + "\$")
# fig.tight_layout()
def plot_phase_plane(self, ax, i, j, fontsize = 10):
"""Plot one single phase plane.
Args:
ax (Matplotlib axis): The axis onto which the phase plane is
plotted.
i, j (int): The numbers of the pools for which the phase plane is
plotted.
fontsize (float, optional): Defaults to 10.
Returns:
None.
Instead ``ax`` is changed in place.
"""
times = self.times
#soln = self.solve_old()
soln = self.solve()
ax.plot(soln[:, i], soln[:, j])
x0 = soln[0, i]
y0 = soln[0, j]
ax.scatter([x0],[y0], s=60)
x1 = soln[[len(times)//2-1], i][0]
y1 = soln[[len(times)//2-1], j][0]
x2 = soln[[len(times)//2+1], i][0]
y2 = soln[[len(times)//2+1], j][0]
ax.add_patch(mpatches.FancyArrowPatch((x1,y1), (x2,y2),
arrowstyle='simple', mutation_scale=20, alpha=1))
ax.set_xlabel(self._add_content_unit(
"$"+latex(sympify(self.model.state_variables[i]))+"$"), fontsize=fontsize)
ax.set_ylabel(self._add_content_unit(
"$"+latex(sympify(self.model.state_variables[j]))+"$"), fontsize=fontsize)
def plot_phase_planes(self, fig, fontsize = 10):
"""Plot all phase planes.
For each (i,j)-phase plane a new subplot is added.
Args:
fig (Matplotlib figure): The fig to which the subplots are added.
fontsize (float, optional): Defaults to 10.
Returns:
None.
Instead ``fig`` is changed in place.
"""
n = self.nr_pools
if n>=2:
# planes = [(i,j) for i in range(n) for j in range(i)]
# rows, cols = arrange_subplots(len(planes))
k = 0
for i in range(n):
for j in range(n):
k += 1
if i > j:
ax = fig.add_subplot(n, n, k)
self.plot_phase_plane(ax, i, j, fontsize)
ax.get_xaxis().set_ticks([])
ax.get_yaxis().set_ticks([])
# fig.tight_layout()
## fluxes ##
def plot_internal_fluxes(self, fig, fontsize = 10):
"""Plot all internal fluxes.
For each internal flux a new subplot is added.
Args:
fig (Matplotlib figure): The fig to which the subplots are added.
fontsize (float, optional): Defaults to 10.
Returns:
None.
Instead ``fig`` is changed in place.
"""
internal_flux_funcs = self.internal_flux_funcs()
n = len(internal_flux_funcs.keys())
times = self.times
#n=self.nr_pools
i = 1
for key, value in internal_flux_funcs.items():
ax = fig.add_subplot(n,1,i)
ax.plot(times, [internal_flux_funcs[key](t) for t in times])
ax.set_title(
'Flux from $'
+ latex(self.model.state_variables[key[0]])
+ '$ to $'
+ latex(self.model.state_variables[key[1]])
+ '$',
fontsize=fontsize)
ax.set_xlabel(self._add_time_unit(
'$' + latex(self.model.time_symbol) + '$'), fontsize=fontsize)
ax.set_ylabel(self._add_flux_unit('flux'), fontsize=fontsize)
i += 1
# fig.tight_layout()
def plot_external_output_fluxes(self, fig, fontsize = 10):
"""Plot all external output fluxes.
For each external output flux a new subplot is added.
Args:
fig (Matplotlib figure): The fig to which the subplots are added.
fontsize (float, optional): Defaults to 10.
Returns:
None.
Instead ``fig`` is changed in place.
"""
times = self.times
output_flux_funcs = self.external_output_flux_funcs()
n = len(output_flux_funcs.keys())
i = 1
for key, value in output_flux_funcs.items():
ax = fig.add_subplot(n,1,i)
ax.plot(times, [output_flux_funcs[key](t) for t in times])
ax.set_title(
'External outflux from $'
+ latex(self.model.state_variables[key])
+ '$',
fontsize=fontsize)
ax.set_xlabel(
self._add_time_unit('$' + latex(self.model.time_symbol) + '$'),
fontsize=fontsize)
ax.set_ylabel(self._add_flux_unit('flux'), fontsize=fontsize)
i += 1
# fig.tight_layout()
def plot_external_input_fluxes(self, fig, fontsize = 10):
"""Plot all external inpput fluxes.
For each external input flux a new subplot is added.
Args:
fig (Matplotlib figure): The fig to which the subplots are added.
fontsize (float, optional): Defaults to 10.
Returns:
None.
Instead ``fig`` is changed in place.
"""
times = self.times
input_flux_funcs = self.external_input_flux_funcs()
n = len(input_flux_funcs.keys())
i = 1
for key, value in input_flux_funcs.items():
ax = fig.add_subplot(n,1,i)
ax.plot(times, [input_flux_funcs[key](t) for t in times])
ax.set_title(
'External influx to $'
+ latex(self.model.state_variables[key])
+ '$',
fontsize=fontsize)
ax.set_xlabel(
self._add_time_unit('$' + latex(self.model.time_symbol) + '$'),
fontsize=fontsize)
ax.set_ylabel(self._add_flux_unit('flux'), fontsize=fontsize)
i += 1
# fig.tight_layout()
# means #
def plot_mean_ages(self, fig, start_mean_ages):
"""Plot the time evolution of the mean ages for all pools and the
system.
For each pool and the system a separate subplot is created.
Args:
fig (Matplotlib figure): The fig to which the subplots are added.
start_mean_ages (numpy.array): Contains the start mean ages of the
pools at time :math:`t_0`.
Returns:
None.
Instead ``fig`` is changed in place.
"""
times = self.times
n = self.nr_pools
start_age_moments = np.ndarray(
(1,n), float, np.array(start_mean_ages))
time_symbol = self.model.time_symbol
states = self.model.state_variables
ma_vector = self.age_moment_vector(1, start_age_moments)
sma = self.system_age_moment(1, start_age_moments)
def make_ax_nice(ax, title):
ax.set_title(title)
ax.set_xlabel(self._add_time_unit("\$" + latex(time_symbol) + "\$"))
ax.set_ylabel(self._add_time_unit("mean age"))
ax.set_xlim([times[0], times[-1]])
ax = fig.add_subplot(n+1, 1, 1)
ax.plot(times, sma)
make_ax_nice(ax, "System")
for i in range(n):
ax = fig.add_subplot(n+1, 1, 2+i)
ax.plot(times, ma_vector[:,i])
make_ax_nice(ax, "\$" + latex(states[i]) + "\$")
# fig.tight_layout()
def plot_mean_backward_transit_time(self, ax, start_mean_ages):
"""Plot the time evolution of the mean backward transit time.
For each pool and the system a separate subplot is created.
Args:
ax (Matplotlib axis): The ax onto which the plot is done.
start_mean_ages (numpy.array): Contains the start mean ages of the
pools at time :math:`t_0`.
Returns:
None.
Instead ``ax`` is changed in place.
"""
times = self.times
n = self.nr_pools
start_age_moments = np.ndarray(
(1,n), float, np.array(start_mean_ages))
time_symbol = self.model.time_symbol
tr_val = self.backward_transit_time_moment(1, start_age_moments)
ax.plot(times, tr_val)
ax.set_title("Mean backward transit time")
ax.set_xlabel(self._add_time_unit("$" + latex(time_symbol) + "$"))
ax.set_ylabel(self._add_time_unit("mean BTT"))
ax.set_xlim([times[0], times[-1]])
## densities ##
# age #
# fixme mm 10-31-2022
# this method needs a test (does not throw an interpretable error if data is more than one-dimensional)
def add_line_to_density_plot_plotly(self, fig, data, color, name,
time_stride=1, width=5, on_surface=True, bottom=True,
legend_on_surface=False, legend_bottom=False):
"""Add a line to an existing Plotly density plot.
Args:
fig (Plotly figure): Contains already a density plot to which the
new line is added.
data (numpy.array len(times)): The age data of the new line.
color (#RRGGBB): The color of the new line.
name (str): The name of the new line for the legend.
time_stride (int, optional): Coarseness of the plot in the time
direction to save memory.
Defaults to 1 meaning that all times are plotted and no memory
is saved.
width (int, optional): Width of the new line. Defaults to 5.
on_surface (bool, optional): If True, a new line with the given age
data is plotted on top of the given density.
Defaults to True.
bottom (bool, optional): If True, a new line with the given age data
is plotted in the xy-plane.
Defaults to True.
legend_on_surface (bool, optional): If True, the line on the surface
is mentioned in the legend.
Has no effect if on_surface is False.
Defaults to False.
legend_bottom (bool, optional): If True, the line in the xy-plane is
mentioned in the legend.
Has no effect if bottom is False.
Defaults to False.
Returns:
None.
Instead ``fig`` is changed in place.
"""
times = self.times
strided_data = stride(data, time_stride)
strided_times = stride(times, time_stride)
if bottom:
#trace_bottom = go.Scatter3d(
fig.add_scatter3d(
name=name,
x=-strided_times, y=strided_data, z=0*strided_times,
mode = 'lines',
line=dict(
color=color,
width=width
),
showlegend = legend_bottom
)
#fig['data'] += [trace_bottom]
if on_surface:
# compute the density values on the surface
#strided_times = -fig['data'][0]['x']
strided_ages = fig['data'][0]['y']
density_data = fig['data'][0]['z']
strided_z = []
for ti in range(len(strided_times)):
time = strided_times[ti]
age = strided_data[ti]
if ((np.isnan(age)) or (age < strided_ages[0]) or
(age > strided_ages[-1])):
strided_z.append(np.nan)
else:
ti_lower = strided_times.searchsorted(time)-1
ti_upper = (ti_lower+1 if ti_lower+1<len(strided_times)
else ti_lower)
time_lower = strided_times[ti_lower]
time_upper = strided_times[ti_upper]
ai_lower = strided_ages.searchsorted(age)-1
ai_upper = (ai_lower+1 if ai_lower+1<len(strided_ages)
else ai_lower)
age_lower = strided_ages[ai_lower]
age_upper = strided_ages[ai_upper]
bl_density_value = density_data[ai_lower, ti_lower]
br_density_value = density_data[ai_lower, ti_upper]
bottom_density_value = (bl_density_value + (time-time_lower)
/(time_upper-time_lower)*
(br_density_value-bl_density_value))
tl_density_value = density_data[ai_upper, ti_lower]
tr_density_value = density_data[ai_upper, ti_upper]
top_density_value = (tl_density_value + (time-time_lower)/
(time_upper-time_lower)*
(tr_density_value-tl_density_value))
density_value = (bottom_density_value +
(age-age_lower)/(age_upper-age_lower)*
(top_density_value-bottom_density_value))
strided_z.append(density_value)
#trace_on_surface = go.Scatter3d(
# name=name,
# x=-strided_times, y=strided_data, z=strided_z,
# mode = 'lines',
# line=dict(
# color=color,
# width=width
# ),
# showlegend = legend_on_surface
#)
#fig['data'] += [trace_on_surface]
fig.add_scatter3d(
name=name,
x=-strided_times, y=strided_data, z=strided_z,
mode = 'lines',
line=dict(
color=color,
width=width
),
showlegend = legend_on_surface
)
def plot_3d_density_plotly(self, title, density_data, ages,
age_stride=1, time_stride=1, y_label="Age", z_label="Mass"):
"""Create a 3-dimendional density plot with Plotly.
The colors are created such that they are constant along the age-time
diagonal.
Thus, equal colors mark equal entry time into the system.
Args:
title (str): The title of the new figure.
density_data (numpy.ndarray len(ages) x len(times)):
The density data to be plotted.
ages (numpy.ndarray):
ages (or transit times)
age_stride (int, optional): Coarseness of the plot in the age
direction to save memory.
Defaults to 1 meaning that all times are plotted and no memory
is saved.
time_stride (int, optional): Coarseness of the plot in the time
direction to save memory.
Defaults to 1 meaning that all times are plotted and no memory
is saved.
Returns:
Plotly figure.
"""
data, layout = self._density_plot_plotly(
density_data, ages, age_stride, time_stride, y_label, z_label)
layout['title'] = title
fig = go.Figure(data=data, layout=layout)
return fig
# def add_equilibrium_surface_plotly(self, fig, opacity=0.7, index=0):
# """
# The function has been renamed since
# 1. It is not certain that the system has an equilibrium at all.
# 2. The age distribution at the beginning of a model run does not have to
# represent an equilibrium age distribution
# (even if the system was in equilibrium at t0 in the sense that the pool contents do not change any more the age distribution still could.)
#
# please call add_constant_age_distribution_surface_plotly instead!
# """
# txt=self.add_equilibrium_surface_plotly.__doc__
# deprecation_warning(txt)
# self.add_constant_age_distribution_surface_plotly(fig, opacity, index)
def add_constant_age_distribution_surface_plotly(self, fig, opacity=0.7, index=0):
"""Add a grey and transparent density surface to an existing
Plotly density plot.
If index is not specified, it is assumed to be 0 and the values correspond to the first time in the times property of the model run (the age distribution at the beginning),
repeated for all times.
The plotted surface represents an age distribution that is constant in time.
It is intended to increase the visibility of changes in the age distribution with time.
Note that this constant age distribution does NOT necessarily correspond to a
possible (constant) development of the system.
This would only be true if the system was in equilibrium and the age distribution
was the equilibrium age distribution.
While this special case is a very interesting application, this function does not
ascertain that such an equilibrium situation is even possible.
Args:
fig (Plotly figure): The existing density plot to which the
surface is added.
opacity (between 0 and 1, optional): The opacity of the new surface.
Defaults to 0.7.
Unfortunately, the opacity option does not seem to work
properly.
index (int, optional): The time index from which the age distribution
data is taken.
Defaults to 0 such that the constant distribution is computed at time :math:`t_0`.
Returns:
None.
Instead ``fig`` is changed in place.
"""
data = fig['data'][0]
x = data['x']
y = data['y']
z = data['z'].copy()
for ti in range(z.shape[1]):
z[:,ti] = z[:,index]
#eq_surface_data = go.Surface(
fig.add_surface(
x=x,
y=y,
z=z,
showscale=False,
opacity = opacity,
surfacecolor=np.zeros_like(z))
#fig['data'].append(eq_surface_data)
##### cumulative distribution methods #####
def cumulative_pool_age_distributions_single_value(self,
start_age_densities=None, F0=None):
"""Return a function for the cumulative pool age distributions.
Args:
start_age_densities (Python function, optional): A function of age
that returns a numpy.array containing the masses with the given
age at time :math:`t_0`. Defaults to None.
F0 (Python function): A function of age that returns a numpy.array
containing the masses with age less than or equal to the age at
time :math:`t_0`. Defaults to None.
Raises:
Error: If both ``start_age_densities`` and ``F0`` are ``None``.
One must be given.
It is fastest to provide ``F0``, otherwise ``F0`` will be
computed by numerical integration of ``start_age_densities``.
Returns:
Python function ``F_sv``: ``F_sv(a,t)`` is the vector of pool
masses (``numpy.array``) with age less than or equal to ``a`` at
time ``t``.
"""
n = self.nr_pools
#soln = self.solve_old()
soln = self.solve()
if soln[0,:].sum() == 0:
start_age_densities = lambda a: np.zeros((n,))
if F0 is None and start_age_densities is None:
raise(Error('Either F0 or start_age_densities must be given.'))
times = self.times
t0 = times[0]
#sol_funcs = self.sol_funcs()
#sol_funcs_array = lambda t: np.array([sol_funcs[pool](t)
# for pool in range(n)])
#sol_funcs_array = self.solve_single_value_old()
sol_funcs_array = self.solve_func()
if F0 is None:
p0 = start_age_densities
F0 = lambda a: np.array([quad(lambda s: p0(s)[pool], 0, a)[0]
for pool in range(n)])
Phi = self._state_transition_operator
def G_sv(a, t):
if a < t-t0: return np.zeros((n,))
#print(t, t0, a-(t-t0))
res = Phi(t, t0, F0(a-(t-t0)))
c = hasattr(self, '_state_transition_operator_cache')
#print(c, 'G', res, t, t0, a, a-(t-t0))
return res
def H_sv(a, t):
# count everything from beginning?
if a >= t-t0: a = t-t0
# mass at time t
#x_t_old = np.array([sol_funcs[pool](t) for pool in range(n)])
x_t = sol_funcs_array(t)
# mass at time t-a
#x_tma_old = [np.float(sol_funcs[pool](t-a)) for pool in range(n)]
x_tma = sol_funcs_array(t-a)
# what remains from x_tma at time t
m = Phi(t, t-a, x_tma)
c = hasattr(self, '_state_transition_operator_cache')
#print(c, 'H', m, t, a, x_t, x_tma)
# difference is not older than t-a
res = x_t-m
# cut off accidental negative values
return np.maximum(res, np.zeros(res.shape))
def F(a, t):
res = G_sv(a,t) + H_sv(a,t)
#print(a, t, res)
#print('G', G_sv(a,t), 'H', H_sv(a,t))
return res
return F
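# Usage sketch (illustrative only; hypothetical `smr` instance and hypothetical
# initial age density `p0`): the cumulative pool age distribution can be
# evaluated pointwise, e.g. the pool masses with age <= 10 at the last time:
#
#     F_sv = smr.cumulative_pool_age_distributions_single_value(
#         start_age_densities=p0)
#     F_sv(10.0, smr.times[-1])   # numpy.array of length nr_pools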
def cumulative_system_age_distribution_single_value(self,
start_age_densities=None, F0=None):
"""Return a function for the cumulative system age distribution.
Args:
start_age_densities (Python function, optional): A function of age
that returns a numpy.array containing the masses with the given
age at time :math:`t_0`. Defaults to None.
F0 (Python function): A function of age that returns a numpy.array
containing the masses with age less than or equal to the age at
time :math:`t_0`. Defaults to None.
Raises:
Error: If both ``start_age_densities`` and ``F0`` are None.
One must be given.
It is fastest to provide ``F0``, otherwise ``F0`` will be
computed by numerical integration of ``start_age_densities``.
Returns:
Python function ``F_sv``: ``F_sv(a, t)`` is the mass in the system
with age less than or equal to ``a`` at time ``t``.
"""
n = self.nr_pools
#soln = self.solve_old()
soln = self.solve()
if soln[0,:].sum() == 0:
start_age_densities = lambda a: np.zeros((n,))
if F0 is None and start_age_densities is None:
raise(Error('Either F0 or start_age_densities must be given.'))
F_sv = self.cumulative_pool_age_distributions_single_value(
start_age_densities=start_age_densities, F0=F0)
return lambda a, t: F_sv(a,t).sum()
def cumulative_backward_transit_time_distribution_single_value_func(self,
start_age_densities=None, F0=None):
"""Return a function for the cumulative backward transit time
distribution.
Args:
start_age_densities (Python function, optional): A function of age
that returns a numpy.array containing the masses with the given
age at time :math:`t_0`. Defaults to None.
F0 (Python function): A function of age that returns a numpy.array
containing the masses with age less than or equal to the age at
time :math:`t_0`. Defaults to None.
Raises:
Error: If both ``start_age_densities`` and ``F0`` are ``None``.
One must be given.
It is fastest to provide ``F0``, otherwise ``F0`` will be
computed by numerical integration of ``start_age_densities``.
Returns:
Python function ``F_sv``: ``F_sv(a, t)`` is the mass leaving the
system at time ``t`` with age less than or equal to ``a``.
"""
if F0 is None and start_age_densities is None:
raise(Error('Either F0 or start_age_densities must be given.'))
F_sv = self.cumulative_pool_age_distributions_single_value(
start_age_densities=start_age_densities, F0=F0)
rho = self.output_rate_vector_at_t
def F_btt_sv(a, t):
res = (rho(t)*F_sv(a, t)).sum()
#print(a, t, res)
return res
return F_btt_sv
def cumulative_forward_transit_time_distribution_single_value_func(
self, cut_off=True):
"""Return a function for the cumulative forward transit time
distribution.
Args:
cut_off (bool, optional): If ``True``, no density values are going
to be computed after the end of the time grid, instead
``numpy.nan`` will be returned.
Defaults to ``True``.
``False`` might lead to unexpected behavior.
Returns:
Python function ``F_sv``: ``F_sv(a, t)`` is the mass leaving the
system at time ``t+a`` with age less than or equal to ``a``.
"""
times = self.times
t_max = times[-1]
Phi = self._state_transition_operator
u_func = self.external_input_vector_func()
def F_ftt_sv(a, t):
#print(a, t, a+t>t_max)
if cut_off and a+t>t_max: return np.nan
u = u_func(t)
res = u.sum() - Phi(t+a, t, u).sum()
#print(a, t, u, res)
return res
return F_ftt_sv
##### quantiles #####
def pool_age_distributions_quantiles(
self,
quantile,
start_values = None,
start_age_densities = None,
F0 = None,
method = 'brentq',
tol = 1e-8
):
"""Return pool age distribution quantiles over the time grid.
The computation is done by computing the generalized inverse of the
respective cumulative distribution using a nonlinear root search
algorithm. Depending on how slowly the cumulative distribution can be
computed, this can take quite some time.
Args:
quantile (between 0 and 1): The relative share of mass that is
considered to be left of the computed value. A value of ``0.5``
leads to the computation of the median of the distribution.
start_values (numpy.ndarray, len(times) x nr_pools, optional):
For each pool an array over the time grid of start values for
the nonlinear search.
Good values are slightly greater than the solution values.
Defaults to an array of ones for each pool.
start_age_densities (Python function, optional): A function of age
that returns a ``numpy.array`` containing the masses with the
given age at time :math:`t_0`.
Defaults to ``None``.
F0 (Python function): A function of age that returns a
``numpy.array`` containing the masses with age less than or
equal to the age at time :math:`t_0`.
Defaults to ``None``.
method (str): The method that is used for finding the roots of a
nonlinear function. Either 'brentq' or 'newton'.
Defaults to 'brentq'.
tol (float): The tolerance used in the numerical root search
algorithm. A low tolerance decreases the computation speed
tremendously, so a value of ``1e-01`` might already be fine.
Defaults to ``1e-08``.
Raises:
Error: If both ``start_age_densities`` and ``F0`` are ``None``.
One must be given.
It is fastest to provide ``F0``, otherwise ``F0`` will be
computed by numerical integration of ``start_age_densities``.
Returns:
numpy.ndarray: (len(times) x nr_pools)
The computed quantile values over the time-pool grid.
"""
n = self.nr_pools
soln = self.solve()
if soln[0,:].sum() == 0:
start_age_densities = lambda a: np.zeros((n,))
if F0 is None and start_age_densities is None:
raise(Error('Either F0 or start_age_densities must be given.'))
times = self.times
if start_values is None:
start_values = np.ones((len(times), n))
F_sv = self.cumulative_pool_age_distributions_single_value(
start_age_densities = start_age_densities,
F0 = F0
)
res = []
for pool in range(n):
print('Pool:', pool)
F_sv_pool = lambda a, t: F_sv(a,t)[pool]
res.append(
self.distribution_quantiles(
self,
quantile,
F_sv_pool,
norm_consts = soln[:,pool],
start_values = start_values[:,pool],
method = method,
tol = tol
)
)
return np.array(res).transpose()
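# Usage sketch (illustrative only; hypothetical `smr` instance and hypothetical
# initial age density `p0`): pool age medians over the time grid; a coarse
# tolerance speeds up the root search considerably:
#
#     medians = smr.pool_age_distributions_quantiles(
#         0.5, start_age_densities=p0, tol=1e-1)   # len(times) x nr_pools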
def system_age_distribution_quantiles(
self,
quantile,
start_values = None,
start_age_densities = None,
F0 = None,
method = 'brentq',
tol = 1e-8
):
"""Return system age distribution quantiles over the time grid.
The computation is done by computing the generalized inverse of the
respective cumulative distribution using a nonlinear root search
algorithm. Depending on how slowly the cumulative distribution can be
computed, this can take quite some time.
Args:
quantile (between 0 and 1): The relative share of mass that is
considered to be left of the computed value. A value of ``0.5``
leads to the computation of the median of the distribution.
start_values (numpy.array, optional): An array over the time grid of
start values for the nonlinear search.
Good values are slightly greater than the solution values.
Must have the same length as ``times``.
Defaults to the mean system age (first-order system age moment).
start_age_densities (Python function, optional): A function of age
that returns a ``numpy.array`` containing the masses with the
given age at time :math:`t_0`.
Defaults to ``None``.
F0 (Python function): A function of age that returns a
``numpy.array`` containing the masses with age less than or
equal to the age at time :math:`t_0`.
Defaults to ``None``.
method (str): The method that is used for finding the roots of a
nonlinear function. Either 'brentq' or 'newton'.
Defaults to 'brentq'.
tol (float): The tolerance used in the numerical root search
algorithm. A low tolerance decreases the computation speed
tremendously, so a value of ``1e-01`` might already be fine.
Defaults to ``1e-08``.
Raises:
Error: If both ``start_age_densities`` and ``F0`` are ``None``.
One must be given.
It is fastest to provide ``F0``, otherwise ``F0`` will be
computed by numerical integration of ``start_age_densities``.
Returns:
numpy.array: The computed quantile values over the time grid.
"""
n = self.nr_pools
soln = self.solve()
if soln[0,:].sum() == 0:
start_age_densities = lambda a: np.zeros((n,))
if F0 is None and start_age_densities is None:
raise(Error('Either F0 or start_age_densities must be given.'))
F_sv = self.cumulative_system_age_distribution_single_value(
start_age_densities = start_age_densities,
F0 = F0
)
#soln = self.solve_old()
if start_age_densities is not None:
start_age_moments = self.moments_from_densities(1, start_age_densities)
else:
start_age_moments = None
if start_values is None:
start_values = self.system_age_moment(1, start_age_moments)
a_star = self.distribution_quantiles(
self,
quantile,
F_sv,
norm_consts = soln.sum(1),
start_values = start_values,
method = method,
tol = tol
)
return a_star
@staticmethod
def distribution_quantiles(mr, quantile, F_sv, norm_consts=None,
start_values=None, times=None, method='brentq', tol=1e-8):
"""Return distribution quantiles over the time grid of a given
distribution.
The computation is done by computing the generalized inverse of the
respective cumulative distribution using a nonlinear root search
algorithm. Depending on how slowly the cumulative distribution can be
computed, this can take quite some time.
Args:
mr: The model run itself. This is necessary because for obscure
parallelization reasons the method needs to be static.
quantile (between 0 and 1): The relative share of mass that is
considered to be left of the computed value. A value of ``0.5``
leads to the computation of the median of the distribution.
F_sv (Python function): A function of age ``a`` and time ``t`` that
returns the mass that is of age less than or equal to ``a`` at
time ``t``.
norm_consts (numpy.array, optional): An array over the time grid of
total masses over all ages.
Defaults to an array of ones assuming given probability
distributions.
start_values (numpy.array, optional): An array over the time grid of
start values for the nonlinear search.
Good values are slightly greater than the solution values.
Must have the same length as ``times``.
Defaults to an array of zeros.
times (numpy.array, optional): Time grid on which to compute the
quantiles.
Defaults to ``None`` in which case the original time grid
is used.
method (str): The method that is used for finding the roots of a
nonlinear function. Either 'brentq' or 'newton'.
Defaults to 'brentq'.
tol (float): The tolerance used in the numerical root search
algorithm. A low tolerance decreases the computation speed
tremendously, so a value of ``1e-01`` might already be fine.
Defaults to ``1e-08``.
Returns:
numpy.array: The computed quantile values over the time grid.
"""
if times is None:
times = mr.times
if start_values is None:
start_values = np.zeros((len(times),))
if norm_consts is None:
norm_consts = np.ones((len(times),))
q_lst = []
for ti in tqdm(range(len(times))):
q_lst.append(
mr.distribution_quantile(
quantile,
lambda a: F_sv(a, times[ti]),
norm_const=norm_consts[ti],
start_value=start_values[ti],
method=method,
tol=tol
)
)
return np.array(q_lst)
@staticmethod
def distribution_quantile(quantile, F,
norm_const=None, start_value=None, method='brentq', tol=1e-8):
"""Return distribution quantile (one single value) of a given distribution.
The computation is done by computing the generalized inverse of the
respective cumulative distribution using a nonlinear root search
algorithm.
Args:
quantile (between 0 and 1): The relative share of mass that is
considered to be left of the computed value. A value of ``0.5``
leads to the computation of the median of the distribution.
F (Python function): A function of age ``a`` that
returns the mass that is of age less than or equal to ``a``.
norm_const (numpy.array, optional): The amount of total mass of the
distribution.
Defaults to one assuming a given probability distribution.
start_value (float, optional): A start value for the nonlinear
search.
A good value is slightly greater than the solution value.
Defaults to zero.
method (str): The method that is used for finding the roots of a
nonlinear function. Either 'brentq' or 'newton'.
Defaults to 'brentq'.
tol (float): The tolerance used in the numerical root search
algorithm. A low tolerance decreases the computation speed
tremendously, so a value of ``1e-01`` might already be fine.
Defaults to ``1e-08``.
Returns:
float: The computed quantile value of the distribution.
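        Example:
            A minimal sketch for the median of a probability distribution
            with (hypothetical) cumulative distribution function ``F``::
                median = SmoothModelRun.distribution_quantile(0.5, F)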
"""
if start_value is None:
start_value = 0
if norm_const is None:
norm_const = 1
def quantile_f():
if norm_const == 0: return np.nan
def g(a):
if np.isnan(a): return np.nan
res = quantile*norm_const - F(a)
#print('a:', a,'t', times[ti], 'g(a):', res, 'nc',
# norm_consts[ti], 'F', F(a, times[ti]))
return res
if method == 'newton':
a_star = newton(g, start_value, maxiter=500, tol=tol)
if method == 'brentq':
a_star = generalized_inverse_CDF(
lambda a: F(a),
quantile*norm_const,
# start_dist=start_value,
x1=start_value,
tol=tol
)
return a_star
q = quantile_f()
return q
## by ode ##
def pool_age_distributions_quantiles_by_ode(self, quantile,
start_age_densities, F0=None, check_time_indices=None, **kwargs):
"""Return pool age distribution quantiles over the time grid.
        The computation is done by solving an ODE for each pool as soon as the
pool is nonempty.
The initial value is obtained by computing the generalized inverse of
the pool age distribution by a numerical root search algorithm.
Args:
quantile (between 0 and 1): The relative share of mass that is
considered to be left of the computed value. A value of ``0.5``
leads to the computation of the median of the distribution.
start_age_densities (Python function): A function of age
that returns a ``numpy.array`` containing the masses with the
given age at time :math:`t_0`.
F0 (Python function, optional): A function of age that returns a
``numpy.array`` containing the masses with age less than or
equal to the age at time :math:`t_0`.
Defaults to ``None``.
It is fastest to provide ``F0``, otherwise ``F0`` will be
computed by numerical integration of ``start_age_densities``.
            check_time_indices (numpy.array, optional): Indices of the time
                grid on which the ODE results are checked against an explicit
solution computed by the pseudo-inverse of the cumulative
distribution function.
Defaults to ``None`` in which case no check is performed.
kwargs: Passed to the ``solve_ivp``, e.g., ``method``
or ``max_step``.
Returns:
numpy.ndarray: (len(times) x nr_pools) The computed quantile values
over the time-pool grid.
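        Example:
            A minimal usage sketch (hypothetical names: ``smr`` is an existing
            model run, ``p0`` its start age densities; ``max_step`` is only an
            illustrative solver option)::
                medians = smr.pool_age_distributions_quantiles_by_ode(
                    0.5, start_age_densities=p0, max_step=0.5)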
"""
res = []
for pool in range(self.nr_pools):
print('Pool:', pool)
res.append(
self.pool_age_distribution_quantiles_pool_by_ode(
quantile,
pool,
start_age_densities,
F0=F0,
check_time_indices=check_time_indices,
**kwargs
)
)
return np.array(res).transpose()
def x_solve_func_skew(self):
block_ode,x_block_name,phi_block_name=self._x_phi_block_ode()
return x_tmax(
self.times[0],
self.times[-1],
block_ode,
tuple(self.start_values),
x_block_name,
phi_block_name
)
def pool_age_distribution_quantiles_pool_by_ode(self, quantile, pool,
start_age_densities, F0=None, check_time_indices=None, **kwargs):
"""Return pool age distribution quantile over the time grid for one
single pool.
        The computation is done by solving an ODE as soon as the pool is
nonempty.
The initial value is obtained by computing the generalized inverse of
the pool age distribution by a numerical root search algorithm.
Args:
quantile (between 0 and 1): The relative share of mass that is
considered to be left of the computed value. A value of ``0.5``
leads to the computation of the median of the distribution.
pool (int): The number of the pool for which the age quantile is to
be computed.
start_age_densities (Python function): A function of age
that returns a ``numpy.array`` containing the masses with the
given age at time :math:`t_0`.
F0 (Python function, optional): A function of age that returns a
``numpy.array`` containing the masses with age less than or
equal to the age at time :math:`t_0`.
It is fastest to provide ``F0``, otherwise ``F0`` will be
computed by numerical integration of ``start_age_densities``.
Defaults to ``None``.
            check_time_indices (numpy.array, optional): Indices of the time
                grid on which the ODE results are checked against an explicit
solution computed by the pseudo-inverse of the cumulative
distribution function.
Defaults to ``None`` in which case no check is performed.
kwargs: Passed to the ``solve_ivp``, e.g., ``method``
or ``max_step``.
Raises:
Error: If ``start_age_densities`` is ``None``.
Returns:
numpy.ndarray: (len(times)) The computed quantile values over the
time grid.
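        Example:
            A minimal sketch for the median age of pool ``0`` (hypothetical
            names ``smr`` and ``p0`` as above)::
                median_pool_0 = smr.pool_age_distribution_quantiles_pool_by_ode(
                    0.5, 0, start_age_densities=p0)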
"""
#soln = self.solve_old()
vec_sol_func = self.x_solve_func_skew()
soln = vec_sol_func(self.times)
empty = soln[0, pool] == 0
if not empty and start_age_densities is None:
raise(Error('start_age_densities must be given'))
times = self.times
n = self.nr_pools
if not empty and F0 is None:
p0 = start_age_densities
F0 = lambda a: np.array([quad(lambda s: p0(s)[i], 0, a)[0]
for i in range(n)])
p = self.pool_age_densities_single_value(start_age_densities)
u = self.external_input_vector_func()
F = self.cumulative_pool_age_distributions_single_value(
start_age_densities=start_age_densities, F0=F0)
#sol_funcs = self.solve_single_value_old()
#vec_sol_func = self.solve_func()
# find last time index such that the pool is empty --> ti
ti = len(times)-1
content = soln[ti, pool]
while (content > 0) and (ti > 0):
ti = ti-1
content = soln[ti, pool]
if content == 0: ti += 1
if (ti == len(times)): return np.nan*np.ones((len(times),))
if ti == 0:
sv = generalized_inverse_CDF(lambda a: F0(a)[pool],
quantile*self.start_values[pool])
else:
#if start_age_densities is None:
# raise(Error('Cannot start delayed quantile computation,'
# 'since start_age_densities are missing.'))
CDFs = self.cumulative_pool_age_distributions_single_value(
start_age_densities)
CDF = lambda a: CDFs(a, times[ti])
sv = generalized_inverse_CDF(lambda a: CDF(a)[pool],
quantile*soln[ti, pool])
times = times[ti:]
t_max = times[-1]
t_min = times[0]
pb = tqdm(total = t_max-t_min)
global last_t, last_res
last_t = -1
last_res = -1.0
def rhs(y, t_val):
            y = float(y)  # np.float was removed from numpy; use the builtin
global last_t, last_res
t_val = min(t_val, t_max)
# rhs will be called twice with the same value apparently,
# we can use this to speed it up
if t_val == last_t: return last_res
#print('y', y, 't', t_val)
if (t_val <= t_max) and (t_val-t_min-pb.n > 0):
#pb.n = t_val-t_min
#pb.update(0)
pb.update(t_val-t_min-pb.n)
# print('Quantile, line 2866')
# print('y', y, 't', t_val)
p_val = p(y, t_val)[pool]
u_val = u(t_val)[pool]
F_vec = F(y, t_val).reshape((n,1))
x_vec = vec_sol_func(t_val)#.reshape((n,1))
B = self.B_func(vec_sol_func)(t_val)
# print('B', B)
# print('x', x_vec)
# print('B*x', B.dot(x_vec))
# print('p', p_val)
# print('u', u_val)
# print('F', F_vec)
# print('B*F', B.dot(F_vec))
# print(B.dot(F_vec)[pool])
# print(B.dot(F_vec)[1])
#if p_val == 0:
#raise(Error('Division by zero during quantile computation.'))
#else:
res = 1 + 1/p_val*(u_val*(quantile-1.0)
+quantile*(np.matmul(B,x_vec)[pool])-(np.matmul(B,F_vec)[pool]))
#print('res', res)
#print('---')
last_t = t_val
last_res = res
return np.array(res).reshape(1,)
#short_res = odeint(rhs, sv, times, atol=tol, mxstep=10000)
rhs2 = lambda t_val, y: rhs(y, t_val)
short_res = solve_ivp_pwc(
rhss = (rhs2,),
y0 = np.array([sv]).reshape(1,),
t_span = (times[0], times[-1]),
t_eval = times,
**kwargs
).y
short_res = np.rollaxis(short_res, -1, 0)
pb.close()
res = np.ndarray((len(self.times),))
res[:ti] = np.nan
res[ti:] = short_res.reshape((len(times),))
if check_time_indices is not None:
qs_ode = res[check_time_indices]
qs_pi = np.zeros_like(qs_ode)
for nr, ct_index in enumerate(check_time_indices):
qs_pi[nr] = self.__class__.distribution_quantile(
quantile,
lambda a: F(a, self.times[ct_index])[pool],
norm_const=soln[ct_index, pool],
start_value=qs_ode[nr]
)
#, method='brentq', tol=1e-8):
print_quantile_error_statisctics(qs_ode, qs_pi)
#print(res)
return res
def system_age_distribution_quantiles_by_ode(self, quantile,
start_age_densities, F0=None, check_time_indices=None, **kwargs):
"""Return system age distribution quantile over the time grid.
        The computation is done by solving an ODE as soon as the system is
nonempty.
The initial value is obtained by computing the generalized inverse of
the system age distribution by a numerical root search algorithm.
Args:
quantile (between 0 and 1): The relative share of mass that is
considered to be left of the computed value. A value of ``0.5``
leads to the computation of the median of the distribution.
start_age_densities (Python function): A function of age
that returns a ``numpy.array`` containing the masses with the
given age at time :math:`t_0`.
F0 (Python function, optional): A function of age that returns a
``numpy.array`` containing the masses with age less than or
equal to the age at time :math:`t_0`.
It is fastest to provide ``F0``, otherwise ``F0`` will be
computed by numerical integration of ``start_age_densities``.
Defaults to ``None``.
            check_time_indices (numpy.array, optional): Indices of the time
                grid on which the ODE results are checked against an explicit
solution computed by the pseudo-inverse of the cumulative
distribution function.
Defaults to ``None`` in which case no check is performed.
kwargs: Passed to the ``solve_ivp``, e.g., ``method``
or ``max_step``.
Raises:
Error: If ``start_age_densities`` is ``None``.
Returns:
numpy.ndarray: The computed quantile values over the time grid.
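        Example:
            A minimal sketch (hypothetical names ``smr`` and ``p0`` as above)::
                median_system_age = smr.system_age_distribution_quantiles_by_ode(
                    0.5, start_age_densities=p0)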
"""
#soln = self.solve_old()
#soln = self.solve()
vec_sol_func = self.x_solve_func_skew()
soln = vec_sol_func(self.times)
# check if system is empty at the beginning,
# if so, then we use 0 as start value, otherwise
# we need to compute it from F0 (preferably) or start_age_density
empty = soln[0,:].sum() == 0
if not empty and start_age_densities is None:
raise(Error('start_age_densities must be given'))
times = self.times
original_times = copy(times)
n = self.nr_pools
if not empty and F0 is None:
p0 = start_age_densities
F0 = lambda a: np.array([quad(lambda s: p0(s)[pool], 0, a)[0]
for pool in range(n)])
p = self.system_age_density_single_value(start_age_densities)
u = self.external_input_vector_func()
F = self.cumulative_pool_age_distributions_single_value(
start_age_densities=start_age_densities, F0=F0)
#sol_funcs = self.solve_single_value_old()
#vec_sol_func = self.solve_func()
# find last time index such that the system is empty --> ti
ti = len(times)-1
content = soln[ti,:]
while (content.sum() > 0) and (ti > 0):
ti = ti-1
content = soln[ti,:]
if content.sum() == 0: ti += 1
if (ti == len(times)): return np.nan*np.ones((len(times),))
if ti == 0:
sv = generalized_inverse_CDF(lambda a: F0(a).sum(),
quantile*self.start_values.sum())
else:
#if start_age_densities is None:
# raise(Error('Cannot start delayed quantile computation,'
# 'since start_age_Densities are missing.'))
CDFs = self.cumulative_system_age_distribution_single_value(
start_age_densities)
CDF = lambda a: CDFs(a, times[ti])
sv = generalized_inverse_CDF(CDF, quantile*soln[ti,:].sum())
times = times[ti:]
t_max = times[-1]
t_min = times[0]
pb = tqdm(total = t_max-t_min)
global last_t, last_res
last_t = -1
last_res = -1.0
def rhs(y, t_val):
            y = float(y)  # np.float was removed from numpy; use the builtin
global last_t, last_res
t_val = min(t_val, t_max)
# rhs will be called twice with the same value apparently,
# we can use this to speed it up
if t_val == last_t: return last_res
if (t_val <= t_max) and (t_val-t_min-pb.n > 0):
#pb.n = t_val-t_min
#pb.update(0)
pb.update(t_val-t_min-pb.n)
#pb.update(t_val-t_min, n=0)
#print()
#print('y', y, 't', t_val)
p_val = p(y, t_val)
u_vec = u(t_val)
F_vec = F(y, t_val).reshape((n,1))
x_vec = vec_sol_func(t_val)#.reshape((n,1))
B=self.B_func(vec_sol_func)(t_val)
#print('B', B)
#print('x', x_vec)
#print('B*x', B.dot(x_vec))
#print('y', y)
#print('t', t_val)
#print('p', p_val)
#print('u', u_vec)
#print('F', F_vec)
#print('B*F', B.dot(F_vec))
#print(F_val/x_val.sum()*((B*x_val).sum()-(B*F_val).sum()))
#if p_val == 0:
# raise(Error('Division by zero during quantile computation.'))
#else:
res = 1 + 1/p_val*(u_vec.sum()*(quantile-1.0)+
quantile*(np.matmul(B,x_vec)).sum()-(np.matmul(B,F_vec)).sum())
#print('res', res)
last_t = t_val
last_res = res
return np.array(res).reshape(1,)
#short_res = odeint(rhs, sv, times, atol=tol, mxstep=10000)
rhs2 = lambda t_val, y: rhs(y, t_val)
short_res = solve_ivp_pwc(
rhss = (rhs2,),
y0 = np.array([sv]).reshape(1,),
t_span = (times[0], times[-1]),
t_eval = times,
**kwargs
).y
short_res = np.rollaxis(short_res, -1, 0)
pb.close()
res = np.ndarray((len(original_times),))
res[:ti] = np.nan
res[ti:] = short_res.reshape((len(times),))
if check_time_indices is not None:
qs_ode = res[check_time_indices]
qs_pi = np.zeros_like(qs_ode)
for nr, ct_index in enumerate(check_time_indices):
qs_pi[nr] = self.__class__.distribution_quantile(
quantile,
lambda a: F(a, self.times[ct_index]).sum(),
norm_const=soln[ct_index,:].sum(),
start_value=qs_ode[nr]
)
#, method='brentq', tol=1e-8):
print_quantile_error_statisctics(qs_ode, qs_pi)
#print(res)
return res
########## 14C methods #########
def to_14C_explicit(self, start_values_14C, Fa_func, decay_rate=0.0001209681):
"""Construct and return a :class:`SmoothModelRun` instance that
        models the 14C component in addition to the original model run.
Args:
            start_values_14C (numpy.ndarray, nr_pools): 14C start values.
            Fa_func (func(t)): Returns the atmospheric fraction to be
                multiplied with the input vector.
            decay_rate (float, optional): The decay rate to be used, defaults to
                ``0.0001209681`` (daily).
Returns:
:class:`SmoothModelRun`
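        Example:
            A minimal sketch (hypothetical names: ``smr`` is this model run,
            ``start_values_14C`` and ``Fa_func`` as described above)::
                smr_14C = smr.to_14C_explicit(start_values_14C, Fa_func)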
"""
srm_14C = self.model.to_14C_explicit('lamda_14C', 'Fa_14C')
# create SmoothModelRun for 14C
par_set_14C = {k:v for k, v in self.parameter_dict.items()}
par_set_14C['lamda_14C'] = decay_rate
nr_pools = self.nr_pools
start_values_14C_cb = np.ones(nr_pools*2)
start_values_14C_cb[:nr_pools] = self.start_values
start_values_14C_cb[nr_pools:] = start_values_14C
times_14C = self.times
#Fa_atm = copy(atm_delta_14C)
#Fa_atm[:,1] = Fa_atm[:,1]/1000 + 1
#Fa_func = interp1d(Fa_atm[:,0], Fa_atm[:,1])
func_set_14C = {k:v for k,v in self.func_set.items()}
function_string = 'Fa_14C(' + srm_14C.time_symbol.name + ')'
func_set_14C[function_string] = Fa_func
smr_14C = SmoothModelRun(
srm_14C,
par_set_14C,
start_values_14C_cb,
times_14C,
func_set_14C,
)
return smr_14C
########## private methods #########
def _solve_age_moment_system_single_value_old(self, max_order,
start_age_moments=None, start_values=None):
t0 = self.times[0]
t_max = self.times[-1]
def func(t):
if t < t0:
# times x pools
res = np.zeros((1, self.nr_pools))
res[res==0] = np.nan
return res
# fixme: do we really want to cut off here?
# This could be dangerous
if t > t_max: t = t_max
new_times = [t0, t]
soln = self._solve_age_moment_system_old(max_order,
start_age_moments,
times=new_times,
start_values=start_values)
return soln[-1]
return func
def _solve_age_moment_system_func(
self,
max_order,
start_age_moments=None,
start_values=None
):
t0 = self.times[0]
t_max = self.times[-1]
soln, func = self._solve_age_moment_system(
max_order,
start_age_moments,
#times=new_times,
start_values=start_values
)
def save_func(times):
if isinstance(times,np.ndarray):
if times[0]<t0 or times[-1]>t_max:
raise Exception("""
times[0]<t0 or times[-1]>t_max: solve_ivp returns an interpolated
                    function, which does not check if the function is called
with arguments outside the computed range, but we do.
"""
)
else:
return np.rollaxis(func(times),-1,0)
else:
if (times < t0) or (times > t_max):
raise Exception("""
t<t0 or t>t_max: solve_ivp returns an interpolated
                    function, which does not check if the function is called
with arguments outside the computed range, but we do.
"""
)
else:
return func(times)
return save_func
def _solve_age_moment_system(self, max_order,
start_age_moments=None, start_values=None, times=None, store=True):
# this function caches the interpolation function instead of the values
#if max_order < 1:
# raise(ValueError("For numerical consistency we use the age moment system only for order >=1 (mean). Use solve instead!"))
if not ((times is None) and (start_values is None)): store = False
if times is None:
times = self.times
if start_values is None: start_values = self.start_values
if not(isinstance(start_values, np.ndarray)):
#print(start_values)
raise(Error("start_values should be a numpy array"))
n = self.nr_pools
if start_age_moments is None:
start_age_moments = np.zeros((max_order, n))
start_age_moments_list = flatten([a.tolist() for a in
[start_age_moments[i,:]
for i in range(start_age_moments.shape[0])]])
storage_key = tuple(start_age_moments_list) + ((max_order,),)
# return cached result if possible
if store:
if hasattr(self, "_previously_computed_age_moment_sol"):
if storage_key in self._previously_computed_age_moment_sol:
#print('using cached age moment system:', storage_key)
#print(
# self._previously_computed_age_moment_sol[storage_key])
return self._previously_computed_age_moment_sol[storage_key]
else:
self._previously_computed_age_moment_sol = {}
srm = self.model
state_vector, rhs = srm.age_moment_system(max_order)
# print('---')
# print(state_vector)
# print(rhs)
# input()
# compute solution
new_start_values = np.zeros((n*(max_order+1),))
new_start_values[:n] = np.array(start_values)#.reshape(n,)
new_start_values[n:] = np.array(start_age_moments_list)
soln, sol_func = numsol_symbolical_system(
state_vector,
srm.time_symbol,
rhs,
[self.parameter_dict],
[self.func_set],
new_start_values,
times,
#dense_output=True,
#disc_times=self.disc_times
)
def restrictionMaker(order):
#pe('soln[:,:]',locals())
restrictedSolutionArr=soln[:,:(order+1)*n]
def restrictedSolutionFunc(t):
return sol_func(t)[:(order+1)*n]
return (restrictedSolutionArr,restrictedSolutionFunc)
# save all solutions for order <= max_order
if store:
            # As it seems, if max_order is > 0, the solution (solved with
            # max_order=0) is slightly different from the first part
            # of the higher order system that corresponds to the solution.
            # The difference is very small (~1e-5), but big
            # enough to cause numerical problems in functions depending on
            # the consistency of the solution and the state transition
            # operator.
            # Consequently we do not save the solution
            # for orders less than max_order separately.
for order in [max_order]:
shorter_start_age_moments_list = (
start_age_moments_list[:order*n])
#print(start_age_moments_list)
#print(shorter_start_age_moments_list)
storage_key = (tuple(shorter_start_age_moments_list)
+ ((order,),))
#print('saving', storage_key)
self._previously_computed_age_moment_sol[storage_key] = restrictionMaker(order)
#print(self._previously_computed_age_moment_sol[storage_key])
return (soln, sol_func)
def _solve_age_moment_system_old(self, max_order,
start_age_moments=None, times=None, start_values=None, store=True):
#store = True
if not ((times is None) and (start_values is None)): store = False
if times is None:
times = self.times
if start_values is None: start_values = self.start_values
if not(isinstance(start_values, np.ndarray)):
#print(start_values)
raise(Error("start_values should be a numpy array"))
n = self.nr_pools
if start_age_moments is None:
start_age_moments = np.zeros((max_order, n))
start_age_moments_list = flatten([a.tolist() for a in
[start_age_moments[i,:]
for i in range(start_age_moments.shape[0])]])
storage_key = tuple(start_age_moments_list) + ((max_order,),)
# return cached result if possible
if store:
if hasattr(self, "_previously_computed_age_moment_sol_old"):
if storage_key in self._previously_computed_age_moment_sol_old:
#print('using cached age moment system:', storage_key)
#print(
# self._previously_computed_age_moment_sol_old[storage_key])
return self._previously_computed_age_moment_sol_old[storage_key]
else:
self._previously_computed_age_moment_sol_old = {}
srm = self.model
state_vector, rhs = srm.age_moment_system(max_order)
# compute solution
new_start_values = np.zeros((n*(max_order+1),))
new_start_values[:n] = np.array((start_values)).reshape((n,))
new_start_values[n:] = np.array((start_age_moments_list))
soln= numsol_symbolic_system_old(
state_vector,
srm.time_symbol,
rhs,
self.parameter_dict,
self.func_set,
new_start_values,
times
)
# save all solutions for order <= max_order
if store:
for order in range(max_order+1):
shorter_start_age_moments_list = (
start_age_moments_list[:order*n])
#print(start_age_moments_list)
#print(shorter_start_age_moments_list)
storage_key = (tuple(shorter_start_age_moments_list)
+ ((order,),))
#print('saving', storage_key)
self._previously_computed_age_moment_sol_old[storage_key] = (
soln[:,:(order+1)*n])
#print(self._previously_computed_age_moment_sol_old[storage_key])
return soln
@property
def no_input_model(self):
m=self.model
return m.no_input_model
#SmoothReservoirModel(
# m.state_vector,
# m.time_symbol,
# {},
# m.output_fluxes,
# m.internal_fluxes
#)
@property
def _no_input_sol(self):
        # Note that the solution of the no-input system
        # only coincides with the (application of) the state transition
        # operator if the system is linear, so this function can only
        # compute the state transition operator for a linear(ized) system.
if not hasattr(self, '_saved_no_input_sol'):
m = self.model
m_no_inputs=self.no_input_model
no_inputs_num_rhs = numerical_rhs_old(
m_no_inputs.state_vector,
m_no_inputs.time_symbol,
m_no_inputs.F,
self.parameter_dict,
self.func_set,
self.times)
def no_input_sol(times, start_vector):
            # print('nos', times, start_vector)
# Start and end time too close together? Do not integrate!
if abs(times[0]-times[-1]) < 1e-14:
return np.array(start_vector)
sv = np.array(start_vector).reshape((self.nr_pools,))
return odeint(no_inputs_num_rhs, sv, times, mxstep = 10000)[-1]
self._saved_no_input_sol = no_input_sol
return self._saved_no_input_sol
def initialize_state_transition_operator_cache(self, lru_maxsize, lru_stats=False, size=1):
custom_lru_cache = custom_lru_cache_wrapper(
maxsize=lru_maxsize, # variable maxsize now for lru cache
typed=False,
stats=lru_stats # use custom statistics feature
)
nr_pools = self.nr_pools
times = self.times
t_min = times[0]
t_max = times[-1]
cache_times = np.linspace(t_min, t_max, size+1)
ca = np.zeros((size, nr_pools, nr_pools))
cache = Cache(cache_times, ca, self.myhash())
cache._cached_phi_tmax = custom_lru_cache(phi_tmax)
self._state_transition_operator_cache = cache
#fixme:
# this method is not yet aware of the Cache class
def save_state_transition_operator_cache(self, filename):
self._state_transition_operator_cache.save(filename)
def load_state_transition_operator_cache(self, filename):
tmpCache = Cache.from_file(filename)
if self.myhash()==tmpCache.myhash:
self._state_transition_operator_cache=tmpCache
else:
raise Exception('State transition operator cache hash is different from the hash of the present model run and cannot be used. Please REMOVE THE CACHE FILE:'+filename)
def myhash(self):
"""
Compute a hash considering SOME but NOT ALL properties of a
model run. The function's main use is to detect saved state transition
        operator caches that are no longer compatible with the model run object
that wants to use them. This check is useful but NOT COMPREHENSIVE.
"""
times=self.times
def make_hash_sha256(o):
hasher = hashlib.sha256()
#hasher.update(repr(make_hashable(o)).encode())
hasher.update(repr(o).encode())
return base64.b64encode(hasher.digest()).decode()
return make_hash_sha256(
(
frozendict(self.model.input_fluxes),
frozendict(self.model.internal_fluxes),
frozendict(self.model.output_fluxes),
ImmutableMatrix(self.model.state_vector),
# to compute a hash of an arbitrary function object is difficult
# in particular if the function depends on data.
frozendict(self.parameter_dict),
self.start_values,
(times[0],times[-1])
)
)
def _x_phi_block_ode(self):
x_block_name = 'x'
phi_block_name = 'phi'
if not(hasattr(self, '_x_phi_block_ode_cache')):
nr_pools = self.nr_pools
block_ode = x_phi_ode(
self.model,
(self.parameter_dict,),
(self.func_set,),
x_block_name,
phi_block_name
)
self._x_phi_block_ode_cache = block_ode
return self._x_phi_block_ode_cache, x_block_name, phi_block_name
def _state_transition_operator(self, t, t0, x):
return np.matmul(self.Phi(t, t0), x).reshape((self.nr_pools,))
def _state_transition_operator_for_linear_systems(self, t, t0, x):
# this function could be used in a "linear smooth model run class"
# At the moment it is only used by the tests to show
# why a replacement was necessary for the general case
srm = self.model
if not srm.is_linear:
raise Exception("This method can only be applied to linear systems. Maybe you have to linearize along a solution first? ( Consider using the linearize method )" )
if t0 > t:
raise(Error("Evaluation before t0 is not possible"))
if t0 == t:
return x.flatten()
n = self.nr_pools
no_input_sol = self._no_input_sol
soln = (no_input_sol([t0, t], x)).reshape((n,))
# avoid small negative values
return np.maximum(soln, np.zeros_like(soln))
#if self._state_transition_operator_cache is None:
# # do not use the cache, it has not yet been created
# #self.build_state_transition_operator_cache()
# soln = (no_input_sol([t0, t], x)).reshape((n,))
#else:
# # use the already created cache
# times = self.times
# t_min = times[0]
# t_max = times[-1]
# nc = self._cache_size
# cached_times = np.linspace(t_min, t_max, nc)
# ca = self._state_transition_operator_cache
# # find tm1
# tm1_ind = cached_times.searchsorted(t0)
# tm1 = cached_times[tm1_ind]
# # check if next cached time is already behind t
# if t <= tm1: return no_input_sol([t0, t], x)
# # first integrate x to tm1: y = Phi(tm1, t_0)x
# y = (no_input_sol([t0, tm1], x)).reshape((n,1))
# step_size = (t_max-tm1)/(nc-1)
# if step_size > 0:
# tm2_ind = np.int(np.min([np.floor((t-tm1)/step_size), nc-1]))
# tm2 = tm1 + tm2_ind*step_size
# #print(t, t0, t==t0, tm1_ind, tm1, tm2_ind, tm2, step_size)
# B = ca[tm1_ind,tm2_ind,:,:]
# #print(t, t0, tm1, tm2, step_size, B)
#
# z = np.dot(B, y)
# else:
# tm2 = tm1
# z = y
# #z = (no_input_sol([tm1, tm2], y)[-1]).reshape((n,))
# # integrate z to t: sol=Phi(t,tm2)*z
# soln = (no_input_sol([tm2, t],z)).reshape((n,))
#
#return np.maximum(soln, np.zeros_like(soln))
#this function should be rewritten using the vector valued solution
def _flux_vector(self, flux_vec_symbolic):
#sol = self.solve_old()
sol = self.solve()
srm = self.model
n = self.nr_pools
times = self.times
tup = tuple(srm.state_vector) + (srm.time_symbol,)
res = np.zeros((len(times), n))
flux_vec_symbolic = sympify(flux_vec_symbolic, locals = _clash)
flux_vec_symbolic = flux_vec_symbolic.subs(self.parameter_dict)
#cut_func_set = {key[:key.index('(')]: val
# for key, val in self.func_set.items()}
cut_func_set=make_cut_func_set(self.func_set)
flux_vec_fun = lambdify(tup,
flux_vec_symbolic,
modules=[cut_func_set, 'numpy'])
res = np.zeros((len(times), n))
for ti in range(len(times)):
args = [sol[ti, pool] for pool in range(n)] + [times[ti]]
val = flux_vec_fun(*args)
res[ti,:] = val.reshape((n,))
return res
##### age density methods #####
def _age_densities_1_single_value(self, start_age_densities = None):
# for part that comes from initial value
if start_age_densities is None:
# all mass is assumed to have age 0 at the beginning
def start_age_densities(a):
if a != 0: return np.array((0,)*self.nr_pools)
return np.array(self.start_values)
# cut off negative ages in start_age_densities
def p0(a):
if a >= 0:
return start_age_densities(a)
else:
return np.zeros((self.nr_pools,))
Phi = self._state_transition_operator#_for_linear_systems
t0 = self.times[0]
#ppp = lambda a, t: self._state_transition_operator(t,t0,p0(a-(t-t0)))
def ppp(a, t):
#print('iv: ', a, t)
#fixme: cut off accidental negative values
#print('Y', a-(t-t0), p0(a-t-t0))
#print('smr 3821 ppp', t, t0, a, a-(t-t0))
res = np.maximum(Phi(t, t0, p0(a-(t-t0))), 0)
#print('ppp:', res)
return res
return ppp
# return a function p1 that takes an age np.array
# and gives back an nd array (age, time, pool)
def _age_densities_1(self, start_age_densities = None):
# for part that comes from initial value
ppp = self._age_densities_1_single_value(start_age_densities)
        # np.float was removed from numpy; use the builtin float as dtype
        pp = lambda a: np.array([ppp(a,t) for t in self.times], float)
        p1 = lambda ages: np.array([pp(a) for a in ages], float)
return p1
def _age_densities_2_single_value(self):
# for part that comes from the input function u
t0 = self.times[0]
u = self.external_input_vector_func()
#u = lambda x: np.array([1,2])
def ppp(a, t):
#print('input', a, t)
if (a < 0) or (t-t0 <= a):
val = np.zeros((1, self.nr_pools))[-1]
else:
u_val = u(t-a)
#print('u_val', u_val)
val = self._state_transition_operator(t, t-a, u_val)
#fixme: cut off accidental negative values
res = np.maximum(val, 0)
#print('ppp:', res)
return res
return ppp
# returns a function p2 that takes an age array "ages" as argument
# and gives back a three-dimensional ndarray (ages x times x pools)
def _age_densities_2(self):
# for part that comes from the input function u
ppp = self._age_densities_2_single_value()
        # np.float was removed from numpy; use the builtin float as dtype
        pp = lambda a: np.array([ppp(a,t) for t in self.times], float)
        p2 = lambda ages: np.array([pp(a) for a in ages], float)
return p2
##### plotting methods #####
def _density_plot_plotly(self, field, ages, age_stride=1, time_stride=1, y_label="Age", z_label="Mass"):
times = self.times
strided_field = stride(field, (age_stride, time_stride))
strided_ages = stride(ages, age_stride)
strided_times = stride(times, time_stride)
surfacecolor = strided_field.copy()
for ai in range(strided_field.shape[0]):
for ti in range(strided_field.shape[1]):
surfacecolor[ai,ti] = - (ai - ti)
data = [go.Surface(x = -strided_times,
y = strided_ages,
z = strided_field,
showscale = False,
surfacecolor = surfacecolor,
colorscale = 'Rainbow')]
tickvals = np.linspace(strided_times[0], strided_times[-1], 5)
ticktext = [str(v) for v in tickvals]
tickvals = -tickvals
layout = go.Layout(
width = 800,
height = 800,
scene = dict(
xaxis = dict(
title = 'Time',
tickmode = 'array',
tickvals = tickvals,
ticktext = ticktext
#range = [-times[0], -times[-1]]
),
yaxis = dict(
title = y_label,
range = [ages[0], ages[-1]]
),
zaxis = dict(
title = z_label,
range = [0, np.amax(strided_field)]
)
)
)
return data, layout
## plot helper methods ##
# fixme: unit treatment disabled
    # fixme mm 7-28-2021 units do not have a place in SmoothModelRun since
    # we do not have them for all the instances. If units are required they should live in a different class
def _add_time_unit(self, label):
#if self.model.time_unit:
# label += r"$\quad(\mathrm{" + latex(self.model.time_unit) + "})$"
return label
def _add_content_unit(self, label):
#if self.model.content_unit:
# label +=r"$\quad(\mathrm{" + latex(self.model.content_unit) + "})$"
return label
def _add_flux_unit(self, label):
#if self.model.content_unit and self.model.time_unit:
# label += r"$\quad(\mathrm{" + latex(self.model.content_unit)
# label += "/" + latex(self.model.time_unit) + "})$"
return label
## flux helper functions ##
#fixme: test and move
def _flux_funcs(self, expr_dict):
m = self.model
#sol_funcs = self.sol_funcs()
sol_funcs = self.sol_funcs()
flux_funcs = {}
tup = tuple(m.state_variables) + (m.time_symbol,)
for key, expression in expr_dict.items():
if isinstance(expression, Number):
                # in this case (constant flux) lambdify for some reason
                # does not return a vectorized function but one that
                # always returns a number even when it is called with
                # an array argument. We therefore create such a function
                # ourselves.
flux_funcs[key] = const_of_t_maker(expression)
else:
# fixme mm 11-5-2018
                # the sympify in the next line should be unnecessary since
# the expressions are already expressions and not strings
# and now also not Numbers
#o_par = sympify(expression, locals=_clash).subs(self.parameter_dict)
o_par = expression.subs(self.parameter_dict)
cut_func_set = make_cut_func_set(self.func_set)
ol = lambdify(tup, o_par, modules = [cut_func_set, 'numpy'])
#ol = numerical_function_from_expression(expression,tup,self.parameter_dict,self.func_set)
flux_funcs[key] = f_of_t_maker(sol_funcs, ol)
return flux_funcs
## temporary ##
def _FTTT_lambda_bar(self, end, s, u):
u_norm = u.sum()
if u_norm == 0:
return 0
Phi = self._state_transition_operator
t1 = end
result = -np.log(Phi(t1, s, u).sum()/u_norm)/(t1-s)
return result
def _FTTT_lambda_bar_R(self, start, end):
if (start < self.times[0]) or (end > self.times[-1]):
raise(Error('Interval boundaries out of bounds'))
if start > end:
            raise(Error('Starting time must not be later than ending time'))
t0 = start
t1 = end
u_func = self.external_input_vector_func()
#soln_func = self.solve_single_value_old()
vec_soln_func = self.solve_func()
x0 = vec_soln_func(t0)
x0_norm = x0.sum()
A = x0_norm*(t1-t0)*self._FTTT_lambda_bar(t1, t0, x0)
#print('A', A)
def B_integrand(s):
u = u_func(s)
u_norm = u.sum()
return u_norm*(t1-s)*self._FTTT_lambda_bar(t1, s, u)
B = quad(B_integrand, t0, t1)[0]
#print('B', B)
C = x0_norm*(t1-t0)
#print('C', C)
def D_integrand(s):
u_norm = u_func(s).sum()
return u_norm*(t1-s)
D = quad(D_integrand, t0, t1)[0]
#print('D', D)
return (A+B)/(C+D)
def _FTTT_T_bar_R(self, start, end):
if (start < self.times[0]) or (end > self.times[-1]):
raise(Error('Interval boundaries out of bounds'))
if start > end:
            raise(Error('Starting time must not be later than ending time'))
t0 = start
t1 = end
u_func = self.external_input_vector_func()
Phi = self._state_transition_operator
#soln_func = self.solve_single_value_old()
vec_soln_func = self.solve_func()
x0 = vec_soln_func(t0)
x0_norm = x0.sum()
if x0_norm > 0:
A = x0_norm*(t1-t0)*1/self._FTTT_lambda_bar(t1, t0, x0)
else:
A = 0
#print('A', A)
def B_integrand(s):
u = u_func(s)
u_norm = u.sum()
if u_norm > 0:
return u_norm*(t1-s)*1/self._FTTT_lambda_bar(t1, s, u)
else:
return 0
B = quad(B_integrand, t0, t1)[0]
#print('B', B)
C = x0_norm*(t1-t0)
#print('C', C)
def D_integrand(s):
u_norm = u_func(s).sum()
return u_norm*(t1-s)
D = quad(D_integrand, t0, t1)[0]
#print('D', D)
return (A+B)/(C+D)
def _FTTT_lambda_bar_S(self, start, end):
# for Martin Rasmussens surrogate system
if (start < self.times[0]) or (end > self.times[-1]):
raise(Error('Interval boundaries out of bounds'))
if start > end:
raise(Error('Starting time must not be later than ending time'))
if start == end:
return np.nan
t0, t1 = start, end
#soln_func = self.solve_single_value_old()
vec_soln_func = self.solve_func()
x0 = vec_soln_func(t0)
x1 = vec_soln_func(t1)
z0 = x0.sum()
z1 = x1.sum()
u_func = self.external_input_vector_func()
# function to minimize during Newton to find lambda_bar_S
# g seems to have huge numerical issues
def g(lamda):
def f(z, t):
# RHS in the surrogate system
return -lamda*z+sum(u_func(t))
# solve the system with current lambda
sol = odeint(f, z0, [t0, t1])
# return the distance of the current final time value
# from the desired z1
res = sol[-1]-z1
return res
# g2 seems to work much better
def g2(lamda):
if lamda <= 0:
return 137
def f(s):
res = np.exp(-lamda*(t1-s))*sum(u_func(s))
#print(lamda, res, u_func(s), t1, s)
return res
int_res = quad(f, t0, t1)[0]
z0_remaining = np.exp(-lamda*(t1-t0))*z0
if (z0_remaining<1e-08) or np.isnan(z0_remaining):
z0_remaining = 0
res = z0_remaining-z1+int_res
#print(lamda, z0_remaining, z1, int_res, res)
return res
# return lambda_bar_S after optimization
try:
#res = newton(g, 0.5, maxiter=5000)
#res = newton(g2, 1.5, maxiter=500)
res = brentq(g2, 0, 5, maxiter=500)
except RuntimeError:
print('optimization aborted')
return np.nan
if res <= 0:
return np.nan
if not isinstance(res, float):
res = res[0]
return res
def _calculate_steady_states(self):
#fixme: should be possible only for autonomous, possibly nonlinear,
# models
#fixme: test?
ss = solve(self.model.F.subs(self.parameter_dict),
self.model.state_vector,
dict=True)
return_ss = []
for ss_i in ss:
add = True
for key, val in ss_i.items():
if self.model.time_symbol in val.free_symbols:
add = False
if add:
return_ss.append(ss_i)
return return_ss
def _FTTT_lambda_bar_R_left_limit(self, t0):
#B0 = self.B(t0)
vec_sol_funcs = self.solve_func()
B0 = self.B_func()(t0)
iv = Matrix(self.start_values) # column vector
z = (-ones(1, len(iv))*B0).T
return (z.T*iv/mpmath.norm(iv, 1))[0]
## new FTTT approach ##
def _alpha_s_i(self, s, i, t1):
Phi = self._state_transition_operator
e_i = np.zeros(self.nr_pools)
e_i[i] = 1
return 1 - Phi(t1,s,e_i).sum()
def _alpha_s(self, s, t1, vec):
Phi = self._state_transition_operator
vec_norm = vec.sum()
return 1 - Phi(t1,s,vec).sum()/vec_norm
def _EFFTT_s_i(self, s, i, t1, alpha_s_i = None):
Phi = self._state_transition_operator
if alpha_s_i is None:
alpha_s_i = self._alpha_s_i(s, i, t1)
e_i = np.zeros(self.nr_pools)
e_i[i] = 1
def F_FTT_i(a):
return 1 - Phi(s+a,s,e_i).sum()
def integrand(a):
return 1 - F_FTT_i(a)/alpha_s_i
result = quad(integrand, 0, t1-s, epsabs=1.5e-03, epsrel=1.5e-03)[0]
return result
def _TR(self, s, t1, v): # v is the remaining vector, not normalized
Phi = self._state_transition_operator
n = self.nr_pools
Phi_matrix = np.zeros((n,n))
for i in range(n):
e_i = np.zeros(n)
e_i[i] = 1
Phi_matrix[:,i] = Phi(t1,s,e_i)
A = scipy.linalg.logm(Phi_matrix)/(t1-s)
A_inv = scipy.linalg.inv(A)
o = np.ones(n)
v_normed = v/v.sum()
return (t1-s) + (-o @ A_inv @ v_normed)
def _FTTT_finite_plus_remaining(self, s, t1, t0):
if s == t0:
#soln_func = self.solve_single_value_old()
vec_soln_func = self.solve_func()
vec = vec_soln_func(s)
else:
u_func = self.external_input_vector_func()
vec = u_func(s)
vec_norm = vec.sum()
if vec_norm > 0 :
Phi = self._state_transition_operator
# the finite time part
finite = 0
for i in range(self.nr_pools):
alpha_s_i = self._alpha_s_i(s, i, t1)
EFFTT_s_i = self._EFFTT_s_i(s, i, t1, alpha_s_i)
finite += vec[i] * alpha_s_i * EFFTT_s_i
# the part for the remaining mass
if s < t1:
v = Phi(t1,s,vec) # remaining mass at time t1
alpha_s = self._alpha_s(s, t1, vec)
remaining = (1-alpha_s) * vec_norm * self._TR(s, t1, v)
else:
remaining = 0
return finite + remaining
else:
return 0
def _FTTT_conditional(self, t1, t0):
if (t0 < self.times[0]) or (t1 > self.times[-1]):
raise(Error('Interval boundaries out of bounds'))
if t0 >= t1:
            raise(Error('Starting time must be earlier than ending time'))
A = (t1-t0) * self._FTTT_finite_plus_remaining(t0, t1, t0)
def B_integrand(s):
return (t1-s) * self._FTTT_finite_plus_remaining(s, t1, t0)
B = quad(B_integrand, t0, t1, epsabs=1.5e-03, epsrel=1.5e-03)[0]
#soln_func = self.solve_single_value_old()
vec_soln_func = self.solve_func()
x0 = vec_soln_func(t0)
x0_norm = x0.sum()
C = x0_norm*(t1-t0)
u_func = self.external_input_vector_func()
def D_integrand(s):
u_norm = u_func(s).sum()
return u_norm*(t1-s)
D = quad(D_integrand, t0, t1)[0]
return (A+B)/(C+D)
#### Important again ####
def Phi_func(self):
# note that the functions used to produce the matrix
# self.Phi are cached (if the cache is initialized)
# so that the repeated calls by the following lambda
# are actually cheap
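        # A minimal usage sketch (hypothetical values; the cache step is
        # optional):
        #   smr.initialize_state_transition_operator_cache(lru_maxsize=128, size=10)
        #   Phi = smr.Phi_func()
        #   Phi(t1, t0)  # repeated evaluations reuse the cached runs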
return lambda T, S: self.Phi(T, S)
def Phi(self, T, S):
nr_pools = self.nr_pools
start_Phi_2d = np.identity(nr_pools)
if S > T:
raise(Error("Evaluation before S is not possible"))
if S == T:
return start_Phi_2d
solve_func = self.solve_func()
block_ode, x_block_name, phi_block_name = self._x_phi_block_ode()
if hasattr(self,'_state_transition_operator_cache'):
cache = self._state_transition_operator_cache
cache_times = cache.keys
S_phi_ind = cache.phi_ind(S)
T_phi_ind = cache.phi_ind(T)
my_phi_tmax = cache._cached_phi_tmax
def phi(t, s, t_max):
x_s = tuple(solve_func(s))
return my_phi_tmax(
s,
t_max,
block_ode,
x_s,
x_block_name,
phi_block_name
)(t)
# t_max = self.times[-1]
t_max = cache.end_time_from_phi_ind(T_phi_ind)
# catch the corner cases where the cache is useless.
if (T_phi_ind-S_phi_ind) < 1:
return phi(T, S, t_max=cache.end_time_from_phi_ind(T_phi_ind))
tm1 = cache.end_time_from_phi_ind(S_phi_ind)
## first integrate to tm1:
if tm1 != S:
phi_tm1_S = phi(tm1, S, tm1)
else:
phi_tm1_S = start_Phi_2d
phi_T_tm1 = phi(T, tm1, self.times[-1])
return np.matmul(phi_T_tm1, phi_tm1_S)
else:
def phi(t, s):
x_s = solve_func(s)
start_Phi_2d = np.identity(nr_pools)
start_blocks = [
(x_block_name, x_s),
(phi_block_name, start_Phi_2d)
]
blivp = block_ode.blockIvp(start_blocks)
return blivp.block_solve(t_span=(s, t))[phi_block_name][-1, ...]
return phi(T, S)
def fake_discretized_Bs(self, data_times=None):
if data_times is None:
data_times = self.times
nr_pools = self.nr_pools
n = len(data_times)
Bs = np.zeros((n-1, nr_pools, nr_pools))
for k in range(n-1):
Bs[k,:,:] = self.Phi(data_times[k+1], data_times[k])
return Bs
def acc_net_internal_flux_matrix(self, data_times=None):
if data_times is None:
data_times = self.times
x_func = self.solve_func()
xs = x_func(data_times)
Bs = self.fake_discretized_Bs(data_times)
return net_Fs_from_discrete_Bs_and_xs(Bs, xs)
def acc_net_external_output_vector(self, data_times=None):
if data_times is None:
data_times = self.times
x_func = self.solve_func()
xs = x_func(data_times)
Bs = self.fake_discretized_Bs(data_times)
return net_Rs_from_discrete_Bs_and_xs(Bs, xs)
def acc_net_external_input_vector(self, data_times=None):
if data_times is None:
data_times = self.times
x_func = self.solve_func()
xs = x_func(data_times)
Bs = self.fake_discretized_Bs(data_times)
return net_Us_from_discrete_Bs_and_xs(Bs, xs)
def fake_net_discretized_output(self, data_times):
x_func = self.solve_func()
xs = x_func(data_times)
net_Fs = self.acc_net_internal_flux_matrix(data_times)
net_Rs = self.acc_net_external_output_vector(data_times)
net_Us = self.acc_net_external_input_vector(data_times)
return xs, net_Us, net_Fs, net_Rs
def fake_gross_discretized_output(self, data_times):
## prepare some fake output data
#x = self.solve_single_value_old()
x_func = self.solve_func()
xs = x_func(data_times)
gross_Fs = self.acc_gross_internal_flux_matrix(data_times)
gross_Rs = self.acc_gross_external_output_vector(data_times)
gross_Us = self.acc_gross_external_input_vector(data_times)
return xs, gross_Us, gross_Fs, gross_Rs
| {
"content_hash": "ff7b50a67b737bb3ed1c66e8ea7dd164",
"timestamp": "",
"source": "github",
"line_count": 4422,
"max_line_length": 179,
"avg_line_length": 37.12618724559023,
"alnum_prop": 0.5366262212801208,
"repo_name": "MPIBGC-TEE/CompartmentalSystems",
"id": "70910d3b0a96ee67770aaa3f4ec2e251a593418e",
"size": "164172",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/CompartmentalSystems/smooth_model_run.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "895"
},
{
"name": "HTML",
"bytes": "35548556"
},
{
"name": "Jupyter Notebook",
"bytes": "131659124"
},
{
"name": "Makefile",
"bytes": "8783"
},
{
"name": "Python",
"bytes": "1119047"
},
{
"name": "Shell",
"bytes": "2348"
}
],
"symlink_target": ""
} |
import os
import sys
import random
import time
import json
import copy
import errno
import traceback
import operator
import math
from functools import partial
from collections import defaultdict
from numbers import Number
from decimal import Decimal
from typing import TYPE_CHECKING, List, Optional, Tuple, Union, NamedTuple, Sequence, Dict, Any, Set
from abc import ABC, abstractmethod
import itertools
import threading
import enum
from aiorpcx import timeout_after, TaskTimeout, ignore_after
from .i18n import _
from .bip32 import BIP32Node, convert_bip32_intpath_to_strpath, convert_bip32_path_to_list_of_uint32
from .crypto import sha256
from . import util
from .util import (NotEnoughFunds, UserCancelled, profiler, OldTaskGroup,
format_satoshis, format_fee_satoshis, NoDynamicFeeEstimates,
WalletFileException, BitcoinException,
InvalidPassword, format_time, timestamp_to_datetime, Satoshis,
Fiat, bfh, bh2u, TxMinedInfo, quantize_feerate, create_bip21_uri, OrderedDictWithIndex, parse_max_spend)
from .simple_config import SimpleConfig, FEE_RATIO_HIGH_WARNING, FEERATE_WARNING_HIGH_FEE
from .bitcoin import COIN, TYPE_ADDRESS
from .bitcoin import is_address, address_to_script, is_minikey, relayfee, dust_threshold
from .crypto import sha256d
from . import keystore
from .keystore import (load_keystore, Hardware_KeyStore, KeyStore, KeyStoreWithMPK,
AddressIndexGeneric, CannotDerivePubkey)
from .util import multisig_type
from .storage import StorageEncryptionVersion, WalletStorage
from .wallet_db import WalletDB
from . import transaction, bitcoin, coinchooser, paymentrequest, ecc, bip32
from .transaction import (Transaction, TxInput, UnknownTxinType, TxOutput,
PartialTransaction, PartialTxInput, PartialTxOutput, TxOutpoint)
from .plugin import run_hook
from .address_synchronizer import (AddressSynchronizer, TX_HEIGHT_LOCAL,
TX_HEIGHT_UNCONF_PARENT, TX_HEIGHT_UNCONFIRMED, TX_HEIGHT_FUTURE)
from .invoices import Invoice, OnchainInvoice, LNInvoice
from .invoices import PR_PAID, PR_UNPAID, PR_UNKNOWN, PR_EXPIRED, PR_UNCONFIRMED, PR_TYPE_ONCHAIN, PR_TYPE_LN
from .contacts import Contacts
from .interface import NetworkException
from .mnemonic import Mnemonic
from .logging import get_logger
from .lnworker import LNWallet
from .paymentrequest import PaymentRequest
from .util import read_json_file, write_json_file, UserFacingException
if TYPE_CHECKING:
from .network import Network
from .exchange_rate import FxThread
_logger = get_logger(__name__)
TX_STATUS = [
_('Unconfirmed'),
_('Unconfirmed parent'),
_('Not Verified'),
_('Local'),
]
class BumpFeeStrategy(enum.Enum):
COINCHOOSER = enum.auto()
DECREASE_CHANGE = enum.auto()
DECREASE_PAYMENT = enum.auto()
async def _append_utxos_to_inputs(*, inputs: List[PartialTxInput], network: 'Network',
pubkey: str, txin_type: str, imax: int) -> None:
if txin_type in ('p2pkh', 'p2wpkh', 'p2wpkh-p2sh'):
address = bitcoin.pubkey_to_address(txin_type, pubkey)
scripthash = bitcoin.address_to_scripthash(address)
elif txin_type == 'p2pk':
script = bitcoin.public_key_to_p2pk_script(pubkey)
scripthash = bitcoin.script_to_scripthash(script)
else:
raise Exception(f'unexpected txin_type to sweep: {txin_type}')
async def append_single_utxo(item):
prev_tx_raw = await network.get_transaction(item['tx_hash'])
prev_tx = Transaction(prev_tx_raw)
prev_txout = prev_tx.outputs()[item['tx_pos']]
if scripthash != bitcoin.script_to_scripthash(prev_txout.scriptpubkey.hex()):
raise Exception('scripthash mismatch when sweeping')
prevout_str = item['tx_hash'] + ':%d' % item['tx_pos']
prevout = TxOutpoint.from_str(prevout_str)
txin = PartialTxInput(prevout=prevout)
txin.utxo = prev_tx
txin.block_height = int(item['height'])
txin.script_type = txin_type
txin.pubkeys = [bfh(pubkey)]
txin.num_sig = 1
if txin_type == 'p2wpkh-p2sh':
txin.redeem_script = bfh(bitcoin.p2wpkh_nested_script(pubkey))
inputs.append(txin)
u = await network.listunspent_for_scripthash(scripthash)
async with OldTaskGroup() as group:
for item in u:
if len(inputs) >= imax:
break
await group.spawn(append_single_utxo(item))
async def sweep_preparations(privkeys, network: 'Network', imax=100):
async def find_utxos_for_privkey(txin_type, privkey, compressed):
pubkey = ecc.ECPrivkey(privkey).get_public_key_hex(compressed=compressed)
await _append_utxos_to_inputs(
inputs=inputs,
network=network,
pubkey=pubkey,
txin_type=txin_type,
imax=imax)
keypairs[pubkey] = privkey, compressed
inputs = [] # type: List[PartialTxInput]
keypairs = {}
async with OldTaskGroup() as group:
for sec in privkeys:
txin_type, privkey, compressed = bitcoin.deserialize_privkey(sec)
await group.spawn(find_utxos_for_privkey(txin_type, privkey, compressed))
# do other lookups to increase support coverage
if is_minikey(sec):
# minikeys don't have a compressed byte
# we lookup both compressed and uncompressed pubkeys
await group.spawn(find_utxos_for_privkey(txin_type, privkey, not compressed))
elif txin_type == 'p2pkh':
# WIF serialization does not distinguish p2pkh and p2pk
# we also search for pay-to-pubkey outputs
await group.spawn(find_utxos_for_privkey('p2pk', privkey, compressed))
if not inputs:
raise UserFacingException(_('No inputs found.'))
return inputs, keypairs
async def sweep(
privkeys,
*,
network: 'Network',
config: 'SimpleConfig',
to_address: str,
fee: int = None,
imax=100,
locktime=None,
tx_version=None) -> PartialTransaction:
inputs, keypairs = await sweep_preparations(privkeys, network, imax)
total = sum(txin.value_sats() for txin in inputs)
if fee is None:
outputs = [PartialTxOutput(scriptpubkey=bfh(bitcoin.address_to_script(to_address)),
value=total)]
tx = PartialTransaction.from_io(inputs, outputs)
fee = config.estimate_fee(tx.estimated_size())
if total - fee < 0:
raise Exception(_('Not enough funds on address.') + '\nTotal: %d satoshis\nFee: %d'%(total, fee))
if total - fee < dust_threshold(network):
raise Exception(_('Not enough funds on address.') + '\nTotal: %d satoshis\nFee: %d\nDust Threshold: %d'%(total, fee, dust_threshold(network)))
outputs = [PartialTxOutput(scriptpubkey=bfh(bitcoin.address_to_script(to_address)),
value=total - fee)]
if locktime is None:
locktime = get_locktime_for_new_transaction(network)
tx = PartialTransaction.from_io(inputs, outputs, locktime=locktime, version=tx_version)
#rbf = bool(config.get('use_rbf', True))
#tx.set_rbf(rbf)
tx.sign(keypairs)
return tx
def get_locktime_for_new_transaction(network: 'Network') -> int:
# if no network or not up to date, just set locktime to zero
if not network:
return 0
chain = network.blockchain()
if chain.is_tip_stale():
return 0
# discourage "fee sniping"
locktime = chain.height()
# sometimes pick locktime a bit further back, to help privacy
# of setups that need more time (offline/multisig/coinjoin/...)
if random.randint(0, 9) == 0:
locktime = max(0, locktime - random.randint(0, 99))
return locktime
class CannotBumpFee(Exception):
def __str__(self):
return _('Cannot bump fee') + ':\n\n' + Exception.__str__(self)
class CannotDoubleSpendTx(Exception):
def __str__(self):
return _('Cannot cancel transaction') + ':\n\n' + Exception.__str__(self)
class CannotCPFP(Exception):
def __str__(self):
return _('Cannot create child transaction') + ':\n\n' + Exception.__str__(self)
class InternalAddressCorruption(Exception):
def __str__(self):
return _("Wallet file corruption detected. "
"Please restore your wallet from seed, and compare the addresses in both files")
class TxWalletDetails(NamedTuple):
txid: Optional[str]
status: str
label: str
can_broadcast: bool
can_bump: bool
can_cpfp: bool
can_dscancel: bool # whether user can double-spend to self
can_save_as_local: bool
amount: Optional[int]
fee: Optional[int]
tx_mined_status: TxMinedInfo
mempool_depth_bytes: Optional[int]
can_remove: bool # whether user should be allowed to delete tx
is_lightning_funding_tx: bool
class Abstract_Wallet(AddressSynchronizer, ABC):
"""
Wallet classes are created to handle various address generation methods.
Completion states (watching-only, single account, no seed, etc) are handled inside classes.
"""
LOGGING_SHORTCUT = 'w'
max_change_outputs = 3
gap_limit_for_change = 10
txin_type: str
wallet_type: str
lnworker: Optional['LNWallet']
def __init__(self, db: WalletDB, storage: Optional[WalletStorage], *, config: SimpleConfig):
if not db.is_ready_to_be_used_by_wallet():
raise Exception("storage not ready to be used by Abstract_Wallet")
self.config = config
assert self.config is not None, "config must not be None"
self.db = db
self.storage = storage
# load addresses needs to be called before constructor for sanity checks
db.load_addresses(self.wallet_type)
self.keystore = None # type: Optional[KeyStore] # will be set by load_keystore
AddressSynchronizer.__init__(self, db)
# saved fields
self.use_change = db.get('use_change', True)
self.multiple_change = db.get('multiple_change', False)
self._labels = db.get_dict('labels')
self._frozen_addresses = set(db.get('frozen_addresses', []))
self._frozen_coins = db.get_dict('frozen_coins') # type: Dict[str, bool]
self.fiat_value = db.get_dict('fiat_value')
self.receive_requests = db.get_dict('payment_requests') # type: Dict[str, Invoice]
self.invoices = db.get_dict('invoices') # type: Dict[str, Invoice]
self._reserved_addresses = set(db.get('reserved_addresses', []))
self._freeze_lock = threading.Lock() # for mutating/iterating frozen_{addresses,coins}
self._prepare_onchain_invoice_paid_detection()
self.calc_unused_change_addresses()
# save wallet type the first time
if self.db.get('wallet_type') is None:
self.db.put('wallet_type', self.wallet_type)
self.contacts = Contacts(self.db)
self._coin_price_cache = {}
self.lnworker = None
def save_db(self):
if self.storage:
self.db.write(self.storage)
def save_backup(self, backup_dir):
new_db = WalletDB(self.db.dump(), manual_upgrades=False)
if self.lnworker:
channel_backups = new_db.get_dict('imported_channel_backups')
for chan_id, chan in self.lnworker.channels.items():
channel_backups[chan_id.hex()] = self.lnworker.create_channel_backup(chan_id)
new_db.put('channels', None)
new_db.put('lightning_privkey2', None)
new_path = os.path.join(backup_dir, self.basename() + '.backup')
new_storage = WalletStorage(new_path)
new_storage._encryption_version = self.storage._encryption_version
new_storage.pubkey = self.storage.pubkey
new_db.set_modified(True)
new_db.write(new_storage)
return new_path
def has_lightning(self) -> bool:
return bool(self.lnworker)
def can_have_lightning(self) -> bool:
# we want static_remotekey to be a wallet address
return self.txin_type == 'p2wpkh'
def can_have_deterministic_lightning(self) -> bool:
if not self.can_have_lightning():
return False
if not self.keystore:
return False
return self.keystore.can_have_deterministic_lightning_xprv()
def init_lightning(self, *, password) -> None:
assert self.can_have_lightning()
assert self.db.get('lightning_xprv') is None
assert self.db.get('lightning_privkey2') is None
if self.can_have_deterministic_lightning():
assert isinstance(self.keystore, keystore.BIP32_KeyStore)
ln_xprv = self.keystore.get_lightning_xprv(password)
self.db.put('lightning_xprv', ln_xprv)
else:
seed = os.urandom(32)
node = BIP32Node.from_rootseed(seed, xtype='standard')
ln_xprv = node.to_xprv()
self.db.put('lightning_privkey2', ln_xprv)
if self.network:
self.network.run_from_another_thread(self.stop())
self.lnworker = LNWallet(self, ln_xprv)
if self.network:
self.start_network(self.network)
async def stop(self):
"""Stop all networking and save DB to disk."""
try:
async with ignore_after(5):
await super().stop()
if self.network:
if self.lnworker:
await self.lnworker.stop()
self.lnworker = None
finally: # even if we get cancelled
if any([ks.is_requesting_to_be_rewritten_to_wallet_file for ks in self.get_keystores()]):
self.save_keystore()
self.save_db()
def set_up_to_date(self, b):
super().set_up_to_date(b)
if b: self.save_db()
def clear_history(self):
super().clear_history()
self.save_db()
def start_network(self, network):
AddressSynchronizer.start_network(self, network)
if network:
if self.lnworker:
self.lnworker.start_network(network)
# only start gossiping when we already have channels
if self.db.get('channels'):
self.network.start_gossip()
def load_and_cleanup(self):
self.load_keystore()
self.test_addresses_sanity()
super().load_and_cleanup()
@abstractmethod
def load_keystore(self) -> None:
pass
def diagnostic_name(self):
return self.basename()
def __str__(self):
return self.basename()
def get_master_public_key(self):
return None
def get_master_public_keys(self):
return []
def basename(self) -> str:
return self.storage.basename() if self.storage else 'no name'
def test_addresses_sanity(self) -> None:
addrs = self.get_receiving_addresses()
if len(addrs) > 0:
addr = str(addrs[0])
if not bitcoin.is_address(addr):
neutered_addr = addr[:5] + '..' + addr[-2:]
raise WalletFileException(f'The addresses in this wallet are not bitcoin addresses.\n'
f'e.g. {neutered_addr} (length: {len(addr)})')
def check_returned_address_for_corruption(func):
def wrapper(self, *args, **kwargs):
addr = func(self, *args, **kwargs)
self.check_address_for_corruption(addr)
return addr
return wrapper
def calc_unused_change_addresses(self) -> Sequence[str]:
"""Returns a list of change addresses to choose from, for usage in e.g. new transactions.
The caller should give priority to earlier ones in the list.
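        A sketch of the intended call pattern (hypothetical caller code)::
            candidates = wallet.calc_unused_change_addresses()
            change_addr = candidates[0] if candidates else None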
"""
with self.lock:
# We want a list of unused change addresses.
# As a performance optimisation, to avoid checking all addresses every time,
# we maintain a list of "not old" addresses ("old" addresses have deeply confirmed history),
# and only check those.
if not hasattr(self, '_not_old_change_addresses'):
self._not_old_change_addresses = self.get_change_addresses()
self._not_old_change_addresses = [addr for addr in self._not_old_change_addresses
if not self.address_is_old(addr)]
unused_addrs = [addr for addr in self._not_old_change_addresses
if not self.is_used(addr) and not self.is_address_reserved(addr)]
return unused_addrs
def is_deterministic(self) -> bool:
return self.keystore.is_deterministic()
def _set_label(self, key: str, value: Optional[str]) -> None:
with self.lock:
if value is None:
self._labels.pop(key, None)
else:
self._labels[key] = value
def set_label(self, name: str, text: str = None) -> bool:
if not name:
return False
changed = False
with self.lock:
old_text = self._labels.get(name)
if text:
text = text.replace("\n", " ")
if old_text != text:
self._labels[name] = text
changed = True
else:
if old_text is not None:
self._labels.pop(name)
changed = True
if changed:
run_hook('set_label', self, name, text)
return changed
def import_labels(self, path):
data = read_json_file(path)
for key, value in data.items():
self.set_label(key, value)
def export_labels(self, path):
write_json_file(path, self.get_all_labels())
def set_fiat_value(self, txid, ccy, text, fx, value_sat):
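        """Store a user-entered fiat value for a tx, or clear the stored override
        if the text is empty or rounds to the rate-derived default. Returns True if
        the override was cleared, False if a value was stored or could not be parsed."""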
if not self.db.get_transaction(txid):
return
# since fx is inserting the thousands separator,
# and not util, also have fx remove it
text = fx.remove_thousands_separator(text)
def_fiat = self.default_fiat_value(txid, fx, value_sat)
formatted = fx.ccy_amount_str(def_fiat, commas=False)
def_fiat_rounded = Decimal(formatted)
reset = not text
if not reset:
try:
text_dec = Decimal(text)
text_dec_rounded = Decimal(fx.ccy_amount_str(text_dec, commas=False))
reset = text_dec_rounded == def_fiat_rounded
except:
# garbage. not resetting, but not saving either
return False
if reset:
d = self.fiat_value.get(ccy, {})
if d and txid in d:
d.pop(txid)
else:
# avoid saving empty dict
return True
else:
if ccy not in self.fiat_value:
self.fiat_value[ccy] = {}
self.fiat_value[ccy][txid] = text
return reset
def get_fiat_value(self, txid, ccy):
fiat_value = self.fiat_value.get(ccy, {}).get(txid)
try:
return Decimal(fiat_value)
except:
return
def is_mine(self, address) -> bool:
if not address: return False
return bool(self.get_address_index(address))
def is_change(self, address) -> bool:
if not self.is_mine(address):
return False
return self.get_address_index(address)[0] == 1
@abstractmethod
def get_address_index(self, address: str) -> Optional[AddressIndexGeneric]:
pass
@abstractmethod
def get_address_path_str(self, address: str) -> Optional[str]:
"""Returns derivation path str such as "m/0/5" to address,
or None if not applicable.
"""
pass
@abstractmethod
def get_redeem_script(self, address: str) -> Optional[str]:
pass
@abstractmethod
def get_witness_script(self, address: str) -> Optional[str]:
pass
@abstractmethod
def get_txin_type(self, address: str) -> str:
"""Return script type of wallet address."""
pass
def export_private_key(self, address: str, password: Optional[str]) -> str:
if self.is_watching_only():
raise Exception(_("This is a watching-only wallet"))
if not is_address(address):
raise Exception(f"Invalid bitcoin address: {address}")
if not self.is_mine(address):
raise Exception(_('Address not in wallet.') + f' {address}')
index = self.get_address_index(address)
pk, compressed = self.keystore.get_private_key(index, password)
txin_type = self.get_txin_type(address)
serialized_privkey = bitcoin.serialize_privkey(pk, compressed, txin_type)
return serialized_privkey
def export_private_key_for_path(self, path: Union[Sequence[int], str], password: Optional[str]) -> str:
raise Exception("this wallet is not deterministic")
@abstractmethod
def get_public_keys(self, address: str) -> Sequence[str]:
pass
def get_public_keys_with_deriv_info(self, address: str) -> Dict[bytes, Tuple[KeyStoreWithMPK, Sequence[int]]]:
"""Returns a map: pubkey -> (keystore, derivation_suffix)"""
return {}
def is_lightning_funding_tx(self, txid: Optional[str]) -> bool:
if not self.lnworker or txid is None:
return False
return any([chan.funding_outpoint.txid == txid
for chan in self.lnworker.channels.values()])
def get_tx_info(self, tx: Transaction) -> TxWalletDetails:
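        """Summarize 'tx' for display: status, label, amount and fee from the wallet's
        perspective, and which actions (broadcast, bump, CPFP, dscancel, remove,
        save-as-local) are currently applicable."""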
tx_wallet_delta = self.get_wallet_delta(tx)
is_relevant = tx_wallet_delta.is_relevant
is_any_input_ismine = tx_wallet_delta.is_any_input_ismine
fee = tx_wallet_delta.fee
exp_n = None
can_broadcast = False
can_bump = False
can_cpfp = False
tx_hash = tx.txid() # note: txid can be None! e.g. when called from GUI tx dialog
is_lightning_funding_tx = self.is_lightning_funding_tx(tx_hash)
tx_we_already_have_in_db = self.db.get_transaction(tx_hash)
can_save_as_local = (is_relevant and tx.txid() is not None
and (tx_we_already_have_in_db is None or not tx_we_already_have_in_db.is_complete()))
label = ''
tx_mined_status = self.get_tx_height(tx_hash)
can_remove = ((tx_mined_status.height in [TX_HEIGHT_FUTURE, TX_HEIGHT_LOCAL])
# otherwise 'height' is unreliable (typically LOCAL):
and is_relevant
# don't offer during common signing flow, e.g. when watch-only wallet starts creating a tx:
and bool(tx_we_already_have_in_db))
can_dscancel = False
if tx.is_complete():
if tx_we_already_have_in_db:
label = self.get_label_for_txid(tx_hash)
if tx_mined_status.height > 0:
if tx_mined_status.conf:
status = _("{} confirmations").format(tx_mined_status.conf)
else:
status = _('Not verified')
elif tx_mined_status.height in (TX_HEIGHT_UNCONF_PARENT, TX_HEIGHT_UNCONFIRMED):
status = _('Unconfirmed')
if fee is None:
fee = self.get_tx_fee(tx_hash)
if fee and self.network and self.config.has_fee_mempool():
size = tx.estimated_size()
fee_per_byte = fee / size
exp_n = self.config.fee_to_depth(fee_per_byte)
can_bump = is_any_input_ismine and not tx.is_final()
can_dscancel = (is_any_input_ismine and not tx.is_final()
and not all([self.is_mine(txout.address) for txout in tx.outputs()]))
try:
self.cpfp(tx, 0)
can_cpfp = True
except:
can_cpfp = False
else:
status = _('Local')
can_broadcast = self.network is not None
can_bump = is_any_input_ismine and not tx.is_final()
else:
status = _("Signed")
can_broadcast = self.network is not None
else:
assert isinstance(tx, PartialTransaction)
s, r = tx.signature_count()
status = _("Unsigned") if s == 0 else _('Partially signed') + ' (%d/%d)'%(s,r)
if is_relevant:
if tx_wallet_delta.is_all_input_ismine:
assert fee is not None
amount = tx_wallet_delta.delta + fee
else:
amount = tx_wallet_delta.delta
else:
amount = None
if is_lightning_funding_tx:
can_bump = False # would change txid
return TxWalletDetails(
txid=tx_hash,
status=status,
label=label,
can_broadcast=can_broadcast,
can_bump=can_bump,
can_cpfp=can_cpfp,
can_dscancel=can_dscancel,
can_save_as_local=can_save_as_local,
amount=amount,
fee=fee,
tx_mined_status=tx_mined_status,
mempool_depth_bytes=exp_n,
can_remove=can_remove,
is_lightning_funding_tx=is_lightning_funding_tx,
)
def get_spendable_coins(self, domain, *, nonlocal_only=False) -> Sequence[PartialTxInput]:
confirmed_only = self.config.get('confirmed_only', False)
with self._freeze_lock:
frozen_addresses = self._frozen_addresses.copy()
utxos = self.get_utxos(domain,
excluded_addresses=frozen_addresses,
mature_only=True,
confirmed_funding_only=confirmed_only,
nonlocal_only=nonlocal_only)
utxos = [utxo for utxo in utxos if not self.is_frozen_coin(utxo)]
return utxos
@abstractmethod
def get_receiving_addresses(self, *, slice_start=None, slice_stop=None) -> Sequence[str]:
pass
@abstractmethod
def get_change_addresses(self, *, slice_start=None, slice_stop=None) -> Sequence[str]:
pass
def dummy_address(self):
# first receiving address
return self.get_receiving_addresses(slice_start=0, slice_stop=1)[0]
def get_frozen_balance(self):
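        """Return the (confirmed, unconfirmed, unmatured) balance held in frozen
        addresses and frozen coins, computed as the total balance minus the balance
        with frozen entries excluded."""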
with self._freeze_lock:
frozen_addresses = self._frozen_addresses.copy()
# note: for coins, use is_frozen_coin instead of _frozen_coins,
# as latter only contains *manually* frozen ones
frozen_coins = {utxo.prevout.to_str() for utxo in self.get_utxos()
if self.is_frozen_coin(utxo)}
if not frozen_coins: # shortcut
return self.get_balance(frozen_addresses)
c1, u1, x1 = self.get_balance()
c2, u2, x2 = self.get_balance(
excluded_addresses=frozen_addresses,
excluded_coins=frozen_coins,
)
return c1-c2, u1-u2, x1-x2
def balance_at_timestamp(self, domain, target_timestamp):
# we assume that get_history returns items ordered by block height
# we also assume that block timestamps are monotonic (which is false...!)
h = self.get_history(domain=domain)
balance = 0
for hist_item in h:
balance = hist_item.balance
if hist_item.tx_mined_status.timestamp is None or hist_item.tx_mined_status.timestamp > target_timestamp:
return balance - hist_item.delta
# return last balance
return balance
def get_onchain_history(self, *, domain=None):
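        # 'monotonic_timestamp' is a non-decreasing version of the block timestamps;
        # unconfirmed txs (no timestamp) are pushed to the far future so that sorting
        # by it later keeps them at the end.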
monotonic_timestamp = 0
for hist_item in self.get_history(domain=domain):
monotonic_timestamp = max(monotonic_timestamp, (hist_item.tx_mined_status.timestamp or 999_999_999_999))
yield {
'txid': hist_item.txid,
'fee_sat': hist_item.fee,
'height': hist_item.tx_mined_status.height,
'confirmations': hist_item.tx_mined_status.conf,
'timestamp': hist_item.tx_mined_status.timestamp,
'monotonic_timestamp': monotonic_timestamp,
'incoming': True if hist_item.delta>0 else False,
'bc_value': Satoshis(hist_item.delta),
'bc_balance': Satoshis(hist_item.balance),
'date': timestamp_to_datetime(hist_item.tx_mined_status.timestamp),
'label': self.get_label_for_txid(hist_item.txid),
'txpos_in_block': hist_item.tx_mined_status.txpos,
}
def create_invoice(self, *, outputs: List[PartialTxOutput], message, pr, URI) -> Invoice:
height=self.get_local_height()
if pr:
return OnchainInvoice.from_bip70_payreq(pr, height)
amount = 0
for x in outputs:
if parse_max_spend(x.value):
amount = '!'
break
else:
amount += x.value
timestamp = None
exp = None
if URI:
timestamp = URI.get('time')
exp = URI.get('exp')
timestamp = timestamp or int(time.time())
exp = exp or 0
_id = bh2u(sha256d(repr(outputs) + "%d"%timestamp))[0:10]
invoice = OnchainInvoice(
type=PR_TYPE_ONCHAIN,
amount_sat=amount,
outputs=outputs,
message=message,
id=_id,
time=timestamp,
exp=exp,
bip70=None,
requestor=None,
height=height,
)
return invoice
def save_invoice(self, invoice: Invoice) -> None:
key = self.get_key_for_outgoing_invoice(invoice)
if not invoice.is_lightning():
assert isinstance(invoice, OnchainInvoice)
if self.is_onchain_invoice_paid(invoice, 0):
self.logger.info("saving invoice... but it is already paid!")
with self.transaction_lock:
for txout in invoice.outputs:
self._invoices_from_scriptpubkey_map[txout.scriptpubkey].add(key)
self.invoices[key] = invoice
self.save_db()
def clear_invoices(self):
self.invoices.clear()
self.save_db()
def clear_requests(self):
self.receive_requests.clear()
self.save_db()
def get_invoices(self):
out = list(self.invoices.values())
out.sort(key=lambda x:x.time)
return out
def get_unpaid_invoices(self):
invoices = self.get_invoices()
return [x for x in invoices if self.get_invoice_status(x) != PR_PAID]
def get_invoice(self, key):
return self.invoices.get(key)
def import_requests(self, path):
data = read_json_file(path)
for x in data:
req = Invoice.from_json(x)
self.add_payment_request(req)
def export_requests(self, path):
write_json_file(path, list(self.receive_requests.values()))
def import_invoices(self, path):
data = read_json_file(path)
for x in data:
invoice = Invoice.from_json(x)
self.save_invoice(invoice)
def export_invoices(self, path):
write_json_file(path, list(self.invoices.values()))
def _get_relevant_invoice_keys_for_tx(self, tx: Transaction) -> Set[str]:
relevant_invoice_keys = set()
with self.transaction_lock:
for txout in tx.outputs():
for invoice_key in self._invoices_from_scriptpubkey_map.get(txout.scriptpubkey, set()):
# note: the invoice might have been deleted since, so check now:
if invoice_key in self.invoices:
relevant_invoice_keys.add(invoice_key)
return relevant_invoice_keys
def get_relevant_invoices_for_tx(self, tx: Transaction) -> Sequence[OnchainInvoice]:
invoice_keys = self._get_relevant_invoice_keys_for_tx(tx)
invoices = [self.get_invoice(key) for key in invoice_keys]
invoices = [inv for inv in invoices if inv] # filter out None
for inv in invoices:
assert isinstance(inv, OnchainInvoice), f"unexpected type {type(inv)}"
return invoices
def _prepare_onchain_invoice_paid_detection(self):
# scriptpubkey -> list(invoice_keys)
self._invoices_from_scriptpubkey_map = defaultdict(set) # type: Dict[bytes, Set[str]]
for invoice_key, invoice in self.invoices.items():
if invoice.type == PR_TYPE_ONCHAIN:
assert isinstance(invoice, OnchainInvoice)
for txout in invoice.outputs:
self._invoices_from_scriptpubkey_map[txout.scriptpubkey].add(invoice_key)
def _is_onchain_invoice_paid(self, invoice: Invoice, conf: int) -> Tuple[bool, Sequence[str]]:
"""Returns whether on-chain invoice is satisfied, and list of relevant TXIDs."""
assert invoice.type == PR_TYPE_ONCHAIN
assert isinstance(invoice, OnchainInvoice)
invoice_amounts = defaultdict(int) # type: Dict[bytes, int] # scriptpubkey -> value_sats
for txo in invoice.outputs: # type: PartialTxOutput
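            # a "max spend" output ('!') is satisfied by any positive amount (>= 1 sat) to its script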
invoice_amounts[txo.scriptpubkey] += 1 if parse_max_spend(txo.value) else txo.value
relevant_txs = []
with self.lock, self.transaction_lock:
for invoice_scriptpubkey, invoice_amt in invoice_amounts.items():
scripthash = bitcoin.script_to_scripthash(invoice_scriptpubkey.hex())
prevouts_and_values = self.db.get_prevouts_by_scripthash(scripthash)
total_received = 0
for prevout, v in prevouts_and_values:
tx_height = self.get_tx_height(prevout.txid.hex())
if tx_height.height > 0 and tx_height.height <= invoice.height:
continue
if tx_height.conf < conf:
continue
total_received += v
relevant_txs.append(prevout.txid.hex())
# check that there is at least one TXO, and that they pay enough.
# note: "at least one TXO" check is needed for zero amount invoice (e.g. OP_RETURN)
if len(prevouts_and_values) == 0:
return False, []
if total_received < invoice_amt:
return False, []
return True, relevant_txs
def is_onchain_invoice_paid(self, invoice: Invoice, conf: int) -> bool:
return self._is_onchain_invoice_paid(invoice, conf)[0]
def _maybe_set_tx_label_based_on_invoices(self, tx: Transaction) -> bool:
# note: this is not done in 'get_default_label' as that would require deserializing each tx
tx_hash = tx.txid()
labels = []
for invoice in self.get_relevant_invoices_for_tx(tx):
if invoice.message:
labels.append(invoice.message)
if labels and not self._labels.get(tx_hash, ''):
self.set_label(tx_hash, "; ".join(labels))
return bool(labels)
def add_transaction(self, tx, *, allow_unrelated=False):
is_known = bool(self.db.get_transaction(tx.txid()))
tx_was_added = super().add_transaction(tx, allow_unrelated=allow_unrelated)
if tx_was_added and not is_known:
self._maybe_set_tx_label_based_on_invoices(tx)
if self.lnworker:
self.lnworker.maybe_add_backup_from_tx(tx)
return tx_was_added
@profiler
def get_full_history(self, fx=None, *, onchain_domain=None, include_lightning=True):
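        """Merge on-chain history, lnworker-related on-chain items (channel opens/closes,
        swaps) and lightning payments into one timestamp-ordered dict, adding a running
        combined balance per item."""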
transactions_tmp = OrderedDictWithIndex()
# add on-chain txns
onchain_history = self.get_onchain_history(domain=onchain_domain)
for tx_item in onchain_history:
txid = tx_item['txid']
transactions_tmp[txid] = tx_item
# add lnworker onchain transactions
lnworker_history = self.lnworker.get_onchain_history() if self.lnworker and include_lightning else {}
for txid, item in lnworker_history.items():
if txid in transactions_tmp:
tx_item = transactions_tmp[txid]
tx_item['group_id'] = item.get('group_id') # for swaps
tx_item['label'] = item['label']
tx_item['type'] = item['type']
ln_value = Decimal(item['amount_msat']) / 1000 # for channel open/close tx
tx_item['ln_value'] = Satoshis(ln_value)
else:
if item['type'] == 'swap':
                    # swap items do not have all the fields. We can skip them
# because they will eventually be in onchain_history
# TODO: use attr.s objects instead of dicts
continue
transactions_tmp[txid] = item
ln_value = Decimal(item['amount_msat']) / 1000 # for channel open/close tx
item['ln_value'] = Satoshis(ln_value)
# add lightning_transactions
lightning_history = self.lnworker.get_lightning_history() if self.lnworker and include_lightning else {}
for tx_item in lightning_history.values():
txid = tx_item.get('txid')
ln_value = Decimal(tx_item['amount_msat']) / 1000
tx_item['lightning'] = True
tx_item['ln_value'] = Satoshis(ln_value)
key = tx_item.get('txid') or tx_item['payment_hash']
transactions_tmp[key] = tx_item
# sort on-chain and LN stuff into new dict, by timestamp
# (we rely on this being a *stable* sort)
transactions = OrderedDictWithIndex()
for k, v in sorted(list(transactions_tmp.items()),
key=lambda x: x[1].get('monotonic_timestamp') or x[1].get('timestamp') or float('inf')):
transactions[k] = v
now = time.time()
balance = 0
for item in transactions.values():
# add on-chain and lightning values
value = Decimal(0)
if item.get('bc_value'):
value += item['bc_value'].value
if item.get('ln_value'):
value += item.get('ln_value').value
            # note: 'value' and 'balance' have msat precision (as LN has msat precision)
item['value'] = Satoshis(value)
balance += value
item['balance'] = Satoshis(balance)
if fx and fx.is_enabled() and fx.get_history_config():
txid = item.get('txid')
if not item.get('lightning') and txid:
fiat_fields = self.get_tx_item_fiat(tx_hash=txid, amount_sat=value, fx=fx, tx_fee=item['fee_sat'])
item.update(fiat_fields)
else:
timestamp = item['timestamp'] or now
fiat_value = value / Decimal(bitcoin.COIN) * fx.timestamp_rate(timestamp)
item['fiat_value'] = Fiat(fiat_value, fx.ccy)
item['fiat_default'] = True
return transactions
@profiler
def get_detailed_history(
self,
from_timestamp=None,
to_timestamp=None,
fx=None,
show_addresses=False,
from_height=None,
to_height=None):
# History with capital gains, using utxo pricing
        # FIXME: Lightning capital gains would require FIFO
if (from_timestamp is not None or to_timestamp is not None) \
and (from_height is not None or to_height is not None):
raise Exception('timestamp and block height based filtering cannot be used together')
show_fiat = fx and fx.is_enabled() and fx.get_history_config()
out = []
income = 0
expenditures = 0
capital_gains = Decimal(0)
fiat_income = Decimal(0)
fiat_expenditures = Decimal(0)
now = time.time()
for item in self.get_onchain_history():
timestamp = item['timestamp']
if from_timestamp and (timestamp or now) < from_timestamp:
continue
if to_timestamp and (timestamp or now) >= to_timestamp:
continue
height = item['height']
if from_height is not None and from_height > height > 0:
continue
if to_height is not None and (height >= to_height or height <= 0):
continue
tx_hash = item['txid']
tx = self.db.get_transaction(tx_hash)
tx_fee = item['fee_sat']
item['fee'] = Satoshis(tx_fee) if tx_fee is not None else None
if show_addresses:
item['inputs'] = list(map(lambda x: x.to_json(), tx.inputs()))
item['outputs'] = list(map(lambda x: {'address': x.get_ui_address_str(), 'value': Satoshis(x.value)},
tx.outputs()))
# fixme: use in and out values
value = item['bc_value'].value
if value < 0:
expenditures += -value
else:
income += value
# fiat computations
if show_fiat:
fiat_fields = self.get_tx_item_fiat(tx_hash=tx_hash, amount_sat=value, fx=fx, tx_fee=tx_fee)
fiat_value = fiat_fields['fiat_value'].value
item.update(fiat_fields)
if value < 0:
capital_gains += fiat_fields['capital_gain'].value
fiat_expenditures += -fiat_value
else:
fiat_income += fiat_value
out.append(item)
# add summary
if out:
first_item = out[0]
last_item = out[-1]
if from_height or to_height:
start_height = from_height
end_height = to_height
else:
start_height = first_item['height'] - 1
end_height = last_item['height']
b = first_item['bc_balance'].value
v = first_item['bc_value'].value
start_balance = None if b is None or v is None else b - v
end_balance = last_item['bc_balance'].value
if from_timestamp is not None and to_timestamp is not None:
start_timestamp = from_timestamp
end_timestamp = to_timestamp
else:
start_timestamp = first_item['timestamp']
end_timestamp = last_item['timestamp']
start_coins = self.get_utxos(
domain=None,
block_height=start_height,
confirmed_funding_only=True,
confirmed_spending_only=True,
nonlocal_only=True)
end_coins = self.get_utxos(
domain=None,
block_height=end_height,
confirmed_funding_only=True,
confirmed_spending_only=True,
nonlocal_only=True)
def summary_point(timestamp, height, balance, coins):
date = timestamp_to_datetime(timestamp)
out = {
'date': date,
'block_height': height,
'BTC_balance': Satoshis(balance),
}
if show_fiat:
ap = self.acquisition_price(coins, fx.timestamp_rate, fx.ccy)
lp = self.liquidation_price(coins, fx.timestamp_rate, timestamp)
out['acquisition_price'] = Fiat(ap, fx.ccy)
out['liquidation_price'] = Fiat(lp, fx.ccy)
out['unrealized_gains'] = Fiat(lp - ap, fx.ccy)
out['fiat_balance'] = Fiat(fx.historical_value(balance, date), fx.ccy)
out['BTC_fiat_price'] = Fiat(fx.historical_value(COIN, date), fx.ccy)
return out
summary_start = summary_point(start_timestamp, start_height, start_balance, start_coins)
summary_end = summary_point(end_timestamp, end_height, end_balance, end_coins)
flow = {
'BTC_incoming': Satoshis(income),
'BTC_outgoing': Satoshis(expenditures)
}
if show_fiat:
flow['fiat_currency'] = fx.ccy
flow['fiat_incoming'] = Fiat(fiat_income, fx.ccy)
flow['fiat_outgoing'] = Fiat(fiat_expenditures, fx.ccy)
flow['realized_capital_gains'] = Fiat(capital_gains, fx.ccy)
summary = {
'begin': summary_start,
'end': summary_end,
'flow': flow,
}
else:
summary = {}
return {
'transactions': out,
'summary': summary
}
def acquisition_price(self, coins, price_func, ccy):
return Decimal(sum(self.coin_price(coin.prevout.txid.hex(), price_func, ccy, self.get_txin_value(coin)) for coin in coins))
def liquidation_price(self, coins, price_func, timestamp):
p = price_func(timestamp)
return sum([coin.value_sats() for coin in coins]) * p / Decimal(COIN)
def default_fiat_value(self, tx_hash, fx, value_sat):
return value_sat / Decimal(COIN) * self.price_at_timestamp(tx_hash, fx.timestamp_rate)
def get_tx_item_fiat(
self,
*,
tx_hash: str,
amount_sat: int,
fx: 'FxThread',
tx_fee: Optional[int],
) -> Dict[str, Any]:
item = {}
fiat_value = self.get_fiat_value(tx_hash, fx.ccy)
fiat_default = fiat_value is None
fiat_rate = self.price_at_timestamp(tx_hash, fx.timestamp_rate)
fiat_value = fiat_value if fiat_value is not None else self.default_fiat_value(tx_hash, fx, amount_sat)
fiat_fee = tx_fee / Decimal(COIN) * fiat_rate if tx_fee is not None else None
item['fiat_currency'] = fx.ccy
item['fiat_rate'] = Fiat(fiat_rate, fx.ccy)
item['fiat_value'] = Fiat(fiat_value, fx.ccy)
item['fiat_fee'] = Fiat(fiat_fee, fx.ccy) if fiat_fee is not None else None
item['fiat_default'] = fiat_default
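        # for outgoing txs, also report the realized capital gain: fiat value received
        # now minus the average fiat acquisition cost of the coins being spent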
if amount_sat < 0:
acquisition_price = - amount_sat / Decimal(COIN) * self.average_price(tx_hash, fx.timestamp_rate, fx.ccy)
liquidation_price = - fiat_value
item['acquisition_price'] = Fiat(acquisition_price, fx.ccy)
cg = liquidation_price - acquisition_price
item['capital_gain'] = Fiat(cg, fx.ccy)
return item
def get_label(self, key: str) -> str:
# key is typically: address / txid / LN-payment-hash-hex
return self._labels.get(key) or ''
def get_label_for_txid(self, tx_hash: str) -> str:
return self._labels.get(tx_hash) or self._get_default_label_for_txid(tx_hash)
def _get_default_label_for_txid(self, tx_hash: str) -> str:
# if no inputs are ismine, concat labels of output addresses
if not self.db.get_txi_addresses(tx_hash):
labels = []
for addr in self.db.get_txo_addresses(tx_hash):
label = self._labels.get(addr)
if label:
labels.append(label)
return ', '.join(labels)
return ''
def get_all_labels(self) -> Dict[str, str]:
with self.lock:
return copy.copy(self._labels)
def get_tx_status(self, tx_hash, tx_mined_info: TxMinedInfo):
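        """Return a (status, status_str) pair summarizing a tx's confirmation state
        for display; small status values index TX_STATUS, larger ones encode the
        (capped) number of confirmations."""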
extra = []
height = tx_mined_info.height
conf = tx_mined_info.conf
timestamp = tx_mined_info.timestamp
if height == TX_HEIGHT_FUTURE:
assert conf < 0, conf
            num_blocks_remaining = -conf
            return 2, f'in {num_blocks_remaining} blocks'
if conf == 0:
tx = self.db.get_transaction(tx_hash)
if not tx:
return 2, 'unknown'
is_final = tx and tx.is_final()
if not is_final:
extra.append('rbf')
fee = self.get_tx_fee(tx_hash)
if fee is not None:
size = tx.estimated_size()
fee_per_byte = fee / size
extra.append(format_fee_satoshis(fee_per_byte) + ' sat/b')
if fee is not None and height in (TX_HEIGHT_UNCONF_PARENT, TX_HEIGHT_UNCONFIRMED) \
and self.config.has_fee_mempool():
exp_n = self.config.fee_to_depth(fee_per_byte)
if exp_n is not None:
extra.append('%.2f MB'%(exp_n/1000000))
if height == TX_HEIGHT_LOCAL:
status = 3
elif height == TX_HEIGHT_UNCONF_PARENT:
status = 1
elif height == TX_HEIGHT_UNCONFIRMED:
status = 0
else:
status = 2 # not SPV verified
else:
status = 3 + min(conf, 6)
time_str = format_time(timestamp) if timestamp else _("unknown")
status_str = TX_STATUS[status] if status < 4 else time_str
if extra:
status_str += ' [%s]'%(', '.join(extra))
return status, status_str
def relayfee(self):
return relayfee(self.network)
def dust_threshold(self):
return dust_threshold(self.network)
def get_unconfirmed_base_tx_for_batching(self) -> Optional[Transaction]:
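        """Return one of our own unconfirmed, RBF-enabled outgoing txs that new
        outputs could be merged into (used by the 'batch_rbf' option), or None."""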
candidate = None
for hist_item in self.get_history():
# tx should not be mined yet
if hist_item.tx_mined_status.conf > 0: continue
# conservative future proofing of code: only allow known unconfirmed types
if hist_item.tx_mined_status.height not in (TX_HEIGHT_UNCONFIRMED,
TX_HEIGHT_UNCONF_PARENT,
TX_HEIGHT_LOCAL):
continue
# tx should be "outgoing" from wallet
if hist_item.delta >= 0:
continue
tx = self.db.get_transaction(hist_item.txid)
if not tx:
continue
# is_mine outputs should not be spent yet
# to avoid cancelling our own dependent transactions
txid = tx.txid()
if any([self.is_mine(o.address) and self.db.get_spent_outpoint(txid, output_idx)
for output_idx, o in enumerate(tx.outputs())]):
continue
# all inputs should be is_mine
if not all([self.is_mine(self.get_txin_address(txin)) for txin in tx.inputs()]):
continue
# do not mutate LN funding txs, as that would change their txid
if self.is_lightning_funding_tx(txid):
continue
# tx must have opted-in for RBF (even if local, for consistency)
if tx.is_final():
continue
# prefer txns already in mempool (vs local)
if hist_item.tx_mined_status.height == TX_HEIGHT_LOCAL:
candidate = tx
continue
return tx
return candidate
def get_change_addresses_for_new_transaction(
self, preferred_change_addr=None, *, allow_reusing_used_change_addrs: bool = True,
) -> List[str]:
change_addrs = []
if preferred_change_addr:
if isinstance(preferred_change_addr, (list, tuple)):
change_addrs = list(preferred_change_addr)
else:
change_addrs = [preferred_change_addr]
elif self.use_change:
# Recalc and get unused change addresses
addrs = self.calc_unused_change_addresses()
# New change addresses are created only after a few
# confirmations.
if addrs:
# if there are any unused, select all
change_addrs = addrs
else:
# if there are none, take one randomly from the last few
if not allow_reusing_used_change_addrs:
return []
addrs = self.get_change_addresses(slice_start=-self.gap_limit_for_change)
change_addrs = [random.choice(addrs)] if addrs else []
for addr in change_addrs:
assert is_address(addr), f"not valid bitcoin address: {addr}"
# note that change addresses are not necessarily ismine
# in which case this is a no-op
self.check_address_for_corruption(addr)
max_change = self.max_change_outputs if self.multiple_change else 1
return change_addrs[:max_change]
def get_single_change_address_for_new_transaction(
self, preferred_change_addr=None, *, allow_reusing_used_change_addrs: bool = True,
) -> Optional[str]:
addrs = self.get_change_addresses_for_new_transaction(
preferred_change_addr=preferred_change_addr,
allow_reusing_used_change_addrs=allow_reusing_used_change_addrs,
)
if addrs:
return addrs[0]
return None
@check_returned_address_for_corruption
def get_new_sweep_address_for_channel(self) -> str:
# Recalc and get unused change addresses
addrs = self.calc_unused_change_addresses()
if addrs:
selected_addr = addrs[0]
else:
# if there are none, take one randomly from the last few
addrs = self.get_change_addresses(slice_start=-self.gap_limit_for_change)
if addrs:
selected_addr = random.choice(addrs)
else: # fallback for e.g. imported wallets
selected_addr = self.get_receiving_address()
assert is_address(selected_addr), f"not valid bitcoin address: {selected_addr}"
return selected_addr
def make_unsigned_transaction(
self, *,
coins: Sequence[PartialTxInput],
outputs: List[PartialTxOutput],
fee=None,
change_addr: str = None,
is_sweep=False,
rbf=False) -> PartialTransaction:
if not coins: # any bitcoin tx must have at least 1 input by consensus
raise NotEnoughFunds()
if any([c.already_has_some_signatures() for c in coins]):
raise Exception("Some inputs already contain signatures!")
# prevent side-effect with '!'
outputs = copy.deepcopy(outputs)
# check outputs
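        # outputs whose value is a max-spend marker will later receive the leftover
        # amount, split between them proportionally to their parsed weight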
i_max = []
i_max_sum = 0
for i, o in enumerate(outputs):
weight = parse_max_spend(o.value)
if weight:
i_max_sum += weight
i_max.append((weight, i))
if fee is None and self.config.fee_per_kb() is None:
raise NoDynamicFeeEstimates()
for item in coins:
self.add_input_info(item)
# Fee estimator
if fee is None:
fee_estimator = self.config.estimate_fee
elif isinstance(fee, Number):
fee_estimator = lambda size: fee
elif callable(fee):
fee_estimator = fee
else:
raise Exception(f'Invalid argument fee: {fee}')
if len(i_max) == 0:
# Let the coin chooser select the coins to spend
coin_chooser = coinchooser.get_coin_chooser(self.config)
# If there is an unconfirmed RBF tx, merge with it
base_tx = self.get_unconfirmed_base_tx_for_batching()
if self.config.get('batch_rbf', False) and base_tx:
# make sure we don't try to spend change from the tx-to-be-replaced:
coins = [c for c in coins if c.prevout.txid.hex() != base_tx.txid()]
is_local = self.get_tx_height(base_tx.txid()).height == TX_HEIGHT_LOCAL
base_tx = PartialTransaction.from_tx(base_tx)
base_tx.add_info_from_wallet(self)
base_tx_fee = base_tx.get_fee()
relayfeerate = Decimal(self.relayfee()) / 1000
original_fee_estimator = fee_estimator
def fee_estimator(size: Union[int, float, Decimal]) -> int:
size = Decimal(size)
lower_bound = base_tx_fee + round(size * relayfeerate)
lower_bound = lower_bound if not is_local else 0
return int(max(lower_bound, original_fee_estimator(size)))
txi = base_tx.inputs()
txo = list(filter(lambda o: not self.is_change(o.address), base_tx.outputs()))
old_change_addrs = [o.address for o in base_tx.outputs() if self.is_change(o.address)]
else:
txi = []
txo = []
old_change_addrs = []
# change address. if empty, coin_chooser will set it
change_addrs = self.get_change_addresses_for_new_transaction(change_addr or old_change_addrs)
tx = coin_chooser.make_tx(
coins=coins,
inputs=txi,
outputs=list(outputs) + txo,
change_addrs=change_addrs,
fee_estimator_vb=fee_estimator,
dust_threshold=self.dust_threshold())
else:
# "spend max" branch
# note: This *will* spend inputs with negative effective value (if there are any).
            #       Given that the user is spending "max", and so might be abandoning the wallet,
# try to include all UTXOs, otherwise leftover might remain in the UTXO set
# forever. see #5433
# note: Actually, it might be the case that not all UTXOs from the wallet are
# being spent if the user manually selected UTXOs.
sendable = sum(map(lambda c: c.value_sats(), coins))
for (_,i) in i_max:
outputs[i].value = 0
tx = PartialTransaction.from_io(list(coins), list(outputs))
fee = fee_estimator(tx.estimated_size())
amount = sendable - tx.output_value() - fee
if amount < 0:
raise NotEnoughFunds()
distr_amount = 0
for (weight, i) in i_max:
val = int((amount/i_max_sum) * weight)
outputs[i].value = val
distr_amount += val
(x,i) = i_max[-1]
outputs[i].value += (amount - distr_amount)
tx = PartialTransaction.from_io(list(coins), list(outputs))
# Timelock tx to current height.
tx.locktime = get_locktime_for_new_transaction(self.network)
tx.set_rbf(rbf)
tx.add_info_from_wallet(self)
run_hook('make_unsigned_transaction', self, tx)
return tx
def mktx(self, *,
outputs: List[PartialTxOutput],
password=None, fee=None, change_addr=None,
domain=None, rbf=False, nonlocal_only=False,
tx_version=None, sign=True) -> PartialTransaction:
coins = self.get_spendable_coins(domain, nonlocal_only=nonlocal_only)
tx = self.make_unsigned_transaction(
coins=coins,
outputs=outputs,
fee=fee,
change_addr=change_addr,
rbf=rbf)
if tx_version is not None:
tx.version = tx_version
if sign:
self.sign_transaction(tx, password)
return tx
def is_frozen_address(self, addr: str) -> bool:
return addr in self._frozen_addresses
def is_frozen_coin(self, utxo: PartialTxInput) -> bool:
prevout_str = utxo.prevout.to_str()
frozen = self._frozen_coins.get(prevout_str, None)
# note: there are three possible states for 'frozen':
# True/False if the user explicitly set it,
# None otherwise
if frozen is None:
return self._is_coin_small_and_unconfirmed(utxo)
return bool(frozen)
def _is_coin_small_and_unconfirmed(self, utxo: PartialTxInput) -> bool:
"""If true, the coin should not be spent.
The idea here is that an attacker might send us a UTXO in a
large low-fee unconfirmed tx that will ~never confirm. If we
spend it as part of a tx ourselves, that too will not confirm
(unless we use a high fee, but that might not be worth it for
a small value UTXO).
In particular, this test triggers for large "dusting transactions"
that are used for advertising purposes by some entities.
see #6960
"""
# confirmed UTXOs are fine; check this first for performance:
block_height = utxo.block_height
assert block_height is not None
if block_height > 0:
return False
# exempt large value UTXOs
value_sats = utxo.value_sats()
assert value_sats is not None
threshold = self.config.get('unconf_utxo_freeze_threshold', 5_000)
if value_sats >= threshold:
return False
# if funding tx has any is_mine input, then UTXO is fine
funding_tx = self.db.get_transaction(utxo.prevout.txid.hex())
if funding_tx is None:
# we should typically have the funding tx available;
# might not have it e.g. while not up_to_date
return True
if any(self.is_mine(self.get_txin_address(txin))
for txin in funding_tx.inputs()):
return False
return True
def set_frozen_state_of_addresses(self, addrs: Sequence[str], freeze: bool) -> bool:
"""Set frozen state of the addresses to FREEZE, True or False"""
if all(self.is_mine(addr) for addr in addrs):
with self._freeze_lock:
if freeze:
self._frozen_addresses |= set(addrs)
else:
self._frozen_addresses -= set(addrs)
self.db.put('frozen_addresses', list(self._frozen_addresses))
return True
return False
def set_frozen_state_of_coins(self, utxos: Sequence[str], freeze: bool) -> None:
"""Set frozen state of the utxos to FREEZE, True or False"""
# basic sanity check that input is not garbage: (see if raises)
[TxOutpoint.from_str(utxo) for utxo in utxos]
with self._freeze_lock:
for utxo in utxos:
self._frozen_coins[utxo] = bool(freeze)
def is_address_reserved(self, addr: str) -> bool:
# note: atm 'reserved' status is only taken into consideration for 'change addresses'
return addr in self._reserved_addresses
def set_reserved_state_of_address(self, addr: str, *, reserved: bool) -> None:
if not self.is_mine(addr):
return
with self.lock:
if reserved:
self._reserved_addresses.add(addr)
else:
self._reserved_addresses.discard(addr)
self.db.put('reserved_addresses', list(self._reserved_addresses))
def can_export(self):
return not self.is_watching_only() and hasattr(self.keystore, 'get_private_key')
def address_is_old(self, address: str, *, req_conf: int = 3) -> bool:
"""Returns whether address has any history that is deeply confirmed.
Used for reorg-safe(ish) gap limit roll-forward.
"""
max_conf = -1
h = self.db.get_addr_history(address)
needs_spv_check = not self.config.get("skipmerklecheck", False)
for tx_hash, tx_height in h:
if needs_spv_check:
tx_age = self.get_tx_height(tx_hash).conf
else:
if tx_height <= 0:
tx_age = 0
else:
tx_age = self.get_local_height() - tx_height + 1
max_conf = max(max_conf, tx_age)
return max_conf >= req_conf
def bump_fee(
self,
*,
tx: Transaction,
txid: str = None,
new_fee_rate: Union[int, float, Decimal],
coins: Sequence[PartialTxInput] = None,
strategies: Sequence[BumpFeeStrategy] = None,
) -> PartialTransaction:
"""Increase the miner fee of 'tx'.
'new_fee_rate' is the target min rate in sat/vbyte
'coins' is a list of UTXOs we can choose from as potential new inputs to be added
"""
txid = txid or tx.txid()
assert txid
assert tx.txid() in (None, txid)
if not isinstance(tx, PartialTransaction):
tx = PartialTransaction.from_tx(tx)
assert isinstance(tx, PartialTransaction)
tx.remove_signatures()
if tx.is_final():
raise CannotBumpFee(_('Transaction is final'))
new_fee_rate = quantize_feerate(new_fee_rate) # strip excess precision
try:
# note: this might download input utxos over network
tx.add_info_from_wallet(self, ignore_network_issues=False)
except NetworkException as e:
raise CannotBumpFee(repr(e))
old_tx_size = tx.estimated_size()
old_fee = tx.get_fee()
assert old_fee is not None
old_fee_rate = old_fee / old_tx_size # sat/vbyte
if new_fee_rate <= old_fee_rate:
raise CannotBumpFee(_("The new fee rate needs to be higher than the old fee rate."))
if not strategies:
strategies = [BumpFeeStrategy.COINCHOOSER, BumpFeeStrategy.DECREASE_CHANGE]
tx_new = None
exc = None
for strat in strategies:
try:
if strat == BumpFeeStrategy.COINCHOOSER:
tx_new = self._bump_fee_through_coinchooser(
tx=tx,
txid=txid,
new_fee_rate=new_fee_rate,
coins=coins,
)
elif strat == BumpFeeStrategy.DECREASE_CHANGE:
tx_new = self._bump_fee_through_decreasing_change(
tx=tx, new_fee_rate=new_fee_rate)
elif strat == BumpFeeStrategy.DECREASE_PAYMENT:
tx_new = self._bump_fee_through_decreasing_payment(
tx=tx, new_fee_rate=new_fee_rate)
else:
raise NotImplementedError(f"unexpected strategy: {strat}")
except CannotBumpFee as e:
exc = e
else:
strat_used = strat
break
if tx_new is None:
assert exc
raise exc # all strategies failed, re-raise last exception
target_min_fee = new_fee_rate * tx_new.estimated_size()
actual_fee = tx_new.get_fee()
if actual_fee + 1 < target_min_fee:
raise CannotBumpFee(
f"bump_fee fee target was not met (strategy: {strat_used}). "
f"got {actual_fee}, expected >={target_min_fee}. "
f"target rate was {new_fee_rate}")
tx_new.locktime = get_locktime_for_new_transaction(self.network)
tx_new.set_rbf(True)
tx_new.add_info_from_wallet(self)
return tx_new
def _bump_fee_through_coinchooser(
self,
*,
tx: PartialTransaction,
txid: str,
new_fee_rate: Union[int, Decimal],
coins: Sequence[PartialTxInput] = None,
) -> PartialTransaction:
"""Increase the miner fee of 'tx'.
- keeps all inputs
- keeps all not is_mine outputs,
- allows adding new inputs
"""
assert txid
tx = copy.deepcopy(tx)
tx.add_info_from_wallet(self)
assert tx.get_fee() is not None
old_inputs = list(tx.inputs())
old_outputs = list(tx.outputs())
# change address
old_change_addrs = [o.address for o in old_outputs if self.is_change(o.address)]
change_addrs = self.get_change_addresses_for_new_transaction(old_change_addrs)
# which outputs to keep?
if old_change_addrs:
fixed_outputs = list(filter(lambda o: not self.is_change(o.address), old_outputs))
else:
if all(self.is_mine(o.address) for o in old_outputs):
# all outputs are is_mine and none of them are change.
# we bail out as it's unclear what the user would want!
# the coinchooser bump fee method is probably not a good idea in this case
raise CannotBumpFee(_('All outputs are non-change is_mine'))
old_not_is_mine = list(filter(lambda o: not self.is_mine(o.address), old_outputs))
if old_not_is_mine:
fixed_outputs = old_not_is_mine
else:
fixed_outputs = old_outputs
if not fixed_outputs:
raise CannotBumpFee(_('Could not figure out which outputs to keep'))
if coins is None:
coins = self.get_spendable_coins(None)
# make sure we don't try to spend output from the tx-to-be-replaced:
coins = [c for c in coins if c.prevout.txid.hex() != txid]
for item in coins:
self.add_input_info(item)
def fee_estimator(size):
return self.config.estimate_fee_for_feerate(fee_per_kb=new_fee_rate*1000, size=size)
coin_chooser = coinchooser.get_coin_chooser(self.config)
try:
return coin_chooser.make_tx(
coins=coins,
inputs=old_inputs,
outputs=fixed_outputs,
change_addrs=change_addrs,
fee_estimator_vb=fee_estimator,
dust_threshold=self.dust_threshold())
except NotEnoughFunds as e:
raise CannotBumpFee(e)
def _bump_fee_through_decreasing_change(
self,
*,
tx: PartialTransaction,
new_fee_rate: Union[int, Decimal],
) -> PartialTransaction:
"""Increase the miner fee of 'tx'.
- keeps all inputs
- no new inputs are added
- allows decreasing and removing outputs (change is decreased first)
This is less "safe" than "coinchooser" method as it might end up decreasing
e.g. a payment to a merchant; but e.g. if the user has sent "Max" previously,
this is the only way to RBF.
"""
tx = copy.deepcopy(tx)
tx.add_info_from_wallet(self)
assert tx.get_fee() is not None
inputs = tx.inputs()
outputs = tx._outputs # note: we will mutate this directly
# use own outputs
s = list(filter(lambda o: self.is_mine(o.address), outputs))
# ... unless there is none
if not s:
s = outputs
x_fee = run_hook('get_tx_extra_fee', self, tx)
if x_fee:
x_fee_address, x_fee_amount = x_fee
s = list(filter(lambda o: o.address != x_fee_address, s))
if not s:
raise CannotBumpFee('No outputs at all??')
# prioritize low value outputs, to get rid of dust
s = sorted(s, key=lambda o: o.value)
for o in s:
target_fee = int(math.ceil(tx.estimated_size() * new_fee_rate))
delta = target_fee - tx.get_fee()
i = outputs.index(o)
if o.value - delta >= self.dust_threshold():
new_output_value = o.value - delta
assert isinstance(new_output_value, int)
outputs[i].value = new_output_value
delta = 0
break
else:
del outputs[i]
# note: we mutated the outputs of tx, which will affect
# tx.estimated_size() in the next iteration
if delta > 0:
raise CannotBumpFee(_('Could not find suitable outputs'))
return PartialTransaction.from_io(inputs, outputs)
def _bump_fee_through_decreasing_payment(
self,
*,
tx: PartialTransaction,
new_fee_rate: Union[int, Decimal],
) -> PartialTransaction:
"""Increase the miner fee of 'tx'.
- keeps all inputs
- no new inputs are added
- decreases payment outputs (not change!). Each non-ismine output is decreased
proportionally to their byte-size.
"""
tx = copy.deepcopy(tx)
tx.add_info_from_wallet(self)
assert tx.get_fee() is not None
inputs = tx.inputs()
outputs = tx.outputs()
# select non-ismine outputs
s = [(idx, out) for (idx, out) in enumerate(outputs)
if not self.is_mine(out.address)]
# exempt 2fa fee output if present
x_fee = run_hook('get_tx_extra_fee', self, tx)
if x_fee:
x_fee_address, x_fee_amount = x_fee
s = [(idx, out) for (idx, out) in s if out.address != x_fee_address]
if not s:
raise CannotBumpFee("Cannot find payment output")
del_out_idxs = set()
tx_size = tx.estimated_size()
cur_fee = tx.get_fee()
# Main loop. Each iteration decreases value of all selected outputs.
# The number of iterations is bounded by len(s) as only the final iteration
# can *not remove* any output.
for __ in range(len(s) + 1):
target_fee = int(math.ceil(tx_size * new_fee_rate))
delta_total = target_fee - cur_fee
if delta_total <= 0:
break
out_size_total = sum(Transaction.estimated_output_size_for_script(out.scriptpubkey.hex())
for (idx, out) in s if idx not in del_out_idxs)
for idx, out in s:
out_size = Transaction.estimated_output_size_for_script(out.scriptpubkey.hex())
delta = int(math.ceil(delta_total * out_size / out_size_total))
if out.value - delta >= self.dust_threshold():
new_output_value = out.value - delta
assert isinstance(new_output_value, int)
outputs[idx].value = new_output_value
cur_fee += delta
else: # remove output
tx_size -= out_size
cur_fee += out.value
del_out_idxs.add(idx)
if delta_total > 0:
raise CannotBumpFee(_('Could not find suitable outputs'))
outputs = [out for (idx, out) in enumerate(outputs) if idx not in del_out_idxs]
return PartialTransaction.from_io(inputs, outputs)
def cpfp(self, tx: Transaction, fee: int) -> Optional[PartialTransaction]:
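        """Child-Pays-For-Parent: build a tx that spends one of our own outputs of
        'tx' back to ourselves, paying 'fee', to incentivize confirmation of 'tx'."""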
txid = tx.txid()
for i, o in enumerate(tx.outputs()):
address, value = o.address, o.value
if self.is_mine(address):
break
else:
raise CannotCPFP(_("Could not find suitable output"))
coins = self.get_addr_utxo(address)
item = coins.get(TxOutpoint.from_str(txid+':%d'%i))
if not item:
raise CannotCPFP(_("Could not find coins for output"))
inputs = [item]
out_address = (self.get_single_change_address_for_new_transaction(allow_reusing_used_change_addrs=False)
or self.get_unused_address()
or address)
output_value = value - fee
if output_value < self.dust_threshold():
raise CannotCPFP(_("The output value remaining after fee is too low."))
outputs = [PartialTxOutput.from_address_and_value(out_address, output_value)]
locktime = get_locktime_for_new_transaction(self.network)
tx_new = PartialTransaction.from_io(inputs, outputs, locktime=locktime)
tx_new.set_rbf(True)
tx_new.add_info_from_wallet(self)
return tx_new
def dscancel(
self, *, tx: Transaction, new_fee_rate: Union[int, float, Decimal]
) -> PartialTransaction:
"""Double-Spend-Cancel: cancel an unconfirmed tx by double-spending
its inputs, paying ourselves.
'new_fee_rate' is the target min rate in sat/vbyte
"""
if not isinstance(tx, PartialTransaction):
tx = PartialTransaction.from_tx(tx)
assert isinstance(tx, PartialTransaction)
tx.remove_signatures()
if tx.is_final():
raise CannotDoubleSpendTx(_('Transaction is final'))
new_fee_rate = quantize_feerate(new_fee_rate) # strip excess precision
try:
# note: this might download input utxos over network
tx.add_info_from_wallet(self, ignore_network_issues=False)
except NetworkException as e:
raise CannotDoubleSpendTx(repr(e))
old_tx_size = tx.estimated_size()
old_fee = tx.get_fee()
assert old_fee is not None
old_fee_rate = old_fee / old_tx_size # sat/vbyte
if new_fee_rate <= old_fee_rate:
raise CannotDoubleSpendTx(_("The new fee rate needs to be higher than the old fee rate."))
# grab all ismine inputs
inputs = [txin for txin in tx.inputs()
if self.is_mine(self.get_txin_address(txin))]
value = sum([txin.value_sats() for txin in inputs])
# figure out output address
old_change_addrs = [o.address for o in tx.outputs() if self.is_mine(o.address)]
out_address = (self.get_single_change_address_for_new_transaction(old_change_addrs)
or self.get_receiving_address())
locktime = get_locktime_for_new_transaction(self.network)
outputs = [PartialTxOutput.from_address_and_value(out_address, value)]
tx_new = PartialTransaction.from_io(inputs, outputs, locktime=locktime)
new_tx_size = tx_new.estimated_size()
new_fee = max(
new_fee_rate * new_tx_size,
old_fee + self.relayfee() * new_tx_size / Decimal(1000), # BIP-125 rules 3 and 4
)
new_fee = int(math.ceil(new_fee))
output_value = value - new_fee
if output_value < self.dust_threshold():
raise CannotDoubleSpendTx(_("The output value remaining after fee is too low."))
outputs = [PartialTxOutput.from_address_and_value(out_address, value - new_fee)]
tx_new = PartialTransaction.from_io(inputs, outputs, locktime=locktime)
tx_new.set_rbf(True)
tx_new.add_info_from_wallet(self)
return tx_new
@abstractmethod
def _add_input_sig_info(self, txin: PartialTxInput, address: str, *, only_der_suffix: bool) -> None:
pass
def _add_txinout_derivation_info(self, txinout: Union[PartialTxInput, PartialTxOutput],
address: str, *, only_der_suffix: bool) -> None:
pass # implemented by subclasses
def _add_input_utxo_info(
self,
txin: PartialTxInput,
*,
address: str = None,
ignore_network_issues: bool = True,
) -> None:
# We prefer to include UTXO (full tx) for every input.
# We cannot include UTXO if the prev tx is not signed yet though (chain of unsigned txs),
# in which case we might include a WITNESS_UTXO.
address = address or txin.address
if txin.witness_utxo is None and txin.is_segwit() and address:
received, spent = self.get_addr_io(address)
item = received.get(txin.prevout.to_str())
if item:
txin_value = item[1]
txin.witness_utxo = TxOutput.from_address_and_value(address, txin_value)
if txin.utxo is None:
txin.utxo = self.get_input_tx(txin.prevout.txid.hex(), ignore_network_issues=ignore_network_issues)
txin.ensure_there_is_only_one_utxo()
def _learn_derivation_path_for_address_from_txinout(self, txinout: Union[PartialTxInput, PartialTxOutput],
address: str) -> bool:
"""Tries to learn the derivation path for an address (potentially beyond gap limit)
using data available in given txin/txout.
Returns whether the address was found to be is_mine.
"""
return False # implemented by subclasses
def add_input_info(
self,
txin: PartialTxInput,
*,
only_der_suffix: bool = False,
ignore_network_issues: bool = True,
) -> None:
address = self.get_txin_address(txin)
# note: we add input utxos regardless of is_mine
self._add_input_utxo_info(txin, ignore_network_issues=ignore_network_issues, address=address)
if not self.is_mine(address):
is_mine = self._learn_derivation_path_for_address_from_txinout(txin, address)
if not is_mine:
return
# set script_type first, as later checks might rely on it:
txin.script_type = self.get_txin_type(address)
txin.num_sig = self.m if isinstance(self, Multisig_Wallet) else 1
if txin.redeem_script is None:
try:
redeem_script_hex = self.get_redeem_script(address)
txin.redeem_script = bfh(redeem_script_hex) if redeem_script_hex else None
except UnknownTxinType:
pass
if txin.witness_script is None:
try:
witness_script_hex = self.get_witness_script(address)
txin.witness_script = bfh(witness_script_hex) if witness_script_hex else None
except UnknownTxinType:
pass
self._add_input_sig_info(txin, address, only_der_suffix=only_der_suffix)
def can_sign(self, tx: Transaction) -> bool:
if not isinstance(tx, PartialTransaction):
return False
if tx.is_complete():
return False
# add info to inputs if we can; otherwise we might return a false negative:
tx.add_info_from_wallet(self)
for txin in tx.inputs():
# note: is_mine check needed to avoid false positives.
# just because keystore could sign, txin does not necessarily belong to wallet.
# Example: we have p2pkh-like addresses and txin is a multisig that involves our pubkey.
if not self.is_mine(txin.address):
continue
for k in self.get_keystores():
if k.can_sign_txin(txin):
return True
return False
def get_input_tx(self, tx_hash: str, *, ignore_network_issues=False) -> Optional[Transaction]:
# First look up an input transaction in the wallet where it
# will likely be. If co-signing a transaction it may not have
# all the input txs, in which case we ask the network.
tx = self.db.get_transaction(tx_hash)
if not tx and self.network and self.network.has_internet_connection():
try:
raw_tx = self.network.run_from_another_thread(
self.network.get_transaction(tx_hash, timeout=10))
except NetworkException as e:
self.logger.info(f'got network error getting input txn. err: {repr(e)}. txid: {tx_hash}. '
f'if you are intentionally offline, consider using the --offline flag')
if not ignore_network_issues:
raise e
else:
tx = Transaction(raw_tx)
if not tx and not ignore_network_issues:
raise NetworkException('failed to get prev tx from network')
return tx
def add_output_info(self, txout: PartialTxOutput, *, only_der_suffix: bool = False) -> None:
address = txout.address
if not self.is_mine(address):
is_mine = self._learn_derivation_path_for_address_from_txinout(txout, address)
if not is_mine:
return
txout.script_type = self.get_txin_type(address)
txout.is_mine = True
txout.is_change = self.is_change(address)
if isinstance(self, Multisig_Wallet):
txout.num_sig = self.m
self._add_txinout_derivation_info(txout, address, only_der_suffix=only_der_suffix)
if txout.redeem_script is None:
try:
redeem_script_hex = self.get_redeem_script(address)
txout.redeem_script = bfh(redeem_script_hex) if redeem_script_hex else None
except UnknownTxinType:
pass
if txout.witness_script is None:
try:
witness_script_hex = self.get_witness_script(address)
txout.witness_script = bfh(witness_script_hex) if witness_script_hex else None
except UnknownTxinType:
pass
def sign_transaction(self, tx: Transaction, password) -> Optional[PartialTransaction]:
if self.is_watching_only():
return
if not isinstance(tx, PartialTransaction):
return
# add info to a temporary tx copy; including xpubs
# and full derivation paths as hw keystores might want them
tmp_tx = copy.deepcopy(tx)
tmp_tx.add_info_from_wallet(self, include_xpubs=True)
# sign. start with ready keystores.
for k in sorted(self.get_keystores(), key=lambda ks: ks.ready_to_sign(), reverse=True):
try:
if k.can_sign(tmp_tx):
k.sign_transaction(tmp_tx, password)
except UserCancelled:
continue
# remove sensitive info; then copy back details from temporary tx
tmp_tx.remove_xpubs_and_bip32_paths()
tx.combine_with_other_psbt(tmp_tx)
tx.add_info_from_wallet(self, include_xpubs=False)
return tx
def try_detecting_internal_addresses_corruption(self) -> None:
pass
def check_address_for_corruption(self, addr: str) -> None:
pass
def get_unused_addresses(self) -> Sequence[str]:
domain = self.get_receiving_addresses()
# TODO we should index receive_requests by id
in_use_by_request = [k for k in self.receive_requests.keys()
if self.get_request_status(k) != PR_EXPIRED]
in_use_by_request = set(in_use_by_request)
return [addr for addr in domain if not self.is_used(addr)
and addr not in in_use_by_request]
@check_returned_address_for_corruption
def get_unused_address(self) -> Optional[str]:
"""Get an unused receiving address, if there is one.
Note: there might NOT be one available!
"""
addrs = self.get_unused_addresses()
if addrs:
return addrs[0]
@check_returned_address_for_corruption
def get_receiving_address(self) -> str:
"""Get a receiving address. Guaranteed to always return an address."""
unused_addr = self.get_unused_address()
if unused_addr:
return unused_addr
domain = self.get_receiving_addresses()
if not domain:
raise Exception("no receiving addresses in wallet?!")
choice = domain[0]
for addr in domain:
if not self.is_used(addr):
if addr not in self.receive_requests.keys():
return addr
else:
choice = addr
return choice
def create_new_address(self, for_change: bool = False):
raise Exception("this wallet cannot generate new addresses")
def import_address(self, address: str) -> str:
raise Exception("this wallet cannot import addresses")
def import_addresses(self, addresses: List[str], *,
write_to_disk=True) -> Tuple[List[str], List[Tuple[str, str]]]:
raise Exception("this wallet cannot import addresses")
def delete_address(self, address: str) -> None:
raise Exception("this wallet cannot delete addresses")
def get_onchain_request_status(self, r: OnchainInvoice) -> Tuple[bool, Optional[int]]:
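        """Return (is_paid, conf): whether coins received to the request address after
        the request was created add up to the requested amount, and if so, the number
        of confirmations of the least-confirmed coin needed to reach that amount."""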
address = r.get_address()
amount = r.get_amount_sat()
received, sent = self.get_addr_io(address)
l = []
for txo, x in received.items():
h, v, is_cb = x
txid, n = txo.split(':')
tx_height = self.get_tx_height(txid)
height = tx_height.height
if height > 0 and height <= r.height:
continue
conf = tx_height.conf
l.append((conf, v))
vsum = 0
for conf, v in reversed(sorted(l)):
vsum += v
if vsum >= amount:
return True, conf
return False, None
def get_request_URI(self, req: OnchainInvoice) -> str:
addr = req.get_address()
message = self.get_label(addr)
amount = req.amount_sat
extra_query_params = {}
if req.time:
extra_query_params['time'] = str(int(req.time))
if req.exp:
extra_query_params['exp'] = str(int(req.exp))
#if req.get('name') and req.get('sig'):
# sig = bfh(req.get('sig'))
# sig = bitcoin.base_encode(sig, base=58)
# extra_query_params['name'] = req['name']
# extra_query_params['sig'] = sig
uri = create_bip21_uri(addr, amount, message, extra_query_params=extra_query_params)
return str(uri)
def check_expired_status(self, r: Invoice, status):
if r.is_lightning() and r.exp == 0:
status = PR_EXPIRED # for BOLT-11 invoices, exp==0 means 0 seconds
if status == PR_UNPAID and r.exp > 0 and r.time + r.exp < time.time():
status = PR_EXPIRED
return status
def get_invoice_status(self, invoice: Invoice):
if invoice.is_lightning():
status = self.lnworker.get_invoice_status(invoice) if self.lnworker else PR_UNKNOWN
else:
if self.is_onchain_invoice_paid(invoice, 1):
                status = PR_PAID
elif self.is_onchain_invoice_paid(invoice, 0):
status = PR_UNCONFIRMED
else:
status = PR_UNPAID
return self.check_expired_status(invoice, status)
def get_request_status(self, key):
r = self.get_request(key)
if r is None:
return PR_UNKNOWN
if r.is_lightning():
assert isinstance(r, LNInvoice)
status = self.lnworker.get_payment_status(bfh(r.rhash)) if self.lnworker else PR_UNKNOWN
else:
assert isinstance(r, OnchainInvoice)
paid, conf = self.get_onchain_request_status(r)
if not paid:
status = PR_UNPAID
elif conf == 0:
status = PR_UNCONFIRMED
else:
status = PR_PAID
return self.check_expired_status(r, status)
def get_request(self, key):
return self.receive_requests.get(key)
def get_formatted_request(self, key):
x = self.receive_requests.get(key)
if x:
return self.export_request(x)
def export_request(self, x: Invoice) -> Dict[str, Any]:
key = self.get_key_for_receive_request(x)
status = self.get_request_status(key)
status_str = x.get_status_str(status)
is_lightning = x.is_lightning()
d = {
'is_lightning': is_lightning,
'amount_BTC': format_satoshis(x.get_amount_sat()),
'message': x.message,
'timestamp': x.time,
'expiration': x.exp,
'status': status,
'status_str': status_str,
}
if is_lightning:
assert isinstance(x, LNInvoice)
d['rhash'] = x.rhash
d['invoice'] = x.invoice
d['amount_msat'] = x.get_amount_msat()
if self.lnworker and status == PR_UNPAID:
d['can_receive'] = self.lnworker.can_receive_invoice(x)
else:
assert isinstance(x, OnchainInvoice)
paid, conf = self.get_onchain_request_status(x)
d['amount_sat'] = x.get_amount_sat()
d['address'] = x.get_address()
d['URI'] = self.get_request_URI(x)
if conf is not None:
d['confirmations'] = conf
# add URL if we are running a payserver
payserver = self.config.get_netaddress('payserver_address')
if payserver:
root = self.config.get('payserver_root', '/r')
use_ssl = bool(self.config.get('ssl_keyfile'))
protocol = 'https' if use_ssl else 'http'
base = '%s://%s:%d'%(protocol, payserver.host, payserver.port)
d['view_url'] = base + root + '/pay?id=' + key
if use_ssl and 'URI' in d:
request_url = base + '/bip70/' + key + '.bip70'
d['bip70_url'] = request_url
return d
def export_invoice(self, x: Invoice) -> Dict[str, Any]:
status = self.get_invoice_status(x)
status_str = x.get_status_str(status)
is_lightning = x.is_lightning()
d = {
'is_lightning': is_lightning,
'amount_BTC': format_satoshis(x.get_amount_sat()),
'message': x.message,
'timestamp': x.time,
'expiration': x.exp,
'status': status,
'status_str': status_str,
}
if is_lightning:
assert isinstance(x, LNInvoice)
d['invoice'] = x.invoice
d['amount_msat'] = x.get_amount_msat()
if self.lnworker and status == PR_UNPAID:
d['can_pay'] = self.lnworker.can_pay_invoice(x)
else:
assert isinstance(x, OnchainInvoice)
amount_sat = x.get_amount_sat()
assert isinstance(amount_sat, (int, str, type(None)))
d['amount_sat'] = amount_sat
d['outputs'] = [y.to_legacy_tuple() for y in x.outputs]
if x.bip70:
d['bip70'] = x.bip70
d['requestor'] = x.requestor
return d
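    # The dict returned by export_invoice() above is what the CLI/GUI/REST
    # layers consume. Illustrative shape for an on-chain invoice (values are
    # made up; lightning invoices carry 'invoice'/'amount_msat' instead):
    #
    #   {'is_lightning': False, 'amount_BTC': '0.001', 'amount_sat': 100000,
    #    'message': 'invoice #42', 'timestamp': 1600000000, 'expiration': 3600,
    #    'status': PR_UNPAID, 'status_str': 'Unpaid', 'outputs': [...]}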
def receive_tx_callback(self, tx_hash, tx, tx_height):
super().receive_tx_callback(tx_hash, tx, tx_height)
self._update_request_statuses_touched_by_tx(tx_hash)
def add_verified_tx(self, tx_hash, info):
super().add_verified_tx(tx_hash, info)
self._update_request_statuses_touched_by_tx(tx_hash)
def undo_verifications(self, blockchain, above_height):
reorged_txids = super().undo_verifications(blockchain, above_height)
for txid in reorged_txids:
self._update_request_statuses_touched_by_tx(txid)
def _update_request_statuses_touched_by_tx(self, tx_hash: str) -> None:
# FIXME in some cases if tx2 replaces unconfirmed tx1 in the mempool, we are not called.
# For a given receive request, if tx1 touches it but tx2 does not, then
# we were called when tx1 was added, but we will not get called when tx2 replaces tx1.
tx = self.db.get_transaction(tx_hash)
if tx is None:
return
for txo in tx.outputs():
addr = txo.address
if addr in self.receive_requests:
status = self.get_request_status(addr)
util.trigger_callback('request_status', self, addr, status)
def make_payment_request(self, address, amount_sat, message, expiration):
# TODO maybe merge with wallet.create_invoice()...
# note that they use incompatible "id"
amount_sat = amount_sat or 0
timestamp = int(time.time())
_id = bh2u(sha256d(address + "%d"%timestamp))[0:10]
expiration = expiration or 0
return OnchainInvoice(
type=PR_TYPE_ONCHAIN,
outputs=[PartialTxOutput.from_address_and_value(address, amount_sat)],
message=message,
time=timestamp,
amount_sat=amount_sat,
exp=expiration,
id=_id,
bip70=None,
requestor=None,
height=self.get_local_height(),
)
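    # Usage sketch for make_payment_request() above (illustrative; `wallet` is
    # a wallet instance and `addr` one of its own receiving addresses):
    #
    #   req = wallet.make_payment_request(addr, amount_sat=100_000,
    #                                     message='donation', expiration=3600)
    #   wallet.add_payment_request(req)        # stored keyed by the address
    #   wallet.export_request(req)             # dict for display / payserver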
def sign_payment_request(self, key, alias, alias_addr, password): # FIXME this is broken
req = self.receive_requests.get(key)
assert isinstance(req, OnchainInvoice)
alias_privkey = self.export_private_key(alias_addr, password)
pr = paymentrequest.make_unsigned_request(req)
paymentrequest.sign_request_with_alias(pr, alias, alias_privkey)
req.bip70 = pr.raw.hex()
req['name'] = pr.pki_data
req['sig'] = bh2u(pr.signature)
self.receive_requests[key] = req
@classmethod
def get_key_for_outgoing_invoice(cls, invoice: Invoice) -> str:
"""Return the key to use for this invoice in self.invoices."""
if invoice.is_lightning():
assert isinstance(invoice, LNInvoice)
key = invoice.rhash
else:
assert isinstance(invoice, OnchainInvoice)
key = invoice.id
return key
def get_key_for_receive_request(self, req: Invoice, *, sanity_checks: bool = False) -> str:
"""Return the key to use for this invoice in self.receive_requests."""
if not req.is_lightning():
assert isinstance(req, OnchainInvoice)
addr = req.get_address()
if sanity_checks:
if not bitcoin.is_address(addr):
raise Exception(_('Invalid Bitcoin address.'))
if not self.is_mine(addr):
raise Exception(_('Address not in wallet.'))
key = addr
else:
assert isinstance(req, LNInvoice)
key = req.rhash
return key
def add_payment_request(self, req: Invoice, *, write_to_disk: bool = True):
key = self.get_key_for_receive_request(req, sanity_checks=True)
message = req.message
self.receive_requests[key] = req
self.set_label(key, message) # should be a default label
if write_to_disk:
self.save_db()
return req
def delete_request(self, key):
""" lightning or on-chain """
if key in self.receive_requests:
self.remove_payment_request(key)
elif self.lnworker:
self.lnworker.delete_payment(key)
def delete_invoice(self, key):
""" lightning or on-chain """
if key in self.invoices:
self.invoices.pop(key)
elif self.lnworker:
self.lnworker.delete_payment(key)
def remove_payment_request(self, addr) -> bool:
found = False
if addr in self.receive_requests:
found = True
self.receive_requests.pop(addr)
self.save_db()
return found
def get_sorted_requests(self) -> List[Invoice]:
""" sorted by timestamp """
out = [self.get_request(x) for x in self.receive_requests.keys()]
out = [x for x in out if x is not None]
out.sort(key=lambda x: x.time)
return out
def get_unpaid_requests(self):
out = [self.get_request(x) for x in self.receive_requests.keys() if self.get_request_status(x) != PR_PAID]
out = [x for x in out if x is not None]
out.sort(key=lambda x: x.time)
return out
@abstractmethod
def get_fingerprint(self) -> str:
"""Returns a string that can be used to identify this wallet.
Used e.g. by Labels plugin, and LN channel backups.
Returns empty string "" for wallets that don't have an ID.
"""
pass
def can_import_privkey(self):
return False
def can_import_address(self):
return False
def can_delete_address(self):
return False
def has_password(self):
return self.has_keystore_encryption() or self.has_storage_encryption()
def can_have_keystore_encryption(self):
return self.keystore and self.keystore.may_have_password()
def get_available_storage_encryption_version(self) -> StorageEncryptionVersion:
"""Returns the type of storage encryption offered to the user.
A wallet file (storage) is either encrypted with this version
or is stored in plaintext.
"""
if isinstance(self.keystore, Hardware_KeyStore):
return StorageEncryptionVersion.XPUB_PASSWORD
else:
return StorageEncryptionVersion.USER_PASSWORD
def has_keystore_encryption(self):
"""Returns whether encryption is enabled for the keystore.
If True, e.g. signing a transaction will require a password.
"""
if self.can_have_keystore_encryption():
return self.db.get('use_encryption', False)
return False
def has_storage_encryption(self):
"""Returns whether encryption is enabled for the wallet file on disk."""
return self.storage and self.storage.is_encrypted()
@classmethod
def may_have_password(cls):
return True
def check_password(self, password):
if self.has_keystore_encryption():
self.keystore.check_password(password)
if self.has_storage_encryption():
self.storage.check_password(password)
def update_password(self, old_pw, new_pw, *, encrypt_storage: bool = True):
if old_pw is None and self.has_password():
raise InvalidPassword()
self.check_password(old_pw)
if self.storage:
if encrypt_storage:
enc_version = self.get_available_storage_encryption_version()
else:
enc_version = StorageEncryptionVersion.PLAINTEXT
self.storage.set_password(new_pw, enc_version)
# make sure next storage.write() saves changes
self.db.set_modified(True)
# note: Encrypting storage with a hw device is currently only
# allowed for non-multisig wallets. Further,
# Hardware_KeyStore.may_have_password() == False.
# If these were not the case,
# extra care would need to be taken when encrypting keystores.
self._update_password_for_keystore(old_pw, new_pw)
encrypt_keystore = self.can_have_keystore_encryption()
self.db.set_keystore_encryption(bool(new_pw) and encrypt_keystore)
self.save_db()
@abstractmethod
def _update_password_for_keystore(self, old_pw: Optional[str], new_pw: Optional[str]) -> None:
pass
def sign_message(self, address: str, message: str, password) -> bytes:
index = self.get_address_index(address)
script_type = self.get_txin_type(address)
assert script_type != "address"
return self.keystore.sign_message(index, message, password, script_type=script_type)
def decrypt_message(self, pubkey: str, message, password) -> bytes:
addr = self.pubkeys_to_address([pubkey])
index = self.get_address_index(addr)
return self.keystore.decrypt_message(index, message, password)
@abstractmethod
def pubkeys_to_address(self, pubkeys: Sequence[str]) -> Optional[str]:
pass
def price_at_timestamp(self, txid, price_func):
"""Returns fiat price of bitcoin at the time tx got confirmed."""
timestamp = self.get_tx_height(txid).timestamp
return price_func(timestamp if timestamp else time.time())
def average_price(self, txid, price_func, ccy) -> Decimal:
""" Average acquisition price of the inputs of a transaction """
input_value = 0
total_price = 0
txi_addresses = self.db.get_txi_addresses(txid)
if not txi_addresses:
return Decimal('NaN')
for addr in txi_addresses:
d = self.db.get_txi_addr(txid, addr)
for ser, v in d:
input_value += v
total_price += self.coin_price(ser.split(':')[0], price_func, ccy, v)
return total_price / (input_value/Decimal(COIN))
def clear_coin_price_cache(self):
self._coin_price_cache = {}
def coin_price(self, txid, price_func, ccy, txin_value) -> Decimal:
"""
Acquisition price of a coin.
This assumes that either all inputs are mine, or no input is mine.
"""
if txin_value is None:
return Decimal('NaN')
cache_key = "{}:{}:{}".format(str(txid), str(ccy), str(txin_value))
result = self._coin_price_cache.get(cache_key, None)
if result is not None:
return result
if self.db.get_txi_addresses(txid):
result = self.average_price(txid, price_func, ccy) * txin_value/Decimal(COIN)
self._coin_price_cache[cache_key] = result
return result
else:
fiat_value = self.get_fiat_value(txid, ccy)
if fiat_value is not None:
return fiat_value
else:
p = self.price_at_timestamp(txid, price_func)
return p * txin_value/Decimal(COIN)
def is_billing_address(self, addr):
# overridden for TrustedCoin wallets
return False
@abstractmethod
def is_watching_only(self) -> bool:
pass
def get_keystore(self) -> Optional[KeyStore]:
return self.keystore
def get_keystores(self) -> Sequence[KeyStore]:
return [self.keystore] if self.keystore else []
@abstractmethod
def save_keystore(self):
pass
@abstractmethod
def has_seed(self) -> bool:
pass
@abstractmethod
def get_all_known_addresses_beyond_gap_limit(self) -> Set[str]:
pass
def create_transaction(self, outputs, *, fee=None, feerate=None, change_addr=None, domain_addr=None, domain_coins=None,
unsigned=False, rbf=None, password=None, locktime=None):
if fee is not None and feerate is not None:
raise Exception("Cannot specify both 'fee' and 'feerate' at the same time!")
coins = self.get_spendable_coins(domain_addr)
if domain_coins is not None:
coins = [coin for coin in coins if (coin.prevout.to_str() in domain_coins)]
if feerate is not None:
fee_per_kb = 1000 * Decimal(feerate)
fee_estimator = partial(SimpleConfig.estimate_fee_for_feerate, fee_per_kb)
else:
fee_estimator = fee
tx = self.make_unsigned_transaction(
coins=coins,
outputs=outputs,
fee=fee_estimator,
change_addr=change_addr)
if locktime is not None:
tx.locktime = locktime
if rbf is None:
rbf = bool(self.config.get('use_rbf', True))
tx.set_rbf(rbf)
if not unsigned:
self.sign_transaction(tx, password)
return tx
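    # Usage sketch for create_transaction() above (illustrative; `wallet`,
    # `addr` and `password` are assumed to exist in the calling code). Exactly
    # one of `fee` (absolute amount in sat) or `feerate` (sat/byte) may be set:
    #
    #   outputs = [PartialTxOutput.from_address_and_value(addr, 50_000)]
    #   tx = wallet.create_transaction(outputs, feerate=2, rbf=True,
    #                                  password=password)
    #   # watch-only / offline signing: build it unsigned instead
    #   tx = wallet.create_transaction(outputs, fee=1_000, unsigned=True)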
def get_warning_for_risk_of_burning_coins_as_fees(self, tx: 'PartialTransaction') -> Optional[str]:
"""Returns a warning message if there is risk of burning coins as fees if we sign.
Note that if not all inputs are ismine, e.g. coinjoin, the risk is not just about fees.
Note:
- legacy sighash does not commit to any input amounts
- BIP-0143 sighash only commits to the *corresponding* input amount
- BIP-taproot sighash commits to *all* input amounts
"""
assert isinstance(tx, PartialTransaction)
# if we have all full previous txs, we *know* all the input amounts -> fine
if all([txin.utxo for txin in tx.inputs()]):
return None
# a single segwit input -> fine
if len(tx.inputs()) == 1 and tx.inputs()[0].is_segwit() and tx.inputs()[0].witness_utxo:
return None
# coinjoin or similar
if any([not self.is_mine(txin.address) for txin in tx.inputs()]):
return (_("Warning") + ": "
+ _("The input amounts could not be verified as the previous transactions are missing.\n"
"The amount of money being spent CANNOT be verified."))
# some inputs are legacy
if any([not txin.is_segwit() for txin in tx.inputs()]):
return (_("Warning") + ": "
+ _("The fee could not be verified. Signing non-segwit inputs is risky:\n"
"if this transaction was maliciously modified before you sign,\n"
"you might end up paying a higher mining fee than displayed."))
# all inputs are segwit
# https://lists.linuxfoundation.org/pipermail/bitcoin-dev/2017-August/014843.html
return (_("Warning") + ": "
+ _("If you received this transaction from an untrusted device, "
"do not accept to sign it more than once,\n"
"otherwise you could end up paying a different fee."))
def get_tx_fee_warning(
self, *,
invoice_amt: int,
tx_size: int,
fee: int) -> Optional[Tuple[bool, str, str]]:
feerate = Decimal(fee) / tx_size # sat/byte
fee_ratio = Decimal(fee) / invoice_amt if invoice_amt else 1
long_warning = None
short_warning = None
allow_send = True
if feerate < self.relayfee() / 1000:
long_warning = (
_("This transaction requires a higher fee, or it will not be propagated by your current server.") + " "
+ _("Try to raise your transaction fee, or use a server with a lower relay fee."))
short_warning = _("below relay fee") + "!"
allow_send = False
elif fee_ratio >= FEE_RATIO_HIGH_WARNING:
long_warning = (
_('Warning') + ': ' + _("The fee for this transaction seems unusually high.")
+ f' ({fee_ratio*100:.2f}% of amount)')
short_warning = _("high fee ratio") + "!"
elif feerate > FEERATE_WARNING_HIGH_FEE / 1000:
long_warning = (
_('Warning') + ': ' + _("The fee for this transaction seems unusually high.")
+ f' (feerate: {feerate:.2f} sat/byte)')
short_warning = _("high fee rate") + "!"
if long_warning is None:
return None
else:
return allow_send, long_warning, short_warning
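# Illustrative helper (hypothetical, not part of Electrum's API): shows how a
# caller is expected to consume the optional (allow_send, long_warning,
# short_warning) tuple returned by Abstract_Wallet.get_tx_fee_warning() above.
def _example_consume_fee_warning(wallet, *, invoice_amt: int, tx_size: int,
                                 fee: int) -> bool:
    warning = wallet.get_tx_fee_warning(invoice_amt=invoice_amt,
                                        tx_size=tx_size, fee=fee)
    if warning is None:
        return True  # nothing suspicious about the fee
    allow_send, long_warning, short_warning = warning
    # a GUI would show short_warning inline and long_warning in a dialog
    return allow_send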
class Simple_Wallet(Abstract_Wallet):
# wallet with a single keystore
def is_watching_only(self):
return self.keystore.is_watching_only()
def _update_password_for_keystore(self, old_pw, new_pw):
if self.keystore and self.keystore.may_have_password():
self.keystore.update_password(old_pw, new_pw)
self.save_keystore()
def save_keystore(self):
self.db.put('keystore', self.keystore.dump())
@abstractmethod
def get_public_key(self, address: str) -> Optional[str]:
pass
def get_public_keys(self, address: str) -> Sequence[str]:
return [self.get_public_key(address)]
def get_redeem_script(self, address: str) -> Optional[str]:
txin_type = self.get_txin_type(address)
if txin_type in ('p2pkh', 'p2wpkh', 'p2pk'):
return None
if txin_type == 'p2wpkh-p2sh':
pubkey = self.get_public_key(address)
return bitcoin.p2wpkh_nested_script(pubkey)
if txin_type == 'address':
return None
raise UnknownTxinType(f'unexpected txin_type {txin_type}')
def get_witness_script(self, address: str) -> Optional[str]:
return None
class Imported_Wallet(Simple_Wallet):
# wallet made of imported addresses
wallet_type = 'imported'
txin_type = 'address'
def __init__(self, db, storage, *, config):
Abstract_Wallet.__init__(self, db, storage, config=config)
self.use_change = db.get('use_change', False)
def is_watching_only(self):
return self.keystore is None
def can_import_privkey(self):
return bool(self.keystore)
def load_keystore(self):
self.keystore = load_keystore(self.db, 'keystore') if self.db.get('keystore') else None
def save_keystore(self):
self.db.put('keystore', self.keystore.dump())
def can_import_address(self):
return self.is_watching_only()
def can_delete_address(self):
return True
def has_seed(self):
return False
def is_deterministic(self):
return False
def is_change(self, address):
return False
def get_all_known_addresses_beyond_gap_limit(self) -> Set[str]:
return set()
def get_fingerprint(self):
return ''
def get_addresses(self):
# note: overridden so that the history can be cleared
return self.db.get_imported_addresses()
def get_receiving_addresses(self, **kwargs):
return self.get_addresses()
def get_change_addresses(self, **kwargs):
return self.get_addresses()
def import_addresses(self, addresses: List[str], *,
write_to_disk=True) -> Tuple[List[str], List[Tuple[str, str]]]:
good_addr = [] # type: List[str]
bad_addr = [] # type: List[Tuple[str, str]]
for address in addresses:
if not bitcoin.is_address(address):
bad_addr.append((address, _('invalid address')))
continue
if self.db.has_imported_address(address):
bad_addr.append((address, _('address already in wallet')))
continue
good_addr.append(address)
self.db.add_imported_address(address, {})
self.add_address(address)
if write_to_disk:
self.save_db()
return good_addr, bad_addr
def import_address(self, address: str) -> str:
good_addr, bad_addr = self.import_addresses([address])
if good_addr and good_addr[0] == address:
return address
else:
raise BitcoinException(str(bad_addr[0][1]))
def delete_address(self, address: str) -> None:
if not self.db.has_imported_address(address):
return
if len(self.get_addresses()) <= 1:
raise UserFacingException("cannot delete last remaining address from wallet")
transactions_to_remove = set() # only referred to by this address
transactions_new = set() # txs that are not only referred to by address
with self.lock:
for addr in self.db.get_history():
details = self.get_address_history(addr)
if addr == address:
for tx_hash, height in details:
transactions_to_remove.add(tx_hash)
else:
for tx_hash, height in details:
transactions_new.add(tx_hash)
transactions_to_remove -= transactions_new
self.db.remove_addr_history(address)
for tx_hash in transactions_to_remove:
self._remove_transaction(tx_hash)
self.set_label(address, None)
self.remove_payment_request(address)
self.set_frozen_state_of_addresses([address], False)
pubkey = self.get_public_key(address)
self.db.remove_imported_address(address)
if pubkey:
# delete key iff no other address uses it (e.g. p2pkh and p2wpkh for same key)
for txin_type in bitcoin.WIF_SCRIPT_TYPES.keys():
try:
addr2 = bitcoin.pubkey_to_address(txin_type, pubkey)
except NotImplementedError:
pass
else:
if self.db.has_imported_address(addr2):
break
else:
self.keystore.delete_imported_key(pubkey)
self.save_keystore()
self.save_db()
def get_change_addresses_for_new_transaction(self, *args, **kwargs) -> List[str]:
# for an imported wallet, if all "change addresses" are already used,
# it is probably better to send change back to the "from address", than to
# send it to another random used address and link them together, hence
# we force "allow_reusing_used_change_addrs=False"
return super().get_change_addresses_for_new_transaction(
*args,
**{**kwargs, "allow_reusing_used_change_addrs": False},
)
def calc_unused_change_addresses(self) -> Sequence[str]:
with self.lock:
unused_addrs = [addr for addr in self.get_change_addresses()
if not self.is_used(addr) and not self.is_address_reserved(addr)]
return unused_addrs
def is_mine(self, address) -> bool:
if not address: return False
return self.db.has_imported_address(address)
def get_address_index(self, address) -> Optional[str]:
# returns None if address is not mine
return self.get_public_key(address)
def get_address_path_str(self, address):
return None
def get_public_key(self, address) -> Optional[str]:
x = self.db.get_imported_address(address)
return x.get('pubkey') if x else None
def import_private_keys(self, keys: List[str], password: Optional[str], *,
write_to_disk=True) -> Tuple[List[str], List[Tuple[str, str]]]:
good_addr = [] # type: List[str]
bad_keys = [] # type: List[Tuple[str, str]]
for key in keys:
try:
txin_type, pubkey = self.keystore.import_privkey(key, password)
except Exception as e:
bad_keys.append((key, _('invalid private key') + f': {e}'))
continue
if txin_type not in ('p2pkh', 'p2wpkh', 'p2wpkh-p2sh'):
bad_keys.append((key, _('not implemented type') + f': {txin_type}'))
continue
addr = bitcoin.pubkey_to_address(txin_type, pubkey)
good_addr.append(addr)
self.db.add_imported_address(addr, {'type':txin_type, 'pubkey':pubkey})
self.add_address(addr)
self.save_keystore()
if write_to_disk:
self.save_db()
return good_addr, bad_keys
def import_private_key(self, key: str, password: Optional[str]) -> str:
good_addr, bad_keys = self.import_private_keys([key], password=password)
if good_addr:
return good_addr[0]
else:
raise BitcoinException(str(bad_keys[0][1]))
def get_txin_type(self, address):
return self.db.get_imported_address(address).get('type', 'address')
@profiler
def try_detecting_internal_addresses_corruption(self):
# we check only a random sample, for performance
addresses = self.get_addresses()
addresses = random.sample(addresses, min(len(addresses), 10))
for addr_found in addresses:
self.check_address_for_corruption(addr_found)
def check_address_for_corruption(self, addr):
if addr and self.is_mine(addr):
pubkey = self.get_public_key(addr)
if not pubkey:
return
txin_type = self.get_txin_type(addr)
if txin_type == 'address':
return
if addr != bitcoin.pubkey_to_address(txin_type, pubkey):
raise InternalAddressCorruption()
def _add_input_sig_info(self, txin, address, *, only_der_suffix):
if not self.is_mine(address):
return
if txin.script_type in ('unknown', 'address'):
return
elif txin.script_type in ('p2pkh', 'p2wpkh', 'p2wpkh-p2sh'):
pubkey = self.get_public_key(address)
if not pubkey:
return
txin.pubkeys = [bfh(pubkey)]
else:
raise Exception(f'Unexpected script type: {txin.script_type}. '
f'Imported wallets are not implemented to handle this.')
def pubkeys_to_address(self, pubkeys):
pubkey = pubkeys[0]
# FIXME This is slow.
# Ideally we would re-derive the address from the pubkey and the txin_type,
# but we don't know the txin_type, and we only have an addr->txin_type map.
# so instead a linear search of reverse-lookups is done...
for addr in self.db.get_imported_addresses():
if self.db.get_imported_address(addr)['pubkey'] == pubkey:
return addr
return None
def decrypt_message(self, pubkey: str, message, password) -> bytes:
# this is significantly faster than the implementation in the superclass
return self.keystore.decrypt_message(pubkey, message, password)
class Deterministic_Wallet(Abstract_Wallet):
def __init__(self, db, storage, *, config):
self._ephemeral_addr_to_addr_index = {} # type: Dict[str, Sequence[int]]
Abstract_Wallet.__init__(self, db, storage, config=config)
self.gap_limit = db.get('gap_limit', 20)
# generate addresses now. note that without libsecp this might block
# for a few seconds!
self.synchronize()
# lightning_privkey2 is not deterministic (legacy wallets, bip39)
ln_xprv = self.db.get('lightning_xprv') or self.db.get('lightning_privkey2')
# lnworker can only be initialized once receiving addresses are available
# therefore we instantiate lnworker in DeterministicWallet
self.lnworker = LNWallet(self, ln_xprv) if ln_xprv else None
def has_seed(self):
return self.keystore.has_seed()
def get_addresses(self):
# note: overridden so that the history can be cleared.
# addresses are ordered based on derivation
out = self.get_receiving_addresses()
out += self.get_change_addresses()
return out
def get_receiving_addresses(self, *, slice_start=None, slice_stop=None):
return self.db.get_receiving_addresses(slice_start=slice_start, slice_stop=slice_stop)
def get_change_addresses(self, *, slice_start=None, slice_stop=None):
return self.db.get_change_addresses(slice_start=slice_start, slice_stop=slice_stop)
@profiler
def try_detecting_internal_addresses_corruption(self):
addresses_all = self.get_addresses()
# sample 1: first few
addresses_sample1 = addresses_all[:10]
# sample2: a few more randomly selected
addresses_rand = addresses_all[10:]
addresses_sample2 = random.sample(addresses_rand, min(len(addresses_rand), 10))
for addr_found in itertools.chain(addresses_sample1, addresses_sample2):
self.check_address_for_corruption(addr_found)
def check_address_for_corruption(self, addr):
if addr and self.is_mine(addr):
if addr != self.derive_address(*self.get_address_index(addr)):
raise InternalAddressCorruption()
def get_seed(self, password):
return self.keystore.get_seed(password)
def change_gap_limit(self, value):
'''This method is not called in the code, it is kept for console use'''
value = int(value)
if value >= self.min_acceptable_gap():
self.gap_limit = value
self.db.put('gap_limit', self.gap_limit)
self.save_db()
return True
else:
return False
def num_unused_trailing_addresses(self, addresses):
k = 0
for addr in addresses[::-1]:
if self.db.get_addr_history(addr):
break
k += 1
return k
def min_acceptable_gap(self) -> int:
# fixme: this assumes wallet is synchronized
n = 0
nmax = 0
addresses = self.get_receiving_addresses()
k = self.num_unused_trailing_addresses(addresses)
for addr in addresses[0:-k]:
if self.address_is_old(addr):
n = 0
else:
n += 1
nmax = max(nmax, n)
return nmax + 1
@abstractmethod
def derive_pubkeys(self, c: int, i: int) -> Sequence[str]:
pass
def derive_address(self, for_change: int, n: int) -> str:
for_change = int(for_change)
pubkeys = self.derive_pubkeys(for_change, n)
return self.pubkeys_to_address(pubkeys)
def export_private_key_for_path(self, path: Union[Sequence[int], str], password: Optional[str]) -> str:
if isinstance(path, str):
path = convert_bip32_path_to_list_of_uint32(path)
pk, compressed = self.keystore.get_private_key(path, password)
txin_type = self.get_txin_type() # assumes no mixed-scripts in wallet
return bitcoin.serialize_privkey(pk, compressed, txin_type)
def get_public_keys_with_deriv_info(self, address: str):
der_suffix = self.get_address_index(address)
der_suffix = [int(x) for x in der_suffix]
return {k.derive_pubkey(*der_suffix): (k, der_suffix)
for k in self.get_keystores()}
def _add_input_sig_info(self, txin, address, *, only_der_suffix):
self._add_txinout_derivation_info(txin, address, only_der_suffix=only_der_suffix)
def _add_txinout_derivation_info(self, txinout, address, *, only_der_suffix):
if not self.is_mine(address):
return
pubkey_deriv_info = self.get_public_keys_with_deriv_info(address)
txinout.pubkeys = sorted([pk for pk in list(pubkey_deriv_info)])
for pubkey in pubkey_deriv_info:
ks, der_suffix = pubkey_deriv_info[pubkey]
fp_bytes, der_full = ks.get_fp_and_derivation_to_be_used_in_partial_tx(der_suffix,
only_der_suffix=only_der_suffix)
txinout.bip32_paths[pubkey] = (fp_bytes, der_full)
def create_new_address(self, for_change: bool = False):
assert type(for_change) is bool
with self.lock:
n = self.db.num_change_addresses() if for_change else self.db.num_receiving_addresses()
address = self.derive_address(int(for_change), n)
self.db.add_change_address(address) if for_change else self.db.add_receiving_address(address)
self.add_address(address)
if for_change:
# note: if it's actually "old", it will get filtered later
self._not_old_change_addresses.append(address)
return address
def synchronize_sequence(self, for_change):
limit = self.gap_limit_for_change if for_change else self.gap_limit
while True:
num_addr = self.db.num_change_addresses() if for_change else self.db.num_receiving_addresses()
if num_addr < limit:
self.create_new_address(for_change)
continue
if for_change:
last_few_addresses = self.get_change_addresses(slice_start=-limit)
else:
last_few_addresses = self.get_receiving_addresses(slice_start=-limit)
if any(map(self.address_is_old, last_few_addresses)):
self.create_new_address(for_change)
else:
break
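    # Concrete reading of the loop above (illustrative numbers): with
    # gap_limit == 20, addresses are derived until at least 20 exist, and then
    # one more is derived whenever any of the last 20 has on-chain history, so
    # the wallet always keeps a gap of `gap_limit` unused addresses at the end
    # of the derivation sequence.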
@AddressSynchronizer.with_local_height_cached
def synchronize(self):
with self.lock:
self.synchronize_sequence(False)
self.synchronize_sequence(True)
def get_all_known_addresses_beyond_gap_limit(self):
# note that we don't stop at first large gap
found = set()
def process_addresses(addrs, gap_limit):
rolling_num_unused = 0
for addr in addrs:
if self.db.get_addr_history(addr):
rolling_num_unused = 0
else:
if rolling_num_unused >= gap_limit:
found.add(addr)
rolling_num_unused += 1
process_addresses(self.get_receiving_addresses(), self.gap_limit)
process_addresses(self.get_change_addresses(), self.gap_limit_for_change)
return found
def get_address_index(self, address) -> Optional[Sequence[int]]:
return self.db.get_address_index(address) or self._ephemeral_addr_to_addr_index.get(address)
def get_address_path_str(self, address):
intpath = self.get_address_index(address)
if intpath is None:
return None
return convert_bip32_intpath_to_strpath(intpath)
def _learn_derivation_path_for_address_from_txinout(self, txinout, address):
for ks in self.get_keystores():
pubkey, der_suffix = ks.find_my_pubkey_in_txinout(txinout, only_der_suffix=True)
if der_suffix is not None:
# note: we already know the pubkey belongs to the keystore,
# but the script template might be different
if len(der_suffix) != 2: continue
try:
my_address = self.derive_address(*der_suffix)
except CannotDerivePubkey:
my_address = None
if my_address == address:
self._ephemeral_addr_to_addr_index[address] = list(der_suffix)
return True
return False
def get_master_public_keys(self):
return [self.get_master_public_key()]
def get_fingerprint(self):
return self.get_master_public_key()
def get_txin_type(self, address=None):
return self.txin_type
class Simple_Deterministic_Wallet(Simple_Wallet, Deterministic_Wallet):
""" Deterministic Wallet with a single pubkey per address """
def __init__(self, db, storage, *, config):
Deterministic_Wallet.__init__(self, db, storage, config=config)
def get_public_key(self, address):
sequence = self.get_address_index(address)
pubkeys = self.derive_pubkeys(*sequence)
return pubkeys[0]
def load_keystore(self):
self.keystore = load_keystore(self.db, 'keystore')
try:
xtype = bip32.xpub_type(self.keystore.xpub)
except:
xtype = 'standard'
self.txin_type = 'p2pkh' if xtype == 'standard' else xtype
def get_master_public_key(self):
return self.keystore.get_master_public_key()
def derive_pubkeys(self, c, i):
return [self.keystore.derive_pubkey(c, i).hex()]
class Standard_Wallet(Simple_Deterministic_Wallet):
wallet_type = 'standard'
def pubkeys_to_address(self, pubkeys):
pubkey = pubkeys[0]
return bitcoin.pubkey_to_address(self.txin_type, pubkey)
class Multisig_Wallet(Deterministic_Wallet):
# generic m of n
def __init__(self, db, storage, *, config):
self.wallet_type = db.get('wallet_type')
self.m, self.n = multisig_type(self.wallet_type)
Deterministic_Wallet.__init__(self, db, storage, config=config)
def get_public_keys(self, address):
return [pk.hex() for pk in self.get_public_keys_with_deriv_info(address)]
def pubkeys_to_address(self, pubkeys):
redeem_script = self.pubkeys_to_scriptcode(pubkeys)
return bitcoin.redeem_script_to_address(self.txin_type, redeem_script)
def pubkeys_to_scriptcode(self, pubkeys: Sequence[str]) -> str:
return transaction.multisig_script(sorted(pubkeys), self.m)
def get_redeem_script(self, address):
txin_type = self.get_txin_type(address)
pubkeys = self.get_public_keys(address)
scriptcode = self.pubkeys_to_scriptcode(pubkeys)
if txin_type == 'p2sh':
return scriptcode
elif txin_type == 'p2wsh-p2sh':
return bitcoin.p2wsh_nested_script(scriptcode)
elif txin_type == 'p2wsh':
return None
raise UnknownTxinType(f'unexpected txin_type {txin_type}')
def get_witness_script(self, address):
txin_type = self.get_txin_type(address)
pubkeys = self.get_public_keys(address)
scriptcode = self.pubkeys_to_scriptcode(pubkeys)
if txin_type == 'p2sh':
return None
elif txin_type in ('p2wsh-p2sh', 'p2wsh'):
return scriptcode
raise UnknownTxinType(f'unexpected txin_type {txin_type}')
def derive_pubkeys(self, c, i):
return [k.derive_pubkey(c, i).hex() for k in self.get_keystores()]
def load_keystore(self):
self.keystores = {}
for i in range(self.n):
name = 'x%d/'%(i+1)
self.keystores[name] = load_keystore(self.db, name)
self.keystore = self.keystores['x1/']
xtype = bip32.xpub_type(self.keystore.xpub)
self.txin_type = 'p2sh' if xtype == 'standard' else xtype
def save_keystore(self):
for name, k in self.keystores.items():
self.db.put(name, k.dump())
def get_keystore(self):
return self.keystores.get('x1/')
def get_keystores(self):
return [self.keystores[i] for i in sorted(self.keystores.keys())]
def can_have_keystore_encryption(self):
return any([k.may_have_password() for k in self.get_keystores()])
def _update_password_for_keystore(self, old_pw, new_pw):
for name, keystore in self.keystores.items():
if keystore.may_have_password():
keystore.update_password(old_pw, new_pw)
self.db.put(name, keystore.dump())
def check_password(self, password):
for name, keystore in self.keystores.items():
if keystore.may_have_password():
keystore.check_password(password)
if self.has_storage_encryption():
self.storage.check_password(password)
def get_available_storage_encryption_version(self):
# multisig wallets are not offered hw device encryption
return StorageEncryptionVersion.USER_PASSWORD
def has_seed(self):
return self.keystore.has_seed()
def is_watching_only(self):
return all([k.is_watching_only() for k in self.get_keystores()])
def get_master_public_key(self):
return self.keystore.get_master_public_key()
def get_master_public_keys(self):
return [k.get_master_public_key() for k in self.get_keystores()]
def get_fingerprint(self):
return ''.join(sorted(self.get_master_public_keys()))
wallet_types = ['standard', 'multisig', 'imported']
def register_wallet_type(category):
wallet_types.append(category)
wallet_constructors = {
'standard': Standard_Wallet,
'old': Standard_Wallet,
'xpub': Standard_Wallet,
'imported': Imported_Wallet
}
def register_constructor(wallet_type, constructor):
wallet_constructors[wallet_type] = constructor
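# Usage sketch (illustrative; this is how a plugin -- e.g. TrustedCoin -- can
# hook an extra wallet type into the factory below; the names here are made up):
#
#   class TwoFactorWallet(Standard_Wallet):
#       wallet_type = '2fa_example'
#
#   register_wallet_type('2fa_example')
#   register_constructor('2fa_example', TwoFactorWallet)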
# former WalletFactory
class Wallet(object):
"""The main wallet "entry point".
This class is actually a factory that will return a wallet of the correct
type when passed a WalletStorage instance."""
def __new__(self, db: 'WalletDB', storage: Optional[WalletStorage], *, config: SimpleConfig):
wallet_type = db.get('wallet_type')
WalletClass = Wallet.wallet_class(wallet_type)
wallet = WalletClass(db, storage, config=config)
return wallet
@staticmethod
def wallet_class(wallet_type):
if multisig_type(wallet_type):
return Multisig_Wallet
if wallet_type in wallet_constructors:
return wallet_constructors[wallet_type]
raise WalletFileException("Unknown wallet type: " + str(wallet_type))
def create_new_wallet(*, path, config: SimpleConfig, passphrase=None, password=None,
encrypt_file=True, seed_type=None, gap_limit=None) -> dict:
"""Create a new wallet"""
storage = WalletStorage(path)
if storage.file_exists():
raise Exception("Remove the existing wallet first!")
db = WalletDB('', manual_upgrades=False)
seed = Mnemonic('en').make_seed(seed_type=seed_type)
k = keystore.from_seed(seed, passphrase)
db.put('keystore', k.dump())
db.put('wallet_type', 'standard')
if k.can_have_deterministic_lightning_xprv():
db.put('lightning_xprv', k.get_lightning_xprv(None))
if gap_limit is not None:
db.put('gap_limit', gap_limit)
wallet = Wallet(db, storage, config=config)
wallet.update_password(old_pw=None, new_pw=password, encrypt_storage=encrypt_file)
wallet.synchronize()
msg = "Please keep your seed in a safe place; if you lose it, you will not be able to restore your wallet."
wallet.save_db()
return {'seed': seed, 'wallet': wallet, 'msg': msg}
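# Usage sketch for create_new_wallet() above (illustrative; `config` is an
# existing SimpleConfig and the target path must not exist yet):
#
#   d = create_new_wallet(path='/tmp/wallet_1', config=config,
#                         password='correct horse', encrypt_file=True)
#   seed, wallet = d['seed'], d['wallet']   # back up the seed before use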
def restore_wallet_from_text(text, *, path, config: SimpleConfig,
passphrase=None, password=None, encrypt_file=True,
gap_limit=None) -> dict:
"""Restore a wallet from text. Text can be a seed phrase, a master
public key, a master private key, a list of bitcoin addresses
or bitcoin private keys."""
storage = WalletStorage(path)
if storage.file_exists():
raise Exception("Remove the existing wallet first!")
db = WalletDB('', manual_upgrades=False)
text = text.strip()
if keystore.is_address_list(text):
wallet = Imported_Wallet(db, storage, config=config)
addresses = text.split()
good_inputs, bad_inputs = wallet.import_addresses(addresses, write_to_disk=False)
# FIXME tell user about bad_inputs
if not good_inputs:
raise Exception("None of the given addresses can be imported")
elif keystore.is_private_key_list(text, allow_spaces_inside_key=False):
k = keystore.Imported_KeyStore({})
db.put('keystore', k.dump())
wallet = Imported_Wallet(db, storage, config=config)
keys = keystore.get_private_keys(text, allow_spaces_inside_key=False)
good_inputs, bad_inputs = wallet.import_private_keys(keys, None, write_to_disk=False)
# FIXME tell user about bad_inputs
if not good_inputs:
raise Exception("None of the given privkeys can be imported")
else:
if keystore.is_master_key(text):
k = keystore.from_master_key(text)
elif keystore.is_seed(text):
k = keystore.from_seed(text, passphrase)
if k.can_have_deterministic_lightning_xprv():
db.put('lightning_xprv', k.get_lightning_xprv(None))
else:
raise Exception("Seed or key not recognized")
db.put('keystore', k.dump())
db.put('wallet_type', 'standard')
if gap_limit is not None:
db.put('gap_limit', gap_limit)
wallet = Wallet(db, storage, config=config)
assert not storage.file_exists(), "file was created too soon! plaintext keys might have been written to disk"
wallet.update_password(old_pw=None, new_pw=password, encrypt_storage=encrypt_file)
wallet.synchronize()
msg = ("This wallet was restored offline. It may contain more addresses than displayed. "
"Start a daemon and use load_wallet to sync its history.")
wallet.save_db()
return {'wallet': wallet, 'msg': msg}
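# Usage sketch for restore_wallet_from_text() above (illustrative): `text` may
# be a seed phrase, a master public/private key, a space-separated list of
# addresses (watch-only) or a list of WIF private keys:
#
#   d = restore_wallet_from_text('witch collapse practice feed shame open ...',
#                                path='/tmp/restored', config=config)
#   wallet = d['wallet']   # restored offline; run a daemon to sync its history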
def check_password_for_directory(config: SimpleConfig, old_password, new_password=None) -> Tuple[bool, bool]:
"""Checks password against all wallets, returns whether they can be unified and whether they are already.
If new_password is not None, update all wallet passwords to new_password.
"""
dirname = os.path.dirname(config.get_wallet_path())
failed = []
is_unified = True
for filename in os.listdir(dirname):
path = os.path.join(dirname, filename)
if not os.path.isfile(path):
continue
basename = os.path.basename(path)
storage = WalletStorage(path)
if not storage.is_encrypted():
is_unified = False
            # it is a bit wasteful to load the wallet here, but that is fine
# because we are progressively enforcing storage encryption.
try:
db = WalletDB(storage.read(), manual_upgrades=False)
wallet = Wallet(db, storage, config=config)
except:
_logger.exception(f'failed to load {basename}:')
failed.append(basename)
continue
if wallet.has_keystore_encryption():
try:
wallet.check_password(old_password)
except:
failed.append(basename)
continue
if new_password:
wallet.update_password(old_password, new_password)
else:
if new_password:
wallet.update_password(None, new_password)
continue
if not storage.is_encrypted_with_user_pw():
failed.append(basename)
continue
try:
storage.check_password(old_password)
except:
failed.append(basename)
continue
try:
db = WalletDB(storage.read(), manual_upgrades=False)
wallet = Wallet(db, storage, config=config)
except:
_logger.exception(f'failed to load {basename}:')
failed.append(basename)
continue
try:
wallet.check_password(old_password)
except:
failed.append(basename)
continue
if new_password:
wallet.update_password(old_password, new_password)
can_be_unified = failed == []
is_unified = can_be_unified and is_unified
return can_be_unified, is_unified
def update_password_for_directory(config: SimpleConfig, old_password, new_password) -> bool:
" returns whether password is unified "
if new_password is None:
# we opened a non-encrypted wallet
return False
can_be_unified, is_unified = check_password_for_directory(config, old_password, None)
if not can_be_unified:
return False
if is_unified and old_password == new_password:
return True
check_password_for_directory(config, old_password, new_password)
return True
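# Usage sketch (illustrative): unify the password of every wallet file in the
# wallet directory, e.g. after the user changes the password in the GUI:
#
#   can_be_unified, is_unified = check_password_for_directory(config, old_pw)
#   if can_be_unified and not is_unified:
#       update_password_for_directory(config, old_pw, new_pw)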
| {
"content_hash": "68a974b66a378fba81389f8143f061b6",
"timestamp": "",
"source": "github",
"line_count": 3399,
"max_line_length": 150,
"avg_line_length": 41.79082082965578,
"alnum_prop": 0.5836307700972213,
"repo_name": "wakiyamap/electrum-mona",
"id": "40aaa6630e97d7edb65ed335a333aed488f09fe4",
"size": "143415",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "electrum_mona/wallet.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "13043"
},
{
"name": "GLSL",
"bytes": "289"
},
{
"name": "Java",
"bytes": "2929"
},
{
"name": "Makefile",
"bytes": "2162"
},
{
"name": "NSIS",
"bytes": "7779"
},
{
"name": "Python",
"bytes": "4381566"
},
{
"name": "Ruby",
"bytes": "16375"
},
{
"name": "Shell",
"bytes": "100799"
},
{
"name": "kvlang",
"bytes": "67448"
}
],
"symlink_target": ""
} |
"""
This file is part of python-webuntis
:copyright: (c) 2012 by Markus Unterwaditzer.
:license: BSD, see LICENSE for more details.
"""
from webuntis import utils, objects, errors
from webuntis.utils import result_wrapper, log, rpc_request
from webuntis.utils.userinput import unicode_string
class JSONRPCSession(object):
"""Lower-level version of :py:class:`Session`. Do not use this."""
config = None
'''Dictionary with configuration.'''
def __init__(self, **kwargs):
self.config = utils.FilterDict(utils.config_keys)
config = {
'server': None,
'school': None,
'useragent': None,
'username': None,
'password': None,
'jsessionid': None,
'login_repeat': 0,
'_http_session': None
}
config.update(kwargs)
self.config.update(config)
def __enter__(self):
"""Context-manager"""
return self
def __exit__(self, exc_type, exc_value, traceback):
"""Context-manager -- the only thing we need to clean up is to log out
"""
self.logout(suppress_errors=True)
def logout(self, suppress_errors=False):
"""
Log out of session
:type suppress_errors: bool
:param suppress_errors: Whether to suppress errors.
:raises: :py:class:`webuntis.errors.NotLoggedInError` -- Can't log out
because not logged in. Raised unless ``suppress_errors`` is
``True``.
"""
def throw_errors():
if not suppress_errors:
raise errors.NotLoggedInError('We already were logged out.')
try:
# Send a JSON-RPC 'logout' method without parameters to log out
self._request('logout')
except errors.NotLoggedInError:
throw_errors()
try:
del self.config['jsessionid']
except KeyError:
throw_errors()
def login(self):
"""Initializes an authentication, provided we have the credentials for
it.
:returns: The session. This is useful for jQuery-like command
chaining::
s = webuntis.Session(...).login()
:raises: :py:class:`webuntis.errors.BadCredentialsError` --
Username/Password missing or invalid.
:raises: :py:class:`webuntis.errors.AuthError` -- Didn't receive a
session ID for unknown reasons.
"""
try:
username = self.config['username']
password = self.config['password']
useragent = self.config['useragent']
except KeyError as e:
raise errors.BadCredentialsError('Missing config: ' + str(e))
res = self._request('authenticate', {
'user': username,
'password': password,
'client': useragent
}, use_login_repeat=False)
if 'sessionId' in res:
sid = self.config['jsessionid'] = res['sessionId']
log('debug', 'Did get a jsessionid from the server: ' + sid)
else:
raise errors.AuthError('Something went wrong while authenticating',
res)
return self
def _request(self, method, params=None, use_login_repeat=None):
if not isinstance(method, unicode_string):
method = method.decode('ascii')
if use_login_repeat is None:
use_login_repeat = (method not in ('logout', 'authenticate'))
attempts_left = self.config['login_repeat'] if use_login_repeat else 0
data = None
while data is None:
try:
data = rpc_request(self.config, method, params or {})
except errors.NotLoggedInError:
if attempts_left > 0:
self.logout(suppress_errors=True)
self.login()
else:
raise errors.NotLoggedInError(
'Tried to login several times, failed. Original method'
' was ' + method)
else:
return data
attempts_left -= 1 # new round!
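# Usage sketch (illustrative): the context-manager protocol above logs the
# session out automatically, and `login_repeat` lets _request() re-authenticate
# transparently when the server has expired the session id:
#
#   with webuntis.Session(server='thalia.webuntis.com', school='demo',
#                         username='api', password='api',
#                         useragent='me@example.com',
#                         login_repeat=1).login() as s:
#       print(s.teachers())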
class ResultWrapperMixin(object):
@result_wrapper
def departments(self):
"""Get all departments.
:rtype: :py:class:`webuntis.objects.DepartmentList`
"""
return objects.DepartmentList, 'getDepartments', {}
@result_wrapper
def holidays(self):
"""Get all holidays.
:rtype: :py:class:`webuntis.objects.HolidayList`
"""
return objects.HolidayList, 'getHolidays', {}
@result_wrapper
def klassen(self, schoolyear=None):
"""Get all school classes.
:param schoolyear: The schoolyear where the classes should be fetched
from.
:type schoolyear: :py:class:`webuntis.objects.SchoolyearObject` or an
integer ID of it
:rtype: :py:class:`webuntis.objects.KlassenList`
"""
params = {}
if schoolyear:
params['schoolyearId'] = int(schoolyear)
return objects.KlassenList, 'getKlassen', params
@result_wrapper
def timetable(self, start, end, **type_and_id):
"""Get the timetable for a specific school class and time period.
:type start: :py:class:`datetime.datetime` or :py:class:`datetime.date` or int
:param start: The beginning of the time period.
:type end: :py:class:`datetime.datetime` or :py:class:`datetime.date` or int
:param end: The end of the time period.
:rtype: :py:class:`webuntis.objects.PeriodList`
Furthermore you have to explicitly define a klasse, teacher, subject,
room or student parameter containing the id or the object of the thing
you want to get a timetable about::
import datetime
today = datetime.date.today()
monday = today - datetime.timedelta(days=today.weekday())
friday = monday + datetime.timedelta(days=4)
klasse = s.klassen().filter(id=1)[0] # schoolclass #1
tt = s.timetable(klasse=klasse, start=monday, end=friday)
:raises: :exc:`ValueError`, :exc:`TypeError`
"""
element_type_table = {
'klasse': 1,
'teacher': 2,
'subject': 3,
'room': 4,
'student': 5
}
invalid_type_error = TypeError(
'You have to specify exactly one of the following parameters by '
'keyword: ' +
(', '.join(element_type_table.keys()))
)
if len(type_and_id) != 1:
raise invalid_type_error
element_type, element_id = list(type_and_id.items())[0]
element_type = utils.userinput.string(element_type)
if element_type not in element_type_table:
raise invalid_type_error
# if we have to deal with an object in element_id,
# its id gets placed here anyway
parameters = self._create_date_param(end, start,
id=int(element_id), type=element_type_table[element_type])
return objects.PeriodList, 'getTimetable', parameters
@result_wrapper
def rooms(self):
"""Get all rooms of a school.
:rtype: :py:class:`webuntis.objects.RoomList`
"""
return objects.RoomList, 'getRooms', {}
@result_wrapper
def schoolyears(self):
"""Get all schoolyears.
:rtype: :py:class:`webuntis.objects.SchoolyearList`
"""
return objects.SchoolyearList, 'getSchoolyears', {}
@result_wrapper
def subjects(self):
"""Get all subjects.
:rtype: :py:class:`webuntis.objects.SubjectList`
"""
return objects.SubjectList, 'getSubjects', {}
@result_wrapper
def teachers(self):
"""Get all teachers.
:rtype: :py:class:`webuntis.objects.TeacherList`
"""
return objects.TeacherList, 'getTeachers', {}
@result_wrapper
def statusdata(self):
"""Information about lesson types and period codes, specifically about
the colors used to highlight them in the web-interface of WebUntis.
:rtype: :py:class:`webuntis.objects.StatusData`
"""
return objects.StatusData, 'getStatusData', {}
@result_wrapper
def last_import_time(self):
"""Information about the last change made.
:rtype: py:class:`webuntis.objects.TimeStampObject`
"""
return objects.TimeStampObject, 'getLatestImportTime', {}
@result_wrapper
def substitutions(self, start, end, department_id=0):
"""Get all substitutions.
:type start: :py:class:`datetime.datetime` or :py:class:`datetime.date` or int
:param start: The beginning of the time period.
:type end: :py:class:`datetime.datetime` or :py:class:`datetime.date` or int
:param end: The end of the time period.
:param department_id: int, set to 0 for all departments or if not applicable
:rtype: :py:class:`webuntis.objects.SubstitutionList`
"""
parameters = self._create_date_param(end, start, departmentId=department_id)
return objects.SubstitutionList, 'getSubstitutions', parameters
@result_wrapper
def timegrid_units(self):
"""Information about the Timegrid.
:return:
:rtype: :py:class:`webuntis.objects.TimegridObject`
"""
return objects.TimegridObject, 'getTimegridUnits', {}
@result_wrapper
def students(self):
"""Get all students
:rtype: :py:class:`webuntis.objects.StudentsList`
"""
return objects.StudentsList, 'getStudents', {}
@result_wrapper
def exam_types(self):
"""Information about the Exam types.
        Requires additional rights: Master/Exam Types (Stammdaten / Pruefungsart).
:rtype: :py:class:`webuntis.objects.ExamTypeList`
"""
return objects.ExamTypeList, 'getExamTypes', {}
@result_wrapper
def exams(self, start, end, exam_type_id=0):
"""Information about the Exams.
:type start: :py:class:`datetime.datetime` or :py:class:`datetime.date` or int
:param start: The beginning of the time period.
:type end: :py:class:`datetime.datetime` or :py:class:`datetime.date` or int
:param end: The end of the time period.
:param exam_type_id: int - id of Exam, @TODO: allow examtype id/name
:rtype: :py:class:`webuntis.objects.ExamsList`
"""
parameters = self._create_date_param(end, start, examTypeId=exam_type_id)
return objects.ExamsList, 'getExams', parameters
@result_wrapper
def timetable_with_absences(self, start, end):
"""Information about the Exams.
:type start: :py:class:`datetime.datetime` or :py:class:`datetime.date` or int
:param start: The beginning of the time period.
:type end: :py:class:`datetime.datetime` or :py:class:`datetime.date` or int
:param end: The end of the time period.
:rtype: :py:class:`webuntis.objects.AbsencesList`
"""
parameters = {u'options': self._create_date_param(end, start)}
return objects.AbsencesList, 'getTimetableWithAbsences', parameters
@result_wrapper
def class_reg_events(self, start, end):
"""Information about the ClassRegEvents
:type start: :py:class:`datetime.datetime` or :py:class:`datetime.date` or int
:param start: The beginning of the time period.
:type end: :py:class:`datetime.datetime` or :py:class:`datetime.date` or int
:param end: The end of the time period.
:rtype: :py:class:`webuntis.objects.ClassRegEventList`
"""
parameters = self._create_date_param(end, start)
return objects.ClassRegEventList, 'getClassregEvents', parameters
# @TODO this is a copy of timetable()
@result_wrapper
def class_reg_event_for_id(self, start, end, **type_and_id):
"""Get the Information about the ClassRegEvents for a specific school class and time period.
:type start: :py:class:`datetime.datetime` or :py:class:`datetime.date` or int
:param start: The beginning of the time period.
:type end: :py:class:`datetime.datetime` or :py:class:`datetime.date` or int
:param end: The end of the time period.
:rtype: :py:class:`webuntis.objects.ClassRegEventList`
see timetable for the type_and_id parameter
:raises: :exc:`ValueError`, :exc:`TypeError`
"""
element_type_table = {
'klasse': 1,
'teacher': 2,
'subject': 3,
'room': 4,
'student': 5
}
invalid_type_error = TypeError(
'You have to specify exactly one of the following parameters by '
'keyword: ' +
(', '.join(element_type_table.keys()))
)
if len(type_and_id) != 1:
raise invalid_type_error
element_type, element_id = list(type_and_id.items())[0]
element_type = utils.userinput.string(element_type)
if element_type not in element_type_table:
raise invalid_type_error
# if we have to deal with an object in element_id,
# its id gets placed here anyway
parameters = self._create_date_param(end, start,
id=int(element_id), type=element_type_table[element_type])
return objects.ClassRegEventList, 'getClassregEvents', parameters
@result_wrapper
def class_reg_categories(self):
"""Information about the Request remark categories
:rtype: :py:class:`webuntis.objects.ClassRegClassRegCategoryList`
"""
return objects.ClassRegCategoryList, 'getClassregCategories', {}
@result_wrapper
def class_reg_category_groups(self):
"""Information about the Request remark categories groups
:rtype: :py:class:`webuntis.objects.ClassRegClassRegCategoryGroupList`
"""
return objects.ClassRegCategoryGroupList, 'getClassregCategoryGroups', {}
@staticmethod
def _create_date_param(end, start, **kwargs):
json_start = utils.datetime_utils.format_date(start)
json_end = utils.datetime_utils.format_date(end)
if json_start > json_end:
raise ValueError('Start can\'t be later than the end.')
parameters = dict({
'startDate': json_start,
'endDate': json_end,
}, **kwargs)
return parameters
def get_student(self, surname, fore_name, dob=0):
"""
Search for a student by name
:param surname: family name
:type surname: str
:param fore_name: fore name
:type fore_name: str
        :param dob: date of birth; use 0 if unknown (expected unit/format is not documented)
:type dob: int
:return: a dummy StudentObject with just the id filled
:raises: :exc:`KeyError`
"""
s = self._search(surname=surname, fore_name=fore_name, dob=dob, what=5)
id = s._data
if not id:
raise KeyError("Student not found")
data = {"id": id, "name": surname, "longName": surname, "foreName": fore_name}
return objects.StudentObject(data=data, parent=s._parent, session=s._session)
def get_teacher(self, surname, fore_name, dob=0):
"""
Search for a teacher by name
:param surname: family name
:type surname: str
:param fore_name: fore name
:type fore_name: str
        :param dob: date of birth; use 0 if unknown (expected unit/format is not documented)
:type dob: int
:return: a dummy TeacherObject with just the id and name filled
:raises: :exc:`KeyError`
"""
t = self._search(surname=surname, fore_name=fore_name, dob=dob, what=2)
id = t._data
if not id:
raise KeyError("Teacher not found")
data = {"id": id, "name": surname, "longName": surname, "foreName": fore_name, "title": ""}
return objects.TeacherObject(data=data, parent=t._parent, session=t._session)
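    # Usage sketch for get_student()/get_teacher() above (illustrative; both
    # raise KeyError when the person is not found):
    #
    #   student = s.get_student('Muster', 'Max')          # dob unknown -> 0
    #   teacher = s.get_teacher('Musterfrau', 'Erika')
    #   tt = s.timetable(student=student, start=monday, end=friday)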
@result_wrapper
def _search(self, surname, fore_name, dob=0, what=-1):
"""
search for student or teacher
:rtype: :py:class:`webuntis.objects._OnlyID`
"""
return objects.Result, 'getPersonId', {
"sn": surname, "fn": fore_name, "dob": dob, "type": what
}
class Session(JSONRPCSession, ResultWrapperMixin):
"""The origin of everything you want to do with the WebUntis API. Can be
used as a context-manager to provide automatic log-out.
Configuration can be set with keyword arguments when initializing
:py:class:`Session`. Unless noted otherwise, they get saved in a dictionary
located in the instance's :py:attr:`config` attribute and can be modified
afterwards.
:type username: str
:param username: The username used for the API.
:type password: str
:param password: The password used for the API.
:type server: str
:param server: A host name, a URL, or a URL without path.
::
s = webuntis.Session(..., server='thalia.webuntis.com')
# 'https://thalia.webuntis.com/WebUntis/jsonrpc.do'
# Want to disable SSL?
# make sure there's NO SLASH at the end!
s.config['server'] = 'http://thalia.webuntis.com'
# 'http://thalia.webuntis.com/WebUntis/jsonrpc.do'
# or maybe use a completely different API endpoint?
s.config['server'] = 'http://thalia.webuntis.com/WebUntis/jsonrpc2.do'
# 'http://thalia.webuntis.com/WebUntis/jsonrpc2.do'
# or just change the path?
s.config['server'] = 'thalia.webuntis.com/WebUntis/jsonrpc2.do'
# 'https://thalia.webuntis.com/WebUntis/jsonrpc2.do'
s.config['server'] = '!"$%/WebUntis/jsonrpc.do'
# ValueError: Not a valid hostname
:type school: str
:param school: A valid school name.
:type useragent: str
:param useragent: A string containing a useragent. Please include useful
information, such as an email address, for the server maintainer. Just
like you would do with the HTTP useragents of bots.
:type cachelen: int
:param cachelen: The maximum size of the internal cache. All results are
saved in it, but they only get used if you set the ``from_cache``
parameter on a session method to ``True``. This parameter is not saved
in the configuration dictionary.
::
s.timetable(klasse=123) # saves in cache
s.timetable(klasse=123) # fetch data again, override old value
s.timetable(klasse=123, from_cache=True) # get directly from cache
The reason this cache was added is that the API only allows you to
fetch a whole list of objects (teachers/schoolclasses/...), not single
ones. It would seriously harm performance to fetch the whole list each
    time we want information about a single object. Without the cache, I
    sometimes experienced a performance decrease of about twenty seconds, so I
    wouldn't set the ``cachelen`` to anything smaller than ``5``.
Default value is ``20``.
You can clear the cache using::
s.cache.clear('timetable') # clears all cached timetables
s.cache.clear() # clears everything from the cache
:type jsessionid: str
:param jsessionid: The session key to use. You usually shouldn't touch
this.
:type login_repeat: int
    :param login_repeat: The number of times `python-webuntis` should try to
        log in when it finds no session or an expired one. Defaults to ``0``,
        meaning it will not retry automatically.
:type use_cache: bool
:param use_cache: always use the cache
"""
cache = None
'''Contains the caching dictionary for requests.'''
# Repeated here because sphinx doesn't recognize it when defined in
# JSONRPCSession:
config = None
'''The config dictionary, filled with most keyword arguments from
initialization.'''
def __init__(self, **config):
if 'use_cache' in config:
result_wrapper.session_use_cache = bool(config['use_cache'])
del config['use_cache']
cachelen = config.pop('cachelen', 20)
self.cache = utils.SessionCache(maxlen=cachelen)
JSONRPCSession.__init__(self, **config)
| {
"content_hash": "cdc0b7e37c75fccef70919e09c2a3dca",
"timestamp": "",
"source": "github",
"line_count": 607,
"max_line_length": 103,
"avg_line_length": 34.38550247116969,
"alnum_prop": 0.6063146799540053,
"repo_name": "maphy-psd/python-webuntis",
"id": "50a862d3032f7b7b9440654552754240ee2b86e0",
"size": "20872",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "webuntis/session.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "51"
},
{
"name": "Python",
"bytes": "75861"
}
],
"symlink_target": ""
} |
"""Provides a variety of device interactions with power.
"""
# pylint: disable=unused-argument
import collections
import contextlib
import csv
import logging
from pylib import constants
from pylib.device import decorators
from pylib.device import device_errors
from pylib.device import device_utils
from pylib.utils import timeout_retry
_DEFAULT_TIMEOUT = 30
_DEFAULT_RETRIES = 3
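# Each profile below bundles the shell snippets used to enable/disable
# charging on a specific device model plus sysfs paths for its fuel gauge
# readings; a field is None when the model does not support that capability.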
_DEVICE_PROFILES = [
{
'name': 'Nexus 4',
'witness_file': '/sys/module/pm8921_charger/parameters/disabled',
'enable_command': (
'echo 0 > /sys/module/pm8921_charger/parameters/disabled && '
'dumpsys battery reset'),
'disable_command': (
'echo 1 > /sys/module/pm8921_charger/parameters/disabled && '
'dumpsys battery set ac 0 && dumpsys battery set usb 0'),
'charge_counter': None,
'voltage': None,
'current': None,
},
{
'name': 'Nexus 5',
# Nexus 5
# Setting the HIZ bit of the bq24192 causes the charger to actually ignore
# energy coming from USB. Setting the power_supply offline just updates the
# Android system to reflect that.
'witness_file': '/sys/kernel/debug/bq24192/INPUT_SRC_CONT',
'enable_command': (
'echo 0x4A > /sys/kernel/debug/bq24192/INPUT_SRC_CONT && '
'echo 1 > /sys/class/power_supply/usb/online &&'
'dumpsys battery reset'),
'disable_command': (
'echo 0xCA > /sys/kernel/debug/bq24192/INPUT_SRC_CONT && '
'chmod 644 /sys/class/power_supply/usb/online && '
'echo 0 > /sys/class/power_supply/usb/online && '
'dumpsys battery set ac 0 && dumpsys battery set usb 0'),
'charge_counter': None,
'voltage': None,
'current': None,
},
{
'name': 'Nexus 6',
'witness_file': None,
'enable_command': (
'echo 1 > /sys/class/power_supply/battery/charging_enabled && '
'dumpsys battery reset'),
'disable_command': (
'echo 0 > /sys/class/power_supply/battery/charging_enabled && '
'dumpsys battery set ac 0 && dumpsys battery set usb 0'),
'charge_counter': (
'/sys/class/power_supply/max170xx_battery/charge_counter_ext'),
'voltage': '/sys/class/power_supply/max170xx_battery/voltage_now',
'current': '/sys/class/power_supply/max170xx_battery/current_now',
},
{
'name': 'Nexus 9',
'witness_file': None,
'enable_command': (
'echo Disconnected > '
'/sys/bus/i2c/drivers/bq2419x/0-006b/input_cable_state && '
'dumpsys battery reset'),
'disable_command': (
'echo Connected > '
'/sys/bus/i2c/drivers/bq2419x/0-006b/input_cable_state && '
'dumpsys battery set ac 0 && dumpsys battery set usb 0'),
'charge_counter': (
'/sys/class/power_supply/max170xx_battery/charge_counter_ext'),
'voltage': '/sys/class/power_supply/max170xx_battery/voltage_now',
'current': '/sys/class/power_supply/max170xx_battery/current_now',
},
{
'name': 'Nexus 10',
'witness_file': None,
'enable_command': None,
'disable_command': None,
'charge_counter': (
'/sys/class/power_supply/ds2784-fuelgauge/charge_counter_ext'),
'voltage': '/sys/class/power_supply/ds2784-fuelgauge/voltage_now',
'current': '/sys/class/power_supply/ds2784-fuelgauge/current_now',
},
]
# The list of useful dumpsys columns.
# Index of the column containing the format version.
_DUMP_VERSION_INDEX = 0
# Index of the column containing the type of the row.
_ROW_TYPE_INDEX = 3
# Index of the column containing the uid.
_PACKAGE_UID_INDEX = 4
# Index of the column containing the application package.
_PACKAGE_NAME_INDEX = 5
# The column containing the uid of the power data.
_PWI_UID_INDEX = 1
# The column containing the type of consumption. Only consumption since the
# last charge is of interest here.
_PWI_AGGREGATION_INDEX = 2
# The column containing the amount of power used, in mAh.
_PWI_POWER_CONSUMPTION_INDEX = 5
class BatteryUtils(object):
def __init__(self, device, default_timeout=_DEFAULT_TIMEOUT,
default_retries=_DEFAULT_RETRIES):
"""BatteryUtils constructor.
Args:
device: A DeviceUtils instance.
default_timeout: An integer containing the default number of seconds to
wait for an operation to complete if no explicit value
is provided.
default_retries: An integer containing the default number or times an
operation should be retried on failure if no explicit
value is provided.
Raises:
TypeError: If it is not passed a DeviceUtils instance.
"""
if not isinstance(device, device_utils.DeviceUtils):
raise TypeError('Must be initialized with DeviceUtils object.')
self._device = device
self._cache = device.GetClientCache(self.__class__.__name__)
self._default_timeout = default_timeout
self._default_retries = default_retries
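  # Typical usage, as a sketch (assumes |device| is an attached DeviceUtils
  # instance; PowerMeasurement and GetPowerData are defined further down):
  #   battery = BatteryUtils(device)
  #   with battery.PowerMeasurement():
  #     RunWorkload()  # placeholder for whatever should be measured
  #   power_data = battery.GetPowerData()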
@decorators.WithTimeoutAndRetriesFromInstance()
def SupportsFuelGauge(self, timeout=None, retries=None):
"""Detect if fuel gauge chip is present.
Args:
timeout: timeout in seconds
retries: number of retries
Returns:
True if known fuel gauge files are present.
False otherwise.
"""
self._DiscoverDeviceProfile()
    return (self._cache['profile']['enable_command'] is not None
            and self._cache['profile']['charge_counter'] is not None)
@decorators.WithTimeoutAndRetriesFromInstance()
def GetFuelGaugeChargeCounter(self, timeout=None, retries=None):
"""Get value of charge_counter on fuel gauge chip.
Device must have charging disabled for this, not just battery updates
    disabled. The only device this currently works with is the Nexus 5.
Args:
timeout: timeout in seconds
retries: number of retries
Returns:
value of charge_counter for fuel gauge chip in units of nAh.
Raises:
device_errors.CommandFailedError: If fuel gauge chip not found.
"""
if self.SupportsFuelGauge():
return int(self._device.ReadFile(
self._cache['profile']['charge_counter']))
raise device_errors.CommandFailedError(
'Unable to find fuel gauge.')
@decorators.WithTimeoutAndRetriesFromInstance()
def GetNetworkData(self, package, timeout=None, retries=None):
"""Get network data for specific package.
Args:
package: package name you want network data for.
timeout: timeout in seconds
retries: number of retries
Returns:
      Tuple of (sent_data, received_data)
None if no network data found
"""
# If device_utils clears cache, cache['uids'] doesn't exist
if 'uids' not in self._cache:
self._cache['uids'] = {}
if package not in self._cache['uids']:
self.GetPowerData()
if package not in self._cache['uids']:
logging.warning('No UID found for %s. Can\'t get network data.',
package)
return None
network_data_path = '/proc/uid_stat/%s/' % self._cache['uids'][package]
try:
send_data = int(self._device.ReadFile(network_data_path + 'tcp_snd'))
# If ReadFile throws exception, it means no network data usage file for
# package has been recorded. Return 0 sent and 0 received.
except device_errors.AdbShellCommandFailedError:
logging.warning('No sent data found for package %s', package)
send_data = 0
try:
recv_data = int(self._device.ReadFile(network_data_path + 'tcp_rcv'))
except device_errors.AdbShellCommandFailedError:
logging.warning('No received data found for package %s', package)
recv_data = 0
return (send_data, recv_data)
@decorators.WithTimeoutAndRetriesFromInstance()
def GetPowerData(self, timeout=None, retries=None):
"""Get power data for device.
Args:
timeout: timeout in seconds
retries: number of retries
Returns:
Dict of power data, keyed on package names.
{
package_name: {
'uid': uid,
'data': [1,2,3]
},
}
"""
if 'uids' not in self._cache:
self._cache['uids'] = {}
dumpsys_output = self._device.RunShellCommand(
['dumpsys', 'batterystats', '-c'], check_return=True)
csvreader = csv.reader(dumpsys_output)
pwi_entries = collections.defaultdict(list)
for entry in csvreader:
if entry[_DUMP_VERSION_INDEX] not in ['8', '9']:
# Wrong dumpsys version.
raise device_errors.DeviceVersionError(
'Dumpsys version must be 8 or 9. %s found.'
% entry[_DUMP_VERSION_INDEX])
if _ROW_TYPE_INDEX < len(entry) and entry[_ROW_TYPE_INDEX] == 'uid':
current_package = entry[_PACKAGE_NAME_INDEX]
if (self._cache['uids'].get(current_package)
and self._cache['uids'].get(current_package)
!= entry[_PACKAGE_UID_INDEX]):
raise device_errors.CommandFailedError(
            'Package %s found multiple times with different UIDs %s and %s'
% (current_package, self._cache['uids'][current_package],
entry[_PACKAGE_UID_INDEX]))
self._cache['uids'][current_package] = entry[_PACKAGE_UID_INDEX]
elif (_PWI_POWER_CONSUMPTION_INDEX < len(entry)
and entry[_ROW_TYPE_INDEX] == 'pwi'
and entry[_PWI_AGGREGATION_INDEX] == 'l'):
pwi_entries[entry[_PWI_UID_INDEX]].append(
float(entry[_PWI_POWER_CONSUMPTION_INDEX]))
return {p: {'uid': uid, 'data': pwi_entries[uid]}
for p, uid in self._cache['uids'].iteritems()}
@decorators.WithTimeoutAndRetriesFromInstance()
def GetPackagePowerData(self, package, timeout=None, retries=None):
"""Get power data for particular package.
Args:
package: Package to get power data on.
returns:
Dict of UID and power data.
{
'uid': uid,
'data': [1,2,3]
}
None if the package is not found in the power data.
"""
return self.GetPowerData().get(package)
@decorators.WithTimeoutAndRetriesFromInstance()
def GetBatteryInfo(self, timeout=None, retries=None):
"""Gets battery info for the device.
Args:
timeout: timeout in seconds
retries: number of retries
Returns:
A dict containing various battery information as reported by dumpsys
battery.
"""
result = {}
# Skip the first line, which is just a header.
for line in self._device.RunShellCommand(
['dumpsys', 'battery'], check_return=True)[1:]:
# If usb charging has been disabled, an extra line of header exists.
if 'UPDATES STOPPED' in line:
logging.warning('Dumpsys battery not receiving updates. '
'Run dumpsys battery reset if this is in error.')
elif ':' not in line:
logging.warning('Unknown line found in dumpsys battery: "%s"', line)
else:
k, v = line.split(':', 1)
result[k.strip()] = v.strip()
return result
@decorators.WithTimeoutAndRetriesFromInstance()
def GetCharging(self, timeout=None, retries=None):
"""Gets the charging state of the device.
Args:
timeout: timeout in seconds
retries: number of retries
Returns:
True if the device is charging, false otherwise.
"""
battery_info = self.GetBatteryInfo()
for k in ('AC powered', 'USB powered', 'Wireless powered'):
if (k in battery_info and
battery_info[k].lower() in ('true', '1', 'yes')):
return True
return False
@decorators.WithTimeoutAndRetriesFromInstance()
def SetCharging(self, enabled, timeout=None, retries=None):
"""Enables or disables charging on the device.
Args:
enabled: A boolean indicating whether charging should be enabled or
disabled.
timeout: timeout in seconds
retries: number of retries
Raises:
device_errors.CommandFailedError: If method of disabling charging cannot
be determined.
"""
self._DiscoverDeviceProfile()
if not self._cache['profile']['enable_command']:
raise device_errors.CommandFailedError(
'Unable to find charging commands.')
if enabled:
command = self._cache['profile']['enable_command']
else:
command = self._cache['profile']['disable_command']
def set_and_verify_charging():
self._device.RunShellCommand(command, check_return=True)
return self.GetCharging() == enabled
timeout_retry.WaitFor(set_and_verify_charging, wait_period=1)
# TODO(rnephew): Make private when all use cases can use the context manager.
@decorators.WithTimeoutAndRetriesFromInstance()
def DisableBatteryUpdates(self, timeout=None, retries=None):
"""Resets battery data and makes device appear like it is not
charging so that it will collect power data since last charge.
Args:
timeout: timeout in seconds
retries: number of retries
Raises:
device_errors.CommandFailedError: When resetting batterystats fails to
reset power values.
device_errors.DeviceVersionError: If device is not L or higher.
"""
def battery_updates_disabled():
return self.GetCharging() is False
self._ClearPowerData()
self._device.RunShellCommand(['dumpsys', 'battery', 'set', 'ac', '0'],
check_return=True)
self._device.RunShellCommand(['dumpsys', 'battery', 'set', 'usb', '0'],
check_return=True)
timeout_retry.WaitFor(battery_updates_disabled, wait_period=1)
# TODO(rnephew): Make private when all use cases can use the context manager.
@decorators.WithTimeoutAndRetriesFromInstance()
def EnableBatteryUpdates(self, timeout=None, retries=None):
"""Restarts device charging so that dumpsys no longer collects power data.
Args:
timeout: timeout in seconds
retries: number of retries
Raises:
device_errors.DeviceVersionError: If device is not L or higher.
"""
def battery_updates_enabled():
return (self.GetCharging()
or not bool('UPDATES STOPPED' in self._device.RunShellCommand(
['dumpsys', 'battery'], check_return=True)))
self._device.RunShellCommand(['dumpsys', 'battery', 'reset'],
check_return=True)
timeout_retry.WaitFor(battery_updates_enabled, wait_period=1)
@contextlib.contextmanager
def BatteryMeasurement(self, timeout=None, retries=None):
"""Context manager that enables battery data collection. It makes
the device appear to stop charging so that dumpsys will start collecting
power data since last charge. Once the with block is exited, charging is
resumed and power data since last charge is no longer collected.
Only for devices L and higher.
Example usage:
with BatteryMeasurement():
browser_actions()
get_power_data() # report usage within this block
after_measurements() # Anything that runs after power
# measurements are collected
Args:
timeout: timeout in seconds
retries: number of retries
Raises:
device_errors.DeviceVersionError: If device is not L or higher.
"""
if (self._device.build_version_sdk <
constants.ANDROID_SDK_VERSION_CODES.LOLLIPOP):
raise device_errors.DeviceVersionError('Device must be L or higher.')
try:
self.DisableBatteryUpdates(timeout=timeout, retries=retries)
yield
finally:
self.EnableBatteryUpdates(timeout=timeout, retries=retries)
def ChargeDeviceToLevel(self, level, wait_period=60):
"""Enables charging and waits for device to be charged to given level.
Args:
level: level of charge to wait for.
wait_period: time in seconds to wait between checking.
"""
self.SetCharging(True)
def device_charged():
battery_level = self.GetBatteryInfo().get('level')
if battery_level is None:
logging.warning('Unable to find current battery level.')
battery_level = 100
else:
logging.info('current battery level: %s', battery_level)
battery_level = int(battery_level)
return battery_level >= level
timeout_retry.WaitFor(device_charged, wait_period=wait_period)
def LetBatteryCoolToTemperature(self, target_temp, wait_period=60):
"""Lets device sit to give battery time to cool down
Args:
temp: maximum temperature to allow in tenths of degrees c.
wait_period: time in seconds to wait between checking.
"""
def cool_device():
temp = self.GetBatteryInfo().get('temperature')
if temp is None:
logging.warning('Unable to find current battery temperature.')
temp = 0
else:
logging.info('Current battery temperature: %s', temp)
return int(temp) <= target_temp
logging.info('Waiting for the device to cool down to %s (0.1 C)',
target_temp)
timeout_retry.WaitFor(cool_device, wait_period=wait_period)
@decorators.WithTimeoutAndRetriesFromInstance()
def TieredSetCharging(self, enabled, timeout=None, retries=None):
"""Enables or disables charging on the device.
Args:
enabled: A boolean indicating whether charging should be enabled or
disabled.
timeout: timeout in seconds
retries: number of retries
"""
if self.GetCharging() == enabled:
logging.warning('Device charging already in expected state: %s', enabled)
return
if enabled:
try:
self.SetCharging(enabled)
except device_errors.CommandFailedError:
logging.info('Unable to enable charging via hardware.'
' Falling back to software enabling.')
self.EnableBatteryUpdates()
else:
try:
self._ClearPowerData()
self.SetCharging(enabled)
except device_errors.CommandFailedError:
logging.info('Unable to disable charging via hardware.'
' Falling back to software disabling.')
self.DisableBatteryUpdates()
@contextlib.contextmanager
def PowerMeasurement(self, timeout=None, retries=None):
"""Context manager that enables battery power collection.
Once the with block is exited, charging is resumed. Will attempt to disable
charging at the hardware level, and if that fails will fall back to software
disabling of battery updates.
Only for devices L and higher.
Example usage:
with PowerMeasurement():
browser_actions()
get_power_data() # report usage within this block
after_measurements() # Anything that runs after power
# measurements are collected
Args:
timeout: timeout in seconds
retries: number of retries
"""
try:
self.TieredSetCharging(False, timeout=timeout, retries=retries)
yield
finally:
self.TieredSetCharging(True, timeout=timeout, retries=retries)
def _ClearPowerData(self):
"""Resets battery data and makes device appear like it is not
charging so that it will collect power data since last charge.
Returns:
True if power data cleared.
False if power data clearing is not supported (pre-L)
Raises:
device_errors.DeviceVersionError: If power clearing is supported,
but fails.
"""
if (self._device.build_version_sdk <
constants.ANDROID_SDK_VERSION_CODES.LOLLIPOP):
logging.warning('Dumpsys power data only available on 5.0 and above. '
'Cannot clear power data.')
return False
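    # Make the device register as plugged in before resetting batterystats so
    # that the 'since last charge' counters start from a clean, zeroed state.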
self._device.RunShellCommand(
['dumpsys', 'battery', 'set', 'usb', '1'], check_return=True)
self._device.RunShellCommand(
['dumpsys', 'battery', 'set', 'ac', '1'], check_return=True)
self._device.RunShellCommand(
['dumpsys', 'batterystats', '--reset'], check_return=True)
battery_data = self._device.RunShellCommand(
['dumpsys', 'batterystats', '--charged', '--checkin'],
check_return=True, large_output=True)
for line in battery_data:
l = line.split(',')
      if (len(l) > _PWI_POWER_CONSUMPTION_INDEX and l[_ROW_TYPE_INDEX] == 'pwi'
          and float(l[_PWI_POWER_CONSUMPTION_INDEX]) != 0):
self._device.RunShellCommand(
['dumpsys', 'battery', 'reset'], check_return=True)
raise device_errors.CommandFailedError(
            'Non-zero pwi value found after reset.')
self._device.RunShellCommand(
['dumpsys', 'battery', 'reset'], check_return=True)
return True
def _DiscoverDeviceProfile(self):
"""Checks and caches device information.
Returns:
True if profile is found, false otherwise.
"""
if 'profile' in self._cache:
return True
for profile in _DEVICE_PROFILES:
if self._device.product_model == profile['name']:
self._cache['profile'] = profile
return True
self._cache['profile'] = {
'name': None,
'witness_file': None,
'enable_command': None,
'disable_command': None,
'charge_counter': None,
'voltage': None,
'current': None,
}
return False
| {
"content_hash": "4bd53e6d2b2f597c85f12d7730301f99",
"timestamp": "",
"source": "github",
"line_count": 591,
"max_line_length": 80,
"avg_line_length": 35.7580372250423,
"alnum_prop": 0.6503099417971893,
"repo_name": "SaschaMester/delicium",
"id": "7c106a97ff3ceb512b278c462143664fb1c5b941",
"size": "21296",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "build/android/pylib/device/battery_utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AppleScript",
"bytes": "6973"
},
{
"name": "Arduino",
"bytes": "464"
},
{
"name": "Assembly",
"bytes": "23829"
},
{
"name": "Batchfile",
"bytes": "8451"
},
{
"name": "C",
"bytes": "4171711"
},
{
"name": "C++",
"bytes": "243066171"
},
{
"name": "CSS",
"bytes": "935112"
},
{
"name": "DM",
"bytes": "60"
},
{
"name": "Groff",
"bytes": "2494"
},
{
"name": "HTML",
"bytes": "27211018"
},
{
"name": "Java",
"bytes": "14285999"
},
{
"name": "JavaScript",
"bytes": "20413885"
},
{
"name": "Makefile",
"bytes": "23496"
},
{
"name": "Objective-C",
"bytes": "1725804"
},
{
"name": "Objective-C++",
"bytes": "9880229"
},
{
"name": "PHP",
"bytes": "97817"
},
{
"name": "PLpgSQL",
"bytes": "178732"
},
{
"name": "Perl",
"bytes": "63937"
},
{
"name": "Protocol Buffer",
"bytes": "478406"
},
{
"name": "Python",
"bytes": "8261413"
},
{
"name": "Shell",
"bytes": "482077"
},
{
"name": "Standard ML",
"bytes": "5034"
},
{
"name": "XSLT",
"bytes": "418"
},
{
"name": "nesC",
"bytes": "18347"
}
],
"symlink_target": ""
} |
import urllib
from datetime import datetime, timedelta
from django.contrib.auth.models import User
from django.contrib.sites.models import Site
from django.core.mail import send_mail
from django.core.urlresolvers import reverse, reverse_lazy
from django.db import models
from django.template.loader import render_to_string
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from hackingweek import settings
class Category(models.Model):
name = models.CharField(max_length=128)
class Meta:
verbose_name_plural = "categories"
def __unicode__(self):
return self.name
class Challenge(models.Model):
category = models.ForeignKey(Category)
name = models.CharField(max_length=128)
author = models.CharField(max_length=128)
body = models.CharField(max_length=4096)
    # TODO: Keys should be stored hashed in case somebody manages to dump the db
key = models.CharField(max_length=512)
def __unicode__(self):
return self.name
class UserProfile(models.Model):
"""Decorate the regular User model with extra information about the user"""
user = models.OneToOneField(User)
status = models.CharField(max_length=32)
organisation = models.CharField(max_length=128)
class Team(models.Model):
name = models.CharField(max_length=128, unique=True)
members = models.ManyToManyField(User, null=True, blank=True)
# Storing Team score data
score = models.IntegerField(default=0)
breakthroughs = models.IntegerField(default=0)
# The 'is_active' field denotes if the team has validated at least
# one challenge in order to know if it has to be considered as
# active in the contest.
is_active = models.BooleanField(default=False)
def __unicode__(self):
return self.name
class Validation(models.Model):
date = models.DateTimeField(default=timezone.now)
user = models.ForeignKey(User, related_name='validation_user')
team = models.ForeignKey(Team, related_name='validation_team')
challenge = models.ForeignKey(Challenge, related_name='validation_challenge')
class TeamJoinRequest(models.Model):
team = models.ForeignKey(Team)
requester = models.ForeignKey(User, related_name='teamjoinrequest_requester')
responder = models.ForeignKey(User, related_name='teamjoinrequest_responder')
created = models.DateTimeField(default=timezone.now)
key = models.CharField(max_length=64, unique=True)
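    # Lifecycle sketch: create() builds a keyed request (or returns None when an
    # identical one already exists), send_join_request() mails the responder an
    # accept URL containing the key, send_join_accept() notifies every team
    # member, and key_expired() drops requests older than the configured window.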
@classmethod
def create(cls, request=None, **kwargs):
# Check if a similar request already exists before proceeding
try:
_object = cls.objects.get(requester=kwargs['requester'],
responder=kwargs['responder'],
team=kwargs['team'])
# If the request has expired keep going
#if _object.key_expired():
# raise cls.DoesNotExist
# If not, return None
joinrequest = None
except cls.DoesNotExist:
from uuid import uuid1
kwargs['key'] = uuid1().hex
joinrequest = cls(**kwargs)
joinrequest.save()
return joinrequest
def send_join_request(self):
protocol = getattr(settings, "DEFAULT_HTTP_PROTOCOL", "http")
current_site = Site.objects.get_current()
accept_url = "{0}://{1}{2}".format(
protocol,
current_site.domain,
reverse('team_join_accept',
kwargs = {'pk': self.team.pk, 'key': self.key,}
)
)
ctx = {
"team" : self.team,
"username" : self.requester.username,
"current_site": current_site,
"accept_url" : accept_url,
}
subject = render_to_string("email/team_join_request_subject.txt", ctx)
message = render_to_string("email/team_join_request_message.txt", ctx)
send_mail(subject.rstrip(),
message,
settings.DEFAULT_FROM_EMAIL,
[self.responder.email])
def send_join_accept(self):
ctx = {
"team" : self.team.name,
"responder" : self.responder.username,
"requester" : self.requester.username,
"site": Site.objects.get_current().name,
}
subject = render_to_string("email/team_join_accept_subject.txt", ctx)
message = render_to_string("email/team_join_accept_message.txt", ctx)
for member in self.team.members.all():
send_mail(subject.rstrip(),
message,
settings.DEFAULT_FROM_EMAIL,
[member.email])
def key_expired(self):
expiration_date = self.created + \
timedelta(days=settings.TEAM_JOIN_REQUEST_EXPIRE_DAYS)
if expiration_date <= timezone.now():
self.delete()
return expiration_date <= timezone.now()
| {
"content_hash": "567d1f2779fa2ef8acfb7d440c4c0697",
"timestamp": "",
"source": "github",
"line_count": 147,
"max_line_length": 81,
"avg_line_length": 34.326530612244895,
"alnum_prop": 0.6236623067776457,
"repo_name": "perror/hackingweek",
"id": "7e654b4bed83f903f008ced5fd23a13ca4d627c3",
"size": "5046",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hackingweek/models.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "152334"
},
{
"name": "HTML",
"bytes": "43079"
},
{
"name": "JavaScript",
"bytes": "573"
},
{
"name": "Makefile",
"bytes": "2963"
},
{
"name": "Python",
"bytes": "70441"
}
],
"symlink_target": ""
} |
import elastictools.elasticclient
import elastictools.request
| {
"content_hash": "8655c87367d9cff877e12eb8902d4c3a",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 33,
"avg_line_length": 21,
"alnum_prop": 0.8888888888888888,
"repo_name": "skyhound/elastic-tools",
"id": "e0904f29500b23ac0ea5cd4020d7acc73ac74b4f",
"size": "63",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "elastictools/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "83"
},
{
"name": "Python",
"bytes": "20679"
}
],
"symlink_target": ""
} |
from typing import Any, Callable, Dict, List, Optional, TypeVar
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._virtual_machine_images_edge_zone_operations import (
build_get_request,
build_list_offers_request,
build_list_publishers_request,
build_list_request,
build_list_skus_request,
)
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class VirtualMachineImagesEdgeZoneOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.compute.v2021_03_01.aio.ComputeManagementClient`'s
:attr:`virtual_machine_images_edge_zone` attribute.
"""
models = _models
def __init__(self, *args, **kwargs) -> None:
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace_async
async def get(
self, location: str, edge_zone: str, publisher_name: str, offer: str, skus: str, version: str, **kwargs: Any
) -> _models.VirtualMachineImage:
"""Gets a virtual machine image in an edge zone.
:param location: The name of a supported Azure region. Required.
:type location: str
:param edge_zone: The name of the edge zone. Required.
:type edge_zone: str
:param publisher_name: A valid image publisher. Required.
:type publisher_name: str
:param offer: A valid image publisher offer. Required.
:type offer: str
:param skus: A valid image SKU. Required.
:type skus: str
:param version: A valid image SKU version. Required.
:type version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VirtualMachineImage or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2021_03_01.models.VirtualMachineImage
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-03-01")) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[_models.VirtualMachineImage]
request = build_get_request(
location=location,
edge_zone=edge_zone,
publisher_name=publisher_name,
offer=offer,
skus=skus,
version=version,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.CloudErrorAutoGenerated, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize("VirtualMachineImage", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {"url": "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/edgeZones/{edgeZone}/publishers/{publisherName}/artifacttypes/vmimage/offers/{offer}/skus/{skus}/versions/{version}"} # type: ignore
@distributed_trace_async
async def list(
self,
location: str,
edge_zone: str,
publisher_name: str,
offer: str,
skus: str,
expand: Optional[str] = None,
top: Optional[int] = None,
orderby: Optional[str] = None,
**kwargs: Any
) -> List[_models.VirtualMachineImageResource]:
"""Gets a list of all virtual machine image versions for the specified location, edge zone,
publisher, offer, and SKU.
:param location: The name of a supported Azure region. Required.
:type location: str
:param edge_zone: The name of the edge zone. Required.
:type edge_zone: str
:param publisher_name: A valid image publisher. Required.
:type publisher_name: str
:param offer: A valid image publisher offer. Required.
:type offer: str
:param skus: A valid image SKU. Required.
:type skus: str
:param expand: The expand expression to apply on the operation. Default value is None.
:type expand: str
:param top: An integer value specifying the number of images to return that matches supplied
values. Default value is None.
:type top: int
:param orderby: Specifies the order of the results returned. Formatted as an OData query.
Default value is None.
:type orderby: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: list of VirtualMachineImageResource or the result of cls(response)
:rtype: list[~azure.mgmt.compute.v2021_03_01.models.VirtualMachineImageResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-03-01")) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[List[_models.VirtualMachineImageResource]]
request = build_list_request(
location=location,
edge_zone=edge_zone,
publisher_name=publisher_name,
offer=offer,
skus=skus,
subscription_id=self._config.subscription_id,
expand=expand,
top=top,
orderby=orderby,
api_version=api_version,
template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.CloudErrorAutoGenerated, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize("[VirtualMachineImageResource]", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list.metadata = {"url": "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/edgeZones/{edgeZone}/publishers/{publisherName}/artifacttypes/vmimage/offers/{offer}/skus/{skus}/versions"} # type: ignore
@distributed_trace_async
async def list_offers(
self, location: str, edge_zone: str, publisher_name: str, **kwargs: Any
) -> List[_models.VirtualMachineImageResource]:
"""Gets a list of virtual machine image offers for the specified location, edge zone and
publisher.
:param location: The name of a supported Azure region. Required.
:type location: str
:param edge_zone: The name of the edge zone. Required.
:type edge_zone: str
:param publisher_name: A valid image publisher. Required.
:type publisher_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: list of VirtualMachineImageResource or the result of cls(response)
:rtype: list[~azure.mgmt.compute.v2021_03_01.models.VirtualMachineImageResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-03-01")) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[List[_models.VirtualMachineImageResource]]
request = build_list_offers_request(
location=location,
edge_zone=edge_zone,
publisher_name=publisher_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list_offers.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.CloudErrorAutoGenerated, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize("[VirtualMachineImageResource]", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_offers.metadata = {"url": "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/edgeZones/{edgeZone}/publishers/{publisherName}/artifacttypes/vmimage/offers"} # type: ignore
@distributed_trace_async
async def list_publishers(
self, location: str, edge_zone: str, **kwargs: Any
) -> List[_models.VirtualMachineImageResource]:
"""Gets a list of virtual machine image publishers for the specified Azure location and edge zone.
:param location: The name of a supported Azure region. Required.
:type location: str
:param edge_zone: The name of the edge zone. Required.
:type edge_zone: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: list of VirtualMachineImageResource or the result of cls(response)
:rtype: list[~azure.mgmt.compute.v2021_03_01.models.VirtualMachineImageResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-03-01")) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[List[_models.VirtualMachineImageResource]]
request = build_list_publishers_request(
location=location,
edge_zone=edge_zone,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list_publishers.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.CloudErrorAutoGenerated, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize("[VirtualMachineImageResource]", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_publishers.metadata = {"url": "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/edgeZones/{edgeZone}/publishers"} # type: ignore
@distributed_trace_async
async def list_skus(
self, location: str, edge_zone: str, publisher_name: str, offer: str, **kwargs: Any
) -> List[_models.VirtualMachineImageResource]:
"""Gets a list of virtual machine image SKUs for the specified location, edge zone, publisher, and
offer.
:param location: The name of a supported Azure region. Required.
:type location: str
:param edge_zone: The name of the edge zone. Required.
:type edge_zone: str
:param publisher_name: A valid image publisher. Required.
:type publisher_name: str
:param offer: A valid image publisher offer. Required.
:type offer: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: list of VirtualMachineImageResource or the result of cls(response)
:rtype: list[~azure.mgmt.compute.v2021_03_01.models.VirtualMachineImageResource]
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-03-01")) # type: str
cls = kwargs.pop("cls", None) # type: ClsType[List[_models.VirtualMachineImageResource]]
request = build_list_skus_request(
location=location,
edge_zone=edge_zone,
publisher_name=publisher_name,
offer=offer,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list_skus.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.CloudErrorAutoGenerated, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize("[VirtualMachineImageResource]", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_skus.metadata = {"url": "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/edgeZones/{edgeZone}/publishers/{publisherName}/artifacttypes/vmimage/offers/{offer}/skus"} # type: ignore
| {
"content_hash": "25f8f8c40a00e8ca8b834a944a1e9fca",
"timestamp": "",
"source": "github",
"line_count": 407,
"max_line_length": 242,
"avg_line_length": 43.958230958230956,
"alnum_prop": 0.6497121457716171,
"repo_name": "Azure/azure-sdk-for-python",
"id": "eb3ff6e4aff7256d9cbafd97a59274d15bc82a4d",
"size": "18391",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2021_03_01/aio/operations/_virtual_machine_images_edge_zone_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
from nose.plugins.attrib import attr
import unittest
#from pyon.ion.endpoint import ProcessRPCClient
from pyon.public import log, IonObject, RT, PRED, BadRequest
from pyon.util.int_test import IonIntegrationTestCase
from pyon.agent.agent import ResourceAgentClient
from pyon.util.context import LocalContextMixin
from coverage_model.coverage import GridDomain, GridShape, CRS
from coverage_model.basic_types import MutabilityEnum, AxisTypeEnum
from ion.services.dm.utility.granule.taxonomy import TaxyTool
from ion.services.dm.utility.granule_utils import time_series_domain
from ion.services.dm.inventory.dataset_management_service import DatasetManagementService
# MI imports
from ion.agents.instrument.instrument_agent import InstrumentAgentState
from interface.objects import AgentCommand
from interface.services.coi.iresource_registry_service import ResourceRegistryServiceClient
from interface.services.dm.ipubsub_management_service import PubsubManagementServiceClient
from interface.services.dm.idataset_management_service import DatasetManagementServiceClient
from interface.services.sa.idata_product_management_service import DataProductManagementServiceClient
from interface.services.sa.idata_acquisition_management_service import DataAcquisitionManagementServiceClient
# Agent parameters.
EDA_RESOURCE_ID = '123xyz'
EDA_NAME = 'ExampleEDA'
EDA_MOD = 'ion.agents.data.external_dataset_agent'
EDA_CLS = 'ExternalDatasetAgent'
class FakeProcess(LocalContextMixin):
"""
A fake process used because the test case is not an ion process.
"""
name = ''
id=''
process_type = ''
@attr('INT', group='sa')
class TestExternalDatasetAgentMgmt(IonIntegrationTestCase):
# DataHandler config
DVR_CONFIG = {
'dvr_mod' : 'ion.agents.data.handlers.base_data_handler',
'dvr_cls' : 'DummyDataHandler',
}
def setUp(self):
# Start container
self._start_container()
self.container.start_rel_from_url('res/deploy/r2deploy.yml')
log.debug("TestExternalDatasetAgentMgmt: started services")
# Now create client to DataProductManagementService
self.rrclient = ResourceRegistryServiceClient(node=self.container.node)
self.damsclient = DataAcquisitionManagementServiceClient(node=self.container.node)
self.pubsubcli = PubsubManagementServiceClient(node=self.container.node)
self.dpclient = DataProductManagementServiceClient(node=self.container.node)
self.datasetclient = DatasetManagementServiceClient(node=self.container.node)
# @unittest.skip('not yet working. fix activate_data_product_persistence()')
#@unittest.skip()
@unittest.skip('not working')
def test_activateDatasetAgent(self):
# Create ExternalDatasetModel
datsetModel_obj = IonObject(RT.ExternalDatasetModel, name='ExampleDatasetModel', description="ExampleDatasetModel", datset_type="FibSeries" )
try:
datasetModel_id = self.damsclient.create_external_dataset_model(datsetModel_obj)
except BadRequest as ex:
self.fail("failed to create new ExternalDatasetModel: %s" %ex)
log.debug("TestExternalDatasetAgentMgmt: new ExternalDatasetModel id = %s", str(datasetModel_id) )
# Create ExternalDatasetAgent
datasetAgent_obj = IonObject(RT.ExternalDatasetAgent, name='datasetagent007', description="datasetagent007", handler_module=EDA_MOD, handler_class=EDA_CLS )
try:
datasetAgent_id = self.damsclient.create_external_dataset_agent(datasetAgent_obj, datasetModel_id)
except BadRequest as ex:
self.fail("failed to create new ExternalDatasetAgent: %s" %ex)
log.debug("TestExternalDatasetAgentMgmt: new ExternalDatasetAgent id = %s", str(datasetAgent_id) )
# Create ExternalDataset
log.debug('TestExternalDatasetAgentMgmt: Create external dataset resource ')
extDataset_obj = IonObject(RT.ExternalDataset, name='ExtDataset', description="ExtDataset" )
try:
extDataset_id = self.damsclient.create_external_dataset(extDataset_obj, datasetModel_id)
except BadRequest as ex:
self.fail("failed to create new external dataset resource: %s" %ex)
log.debug("TestExternalDatasetAgentMgmt: new ExternalDataset id = %s ", str(extDataset_id))
#register the dataset as a data producer
dproducer_id = self.damsclient.register_external_data_set(extDataset_id)
# create a stream definition for the data from the ctd simulator
pdict_id = self.dataset_management.read_parameter_dictionary_by_name('ctd_parsed_param_dict', id_only=True)
ctd_stream_def_id = self.pubsubcli.create_stream_definition(name='SBE37_CDM', parameter_dictionary_id=pdict_id)
log.debug("TestExternalDatasetAgentMgmt: new Stream Definition id = %s", str(ctd_stream_def_id))
log.debug("TestExternalDatasetAgentMgmt: Creating new data product with a stream definition")
dp_obj = IonObject(RT.DataProduct,name='eoi dataset data',description=' stream test')
dp_obj = IonObject(RT.DataProduct,
name='DP1',
description='some new dp')
data_product_id1 = self.dpclient.create_data_product(dp_obj, ctd_stream_def_id)
log.debug("TestExternalDatasetAgentMgmt: new dp_id = %s", str(data_product_id1) )
self.damsclient.assign_data_product(input_resource_id=extDataset_id, data_product_id=data_product_id1)
#todo fix the problem here....
self.dpclient.activate_data_product_persistence(data_product_id=data_product_id1)
# Retrieve the id of the OUTPUT stream from the out Data Product
stream_ids, _ = self.rrclient.find_objects(data_product_id1, PRED.hasStream, None, True)
log.debug("TestExternalDatasetAgentMgmt: Data product streams1 = %s", str(stream_ids) )
stream_id = stream_ids[0]
# Build a taxonomy for the dataset
tx = TaxyTool()
tx.add_taxonomy_set('data', 'external_data')
# Augment the DVR_CONFIG with the necessary pieces
self.DVR_CONFIG['dh_cfg'] = {
'TESTING':True,
'stream_id':stream_id,#TODO: This should probably be a 'stream_config' dict with stream_name:stream_id members
'data_producer_id':dproducer_id,
# 'external_dataset_res':extDataset_obj, # Not needed - retrieved by EDA based on resource_id
'taxonomy':tx.dump(), #TODO: Currently does not support sets
'max_records':4,
}
# Create agent config.
self._stream_config = {}
agent_config = {
'driver_config' : self.DVR_CONFIG,
'stream_config' : self._stream_config,
'agent' : {'resource_id': EDA_RESOURCE_ID},
'test_mode' : True
}
extDatasetAgentInstance_obj = IonObject(RT.ExternalDatasetAgentInstance, name='DatasetAgentInstance', description="DatasetAgentInstance", dataset_driver_config = self.DVR_CONFIG, dataset_agent_config = agent_config)
extDatasetAgentInstance_id = self.damsclient.create_external_dataset_agent_instance(external_dataset_agent_instance=extDatasetAgentInstance_obj, external_dataset_agent_id=datasetAgent_id, external_dataset_id=extDataset_id)
log.debug("TestExternalDatasetAgentMgmt: Dataset agent instance obj: = %s", str(extDatasetAgentInstance_obj) )
log.debug("TestExternalDatasetAgentMgmt: Dataset agent instance id: = %s", str(extDatasetAgentInstance_id) )
#Check that the instance is currently not active
id, active = self.damsclient.retrieve_external_dataset_agent_instance(extDataset_id)
log.debug("TestExternalDatasetAgentMgmt: Dataset agent instance id: = %s active 1 = %s ", str(id), str(active) )
self.damsclient.start_external_dataset_agent_instance(extDatasetAgentInstance_id)
dataset_agent_instance_obj= self.damsclient.read_external_dataset_agent_instance(extDatasetAgentInstance_id)
log.debug("TestExternalDatasetAgentMgmt: Dataset agent instance obj: = %s", str(dataset_agent_instance_obj) )
# now the instance process should be active
id, active = self.damsclient.retrieve_external_dataset_agent_instance(extDataset_id)
log.debug("TestExternalDatasetAgentMgmt: Dataset agent instance id: = %s active 2 = %s ", str(id), str(active) )
# Start a resource agent client to talk with the instrument agent.
self._dsa_client = ResourceAgentClient(extDataset_id, process=FakeProcess())
        print 'TestExternalDatasetAgentMgmt: got ia client %s' % self._dsa_client
log.debug("TestExternalDatasetAgentMgmt: got dataset client %s", str(self._dsa_client))
# cmd=AgentCommand(command='initialize')
# _ = self._dsa_client.execute_agent(cmd)
#
# cmd = AgentCommand(command='go_active')
# _ = self._dsa_client.execute_agent(cmd)
#
# cmd = AgentCommand(command='run')
# _ = self._dsa_client.execute_agent(cmd)
#
# log.info('Send an unconstrained request for data (\'new data\')')
# cmd = AgentCommand(command='acquire_data')
# self._dsa_client.execute(cmd)
#
# log.info('Send a second unconstrained request for data (\'new data\'), should be rejected')
# cmd = AgentCommand(command='acquire_data')
# self._dsa_client.execute(cmd)
#
# cmd = AgentCommand(command='reset')
# _ = self._dsa_client.execute_agent(cmd)
# cmd = AgentCommand(command='get_current_state')
# retval = self._dsa_client.execute_agent(cmd)
# state = retval.result
# TODO: Think about what we really should be testing at this point
# The following is taken from ion.agents.data.test.test_external_dataset_agent.ExternalDatasetAgentTestBase.test_states()
# TODO: Do we also need to show data retrieval?
cmd = AgentCommand(command='get_current_state')
retval = self._dsa_client.execute_agent(cmd)
state = retval.result
self.assertEqual(state, InstrumentAgentState.UNINITIALIZED)
cmd = AgentCommand(command='initialize')
retval = self._dsa_client.execute_agent(cmd)
cmd = AgentCommand(command='get_current_state')
retval = self._dsa_client.execute_agent(cmd)
state = retval.result
self.assertEqual(state, InstrumentAgentState.INACTIVE)
cmd = AgentCommand(command='go_active')
retval = self._dsa_client.execute_agent(cmd)
cmd = AgentCommand(command='get_current_state')
retval = self._dsa_client.execute_agent(cmd)
state = retval.result
self.assertEqual(state, InstrumentAgentState.IDLE)
cmd = AgentCommand(command='run')
retval = self._dsa_client.execute_agent(cmd)
cmd = AgentCommand(command='get_current_state')
retval = self._dsa_client.execute_agent(cmd)
state = retval.result
self.assertEqual(state, InstrumentAgentState.OBSERVATORY)
cmd = AgentCommand(command='pause')
retval = self._dsa_client.execute_agent(cmd)
cmd = AgentCommand(command='get_current_state')
retval = self._dsa_client.execute_agent(cmd)
state = retval.result
self.assertEqual(state, InstrumentAgentState.STOPPED)
cmd = AgentCommand(command='resume')
retval = self._dsa_client.execute_agent(cmd)
cmd = AgentCommand(command='get_current_state')
retval = self._dsa_client.execute_agent(cmd)
state = retval.result
self.assertEqual(state, InstrumentAgentState.OBSERVATORY)
cmd = AgentCommand(command='clear')
retval = self._dsa_client.execute_agent(cmd)
cmd = AgentCommand(command='get_current_state')
retval = self._dsa_client.execute_agent(cmd)
state = retval.result
self.assertEqual(state, InstrumentAgentState.IDLE)
cmd = AgentCommand(command='run')
retval = self._dsa_client.execute_agent(cmd)
cmd = AgentCommand(command='get_current_state')
retval = self._dsa_client.execute_agent(cmd)
state = retval.result
self.assertEqual(state, InstrumentAgentState.OBSERVATORY)
cmd = AgentCommand(command='pause')
retval = self._dsa_client.execute_agent(cmd)
cmd = AgentCommand(command='get_current_state')
retval = self._dsa_client.execute_agent(cmd)
state = retval.result
self.assertEqual(state, InstrumentAgentState.STOPPED)
cmd = AgentCommand(command='clear')
retval = self._dsa_client.execute_agent(cmd)
cmd = AgentCommand(command='get_current_state')
retval = self._dsa_client.execute_agent(cmd)
state = retval.result
self.assertEqual(state, InstrumentAgentState.IDLE)
cmd = AgentCommand(command='run')
retval = self._dsa_client.execute_agent(cmd)
cmd = AgentCommand(command='get_current_state')
retval = self._dsa_client.execute_agent(cmd)
state = retval.result
self.assertEqual(state, InstrumentAgentState.OBSERVATORY)
cmd = AgentCommand(command='reset')
retval = self._dsa_client.execute_agent(cmd)
cmd = AgentCommand(command='get_current_state')
retval = self._dsa_client.execute_agent(cmd)
state = retval.result
self.assertEqual(state, InstrumentAgentState.UNINITIALIZED)
#-------------------------------
# Deactivate InstrumentAgentInstance
#-------------------------------
self.damsclient.stop_external_dataset_agent_instance(extDatasetAgentInstance_id)
def test_dataset_agent_prepare_support(self):
eda_sup = self.damsclient.prepare_external_dataset_agent_support()
eda_obj = IonObject(RT.ExternalDatasetAgent, name="ExternalDatasetAgent")
eda_id = self.damsclient.create_external_dataset_agent(eda_obj)
eda_sup = self.damsclient.prepare_external_dataset_agent_support(external_dataset_agent_id=eda_id)
edai_sup = self.damsclient.prepare_external_dataset_agent_instance_support()
edai_obj = IonObject(RT.ExternalDatasetAgentInstance, name="ExternalDatasetAgentInstance")
edai_id = self.damsclient.create_external_dataset_agent_instance(edai_obj)
edai_sup = self.damsclient.prepare_external_dataset_agent_instance_support(external_dataset_agent_instance_id=edai_id)
| {
"content_hash": "8c8ed6732fe654c90a529f2e646b22b5",
"timestamp": "",
"source": "github",
"line_count": 310,
"max_line_length": 230,
"avg_line_length": 46.55806451612903,
"alnum_prop": 0.6950045035682118,
"repo_name": "ooici/coi-services",
"id": "6d4d9c3849cf24b8614b36d94858b39a7623fb71",
"size": "14433",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ion/services/sa/acquisition/test/test_external_dataset_agent_mgmt.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "403012"
},
{
"name": "C++",
"bytes": "251803"
},
{
"name": "CSS",
"bytes": "689"
},
{
"name": "Erlang",
"bytes": "532"
},
{
"name": "JavaScript",
"bytes": "11627"
},
{
"name": "Objective-C",
"bytes": "8918"
},
{
"name": "Python",
"bytes": "7964384"
},
{
"name": "Shell",
"bytes": "9221"
},
{
"name": "nesC",
"bytes": "57712131"
}
],
"symlink_target": ""
} |
import asyncio
import logging
import signal
from contextlib import AsyncExitStack
from typing import Any
from urllib.parse import SplitResult
import __main__
from asgiref.sync import async_to_sync, sync_to_async
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError, CommandParser
from tornado import autoreload
from tornado.platform.asyncio import AsyncIOMainLoop
settings.RUNNING_INSIDE_TORNADO = True
if settings.PRODUCTION:
settings.SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
from zerver.lib.async_utils import NoAutoCreateEventLoopPolicy
from zerver.lib.debug import interactive_debug_listen
from zerver.tornado.application import create_tornado_application, setup_tornado_rabbitmq
from zerver.tornado.descriptors import set_current_port
from zerver.tornado.event_queue import (
add_client_gc_hook,
dump_event_queues,
get_wrapped_process_notification,
missedmessage_hook,
setup_event_queue,
)
from zerver.tornado.sharding import notify_tornado_queue_name
if settings.USING_RABBITMQ:
from zerver.lib.queue import TornadoQueueClient, set_queue_client
asyncio.set_event_loop_policy(NoAutoCreateEventLoopPolicy())
class Command(BaseCommand):
help = "Starts a Tornado Web server wrapping Django."
def add_arguments(self, parser: CommandParser) -> None:
parser.add_argument(
"addrport",
help="[port number or ipaddr:port]",
)
def handle(self, *args: Any, **options: Any) -> None:
interactive_debug_listen()
addrport = options["addrport"]
assert isinstance(addrport, str)
from tornado import httpserver
if addrport.isdigit():
addr, port = "", int(addrport)
else:
r = SplitResult("", addrport, "", "", "")
if r.port is None:
raise CommandError(f"{addrport!r} does not have a valid port number.")
addr, port = r.hostname or "", r.port
if not addr:
addr = "127.0.0.1"
if settings.DEBUG:
logging.basicConfig(
level=logging.INFO, format="%(asctime)s %(levelname)-8s %(message)s"
)
async def inner_run() -> None:
from django.utils import translation
AsyncIOMainLoop().install()
loop = asyncio.get_running_loop()
stop_fut = loop.create_future()
def stop() -> None:
if not stop_fut.done():
stop_fut.set_result(None)
def add_signal_handlers() -> None:
loop.add_signal_handler(signal.SIGINT, stop)
loop.add_signal_handler(signal.SIGTERM, stop)
def remove_signal_handlers() -> None:
loop.remove_signal_handler(signal.SIGINT)
loop.remove_signal_handler(signal.SIGTERM)
async with AsyncExitStack() as stack:
stack.push_async_callback(
sync_to_async(remove_signal_handlers, thread_sensitive=True)
)
await sync_to_async(add_signal_handlers, thread_sensitive=True)()
set_current_port(port)
translation.activate(settings.LANGUAGE_CODE)
# We pass display_num_errors=False, since Django will
# likely display similar output anyway.
self.check(display_num_errors=False)
print(f"Tornado server (re)started on port {port}")
if settings.USING_RABBITMQ:
queue_client = TornadoQueueClient()
set_queue_client(queue_client)
# Process notifications received via RabbitMQ
queue_name = notify_tornado_queue_name(port)
stack.callback(queue_client.close)
queue_client.start_json_consumer(
queue_name, get_wrapped_process_notification(queue_name)
)
# Application is an instance of Django's standard wsgi handler.
application = create_tornado_application()
# start tornado web server in single-threaded mode
http_server = httpserver.HTTPServer(application, xheaders=True)
stack.push_async_callback(http_server.close_all_connections)
stack.callback(http_server.stop)
http_server.listen(port, address=addr)
from zerver.tornado.ioloop_logging import logging_data
logging_data["port"] = str(port)
await setup_event_queue(http_server, port)
stack.callback(dump_event_queues, port)
add_client_gc_hook(missedmessage_hook)
if settings.USING_RABBITMQ:
setup_tornado_rabbitmq(queue_client)
if hasattr(__main__, "add_reload_hook"):
autoreload.start()
await stop_fut
# Monkey patch tornado.autoreload to prevent it from continuing
# to watch for changes after catching our SystemExit. Otherwise
# the user needs to press Ctrl+C twice.
__main__.wait = lambda: None
async_to_sync(inner_run, force_new_loop=True)()
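# --- Editor's illustrative sketch (not part of the original file) ---
# The SplitResult("", addrport, "", "", "") call in handle() reuses urllib's
# netloc parsing to split host and port. The addresses below are hypothetical:
#
#     from urllib.parse import SplitResult
#
#     r = SplitResult("", "127.0.0.1:9800", "", "", "")
#     assert (r.hostname, r.port) == ("127.0.0.1", 9800)
#
#     r = SplitResult("", "zulip.example.com", "", "", "")
#     assert r.port is None   # handle() raises CommandError in this case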
| {
"content_hash": "fe40f6caf44580e7a8b8503fba62e17a",
"timestamp": "",
"source": "github",
"line_count": 141,
"max_line_length": 89,
"avg_line_length": 37.744680851063826,
"alnum_prop": 0.6110484780157835,
"repo_name": "zulip/zulip",
"id": "e74ea0d96980e389d3f0b2b52d61432145d7865f",
"size": "5322",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "zerver/management/commands/runtornado.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "509211"
},
{
"name": "Dockerfile",
"bytes": "4219"
},
{
"name": "Emacs Lisp",
"bytes": "157"
},
{
"name": "HTML",
"bytes": "696430"
},
{
"name": "Handlebars",
"bytes": "384277"
},
{
"name": "JavaScript",
"bytes": "4098367"
},
{
"name": "Perl",
"bytes": "10163"
},
{
"name": "Puppet",
"bytes": "112433"
},
{
"name": "Python",
"bytes": "10336945"
},
{
"name": "Ruby",
"bytes": "3166"
},
{
"name": "Shell",
"bytes": "147162"
},
{
"name": "TypeScript",
"bytes": "286785"
}
],
"symlink_target": ""
} |
import requests
import begin
def search(seq, threshold=1):
url = "http://api.bigsi.io/search?threshold=%f&seq=%s" % (
float(threshold), seq)
results = requests.get(url).json()
samples = []
for i, j in list(results.values())[0]["results"].items():
samples.append(i)
return samples
@begin.start
def main(seq, threshold=1):
samples = search(seq, threshold)
for s in samples:
print(s)
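# --- Editor's illustrative sketch (not part of the original script) ---
# Calling search() directly from Python; the sequence and threshold below are
# hypothetical placeholder values, and a reachable api.bigsi.io is assumed.
#
#     from search import search
#
#     samples = search("CGGCGAGGAAGCGTTAAATCTCTTTCTGACG", threshold=0.9)
#     for sample_id in samples:
#         print(sample_id)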
| {
"content_hash": "b6a5faf0788e45ce8d0025f89c63a171",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 62,
"avg_line_length": 21.9,
"alnum_prop": 0.6187214611872146,
"repo_name": "Phelimb/cbg",
"id": "37f1d652b4505dd2537ac308d562079d76849da3",
"size": "438",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "example-scripts/search.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "46144"
},
{
"name": "Python",
"bytes": "151200"
},
{
"name": "R",
"bytes": "1806"
},
{
"name": "Ruby",
"bytes": "60866"
},
{
"name": "Shell",
"bytes": "1014"
}
],
"symlink_target": ""
} |
from .__version__ import __version__
from tensorx.layers import *
from tensorx.random import *
from tensorx.ops import *
from tensorx.math import *
from tensorx.metrics import *
from tensorx.train import *
from tensorx.init import *
from tensorx.loss import *
from tensorx.activation import *
from tensorx.logic import *
| {
"content_hash": "ac1fb87ac1e50f8d4cca40ecb6bf0b9e",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 36,
"avg_line_length": 26.833333333333332,
"alnum_prop": 0.7701863354037267,
"repo_name": "davidenunes/tensorx",
"id": "1a81ae9d0c79a0f816eef308ac80df3b71502616",
"size": "322",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorx/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "590753"
}
],
"symlink_target": ""
} |
__author__ = 'Shinichi Nakagawa'
from configparser import ConfigParser
from boto3.session import Session
import glob
import os
class Storage(object):
def __init__(self, config_file):
"""
init
:param config_file: Application config file
:return:
"""
self.config = ConfigParser()
self.config.read(config_file)
self.session = Session(
aws_access_key_id=self.config['aws']['access_key'],
aws_secret_access_key=self.config['aws']['secret_access_key'],
region_name=self.config['aws']['region']
)
self.s3 = self.session.resource('s3')
self.s3client = self.session.client('s3')
self.bucket_name = self.config['baseball_report']['bucket_name']
def upload_files(self, dir_path, extension, key_name, delimiter='/', delete=True):
"""
Upload files to S3.
:param dir_path: local input directory path
:param extension: upload file extension
:param key_name: bucket key name (prefix)
:param delimiter: path delimiter
:param delete: delete flag; remove local files after upload
:return: None
"""
for file_name in glob.glob(delimiter.join([dir_path, '*{extension}'.format(extension=extension)])):
remote_file_name = delimiter.join(
[
key_name,
file_name.replace('{dir_path}{delimiter}'.format(dir_path=dir_path, delimiter=delimiter), '')
]
)
self.s3client.upload_file(file_name, self.bucket_name, remote_file_name)
if delete:
os.remove(file_name)
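# --- Editor's illustrative sketch (not part of the original module) ---
# Typical usage, assuming a config file with the [aws] and [baseball_report]
# sections read in __init__; the paths and key name below are hypothetical.
#
#     storage = Storage('/path/to/config.ini')
#     # Upload every *.csv in /tmp/reports under the "daily" prefix and
#     # remove the local copies afterwards (delete=True is the default).
#     storage.upload_files('/tmp/reports', '.csv', 'daily')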
| {
"content_hash": "011ca8b26f3c3ff9c89fd083134797bd",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 113,
"avg_line_length": 35.41304347826087,
"alnum_prop": 0.576427255985267,
"repo_name": "Shinichi-Nakagawa/xp2015_baseball_tools",
"id": "436707bf7535d9db139410a2ebd3f1a5f2b2544c",
"size": "1676",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "service/storage.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "38461"
}
],
"symlink_target": ""
} |
import json
import platform
import textwrap
import warnings
from datetime import datetime
from . import config
from .compat import binary_type, string_types
from .exceptions import (
APIError, BadGateway, BadRequest, ConnectionError, Forbidden,
InternalError, MethodNotAllowed, NotFound, ServiceUnavailable,
Unauthorized,
)
from .timezone import utc
from .version import version
# TODO: use urlfetch for Google App Engine
try:
import requests
from requests.adapters import HTTPAdapter
from urllib3.util import Retry
retry = Retry(total=3, backoff_factor=0.2)
_session = requests.session()
_session.mount('http://', HTTPAdapter(max_retries=retry))
_session.mount('https://', HTTPAdapter(max_retries=retry))
_lib = 'requests'
_lib_ver = requests.__version__
except ImportError:
warnings.warn(
'\n\n' + textwrap.fill(
'requests library is not available, falling back to urllib2. '
'Note that urllib2 does NOT verify SSL certificates, so '
'we recommend installing requests, if possible.'
)
)
try:
from urllib2 import Request, urlopen, HTTPError, URLError, \
__version__ as urllib_ver
_lib = 'urllib2'
_lib_ver = urllib_ver
except ImportError:
from urllib.request import Request, urlopen
from urllib.error import HTTPError, URLError
_lib = 'urllib2'
_lib_ver = 'internal'
try:
from urllib.parse import urlencode
except ImportError:
from urllib import urlencode
def urlify(params, prefix=''):
from .models import CubObject
def is_number(s):
try:
float(s)
return True
except ValueError:
return False
result = {}
items = params.items() if isinstance(params, dict) else enumerate(params)
for k, v in items:
key = '%s[%s]' % (prefix, k) if prefix else k
if isinstance(v, (dict, list, tuple)):
result.update(urlify(v, key))
elif isinstance(v, bool):
result[key] = 'true' if v else 'false'
elif v is None:
result[key] = 'null'
elif isinstance(v, string_types):
if isinstance(v, binary_type):
# Must be utf-8, raise exception if it isn't
v = v.decode('utf-8')
if v in ('true', 'false', 'null') or is_number(v):
v = '"%s"' % v
result[key] = v
elif isinstance(v, CubObject):
result[key] = v.id
else:
result[key] = v
return result
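# Editor's note (illustrative, not part of the original module): urlify()
# flattens nested params into bracketed keys. A hypothetical call
#
#     urlify({'plan': {'name': 'pro', 'tags': ['a', 'b']}, 'active': True, 'count': '5'})
#
# returns
#
#     {'plan[name]': 'pro', 'plan[tags][0]': 'a', 'plan[tags][1]': 'b',
#      'active': 'true', 'count': '"5"'}
#
# Booleans become 'true'/'false', and numeric-looking strings are wrapped in
# quotes so the server can tell them apart from real numbers.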
def json_datetime_hook(dikt):
for k, v in dikt.items():
if isinstance(v, string_types) and v.endswith('Z'):
try:
dt = datetime.strptime(v, '%Y-%m-%dT%H:%M:%SZ')
except ValueError:
pass
else:
dt = dt.replace(tzinfo=utc)
dikt[k] = dt
return dikt
class API(object):
def __init__(self, key=None, base_url=None, timeout=None):
self.api_key = key or config.api_key
self.base_url = base_url or config.api_url
self.timeout = timeout or config.api_timeout
def url(self, url):
return '%s%s' % (self.base_url, url)
def requests_request(self, method, url, data, headers, timeout):
if method == 'get':
abs_url = '%s?%s' % (url, urlencode(data))
params = {}
else:
abs_url = url
params = data
response = _session.request(
method,
abs_url,
data=params,
headers=headers,
timeout=timeout
)
http_code = response.status_code
http_body = response.content
return http_code, http_body
def urllib2_request(self, method, url, data, headers, timeout):
params = urlencode(data)
if method == 'get':
abs_url = '%s?%s' % (url, params)
req = Request(abs_url, None, headers)
elif method == 'post':
req = Request(url, params.encode('ascii'), headers)
elif method == 'delete':
abs_url = '%s?%s' % (url, params)
req = Request(abs_url, None, headers)
req.get_method = lambda: 'DELETE'
else:
raise APIError('Unsupported method: %s' % method)
try:
response = urlopen(req, timeout=timeout)
http_code = response.code
http_body = response.read()
except HTTPError as e:
http_code = e.code
http_body = e.read()
return http_code, http_body
def request(self, method, url, params=None):
params = params or {}
if not self.api_key:
raise Unauthorized(
'You did not provide an API key. There are 2 ways to do it:\n'
'\n1) set it globally for all requests via '
'cub.config.api_key, like this:\n'
'\nimport cub\n'
'\ncub.config.api_key = "<your-key>"\n'
'\n'
'\n2) pass it to methods which communicate with the API as '
'keyword argument, like this:\n'
'\nfrom cub import User\n'
'\ninv = User.get(api_key="<your-key>", ...)'
)
abs_url = self.url(url)
client_info = {
'publisher': 'ivelum',
'platform': platform.platform(),
'language': 'Python %s' % platform.python_version(),
'httplib': '%s v%s' % (_lib, _lib_ver)
}
headers = {
'Authorization': 'Bearer %s' % self.api_key,
'User-Agent': 'Cub Client for Python, v%s' % version,
'Content-Type': 'application/x-www-form-urlencoded',
'X-Cub-User-Agent-Info': json.dumps(client_info)
}
data = urlify(params)
# Send request to API and handle communication errors
if _lib == 'requests':
try:
http_code, http_body = self.requests_request(
method,
abs_url,
data=data,
headers=headers,
timeout=self.timeout
)
except requests.RequestException:
raise ConnectionError(
'Cannot connect to Cub API using URL %s' % abs_url
)
elif _lib == 'urllib2':
try:
http_code, http_body = self.urllib2_request(
method,
abs_url,
data=data,
headers=headers,
timeout=self.timeout
)
except URLError:
raise ConnectionError(
'Cannot connect to Cub API using URL %s' % abs_url
)
try:
json_body = json.loads(
http_body.decode('utf-8'),
object_hook=json_datetime_hook,
)
except Exception:
raise APIError(
'Invalid response from the API: %s' % http_body,
http_code,
http_body
)
# Handle API errors
if http_code != 200:
try:
error = json_body['error']
err_desc = error['description']
err_params = {}
err_params.update(error.get('params', {}))
except (KeyError, TypeError, ValueError):
raise APIError(
'Invalid response from the API: %s' % json_body,
http_code,
http_body,
json_body
)
if http_code == 400:
err_msg = err_desc
if err_params:
err_msg += '\nParams:\n' + '\n'.join(
['%s: %s' % (k, v) for k, v in err_params.items()]
)
raise BadRequest(err_msg, http_code, http_body, json_body)
elif http_code == 401:
raise Unauthorized(err_desc, http_code, http_body, json_body)
elif http_code == 403:
raise Forbidden(err_desc, http_code, http_body, json_body)
elif http_code == 404:
raise NotFound(err_desc, http_code, http_body, json_body)
elif http_code == 405:
raise MethodNotAllowed(
err_desc, http_code, http_body, json_body)
elif http_code == 500:
raise InternalError(err_desc, http_code, http_body, json_body)
elif http_code == 502:
raise BadGateway(err_desc, http_code, http_body, json_body)
elif http_code == 503:
raise ServiceUnavailable(
err_desc, http_code, http_body, json_body
)
else:
raise APIError(err_desc, http_code, http_body, json_body)
return json_body
| {
"content_hash": "756837b13a1db77fb153bf633463a40d",
"timestamp": "",
"source": "github",
"line_count": 266,
"max_line_length": 78,
"avg_line_length": 33.96616541353384,
"alnum_prop": 0.5117874930824571,
"repo_name": "ivelum/cub-python",
"id": "8853591ec2e7aceb240f45c6e0c498fb76563ca9",
"size": "9035",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cub/transport.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "22449"
}
],
"symlink_target": ""
} |
""" smashlib.plugins.prefilter_url
"""
from smashlib.util.ipy import register_prefilter
from smashlib.prefilters.url import URLChecker, URLHandler
from smashlib.plugins import Plugin
from smashlib.util.ipy import uninstall_prefilter
class URLPlugin(Plugin):
""" installs the IPython prefilter which handles urls """
def install(self):
register_prefilter(URLChecker, URLHandler)
return self
def uninstall(self):
return uninstall_prefilter(URLChecker, URLHandler)
def load_ipython_extension(ip):
""" called by %load_ext magic """
return URLPlugin(get_ipython()).install()
def unload_ipython_extension(ip):
""" called by %unload_ext magic """
plugin = URLPlugin(ip)
plugin.uninstall()
| {
"content_hash": "10d0755d86b71706625ae4ef2040a2ed",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 61,
"avg_line_length": 25.689655172413794,
"alnum_prop": 0.7167785234899329,
"repo_name": "mattvonrocketstein/smash",
"id": "f30f0547bc8a95b1781c0f5466fc8ada7ca0c22d",
"size": "745",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "smashlib/plugins/prefilter_url.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "162188"
},
{
"name": "HTML",
"bytes": "32106"
},
{
"name": "JavaScript",
"bytes": "1615935"
},
{
"name": "Makefile",
"bytes": "550"
},
{
"name": "Python",
"bytes": "4934398"
},
{
"name": "Shell",
"bytes": "2990"
}
],
"symlink_target": ""
} |
import numpy as np
from cs231n.layers import *
from cs231n.layer_utils import *
class TwoLayerNet(object):
"""
A two-layer fully-connected neural network with ReLU nonlinearity and
softmax loss that uses a modular layer design. We assume an input dimension
of D, a hidden dimension of H, and perform classification over C classes.
The architecture should be affine - relu - affine - softmax.
Note that this class does not implement gradient descent; instead, it
will interact with a separate Solver object that is responsible for running
optimization.
The learnable parameters of the model are stored in the dictionary
self.params that maps parameter names to numpy arrays.
"""
def __init__(self, input_dim=3*32*32, hidden_dim=100, num_classes=10,
weight_scale=1e-3, reg=0.0):
"""
Initialize a new network.
Inputs:
- input_dim: An integer giving the size of the input
- hidden_dim: An integer giving the size of the hidden layer
- num_classes: An integer giving the number of classes to classify
- dropout: Scalar between 0 and 1 giving dropout strength.
- weight_scale: Scalar giving the standard deviation for random
initialization of the weights.
- reg: Scalar giving L2 regularization strength.
"""
self.params = {}
self.reg = reg
############################################################################
# TODO: Initialize the weights and biases of the two-layer net. Weights #
# should be initialized from a Gaussian with standard deviation equal to #
# weight_scale, and biases should be initialized to zero. All weights and #
# biases should be stored in the dictionary self.params, with first layer #
# weights and biases using the keys 'W1' and 'b1' and second layer weights #
# and biases using the keys 'W2' and 'b2'. #
############################################################################
self.params['W1'] = np.random.randn(input_dim, hidden_dim) * weight_scale
self.params['b1'] = np.zeros(hidden_dim)
self.params['W2'] = np.random.randn(hidden_dim, num_classes) * weight_scale
self.params['b2'] = np.zeros(num_classes)
############################################################################
# END OF YOUR CODE #
############################################################################
def loss(self, X, y=None):
"""
Compute loss and gradient for a minibatch of data.
Inputs:
- X: Array of input data of shape (N, d_1, ..., d_k)
- y: Array of labels, of shape (N,). y[i] gives the label for X[i].
Returns:
If y is None, then run a test-time forward pass of the model and return:
- scores: Array of shape (N, C) giving classification scores, where
scores[i, c] is the classification score for X[i] and class c.
If y is not None, then run a training-time forward and backward pass and
return a tuple of:
- loss: Scalar value giving the loss
- grads: Dictionary with the same keys as self.params, mapping parameter
names to gradients of the loss with respect to those parameters.
"""
############################################################################
# TODO: Implement the forward pass for the two-layer net, computing the #
# class scores for X and storing them in the scores variable. #
############################################################################
l1, l1_cache = affine_relu_forward(X, self.params['W1'], self.params['b1'])
l2, l2_cache = affine_forward(l1, self.params['W2'], self.params['b2'])
scores = l2
############################################################################
# END OF YOUR CODE #
############################################################################
# If y is None then we are in test mode so just return scores
if y is None:
return scores
grads = {}
loss, d_scores = softmax_loss(scores, y)
loss += np.sum(self.params['W1'] ** 2) * 0.5 * self.reg
loss += np.sum(self.params['W2'] ** 2) * 0.5 * self.reg
############################################################################
# TODO: Implement the backward pass for the two-layer net. Store the loss #
# in the loss variable and gradients in the grads dictionary. Compute data #
# loss using softmax, and make sure that grads[k] holds the gradients for #
# self.params[k]. Don't forget to add L2 regularization! #
# #
# NOTE: To ensure that your implementation matches ours and you pass the #
# automated tests, make sure that your L2 regularization includes a factor #
# of 0.5 to simplify the expression for the gradient. #
############################################################################
d_l2, grads['W2'], grads['b2'] = affine_backward(d_scores, l2_cache)
_, grads['W1'], grads['b1'] = affine_relu_backward(d_l2, l1_cache)
# add the gradient of the L2 regularization term
grads['W2'] += self.params['W2'] * self.reg
grads['W1'] += self.params['W1'] * self.reg
############################################################################
# END OF YOUR CODE #
############################################################################
return loss, grads
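# Editor's illustrative sketch (not part of the original assignment code):
# exercising TwoLayerNet on a tiny, hypothetical problem. In the assignment a
# Solver object drives training; here we only call loss() directly.
#
#     net = TwoLayerNet(input_dim=4, hidden_dim=5, num_classes=3,
#                       weight_scale=1e-2, reg=0.1)
#     X = np.random.randn(2, 4)          # 2 examples, 4 features
#     y = np.array([0, 2])               # their labels
#     scores = net.loss(X)               # test-time forward pass, shape (2, 3)
#     loss, grads = net.loss(X, y)       # training-time loss and gradients
#     assert set(grads) == {'W1', 'b1', 'W2', 'b2'}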
class FullyConnectedNet(object):
"""
A fully-connected neural network with an arbitrary number of hidden layers,
ReLU nonlinearities, and a softmax loss function. This will also implement
dropout and batch normalization as options. For a network with L layers,
the architecture will be
{affine - [batch norm] - relu - [dropout]} x (L - 1) - affine - softmax
where batch normalization and dropout are optional, and the {...} block is
repeated L - 1 times.
Similar to the TwoLayerNet above, learnable parameters are stored in the
self.params dictionary and will be learned using the Solver class.
"""
def GetWeightName(self, kth):
return 'W' + str(kth)
def GetBiasName(self, kth):
return 'B' + str(kth)
def __init__(self, hidden_dims, input_dim=3*32*32, num_classes=10,
dropout=0, use_batchnorm=False, reg=0.0,
weight_scale=1e-2, dtype=np.float32, seed=None):
"""
Initialize a new FullyConnectedNet.
Inputs:
- hidden_dims: A list of integers giving the size of each hidden layer.
- input_dim: An integer giving the size of the input.
- num_classes: An integer giving the number of classes to classify.
- dropout: Scalar between 0 and 1 giving dropout strength. If dropout=0 then
the network should not use dropout at all.
- use_batchnorm: Whether or not the network should use batch normalization.
- reg: Scalar giving L2 regularization strength.
- weight_scale: Scalar giving the standard deviation for random
initialization of the weights.
- dtype: A numpy datatype object; all computations will be performed using
this datatype. float32 is faster but less accurate, so you should use
float64 for numeric gradient checking.
- seed: If not None, then pass this random seed to the dropout layers. This
will make the dropout layers deterministic so we can gradient check the
model.
"""
self.use_batchnorm = use_batchnorm
self.use_dropout = dropout > 0
self.reg = reg
self.num_layers = 1 + len(hidden_dims)
self.dtype = dtype
self.params = {}
############################################################################
# TODO: Initialize the parameters of the network, storing all values in #
# the self.params dictionary. Store weights and biases for the first layer #
# in W1 and b1; for the second layer use W2 and b2, etc. Weights should be #
# initialized from a normal distribution with standard deviation equal to #
# weight_scale and biases should be initialized to zero. #
# #
# When using batch normalization, store scale and shift parameters for the #
# first layer in gamma1 and beta1; for the second layer use gamma2 and #
# beta2, etc. Scale parameters should be initialized to one and shift #
# parameters should be initialized to zero. #
############################################################################
for l in xrange(self.num_layers):
if l == 0:
input_d = input_dim
else:
input_d = hidden_dims[l-1]
if l < self.num_layers - 1:
out_d = hidden_dims[l]
else:
out_d = num_classes
self.params[self.GetWeightName(l)] = np.random.randn(input_d, out_d) * weight_scale
self.params[self.GetBiasName(l)] = np.zeros(out_d)
############################################################################
# END OF YOUR CODE #
############################################################################
# When using dropout we need to pass a dropout_param dictionary to each
# dropout layer so that the layer knows the dropout probability and the mode
# (train / test). You can pass the same dropout_param to each dropout layer.
self.dropout_param = {}
if self.use_dropout:
self.dropout_param = {'mode': 'train', 'p': dropout}
if seed is not None:
self.dropout_param['seed'] = seed
# With batch normalization we need to keep track of running means and
# variances, so we need to pass a special bn_param object to each batch
# normalization layer. You should pass self.bn_params[0] to the forward pass
# of the first batch normalization layer, self.bn_params[1] to the forward
# pass of the second batch normalization layer, etc.
self.bn_params = []
if self.use_batchnorm:
self.bn_params = [{'mode': 'train'} for i in xrange(self.num_layers - 1)]
# Cast all parameters to the correct datatype
for k, v in self.params.iteritems():
self.params[k] = v.astype(dtype)
def loss(self, X, y=None):
"""
Compute loss and gradient for the fully-connected net.
Input / output: Same as TwoLayerNet above.
"""
X = X.astype(self.dtype)
mode = 'test' if y is None else 'train'
# Set train/test mode for batchnorm params and dropout param since they
# behave differently during training and testing.
if self.dropout_param is not None:
self.dropout_param['mode'] = mode
if self.use_batchnorm:
for bn_param in self.bn_params:
bn_param['mode'] = mode
############################################################################
# TODO: Implement the forward pass for the fully-connected net, computing #
# the class scores for X and storing them in the scores variable. #
# #
# When using dropout, you'll need to pass self.dropout_param to each #
# dropout forward pass. #
# #
# When using batch normalization, you'll need to pass self.bn_params[0] to #
# the forward pass for the first batch normalization layer, pass #
# self.bn_params[1] to the forward pass for the second batch normalization #
# layer, etc. #
############################################################################
last_layer_output = X
cache = {}
for l in xrange(self.num_layers):
if l < (self.num_layers - 1):
if (not self.use_batchnorm) and (not self.use_dropout):
last_layer_output, cache[l] = affine_relu_forward(last_layer_output,
self.params[self.GetWeightName(l)], self.params[self.GetBiasName(l)])
else:
assert False
else:
last_layer_output, cache[l] = affine_forward(last_layer_output,
self.params[self.GetWeightName(l)], self.params[self.GetBiasName(l)])
scores = last_layer_output
############################################################################
# END OF YOUR CODE #
############################################################################
# If test mode return early
if mode == 'test':
return scores
############################################################################
# TODO: Implement the backward pass for the fully-connected net. Store the #
# loss in the loss variable and gradients in the grads dictionary. Compute #
# data loss using softmax, and make sure that grads[k] holds the gradients #
# for self.params[k]. Don't forget to add L2 regularization! #
# #
# When using batch normalization, you don't need to regularize the scale #
# and shift parameters. #
# #
# NOTE: To ensure that your implementation matches ours and you pass the #
# automated tests, make sure that your L2 regularization includes a factor #
# of 0.5 to simplify the expression for the gradient. #
############################################################################
grads = {}
loss, d_scores = softmax_loss(scores, y)
delta = d_scores
for l in xrange(self.num_layers - 1, -1, -1):
if l == self.num_layers - 1:
delta, grads[self.GetWeightName(l)], grads[self.GetBiasName(l)] = affine_backward(delta, cache[l])
else:
delta, grads[self.GetWeightName(l)], grads[self.GetBiasName(l)] = affine_relu_backward(delta, cache[l])
loss += np.sum(self.params[self.GetWeightName(l)] **2) * 0.5 * self.reg
grads[self.GetWeightName(l)] += self.params[self.GetWeightName(l)] * self.reg
############################################################################
# END OF YOUR CODE #
############################################################################
return loss, grads
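# Editor's illustrative sketch (not part of the original assignment code):
# a FullyConnectedNet with two hidden layers on hypothetical toy data. Only
# the plain affine-relu path is exercised, since the batchnorm/dropout
# branches above are still unimplemented (they hit `assert False`).
#
#     net = FullyConnectedNet([20, 30], input_dim=10, num_classes=5,
#                             weight_scale=5e-2, reg=0.0, dtype=np.float64)
#     X = np.random.randn(4, 10)
#     y = np.random.randint(5, size=4)
#     loss, grads = net.loss(X, y)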
| {
"content_hash": "75a5e287f6409502ad1d2577f8ed1838",
"timestamp": "",
"source": "github",
"line_count": 308,
"max_line_length": 111,
"avg_line_length": 47.84415584415584,
"alnum_prop": 0.5273479913137894,
"repo_name": "HrWangChengdu/CS231n",
"id": "f57cf30d4eebb42e58dd29f3254655ccb10cc4f0",
"size": "14736",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "assignment2/cs231n/classifiers/fc_net.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "1293469"
},
{
"name": "Python",
"bytes": "49805"
},
{
"name": "Shell",
"bytes": "819"
}
],
"symlink_target": ""
} |
"""
Main storage of Dynamic Protocol API
"""
from NetCatKS.DProtocol.api.interfaces.storage import IProtocolStogareInterface
from zope.interface import implementer
__author__ = 'dimd'
@implementer(IProtocolStogareInterface)
class ProtocolStorageImplementor(object):
"""
This storage having implement a singleton pattern.
It's possible to be use only in a case when the protocol having implement id field
"""
session = {}
__instance = None
def __new__(cls):
"""
We implement a singleton pattern for our Container to ensure that all commands point to one place.
:return: instance
"""
if ProtocolStorageImplementor.__instance is None:
ProtocolStorageImplementor.__instance = object.__new__(cls)
return ProtocolStorageImplementor.__instance
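# Editor's illustrative sketch (not part of the original module): because
# __new__ always returns the same object, every instantiation sees the same
# shared session dict. The key and payload below are hypothetical.
#
#     first = ProtocolStorageImplementor()
#     second = ProtocolStorageImplementor()
#     assert first is second
#     first.session['some-id'] = {'payload': 1}
#     assert second.session['some-id'] == {'payload': 1}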
| {
"content_hash": "f0312afb7735abda4fb135e2c2c3656d",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 112,
"avg_line_length": 23.47222222222222,
"alnum_prop": 0.693491124260355,
"repo_name": "dimddev/NetCatKS",
"id": "d63e3d340cef68bb02df0ac64ad2e5e41bb64926",
"size": "845",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "NetCatKS/DProtocol/api/implementors/storage/__init__.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "182697"
}
],
"symlink_target": ""
} |
from django.db import models
from django.utils import timezone
from ckeditor.fields import RichTextField
from ckeditor_uploader.fields import RichTextUploadingField
class News_post(models.Model):
'''
Model contains all news articles.
'''
title = models.CharField(max_length=200)
image = RichTextUploadingField()
text = RichTextUploadingField()
created_date = models.DateTimeField(default=timezone.now)
published_date = models.DateTimeField(blank=True, null=True)
def as_dict(self):
'''
Method returns information represented as a dictionary.
'''
return {
'id': self.id,
'title': self.title,
'image': self.image,
'text': self.text,
'created_date': self.created_date,
'published_date': self.published_date
}
def publish(self):
'''
Marks the article as published.
'''
self.published_date = timezone.now()
self.save()
def unpublish(self):
'''
Marks the article as unpublished.
'''
self.published_date = None
self.save()
def __str__(self):
'''
String representation.
'''
return self.title
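# Editor's illustrative sketch (not part of the original module): typical use
# of the model from a view or shell; field values are hypothetical and a
# configured database is assumed.
#
#     post = News_post.objects.create(
#         title='Launch day',
#         image='<p><img src="/media/launch.png"></p>',
#         text='<p>We are live.</p>',
#     )
#     post.publish()                  # sets published_date and saves
#     payload = post.as_dict()        # e.g. for a JSON API response
#     post.unpublish()                # clears published_date again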
| {
"content_hash": "7630abe54f1d19952a4cdb854bee62cd",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 64,
"avg_line_length": 25.46938775510204,
"alnum_prop": 0.5993589743589743,
"repo_name": "maistrovas/News_app",
"id": "5c401436f736f52a837df1f28f09f318a1adc928",
"size": "1248",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "news/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "71070"
},
{
"name": "HTML",
"bytes": "7497"
},
{
"name": "JavaScript",
"bytes": "497947"
},
{
"name": "Python",
"bytes": "12992"
}
],
"symlink_target": ""
} |
class Publication():
def __init__(self, topic_id, id, client):
self.topic_id = topic_id
self.id = id
self.client = client
# Get all publications for the associated Topic. Requires authorization of **read_any_publications**, or **read_application_publications**.
# '/api/topics/:topic_id/publications' GET
#
def list(self, options = {}):
body = options['query'] if 'query' in options else {}
response = self.client.get('/api/topics/' + self.topic_id + '/publications', body, options)
return response
# Retrieve a specific publication on the associated topic by Id. Requires authorization of **read_any_publications**, or **read_application_publications**.
# '/api/topics/:topic_id/publications/:id' GET
#
def find(self, options = {}):
body = options['query'] if 'query' in options else {}
response = self.client.get('/api/topics/' + self.topic_id + '/publications/' + self.id + '', body, options)
return response
# Create a new publication on the associated Topic which can be easily retrieved later using an id. Requires authorization of **manage_any_publications**, or **manage_application_publications**.
# '/api/topics/:topic_id/publications' POST
#
# publication - A Hash containing `host`: the IP address or host of the connection (required), `protocol`: the protocol to communicate over (http, tcp, udp, mqtt) (required), `port`: the port of the connection.
def create(self, publication, options = {}):
body = options['body'] if 'body' in options else {}
body['publication'] = publication
response = self.client.post('/api/topics/' + self.topic_id + '/publications', body, options)
return response
# Update a publication. Requires authorization of **manage_any_publications**, or **manage_application_publications**.
# '/api/topics/:topic_id/publications/:id' PUT
#
# publication - A Hash containing `host`: the IP address or host of the connection (required), `protocol`: the protocol to communicate over (http, tcp, udp, mqtt) (required), `port`: the port of the connection.
def update(self, publication, options = {}):
body = options['body'] if 'body' in options else {}
body['publication'] = publication
response = self.client.put('/api/topics/' + self.topic_id + '/publications/' + self.id + '', body, options)
return response
# Remove a saved publication on the associated Topic by Id. Requires authorization of **manage_any_publications**, or **manage_application_publications**.
# '/api/topics/:topic_id/publications/:id' DELETE
#
def delete(self, options = {}):
body = options['body'] if 'body' in options else {}
response = self.client.delete('/api/topics/' + self.topic_id + '/publications/' + self.id + '', body, options)
return response
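# Editor's illustrative sketch (not part of the original module): Publication
# is a thin wrapper around an HTTP client object exposing get/post/put/delete.
# The client, ids and payload below are hypothetical.
#
#     publication = Publication('topic-123', 'pub-456', client)
#     publication.list()
#     publication.find()
#     publication.create({'host': '10.0.0.5', 'protocol': 'mqtt', 'port': 1883})
#     publication.update({'host': '10.0.0.5', 'protocol': 'mqtt', 'port': 8883})
#     publication.delete()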
| {
"content_hash": "96cb637bfda0b9567c61bff67b09c124",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 204,
"avg_line_length": 44.622950819672134,
"alnum_prop": 0.7046289493019838,
"repo_name": "cwadding/sensit-python",
"id": "835eef73bc469db005dae190762a17675f720ebf",
"size": "2940",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sensit/api/publication.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34294"
}
],
"symlink_target": ""
} |
"""Prints all histogram names."""
from __future__ import print_function
import argparse
import os
import subprocess
import sys
import tempfile
try:
from StringIO import StringIO # for Python 2
except ImportError:
from io import StringIO # for Python 3
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'common'))
import path_util
import extract_histograms
import histogram_paths
import merge_xml
def get_names(xml_files):
"""Returns all histogram names generated from a list of xml files.
Args:
xml_files: A list of open file objects containing histogram definitions.
Returns:
A tuple of (names, obsolete names), where the obsolete names is a subset of
all names.
"""
doc = merge_xml.MergeFiles(files=xml_files)
histograms, had_errors = extract_histograms.ExtractHistogramsFromDom(doc)
if had_errors:
raise ValueError("Error parsing inputs.")
names = set(extract_histograms.ExtractNames(histograms))
obsolete_names = set(extract_histograms.ExtractObsoleteNames(histograms))
return (names, obsolete_names)
def histogram_xml_files():
return [open(f) for f in histogram_paths.ALL_XMLS]
def get_diff(revision):
"""Returns the added / removed histogram names relative to git revision
Args:
revision: A git revision as described in
https://git-scm.com/docs/gitrevisions
Returns:
A tuple of (added names, removed names, obsoleted names), where each entry
is sorted in ascending order.
"""
def get_file_at_revision(path):
"""Returns a file-like object containing |path|'s content at |revision|"""
obj = "%s:%s" % (revision, path)
contents = subprocess.check_output(
("git", "cat-file", "--textconv", obj)).decode()
# Just store the contents in memory. histograms.xml is big, but it isn't
# _that_ big.
return StringIO(contents)
prev_files = []
for p in histogram_paths.ALL_XMLS_RELATIVE:
try:
prev_files.append(get_file_at_revision(p))
except subprocess.CalledProcessError:
# Paths might not exist in the provided revision.
continue
current_histogram_names, current_obsolete_names = get_names(
histogram_xml_files())
prev_histogram_names, prev_obsolete_names = get_names(prev_files)
added_names = sorted(list(current_histogram_names - prev_histogram_names))
removed_names = sorted(list(prev_histogram_names - current_histogram_names))
obsoleted_names = sorted(list(current_obsolete_names - prev_obsolete_names))
return (added_names, removed_names, obsoleted_names)
def print_diff_names(revision):
added_names, removed_names, obsoleted_names = get_diff(revision)
print("%d histograms added:" % len(added_names))
for name in added_names:
print(name)
print("%d histograms removed:" % len(removed_names))
for name in removed_names:
print(name)
print("%d histograms obsoleted:" % len(obsoleted_names))
for name in obsoleted_names:
print(name)
def main(argv):
parser = argparse.ArgumentParser(description='Print histogram names.')
parser.add_argument('--diff',
type=str,
help='Git revision to diff against (e.g. HEAD~)')
args = parser.parse_args(argv[1:])
if args.diff is not None:
print_diff_names(args.diff)
else:
for name in get_names(histogram_xml_files())[0]:
print(name)
if __name__ == '__main__':
main(sys.argv)
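# Editor's illustrative sketch (not part of the original script): typical
# invocations, assuming the script is run in place so histogram_paths can
# resolve the XML files.
#
#     # Print every histogram name defined in the histograms XML files:
#     python print_histogram_names.py
#
#     # Print names added/removed/obsoleted since five commits ago:
#     python print_histogram_names.py --diff HEAD~5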
| {
"content_hash": "4eabc496dbc2c91d078b0e4d8a550eb6",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 79,
"avg_line_length": 29.719298245614034,
"alnum_prop": 0.6992325855962219,
"repo_name": "ric2b/Vivaldi-browser",
"id": "2d8aba3dcf55944b63cc7158714db9c5baf0757a",
"size": "3573",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "chromium/tools/metrics/histograms/print_histogram_names.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
from __future__ import absolute_import
import datetime
from django.conf import settings
from django.http import HttpResponse
from django.test import TestCase
from django.utils import timezone
from mock import patch, MagicMock
from zerver.lib.test_helpers import MockLDAP
from confirmation.models import Confirmation
from zilencer.models import Deployment
from zerver.forms import HomepageForm, WRONG_SUBDOMAIN_ERROR
from zerver.lib.actions import do_change_password
from zerver.views.invite import get_invitee_emails_set
from zerver.views.registration import confirmation_key
from zerver.models import (
get_realm, get_prereg_user_by_email, get_user_profile_by_email,
get_unique_open_realm, completely_open,
PreregistrationUser, Realm, RealmAlias, Recipient,
Referral, ScheduledJob, UserProfile, UserMessage,
Stream, Subscription, ScheduledJob
)
from zerver.management.commands.deliver_email import send_email_job
from zerver.lib.actions import (
set_default_streams,
do_change_is_admin,
get_stream
)
from zerver.lib.initial_password import initial_password
from zerver.lib.actions import (
do_deactivate_realm,
do_set_realm_property,
add_new_user_history,
)
from zerver.lib.digest import send_digest_email
from zerver.lib.notifications import (
enqueue_welcome_emails, one_click_unsubscribe_link, send_local_email_template_with_delay)
from zerver.lib.test_helpers import find_pattern_in_email, find_key_by_email, queries_captured, \
HostRequestMock
from zerver.lib.test_classes import (
ZulipTestCase,
)
from zerver.lib.test_runner import slow
from zerver.lib.sessions import get_session_dict_user
from zerver.context_processors import common_context
import re
import ujson
from typing import Dict, List, Set, Optional
from six.moves import urllib
from six.moves import range
from typing import Any, Text
import os
class AddNewUserHistoryTest(ZulipTestCase):
def test_add_new_user_history_race(self):
# type: () -> None
"""Sends a message during user creation"""
# Create a user who hasn't had historical messages added
stream_dict = {
"Denmark": {"description": "A Scandinavian country", "invite_only": False},
"Verona": {"description": "A city in Italy", "invite_only": False}
} # type: Dict[Text, Dict[Text, Any]]
set_default_streams(get_realm("zulip"), stream_dict)
with patch("zerver.lib.actions.add_new_user_history"):
self.register("test@zulip.com", "test")
user_profile = get_user_profile_by_email("test@zulip.com")
subs = Subscription.objects.select_related("recipient").filter(
user_profile=user_profile, recipient__type=Recipient.STREAM)
streams = Stream.objects.filter(id__in=[sub.recipient.type_id for sub in subs])
self.send_message("hamlet@zulip.com", streams[0].name, Recipient.STREAM, "test")
add_new_user_history(user_profile, streams)
class PasswordResetTest(ZulipTestCase):
"""
Log in, reset password, log out, log in with new password.
"""
def test_password_reset(self):
# type: () -> None
email = 'hamlet@zulip.com'
old_password = initial_password(email)
self.login(email)
# test password reset template
result = self.client_get('/accounts/password/reset/')
self.assert_in_response('Reset your password.', result)
# start the password reset process by supplying an email address
result = self.client_post('/accounts/password/reset/', {'email': email})
# check the redirect link telling you to check mail for password reset link
self.assertEqual(result.status_code, 302)
self.assertTrue(result["Location"].endswith(
"/accounts/password/reset/done/"))
result = self.client_get(result["Location"])
self.assert_in_response("Check your email to finish the process.", result)
# Visit the password reset link.
password_reset_url = self.get_confirmation_url_from_outbox(email, "(\S+)")
result = self.client_get(password_reset_url)
self.assertEqual(result.status_code, 200)
# Reset your password
result = self.client_post(password_reset_url,
{'new_password1': 'new_password',
'new_password2': 'new_password'})
# password reset succeeded
self.assertEqual(result.status_code, 302)
self.assertTrue(result["Location"].endswith("/password/done/"))
# log back in with new password
self.login(email, password='new_password')
user_profile = get_user_profile_by_email('hamlet@zulip.com')
self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
# make sure old password no longer works
self.login(email, password=old_password, fails=True)
def test_redirect_endpoints(self):
# type: () -> None
'''
These tests are mostly designed to give us 100% URL coverage
in our URL coverage reports. Our mechanism for finding URL
coverage doesn't handle redirects, so we just have a few quick
tests here.
'''
result = self.client_get('/accounts/password/reset/done/')
self.assert_in_success_response(["Check your email"], result)
result = self.client_get('/accounts/password/done/')
self.assert_in_success_response(["We've reset your password!"], result)
result = self.client_get('/accounts/send_confirm/alice@example.com')
self.assert_in_success_response(["Still no email?"], result)
class LoginTest(ZulipTestCase):
"""
Logging in, registration, and logging out.
"""
def test_login(self):
# type: () -> None
self.login("hamlet@zulip.com")
user_profile = get_user_profile_by_email('hamlet@zulip.com')
self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
def test_login_bad_password(self):
# type: () -> None
self.login("hamlet@zulip.com", password="wrongpassword", fails=True)
self.assertIsNone(get_session_dict_user(self.client.session))
def test_login_nonexist_user(self):
# type: () -> None
result = self.login_with_return("xxx@zulip.com", "xxx")
self.assert_in_response("Please enter a correct email and password", result)
def test_register(self):
# type: () -> None
realm = get_realm("zulip")
stream_dict = {"stream_"+str(i): {"description": "stream_%s_description" % i, "invite_only": False}
for i in range(40)} # type: Dict[Text, Dict[Text, Any]]
for stream_name in stream_dict.keys():
self.make_stream(stream_name, realm=realm)
set_default_streams(realm, stream_dict)
with queries_captured() as queries:
self.register("test@zulip.com", "test")
# Ensure the number of queries we make is not O(streams)
self.assert_max_length(queries, 69)
user_profile = get_user_profile_by_email('test@zulip.com')
self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
self.assertFalse(user_profile.enable_stream_desktop_notifications)
def test_register_deactivated(self):
# type: () -> None
"""
If you try to register for a deactivated realm, you get a clear error
page.
"""
realm = get_realm("zulip")
realm.deactivated = True
realm.save(update_fields=["deactivated"])
result = self.register("test@zulip.com", "test")
self.assert_in_response("has been deactivated", result)
with self.assertRaises(UserProfile.DoesNotExist):
get_user_profile_by_email('test@zulip.com')
def test_login_deactivated(self):
# type: () -> None
"""
If you try to log in to a deactivated realm, you get a clear error page.
"""
realm = get_realm("zulip")
realm.deactivated = True
realm.save(update_fields=["deactivated"])
result = self.login_with_return("hamlet@zulip.com")
self.assert_in_response("has been deactivated", result)
def test_logout(self):
# type: () -> None
self.login("hamlet@zulip.com")
self.client_post('/accounts/logout/')
self.assertIsNone(get_session_dict_user(self.client.session))
def test_non_ascii_login(self):
# type: () -> None
"""
You can log in even if your password contain non-ASCII characters.
"""
email = "test@zulip.com"
password = u"hümbüǵ"
# Registering succeeds.
self.register("test@zulip.com", password)
user_profile = get_user_profile_by_email(email)
self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
self.client_post('/accounts/logout/')
self.assertIsNone(get_session_dict_user(self.client.session))
# Logging in succeeds.
self.client_post('/accounts/logout/')
self.login(email, password)
self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
def test_login_page_redirects_logged_in_user(self):
# type: () -> None
"""You will be redirected to the app's main page if you land on the
login page when already logged in.
"""
self.login("cordelia@zulip.com")
response = self.client_get("/login/")
self.assertEqual(response["Location"], "/")
class InviteUserTest(ZulipTestCase):
def invite(self, users, streams, body=''):
# type: (str, List[Text], str) -> HttpResponse
"""
Invites the specified users to Zulip with the specified streams.
users should be a string containing the users to invite, comma or
newline separated.
streams should be a list of strings.
"""
return self.client_post("/json/invite_users",
{"invitee_emails": users,
"stream": streams,
"custom_body": body})
def check_sent_emails(self, correct_recipients, custom_body=None):
# type: (List[str], Optional[str]) -> None
from django.core.mail import outbox
self.assertEqual(len(outbox), len(correct_recipients))
email_recipients = [email.recipients()[0] for email in outbox]
self.assertEqual(sorted(email_recipients), sorted(correct_recipients))
if len(outbox) == 0:
return
if custom_body is None:
self.assertNotIn("Message from", outbox[0].body)
else:
self.assertIn("Message from ", outbox[0].body)
self.assertIn(custom_body, outbox[0].body)
def test_bulk_invite_users(self):
# type: () -> None
"""The bulk_invite_users code path is for the first user in a realm."""
self.login('hamlet@zulip.com')
invitees = ['alice@zulip.com', 'bob@zulip.com']
params = {
'invitee_emails': ujson.dumps(invitees),
}
result = self.client_post('/json/invite/bulk', params)
self.assert_json_success(result)
self.check_sent_emails(invitees)
def test_bulk_invite_users_invalid_emails(self):
# type: () -> None
self.login('hamlet@zulip.com')
invitees = ['alice@zulip.com', 'bobnoatzulip.com']
params = {
'invitee_emails': ujson.dumps(invitees),
}
self.assert_json_error(
self.client_post('/json/invite/bulk', params),
'Some emails did not validate, so we didn\'t send any invitations.')
self.check_sent_emails([])
def test_successful_invite_user(self):
# type: () -> None
"""
A call to /json/invite_users with valid parameters causes an invitation
email to be sent.
"""
self.login("hamlet@zulip.com")
invitee = "alice-test@zulip.com"
self.assert_json_success(self.invite(invitee, ["Denmark"]))
self.assertTrue(find_key_by_email(invitee))
self.check_sent_emails([invitee])
def test_successful_invite_user_with_custom_body(self):
# type: () -> None
"""
A call to /json/invite_users with valid parameters causes an invitation
email to be sent.
"""
self.login("hamlet@zulip.com")
invitee = "alice-test@zulip.com"
body = "Custom Text."
self.assert_json_success(self.invite(invitee, ["Denmark"], body))
self.assertTrue(find_pattern_in_email(invitee, body))
self.check_sent_emails([invitee], custom_body=body)
def test_successful_invite_user_with_name(self):
# type: () -> None
"""
A call to /json/invite_users with valid parameters causes an invitation
email to be sent.
"""
self.login("hamlet@zulip.com")
email = "alice-test@zulip.com"
invitee = "Alice Test <{}>".format(email)
self.assert_json_success(self.invite(invitee, ["Denmark"]))
self.assertTrue(find_key_by_email(email))
self.check_sent_emails([email])
def test_successful_invite_user_with_name_and_normal_one(self):
# type: () -> None
"""
A call to /json/invite_users with valid parameters causes an invitation
email to be sent.
"""
self.login("hamlet@zulip.com")
email = "alice-test@zulip.com"
email2 = "bob-test@zulip.com"
invitee = "Alice Test <{}>, {}".format(email, email2)
self.assert_json_success(self.invite(invitee, ["Denmark"]))
self.assertTrue(find_key_by_email(email))
self.assertTrue(find_key_by_email(email2))
self.check_sent_emails([email, email2])
def test_successful_invite_user_with_notifications_stream(self):
# type: () -> None
"""
A call to /json/invite_users with valid parameters unconditionally
subscribes the invitee to the notifications stream if it exists and is
public.
"""
realm = get_realm('zulip')
notifications_stream = get_stream('Verona', realm)
realm.notifications_stream = notifications_stream
realm.save()
self.login('hamlet@zulip.com')
invitee = 'alice-test@zulip.com'
self.assert_json_success(self.invite(invitee, ['Denmark']))
self.assertTrue(find_key_by_email(invitee))
self.check_sent_emails([invitee])
prereg_user = get_prereg_user_by_email(invitee)
streams = list(prereg_user.streams.all())
self.assertTrue(notifications_stream in streams)
def test_invite_user_signup_initial_history(self):
# type: () -> None
"""
Test that a new user invited to a stream receives some initial
history but only from public streams.
"""
self.login("hamlet@zulip.com")
user_profile = get_user_profile_by_email("hamlet@zulip.com")
private_stream_name = "Secret"
self.make_stream(private_stream_name, invite_only=True)
self.subscribe_to_stream(user_profile.email, private_stream_name)
public_msg_id = self.send_message("hamlet@zulip.com", "Denmark", Recipient.STREAM,
"Public topic", "Public message")
secret_msg_id = self.send_message("hamlet@zulip.com", private_stream_name, Recipient.STREAM,
"Secret topic", "Secret message")
invitee = "alice-test@zulip.com"
self.assert_json_success(self.invite(invitee, [private_stream_name, "Denmark"]))
self.assertTrue(find_key_by_email(invitee))
self.submit_reg_form_for_user("alice-test@zulip.com", "password")
invitee_profile = get_user_profile_by_email(invitee)
invitee_msg_ids = [um.message_id for um in
UserMessage.objects.filter(user_profile=invitee_profile)]
self.assertTrue(public_msg_id in invitee_msg_ids)
self.assertFalse(secret_msg_id in invitee_msg_ids)
def test_multi_user_invite(self):
# type: () -> None
"""
Invites multiple users with a variety of delimiters.
"""
self.login("hamlet@zulip.com")
# Intentionally use a weird string.
self.assert_json_success(self.invite(
"""bob-test@zulip.com, carol-test@zulip.com,
dave-test@zulip.com
earl-test@zulip.com""", ["Denmark"]))
for user in ("bob", "carol", "dave", "earl"):
self.assertTrue(find_key_by_email("%s-test@zulip.com" % (user,)))
self.check_sent_emails(["bob-test@zulip.com", "carol-test@zulip.com",
"dave-test@zulip.com", "earl-test@zulip.com"])
def test_missing_or_invalid_params(self):
# type: () -> None
"""
Tests inviting with various missing or invalid parameters.
"""
self.login("hamlet@zulip.com")
self.assert_json_error(
self.client_post("/json/invite_users", {"invitee_emails": "foo@zulip.com",
"custom_body": ''}),
"You must specify at least one stream for invitees to join.")
for address in ("noatsign.com", "outsideyourdomain@example.net"):
self.assert_json_error(
self.invite(address, ["Denmark"]),
"Some emails did not validate, so we didn't send any invitations.")
self.check_sent_emails([])
self.assert_json_error(
self.invite("", ["Denmark"]),
"You must specify at least one email address.")
self.check_sent_emails([])
def test_invalid_stream(self):
# type: () -> None
"""
Tests inviting to a non-existent stream.
"""
self.login("hamlet@zulip.com")
self.assert_json_error(self.invite("iago-test@zulip.com", ["NotARealStream"]),
"Stream does not exist: NotARealStream. No invites were sent.")
self.check_sent_emails([])
def test_invite_existing_user(self):
# type: () -> None
"""
If you invite an address already using Zulip, no invitation is sent.
"""
self.login("hamlet@zulip.com")
self.assert_json_error(
self.client_post("/json/invite_users",
{"invitee_emails": "hamlet@zulip.com",
"stream": ["Denmark"],
"custom_body": ''}),
"We weren't able to invite anyone.")
self.assertRaises(PreregistrationUser.DoesNotExist,
lambda: PreregistrationUser.objects.get(
email="hamlet@zulip.com"))
self.check_sent_emails([])
def test_invite_some_existing_some_new(self):
# type: () -> None
"""
If you invite a mix of already existing and new users, invitations are
only sent to the new users.
"""
self.login("hamlet@zulip.com")
existing = ["hamlet@zulip.com", "othello@zulip.com"]
new = ["foo-test@zulip.com", "bar-test@zulip.com"]
result = self.client_post("/json/invite_users",
{"invitee_emails": "\n".join(existing + new),
"stream": ["Denmark"],
"custom_body": ''})
self.assert_json_error(result,
"Some of those addresses are already using Zulip, \
so we didn't send them an invitation. We did send invitations to everyone else!")
# We only created accounts for the new users.
for email in existing:
self.assertRaises(PreregistrationUser.DoesNotExist,
lambda: PreregistrationUser.objects.get(
email=email))
for email in new:
self.assertTrue(PreregistrationUser.objects.get(email=email))
# We only sent emails to the new users.
self.check_sent_emails(new)
prereg_user = get_prereg_user_by_email('foo-test@zulip.com')
self.assertEqual(prereg_user.email, 'foo-test@zulip.com')
def test_invite_outside_domain_in_closed_realm(self):
# type: () -> None
"""
In a realm with `restricted_to_domain = True`, you can't invite people
with a different domain from that of the realm or your e-mail address.
"""
zulip_realm = get_realm("zulip")
zulip_realm.restricted_to_domain = True
zulip_realm.save()
self.login("hamlet@zulip.com")
external_address = "foo@example.com"
self.assert_json_error(
self.invite(external_address, ["Denmark"]),
"Some emails did not validate, so we didn't send any invitations.")
def test_invite_outside_domain_in_open_realm(self):
# type: () -> None
"""
In a realm with `restricted_to_domain = False`, you can invite people
with a different domain from that of the realm or your e-mail address.
"""
zulip_realm = get_realm("zulip")
zulip_realm.restricted_to_domain = False
zulip_realm.save()
self.login("hamlet@zulip.com")
external_address = "foo@example.com"
self.assert_json_success(self.invite(external_address, ["Denmark"]))
self.check_sent_emails([external_address])
def test_invite_outside_domain_before_closing(self):
# type: () -> None
"""
If you invite someone with a different domain from that of the realm
when `restricted_to_domain = False`, but `restricted_to_domain` later
changes to true, the invitation should succeed but the invitee's signup
attempt should fail.
"""
zulip_realm = get_realm("zulip")
zulip_realm.restricted_to_domain = False
zulip_realm.save()
self.login("hamlet@zulip.com")
external_address = "foo@example.com"
self.assert_json_success(self.invite(external_address, ["Denmark"]))
self.check_sent_emails([external_address])
zulip_realm.restricted_to_domain = True
zulip_realm.save()
result = self.submit_reg_form_for_user("foo@example.com", "password")
self.assertEqual(result.status_code, 200)
self.assert_in_response("only allows users with e-mail", result)
def test_invite_with_non_ascii_streams(self):
# type: () -> None
"""
Inviting someone to streams with non-ASCII characters succeeds.
"""
self.login("hamlet@zulip.com")
invitee = "alice-test@zulip.com"
stream_name = u"hümbüǵ"
# Make sure we're subscribed before inviting someone.
self.subscribe_to_stream("hamlet@zulip.com", stream_name)
self.assert_json_success(self.invite(invitee, [stream_name]))
def test_refer_friend(self):
# type: () -> None
self.login("hamlet@zulip.com")
user = get_user_profile_by_email('hamlet@zulip.com')
user.invites_granted = 1
user.invites_used = 0
user.save()
invitee = "alice-test@zulip.com"
result = self.client_post('/json/refer_friend', dict(email=invitee))
self.assert_json_success(result)
# verify this works
Referral.objects.get(user_profile=user, email=invitee)
user = get_user_profile_by_email('hamlet@zulip.com')
self.assertEqual(user.invites_used, 1)
def test_refer_friend_no_email(self):
# type: () -> None
self.login("hamlet@zulip.com")
user = get_user_profile_by_email('hamlet@zulip.com')
user.invites_granted = 1
user.invites_used = 0
user.save()
self.assert_json_error(
self.client_post('/json/refer_friend', dict(email='')),
"No email address specified")
user = get_user_profile_by_email('hamlet@zulip.com')
self.assertEqual(user.invites_used, 0)
def test_refer_friend_no_invites(self):
# type: () -> None
self.login("hamlet@zulip.com")
user = get_user_profile_by_email('hamlet@zulip.com')
user.invites_granted = 1
user.invites_used = 1
user.save()
invitee = "alice-test@zulip.com"
self.assert_json_error(
self.client_post('/json/refer_friend', dict(email=invitee)),
"Insufficient invites")
user = get_user_profile_by_email('hamlet@zulip.com')
self.assertEqual(user.invites_used, 1)
def test_invitation_reminder_email(self):
# type: () -> None
from django.core.mail import outbox
current_user_email = "hamlet@zulip.com"
self.login(current_user_email)
invitee = "alice-test@zulip.com"
self.assert_json_success(self.invite(invitee, ["Denmark"]))
self.assertTrue(find_key_by_email(invitee))
self.check_sent_emails([invitee])
data = {"email": invitee, "referrer_email": current_user_email}
invitee = get_prereg_user_by_email(data["email"])
referrer = get_user_profile_by_email(data["referrer_email"])
link = Confirmation.objects.get_link_for_object(invitee, host=referrer.realm.host)
context = common_context(referrer)
context.update({
'activate_url': link,
'referrer': referrer,
'verbose_support_offers': settings.VERBOSE_SUPPORT_OFFERS,
'support_email': settings.ZULIP_ADMINISTRATOR
})
with self.settings(EMAIL_BACKEND='django.core.mail.backends.console.EmailBackend'):
send_local_email_template_with_delay(
[{'email': data["email"], 'name': ""}],
"zerver/emails/invitation/invitation_reminder_email",
context,
datetime.timedelta(days=0),
tags=["invitation-reminders"],
sender={'email': settings.ZULIP_ADMINISTRATOR, 'name': 'Zulip'})
email_jobs_to_deliver = ScheduledJob.objects.filter(
type=ScheduledJob.EMAIL,
scheduled_timestamp__lte=timezone.now())
self.assertEqual(len(email_jobs_to_deliver), 1)
email_count = len(outbox)
for job in email_jobs_to_deliver:
self.assertTrue(send_email_job(job))
self.assertEqual(len(outbox), email_count + 1)
class InviteeEmailsParserTests(TestCase):
def setUp(self):
# type: () -> None
self.email1 = "email1@zulip.com"
self.email2 = "email2@zulip.com"
self.email3 = "email3@zulip.com"
    def test_if_emails_separated_by_commas_are_parsed_and_stripped_correctly(self):
# type: () -> None
emails_raw = "{} ,{}, {}".format(self.email1, self.email2, self.email3)
expected_set = {self.email1, self.email2, self.email3}
self.assertEqual(get_invitee_emails_set(emails_raw), expected_set)
    def test_if_emails_separated_by_newlines_are_parsed_and_stripped_correctly(self):
# type: () -> None
emails_raw = "{}\n {}\n {} ".format(self.email1, self.email2, self.email3)
expected_set = {self.email1, self.email2, self.email3}
self.assertEqual(get_invitee_emails_set(emails_raw), expected_set)
def test_if_emails_from_email_client_separated_by_newlines_are_parsed_correctly(self):
# type: () -> None
emails_raw = "Email One <{}>\nEmailTwo<{}>\nEmail Three<{}>".format(self.email1, self.email2, self.email3)
expected_set = {self.email1, self.email2, self.email3}
self.assertEqual(get_invitee_emails_set(emails_raw), expected_set)
def test_if_emails_in_mixed_style_are_parsed_correctly(self):
# type: () -> None
emails_raw = "Email One <{}>,EmailTwo<{}>\n{}".format(self.email1, self.email2, self.email3)
expected_set = {self.email1, self.email2, self.email3}
self.assertEqual(get_invitee_emails_set(emails_raw), expected_set)
class EmailUnsubscribeTests(ZulipTestCase):
def test_error_unsubscribe(self):
# type: () -> None
        # An invalid unsubscribe token "test123" produces an error.
result = self.client_get('/accounts/unsubscribe/missed_messages/test123')
self.assert_in_response('Unknown email unsubscribe request', result)
# An unknown message type "fake" produces an error.
user_profile = get_user_profile_by_email("hamlet@zulip.com")
unsubscribe_link = one_click_unsubscribe_link(user_profile, "fake")
result = self.client_get(urllib.parse.urlparse(unsubscribe_link).path)
self.assert_in_response('Unknown email unsubscribe request', result)
def test_missedmessage_unsubscribe(self):
# type: () -> None
"""
We provide one-click unsubscribe links in missed message
e-mails that you can click even when logged out to update your
email notification settings.
"""
user_profile = get_user_profile_by_email("hamlet@zulip.com")
user_profile.enable_offline_email_notifications = True
user_profile.save()
unsubscribe_link = one_click_unsubscribe_link(user_profile,
"missed_messages")
result = self.client_get(urllib.parse.urlparse(unsubscribe_link).path)
self.assertEqual(result.status_code, 200)
# Circumvent user_profile caching.
user_profile = UserProfile.objects.get(email="hamlet@zulip.com")
self.assertFalse(user_profile.enable_offline_email_notifications)
def test_welcome_unsubscribe(self):
# type: () -> None
"""
We provide one-click unsubscribe links in welcome e-mails that you can
click even when logged out to stop receiving them.
"""
email = "hamlet@zulip.com"
user_profile = get_user_profile_by_email("hamlet@zulip.com")
# Simulate a new user signing up, which enqueues 2 welcome e-mails.
enqueue_welcome_emails(email, "King Hamlet")
self.assertEqual(2, len(ScheduledJob.objects.filter(
type=ScheduledJob.EMAIL, filter_string__iexact=email)))
# Simulate unsubscribing from the welcome e-mails.
unsubscribe_link = one_click_unsubscribe_link(user_profile, "welcome")
result = self.client_get(urllib.parse.urlparse(unsubscribe_link).path)
# The welcome email jobs are no longer scheduled.
self.assertEqual(result.status_code, 200)
self.assertEqual(0, len(ScheduledJob.objects.filter(
type=ScheduledJob.EMAIL, filter_string__iexact=email)))
def test_digest_unsubscribe(self):
# type: () -> None
"""
We provide one-click unsubscribe links in digest e-mails that you can
click even when logged out to stop receiving them.
Unsubscribing from these emails also dequeues any digest email jobs that
have been queued.
"""
email = "hamlet@zulip.com"
user_profile = get_user_profile_by_email("hamlet@zulip.com")
self.assertTrue(user_profile.enable_digest_emails)
# Enqueue a fake digest email.
send_digest_email(user_profile, "", "", "")
self.assertEqual(1, len(ScheduledJob.objects.filter(
type=ScheduledJob.EMAIL, filter_string__iexact=email)))
# Simulate unsubscribing from digest e-mails.
unsubscribe_link = one_click_unsubscribe_link(user_profile, "digest")
result = self.client_get(urllib.parse.urlparse(unsubscribe_link).path)
# The setting is toggled off, and scheduled jobs have been removed.
self.assertEqual(result.status_code, 200)
# Circumvent user_profile caching.
user_profile = UserProfile.objects.get(email="hamlet@zulip.com")
self.assertFalse(user_profile.enable_digest_emails)
self.assertEqual(0, len(ScheduledJob.objects.filter(
type=ScheduledJob.EMAIL, filter_string__iexact=email)))
class RealmCreationTest(ZulipTestCase):
def test_create_realm(self):
# type: () -> None
password = "test"
string_id = "zuliptest"
email = "user1@test.com"
realm = get_realm('test')
# Make sure the realm does not exist
self.assertIsNone(realm)
with self.settings(OPEN_REALM_CREATION=True):
# Create new realm with the email
result = self.client_post('/create_realm/', {'email': email})
self.assertEqual(result.status_code, 302)
self.assertTrue(result["Location"].endswith(
"/accounts/send_confirm/%s" % (email,)))
result = self.client_get(result["Location"])
self.assert_in_response("Check your email so we can get started.", result)
# Visit the confirmation link.
confirmation_url = self.get_confirmation_url_from_outbox(email)
result = self.client_get(confirmation_url)
self.assertEqual(result.status_code, 200)
result = self.submit_reg_form_for_user(email, password, realm_subdomain=string_id)
self.assertEqual(result.status_code, 302)
# Make sure the realm is created
realm = get_realm(string_id)
self.assertIsNotNone(realm)
self.assertEqual(realm.string_id, string_id)
self.assertEqual(get_user_profile_by_email(email).realm, realm)
# Check defaults
self.assertEqual(realm.org_type, Realm.COMMUNITY)
self.assertEqual(realm.restricted_to_domain, False)
self.assertEqual(realm.invite_required, True)
self.assertTrue(result["Location"].endswith("/"))
def test_create_realm_existing_email(self):
# type: () -> None
"""
Trying to create a realm with an existing email should just redirect to
a login page.
"""
with self.settings(OPEN_REALM_CREATION=True):
email = 'hamlet@zulip.com'
result = self.client_post('/create_realm/', {'email': email})
self.assertEqual(result.status_code, 302)
self.assertIn('login', result['Location'])
def test_create_realm_no_creation_key(self):
# type: () -> None
"""
Trying to create a realm without a creation_key should fail when
OPEN_REALM_CREATION is false.
"""
email = "user1@test.com"
realm = get_realm('test')
# Make sure the realm does not exist
self.assertIsNone(realm)
with self.settings(OPEN_REALM_CREATION=False):
# Create new realm with the email, but no creation key.
result = self.client_post('/create_realm/', {'email': email})
self.assertEqual(result.status_code, 200)
self.assert_in_response('New organization creation disabled.', result)
def test_create_realm_with_subdomain(self):
# type: () -> None
password = "test"
string_id = "zuliptest"
email = "user1@test.com"
realm_name = "Test"
# Make sure the realm does not exist
self.assertIsNone(get_realm('test'))
with self.settings(REALMS_HAVE_SUBDOMAINS=True), self.settings(OPEN_REALM_CREATION=True):
# Create new realm with the email
result = self.client_post('/create_realm/', {'email': email})
self.assertEqual(result.status_code, 302)
self.assertTrue(result["Location"].endswith(
"/accounts/send_confirm/%s" % (email,)))
result = self.client_get(result["Location"])
self.assert_in_response("Check your email so we can get started.", result)
# Visit the confirmation link.
confirmation_url = self.get_confirmation_url_from_outbox(email)
result = self.client_get(confirmation_url)
self.assertEqual(result.status_code, 200)
result = self.submit_reg_form_for_user(email, password,
realm_subdomain = string_id,
realm_name=realm_name,
# Pass HTTP_HOST for the target subdomain
HTTP_HOST=string_id + ".testserver")
self.assertEqual(result.status_code, 302)
# Make sure the realm is created
realm = get_realm(string_id)
self.assertIsNotNone(realm)
self.assertEqual(realm.string_id, string_id)
self.assertEqual(get_user_profile_by_email(email).realm, realm)
self.assertEqual(realm.name, realm_name)
self.assertEqual(realm.subdomain, string_id)
def test_mailinator_signup(self):
# type: () -> None
with self.settings(OPEN_REALM_CREATION=True):
result = self.client_post('/create_realm/', {'email': "hi@mailinator.com"})
self.assert_in_response('Please use your real email address.', result)
def test_subdomain_restrictions(self):
# type: () -> None
password = "test"
email = "user1@test.com"
realm_name = "Test"
with self.settings(REALMS_HAVE_SUBDOMAINS=False), self.settings(OPEN_REALM_CREATION=True):
result = self.client_post('/create_realm/', {'email': email})
self.client_get(result["Location"])
confirmation_url = self.get_confirmation_url_from_outbox(email)
self.client_get(confirmation_url)
errors = {'id': "at least 3 characters",
'-id': "cannot start or end with a",
'string-ID': "lowercase letters",
'string_id': "lowercase letters",
'stream': "unavailable",
'streams': "unavailable",
'about': "unavailable",
'abouts': "unavailable",
'zephyr': "unavailable"}
for string_id, error_msg in errors.items():
result = self.submit_reg_form_for_user(email, password,
realm_subdomain = string_id,
realm_name = realm_name)
self.assert_in_response(error_msg, result)
# test valid subdomain
result = self.submit_reg_form_for_user(email, password,
realm_subdomain = 'a-0',
realm_name = realm_name)
self.assertEqual(result.status_code, 302)
class UserSignUpTest(ZulipTestCase):
def test_user_default_language(self):
# type: () -> None
"""
Check if the default language of new user is the default language
of the realm.
"""
email = "newguy@zulip.com"
password = "newpassword"
realm = get_realm('zulip')
do_set_realm_property(realm, 'default_language', u"de")
result = self.client_post('/accounts/home/', {'email': email})
self.assertEqual(result.status_code, 302)
self.assertTrue(result["Location"].endswith(
"/accounts/send_confirm/%s" % (email,)))
result = self.client_get(result["Location"])
self.assert_in_response("Check your email so we can get started.", result)
# Visit the confirmation link.
confirmation_url = self.get_confirmation_url_from_outbox(email)
result = self.client_get(confirmation_url)
self.assertEqual(result.status_code, 200)
# Pick a password and agree to the ToS.
result = self.submit_reg_form_for_user(email, password)
self.assertEqual(result.status_code, 302)
user_profile = get_user_profile_by_email(email)
self.assertEqual(user_profile.default_language, realm.default_language)
from django.core.mail import outbox
outbox.pop()
def test_signup_already_active(self):
# type: () -> None
"""
Check if signing up with an active email redirects to a login page.
"""
email = 'hamlet@zulip.com'
result = self.client_post('/accounts/home/', {'email': email})
self.assertEqual(result.status_code, 302)
self.assertIn('login', result['Location'])
def test_signup_invalid_name(self):
# type: () -> None
"""
Check if an invalid name during signup is handled properly.
"""
email = "newguy@zulip.com"
password = "newpassword"
result = self.client_post('/accounts/home/', {'email': email})
self.assertEqual(result.status_code, 302)
self.assertTrue(result["Location"].endswith(
"/accounts/send_confirm/%s" % (email,)))
result = self.client_get(result["Location"])
self.assert_in_response("Check your email so we can get started.", result)
# Visit the confirmation link.
confirmation_url = self.get_confirmation_url_from_outbox(email)
result = self.client_get(confirmation_url)
self.assertEqual(result.status_code, 200)
# Pick a password and agree to the ToS.
result = self.submit_reg_form_for_user(email, password, full_name="<invalid>")
self.assert_in_success_response(["Invalid characters in name!"], result)
def test_signup_without_password(self):
# type: () -> None
"""
Check if signing up without a password works properly when
password_auth_enabled is False.
"""
email = "newuser@zulip.com"
result = self.client_post('/accounts/home/', {'email': email})
self.assertEqual(result.status_code, 302)
self.assertTrue(result["Location"].endswith(
"/accounts/send_confirm/%s" % (email,)))
result = self.client_get(result["Location"])
self.assert_in_response("Check your email so we can get started.", result)
# Visit the confirmation link.
confirmation_url = self.get_confirmation_url_from_outbox(email)
result = self.client_get(confirmation_url)
self.assertEqual(result.status_code, 200)
with patch('zerver.views.registration.password_auth_enabled', return_value=False):
result = self.client_post(
'/accounts/register/',
{'full_name': 'New User',
'realm_name': 'Zulip Test',
'realm_subdomain': 'zuliptest',
'key': find_key_by_email(email),
'realm_org_type': Realm.COMMUNITY,
'terms': True})
# User should now be logged in.
self.assertEqual(result.status_code, 302)
user_profile = get_user_profile_by_email(email)
self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
def test_signup_without_full_name(self):
# type: () -> None
"""
Check if signing up without a full name redirects to a registration
form.
"""
email = "newguy@zulip.com"
password = "newpassword"
result = self.client_post('/accounts/home/', {'email': email})
self.assertEqual(result.status_code, 302)
self.assertTrue(result["Location"].endswith(
"/accounts/send_confirm/%s" % (email,)))
result = self.client_get(result["Location"])
self.assert_in_response("Check your email so we can get started.", result)
# Visit the confirmation link.
confirmation_url = self.get_confirmation_url_from_outbox(email)
result = self.client_get(confirmation_url)
self.assertEqual(result.status_code, 200)
result = self.client_post(
'/accounts/register/',
{'password': password,
'realm_name': 'Zulip Test',
'realm_subdomain': 'zuliptest',
'key': find_key_by_email(email),
'realm_org_type': Realm.COMMUNITY,
'terms': True,
'from_confirmation': '1'})
self.assert_in_success_response(["You're almost there."], result)
def test_signup_invalid_subdomain(self):
# type: () -> None
"""
Check if attempting to authenticate to the wrong subdomain logs an
error and redirects.
"""
email = "newuser@zulip.com"
password = "newpassword"
result = self.client_post('/accounts/home/', {'email': email})
self.assertEqual(result.status_code, 302)
self.assertTrue(result["Location"].endswith(
"/accounts/send_confirm/%s" % (email,)))
result = self.client_get(result["Location"])
self.assert_in_response("Check your email so we can get started.", result)
# Visit the confirmation link.
confirmation_url = self.get_confirmation_url_from_outbox(email)
result = self.client_get(confirmation_url)
self.assertEqual(result.status_code, 200)
def invalid_subdomain(**kwargs):
# type: (**Any) -> Any
return_data = kwargs.get('return_data', {})
return_data['invalid_subdomain'] = True
with patch('zerver.views.registration.authenticate', side_effect=invalid_subdomain):
with patch('logging.error') as mock_error:
result = self.client_post(
'/accounts/register/',
{'password': password,
'full_name': 'New User',
'realm_name': 'Zulip Test',
'realm_subdomain': 'zuliptest',
'key': find_key_by_email(email),
'realm_org_type': Realm.COMMUNITY,
'terms': True})
mock_error.assert_called_once()
self.assertEqual(result.status_code, 302)
def test_unique_completely_open_domain(self):
# type: () -> None
password = "test"
email = "user1@acme.com"
subdomain = "zulip"
realm_name = "Zulip"
realm = get_realm('zulip')
realm.restricted_to_domain = False
realm.invite_required = False
realm.save()
for string_id in ('simple', 'zephyr'):
realm = get_realm(string_id)
do_deactivate_realm(realm)
realm.save()
result = self.client_post('/register/', {'email': email})
self.assertEqual(result.status_code, 302)
self.assertTrue(result["Location"].endswith(
"/accounts/send_confirm/%s" % (email,)))
result = self.client_get(result["Location"])
self.assert_in_response("Check your email so we can get started.", result)
# Visit the confirmation link.
from django.core.mail import outbox
for message in reversed(outbox):
if email in message.to:
confirmation_link_pattern = re.compile(settings.EXTERNAL_HOST + "(\S+)>")
confirmation_url = confirmation_link_pattern.search(
message.body).groups()[0]
break
else:
raise AssertionError("Couldn't find a confirmation email.")
result = self.client_get(confirmation_url)
self.assertEqual(result.status_code, 200)
result = self.submit_reg_form_for_user(email,
password,
realm_name=realm_name,
realm_subdomain=subdomain,
# Pass HTTP_HOST for the target subdomain
HTTP_HOST=subdomain + ".testserver")
self.assert_in_success_response(["You're almost there."], result)
def test_completely_open_domain_success(self):
# type: () -> None
password = "test"
email = "user1@acme.com"
subdomain = "zulip"
realm_name = "Zulip"
realm = get_realm('zulip')
realm.restricted_to_domain = False
realm.invite_required = False
realm.save()
result = self.client_post('/register/zulip/', {'email': email})
self.assertEqual(result.status_code, 302)
self.assertTrue(result["Location"].endswith(
"/accounts/send_confirm/%s" % (email,)))
result = self.client_get(result["Location"])
self.assert_in_response("Check your email so we can get started.", result)
# Visit the confirmation link.
from django.core.mail import outbox
for message in reversed(outbox):
if email in message.to:
confirmation_link_pattern = re.compile(settings.EXTERNAL_HOST + "(\S+)>")
confirmation_url = confirmation_link_pattern.search(
message.body).groups()[0]
break
else:
raise AssertionError("Couldn't find a confirmation email.")
result = self.client_get(confirmation_url)
self.assertEqual(result.status_code, 200)
result = self.submit_reg_form_for_user(email,
password,
realm_name=realm_name,
realm_subdomain=subdomain,
# Pass HTTP_HOST for the target subdomain
HTTP_HOST=subdomain + ".testserver")
self.assert_in_success_response(["You're almost there."], result)
def test_failed_signup_with_realm_str(self):
# type: () -> None
"""
Signing up with the special accounts_home_with_realm_str endpoint should
fail (i.e. redirect to the standard accounts_home) if
settings.REALMS_HAVE_SUBDOMAINS is true, or if the realm is not
completely open.
"""
realm = get_realm('zulip')
realm.restricted_to_domain = False
realm.invite_required = False
realm.save()
with self.settings(REALMS_HAVE_SUBDOMAINS=True):
email = 'user1@acme.com'
result = self.client_post('/register/zulip/', {'email': email})
self.assertEqual(result.status_code, 302)
self.assertIn('accounts/home', result['Location'])
realm = get_realm('zulip')
realm.invite_required = True
realm.save()
with self.settings(REALMS_HAVE_SUBDOMAINS=False):
email = 'user1@acme.com'
result = self.client_post('/register/zulip/', {'email': email})
self.assertEqual(result.status_code, 302)
self.assertIn('accounts/home', result['Location'])
def test_failed_signup_due_to_restricted_domain(self):
# type: () -> None
realm = get_realm('zulip')
realm.invite_required = False
realm.save()
with self.settings(REALMS_HAVE_SUBDOMAINS = True):
request = HostRequestMock(host = realm.host)
request.session = {} # type: ignore
form = HomepageForm({'email': 'user@acme.com'}, realm=realm)
self.assertIn("trying to join, zulip, only allows users with e-mail", form.errors['email'][0])
def test_failed_signup_due_to_invite_required(self):
# type: () -> None
realm = get_realm('zulip')
realm.invite_required = True
realm.save()
request = HostRequestMock(host = realm.host)
request.session = {} # type: ignore
form = HomepageForm({'email': 'user@zulip.com'}, realm=realm)
self.assertIn("Please request an invite from", form.errors['email'][0])
def test_failed_signup_due_to_nonexistent_realm(self):
# type: () -> None
with self.settings(REALMS_HAVE_SUBDOMAINS = True):
request = HostRequestMock(host = 'acme.' + settings.EXTERNAL_HOST)
request.session = {} # type: ignore
form = HomepageForm({'email': 'user@acme.com'}, realm=None)
self.assertIn("organization you are trying to join does not exist", form.errors['email'][0])
def test_registration_through_ldap(self):
# type: () -> None
password = "testing"
email = "newuser@zulip.com"
subdomain = "zulip"
realm_name = "Zulip"
ldap_user_attr_map = {'full_name': 'fn', 'short_name': 'sn'}
ldap_patcher = patch('django_auth_ldap.config.ldap.initialize')
mock_initialize = ldap_patcher.start()
mock_ldap = MockLDAP()
mock_initialize.return_value = mock_ldap
mock_ldap.directory = {
'uid=newuser,ou=users,dc=zulip,dc=com': {
'userPassword': 'testing',
'fn': ['New User Name']
}
}
with patch('zerver.views.registration.get_subdomain', return_value=subdomain):
result = self.client_post('/register/', {'email': email})
self.assertEqual(result.status_code, 302)
self.assertTrue(result["Location"].endswith(
"/accounts/send_confirm/%s" % (email,)))
result = self.client_get(result["Location"])
self.assert_in_response("Check your email so we can get started.", result)
# Visit the confirmation link.
from django.core.mail import outbox
for message in reversed(outbox):
if email in message.to:
confirmation_link_pattern = re.compile(settings.EXTERNAL_HOST + "(\S+)>")
confirmation_url = confirmation_link_pattern.search(
message.body).groups()[0]
break
else:
raise AssertionError("Couldn't find a confirmation email.")
with self.settings(
POPULATE_PROFILE_VIA_LDAP=True,
LDAP_APPEND_DOMAIN='zulip.com',
AUTH_LDAP_BIND_PASSWORD='',
AUTH_LDAP_USER_ATTR_MAP=ldap_user_attr_map,
AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',),
AUTH_LDAP_USER_DN_TEMPLATE='uid=%(user)s,ou=users,dc=zulip,dc=com'):
result = self.client_get(confirmation_url)
self.assertEqual(result.status_code, 200)
            # The full_name should not be overridden by the value from LDAP if
# request.session['authenticated_full_name'] has not been set yet.
with patch('zerver.views.registration.name_changes_disabled', return_value=True):
result = self.submit_reg_form_for_user(email,
password,
full_name="Non LDAP Full Name",
realm_name=realm_name,
realm_subdomain=subdomain,
# Pass HTTP_HOST for the target subdomain
HTTP_HOST=subdomain + ".testserver")
self.assert_in_success_response(["You're almost there.",
"Non LDAP Full Name",
"newuser@zulip.com"],
result)
# Submitting the registration form with from_confirmation='1' sets
# the value of request.session['authenticated_full_name'] from LDAP.
result = self.submit_reg_form_for_user(email,
password,
realm_name=realm_name,
realm_subdomain=subdomain,
from_confirmation='1',
# Pass HTTP_HOST for the target subdomain
HTTP_HOST=subdomain + ".testserver")
self.assert_in_success_response(["You're almost there.",
"New User Name",
"newuser@zulip.com"],
result)
            # The full name should be populated from the value of
# request.session['authenticated_full_name'] from LDAP in the case
# where from_confirmation and name_changes_disabled are both False.
with patch('zerver.views.registration.name_changes_disabled', return_value=True):
result = self.submit_reg_form_for_user(email,
password,
full_name="Non LDAP Full Name",
realm_name=realm_name,
realm_subdomain=subdomain,
# Pass HTTP_HOST for the target subdomain
HTTP_HOST=subdomain + ".testserver")
self.assert_in_success_response(["You're almost there.",
"New User Name",
"newuser@zulip.com"],
result)
# Test the TypeError exception handler
mock_ldap.directory = {
'uid=newuser,ou=users,dc=zulip,dc=com': {
'userPassword': 'testing',
'fn': None # This will raise TypeError
}
}
result = self.submit_reg_form_for_user(email,
password,
realm_name=realm_name,
realm_subdomain=subdomain,
from_confirmation='1',
# Pass HTTP_HOST for the target subdomain
HTTP_HOST=subdomain + ".testserver")
self.assert_in_success_response(["You're almost there.",
"newuser@zulip.com"],
result)
mock_ldap.reset()
mock_initialize.stop()
@patch('DNS.dnslookup', return_value=[['sipbtest:*:20922:101:Fred Sipb,,,:/mit/sipbtest:/bin/athena/tcsh']])
def test_registration_of_mirror_dummy_user(self, ignored):
# type: (Any) -> None
password = "test"
email = "sipbtest@mit.edu"
subdomain = "sipb"
realm_name = "MIT"
user_profile = get_user_profile_by_email(email)
user_profile.is_mirror_dummy = True
user_profile.is_active = False
user_profile.save()
result = self.client_post('/register/', {'email': email})
self.assertEqual(result.status_code, 302)
self.assertTrue(result["Location"].endswith(
"/accounts/send_confirm/%s" % (email,)))
result = self.client_get(result["Location"])
self.assert_in_response("Check your email so we can get started.", result)
# Visit the confirmation link.
from django.core.mail import outbox
for message in reversed(outbox):
if email in message.to:
confirmation_link_pattern = re.compile(settings.EXTERNAL_HOST + "(\S+)>")
confirmation_url = confirmation_link_pattern.search(
message.body).groups()[0]
break
else:
raise AssertionError("Couldn't find a confirmation email.")
result = self.client_get(confirmation_url)
self.assertEqual(result.status_code, 200)
# If the mirror dummy user is already active, attempting to submit the
# registration form should just redirect to a login page.
user_profile.is_active = True
user_profile.save()
result = self.submit_reg_form_for_user(email,
password,
realm_name=realm_name,
realm_subdomain=subdomain,
from_confirmation='1',
# Pass HTTP_HOST for the target subdomain
HTTP_HOST=subdomain + ".testserver")
self.assertEqual(result.status_code, 302)
self.assertIn('login', result['Location'])
user_profile.is_active = False
user_profile.save()
result = self.submit_reg_form_for_user(email,
password,
realm_name=realm_name,
realm_subdomain=subdomain,
from_confirmation='1',
# Pass HTTP_HOST for the target subdomain
HTTP_HOST=subdomain + ".testserver")
self.assertEqual(result.status_code, 200)
result = self.submit_reg_form_for_user(email,
password,
realm_name=realm_name,
realm_subdomain=subdomain,
# Pass HTTP_HOST for the target subdomain
HTTP_HOST=subdomain + ".testserver")
self.assertEqual(result.status_code, 302)
self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
def test_registration_of_active_mirror_dummy_user(self):
# type: (Any) -> None
"""
Trying to activate an already-active mirror dummy user should just
redirect to a login page.
"""
email = "sipbtest@mit.edu"
user_profile = get_user_profile_by_email(email)
user_profile.is_mirror_dummy = True
user_profile.is_active = True
user_profile.save()
result = self.client_post('/register/', {'email': email})
self.assertEqual(result.status_code, 302)
self.assertIn('login', result['Location'])
class TestOpenRealms(ZulipTestCase):
def test_open_realm_logic(self):
# type: () -> None
realm = get_realm('simple')
do_deactivate_realm(realm)
mit_realm = get_realm("zephyr")
self.assertEqual(get_unique_open_realm(), None)
mit_realm.restricted_to_domain = False
mit_realm.save()
self.assertTrue(completely_open(mit_realm))
self.assertEqual(get_unique_open_realm(), None)
with self.settings(SYSTEM_ONLY_REALMS={"zulip"}):
self.assertEqual(get_unique_open_realm(), mit_realm)
mit_realm.restricted_to_domain = True
mit_realm.save()
class DeactivateUserTest(ZulipTestCase):
def test_deactivate_user(self):
# type: () -> None
email = 'hamlet@zulip.com'
self.login(email)
user = get_user_profile_by_email('hamlet@zulip.com')
self.assertTrue(user.is_active)
result = self.client_delete('/json/users/me')
self.assert_json_success(result)
user = get_user_profile_by_email('hamlet@zulip.com')
self.assertFalse(user.is_active)
self.login(email, fails=True)
def test_do_not_deactivate_final_admin(self):
# type: () -> None
email = 'iago@zulip.com'
self.login(email)
user = get_user_profile_by_email('iago@zulip.com')
self.assertTrue(user.is_active)
result = self.client_delete('/json/users/me')
self.assert_json_error(result, "Cannot deactivate the only organization administrator")
user = get_user_profile_by_email('iago@zulip.com')
self.assertTrue(user.is_active)
self.assertTrue(user.is_realm_admin)
email = 'hamlet@zulip.com'
user_2 = get_user_profile_by_email('hamlet@zulip.com')
do_change_is_admin(user_2, True)
self.assertTrue(user_2.is_realm_admin)
result = self.client_delete('/json/users/me')
self.assert_json_success(result)
do_change_is_admin(user, True)
class TestLoginPage(ZulipTestCase):
def test_login_page_wrong_subdomain_error(self):
# type: () -> None
result = self.client_get("/login/?subdomain=1")
self.assertIn(WRONG_SUBDOMAIN_ERROR, result.content.decode('utf8'))
@patch('django.http.HttpRequest.get_host')
def test_login_page_redirects_for_root_alias(self, mock_get_host):
# type: (MagicMock) -> None
mock_get_host.return_value = 'www.testserver'
with self.settings(REALMS_HAVE_SUBDOMAINS=True,
ROOT_SUBDOMAIN_ALIASES=['www']):
result = self.client_get("/en/login/")
self.assertEqual(result.status_code, 302)
self.assertEqual(result.url, '/find_my_team/')
@patch('django.http.HttpRequest.get_host')
def test_login_page_redirects_for_root_domain(self, mock_get_host):
# type: (MagicMock) -> None
mock_get_host.return_value = 'testserver'
with self.settings(REALMS_HAVE_SUBDOMAINS=True,
ROOT_SUBDOMAIN_ALIASES=['www']):
result = self.client_get("/en/login/")
self.assertEqual(result.status_code, 302)
self.assertEqual(result.url, '/find_my_team/')
mock_get_host.return_value = 'www.testserver.com'
with self.settings(REALMS_HAVE_SUBDOMAINS=True,
EXTERNAL_HOST='www.testserver.com',
ROOT_SUBDOMAIN_ALIASES=['test']):
result = self.client_get("/en/login/")
self.assertEqual(result.status_code, 302)
self.assertEqual(result.url, '/find_my_team/')
@patch('django.http.HttpRequest.get_host')
def test_login_page_works_without_subdomains(self, mock_get_host):
# type: (MagicMock) -> None
mock_get_host.return_value = 'www.testserver'
with self.settings(ROOT_SUBDOMAIN_ALIASES=['www']):
result = self.client_get("/en/login/")
self.assertEqual(result.status_code, 200)
mock_get_host.return_value = 'testserver'
with self.settings(ROOT_SUBDOMAIN_ALIASES=['www']):
result = self.client_get("/en/login/")
self.assertEqual(result.status_code, 200)
class TestFindMyTeam(ZulipTestCase):
def test_template(self):
# type: () -> None
result = self.client_get('/find_my_team/')
self.assertIn("Find your team", result.content.decode('utf8'))
def test_result(self):
# type: () -> None
url = '/find_my_team/?emails=iago@zulip.com,cordelia@zulip.com'
result = self.client_get(url)
content = result.content.decode('utf8')
self.assertIn("Emails sent! You will only receive emails", content)
self.assertIn("iago@zulip.com", content)
self.assertIn("cordelia@zulip.com", content)
def test_find_team_ignore_invalid_email(self):
# type: () -> None
url = '/find_my_team/?emails=iago@zulip.com,invalid_email'
result = self.client_get(url)
content = result.content.decode('utf8')
self.assertIn("Emails sent! You will only receive emails", content)
self.assertIn("iago@zulip.com", content)
self.assertNotIn("invalid_email", content)
def test_find_team_zero_emails(self):
# type: () -> None
data = {'emails': ''}
result = self.client_post('/find_my_team/', data)
self.assertIn('This field is required', result.content.decode('utf8'))
self.assertEqual(result.status_code, 200)
def test_find_team_one_email(self):
# type: () -> None
data = {'emails': 'hamlet@zulip.com'}
result = self.client_post('/find_my_team/', data)
self.assertEqual(result.status_code, 302)
self.assertEqual(result.url, '/find_my_team/?emails=hamlet%40zulip.com')
def test_find_team_multiple_emails(self):
# type: () -> None
data = {'emails': 'hamlet@zulip.com,iago@zulip.com'}
result = self.client_post('/find_my_team/', data)
self.assertEqual(result.status_code, 302)
expected = '/find_my_team/?emails=hamlet%40zulip.com%2Ciago%40zulip.com'
self.assertEqual(result.url, expected)
def test_find_team_more_than_ten_emails(self):
# type: () -> None
data = {'emails': ','.join(['hamlet-{}@zulip.com'.format(i) for i in range(11)])}
result = self.client_post('/find_my_team/', data)
self.assertEqual(result.status_code, 200)
self.assertIn("Please enter at most 10", result.content.decode('utf8'))
class ConfirmationKeyTest(ZulipTestCase):
def test_confirmation_key(self):
# type: () -> None
request = MagicMock()
request.session = {
'confirmation_key': {'confirmation_key': 'xyzzy'}
}
result = confirmation_key(request)
self.assert_json_success(result)
self.assert_in_response('xyzzy', result)
| {
"content_hash": "60923e77069a4c7b4ffece1ae48c9f1f",
"timestamp": "",
"source": "github",
"line_count": 1614,
"max_line_length": 114,
"avg_line_length": 43.013011152416354,
"alnum_prop": 0.5822998141826196,
"repo_name": "jainayush975/zulip",
"id": "49f32fbe5bba8cbaf03af9c6a9e603be94cfd356",
"size": "69459",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "zerver/tests/test_signup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "312492"
},
{
"name": "Emacs Lisp",
"bytes": "158"
},
{
"name": "Groovy",
"bytes": "5509"
},
{
"name": "HTML",
"bytes": "554118"
},
{
"name": "JavaScript",
"bytes": "1667223"
},
{
"name": "Nginx",
"bytes": "1280"
},
{
"name": "Pascal",
"bytes": "1113"
},
{
"name": "Perl",
"bytes": "401825"
},
{
"name": "Puppet",
"bytes": "86990"
},
{
"name": "Python",
"bytes": "3601270"
},
{
"name": "Ruby",
"bytes": "249744"
},
{
"name": "Shell",
"bytes": "37821"
}
],
"symlink_target": ""
} |
__license__ = """
GoLismero 2.0 - The web knife - Copyright (C) 2011-2014
Golismero project site: https://github.com/golismero
Golismero project mail: contact@golismero-project.com
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
import re
import os.path
import csv
try:
import cPickle as pickle
except ImportError:
import pickle as pickle
from golismero.api.config import Config
from golismero.api.data.resource.url import FolderURL
from golismero.api.data.information.html import HTML
from golismero.api.data import discard_data
from golismero.api.data.vulnerability.infrastructure.outdated_software import OutdatedSoftware
from golismero.api.text.wordlist import WordListLoader
from golismero.api.text.matching_analyzer import get_diff_ratio
from golismero.api.net.web_utils import urljoin, download, get_error_page
from golismero.api.net.http import HTTP
from golismero.api.logger import Logger
from golismero.api.plugin import TestingPlugin
#------------------------------------------------------------------------------
# Plecost plugin extra files.
base_dir = os.path.split(os.path.abspath(__file__))[0]
plecost_dir = os.path.join(base_dir, "plecost_plugin")
plecost_plugin_list = os.path.join(plecost_dir, "plugin_list_500.txt")
plecost_cve_data = os.path.join(plecost_dir, "cve.dat")
del base_dir
#
# This code was taken from:
#
# http://stackoverflow.com/a/1714536
#
def version_cmp(version1, version2):
"""
Compare two software versions.
:param version1: string with version number.
:type version1: str
:param version2: string with version number.
:type version2: str
:return: 1 if version 1 is greater. -1 if version2 if greater.
:rtype: int
"""
tup = lambda x: [int(y) for y in (x+'.0.0.0.0').split('.')][:4]
return cmp(tup(version1),tup(version2))
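#
# Illustrative sketch (not part of the original plugin; the helper name is made
# up and it is never called, so nothing runs at import time): how version_cmp()
# orders a few hypothetical version strings after zero-padding.
#
def _version_cmp_examples():
    assert version_cmp("3.8", "3.8.1") == -1   # 3.8.0.0 < 3.8.1.0
    assert version_cmp("4.0", "3.9.2") == 1    # 4.0.0.0 > 3.9.2.0
    assert version_cmp("2.1", "2.1.0") == 0    # equal after zero-padding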
#------------------------------------------------------------------------------
class PlecostPlugin(TestingPlugin):
#--------------------------------------------------------------------------
def check_params(self):
plugin_list = Config.plugin_args.get("plugin_list", "")
if plugin_list == "":
plugin_list = plecost_plugin_list
        # Does the plugin list file exist?
        if not os.path.exists(plugin_list):
            raise IOError("Plugin list file does not exist: '%s'" % plugin_list)
#--------------------------------------------------------------------------
def get_accepted_types(self):
return [FolderURL]
#--------------------------------------------------------------------------
def run(self, info):
if not isinstance(info, FolderURL):
return
plugin_list = Config.plugin_args.get("plugin_list", "")
if plugin_list == "":
plugin_list = plecost_plugin_list
find_vulns = Config.plugin_args.get("find_vulns", "")
if find_vulns == "":
find_vulns = True
wordpress_urls = Config.plugin_args.get("wordpress_urls", "")
if wordpress_urls == "":
wordpress_urls = "golismero/wordpress_detector.txt"
url = info.url
results = []
#
        # Try to detect whether a WordPress installation is present
#
wordpress_found = self.__detect_wordpress_installation(url, wordpress_urls)
Logger.log_verbose("%s WordPress instalation found in '%s'." % ("No" if wordpress_found is False else "", url))
if wordpress_found is False:
return
#
# Get WordPress version
#
        current_version, last_version = self.__get_wordpress_version(url)
        Logger.log("WordPress installation version found: %s (latest: %s)" % (current_version, last_version))
        # Outdated version of WordPress?
        if current_version != "unknown":
            if version_cmp(current_version, last_version) == -1:
                s = OutdatedSoftware(info,
                                     "cpe:/a:wordpress:wordpress:%s" % current_version,
                                     title="Outdated version of WordPress (%s)" % current_version,
                                     description="Outdated version of WordPress found. Installed version: %s. Latest version available: %s"
                                     % (current_version, last_version))
results.append(s)
#
        # Look for installed plugins and known vulnerabilities
#
if find_vulns:
# Load CVE descriptions
try:
CVE_info = pickle.load(open(plecost_cve_data, "rb"))
except pickle.PickleError:
CVE_info = {}
Logger.log("Looking for installed and outdated plugins.")
url_parsed = info.parsed_url
url = "%s://%s%s" % (url_parsed.scheme, url_parsed.host, url_parsed.directory)
installed_plugins = self.__find_plugins(url, plugin_list, self.update_status)
for plugin in installed_plugins:
plugin_name = plugin[0]
plugin_URL = plugin[1]
plugin_installed_version = plugin[2]
plugin_last_version = plugin[3]
plugin_CVEs = plugin[4]
# Check for outdated plugins
if plugin_installed_version != "unknown":
if version_cmp(plugin_installed_version, plugin_last_version) == -1:
# CVE info
cve_descriptions = []
for cve in plugin_CVEs:
try:
cve_descriptions.append("%s description: %s" % (cve, CVE_info[cve]))
except KeyError:
Logger.log_error_more_verbose(
"CVE '%s' not found in database. Maybe you must update your plecost plugin" % cve)
s = OutdatedSoftware(
info,
"cpe:/a:wordpress:wordpress:-",
title="Outdated version of WordPress plugin '%s'" % plugin_name,
description="Outdated version of wordpress found in URL: \n'%s'.\n\n%s"
% (plugin_URL, "\n".join(cve_descriptions)))
results.append(s)
return results
#--------------------------------------------------------------------------
def __find_plugins(self, url, plugins_wordlist, update_func):
"""
Try to find available plugins
:param url: base URL to test.
:type url: str
:param plugins_wordlist: path to wordlist with plugins lists.
:type plugins_wordlist: str
:param update_func: function to update plugin status.
:type update_func: function
:return: list of lists as format:
list([PLUGIN_NAME, PLUGIN_URL, PLUGIN_INSTALLED_VERSION, PLUGIN_LAST_VERSION, [CVE1, CVE2...]])
:type: list(list())
"""
results = []
urls_to_test = {
"readme.txt": r"(Stable tag:[\svV]*)([0-9\.]+)",
"README.txt": r"(Stable tag:[\svV]*)([0-9\.]+)",
}
# Generates the error page
error_response = get_error_page(url).raw_data
# Load plugins info
plugins = []
plugins_append = plugins.append
with open(plugins_wordlist, "rU") as f:
for x in f:
plugins_append(x.replace("\n", ""))
# Calculate sizes
total_plugins = len(plugins)
# Load CSV info
csv_info = csv.reader(plugins)
# Process the URLs
for i, plugin_row in enumerate(csv_info):
# Plugin properties
plugin_URI = plugin_row[0]
plugin_name = plugin_row[1]
plugin_last_version = plugin_row[2]
plugin_CVEs = [] if plugin_row[3] == "" else plugin_row[3].split("|")
# Update status
update_func((float(i) * 100.0) / float(total_plugins))
# Make plugin URL
partial_plugin_url = "%s/%s" % (url, "wp-content/plugins/%s" % plugin_URI)
# Test each URL with possible plugin version info
for target, regex in urls_to_test.iteritems():
plugin_url = "%s/%s" % (partial_plugin_url, target)
# Try to get plugin
p = None
try:
p = HTTP.get_url(plugin_url, use_cache=False)
if p:
discard_data(p)
except Exception, e:
Logger.log_error_more_verbose("Error while download: '%s': %s" % (plugin_url, str(e)))
continue
plugin_installed_version = None
if p.status == "403": # Installed, but inaccesible
plugin_installed_version = "Unknown"
elif p.status == "200":
# Check if page is and non-generic not found page with 404 code
if get_diff_ratio(error_response, p.raw_response) < 0.52:
# Find the version
tmp_version = re.search(regex, p.raw_response)
if tmp_version is not None:
plugin_installed_version = tmp_version.group(2)
# Store info
if plugin_installed_version is not None:
Logger.log("Discovered plugin: '%s (installed version: %s)' (latest version: %s)" %
(plugin_name, plugin_installed_version, plugin_last_version))
results.append([
plugin_name,
plugin_url,
plugin_installed_version,
plugin_last_version,
plugin_CVEs
])
# Plugin found -> not more URL test for this plugin
break
return results
#--------------------------------------------------------------------------
def __detect_wordpress_installation(self, url, wordpress_urls):
"""
        Try to detect a WordPress installation in the current path.
:param url: URL where try to find the WordPress installation.
:type url: str
:param wordpress_urls: string with wordlist name with WordPress URLs.
:type wordpress_urls: str
:return: True if wordpress installation found. False otherwise.
:rtype: bool
"""
Logger.log_more_verbose("Detecting Wordpress instalation in URI: '%s'." % url)
total_urls = 0
urls_found = 0
error_page = get_error_page(url).raw_data
for u in WordListLoader.get_wordlist_as_list(wordpress_urls):
total_urls += 1
tmp_url = urljoin(url, u)
r = HTTP.get_url(tmp_url, use_cache=False)
if r.status == "200":
# Try to detect non-default error pages
ratio = get_diff_ratio(r.raw_response, error_page)
if ratio < 0.35:
urls_found += 1
discard_data(r)
        # If fewer than 85% of the probe URLs were found, fall back to checking wp-admin
if (urls_found / float(total_urls)) < 0.85:
# If all fails, make another last test
url_wp_admin = urljoin(url, "wp-admin/")
try:
p = HTTP.get_url(url_wp_admin, use_cache=False, allow_redirects=False)
if p:
discard_data(p)
except Exception, e:
return False
if p.status == "302" and "wp-login.php?redirect_to=" in p.headers.get("Location", ""):
return True
else:
return False
else:
return True
#--------------------------------------------------------------------------
def __get_wordpress_version(self, url):
"""
        This function gets the currently installed version of WordPress and the
        latest version available for download.
        :param url: URL of the target.
:type url: str.
:return: a tuple with (CURRENT_VERSION, LAST_AVAILABLE_VERSION)
:type: tuple(str, str)
"""
url_version = {
# Generic
"wp-login.php": r"(;ver=)([0-9\.]+)([\-a-z]*)",
# For WordPress 3.8
"wp-admin/css/wp-admin-rtl.css": r"(Version[\s]+)([0-9\.]+)",
"wp-admin/css/wp-admin.css": r"(Version[\s]+)([0-9\.]+)"
}
#
# Get current version
#
# URL to find wordpress version
url_current_version = urljoin(url, "readme.html")
current_version_content_1 = download(url_current_version)
if isinstance(current_version_content_1, HTML):
current_version_method1 = re.search(r"(<br/>[\s]*[vV]ersion[\s]*)([0-9\.]*)", current_version_content_1.raw_data)
if current_version_method1 is None:
current_version_method1 = None
else:
if len(current_version_method1.groups()) != 2:
current_version_method1 = None
else:
current_version_method1 = current_version_method1.group(2)
else:
current_version_method1 = None
# Try to find the version into HTML meta value
# Get content of main page
current_version_content_2 = download(url)
# Try to find the info
current_version_method2 = re.search(r"(<meta name=\"generator\" content=\"WordPress[\s]+)([0-9\.]+)",
current_version_content_2.raw_data)
if current_version_method2 is None:
current_version_method2 = None
else:
if len(current_version_method2.groups()) != 2:
current_version_method2 = None
else:
current_version_method2 = current_version_method2.group(2)
        # Reconcile the versions found by the different methods
current_version = "unknown"
if current_version_method1 is None and current_version_method2 is None:
current_version = "unknown"
elif current_version_method1 is None and current_version_method2 is not None:
current_version = current_version_method2
elif current_version_method1 is not None and current_version_method2 is None:
current_version = current_version_method1
elif current_version_method1 is not None and current_version_method2 is not None:
if current_version_method1 != current_version_method2:
current_version = current_version_method2
else:
current_version = current_version_method1
else:
current_version = "unknown"
# If Current version not found
if current_version == "unknown":
for url_pre, regex in url_version.iteritems():
# URL to find wordpress version
url_current_version = urljoin(url, url_pre)
current_version_content = download(url_current_version)
discard_data(current_version_content)
# Find the version
tmp_version = re.search(regex, current_version_content.raw_data)
if tmp_version is not None:
current_version = tmp_version.group(2)
break # Found -> stop search
#
# Get last version
#
# URL to get last version of WordPress available
url_last_version = "http://wordpress.org/download/"
last_version_content = download(url_last_version, allow_out_of_scope=True)
if isinstance(last_version_content, HTML):
last_version = re.search("(WordPress )([0-9\.]*)", last_version_content.raw_data)
if last_version is None:
last_version = "unknown"
else:
if len(last_version.groups()) != 2:
last_version = "unknown"
else:
last_version = last_version.group(2)
else:
last_version = "unknown"
# Discard unused data
discard_data(current_version_content_2)
discard_data(current_version_content_1)
discard_data(last_version_content)
return current_version, last_version
| {
"content_hash": "03912b0b089058cc31e88a4ce09b8474",
"timestamp": "",
"source": "github",
"line_count": 465,
"max_line_length": 145,
"avg_line_length": 36.74623655913979,
"alnum_prop": 0.5377772575642301,
"repo_name": "JeyZeta/Dangerous",
"id": "e9ef550c3af7bfa5385ba788a6a71ab971c588e8",
"size": "17134",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "Dangerous/Golismero/plugins/testing/scan/plecost.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "13260"
},
{
"name": "C",
"bytes": "12851"
},
{
"name": "C++",
"bytes": "3174"
},
{
"name": "CSS",
"bytes": "267451"
},
{
"name": "HTML",
"bytes": "2686153"
},
{
"name": "JavaScript",
"bytes": "1356956"
},
{
"name": "Lua",
"bytes": "14436"
},
{
"name": "Makefile",
"bytes": "11190"
},
{
"name": "Objective-C",
"bytes": "998"
},
{
"name": "PHP",
"bytes": "619"
},
{
"name": "PLpgSQL",
"bytes": "536"
},
{
"name": "Perl",
"bytes": "263365"
},
{
"name": "Python",
"bytes": "16669102"
},
{
"name": "Roff",
"bytes": "9828"
},
{
"name": "Ruby",
"bytes": "503"
},
{
"name": "Shell",
"bytes": "6691"
}
],
"symlink_target": ""
} |
"""
Wrapper for the ssh program.
Provides get_ssh, ssh_command, and scp functions.
"""
import sys, os
from pycopia import proctools
from pycopia import expect
SSH = proctools.which("ssh")
SCP = proctools.which("scp")
KEYGEN = proctools.which("ssh-keygen")
KEYSCAN = proctools.which("ssh-keyscan")
class SSHRetry(RuntimeError):
pass
# |01234567890123456789
TESTED_VERSIONS = ["OpenSSH_3.4p1, SSH protocols 1.5/2.0, OpenSSL 0x0090605f",
"OpenSSH_3.5p1, SSH protocols 1.5/2.0, OpenSSL 0x0090701f",
"OpenSSH_3.6.1p2, SSH protocols 1.5/2.0, OpenSSL 0x0090701f",
"OpenSSH_3.8.1p1, OpenSSL 0.9.7d 17 Mar 2004",
"OpenSSH_4.5p1, OpenSSL 0.9.8a 11 Oct 2005",
"OpenSSH_4.3p2"
]
SSH_OPTIONS = '-F %s' % os.path.join("/", "etc", "pycopia", "ssh_config")
class SSHExpect(expect.Expect):
def sshexit(self):
self.send("\r~.\r")
def login(self, password=None):
"""login([password])
Supplies a password for the SSH session. Not necessarily any subsequent login
prompts. """
if password is None:
import getpass
password = getpass.getpass("Password: ")
while True:
mo = self.expect(["WARNING:", "assword:", "try again"], timeout=20)
if mo:
i = self.expectindex
if i == 0:
raise SSHRetry("SSHExpect.sshlogin: try again, bad host key.")
elif i == 1:
self._fo.write(password+"\r")
break
elif i == 2:
continue
else:
raise RuntimeError("SSHExpect.sshlogin: unknown response.")
def death_callback(self, deadssh):
if self._log:
self._log.write("ssh exited: %s" % (deadssh.exitstatus))
self.close()
def _build_commandline(host, **kwargs):
    # user=None, password=None, prompt=None, callback=None, logfile=None, extraoptions=""
    user = kwargs.get("user")
    command = kwargs.get("command", "")
    if user is None:
        cmd = "%s %s %s" % (SSH, host, command)
    else:
        cmd = "%s %s@%s %s" % (SSH, user, host, command)
    return cmd
def ssh_command(host, command, user=None, password=None, prompt=None, logfile=None):
"""ssh_command(host, command, [user], [password], [prompt], [logfile])
Runs the command on the given host via SSH, and return the result.
"""
pm = proctools.get_procmanager()
if user is None:
cmd = "%s %s %s" %(SSH, host, command)
else:
cmd = "%s %s@%s %s" %(SSH, user, host, command)
sshproc = pm.spawnpty(cmd)
ssh = SSHExpect(sshproc)
sshproc.set_callback(ssh.death_callback)
ssh.set_prompt(prompt or "$")
ssh.setlog(logfile)
if password is not None:
ssh.login(password)
rv = ssh.read()
return rv
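# Illustrative usage sketch (hypothetical host, user and password; the helper is
# never called, so no connection is made at import time):
def _ssh_command_example():
    return ssh_command("203.0.113.10", "uname -a",
                       user="admin", password="secret", prompt="$")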
def get_ssh(host, user=None, password=None, prompt=None, callback=None, logfile=None, extraoptions="",
cmd=None, async=False):
"""get_ssh(host, [user], [password], [prompt], [callback], [logfile])
    Uses ssh to get a shell on the given host, and automatically authenticates by
password if a password is given. Returns an SSHExpect object.
The logfile parameter should be a file-like object (has a 'write' method).
"""
pm = proctools.get_procmanager()
hostuser = "%s@%s" % (user, host) if user else host
command = "%s %s %s %s %s" % (SSH, SSH_OPTIONS, extraoptions, hostuser, cmd or "")
sshproc = pm.spawnpty(command, logfile=logfile, async=async)
ssh = SSHExpect(sshproc)
sshproc.set_callback(callback or ssh.death_callback)
ssh.set_prompt(prompt or "$")
if password is not None:
ssh.login(password)
return ssh
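# Illustrative interactive sketch (hypothetical host and credentials; never
# called at import time). Only methods visible in this module are used:
# send(), read() and sshexit().
def _get_ssh_example():
    session = get_ssh("203.0.113.10", user="admin", password="secret", prompt="$")
    session.send("uptime\r")
    output = session.read()
    session.sshexit()
    return output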
def get_ssh_unsafe(host, *args, **kwargs):
"""get_ssh_unsafe(host, ...)
Like get_ssh(), but automatically removes any stale known_hosts entry, if
required."""
try:
return get_ssh(host, *args, **kwargs)
except SSHRetry:
remove_known_host(host)
return get_ssh(host, *args, **kwargs)
def scp(srchost=None, srcpath=None, dsthost=None, dstpath=None, user=None,
password=None, prompt=None, callback=None, logfile=None):
"""scp(source, destination, [password])
Copies the file from source to destination. these parameters are strings that
are passed directly to the scp command, and should follow the syntax for this
command.
"""
opts = "-q"
src = location(srchost, user, srcpath)
dst = location(dsthost, user, dstpath)
CMD = "%s %s %s '%s' '%s'" % (SCP, SSH_OPTIONS, opts, src, dst)
if logfile:
logfile.write(CMD+"\n")
scp = proctools.spawnpty(CMD, logfile=logfile)
if password is not None:
escp = SSHExpect(scp)
scp.set_callback(callback or escp.death_callback)
escp.login(password)
discard = escp.read()
else:
discard = scp.read()
es = scp.wait()
return es
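# Illustrative sketch (hypothetical host, paths and credentials; never called at
# import time): copy a local file to a remote host with scp().
def _scp_example():
    return scp(srcpath="/tmp/report.txt",
               dsthost="203.0.113.10", dstpath="/tmp/report.txt",
               user="admin", password="secret")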
def location(host=None, user=None, path=None, forssh=False):
"""Construct an appropriate ssh/scp path spec based on the combination of
parameters. Supply host, user, and path."""
sep = "" if forssh else ":"
if host is None:
if user is None:
if path is None:
raise ValueError("must supply at least one of host, or user.")
else:
return path
else:
if path is None:
raise ValueError("user without host?")
else:
return path # ignore user in this case
else:
if user is None:
if path is None:
return "%s%s" % (host, sep)
else:
return "%s:%s" % (host, path)
else:
if path is None:
return "%s@%s%s" % (user, host, sep)
else:
return "%s@%s:%s" % (user, host, path)
def ssh_version():
"""ssh_version() Return the version string for the ssh command on this system."""
ssh = proctools.spawnpipe("ssh -TV")
ver = ssh.read()
return ver
def check_version():
"""Checks that the installed ssh program is the same as this module was
tested with (and written for)."""
ver = ssh_version()[:13]
for vs in TESTED_VERSIONS:
if ver == vs[:13]:
return 1
return 0
def get_procs():
"""get_ssh_list() Returns list of managed ssh processes."""
pm = proctools.get_procmanager()
return pm.getbyname("ssh")
# Support objects follow.
# Mostly, these are for creating or modifying various ssh related files.
class KnownHostsFile(object):
def __init__(self):
self._fname = os.path.join(os.environ["HOME"], ".ssh", "known_hosts")
self._lines = None
self.open()
def __del__(self):
self.close()
def __str__(self):
return "".join(self._lines)
def open(self):
try:
fo = open(self._fname, "r")
except OSError:
self._lines = []
else:
self._lines = fo.readlines()
fo.close()
self._dirty = 0
def close(self):
if self._dirty:
fo = open(self._fname, "w+")
fo.writelines(self._lines)
fo.close()
self._dirty = 0
def add(self, hostname, publickey, comment=None):
if comment:
line = "%s %s %s\n" % (hostname, publickey, comment)
else:
line = "%s %s\n" % (hostname, publickey)
self._lines.append(line)
self._dirty = 1
def remove(self, hostname):
from pycopia import ipv4
try:
ip = str(ipv4.IPv4(hostname))
        except Exception:
ip = None
new = []
for line in self._lines:
if line.startswith(hostname):
self._dirty = 1
continue
elif ip and line.startswith(ip):
self._dirty = 1
continue
else:
new.append(line)
self._lines = new
def get_known_hosts():
return KnownHostsFile()
def get_userdir():
return os.path.join(os.environ["HOME"], ".ssh")
def remove_known_host(hostname):
khf = KnownHostsFile()
khf.remove(hostname)
khf.close()
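# A hedged sketch of editing ~/.ssh/known_hosts through KnownHostsFile; the
# hostname and key material are placeholders:
#   khf = get_known_hosts()
#   khf.add("10.0.0.5", "ssh-rsa AAAAB3Nza...", comment="lab box")
#   khf.remove("10.0.0.5")
#   khf.close()  # rewrites the file only if something changed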
def keygen(keytype="dsa", bits=1024, comment="", filename=None, passphrase=None, logfile=None, async=0, safe=1):
"""Generate a new ssh user key of the specified keytype."""
assert keytype in KEYTYPES, "keytype must be one of: %s" % (KEYTYPES,)
pm = proctools.get_procmanager()
fn = filename or os.path.join(os.environ["HOME"], ".ssh", "id_%s" % (keytype,))
ph = passphrase or ""
if os.path.exists(fn):
if safe:
raise SSHRetry("key file %s already exists." % (fn,))
else:
os.unlink(fn)
command = '%s -q -N "%s" -t %s -b %s -C "%s" -f %s' % \
            (KEYGEN, ph, keytype, bits, comment, fn)  # use the resolved key path fn, not the raw filename argument
kgproc = pm.spawnpty(command, logfile=logfile, async=async)
kgproc.read()
kgproc.wait()
return kgproc.exitstatus
def keyscan(host, keytype="dsa", logfile=None, async=0):
"""Run ssh-keyscan. Return key, and program exit status."""
assert keytype in KEYTYPES, "keytype must be one of: %s" % (KEYTYPES,)
pm = proctools.get_procmanager()
command = '%s -t %s %s' % (KEYSCAN, keytype, host)
ksproc = pm.spawnpty(command, logfile=logfile, async=async)
res = ksproc.read()
ksproc.wait()
lines = res.split("\n")
[host, text] = lines[1].split(None, 1)
if text.startswith("hostkey"):
return None, ksproc.exitstatus
if text[0] in "0123456789":
rv = _parse_rsa1_pub(text)
else:
rv = _parse_rsa_dsa_pub(text)
return rv, ksproc.exitstatus
##### key and keyfile objects.
class SSHKey(object):
def parse(self, text):
raise NotImplementedError
class SSHKeyRSA1(SSHKey):
pass
class SSHKeyRSA1pub(SSHKey):
def __init__(self, bits, exponent, modulus, comment=""):
self.bits = int(bits)
self.exponent = int(exponent)
self.modulus = long(modulus)
self.comment = str(comment)
def __eq__(self, other):
try:
return self.exponent == other.exponent and self.modulus == other.modulus
except AttributeError:
return NotImplemented
def __ne__(self, other):
try:
return self.exponent != other.exponent or self.modulus != other.modulus
except AttributeError:
return NotImplemented
def __str__(self):
if self.comment:
return "%d %d %ld %s" % (self.bits, self.exponent, self.modulus, self.comment)
else:
return "%d %d %ld" % (self.bits, self.exponent, self.modulus)
# this is really only for RSA/DSA public keys
class SSHKeyPublic(SSHKey):
def __init__(self, key, comment=""):
self.key = str(key) # key is base64 encoded
self.comment = str(comment)
def __eq__(self, other):
try:
return self.key == other.key
except AttributeError:
return NotImplemented
def __ne__(self, other):
try:
return self.key != other.key
except AttributeError:
return NotImplemented
def __str__(self):
return "%s %s %s" % (self.keytype, self.key, self.comment)
class SSHKeyRSA(SSHKey):
pass
class SSHKeyRSApub(SSHKeyPublic):
keytype = "ssh-rsa"
class SSHKeyDSA(SSHKey):
pass
class SSHKeyDSApub(SSHKeyPublic):
keytype = "ssh-dss"
class AuthorizedKeys(SSHKey):
pass
# parser figures out the type, as well. Just pass a key file name, return an
# object of the correct type with initialized values. Works something like a
# recursive-descent parser, except that it is not recursive. ;-)
def parse_key(filename):
base, ext = os.path.splitext(filename)
if ext and ext == ".pub":
return parse_public(filename)
else:
return parse_private(filename)
def parse_public(filename):
fo = open(filename)
text = fo.read().strip()
fo.close()
if text[0] in "0123456789":
return _parse_rsa1_pub(text)
else:
return _parse_rsa_dsa_pub(text)
def _parse_rsa1_pub(text):
parts = text.split()
[bits, exponent, modulus] = parts[:3]
if len(parts) >= 4: # comments are optional
comment = parts[3]
else:
comment = ""
return SSHKeyRSA1pub(bits, exponent, modulus, comment)
def _parse_rsa_dsa_pub(text):
parts = text.split()
assert len(parts) >= 2, "parse_rsa_dsa: need at least 2 parts."
if len(parts) >= 3:
comment = parts[2]
else:
comment = ""
[keytype, key] = parts[:2]
assert keytype in KEYTYPES, "keytype (%r) not valid." % (keytype,)
keycls_priv, keycls_pub = _CLSMAP[keytype]
return keycls_pub(key, comment)
def parse_private(filename):
raise NotImplementedError
def new_key(keytype="dsa", bits=1024, comment="", filename=None, passphrase=None):
pass
# map to tuple of private key, public key classes
_CLSMAP = {"ssh-dss": (SSHKeyDSA, SSHKeyDSApub),
"ssh-rsa": (SSHKeyRSA, SSHKeyRSApub),
"rsa1": (SSHKeyRSA1, SSHKeyRSA1pub),
"rsa": (SSHKeyRSA, SSHKeyRSApub),
"dsa": (SSHKeyDSA, SSHKeyDSApub),
}
KEYTYPES = _CLSMAP.keys()
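# A hedged parsing example (not part of the module); the key material is a
# truncated placeholder:
#   key = _parse_rsa_dsa_pub("ssh-rsa AAAAB3Nza... user@host")
#   isinstance(key, SSHKeyRSApub)   # -> True
#   str(key)                        # -> "ssh-rsa AAAAB3Nza... user@host"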
| {
"content_hash": "dec1127003cb39a9e23e81cd9f193ddf",
"timestamp": "",
"source": "github",
"line_count": 427,
"max_line_length": 112,
"avg_line_length": 31.070257611241217,
"alnum_prop": 0.5869450516318685,
"repo_name": "kdart/pycopia",
"id": "91ded8469ebc2c35e195d9df68a3e92605c15236",
"size": "13904",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "process/pycopia/sshlib.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "423794"
},
{
"name": "CSS",
"bytes": "19522"
},
{
"name": "JavaScript",
"bytes": "91759"
},
{
"name": "Makefile",
"bytes": "6958"
},
{
"name": "Perl",
"bytes": "271"
},
{
"name": "Python",
"bytes": "6098633"
},
{
"name": "Roff",
"bytes": "7289"
},
{
"name": "Shell",
"bytes": "12778"
},
{
"name": "Vim script",
"bytes": "50421"
}
],
"symlink_target": ""
} |
"""autogenerated by genpy from mapping_dlut/Grid.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class Grid(genpy.Message):
_md5sum = "d1e632170b3f559b23311a8f8cc60632"
_type = "mapping_dlut/Grid"
_has_header = False #flag to mark the presence of a Header object
_full_text = """#Header header
#maximum elevation in this grid;
#float32 fMaxElevation
#minimum elevation in this grid;
#float32 fMinElevation
#average elevation in this grid;
#float32 fAvgElevation
#points falling in this grid;
#int32 nPointCount
#up point falling in this grid;
#int32 nUpCount
#down point falling in this grid;
#int32 nDownCount
#average elevation in this grid;
float32 fAvgElevation
#proability
int8 proability
#texture
int8 texture
"""
__slots__ = ['fAvgElevation','proability','texture']
_slot_types = ['float32','int8','int8']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
    set to None will be assigned a default value. The recommended
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
fAvgElevation,proability,texture
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(Grid, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.fAvgElevation is None:
self.fAvgElevation = 0.
if self.proability is None:
self.proability = 0
if self.texture is None:
self.texture = 0
else:
self.fAvgElevation = 0.
self.proability = 0
self.texture = 0
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self
buff.write(_struct_f2b.pack(_x.fAvgElevation, _x.proability, _x.texture))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(_x))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(_x))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
end = 0
_x = self
start = end
end += 6
(_x.fAvgElevation, _x.proability, _x.texture,) = _struct_f2b.unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self
buff.write(_struct_f2b.pack(_x.fAvgElevation, _x.proability, _x.texture))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(_x))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(_x))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
end = 0
_x = self
start = end
end += 6
(_x.fAvgElevation, _x.proability, _x.texture,) = _struct_f2b.unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
_struct_f2b = struct.Struct("<f2b")
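# Hedged round-trip sketch (not part of the generated code). The wire format is
# "<f2b": a little-endian float32 followed by two signed bytes, 6 bytes total.
# The field values below are arbitrary examples:
#   from io import BytesIO
#   buf = BytesIO()
#   Grid(fAvgElevation=1.5, proability=1, texture=2).serialize(buf)
#   len(buf.getvalue())                            # -> 6
#   Grid().deserialize(buf.getvalue()).texture     # -> 2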
| {
"content_hash": "06718c6c1c3256ef399482457b9a54ea",
"timestamp": "",
"source": "github",
"line_count": 128,
"max_line_length": 123,
"avg_line_length": 31.484375,
"alnum_prop": 0.6610421836228287,
"repo_name": "WuNL/mylaptop",
"id": "e805ba2e3c95de25d9b27ee809e83c66174914e4",
"size": "4030",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "install/lib/python2.7/dist-packages/mapping_dlut/msg/_Grid.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "189211"
},
{
"name": "Common Lisp",
"bytes": "153266"
},
{
"name": "Python",
"bytes": "98976"
},
{
"name": "Shell",
"bytes": "14892"
}
],
"symlink_target": ""
} |
"""
TutorialWorld - basic objects - Griatch 2011
This module holds all "dead" object definitions for
the tutorial world. Object-commands and -cmdsets
are also defined here, together with the object.
Objects:
TutorialObject
Readable
Climbable
Obelisk
LightSource
CrumblingWall
Weapon
WeaponRack
"""
from future.utils import listvalues
import random
from evennia import DefaultObject, DefaultExit, Command, CmdSet
from evennia import utils
from evennia.utils import search
from evennia.utils.spawner import spawn
# -------------------------------------------------------------
#
# TutorialObject
#
# The TutorialObject is the base class for all items
# in the tutorial. They have an attribute "tutorial_info"
# on them that the global tutorial command can use to extract
# interesting behind-the-scenes information about the object.
#
# TutorialObjects may also be "reset". What the reset means
# is up to the object. It can be the resetting of the world
# itself, or the removal of an inventory item from a
# character's inventory when leaving the tutorial, for example.
#
# -------------------------------------------------------------
class TutorialObject(DefaultObject):
"""
This is the baseclass for all objects in the tutorial.
"""
def at_object_creation(self):
"""Called when the object is first created."""
super(TutorialObject, self).at_object_creation()
self.db.tutorial_info = "No tutorial info is available for this object."
def reset(self):
"""Resets the object, whatever that may mean."""
self.location = self.home
# -------------------------------------------------------------
#
# Readable - an object that can be "read"
#
# -------------------------------------------------------------
#
# Read command
#
class CmdRead(Command):
"""
Usage:
read [obj]
Read some text of a readable object.
"""
key = "read"
locks = "cmd:all()"
help_category = "TutorialWorld"
def func(self):
"""
Implements the read command. This simply looks for an
Attribute "readable_text" on the object and displays that.
"""
if self.args:
obj = self.caller.search(self.args.strip())
else:
obj = self.obj
if not obj:
return
# we want an attribute read_text to be defined.
readtext = obj.db.readable_text
if readtext:
string = "You read |C%s|n:\n %s" % (obj.key, readtext)
else:
string = "There is nothing to read on %s." % obj.key
self.caller.msg(string)
class CmdSetReadable(CmdSet):
"""
A CmdSet for readables.
"""
def at_cmdset_creation(self):
"""
Called when the cmdset is created.
"""
self.add(CmdRead())
class Readable(TutorialObject):
"""
    This simple object defines some attributes and adds a 'read' command set.
"""
def at_object_creation(self):
"""
Called when object is created. We make sure to set the needed
Attribute and add the readable cmdset.
"""
super(Readable, self).at_object_creation()
self.db.tutorial_info = "This is an object with a 'read' command defined in a command set on itself."
self.db.readable_text = "There is no text written on %s." % self.key
# define a command on the object.
self.cmdset.add_default(CmdSetReadable, permanent=True)
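# A hedged in-game creation sketch (not part of the tutorial itself); the key,
# location and text are invented, and create_object is Evennia's standard helper:
#   from evennia import create_object
#   book = create_object(Readable, key="dusty book", location=room)
#   book.db.readable_text = "The pages are filled with cramped handwriting."
#   # a character in the room can now use:  read dusty book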
# -------------------------------------------------------------
#
# Climbable object
#
# The climbable object works so that once climbed, it sets
# a flag on the climber to show that it was climbed. A simple
# command 'climb' handles the actual climbing. The memory
# of what was last climbed is used in a simple puzzle in the
# tutorial.
#
# -------------------------------------------------------------
class CmdClimb(Command):
"""
Climb an object
Usage:
climb <object>
This allows you to climb.
"""
key = "climb"
locks = "cmd:all()"
help_category = "TutorialWorld"
def func(self):
"""Implements function"""
if not self.args:
self.caller.msg("What do you want to climb?")
return
obj = self.caller.search(self.args.strip())
if not obj:
return
if obj != self.obj:
self.caller.msg("Try as you might, you cannot climb that.")
return
ostring = self.obj.db.climb_text
if not ostring:
ostring = "You climb %s. Having looked around, you climb down again." % self.obj.name
self.caller.msg(ostring)
# set a tag on the caller to remember that we climbed.
self.caller.tags.add("tutorial_climbed_tree", category="tutorial_world")
class CmdSetClimbable(CmdSet):
"""Climbing cmdset"""
def at_cmdset_creation(self):
"""populate set"""
self.add(CmdClimb())
class Climbable(TutorialObject):
"""
A climbable object. All that is special about it is that it has
the "climb" command available on it.
"""
def at_object_creation(self):
"""Called at initial creation only"""
self.cmdset.add_default(CmdSetClimbable, permanent=True)
# -------------------------------------------------------------
#
# Obelisk - a unique item
#
# The Obelisk is an object with a modified return_appearance method
# that causes it to look slightly different every time one looks at it.
# Since what you actually see is a part of a game puzzle, the act of
# looking also stores a key attribute on the looking object (different
# depending on which text you saw) for later reference.
#
# -------------------------------------------------------------
class Obelisk(TutorialObject):
"""
    This object changes its description randomly, and the description shown
    determines which "clue id" is stored on the Character for
future puzzles.
Important Attribute:
puzzle_descs (list): list of descriptions. One of these is
picked randomly when this object is looked at and its index
in the list is used as a key for to solve the puzzle.
"""
def at_object_creation(self):
"""Called when object is created."""
super(Obelisk, self).at_object_creation()
self.db.tutorial_info = "This object changes its desc randomly, and makes sure to remember which one you saw."
self.db.puzzle_descs = ["You see a normal stone slab"]
# make sure this can never be picked up
self.locks.add("get:false()")
def return_appearance(self, caller):
"""
This hook is called by the look command to get the description
of the object. We overload it with our own version.
"""
# randomly get the index for one of the descriptions
descs = self.db.puzzle_descs
clueindex = random.randint(0, len(descs) - 1)
# set this description, with the random extra
string = "The surface of the obelisk seem to waver, shift and writhe under your gaze, with " \
"different scenes and structures appearing whenever you look at it. "
self.db.desc = string + descs[clueindex]
# remember that this was the clue we got. The Puzzle room will
# look for this later to determine if you should be teleported
# or not.
caller.db.puzzle_clue = clueindex
# call the parent function as normal (this will use
# the new desc Attribute we just set)
return super(Obelisk, self).return_appearance(caller)
# -------------------------------------------------------------
#
# LightSource
#
# This object emits light. Once it has been turned on it
# cannot be turned off. When it burns out it will delete
# itself.
#
# This could be implemented using a single-repeat Script or by
# registering with the TickerHandler. We do it simpler by
# using the delay() utility function. This is very simple
# to use but does not survive a server @reload. Because of
# where the light matters (in the Dark Room where you can
# find new light sources easily), this is okay here.
#
# -------------------------------------------------------------
class CmdLight(Command):
"""
Creates light where there was none. Something to burn.
"""
key = "on"
aliases = ["light", "burn"]
# only allow this command if command.obj is carried by caller.
locks = "cmd:holds()"
help_category = "TutorialWorld"
def func(self):
"""
Implements the light command. Since this command is designed
to sit on a "lightable" object, we operate only on self.obj.
"""
if self.obj.light():
self.caller.msg("You light %s." % self.obj.key)
self.caller.location.msg_contents("%s lights %s!" % (self.caller, self.obj.key), exclude=[self.caller])
else:
self.caller.msg("%s is already burning." % self.obj.key)
class CmdSetLight(CmdSet):
"""CmdSet for the lightsource commands"""
key = "lightsource_cmdset"
# this is higher than the dark cmdset - important!
priority = 3
def at_cmdset_creation(self):
"""called at cmdset creation"""
self.add(CmdLight())
class LightSource(TutorialObject):
"""
This implements a light source object.
When burned out, the object will be deleted.
"""
def at_init(self):
"""
If this is called with the Attribute is_giving_light already
set, we know that the timer got killed by a server
reload/reboot before it had time to finish. So we kill it here
instead. This is the price we pay for the simplicity of the
non-persistent delay() method.
"""
if self.db.is_giving_light:
self.delete()
def at_object_creation(self):
"""Called when object is first created."""
super(LightSource, self).at_object_creation()
self.db.tutorial_info = "This object can be lit to create light. It has a timeout for how long it burns."
self.db.is_giving_light = False
self.db.burntime = 60 * 3 # 3 minutes
# this is the default desc, it can of course be customized
# when created.
self.db.desc = "A splinter of wood with remnants of resin on it, enough for burning."
# add the Light command
self.cmdset.add_default(CmdSetLight, permanent=True)
def _burnout(self):
"""
This is called when this light source burns out. We make no
use of the return value.
"""
# delete ourselves from the database
self.db.is_giving_light = False
try:
self.location.location.msg_contents("%s's %s flickers and dies." %
(self.location, self.key), exclude=self.location)
self.location.msg("Your %s flickers and dies." % self.key)
self.location.location.check_light_state()
except AttributeError:
try:
self.location.msg_contents("A %s on the floor flickers and dies." % self.key)
self.location.location.check_light_state()
except AttributeError:
# Mainly happens if we happen to be in a None location
pass
self.delete()
def light(self):
"""
Light this object - this is called by Light command.
"""
if self.db.is_giving_light:
return False
# burn for 3 minutes before calling _burnout
self.db.is_giving_light = True
# if we are in a dark room, trigger its light check
try:
self.location.location.check_light_state()
except AttributeError:
try:
# maybe we are directly in the room
self.location.check_light_state()
except AttributeError:
# we are in a None location
pass
finally:
# start the burn timer. When it runs out, self._burnout
# will be called. We store the deferred so it can be
# killed in unittesting.
self.deferred = utils.delay(60 * 3, self._burnout)
return True
# -------------------------------------------------------------
#
# Crumbling wall - unique exit
#
# This implements a simple puzzle exit that needs to be
# accessed with commands before one can get to traverse it.
#
# The puzzle-part is simply to move roots (that have
# presumably covered the wall) aside until a button for a
# secret door is revealed. The original position of the
# roots blocks the button, so they have to be moved to a certain
# position - when they have, the "press button" command
# is made available and the Exit is made traversable.
#
# -------------------------------------------------------------
# There are four roots - two horizontal and two vertically
# running roots. Each can have three positions: top/middle/bottom
# and left/middle/right respectively. There can be any number of
# roots hanging through the middle position, but only one each
# along the sides. The goal is to make the center position clear.
# (yes, it's really as simple as it sounds, just move the roots
# to each side to "win". This is just a tutorial, remember?)
#
# The ShiftRoot command depends on the root object having an
# Attribute root_pos (a dictionary) to describe the current
# position of the roots.
class CmdShiftRoot(Command):
"""
Shifts roots around.
Usage:
shift blue root left/right
shift red root left/right
shift yellow root up/down
shift green root up/down
"""
key = "shift"
aliases = ["shiftroot", "push", "pull", "move"]
# we only allow to use this command while the
# room is properly lit, so we lock it to the
# setting of Attribute "is_lit" on our location.
locks = "cmd:locattr(is_lit)"
help_category = "TutorialWorld"
def parse(self):
"""
Custom parser; split input by spaces for simplicity.
"""
self.arglist = self.args.strip().split()
def func(self):
"""
Implement the command.
blue/red - vertical roots
yellow/green - horizontal roots
"""
if not self.arglist:
self.caller.msg("What do you want to move, and in what direction?")
return
if "root" in self.arglist:
# we clean out the use of the word "root"
self.arglist.remove("root")
# we accept arguments on the form <color> <direction>
if not len(self.arglist) > 1:
self.caller.msg("You must define which colour of root you want to move, and in which direction.")
return
color = self.arglist[0].lower()
direction = self.arglist[1].lower()
# get current root positions dict
root_pos = self.obj.db.root_pos
if color not in root_pos:
self.caller.msg("No such root to move.")
return
# first, vertical roots (red/blue) - can be moved left/right
if color == "red":
if direction == "left":
root_pos[color] = max(-1, root_pos[color] - 1)
self.caller.msg("You shift the reddish root to the left.")
if root_pos[color] != 0 and root_pos[color] == root_pos["blue"]:
root_pos["blue"] += 1
self.caller.msg("The root with blue flowers gets in the way and is pushed to the right.")
elif direction == "right":
root_pos[color] = min(1, root_pos[color] + 1)
self.caller.msg("You shove the reddish root to the right.")
if root_pos[color] != 0 and root_pos[color] == root_pos["blue"]:
root_pos["blue"] -= 1
self.caller.msg("The root with blue flowers gets in the way and is pushed to the left.")
else:
self.caller.msg("You cannot move the root in that direction.")
elif color == "blue":
if direction == "left":
root_pos[color] = max(-1, root_pos[color] - 1)
self.caller.msg("You shift the root with small blue flowers to the left.")
if root_pos[color] != 0 and root_pos[color] == root_pos["red"]:
root_pos["red"] += 1
self.caller.msg("The reddish root is to big to fit as well, so that one falls away to the left.")
elif direction == "right":
root_pos[color] = min(1, root_pos[color] + 1)
self.caller.msg("You shove the root adorned with small blue flowers to the right.")
if root_pos[color] != 0 and root_pos[color] == root_pos["red"]:
root_pos["red"] -= 1
self.caller.msg("The thick reddish root gets in the way and is pushed back to the left.")
else:
self.caller.msg("You cannot move the root in that direction.")
# now the horizontal roots (yellow/green). They can be moved up/down
elif color == "yellow":
if direction == "up":
root_pos[color] = max(-1, root_pos[color] - 1)
self.caller.msg("You shift the root with small yellow flowers upwards.")
if root_pos[color] != 0 and root_pos[color] == root_pos["green"]:
root_pos["green"] += 1
self.caller.msg("The green weedy root falls down.")
elif direction == "down":
root_pos[color] = min(1, root_pos[color] + 1)
self.caller.msg("You shove the root adorned with small yellow flowers downwards.")
if root_pos[color] != 0 and root_pos[color] == root_pos["green"]:
root_pos["green"] -= 1
self.caller.msg("The weedy green root is shifted upwards to make room.")
else:
self.caller.msg("You cannot move the root in that direction.")
elif color == "green":
if direction == "up":
root_pos[color] = max(-1, root_pos[color] - 1)
self.caller.msg("You shift the weedy green root upwards.")
if root_pos[color] != 0 and root_pos[color] == root_pos["yellow"]:
root_pos["yellow"] += 1
self.caller.msg("The root with yellow flowers falls down.")
elif direction == "down":
root_pos[color] = min(1, root_pos[color] + 1)
self.caller.msg("You shove the weedy green root downwards.")
if root_pos[color] != 0 and root_pos[color] == root_pos["yellow"]:
root_pos["yellow"] -= 1
self.caller.msg("The root with yellow flowers gets in the way and is pushed upwards.")
else:
self.caller.msg("You cannot move the root in that direction.")
# we have moved the root. Store new position
self.obj.db.root_pos = root_pos
# Check victory condition
if listvalues(root_pos).count(0) == 0: # no roots in middle position
# This will affect the cmd: lock of CmdPressButton
self.obj.db.button_exposed = True
self.caller.msg("Holding aside the root you think you notice something behind it ...")
class CmdPressButton(Command):
"""
Presses a button.
"""
key = "press"
aliases = ["press button", "button", "push button"]
# only accessible if the button was found and there is light. This checks
# the Attribute button_exposed on the Wall object so that
# you can only push the button when the puzzle is solved. It also
# checks the is_lit Attribute on the location.
locks = "cmd:objattr(button_exposed) and objlocattr(is_lit)"
help_category = "TutorialWorld"
def func(self):
"""Implements the command"""
if self.caller.db.crumbling_wall_found_exit:
# we already pushed the button
self.caller.msg("The button folded away when the secret passage opened. You cannot push it again.")
return
# pushing the button
string = "You move your fingers over the suspicious depression, then gives it a " \
"decisive push. First nothing happens, then there is a rumble and a hidden " \
"|wpassage|n opens, dust and pebbles rumbling as part of the wall moves aside."
self.caller.msg(string)
string = "%s moves their fingers over the suspicious depression, then gives it a " \
"decisive push. First nothing happens, then there is a rumble and a hidden " \
"|wpassage|n opens, dust and pebbles rumbling as part of the wall moves aside."
self.caller.location.msg_contents(string % self.caller.key, exclude=self.caller)
self.obj.open_wall()
class CmdSetCrumblingWall(CmdSet):
"""Group the commands for crumblingWall"""
key = "crumblingwall_cmdset"
priority = 2
def at_cmdset_creation(self):
"""called when object is first created."""
self.add(CmdShiftRoot())
self.add(CmdPressButton())
class CrumblingWall(TutorialObject, DefaultExit):
"""
This is a custom Exit.
The CrumblingWall can be examined in various ways, but only if a
lit light source is in the room. The traversal itself is blocked
by a traverse: lock on the exit that only allows passage if a
certain attribute is set on the trying account.
Important attribute
destination - this property must be set to make this a valid exit
whenever the button is pushed (this hides it as an exit
until it actually is)
"""
def at_init(self):
"""
Called when object is recalled from cache.
"""
self.reset()
def at_object_creation(self):
"""called when the object is first created."""
super(CrumblingWall, self).at_object_creation()
self.aliases.add(["secret passage", "passage",
"crack", "opening", "secret door"])
# starting root positions. H1/H2 are the horizontally hanging roots,
# V1/V2 the vertically hanging ones. Each can have three positions:
# (-1, 0, 1) where 0 means the middle position. yellow/green are
        # horizontal roots and red/blue vertical; all may have value 0, but
        # never any other identical value.
self.db.root_pos = {"yellow": 0, "green": 0, "red": 0, "blue": 0}
# flags controlling the puzzle victory conditions
self.db.button_exposed = False
self.db.exit_open = False
# this is not even an Exit until it has a proper destination, and we won't assign
# that until it is actually open. Until then we store the destination here. This
# should be given a reasonable value at creation!
self.db.destination = "#2"
# we lock this Exit so that one can only execute commands on it
# if its location is lit and only traverse it once the Attribute
# exit_open is set to True.
self.locks.add("cmd:locattr(is_lit);traverse:objattr(exit_open)")
# set cmdset
self.cmdset.add(CmdSetCrumblingWall, permanent=True)
def open_wall(self):
"""
This method is called by the push button command once the puzzle
is solved. It opens the wall and sets a timer for it to reset
itself.
"""
# this will make it into a proper exit (this returns a list)
eloc = search.search_object(self.db.destination)
if not eloc:
self.caller.msg("The exit leads nowhere, there's just more stone behind it ...")
else:
self.destination = eloc[0]
self.db.exit_open = True
# start a 45 second timer before closing again. We store the deferred so it can be
# killed in unittesting.
self.deferred = utils.delay(45, self.reset)
def _translate_position(self, root, ipos):
"""Translates the position into words"""
rootnames = {"red": "The |rreddish|n vertical-hanging root ",
"blue": "The thick vertical root with |bblue|n flowers ",
"yellow": "The thin horizontal-hanging root with |yyellow|n flowers ",
"green": "The weedy |ggreen|n horizontal root "}
vpos = {-1: "hangs far to the |wleft|n on the wall.",
0: "hangs straight down the |wmiddle|n of the wall.",
1: "hangs far to the |wright|n of the wall."}
hpos = {-1: "covers the |wupper|n part of the wall.",
0: "passes right over the |wmiddle|n of the wall.",
1: "nearly touches the floor, near the |wbottom|n of the wall."}
if root in ("yellow", "green"):
string = rootnames[root] + hpos[ipos]
else:
string = rootnames[root] + vpos[ipos]
return string
def return_appearance(self, caller):
"""
This is called when someone looks at the wall. We need to echo the
current root positions.
"""
if self.db.button_exposed:
# we found the button by moving the roots
result = ["Having moved all the roots aside, you find that the center of the wall, "
"previously hidden by the vegetation, hid a curious square depression. It was maybe once "
"concealed and made to look a part of the wall, but with the crumbling of stone around it,"
"it's now easily identifiable as some sort of button."]
elif self.db.exit_open:
# we pressed the button; the exit is open
result = ["With the button pressed, a crack has opened in the root-covered wall, just wide enough "
"to squeeze through. A cold draft is coming from the hole and you get the feeling the "
"opening may close again soon."]
else:
# puzzle not solved yet.
result = ["The wall is old and covered with roots that here and there have permeated the stone. "
"The roots (or whatever they are - some of them are covered in small nondescript flowers) "
"crisscross the wall, making it hard to clearly see its stony surface. Maybe you could "
"try to |wshift|n or |wmove|n them.\n"]
# display the root positions to help with the puzzle
for key, pos in self.db.root_pos.items():
result.append("\n" + self._translate_position(key, pos))
self.db.desc = "".join(result)
# call the parent to continue execution (will use the desc we just set)
return super(CrumblingWall, self).return_appearance(caller)
def at_after_traverse(self, traverser, source_location):
"""
This is called after we traversed this exit. Cleans up and resets
the puzzle.
"""
        del traverser.db.crumbling_wall_found_button
del traverser.db.crumbling_wall_found_exit
self.reset()
def at_failed_traverse(self, traverser):
"""This is called if the account fails to pass the Exit."""
traverser.msg("No matter how you try, you cannot force yourself through %s." % self.key)
def reset(self):
"""
Called by tutorial world runner, or whenever someone successfully
traversed the Exit.
"""
self.location.msg_contents("The secret door closes abruptly, roots falling back into place.")
# reset the flags and remove the exit destination
self.db.button_exposed = False
self.db.exit_open = False
self.destination = None
# Reset the roots with some random starting positions for the roots:
start_pos = [{"yellow": 1, "green": 0, "red": 0, "blue": 0},
{"yellow": 0, "green": 0, "red": 0, "blue": 0},
{"yellow": 0, "green": 1, "red": -1, "blue": 0},
{"yellow": 1, "green": 0, "red": 0, "blue": 0},
{"yellow": 0, "green": 0, "red": 0, "blue": 1}]
self.db.root_pos = random.choice(start_pos)
# -------------------------------------------------------------
#
# Weapon - object type
#
# A weapon is necessary in order to fight in the tutorial
# world. A weapon (which here is assumed to be a bladed
# melee weapon for close combat) has three commands,
# stab, slash and defend. Weapons also have a property "magic"
# to determine if they are usable against certain enemies.
#
# Since Characters don't have special skills in the tutorial,
# we let the weapon itself determine how easy/hard it is
# to hit with it, and how much damage it can do.
#
# -------------------------------------------------------------
class CmdAttack(Command):
"""
Attack the enemy. Commands:
stab <enemy>
slash <enemy>
parry
    stab - (thrust) does a lot of damage but is harder to hit with.
    slash - is easier to land, but does not do as much damage.
parry - forgoes your attack but will make you harder to hit on next
enemy attack.
"""
# this is an example of implementing many commands as a single
# command class, using the given command alias to separate between them.
key = "attack"
aliases = ["hit", "kill", "fight", "thrust", "pierce", "stab", "slash", "chop", "parry", "defend"]
locks = "cmd:all()"
help_category = "TutorialWorld"
def func(self):
"""Implements the stab"""
cmdstring = self.cmdstring
if cmdstring in ("attack", "fight"):
string = "How do you want to fight? Choose one of 'stab', 'slash' or 'defend'."
self.caller.msg(string)
return
# parry mode
if cmdstring in ("parry", "defend"):
string = "You raise your weapon in a defensive pose, ready to block the next enemy attack."
self.caller.msg(string)
self.caller.db.combat_parry_mode = True
self.caller.location.msg_contents("%s takes a defensive stance" % self.caller, exclude=[self.caller])
return
if not self.args:
self.caller.msg("Who do you attack?")
return
target = self.caller.search(self.args.strip())
if not target:
return
if cmdstring in ("thrust", "pierce", "stab"):
hit = float(self.obj.db.hit) * 0.7 # modified due to stab
damage = self.obj.db.damage * 2 # modified due to stab
string = "You stab with %s. " % self.obj.key
tstring = "%s stabs at you with %s. " % (self.caller.key, self.obj.key)
ostring = "%s stabs at %s with %s. " % (self.caller.key, target.key, self.obj.key)
self.caller.db.combat_parry_mode = False
elif cmdstring in ("slash", "chop"):
            hit = float(self.obj.db.hit)  # unmodified due to slash
            damage = self.obj.db.damage  # unmodified due to slash
string = "You slash with %s. " % self.obj.key
tstring = "%s slash at you with %s. " % (self.caller.key, self.obj.key)
ostring = "%s slash at %s with %s. " % (self.caller.key, target.key, self.obj.key)
self.caller.db.combat_parry_mode = False
else:
self.caller.msg("You fumble with your weapon, unsure of whether to stab, slash or parry ...")
self.caller.location.msg_contents("%s fumbles with their weapon." % self.caller, exclude=self.caller)
self.caller.db.combat_parry_mode = False
return
if target.db.combat_parry_mode:
# target is defensive; even harder to hit!
target.msg("|GYou defend, trying to avoid the attack.|n")
hit *= 0.5
if random.random() <= hit:
self.caller.msg(string + "|gIt's a hit!|n")
target.msg(tstring + "|rIt's a hit!|n")
self.caller.location.msg_contents(ostring + "It's a hit!", exclude=[target, self.caller])
# call enemy hook
if hasattr(target, "at_hit"):
# should return True if target is defeated, False otherwise.
target.at_hit(self.obj, self.caller, damage)
return
elif target.db.health:
target.db.health -= damage
else:
# sorry, impossible to fight this enemy ...
self.caller.msg("The enemy seems unaffected.")
return
else:
self.caller.msg(string + "|rYou miss.|n")
target.msg(tstring + "|gThey miss you.|n")
self.caller.location.msg_contents(ostring + "They miss.", exclude=[target, self.caller])
class CmdSetWeapon(CmdSet):
"""Holds the attack command."""
def at_cmdset_creation(self):
"""called at first object creation."""
self.add(CmdAttack())
class Weapon(TutorialObject):
"""
This defines a bladed weapon.
Important attributes (set at creation):
hit - chance to hit (0-1)
parry - chance to parry (0-1)
damage - base damage given (modified by hit success and
type of attack) (0-10)
"""
def at_object_creation(self):
"""Called at first creation of the object"""
super(Weapon, self).at_object_creation()
self.db.hit = 0.4 # hit chance
self.db.parry = 0.8 # parry chance
self.db.damage = 1.0
self.db.magic = False
self.cmdset.add_default(CmdSetWeapon, permanent=True)
def reset(self):
"""
When reset, the weapon is simply deleted, unless it has a place
to return to.
"""
if self.location.has_account and self.home == self.location:
self.location.msg_contents("%s suddenly and magically fades into nothingness, as if it was never there ..."
% self.key)
self.delete()
else:
self.location = self.home
# -------------------------------------------------------------
#
# Weapon rack - spawns weapons
#
# This is a spawner mechanism that creates custom weapons from a
# spawner prototype dictionary. Note that we only create a single typeclass
# (Weapon) yet customize all these different weapons using the spawner.
# The spawner dictionaries could easily sit in separate modules and be
# used to create unique and interesting variations of typeclassed
# objects.
#
# -------------------------------------------------------------
WEAPON_PROTOTYPES = {
"weapon": {
"typeclass": "evennia.contrib.tutorial_world.objects.Weapon",
"key": "Weapon",
"hit": 0.2,
"parry": 0.2,
"damage": 1.0,
"magic": False,
"desc": "A generic blade."},
"knife": {
"prototype": "weapon",
"aliases": "sword",
"key": "Kitchen knife",
"desc": "A rusty kitchen knife. Better than nothing.",
"damage": 3},
"dagger": {
"prototype": "knife",
"key": "Rusty dagger",
"aliases": ["knife", "dagger"],
"desc": "A double-edged dagger with a nicked edge and a wooden handle.",
"hit": 0.25},
"sword": {
"prototype": "weapon",
"key": "Rusty sword",
"aliases": ["sword"],
"desc": "A rusty shortsword. It has a leather-wrapped handle covered i food grease.",
"hit": 0.3,
"damage": 5,
"parry": 0.5},
"club": {
"prototype": "weapon",
"key": "Club",
"desc": "A heavy wooden club, little more than a heavy branch.",
"hit": 0.4,
"damage": 6,
"parry": 0.2},
"axe": {
"prototype": "weapon",
"key": "Axe",
"desc": "A woodcutter's axe with a keen edge.",
"hit": 0.4,
"damage": 6,
"parry": 0.2},
"ornate longsword": {
"prototype": "sword",
"key": "Ornate longsword",
"desc": "A fine longsword with some swirling patterns on the handle.",
"hit": 0.5,
"magic": True,
"damage": 5},
"warhammer": {
"prototype": "club",
"key": "Silver Warhammer",
"aliases": ["hammer", "warhammer", "war"],
"desc": "A heavy war hammer with silver ornaments. This huge weapon causes massive damage - if you can hit.",
"hit": 0.4,
"magic": True,
"damage": 8},
"rune axe": {
"prototype": "axe",
"key": "Runeaxe",
"aliases": ["axe"],
"hit": 0.4,
"magic": True,
"damage": 6},
"thruning": {
"prototype": "ornate longsword",
"key": "Broadsword named Thruning",
"desc": "This heavy bladed weapon is marked with the name 'Thruning'. It is very powerful in skilled hands.",
"hit": 0.6,
"parry": 0.6,
"damage": 7},
"slayer waraxe": {
"prototype": "rune axe",
"key": "Slayer waraxe",
"aliases": ["waraxe", "war", "slayer"],
"desc": "A huge double-bladed axe marked with the runes for 'Slayer'."
" It has more runic inscriptions on its head, which you cannot decipher.",
"hit": 0.7,
"damage": 8},
"ghostblade": {
"prototype": "ornate longsword",
"key": "The Ghostblade",
"aliases": ["blade", "ghost"],
"desc": "This massive sword is large as you are tall, yet seems to weigh almost nothing."
" It's almost like it's not really there.",
"hit": 0.9,
"parry": 0.8,
"damage": 10},
"hawkblade": {
"prototype": "ghostblade",
"key": "The Hawkblade",
"aliases": ["hawk", "blade"],
"desc": "The weapon of a long-dead heroine and a more civilized age,"
" the hawk-shaped hilt of this blade almost has a life of its own.",
"hit": 0.85,
"parry": 0.7,
"damage": 11}
}
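# Hedged illustration of using the prototypes directly with the spawner, the
# same call produce_weapon() makes below; "dagger" is one of the keys above and
# inherits its damage value from the "knife" prototype:
#   wpn = spawn(WEAPON_PROTOTYPES["dagger"],
#               prototype_parents=WEAPON_PROTOTYPES)[0]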
class CmdGetWeapon(Command):
"""
Usage:
get weapon
This will try to obtain a weapon from the container.
"""
key = "get weapon"
aliases = "get weapon"
locks = "cmd:all()"
help_category = "TutorialWorld"
def func(self):
"""
Get a weapon from the container. It will
itself handle all messages.
"""
self.obj.produce_weapon(self.caller)
class CmdSetWeaponRack(CmdSet):
"""
The cmdset for the rack.
"""
key = "weaponrack_cmdset"
def at_cmdset_creation(self):
"""Called at first creation of cmdset"""
self.add(CmdGetWeapon())
class WeaponRack(TutorialObject):
"""
This object represents a weapon store. When people use the
"get weapon" command on this rack, it will produce one
random weapon from among those registered to exist
on it. This will also set a property on the character
to make sure they can't get more than one at a time.
Attributes to set on this object:
available_weapons: list of prototype-keys from
WEAPON_PROTOTYPES, the weapons available in this rack.
no_more_weapons_msg - error message to return to accounts
who already got one weapon from the rack and tries to
grab another one.
"""
def at_object_creation(self):
"""
called at creation
"""
self.cmdset.add_default(CmdSetWeaponRack, permanent=True)
self.db.rack_id = "weaponrack_1"
# these are prototype names from the prototype
# dictionary above.
self.db.get_weapon_msg = "You find |c%s|n."
        self.db.no_more_weapons_msg = "You find nothing else of use."
self.db.available_weapons = ["knife", "dagger",
"sword", "club"]
def produce_weapon(self, caller):
"""
This will produce a new weapon from the rack,
assuming the caller hasn't already gotten one. When
doing so, the caller will get Tagged with the id
of this rack, to make sure they cannot keep
pulling weapons from it indefinitely.
"""
rack_id = self.db.rack_id
if caller.tags.get(rack_id, category="tutorial_world"):
caller.msg(self.db.no_more_weapons_msg)
else:
prototype = random.choice(self.db.available_weapons)
# use the spawner to create a new Weapon from the
# spawner dictionary, tag the caller
wpn = spawn(WEAPON_PROTOTYPES[prototype], prototype_parents=WEAPON_PROTOTYPES)[0]
caller.tags.add(rack_id, category="tutorial_world")
wpn.location = caller
caller.msg(self.db.get_weapon_msg % wpn.key)
| {
"content_hash": "161b13d9f3c369a722025ccd5f0fed42",
"timestamp": "",
"source": "github",
"line_count": 1079,
"max_line_length": 119,
"avg_line_length": 37.76367006487489,
"alnum_prop": 0.5837239551378015,
"repo_name": "feend78/evennia",
"id": "3859de7d2d0d318426c68ba2129620f299224bcc",
"size": "40747",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "evennia/contrib/tutorial_world/objects.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "42859"
},
{
"name": "Emacs Lisp",
"bytes": "2734"
},
{
"name": "HTML",
"bytes": "20118"
},
{
"name": "JavaScript",
"bytes": "32388"
},
{
"name": "Python",
"bytes": "2734770"
},
{
"name": "Shell",
"bytes": "4237"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
import django.db.models.deletion
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('wagtailcore', '0029_unicode_slugfield_dj19'),
('events', '0017_auto_20160922_2000'),
]
operations = [
migrations.CreateModel(
name='EventsArchivePage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.AlterField(
model_name='eventpage',
name='finish',
field=models.DateTimeField(default=datetime.datetime(2016, 9, 22, 21, 59, 53, 179554, tzinfo=utc)),
),
migrations.AlterField(
model_name='eventpage',
name='signup_close',
field=models.DateTimeField(default=datetime.datetime(2016, 9, 22, 21, 59, 53, 179630, tzinfo=utc)),
),
]
| {
"content_hash": "74fd0c4eeef27b7b00d1a7b00b6b1c38",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 191,
"avg_line_length": 33.21621621621622,
"alnum_prop": 0.5947925142392189,
"repo_name": "davidjrichardson/uwcs-zarya",
"id": "1bb7987d29636b51901706a7ddc82ec1fd77b71d",
"size": "1302",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "events/migrations/0018_auto_20160922_2059.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "77041"
},
{
"name": "Python",
"bytes": "162015"
},
{
"name": "SCSS",
"bytes": "54876"
}
],
"symlink_target": ""
} |
import copy
import os
from jetee.base.config_factory import AnsiblePreTaskConfigFactory
from jetee.runtime.configuration import project_configuration
class PipRequirementsAnsiblePreTaskConfigFactory(AnsiblePreTaskConfigFactory):
template = {
u'name': u'Install PIP requirements',
u'pip': {
u'chdir': u'',
u'requirements': u'',
}
}
def get_config(self, parent):
project = parent
        template = copy.deepcopy(self.template)  # deep copy so the shared class-level template dict is not mutated
template[u'pip'][u'chdir'] = os.path.join(project.location, project_configuration.get_project_name())
template[u'pip'][u'requirements'] = project.requirements
return [template] | {
"content_hash": "fff44cf7d18573d7bca94476d7919b0a",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 109,
"avg_line_length": 32.285714285714285,
"alnum_prop": 0.6651917404129793,
"repo_name": "WhackoJacko/Jetee",
"id": "bbd86c0b42a209e82d25656737871553087e1cc3",
"size": "678",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jetee/common/config_factories/project/pip.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "82508"
}
],
"symlink_target": ""
} |
""" This document contain the responsible
methods to write and parse the GrAF files.
The parser use the ContentHandler from
SAX Xml module.
"""
from __future__ import absolute_import, unicode_literals
import abc
import codecs
import os
from xml.etree.ElementTree import tostring
from xml.dom import minidom
import graf
# GrAF ID's separator
GRAFSEPARATOR = ".."
(TEXT, AUDIO, VIDEO, NONE) = ("text", "audio", "video", "none")
class Tier:
"""A list of tiers.
The name is the tier unique identification.
"""
__slots__ = ['name', 'annotation_space']
def __init__(self, name, annotation_space=None):
self.name = name
self.annotation_space = annotation_space
class Annotation:
"""A list of annotations.
The id is the annotation identification, the
value the annotation value and the features are
a dict type of values containing the annotation
features.
"""
__slots__ = ['id', 'value', 'features']
def __init__(self, id, value, features=None):
self.value = value
self.id = id
self.features = features
class NodeId:
"""A list of nodes using a specific format.
The prefix is the node type and the index
the identification number.
"""
__slots__ = ['prefix', 'index']
def __init__(self, prefix, index):
self.prefix = prefix
self.index = str(index)
def to_str(self):
return "{0}{1}n{2}".format(self.prefix, GRAFSEPARATOR, self.index)
def str_edge(self):
return "e{0}".format(self.index)
def str_region(self):
return "{0}{1}r{2}".format(self.prefix, GRAFSEPARATOR, self.index)
class PrimaryData:
"""This class represents the primary data of an AnnotationGraph object.
"""
def __init__(self):
self.type = None
self.external_link = None
self.filename = None
self.content = None
class BaseParser(object):
"""This class is a base class to the
parser classes in order to create
GrAF objects.
This class contains some methods that must be
implemented other wise it will be raise a
exception error.
Although the methods that should be implemented
with properly code are the get_root_tiers,
get_child_tiers_for_tier and get_annotations_for_tier.
The method tier_has_regions and region_for_annotation
could simply return None or pass.
Raises
------
NotImplementedError
Method must be implemented.
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def get_root_tiers(self):
"""Method to get the root tiers. The root tiers
are defined by the parser when the method is
implemented.
Returns
-------
list : array-like
List of tiers type.
"""
raise NotImplementedError("Method must be implemented")
@abc.abstractmethod
def get_child_tiers_for_tier(self, tier):
"""Method that get the child tiers of a specific tier.
Parameters
----------
tier : object
Tier object.
Returns
-------
list : array-like
List of tiers type.
See also
--------
Tier
"""
raise NotImplementedError("Method must be implemented")
@abc.abstractmethod
def get_annotations_for_tier(self, tier, annotation_parent=None):
"""Method that get all the annotations for a specific tier.
The annotations can be filtered using an annotation parent.
Parameters
----------
tier : object
Tier object.
annotation_parent : object
Annotation object.
Returns
-------
list : array-like
List of annotations type.
See also
--------
Tier, Annotation
"""
raise NotImplementedError("Method must be implemented")
@abc.abstractmethod
def tier_has_regions(self, tier):
"""Method to verify if a tier has regions.
Parameters
----------
tier : object
Tier object.
Returns
-------
has_region : bool
A true or false variable.
See also
--------
Tier
"""
raise NotImplementedError("Method must be implemented")
@abc.abstractmethod
def region_for_annotation(self, annotation):
"""Method to get the regions values of a specific
annotation.
Parameters
----------
annotation : object
Annotation object.
Returns
-------
regions : tuple
A tuple with the two regions.
See also
--------
Annotation
"""
raise NotImplementedError("Method must be implemented")
@abc.abstractmethod
def get_primary_data(self):
"""Method to get the primary data of the GrAF file.
Returns
-------
primaryData : object
Object type of PrimaryData class.
See also
--------
PrimaryData
"""
raise NotImplementedError("Method must be implemented")
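# A minimal sketch of a concrete parser (illustrative only; the tier name, the
# annotations and the primary data below are invented, and a real parser would
# read them from a file):
class SimpleListParser(BaseParser):
    """Example parser exposing a single root tier without regions."""
    def get_root_tiers(self):
        return [Tier("utterance")]
    def get_child_tiers_for_tier(self, tier):
        return []
    def get_annotations_for_tier(self, tier, annotation_parent=None):
        return [Annotation("a1", "hello"), Annotation("a2", "world")]
    def tier_has_regions(self, tier):
        return False
    def region_for_annotation(self, annotation):
        return None
    def get_primary_data(self):
        primary_data = PrimaryData()
        primary_data.type = TEXT
        primary_data.content = "hello world"
        return primary_data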
class BaseWriter(object):
"""This class is a base class to the
writer classes in order to create
files from GrAF objects.
This class contains some methods that must be
implemented other wise it will be raise a
exception error.
Raises
------
NotImplementedError
Method must be implemented.
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def write(self, outputfile, converter):
"""Method that will write the GrAF object into
a specific format.
Parameters
----------
outputfile : str
The filename of the output file. The filename should be the header
file for GrAF with the extension ".hdr".
converter : Converter or AnnotationGraph
            A converter object. The converter object contains the data that
            will be used for output. All writers need at least a GrAF graph
and the tier hierarchy, some will also need the primary data object.
"""
raise NotImplementedError("Method must be implemented")
class GrAFConverter:
"""This class handles the conversion of different file formats into GrAF
objects and back again. It uses a sub-class of BaseParser to get the
annotations and the tier hierarchies. A sub-class of BaseWriter is used
to write back the files. Please be aware that meta-data might get lost
if you write to a file format from another one. This depends on whether the
output file format can store all meta-data from the input file format.
In any case all the data and annotation will be stored.
"""
def __init__(self, parser, writer=None):
self.parser = parser
self.writer = writer
self.graf = graf.Graph()
self.tier_hierarchies = []
self.meta_information = None
self.primary_data = None
self.original_file = None
def write(self, outputfile):
if self.writer:
self.writer.write(outputfile, self)
def parse(self):
"""This method will be the responsible to transform
the parser into a GrAF object. This method also
retrieves the tiers hierarchies.
"""
self._tiers_parent_list = []
self.root_tiers = []
tiers_hierarchy_map = {}
for tier in self.parser.get_root_tiers():
self.root_tiers.append(tier.name)
self._convert_tier(tier, None, None)
i = 0
for t in self._tiers_parent_list:
if t[1] is None:
i += 1
tiers_hierarchy_map[str(i)] = [t[0]]
else:
self._append_tier_to_hierarchy(tiers_hierarchy_map[str(i)],
t[1], t[0])
for i, hierarchy in tiers_hierarchy_map.items():
self.tier_hierarchies.append(hierarchy)
if hasattr(self.parser, 'meta_information'):
self.meta_information = self.parser.meta_information
self.primary_data = self.parser.get_primary_data()
if hasattr(self.parser, 'filepath') and \
isinstance(self.parser.filepath, str):
self.original_file = os.path.abspath(self.parser.filepath)
def _convert_tier(self, tier, parent_node, parent_annotation,
parent_prefix=None):
child_tiers = self.parser.get_child_tiers_for_tier(tier)
if tier.annotation_space is None:
prefix = tier.name
annotation_name = prefix
else:
annotation_name = tier.annotation_space.replace(' ', '_')
prefix = "{0}{1}{2}".format(annotation_name, GRAFSEPARATOR,
tier.name)
has_regions = False
if self.parser.tier_has_regions(tier):
has_regions = True
self._add_tier_in_hierarchy_list(prefix, parent_prefix)
annotations = self.parser.get_annotations_for_tier(tier,
parent_annotation)
for annotation in annotations:
regions = None
if has_regions:
regions = self.parser.region_for_annotation(annotation)
node_id = NodeId(prefix, annotation.id)
self._add_node(node_id, annotation, annotation_name, regions,
parent_node)
self._add_root_nodes(prefix, node_id)
if child_tiers:
for t in child_tiers:
self._convert_tier(t, node_id, annotation, prefix)
if annotations == [] and child_tiers:
for t in child_tiers:
self._convert_tier(t, None, None, prefix)
def _add_tier_in_hierarchy_list(self, prefix, parent_prefix):
if not (prefix, parent_prefix) in self._tiers_parent_list:
self._tiers_parent_list.append((prefix, parent_prefix))
def _append_tier_to_hierarchy(self, tiers_list, parent_tier, tier):
for t in tiers_list:
if isinstance(t, list):
self._append_tier_to_hierarchy(t, parent_tier, tier)
else:
if t == parent_tier:
tiers_list.append([tier])
def _add_node(self, node_id, annotation, annotation_name, regions,
from_node_id):
self._add_node_to_graph(node_id, regions, from_node_id)
self._add_graf_annotation(annotation_name, annotation.id, node_id,
annotation.value, annotation.features)
def _add_root_nodes(self, prefix, node_id):
if prefix in self.root_tiers:
self.graf.header.roots.append(node_id.to_str())
def _add_graf_annotation(self, annotation_name, annotation_id,
annotation_ref, annotation_value, annotation_features=None):
annotation = graf.Annotation(annotation_name, annotation_features,
annotation_id)
if annotation_value is not None:
annotation.features['annotation_value'] = annotation_value
self.graf.nodes[annotation_ref.to_str()].annotations.add(annotation)
if annotation_name in self.graf.annotation_spaces:
#if annotation not in self.graf.annotation_spaces[annotation_name]:
self.graf.annotation_spaces[annotation_name].add(annotation)
else:
annotation_space = graf.AnnotationSpace(annotation_name)
annotation_space.add(annotation)
self.graf.annotation_spaces.add(annotation_space)
def _add_node_to_graph(self, node_id, regions=None,
from_node_id=None):
node = graf.Node(node_id.to_str())
if from_node_id is not None:
edge_id = node_id.str_edge()
self.graf.create_edge(self.graf.nodes[from_node_id.to_str()], node,
edge_id)
if regions is not None:
region_id = node_id.str_region()
region = graf.Region(region_id, *regions)
node.add_region(region)
self.graf.regions.add(region)
self.graf.nodes.add(node)
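# A hedged usage sketch (illustrative only): feed any concrete BaseParser
# subclass, e.g. the SimpleListParser sketch above, into the converter and
# inspect the result:
#   converter = GrAFConverter(SimpleListParser())
#   converter.parse()
#   converter.graf               # populated graf.Graph
#   converter.tier_hierarchies   # e.g. [["utterance"]]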
class Writer(BaseWriter):
def __init__(self, **kwargs):
self.tier_hierarchies = None
self.meta_information = None
self.standoffheader = graf.StandoffHeader(**kwargs)
def _flatten_hierarchy_elements(self, elements):
"""Flat the elements appended to a new list of elements.
Parameters
----------
elements : array_like
An array of string values.
Returns
-------
flat_elements : array_like
An array of flattened `elements`.
"""
flat_elements = []
for e in elements:
if type(e) is list:
flat_elements.extend(self._flatten_hierarchy_elements(e))
else:
flat_elements.append(e)
return flat_elements
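    # Illustrative example (not part of the original module): given a nested
    # hierarchy such as ['utterance', ['word', ['morpheme']]], the method
    # above returns the flat list ['utterance', 'word', 'morpheme'].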
def write(self, outputfile, ag):
"""Writes an AnnotationGraph object as GrAF files.
Parameters
----------
outputfile : str
The filename of the output file. The filename should be the header
file for GrAF with the extension ".hdr".
ag : poioapi.annotationgraph.AnnotationGraph
            An AnnotationGraph object. The AG object contains the data that
            will be used for output.
"""
(basedirname, _) = os.path.splitext(outputfile)
self._get_parents(ag.tier_hierarchies)
standoffrenderer = graf.StandoffHeaderRenderer("{0}.hdr".format(
basedirname))
for tier_name in self._flatten_hierarchy_elements(
ag.tier_hierarchies):
annotation_space = tier_name.split(GRAFSEPARATOR)[0]
out_graf = graf.Graph()
renderer = graf.GrafRenderer("{0}-{1}.xml".format(
basedirname, annotation_space
))
out_graf.nodes = [n for n in ag.graf.nodes
if n.id.startswith(tier_name)]
out_graf.edges = [e for e in ag.graf.edges
if e.to_node.id.startswith(tier_name)]
out_graf.regions = [r for r in ag.graf.regions
if r.id.startswith(tier_name)]
out_graf.annotation_spaces.add(graf.AnnotationSpace(
annotation_space))
out_graf.header.add_dependency(self._parent[tier_name])
out_graf = self._add_root_nodes(ag.graf, annotation_space,
out_graf)
renderer.render(out_graf)
basename = os.path.basename(basedirname)
self.standoffheader.datadesc.add_annotation(
"{0}-{1}.xml".format(basename, annotation_space),
annotation_space)
self._add_primary_data(ag.primary_data, basedirname)
standoffrenderer.render(self.standoffheader)
self._generate_metafile(basedirname, ag.meta_information)
def _add_root_nodes(self, graph, annotation_space, out_graf):
for root in graph.header.roots:
if annotation_space in root:
out_graf.header.roots.append(root)
return out_graf
def _get_parents(self, tier_hierarchies):
self._parent = {}
for h in tier_hierarchies:
self._get_hierarchy_parents(h, None)
def _get_hierarchy_parents(self, hierarchy, parent):
for i, h in enumerate(hierarchy):
if isinstance(h, list):
self._get_hierarchy_parents(h, parent)
else:
self._parent[h] = parent
                if i == 0:
parent = h.split(GRAFSEPARATOR)[0]
def _add_primary_data(self, primary_data, basedirname):
if primary_data.external_link:
loc = primary_data.external_link
elif primary_data.content:
loc = self._create_raw_txt_file(primary_data.content, basedirname)
elif primary_data.filename:
loc = primary_data.filename
self.standoffheader.datadesc.primaryData = {'loc': loc,
'f.id': primary_data.type}
def _create_raw_txt_file(self, content, basedirname):
filename = "{0}.txt".format(os.path.splitext(basedirname)[0])
file = os.path.abspath(filename)
f = codecs.open(file, 'w', 'utf-8')
f.write(content)
f.close()
return os.path.basename(filename)
def _generate_metafile(self, basedirname, meta_information=None):
"""Generate a metafile with all the extra information
extracted from a file when it is parsed.
Parameters
----------
basedirname : str
            Base name of the input file.
meta_information: ElementTree
ElementTree with the extra information.
"""
if meta_information is not None:
out = open("{0}-extinfo.xml".format(basedirname), "wb")
doc = minidom.parseString(tostring(meta_information,
encoding="utf-8"))
out.write(doc.toprettyxml(encoding='utf-8'))
            out.close()
| {
"content_hash": "5c80901227ccb27bdf6b050a25d242cf",
"timestamp": "",
"source": "github",
"line_count": 577,
"max_line_length": 80,
"avg_line_length": 30.85095320623917,
"alnum_prop": 0.5675523846974889,
"repo_name": "cidles/poio-api",
"id": "c1167c7d4a924ca9006ec14d6bb82e5d81b8090d",
"size": "18028",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/poioapi/io/graf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "292243"
},
{
"name": "TeX",
"bytes": "4281"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import time
from copy import deepcopy
from pymanopt.solvers.linesearch import LineSearchBackTracking
from pymanopt.solvers.solver import Solver
class SteepestDescent(Solver):
"""
Steepest descent (gradient descent) algorithm based on
steepestdescent.m from the manopt MATLAB package.
"""
def __init__(self, linesearch=LineSearchBackTracking(), *args, **kwargs):
super(SteepestDescent, self).__init__(*args, **kwargs)
if linesearch is None:
self._linesearch = LineSearchBackTracking()
else:
self._linesearch = linesearch
self.linesearch = None
# Function to solve optimisation problem using steepest descent.
def solve(self, problem, x=None, reuselinesearch=False):
"""
Perform optimization using gradient descent with linesearch.
This method first computes the gradient (derivative) of obj
w.r.t. arg, and then optimizes by moving in the direction of
steepest descent (which is the opposite direction to the gradient).
Arguments:
- problem
Pymanopt problem setup using the Problem class, this must
have a .manifold attribute specifying the manifold to optimize
over, as well as a cost and enough information to compute
the gradient of that cost.
- x=None
Optional parameter. Starting point on the manifold. If none
then a starting point will be randomly generated.
- reuselinesearch=False
Whether to reuse the previous linesearch object. Allows to
use information from a previous solve run.
Returns:
- x
                Local minimum of obj, or, if the algorithm terminated before
                convergence, the point at which it terminated.
"""
man = problem.manifold
verbosity = problem.verbosity
objective = problem.cost
gradient = problem.grad
if not reuselinesearch or self.linesearch is None:
self.linesearch = deepcopy(self._linesearch)
linesearch = self.linesearch
# If no starting point is specified, generate one at random.
if x is None:
x = man.rand()
# Initialize iteration counter and timer
iter = 0
time0 = time.time()
if verbosity >= 2:
print(" iter\t\t cost val\t grad. norm")
self._start_optlog(extraiterfields=['gradnorm'],
solverparams={'linesearcher': linesearch})
while True:
# Calculate new cost, grad and gradnorm
cost = objective(x)
grad = gradient(x)
gradnorm = man.norm(x, grad)
iter = iter + 1
if verbosity >= 2:
print("%5d\t%+.16e\t%.8e" % (iter, cost, gradnorm))
if self._logverbosity >= 2:
self._append_optlog(iter, x, cost, gradnorm=gradnorm)
# Descent direction is minus the gradient
desc_dir = -grad
# Perform line-search
stepsize, x = linesearch.search(objective, man, x, desc_dir,
cost, -gradnorm**2)
stop_reason = self._check_stopping_criterion(
time0, stepsize=stepsize, gradnorm=gradnorm, iter=iter)
if stop_reason:
if verbosity >= 1:
print(stop_reason)
print('')
break
if self._logverbosity <= 0:
return x
else:
self._stop_optlog(x, objective(x), stop_reason, time0,
stepsize=stepsize, gradnorm=gradnorm,
iter=iter)
return x, self._optlog
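# ----------------------------------------------------------------------------
# Minimal usage sketch (illustration only, not part of this module). It
# assumes the classic pymanopt API (Problem, manifolds, autograd-based costs);
# exact constructor signatures may differ between pymanopt versions.
#
#     import autograd.numpy as np
#     from pymanopt import Problem
#     from pymanopt.manifolds import Sphere
#     from pymanopt.solvers import SteepestDescent
#
#     manifold = Sphere(3)                   # optimize over the unit sphere
#     cost = lambda X: np.sum(X ** 4)        # toy smooth cost function
#     problem = Problem(manifold=manifold, cost=cost)
#     Xopt = SteepestDescent().solve(problem)
# ----------------------------------------------------------------------------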
| {
"content_hash": "c1f43195b35584292e7ed690854ecf80",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 78,
"avg_line_length": 36.242990654205606,
"alnum_prop": 0.574780814853017,
"repo_name": "j-towns/pymanopt",
"id": "b8732db070e732ab76c22832ca7a73a461843824",
"size": "3878",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pymanopt/solvers/steepest_descent.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "210957"
}
],
"symlink_target": ""
} |
from pylab import plot,grid,title,subplot,xlabel,ylabel,text,subplots_adjust,fill_between,mean,connect,show
import shogun as sg
import util
util.set_title('ROC example')
util.DISTANCE=0.5
subplots_adjust(hspace=0.3)
pos=util.get_realdata(True)
neg=util.get_realdata(False)
features=util.get_realfeatures(pos, neg)
labels=util.get_labels()
# classifiers
gk = sg.GaussianKernel(features, features, 1.0)
svm = sg.LibSVM(1000.0, gk, labels)
svm.train()
lda = sg.LDA(1,features,labels)
lda.train()
## plot points
subplot(211)
plot(pos[0,:], pos[1,:], "r.")
plot(neg[0,:], neg[1,:], "b.")
grid(True)
title('Data',size=10)
# plot ROC for SVM
subplot(223)
ROC_evaluation = sg.ROCEvaluation()
ROC_evaluation.evaluate(svm.apply(),labels)
roc = ROC_evaluation.get_ROC()
print(roc)
plot(roc[0], roc[1])
fill_between(roc[0],roc[1],0,alpha=0.1)
text(mean(roc[0])/2,mean(roc[1])/2,'auROC = %.5f' % ROC_evaluation.get_auROC())
grid(True)
xlabel('FPR')
ylabel('TPR')
title('LibSVM (Gaussian kernel, C=%.3f) ROC curve' % svm.get_C1(),size=10)
# plot ROC for LDA
subplot(224)
ROC_evaluation.evaluate(lda.apply(),labels)
roc = ROC_evaluation.get_ROC()
plot(roc[0], roc[1])
fill_between(roc[0],roc[1],0,alpha=0.1)
text(mean(roc[0])/2,mean(roc[1])/2,'auROC = %.5f' % ROC_evaluation.get_auROC())
grid(True)
xlabel('FPR')
ylabel('TPR')
title('LDA (gamma=%.3f) ROC curve' % lda.get_gamma(),size=10)
connect('key_press_event', util.quit)
show()
| {
"content_hash": "b59e5e6e93f4c2a66bee7a9f045027ed",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 107,
"avg_line_length": 25.464285714285715,
"alnum_prop": 0.7005610098176718,
"repo_name": "shogun-toolbox/shogun",
"id": "be3bf06e0e73a83372756a346f5dc5a59452be9a",
"size": "1426",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "examples/undocumented/python/graphical/roc.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "64"
},
{
"name": "Batchfile",
"bytes": "615"
},
{
"name": "C",
"bytes": "12178"
},
{
"name": "C++",
"bytes": "10278013"
},
{
"name": "CMake",
"bytes": "196539"
},
{
"name": "Dockerfile",
"bytes": "2046"
},
{
"name": "GDB",
"bytes": "89"
},
{
"name": "HTML",
"bytes": "2060"
},
{
"name": "MATLAB",
"bytes": "8755"
},
{
"name": "Makefile",
"bytes": "244"
},
{
"name": "Python",
"bytes": "286749"
},
{
"name": "SWIG",
"bytes": "386485"
},
{
"name": "Shell",
"bytes": "7267"
}
],
"symlink_target": ""
} |
import sys
from autosar.parser.parser_base import ElementParser
import autosar.datatype
class DataTypeParser(ElementParser):
def __init__(self,version=3.0):
super().__init__(version)
if self.version >= 3.0 and self.version < 4.0:
self.switcher = {'ARRAY-TYPE': self.parseArrayType,
'BOOLEAN-TYPE': self.parseBooleanType,
'INTEGER-TYPE': self.parseIntegerType,
'REAL-TYPE': self.parseRealType,
'RECORD-TYPE': self.parseRecordType,
'STRING-TYPE': self.parseStringType}
elif self.version >= 4.0:
self.switcher = {
'DATA-CONSTR': self.parseDataConstraint,
'IMPLEMENTATION-DATA-TYPE': self.parseImplementationDataType,
'SW-BASE-TYPE': self.parseSwBaseType,
'DATA-TYPE-MAPPING-SET': self.parseDataTypeMappingSet,
'APPLICATION-PRIMITIVE-DATA-TYPE': self.parseApplicationPrimitiveDataType,
'APPLICATION-ARRAY-DATA-TYPE' : self.parseApplicationArrayDataType,
'APPLICATION-RECORD-DATA-TYPE': self.parseApplicationRecordDataTypeXML,
}
def getSupportedTags(self):
return self.switcher.keys()
def parseElement(self, xmlElement, parent = None):
parseFunc = self.switcher.get(xmlElement.tag)
if parseFunc is not None:
return parseFunc(xmlElement,parent)
else:
return None
def parseIntegerType(self,root,parent=None):
if self.version>=3.0:
name=root.find("./SHORT-NAME").text
minval = int(root.find("./LOWER-LIMIT").text)
maxval = int(root.find("./UPPER-LIMIT").text)
dataDefXML = root.find('./SW-DATA-DEF-PROPS')
dataType = autosar.datatype.IntegerDataType(name,minval,maxval)
self.parseDesc(root,dataType)
if dataDefXML is not None:
for elem in dataDefXML.findall('./*'):
if elem.tag=='COMPU-METHOD-REF':
dataType.compuMethodRef=self.parseTextNode(elem)
else:
raise NotImplementedError(elem.tag)
return dataType
def parseRecordType(self,root,parent=None):
if self.version>=3.0:
elements = []
name=root.find("./SHORT-NAME").text
for elem in root.findall('./ELEMENTS/RECORD-ELEMENT'):
elemName = self.parseTextNode(elem.find("./SHORT-NAME"))
elemTypeRef = self.parseTextNode(elem.find("./TYPE-TREF"))
elements.append(autosar.datatype.RecordTypeElement(elemName, elemTypeRef))
            dataType=autosar.datatype.RecordDataType(name,elements)
self.parseDesc(root,dataType)
return dataType
def parseArrayType(self,root,parent=None):
if self.version>=3.0:
name=root.find("./SHORT-NAME").text
length=int(root.find('ELEMENT/MAX-NUMBER-OF-ELEMENTS').text)
typeRef=root.find('ELEMENT/TYPE-TREF').text
dataType=autosar.datatype.ArrayDataType(name,typeRef,length)
self.parseDesc(root,dataType)
            return dataType
def parseBooleanType(self,root,parent=None):
if self.version>=3:
name=root.find("./SHORT-NAME").text
dataType=autosar.datatype.BooleanDataType(name)
self.parseDesc(root,dataType)
return dataType
def parseStringType(self,root,parent=None):
if self.version>=3.0:
name=root.find("./SHORT-NAME").text
length=int(root.find('MAX-NUMBER-OF-CHARS').text)
encoding=root.find('ENCODING').text
dataType=autosar.datatype.StringDataType(name,length,encoding)
self.parseDesc(root,dataType)
return dataType
def parseRealType(self,root,parent=None):
if self.version>=3.0:
name=root.find("./SHORT-NAME").text
elem = root.find("./LOWER-LIMIT")
if elem is not None:
minval = elem.text
minvalType = elem.attrib['INTERVAL-TYPE']
elem = root.find("./UPPER-LIMIT")
if elem is not None:
maxval = elem.text
maxvalType = elem.attrib['INTERVAL-TYPE']
hasNaNText = self.parseTextNode(root.find("./ALLOW-NAN"))
hasNaN = True if (hasNaNText is not None and hasNaNText == 'true') else False
encoding = self.parseTextNode(root.find("./ENCODING"))
dataType=autosar.datatype.RealDataType(name,minval,maxval,minvalType,maxvalType,hasNaN,encoding)
self.parseDesc(root,dataType)
return dataType
def parseDataConstraint(self, xmlRoot, parent=None):
assert (xmlRoot.tag == 'DATA-CONSTR')
rules=[]
constraintLevel = None
self.push()
for xmlElem in xmlRoot.findall('./*'):
if xmlElem.tag == 'DATA-CONSTR-RULES':
for xmlChildElem in xmlElem.findall('./DATA-CONSTR-RULE/*'):
if xmlChildElem.tag == 'INTERNAL-CONSTRS':
rules.append(self._parseDataConstraintRule(xmlChildElem, 'internalConstraint'))
elif xmlChildElem.tag == 'PHYS-CONSTRS':
rules.append(self._parseDataConstraintRule(xmlChildElem, 'physicalConstraint'))
elif xmlChildElem.tag == 'CONSTR-LEVEL':
constraintLevel = self.parseIntNode(xmlChildElem)
else:
raise NotImplementedError(xmlChildElem.tag)
else:
self.defaultHandler(xmlElem)
elem = autosar.datatype.DataConstraint(self.name, rules, constraintLevel, parent, self.adminData)
self.pop(elem)
return elem
def _parseDataConstraintRule(self, xmlElem, constraintType):
lowerLimitXML = xmlElem.find('./LOWER-LIMIT')
upperLimitXML = xmlElem.find('./UPPER-LIMIT')
lowerLimit = None if lowerLimitXML is None else self.parseNumberNode(lowerLimitXML)
upperLimit = None if upperLimitXML is None else self.parseNumberNode(upperLimitXML)
lowerLimitType = 'CLOSED'
upperLimitType = 'CLOSED'
key = 'INTERVAL-TYPE'
if lowerLimitXML is not None and key in lowerLimitXML.attrib and lowerLimitXML.attrib[key]=='OPEN':
lowerLimitType='OPEN'
if upperLimitXML is not None and key in upperLimitXML.attrib and upperLimitXML.attrib[key]=='OPEN':
upperLimitType='OPEN'
return {
'type': constraintType,
'lowerLimit': lowerLimit,
'upperLimit': upperLimit,
'lowerLimitType': lowerLimitType,
'upperLimitType': upperLimitType}
def parseImplementationDataType(self, xmlRoot, parent=None):
assert (xmlRoot.tag == 'IMPLEMENTATION-DATA-TYPE')
variantProps, typeEmitter, parseTextNode, dynamicArraySizeProfile, subElementsXML, symbolProps = None, None, None, None, None, None
self.push()
for xmlElem in xmlRoot.findall('./*'):
if xmlElem.tag == 'SW-DATA-DEF-PROPS':
variantProps = self.parseSwDataDefProps(xmlElem)
elif xmlElem.tag == 'TYPE-EMITTER':
typeEmitter = self.parseTextNode(xmlElem)
elif xmlElem.tag == 'DYNAMIC-ARRAY-SIZE-PROFILE':
dynamicArraySizeProfile = self.parseTextNode(xmlElem)
elif xmlElem.tag == 'SUB-ELEMENTS':
subElementsXML = xmlElem
elif xmlElem.tag == 'SYMBOL-PROPS':
symbolProps = self.parseSymbolProps(xmlElem)
else:
self.defaultHandler(xmlElem)
dataType = autosar.datatype.ImplementationDataType(
self.name,
variantProps = variantProps,
dynamicArraySizeProfile = dynamicArraySizeProfile,
typeEmitter = typeEmitter,
category = self.category,
parent = parent,
adminData = self.adminData
)
if subElementsXML is not None:
dataType.subElements = self.parseImplementationDataTypeSubElements(subElementsXML, dataType)
if symbolProps is not None:
dataType.symbolProps = symbolProps
self.pop(dataType)
return dataType
def parseImplementationDataTypeSubElements(self, xmlRoot, parent):
assert (xmlRoot.tag == 'SUB-ELEMENTS')
elements = []
for xmlElem in xmlRoot.findall('./*'):
if xmlElem.tag == 'IMPLEMENTATION-DATA-TYPE-ELEMENT':
elements.append(self.parseImplementationDataTypeElement(xmlElem, parent))
else:
raise NotImplementedError(xmlElem.tag)
return elements
def parseImplementationDataTypeElement(self, xmlRoot, parent):
assert (xmlRoot.tag == 'IMPLEMENTATION-DATA-TYPE-ELEMENT')
(arraySize, arraySizeSemantics, variants) = (None, None, None)
self.push()
for xmlElem in xmlRoot.findall('./*'):
if xmlElem.tag == 'SW-DATA-DEF-PROPS':
variants = self.parseSwDataDefProps(xmlElem)
elif xmlElem.tag == 'ARRAY-SIZE':
arraySize = self.parseTextNode(xmlElem)
elif xmlElem.tag == 'ARRAY-SIZE-SEMANTICS':
arraySizeSemantics = self.parseTextNode(xmlElem)
elif xmlElem.tag == 'SUB-ELEMENTS':
pass #implement later
else:
self.defaultHandler(xmlElem)
elem = autosar.datatype.ImplementationDataTypeElement(self.name, self.category, arraySize, arraySizeSemantics, variants, parent, self.adminData)
self.pop(elem)
return elem
def parseSwBaseType(self, xmlRoot, parent = None):
assert (xmlRoot.tag == 'SW-BASE-TYPE')
baseTypeSize, baseTypeEncoding, nativeDeclaration = None, None, None
self.push()
for xmlElem in xmlRoot.findall('./*'):
if xmlElem.tag == 'BASE-TYPE-SIZE':
baseTypeSize = self.parseTextNode(xmlElem)
elif xmlElem.tag == 'BASE-TYPE-ENCODING':
baseTypeEncoding = self.parseTextNode(xmlElem)
elif xmlElem.tag == 'NATIVE-DECLARATION':
nativeDeclaration = self.parseTextNode(xmlElem)
elif xmlElem.tag == 'MEM-ALIGNMENT':
pass #implement later
elif xmlElem.tag == 'BYTE-ORDER':
pass #implement later
else:
self.defaultHandler(xmlElem)
elem = autosar.datatype.SwBaseType(self.name, baseTypeSize, baseTypeEncoding, nativeDeclaration, self.category, parent, self.adminData)
self.pop(elem)
return elem
def parseDataTypeMappingSet(self, xmlRoot, parent = None):
assert (xmlRoot.tag == 'DATA-TYPE-MAPPING-SET')
(name, dataTypeMaps, adminData) = (None, None, None)
dataTypeMaps = []
modeRequestTypeMaps = []
for xmlElem in xmlRoot.findall('./*'):
if xmlElem.tag == 'ADMIN-DATA':
adminData=self.parseAdminDataNode(xmlElem)
elif xmlElem.tag == 'SHORT-NAME':
name = self.parseTextNode(xmlElem)
elif xmlElem.tag == 'DATA-TYPE-MAPS':
for xmlChild in xmlElem.findall('./*'):
if xmlChild.tag == 'DATA-TYPE-MAP':
dataTypeMap = self._parseDataTypeMapXML(xmlChild)
assert(dataTypeMap is not None)
dataTypeMaps.append(dataTypeMap)
else:
raise NotImplementedError(xmlElem.tag)
elif xmlElem.tag == 'MODE-REQUEST-TYPE-MAPS':
for xmlChild in xmlElem.findall('./*'):
if xmlChild.tag == 'MODE-REQUEST-TYPE-MAP':
modeRequestTypeMap = self._parseModeRequestTypeMapXML(xmlChild)
assert(modeRequestTypeMap is not None)
modeRequestTypeMaps.append(modeRequestTypeMap)
else:
raise NotImplementedError(xmlElem.tag)
else:
raise NotImplementedError(xmlElem.tag)
if (name is None):
raise RuntimeError('SHORT-NAME cannot be None')
elem = autosar.datatype.DataTypeMappingSet(name, parent, adminData)
for mapping in dataTypeMaps + modeRequestTypeMaps:
elem.add(mapping)
return elem
def parseApplicationPrimitiveDataType(self, xmlRoot, parent = None):
assert (xmlRoot.tag == 'APPLICATION-PRIMITIVE-DATA-TYPE')
variantProps = None
self.push()
for xmlElem in xmlRoot.findall('./*'):
if xmlElem.tag == 'SW-DATA-DEF-PROPS':
variantProps = self.parseSwDataDefProps(xmlElem)
else:
self.defaultHandler(xmlElem)
if (self.name is None):
raise RuntimeError('SHORT-NAME cannot be None')
elem = autosar.datatype.ApplicationPrimitiveDataType(self.name, variantProps, self.category, parent, self.adminData)
self.pop(elem)
return elem
def parseApplicationArrayDataType(self, xmlRoot, parent = None):
assert (xmlRoot.tag == 'APPLICATION-ARRAY-DATA-TYPE')
element, variantProps = None, None
self.push()
for xmlElem in xmlRoot.findall('./*'):
if xmlElem.tag == 'ELEMENT':
element = self.parseApplicationArrayElement(xmlElem)
elif xmlElem.tag == 'SW-DATA-DEF-PROPS':
variantProps = self.parseSwDataDefProps(xmlElem)
elif xmlElem.tag == 'DYNAMIC-ARRAY-SIZE-PROFILE':
pass #implement later
else:
self.defaultHandler(xmlElem)
if element is None:
raise RuntimeError('No <ELEMENT> object found')
elem = autosar.datatype.ApplicationArrayDataType(self.name, element, variantProps, self.category, parent, self.adminData)
self.pop(elem)
return elem
def parseApplicationArrayElement(self, xmlRoot):
assert (xmlRoot.tag == 'ELEMENT')
(typeRef, arraySize, sizeHandling, sizeSemantics) = (None, None, None, None)
self.push()
for xmlElem in xmlRoot.findall('./*'):
if xmlElem.tag == 'TYPE-TREF':
typeRef = self.parseTextNode(xmlElem)
elif xmlElem.tag == 'ARRAY-SIZE-HANDLING':
sizeHandling = self.parseTextNode(xmlElem)
elif xmlElem.tag == 'ARRAY-SIZE-SEMANTICS':
sizeSemantics = self.parseTextNode(xmlElem)
elif xmlElem.tag == 'MAX-NUMBER-OF-ELEMENTS':
arraySize = self.parseTextNode(xmlElem)
else:
self.defaultHandler(xmlElem)
elem = autosar.datatype.ApplicationArrayElement(self.name, typeRef, arraySize, sizeHandling, sizeSemantics, self.category, adminData = self.adminData)
self.pop(elem)
return elem
def parseApplicationRecordDataTypeXML(self, xmlRoot, parent = None):
assert (xmlRoot.tag == 'APPLICATION-RECORD-DATA-TYPE')
elementsXML, variantProps = None, None
self.push()
for xmlElem in xmlRoot.findall('./*'):
if xmlElem.tag == 'ELEMENTS':
elementsXML = xmlElem
elif xmlElem.tag == 'SW-DATA-DEF-PROPS':
variantProps = self.parseSwDataDefProps(xmlElem)
else:
self.defaultHandler(xmlElem)
elem = autosar.datatype.ApplicationRecordDataType(self.name, None, variantProps, self.category, parent, self.adminData)
if elementsXML is not None:
for xmlChild in elementsXML.findall('./'):
if xmlChild.tag == 'APPLICATION-RECORD-ELEMENT':
elem.elements.append(self._parseApplicationRecordElementXML(xmlChild, parent = elem))
else:
raise NotImplementedError(xmlChild.tag)
self.pop(elem)
return elem
def _parseApplicationRecordElementXML(self, xmlRoot, parent):
assert (xmlRoot.tag == 'APPLICATION-RECORD-ELEMENT')
typeRef, variantProps = None, None
self.push()
for xmlElem in xmlRoot.findall('./*'):
if xmlElem.tag == 'TYPE-TREF':
typeRef = self.parseTextNode(xmlElem)
elif xmlElem.tag == 'SW-DATA-DEF-PROPS':
variantProps = self.parseSwDataDefProps(xmlElem)
else:
self.defaultHandler(xmlElem)
elem = autosar.datatype.ApplicationRecordElement(self.name, typeRef, self.category, parent, self.adminData)
self.pop(elem)
return elem
def _parseDataTypeMapXML(self, xmlRoot):
assert (xmlRoot.tag == 'DATA-TYPE-MAP')
(applicationDataTypeRef, implementationDataTypeRef) = (None, None)
for xmlElem in xmlRoot.findall('./*'):
if xmlElem.tag == 'APPLICATION-DATA-TYPE-REF':
applicationDataTypeRef = self.parseTextNode(xmlElem)
elif xmlElem.tag == 'IMPLEMENTATION-DATA-TYPE-REF':
implementationDataTypeRef = self.parseTextNode(xmlElem)
else:
raise NotImplementedError(xmlElem.tag)
return autosar.datatype.DataTypeMap(applicationDataTypeRef, implementationDataTypeRef)
def _parseModeRequestTypeMapXML(self, xmlRoot):
assert (xmlRoot.tag == 'MODE-REQUEST-TYPE-MAP')
(modeDeclarationGroupRef, implementationDataTypeRef) = (None, None)
for xmlElem in xmlRoot.findall('./*'):
if xmlElem.tag == 'MODE-GROUP-REF':
modeDeclarationGroupRef = self.parseTextNode(xmlElem)
elif xmlElem.tag == 'IMPLEMENTATION-DATA-TYPE-REF':
implementationDataTypeRef = self.parseTextNode(xmlElem)
else:
raise NotImplementedError(xmlElem.tag)
return autosar.datatype.ModeRequestTypeMap(modeDeclarationGroupRef, implementationDataTypeRef)
class DataTypeSemanticsParser(ElementParser):
def __init__(self,version=3.0):
super().__init__(version)
def getSupportedTags(self):
return ['COMPU-METHOD']
def parseElement(self, xmlElement, parent = None):
if xmlElement.tag == 'COMPU-METHOD':
return self._parseCompuMethodXML(xmlElement, parent)
else:
return None
def _parseCompuMethodXML(self, xmlRoot, parent=None):
assert (xmlRoot.tag == 'COMPU-METHOD')
compuInternalToPhys, compuPhysToInternal, unitRef = None, None, None
self.push()
for xmlElem in xmlRoot.findall('./*'):
if xmlElem.tag == 'COMPU-INTERNAL-TO-PHYS':
compuInternalToPhys = self._parseComputationXML(xmlElem)
assert(compuInternalToPhys is not None)
elif xmlElem.tag == 'COMPU-PHYS-TO-INTERNAL':
compuPhysToInternal = self._parseComputationXML(xmlElem)
assert(compuPhysToInternal is not None)
elif xmlElem.tag == 'UNIT-REF':
unitRef = self.parseTextNode(xmlElem)
else:
self.defaultHandler(xmlElem)
compuMethod = autosar.datatype.CompuMethod(self.name, False, False, unitRef, self.category, parent, self.adminData)
self.pop(compuMethod)
if compuInternalToPhys is not None:
compuMethod.intToPhys = compuInternalToPhys
if compuPhysToInternal is not None:
compuMethod.physToInt = compuPhysToInternal
return compuMethod
def _parseComputationXML(self, xmlRoot):
assert (xmlRoot.tag == 'COMPU-INTERNAL-TO-PHYS') or (xmlRoot.tag == 'COMPU-PHYS-TO-INTERNAL')
computation = autosar.datatype.Computation()
for xmlElem in xmlRoot.findall('./*'):
if xmlElem.tag == 'COMPU-SCALES':
for compuScaleXml in xmlElem.findall('COMPU-SCALE'):
compuScale = self._parseCompuScaleXML(compuScaleXml)
computation.elements.append(compuScale)
elif xmlElem.tag == 'COMPU-DEFAULT-VALUE':
for xmlChild in xmlElem.findall('./*'):
if xmlChild.tag == 'V':
computation.defaultValue = self.parseNumberNode(xmlChild)
break
elif xmlChild.tag == 'VT':
computation.defaultValue = self.parseTextNode(xmlChild)
break
elif xmlChild.tag == 'VF':
computation.defaultValue = self.parseNumberNode(xmlChild)
break
else:
raise NotImplementedError(xmlChild.tag)
else:
raise NotImplementedError(xmlElem.tag)
return computation
def _parseCompuScaleXML(self, xmlRoot):
assert(xmlRoot.tag == 'COMPU-SCALE')
label, lowerLimit, upperLimit, lowerLimitType, upperLimitType, symbol, adminData = None, None, None, None, None, None, None
offset, numerator, denominator, textValue, mask = None, None, None, None, None
for xmlElem in xmlRoot.findall('./*'):
if xmlElem.tag == 'DESC':
pass #implement later
elif xmlElem.tag == 'SHORT-LABEL':
label = self.parseTextNode(xmlElem)
elif xmlElem.tag == 'LOWER-LIMIT':
lowerLimit = self.parseNumberNode(xmlElem)
if (self.version >= 4.0) and 'INTERVAL-TYPE' in xmlElem.attrib:
lowerLimitType = xmlElem.attrib['INTERVAL-TYPE']
elif xmlElem.tag == 'UPPER-LIMIT':
upperLimit = self.parseNumberNode(xmlElem)
if (self.version >= 4.0) and 'INTERVAL-TYPE' in xmlElem.attrib:
upperLimitType = xmlElem.attrib['INTERVAL-TYPE']
elif xmlElem.tag == 'COMPU-RATIONAL-COEFFS':
offset, numerator, denominator = self._parseCompuRationalXML(xmlElem)
elif xmlElem.tag == 'SYMBOL':
symbol = self.parseTextNode(xmlElem)
elif xmlElem.tag == 'ADMIN-DATA':
adminData = self.parseAdminDataNode(xmlElem)
elif xmlElem.tag == 'COMPU-CONST':
textValue = self.parseTextNode(xmlElem.find('./VT'))
elif xmlElem.tag == 'MASK':
mask = self.parseIntNode(xmlElem)
else:
raise NotImplementedError(xmlElem.tag)
compuScale = autosar.datatype.CompuScaleElement(lowerLimit, upperLimit, lowerLimitType, upperLimitType, label, symbol, adminData)
compuScale.offset = offset
compuScale.numerator = numerator
compuScale.denominator = denominator
compuScale.textValue = textValue
compuScale.mask = mask
return compuScale
def _parseCompuRationalXML(self, xmlRoot):
assert(xmlRoot.tag == 'COMPU-RATIONAL-COEFFS')
numXml = xmlRoot.findall('./COMPU-NUMERATOR/V')
denXml = xmlRoot.findall('./COMPU-DENOMINATOR/V')
assert(numXml is not None)
assert(len(numXml) == 2)
assert(denXml is not None)
if self.parseTextNode(numXml[0]):
offset = self.parseNumberNode(numXml[0])
else:
offset = 0
if self.parseTextNode(numXml[1]):
numerator = self.parseNumberNode(numXml[1])
else:
numerator = 1
denominator = self.parseNumberNode(denXml[0])
return offset, numerator, denominator
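    # Note (illustration only): in the common linear case these coefficients
    # encode physical = (offset + numerator * internal) / denominator, so e.g.
    # offset=0, numerator=1, denominator=64 maps a raw value of 128 to 2.0.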
class DataTypeUnitsParser(ElementParser):
def __init__(self,version=3.0):
super().__init__(version)
def getSupportedTags(self):
return ['UNIT']
def parseElement(self, xmlElement, parent = None):
if xmlElement.tag == 'UNIT':
return self._parseUnit(xmlElement, parent)
else:
return None
def _parseUnit(self, xmlRoot, parent=None):
assert (xmlRoot.tag == 'UNIT')
name = self.parseTextNode(xmlRoot.find("./SHORT-NAME"))
displayName = self.parseTextNode(xmlRoot.find("./DISPLAY-NAME"))
if self.version>=4.0:
factor = self.parseTextNode(xmlRoot.find("./FACTOR-SI-TO-UNIT"))
offset = self.parseTextNode(xmlRoot.find("./OFFSET-SI-TO-UNIT"))
else:
(factor,offset) = (None, None)
return autosar.datatype.Unit(name, displayName, factor, offset, parent)
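if __name__ == "__main__":
    # Minimal usage sketch (illustration only): feed a bare <UNIT> fragment
    # through DataTypeUnitsParser. The tag names mirror those read by
    # _parseUnit() above; a real AUTOSAR file wraps them in the usual
    # package structure, which is omitted here.
    import xml.etree.ElementTree as ET
    unit_xml = ET.fromstring(
        "<UNIT>"
        "<SHORT-NAME>KmPerHour</SHORT-NAME>"
        "<DISPLAY-NAME>km/h</DISPLAY-NAME>"
        "<FACTOR-SI-TO-UNIT>3.6</FACTOR-SI-TO-UNIT>"
        "<OFFSET-SI-TO-UNIT>0</OFFSET-SI-TO-UNIT>"
        "</UNIT>")
    unit = DataTypeUnitsParser(version=4.2).parseElement(unit_xml)
    print(unit)  # an autosar.datatype.Unit built from the fragment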
| {
"content_hash": "2114729f6973bed964718f69c96e84ef",
"timestamp": "",
"source": "github",
"line_count": 528,
"max_line_length": 158,
"avg_line_length": 47.58143939393939,
"alnum_prop": 0.5892608366835171,
"repo_name": "cogu/autosar",
"id": "5e5b34726b56d28dd624fc828ec672fc765350df",
"size": "25123",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "autosar/parser/datatype_parser.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "46"
},
{
"name": "Python",
"bytes": "1039000"
},
{
"name": "Shell",
"bytes": "445"
}
],
"symlink_target": ""
} |
from zeep import Client, xsd
from zeep.plugins import HistoryPlugin
from zeep.exceptions import Fault
import zeep.cache
import zeep.transports
from .mappings import query_type_mapping, cvd_field_mapping, layer_field_mapping, collection_field_mapping, pending_device_field_mapping, policy_field_mapping, volume_field_mapping
from .queries import _collect_query_results
from collections import namedtuple
class VmwareMirageClient():
def __init__(self, server, username, password, port=7443, cache=zeep.cache.InMemoryCache()):
transport = zeep.transports.Transport(cache=cache)
self.history = HistoryPlugin()
self.client = Client("https://{}:{}/mirageapi/MitService.svc?singleWsdl".format(server, port), plugins=[self.history], transport=transport)
self.username = username
self.password = password
login_response = self.client.service.Login(username=username, password=password)
self.query_factory = self.client.type_factory('vmware.mirage.mit.query')
self.type_factory = self.client.type_factory('vmware.mirage.mit.types')
def reauth(function):
def wrapper(self, *args, **kwargs):
try:
return function(self, *args, **kwargs)
except Fault as e:
if e.message == 'The session is not authenticated.':
login_response = self.client.service.Login(username=self.username, password=self.password)
return function(self, *args, **kwargs)
else:
raise(e)
return wrapper
@reauth
def get_cvds(self, by='NAME', value='', query_type='BEGINS_WITH'):
field = cvd_field_mapping[by]
cvds = _collect_query_results(
self,
field=field,
value=value,
query_type=query_type,
query_function=self.client.service.Cvd_Query
)
return cvds
@reauth
def get_cvd(self, id):
result = self.client.service.Cvd_Get(id=id)
return result
@reauth
def get_collection_cvds(self, collection_id, by='NAME', value='', query_type='BEGINS_WITH'):
field = cvd_field_mapping[by]
cvds = _collect_query_results(
self,
field=field,
value=value,
query_type=query_type,
query_function=self.client.service.CollectionCvd_Query,
collectionId=collection_id
)
return cvds
@reauth
def get_app_layers(self, by='NAME', value='', query_type='BEGINS_WITH'):
field = layer_field_mapping[by]
layers = _collect_query_results(
self,
field=field,
value=value,
query_type=query_type,
query_function=self.client.service.AppLayer_Query
)
return layers
@reauth
def get_base_layers(self, by='NAME', value='', query_type='BEGINS_WITH'):
field = layer_field_mapping[by]
layers = _collect_query_results(
self,
field=field,
value=value,
query_type=query_type,
query_function=self.client.service.BaseLayer_Query
)
return layers
@reauth
def get_collections(self, by='NAME', value='', query_type='BEGINS_WITH'):
field = collection_field_mapping[by]
collections = _collect_query_results(
self,
field=field,
value=value,
query_type=query_type,
query_function=self.client.service.Collection_Query
)
return collections
@reauth
def get_pending_devices(self, by='NAME', value='', query_type='BEGINS_WITH'):
field = pending_device_field_mapping[by]
pending_devices = _collect_query_results(
self,
field=field,
value=value,
query_type=query_type,
query_function=self.client.service.PendingDevice_Query
)
return pending_devices
@reauth
def get_policies(self, by='NAME', value='', query_type='BEGINS_WITH'):
field = policy_field_mapping[by]
policies = _collect_query_results(
self,
field=field,
value=value,
query_type=query_type,
query_function=self.client.service.Policy_Query
)
return policies
@reauth
def get_volumes(self, by='NAME', value='', query_type='BEGINS_WITH'):
field = volume_field_mapping[by]
volumes = _collect_query_results(
self,
field=field,
value=value,
query_type=query_type,
query_function=self.client.service.Volume_Query
)
return volumes
@reauth
def provision_pending_device(self, pending_device_id, policy, base_layer, app_layers, identity_info, volume_id, ignore_warnings=False):
pending_device = self.type_factory.ArrayOfId([self.type_factory.Id(pending_device_id)])
_policy = self.type_factory.ImageId(
self.type_factory.Id(policy.id),
self.type_factory.ImageVersion(policy.major_version, policy.minor_version)
)
_base_layer = self.type_factory.ImageId(
self.type_factory.Id(base_layer.id),
self.type_factory.ImageVersion(base_layer.major_version, base_layer.minor_version)
)
app_layers_list = [
self.type_factory.ImageId(
self.type_factory.Id(app.id),
self.type_factory.ImageVersion(app.major_version, app.minor_version)
)
for app in app_layers
]
app_layers_array = self.type_factory.ArrayOfImageId(app_layers_list)
_identity_info = self.type_factory.MachineIdentityInfo(
DomainMember=identity_info.domain_member,
DomainOrWorkgroupName=identity_info.domain_or_workgroup_name,
MachineName=identity_info.new_machine_name,
OU=identity_info.ou,
Password=identity_info.password,
User=identity_info.user
)
_volume_id = self.type_factory.Id(volume_id)
return self.client.service.PendingDevice_Provision(
pendingDevices=pending_device,
policyImageId=_policy,
baseLayerImageId=_base_layer,
appLayerImageIds=app_layers_array,
identityInfo=_identity_info,
volumeId=_volume_id,
ignoreWarnings=ignore_warnings
)
@reauth
def delete_cvd(self, id):
cvd_id = self.type_factory.Id(id)
delete_array = self.type_factory.ArrayOfId([cvd_id])
result = self.client.service.Cvd_Delete(cvdIds=delete_array)
return result
Policy = namedtuple('Policy', ['id', 'major_version', 'minor_version'])
AppLayer = namedtuple('AppLayer', ['id', 'major_version', 'minor_version'])
BaseLayer = namedtuple('BaseLayer', ['id', 'major_version', 'minor_version'])
IdentityInfo = namedtuple('IdentityInfo', ['domain_member', 'domain_or_workgroup_name', 'new_machine_name', 'ou', 'password', 'user'])
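# ----------------------------------------------------------------------------
# Hypothetical usage sketch (not part of this module); the server address and
# credentials below are placeholders:
#
#     client = VmwareMirageClient('mirage.example.com', 'admin', 'secret')
#     for device in client.get_pending_devices(by='NAME', value='LAB-'):
#         print(device)
#
# Provisioning a device would additionally need Policy/BaseLayer/AppLayer
# namedtuples, an IdentityInfo and a volume id, as accepted by
# provision_pending_device() above.
# ----------------------------------------------------------------------------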
| {
"content_hash": "4bb0e72038d40a657760589b4821028f",
"timestamp": "",
"source": "github",
"line_count": 207,
"max_line_length": 180,
"avg_line_length": 35.47826086956522,
"alnum_prop": 0.5879629629629629,
"repo_name": "jay-tuckey/python-vmwaremirage",
"id": "edc177d6755178663a767ee839202261d5d7fbd5",
"size": "7344",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vmwaremirage/core.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "22141"
}
],
"symlink_target": ""
} |
"""Support for RainMachine devices."""
import asyncio
from datetime import timedelta
from functools import partial
from regenmaschine import Client
from regenmaschine.controller import Controller
from regenmaschine.errors import RainMachineError
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
ATTR_ATTRIBUTION,
CONF_IP_ADDRESS,
CONF_PASSWORD,
CONF_PORT,
CONF_SSL,
)
from homeassistant.core import HomeAssistant, callback
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers import aiohttp_client, config_validation as cv
from homeassistant.helpers.update_coordinator import (
CoordinatorEntity,
DataUpdateCoordinator,
UpdateFailed,
)
from .const import (
CONF_ZONE_RUN_TIME,
DATA_CONTROLLER,
DATA_COORDINATOR,
DATA_PROGRAMS,
DATA_PROVISION_SETTINGS,
DATA_RESTRICTIONS_CURRENT,
DATA_RESTRICTIONS_UNIVERSAL,
DATA_ZONES,
DOMAIN,
LOGGER,
)
DATA_LISTENER = "listener"
DEFAULT_ATTRIBUTION = "Data provided by Green Electronics LLC"
DEFAULT_ICON = "mdi:water"
DEFAULT_SSL = True
DEFAULT_UPDATE_INTERVAL = timedelta(seconds=15)
CONFIG_SCHEMA = cv.deprecated(DOMAIN)
PLATFORMS = ["binary_sensor", "sensor", "switch"]
async def async_update_programs_and_zones(
hass: HomeAssistant, entry: ConfigEntry
) -> None:
"""Update program and zone DataUpdateCoordinators.
Program and zone updates always go together because of how linked they are:
programs affect zones and certain combinations of zones affect programs.
"""
await asyncio.gather(
*[
hass.data[DOMAIN][DATA_COORDINATOR][entry.entry_id][
DATA_PROGRAMS
].async_refresh(),
hass.data[DOMAIN][DATA_COORDINATOR][entry.entry_id][
DATA_ZONES
].async_refresh(),
]
)
async def async_setup(hass: HomeAssistant, config: dict) -> bool:
"""Set up the RainMachine component."""
hass.data[DOMAIN] = {DATA_CONTROLLER: {}, DATA_COORDINATOR: {}, DATA_LISTENER: {}}
return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up RainMachine as config entry."""
hass.data[DOMAIN][DATA_COORDINATOR][entry.entry_id] = {}
entry_updates = {}
if not entry.unique_id:
# If the config entry doesn't already have a unique ID, set one:
entry_updates["unique_id"] = entry.data[CONF_IP_ADDRESS]
if CONF_ZONE_RUN_TIME in entry.data:
# If a zone run time exists in the config entry's data, pop it and move it to
# options:
data = {**entry.data}
entry_updates["data"] = data
entry_updates["options"] = {
**entry.options,
CONF_ZONE_RUN_TIME: data.pop(CONF_ZONE_RUN_TIME),
}
if entry_updates:
hass.config_entries.async_update_entry(entry, **entry_updates)
websession = aiohttp_client.async_get_clientsession(hass)
client = Client(session=websession)
try:
await client.load_local(
entry.data[CONF_IP_ADDRESS],
entry.data[CONF_PASSWORD],
port=entry.data[CONF_PORT],
ssl=entry.data.get(CONF_SSL, DEFAULT_SSL),
)
except RainMachineError as err:
LOGGER.error("An error occurred: %s", err)
raise ConfigEntryNotReady from err
# regenmaschine can load multiple controllers at once, but we only grab the one
# we loaded above:
controller = hass.data[DOMAIN][DATA_CONTROLLER][entry.entry_id] = next(
iter(client.controllers.values())
)
async def async_update(api_category: str) -> dict:
"""Update the appropriate API data based on a category."""
try:
if api_category == DATA_PROGRAMS:
return await controller.programs.all(include_inactive=True)
if api_category == DATA_PROVISION_SETTINGS:
return await controller.provisioning.settings()
if api_category == DATA_RESTRICTIONS_CURRENT:
return await controller.restrictions.current()
if api_category == DATA_RESTRICTIONS_UNIVERSAL:
return await controller.restrictions.universal()
return await controller.zones.all(details=True, include_inactive=True)
except RainMachineError as err:
raise UpdateFailed(err) from err
controller_init_tasks = []
for api_category in [
DATA_PROGRAMS,
DATA_PROVISION_SETTINGS,
DATA_RESTRICTIONS_CURRENT,
DATA_RESTRICTIONS_UNIVERSAL,
DATA_ZONES,
]:
coordinator = hass.data[DOMAIN][DATA_COORDINATOR][entry.entry_id][
api_category
] = DataUpdateCoordinator(
hass,
LOGGER,
name=f'{controller.name} ("{api_category}")',
update_interval=DEFAULT_UPDATE_INTERVAL,
update_method=partial(async_update, api_category),
)
controller_init_tasks.append(coordinator.async_refresh())
await asyncio.gather(*controller_init_tasks)
for component in PLATFORMS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, component)
)
hass.data[DOMAIN][DATA_LISTENER] = entry.add_update_listener(async_reload_entry)
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Unload an RainMachine config entry."""
unload_ok = all(
await asyncio.gather(
*[
hass.config_entries.async_forward_entry_unload(entry, component)
for component in PLATFORMS
]
)
)
if unload_ok:
hass.data[DOMAIN][DATA_COORDINATOR].pop(entry.entry_id)
cancel_listener = hass.data[DOMAIN][DATA_LISTENER].pop(entry.entry_id)
cancel_listener()
return unload_ok
async def async_reload_entry(hass: HomeAssistant, entry: ConfigEntry) -> None:
"""Handle an options update."""
await hass.config_entries.async_reload(entry.entry_id)
class RainMachineEntity(CoordinatorEntity):
"""Define a generic RainMachine entity."""
def __init__(
self, coordinator: DataUpdateCoordinator, controller: Controller
) -> None:
"""Initialize."""
super().__init__(coordinator)
self._attrs = {ATTR_ATTRIBUTION: DEFAULT_ATTRIBUTION}
self._controller = controller
self._device_class = None
# The colons are removed from the device MAC simply because that value
# (unnecessarily) makes up the existing unique ID formula and we want to avoid
# a breaking change:
self._unique_id = controller.mac.replace(":", "")
self._name = None
@property
def device_class(self) -> str:
"""Return the device class."""
return self._device_class
@property
def device_info(self) -> dict:
"""Return device registry information for this entity."""
return {
"identifiers": {(DOMAIN, self._controller.mac)},
"name": self._controller.name,
"manufacturer": "RainMachine",
"model": (
f"Version {self._controller.hardware_version} "
f"(API: {self._controller.api_version})"
),
"sw_version": self._controller.software_version,
}
@property
def device_state_attributes(self) -> dict:
"""Return the state attributes."""
return self._attrs
@property
def name(self) -> str:
"""Return the name of the entity."""
return self._name
@callback
def _handle_coordinator_update(self):
"""Respond to a DataUpdateCoordinator update."""
self.update_from_latest_data()
self.async_write_ha_state()
async def async_added_to_hass(self):
"""Handle entity which will be added."""
await super().async_added_to_hass()
self.update_from_latest_data()
@callback
def update_from_latest_data(self) -> None:
"""Update the state."""
raise NotImplementedError
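# A minimal sketch (illustration only, not part of this integration) of how a
# platform entity might implement update_from_latest_data(); "someKey" is a
# placeholder for a real field in the coordinator data:
#
#     class ExampleRainMachineSensor(RainMachineEntity):
#         @callback
#         def update_from_latest_data(self) -> None:
#             self._attrs["example"] = self.coordinator.data.get("someKey")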
| {
"content_hash": "0a7ee7466a85865d2e61aa56d6d8718d",
"timestamp": "",
"source": "github",
"line_count": 251,
"max_line_length": 86,
"avg_line_length": 32.366533864541836,
"alnum_prop": 0.6392171344165436,
"repo_name": "turbokongen/home-assistant",
"id": "98fbdbcf4015e9f42dfcc2e55bd0b10066ea025b",
"size": "8124",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "homeassistant/components/rainmachine/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1720"
},
{
"name": "Python",
"bytes": "30405146"
},
{
"name": "Shell",
"bytes": "4832"
}
],
"symlink_target": ""
} |
import weakref
import Gaffer
import GafferUI
from GafferUI.PlugValueWidget import sole
from Qt import QtCore
class ColorPlugValueWidget( GafferUI.PlugValueWidget ) :
def __init__( self, plugs, **kw ) :
self.__column = GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Vertical, spacing = 4 )
GafferUI.PlugValueWidget.__init__( self, self.__column, plugs, **kw )
with self.__column :
with GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Horizontal, spacing = 4 ) :
self.__compoundNumericWidget = GafferUI.CompoundNumericPlugValueWidget( plugs )
self.__swatch = GafferUI.ColorSwatchPlugValueWidget( plugs, parenting = { "expand" : True } )
self.__swatch.buttonReleaseSignal().connect( Gaffer.WeakMethod( self.__swatchButtonRelease ), scoped = False )
self.__chooserButton = GafferUI.Button( image = "colorPlugValueWidgetSlidersOff.png", hasFrame = False )
self.__chooserButton.clickedSignal().connect( Gaffer.WeakMethod( self.__chooserButtonClicked ), scoped = False )
self.__colorChooser = GafferUI.ColorChooserPlugValueWidget( plugs )
self.setColorChooserVisible(
sole( Gaffer.Metadata.value( plug, "colorPlugValueWidget:colorChooserVisible" ) for plug in self.getPlugs() )
)
self.__blinkBehaviour = None
def setColorChooserVisible( self, visible ) :
self.__colorChooser.setVisible( visible )
self.__chooserButton.setImage(
"colorPlugValueWidgetSliders{}.png".format( "On" if visible else "Off" )
)
def getColorChooserVisible( self ) :
return self.__colorChooser.getVisible()
def setPlugs( self, plugs ) :
GafferUI.PlugValueWidget.setPlugs( self, plugs )
self.__compoundNumericWidget.setPlugs( plugs )
self.__colorChooser.setPlugs( plugs )
self.__swatch.setPlugs( plugs )
def setHighlighted( self, highlighted ) :
GafferUI.PlugValueWidget.setHighlighted( self, highlighted )
self.__compoundNumericWidget.setHighlighted( highlighted )
def setReadOnly( self, readOnly ) :
if readOnly == self.getReadOnly() :
return
GafferUI.PlugValueWidget.setReadOnly( self, readOnly )
self.__compoundNumericWidget.setReadOnly( readOnly )
self.__swatch.setReadOnly( readOnly )
def childPlugValueWidget( self, childPlug ) :
return self.__compoundNumericWidget.childPlugValueWidget( childPlug )
def __swatchButtonRelease( self, widget, event ) :
if not self._editable() :
# The swatch will have been unable to display a colour chooser, so we
# draw the user's attention to the components which are preventing that.
if self.__blinkBehaviour is not None :
self.__blinkBehaviour.stop()
widgets = [
self.__compoundNumericWidget.childPlugValueWidget( p )
for p in Gaffer.Plug.Range( next( iter( self.getPlugs() ) ) )
]
widgets = [ w for w in widgets if not w._editable() ]
self.__blinkBehaviour = _BlinkBehaviour( widgets )
self.__blinkBehaviour.start()
return False
def __chooserButtonClicked( self, widget ) :
visible = not self.getColorChooserVisible()
self.setColorChooserVisible( visible )
# Remember the user's choice so we can match it next time
# we construct a widget for one of these plugs.
for plug in self.getPlugs() :
Gaffer.Metadata.registerValue( plug, "colorPlugValueWidget:colorChooserVisible", visible, persistent = False )
GafferUI.PlugValueWidget.registerType( Gaffer.Color3fPlug, ColorPlugValueWidget )
GafferUI.PlugValueWidget.registerType( Gaffer.Color4fPlug, ColorPlugValueWidget )
## \todo Consider if this is something that might be useful elsewhere, if
# there are other such things, and what a Behaviour base class for them
# might look like.
class _BlinkBehaviour( object ) :
def __init__( self, targetWidgets, blinks = 2 ) :
self.__targetWidgets = [ weakref.ref( w ) for w in targetWidgets ]
self.__initialStates = [ w.getHighlighted() for w in targetWidgets ]
self.__blinks = blinks
self.__toggleCount = 0
self.__timer = QtCore.QTimer()
self.__timer.timeout.connect( self.__blink )
def start( self ) :
self.__toggleCount = 0
self.__blink()
self.__timer.start( 250 )
def stop( self ) :
self.__timer.stop()
for widget, initialState in zip( self.__targetWidgets, self.__initialStates ) :
widget = widget()
if widget :
widget.setHighlighted( initialState )
def __blink( self ) :
self.__toggleCount += 1
for widget, initialState in zip( self.__targetWidgets, self.__initialStates ) :
widget = widget()
if widget :
widget.setHighlighted( bool( ( int( initialState ) + self.__toggleCount ) % 2 ) )
if self.__toggleCount >= self.__blinks * 2 :
self.__timer.stop()
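# Illustrative note (not part of the module): _BlinkBehaviour only relies on
# getHighlighted()/setHighlighted(), so any such widget can be flashed, e.g.:
#
#     behaviour = _BlinkBehaviour( [ someWidget ], blinks = 3 )
#     behaviour.start()   # toggles highlighting on a timer, then stops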
| {
"content_hash": "14f4a27ab12a261bfbd845eb155c338e",
"timestamp": "",
"source": "github",
"line_count": 146,
"max_line_length": 116,
"avg_line_length": 31.69178082191781,
"alnum_prop": 0.7220661335638643,
"repo_name": "hradec/gaffer",
"id": "6e4ac3e25d1629de22b5c5ed28bc031c77be53ce",
"size": "6497",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/GafferUI/ColorPlugValueWidget.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "54696"
},
{
"name": "C++",
"bytes": "8682649"
},
{
"name": "CMake",
"bytes": "85201"
},
{
"name": "GLSL",
"bytes": "6208"
},
{
"name": "Python",
"bytes": "9458935"
},
{
"name": "Ruby",
"bytes": "419"
},
{
"name": "Shell",
"bytes": "14299"
}
],
"symlink_target": ""
} |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('topics', '0095_auto_20200603_2208'),
]
operations = [
migrations.AddField(
model_name='programmingchallenge',
name='testing_examples',
field=models.TextField(default=''),
),
]
| {
"content_hash": "a42ccd98f7128a15f2c7ff7559efc1fe",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 47,
"avg_line_length": 22.1875,
"alnum_prop": 0.5859154929577465,
"repo_name": "uccser/cs-unplugged",
"id": "9da0a9145b4271066eebd69066d964c66f584eef",
"size": "405",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "csunplugged/topics/migrations/0096_programmingchallenge_testing_examples.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "7927"
},
{
"name": "HTML",
"bytes": "432891"
},
{
"name": "JavaScript",
"bytes": "104806"
},
{
"name": "Python",
"bytes": "1257568"
},
{
"name": "SCSS",
"bytes": "67560"
},
{
"name": "Shell",
"bytes": "12461"
}
],
"symlink_target": ""
} |
import core.social.Objects.User
from ..Mongo import Mongo
import facebook
# from .User import User
from .Comment import Comment
from .Likes import Likes
from ..configurations import Config
import re
import json
import ast
class Post:
def __init__(self, **kwargs):
post_fields = ["created_time", "_id", "id", "updated_time", "likes", "message", "status_type", "comments"]
for key, value in kwargs.iteritems():
if key in post_fields:
setattr(self, key, value)
def __repr__(self):
return str(self.__dict__)
@property
def id(self):
return self._id
@property
    def likes(self):
        # Read/write the instance dict directly; returning self.likes here
        # would re-enter this property and recurse without bound.
        return self.__dict__.get('likes')
    @likes.setter
    def likes(self, value):
        self.__dict__['likes'] = value
def get_all_likes(self):
likes = []
if type(self.likes) is dict:
for x in self.likes['data']:
likes.append(Likes(id=x['id'], user_name=x['name']))
        else:
for x in ast.literal_eval(self.likes)['data']:
likes.append(Likes(id=x['id'], user_name=x['name']))
return likes
def get_comments(self):
comments = []
if 'comments' in self.__dict__:
for x in ast.literal_eval(self.comments)['data']:
comments.append(Comment(id= x['id'], user=x['from'], like_count=x['like_count'], created_time=x['created_time'],
message=x['message'], user_likes=x['user_likes']))
return comments
@staticmethod
def demo(**kwargs):
for key, value in kwargs.iteritems():
print (str(key) + "---------->" + str(value))
@staticmethod
def load_from_db(id):
post = Mongo.getPostCollection().find_one({'_id': id})
# print ast.literal_eval(str(post))
return Post(**post)
@staticmethod
def load_from_facebook(id):
graph = facebook.GraphAPI(
access_token=Config.api_key,
version='2.2')
post = graph.get_object(id)
# print post
return Post(**post)
def json(self):
json_res = {}
for key, value in self.__dict__.items():
if value:
try:
json_res[key] = str(value)
except :
json_res[key] = str(value.encode('ascii', 'ignore'))
if "id" in json_res.keys():
id = json_res.pop("id")
json_res['_id'] = id
return json_res
def load_user(self):
return User.load_user(self.user)
def save(self):
json_res = self.json()
Mongo.getPostCollection().insert(json_res)
@classmethod
def load_from_json(cls, values):
return cls(**values)
@staticmethod
def load_post(id):
record = Mongo.getPostCollection().find_one({'_id': id})
print (record)
if not record:
return None
return Post.load_from_json(record)
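# Hypothetical usage sketch (ids and field values are placeholders):
#
#     post = Post(_id='12345_67890', message='hello', created_time='2017-01-01')
#     post.save()                               # persist to MongoDB
#     same_post = Post.load_post('12345_67890') # read it back
#     for comment in same_post.get_comments():
#         print(comment)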
| {
"content_hash": "49248d9c14cfaf8e18225a7c4e92dc36",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 128,
"avg_line_length": 28.00943396226415,
"alnum_prop": 0.5405860559110812,
"repo_name": "euronmetaliaj/MarketAnalyzer",
"id": "de04d435e5d8fce9c7687725ee7802ea0f941a12",
"size": "2969",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "core/social/Objects/Mini.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "447174"
},
{
"name": "HTML",
"bytes": "52235"
},
{
"name": "JavaScript",
"bytes": "1628285"
},
{
"name": "Python",
"bytes": "57579"
}
],
"symlink_target": ""
} |
from django.core.files import File
from django.core.management.base import BaseCommand
from django.shortcuts import get_list_or_404
from employees.models import Employee
class Command(BaseCommand):
    help = 'Set default avatar image for all employees; suggested for use in development environments'
def get_employee_list(self):
default_image = File(open('sample_data/default_avatar.png', 'rb'))
employees = get_list_or_404(Employee)
for employee in employees:
employee.avatar.save('default.png', default_image)
def handle(self, *args, **options):
self.get_employee_list()
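# Typical invocation (from the project root, assuming sample_data/ contains
# default_avatar.png as referenced above):
#
#     python manage.py setdefaultavatar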
| {
"content_hash": "e66b48fa14637f7490bb830afe0c590c",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 98,
"avg_line_length": 34.833333333333336,
"alnum_prop": 0.7145135566188198,
"repo_name": "belatrix/BackendAllStars",
"id": "2ac9702b2294af1b7054bea06f439393c0df4a76",
"size": "627",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "employees/management/commands/setdefaultavatar.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "198882"
}
],
"symlink_target": ""
} |
from kivy.uix.boxlayout import BoxLayout
from kivy.properties import Property
from gravur.common.labelbox import LabelBox # NOQA
from gravur.utils import load_widget
@load_widget
class TransactionPreview(BoxLayout):
txid = Property(None)
timestamp = Property("loading")
delta = Property("loading")
def on_txid(self, instance, value):
instance.timestamp = '2015-07-01 16:41' # TODO get from txid
instance.delta = '0.0012BTC' # TODO get from txid
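# Illustrative note (not part of the module): assigning a transaction id is
# what triggers the on_txid handler above, e.g. in a running Kivy app:
#
#     preview = TransactionPreview()
#     preview.txid = "placeholder-txid"   # timestamp/delta then update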
| {
"content_hash": "84a8172872b341a35a8f63c243455275",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 69,
"avg_line_length": 30.25,
"alnum_prop": 0.71900826446281,
"repo_name": "F483/gravur",
"id": "141d3b22d1d8707b9d86b0ad077e838e491df4c8",
"size": "599",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gravur/wallet/transactionpreview.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1254"
},
{
"name": "Python",
"bytes": "30027"
}
],
"symlink_target": ""
} |
import collections
import functools
def filter_and_text_type_assertions(func):
"""Decorates all functions with the filter and text signature to confirm the types of """
@functools.wraps(func)
def decorator(*args, **kwargs):
try:
args = list(args)
filters = args[0]
assert args[0] is not None or args[1] is not None, "Arguments must not be empty"
assert isinstance(args[0], collections.Iterable) and isinstance(args[1], collections.Iterable), "Filters and objects to filter must be iterable."
except IndexError:
filters = kwargs.get('filter')
assert kwargs.get('filter') is not None or kwargs.get('text') is not None, "Arguments or Key Word Arguments must not be empty"
assert isinstance(kwargs.get('filter'), collections.Iterable) and isinstance(kwargs.get('text'), collections.Iterable), "Filters and objects to filter must be iterable."
if isinstance(filters, str):
try:
delimiter = args[2]
except IndexError:
delimiter = kwargs.get('delimiter', ',')
args[0] = filters.split(delimiter)
return func(*args, **kwargs)
return decorator
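if __name__ == "__main__":
    # Illustrative sketch (not part of the library): the decorator splits a
    # comma-separated filter string into a list and asserts both arguments
    # are iterable before the wrapped function runs.
    @filter_and_text_type_assertions
    def drop_filtered(filters, text):
        return [word for word in text if word not in filters]
    print(drop_filtered("foo,bar", ["foo", "baz"]))  # -> ['baz']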
| {
"content_hash": "44ea648ebca1740a811bc203ee920853",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 181,
"avg_line_length": 45.851851851851855,
"alnum_prop": 0.635702746365105,
"repo_name": "52inc/python-naughty-words",
"id": "97cef6e90e427be3bdc099e86f0d5cd5ca6ec46e",
"size": "1238",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "naughty_words/utils/decorators.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "132685"
}
],
"symlink_target": ""
} |
import sys, os
try:
import sphinx_rtd_theme
except ImportError:
_RTD_THEME = False
pass
else:
_RTD_THEME = True
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../../bitmath'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.viewcode', 'sphinx.ext.mathjax']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'bitmath'
copyright = u'2014-2016, Tim Bielawa'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.3.1'
# The full version, including alpha/beta/rc tags.
release = '1.3.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = [
'appendices/mixed_math.rst',
'appendices/*',
'example_block_devices.rst',
'query_device_capacity_warning.rst'
]
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
if _RTD_THEME:
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
else:
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%Y-%m-%d - %H:%M:%S %Z'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = False
# If false, no index is generated.
html_use_index = False
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'bitmathdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'bitmath.tex', u'bitmath Documentation',
u'Tim Bielawa', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'bitmath', u'bitmath Documentation',
[u'Tim Bielawa'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'bitmath', u'bitmath Documentation',
u'Tim Bielawa', 'bitmath', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| {
"content_hash": "4c8807fe0d22912fbd177fcaf0981d0c",
"timestamp": "",
"source": "github",
"line_count": 245,
"max_line_length": 122,
"avg_line_length": 31.812244897959182,
"alnum_prop": 0.6986143187066974,
"repo_name": "pombredanne/bitmath",
"id": "fd082bb251fe56728a407d7073080e85b8762f4b",
"size": "8212",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docsite/source/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Groff",
"bytes": "2344"
},
{
"name": "Makefile",
"bytes": "15620"
},
{
"name": "Python",
"bytes": "181055"
},
{
"name": "Shell",
"bytes": "311"
}
],
"symlink_target": ""
} |
from pytos.securechange.xml_objects.restapi.step.initialize import *
from pytos.securechange.xml_objects.restapi.step.access_request.designer import DesignerResult
from pytos.securechange.xml_objects.restapi.step.step import Binding, SlimRuleWithMetadata, SlimRule
logger = logging.getLogger(XML_LOGGER_NAME)
class Rule_decommission_Generator:
"""This class is used to generate a SecureChange RuleDecommission from tuple."""
def __init__(self, revision_id, device_id, bindings, action):
self.revision_id = revision_id
self.device_id = device_id
self.bindings = bindings
self.action = action
@classmethod
def from_dict(cls, rule_decommission_dict):
"""Constructor
:param rule_decommission_dict: A dict, which consists of (device_id,revision_id,bindings,action).
device_id is an int
revision_id is an int
bindings is a dict: Keys are the bind uid(string) and the values are lists of rules(list of strings)
action is a string, which can be one of the following:
1. Disable
2. Remove
        :type rule_decommission_dict: dict
"""
device_id = rule_decommission_dict['device_id']
revision_id = rule_decommission_dict['revision_id']
bindings = rule_decommission_dict['bindings']
action = rule_decommission_dict['action']
return cls(revision_id, device_id, bindings, action)
def create_devices_bindings(self):
"""Create the Device object for the Rule Decommission ticket.
:return: The generated Devices list object.
:rtype: XML_List of devices
"""
device_bindings = []
for bind_uid, rules in self.bindings.items():
bind_rules = []
for rule_uid in rules:
bind_rules.append(SlimRule(uid=rule_uid))
bind_rules = XML_List(Elements.RULES, bind_rules)
device_bindings.append(RuleDecommissionBinding(binding_uid=bind_uid, rules=bind_rules))
device_bindings = XML_List(Elements.BINDINGS, device_bindings)
devices = XML_List(Elements.DEVICES, [RuleDecommissionDevice(revision_id=self.revision_id,
management_id=self.device_id,
bindings=device_bindings)])
return devices
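# --- Illustrative sketch (not part of the original module). ---
# Shows the dict shape expected by Rule_decommission_Generator.from_dict;
# every id/uid value below is a made-up placeholder.
#
#   rd_input = {
#       'device_id': 17,
#       'revision_id': 1029,
#       'bindings': {'binding-uid-1': ['rule-uid-a', 'rule-uid-b']},
#       'action': 'Disable',
#   }
#   generator = Rule_decommission_Generator.from_dict(rd_input)
#   devices = generator.create_devices_bindings()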
class Step_Field_Rule_Decommission(Step_Field_Base):
def __init__(self, num_id, name, read_only=None, action=None, devices=None, verifier_result=None,
designer_result=None):
self.action = action
self.devices = devices
self.verifier_result = verifier_result
self.designer_result = designer_result
super().__init__(num_id, name, read_only)
self.set_attrib(Attributes.XSI_TYPE, Attributes.FIELD_TYPE_RULE_DECOMMISSION)
@classmethod
def from_xml_node(cls, xml_node):
"""
Initialize the object from a XML node.
:param xml_node: The XML node from which all necessary parameters will be parsed.
:type xml_node: xml.etree.Element
"""
num_id = get_xml_int_value(xml_node, Elements.ID)
name = get_xml_text_value(xml_node, Elements.NAME)
read_only = get_xml_text_value(xml_node, Elements.READ_ONLY)
action = get_xml_text_value(xml_node, Elements.ACTION)
verifier_node = get_xml_node(xml_node, Elements.VERIFIER_RESULT, True)
if verifier_node is not None:
verifier_result = RDVerifier.from_xml_node(verifier_node)
else:
verifier_result = None
designer_node = get_xml_node(xml_node, Elements.DESIGNER_RESULT, True)
if designer_node is not None:
designer_result = DesignerResult.from_xml_node(designer_node)
else:
designer_result = None
devices = XML_List.from_xml_node_by_tags(xml_node, Elements.DEVICES, Elements.DEVICE, RuleDecommissionDevice,
True)
return cls(num_id, name, read_only, action, devices, verifier_result, designer_result)
def remove_verifier_result(self):
"""
Remove verifier result from a rule decommission for a task.
Need to use it when trying to put rule decommission task with verifier result
"""
self.verifier_result = None
def remove_designer_result(self):
"""
Remove designer result from a rule decommission for a task.
Need to use it when trying to put rule decommission task with designer result
"""
self.designer_result = None
def sanitize_results(self):
"""
Remove both designer and verifier result from a rule decommission for a task.
Need to use it when trying to put rule decommission task with designer result and verifier results
"""
self.remove_designer_result()
self.remove_verifier_result()
def to_pretty_str(self):
action_str = "Action: {}".format(self.action)
devices_str = '\n'.join(device.to_pretty_str() for device in self.devices)
return '\n\n'.join((action_str, devices_str))
class RuleDecommissionDevice(XML_Object_Base):
"""This class represents the RuleDecommissionDeviceDTO used in rule decommission field"""
def __init__(self, revision_id, management_id, bindings, management_ip=None, revision_number=None,
number_of_rules=None, administrator=None, management_name=None):
self.revision_id = revision_id
self.management_id = management_id
self.bindings = bindings
self.management_ip = management_ip
self.revision_number = revision_number
self.number_of_rules = number_of_rules
self.administrator = administrator
self.management_name = management_name
super().__init__(Elements.DEVICE)
@classmethod
def from_xml_node(cls, xml_node):
"""
Initialize the object from a XML node.
:param xml_node: The XML node from which all necessary parameters will be parsed.
:type xml_node: xml.etree.Element
"""
revision_id = get_xml_text_value(xml_node, Elements.REVISION_ID)
management_id = get_xml_int_value(xml_node, Elements.MANAGEMENT_ID)
bindings = XML_List.from_xml_node_by_tags(xml_node, Elements.BINDINGS, Elements.BINDING,
RuleDecommissionBinding)
management_ip = get_xml_text_value(xml_node, Elements.MANAGEMENT_IP)
revision_number = get_xml_int_value(xml_node, Elements.REVISION_NUMBER)
number_of_rules = get_xml_int_value(xml_node, Elements.NUMBER_OF_RULES)
administrator = get_xml_text_value(xml_node, Elements.ADMINISTRATOR)
management_name = get_xml_text_value(xml_node, Elements.MANAGEMENT_NAME)
return cls(revision_id, management_id, bindings, management_ip, revision_number, number_of_rules, administrator,
management_name)
def to_pretty_str(self):
bindings_info = "\n".join(binding.to_pretty_str() for binding in self.bindings)
return "Device name: {}\n{}".format(self.management_name, bindings_info)
class RDVerifier(XML_Object_Base):
"""This class represents the RDVerifier used in rule decommission field"""
def __init__(self, result, reason, message, status):
self.result = result
self.reason = reason
self.message = message
self.status = status
super().__init__(Elements.VERIFIER_RESULT)
@classmethod
def from_xml_node(cls, xml_node):
"""
Initialize the object from a XML node.
:param xml_node: The XML node from which all necessary parameters will be parsed.
:type xml_node: xml.etree.Element
"""
status = get_xml_text_value(xml_node, Elements.STATUS)
reason = get_xml_text_value(xml_node, Elements.REASON)
message = get_xml_text_value(xml_node, Elements.MESSAGE)
result = get_xml_node(xml_node, Elements.RESULT, True)
if result is not None:
result = ReferenceURL.from_xml_node(result)
return cls(result, reason, message, status)
class RuleDecommissionBinding(XML_Object_Base):
"""This class represents the RuleDecommissionBindingDTO used in rule decommission field"""
def __init__(self, binding_uid, rules, binding=None):
self.binding_uid = binding_uid
self.rules = rules
if binding is not None:
self.binding = binding
super().__init__(Elements.BINDING)
@classmethod
def from_xml_node(cls, xml_node):
"""
Initialize the object from a XML node.
:param xml_node: The XML node from which all necessary parameters will be parsed.
:type xml_node: xml.etree.Element
"""
binding_uid = get_xml_text_value(xml_node, Elements.BINDING_UID)
rules = XML_List.from_xml_node_by_tags(xml_node, Elements.RULES, Elements.RULE, SlimRuleWithMetadata)
binding_node = get_xml_node(xml_node, Elements.BINDING, True)
if binding_node is not None:
binding = Binding.from_xml_node(binding_node)
else:
binding = None
return cls(binding_uid, rules, binding)
def to_pretty_str(self):
return '\n'.join(rule.to_pretty_str() for rule in self.rules)
| {
"content_hash": "c71b31b6abb4b91c86dd7ae19cb7a4e2",
"timestamp": "",
"source": "github",
"line_count": 214,
"max_line_length": 120,
"avg_line_length": 44.308411214953274,
"alnum_prop": 0.6396329888209239,
"repo_name": "Tufin/pytos",
"id": "e7a38fce3a36f06bd87bf21b5496191749e5497a",
"size": "9482",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pytos/securechange/xml_objects/restapi/step/rule_decommission/rule_decommission.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "11180"
},
{
"name": "Python",
"bytes": "1073816"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.conf import settings
from django.db import models
from django.utils.translation import ugettext_lazy as _
from filer.utils.compatibility import python_2_unicode_compatible
from . import filemodels
@python_2_unicode_compatible
class Clipboard(models.Model):
user = models.ForeignKey(getattr(settings, 'AUTH_USER_MODEL', 'auth.User'), verbose_name=_(
'user'), related_name="media_clipboards")
files = models.ManyToManyField(
'media.File', verbose_name=_('files'), related_name="in_clipboards",
through='ClipboardItem')
def append_file(self, file_obj):
try:
# We have to check if file is already in the clipboard as otherwise
# polymorphic complains
self.files.get(pk=file_obj.pk)
return False
except filemodels.File.DoesNotExist:
newitem = ClipboardItem(file=file_obj, clipboard=self)
newitem.save()
return True
def __str__(self):
return "Clipboard %s of %s" % (self.id, self.user)
class Meta:
app_label = 'media'
verbose_name = _('clipboard')
verbose_name_plural = _('clipboards')
class ClipboardItem(models.Model):
file = models.ForeignKey('media.File', verbose_name=_('file'))
clipboard = models.ForeignKey(Clipboard, verbose_name=_('clipboard'))
class Meta:
app_label = 'media'
verbose_name = _('clipboard item')
verbose_name_plural = _('clipboard items')
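# --- Illustrative usage sketch (not part of the original module). ---
# append_file() returns False when the file is already on the clipboard,
# so callers can tell "added" apart from "duplicate". The user and file
# objects below are placeholders for real auth user / media.File rows.
#
#   clipboard, _ = Clipboard.objects.get_or_create(user=request.user)
#   if clipboard.append_file(file_obj):
#       pass  # newly added
#   else:
#       pass  # was already on the clipboard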
| {
"content_hash": "8d591dfdf4667b62cfcbbdd54ab9ca88",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 95,
"avg_line_length": 33.30434782608695,
"alnum_prop": 0.6481723237597912,
"repo_name": "django-leonardo/django-leonardo",
"id": "ebcaa623f0df3dc87959b69d93af4a84f3781a9b",
"size": "1556",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "leonardo/module/media/models/clipboardmodels.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "33187"
},
{
"name": "Dockerfile",
"bytes": "835"
},
{
"name": "HTML",
"bytes": "323851"
},
{
"name": "JavaScript",
"bytes": "264957"
},
{
"name": "Makefile",
"bytes": "1390"
},
{
"name": "Python",
"bytes": "705902"
},
{
"name": "SCSS",
"bytes": "68482"
},
{
"name": "Shell",
"bytes": "5569"
}
],
"symlink_target": ""
} |
"""
This script uses yum and rpm to generate in-toto material provenance and
writes the resulting JSON to stdout or to argv[0] if provided.
"""
import dnf
import json
import sys
import urllib.parse
in_toto_data = list()
in_toto_fmt = "pkg:rpm/{origin}/{name}@{epoch}{version}-{release}?arch={arch}"
with dnf.Base() as db:
db.fill_sack()
q = db.sack.query()
for pkg in q.installed():
in_toto_data.append(
{
"uri": in_toto_fmt.format(
origin=urllib.parse.quote(pkg.vendor),
name=pkg.name,
epoch=str(pkg.epoch) + ':' if pkg.epoch != 0 else '',
version=pkg.version,
release=pkg.release,
arch=pkg.arch
),
"digest": {
# The DNF documentation says:
# The checksum is returned only for packages from
# repository. The checksum is not returned for
# installed package or packages from commandline
# repository.
# Which is super lame, so we use the header checksum to
# have _something_.
'sha1': pkg.hdr_chksum[1].hex()
}
}
)
if len(sys.argv) > 1:
with open(sys.argv[1], 'w') as f:
json.dump(in_toto_data, f)
else:
print(json.dumps(in_toto_data))
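# --- Illustrative output sketch (not part of the original script). ---
# Each installed package becomes one entry shaped roughly like the
# following; the vendor, version and digest values are placeholders.
#
#   {
#       "uri": "pkg:rpm/Fedora%20Project/bash@5.1.8-2.fc35?arch=x86_64",
#       "digest": {"sha1": "0f3c..."}
#   }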
| {
"content_hash": "6f9e20eb37bf80fda80ce4034ed0e10c",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 78,
"avg_line_length": 31.956521739130434,
"alnum_prop": 0.5027210884353741,
"repo_name": "PowerDNS/pdns-builder",
"id": "507ccb606f88909ca2c37da9d45fd443140de5ed",
"size": "1493",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "helpers/generate-dnf-provenance.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Lua",
"bytes": "588"
},
{
"name": "Python",
"bytes": "4197"
},
{
"name": "Shell",
"bytes": "43128"
}
],
"symlink_target": ""
} |
import os
import re
import sys
import tempfile
import textwrap
import fixtures
import mock
import pkg_resources
import six
from testtools import matchers
from pbr import git
from pbr import packaging
from pbr.tests import base
class TestRepo(fixtures.Fixture):
"""A git repo for testing with.
Use of TempHomeDir with this fixture is strongly recommended as due to the
lack of config --local in older gits, it will write to the users global
configuration without TempHomeDir.
"""
def __init__(self, basedir):
super(TestRepo, self).__init__()
self._basedir = basedir
def setUp(self):
super(TestRepo, self).setUp()
base._run_cmd(['git', 'init', '.'], self._basedir)
base._config_git()
base._run_cmd(['git', 'add', '.'], self._basedir)
def commit(self, message_content='test commit'):
files = len(os.listdir(self._basedir))
path = self._basedir + '/%d' % files
open(path, 'wt').close()
base._run_cmd(['git', 'add', path], self._basedir)
base._run_cmd(['git', 'commit', '-m', message_content], self._basedir)
def uncommit(self):
base._run_cmd(['git', 'reset', '--hard', 'HEAD^'], self._basedir)
def tag(self, version):
base._run_cmd(
['git', 'tag', '-sm', 'test tag', version], self._basedir)
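# --- Illustrative usage sketch (not part of the original module). ---
# TestRepo is a fixtures.Fixture: useFixture() runs setUp(), after which
# commit() and tag() drive git directly inside the package dir under test.
#
#   repo = self.useFixture(TestRepo(self.package_dir))
#   repo.commit('sem-ver: feature')
#   repo.tag('1.2.3')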
class GPGKeyFixture(fixtures.Fixture):
"""Creates a GPG key for testing.
It's recommended that this be used in concert with a unique home
directory.
"""
def setUp(self):
super(GPGKeyFixture, self).setUp()
tempdir = self.useFixture(fixtures.TempDir())
gnupg_version_re = re.compile('^gpg\s.*\s([\d+])\.([\d+])\.([\d+])')
gnupg_version = base._run_cmd(['gpg', '--version'], tempdir.path)
for line in gnupg_version[0].split('\n'):
gnupg_version = gnupg_version_re.match(line)
if gnupg_version:
gnupg_version = (int(gnupg_version.group(1)),
int(gnupg_version.group(2)),
int(gnupg_version.group(3)))
break
else:
if gnupg_version is None:
gnupg_version = (0, 0, 0)
config_file = tempdir.path + '/key-config'
f = open(config_file, 'wt')
try:
if gnupg_version[0] == 2 and gnupg_version[1] >= 1:
f.write("""
%no-protection
%transient-key
""")
f.write("""
%no-ask-passphrase
Key-Type: RSA
Name-Real: Example Key
Name-Comment: N/A
Name-Email: example@example.com
Expire-Date: 2d
Preferences: (setpref)
%commit
""")
finally:
f.close()
# Note that --quick-random (--debug-quick-random in GnuPG 2.x)
# does not have a corresponding preferences file setting and
# must be passed explicitly on the command line instead
if gnupg_version[0] == 1:
gnupg_random = '--quick-random'
elif gnupg_version[0] >= 2:
gnupg_random = '--debug-quick-random'
else:
gnupg_random = ''
base._run_cmd(
['gpg', '--gen-key', '--batch', gnupg_random, config_file],
tempdir.path)
class TestPackagingInGitRepoWithCommit(base.BaseTestCase):
scenarios = [
('preversioned', dict(preversioned=True)),
('postversioned', dict(preversioned=False)),
]
def setUp(self):
super(TestPackagingInGitRepoWithCommit, self).setUp()
repo = self.useFixture(TestRepo(self.package_dir))
repo.commit()
def test_authors(self):
self.run_setup('sdist', allow_fail=False)
# One commit, something should be in the authors list
with open(os.path.join(self.package_dir, 'AUTHORS'), 'r') as f:
body = f.read()
self.assertNotEqual(body, '')
def test_changelog(self):
self.run_setup('sdist', allow_fail=False)
with open(os.path.join(self.package_dir, 'ChangeLog'), 'r') as f:
body = f.read()
# One commit, something should be in the ChangeLog list
self.assertNotEqual(body, '')
def test_manifest_exclude_honoured(self):
self.run_setup('sdist', allow_fail=False)
with open(os.path.join(
self.package_dir,
'pbr_testpackage.egg-info/SOURCES.txt'), 'r') as f:
body = f.read()
self.assertThat(
body, matchers.Not(matchers.Contains('pbr_testpackage/extra.py')))
self.assertThat(body, matchers.Contains('pbr_testpackage/app.py'))
def test_install_writes_changelog(self):
stdout, _, _ = self.run_setup(
'install', '--root', self.temp_dir + 'installed',
allow_fail=False)
self.expectThat(stdout, matchers.Contains('Generating ChangeLog'))
class TestPackagingInGitRepoWithoutCommit(base.BaseTestCase):
def setUp(self):
super(TestPackagingInGitRepoWithoutCommit, self).setUp()
self.useFixture(TestRepo(self.package_dir))
self.run_setup('sdist', allow_fail=False)
def test_authors(self):
# No commits, no authors in list
with open(os.path.join(self.package_dir, 'AUTHORS'), 'r') as f:
body = f.read()
self.assertEqual(body, '\n')
def test_changelog(self):
# No commits, nothing should be in the ChangeLog list
with open(os.path.join(self.package_dir, 'ChangeLog'), 'r') as f:
body = f.read()
self.assertEqual(body, 'CHANGES\n=======\n\n')
class TestPackagingInPlainDirectory(base.BaseTestCase):
def setUp(self):
super(TestPackagingInPlainDirectory, self).setUp()
def test_authors(self):
self.run_setup('sdist', allow_fail=False)
# Not a git repo, no AUTHORS file created
filename = os.path.join(self.package_dir, 'AUTHORS')
self.assertFalse(os.path.exists(filename))
def test_changelog(self):
self.run_setup('sdist', allow_fail=False)
# Not a git repo, no ChangeLog created
filename = os.path.join(self.package_dir, 'ChangeLog')
self.assertFalse(os.path.exists(filename))
def test_install_no_ChangeLog(self):
stdout, _, _ = self.run_setup(
'install', '--root', self.temp_dir + 'installed',
allow_fail=False)
self.expectThat(
stdout, matchers.Not(matchers.Contains('Generating ChangeLog')))
class TestPresenceOfGit(base.BaseTestCase):
def testGitIsInstalled(self):
with mock.patch.object(git,
'_run_shell_command') as _command:
_command.return_value = 'git version 1.8.4.1'
self.assertEqual(True, git._git_is_installed())
def testGitIsNotInstalled(self):
with mock.patch.object(git,
'_run_shell_command') as _command:
_command.side_effect = OSError
self.assertEqual(False, git._git_is_installed())
class TestNestedRequirements(base.BaseTestCase):
def test_nested_requirement(self):
tempdir = tempfile.mkdtemp()
requirements = os.path.join(tempdir, 'requirements.txt')
nested = os.path.join(tempdir, 'nested.txt')
with open(requirements, 'w') as f:
f.write('-r ' + nested)
with open(nested, 'w') as f:
f.write('pbr')
result = packaging.parse_requirements([requirements])
self.assertEqual(result, ['pbr'])
class TestVersions(base.BaseTestCase):
scenarios = [
('preversioned', dict(preversioned=True)),
('postversioned', dict(preversioned=False)),
]
def setUp(self):
super(TestVersions, self).setUp()
self.repo = self.useFixture(TestRepo(self.package_dir))
self.useFixture(GPGKeyFixture())
self.useFixture(base.DiveDir(self.package_dir))
def test_capitalized_headers(self):
self.repo.commit()
self.repo.tag('1.2.3')
self.repo.commit('Sem-Ver: api-break')
version = packaging._get_version_from_git()
self.assertThat(version, matchers.StartsWith('2.0.0.dev1'))
def test_capitalized_headers_partial(self):
self.repo.commit()
self.repo.tag('1.2.3')
self.repo.commit('Sem-ver: api-break')
version = packaging._get_version_from_git()
self.assertThat(version, matchers.StartsWith('2.0.0.dev1'))
def test_tagged_version_has_tag_version(self):
self.repo.commit()
self.repo.tag('1.2.3')
version = packaging._get_version_from_git('1.2.3')
self.assertEqual('1.2.3', version)
def test_untagged_version_has_dev_version_postversion(self):
self.repo.commit()
self.repo.tag('1.2.3')
self.repo.commit()
version = packaging._get_version_from_git()
self.assertThat(version, matchers.StartsWith('1.2.4.dev1'))
def test_untagged_pre_release_has_pre_dev_version_postversion(self):
self.repo.commit()
self.repo.tag('1.2.3.0a1')
self.repo.commit()
version = packaging._get_version_from_git()
self.assertThat(version, matchers.StartsWith('1.2.3.0a2.dev1'))
def test_untagged_version_minor_bump(self):
self.repo.commit()
self.repo.tag('1.2.3')
self.repo.commit('sem-ver: deprecation')
version = packaging._get_version_from_git()
self.assertThat(version, matchers.StartsWith('1.3.0.dev1'))
def test_untagged_version_major_bump(self):
self.repo.commit()
self.repo.tag('1.2.3')
self.repo.commit('sem-ver: api-break')
version = packaging._get_version_from_git()
self.assertThat(version, matchers.StartsWith('2.0.0.dev1'))
def test_untagged_version_has_dev_version_preversion(self):
self.repo.commit()
self.repo.tag('1.2.3')
self.repo.commit()
version = packaging._get_version_from_git('1.2.5')
self.assertThat(version, matchers.StartsWith('1.2.5.dev1'))
def test_untagged_version_after_pre_has_dev_version_preversion(self):
self.repo.commit()
self.repo.tag('1.2.3.0a1')
self.repo.commit()
version = packaging._get_version_from_git('1.2.5')
self.assertThat(version, matchers.StartsWith('1.2.5.dev1'))
def test_untagged_version_after_rc_has_dev_version_preversion(self):
self.repo.commit()
self.repo.tag('1.2.3.0a1')
self.repo.commit()
version = packaging._get_version_from_git('1.2.3')
self.assertThat(version, matchers.StartsWith('1.2.3.0a2.dev1'))
def test_preversion_too_low_simple(self):
# That is, the target version is either already released or not high
# enough for the semver requirements given api breaks etc.
self.repo.commit()
self.repo.tag('1.2.3')
self.repo.commit()
# Note that we can't target 1.2.3 anymore - with 1.2.3 released we
# need to be working on 1.2.4.
err = self.assertRaises(
ValueError, packaging._get_version_from_git, '1.2.3')
self.assertThat(err.args[0], matchers.StartsWith('git history'))
def test_preversion_too_low_semver_headers(self):
# That is, the target version is either already released or not high
# enough for the semver requirements given api breaks etc.
self.repo.commit()
self.repo.tag('1.2.3')
self.repo.commit('sem-ver: feature')
# Note that we can't target 1.2.4, the feature header means we need
# to be working on 1.3.0 or above.
err = self.assertRaises(
ValueError, packaging._get_version_from_git, '1.2.4')
self.assertThat(err.args[0], matchers.StartsWith('git history'))
def test_get_kwargs_corner_cases(self):
# No tags:
git_dir = self.repo._basedir + '/.git'
get_kwargs = lambda tag: packaging._get_increment_kwargs(git_dir, tag)
def _check_combinations(tag):
self.repo.commit()
self.assertEqual(dict(), get_kwargs(tag))
self.repo.commit('sem-ver: bugfix')
self.assertEqual(dict(), get_kwargs(tag))
self.repo.commit('sem-ver: feature')
self.assertEqual(dict(minor=True), get_kwargs(tag))
self.repo.uncommit()
self.repo.commit('sem-ver: deprecation')
self.assertEqual(dict(minor=True), get_kwargs(tag))
self.repo.uncommit()
self.repo.commit('sem-ver: api-break')
self.assertEqual(dict(major=True), get_kwargs(tag))
self.repo.commit('sem-ver: deprecation')
self.assertEqual(dict(major=True, minor=True), get_kwargs(tag))
_check_combinations('')
self.repo.tag('1.2.3')
_check_combinations('1.2.3')
def test_invalid_tag_ignored(self):
# Fix for bug 1356784 - we treated any tag as a version, not just those
# that are valid versions.
self.repo.commit()
self.repo.tag('1')
self.repo.commit()
# when the tree is tagged and its wrong:
self.repo.tag('badver')
version = packaging._get_version_from_git()
self.assertThat(version, matchers.StartsWith('1.0.1.dev1'))
# When the tree isn't tagged, we also fall through.
self.repo.commit()
version = packaging._get_version_from_git()
self.assertThat(version, matchers.StartsWith('1.0.1.dev2'))
# We don't fall through x.y versions
self.repo.commit()
self.repo.tag('1.2')
self.repo.commit()
self.repo.tag('badver2')
version = packaging._get_version_from_git()
self.assertThat(version, matchers.StartsWith('1.2.1.dev1'))
# Or x.y.z versions
self.repo.commit()
self.repo.tag('1.2.3')
self.repo.commit()
self.repo.tag('badver3')
version = packaging._get_version_from_git()
self.assertThat(version, matchers.StartsWith('1.2.4.dev1'))
# Or alpha/beta/pre versions
self.repo.commit()
self.repo.tag('1.2.4.0a1')
self.repo.commit()
self.repo.tag('badver4')
version = packaging._get_version_from_git()
self.assertThat(version, matchers.StartsWith('1.2.4.0a2.dev1'))
# Non-release related tags are ignored.
self.repo.commit()
self.repo.tag('2')
self.repo.commit()
self.repo.tag('non-release-tag/2014.12.16-1')
version = packaging._get_version_from_git()
self.assertThat(version, matchers.StartsWith('2.0.1.dev1'))
def test_valid_tag_honoured(self):
# Fix for bug 1370608 - we converted any target into a 'dev version'
# even if there was a distance of 0 - indicating that we were on the
# tag itself.
self.repo.commit()
self.repo.tag('1.3.0.0a1')
version = packaging._get_version_from_git()
self.assertEqual('1.3.0.0a1', version)
def test_skip_write_git_changelog(self):
# Fix for bug 1467440
self.repo.commit()
self.repo.tag('1.2.3')
os.environ['SKIP_WRITE_GIT_CHANGELOG'] = '1'
version = packaging._get_version_from_git('1.2.3')
self.assertEqual('1.2.3', version)
def tearDown(self):
super(TestVersions, self).tearDown()
os.environ.pop('SKIP_WRITE_GIT_CHANGELOG', None)
class TestRequirementParsing(base.BaseTestCase):
def test_requirement_parsing(self):
tempdir = self.useFixture(fixtures.TempDir()).path
requirements = os.path.join(tempdir, 'requirements.txt')
with open(requirements, 'wt') as f:
f.write(textwrap.dedent(six.u("""\
bar
quux<1.0; python_version=='2.6'
requests-aws>=0.1.4 # BSD License (3 clause)
Routes>=1.12.3,!=2.0,!=2.1;python_version=='2.7'
requests-kerberos>=0.6;python_version=='2.7' # MIT
""")))
setup_cfg = os.path.join(tempdir, 'setup.cfg')
with open(setup_cfg, 'wt') as f:
f.write(textwrap.dedent(six.u("""\
[metadata]
name = test_reqparse
[extras]
test =
foo
baz>3.2 :python_version=='2.7' # MIT
bar>3.3 :python_version=='2.7' # MIT # Apache
""")))
# pkg_resources.split_sections uses None as the title of an
# anonymous section instead of the empty string. Weird.
expected_requirements = {
None: ['bar', 'requests-aws>=0.1.4'],
":(python_version=='2.6')": ['quux<1.0'],
":(python_version=='2.7')": ['Routes>=1.12.3,!=2.0,!=2.1',
'requests-kerberos>=0.6'],
'test': ['foo'],
"test:(python_version=='2.7')": ['baz>3.2', 'bar>3.3']
}
setup_py = os.path.join(tempdir, 'setup.py')
with open(setup_py, 'wt') as f:
f.write(textwrap.dedent(six.u("""\
#!/usr/bin/env python
import setuptools
setuptools.setup(
setup_requires=['pbr'],
pbr=True,
)
""")))
self._run_cmd(sys.executable, (setup_py, 'egg_info'),
allow_fail=False, cwd=tempdir)
egg_info = os.path.join(tempdir, 'test_reqparse.egg-info')
requires_txt = os.path.join(egg_info, 'requires.txt')
with open(requires_txt, 'rt') as requires:
generated_requirements = dict(
pkg_resources.split_sections(requires))
self.assertEqual(expected_requirements, generated_requirements)
| {
"content_hash": "1a5c6a9d17d2ee8f6fd0aee7cdc7c32f",
"timestamp": "",
"source": "github",
"line_count": 475,
"max_line_length": 79,
"avg_line_length": 37.64,
"alnum_prop": 0.5846523854801723,
"repo_name": "rosudrag/Freemium-winner",
"id": "32e9f562e0297ae4f80c780ba0d5a9b421db6a0f",
"size": "19731",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "VirtualEnvironment/Lib/site-packages/pbr/tests/test_packaging.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1079"
},
{
"name": "C",
"bytes": "6349"
},
{
"name": "CSS",
"bytes": "12216"
},
{
"name": "Groff",
"bytes": "17679"
},
{
"name": "HTML",
"bytes": "13547"
},
{
"name": "JavaScript",
"bytes": "35679"
},
{
"name": "PowerShell",
"bytes": "1506"
},
{
"name": "Python",
"bytes": "12351458"
},
{
"name": "Tcl",
"bytes": "24447"
}
],
"symlink_target": ""
} |
import sys
from typing import Any, AsyncIterable, Callable, Dict, Optional, TypeVar
import urllib.parse
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._api_revision_operations import build_list_by_service_request
from .._vendor import ApiManagementClientMixinABC
if sys.version_info >= (3, 8):
from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports
else:
from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ApiRevisionOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.apimanagement.aio.ApiManagementClient`'s
:attr:`api_revision` attribute.
"""
models = _models
def __init__(self, *args, **kwargs) -> None:
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
def list_by_service(
self,
resource_group_name: str,
service_name: str,
api_id: str,
filter: Optional[str] = None,
top: Optional[int] = None,
skip: Optional[int] = None,
**kwargs: Any
) -> AsyncIterable["_models.ApiRevisionContract"]:
"""Lists all revisions of an API.
:param resource_group_name: The name of the resource group. Required.
:type resource_group_name: str
:param service_name: The name of the API Management service. Required.
:type service_name: str
:param api_id: API identifier. Must be unique in the current API Management service instance.
Required.
:type api_id: str
:param filter: | Field | Usage | Supported operators | Supported
functions |</br>|-------------|-------------|-------------|-------------|</br>| apiRevision
| filter | ge, le, eq, ne, gt, lt | substringof, contains, startswith, endswith |</br>. Default
value is None.
:type filter: str
:param top: Number of records to return. Default value is None.
:type top: int
:param skip: Number of records to skip. Default value is None.
:type skip: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ApiRevisionContract or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.apimanagement.models.ApiRevisionContract]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2021-08-01"]
cls = kwargs.pop("cls", None) # type: ClsType[_models.ApiRevisionCollection]
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_service_request(
resource_group_name=resource_group_name,
service_name=service_name,
api_id=api_id,
subscription_id=self._config.subscription_id,
filter=filter,
top=top,
skip=skip,
api_version=api_version,
template_url=self.list_by_service.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("ApiRevisionCollection", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
list_by_service.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/apis/{apiId}/revisions"} # type: ignore
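# --- Illustrative usage sketch (not part of the generated client). ---
# list_by_service() returns an AsyncItemPaged, so results are consumed
# with `async for`. The credential, subscription id and resource names
# below are placeholders, not values from this module.
#
#   client = ApiManagementClient(credential, "<subscription-id>")
#   async for revision in client.api_revision.list_by_service(
#       resource_group_name="my-rg", service_name="my-apim", api_id="echo-api"
#   ):
#       print(revision)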
| {
"content_hash": "3021ff4ec7df392bb7436219dc793190",
"timestamp": "",
"source": "github",
"line_count": 165,
"max_line_length": 203,
"avg_line_length": 44.018181818181816,
"alnum_prop": 0.61737574005232,
"repo_name": "Azure/azure-sdk-for-python",
"id": "f2d3a0e416f32d2a6e455422a588caf8dd7d7a0a",
"size": "7763",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/apimanagement/azure-mgmt-apimanagement/azure/mgmt/apimanagement/aio/operations/_api_revision_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
from collections import deque
import numpy as np
import argparse
import imutils
import cv2
import time
import socket
from internetcheck import ServerConnection
# construct the argument parse and parse the arguments
# v4l2-ctl --set-ctrl brightness=130
#cmd commands:
#source ~/.profile
#workon cv
#python '/home/pi/Documents/PythonProjects/pyImage.py' or wherever u have pyImage saved
from threading import Thread
import cv2
class WebcamVideoStream:
def __init__(self, src=0):
# initialize the video camera stream and read the first frame
# from the stream
self.stream = cv2.VideoCapture(src)
#self.stream.set(3, 1920)
#self.stream.set(4, 1080)
#self.stream.set(15,-100)
(self.grabbed, self.frame) = self.stream.read()
# initialize the variable used to indicate if the thread should
# be stopped
self.stopped = False
def start(self):
# start the thread to read frames from the video stream
Thread(target=self.update, args=()).start()
return self
def update(self):
# keep looping infinitely until the thread is stopped
while True:
# if the thread indicator variable is set, stop the thread
if self.stopped:
self.stream.release()
return
# otherwise, read the next frame from the stream
(self.grabbed, self.frame) = self.stream.read()
def read(self):
# return the frame most recently read
return self.frame
def stop(self):
# indicate that the thread should be stopped
self.stopped = True
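# --- Illustrative usage sketch (not part of the original script). ---
# The background thread keeps grabbing frames, so read() never blocks on
# camera I/O; stop() ends the loop and releases the device.
#
#   stream = WebcamVideoStream(src=0).start()
#   frame = stream.read()
#   stream.stop()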
def contourArea(contours):
    # Pair each contour's area with its index, then sort ascending by area.
    area = []
    for i in range(0, len(contours)):
        area.append([cv2.contourArea(contours[i]), i])
    area.sort()
    # Accept the largest contour only if it clearly dominates the smallest
    # (at least 7x bigger); otherwise report "nothing found" as [0, 0] so the
    # caller's `area, place = contourArea(...)` unpacking still works.
    if area[-1][0] >= 7 * area[0][0]:
        return area[-1]
    return [0, 0]
def onmouse(k, x, y, s, p):
global hsv
if k == 1: # left mouse, print pixel at x,y
print(hsv[y, x])
if __name__ == "__main__":
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video",
help="path to the (optional) video file")
ap.add_argument("-b", "--buffer", type=int, default=64,
help="max buffer size")
args = vars(ap.parse_args())
# define the lower and upper boundaries of the "green"
# ball in the HSV color space, then initialize the
# list of tracked points
#lower_green = (70, 120, 120)
#upper_green = (120, 200, 255)
#changed from 70 to 55 for hue; hue is basically the only one that matters
lower_green = (55, 50, 120)
upper_green = (90, 250, 256)
UDP_IP = '192.168.1.111'
#UDP_IP = '10.140.123.54'
#UDP_IP = '10.54.65.79'
UDP_PORT = 5465
BUFFER_SIZE = 1024
MESSAGE1 = 'Y'
MESSAGE2 = 'N'
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
numframes = 0
#connection = ServerConnection()
#connection.start()
time.sleep(1)
camera = WebcamVideoStream(src=1).start()
start_time = time.time()
while True:
frame = camera.read()
# resize the frame, blur it, and convert it to the HSV
# color space
frame = imutils.resize(frame, width=600)
img = cv2.GaussianBlur(frame, (5, 5), 0)
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
# construct a mask for the color "green", then perform
# a series of dilations and erosions to remove any small
# blobs left in the mask
mask = cv2.inRange(hsv, lower_green, upper_green)
edged = cv2.Canny(mask, 35, 125)
# find contours in the mask and initialize the current
# (x, y) center of the ball
im2, cnts, hierarchy = cv2.findContours(edged.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
if(numframes == 0): print(img.shape)
if (len(cnts) > 1):
area,place = contourArea(cnts)
if(area != 0):
c = cnts[place]
#cv2.drawContours(frame, c, -1, (0, 0, 255), 3)
M = cv2.moments(c)
cx = int(M['m10'] / M['m00']) # Center of MASS Coordinates
cy = int(M['m01'] / M['m00'])
rect = cv2.minAreaRect(c)
length = rect[1][1]
sock.sendto(('Y ' + str(cx) + ' ' + str(cy) + ' '+ "{0:.2f}".format(length)).encode(),(UDP_IP,UDP_PORT))
#sock.sendto(('Y').encode(),(UDP_IP,UDP_PORT))
else:
sock.sendto('N'.encode(),(UDP_IP,UDP_PORT))
cv2.namedWindow("Image w Contours")
cv2.setMouseCallback("Image w Contours", onmouse)
cv2.imshow('Image w Contours', frame)
key = cv2.waitKey(1) & 0xFF
# if the 'q' key is pressed, stop the loop
if key == ord("q"):
break
#if(numframes == 0): print(frame.shape)
numframes+=1
#if numframes >= 200:
#break
camera.stop()
totTime = time.time() - start_time
print("--- %s seconds ---" % (totTime))
print('----%s fps ----' % (numframes / totTime))
print(numframes)
# cleanup the camera and close any open windows
cv2.destroyAllWindows()
#connection.stop()
#sock.sendto('D'.encode(),(UDP_IP,UDP_PORT))
| {
"content_hash": "6e5f5ff7aa640a1203a0e56e545b8a94",
"timestamp": "",
"source": "github",
"line_count": 182,
"max_line_length": 120,
"avg_line_length": 29.873626373626372,
"alnum_prop": 0.5655692477469193,
"repo_name": "SachinKonan/Windows-RPI-Vision-Framework",
"id": "fa42d304684f220248434357306c14392775d9c6",
"size": "5437",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "VisionCode/pyImage.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "189"
},
{
"name": "Jupyter Notebook",
"bytes": "20480"
},
{
"name": "Python",
"bytes": "140014"
}
],
"symlink_target": ""
} |
import os
# 3rd party
# Flask
from flask_appbuilder.security.manager import AUTH_OID, AUTH_REMOTE_USER, AUTH_DB, AUTH_LDAP, AUTH_OAUTH
basedir = os.path.abspath(os.path.dirname(__file__))
# Your App secret key
SECRET_KEY = '\2\1thisismyscretkey\1\2\e\y\y\h'
# The SQLAlchemy connection string.
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, 'app.db')
#SQLALCHEMY_DATABASE_URI = 'mysql://myapp@localhost/myapp'
#SQLALCHEMY_DATABASE_URI = 'postgresql://root:password@localhost/myapp'
# Flask-WTF flag for CSRF
CSRF_ENABLED = True
#------------------------------
# GLOBALS FOR APP Builder
#------------------------------
# Uncomment to setup Your App name
#APP_NAME = "My App Name"
# Uncomment to setup Setup an App icon
#APP_ICON = "static/img/logo.jpg"
#----------------------------------------------------
# AUTHENTICATION CONFIG
#----------------------------------------------------
# The authentication type
# AUTH_OID : Is for OpenID
# AUTH_DB : Is for database (username/password()
# AUTH_LDAP : Is for LDAP
# AUTH_REMOTE_USER : Is for using REMOTE_USER from web server
AUTH_TYPE = AUTH_DB
# Uncomment to setup Full admin role name
#AUTH_ROLE_ADMIN = 'Admin'
# Uncomment to setup Public role name, no authentication needed
#AUTH_ROLE_PUBLIC = 'Public'
# Will allow user self registration
#AUTH_USER_REGISTRATION = True
# The default user self registration role
#AUTH_USER_REGISTRATION_ROLE = "Public"
# When using LDAP Auth, setup the ldap server
#AUTH_LDAP_SERVER = "ldap://ldapserver.new"
# Uncomment to setup OpenID providers example for OpenID authentication
#OPENID_PROVIDERS = [
# { 'name': 'Yahoo', 'url': 'https://me.yahoo.com' },
# { 'name': 'AOL', 'url': 'http://openid.aol.com/<username>' },
# { 'name': 'Flickr', 'url': 'http://www.flickr.com/<username>' },
# { 'name': 'MyOpenID', 'url': 'https://www.myopenid.com' }]
#---------------------------------------------------
# Babel config for translations
#---------------------------------------------------
# Setup default language
BABEL_DEFAULT_LOCALE = 'en'
# Your application default translation path
BABEL_DEFAULT_FOLDER = 'translations'
# The allowed translation for you app
LANGUAGES = {
'en': {'flag':'gb', 'name':'English'},
'pt': {'flag':'pt', 'name':'Portuguese'},
'pt_BR': {'flag':'br', 'name': 'Pt Brazil'},
'es': {'flag':'es', 'name':'Spanish'},
'de': {'flag':'de', 'name':'German'},
'zh': {'flag':'cn', 'name':'Chinese'},
'ru': {'flag':'ru', 'name':'Russian'},
'pl': {'flag':'pl', 'name':'Polish'}
}
#---------------------------------------------------
# Image and file configuration
#---------------------------------------------------
# The file upload folder, when using models with files
UPLOAD_FOLDER = basedir + '/app/static/uploads/'
# The image upload folder, when using models with images
IMG_UPLOAD_FOLDER = basedir + '/app/static/uploads/'
# The image upload url, when using models with images
IMG_UPLOAD_URL = '/static/uploads/'
# Setup image size default is (300, 200, True)
#IMG_SIZE = (300, 200, True)
# Theme configuration
# these are located on static/appbuilder/css/themes
# you can create your own and easily use them placing them on the same dir structure to override
#APP_THEME = "bootstrap-theme.css" # default bootstrap
#APP_THEME = "cerulean.css"
#APP_THEME = "amelia.css"
#APP_THEME = "cosmo.css"
#APP_THEME = "cyborg.css"
#APP_THEME = "flatly.css"
#APP_THEME = "journal.css"
#APP_THEME = "readable.css"
#APP_THEME = "simplex.css"
#APP_THEME = "slate.css"
#APP_THEME = "spacelab.css"
#APP_THEME = "united.css"
#APP_THEME = "yeti.css"
| {
"content_hash": "4216fb63facfbbf8dc9f0405ee7e4c70",
"timestamp": "",
"source": "github",
"line_count": 108,
"max_line_length": 104,
"avg_line_length": 33.50925925925926,
"alnum_prop": 0.6145344017684443,
"repo_name": "approximatelylinear/trans-passports-docker",
"id": "b033240afd2ad1ebb152d42451acb9403425d2f6",
"size": "3653",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "code/trans_passports/trans_passports/app/config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "6477"
},
{
"name": "HTML",
"bytes": "128"
},
{
"name": "Makefile",
"bytes": "9526"
},
{
"name": "Nginx",
"bytes": "1133"
},
{
"name": "Python",
"bytes": "1548921"
},
{
"name": "Shell",
"bytes": "1604"
}
],
"symlink_target": ""
} |
"""
Copyright (c) 2013, G Roberts
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the project nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
__all__ = ['make_facet_id', 'flatten_facet', 'all_permutations', 'Daemon',
'QueueDaemon', 'unixtime', 'to_utf8_str', 'split_facet']
from base64 import b64encode
from time import time as unixtime
import hashlib, marshal, signal, json, logging
LOG = logging.getLogger(__name__)
class DataObject(object):
"""
http://www.saltycrane.com/blog/2012/08/python-data-object-motivated-desire-mutable-namedtuple-default-values/
An object to hold data. Motivated by a desire for a mutable namedtuple with
default values. To use, subclass, and define __slots__.
The default default value is None. To set a default value other than None,
set the `default_value` class variable.
Example:
class Jello(DataObject):
default_value = 'no data'
__slots__ = (
'request_date',
'source_id',
'year',
'group_id',
'color',
# ...
)
"""
__slots__ = ()
default_value = None
def __init__(self, *args, **kwargs):
# Set default values
for att in self.__slots__:
setattr(self, att, self.default_value)
# Set attributes passed in as arguments
for k, v in zip(self.__slots__, args):
setattr(self, k, v)
for k, v in kwargs.items():
setattr(self, k, v)
def asdict(self):
return dict(
(att, getattr(self, att)) for att in self.__slots__)
def astuple(self):
return tuple(getattr(self, att) for att in self.__slots__)
def __repr__(self):
return '{}({})'.format(
self.__class__.__name__,
', '.join('{}={}'.format(
att, repr(getattr(self, att))) for att in self.__slots__))
def to_utf8_str(obj):
"""
Converts argument into a utf-8 byte string
"""
if isinstance(obj, basestring):
if isinstance(obj, unicode):
return obj.encode('utf-8', 'ignore')
# Otherwise.. must be a str() which is already bytes
return obj
elif isinstance(obj, int):
return "%d" % (obj,)
elif isinstance(obj, float):
return "%f" % (obj)
raise TypeError, "Cannot convert type '%s' to utf-8 string" % (type(obj),)
def split_facet(facet):
"""
Returns a dictionary of the following elements:
- id: Hashed facet ID
- parent_id: ID of the parent facet
- child: Full value of the last item from the facet
"""
assert type(facet) in [list, set]
facet = flatten_facet(facet)
facet_id = make_facet_id(facet)
facet_parent = facet[:-1]
if len(facet_parent) == len(facet):
facet_child = ''
else:
facet_child = to_utf8_str(facet[-1])
facet_parent_id = make_facet_id(facet_parent)
return {
'id': facet_id,
'parent_id': facet_parent_id,
'child': facet_child
}
def make_facet_id(facet):
"""
Unique ID for the facet
"""
if type(facet) not in [list, tuple]:
facet = [facet]
hasher = hashlib.new('sha1')
for value in facet:
hasher.update(to_utf8_str(value))
return b64encode(hasher.digest()[:9])
def flatten_facet(facets_to_flatten):
"""
Flattens a list of lists into a single list.
[['derp', 123], ['merp', 456]]
becomes
['derp', 123, 'merp', 456]
"""
out = []
for facet in facets_to_flatten:
out += facet
return out
def power_set(inputs, minlength=1):
count = len(inputs)
members = int(pow(2, count))
# Ewww... we're formatting the number into binary to work out which
# entries to output
bstr = '{' + "0:0{}b".format(count) + '}'
ret = []
for i in range(0, members):
b = bstr.format(i)
out = []
for j in range(0, count):
if b[j] == '1':
out.append(inputs[j])
if len(out) >= minlength:
ret.append(out)
return ret
def permute(dims, i=0, chain=None):
if chain is None:
chain = []
if i >= len(dims):
return [chain]
chains = []
for l in dims[i]:
chains += permute(dims, i + 1, chain + [l])
return chains
def all_permutations(inputs):
"""
All permutations of the input dictionary.
{'derp': [123, 456], 'merp': 987}
Will create output like:
[['derp', 123, 456],
['derp', 123],
['merp', 987],
['merp', 987, 'derp', 123],
['merp', 987, 'derp', 123, 456]]
Values can be floats, ints and strings.
Or they can be lists of ints, floats and strings.
"""
sets = {}
if type(inputs) == dict:
inputs = inputs.items()
for key, levels in inputs:
combos = []
for level in levels:
combos.append(level)
if key not in sets:
sets[key] = []
sets[key].append([key] + combos)
all_points = power_set(sets.values())
ret = []
for lol in all_points:
ret += permute(lol)
return ret
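# --- Illustrative sketch (not part of the original module). ---
# A facet is a flat [key, value, ...] list; make_facet_id() hashes its
# utf-8-encoded items into a short, stable identifier, and split_facet()
# separates the last value from its parent facet. Values are placeholders.
#
#   flat = flatten_facet([['country', 'US'], ['device', 'mobile']])
#   # flat == ['country', 'US', 'device', 'mobile']
#   parts = split_facet([['country', 'US'], ['device', 'mobile']])
#   # parts['child'] == 'mobile'; parts['parent_id'] is the id of the
#   # facet without its last item.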
class Daemon(object):
"""
This daemon takes records from the `queue` list in Redis and inserts into
HyperDex.
"""
def __init__(self):
self._stop = False
self._status = {
'last': unixtime(),
'interval': 2
}
self._stats = {}
signal.signal(signal.SIGINT, self._signal_handler)
def is_stopping(self):
"""
Break the run() loop at the next possible opportunity
"""
return self._stop
def _signal_handler(self, _sig, _frame=None):
"""
Stop daemon gracefully
"""
print "SIGINT caught, stopping gracefully"
self._stop = True
def incr_stats(self, name, value=1):
"""
Increment the named counter
"""
self._stats[name] = self._stats.get(name, 0) + value
def show_status(self):
"""
Display a summary line
"""
status = self._status
stats = self._stats
now = unixtime()
if (now - status['last']) > status['interval']:
status['last'] = now
print 'now:', ' | '.join(['%s:%d' % (key, value) for key, value in stats.items()])
self._stats = {key: 0 for key in stats.keys()}
class QueueDaemon(Daemon):
"""
    Waits for entries on a Redis queue and hands the records to the process()
    function. The records must be a 'marshal' encoded dictionary with both 'id'
and 'ttl' keys.
"""
def __init__(self, rdb):
"""
:param rdb: StrictRedis instance
"""
assert rdb is not None
super(QueueDaemon, self).__init__()
self._rdb = rdb
@property
def redis(self):
return self._rdb
def process(self, record):
"""
Process a single record
:param record: Dictionary
"""
raise NotImplementedError
def _handle(self, data):
"""
Grunt work, wrapper for the 'process' method.
Handles re-queueing of items which couldn't be processed.
"""
self.incr_stats('popped')
try:
record = marshal.loads(data)
except ValueError:
record = None
if record is None:
self.incr_stats('invalid')
return
is_processed = False
try:
is_processed = self.process(record)
except Exception:
LOG.error("Failed to process", exc_info=True)
# Failed processing for some reason
if not is_processed:
# Put the CDR back in queue for processing if process fails
record['ttl'] = record.get('ttl', 0) + 1
if record['ttl'] > 3:
# But only 3-4 times... then it's 'fucked'
# XXX: how do we handle 'fucked' items?
self.redis.rpush('queue_fucked', json.dumps(record))
self.incr_stats('fucked')
else:
self.redis.rpush('queue', json.dumps(record))
self.incr_stats('retry')
self.incr_stats('redis.ops.rpush')
self.incr_stats('redis.ops')
else:
# TODO: insert the 'cost' of processing this record
self.redis.rpush(record['id'], unixtime())
self.redis.expire(record['id'], 2)
self.incr_stats('processed')
self.incr_stats('redis.ops.rpush')
self.incr_stats('redis.ops.expire')
self.incr_stats('redis.ops', 2)
def run(self, queue_name):
"""
Listen for messages on the 'cdrpickup' channel and process them.
Loops forever.
"""
while not self.is_stopping():
# TODO: blpoprpush onto a 'working' list
# then move to a 'done' list
# must be uber reliable!
msg = self.redis.blpop([queue_name], timeout=1)
self.incr_stats('redis.ops.blpop')
self.incr_stats('redis.ops')
if msg is not None and len(msg) == 2:
self._handle(msg[1])
self.show_status()
print "stopped" | {
"content_hash": "ee3fc2e6c13bc83bd07c66705a29abde",
"timestamp": "",
"source": "github",
"line_count": 343,
"max_line_length": 113,
"avg_line_length": 31.38192419825073,
"alnum_prop": 0.5674470457079153,
"repo_name": "HarryR/HyperStats",
"id": "3b7b6860a8e22a4f874bf31a9ec5df6d4e0552af",
"size": "10764",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hyperstats/common.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "36210"
}
],
"symlink_target": ""
} |
from pants.backend.jvm.targets.jar_dependency import JarDependency
from pants.backend.jvm.targets.jar_library import JarLibrary
from pants_test.tasks.task_test_base import TaskTestBase
from squarepants.plugins.link_resources_jars.targets.resources_jar import ResourcesJar
from squarepants.plugins.link_resources_jars.tasks.link_resources_jars import LinkResourcesJars
class CopyResourcesTest(TaskTestBase):
@classmethod
def task_type(cls):
return LinkResourcesJars
def test_resources_jar_target(self):
jar = JarDependency(org='foo', name='bar', rev='1.2.3')
lib = self.make_target(spec='test/foo-library', target_type=JarLibrary, jars=[jar])
resource_jar = self.make_target(spec='test/copy-resources', target_type=ResourcesJar,
dependencies=[lib], dest='foo.jar')
self.assertEquals('foo.jar', resource_jar.payload.dest)
| {
"content_hash": "c4c90df005fc7d5fe97717c000ac2bdb",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 95,
"avg_line_length": 42.85,
"alnum_prop": 0.7736289381563594,
"repo_name": "ericzundel/mvn2pants",
"id": "a0b4e94b3b4a3e4439a5b84940a160611b866816",
"size": "1063",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/python/squarepants_test/plugins/test_link_resources_jars.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "283"
},
{
"name": "Python",
"bytes": "641401"
},
{
"name": "Shell",
"bytes": "240"
}
],
"symlink_target": ""
} |
from robot.utils import is_string, py2to3, PY3
from .comments import Comment
if PY3:
unicode = str
@py2to3
class Setting(object):
def __init__(self, setting_name, parent=None, comment=None):
self.setting_name = setting_name
self.parent = parent
self._set_initial_value()
self._set_comment(comment)
self._populated = False
def _set_initial_value(self):
self.value = []
def _set_comment(self, comment):
self.comment = Comment(comment)
def reset(self):
self.__init__(self.setting_name, self.parent)
@property
def source(self):
return self.parent.source if self.parent is not None else None
@property
def directory(self):
return self.parent.directory if self.parent is not None else None
def populate(self, value, comment=None):
"""Mainly used at parsing time, later attributes can be set directly."""
if not self._populated:
self._populate(value)
self._set_comment(comment)
self._populated = True
else:
self._set_initial_value()
self._set_comment(None)
self.report_invalid_syntax("Setting '%s' used multiple times."
% self.setting_name, 'ERROR')
def _populate(self, value):
self.value = value
def is_set(self):
return bool(self.value)
def is_for_loop(self):
return False
def report_invalid_syntax(self, message, level='ERROR'):
self.parent.report_invalid_syntax(message, level)
def _string_value(self, value):
return value if is_string(value) else ' '.join(value)
def _concat_string_with_value(self, string, value):
if string:
return string + ' ' + self._string_value(value)
return self._string_value(value)
def as_list(self):
return self._data_as_list() + self.comment.as_list()
def _data_as_list(self):
ret = [self.setting_name]
if self.value:
ret.extend(self.value)
return ret
def __nonzero__(self):
return self.is_set()
def __iter__(self):
return iter(self.value or ())
def __unicode__(self):
return unicode(self.value or '')
class StringValueJoiner(object):
def __init__(self, separator):
self._separator = separator
def join_string_with_value(self, string, value):
if string:
return string + self._separator + self.string_value(value)
return self.string_value(value)
def string_value(self, value):
if is_string(value):
return value
return self._separator.join(value)
class Documentation(Setting):
def _set_initial_value(self):
self.value = ''
def _populate(self, value):
self.value = self._concat_string_with_value(self.value, value)
def _string_value(self, value):
return value if is_string(value) else ''.join(value)
def _data_as_list(self):
return [self.setting_name, self.value]
class Template(Setting):
def _set_initial_value(self):
self.value = None
def _populate(self, value):
self.value = self._concat_string_with_value(self.value, value)
def is_set(self):
return self.value is not None
def is_active(self):
return self.value and self.value.upper() != 'NONE'
def _data_as_list(self):
ret = [self.setting_name]
if self.value:
ret.append(self.value)
return ret
class Fixture(Setting):
# `keyword`, `is_comment` and `assign` make the API compatible with Step.
@property
def keyword(self):
return self.name or ''
def is_comment(self):
return False
def _set_initial_value(self):
self.name = None
self.args = []
self.assign = ()
def _populate(self, value):
if not self.name:
self.name = value[0] if value else ''
value = value[1:]
self.args.extend(value)
def is_set(self):
return self.name is not None
def is_active(self):
return self.name and self.name.upper() != 'NONE'
def _data_as_list(self):
ret = [self.setting_name]
if self.name or self.args:
ret.append(self.name or '')
if self.args:
ret.extend(self.args)
return ret
class Timeout(Setting):
def _set_initial_value(self):
self.value = None
self.message = ''
def _populate(self, value):
if not self.value:
self.value = value[0] if value else ''
value = value[1:]
self.message = self._concat_string_with_value(self.message, value)
# TODO: Remove custom timeout message support in RF 3.1.
if value and self.parent:
self.parent.report_invalid_syntax(
'Using custom timeout messages is deprecated since Robot '
'Framework 3.0.1 and will be removed in future versions. '
"Message that was used is '%s'." % self.message, level='WARN')
def is_set(self):
return self.value is not None
def _data_as_list(self):
ret = [self.setting_name]
if self.value or self.message:
ret.append(self.value or '')
if self.message:
ret.append(self.message)
return ret
class Tags(Setting):
def _set_initial_value(self):
self.value = None
def _populate(self, value):
self.value = (self.value or []) + value
def is_set(self):
return self.value is not None
def __add__(self, other):
if not isinstance(other, Tags):
raise TypeError('Tags can only be added with tags')
tags = Tags('Tags')
tags.value = (self.value or []) + (other.value or [])
return tags
class Arguments(Setting):
pass
class Return(Setting):
pass
class Metadata(Setting):
setting_name = 'Metadata'
def __init__(self, parent, name, value, comment=None, joined=False):
self.parent = parent
self.name = name
joiner = StringValueJoiner('' if joined else ' ')
self.value = joiner.join_string_with_value('', value)
self._set_comment(comment)
def reset(self):
pass
def is_set(self):
return True
def _data_as_list(self):
return [self.setting_name, self.name, self.value]
class _Import(Setting):
def __init__(self, parent, name, args=None, alias=None, comment=None):
self.parent = parent
self.name = name
self.args = args or []
self.alias = alias
self._set_comment(comment)
def reset(self):
pass
@property
def type(self):
return type(self).__name__
def is_set(self):
return True
def _data_as_list(self):
return [self.type, self.name] + self.args
def report_invalid_syntax(self, message, level='ERROR', parent=None):
parent = parent or getattr(self, 'parent', None)
if parent:
parent.report_invalid_syntax(message, level)
else:
from robot.api import logger
logger.write(message, level)
class Library(_Import):
def __init__(self, parent, name, args=None, alias=None, comment=None):
if args and not alias:
args, alias = self._split_alias(args, parent)
_Import.__init__(self, parent, name, args, alias, comment)
def _split_alias(self, args, parent):
if len(args) > 1 and is_string(args[-2]):
with_name = args[-2]
if with_name.upper() == 'WITH NAME':
# TODO: Require all uppercase 'WITH NAME' in RF 3.1.
# https://github.com/robotframework/robotframework/issues/2263
if with_name != 'WITH NAME':
self._deprecation_warning(with_name, parent)
return args[:-2], args[-1]
return args, None
def _deprecation_warning(self, with_name, parent):
message = ("Using 'WITH NAME' syntax when importing libraries case "
"insensitively like '%s' is deprecated. Use all upper case "
"format 'WITH NAME' instead." % with_name)
self.report_invalid_syntax(message, 'WARN', parent)
def _data_as_list(self):
data = ['Library', self.name] + self.args
if self.alias:
data += ['WITH NAME', self.alias]
return data
class Resource(_Import):
def __init__(self, parent, name, invalid_args=None, comment=None):
if invalid_args:
name += ' ' + ' '.join(invalid_args)
_Import.__init__(self, parent, name, comment=comment)
class Variables(_Import):
def __init__(self, parent, name, args=None, comment=None):
_Import.__init__(self, parent, name, args, comment=comment)
class _DataList(object):
def __init__(self, parent):
self._parent = parent
self.data = []
def add(self, meta):
self._add(meta)
def _add(self, meta):
self.data.append(meta)
def _parse_name_and_value(self, value):
name = value[0] if value else ''
return name, value[1:]
def __getitem__(self, index):
return self.data[index]
def __setitem__(self, index, item):
self.data[index] = item
def __len__(self):
return len(self.data)
def __iter__(self):
return iter(self.data)
class ImportList(_DataList):
def populate_library(self, data, comment):
self._populate(Library, data, comment)
def populate_resource(self, data, comment):
self._populate(Resource, data, comment)
def populate_variables(self, data, comment):
self._populate(Variables, data, comment)
def _populate(self, item_class, data, comment):
name, value = self._parse_name_and_value(data)
self._add(item_class(self._parent, name, value, comment=comment))
class MetadataList(_DataList):
def populate(self, name, value, comment):
self._add(Metadata(self._parent, name, value, comment, joined=True))
| {
"content_hash": "95f98efc60545e4390408709232432c5",
"timestamp": "",
"source": "github",
"line_count": 378,
"max_line_length": 80,
"avg_line_length": 26.833333333333332,
"alnum_prop": 0.5860199152124618,
"repo_name": "alexandrul-ci/robotframework",
"id": "fb51cd3cf88cb31dc0566a8c25ee0f4053b8a144",
"size": "10787",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/robot/parsing/settings.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "245"
},
{
"name": "CSS",
"bytes": "27452"
},
{
"name": "HTML",
"bytes": "140980"
},
{
"name": "Java",
"bytes": "58264"
},
{
"name": "JavaScript",
"bytes": "161259"
},
{
"name": "Python",
"bytes": "2271402"
},
{
"name": "RobotFramework",
"bytes": "2096190"
},
{
"name": "Shell",
"bytes": "281"
}
],
"symlink_target": ""
} |
from ensetuptools.utilities import *
import unittest
import os, sys
class FindEggsTest(unittest.TestCase):
def test_find_eggs_in_scipy(self):
# a 'hard' download page to parse
url = 'http://sourceforge.net/project/showfiles.php?group_id=27747&package_id=19531'
# should be something here
self.assertNotEqual(find_eggs_in_url(url), [])
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "18bc9c6f7f21494d4ecaec5ab2f226ad",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 92,
"avg_line_length": 32.30769230769231,
"alnum_prop": 0.6714285714285714,
"repo_name": "cournape/ensetuptools",
"id": "6ef1b38468e4b13a6b47d7b20857bde2682bb05d",
"size": "420",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ensetuptools/tests/utilities_test.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "13592"
},
{
"name": "Python",
"bytes": "654383"
}
],
"symlink_target": ""
} |
import webtest
from .. common import BaseTest, WPS_TEST_SERVICE, dummy_request
from twitcher.store import ServiceStore
class FunctionalTest(BaseTest):
def test_app(self):
app = webtest.TestApp(
self.config.make_wsgi_app(),
extra_environ={'db.session': self.session, 'tm.active': True})
return app
def init_store(self):
# add public wps service
service_store = ServiceStore(
dummy_request(dbsession=self.session))
service_store.save_service(
name="wps",
url=WPS_TEST_SERVICE,
type="wps",
auth='token',
verify=False,
purl="http://purl/wps")
# add secured wps service
service_store.save_service(
name="wps_secured",
url=WPS_TEST_SERVICE,
type="wps",
auth='token',
verify=False,
purl="http://purl/wps_secured")
| {
"content_hash": "b718f875ca5272ddc2e7d6c4ea918891",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 74,
"avg_line_length": 28.96969696969697,
"alnum_prop": 0.5575313807531381,
"repo_name": "bird-house/pywps-proxy",
"id": "1b4bb6a143643603b374cc2a8e73cf35c669c386",
"size": "956",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/functional/base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "10665"
},
{
"name": "Nginx",
"bytes": "1306"
},
{
"name": "Python",
"bytes": "64846"
},
{
"name": "Shell",
"bytes": "2644"
}
],
"symlink_target": ""
} |
import csv
import pkg_resources
import re
from lxml import etree
import StringIO
LEFT_WHITE_SPACE_RE = re.compile('\A[ \n\t].*\Z', re.DOTALL)
RIGHT_WHITE_SPACE_RE = re.compile('\A.*[ \n\t]\Z', re.DOTALL)
INNER_XML_RE = re.compile(u'\A[ \n\t]*<[^>]+?>(?P<body>.*)</[^>]+?>[ \n\t]*\Z')
def strip_text(text):
"""
    Takes a bunch of text and removes all possible "indentation gunk"
from it.
>>> example_noisy_text = '''
... hey guys
... it looks like
... I am all over the place'''
>>> strip_text(example_noisy_text)
u'hey guys it looks like I am all over the place'
"""
if text:
return u' '.join(
[line.strip() for line in text.splitlines() if line.strip()])
else:
# return whatever text was, there's nothing to do
# (prolly None or empty string)
return text
def strip_xml(element):
"""
Recursively strip clean indentation from xml. Especially useful
if you're using a template.
For example, this is a bit of a mess:
>>> xml_mess = '''
... <help> How did
...
... <person>I
... </person>
... get to be so <cleanliness
... xmlns:clean="http://example.org/howclean/#"
... clean:cleanliness="filthy">messy</cleanliness>?
... </help> '''
strip_xml requires that you pass in an element though, so let's
get the root node and pass it in:
>>> from lxml import etree
>>> import StringIO
>>> etree_mess = etree.parse(StringIO.StringIO(xml_mess))
>>> cleaned_root_mess = strip_xml(etree_mess.getroot())
>>> etree.tostring(cleaned_root_mess)
'<help>How did <person>I</person> get to be so <cleanliness xmlns:clean="http://example.org/howclean/#" clean:cleanliness="filthy">messy</cleanliness>?</help>'
Note that strip_xml operates on the mutability of the argument
`element`, so the object returned is the same object that's passed
in.
>>> cleaned_root_mess is etree_mess.getroot()
True
"""
def _recursive_strip(elt, childpos, childrenlen):
orig_text = elt.text or ''
orig_tail = elt.tail or ''
children = list(elt)
new_childrenlen = len(children)
elt.text = strip_text(elt.text)
elt.tail = strip_text(elt.tail)
# We have to do a lot of stuff here to put whitespace in the
# right places and make it look pretty, as if a human wrote
# it.
##########
#### whitespace re-appending
##########
####
## left of the .text
####
# pretty much never
####
## right of the .text
####
# if there are children and is presently whitespace
if elt.text \
and new_childrenlen \
and RIGHT_WHITE_SPACE_RE.match(orig_text):
elt.text = elt.text + ' '
####
## left of the .tail
####
# any time there is presently whitespace
if elt.tail and LEFT_WHITE_SPACE_RE.match(orig_tail):
elt.tail = ' ' + elt.tail
####
## right of the .tail
####
# if there is presently whitespace and not the last child
if elt.tail \
and RIGHT_WHITE_SPACE_RE.match(orig_tail) \
and childpos != childrenlen - 1:
elt.tail = elt.tail + ' '
for i in range(new_childrenlen):
child = children[i]
_recursive_strip(child, i, new_childrenlen)
_recursive_strip(element, 0, 1)
return element
def inner_xml(xml_text):
"""
Get the inner xml of an element.
>>> inner_xml('<div>This is some <i><b>really</b> silly</i> text!</div>')
u'This is some <i><b>really</b> silly</i> text!'
"""
return unicode(INNER_XML_RE.match(xml_text).groupdict()['body'])
def stripped_inner_xml(xml_string):
"""
Take a string of xml and both strip whitespace and return its
inner elements.
This is a convenience function so you don't have to run strip_xml
and inner_xml manually.
>>> stripped_inner_xml('''
... <div>
... This is some <i><b>really</b>
... silly</i> text!</div>''')
u'This is some <i><b>really</b> silly</i> text!'
"""
et = etree.parse(StringIO.StringIO(xml_string))
strip_xml(et.getroot())
return inner_xml(etree.tostring(et))
def remove_blank_lines(string):
new_lines = []
for line in string.splitlines():
if line.strip():
new_lines.append(line)
return '\n'.join(new_lines)
def unicode_cleaner(string):
if isinstance(string, unicode):
return string
try:
return string.decode('utf-8')
except UnicodeError:
try:
return string.decode('latin-1')
except UnicodeError:
return string.decode('utf-8', 'ignore')
def escape(string):
"""
Escape a string into something safe to insert into HTML.
"""
# Simplest escaping possible, kinda borrowed from jinja2.
return (
unicode(string)
        .replace('&', '&amp;')
        .replace('>', '&gt;')
        .replace('<', '&lt;')
        .replace("'", '&#39;')
        .replace('"', '&#34;'))
def locale_dict_fetch_with_fallbacks(data_dict, locale):
"""
Take a dictionary with various locales as keys and translations as
values and a locale, and try to find a value that matches with
good fallbacks.
"""
# try returning the locale as-is
if data_dict.has_key(locale):
return data_dict[locale]
# nope? try just returning the language...
if '-' in locale:
language, country = locale.split('-', 1)
if data_dict.has_key(language):
return data_dict[language]
# still nope? okay, try returning 'en', our default...
if data_dict.has_key('en'):
return data_dict['en']
# still no?? last attempt!
return data_dict[None]
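# Example of the fallback chain (illustrative data):
#   locale_dict_fetch_with_fallbacks({'en': 'Hello', 'de': 'Hallo'}, 'de-AT')
# returns 'Hallo' (falls back from 'de-AT' to 'de'), while an unknown 'fr-FR'
# would fall back to the 'en' entry.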
###
## ISO 3166 -- country names to country code utilities
###
CODE_COUNTRY_LIST = sorted([
(unicode_cleaner(code), unicode_cleaner(country))
for code, country in csv.reader(
file(pkg_resources.resource_filename('cc.license', 'iso3166.csv')))],
key=lambda country: country[1])
CODE_COUNTRY_MAP = dict(CODE_COUNTRY_LIST)
| {
"content_hash": "d559fe72f45e3be149466670b960a1f1",
"timestamp": "",
"source": "github",
"line_count": 223,
"max_line_length": 165,
"avg_line_length": 28.390134529147982,
"alnum_prop": 0.573053230137419,
"repo_name": "creativecommons/cc.license",
"id": "6266fa99d97b4d4b3e712247ba41dbdea0bf5391",
"size": "6331",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "cc/license/util.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1742"
},
{
"name": "Python",
"bytes": "172412"
}
],
"symlink_target": ""
} |
"""
ulid/providers/provider
~~~~~~~~~~~~~~~~~~~~~~~
Contains provider abstract classes.
"""
import abc
import typing
from .. import hints
#: Type hint that defines a two item tuple of bytes returned by the provider.
TimestampRandomnessBytes = typing.Tuple[hints.Bytes, hints.Bytes] # pylint: disable=invalid-name
class Provider(metaclass=abc.ABCMeta):
"""
Abstract class that defines providers that yield timestamp and randomness values.
"""
def new(self) -> TimestampRandomnessBytes:
"""
Create a new timestamp and randomness value.
:return: Two item tuple containing timestamp and randomness values as :class:`~bytes`.
:rtype: :class:`~tuple`
"""
timestamp = self.timestamp()
randomness = self.randomness(timestamp)
return timestamp, randomness
@abc.abstractmethod
def timestamp(self) -> hints.Bytes:
"""
Create a new timestamp value.
:return: Timestamp value in bytes.
:rtype: :class:`~bytes`
"""
raise NotImplementedError('Method must be implemented by derived class')
@abc.abstractmethod
def randomness(self, timestamp: hints.Bytes) -> hints.Bytes:
"""
Create a new randomness value.
:param timestamp: Timestamp in milliseconds
:type timestamp: :class:`~bytes`
:return: Randomness value in bytes.
:rtype: :class:`~bytes`
"""
raise NotImplementedError('Method must be implemented by derived class')
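
# Usage sketch (illustrative; ``FixedProvider`` is not part of this package):
# a concrete provider only needs to supply the two abstract methods, e.g.
#
#     class FixedProvider(Provider):
#         def timestamp(self) -> hints.Bytes:
#             return b'\x00' * 6    # 48-bit ULID timestamp component
#
#         def randomness(self, timestamp: hints.Bytes) -> hints.Bytes:
#             return b'\x00' * 10   # 80-bit ULID randomness component
#
# FixedProvider().new() then returns the (timestamp, randomness) byte tuple.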
| {
"content_hash": "3df0c53912bc2c07d37f32a447bd0c74",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 97,
"avg_line_length": 30.07843137254902,
"alnum_prop": 0.6421121251629727,
"repo_name": "ahawker/ulid",
"id": "7fe6758ad241681372db552183fde39f6520a99a",
"size": "1534",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ulid/providers/base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "3387"
},
{
"name": "PowerShell",
"bytes": "7195"
},
{
"name": "Python",
"bytes": "93605"
}
],
"symlink_target": ""
} |
__author__ = 'Patrizio Tufarolo'
__email__ = 'patrizio.tufarolo@studenti.unimi.it'
from testagent.probe import Probe
import os, sys
sys.path.append(os.path.join(os.path.dirname(__file__), "Search-Scan/"))
print("PROXY:" , os.getenv("http_proxy"));
_searchscan = __import__('Search-scan', globals(), locals(), [], -1); ''' add every class you want to import to the list in the fourth argument, then call it by asking for _searchscan.* '''
globals().update(vars(_searchscan));
class SearchScanPublicInterface(_searchscan.PublicInterface):
def __init__(self, testinstances):
self.time = testinstances["parameters"]["Time"]
self.category = testinstances["parameters"]["Category"]
print("CATEGORY", self.category)
self.cvss = float(testinstances["parameters"]["CVSS"])
self.mongoHost = testinstances["mongo"]["host"]
self.mongoPort = int(testinstances["mongo"]["port"])
self.nessusHost = testinstances["nessus"]["host"]
self.nessusLogin = testinstances["nessus"]["login"]
self.nessusPassword = testinstances["nessus"]["password"]
self.nessusPolicyName = 'Test_Policy_%s' % self.category
self.nessusScanName = 'Scan_%s' % self.category
self.nessusTarget = testinstances["nessus"]["Target"]
self.sshUser = testinstances["credentials"]["ssh_user"]
self.sshPassword = testinstances["credentials"]["ssh_pass"]
self.file = testinstances["credentials"]["PrivateKeyPath"]
self.certUser = testinstances["credentials"]["certUser"]
self.certPass = testinstances["credentials"]["certPass"]
self.mongoUser = testinstances["credentials"]["MongoDB_user"]
self.mongoPassword = testinstances["credentials"]["MongoDB_pass"]
self.mongoDB = testinstances["credentials"]["MongoDB"]
self.mysqlUser= testinstances["credentials"]["MySQL_user"]
self.mysqlPassword = testinstances["credentials"]["MySQL_pass"]
self.checkRequirements()
self.final_status = False
Engine(self)
def outputs(self, Scan):
Scan.scan_results()
self.myScanResults = Scan.download_scan(export_format='nessus');
def certification(self):
if not self.myScanResults:
raise Exception("No scan results yet")
rpt = dotnessus_parser.Report()
rpt.parse(self.myScanResults, True)
        if len(rpt.targets) != 0:
for t in rpt.targets:
for v in t.vulns:
if v.get('risk_factor') != 'None':
print("Certification not possible: plugin %s return a positive match!"
%v.get('plugin_name'))
self.final_status = False
return False
else:
print("No vulnerability found on the target, certification ok!")
self.final_status = True
return True
else:
print('Error, no target found in report!')
self.final_status = False
return False
def returnFinalStatus(self):
print self.final_status
return self.final_status
class SearchScanProbe(Probe):  # remember to inherit from Probe later
def main(self, inputs):
print(self.testinstances)
return SearchScanPublicInterface(self.testinstances).returnFinalStatus()
def nullRollback (self, inputs):
return
def appendAtomics(self):
self.appendAtomic(self.main, self.nullRollback)
probe = SearchScanProbe
| {
"content_hash": "b088ce9f2569e7661032122ec93cdabe",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 187,
"avg_line_length": 44.333333333333336,
"alnum_prop": 0.6229462545252019,
"repo_name": "SESARLab/mooncloud_probe_vuln",
"id": "6780c0afdc0490cc5ec45eb823be83c8609a7cf0",
"size": "3591",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "probe_searchscan.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "119064"
},
{
"name": "Shell",
"bytes": "1361"
}
],
"symlink_target": ""
} |
from distutils.core import setup
try:
from setuptools import setup
except:
pass
setup(
name = "pev",
version = "0.1.0",
author = "Stanislav Feldman",
description = ("Python EVenter library"),
url = "https://github.com/stanislavfeldman/pev",
keywords = "eventer pubsub publish subscribe",
packages=['pev'],
classifiers=[
"Topic :: Software Development"
],
)
| {
"content_hash": "b5ef2c28c72f04d9abde4fab8f7c37b2",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 52,
"avg_line_length": 22.444444444444443,
"alnum_prop": 0.6435643564356436,
"repo_name": "stanislavfeldman/pev",
"id": "86fd2908dcdfe9b1ab2012cd1772f839f4344141",
"size": "404",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "1592"
}
],
"symlink_target": ""
} |
import replace as r
import re
# rename files first, REMEMBER to refer to them later as their new names
r.rename_file('chromium.spec', 'chromium-browser.spec');
r.rename_file('chromium.changes', 'chromium-browser.changes');
# change the master_preferences file to use SLEish settings instead of
# openSUSEish settings
r.replace_line('master_preferences',
' "http://www.opensuse.org",',
' "http://www.novell.com/linux",')
r.replace_line('master_preferences',
' "homepage": "http://www.opensuse.org",',
' "homepage": "http://www.novell.com/linux",')
# replace necessary lines in spec file
r.replace_line('chromium-browser.spec',
'Name: chromium',
'Name: chromium-browser')
r.remove_line('chromium-browser.spec',
'Provides: chromium-browser = %{version}')
r.remove_line('chromium-browser.spec',
'Obsoletes: chromium-browser < %{version}')
# don't forget the desktop file
r.replace_line('chromium.desktop',
'Exec=chromium %u',
'Exec=chromium-browser %u')
r.replace_line('chromium.desktop',
'Icon=chromium',
'Icon=chromium-browser')
# fix patch
r.replace_line('chromium-master-prefs-path.patch',
'+ master_prefs = FilePath("/etc/chromium");',
'+ master_prefs = FilePath("/etc/chromium-browser");')
# rename the decompressed src directory from chromium to chromium-browser,
# also rename the tarball and compressed lzma
r.rename_src('chromium', 'chromium-browser')
| {
"content_hash": "acc40fe97420a6438a231cd1bdbe1570",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 74,
"avg_line_length": 39.48780487804878,
"alnum_prop": 0.6195182211241507,
"repo_name": "bgmerrell/novellium",
"id": "6bf620b87b12b6368866474885d06f8fd25e397a",
"size": "1777",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "SLEize.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4780"
}
],
"symlink_target": ""
} |
import logging
import os.path
from ask_amy.core.exceptions import FileExistsError
logger = logging.getLogger()
class CodeGenerator(object):
def __init__(self, skill_name, aws_role='', intent_schema=None):
self._skill_name = skill_name
self._aws_role = aws_role
self._intent_schema = intent_schema
self._method_names = []
self._slot_names = []
def create_cli_config(self):
# cwd = os.getcwd()
CLI_CONFIG='./cli_config.json'
if os.path.isfile(CLI_CONFIG) :
raise FileExistsError("Attempting to OVERWRITE {}".format(CLI_CONFIG))
with open(CLI_CONFIG, 'w') as f:
f.write('{\n')
f.write(' "skill_name": "{}",\n'.format(self._skill_name))
f.write(' "skill_home_dir": ".",\n')
f.write(' "aws_region": "us-east-1",\n')
f.write(' "aws_profile": "default",\n')
f.write(' "aws_role": "{}",\n\n'.format(self._aws_role))
f.write(' "lambda_runtime": "python3.6",\n'.format('',''))
f.write(' "lambda_handler": "ask_amy.lambda_function.lambda_handler",\n')
f.write(' "lambda_timeout": "5",\n')
f.write(' "lambda_memory": "128",\n')
f.write(' "lambda_zip": "alexa_skill.zip",\n\n')
f.write(' "ask_amy_dev": false,\n')
f.write(' "ask_amy_home_dir": ""\n')
f.write('}\n')
def create_skill_config(self):
SKILL_CONFIG='./skill_config.json'
if os.path.isfile(SKILL_CONFIG) :
raise FileExistsError("Attempting to OVERWRITE {}".format(SKILL_CONFIG))
with open(SKILL_CONFIG, 'w') as file_ptr:
file_ptr.write('{\n')
file_ptr.write(' "Skill" : {\n')
file_ptr.write(' "version": "1.0",\n')
file_ptr.write(' "class_name": "{}.{}",\n'.format(self._skill_name,self.class_name()))
file_ptr.write(' "logging_level": "debug"\n')
file_ptr.write(' },\n')
file_ptr.write(' "Session": {\n')
file_ptr.write(' "persistence": false\n')
file_ptr.write(' },\n')
file_ptr.write(' "Dialog": {\n')
self.intent_control(file_ptr)
self.slots(file_ptr)
self.intent_methods(file_ptr)
file_ptr.write(' "help_intent": {\n')
file_ptr.write(' "method_name": "handle_default_intent",\n')
file_ptr.write(' "speech_out_text": "help intent",\n')
file_ptr.write(' "should_end_session": true\n')
file_ptr.write(' }\n')
file_ptr.write(' }\n')
file_ptr.write('}\n')
def class_name(self):
name = self._skill_name.replace("_", " ")
name = name.title()
name = name.replace(" ", "")
return name
def intent_control(self,file_ptr):
file_ptr.write(' "intent_control": {\n')
if 'intents' in self._intent_schema:
for intent_item in self._intent_schema['intents']:
if 'intent' in intent_item:
intent_nm =intent_item['intent']
method_name = self.process_intent_nm(intent_nm)
if method_name is not None:
file_ptr.write(' "{}": "{}",\n'.format(intent_nm, method_name))
file_ptr.write(' "AMAZON.HelpIntent": "help_intent",\n')
file_ptr.write(' "AMAZON.CancelIntent": "default_cancel_intent",\n')
file_ptr.write(' "AMAZON.StopIntent": "default_stop_intent"\n')
file_ptr.write(' },\n')
def method_name(self, intent_nm):
method_nm = intent_nm[0].lower()
for c in intent_nm[1:]:
if c.isupper():
method_nm += '_'+c.lower()
else:
method_nm += c
return method_nm
def process_intent_nm(self, intent_nm, for_dialog=True):
method_nm = None
if intent_nm.startswith('AMAZON.'):
if intent_nm == "AMAZON.HelpIntent" or \
intent_nm == "AMAZON.CancelIntent" or \
intent_nm == "AMAZON.StopIntent":
intent_nm = None
else:
intent_nm = intent_nm[7:]
if intent_nm is not None:
method_nm = self.method_name(intent_nm)
if for_dialog:
self._method_names.append(method_nm)
return method_nm
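    # Example mapping (illustrative): process_intent_nm('PlayMusicIntent')
    # returns 'play_music_intent'; 'AMAZON.YesIntent' drops the prefix and
    # becomes 'yes_intent'; the built-in Help/Cancel/Stop intents return None.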
def slots(self, file_ptr):
add_close_comma = False
file_ptr.write(' "slots": {\n')
if 'intents' in self._intent_schema:
for intent_item in self._intent_schema['intents']:
intent_nm =intent_item['intent']
if 'slots' in intent_item:
slots =intent_item['slots']
for slot in slots:
slot_nm = slot['name']
if slot_nm not in self._slot_names:
self._slot_names.append(slot_nm)
method_name = self.process_intent_nm(intent_nm, for_dialog=False)
if add_close_comma:
file_ptr.write(',\n')
add_close_comma= True
file_ptr.write(' "{}":\n'.format(slot_nm))
file_ptr.write(' {\n')
file_ptr.write(' "speech_out_text": "Please provide the {}",\n'.format(slot_nm))
file_ptr.write(' "re_prompt_text": "Sorry I did not hear that.",\n')
file_ptr.write(' "expected_intent": "{}"\n'.format(method_name))
file_ptr.write(' }')
file_ptr.write('\n },\n')
def intent_methods(self, file_ptr):
for method_nm in self._method_names:
file_ptr.write(' "{}": '.format(method_nm))
file_ptr.write('{\n')
file_ptr.write(' "speech_out_text": "you have called the {}",\n'.format(method_nm.replace("_", " ")))
file_ptr.write(' "should_end_session": true\n')
file_ptr.write(' },\n')
def create_skill_py(self):
SKILL_PY='./'+self._skill_name+'.py'
if os.path.isfile(SKILL_PY) :
raise FileExistsError("Attempting to OVERWRITE {}".format(SKILL_PY))
with open(SKILL_PY, 'w') as file_ptr:
file_ptr.write('from ask_amy.state_mgr.stack_dialog_mgr import StackDialogManager\n')
file_ptr.write('from ask_amy.core.reply import Reply\n')
file_ptr.write('import logging\n')
file_ptr.write('\n')
file_ptr.write('logger = logging.getLogger()\n')
file_ptr.write('\n')
file_ptr.write('class {}(StackDialogManager):\n'.format(self.class_name()))
file_ptr.write('\n')
self.create_intent_methods(file_ptr)
def create_intent_methods(self,file_ptr):
if 'intents' in self._intent_schema:
for intent_item in self._intent_schema['intents']:
if 'intent' in intent_item:
intent_nm =intent_item['intent']
method_name = self.process_intent_nm(intent_nm)
if method_name is not None:
file_ptr.write(' def {}(self):\n'.format(method_name))
file_ptr.write(' logger.debug("**************** entering {}.{}".format('
'self.__class__.__name__, self.intent_name))\n')
file_ptr.write(' return self.handle_default_intent()\n')
file_ptr.write('\n')
| {
"content_hash": "817d5277cd376ce9dbcec3e4da67cca5",
"timestamp": "",
"source": "github",
"line_count": 176,
"max_line_length": 122,
"avg_line_length": 44.64204545454545,
"alnum_prop": 0.48911798396334477,
"repo_name": "dphiggs01/ask_amy",
"id": "db27c569c893b59511b55ef2b8579abc3241848e",
"size": "7857",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ask_amy/cli/code_gen/code_generator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "7300"
},
{
"name": "HTML",
"bytes": "2620"
},
{
"name": "Makefile",
"bytes": "607"
},
{
"name": "Python",
"bytes": "109419"
},
{
"name": "Shell",
"bytes": "152"
}
],
"symlink_target": ""
} |
import rwdb
def test_basic():
class Test(rwdb.Document):
pass
# TODO write actual tests
| {
"content_hash": "d251b49bd3c21e43be88605123d21edd",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 33,
"avg_line_length": 18.166666666666668,
"alnum_prop": 0.6146788990825688,
"repo_name": "FlorianLudwig/rwdb",
"id": "08593479c9645ec81c050c213050755b8b3e3539",
"size": "109",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_rwdb.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "17445"
}
],
"symlink_target": ""
} |
subreddit = 'ChristmasCake'
t_channel = '@r_christmascake'
def send_post(submission, r2t):
return r2t.send_simple(submission)
| {
"content_hash": "0955098602db522a41752d2ff392967e",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 38,
"avg_line_length": 22,
"alnum_prop": 0.7348484848484849,
"repo_name": "Fillll/reddit2telegram",
"id": "0ddd98148fd93bae192b19f406477a0a2a8b294e",
"size": "149",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "reddit2telegram/channels/~inactive/r_christmascake/app.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "301463"
},
{
"name": "Shell",
"bytes": "153"
}
],
"symlink_target": ""
} |
"""
Support the ISY-994 controllers.
For configuration details please visit the documentation for this component at
https://home-assistant.io/components/isy994/
"""
import logging
from future.moves.urllib.parse import urlparse
from homeassistant.const import (
CONF_HOST, CONF_PASSWORD, CONF_USERNAME,
EVENT_HOMEASSISTANT_STOP)
from homeassistant.helpers import validate_config, discovery
from homeassistant.helpers.entity import ToggleEntity
DOMAIN = "isy994"
ISY = None
SENSOR_STRING = 'Sensor'
HIDDEN_STRING = '{HIDE ME}'
CONF_TLS_VER = 'tls'
_LOGGER = logging.getLogger(__name__)
def setup(hass, config):
"""Setup ISY994 component.
This will automatically import associated lights, switches, and sensors.
"""
import PyISY
# pylint: disable=global-statement
# check for required values in configuration file
if not validate_config(config,
{DOMAIN: [CONF_HOST, CONF_USERNAME, CONF_PASSWORD]},
_LOGGER):
return False
# Pull and parse standard configuration.
user = config[DOMAIN][CONF_USERNAME]
password = config[DOMAIN][CONF_PASSWORD]
host = urlparse(config[DOMAIN][CONF_HOST])
addr = host.geturl()
if host.scheme == 'http':
addr = addr.replace('http://', '')
https = False
elif host.scheme == 'https':
addr = addr.replace('https://', '')
https = True
else:
_LOGGER.error('isy994 host value in configuration file is invalid.')
return False
port = host.port
addr = addr.replace(':{}'.format(port), '')
# Pull and parse optional configuration.
global SENSOR_STRING
global HIDDEN_STRING
SENSOR_STRING = str(config[DOMAIN].get('sensor_string', SENSOR_STRING))
HIDDEN_STRING = str(config[DOMAIN].get('hidden_string', HIDDEN_STRING))
tls_version = config[DOMAIN].get(CONF_TLS_VER, None)
# Connect to ISY controller.
global ISY
ISY = PyISY.ISY(addr, port, user, password, use_https=https,
tls_ver=tls_version, log=_LOGGER)
if not ISY.connected:
return False
# Listen for HA stop to disconnect.
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, stop)
# Load platforms for the devices in the ISY controller that we support.
for component in ('sensor', 'light', 'switch'):
discovery.load_platform(hass, component, DOMAIN, {}, config)
ISY.auto_update = True
return True
def stop(event):
"""Cleanup the ISY subscription."""
ISY.auto_update = False
class ISYDeviceABC(ToggleEntity):
"""An abstract Class for an ISY device."""
_attrs = {}
_onattrs = []
_states = []
_dtype = None
_domain = None
_name = None
def __init__(self, node):
"""Initialize the device."""
# setup properties
self.node = node
# track changes
self._change_handler = self.node.status. \
subscribe('changed', self.on_update)
def __del__(self):
"""Cleanup subscriptions because it is the right thing to do."""
self._change_handler.unsubscribe()
@property
def domain(self):
"""Return the domain of the entity."""
return self._domain
@property
def dtype(self):
"""Return the data type of the entity (binary or analog)."""
if self._dtype in ['analog', 'binary']:
return self._dtype
return 'binary' if self.unit_of_measurement is None else 'analog'
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def value(self):
"""Return the unclean value from the controller."""
# pylint: disable=protected-access
return self.node.status._val
@property
def state_attributes(self):
"""Return the state attributes for the node."""
attr = {}
for name, prop in self._attrs.items():
attr[name] = getattr(self, prop)
attr = self._attr_filter(attr)
return attr
def _attr_filter(self, attr):
"""A Placeholder for attribute filters."""
# pylint: disable=no-self-use
return attr
@property
def unique_id(self):
"""Return the ID of this ISY sensor."""
# pylint: disable=protected-access
return self.node._id
@property
def raw_name(self):
"""Return the unclean node name."""
return str(self._name) \
if self._name is not None else str(self.node.name)
@property
def name(self):
"""Return the cleaned name of the node."""
return self.raw_name.replace(HIDDEN_STRING, '').strip() \
.replace('_', ' ')
@property
def hidden(self):
"""Suggestion if the entity should be hidden from UIs."""
return HIDDEN_STRING in self.raw_name
def update(self):
"""Update state of the sensor."""
# ISY objects are automatically updated by the ISY's event stream
pass
def on_update(self, event):
"""Handle the update received event."""
self.update_ha_state()
@property
def is_on(self):
"""Return a boolean response if the node is on."""
return bool(self.value)
@property
def is_open(self):
"""Return boolean response if the node is open. On = Open."""
return self.is_on
@property
def state(self):
"""Return the state of the node."""
if len(self._states) > 0:
return self._states[0] if self.is_on else self._states[1]
return self.value
def turn_on(self, **kwargs):
"""Turn the device on."""
        if self.domain != 'sensor':
attrs = [kwargs.get(name) for name in self._onattrs]
self.node.on(*attrs)
else:
_LOGGER.error('ISY cannot turn on sensors.')
def turn_off(self, **kwargs):
"""Turn the device off."""
        if self.domain != 'sensor':
self.node.off()
else:
_LOGGER.error('ISY cannot turn off sensors.')
@property
def unit_of_measurement(self):
"""Return the defined units of measurement or None."""
try:
return self.node.units
except AttributeError:
return None
| {
"content_hash": "5fbe1b5283b521675ede04f9d412a72a",
"timestamp": "",
"source": "github",
"line_count": 216,
"max_line_length": 79,
"avg_line_length": 29.12037037037037,
"alnum_prop": 0.6049284578696343,
"repo_name": "Julian/home-assistant",
"id": "c224303f8d0502c7ed7859c450949e46dceb8547",
"size": "6290",
"binary": false,
"copies": "1",
"ref": "refs/heads/py2",
"path": "homeassistant/components/isy994.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1354942"
},
{
"name": "Python",
"bytes": "2755966"
},
{
"name": "Ruby",
"bytes": "379"
},
{
"name": "Shell",
"bytes": "6430"
}
],
"symlink_target": ""
} |
from __future__ import print_function, unicode_literals
from string import Template
__author__ = "danishabdullah"
ITEM_JSON = Template("""{"name": "$name", "type": "$type", "mode": "$mode"}""")
RECORD_JSON = Template("""{"name": "$name", "type": "RECORD", "mode": "$mode", "fields":[$fields]}""")
TABLE_JSON = Template("""[$fields]""")
SPEC_JSON = Template("""$tables""")
ITEM_JAVA = Template(
"""$path_modifier.add(new TableFieldSchema().setName("$name").setType("$type").setMode("$mode"));""")
RECORD_JAVA = Template("""$path_modifier.add(new TableFieldSchema().setName("$name").setType("RECORD").setMode("$mode").setFields(
new ArrayList<TableFieldSchema>() {
{$fields}
}));""")
TABLE_JAVA = Template("""private static TableSchema $name() {
List<TableFieldSchema> fields = new ArrayList<>();
$fields
TableSchema schema = new TableSchema().setFields(fields);
return schema;
}""")
SPEC_JAVA = Template("""import com.google.api.services.bigquery.model.TableFieldSchema;
import com.google.api.services.bigquery.model.TableSchema;
import java.util.ArrayList;
import java.util.List;
public class Pipeline {
$tables
}
""")
| {
"content_hash": "a128d700384cc0447d944c5f23a17acf",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 130,
"avg_line_length": 36.375,
"alnum_prop": 0.6666666666666666,
"repo_name": "danishabdullah/BigSchema",
"id": "cec660fefd0bea97c35a60d05062a32a60bc42f2",
"size": "1164",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bigschema/templates.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "18593"
}
],
"symlink_target": ""
} |
from setuptools import setup
from odk_aggregation_tool import __version__
setup(
name="odk_aggregation_tool",
version=__version__,
description="A tool for aggregating ODK XML data.",
url="https://github.com/lindsay-stevens/",
author="Lindsay Stevens",
author_email="lindsay.stevens.au@gmail.com",
packages=['odk_aggregation_tool'],
test_suite='tests',
include_package_data=True,
license="MIT",
install_requires=[
# see requirements.txt
],
keywords="odk",
classifiers=[
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3.5",
],
)
| {
"content_hash": "46a7828856a1a5cea7825bab6425b363",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 55,
"avg_line_length": 28.541666666666668,
"alnum_prop": 0.635036496350365,
"repo_name": "lindsay-stevens/odk_aggregation_tool",
"id": "1b56ddb10d157b036182f70fd4626a063dab6a3e",
"size": "685",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "54600"
}
],
"symlink_target": ""
} |
import numpy as np
# import dynamicalModels.models
from dynamicalModels.models.MarkovCRF import MarkovCRF
from dynamicalModels.models.MarkovSemiMarkovCRF import MarkovSemiMarkovCRF
from dynamicalModels.models.SemiMarkovCRF import SemiMarkovCRF
from dynamicalModels.models.CRF_Functions import *#msm_preprocess_train, msm_preprocess_test
from pystruct.learners import NSlackSSVM,SubgradientSSVM
import os
import optparse
import time
from pyKinectTools.dataset_readers.MSR_DailyActivities import MSRPlayer
from IPython import embed
from pylab import *
subjects_train = '12345'
subjects_test = '678910'
# subjects_train = '123456789'
# subjects_test = '10'
actions = '125'
feature_type = 'smij'
''' Training data '''
## Load in feature data if pre-trained
filename = '/Users/colin/Desktop/msm_features/{}_a{}_s{}.npz'.format(feature_type, actions, subjects_train)
data = np.load(filename)
features, frame_labels, gesture_labels, n_gestures, n_frame_features, n_frames_per_gesture, BoF_dict_train = data.items()[0][1]
frame_hists_train, gesture_hists_train = preprocess_features(None, features, frame_labels, n_frames_per_gesture)
# Calculate per-frame unary value using an SVM
frame_clf_train = chi2_classifier(kernel='rbf')
frame_clf_train.fit(np.vstack(frame_hists_train), np.hstack(frame_labels))
# Calculate per-gesture unary value using an SVM
gesture_clf_train = chi2_classifier(kernel='rbf')
gesture_clf_train.fit(np.vstack(gesture_hists_train), np.hstack(gesture_labels))
# Calculate HMM transitions for each frame and gesture
n_gestures = len(np.unique(gesture_labels))
frame_prior_train, frame_transition_matrix_train = calculate_hmm_params(frame_labels, n_gestures)
gesture_prior_train, gesture_transition_matrix_train = calculate_hmm_params(gesture_labels, n_gestures)
print "Unary (frame) score:", frame_clf_train.score(np.vstack(frame_hists_train), np.hstack(frame_labels))
print "Unary (gesture) score:", gesture_clf_train.score(np.vstack(gesture_hists_train), np.hstack(gesture_labels))
gesture_transition_matrix_train = np.ones([n_gestures,3])/3.
# Markov CRF
markovCRF = MarkovCRF(n_states=n_gestures, clf=frame_clf_train,
prior=frame_prior_train, transition=frame_transition_matrix_train,
inference_method='dai')
markov_svm = SubgradientSSVM(markovCRF, verbose=1, C=1., n_jobs=1)
markov_svm.fit(frame_hists_train, frame_labels)
m_predict = markov_svm.predict(frame_hists_train)
print 'Markov w:', markov_svm.w
print 'Markov CRF score: {}%'.format(100*np.sum([np.sum(np.equal(m_predict[i],x)) for i,x in enumerate(frame_labels)]) / np.sum([np.size(x) for x in frame_labels], dtype=np.float))
# semi-Markov CRF
sm_crf = SemiMarkovCRF(n_states=n_gestures,clf=gesture_clf_train,
prior=gesture_prior_train, transition_matrix=gesture_transition_matrix_train)
sm_svm = SubgradientSSVM(sm_crf, verbose=1, C=1., n_jobs=1)
sm_svm.fit(frame_hists_train, frame_labels)
sm_predict = sm_svm.predict(frame_hists_train)
print 'Semi-Markov w:', sm_svm.w
print 'Semi-Markov CRF score: {}%'.format(100*np.sum([np.sum(sm_predict[i]==x) for i,x in enumerate(frame_labels)]) / np.sum([np.size(x) for x in frame_labels], dtype=np.float))
# Markov semi-Markov CRF
MarkovSemiMarkovCRF = MarkovSemiMarkovCRF(n_states=n_gestures,
markov_prior=frame_prior_train, markov_transition=frame_transition_matrix_train,
semi_markov_prior=gesture_prior_train, semi_markov_transition=gesture_transition_matrix_train,
markov_clf=frame_clf_train,semi_markov_clf=gesture_clf_train)
msm_svm = SubgradientSSVM(MarkovSemiMarkovCRF, verbose=1, C=1., n_jobs=1)
msm_svm.fit(frame_hists_train, frame_labels)
msm_predict = msm_svm.predict(frame_hists_train)
print 'MsM w:', msm_svm.w
print 'MsM-CRF score: {}%'.format(100*np.sum([np.sum(msm_predict[i]==x) for i,x in enumerate(frame_labels)]) / np.sum([np.size(x) for x in frame_labels], dtype=np.float))
for i in range(len(subjects_train)):
print 'i', i
print 'm ', m_predict[i]
print 'sm ', sm_predict[i]
print 'msm', msm_predict[i]
print 'tru', np.array(frame_labels[i])
print ""
print ""
print "SVM Weights"
print 'Markov w:', markov_svm.w
print 'Semi-Markov w:', sm_svm.w
print 'MsM w:', msm_svm.w
print ""
print "SCORES"
print 'Markov CRF score: {}%'.format(100*np.sum([np.sum(np.equal(m_predict[i],x)) for i,x in enumerate(frame_labels)]) / np.sum([np.size(x) for x in frame_labels], dtype=np.float))
print 'Semi-Markov CRF score: {}%'.format(100*np.sum([np.sum(sm_predict[i]==x) for i,x in enumerate(frame_labels)]) / np.sum([np.size(x) for x in frame_labels], dtype=np.float))
print 'MsM-CRF score: {}%'.format(100*np.sum([np.sum(msm_predict[i]==x) for i,x in enumerate(frame_labels)]) / np.sum([np.size(x) for x in frame_labels], dtype=np.float))
''' ------------------------------------------------- '''
''' Testing data '''
filename = '/Users/colin/Desktop/msm_features/{}_a{}_s{}.npz'.format(feature_type, actions, subjects_test)
data = np.load(filename)
features_test, frame_labels_test, gesture_labels_test, n_gestures_test, n_frame_features_test, n_frames_per_gesture_test, BoF_dict_test = data.items()[0][1]
frame_hists_test, _ = preprocess_features(None, features_test, frame_labels_test, n_frames_per_gesture_test)
# Evaluate models
m_predict = markov_svm.predict(frame_hists_test)
sm_predict = sm_svm.predict(frame_hists_test)
msm_predict = msm_svm.predict(frame_hists_test)
for i in range(len(subjects_test)-1):
print 'i', i
print 'm ', m_predict[i]
print 'sm ', sm_predict[i]
print 'msm', msm_predict[i]
print 'tru', np.array(frame_labels[i])
print ""
print ""
print "EXPERIMENT:"
print "TRAIN -- Subjects {:5} -- Actions {}".format(",".join(subjects_train), ",".join(actions))
print "TEST -- Subjects {:5} -- Actions {}".format(",".join(subjects_test), ",".join(actions))
print ""
print "SCORES"
print 'Markov CRF score: {}%'.format(100*np.sum([np.sum(np.equal(m_predict[i],x)) for i,x in enumerate(frame_labels_test)]) / np.sum([np.size(x) for x in frame_labels_test], dtype=np.float))
print 'Semi-Markov CRF score: {}%'.format(100*np.sum([np.sum(sm_predict[i]==x) for i,x in enumerate(frame_labels_test)]) / np.sum([np.size(x) for x in frame_labels_test], dtype=np.float))
print 'MsM-CRF score: {}%'.format(100*np.sum([np.sum(msm_predict[i]==x) for i,x in enumerate(frame_labels_test)]) / np.sum([np.size(x) for x in frame_labels_test], dtype=np.float))
# Plot unaries:
if 0:
for i in range(5):
subplot(2,5, i+1)
plot(frame_unary_train[i])
subplot(2,5, i+1+5)
plot(gesture_unary_train[i])
| {
"content_hash": "752d2f7e9ef47fe562f5caf4d2f9a814",
"timestamp": "",
"source": "github",
"line_count": 141,
"max_line_length": 191,
"avg_line_length": 46.234042553191486,
"alnum_prop": 0.7200490872833256,
"repo_name": "colincsl/StructuredModels",
"id": "820b2b97fd5e929f4eb8b57d1b720996717ecd90",
"size": "6520",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "StructuredModels/scripts/MsM_Daily_Activities.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "74622"
}
],
"symlink_target": ""
} |
import errno
import logging
import os
from paramiko.client import SSHClient, AutoAddPolicy
from paramiko.ssh_exception import AuthenticationException, SSHException, \
NoValidConnectionsError
import pytest
import requests
import shutil
def _create_or_update_symplink(target, link_name):
"""
Create or update a symlink
"""
try:
os.symlink(target, link_name)
except OSError as error:
if error.errno == errno.EEXIST:
os.remove(link_name)
os.symlink(target, link_name)
else:
raise error
def _check_sshd_service(ip_address, ssh_port):
"""
    Ensure the SSHd service is running on the container
"""
with SSHClient() as ssh_client:
ssh_client.set_missing_host_key_policy(AutoAddPolicy())
# Add Paramiko transport console logger if requested
if os.environ.get('PARAMIKO_DEBUG'):
paramiko_logger = logging.getLogger('paramiko.transport')
console_handler = logging.StreamHandler()
console_handler.setFormatter(
logging.Formatter('%(asctime)s | %(levelname)-8s| PARAMIKO: '
'%(lineno)03d@%(module)-10s| %(message)s')
)
paramiko_logger.addHandler(console_handler)
paramiko_logger.setLevel(logging.DEBUG)
# Check with bad credentials to raise an AuthenticationException
try:
ssh_client.connect( # nosec
ip_address,
port=ssh_port,
username='root',
password='foobar',
allow_agent=False,
look_for_keys=False)
except AuthenticationException:
return True
except (SSHException, NoValidConnectionsError):
return False
@pytest.fixture(scope='session')
def aci_ansible_target(docker_ip, docker_services):
"""
Ensure that "some service" is up and responsive.
"""
ssh_port = docker_services.port_for('aci-ansible-target', 22)
# Check SSH connection before next steps
docker_services.wait_until_responsive(
timeout=30.0, pause=0.1,
check=lambda: _check_sshd_service(docker_ip, ssh_port)
)
return {'ip': docker_ip, 'ssh_port': ssh_port}
@pytest.fixture(scope='session')
def aci_ansible_structure(tmpdir_factory, aci_ansible_target):
"""
This fixture manage a basic ansible project structure with:
* hosts file
* private key file
"""
BASE_IMAGE_PRIVATE_KEY_URL = (
'https://github.com/phusion/baseimage-docker/raw/master/image/'
+ 'services/sshd/keys/insecure_key')
hosts_infos = 'ansible_host={} ansible_user=root ansible_port={}'.format(
aci_ansible_target.get('ip'),
aci_ansible_target.get('ssh_port')
)
hosts_file_content = [
'foo {}'.format(hosts_infos),
'bar {}'.format(hosts_infos),
'foobar {}'.format(hosts_infos),
]
base_image_private_key = requests.get(BASE_IMAGE_PRIVATE_KEY_URL)
base_dir = tmpdir_factory.mktemp('ansible_config')
base_dir.join('roles').mkdir()
base_dir.join('hosts').write('\n'.join(hosts_file_content))
base_dir.join('ssh_key').write(base_image_private_key.content)
base_dir.join('ssh_key').chmod(0o400)
shutil.copy2(
os.path.join(os.getcwd(), 'tests/resources/ansible/basic_play.yml'),
base_dir.join('basic_play.yml').strpath
)
shutil.copy2(
os.path.join(os.getcwd(), 'tests/resources/ansible/requirements.yml'),
base_dir.join('requirements.yml').strpath
)
return base_dir
@pytest.fixture(scope='session')
def aci_ansible_project(aci_ansible_structure):
"""
Prepare environment vars to work with aci_ansible_project fixture
"""
inventory_path = aci_ansible_structure.join('hosts').strpath
private_key_path = aci_ansible_structure.join('ssh_key').strpath
roles_path = aci_ansible_structure.join('roles').strpath
os.environ['ANSIBLE_INVENTORY'] = inventory_path
os.environ['ANSIBLE_HOST_KEY_CHECKING'] = str(False)
os.environ['ANSIBLE_PRIVATE_KEY_FILE'] = private_key_path
os.environ['ANSIBLE_ROLES_PATH'] = roles_path
return aci_ansible_structure
@pytest.fixture(scope='session')
def aci_molecule_project(tmpdir_factory):
"""
This fixture manage a basic molecule scenario structure with:
* create and destroy playbooks
* molecule configuration file
* playbook to run
"""
base_dir = tmpdir_factory.mktemp('molecule_config')
base_dir.join('molecule').mkdir()
scenario_dir = base_dir.join('molecule').join('basic-scenario')
scenario_dir.mkdir()
scenario_dir.join('tests').mkdir()
managed_filenames = [
'Dockerfile',
'create.yml',
'destroy.yml',
'molecule.yml',
'playbook.yml',
'requirements.yml',
'.yamllint',
]
for filename in managed_filenames:
shutil.copy2(
os.path.join(
os.getcwd(),
'tests/resources/molecule/{}'.format(filename)),
scenario_dir.join('{}'.format(filename)).strpath
)
shutil.copy2(
os.path.join(
os.getcwd(), 'tests/resources/molecule/tests/test_default.py'),
scenario_dir.join('tests').join('test_default.py').strpath
)
    _create_or_update_symlink(base_dir.join('molecule').strpath, 'molecule')
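# Illustrative sketch (not part of the original file): a test module could
# consume the fixtures above roughly as follows. The test body and the
# ansible-playbook invocation are assumptions for illustration only.
#
#   def test_basic_play_runs(aci_ansible_project):
#       playbook = aci_ansible_project.join('basic_play.yml').strpath
#       assert subprocess.call(['ansible-playbook', playbook]) == 0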
| {
"content_hash": "c8cc50ffaa227a74f6b19255402f6dde",
"timestamp": "",
"source": "github",
"line_count": 180,
"max_line_length": 78,
"avg_line_length": 30.205555555555556,
"alnum_prop": 0.6277358837594261,
"repo_name": "infOpen/ansible_customer",
"id": "69ac24ec27302e9aebc0f263bb026b7ddae97397",
"size": "5437",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "tests/conftest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2310"
},
{
"name": "Python",
"bytes": "37298"
}
],
"symlink_target": ""
} |
from factory import Sequence, PostGenerationMethodCall
from factory.alchemy import SQLAlchemyModelFactory
from snapface.user.models import User
from snapface.database import db
class BaseFactory(SQLAlchemyModelFactory):
class Meta:
abstract = True
sqlalchemy_session = db.session
class UserFactory(BaseFactory):
username = Sequence(lambda n: "user{0}".format(n))
email = Sequence(lambda n: "user{0}@example.com".format(n))
password = PostGenerationMethodCall('set_password', 'example')
active = True
class Meta:
model = User
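# Illustrative sketch (not part of the original file): inside a test that has
# an application context and database session, a user could be built with,
# e.g., `user = UserFactory(password='secret')` followed by
# `db.session.commit()`. The keyword override of `password` is routed through
# the `set_password` post-generation hook declared above.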
| {
"content_hash": "ba6fab37c740ff389ae5015980c3f449",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 66,
"avg_line_length": 26.363636363636363,
"alnum_prop": 0.7275862068965517,
"repo_name": "krekle/snapface",
"id": "ce1de8d9ec686a4e789b375f565b906b3a0efac0",
"size": "604",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/factories.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1383"
},
{
"name": "HTML",
"bytes": "18073"
},
{
"name": "JavaScript",
"bytes": "240956"
},
{
"name": "Python",
"bytes": "45898"
}
],
"symlink_target": ""
} |
GENOMES_DIR = '/home/cmb-panasas2/skchoudh/genomes'
OUT_DIR = '/staging/as/skchoudh/rna/Feb_02_2017_Radiation_GBM_EBV_Sept2017_ribo'
SRC_DIR = '/home/cmb-panasas2/skchoudh/github_projects/clip_seq_pipeline/scripts'
RAWDATA_DIR = '/home/cmb-06/as/skchoudh/dna/Luiz_radiation_data/Penalva_08242016_Run160902/Penalva_08242016'
GENOME_BUILD = 'hg38'
GENOME_FASTA = GENOMES_DIR + '/' + GENOME_BUILD + '/fasta/'+ GENOME_BUILD+ '.fa'
STAR_INDEX = GENOMES_DIR + '/' + GENOME_BUILD + '/star_annotated'
GTF = GENOMES_DIR + '/' + GENOME_BUILD + '/annotation/' + 'gencode.v25.annotation.without_rRNA_tRNA.gtf'
GENE_NAMES = GENOMES_DIR + '/' + GENOME_BUILD + '/annotation/' + GENOME_BUILD+'_gene_names_stripped.tsv'
GTF_UTR = GENOMES_DIR + '/' + GENOME_BUILD + '/annotation/' + 'gencode.v25.gffutils.modifiedUTRs.gtf'
GENE_LENGTHS = GENOMES_DIR + '/' + GENOME_BUILD + '/annotation/' + 'gencode.v25.coding_lengths.tsv' #+ GENOME_BUILD+'_gene_lengths.tsv'
DESIGN_FILE = RAWDATA_DIR + '/' + 'design.txt'
HTSEQ_STRANDED = 'yes'
FEATURECOUNTS_S = '-s 1'
GENE_BED = GENOMES_DIR + '/' + GENOME_BUILD + '/annotation/' + 'gencode.24.genes.bed' #+ GENOME_BUILD+'_gene_lengths.tsv'
FEATURECOUNTS_T = 'CDS'
HTSEQ_MODE = 'intersection-strict'
UTR5_BED = GENOMES_DIR + '/' + GENOME_BUILD + '/annotation/' + 'gencode.v25.gffutils.UTR5.bed'
UTR3_BED = GENOMES_DIR + '/' + GENOME_BUILD + '/annotation/' + 'gencode.v25.gffutils.UTR3.bed'
START_CODON_BED = GENOMES_DIR + '/' + GENOME_BUILD + '/annotation/' + 'gencode.v25.gffutils.start_codon.bed' #+ GENOME_BUILD+'_gene_lengths.tsv'
STOP_CODON_BED = GENOMES_DIR + '/' + GENOME_BUILD + '/annotation/' + 'gencode.v25.gffutils.stop_codon.bed' #+ GENOME_BUILD+'_gene_lengths.tsv'
PYTHON2ENV = 'python2'
CDS_BED = '/home/cmb-panasas2/skchoudh/genomes/hg38/annotation/gencode.v25.gffutils.cds.bed'
CHROM_SIZES = '/home/cmb-panasas2/skchoudh/genomes/hg38/fasta/hg38.chrom.sizes'
| {
"content_hash": "c102bf30eaa750ec8562f17d7f7432d1",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 145,
"avg_line_length": 63.36666666666667,
"alnum_prop": 0.7006838506049448,
"repo_name": "saketkc/ribo-seq-snakemake",
"id": "3e28218ef6f76c1be4cf5dd5f17bd636968b794b",
"size": "1901",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "configs/config_Feb_2017_Radiation_GBM.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "57714"
},
{
"name": "R",
"bytes": "3539"
},
{
"name": "Shell",
"bytes": "8205"
}
],
"symlink_target": ""
} |
''' This package contains functionality exterior to the core. This is where
higher-level tools, which depend on the core, live. '''
"content_hash": "d851dea462ddf0bf2d121f61293810d6",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 115,
"avg_line_length": 67,
"alnum_prop": 0.746268656716418,
"repo_name": "rectangletangle/nlplib",
"id": "553b32021c8d92b2549990df12658aa644f5ea72",
"size": "134",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/nlplib/exterior/__init__.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "164027"
}
],
"symlink_target": ""
} |
from pycqed.measurement.waveform_control_CC import waveform as wf
try:
from qcodes import Instrument
except ImportError:
print('could not import qcodes Instrument')
def mock_control_pulse_prepare(command, **kwargs):
'''
    Mock function for testing purposes; prints the received kwargs
'''
# printing to be caught in test suite
print('mock called with {}'.format(kwargs))
return
def QWG_pulse_prepare(operation_name, **kwargs):
QWG_name = kwargs.pop('QWG_name')
codeword = kwargs.pop('codeword')
channels = kwargs.pop('channels')
QWG = Instrument.find_instrument(QWG_name)
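    # Look up the waveform-generating function by name in the waveform module
    # and build the waveform from the remaining keyword arguments.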
pulse_func = getattr(wf, kwargs['pulse_type'])
waveform = pulse_func(kwargs)
for i, ch in enumerate(channels):
wf_name = operation_name+str(ch)
QWG.createWaveformReal(wf_name, waveform[i], [], [])
QWG.set(codeword, ch, wf_name)
| {
"content_hash": "ca840dd4db3919bc24663dddaf56d24f",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 65,
"avg_line_length": 24.61111111111111,
"alnum_prop": 0.6715575620767494,
"repo_name": "DiCarloLab-Delft/PycQED_py3",
"id": "c33f6af2bdaa1ad28e96ed5ff69d98ecd2370acd",
"size": "886",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "deprecated/pycqed/measurement/waveform_control_CC/operation_prep.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "8748"
},
{
"name": "C++",
"bytes": "8802"
},
{
"name": "Cython",
"bytes": "8291"
},
{
"name": "OpenQASM",
"bytes": "15894"
},
{
"name": "Python",
"bytes": "7978715"
},
{
"name": "TeX",
"bytes": "8"
}
],
"symlink_target": ""
} |
import bioinf
from numpy import array, dot, arccos, rad2deg, ndarray, cross
from numpy.linalg import norm
from constants import *
from collections import OrderedDict, namedtuple
class PDBATOMFileReader(object):
def __init__(self, file_or_path):
self._parse_atom_lines(file_or_path)
self._set_residues_and_chains_of_each_atom()
self._set_chain_of_each_residue_and_add_it_to_itsown_chain()
def _parse_atom_lines(self, file_or_path):
if isinstance(file_or_path, basestring):
f = open(file_or_path, 'r')
else:
f = file_or_path
self._atoms = OrderedDict()
self._residues = OrderedDict()
self._chains = OrderedDict()
for line in f:
clean_line = line.strip()
if clean_line.startswith('ATOM'):
atom = AtomIQ(clean_line)
self._atoms[atom.serial] = atom
try:
self._chains[atom.chainID].add_atom(atom)
except KeyError:
self._chains[atom.chainID] = ChainIQ(atom)
try:
self._residues[atom.chainID + atom.uid].add_atom(atom)
except KeyError:
self._residues[atom.chainID + atom.uid] = ResidueIQ(atom)
f.close()
def _set_residues_and_chains_of_each_atom(self):
for atom in self._atoms.itervalues():
atom.set_Residue(self._residues[atom.chainID + atom.uid])
atom.set_Chain(self._chains[atom.chainID])
def _set_chain_of_each_residue_and_add_it_to_itsown_chain(self):
for residue in self._residues.itervalues():
residue.set_Chain(self._chains[residue.chainID])
self._chains[residue.chainID].add_residue(residue)
def __iter__(self):
for atom in self._atoms:
yield self._atoms[atom]
class AtomIQ(object):
def __init__(self, pdb_line):
assert isinstance(pdb_line, basestring)
pdb_atom_line = bioinf.PDBAtomLine.parse_string(pdb_line)
self._res_name = pdb_atom_line.resName
self._resSeq = pdb_atom_line.resSeq
self._name = pdb_atom_line.name
self._serial = pdb_atom_line.serial
self._residue = None
self._chain = None
self._chainID = pdb_atom_line.chainID
self._coordinates = array([
float(pdb_atom_line.x),
float(pdb_atom_line.y),
float(pdb_atom_line.z)
])
self._participant = \
HBondParticipant.generate_participant_by_valence(self)
def set_Residue(self, residue):
assert isinstance(residue, ResidueIQ)
assert residue.uid == self._resSeq
self._residue = residue
def set_Chain(self, chain):
assert isinstance(chain, ChainIQ)
assert chain.chainID == self._chainID
if self._chain is None:
self._chain = chain
else:
raise TypeError('chain was already set and thus was not None')
res_name = property(lambda self: self._res_name)
uid = property(lambda self: self._resSeq)
name = property(lambda self: self._name)
chainID = property(lambda self: self._chainID)
coordinates = property(lambda self: self._coordinates)
serial = property(lambda self: self._serial)
residue = property(lambda self: self._residue, set_Residue)
chain = property(lambda self: self._chain, set_Chain)
participant = property(lambda self: self._participant)
class HBondParticipant(object):
def __init__(self, atom,
is_donor=False, H_bond_donor_radius=None, max_num_H_donations=None,
is_acceptor=False, H_bond_acceptor_radius=None,
max_num_H_acceptance=None, NN=None, NNN=None):
assert isinstance(atom, AtomIQ)
self._atom = atom
self._is_acceptor = is_acceptor
self._is_donor = is_donor
self._H_bond_acceptor_radius = H_bond_acceptor_radius
self._H_bond_donor_radius = H_bond_donor_radius
self._max_num_H_acceptance = max_num_H_acceptance
self._max_num_H_donations = max_num_H_donations
self._NN = NN
self._NNN = NNN
self._acceptor_list = []
self._donor_list = []
self._backup_donors = []
self._backup_acceptors = []
@staticmethod
def _atom_in_group_is_Hbond_participant(atom, currentGroup, backbone_atom_name):
assert isinstance(atom, AtomIQ)
assert isinstance(currentGroup, HBondGroup)
assert backbone_atom_name in ('N', 'O')
return (
(atom.name in currentGroup.atoms_str_tupl and
atom.res_name == currentGroup.residue.upper())
or
(atom.name == backbone_atom_name and
currentGroup.residue == 'Peptide')
)
@staticmethod
def generate_participant_by_valence(atom):
assert isinstance(atom, AtomIQ)
backbone = namedtuple('backbone_Hbond_atom_name',
['donor','acceptor'])('N', 'O')
is_acceptor = False
is_donor = False
H_bond_donor_radius = None
max_num_H_donations = None
H_bond_acceptor_radius = None
max_num_H_acceptance = None
for currentDonorGroup in hbond_donor_groups:
if HBondParticipant._atom_in_group_is_Hbond_participant(
atom, currentDonorGroup, backbone.donor):
is_donor = True
valence = currentDonorGroup.valence
H_bond_donor_radius = currentDonorGroup.H_bond_radius
max_num_H_donations = currentDonorGroup.max_num_H_bonds
NN = currentDonorGroup.NN
NNN = currentDonorGroup.NNN
for currentAcceptorGroup in hbond_acceptor_groups:
if HBondParticipant._atom_in_group_is_Hbond_participant(
atom, currentAcceptorGroup, backbone.acceptor):
is_acceptor = True
valence = currentAcceptorGroup.valence
                H_bond_acceptor_radius = currentAcceptorGroup.H_bond_radius
                max_num_H_acceptance = currentAcceptorGroup.max_num_H_bonds
NN = currentAcceptorGroup.NN
NNN = currentAcceptorGroup.NNN
if is_acceptor or is_donor:
if valence == 'sp2':
return Sp2HBondParticipant(atom,
is_donor, H_bond_donor_radius, max_num_H_donations,
is_acceptor, H_bond_acceptor_radius, max_num_H_acceptance,
NN, NNN
)
elif valence == 'sp3':
return Sp3HBondParticipant(atom,
is_donor, H_bond_donor_radius, max_num_H_donations,
is_acceptor, H_bond_acceptor_radius, max_num_H_acceptance,
NN, NNN
)
else:
return None
def has_excessive_donors(self):
return len(self._donor_list) > self._max_num_H_donations
def has_excessive_acceptors(self):
        return len(self._acceptor_list) > self._max_num_H_acceptance
is_acceptor = property(lambda self: self._is_acceptor)
is_donor = property(lambda self: self._is_donor)
H_bond_acceptor_radius = property(
lambda self: self._H_bond_acceptor_radius)
H_bond_donor_radius = property(lambda self: self._H_bond_donor_radius)
max_num_H_acceptance = property(lambda self: self._max_num_H_acceptance)
max_num_H_donations = property(lambda self: self._max_num_H_donations)
atom = property(lambda self: self._atom)
NN = property(lambda self: self._NN)
NNN = property(lambda self: self._NNN)
class AngleMinimum(namedtuple('AngleMinimum', ['as_donor', 'as_acceptor'])):
def angle_as_donor(self, donor=True):
return self.as_donor if donor else self.as_acceptor
class PlaneAngleMaximum(
namedtuple('AngleMinimum', ['as_donor', 'as_acceptor'])):
def angle_as_donor(self, donor=True):
return self.as_donor if donor else self.as_acceptor
class Sp3HBondParticipant(HBondParticipant):
_angle_min = AngleMinimum(90., 60.)
def _distance_is_ok(self, partner):
M = self._atom.coordinates
P = partner.atom.coordinates
distance = norm(M - P)
if distance < self._H_bond_donor_radius + partner.H_bond_acceptor_radius:
return distance
else:
return False
@staticmethod
def angle(ba, bc):
assert isinstance(ba, ndarray)
assert isinstance(bc, ndarray)
return rad2deg(arccos(dot(bc, ba) / (norm(bc) * norm(ba))))
def angle_is_ok(self, MtP, MtMM, as_donor=True):
angle = self.angle(MtP, MtMM)
return angle < 180. and angle > self._angle_min.angle_as_donor(as_donor)
def planarity_is_ok(self, MtP, MtMM, MMtMMM, as_donor=True):
return True
@staticmethod
def can_bond_to_partner(myself, partner, as_donor=True):
assert isinstance(myself, HBondParticipant)
assert isinstance(partner, HBondParticipant)
M = myself.atom.coordinates
P = partner.atom.coordinates
MM = myself.atom.residue.atoms[myself.NN].coordinates
MtoMM = MM - M
MtoP = P - M
if myself.angle_is_ok(MtoP, MtoMM, as_donor):
MMM = myself.atom.residue.atoms[myself.NNN].coordinates
MMtoMMM = MMM - MM
if myself.planarity_is_ok(MtoP, MtoMM, MMtoMMM, as_donor):
return True
def H_bond_is_mutual(self, partner):
assert isinstance(partner, HBondParticipant)
distance_or_is_ok = self._distance_is_ok(partner)
if distance_or_is_ok and \
self.can_bond_to_partner(self, partner) and \
self.can_bond_to_partner(partner, self, as_donor=False):
partner.append_donor_list(self)
self.append_acceptor_list(partner)
return distance_or_is_ok
def append_donor_list(self, potential_h_donor):
self.donor_list.append(potential_h_donor)
def append_acceptor_list(self, potential_h_acceptor):
self.acceptor_list.append(potential_h_acceptor)
    valence = property(lambda self: 'sp3')
acceptor_list = property(lambda self: self._acceptor_list, append_acceptor_list)
donor_list = property(lambda self: self._donor_list, append_donor_list)
class Sp2HBondParticipant(Sp3HBondParticipant):
_angle_min = AngleMinimum(90., 90.)
_plane_angle_max = PlaneAngleMaximum(60., 90.)
@staticmethod
def planarity(ba, bc, cd):
assert isinstance(ba, ndarray)
assert isinstance(bc, ndarray)
assert isinstance(cd, ndarray)
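        # Angle between the plane spanned by (ba, bc) and the plane spanned by
        # (-bc, cd), measured relative to the torsion centre: the sign of cd
        # against the in-plane normal of bc decides whether that centre is
        # 0 or 180 degrees.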
my_plane_norm = cross(ba, bc)
perndclr_bc_in_plane = cross(bc, my_plane_norm)
torsion_angle_center = 0 if dot(cd, perndclr_bc_in_plane) > 0. else 180.
plane_norm_w_partner = cross(-bc, cd)
return abs(torsion_angle_center - Sp3HBondParticipant.angle(
my_plane_norm, plane_norm_w_partner))
def planarity_is_ok(self, MtP, MtMM, MMtMMM, as_donor=True):
plane_angle = self.planarity(MMtMMM, -MtMM, MtP)
return plane_angle < self._plane_angle_max.angle_as_donor(as_donor)
    valence = property(lambda self: 'sp2')
class ResidueIQ(object):
def __init__(self, atom):
assert isinstance(atom, AtomIQ)
self._atoms = {atom.name: atom}
self._abbr = atom.res_name
self._uid = atom.uid
self._chainID = atom.chainID
self._chain = None
def add_atom(self, atom):
assert isinstance(atom, AtomIQ)
assert self.uid == atom.uid
if atom.name not in self._atoms:
self._atoms[atom.name] = atom
def set_Chain(self, chain):
assert isinstance(chain, ChainIQ)
assert chain.chainID == self._chainID
if self._chain is None:
self._chain = chain
else:
raise TypeError('chain was already set and thus was not None')
atoms = property(lambda self: self._atoms, add_atom)
uid = property(lambda self: self._uid)
chainID = property(lambda self: self._chainID)
abbr = property(lambda self: self._abbr)
chain = property(lambda self: self._chain, set_Chain)
class ChainIQ(object):
def __init__(self, atom):
assert isinstance(atom, AtomIQ)
self._chainID = atom.chainID
self._atoms = OrderedDict({atom.serial: atom})
self._residues = OrderedDict({atom.uid: atom.residue})
def add_atom(self, atom):
assert isinstance(atom, AtomIQ)
if atom.serial not in self._atoms:
self._atoms[atom.serial] = atom
else:
raise KeyError('%s already exists in list of atoms for chain %s' %
(atom.serial, self._chainID))
def add_residue(self, residue):
assert isinstance(residue, ResidueIQ)
if residue.uid not in self._residues:
self._residues[residue.uid] = residue
atoms = property(lambda self: self._atoms, add_atom)
residues = property(lambda self: self._residues, add_residue)
chainID = property(lambda self: self._chainID)
class ProteinIQ(object):
    @staticmethod
    def generate_protein_from_PDB_ATOM_File_Reader(pdb):
assert isinstance(pdb, PDBATOMFileReader)
donorDict = {}
acceptorDict = {}
atoms = {}
        for atom in pdb:
atoms[atom.serial] = atom
if atom.participant:
if atom.participant.is_donor:
donorDict[atom.serial] = atom
if atom.participant.is_acceptor:
acceptorDict[atom.serial] = atom
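        # NOTE: the pairwise donor/acceptor evaluation below is an unfinished
        # stub in the original source; the loops currently do nothing.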
for donor in donorDict.itervalues():
for acceptor in acceptorDict.itervalues():
pass
pass
| {
"content_hash": "005e3fda1801bd9e19cf046d9d7b2717",
"timestamp": "",
"source": "github",
"line_count": 371,
"max_line_length": 84,
"avg_line_length": 37.24797843665768,
"alnum_prop": 0.6053983645705189,
"repo_name": "jruhym/intrapeptideContacts",
"id": "5126b8640adea1e7cabeed61f00028533816ecb6",
"size": "13819",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Contact_Report/hydrogen_bonds.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "31346"
},
{
"name": "Shell",
"bytes": "1889"
}
],
"symlink_target": ""
} |
from elephunk.database import Row
class Bloat(Row):
@property
def table_id(self):
return (self.schemaname, self.tablename)
@property
def full_table_name(self):
return "%s.%s" % self.table_id
@staticmethod
def sql():
return """
SELECT
schemaname,
tablename,
reltuples::bigint,
relpages::bigint,
relpages * block_size AS usedbytes,
otta,
ROUND(CASE WHEN otta=0 THEN 0.0 ELSE sml.relpages/otta::numeric END,1) AS tbloat,
CASE WHEN relpages < otta THEN 0 ELSE block_size*(sml.relpages-otta)::bigint END AS wastedbytes,
iname,
ituples::bigint,
ipages::bigint,
ipages * block_size AS iusedbytes,
iotta,
ROUND(CASE WHEN iotta=0 OR ipages=0 THEN 0.0 ELSE ipages/iotta::numeric END,1) AS ibloat,
CASE WHEN ipages < iotta THEN 0 ELSE block_size*(ipages-iotta) END AS wastedibytes
FROM (
SELECT
schemaname, tablename, cc.reltuples, cc.relpages, block_size,
CEIL((cc.reltuples*((datahdr + ma - (CASE WHEN mod(datahdr, ma) = 0 THEN ma ELSE mod(datahdr, ma) END)) + nullhdr2 + 4)) / (block_size-20::float)) AS otta,
COALESCE(c2.relname,'?') AS iname, COALESCE(c2.reltuples,0) AS ituples, COALESCE(c2.relpages,0) AS ipages,
COALESCE(CEIL((c2.reltuples*(datahdr-12))/(block_size-20::float)),0) AS iotta -- very rough approximation, assumes all cols
FROM (
SELECT
ma,block_size,schemaname,tablename,
(datawidth + (hdr + ma - (case when mod(hdr, ma) = 0 THEN ma ELSE mod(hdr, ma) END)))::numeric AS datahdr,
(maxfracsum * (nullhdr + ma - (case when mod(nullhdr, ma) = 0 THEN ma ELSE mod(nullhdr, ma) END))) AS nullhdr2
FROM (
SELECT
schemaname, tablename, hdr, ma, block_size,
SUM((1-null_frac)*avg_width) AS datawidth,
MAX(null_frac) AS maxfracsum,
hdr+(
SELECT 1+count(*)/8
FROM pg_stats s2
WHERE null_frac<>0 AND s2.schemaname = s.schemaname AND s2.tablename = s.tablename
) AS nullhdr
FROM pg_stats s, (
SELECT
(SELECT current_setting('block_size')::numeric) AS block_size,
CASE WHEN substring(v,12,3) IN ('8.0','8.1','8.2') THEN 27 ELSE 23 END AS hdr,
CASE WHEN v ~ 'mingw32' THEN 8 ELSE 4 END AS ma
FROM (SELECT version() AS v) AS foo
) AS constants
GROUP BY 1,2,3,4,5
) AS foo
) AS rs
JOIN pg_class cc ON cc.relname = rs.tablename
JOIN pg_namespace nn ON cc.relnamespace = nn.oid AND nn.nspname = rs.schemaname AND nn.nspname <> 'information_schema'
LEFT JOIN pg_index i ON indrelid = cc.oid
LEFT JOIN pg_class c2 ON c2.oid = i.indexrelid
) AS sml
ORDER BY schemaname, tablename
"""
| {
"content_hash": "c423efaf3e06cac68e6fab24c8e91d30",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 159,
"avg_line_length": 37.521126760563384,
"alnum_prop": 0.6482732732732732,
"repo_name": "pitluga/elephunk",
"id": "039e9e9163a1403dfc8d92d1a794b0d22ce79e99",
"size": "2664",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "elephunk/records/bloat.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "375"
},
{
"name": "JavaScript",
"bytes": "2840"
},
{
"name": "Python",
"bytes": "30243"
},
{
"name": "Shell",
"bytes": "272"
}
],
"symlink_target": ""
} |
"""A module for the main curvature optimizer class."""
from typing import Any, Callable, Iterator, Mapping, Optional, Sequence, Tuple, Union
import jax
import jax.lax as lax
import jax.numpy as jnp
import jax.random as jnr
from kfac_ferminet_alpha import estimator
from kfac_ferminet_alpha import tag_graph_matcher as tgm
from kfac_ferminet_alpha import utils
ScheduleType = Callable[[jnp.ndarray], Optional[jnp.ndarray]]
Parameters = Any
Batch = Any
FuncState = Any
State = Mapping[str, Any]
@utils.Stateful.infer_class_state
class Optimizer(utils.Stateful):
"""The default optimizer class."""
velocities: Parameters
estimator: estimator.CurvatureEstimator
step_counter: jnp.ndarray
def __init__(
self,
value_and_grad_func,
l2_reg: Union[float, jnp.ndarray],
value_func_has_aux: bool = False,
value_func_has_state: bool = False,
value_func_has_rng: bool = False,
learning_rate_schedule: Optional[ScheduleType] = None,
momentum_schedule: Optional[ScheduleType] = None,
damping_schedule: Optional[ScheduleType] = None,
min_damping: Union[float, jnp.ndarray] = 1e-8,
max_damping: Union[float, jnp.ndarray] = jnp.inf,
norm_constraint: Optional[Union[float, jnp.ndarray]] = None,
num_burnin_steps: int = 10,
estimation_mode: str = "fisher_gradients",
curvature_ema: Union[float, jnp.ndarray] = 0.95,
inverse_update_period: int = 5,
register_only_generic: bool = False,
layer_tag_to_block_cls: Optional[estimator.TagMapping] = None,
patterns_to_skip: Sequence[str] = (),
donate_parameters: bool = False,
donate_optimizer_state: bool = False,
donate_batch_inputs: bool = False,
donate_func_state: bool = False,
batch_process_func: Optional[Callable[[Any], Any]] = None,
multi_device: bool = False,
use_jax_cond: bool = True,
debug: bool = False,
pmap_axis_name="kfac_axis",
):
"""Initializes the K-FAC optimizer with the given settings.
Args:
      value_and_grad_func: Python callable. The function should return the value
        of the loss to be optimized and its gradients. If the argument
        `value_func_has_aux` is `False` then the interface should be:
          loss, loss_grads = value_and_grad_func(params, batch)
        If `value_func_has_aux` is `True` then the interface should be:
          (loss, aux), loss_grads = value_and_grad_func(params, batch)
l2_reg: Scalar. Set this value to tell the optimizer what L2
regularization coefficient you are using (if any). Note the coefficient
appears in the regularizer as coeff / 2 * sum(param**2). Note that the
user is still responsible for adding regularization to the loss.
value_func_has_aux: Boolean. Specifies whether the provided callable
`value_and_grad_func` returns the loss value only, or also some
auxiliary data. (Default: False)
value_func_has_state: Boolean. Specifies whether the provided callable
        `value_and_grad_func` has a persistent state that is passed in and
        whether it also outputs an updated version of it. (Default: False)
value_func_has_rng: Boolean. Specifies whether the provided callable
`value_and_grad_func` additionally takes as input an rng key.
(Default: False)
learning_rate_schedule: Callable. A schedule for the learning rate. This
should take as input the current step number and return a single
`jnp.ndarray` that represents the learning rate. (Default: None)
momentum_schedule: Callable. A schedule for the momentum. This should take
as input the current step number and return a single `jnp.ndarray`
that represents the momentum. (Default: None)
damping_schedule: Callable. A schedule for the damping. This should take
as input the current step number and return a single `jnp.ndarray`
        that represents the damping. (Default: None)
min_damping: Scalar. Minimum value the damping parameter can take. Note
that the default value of 1e-8 is quite arbitrary, and you may have to
adjust this up or down for your particular problem. If you are using a
non-zero value of l2_reg you *may* be able to set this to
zero. (Default: 1e-8)
max_damping: Scalar. Maximum value the damping parameter can take.
(Default: Infinity)
norm_constraint: Scalar. If specified, the update is scaled down so that
its approximate squared Fisher norm `v^T F v` is at most the specified
        value. (Note that here `F` is the approximate curvature matrix, not the
        exact one.) (Default: None)
num_burnin_steps: Int. At the start of optimization, e.g. the first step,
        before performing the actual step, the optimizer will perform this many
        updates to the curvature approximation without updating the
actual parameters. (Default: 10)
estimation_mode: String. The type of estimator to use for the curvature
        matrix. Can be one of:
          * fisher_empirical
          * fisher_exact
          * fisher_gradients
          * fisher_curvature_prop
          * ggn_exact
          * ggn_curvature_prop
        See the doc-string for CurvatureEstimator (in estimator.py) for a more
        detailed description of these options. (Default: 'fisher_gradients').
curvature_ema: The decay factor used when calculating the covariance
estimate moving averages. (Default: 0.95)
inverse_update_period: Int. The number of steps in between updating the
        computation of the inverse curvature approximation. (Default: 5)
register_only_generic: Boolean. Whether when running the auto-tagger to
register only generic parameters, or allow it to use the graph matcher
to automatically pick up any kind of layer tags. (Default: False)
layer_tag_to_block_cls: Dictionary. A mapping from layer tags to block
        classes, used to override the default choice of block approximation for
that specific tag. See the doc-string for CurvatureEstimator (in
estimator.py) for a more detailed description of this.
patterns_to_skip: Tuple. A list of any patterns that should be skipped by
the graph matcher when auto-tagging.
donate_parameters: Boolean. Whether to use jax's `donate_argnums` to
donate the parameter values of each call to `step`. Note that this
implies that you will not be able to access the old parameter values'
buffers after calling into `step`.
donate_optimizer_state: Boolean. Whether to use jax's `donate_argnums` to
donate the optimizer state of each call to `step`. Note that this
implies that you will not be able to access the old optimizer state
values' buffers after calling into `step`.
donate_batch_inputs: Boolean. Whether to use jax's `donate_argnums` to
donate the batch values of each call to `step`. Note that this implies
that you will not be able to access the old batch values' buffers after
calling into `step`.
donate_func_state: Boolean. Whether to use jax's `donate_argnums` to
donate the persistent function state of each call to `step`. Note that
this implies that you will not be able to access the old function state
values' buffers after calling into `step`.
      batch_process_func: Callable. A function to be called on each batch
        before it is fed to the optimizer on device. This could be useful for
        specific device input optimizations.
multi_device: Boolean. Whether to use `pmap` and run the optimizer on
multiple devices. (Default: False)
use_jax_cond: Not used for the moment.
      debug: Boolean. If True, none of the step or init functions will be
        jitted. Note that this also overrides `multi_device` and prevents using
        `pmap`. (Default: False)
pmap_axis_name: String. The name of the `pmap` axis to use when
        `multi_device` is set to True. (Default: kfac_axis)
"""
super().__init__()
self.value_and_grad_func = value_and_grad_func
self.value_func_has_aux = value_func_has_aux
self.value_func_has_state = value_func_has_state
self.value_func_has_rng = value_func_has_rng
self.value_func = utils.convert_value_and_grad_to_value_func(
value_and_grad_func, has_aux=value_func_has_aux)
self.l2_reg = l2_reg
self.learning_rate_schedule = learning_rate_schedule
if momentum_schedule is not None:
def schedule_with_first_step_zero(global_step: jnp.ndarray):
value = momentum_schedule(global_step)
check = jnp.equal(global_step, 0)
return check * jnp.zeros_like(value) + (1 - check) * value
self.momentum_schedule = schedule_with_first_step_zero
else:
self.momentum_schedule = None
self.damping_schedule = damping_schedule
self.min_damping = min_damping
self.max_damping = max_damping
self.norm_constraint = norm_constraint
self.num_burnin_steps = num_burnin_steps
self.estimation_mode = estimation_mode
self.curvature_ema = curvature_ema
self.inverse_update_period = inverse_update_period
self.register_only_generic = register_only_generic
self.layer_tag_to_block_cls = layer_tag_to_block_cls
self.patterns_to_skip = patterns_to_skip
self.donate_parameters = donate_parameters
self.donate_optimizer_state = donate_optimizer_state
self.donate_batch_inputs = donate_batch_inputs
self.donate_func_state = donate_func_state
self.batch_process_func = batch_process_func or (lambda x: x)
self.multi_device = multi_device
self.use_jax_cond = use_jax_cond
self.debug = debug
self.pmap_axis_name = pmap_axis_name if multi_device else None
self._rng_split = utils.p_split if multi_device else jnr.split
# Attributes filled in during self.init()
self.finalized = False
self.tagged_func = None
self.flat_params_shapes = None
self.params_treedef = None
# Special attributes related to jitting/pmap
self._jit_init = None
self._jit_burnin = None
self._jit_step = None
def finalize(
self,
params: Parameters,
rng: jnp.ndarray,
batch: Batch,
func_state: Optional[FuncState] = None,
) -> None:
"""Finalizes the optimizer by tracing the model function with the params and batch."""
if self.finalized:
raise ValueError("Optimizer has already been finalized.")
if self.multi_device:
# We assume that the parameters and batch are replicated, while tracing
# must happen with parameters for a single device call
params, rng, batch = jax.tree_map(lambda x: x[0], (params, rng, batch))
if func_state is not None:
func_state = jax.tree_map(lambda x: x[0], func_state)
batch = self.batch_process_func(batch)
# These are all tracing operations and we can run them with abstract values
func_args = utils.make_func_args(params, func_state, rng, batch,
self.value_func_has_state,
self.value_func_has_rng)
# Run all tracing with abstract values so no computation is done
flat_params, self.params_treedef = jax.tree_flatten(params)
self.flat_params_shapes = tuple(p.shape for p in flat_params)
self.tagged_func = tgm.auto_register_tags(
func=self.value_func,
func_args=func_args,
params_index=0,
register_only_generic=self.register_only_generic,
patterns_to_skip=self.patterns_to_skip)
self.estimator = estimator.CurvatureEstimator(
self.tagged_func,
func_args,
self.l2_reg,
self.estimation_mode,
layer_tag_to_block_cls=self.layer_tag_to_block_cls)
# Arguments: params, opt_state, rng, batch, func_state
donate_argnums = []
if self.donate_parameters:
donate_argnums.append(0)
if self.donate_optimizer_state:
donate_argnums.append(1)
if self.donate_batch_inputs:
donate_argnums.append(3)
if self.donate_func_state and self.value_func_has_state:
donate_argnums.append(4)
donate_argnums = tuple(donate_argnums)
if self.debug:
self._jit_init = self._init
self._jit_burnin = self._burnin
self._jit_step = self._step
elif self.multi_device:
self._jit_init = jax.pmap(
self._init, axis_name=self.pmap_axis_name, donate_argnums=[0])
# batch size is static argnum and is at index 5
self._jit_burnin = jax.pmap(
self._burnin,
axis_name=self.pmap_axis_name,
static_broadcasted_argnums=[5])
self._jit_step = jax.pmap(
self._step,
axis_name=self.pmap_axis_name,
donate_argnums=donate_argnums,
static_broadcasted_argnums=[5])
else:
self._jit_init = jax.jit(self._init, donate_argnums=[0])
# batch size is static argnum and is at index 5
self._jit_burnin = jax.jit(self._burnin, static_argnums=[5])
self._jit_step = jax.jit(
self._step, donate_argnums=donate_argnums, static_argnums=[5])
self.finalized = True
def _init(self, rng: jnp.ndarray) -> State:
"""This is the non-jitted version of initializing the state."""
flat_velocities = [jnp.zeros(shape) for shape in self.flat_params_shapes]
return dict(
velocities=jax.tree_unflatten(self.params_treedef, flat_velocities),
estimator=self.estimator.init(rng, None),
step_counter=jnp.asarray(0))
def verify_args_and_get_step_counter(
self,
params: Parameters,
state: State,
rng: jnp.ndarray,
data_iterator: Iterator[Batch],
func_state: Optional[FuncState] = None,
learning_rate: Optional[jnp.ndarray] = None,
momentum: Optional[jnp.ndarray] = None,
damping: Optional[jnp.ndarray] = None,
global_step_int: Optional[int] = None,
) -> int:
"""Verifies that the arguments passed to `Optimizer.step` are correct."""
if not self.finalized:
rng, rng_finalize = self._rng_split(rng)
self.finalize(params, rng_finalize, next(data_iterator), func_state)
# Verify correct arguments invocation
if self.learning_rate_schedule is not None and learning_rate is not None:
raise ValueError("When you have passed a `learning_rate_schedule` you "
"should not pass a value to the step function.")
if self.momentum_schedule is not None and momentum is not None:
raise ValueError("When you have passed a `momentum_schedule` you should "
"not pass a value to the step function.")
if self.damping_schedule is not None and damping is not None:
raise ValueError("When you have passed a `damping_schedule` you should "
"not pass a value to the step function.")
    # Do a burnin on the first iteration
if global_step_int is None:
if self.multi_device:
return int(utils.get_first(state["step_counter"]))
else:
return int(state["step_counter"])
return global_step_int
def _burnin(
self,
params: Parameters,
state: State,
rng: jnp.ndarray,
batch: Batch,
func_state: Optional[FuncState],
batch_size: Optional[int],
) -> Tuple[State, Optional[FuncState]]:
"""This is the non-jitted version of a single burnin step."""
self.set_state(state)
batch = self.batch_process_func(batch)
rng, func_rng = jnr.split(rng) if self.value_func_has_rng else (rng, None)
func_args = utils.make_func_args(params, func_state, func_rng, batch,
self.value_func_has_state,
self.value_func_has_rng)
# Compute batch size
if batch_size is None:
batch_size = jax.tree_flatten(batch)[0][0].shape[0]
# Update curvature estimate
ema_old, ema_new = 1.0, 1.0 / self.num_burnin_steps
self.estimator.update_curvature_matrix_estimate(ema_old, ema_new,
batch_size, rng, func_args,
self.pmap_axis_name)
if func_state is not None:
out, _ = self.value_and_grad_func(*func_args)
_, func_state, _ = utils.extract_func_outputs(out,
self.value_func_has_aux,
self.value_func_has_state)
return self.pop_state(), func_state
def _step(
self,
params: Parameters,
state: State,
rng: jnp.ndarray,
batch: Batch,
func_state: Optional[FuncState],
batch_size: Optional[int],
learning_rate: Optional[jnp.ndarray],
momentum: Optional[jnp.ndarray],
damping: Optional[jnp.ndarray],
) -> Union[Tuple[Parameters, State, FuncState, Mapping[str, jnp.ndarray]],
Tuple[Parameters, State, Mapping[str, jnp.ndarray]]]:
"""This is the non-jitted version of a single step."""
# Unpack and set the state
self.set_state(state)
if damping is not None:
assert self.estimator.damping is None
self.estimator.damping = damping
else:
assert self.estimator.damping is not None
# Preprocess the batch and construct correctly the function arguments
batch = self.batch_process_func(batch)
rng, func_rng = jnr.split(rng) if self.value_func_has_rng else (rng, None)
func_args = utils.make_func_args(params, func_state, func_rng, batch,
self.value_func_has_state,
self.value_func_has_rng)
# Compute the batch size
if batch_size is None:
batch_size = jax.tree_flatten(batch)[0][0].shape[0]
# Compute schedules if applicable
if self.learning_rate_schedule is not None:
assert learning_rate is None
learning_rate = self.learning_rate_schedule(self.step_counter)
else:
assert learning_rate is not None
if self.momentum_schedule is not None:
assert momentum is None
momentum = self.momentum_schedule(self.step_counter)
else:
assert momentum is not None
if self.damping_schedule is not None:
assert damping is None
damping = self.damping_schedule(self.step_counter)
else:
assert damping is not None
# Compute current loss and gradients
out, grads = self.value_and_grad_func(*func_args)
loss, new_func_state, aux = utils.extract_func_outputs(
out, self.value_func_has_aux, self.value_func_has_state)
# Sync loss and grads
loss, grads = utils.pmean_if_pmap((loss, grads), self.pmap_axis_name)
# Update curvature estimate
self.estimator.update_curvature_matrix_estimate(
self.curvature_ema,
1.0,
batch_size,
rng,
func_args,
self.pmap_axis_name,
)
# Optionally update the inverse estimate
self.estimator.set_state(
lax.cond(
self.step_counter % self.inverse_update_period == 0,
lambda s: self.estimator.update_curvature_estimate_inverse( # pylint: disable=g-long-lambda
self.pmap_axis_name, s),
lambda s: s,
self.estimator.pop_state()))
# Compute proposed directions
vectors = self.propose_directions(
grads,
self.velocities,
learning_rate,
momentum,
)
# The learning rate is defined as the negative of the coefficient by which
# we multiply the gradients, while the momentum is the coefficient by
# which we multiply the velocities.
neg_learning_rate = -learning_rate # pytype: disable=unsupported-operands # trace-all-classes
# Compute the coefficients of the update vectors
assert neg_learning_rate is not None and momentum is not None
coefficients = (neg_learning_rate, momentum)
# Update velocities and compute new delta
self.velocities, delta = self.velocities_and_delta(
self.velocities,
vectors,
coefficients,
)
# Update parameters: params = params + delta
params = jax.tree_map(jnp.add, params, delta)
# Optionally compute the reduction ratio and update the damping
self.estimator.damping = None
rho = jnp.nan
# Statistics with useful information
stats = dict()
stats["step"] = self.step_counter
stats["loss"] = loss
stats["learning_rate"] = -coefficients[0]
stats["momentum"] = coefficients[1]
stats["damping"] = damping
stats["rho"] = rho
if self.value_func_has_aux:
stats["aux"] = aux
self.step_counter = self.step_counter + 1
if self.value_func_has_state:
return params, self.pop_state(), new_func_state, stats
else:
assert new_func_state is None
return params, self.pop_state(), stats
def init(
self,
params: Parameters,
rng: jnp.ndarray,
batch: Batch,
func_state: Optional[FuncState] = None,
) -> State:
"""Initializes the optimizer and returns the appropriate optimizer state."""
if not self.finalized:
self.finalize(params, rng, batch, func_state)
return self._jit_init(rng)
def step(
self,
params: Parameters,
state: Mapping[str, Any],
rng: jnp.ndarray,
data_iterator: Iterator[Any],
func_state: Any = None,
learning_rate: Optional[jnp.ndarray] = None,
momentum: Optional[jnp.ndarray] = None,
damping: Optional[jnp.ndarray] = None,
batch_size: Optional[int] = None,
global_step_int: Optional[int] = None,
) -> Union[Tuple[Parameters, State, FuncState, Mapping[str, jnp.ndarray]],
Tuple[Parameters, State, Mapping[str, jnp.ndarray]]]:
"""Performs a single update step using the optimizer.
Args:
params: The parameters of the model.
state: The state of the optimizer.
rng: A Jax PRNG key.
data_iterator: An iterator that returns a batch of data.
func_state: Any function state that gets passed in and returned.
learning_rate: This must be provided when
`use_adaptive_learning_rate=False` and `learning_rate_schedule=None`.
momentum: This must be provided when
`use_adaptive_momentum=False` and `momentum_schedule=None`.
damping: This must be provided when
`use_adaptive_damping=False` and `damping_schedule=None`.
batch_size: The batch size to use for KFAC. The default behaviour when it
is None is to use the leading dimension of the first data array.
global_step_int: The global step as a python int. Note that this must
        match the step internal to the optimizer that is part of its state.
Returns:
(params, state, stats)
where:
params: The updated model parameters.
state: The updated optimizer state.
stats: A dictionary of key statistics provided to be logged.
"""
step_counter_int = self.verify_args_and_get_step_counter(
params=params,
state=state,
rng=rng,
data_iterator=data_iterator,
func_state=func_state,
learning_rate=learning_rate,
momentum=momentum,
damping=damping,
global_step_int=global_step_int)
if step_counter_int == 0:
for _ in range(self.num_burnin_steps):
rng, rng_burn = self._rng_split(rng)
batch = next(data_iterator)
state, func_state = self._jit_burnin(params, state, rng_burn, batch,
func_state, batch_size)
# On the first step we always treat the momentum as 0.0
if self.momentum_schedule is None:
momentum = jnp.zeros([])
if self.multi_device:
momentum = utils.replicate_all_local_devices(momentum)
batch = next(data_iterator)
return self._jit_step(params, state, rng, batch, func_state, batch_size,
learning_rate, momentum, damping)
def propose_directions(
self,
grads: Parameters,
velocities: Parameters,
learning_rate: Optional[jnp.ndarray],
momentum: Optional[jnp.ndarray],
) -> Tuple[Parameters, Parameters]:
"""Computes the vector proposals for the next step."""
del momentum # not used in this, but could be used in subclasses
preconditioned_grads = self.estimator.multiply_matpower(grads, -1)
if self.norm_constraint is not None:
assert learning_rate is not None
sq_norm_grads = utils.inner_product(preconditioned_grads, grads)
sq_norm_scaled_grads = sq_norm_grads * learning_rate**2
# We need to sync the norms here, because reduction can be
# non-deterministic. They specifically are on GPUs by default for better
# performance. Hence although grads and preconditioned_grads are synced,
# the inner_product operation can still produce different answers on
# different devices.
sq_norm_scaled_grads = utils.pmean_if_pmap(sq_norm_scaled_grads,
self.pmap_axis_name)
max_coefficient = jnp.sqrt(self.norm_constraint / sq_norm_scaled_grads)
coefficient = jnp.minimum(max_coefficient, 1)
preconditioned_grads = utils.scalar_mul(preconditioned_grads, coefficient)
return preconditioned_grads, velocities
def velocities_and_delta(
self,
velocities: Parameters,
vectors: Sequence[Parameters],
coefficients: Sequence[jnp.ndarray],
) -> Sequence[Parameters]:
"""Computes the new velocities and delta (update to parameters)."""
del velocities
assert len(vectors) == len(coefficients)
delta = utils.scalar_mul(vectors[0], coefficients[0])
for vi, wi in zip(vectors[1:], coefficients[1:]):
delta = jax.tree_map(jnp.add, delta, utils.scalar_mul(vi, wi))
return delta, delta
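# Illustrative usage sketch (not part of the original module). The names
# `loss_fn`, `init_params`, `data_iterator` and `num_steps` are assumptions
# for illustration only; see the doc-strings above for the exact interfaces.
#
#   value_and_grad_func = jax.value_and_grad(loss_fn)  # loss_fn(params, batch)
#   optimizer = Optimizer(value_and_grad_func,
#                         l2_reg=0.0,
#                         learning_rate_schedule=lambda step: 1e-3,
#                         momentum_schedule=lambda step: 0.9,
#                         damping_schedule=lambda step: 1e-3)
#   rng = jnr.PRNGKey(0)
#   state = optimizer.init(init_params, rng, next(data_iterator))
#   params = init_params
#   for _ in range(num_steps):
#     rng, step_rng = jnr.split(rng)
#     params, state, stats = optimizer.step(params, state, step_rng,
#                                           data_iterator)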
| {
"content_hash": "33609bb019e87564e8b754bdd99469b4",
"timestamp": "",
"source": "github",
"line_count": 597,
"max_line_length": 104,
"avg_line_length": 43.32998324958124,
"alnum_prop": 0.6567960414411629,
"repo_name": "deepmind/deepmind-research",
"id": "9f1973cf180501b62c8bb6daa265897c267d6c2e",
"size": "26461",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kfac_ferminet_alpha/optimizer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1002"
},
{
"name": "C++",
"bytes": "5765"
},
{
"name": "Jupyter Notebook",
"bytes": "12330730"
},
{
"name": "Lua",
"bytes": "76186"
},
{
"name": "OpenEdge ABL",
"bytes": "15630"
},
{
"name": "PureBasic",
"bytes": "8"
},
{
"name": "Python",
"bytes": "3419119"
},
{
"name": "Racket",
"bytes": "226692"
},
{
"name": "Shell",
"bytes": "84450"
},
{
"name": "Starlark",
"bytes": "3463"
}
],
"symlink_target": ""
} |
import datetime
import pytz
from feedgen.feed import FeedGenerator
from flask import Flask
app = Flask(__name__)
@app.route('/')
def index():
thecodingloveurl = "http://thecodinglove.com/rss"
pubdate = datetime.datetime(2017, 8, 7, 21, 0, 0, 0, pytz.UTC)
fg = FeedGenerator()
fg.title('The coding love with images.')
fg.link(href=thecodingloveurl)
fg.description('The coding love with images.')
fe = fg.add_entry()
fe.id("Life is good.")
fe.link(href=thecodingloveurl)
fe.title("The original coding love now has images!")
fe.pubdate(pubdate)
rssfeed = fg.rss_str(pretty=True)
return rssfeed
if __name__ == "__main__":
app.run(host='0.0.0.0')
| {
"content_hash": "5dc44078a8baddbf409268b358e811dc",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 66,
"avg_line_length": 22.838709677419356,
"alnum_prop": 0.653954802259887,
"repo_name": "chrillux/thecodingloverss",
"id": "4fa7ac9826573d84c912d5030eb4c43dccf36598",
"size": "708",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "thecodingloverss.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "708"
}
],
"symlink_target": ""
} |
import django
import pytest
from django.http import Http404
from ninja import NinjaAPI, Schema
from ninja.testing import TestAsyncClient, TestClient
api = NinjaAPI()
class CustomException(Exception):
pass
@api.exception_handler(CustomException)
def on_custom_error(request, exc):
return api.create_response(request, {"custom": True}, status=422)
class Payload(Schema):
test: int
@api.post("/error/{code}")
def err_thrower(request, code: str, payload: Payload = None):
if code == "base":
raise RuntimeError("test")
if code == "404":
raise Http404("test")
if code == "custom":
raise CustomException("test")
client = TestClient(api)
def test_default_handler(settings):
settings.DEBUG = True
response = client.post("/error/base")
assert response.status_code == 500
assert b"RuntimeError: test" in response.content
response = client.post("/error/404")
assert response.status_code == 404
assert response.json() == {"detail": "Not Found: test"}
response = client.post("/error/custom", body="invalid_json")
assert response.status_code == 400
assert response.json() == {
"detail": "Cannot parse request body (Expecting value: line 1 column 1 (char 0))",
}
settings.DEBUG = False
with pytest.raises(RuntimeError):
response = client.post("/error/base")
response = client.post("/error/custom", body="invalid_json")
assert response.status_code == 400
assert response.json() == {"detail": "Cannot parse request body"}
@pytest.mark.parametrize(
"route,status_code,json",
[
("/error/404", 404, {"detail": "Not Found"}),
("/error/custom", 422, {"custom": True}),
],
)
def test_exceptions(route, status_code, json):
response = client.post(route)
assert response.status_code == status_code
assert response.json() == json
@pytest.mark.skipif(django.VERSION < (3, 1), reason="requires django 3.1 or higher")
@pytest.mark.asyncio
async def test_asyncio_exceptions():
api = NinjaAPI()
@api.get("/error")
async def thrower(request):
raise Http404("test")
client = TestAsyncClient(api)
response = await client.get("/error")
assert response.status_code == 404
def test_no_handlers():
api = NinjaAPI()
api._exception_handlers = {}
@api.get("/error")
def thrower(request):
raise RuntimeError("test")
client = TestClient(api)
with pytest.raises(RuntimeError):
client.get("/error")
| {
"content_hash": "48c5d0eb77a61d06fc6a42cd8ecb7b4a",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 90,
"avg_line_length": 24.97029702970297,
"alnum_prop": 0.654639175257732,
"repo_name": "vitalik/django-ninja",
"id": "839863b6d9c596cc8d5e1ed069acc41712a7cf07",
"size": "2522",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_exceptions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "1519"
},
{
"name": "HTML",
"bytes": "2544"
},
{
"name": "Makefile",
"bytes": "768"
},
{
"name": "Python",
"bytes": "331788"
},
{
"name": "Shell",
"bytes": "486"
}
],
"symlink_target": ""
} |
import sys
from typing import Any, AsyncIterable, Callable, Dict, Optional, TypeVar
import urllib.parse
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._replication_logical_networks_operations import (
build_get_request,
build_list_by_replication_fabrics_request,
)
if sys.version_info >= (3, 8):
from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports
else:
from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ReplicationLogicalNetworksOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.recoveryservicessiterecovery.aio.SiteRecoveryManagementClient`'s
:attr:`replication_logical_networks` attribute.
"""
models = _models
def __init__(self, *args, **kwargs) -> None:
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
def list_by_replication_fabrics(self, fabric_name: str, **kwargs: Any) -> AsyncIterable["_models.LogicalNetwork"]:
"""Gets the list of logical networks under a fabric.
Lists all the logical networks of the Azure Site Recovery fabric.
:param fabric_name: Server Id. Required.
:type fabric_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either LogicalNetwork or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.recoveryservicessiterecovery.models.LogicalNetwork]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-09-10"] = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
)
cls: ClsType[_models.LogicalNetworkCollection] = kwargs.pop("cls", None)
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_replication_fabrics_request(
fabric_name=fabric_name,
resource_name=self._config.resource_name,
resource_group_name=self._config.resource_group_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list_by_replication_fabrics.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("LogicalNetworkCollection", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
list_by_replication_fabrics.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{resourceName}/replicationFabrics/{fabricName}/replicationLogicalNetworks"
}
@distributed_trace_async
async def get(self, fabric_name: str, logical_network_name: str, **kwargs: Any) -> _models.LogicalNetwork:
"""Gets a logical network with specified server id and logical network name.
Gets the details of a logical network.
:param fabric_name: Server Id. Required.
:type fabric_name: str
:param logical_network_name: Logical network name. Required.
:type logical_network_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: LogicalNetwork or the result of cls(response)
:rtype: ~azure.mgmt.recoveryservicessiterecovery.models.LogicalNetwork
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-09-10"] = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
)
cls: ClsType[_models.LogicalNetwork] = kwargs.pop("cls", None)
request = build_get_request(
fabric_name=fabric_name,
logical_network_name=logical_network_name,
resource_name=self._config.resource_name,
resource_group_name=self._config.resource_group_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("LogicalNetwork", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{resourceName}/replicationFabrics/{fabricName}/replicationLogicalNetworks/{logicalNetworkName}"
}
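# Illustrative usage sketch (not part of the generated module), assuming an
# already-constructed SiteRecoveryManagementClient named `client`; the fabric
# and logical network names shown are placeholders:
#
#   async for network in client.replication_logical_networks.list_by_replication_fabrics("fabric-1"):
#       print(network.name)
#   network = await client.replication_logical_networks.get("fabric-1", "logical-network-1")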
| {
"content_hash": "d48bf90ce0024294663d3293dacb42f6",
"timestamp": "",
"source": "github",
"line_count": 211,
"max_line_length": 222,
"avg_line_length": 43.43601895734597,
"alnum_prop": 0.6454991816693945,
"repo_name": "Azure/azure-sdk-for-python",
"id": "bd96444d7e33e1f5b7bca48106fd99c92bff9b58",
"size": "9665",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/recoveryservices/azure-mgmt-recoveryservicessiterecovery/azure/mgmt/recoveryservicessiterecovery/aio/operations/_replication_logical_networks_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
from .bins import SimpleBin, NumberBin, VolumeError
from .fit import Fit, BinReduction, BinOrdering
| {
"content_hash": "4d662b36ffdbdd2ebef7da0bc985b4e8",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 51,
"avg_line_length": 33.666666666666664,
"alnum_prop": 0.8118811881188119,
"repo_name": "ibigpapa/bin_packing_problem",
"id": "885dc836609f4c3a74202636155cea8d9da37087",
"size": "101",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "binpackp/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "39251"
}
],
"symlink_target": ""
} |