code (stringlengths 3–1.05M) | repo_name (stringlengths 5–104) | path (stringlengths 4–251) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int64 3–1.05M)
---|---|---|---|---|---
# -*- coding: utf-8 -*-
# Generated by Django 1.9.9 on 2017-05-28 02:00
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('personas', '0034_auto_20170527_1648'),
]
operations = [
migrations.AlterField(
model_name='itemctto',
name='DescripItem',
field=models.CharField(blank=True, max_length=150, null=True),
),
migrations.AlterField(
model_name='itemodc',
name='DescripItem',
field=models.CharField(blank=True, max_length=150, null=True),
),
]
| Ykharo/tutorial_P3_4 | personas/migrations/0035_auto_20170527_2200.py | Python | mit | 665 |
#!/usr/bin/env python
# Plot nurbs surface corresponding to a 2D damped oscillation.
# The surface has two holes. A Teapot below the surface is
# visible through these holes.
#
# Copyright (C) 2007 "Peter Roesch" <Peter.Roesch@fh-augsburg.de>
#
# This code is licensed under the PyOpenGL License.
# Details are given in the file license.txt included in this distribution.
import sys
import math
from time import sleep
import traceback
try:
from OpenGL.GLUT import *
from OpenGL.GL import *
from OpenGL.GLU import *
except ImportError, err:
traceback.print_exc()
print ''' Error: PyOpenGL not installed properly!!'''
sys.exit( )
# globals
animationAngle = 0.0
frameRate = 25
curr_time=0.0
nurbsID=0
def animationStep( ):
"""Update animated parameters"""
global animationAngle
global frameRate
animationAngle += 1
while animationAngle > 360:
animationAngle -= 360
global curr_time
curr_time+=0.05
global nurbsID
glDeleteLists( nurbsID, 1 )
nurbsID = glGenLists( 1 )
glNewList( nurbsID, GL_COMPILE )
plotSurface( curr_time )
glEndList( )
sleep( 1 / float( frameRate ) )
glutPostRedisplay( )
sigma = 0.5;
twoSigSq = 2. * sigma * sigma;
def dampedOscillation( u, v, t):
"""Calculation of a R2 -> R1 function at position u,v at curr_time t.
A t-dependent cosine function is multiplied with a 2D gaussian.
Both functions depend on the distance of (u,v) to the origin."""
distSq = u * u + v * v;
dist = math.pi * 4 * math.sqrt( distSq );
global twoSigSq
return 0.5 * math.exp(-distSq / twoSigSq) * math.cos(dist - t);
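# For reference, the surface height implemented above is (using the symbols defined in this
# file) roughly f(u, v, t) = 0.5 * exp(-(u*u + v*v) / (2*sigma*sigma)) * cos(4*pi*sqrt(u*u + v*v) - t),
# i.e. a cosine wave travelling outward from the origin, damped by a Gaussian envelope.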
nPts = 15
degree = 4
samplingTolerance=2.0
xMin, xMax, yMin, yMax = -1.0, 1.0, -1.0, 1.0
xStep = (xMax-xMin)/(nPts-1)
yStep = (yMax-yMin)/(nPts-1)
# initialise a list representing a regular 2D grid of control points.
controlPoints = [ \
[ [ yMin+y*yStep, xMin+x*xStep, 0.0 ] for x in range ( nPts )]\
for y in range( nPts ) ]
# initialise knots ...
knots = [ 0.0 for i in range( degree/2 ) ] +\
[ float(i)/(nPts-1) for i in range( nPts )] +\
[ 1.0 for i in range( (degree+1)/2 ) ]
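# With degree = 4 and nPts = 15 the vector above has 2 + 15 + 2 = 19 entries, i.e.
# nPts + degree knots. A NURBS curve needs (number of control points + order) knots,
# which is presumably how the PyOpenGL gluNurbsSurface wrapper infers the order here,
# since no explicit order argument is passed below.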
# initialise enclosing
enclosing=[ [0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0], [0.0, 0.0] ]
# first trim curve is a circle
angleNum = 16
angles = [ -2*math.pi*float(i)/angleNum for i in range( angleNum ) ]
radius=0.05
offset=(0.4, 0.6)
circleDegree=degree
circlePoints = [ \
[ offset[0]+radius*math.cos(theta), offset[1]+radius*math.sin(theta) ]\
for theta in angles ]
for i in range( circleDegree-1 ):
circlePoints = circlePoints + [ circlePoints[i] ]
knotNum = len( circlePoints ) + circleDegree
circleKnots = [ float(i)/(knotNum-1) for i in range( knotNum ) ]
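# The loop above repeats the first circleDegree - 1 control points at the end so that the
# closed trim circle wraps around smoothly; circleKnots then provides
# len(circlePoints) + circleDegree uniformly spaced knots for gluNurbsCurve below.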
# second trim curve is a square
squareHolePoints=[ [0.4, 0.4], [0.4, 0.45], [0.45, 0.45],\
[0.45, 0.4], [0.4, 0.4] ]
def updateControlPoints( t ):
"""Calculate function values for all 2D grid points."""
for row in controlPoints:
for coord in row:
coord[2] = dampedOscillation( coord[0], coord[1], t )
def plotSurface( t ):
# display surface
updateControlPoints( t )
global controlPoints, knots
global nurb
gluBeginSurface( nurb )
gluNurbsSurface ( nurb, knots, knots, controlPoints, GL_MAP2_VERTEX_3 )
# trim curve enclosing
gluBeginTrim( nurb )
global enclosing
gluPwlCurve( nurb, enclosing, GLU_MAP1_TRIM_2 )
gluEndTrim( nurb )
# trim using square
gluBeginTrim( nurb )
global squareHolePoints
gluPwlCurve( nurb, squareHolePoints, GLU_MAP1_TRIM_2 )
gluEndTrim( nurb )
# trim using circle
gluBeginTrim( nurb )
global circlePoints, circleKnots
gluNurbsCurve ( nurb, circleKnots, circlePoints, GLU_MAP1_TRIM_2 )
gluEndTrim( nurb )
gluEndSurface( nurb )
def display( ):
"""Glut display function."""
glClear( GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT )
glMatrixMode( GL_PROJECTION )
glLoadIdentity( )
xSize, ySize = glutGet( GLUT_WINDOW_WIDTH ), glutGet( GLUT_WINDOW_HEIGHT )
gluPerspective(60, float(xSize) / float(ySize), 0.1, 50)
glMatrixMode( GL_MODELVIEW )
glLoadIdentity( )
glPushMatrix( )
glTranslatef( 0, 0, -3 )
glRotatef( -30, 1, .3, 0)
glRotatef( animationAngle, 0, 0, 1 )
glMaterialfv( GL_FRONT_AND_BACK, GL_AMBIENT, [0.2, 0.2, 0.2, 1] )
glMaterialfv( GL_FRONT_AND_BACK, GL_DIFFUSE, [0.7, 0.7, 0.7, 1] )
glCallList( nurbsID )
glPopMatrix( )
glMaterialfv( GL_FRONT_AND_BACK, GL_AMBIENT, [0.0, 0.0, 0.2, 1] )
glMaterialfv( GL_FRONT_AND_BACK, GL_DIFFUSE, [0.0, 0.0, 0.7, 1] )
glTranslatef( 0.0, 0.0, -12.0 )
glCallList( teapotID )
glutSwapBuffers( )
teapotID=0
nurb=None
def init( ):
"""Glut init function."""
glClearColor ( 0, 0, 0, 0 )
glEnable( GL_DEPTH_TEST )
glShadeModel( GL_SMOOTH )
glEnable( GL_LIGHTING )
glEnable( GL_LIGHT0 )
glLightModeli( GL_LIGHT_MODEL_TWO_SIDE, 0 )
glLightfv( GL_LIGHT0, GL_POSITION, [2, 0, 10, 1] )
lA = 0.8; glLightfv( GL_LIGHT0, GL_AMBIENT, [lA, lA, lA, 1] )
lD = 1; glLightfv( GL_LIGHT0, GL_DIFFUSE, [lD, lD, lD, 1] )
lS = 1; glLightfv( GL_LIGHT0, GL_SPECULAR, [lS, lS, lS, 1] )
glMaterialfv( GL_FRONT_AND_BACK, GL_SPECULAR, [0.5, 0.5, 0.5, 1] )
glMaterialf( GL_FRONT_AND_BACK, GL_SHININESS, 50 )
glEnable( GL_AUTO_NORMAL )
global nurb
nurb = gluNewNurbsRenderer()
global samplingTolerance
gluNurbsProperty(nurb, GLU_SAMPLING_TOLERANCE, samplingTolerance)
gluNurbsProperty(nurb, GLU_DISPLAY_MODE, GLU_FILL)
global teapotID
teapotID = glGenLists( 1 )
glNewList( teapotID, GL_COMPILE )
glutSolidTeapot( 1.0 )
glEndList( )
global nurbsID
nurbsID = glGenLists( 1 )
glNewList( nurbsID, GL_COMPILE )
global curr_time
plotSurface( curr_time )
glEndList( )
glutInit( sys.argv )
glutInitDisplayMode( GLUT_DOUBLE | GLUT_RGB | GLUT_DEPTH )
glutInitWindowSize( 250, 250 )
glutInitWindowPosition( 100, 100 )
glutCreateWindow( sys.argv[0] )
init( )
glutDisplayFunc( display )
glutIdleFunc( animationStep )
glutMainLoop( )
| mgood7123/UPM | Sources/PyOpenGL-Demo-3.0.1b1/PyOpenGL-Demo/proesch/nurbs/nurbs.py | Python | gpl-3.0 | 5,778 |
#!/usr/bin/env python
from __future__ import division, print_function, absolute_import
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('lobpcg',parent_package,top_path)
config.add_data_dir('tests')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| valexandersaulys/airbnb_kaggle_contest | venv/lib/python3.4/site-packages/scipy/sparse/linalg/eigen/lobpcg/setup.py | Python | gpl-2.0 | 431 |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
from random import randint
"""
算法的思想是首先排序,然后从一端开始累加,在遇到使得结果值大于等于0.5的值的权的时候就把这个元素返回。
由于带权中位数偏向于大端,所以从大端开始累加。
证明算法的正确性:首先在返回结果的时候肯定在小的一端满足结果。这个时候可以证明在上一次的时候必定有累加
值小于0.5,否则就返回了。
"""
def partition(li, start, end):
li_len = end - start + 1
if li_len < 2:
raise ValueError("list which lenght is less then 2 do not need to partition")
    # Use a randomly chosen element as the pivot and move it to the end of the list,
    # so the rest of the partition logic stays unchanged.
key_index = randint(start, end)
key = li[key_index]
li[key_index], li[end] = li[end], li[key_index]
middle_index = start
for x in xrange(start, end):
if li[x] < key:
li[middle_index], li[x] = li[x], li[middle_index]
middle_index += 1
li[end], li[middle_index] = li[middle_index], li[end]
return middle_index
def sort(li, start, end):
li_len = end - start + 1
if li_len < 2:
return li
middle_index = partition(li, start, end)
sort(li, start, middle_index - 1)
sort(li, middle_index + 1, end)
return li
def find_weight_middle(li):
li_len = len(li)
sorted_list = sort(li, 0, li_len - 1)
weight_sum = 0
for x in reversed(xrange(0, li_len)):
weight_sum += li[x]
if weight_sum > 0.5:
return li[x]
elif weight_sum == 0.5:
return li[x - 1]
def main():
l = [0.1, 0.35, 0.05, 0.1, 0.15, 0.05, 0.2]
print find_weight_middle(l)
print sort(l, 0, len(l) - 1)
if __name__ == '__main__':
main()
| ssjssh/algorithm | src/ssj/clrs/9/9-2(b).py | Python | gpl-2.0 | 1,854 |
try:
set
except NameError:
from sets import Set as set
from django import template
from django.http import Http404
from django.core.paginator import Paginator, InvalidPage
from django.conf import settings
register = template.Library()
DEFAULT_PAGINATION = getattr(settings, 'PAGINATION_DEFAULT_PAGINATION', 20)
DEFAULT_WINDOW = getattr(settings, 'PAGINATION_DEFAULT_WINDOW', 4)
DEFAULT_ORPHANS = getattr(settings, 'PAGINATION_DEFAULT_ORPHANS', 0)
INVALID_PAGE_RAISES_404 = getattr(settings, 'PAGINATION_INVALID_PAGE_RAISES_404',
False)
def do_autopaginate(parser, token):
"""
Splits the arguments to the autopaginate tag and formats them correctly.
"""
split = token.split_contents()
as_index = None
context_var = None
for i, bit in enumerate(split):
if bit == 'as':
as_index = i
break
if as_index is not None:
try:
context_var = split[as_index + 1]
except IndexError:
raise template.TemplateSyntaxError("Context variable assignment " +\
"must take the form of {%% %r object.example_set.all ... as " +\
"context_var_name %%}" % split[0])
del split[as_index:as_index + 2]
if len(split) == 2:
return AutoPaginateNode(split[1])
elif len(split) == 3:
return AutoPaginateNode(split[1], paginate_by=split[2],
context_var=context_var)
elif len(split) == 4:
try:
orphans = int(split[3])
except ValueError:
raise template.TemplateSyntaxError(u'Got %s, but expected integer.' % split[3])
return AutoPaginateNode(split[1], paginate_by=split[2], orphans=orphans,
context_var=context_var)
else:
        raise template.TemplateSyntaxError('%r tag takes one required '
                                           'argument and one optional argument' % split[0])
class AutoPaginateNode(template.Node):
"""
Emits the required objects to allow for Digg-style pagination.
First, it looks in the current context for the variable specified, and using
that object, it emits a simple ``Paginator`` and the current page object
into the context names ``paginator`` and ``page_obj``, respectively.
It will then replace the variable specified with only the objects for the
current page.
.. note::
It is recommended to use *{% paginate %}* after using the autopaginate
tag. If you choose not to use *{% paginate %}*, make sure to display the
list of available pages, or else the application may seem to be buggy.
"""
def __init__(self, queryset_var, paginate_by=DEFAULT_PAGINATION,
orphans=DEFAULT_ORPHANS, context_var=None):
self.queryset_var = template.Variable(queryset_var)
if isinstance(paginate_by, int):
self.paginate_by = paginate_by
else:
self.paginate_by = template.Variable(paginate_by)
self.orphans = orphans
self.context_var = context_var
def render(self, context):
key = self.queryset_var.var
value = self.queryset_var.resolve(context)
if isinstance(self.paginate_by, int):
paginate_by = self.paginate_by
else:
paginate_by = self.paginate_by.resolve(context)
paginator = Paginator(value, paginate_by, self.orphans)
try:
page_obj = paginator.page(context['request'].page)
except InvalidPage:
if INVALID_PAGE_RAISES_404:
raise Http404('Invalid page requested. If DEBUG were set to ' +
'False, an HTTP 404 page would have been shown instead.')
context[key] = []
context['invalid_page'] = True
return u''
if self.context_var is not None:
context[self.context_var] = page_obj.object_list
else:
context[key] = page_obj.object_list
context['paginator'] = paginator
context['page_obj'] = page_obj
return u''
def paginate(context, window=DEFAULT_WINDOW, float='right'):
"""
Renders the ``pagination/pagination.html`` template, resulting in a
Digg-like display of the available pages, given the current page. If there
are too many pages to be displayed before and after the current page, then
elipses will be used to indicate the undisplayed gap between page numbers.
Requires one argument, ``context``, which should be a dictionary-like data
structure and must contain the following keys:
``paginator``
A ``Paginator`` or ``QuerySetPaginator`` object.
``page_obj``
This should be the result of calling the page method on the
aforementioned ``Paginator`` or ``QuerySetPaginator`` object, given
the current page.
This same ``context`` dictionary-like data structure may also include:
``getvars``
A dictionary of all of the **GET** parameters in the current request.
This is useful to maintain certain types of state, even when requesting
a different page.
"""
try:
paginator = context['paginator']
page_obj = context['page_obj']
page_range = paginator.page_range
# First and last are simply the first *n* pages and the last *n* pages,
# where *n* is the current window size.
first = set(page_range[:window])
last = set(page_range[-window:])
# Now we look around our current page, making sure that we don't wrap
# around.
current_start = page_obj.number-1-window
if current_start < 0:
current_start = 0
current_end = page_obj.number-1+window
if current_end < 0:
current_end = 0
current = set(page_range[current_start:current_end])
pages = []
# If there's no overlap between the first set of pages and the current
        # set of pages, then there's a possible need for elision.
if len(first.intersection(current)) == 0:
first_list = list(first)
first_list.sort()
second_list = list(current)
second_list.sort()
pages.extend(first_list)
diff = second_list[0] - first_list[-1]
# If there is a gap of two, between the last page of the first
# set and the first page of the current set, then we're missing a
# page.
if diff == 2:
pages.append(second_list[0] - 1)
# If the difference is just one, then there's nothing to be done,
            # as the pages need no elision and are correct.
elif diff == 1:
pass
# Otherwise, there's a bigger gap which needs to be signaled for
            # elision, by pushing a None value to the page list.
else:
pages.append(None)
pages.extend(second_list)
else:
unioned = list(first.union(current))
unioned.sort()
pages.extend(unioned)
# If there's no overlap between the current set of pages and the last
        # set of pages, then there's a possible need for elision.
if len(current.intersection(last)) == 0:
second_list = list(last)
second_list.sort()
diff = second_list[0] - pages[-1]
# If there is a gap of two, between the last page of the current
# set and the first page of the last set, then we're missing a
# page.
if diff == 2:
pages.append(second_list[0] - 1)
# If the difference is just one, then there's nothing to be done,
            # as the pages need no elision and are correct.
elif diff == 1:
pass
# Otherwise, there's a bigger gap which needs to be signaled for
            # elision, by pushing a None value to the page list.
else:
pages.append(None)
pages.extend(second_list)
else:
differenced = list(last.difference(current))
differenced.sort()
pages.extend(differenced)
to_return = {
'float': float,
'pages': pages,
'page_obj': page_obj,
'paginator': paginator,
'is_paginated': paginator.count > paginator.per_page,
}
if 'request' in context:
getvars = context['request'].GET.copy()
if 'page' in getvars:
del getvars['page']
if len(getvars.keys()) > 0:
to_return['getvars'] = "&%s" % getvars.urlencode()
else:
to_return['getvars'] = ''
return to_return
    except (KeyError, AttributeError):
return {}
register.inclusion_tag('pagination/pagination.html', takes_context=True)(paginate)
register.tag('autopaginate', do_autopaginate)
| Alerion/shopping-helper | lib/pagination/templatetags/pagination_tags.py | Python | mit | 8,885 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from transit_indicators.gtfs import GTFSRouteTypes
def forwards(apps, schema_editor):
""" Create a table row for each entry in GTFSRouteType.CHOICES
    Note that this migration will use whatever the current CHOICES are in the GTFSRouteTypes class,
and not the CHOICES that were defined at the time the migration was created.
TODO: Create a management command to update the GTFSRouteTypes (and call it here?)
"""
GTFSRouteType = apps.get_model('transit_indicators', 'GTFSRouteType')
for choice in GTFSRouteTypes.CHOICES:
gtfs_route_type = GTFSRouteType.objects.update_or_create(route_type=choice[0], description=choice[1])
def backwards(apps, schema_editor):
""" Destroy all route type rows before deleting table"""
GTFSRouteType = apps.get_model('transit_indicators', 'GTFSRouteType')
gtfs_route_types = GTFSRouteType.objects.all()
gtfs_route_types.delete()
class Migration(migrations.Migration):
dependencies = [
('transit_indicators', '0022_gtfsroutetype'),
]
operations = [
migrations.RunPython(forwards, backwards),
]
| flibbertigibbet/open-transit-indicators | python/django/transit_indicators/migrations/0023_auto_20140904_1922.py | Python | gpl-3.0 | 1,222 |
#!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: iam
short_description: Manage IAM users, groups, roles and keys
description:
- Allows for the management of IAM users, user API keys, groups, roles.
version_added: "2.0"
options:
iam_type:
description:
- Type of IAM resource
choices: ["user", "group", "role"]
type: str
name:
description:
- Name of IAM resource to create or identify
required: true
type: str
new_name:
description:
- When state is update, will replace name with new_name on IAM resource
type: str
new_path:
description:
- When state is update, will replace the path with new_path on the IAM resource
type: str
state:
description:
- Whether to create, delete or update the IAM resource. Note, roles cannot be updated.
required: true
choices: [ "present", "absent", "update" ]
type: str
path:
description:
- When creating or updating, specify the desired path of the resource. If state is present,
it will replace the current path to match what is passed in when they do not match.
default: "/"
type: str
trust_policy:
description:
- The inline (JSON or YAML) trust policy document that grants an entity permission to assume the role. Mutually exclusive with C(trust_policy_filepath).
version_added: "2.2"
type: dict
trust_policy_filepath:
description:
- The path to the trust policy document that grants an entity permission to assume the role. Mutually exclusive with C(trust_policy).
version_added: "2.2"
type: str
access_key_state:
description:
- When type is user, it creates, removes, deactivates or activates a user's access key(s). Note that actions apply only to keys specified.
choices: [ "create", "remove", "active", "inactive", "Create", "Remove", "Active", "Inactive"]
type: str
key_count:
description:
- When access_key_state is create it will ensure this quantity of keys are present. Defaults to 1.
default: 1
type: int
access_key_ids:
description:
- A list of the keys that you want impacted by the access_key_state parameter.
type: list
groups:
description:
- A list of groups the user should belong to. When update, will gracefully remove groups not listed.
type: list
password:
description:
      - When type is user and state is present, define the user's login password. Also works with update. Note that this always returns changed.
type: str
update_password:
default: always
choices: ['always', 'on_create']
description:
- C(always) will update passwords if they differ. C(on_create) will only set the password for newly created users.
type: str
notes:
- 'Currently boto does not support the removal of Managed Policies, the module will error out if your
user/group/role has managed policies when you try to do state=absent. They will need to be removed manually.'
author:
- "Jonathan I. Davila (@defionscode)"
- "Paul Seiffert (@seiffert)"
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Basic user creation example
tasks:
- name: Create two new IAM users with API keys
iam:
iam_type: user
name: "{{ item }}"
state: present
password: "{{ temp_pass }}"
access_key_state: create
loop:
- jcleese
- mpython
# Advanced example, create two new groups and add the pre-existing user
# jdavila to both groups.
task:
- name: Create Two Groups, Mario and Luigi
iam:
iam_type: group
name: "{{ item }}"
state: present
loop:
- Mario
- Luigi
register: new_groups
- name:
iam:
iam_type: user
name: jdavila
state: update
groups: "{{ item.created_group.group_name }}"
loop: "{{ new_groups.results }}"
# Example of role with custom trust policy for Lambda service
- name: Create IAM role with custom trust relationship
iam:
iam_type: role
name: AAALambdaTestRole
state: present
trust_policy:
Version: '2012-10-17'
Statement:
- Action: sts:AssumeRole
Effect: Allow
Principal:
Service: lambda.amazonaws.com
'''
RETURN = '''
role_result:
description: the IAM.role dict returned by Boto
type: str
returned: if iam_type=role and state=present
sample: {
"arn": "arn:aws:iam::A1B2C3D4E5F6:role/my-new-role",
"assume_role_policy_document": "...truncated...",
"create_date": "2017-09-02T14:32:23Z",
"path": "/",
"role_id": "AROAA1B2C3D4E5F6G7H8I",
"role_name": "my-new-role"
}
roles:
description: a list containing the name of the currently defined roles
type: list
returned: if iam_type=role and state=present
sample: [
"my-new-role",
"my-existing-role-1",
"my-existing-role-2",
"my-existing-role-3",
"my-existing-role-...",
]
'''
import json
import traceback
try:
import boto.exception
import boto.iam
import boto.iam.connection
except ImportError:
pass # Taken care of by ec2.HAS_BOTO
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import (HAS_BOTO, boto_exception, connect_to_aws, ec2_argument_spec,
get_aws_connection_info)
def _paginate(func, attr):
'''
paginates the results from func by continuously passing in
the returned marker if the results were truncated. this returns
an iterator over the items in the returned response. `attr` is
the name of the attribute to iterate over in the response.
'''
finished, marker = False, None
while not finished:
res = func(marker=marker)
for item in getattr(res, attr):
yield item
finished = res.is_truncated == 'false'
if not finished:
marker = res.marker
def list_all_groups(iam):
return [item['group_name'] for item in _paginate(iam.get_all_groups, 'groups')]
def list_all_users(iam):
return [item['user_name'] for item in _paginate(iam.get_all_users, 'users')]
def list_all_roles(iam):
return [item['role_name'] for item in _paginate(iam.list_roles, 'roles')]
def list_all_instance_profiles(iam):
return [item['instance_profile_name'] for item in _paginate(iam.list_instance_profiles, 'instance_profiles')]
def create_user(module, iam, name, pwd, path, key_state, key_count):
key_qty = 0
keys = []
try:
user_meta = iam.create_user(
name, path).create_user_response.create_user_result.user
changed = True
if pwd is not None:
pwd = iam.create_login_profile(name, pwd)
if key_state in ['create']:
if key_count:
while key_count > key_qty:
keys.append(iam.create_access_key(
user_name=name).create_access_key_response.
create_access_key_result.
access_key)
key_qty += 1
else:
keys = None
except boto.exception.BotoServerError as err:
module.fail_json(changed=False, msg=str(err))
else:
user_info = dict(created_user=user_meta, password=pwd, access_keys=keys)
return (user_info, changed)
def delete_dependencies_first(module, iam, name):
changed = False
# try to delete any keys
try:
current_keys = [ck['access_key_id'] for ck in
iam.get_all_access_keys(name).list_access_keys_result.access_key_metadata]
for key in current_keys:
iam.delete_access_key(key, name)
changed = True
except boto.exception.BotoServerError as err:
module.fail_json(changed=changed, msg="Failed to delete keys: %s" % err, exception=traceback.format_exc())
# try to delete login profiles
try:
login_profile = iam.get_login_profiles(name).get_login_profile_response
iam.delete_login_profile(name)
changed = True
except boto.exception.BotoServerError as err:
error_msg = boto_exception(err)
if 'Login Profile for User ' + name + ' cannot be found.' not in error_msg:
module.fail_json(changed=changed, msg="Failed to delete login profile: %s" % err, exception=traceback.format_exc())
# try to detach policies
try:
for policy in iam.get_all_user_policies(name).list_user_policies_result.policy_names:
iam.delete_user_policy(name, policy)
changed = True
except boto.exception.BotoServerError as err:
error_msg = boto_exception(err)
if 'must detach all policies first' in error_msg:
module.fail_json(changed=changed, msg="All inline policies have been removed. Though it appears"
"that %s has Managed Polices. This is not "
"currently supported by boto. Please detach the policies "
"through the console and try again." % name)
module.fail_json(changed=changed, msg="Failed to delete policies: %s" % err, exception=traceback.format_exc())
# try to deactivate associated MFA devices
try:
mfa_devices = iam.get_all_mfa_devices(name).get('list_mfa_devices_response', {}).get('list_mfa_devices_result', {}).get('mfa_devices', [])
for device in mfa_devices:
iam.deactivate_mfa_device(name, device['serial_number'])
changed = True
except boto.exception.BotoServerError as err:
module.fail_json(changed=changed, msg="Failed to deactivate associated MFA devices: %s" % err, exception=traceback.format_exc())
return changed
def delete_user(module, iam, name):
changed = delete_dependencies_first(module, iam, name)
try:
iam.delete_user(name)
except boto.exception.BotoServerError as ex:
module.fail_json(changed=changed, msg="Failed to delete user %s: %s" % (name, ex), exception=traceback.format_exc())
else:
changed = True
return name, changed
def update_user(module, iam, name, new_name, new_path, key_state, key_count, keys, pwd, updated):
changed = False
name_change = False
if updated and new_name:
name = new_name
try:
current_keys = [ck['access_key_id'] for ck in
iam.get_all_access_keys(name).list_access_keys_result.access_key_metadata]
status = [ck['status'] for ck in
iam.get_all_access_keys(name).list_access_keys_result.access_key_metadata]
key_qty = len(current_keys)
except boto.exception.BotoServerError as err:
error_msg = boto_exception(err)
if 'cannot be found' in error_msg and updated:
current_keys = [ck['access_key_id'] for ck in
iam.get_all_access_keys(new_name).list_access_keys_result.access_key_metadata]
status = [ck['status'] for ck in
iam.get_all_access_keys(new_name).list_access_keys_result.access_key_metadata]
name = new_name
else:
module.fail_json(changed=False, msg=str(err))
updated_key_list = {}
if new_name or new_path:
c_path = iam.get_user(name).get_user_result.user['path']
if (name != new_name) or (c_path != new_path):
changed = True
try:
if not updated:
user = iam.update_user(
name, new_user_name=new_name, new_path=new_path).update_user_response.response_metadata
else:
user = iam.update_user(
name, new_path=new_path).update_user_response.response_metadata
user['updates'] = dict(
old_username=name, new_username=new_name, old_path=c_path, new_path=new_path)
except boto.exception.BotoServerError as err:
error_msg = boto_exception(err)
module.fail_json(changed=False, msg=str(err))
else:
if not updated:
name_change = True
if pwd:
try:
iam.update_login_profile(name, pwd)
changed = True
except boto.exception.BotoServerError:
try:
iam.create_login_profile(name, pwd)
changed = True
except boto.exception.BotoServerError as err:
error_msg = boto_exception(str(err))
if 'Password does not conform to the account password policy' in error_msg:
module.fail_json(changed=False, msg="Password doesn't conform to policy")
else:
module.fail_json(msg=error_msg)
try:
current_keys = [ck['access_key_id'] for ck in
iam.get_all_access_keys(name).list_access_keys_result.access_key_metadata]
status = [ck['status'] for ck in
iam.get_all_access_keys(name).list_access_keys_result.access_key_metadata]
key_qty = len(current_keys)
except boto.exception.BotoServerError as err:
error_msg = boto_exception(err)
if 'cannot be found' in error_msg and updated:
current_keys = [ck['access_key_id'] for ck in
iam.get_all_access_keys(new_name).list_access_keys_result.access_key_metadata]
status = [ck['status'] for ck in
iam.get_all_access_keys(new_name).list_access_keys_result.access_key_metadata]
name = new_name
else:
module.fail_json(changed=False, msg=str(err))
new_keys = []
if key_state == 'create':
try:
while key_count > key_qty:
new_keys.append(iam.create_access_key(
user_name=name).create_access_key_response.create_access_key_result.access_key)
key_qty += 1
changed = True
except boto.exception.BotoServerError as err:
module.fail_json(changed=False, msg=str(err))
if keys and key_state:
for access_key in keys:
if key_state in ('active', 'inactive'):
if access_key in current_keys:
for current_key, current_key_state in zip(current_keys, status):
if key_state != current_key_state.lower():
try:
iam.update_access_key(access_key, key_state.capitalize(), user_name=name)
changed = True
except boto.exception.BotoServerError as err:
module.fail_json(changed=False, msg=str(err))
else:
module.fail_json(msg="Supplied keys not found for %s. "
"Current keys: %s. "
"Supplied key(s): %s" %
(name, current_keys, keys)
)
if key_state == 'remove':
if access_key in current_keys:
try:
iam.delete_access_key(access_key, user_name=name)
except boto.exception.BotoServerError as err:
module.fail_json(changed=False, msg=str(err))
else:
changed = True
try:
final_keys, final_key_status = \
[ck['access_key_id'] for ck in
iam.get_all_access_keys(name).
list_access_keys_result.
access_key_metadata],\
[ck['status'] for ck in
iam.get_all_access_keys(name).
list_access_keys_result.
access_key_metadata]
except boto.exception.BotoServerError as err:
module.fail_json(changed=changed, msg=str(err))
for fk, fks in zip(final_keys, final_key_status):
updated_key_list.update({fk: fks})
return name_change, updated_key_list, changed, new_keys
def set_users_groups(module, iam, name, groups, updated=None,
new_name=None):
""" Sets groups for a user, will purge groups not explicitly passed, while
retaining pre-existing groups that also are in the new list.
"""
changed = False
if updated:
name = new_name
try:
orig_users_groups = [og['group_name'] for og in iam.get_groups_for_user(
name).list_groups_for_user_result.groups]
remove_groups = [
rg for rg in frozenset(orig_users_groups).difference(groups)]
new_groups = [
ng for ng in frozenset(groups).difference(orig_users_groups)]
except boto.exception.BotoServerError as err:
module.fail_json(changed=changed, msg=str(err))
else:
if len(orig_users_groups) > 0:
for new in new_groups:
iam.add_user_to_group(new, name)
for rm in remove_groups:
iam.remove_user_from_group(rm, name)
else:
for group in groups:
try:
iam.add_user_to_group(group, name)
except boto.exception.BotoServerError as err:
error_msg = boto_exception(err)
if ('The group with name %s cannot be found.' % group) in error_msg:
module.fail_json(changed=False, msg="Group %s doesn't exist" % group)
if len(remove_groups) > 0 or len(new_groups) > 0:
changed = True
return (groups, changed)
def create_group(module=None, iam=None, name=None, path=None):
changed = False
try:
iam.create_group(
name, path).create_group_response.create_group_result.group
except boto.exception.BotoServerError as err:
module.fail_json(changed=changed, msg=str(err))
else:
changed = True
return name, changed
def delete_group(module=None, iam=None, name=None):
changed = False
try:
iam.delete_group(name)
except boto.exception.BotoServerError as err:
error_msg = boto_exception(err)
if ('must delete policies first') in error_msg:
for policy in iam.get_all_group_policies(name).list_group_policies_result.policy_names:
iam.delete_group_policy(name, policy)
try:
iam.delete_group(name)
except boto.exception.BotoServerError as err:
error_msg = boto_exception(err)
if ('must delete policies first') in error_msg:
module.fail_json(changed=changed, msg="All inline policies have been removed. Though it appears"
"that %s has Managed Polices. This is not "
"currently supported by boto. Please detach the policies "
"through the console and try again." % name)
else:
module.fail_json(changed=changed, msg=str(error_msg))
else:
changed = True
else:
module.fail_json(changed=changed, msg=str(error_msg))
else:
changed = True
return changed, name
def update_group(module=None, iam=None, name=None, new_name=None, new_path=None):
changed = False
try:
current_group_path = iam.get_group(
name).get_group_response.get_group_result.group['path']
if new_path:
if current_group_path != new_path:
iam.update_group(name, new_path=new_path)
changed = True
if new_name:
if name != new_name:
iam.update_group(name, new_group_name=new_name, new_path=new_path)
changed = True
name = new_name
except boto.exception.BotoServerError as err:
module.fail_json(changed=changed, msg=str(err))
return changed, name, new_path, current_group_path
def create_role(module, iam, name, path, role_list, prof_list, trust_policy_doc):
changed = False
iam_role_result = None
instance_profile_result = None
try:
if name not in role_list:
changed = True
iam_role_result = iam.create_role(name,
assume_role_policy_document=trust_policy_doc,
path=path).create_role_response.create_role_result.role
if name not in prof_list:
instance_profile_result = iam.create_instance_profile(name, path=path) \
.create_instance_profile_response.create_instance_profile_result.instance_profile
iam.add_role_to_instance_profile(name, name)
else:
instance_profile_result = iam.get_instance_profile(name).get_instance_profile_response.get_instance_profile_result.instance_profile
except boto.exception.BotoServerError as err:
module.fail_json(changed=changed, msg=str(err))
else:
updated_role_list = list_all_roles(iam)
iam_role_result = iam.get_role(name).get_role_response.get_role_result.role
return changed, updated_role_list, iam_role_result, instance_profile_result
def delete_role(module, iam, name, role_list, prof_list):
changed = False
iam_role_result = None
instance_profile_result = None
try:
if name in role_list:
cur_ins_prof = [rp['instance_profile_name'] for rp in
iam.list_instance_profiles_for_role(name).
list_instance_profiles_for_role_result.
instance_profiles]
for profile in cur_ins_prof:
iam.remove_role_from_instance_profile(profile, name)
try:
iam.delete_role(name)
except boto.exception.BotoServerError as err:
error_msg = boto_exception(err)
if ('must detach all policies first') in error_msg:
for policy in iam.list_role_policies(name).list_role_policies_result.policy_names:
iam.delete_role_policy(name, policy)
try:
iam_role_result = iam.delete_role(name)
except boto.exception.BotoServerError as err:
error_msg = boto_exception(err)
if ('must detach all policies first') in error_msg:
module.fail_json(changed=changed, msg="All inline policies have been removed. Though it appears"
"that %s has Managed Polices. This is not "
"currently supported by boto. Please detach the policies "
"through the console and try again." % name)
else:
module.fail_json(changed=changed, msg=str(err))
else:
changed = True
else:
changed = True
for prof in prof_list:
if name == prof:
instance_profile_result = iam.delete_instance_profile(name)
except boto.exception.BotoServerError as err:
module.fail_json(changed=changed, msg=str(err))
else:
updated_role_list = list_all_roles(iam)
return changed, updated_role_list, iam_role_result, instance_profile_result
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
iam_type=dict(required=True, choices=['user', 'group', 'role']),
groups=dict(type='list', default=None, required=False),
state=dict(required=True, choices=['present', 'absent', 'update']),
password=dict(default=None, required=False, no_log=True),
update_password=dict(default='always', required=False, choices=['always', 'on_create']),
access_key_state=dict(default=None, required=False, choices=[
'active', 'inactive', 'create', 'remove',
'Active', 'Inactive', 'Create', 'Remove']),
access_key_ids=dict(type='list', default=None, required=False),
key_count=dict(type='int', default=1, required=False),
name=dict(default=None, required=False),
trust_policy_filepath=dict(default=None, required=False),
trust_policy=dict(type='dict', default=None, required=False),
new_name=dict(default=None, required=False),
path=dict(default='/', required=False),
new_path=dict(default=None, required=False)
)
)
module = AnsibleModule(
argument_spec=argument_spec,
mutually_exclusive=[['trust_policy', 'trust_policy_filepath']],
)
if not HAS_BOTO:
module.fail_json(msg='This module requires boto, please install it')
state = module.params.get('state').lower()
iam_type = module.params.get('iam_type').lower()
groups = module.params.get('groups')
name = module.params.get('name')
new_name = module.params.get('new_name')
password = module.params.get('password')
update_pw = module.params.get('update_password')
path = module.params.get('path')
new_path = module.params.get('new_path')
key_count = module.params.get('key_count')
key_state = module.params.get('access_key_state')
trust_policy = module.params.get('trust_policy')
trust_policy_filepath = module.params.get('trust_policy_filepath')
key_ids = module.params.get('access_key_ids')
if key_state:
key_state = key_state.lower()
if any([n in key_state for n in ['active', 'inactive']]) and not key_ids:
module.fail_json(changed=False, msg="At least one access key has to be defined in order"
" to use 'active' or 'inactive'")
if iam_type == 'user' and module.params.get('password') is not None:
pwd = module.params.get('password')
elif iam_type != 'user' and module.params.get('password') is not None:
module.fail_json(msg="a password is being specified when the iam_type "
"is not user. Check parameters")
else:
pwd = None
if iam_type != 'user' and (module.params.get('access_key_state') is not None or
module.params.get('access_key_id') is not None):
module.fail_json(msg="the IAM type must be user, when IAM access keys "
"are being modified. Check parameters")
if iam_type == 'role' and state == 'update':
module.fail_json(changed=False, msg="iam_type: role, cannot currently be updated, "
"please specify present or absent")
# check if trust_policy is present -- it can be inline JSON or a file path to a JSON file
if trust_policy_filepath:
try:
with open(trust_policy_filepath, 'r') as json_data:
trust_policy_doc = json.dumps(json.load(json_data))
except Exception as e:
module.fail_json(msg=str(e) + ': ' + trust_policy_filepath)
elif trust_policy:
try:
trust_policy_doc = json.dumps(trust_policy)
except Exception as e:
module.fail_json(msg=str(e) + ': ' + trust_policy)
else:
trust_policy_doc = None
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)
try:
if region:
iam = connect_to_aws(boto.iam, region, **aws_connect_kwargs)
else:
iam = boto.iam.connection.IAMConnection(**aws_connect_kwargs)
except boto.exception.NoAuthHandlerFound as e:
module.fail_json(msg=str(e))
result = {}
changed = False
try:
orig_group_list = list_all_groups(iam)
orig_user_list = list_all_users(iam)
orig_role_list = list_all_roles(iam)
orig_prof_list = list_all_instance_profiles(iam)
except boto.exception.BotoServerError as err:
module.fail_json(msg=err.message)
if iam_type == 'user':
been_updated = False
user_groups = None
user_exists = any([n in [name, new_name] for n in orig_user_list])
if user_exists:
current_path = iam.get_user(name).get_user_result.user['path']
if not new_path and current_path != path:
new_path = path
path = current_path
if state == 'present' and not user_exists and not new_name:
(meta, changed) = create_user(
module, iam, name, password, path, key_state, key_count)
keys = iam.get_all_access_keys(name).list_access_keys_result.\
access_key_metadata
if groups:
(user_groups, changed) = set_users_groups(
module, iam, name, groups, been_updated, new_name)
module.exit_json(
user_meta=meta, groups=user_groups, keys=keys, changed=changed)
elif state in ['present', 'update'] and user_exists:
if update_pw == 'on_create':
password = None
if name not in orig_user_list and new_name in orig_user_list:
been_updated = True
name_change, key_list, user_changed, new_key = update_user(
module, iam, name, new_name, new_path, key_state, key_count, key_ids, password, been_updated)
if new_key:
user_meta = {'access_keys': list(new_key)}
user_meta['access_keys'].extend(
[{'access_key_id': key, 'status': value} for key, value in key_list.items() if
key not in [it['access_key_id'] for it in new_key]])
else:
user_meta = {
'access_keys': [{'access_key_id': key, 'status': value} for key, value in key_list.items()]}
if name_change and new_name:
orig_name = name
name = new_name
if isinstance(groups, list):
user_groups, groups_changed = set_users_groups(
module, iam, name, groups, been_updated, new_name)
if groups_changed == user_changed:
changed = groups_changed
else:
changed = True
else:
changed = user_changed
if new_name and new_path:
module.exit_json(changed=changed, groups=user_groups, old_user_name=orig_name,
new_user_name=new_name, old_path=path, new_path=new_path, keys=key_list,
created_keys=new_key, user_meta=user_meta)
elif new_name and not new_path and not been_updated:
module.exit_json(
changed=changed, groups=user_groups, old_user_name=orig_name, new_user_name=new_name, keys=key_list,
created_keys=new_key, user_meta=user_meta)
elif new_name and not new_path and been_updated:
module.exit_json(
changed=changed, groups=user_groups, user_name=new_name, keys=key_list, key_state=key_state,
created_keys=new_key, user_meta=user_meta)
elif not new_name and new_path:
module.exit_json(
changed=changed, groups=user_groups, user_name=name, old_path=path, new_path=new_path,
keys=key_list, created_keys=new_key, user_meta=user_meta)
else:
module.exit_json(
changed=changed, groups=user_groups, user_name=name, keys=key_list, created_keys=new_key,
user_meta=user_meta)
elif state == 'update' and not user_exists:
module.fail_json(
msg="The user %s does not exist. No update made." % name)
elif state == 'absent':
if user_exists:
try:
set_users_groups(module, iam, name, '')
name, changed = delete_user(module, iam, name)
module.exit_json(deleted_user=name, changed=changed)
except Exception as ex:
module.fail_json(changed=changed, msg=str(ex))
else:
module.exit_json(
changed=False, msg="User %s is already absent from your AWS IAM users" % name)
elif iam_type == 'group':
group_exists = name in orig_group_list
if state == 'present' and not group_exists:
new_group, changed = create_group(module=module, iam=iam, name=name, path=path)
module.exit_json(changed=changed, group_name=new_group)
elif state in ['present', 'update'] and group_exists:
changed, updated_name, updated_path, cur_path = update_group(
module=module, iam=iam, name=name, new_name=new_name,
new_path=new_path)
if new_path and new_name:
module.exit_json(changed=changed, old_group_name=name,
new_group_name=updated_name, old_path=cur_path,
new_group_path=updated_path)
if new_path and not new_name:
module.exit_json(changed=changed, group_name=name,
old_path=cur_path,
new_group_path=updated_path)
if not new_path and new_name:
module.exit_json(changed=changed, old_group_name=name,
new_group_name=updated_name, group_path=cur_path)
if not new_path and not new_name:
module.exit_json(
changed=changed, group_name=name, group_path=cur_path)
elif state == 'update' and not group_exists:
module.fail_json(
changed=changed, msg="Update Failed. Group %s doesn't seem to exist!" % name)
elif state == 'absent':
if name in orig_group_list:
removed_group, changed = delete_group(module=module, iam=iam, name=name)
module.exit_json(changed=changed, delete_group=removed_group)
else:
module.exit_json(changed=changed, msg="Group already absent")
elif iam_type == 'role':
role_list = []
if state == 'present':
changed, role_list, role_result, instance_profile_result = create_role(
module, iam, name, path, orig_role_list, orig_prof_list, trust_policy_doc)
elif state == 'absent':
changed, role_list, role_result, instance_profile_result = delete_role(
module, iam, name, orig_role_list, orig_prof_list)
elif state == 'update':
module.fail_json(
changed=False, msg='Role update not currently supported by boto.')
module.exit_json(changed=changed, roles=role_list, role_result=role_result,
instance_profile_result=instance_profile_result)
if __name__ == '__main__':
main()
| kvar/ansible | lib/ansible/modules/cloud/amazon/iam.py | Python | gpl-3.0 | 35,543 |
#!/usr/bin/env python
#==============================================================================
# Copyright 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Amazon Software License (the "License"). You may not use
# this file except in compliance with the License. A copy of the License is
# located at
#
# http://aws.amazon.com/asl/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, express or
# implied. See the License for the specific language governing permissions
# and limitations under the License.
#==============================================================================
import logging
import time as _time
from lib.rds import rds_utils
from scli import config_file, prompt
from scli.constants import EbConfigFile, ParameterName, RdsDefault, ServiceDefault
from scli.exception import EBSCliException
from scli.operation.base import OperationBase, OperationResult
from scli.resources import AskConfirmationOpMessage, CommandType, ConfigFileMessage
from scli.terminal.base import TerminalBase
log = logging.getLogger('cli.op')
class ValidateParameterOperation(OperationBase):
''' Validate all required parameters and verify all have value'''
_input_parameters = set()
_output_parameters = set()
def execute(self, parameter_pool):
# Update parameter
self._update_timeout_thresholds(parameter_pool)
# Checking parameters
required_params = self._operation_queue.required_parameters
missing_params = required_params - parameter_pool.parameter_names
if len(missing_params) > 0:
raise EBSCliException(u'Missing required parameter. "{0}"'.format(missing_params))
log.debug(u'Finished gathering required parameter')
ret_result = OperationResult(self, None, None, None)
return ret_result
def _update_timeout_thresholds(self, parameter_pool):
parameter_pool.update(ParameterName.WaitForFinishTimeout,
parameter_pool.get_value(ParameterName.WaitForFinishTimeout)\
+ self._rds_time_out(parameter_pool))
parameter_pool.update(ParameterName.WaitForUpdateTimeout,
parameter_pool.get_value(ParameterName.WaitForUpdateTimeout)\
+ self._rds_time_out(parameter_pool))
def _rds_time_out(self, parameter_pool):
if parameter_pool.has(ParameterName.RdsEnabled)\
and parameter_pool.get_value(ParameterName.RdsEnabled):
return ServiceDefault.RDS_ADDITION_TIMEOUT_IN_SEC
else:
return 0
class AskConfirmationOperation(OperationBase):
''' Ask for user's confirmation'''
_input_parameters = set()
_output_parameters = set()
def execute(self, parameter_pool):
command = parameter_pool.get_value(ParameterName.Command)
self._probe_rds_change(parameter_pool, command)
if (parameter_pool.has(ParameterName.Force) \
and parameter_pool.get_value(ParameterName.Force) == ServiceDefault.ENABLED) \
or TerminalBase.ask_confirmation(AskConfirmationOpMessage.CommandConfirmation[command]):
ret_result = OperationResult(self, None, None, None)
return ret_result
else:
log.info(u'User cancelled command.')
raise EBSCliException()
def _probe_rds_change(self, parameter_pool, command):
if parameter_pool.has(ParameterName.ApplicationName)\
and parameter_pool.has(ParameterName.EnvironmentName):
app_name = parameter_pool.get_value(ParameterName.ApplicationName)
env_name = parameter_pool.get_value(ParameterName.EnvironmentName)
policy = rds_utils.is_rds_delete_to_snapshot(parameter_pool, app_name, env_name)
local_rds_switch = parameter_pool.get_value(ParameterName.RdsEnabled)
if policy is not None and not RdsDefault.del_policy_to_bool(policy):
if command == CommandType.UPDATE:
if local_rds_switch:
pass
else:
prompt.result(AskConfirmationOpMessage.CommandWarning[command])
else:
prompt.result(AskConfirmationOpMessage.CommandWarning[command])
class SleepOperation(OperationBase):
''' Idle sleep'''
_input_parameters = set()
_output_parameters = set()
def execute(self, parameter_pool):
create_request_id = parameter_pool.get_value(ParameterName.CreateEnvironmentRequestID)\
if parameter_pool.has(ParameterName.CreateEnvironmentRequestID) else None
delay = ServiceDefault.CREATE_ENV_POLL_DELAY if create_request_id is not None else 0
_time.sleep(delay)
ret_result = OperationResult(self, None, None, None)
return ret_result
class SanitizeBranchOperation(OperationBase):
''' Remove branch registrations if critical parameters are changed.'''
_input_parameters = set()
_output_parameters = set()
def execute(self, parameter_pool):
command = parameter_pool.get_value(ParameterName.Command)
if command == CommandType.INIT:
sanitize = False
for name, ori_name in EbConfigFile.BranchResetParameters.iteritems():
if parameter_pool.has(ori_name) and \
parameter_pool.get_value(name) != parameter_pool.get_value(ori_name):
sanitize = True
break
blast = False
if sanitize:
if parameter_pool.has(ParameterName.Branches):
parameter_pool.remove(ParameterName.Branches)
blast = True
if parameter_pool.has(ParameterName.BranchMapping):
parameter_pool.remove(ParameterName.BranchMapping)
blast = True
if blast:
prompt.error(ConfigFileMessage.BranchResetWarning);
class SanitizeRdsPasswordOperation(OperationBase):
''' Remove Rds master passwords from credential file'''
_input_parameters = set()
_output_parameters = set()
def execute(self, parameter_pool):
command = parameter_pool.get_value(ParameterName.Command)
if command == CommandType.DELETE:
            # Remove RDS master password from credential file
credential_file_loc = config_file.default_aws_credential_file_location()
# default environment
env_name = parameter_pool.get_value(ParameterName.EnvironmentName)
param_list = [rds_utils.password_key_name(env_name)]
# branch environment
if parameter_pool.has(ParameterName.Branches)\
and parameter_pool.get_value(ParameterName.Branches) is not None:
branches = parameter_pool.get_value(ParameterName.Branches)
for branch in branches.values():
env_name = branch[ParameterName.EnvironmentName]
param_list.append(rds_utils.password_key_name(env_name))
# Remove passwords
config_file.trim_aws_credential_file(credential_file_loc, param_list, True)
| JoaoVasques/aws-devtool | eb/macosx/python2.7/scli/operation/pseudo_operations.py | Python | apache-2.0 | 7,565 |
import sublime
import sublime_plugin
import os
from subprocess import call
class RubyMotionSpecCommand(sublime_plugin.WindowCommand):
def run(self):
s = sublime.load_settings("RubyMotionSpec.sublime-settings")
terminal = s.get("terminal")
if terminal == 'iTerm':
self.iterm_command()
elif terminal == 'Terminal':
self.terminal_command()
else:
self.sublime_command()
def current_spec(self):
return self.current_file().split('/')[-1].split('.')[0]
def current_file(self):
return self.window.active_view().file_name()
def current_dir(self):
return os.path.split(os.path.abspath(self.current_file()))[0]
def iterm_command(self):
command = """
tell application "iTerm"
tell the first terminal
tell the current session
write text "rake spec files=%s"
end tell
end tell
end tell
""" % (self.current_spec())
call(['osascript', '-e', command])
def terminal_command(self):
command = """
tell application "Terminal"
activate
set currentTab to do script "rake spec files=%s" in window 0
end tell
""" % (self.current_spec())
call(['osascript', '-e', command])
def sublime_command(self):
stdout = self.current_dir() + '/_stdout_spec'
stderr = self.current_dir() + '/_stderr_spec'
rake_spec = """
rake spec files=%s SIM_STDOUT_PATH=%s SIM_STDERR_PATH=%s
""" % (self.current_spec(), stdout, stderr)
show_spec = """
cat %s && cat %s
""" % (stderr, stdout)
clean_spec = """
rm %s %s
""" % (stderr, stdout)
command = rake_spec + show_spec + clean_spec
working_dir = self.current_dir()
self.window.run_command("exec", {
"cmd": [command],
"shell": True,
"working_dir": working_dir
}) | neocsr/sublime-RubyMotionSpec | ruby_motion_spec.py | Python | mit | 2,090 |
# -*- coding: utf-8 -*-
from __future__ import print_function
import os
import sys
from argparse import ArgumentParser
import regex
import requests
from .filters import style
from .utils import merge_dicts
def parse_arguments(args):
query = ArgumentParser(prog="poirot", description="""Poirot: Mind Your Language""")
query.add_argument("--url", "-u", dest="url", default="", action="store",
help="""The repository's git URL, e.g.
'https://github.com/dcgov/poirot.git'.""")
query.add_argument("--dir", "-d", dest="dir", default=os.getcwd(),
help="""The path to the local directory where the
git repo is located or should be stored;
defaults to the current directory.""")
query.add_argument("--term", "-t", dest="term", required=False, action="store",
help="""A single string or regular expression to search for.""")
query.add_argument("--patterns", "-p", dest="patterns", action="store",
help="""The path to the local file(s) containing strings
or regular expressions to match against, each
on a new line. Accepts a comma-separated list
of file paths.""")
query.add_argument("--output", "-o", dest="output", required=False,
help="""Output results as JSON to FILE.""")
query.add_argument("--revlist", "-rl", dest="revlist", required=False, default="HEAD^!",
help="""A comma-delimited list of revision (commit)
ranges to search. Defaults to HEAD^!. Specify
'all' to search the entire revision history.""")
query.add_argument("--before", "-b", dest="before", required=False,
help="""Search commits prior to a given date, e.g., Dec-12-2015""")
query.add_argument("--after", "-a", dest="after", required=False,
help="""Search commits after a given date, e.g., Jan-01-2015""")
query.add_argument("--author", "-au", dest="author", required=False,
help="""Restrict to commits made by an AUTHOR. An email
address is fine.""")
query.add_argument("--staged", "-st", dest="staged", action="store_true",
help="""Flag to search staged modifications, instead of
already committed ones.""")
query.add_argument("--verbose", "-v", dest="verbose", action="store_true",
help="""Flag to output colorful, verbose results.""")
parsed_args = query.parse_args(args)
formatted_args = format_arguments(parsed_args)
return formatted_args
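# Example invocation of the resulting command line (paths and revision range are hypothetical):
#   poirot --dir ./my-repo --revlist all --patterns poirot/patterns/default.txt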
def parse_patterns(path):
"""
Reads in patterns from pattern file at path
"""
result = {}
try:
if regex.search(r"^http[s]://", path):
response = requests.get(path)
if response.status_code == 200:
lines = response.text.split("\n")
else:
sys.exit(1)
else:
with open(path) as infile:
lines = infile.readlines()
label = None
for line in lines:
line = str(line).strip()
if line.startswith("#"):
label = line.lstrip("# ")
elif not line:
label = ""
else:
result[line] = label
except:
out = """Pattern file {file} does not exist.\n
Specify the correct path with --patterns""".format(file=path)
print(style(out, "red"))
return result
def format_arguments(args):
"""Cleans up arguments passed to argparse"""
def format_revlist():
if args.revlist == "all":
return ["--all"]
else:
return [revision.strip() for revision in args.revlist.split(",")]
def format_patterns():
patterns = {}
if args.term:
patterns[args.term] = None
try:
file_list = [path.strip() for path in args.patterns.split(",") if path.strip()]
for path in file_list:
patterns = merge_dicts(patterns, parse_patterns(path))
except AttributeError:
pass
if not patterns:
print("No patterns given! Using default pattern set.")
file_dir = os.path.dirname(os.path.realpath(__file__))
default_file = os.path.join(file_dir, "patterns/default.txt")
patterns = merge_dicts(patterns, parse_patterns(default_file))
return patterns
return {
"before": args.before,
"after": args.after,
"author": args.author,
"verbose": args.verbose,
"dir": args.dir,
"staged": args.staged,
"git_dir": args.dir + "/.git",
"repo_dir": args.dir,
"revlist": format_revlist(),
"git_url": args.url.rstrip("/"),
"patterns": format_patterns(),
"output": args.output
}
| emanuelfeld/poirot | poirot/parser.py | Python | mit | 5,131 |
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2016, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.maxDiff = None
filename = 'chart_axis20.xlsx'
test_dir = 'xlsxwriter/test/comparison/'
self.got_filename = test_dir + '_test_' + filename
self.exp_filename = test_dir + 'xlsx_files/' + filename
self.ignore_files = []
self.ignore_elements = {}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({'type': 'column'})
chart.axis_ids = [43572224, 43812352]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column('A1', data[0])
worksheet.write_column('B1', data[1])
worksheet.write_column('C1', data[2])
chart.add_series({'values': '=Sheet1!$A$1:$A$5'})
chart.add_series({'values': '=Sheet1!$B$1:$B$5'})
chart.add_series({'values': '=Sheet1!$C$1:$C$5'})
chart.set_x_axis({'label_position': 'next_to'})
chart.set_y_axis({'label_position': 'none'})
worksheet.insert_chart('E9', chart)
workbook.close()
self.assertExcelEqual()
| jkyeung/XlsxWriter | xlsxwriter/test/comparison/test_chart_axis20.py | Python | bsd-2-clause | 1,665 |
# vim: set ts=4 sw=4 et: coding=UTF-8
from .rpmsection import Section
class RpmPrep(Section):
'''
Try to simplify to %setup -q when possible.
Replace %patch with %patch0
'''
def add(self, line):
line = self._complete_cleanup(line)
line = self._cleanup_setup(line)
line = self._prepare_patch(line)
Section.add(self, line)
def _cleanup_setup(self, line):
"""
Remove the useless stuff from %setup line
"""
# NOTE: not using regexp as this covers 99% cases for now
if line.startswith('%setup'):
line = line.replace(' -qn', ' -q -n')
line = line.replace(' -q', '')
line = self.reg.re_setup.sub(' ', line)
line = self.strip_useless_spaces(line)
line = line.replace('%setup', '%setup -q')
return line
def _prepare_patch(self, line):
"""
Convert patchlines to something pretty
"""
# -p0 is default
line = line.replace('-p0', '')
# %patch0 is desired
if (line.startswith('%patch ') or line == '%patch') and '-P' not in line:
line = line.replace('%patch', '%patch0')
# convert the %patch -P 50 -p10 to %patch50 -p10
# this apply only if there is ONE -P on the line, not multiple ones
if self.reg.re_patch_prep.match(line):
match = self.reg.re_patch_prep.match(line)
line = self.strip_useless_spaces('%%patch%s %s %s' % (match.group(2), match.group(1), match.group(3)))
return line
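# Editor's illustrative sketch (not part of the original module): conversions
# performed by _prepare_patch on patch lines:
#     %patch -p0          ->  %patch0
#     %patch -P 50 -p10   ->  %patch50 -p10   (only when a single -P is present)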
| pombredanne/spec-cleaner | spec_cleaner/rpmprep.py | Python | bsd-3-clause | 1,581 |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'Citation.note'
db.delete_column('main_citation', 'note_id')
def backwards(self, orm):
# We cannot add back in field 'Citation.note'
raise RuntimeError(
"Cannot reverse this migration. 'Citation.note' and its values cannot be restored.")
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'main.alias': {
'Meta': {'unique_together': "(('topic', 'name'),)", 'object_name': 'Alias'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_alias_set'", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': "'80'"}),
'topic': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'aliases'", 'to': "orm['main.Topic']"})
},
'main.citation': {
'Meta': {'object_name': 'Citation'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_citation_set'", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'locator': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'citations'", 'to': "orm['main.Source']"})
},
'main.footnote': {
'Meta': {'object_name': 'Footnote'},
'content': ('editorsnotes.main.fields.XHTMLField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_footnote_set'", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'transcript': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'footnotes'", 'to': "orm['main.Transcript']"})
},
'main.note': {
'Meta': {'ordering': "['-last_updated']", 'object_name': 'Note'},
'content': ('editorsnotes.main.fields.XHTMLField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_note_set'", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'last_updater': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'last_to_update_note_set'", 'to': "orm['auth.User']"})
},
'main.scan': {
'Meta': {'ordering': "['ordering']", 'object_name': 'Scan'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_scan_set'", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'ordering': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'scans'", 'to': "orm['main.Source']"})
},
'main.source': {
'Meta': {'ordering': "['ordering']", 'object_name': 'Source'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_source_set'", 'to': "orm['auth.User']"}),
'description': ('editorsnotes.main.fields.XHTMLField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ordering': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'S'", 'max_length': '1'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'main.topic': {
'Meta': {'ordering': "['preferred_name']", 'object_name': 'Topic'},
'article': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'main_topic'", 'unique': 'True', 'null': 'True', 'to': "orm['main.Note']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_topic_set'", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'preferred_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': "'80'"}),
'related_topics': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'related_topics_rel_+'", 'blank': 'True', 'to': "orm['main.Topic']"}),
'slug': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': "'80'"})
},
'main.topicassignment': {
'Meta': {'unique_together': "(('topic', 'object_id'),)", 'object_name': 'TopicAssignment'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_topicassignment_set'", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'topic': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'assignments'", 'to': "orm['main.Topic']"})
},
'main.transcript': {
'Meta': {'object_name': 'Transcript'},
'content': ('editorsnotes.main.fields.XHTMLField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_transcript_set'", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'source': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'_transcript'", 'unique': 'True', 'to': "orm['main.Source']"})
},
'main.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'})
}
}
complete_apps = ['main']
| CENDARI/editorsnotes | editorsnotes/main/migrations/0028_auto__del_field_citation_note.py | Python | agpl-3.0 | 11,084 |
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from espressopp.check.System import *
| espressopp/espressopp | src/check/__init__.py | Python | gpl-3.0 | 900 |
"""Tests for the Risco event sensors."""
from homeassistant.components.risco import (
LAST_EVENT_TIMESTAMP_KEY,
CannotConnectError,
UnauthorizedError,
)
from homeassistant.components.risco.const import DOMAIN, EVENTS_COORDINATOR
from .util import TEST_CONFIG, setup_risco
from .util import two_zone_alarm # noqa: F401
from tests.async_mock import MagicMock, patch
from tests.common import MockConfigEntry
ENTITY_IDS = {
"Alarm": "sensor.risco_test_site_name_alarm_events",
"Status": "sensor.risco_test_site_name_status_events",
"Trouble": "sensor.risco_test_site_name_trouble_events",
"Other": "sensor.risco_test_site_name_other_events",
}
TEST_EVENTS = [
MagicMock(
time="2020-09-02T10:00:00Z",
category_id=4,
category_name="System Status",
type_id=16,
type_name="disarmed",
name="'user' disarmed 'partition'",
text="",
partition_id=0,
zone_id=None,
user_id=3,
group=None,
priority=2,
raw={},
),
MagicMock(
time="2020-09-02T09:00:00Z",
category_id=7,
category_name="Troubles",
type_id=36,
type_name="service needed",
name="Device Fault",
text="Service is needed.",
partition_id=None,
zone_id=None,
user_id=None,
group=None,
priority=1,
raw={},
),
MagicMock(
time="2020-09-02T08:00:00Z",
category_id=2,
category_name="Alarms",
type_id=3,
type_name="triggered",
name="Alarm is on",
text="Yes it is.",
partition_id=0,
zone_id=1,
user_id=None,
group=None,
priority=0,
raw={},
),
MagicMock(
time="2020-09-02T07:00:00Z",
category_id=4,
category_name="System Status",
type_id=119,
type_name="group arm",
name="You armed a group",
text="",
partition_id=0,
zone_id=None,
user_id=1,
group="C",
priority=2,
raw={},
),
MagicMock(
time="2020-09-02T06:00:00Z",
category_id=8,
category_name="Made up",
type_id=200,
type_name="also made up",
name="really made up",
text="",
partition_id=2,
zone_id=None,
user_id=1,
group=None,
priority=2,
raw={},
),
]
CATEGORIES_TO_EVENTS = {
"Alarm": 2,
"Status": 0,
"Trouble": 1,
"Other": 4,
}
async def test_cannot_connect(hass):
"""Test connection error."""
with patch(
"homeassistant.components.risco.RiscoAPI.login",
side_effect=CannotConnectError,
):
config_entry = MockConfigEntry(domain=DOMAIN, data=TEST_CONFIG)
config_entry.add_to_hass(hass)
await hass.config_entries.async_setup(config_entry.entry_id)
await hass.async_block_till_done()
registry = await hass.helpers.entity_registry.async_get_registry()
for id in ENTITY_IDS.values():
assert not registry.async_is_registered(id)
async def test_unauthorized(hass):
"""Test unauthorized error."""
with patch(
"homeassistant.components.risco.RiscoAPI.login",
side_effect=UnauthorizedError,
):
config_entry = MockConfigEntry(domain=DOMAIN, data=TEST_CONFIG)
config_entry.add_to_hass(hass)
await hass.config_entries.async_setup(config_entry.entry_id)
await hass.async_block_till_done()
registry = await hass.helpers.entity_registry.async_get_registry()
for id in ENTITY_IDS.values():
assert not registry.async_is_registered(id)
def _check_state(hass, category, entity_id):
event_index = CATEGORIES_TO_EVENTS[category]
event = TEST_EVENTS[event_index]
state = hass.states.get(entity_id)
assert state.state == event.time
assert state.attributes["category_id"] == event.category_id
assert state.attributes["category_name"] == event.category_name
assert state.attributes["type_id"] == event.type_id
assert state.attributes["type_name"] == event.type_name
assert state.attributes["name"] == event.name
assert state.attributes["text"] == event.text
assert state.attributes["partition_id"] == event.partition_id
assert state.attributes["zone_id"] == event.zone_id
assert state.attributes["user_id"] == event.user_id
assert state.attributes["group"] == event.group
assert state.attributes["priority"] == event.priority
assert state.attributes["raw"] == event.raw
if event_index == 2:
assert state.attributes["zone_entity_id"] == "binary_sensor.zone_1"
else:
assert "zone_entity_id" not in state.attributes
async def test_setup(hass, two_zone_alarm): # noqa: F811
"""Test entity setup."""
registry = await hass.helpers.entity_registry.async_get_registry()
for id in ENTITY_IDS.values():
assert not registry.async_is_registered(id)
with patch(
"homeassistant.components.risco.RiscoAPI.get_events",
return_value=TEST_EVENTS,
), patch(
"homeassistant.components.risco.Store.async_save",
) as save_mock:
entry = await setup_risco(hass)
await hass.async_block_till_done()
save_mock.assert_awaited_once_with(
{LAST_EVENT_TIMESTAMP_KEY: TEST_EVENTS[0].time}
)
for id in ENTITY_IDS.values():
assert registry.async_is_registered(id)
for category, entity_id in ENTITY_IDS.items():
_check_state(hass, category, entity_id)
coordinator = hass.data[DOMAIN][entry.entry_id][EVENTS_COORDINATOR]
with patch(
"homeassistant.components.risco.RiscoAPI.get_events", return_value=[]
) as events_mock, patch(
"homeassistant.components.risco.Store.async_load",
return_value={LAST_EVENT_TIMESTAMP_KEY: TEST_EVENTS[0].time},
):
await coordinator.async_refresh()
await hass.async_block_till_done()
events_mock.assert_awaited_once_with(TEST_EVENTS[0].time, 10)
for category, entity_id in ENTITY_IDS.items():
_check_state(hass, category, entity_id)
| tboyce021/home-assistant | tests/components/risco/test_sensor.py | Python | apache-2.0 | 6,156 |
#!/usr/bin/env python
"""GRR restful API rendering plugins."""
# pylint: disable=unused-import
from grr.gui.api_plugins import aff4
from grr.gui.api_plugins import artifact
from grr.gui.api_plugins import config
from grr.gui.api_plugins import docs
from grr.gui.api_plugins import hunt
from grr.gui.api_plugins import reflection
from grr.gui.api_plugins import stats
| wandec/grr | gui/api_plugins/__init__.py | Python | apache-2.0 | 368 |
import csv
patent_info = {"feature": [abstract,number,inventor,phone,[litigation,listt]}
say its highly litigated, or lowly litigated, you know. Say that analytic for a patent
view similarly litigated patents
yeah all that
| JessieSalas/WatsonApp | database.py | Python | mit | 225 |
# -*- encoding: utf-8 -*-
"""Programs for computing and visualizing empirical power as a function of
effect size in the spatial separate-classes model.
Author: Tuomas Puoliväli
Email: tuomas.puolivali@helsinki.fi
Last modified: 24th April 2019
License: Revised 3-clause BSD
WARNING: There is unfinished code and only partial testing has been
performed.
"""
from data import spatial_separate_classes_model
from fdr import lsu, tst
import matplotlib.pyplot as plt
import numpy as np
from reproducibility import fdr_rvalue, fwer_replicability
from scipy.optimize import curve_fit
import seaborn as sns
from util import (empirical_power, separate_classes_model_counts,
logistic_function)
from viz import plot_separate_classes_model
def separate_data(X, c1, c2):
"""Function for diving the dataset X into two separate classes.
Input arguments:
================
X : ndarray [n_rows, n_cols]
The entire dataset having 'n_rows' rows and 'n_cols' columns.
c1, c2 : ndarray [4, ]
The coordinates of the top left and bottom right corners of
the two classes.
More specifically, c1[0:2] are the x-coordinates and
c1[2:4] are the y-coordinates.
Output arguments:
=================
X_c1, X_c2 : ndarray
The separated datasets.
"""
"""Get the classes' coordinates."""
c1_x1, c1_x2, c1_y1, c1_y2 = c1
c2_x1, c2_x2, c2_y1, c2_y2 = c2
"""Separate the dataset into two parts."""
X_c1 = X[c1_y1:c1_y2, c1_x1:c1_x2]
X_c2 = X[c2_y1:c2_y2, c2_x1:c2_x2]
return X_c1, X_c2
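def _example_separate_data():
    """Editor's illustrative sketch (not part of the original module): split a
    45 x 90 map into the two 45 x 45 class regions, mirroring the coordinates
    passed in separate_classes_model_power below."""
    X = np.random.normal(size=(45, 90))
    X_c1, X_c2 = separate_data(X, [0, 45, 0, 45], [45, 90, 0, 45])
    # Class 1 occupies columns 0-44 and class 2 columns 45-89; both are 45 x 45.
    return X_c1.shape, X_c2.shape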
def separate_classes_model_power(deltas, n_iter=20, alpha=0.05, nl=45,
sl=15, method=lsu,
single_analysis=False):
"""Function for simulating data under the spatial separate-classes
model at various effect sizes.
Input arguments:
================
deltas : ndarray
The tested effect sizes are all possible effect size pairs
(delta1, delta2) among the given array.
n_iter : int
Number of repetitions of each simulation.
alpha : float
The desired critical level.
nl, sl : int
The side lengths of the signal and noise regions within a single
class.
method : function
The applied correction method.
single_analysis : bool
A flag for deciding whether to perform a single combined analysis
or two separate analyses.
"""
# TODO: make a proper function
n_deltas = len(deltas)
pwr = np.zeros([n_deltas, n_deltas, n_iter])
for ind in np.ndindex(n_deltas, n_deltas, n_iter):
delta1, delta2 = deltas[ind[0]], deltas[ind[1]]
X = spatial_separate_classes_model(delta1, delta2)[0]
"""Perform the multiple testing using either a single combined
analysis or two separate analyses. If two analyses are performed,
        it is assumed that the separate classes are known a priori by
        the experimenter. (However, it would be interesting to also test
        what happens when this assumption is incorrect.)
"""
if (single_analysis == True):
Y = method(X.flatten(), alpha)
Y = Y.reshape([nl, 2*nl])
else:
X_c1, X_c2 = separate_data(X, [0, nl, 0, nl],
[nl, 2*nl, 0, nl])
Y1, Y2 = (method(X_c1.flatten(), alpha),
method(X_c2.flatten(), alpha))
Y1, Y2 = Y1.reshape([nl, nl]), Y2.reshape([nl, nl])
Y = np.hstack([Y1, Y2])
"""Compute empirical power."""
tp, _, _, fn = separate_classes_model_counts(Y, nl, sl)
pwr[ind] = empirical_power(tp, 2*sl**2)
return np.mean(pwr, axis=2)
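def _example_power_map():
    """Editor's illustrative sketch (not part of the original module): compute
    a coarse 3 x 3 power map using the default lsu correction method; the
    result is an (n_deltas, n_deltas) array of empirical power values averaged
    over n_iter iterations."""
    deltas = np.linspace(0.5, 1.5, 3)
    return separate_classes_model_power(deltas, n_iter=2, method=lsu)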
def plot_separate_classes_model_power(effect_sizes, pwr):
"""Function for visualizing empirical power in the separate-classes
model as a function of the effect size at the first and second signal
regions.
Input arguments:
================
effect_sizes : ndarray
The tested effect sizes.
pwr : ndarray
The power at each combination of effect sizes.
Output arguments:
=================
fig : Figure
An instance of the matplotlib Figure class for further editing.
"""
sns.set_style('darkgrid')
fig = plt.figure(figsize=(8, 5))
ax = fig.add_subplot(111)
im = ax.imshow(pwr, origin='lower', cmap='viridis', interpolation='none',
vmin=0, vmax=1)
ax.grid(False)
# Only display every other <x/y>tick.
n_effect_sizes = len(effect_sizes)
ax.set_xticks(np.arange(0, n_effect_sizes, 2))
ax.set_yticks(np.arange(0, n_effect_sizes, 2))
ax.set_xticklabels(effect_sizes[0::2])
ax.set_yticklabels(effect_sizes[0::2])
ax.set_xlabel('Effect size $\Delta_1$')
ax.set_ylabel('Effect size $\Delta_2$')
return fig, im
def simulate_separate_classes_model(method):
"""Function for performing simulations using the separate-classes model
and visualizing the results.
Input arguments:
================
method : function
The applied correction method.
"""
"""Compute empirical power at the chosen effect sizes using the chosen
multiple testing method."""
single_analysis = True
effect_sizes = np.linspace(0.2, 2.4, 12)
pwr = separate_classes_model_power(effect_sizes, method=method,
single_analysis=single_analysis)
"""Visualize the results."""
fig, im = plot_separate_classes_model_power(effect_sizes, pwr)
fig.axes[0].set_title('Method: %s' % method.__name__)
fig.colorbar(im)
fig.tight_layout()
fig.axes[0].grid(False)
plt.show()
def simulate_single_separate_analyses():
"""Function for simulating data using the separate-classes model and
comparing the performance of single and separate analyses."""
effect_sizes = np.linspace(0.2, 2.4, 12)
n_iter = 5
alpha = 0.05
nl, sl = 45, 15
method = lsu
pwr1 = separate_classes_model_power(deltas=effect_sizes, n_iter=n_iter,
alpha=alpha, nl=nl, sl=sl, method=method,
single_analysis=True)
pwr2 = separate_classes_model_power(deltas=effect_sizes, n_iter=n_iter,
alpha=alpha, nl=nl, sl=sl, method=method,
single_analysis=False)
"""Visualize the results."""
sns.set_style('white')
fig = plt.figure(figsize=(8, 5))
ax1 = fig.add_subplot(121)
ax2 = fig.add_subplot(122)
ax1.imshow(pwr1, origin='lower', vmin=0, vmax=1)
ax2.imshow(pwr2, origin='lower', vmin=0, vmax=1)
#ax.plot(effect_sizes, pwr1[6, :], 'k')
#ax.plot(effect_sizes, pwr2[6, :], 'r')
ax1.set_xlabel('Effect size')
ax1.set_ylabel('Power')
fig.tight_layout()
plt.show()
| puolival/multipy | multipy/separate_classes_power.py | Python | bsd-3-clause | 6,959 |
#!/usr/bin/env python2.7
import re
import os
def obtain_forces(file_in):
lista = []
for line in file_in.readlines():
m = re.match("\s+\d+\s+([0-9.-]+)\s+([0-9.-]+)\s+([0-9.-]+)",line)
if m:
lista.append(float(m.group(1)))
lista.append(float(m.group(2)))
lista.append(float(m.group(3)))
return lista
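# Editor's illustrative note: obtain_forces expects lines of the form
#     "   1    0.001234   -0.005678    0.009876"
# (an atom index followed by three force components) and appends the three
# components of every matching line, in order, to the returned list.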
def error(fc,fc_ok):
dim1 = len(fc)
dim2 = len(fc_ok)
scr = 0
if dim1 != dim2:
print "Amount of forces in forces and forces.ok is different."
return -1
for num in range(dim1):
value = abs(fc[num] - fc_ok[num])
if value > 1e-3:
scr = -1
print "Error detected in Forces:"
print "Value in forces",fc[num]
print "Value in forces.ok",fc_ok[num]
return scr
def Check():
# Output
fc = []
is_file = os.path.isfile("forces")
if is_file == False:
print "The forces file is missing."
return -1
f = open("forces","r")
fc = obtain_forces(f)
f.close()
if not fc:
print "Error reading in forces."
return -1
is_file = os.path.isfile("forces.ok")
if is_file == False:
print "The forces.ok file is missing."
return -1
fcok = []
f = open("forces.ok","r")
fcok = obtain_forces(f)
f.close()
if not fcok:
print "Error reading in forces.ok."
return -1
ok_output = error(fc,fcok)
if ok_output != 0:
print "Test Forces: ERROR"
else:
print "Test Forces: OK"
| ramirezfranciscof/lio | test/tests_engine/forces.py | Python | gpl-2.0 | 1,489 |
import click
import logging
import time
from dotenv import load_dotenv, find_dotenv
from grazer.config import Config
from grazer.core import crawler
from grazer.core import scrapper
from grazer.util import time_convert, grouper
logger = logging.getLogger("Verata")
@click.group()
@click.option("--env", default=find_dotenv(), help="Environment file")
@click.option("--log_level",
default="INFO",
help="Defines a log level",
type=click.Choice(["DEBUG", "INFO", "TRACE"]))
@click.option("--debug",
default=False,
is_flag=True,
help="Shortcut for DEBUG log level")
@click.option("--output", help="All results goes here",
prompt="Enter output file name")
@click.option("--config", help="Configuration file", prompt="Enter config")
@click.pass_context
def main(ctx, env, log_level, debug, output, config):
if output is None:
logger.error("Please provide output file")
exit()
else:
click.echo(ctx)
ctx.meta["output"] = output
ctx.meta["config"] = config
if debug:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=getattr(logging, log_level))
load_dotenv(env)
@main.command()
@click.option("--link", help="Site url for scrapping")
@click.pass_context
def scrape(ctx, link):
cfg = Config(ctx.meta["config"])
output = ctx.meta["output"]
with open(output, "w") as f:
data = scrapper.fetch_page(link, cfg)
data = scrapper.scrape(data, cfg.mappings)
for title, info, meta in data:
f.write("{0}, {1}\n".format(title, info))
@main.command()
@click.option("--paginate",
help="Split results into pages by",
default=10,
type=int)
@click.option("--rest_interval",
help="How long to wait before fetching next page",
default="0s")
@click.pass_context
def crawl(ctx, paginate, rest_interval):
rest = time_convert(rest_interval)
cfg = Config(ctx.meta["config"])
output = ctx.meta["output"]
with open(output, "w") as f:
for chunk in grouper(paginate, crawler.create(cfg)):
if chunk is None:
continue
for record, link in chunk:
logging.debug("Record: {0} Link: {1}".format(record, link))
f.write("({0}, {1})\n".format(record, link))
if rest > 0:
time.sleep(rest)
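# Editor's illustrative CLI sketch (file names and URL are hypothetical); group
# options precede the subcommand:
#     python run.py --config site.yml --output results.txt crawl --paginate 20 --rest_interval 30s
#     python run.py --config site.yml --output page.txt scrape --link http://example.com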
if __name__ == "__main__":
main()
| CodersOfTheNight/verata | grazer/run.py | Python | mit | 2,536 |
# -*- coding: utf-8 -*-
"""
File: urls.py
Creator: MazeFX
Date: 11-7-2016
Main URL resolver, as provided by cookiecutter-django.
Added the following resolver patterns:
* ('/') -> pages.home_page
* ('/contact/') -> pages.contact_page
* ('/contact/email_sent/') -> pages.email_sent_page
"""
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
from django.views import defaults as default_views
from website.pages import views as page_views
urlpatterns = [
url(r'^$', page_views.home_page, name='home'),
url(r'^home_old', page_views.home_page_old, name='home_old'),
url(r'^a-frame/virtual-developers/$', page_views.aFrame_developers, name='aframe-developers'),
url(r'^portfolio/coming_soon/$', page_views.coming_soon_page, name='coming_soon'),
url(r'^contact/$', page_views.contact_page, name='contact'),
url(r'^contact/email_sent/$', page_views.email_sent_page, name='email_sent'),
url(r'^about/$', TemplateView.as_view(template_name='pages/about.html'), name='about'),
# Django Admin, use {% url 'admin:index' %}
url(settings.ADMIN_URL, include(admin.site.urls)),
# User management
url(r'^users/', include('website.users.urls', namespace='users')),
url(r'^accounts/', include('allauth.urls')),
# Your stuff: custom urls includes go here
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
import debug_toolbar
# This allows the error pages to be debugged during development, just visit
# these url in browser to see how these error pages look like.
urlpatterns += [
url(r'^aframe_test/(?P<num>[0-9]+)/$', page_views.aFrame_test_page, name='aframe'),
url(r'^400/$', default_views.bad_request, kwargs={'exception': Exception('Bad Request!')}),
url(r'^403/$', default_views.permission_denied, kwargs={'exception': Exception('Permission Denied')}),
url(r'^404/$', default_views.page_not_found, kwargs={'exception': Exception('Page not Found')}),
url(r'^500/$', default_views.server_error),
url(r'^__debug__/', include(debug_toolbar.urls)),
]
| MazeFX/cookiecutter_website_project | config/urls.py | Python | mit | 2,277 |
# encoding: utf-8
#
# Copyright (c) 2014 Dean Jackson <deanishe@deanishe.net>
#
# MIT Licence. See http://opensource.org/licenses/MIT
#
# Created on 2014-02-15
#
"""
The :class:`Workflow` object is the main interface to this library.
See :ref:`setup` in the :ref:`user-manual` for an example of how to set
up your Python script to best utilise the :class:`Workflow` object.
"""
from __future__ import print_function, unicode_literals
import os
import sys
import string
import re
import plistlib
import subprocess
import unicodedata
import shutil
import json
import cPickle
import pickle
import time
import logging
import logging.handlers
try:
import xml.etree.cElementTree as ET
except ImportError: # pragma: no cover
import xml.etree.ElementTree as ET
####################################################################
# Standard system icons
####################################################################
# These icons are default OS X icons. They are super-high quality, and
# will be familiar to users.
# This library uses `ICON_ERROR` when a workflow dies in flames, so
# in my own workflows, I use `ICON_WARNING` for less fatal errors
# (e.g. bad user input, no results etc.)
# The system icons are all in this directory. There are many more than
# are listed here
ICON_ROOT = '/System/Library/CoreServices/CoreTypes.bundle/Contents/Resources'
ICON_ACCOUNT = os.path.join(ICON_ROOT, 'Accounts.icns')
ICON_BURN = os.path.join(ICON_ROOT, 'BurningIcon.icns')
ICON_COLOR = os.path.join(ICON_ROOT, 'ProfileBackgroundColor.icns')
ICON_COLOUR = ICON_COLOR # Queen's English, if you please
# Shown when a workflow throws an error
ICON_ERROR = os.path.join(ICON_ROOT, 'AlertStopIcon.icns')
ICON_FAVORITE = os.path.join(ICON_ROOT, 'ToolbarFavoritesIcon.icns')
ICON_FAVOURITE = ICON_FAVORITE
ICON_GROUP = os.path.join(ICON_ROOT, 'GroupIcon.icns')
ICON_HELP = os.path.join(ICON_ROOT, 'HelpIcon.icns')
ICON_INFO = os.path.join(ICON_ROOT, 'ToolbarInfo.icns')
ICON_MUSIC = os.path.join(ICON_ROOT, 'ToolbarMusicFolderIcon.icns')
ICON_NETWORK = os.path.join(ICON_ROOT, 'GenericNetworkIcon.icns')
ICON_NOTE = os.path.join(ICON_ROOT, 'AlertNoteIcon.icns')
ICON_SETTINGS = os.path.join(ICON_ROOT, 'ToolbarAdvanced.icns')
ICON_SYNC = os.path.join(ICON_ROOT, 'Sync.icns')
ICON_TRASH = os.path.join(ICON_ROOT, 'TrashIcon.icns')
ICON_USER = os.path.join(ICON_ROOT, 'UserIcon.icns')
ICON_WARNING = os.path.join(ICON_ROOT, 'AlertCautionIcon.icns')
ICON_WEB = os.path.join(ICON_ROOT, 'BookmarkIcon.icns')
####################################################################
# non-ASCII to ASCII diacritic folding.
# Used by `fold_to_ascii` method
####################################################################
ASCII_REPLACEMENTS = {
'À': 'A',
'Á': 'A',
'Â': 'A',
'Ã': 'A',
'Ä': 'A',
'Å': 'A',
'Æ': 'AE',
'Ç': 'C',
'È': 'E',
'É': 'E',
'Ê': 'E',
'Ë': 'E',
'Ì': 'I',
'Í': 'I',
'Î': 'I',
'Ï': 'I',
'Ð': 'D',
'Ñ': 'N',
'Ò': 'O',
'Ó': 'O',
'Ô': 'O',
'Õ': 'O',
'Ö': 'O',
'Ø': 'O',
'Ù': 'U',
'Ú': 'U',
'Û': 'U',
'Ü': 'U',
'Ý': 'Y',
'Þ': 'Th',
'ß': 'ss',
'à': 'a',
'á': 'a',
'â': 'a',
'ã': 'a',
'ä': 'a',
'å': 'a',
'æ': 'ae',
'ç': 'c',
'è': 'e',
'é': 'e',
'ê': 'e',
'ë': 'e',
'ì': 'i',
'í': 'i',
'î': 'i',
'ï': 'i',
'ð': 'd',
'ñ': 'n',
'ò': 'o',
'ó': 'o',
'ô': 'o',
'õ': 'o',
'ö': 'o',
'ø': 'o',
'ù': 'u',
'ú': 'u',
'û': 'u',
'ü': 'u',
'ý': 'y',
'þ': 'th',
'ÿ': 'y',
'Ł': 'L',
'ł': 'l',
'Ń': 'N',
'ń': 'n',
'Ņ': 'N',
'ņ': 'n',
'Ň': 'N',
'ň': 'n',
'Ŋ': 'ng',
'ŋ': 'NG',
'Ō': 'O',
'ō': 'o',
'Ŏ': 'O',
'ŏ': 'o',
'Ő': 'O',
'ő': 'o',
'Œ': 'OE',
'œ': 'oe',
'Ŕ': 'R',
'ŕ': 'r',
'Ŗ': 'R',
'ŗ': 'r',
'Ř': 'R',
'ř': 'r',
'Ś': 'S',
'ś': 's',
'Ŝ': 'S',
'ŝ': 's',
'Ş': 'S',
'ş': 's',
'Š': 'S',
'š': 's',
'Ţ': 'T',
'ţ': 't',
'Ť': 'T',
'ť': 't',
'Ŧ': 'T',
'ŧ': 't',
'Ũ': 'U',
'ũ': 'u',
'Ū': 'U',
'ū': 'u',
'Ŭ': 'U',
'ŭ': 'u',
'Ů': 'U',
'ů': 'u',
'Ű': 'U',
'ű': 'u',
'Ŵ': 'W',
'ŵ': 'w',
'Ŷ': 'Y',
'ŷ': 'y',
'Ÿ': 'Y',
'Ź': 'Z',
'ź': 'z',
'Ż': 'Z',
'ż': 'z',
'Ž': 'Z',
'ž': 'z',
'ſ': 's',
'Α': 'A',
'Β': 'B',
'Γ': 'G',
'Δ': 'D',
'Ε': 'E',
'Ζ': 'Z',
'Η': 'E',
'Θ': 'Th',
'Ι': 'I',
'Κ': 'K',
'Λ': 'L',
'Μ': 'M',
'Ν': 'N',
'Ξ': 'Ks',
'Ο': 'O',
'Π': 'P',
'Ρ': 'R',
'Σ': 'S',
'Τ': 'T',
'Υ': 'U',
'Φ': 'Ph',
'Χ': 'Kh',
'Ψ': 'Ps',
'Ω': 'O',
'α': 'a',
'β': 'b',
'γ': 'g',
'δ': 'd',
'ε': 'e',
'ζ': 'z',
'η': 'e',
'θ': 'th',
'ι': 'i',
'κ': 'k',
'λ': 'l',
'μ': 'm',
'ν': 'n',
'ξ': 'x',
'ο': 'o',
'π': 'p',
'ρ': 'r',
'ς': 's',
'σ': 's',
'τ': 't',
'υ': 'u',
'φ': 'ph',
'χ': 'kh',
'ψ': 'ps',
'ω': 'o',
'А': 'A',
'Б': 'B',
'В': 'V',
'Г': 'G',
'Д': 'D',
'Е': 'E',
'Ж': 'Zh',
'З': 'Z',
'И': 'I',
'Й': 'I',
'К': 'K',
'Л': 'L',
'М': 'M',
'Н': 'N',
'О': 'O',
'П': 'P',
'Р': 'R',
'С': 'S',
'Т': 'T',
'У': 'U',
'Ф': 'F',
'Х': 'Kh',
'Ц': 'Ts',
'Ч': 'Ch',
'Ш': 'Sh',
'Щ': 'Shch',
'Ъ': "'",
'Ы': 'Y',
'Ь': "'",
'Э': 'E',
'Ю': 'Iu',
'Я': 'Ia',
'а': 'a',
'б': 'b',
'в': 'v',
'г': 'g',
'д': 'd',
'е': 'e',
'ж': 'zh',
'з': 'z',
'и': 'i',
'й': 'i',
'к': 'k',
'л': 'l',
'м': 'm',
'н': 'n',
'о': 'o',
'п': 'p',
'р': 'r',
'с': 's',
'т': 't',
'у': 'u',
'ф': 'f',
'х': 'kh',
'ц': 'ts',
'ч': 'ch',
'ш': 'sh',
'щ': 'shch',
'ъ': "'",
'ы': 'y',
'ь': "'",
'э': 'e',
'ю': 'iu',
'я': 'ia',
# 'ᴀ': '',
# 'ᴁ': '',
# 'ᴂ': '',
# 'ᴃ': '',
# 'ᴄ': '',
# 'ᴅ': '',
# 'ᴆ': '',
# 'ᴇ': '',
# 'ᴈ': '',
# 'ᴉ': '',
# 'ᴊ': '',
# 'ᴋ': '',
# 'ᴌ': '',
# 'ᴍ': '',
# 'ᴎ': '',
# 'ᴏ': '',
# 'ᴐ': '',
# 'ᴑ': '',
# 'ᴒ': '',
# 'ᴓ': '',
# 'ᴔ': '',
# 'ᴕ': '',
# 'ᴖ': '',
# 'ᴗ': '',
# 'ᴘ': '',
# 'ᴙ': '',
# 'ᴚ': '',
# 'ᴛ': '',
# 'ᴜ': '',
# 'ᴝ': '',
# 'ᴞ': '',
# 'ᴟ': '',
# 'ᴠ': '',
# 'ᴡ': '',
# 'ᴢ': '',
# 'ᴣ': '',
# 'ᴤ': '',
# 'ᴥ': '',
'ᴦ': 'G',
'ᴧ': 'L',
'ᴨ': 'P',
'ᴩ': 'R',
'ᴪ': 'PS',
'ẞ': 'Ss',
'Ỳ': 'Y',
'ỳ': 'y',
'Ỵ': 'Y',
'ỵ': 'y',
'Ỹ': 'Y',
'ỹ': 'y',
}
####################################################################
# Used by `Workflow.filter`
####################################################################
# Anchor characters in a name
#: Characters that indicate the beginning of a "word" in CamelCase
INITIALS = string.ascii_uppercase + string.digits
#: Split on non-letters, numbers
split_on_delimiters = re.compile('[^a-zA-Z0-9]').split
# Match filter flags
#: Match items that start with ``query``
MATCH_STARTSWITH = 1
#: Match items whose capital letters start with ``query``
MATCH_CAPITALS = 2
#: Match items with a component "word" that matches ``query``
MATCH_ATOM = 4
#: Match items whose initials (based on atoms) start with ``query``
MATCH_INITIALS_STARTSWITH = 8
#: Match items whose initials (based on atoms) contain ``query``
MATCH_INITIALS_CONTAIN = 16
#: Combination of :const:`MATCH_INITIALS_STARTSWITH` and
#: :const:`MATCH_INITIALS_CONTAIN`
MATCH_INITIALS = 24
#: Match items if ``query`` is a substring
MATCH_SUBSTRING = 32
#: Match items if all characters in ``query`` appear in the item in order
MATCH_ALLCHARS = 64
#: Combination of all other ``MATCH_*`` constants
MATCH_ALL = 127
####################################################################
# Used by `Workflow.check_update`
####################################################################
# Number of days to wait between checking for updates to the workflow
DEFAULT_UPDATE_FREQUENCY = 1
####################################################################
# Keychain access errors
####################################################################
class KeychainError(Exception):
"""Raised by methods :meth:`Workflow.save_password`,
:meth:`Workflow.get_password` and :meth:`Workflow.delete_password`
when ``security`` CLI app returns an unknown error code.
"""
class PasswordNotFound(KeychainError):
"""Raised by method :meth:`Workflow.get_password` when ``account``
is unknown to the Keychain.
"""
class PasswordExists(KeychainError):
"""Raised when trying to overwrite an existing account password.
You should never receive this error: it is used internally
by the :meth:`Workflow.save_password` method to know if it needs
to delete the old password first (a Keychain implementation detail).
"""
####################################################################
# Helper functions
####################################################################
def isascii(text):
"""Test if ``text`` contains only ASCII characters
:param text: text to test for ASCII-ness
:type text: ``unicode``
:returns: ``True`` if ``text`` contains only ASCII characters
:rtype: ``Boolean``
"""
try:
text.encode('ascii')
except UnicodeEncodeError:
return False
return True
####################################################################
# Implementation classes
####################################################################
class SerializerManager(object):
"""Contains registered serializers.
.. versionadded:: 1.8
A configured instance of this class is available at
``workflow.manager``.
Use :meth:`register()` to register new (or replace
existing) serializers, which you can specify by name when calling
:class:`Workflow` data storage methods.
See :ref:`manual-serialization` and :ref:`manual-persistent-data`
for further information.
"""
def __init__(self):
self._serializers = {}
def register(self, name, serializer):
"""Register ``serializer`` object under ``name``.
        Raises :class:`AttributeError` if ``serializer`` is invalid.
.. note::
``name`` will be used as the file extension of the saved files.
:param name: Name to register ``serializer`` under
:type name: ``unicode`` or ``str``
:param serializer: object with ``load()`` and ``dump()``
methods
"""
# Basic validation
getattr(serializer, 'load')
getattr(serializer, 'dump')
self._serializers[name] = serializer
def serializer(self, name):
"""Return serializer object for ``name`` or ``None`` if no such
serializer is registered
:param name: Name of serializer to return
:type name: ``unicode`` or ``str``
:returns: serializer object or ``None``
"""
return self._serializers.get(name)
def unregister(self, name):
"""Remove registered serializer with ``name``
Raises a :class:`ValueError` if there is no such registered
serializer.
:param name: Name of serializer to remove
:type name: ``unicode`` or ``str``
:returns: serializer object
"""
if name not in self._serializers:
raise ValueError('No such serializer registered : {}'.format(name))
serializer = self._serializers[name]
del self._serializers[name]
return serializer
@property
def serializers(self):
"""Return names of registered serializers"""
return sorted(self._serializers.keys())
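# Editor's illustrative sketch (hypothetical names): registering a custom
# serializer and selecting it for cached data.
#     class MySerializer(object):
#         @classmethod
#         def load(cls, file_obj):
#             ...
#         @classmethod
#         def dump(cls, obj, file_obj):
#             ...
#     manager.register('mydata', MySerializer)
#     Workflow().cache_serializer = 'mydata'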
class JSONSerializer(object):
"""Wrapper around :mod:`json`. Sets ``indent`` and ``encoding``.
.. versionadded:: 1.8
Use this serializer if you need readable data files. JSON doesn't
support Python objects as well as ``cPickle``/``pickle``, so be
careful which data you try to serialize as JSON.
"""
@classmethod
def load(cls, file_obj):
"""Load serialized object from open JSON file.
.. versionadded:: 1.8
:param file_obj: file handle
:type file_obj: ``file`` object
:returns: object loaded from JSON file
:rtype: object
"""
return json.load(file_obj)
@classmethod
def dump(cls, obj, file_obj):
"""Serialize object ``obj`` to open JSON file.
.. versionadded:: 1.8
:param obj: Python object to serialize
:type obj: JSON-serializable data structure
:param file_obj: file handle
:type file_obj: ``file`` object
"""
return json.dump(obj, file_obj, indent=2, encoding='utf-8')
class CPickleSerializer(object):
"""Wrapper around :mod:`cPickle`. Sets ``protocol``.
.. versionadded:: 1.8
This is the default serializer and the best combination of speed and
flexibility.
"""
@classmethod
def load(cls, file_obj):
"""Load serialized object from open pickle file.
.. versionadded:: 1.8
:param file_obj: file handle
:type file_obj: ``file`` object
:returns: object loaded from pickle file
:rtype: object
"""
return cPickle.load(file_obj)
@classmethod
def dump(cls, obj, file_obj):
"""Serialize object ``obj`` to open pickle file.
.. versionadded:: 1.8
:param obj: Python object to serialize
:type obj: Python object
:param file_obj: file handle
:type file_obj: ``file`` object
"""
return cPickle.dump(obj, file_obj, protocol=-1)
class PickleSerializer(object):
"""Wrapper around :mod:`pickle`. Sets ``protocol``.
.. versionadded:: 1.8
Use this serializer if you need to add custom pickling.
"""
@classmethod
def load(cls, file_obj):
"""Load serialized object from open pickle file.
.. versionadded:: 1.8
:param file_obj: file handle
:type file_obj: ``file`` object
:returns: object loaded from pickle file
:rtype: object
"""
return pickle.load(file_obj)
@classmethod
def dump(cls, obj, file_obj):
"""Serialize object ``obj`` to open pickle file.
.. versionadded:: 1.8
:param obj: Python object to serialize
:type obj: Python object
:param file_obj: file handle
:type file_obj: ``file`` object
"""
return pickle.dump(obj, file_obj, protocol=-1)
# Set up default manager and register built-in serializers
manager = SerializerManager()
manager.register('cpickle', CPickleSerializer)
manager.register('pickle', PickleSerializer)
manager.register('json', JSONSerializer)
class Item(object):
"""Represents a feedback item for Alfred. Generates Alfred-compliant
XML for a single item.
You probably shouldn't use this class directly, but via
:meth:`Workflow.add_item`. See :meth:`~Workflow.add_item`
for details of arguments.
"""
def __init__(self, title, subtitle='', modifier_subtitles=None,
arg=None, autocomplete=None, valid=False, uid=None,
icon=None, icontype=None, type=None, largetext=None,
copytext=None):
"""Arguments the same as for :meth:`Workflow.add_item`.
"""
self.title = title
self.subtitle = subtitle
self.modifier_subtitles = modifier_subtitles or {}
self.arg = arg
self.autocomplete = autocomplete
self.valid = valid
self.uid = uid
self.icon = icon
self.icontype = icontype
self.type = type
self.largetext = largetext
self.copytext = copytext
@property
def elem(self):
"""Create and return feedback item for Alfred.
:returns: :class:`ElementTree.Element <xml.etree.ElementTree.Element>`
instance for this :class:`Item` instance.
"""
attr = {}
if self.valid:
attr['valid'] = 'yes'
else:
attr['valid'] = 'no'
# Optional attributes
for name in ('uid', 'type', 'autocomplete'):
value = getattr(self, name, None)
if value:
attr[name] = value
root = ET.Element('item', attr)
ET.SubElement(root, 'title').text = self.title
ET.SubElement(root, 'subtitle').text = self.subtitle
# Add modifier subtitles
for mod in ('cmd', 'ctrl', 'alt', 'shift', 'fn'):
if mod in self.modifier_subtitles:
ET.SubElement(root, 'subtitle',
{'mod': mod}).text = self.modifier_subtitles[mod]
if self.arg:
ET.SubElement(root, 'arg').text = self.arg
# Add icon if there is one
if self.icon:
if self.icontype:
attr = dict(type=self.icontype)
else:
attr = {}
ET.SubElement(root, 'icon', attr).text = self.icon
if self.largetext:
ET.SubElement(root, 'text',
{'type': 'largetype'}).text = self.largetext
if self.copytext:
ET.SubElement(root, 'text',
{'type': 'copy'}).text = self.copytext
return root
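# Editor's illustrative note: for a minimal item such as
#     Item('Hello', subtitle='World', arg='hi', valid=True)
# the ``elem`` property yields XML along the lines of
#     <item valid="yes"><title>Hello</title><subtitle>World</subtitle><arg>hi</arg></item>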
class Settings(dict):
"""A dictionary that saves itself when changed.
Dictionary keys & values will be saved as a JSON file
at ``filepath``. If the file does not exist, the dictionary
(and settings file) will be initialised with ``defaults``.
:param filepath: where to save the settings
:type filepath: :class:`unicode`
:param defaults: dict of default settings
:type defaults: :class:`dict`
An appropriate instance is provided by :class:`Workflow` instances at
:attr:`Workflow.settings`.
"""
def __init__(self, filepath, defaults=None):
super(Settings, self).__init__()
self._filepath = filepath
self._nosave = False
if os.path.exists(self._filepath):
self._load()
elif defaults:
for key, val in defaults.items():
self[key] = val
self.save() # save default settings
def _load(self):
"""Load cached settings from JSON file `self._filepath`"""
self._nosave = True
with open(self._filepath, 'rb') as file_obj:
for key, value in json.load(file_obj, encoding='utf-8').items():
self[key] = value
self._nosave = False
def save(self):
"""Save settings to JSON file specified in ``self._filepath``
If you're using this class via :attr:`Workflow.settings`, which
you probably are, ``self._filepath`` will be ``settings.json``
in your workflow's data directory (see :attr:`~Workflow.datadir`).
"""
if self._nosave:
return
data = {}
for key, value in self.items():
data[key] = value
with open(self._filepath, 'wb') as file_obj:
json.dump(data, file_obj, sort_keys=True, indent=2,
encoding='utf-8')
# dict methods
def __setitem__(self, key, value):
super(Settings, self).__setitem__(key, value)
self.save()
def __delitem__(self, key):
super(Settings, self).__delitem__(key)
self.save()
def update(self, *args, **kwargs):
"""Override :class:`dict` method to save on update."""
super(Settings, self).update(*args, **kwargs)
self.save()
def setdefault(self, key, value=None):
"""Override :class:`dict` method to save on update."""
ret = super(Settings, self).setdefault(key, value)
self.save()
return ret
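# Editor's illustrative sketch (hypothetical path): every mutation persists the
# settings immediately, because __setitem__, __delitem__, update() and
# setdefault() all call save().
#     s = Settings('/tmp/settings.json', defaults={'foo': 'bar'})
#     s['count'] = 1        # written to /tmp/settings.json straight away
#     s.update(count=2)     # also persisted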
class Workflow(object):
"""Create new :class:`Workflow` instance.
:param default_settings: default workflow settings. If no settings file
exists, :class:`Workflow.settings` will be pre-populated with
``default_settings``.
:type default_settings: :class:`dict`
:param update_settings: settings for updating your workflow from GitHub.
This must be a :class:`dict` that contains ``github_slug`` and
``version`` keys. ``github_slug`` is of the form ``username/repo``
and ``version`` **must** correspond to the tag of a release.
See :ref:`updates` for more information.
:type update_settings: :class:`dict`
:param input_encoding: encoding of command line arguments
:type input_encoding: :class:`unicode`
:param normalization: normalisation to apply to CLI args.
See :meth:`Workflow.decode` for more details.
:type normalization: :class:`unicode`
:param capture_args: capture and act on ``workflow:*`` arguments. See
:ref:`Magic arguments <magic-arguments>` for details.
:type capture_args: :class:`Boolean`
:param libraries: sequence of paths to directories containing
libraries. These paths will be prepended to ``sys.path``.
:type libraries: :class:`tuple` or :class:`list`
"""
# Which class to use to generate feedback items. You probably
# won't want to change this
item_class = Item
def __init__(self, default_settings=None, update_settings=None,
input_encoding='utf-8', normalization='NFC',
capture_args=True, libraries=None):
self._default_settings = default_settings or {}
self._update_settings = update_settings or {}
self._input_encoding = input_encoding
self._normalizsation = normalization
self._capture_args = capture_args
self._workflowdir = None
self._settings_path = None
self._settings = None
self._bundleid = None
self._name = None
self._cache_serializer = 'cpickle'
self._data_serializer = 'cpickle'
# info.plist should be in the directory above this one
self._info_plist = self.workflowfile('info.plist')
self._info = None
self._info_loaded = False
self._logger = None
self._items = []
self._alfred_env = None
self._search_pattern_cache = {}
if libraries:
sys.path = libraries + sys.path
if update_settings:
self.check_update()
####################################################################
# API methods
####################################################################
# info.plist contents and alfred_* environment variables ----------
@property
def alfred_env(self):
"""Alfred's environmental variables minus the ``alfred_`` prefix.
.. versionadded:: 1.7
The variables Alfred 2.4+ exports are:
============================ =========================================
Variable Description
============================ =========================================
alfred_preferences Path to Alfred.alfredpreferences
(where your workflows and settings are
stored).
alfred_preferences_localhash Machine-specific preferences are stored
in ``Alfred.alfredpreferences/preferences/local/<hash>``
(see ``alfred_preferences`` above for
the path to ``Alfred.alfredpreferences``)
alfred_theme ID of selected theme
alfred_theme_background Background colour of selected theme in
format ``rgba(r,g,b,a)``
alfred_theme_subtext Show result subtext.
``0`` = Always,
``1`` = Alternative actions only,
``2`` = Selected result only,
``3`` = Never
alfred_version Alfred version number, e.g. ``'2.4'``
alfred_version_build Alfred build number, e.g. ``277``
alfred_workflow_bundleid Bundle ID, e.g.
``net.deanishe.alfred-mailto``
alfred_workflow_cache Path to workflow's cache directory
alfred_workflow_data Path to workflow's data directory
alfred_workflow_name Name of current workflow
alfred_workflow_uid UID of workflow
============================ =========================================
**Note:** all values are Unicode strings except ``version_build`` and
``theme_subtext``, which are integers.
:returns: ``dict`` of Alfred's environmental variables without the
``alfred_`` prefix, e.g. ``preferences``, ``workflow_data``.
"""
if self._alfred_env is not None:
return self._alfred_env
data = {}
for key in (
'alfred_preferences',
'alfred_preferences_localhash',
'alfred_theme',
'alfred_theme_background',
'alfred_theme_subtext',
'alfred_version',
'alfred_version_build',
'alfred_workflow_bundleid',
'alfred_workflow_cache',
'alfred_workflow_data',
'alfred_workflow_name',
'alfred_workflow_uid'):
value = os.getenv(key)
if isinstance(value, str):
if key in ('alfred_version_build', 'alfred_theme_subtext'):
value = int(value)
else:
value = self.decode(value)
data[key[7:]] = value
self._alfred_env = data
return self._alfred_env
@property
def info(self):
""":class:`dict` of ``info.plist`` contents."""
if not self._info_loaded:
self._load_info_plist()
return self._info
@property
def bundleid(self):
"""Workflow bundle ID from Alfred's environmental vars or ``info.plist``.
:returns: bundle ID
:rtype: ``unicode``
"""
if not self._bundleid:
if self.alfred_env.get('workflow_bundleid'):
self._bundleid = self.alfred_env.get('workflow_bundleid')
else:
self._bundleid = unicode(self.info['bundleid'], 'utf-8')
return self._bundleid
@property
def name(self):
"""Workflow name from Alfred's environmental vars or ``info.plist``.
:returns: workflow name
:rtype: ``unicode``
"""
if not self._name:
if self.alfred_env.get('workflow_name'):
self._name = self.decode(self.alfred_env.get('workflow_name'))
else:
self._name = self.decode(self.info['name'])
return self._name
# Workflow utility methods -----------------------------------------
@property
def args(self):
"""Return command line args as normalised unicode.
Args are decoded and normalised via :meth:`~Workflow.decode`.
The encoding and normalisation are the ``input_encoding`` and
``normalization`` arguments passed to :class:`Workflow` (``UTF-8``
and ``NFC`` are the defaults).
If :class:`Workflow` is called with ``capture_args=True``
(the default), :class:`Workflow` will look for certain
``workflow:*`` args and, if found, perform the corresponding
actions and exit the workflow.
See :ref:`Magic arguments <magic-arguments>` for details.
"""
msg = None
args = [self.decode(arg) for arg in sys.argv[1:]]
if len(args) and self._capture_args:
if 'workflow:openlog' in args:
msg = 'Opening workflow log file'
self.open_log()
elif 'workflow:reset' in args:
self.reset()
msg = 'Reset workflow'
elif 'workflow:delcache' in args:
self.clear_cache()
msg = 'Deleted workflow cache'
elif 'workflow:deldata' in args:
self.clear_data()
msg = 'Deleted workflow data'
elif 'workflow:delsettings' in args:
self.clear_settings()
msg = 'Deleted workflow settings'
elif 'workflow:openworkflow' in args:
msg = 'Opening workflow directory'
self.open_workflowdir()
elif 'workflow:opendata' in args:
msg = 'Opening workflow data directory'
self.open_datadir()
elif 'workflow:opencache' in args:
msg = 'Opening workflow cache directory'
self.open_cachedir()
elif 'workflow:openterm' in args:
msg = 'Opening workflow root directory in Terminal'
self.open_terminal()
elif 'workflow:foldingon' in args:
msg = 'Diacritics will always be folded'
self.settings['__workflow_diacritic_folding'] = True
elif 'workflow:foldingoff' in args:
msg = 'Diacritics will never be folded'
self.settings['__workflow_diacritic_folding'] = False
elif 'workflow:foldingdefault' in args:
msg = 'Diacritics folding reset'
if '__workflow_diacritic_folding' in self.settings:
del self.settings['__workflow_diacritic_folding']
elif 'workflow:update' in args:
if self.start_update():
msg = 'Downloading and installing update ...'
else:
msg = 'No update available'
if msg:
self.logger.debug(msg)
if not sys.stdout.isatty(): # Show message in Alfred
self.add_item(msg, valid=False, icon=ICON_INFO)
self.send_feedback()
sys.exit(0)
return args
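    # Editor's illustrative note ("yourkeyword" is hypothetical): with
    # capture_args enabled, typing e.g. "yourkeyword workflow:openlog" or
    # "yourkeyword workflow:delcache" into Alfred triggers the corresponding
    # action above and exits the workflow before normal arguments are returned.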
@property
def cachedir(self):
"""Path to workflow's cache directory.
The cache directory is a subdirectory of Alfred's own cache directory in
``~/Library/Caches``. The full path is:
``~/Library/Caches/com.runningwithcrayons.Alfred-2/Workflow Data/<bundle id>``
:returns: full path to workflow's cache directory
:rtype: ``unicode``
"""
if self.alfred_env.get('workflow_cache'):
dirpath = self.alfred_env.get('workflow_cache')
else:
dirpath = os.path.join(
os.path.expanduser(
'~/Library/Caches/com.runningwithcrayons.Alfred-2/'
'Workflow Data/'),
self.bundleid)
return self._create(dirpath)
@property
def datadir(self):
"""Path to workflow's data directory.
The data directory is a subdirectory of Alfred's own data directory in
``~/Library/Application Support``. The full path is:
``~/Library/Application Support/Alfred 2/Workflow Data/<bundle id>``
:returns: full path to workflow data directory
:rtype: ``unicode``
"""
if self.alfred_env.get('workflow_data'):
dirpath = self.alfred_env.get('workflow_data')
else:
dirpath = os.path.join(os.path.expanduser(
'~/Library/Application Support/Alfred 2/Workflow Data/'),
self.bundleid)
return self._create(dirpath)
@property
def workflowdir(self):
"""Path to workflow's root directory (where ``info.plist`` is).
:returns: full path to workflow root directory
:rtype: ``unicode``
"""
if not self._workflowdir:
# Try the working directory first, then the directory
# the library is in. CWD will be the workflow root if
# a workflow is being run in Alfred
candidates = [
os.path.abspath(os.getcwdu()),
os.path.dirname(os.path.abspath(os.path.dirname(__file__)))]
# climb the directory tree until we find `info.plist`
for dirpath in candidates:
# Ensure directory path is Unicode
dirpath = self.decode(dirpath)
while True:
if os.path.exists(os.path.join(dirpath, 'info.plist')):
self._workflowdir = dirpath
break
elif dirpath == '/':
# no `info.plist` found
break
# Check the parent directory
dirpath = os.path.dirname(dirpath)
# No need to check other candidates
if self._workflowdir:
break
if not self._workflowdir:
raise IOError("'info.plist' not found in directory tree")
return self._workflowdir
def cachefile(self, filename):
"""Return full path to ``filename`` within your workflow's
:attr:`cache directory <Workflow.cachedir>`.
:param filename: basename of file
:type filename: ``unicode``
:returns: full path to file within cache directory
:rtype: ``unicode``
"""
return os.path.join(self.cachedir, filename)
def datafile(self, filename):
"""Return full path to ``filename`` within your workflow's
:attr:`data directory <Workflow.datadir>`.
:param filename: basename of file
:type filename: ``unicode``
:returns: full path to file within data directory
:rtype: ``unicode``
"""
return os.path.join(self.datadir, filename)
def workflowfile(self, filename):
"""Return full path to ``filename`` in workflow's root dir
(where ``info.plist`` is).
:param filename: basename of file
:type filename: ``unicode``
        :returns: full path to file within workflow root directory
:rtype: ``unicode``
"""
return os.path.join(self.workflowdir, filename)
@property
def logfile(self):
"""Return path to logfile
:returns: path to logfile within workflow's cache directory
:rtype: ``unicode``
"""
return self.cachefile('%s.log' % self.bundleid)
@property
def logger(self):
"""Create and return a logger that logs to both console and
a log file.
Use :meth:`open_log` to open the log file in Console.
:returns: an initialised :class:`~logging.Logger`
"""
if self._logger:
return self._logger
# Initialise new logger and optionally handlers
logger = logging.getLogger('workflow')
if not len(logger.handlers): # Only add one set of handlers
logfile = logging.handlers.RotatingFileHandler(
self.logfile,
maxBytes=1024*1024,
backupCount=0)
console = logging.StreamHandler()
fmt = logging.Formatter(
'%(asctime)s %(filename)s:%(lineno)s'
' %(levelname)-8s %(message)s',
datefmt='%H:%M:%S')
logfile.setFormatter(fmt)
console.setFormatter(fmt)
logger.addHandler(logfile)
logger.addHandler(console)
logger.setLevel(logging.DEBUG)
self._logger = logger
return self._logger
@logger.setter
def logger(self, logger):
"""Set a custom logger.
:param logger: The logger to use
:type logger: `~logging.Logger` instance
"""
self._logger = logger
@property
def settings_path(self):
"""Path to settings file within workflow's data directory.
:returns: path to ``settings.json`` file
:rtype: ``unicode``
"""
if not self._settings_path:
self._settings_path = self.datafile('settings.json')
return self._settings_path
@property
def settings(self):
"""Return a dictionary subclass that saves itself when changed.
See :ref:`manual-settings` in the :ref:`user-manual` for more
information on how to use :attr:`settings` and **important
limitations** on what it can do.
:returns: :class:`~workflow.workflow.Settings` instance
initialised from the data in JSON file at
:attr:`settings_path` or if that doesn't exist, with the
``default_settings`` :class:`dict` passed to
:class:`Workflow` on instantiation.
:rtype: :class:`~workflow.workflow.Settings` instance
"""
if not self._settings:
self._settings = Settings(self.settings_path,
self._default_settings)
return self._settings
@property
def cache_serializer(self):
"""Name of default cache serializer.
.. versionadded:: 1.8
This serializer is used by :meth:`cache_data()` and
:meth:`cached_data()`
See :class:`SerializerManager` for details.
:returns: serializer name
:rtype: ``unicode``
"""
return self._cache_serializer
@cache_serializer.setter
def cache_serializer(self, serializer_name):
"""Set the default cache serialization format.
.. versionadded:: 1.8
This serializer is used by :meth:`cache_data()` and
:meth:`cached_data()`
        The specified serializer must already be registered with the
:class:`SerializerManager` at `~workflow.workflow.manager`,
otherwise a :class:`ValueError` will be raised.
:param serializer_name: Name of default serializer to use.
        :type serializer_name: ``unicode``
"""
if manager.serializer(serializer_name) is None:
raise ValueError(
'Unknown serializer : `{}`. Register your serializer '
'with `manager` first.'.format(serializer_name))
self.logger.debug(
'default cache serializer set to `{}`'.format(serializer_name))
self._cache_serializer = serializer_name
@property
def data_serializer(self):
"""Name of default data serializer.
.. versionadded:: 1.8
This serializer is used by :meth:`store_data()` and
:meth:`stored_data()`
See :class:`SerializerManager` for details.
:returns: serializer name
:rtype: ``unicode``
"""
return self._data_serializer
@data_serializer.setter
def data_serializer(self, serializer_name):
"""Set the default cache serialization format.
.. versionadded:: 1.8
This serializer is used by :meth:`store_data()` and
:meth:`stored_data()`
        The specified serializer must already be registered with the
:class:`SerializerManager` at `~workflow.workflow.manager`,
otherwise a :class:`ValueError` will be raised.
:param serializer_name: Name of serializer to use by default.
"""
if manager.serializer(serializer_name) is None:
raise ValueError(
'Unknown serializer : `{}`. Register your serializer '
'with `manager` first.'.format(serializer_name))
self.logger.debug(
'default data serializer set to `{}`'.format(serializer_name))
self._data_serializer = serializer_name
def stored_data(self, name):
"""Retrieve data from data directory. Returns ``None`` if there
are no data stored.
.. versionadded:: 1.8
:param name: name of datastore
"""
metadata_path = self.datafile('.{}.alfred-workflow'.format(name))
if not os.path.exists(metadata_path):
self.logger.debug('No data stored for `{}`'.format(name))
return None
with open(metadata_path, 'rb') as file_obj:
serializer_name = file_obj.read().strip()
serializer = manager.serializer(serializer_name)
if serializer is None:
raise ValueError(
'Unknown serializer `{}`. Register a corresponding serializer '
'with `manager.register()` to load this data.'.format(
serializer_name))
self.logger.debug('Data `{}` stored in `{}` format'.format(
name, serializer_name))
filename = '{}.{}'.format(name, serializer_name)
data_path = self.datafile(filename)
if not os.path.exists(data_path):
self.logger.debug('No data stored for `{}`'.format(name))
if os.path.exists(metadata_path):
os.unlink(metadata_path)
return None
with open(data_path, 'rb') as file_obj:
data = serializer.load(file_obj)
self.logger.debug('Stored data loaded from : {}'.format(data_path))
return data
def store_data(self, name, data, serializer=None):
"""Save data to data directory.
.. versionadded:: 1.8
If ``data`` is ``None``, the datastore will be deleted.
:param name: name of datastore
:param data: object(s) to store. **Note:** some serializers
            can only handle certain types of data.
:param serializer: name of serializer to use. If no serializer
is specified, the default will be used. See
:class:`SerializerManager` for more information.
:returns: data in datastore or ``None``
"""
serializer_name = serializer or self.data_serializer
if serializer_name == 'json' and name == 'settings':
raise ValueError(
'Cannot save data to `settings` with format `json`. '
"This would overwrite Alfred-Workflow's settings file.")
serializer = manager.serializer(serializer_name)
if serializer is None:
raise ValueError(
'Invalid serializer `{}`. Register your serializer with '
'`manager.register()` first.'.format(serializer_name))
# In order for `stored_data()` to be able to load data stored with
# an arbitrary serializer, yet still have meaningful file extensions,
# the format (i.e. extension) is saved to an accompanying file
metadata_path = self.datafile('.{}.alfred-workflow'.format(name))
filename = '{}.{}'.format(name, serializer_name)
data_path = self.datafile(filename)
if data is None: # Delete cached data
for path in (metadata_path, data_path):
if os.path.exists(path):
os.unlink(path)
self.logger.debug('Deleted data file : {}'.format(path))
return
# Save file extension
with open(metadata_path, 'wb') as file_obj:
file_obj.write(serializer_name)
with open(data_path, 'wb') as file_obj:
serializer.dump(data, file_obj)
self.logger.debug('Stored data saved at : {}'.format(data_path))
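    # Usage sketch -- hypothetical names, not taken from this file: the pair
    # `store_data()` / `stored_data()` acts as a small persistent store in the
    # data directory.
    #
    #     wf.store_data('bookmarks', [{'title': 'Example',
    #                                  'url': 'http://example.com'}])
    #     bookmarks = wf.stored_data('bookmarks')   # -> the list, or None
    #     wf.store_data('bookmarks', None)          # deletes the datastore
    #
    # Pass `serializer='json'` for a human-readable file; the default is
    # whatever `data_serializer` is set to.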
def cached_data(self, name, data_func=None, max_age=60):
"""Retrieve data from cache or re-generate and re-cache data if
        stale/non-existent. If ``max_age`` is 0, return cached data no
matter how old.
:param name: name of datastore
:param data_func: function to (re-)generate data.
:type data_func: ``callable``
:param max_age: maximum age of cached data in seconds
:type max_age: ``int``
:returns: cached data, return value of ``data_func`` or ``None``
if ``data_func`` is not set
"""
serializer = manager.serializer(self.cache_serializer)
cache_path = self.cachefile('%s.%s' % (name, self.cache_serializer))
age = self.cached_data_age(name)
if (age < max_age or max_age == 0) and os.path.exists(cache_path):
with open(cache_path, 'rb') as file_obj:
self.logger.debug('Loading cached data from : %s',
cache_path)
return serializer.load(file_obj)
if not data_func:
return None
data = data_func()
self.cache_data(name, data)
return data
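    # Usage sketch -- `fetch_posts` is a hypothetical callable, not part of
    # this module: `cached_data()` re-runs the callable only when the cached
    # copy is older than `max_age` seconds.
    #
    #     def fetch_posts():
    #         return expensive_api_call()   # placeholder for real work
    #
    #     posts = wf.cached_data('posts', fetch_posts, max_age=600)
    #
    # With `max_age=0` an existing cache is returned regardless of its age.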
def cache_data(self, name, data):
"""Save ``data`` to cache under ``name``.
If ``data`` is ``None``, the corresponding cache file will be
deleted.
:param name: name of datastore
:param data: data to store. This may be any object supported by
the cache serializer
"""
serializer = manager.serializer(self.cache_serializer)
cache_path = self.cachefile('%s.%s' % (name, self.cache_serializer))
if data is None:
if os.path.exists(cache_path):
os.unlink(cache_path)
self.logger.debug('Deleted cache file : %s', cache_path)
return
with open(cache_path, 'wb') as file_obj:
serializer.dump(data, file_obj)
self.logger.debug('Cached data saved at : %s', cache_path)
def cached_data_fresh(self, name, max_age):
"""Is data cached at `name` less than `max_age` old?
:param name: name of datastore
:param max_age: maximum age of data in seconds
:type max_age: ``int``
:returns: ``True`` if data is less than ``max_age`` old, else
``False``
"""
age = self.cached_data_age(name)
if not age:
return False
return age < max_age
def cached_data_age(self, name):
"""Return age of data cached at `name` in seconds or 0 if
cache doesn't exist
:param name: name of datastore
:type name: ``unicode``
:returns: age of datastore in seconds
:rtype: ``int``
"""
cache_path = self.cachefile('%s.%s' % (name, self.cache_serializer))
if not os.path.exists(cache_path):
return 0
return time.time() - os.stat(cache_path).st_mtime
def filter(self, query, items, key=lambda x: x, ascending=False,
include_score=False, min_score=0, max_results=0,
match_on=MATCH_ALL, fold_diacritics=True):
"""Fuzzy search filter. Returns list of ``items`` that match ``query``.
``query`` is case-insensitive. Any item that does not contain the
entirety of ``query`` is rejected.
.. warning::
If ``query`` is an empty string or contains only whitespace,
a :class:`ValueError` will be raised.
:param query: query to test items against
:type query: ``unicode``
:param items: iterable of items to test
:type items: ``list`` or ``tuple``
:param key: function to get comparison key from ``items``.
Must return a ``unicode`` string. The default simply returns
the item.
:type key: ``callable``
:param ascending: set to ``True`` to get worst matches first
:type ascending: ``Boolean``
:param include_score: Useful for debugging the scoring algorithm.
If ``True``, results will be a list of tuples
``(item, score, rule)``.
:type include_score: ``Boolean``
:param min_score: If non-zero, ignore results with a score lower
than this.
:type min_score: ``int``
:param max_results: If non-zero, prune results list to this length.
:type max_results: ``int``
:param match_on: Filter option flags. Bitwise-combined list of
``MATCH_*`` constants (see below).
:type match_on: ``int``
:param fold_diacritics: Convert search keys to ASCII-only
characters if ``query`` only contains ASCII characters.
:type fold_diacritics: ``Boolean``
:returns: list of ``items`` matching ``query`` or list of
``(item, score, rule)`` `tuples` if ``include_score`` is ``True``.
``rule`` is the ``MATCH_*`` rule that matched the item.
:rtype: ``list``
**Matching rules**
By default, :meth:`filter` uses all of the following flags (i.e.
:const:`MATCH_ALL`). The tests are always run in the given order:
1. :const:`MATCH_STARTSWITH` : Item search key startswith ``query`` (case-insensitive).
2. :const:`MATCH_CAPITALS` : The list of capital letters in item search key starts with ``query`` (``query`` may be lower-case). E.g., ``of`` would match ``OmniFocus``, ``gc`` would match ``Google Chrome``
3. :const:`MATCH_ATOM` : Search key is split into "atoms" on non-word characters (.,-,' etc.). Matches if ``query`` is one of these atoms (case-insensitive).
4. :const:`MATCH_INITIALS_STARTSWITH` : Initials are the first characters of the above-described "atoms" (case-insensitive).
5. :const:`MATCH_INITIALS_CONTAIN` : ``query`` is a substring of the above-described initials.
6. :const:`MATCH_INITIALS` : Combination of (4) and (5).
7. :const:`MATCH_SUBSTRING` : Match if ``query`` is a substring of item search key (case-insensitive).
8. :const:`MATCH_ALLCHARS` : Matches if all characters in ``query`` appear in item search key in the same order (case-insensitive).
9. :const:`MATCH_ALL` : Combination of all the above.
:const:`MATCH_ALLCHARS` is considerably slower than the other tests and
provides much less accurate results.
**Examples:**
To ignore :const:`MATCH_ALLCHARS` (tends to provide the worst
matches and is expensive to run), use
``match_on=MATCH_ALL ^ MATCH_ALLCHARS``.
To match only on capitals, use ``match_on=MATCH_CAPITALS``.
To match only on startswith and substring, use
``match_on=MATCH_STARTSWITH | MATCH_SUBSTRING``.
**Diacritic folding**
.. versionadded:: 1.3
If ``fold_diacritics`` is ``True`` (the default), and ``query``
contains only ASCII characters, non-ASCII characters in search keys
will be converted to ASCII equivalents (e.g. **ü** -> **u**,
**ß** -> **ss**, **é** -> **e**).
See :const:`ASCII_REPLACEMENTS` for all replacements.
If ``query`` contains non-ASCII characters, search keys will not be
altered.
"""
if not query:
raise ValueError('Empty `query`')
# Remove preceding/trailing spaces
query = query.strip()
if not query:
raise ValueError('`query` contains only whitespace')
# Use user override if there is one
fold_diacritics = self.settings.get('__workflow_diacritic_folding',
fold_diacritics)
results = []
for item in items:
skip = False
score = 0
words = [s.strip() for s in query.split(' ')]
value = key(item).strip()
if value == '':
continue
for word in words:
if word == '':
continue
s, rule = self._filter_item(value, word, match_on,
fold_diacritics)
if not s: # Skip items that don't match part of the query
skip = True
score += s
if skip:
continue
if score:
# use "reversed" `score` (i.e. highest becomes lowest) and
# `value` as sort key. This means items with the same score
# will be sorted in alphabetical not reverse alphabetical order
results.append(((100.0 / score, value.lower(), score),
(item, score, rule)))
# sort on keys, then discard the keys
results.sort(reverse=ascending)
results = [t[1] for t in results]
if max_results and len(results) > max_results:
results = results[:max_results]
if min_score:
results = [r for r in results if r[1] > min_score]
# return list of ``(item, score, rule)``
if include_score:
return results
# just return list of items
return [t[0] for t in results]
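    # Usage sketch -- hypothetical data: `filter()` is normally fed the user's
    # query plus a list of candidate objects and a `key` callable.
    #
    #     apps = [{'name': 'Google Chrome'}, {'name': 'OmniFocus'}]
    #     hits = wf.filter('gc', apps, key=lambda a: a['name'],
    #                      match_on=MATCH_ALL ^ MATCH_ALLCHARS)
    #     # -> [{'name': 'Google Chrome'}]  (matched via MATCH_CAPITALS)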
def _filter_item(self, value, query, match_on, fold_diacritics):
"""Filter ``value`` against ``query`` using rules ``match_on``
:returns: ``(score, rule)``
"""
query = query.lower()
if not isascii(query):
fold_diacritics = False
if fold_diacritics:
value = self.fold_to_ascii(value)
# pre-filter any items that do not contain all characters
# of ``query`` to save on running several more expensive tests
if not set(query) <= set(value.lower()):
return (0, None)
# item starts with query
if match_on & MATCH_STARTSWITH and value.lower().startswith(query):
score = 100.0 - (len(value) / len(query))
return (score, MATCH_STARTSWITH)
# query matches capitalised letters in item,
# e.g. of = OmniFocus
if match_on & MATCH_CAPITALS:
initials = ''.join([c for c in value if c in INITIALS])
if initials.lower().startswith(query):
score = 100.0 - (len(initials) / len(query))
return (score, MATCH_CAPITALS)
# split the item into "atoms", i.e. words separated by
# spaces or other non-word characters
if (match_on & MATCH_ATOM or
match_on & MATCH_INITIALS_CONTAIN or
match_on & MATCH_INITIALS_STARTSWITH):
atoms = [s.lower() for s in split_on_delimiters(value)]
# print('atoms : %s --> %s' % (value, atoms))
# initials of the atoms
initials = ''.join([s[0] for s in atoms if s])
if match_on & MATCH_ATOM:
# is `query` one of the atoms in item?
# similar to substring, but scores more highly, as it's
# a word within the item
if query in atoms:
score = 100.0 - (len(value) / len(query))
return (score, MATCH_ATOM)
# `query` matches start (or all) of the initials of the
# atoms, e.g. ``himym`` matches "How I Met Your Mother"
# *and* "how i met your mother" (the ``capitals`` rule only
# matches the former)
if (match_on & MATCH_INITIALS_STARTSWITH and
initials.startswith(query)):
score = 100.0 - (len(initials) / len(query))
return (score, MATCH_INITIALS_STARTSWITH)
# `query` is a substring of initials, e.g. ``doh`` matches
# "The Dukes of Hazzard"
elif (match_on & MATCH_INITIALS_CONTAIN and
query in initials):
score = 95.0 - (len(initials) / len(query))
return (score, MATCH_INITIALS_CONTAIN)
# `query` is a substring of item
if match_on & MATCH_SUBSTRING and query in value.lower():
score = 90.0 - (len(value) / len(query))
return (score, MATCH_SUBSTRING)
# finally, assign a score based on how close together the
# characters in `query` are in item.
if match_on & MATCH_ALLCHARS:
search = self._search_for_query(query)
match = search(value)
if match:
score = 100.0 / ((1 + match.start()) *
(match.end() - match.start() + 1))
return (score, MATCH_ALLCHARS)
# Nothing matched
return (0, None)
def _search_for_query(self, query):
if query in self._search_pattern_cache:
return self._search_pattern_cache[query]
# Build pattern: include all characters
pattern = []
for c in query:
# pattern.append('[^{0}]*{0}'.format(re.escape(c)))
pattern.append('.*?{0}'.format(re.escape(c)))
pattern = ''.join(pattern)
search = re.compile(pattern, re.IGNORECASE).search
self._search_pattern_cache[query] = search
return search
def run(self, func):
"""Call ``func`` to run your workflow
        :param func: Callable to call with ``self`` (i.e. the :class:`Workflow`
            instance) as first argument.
        ``func`` should be the main entry point to your workflow.
Any exceptions raised will be logged and an error message will be
output to Alfred.
"""
start = time.time()
try:
func(self)
except Exception as err:
self.logger.exception(err)
if not sys.stdout.isatty(): # Show error in Alfred
self._items = []
if self._name:
name = self._name
elif self._bundleid:
name = self._bundleid
else: # pragma: no cover
name = os.path.dirname(__file__)
self.add_item("Error in workflow '%s'" % name, unicode(err),
icon=ICON_ERROR)
self.send_feedback()
return 1
finally:
self.logger.debug('Workflow finished in {:0.3f} seconds.'.format(
time.time() - start))
return 0
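    # Usage sketch of the entry-point pattern described in the docstring above
    # (a minimal, hypothetical script filter):
    #
    #     def main(wf):
    #         wf.add_item('Hello', 'World')
    #         wf.send_feedback()
    #
    #     if __name__ == '__main__':
    #         wf = Workflow()
    #         sys.exit(wf.run(main))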
# Alfred feedback methods ------------------------------------------
def add_item(self, title, subtitle='', modifier_subtitles=None, arg=None,
autocomplete=None, valid=False, uid=None, icon=None,
icontype=None, type=None, largetext=None, copytext=None):
"""Add an item to be output to Alfred
:param title: Title shown in Alfred
:type title: ``unicode``
:param subtitle: Subtitle shown in Alfred
:type subtitle: ``unicode``
:param modifier_subtitles: Subtitles shown when modifier
(CMD, OPT etc.) is pressed. Use a ``dict`` with the lowercase
keys ``cmd``, ``ctrl``, ``shift``, ``alt`` and ``fn``
:type modifier_subtitles: ``dict``
:param arg: Argument passed by Alfred as ``{query}`` when item is
actioned
:type arg: ``unicode``
:param autocomplete: Text expanded in Alfred when item is TABbed
:type autocomplete: ``unicode``
:param valid: Whether or not item can be actioned
:type valid: ``Boolean``
:param uid: Used by Alfred to remember/sort items
:type uid: ``unicode``
:param icon: Filename of icon to use
:type icon: ``unicode``
:param icontype: Type of icon. Must be one of ``None`` , ``'filetype'``
or ``'fileicon'``. Use ``'filetype'`` when ``icon`` is a filetype
such as ``'public.folder'``. Use ``'fileicon'`` when you wish to
use the icon of the file specified as ``icon``, e.g.
``icon='/Applications/Safari.app', icontype='fileicon'``.
Leave as `None` if ``icon`` points to an actual
icon file.
:type icontype: ``unicode``
:param type: Result type. Currently only ``'file'`` is supported
(by Alfred). This will tell Alfred to enable file actions for
this item.
:type type: ``unicode``
:param largetext: Text to be displayed in Alfred's large text box
if user presses CMD+L on item.
:type largetext: ``unicode``
:param copytext: Text to be copied to pasteboard if user presses
CMD+C on item.
:type copytext: ``unicode``
:returns: :class:`Item` instance
See the :ref:`script-filter-results` section of the documentation
for a detailed description of what the various parameters do and how
they interact with one another.
See :ref:`icons` for a list of the supported system icons.
.. note::
Although this method returns an :class:`Item` instance, you don't
need to hold onto it or worry about it. All generated :class:`Item`
instances are also collected internally and sent to Alfred when
:meth:`send_feedback` is called.
The generated :class:`Item` is only returned in case you want to
edit it or do something with it other than send it to Alfred.
"""
item = self.item_class(title, subtitle, modifier_subtitles, arg,
autocomplete, valid, uid, icon, icontype, type,
largetext, copytext)
self._items.append(item)
return item
def send_feedback(self):
"""Print stored items to console/Alfred as XML."""
root = ET.Element('items')
for item in self._items:
root.append(item.elem)
sys.stdout.write('<?xml version="1.0" encoding="utf-8"?>\n')
sys.stdout.write(ET.tostring(root).encode('utf-8'))
sys.stdout.flush()
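    # Usage sketch -- hypothetical values: a fuller `add_item()` call showing
    # the main parameters documented above, followed by `send_feedback()`.
    #
    #     wf.add_item(u'Open example page',
    #                 u'Press Enter to open it',
    #                 arg=u'https://example.com',
    #                 autocomplete=u'example',
    #                 valid=True,
    #                 uid=u'example-page',
    #                 icon=ICON_INFO)
    #     wf.send_feedback()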
####################################################################
# Updating methods
####################################################################
@property
def update_available(self):
"""Is an update available?
.. versionadded:: 1.9
See :ref:`manual-updates` in the :ref:`user-manual` for detailed
information on how to enable your workflow to update itself.
:returns: ``True`` if an update is available, else ``False``
"""
update_data = self.cached_data('__workflow_update_status')
if not update_data or not update_data.get('available'):
return False
return update_data['available']
def check_update(self, force=False):
"""Call update script if it's time to check for a new release
.. versionadded:: 1.9
The update script will be run in the background, so it won't
        interfere with the execution of your workflow.
See :ref:`manual-updates` in the :ref:`user-manual` for detailed
information on how to enable your workflow to update itself.
:param force: Force update check
:type force: ``Boolean``
"""
frequency = self._update_settings.get('frequency',
DEFAULT_UPDATE_FREQUENCY)
# Check for new version if it's time
if (force or not self.cached_data_fresh(
'__workflow_update_status', frequency * 86400)):
github_slug = self._update_settings['github_slug']
version = self._update_settings['version']
from background import run_in_background
# update.py is adjacent to this file
update_script = os.path.join(os.path.dirname(__file__),
b'update.py')
cmd = ['/usr/bin/python', update_script, 'check', github_slug,
version]
self.logger.info('Checking for update ...')
run_in_background('__workflow_update_check', cmd)
else:
self.logger.debug('Update check not due')
def start_update(self):
"""Check for update and download and install new workflow file
.. versionadded:: 1.9
See :ref:`manual-updates` in the :ref:`user-manual` for detailed
information on how to enable your workflow to update itself.
:returns: ``True`` if an update is available and will be
installed, else ``False``
"""
import update
github_slug = self._update_settings['github_slug']
version = self._update_settings['version']
if not update.check_update(github_slug, version):
return False
from background import run_in_background
# update.py is adjacent to this file
update_script = os.path.join(os.path.dirname(__file__),
b'update.py')
cmd = ['/usr/bin/python', update_script, 'install', github_slug,
version]
self.logger.debug('Downloading update ...')
run_in_background('__workflow_update_install', cmd)
return True
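    # Usage sketch -- the key names below are taken from the lookups in
    # `check_update()`/`start_update()`; passing them via the constructor is an
    # assumption about the rest of this module:
    #
    #     wf = Workflow(update_settings={
    #         'github_slug': 'username/reponame',   # hypothetical repo
    #         'version': 'v1.0',
    #         'frequency': 7,                       # days between checks
    #     })
    #     if wf.update_available:
    #         wf.start_update()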
####################################################################
# Keychain password storage methods
####################################################################
def save_password(self, account, password, service=None):
"""Save account credentials.
If the account exists, the old password will first be deleted
(Keychain throws an error otherwise).
If something goes wrong, a :class:`KeychainError` exception will
be raised.
:param account: name of the account the password is for, e.g.
"Pinboard"
:type account: ``unicode``
:param password: the password to secure
:type password: ``unicode``
:param service: Name of the service. By default, this is the
workflow's bundle ID
:type service: ``unicode``
"""
if not service:
service = self.bundleid
try:
self._call_security('add-generic-password', service, account,
'-w', password)
self.logger.debug('Saved password : %s:%s', service, account)
except PasswordExists:
self.logger.debug('Password exists : %s:%s', service, account)
current_password = self.get_password(account, service)
if current_password == password:
self.logger.debug('Password unchanged')
else:
self.delete_password(account, service)
self._call_security('add-generic-password', service,
account, '-w', password)
self.logger.debug('save_password : %s:%s', service, account)
def get_password(self, account, service=None):
"""Retrieve the password saved at ``service/account``. Raise
:class:`PasswordNotFound` exception if password doesn't exist.
:param account: name of the account the password is for, e.g.
"Pinboard"
:type account: ``unicode``
:param service: Name of the service. By default, this is the workflow's
bundle ID
:type service: ``unicode``
:returns: account password
:rtype: ``unicode``
"""
if not service:
service = self.bundleid
password = self._call_security('find-generic-password', service,
account, '-w')
self.logger.debug('Got password : %s:%s', service, account)
return password
def delete_password(self, account, service=None):
"""Delete the password stored at ``service/account``. Raises
:class:`PasswordNotFound` if account is unknown.
:param account: name of the account the password is for, e.g.
"Pinboard"
:type account: ``unicode``
:param service: Name of the service. By default, this is the workflow's
bundle ID
:type service: ``unicode``
"""
if not service:
service = self.bundleid
self._call_security('delete-generic-password', service, account)
self.logger.debug('Deleted password : %s:%s', service, account)
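    # Usage sketch -- hypothetical account name and password: the three
    # Keychain helpers above are typically used together.
    #
    #     wf.save_password('pinboard', 'hunter2')   # service defaults to bundle ID
    #     token = wf.get_password('pinboard')       # -> u'hunter2'
    #     wf.delete_password('pinboard')            # may raise PasswordNotFound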
####################################################################
# Methods for workflow:* magic args
####################################################################
def clear_cache(self):
"""Delete all files in workflow's :attr:`cachedir`."""
self._delete_directory_contents(self.cachedir)
def clear_data(self):
"""Delete all files in workflow's :attr:`datadir`."""
self._delete_directory_contents(self.datadir)
def clear_settings(self):
"""Delete workflow's :attr:`settings_path`."""
if os.path.exists(self.settings_path):
os.unlink(self.settings_path)
self.logger.debug('Deleted : %r', self.settings_path)
def reset(self):
"""Delete :attr:`settings <settings_path>`, :attr:`cache <cachedir>`
and :attr:`data <datadir>`
"""
self.clear_cache()
self.clear_data()
self.clear_settings()
def open_log(self):
"""Open workflows :attr:`logfile` in standard
application (usually Console.app).
"""
subprocess.call(['open', self.logfile])
def open_cachedir(self):
"""Open the workflow's :attr:`cachedir` in Finder."""
subprocess.call(['open', self.cachedir])
def open_datadir(self):
"""Open the workflow's :attr:`datadir` in Finder."""
subprocess.call(['open', self.datadir])
def open_workflowdir(self):
"""Open the workflow's :attr:`directory <workflowdir` in Finder."""
subprocess.call(['open', self.workflowdir])
def open_terminal(self):
"""Open a Terminal window at workflow's :attr:`directory <workflowdir`."""
subprocess.call(['open', '-a', 'Terminal',
self.workflowdir])
####################################################################
# Helper methods
####################################################################
def decode(self, text, encoding=None, normalization=None):
"""Return ``text`` as normalised unicode.
If ``encoding`` and/or ``normalization`` is ``None``, the
        ``input_encoding`` and ``normalization`` parameters passed to
:class:`Workflow` are used.
:param text: string
:type text: encoded or Unicode string. If ``text`` is already a
Unicode string, it will only be normalised.
:param encoding: The text encoding to use to decode ``text`` to
Unicode.
:type encoding: ``unicode`` or ``None``
        :param normalization: The normalisation form to apply to ``text``.
:type normalization: ``unicode`` or ``None``
:returns: decoded and normalised ``unicode``
:class:`Workflow` uses "NFC" normalisation by default. This is the
standard for Python and will work well with data from the web (via
:mod:`~workflow.web` or :mod:`json`).
OS X, on the other hand, uses "NFD" normalisation (nearly), so data
coming from the system (e.g. via :mod:`subprocess` or
:func:`os.listdir`/:mod:`os.path`) may not match. You should either
normalise this data, too, or change the default normalisation used by
:class:`Workflow`.
"""
encoding = encoding or self._input_encoding
normalization = normalization or self._normalizsation
if not isinstance(text, unicode):
text = unicode(text, encoding)
return unicodedata.normalize(normalization, text)
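    # Usage sketch (assumes `subprocess` output, as mentioned in the docstring
    # above): normalise externally sourced text before comparing it with data
    # from the web or from Alfred.
    #
    #     out = subprocess.check_output(['ls', wf.datadir])
    #     names = [wf.decode(line) for line in out.splitlines()]  # NFC unicode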
def fold_to_ascii(self, text):
"""Convert non-ASCII characters to closest ASCII equivalent.
.. versionadded:: 1.3
.. note:: This only works for a subset of European languages.
:param text: text to convert
:type text: ``unicode``
:returns: text containing only ASCII characters
:rtype: ``unicode``
"""
if isascii(text):
return text
text = ''.join([ASCII_REPLACEMENTS.get(c, c) for c in text])
return unicode(unicodedata.normalize('NFKD',
text).encode('ascii', 'ignore'))
def _delete_directory_contents(self, dirpath):
"""Delete all files in a directory
:param dirpath: path to directory to clear
:type dirpath: ``unicode`` or ``str``
"""
if os.path.exists(dirpath):
for filename in os.listdir(dirpath):
path = os.path.join(dirpath, filename)
if os.path.isdir(path):
shutil.rmtree(path)
else:
os.unlink(path)
self.logger.debug('Deleted : %r', path)
def _load_info_plist(self):
"""Load workflow info from ``info.plist``
"""
self._info = plistlib.readPlist(self._info_plist)
self._info_loaded = True
def _create(self, dirpath):
"""Create directory `dirpath` if it doesn't exist
:param dirpath: path to directory
:type dirpath: ``unicode``
:returns: ``dirpath`` argument
:rtype: ``unicode``
"""
if not os.path.exists(dirpath):
os.makedirs(dirpath)
return dirpath
def _call_security(self, action, service, account, *args):
"""Call the ``security`` CLI app that provides access to keychains.
May raise `PasswordNotFound`, `PasswordExists` or `KeychainError`
exceptions (the first two are subclasses of `KeychainError`).
:param action: The ``security`` action to call, e.g.
``add-generic-password``
:type action: ``unicode``
:param service: Name of the service.
:type service: ``unicode``
:param account: name of the account the password is for, e.g.
"Pinboard"
:type account: ``unicode``
:param password: the password to secure
:type password: ``unicode``
:param *args: list of command line arguments to be passed to
``security``
:type *args: `list` or `tuple`
        :returns: ``output`` from the ``security`` command, stripped of
            leading/trailing whitespace
        :rtype: ``unicode``
"""
cmd = ['security', action, '-s', service, '-a', account] + list(args)
p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
retcode, output = p.wait(), p.stdout.read().strip().decode('utf-8')
if retcode == 44: # password does not exist
raise PasswordNotFound()
elif retcode == 45: # password already exists
raise PasswordExists()
elif retcode > 0:
err = KeychainError('Unknown Keychain error : %s' % output)
err.retcode = retcode
raise err
return output
| fniephaus/alfred-hackernews | src/workflow/workflow.py | Python | mit | 75,128 |
#!/usr/bin/env python3
'''Test IXFR-from-diff with DNSSEC'''
from dnstest.test import Test
t = Test(stress=False)
master = t.server("knot")
slave = t.server("knot")
if not master.valgrind:
zones = t.zone_rnd(12)
else:
zones = t.zone_rnd(4, records=100)
slave.tcp_remote_io_timeout = 20000
master.ctl_params_append = ["-t", "30"]
t.link(zones, master, slave, ixfr=True)
master.semantic_check = False
master.zonefile_sync = "-1"
for zone in zones:
master.dnssec(zone).enable = True
t.start()
ser1 = master.zones_wait(zones, serials_zfile=True, greater=True, equal=False)
slave.zones_wait(zones, ser1, greater=False, equal=True)
for zone in zones:
slave.zone_backup(zone, flush=True)
master.flush(wait=True)
for zone in zones:
master.update_zonefile(zone, random=True)
master.ctl("zone-reload %s" % zone.name)
ser2 = master.zones_wait(zones, serials_zfile=True, greater=True, equal=False)
slave.zones_wait(zones, ser2, greater=False, equal=True)
master.stop()
t.sleep(3)
master.start()
master.zones_wait(zones, ser2, greater=False, equal=True)
t.xfr_diff(master, slave, zones) # AXFR diff
t.xfr_diff(master, slave, zones, ser1) # IXFR diff
t.end()
| CZ-NIC/knot | tests-extra/tests/dnssec/ixfr_diff/test.py | Python | gpl-3.0 | 1,178 |
# ----------------------------------------------------------------------------
# Copyright (c) 2017-, LabControl development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
from itertools import chain
from tornado.web import authenticated, HTTPError
from tornado.escape import json_encode, json_decode
from labcontrol.gui.handlers.base import BaseHandler
from labcontrol.db.exceptions import LabControlUnknownIdError
from labcontrol.db.plate import PlateConfiguration, Plate
from labcontrol.db.composition import SampleComposition
from labcontrol.db.container import Well
from labcontrol.db.process import (
SamplePlatingProcess, GDNAExtractionProcess, LibraryPrep16SProcess,
LibraryPrepShotgunProcess, NormalizationProcess,
GDNAPlateCompressionProcess)
def _get_plate(plate_id):
"""Returns the plate object if it exists
Parameters
----------
plate_id : str
The plate id
Raises
------
HTTPError
404, if the plate doesn't exist
"""
plate_id = int(plate_id)
try:
plate = Plate(plate_id)
except LabControlUnknownIdError:
raise HTTPError(404, 'Plate %s doesn\'t exist' % plate_id)
return plate
class PlateSearchHandler(BaseHandler):
@authenticated
def get(self):
control_names = SampleComposition.get_control_samples()
self.render('plate_search.html',
control_names=json_encode(control_names))
@authenticated
def post(self):
plate_comment_keywords = self.get_argument("plate_comment_keywords")
well_comment_keywords = self.get_argument("well_comment_keywords")
operation = self.get_argument("operation")
sample_names = json_decode(self.get_argument('sample_names'))
res = {"data": [[p.id, p.external_id]
for p in Plate.search(samples=sample_names,
plate_notes=plate_comment_keywords,
well_notes=well_comment_keywords,
query_type=operation)]}
self.write(res)
class PlateListingHandler(BaseHandler):
@authenticated
def get(self):
self.render('plate_list.html')
class PlateListHandler(BaseHandler):
@authenticated
def get(self):
plate_type = self.get_argument('plate_type', None)
only_quantified = self.get_argument('only_quantified', False)
plate_type = (json_decode(plate_type)
if plate_type is not None else None)
only_quantified = True if only_quantified == 'true' else False
rows_list = [[p['plate_id'],
p['external_id'],
p['creation_timestamp'],
p['studies'] if p['studies'] is not None else []]
for p in Plate.list_plates(
plate_type, only_quantified=only_quantified,
include_study_titles=True)]
res = {"data": rows_list}
self.write(res)
def plate_map_handler_get_request(process_id):
plate_id = None
if process_id is not None:
try:
process = SamplePlatingProcess(process_id)
except LabControlUnknownIdError:
raise HTTPError(404, reason="Plating process %s doesn't exist"
% process_id)
plate_id = process.plate.id
plate_confs = [[pc.id, pc.description, pc.num_rows, pc.num_columns]
for pc in PlateConfiguration.iter()
if 'plate map' not in pc.description]
cdesc = SampleComposition.get_control_sample_types_description()
return {'plate_confs': plate_confs, 'plate_id': plate_id,
'process_id': process_id, 'controls_description': cdesc}
class PlateMapHandler(BaseHandler):
@authenticated
def get(self):
process_id = self.get_argument('process_id', None)
res = plate_map_handler_get_request(process_id)
self.render("plate.html", **res)
class PlateNameHandler(BaseHandler):
@authenticated
def get(self):
new_name = self.get_argument('new-name')
status = 200 if Plate.external_id_exists(new_name) else 404
self.set_status(status)
self.finish()
def plate_handler_patch_request(user, plate_id, req_op, req_path,
req_value, req_from):
"""Performs the patch operation on the plate
Parameters
----------
user: labcontrol.db.user.User
User performing the request
plate_id: int
        The id of the plate to apply the patch operation to
req_op: string
JSON PATCH op parameter
req_path: string
JSON PATCH path parameter
req_value: string
JSON PATCH value parameter
req_from: string
JSON PATCH from parameter
Raises
------
HTTPError
400: If req_op is not a supported operation
400: If req_path is incorrect
"""
plate = _get_plate(plate_id)
if req_op == 'replace':
req_path = [v for v in req_path.split('/') if v]
if len(req_path) != 1:
raise HTTPError(400, 'Incorrect path parameter')
attribute = req_path[0]
if attribute == 'name':
plate.external_id = req_value
elif attribute == 'discarded':
plate.discarded = req_value
else:
raise HTTPError(404, 'Attribute %s not recognized' % attribute)
else:
raise HTTPError(400, 'Operation %s not supported. Current supported '
'operations: replace' % req_op)
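# Usage sketch -- hypothetical request values, not part of the handlers: the
# PATCH parameters accepted by `plate_handler_patch_request` follow RFC 6902,
# e.g. renaming plate 27 sends form fields
#
#     op='replace', path='/name', value='New plate name'
#
# and discarding it sends
#
#     op='replace', path='/discarded', value=True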
class PlateHandler(BaseHandler):
@authenticated
def get(self, plate_id):
plate = _get_plate(plate_id)
# sorting is done in plate.duplicates
duplicates = [
[sample_info[0].row, sample_info[0].column, sample_info[1]]
for sample_info in chain.from_iterable(plate.duplicates.values())]
# sorting of wells has to be done here as they are in a dictionary
previous_plates = []
prev_plated = plate.get_previously_plated_wells()
well_ids = sorted([w.id for w in prev_plated.keys()])
for curr_well_id in well_ids:
curr_well = Well(curr_well_id)
curr_plates = prev_plated[curr_well]
# plates are sorted in plate id order in
# get_previously_plated_wells
previous_plates.append([
[curr_well.row, curr_well.column],
[{'plate_id': p.id, 'plate_name': p.external_id} for p in
curr_plates]])
# sorting is done in plate.unknown_samples
unknowns = [[well.row, well.column] for well in plate.unknown_samples]
# sorting is done in plate.quantification processes
quantitation_processes = [[q.id, q.personnel.name, q.date.strftime(
q.get_date_format()), q.notes] for q in
plate.quantification_processes]
plate_config = plate.plate_configuration
result = {'plate_id': plate.id,
'plate_name': plate.external_id,
'discarded': plate.discarded,
'plate_configuration': [
plate_config.id, plate_config.description,
plate_config.num_rows, plate_config.num_columns],
'notes': plate.notes,
'process_notes': plate.process.notes,
'studies': sorted(s.id for s in plate.studies),
'duplicates': duplicates,
'previous_plates': previous_plates,
'unknowns': unknowns,
'quantitation_processes': quantitation_processes}
self.write(result)
self.finish()
@authenticated
def patch(self, plate_id):
# Follows the JSON PATCH specification
# https://tools.ietf.org/html/rfc6902
req_op = self.get_argument('op')
req_path = self.get_argument('path')
req_value = self.get_argument('value', None)
req_from = self.get_argument('from', None)
plate_handler_patch_request(self.current_user, plate_id, req_op,
req_path, req_value, req_from)
self.finish()
def plate_layout_handler_get_request(plate_id):
"""Returns the plate layout
Parameters
----------
plate_id : int
The plate id
Returns
-------
list of lists of {'sample': str, 'notes': str}
"""
plate = _get_plate(plate_id)
plate_layout = plate.layout
result = []
for l_row in plate_layout:
row = []
for l_well in l_row:
composition = l_well.composition
sample = composition.specimen_id
row.append({'sample': sample, 'notes': composition.notes})
result.append(row)
return result
class PlateLayoutHandler(BaseHandler):
@authenticated
def get(self, plate_id):
self.write(json_encode(plate_layout_handler_get_request(plate_id)))
class PlateProcessHandler(BaseHandler):
@authenticated
def get(self, plate_id):
urls = {
SamplePlatingProcess: '/plate',
GDNAExtractionProcess: '/process/gdna_extraction',
LibraryPrep16SProcess: '/process/library_prep_16S',
LibraryPrepShotgunProcess: '/process/library_prep_shotgun',
NormalizationProcess: '/process/normalize',
GDNAPlateCompressionProcess: '/process/gdna_compression'}
process = Plate(plate_id).process
self.redirect(urls[process.__class__] + '?process_id=%s' % process.id)
| jdereus/labman | labcontrol/gui/handlers/plate.py | Python | bsd-3-clause | 9,784 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.paging import Paged
class ProbePaged(Paged):
"""
    A paging container for iterating over a list of :class:`Probe <azure.mgmt.network.v2017_10_01.models.Probe>` objects
"""
_attribute_map = {
'next_link': {'key': 'nextLink', 'type': 'str'},
'current_page': {'key': 'value', 'type': '[Probe]'}
}
def __init__(self, *args, **kwargs):
super(ProbePaged, self).__init__(*args, **kwargs)
| lmazuel/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2017_10_01/models/probe_paged.py | Python | mit | 917 |
#!/usr/bin/env python
import wx
import ape
import os
import genomics
class PCRSimulatorFrame(wx.Frame):
def __init__(self):
wx.Frame.__init__(self, None, title="PCR Simulator", size=(840, 400), name="PCR Simulator")
#self.icon = wx.Icon("res/icon.png", wx.BITMAP_TYPE_ANY)
#self.SetIcon(self.icon)
self.CreateStatusBar() # Access with self.GetStatusBar()
self.SetBackgroundColour(wx.WHITE)
self.SetAutoLayout(True)
self.CreateLayout()
self.Layout()
self.Show(True)
def load(self, onWrite, nameCtrl):
def onLoad(event):
loadDialog = wx.FileDialog(self, message="Choose a file", wildcard="Plasmid files (.ape, .str)|*.ape;*.str", style=wx.FD_OPEN|wx.FD_FILE_MUST_EXIST|wx.FD_CHANGE_DIR)
if loadDialog.ShowModal() == wx.ID_OK:
onWrite(bp=ape.readBP(os.path.join(loadDialog.GetDirectory(), loadDialog.GetFilename())))
nameCtrl.SetLabel(label=loadDialog.GetFilename())
loadDialog.Destroy()
return onLoad
def save(self, dataCtrl):
def onSave(event):
saveDialog = wx.FileDialog(self, message="Choose a file", wildcard="Plasmid files (.ape, .str)|*.ape;*.str", style=wx.FD_SAVE|wx.FD_OVERWRITE_PROMPT|wx.FD_CHANGE_DIR)
if saveDialog.ShowModal() == wx.ID_OK:
ape.writeBP(filename=os.path.join(saveDialog.GetDirectory(), saveDialog.GetFilename()), bp=dataCtrl.GetValue())
saveDialog.Destroy()
return onSave
def writeCtrl(self, dataCtrl, nameCtrl, lengthCtrl, aCtrl, cCtrl):
def onWrite(bp): # Generates a function which will write bp to a column of textCtrl
nameCtrl.SetLabel("unsaved file")
lengthCtrl.SetLabel(label=str(len(bp)) + " base pairs")
aCtrl.SetLabel(label=str(bp.count('A') + bp.count('a')) + " adenine")
cCtrl.SetLabel(label=str(bp.count('C') + bp.count('c')) + " cytosine")
dataCtrl.SetValue(value=bp)
return onWrite
def generatePrimers(self, templateDataCtrl, outputDataCtrl, fPrimerOnWrite, rPrimerOnWrite):
def onGeneratePrimers(event):
templateBP = templateDataCtrl.GetValue()
outputBP = outputDataCtrl.GetValue()
restrictionMDialog = wx.MessageDialog(self, message='Load restriction site sequence for primer caps?', caption='Primer cap selection', style=wx.YES_NO|wx.ICON_QUESTION)
if restrictionMDialog.ShowModal() == wx.ID_YES:
fCapDialog = wx.FileDialog(self, message="Choose a forward primer cap", wildcard="Plasmid files (.ape, .str)|*.ape;*.str", style=wx.FD_OPEN|wx.FD_FILE_MUST_EXIST)
if fCapDialog.ShowModal() == wx.ID_OK:
fPrimerCapBP = ape.readBP(os.path.join(fCapDialog.GetDirectory(), fCapDialog.GetFilename()))
rCapDialog = wx.FileDialog(self, message="Choose a reverse primer cap", wildcard="Plasmid files (.ape, .str)|*.ape;*.str", style=wx.FD_OPEN|wx.FD_FILE_MUST_EXIST)
if rCapDialog.ShowModal() == wx.ID_OK:
rPrimerCapBP = ape.readBP(os.path.join(rCapDialog.GetDirectory(), rCapDialog.GetFilename()))
else:
noFileMDialog = wx.MessageDialog(self, message='No primer cap selected: none will be used', caption='Primer cap selection error', style=wx.OK|wx.ICON_ERROR)
noFileMDialog.ShowModal()
noFileMDialog.Destroy()
rPrimerCapBP = ""
rCapDialog.Destroy()
else:
noFileMDialog = wx.MessageDialog(self, message='No primer cap selected: none will be used', caption='Primer cap selection error', style=wx.OK|wx.ICON_ERROR)
noFileMDialog.ShowModal()
noFileMDialog.Destroy()
fPrimerCapBP = ""
rPrimerCapBP = ""
fCapDialog.Destroy()
else:
fPrimerCapBP = ""
rPrimerCapBP = ""
restrictionMDialog.Destroy()
fPrimerBP, rPrimerBP = genomics.generatePrimers(template=templateBP, output=outputBP, fPrimerCap=fPrimerCapBP, rPrimerCap=rPrimerCapBP)
fPrimerOnWrite(bp=fPrimerBP)
rPrimerOnWrite(bp=rPrimerBP)
return onGeneratePrimers
def simulatePCR(self, templateDataCtrl, fPrimerDataCtrl, rPrimerDataCtrl, outputOnWrite):
def onSimulatePCR(event): # Generate callback function
templateBP = templateDataCtrl.GetValue()
fPrimerBP = fPrimerDataCtrl.GetValue()
rPrimerBP = rPrimerDataCtrl.GetValue()
outputBP = genomics.simulatePCR(template=templateBP, fPrimer=fPrimerBP, rPrimer=rPrimerBP)
if outputBP == None: # genomics.simulatePCR returns None when it finds the primers in the template strand output segment
resultMDialog = wx.MessageDialog(self, message='Primers homologous within output', caption='Polymerase reaction error', style=wx.OK|wx.ICON_ERROR)
resultMDialog.ShowModal()
resultMDialog.Destroy()
elif outputBP == "": # genomics.simulatePCR returns "" when it could not match both primers with the template strand
resultMDialog = wx.MessageDialog(self, message='Primers not homologous within template', caption='Polymerase reaction error', style=wx.OK|wx.ICON_ERROR)
resultMDialog.ShowModal()
resultMDialog.Destroy()
else:
outputOnWrite(bp=outputBP)
return onSimulatePCR
def verifyPrimers(self, templateDataCtrl, fPrimerDataCtrl, rPrimerDataCtrl, outputDataCtrl):
def onVerifyPrimers(event):
templateBP = templateDataCtrl.GetValue()
fPrimerBP = fPrimerDataCtrl.GetValue()
rPrimerBP = rPrimerDataCtrl.GetValue()
outputBP = outputDataCtrl.GetValue()
simOutputBP = genomics.simulatePCR(template=templateBP, fPrimer=fPrimerBP, rPrimer=rPrimerBP)
if simOutputBP == outputBP:
resultMDialog = wx.MessageDialog(self, message='Output verified: primers work', caption='Primer verification pass', style=wx.OK|wx.ICON_INFORMATION)
resultMDialog.ShowModal()
resultMDialog.Destroy()
elif simOutputBP == None:
resultMDialog = wx.MessageDialog(self, message='Output not verified: primers fail\nPrimers homologous within output', caption='Primer verification fail', style=wx.OK|wx.ICON_ERROR)
resultMDialog.ShowModal()
resultMDialog.Destroy()
elif simOutputBP == "":
resultMDialog = wx.MessageDialog(self, message='Output not verified: primers fail\nPrimers not homologous within template', caption='Primer verification fail', style=wx.OK|wx.ICON_ERROR)
resultMDialog.ShowModal()
resultMDialog.Destroy()
else:
resultTEDialog = wx.TextEntryDialog(self, message='Output not verified: primers fail\nSimulation yielded:', caption='Primer verification fail', style=wx.OK|wx.TE_MULTILINE|wx.TE_CHARWRAP|wx.TE_READONLY)
resultTEDialog.SetValue(value=simOutputBP)
resultTEDialog.ShowModal()
resultTEDialog.Destroy()
return onVerifyPrimers
def CreateLayout(self):
# Change text with wx.StaticText#SetLabel()
templateLoadButton = wx.Button(self, label="Load Template Strand")
templateSaveButton = wx.Button(self, label="Save Template Strand")
templateFileName = wx.StaticText(self, label="no file", style=wx.ALIGN_CENTER|wx.ST_NO_AUTORESIZE)
templateLength = wx.StaticText(self, label="0 base pairs", style=wx.ALIGN_CENTER|wx.ST_NO_AUTORESIZE)
templateACount = wx.StaticText(self, label="0 adenine", style=wx.ALIGN_CENTER|wx.ST_NO_AUTORESIZE)
templateCCount = wx.StaticText(self, label="0 cytosine", style=wx.ALIGN_CENTER|wx.ST_NO_AUTORESIZE)
templateData = wx.TextCtrl(self, style=wx.TE_READONLY|wx.TE_MULTILINE|wx.TE_CHARWRAP)
templateOnWrite = self.writeCtrl(dataCtrl=templateData, nameCtrl=templateFileName, lengthCtrl=templateLength, aCtrl=templateACount, cCtrl=templateCCount)
templateLoadButton.Bind(wx.EVT_BUTTON, self.load(onWrite=templateOnWrite, nameCtrl=templateFileName))
templateSaveButton.Bind(wx.EVT_BUTTON, self.save(dataCtrl=templateData))
templateSizer = wx.StaticBoxSizer(wx.StaticBox(self, label="Template Strand", style=wx.ALIGN_CENTER), wx.VERTICAL)
templateSizer.Add(templateLoadButton, 0, wx.EXPAND)
templateSizer.Add(templateSaveButton, 0, wx.EXPAND)
templateSizer.Add(templateFileName, 0, wx.EXPAND)
templateSizer.Add(templateLength, 0, wx.EXPAND)
templateSizer.Add(templateACount, 0, wx.EXPAND)
templateSizer.Add(templateCCount, 0, wx.EXPAND)
templateSizer.Add(templateData, 1, wx.EXPAND)
fPrimerLoadButton = wx.Button(self, label="Load Initialization Primer")
fPrimerSaveButton = wx.Button(self, label="Save Initialization Primer")
fPrimerFileName = wx.StaticText(self, label="no file", style=wx.ALIGN_CENTER|wx.ST_NO_AUTORESIZE)
fPrimerLength = wx.StaticText(self, label="0 base pairs", style=wx.ALIGN_CENTER|wx.ST_NO_AUTORESIZE)
fPrimerACount = wx.StaticText(self, label="0 adenine", style=wx.ALIGN_CENTER|wx.ST_NO_AUTORESIZE)
fPrimerCCount = wx.StaticText(self, label="0 cytosine", style=wx.ALIGN_CENTER|wx.ST_NO_AUTORESIZE)
fPrimerData = wx.TextCtrl(self, style=wx.TE_READONLY|wx.TE_MULTILINE|wx.TE_CHARWRAP)
fPrimerOnWrite = self.writeCtrl(dataCtrl=fPrimerData, nameCtrl=fPrimerFileName, lengthCtrl=fPrimerLength, aCtrl=fPrimerACount, cCtrl=fPrimerCCount)
fPrimerLoadButton.Bind(wx.EVT_BUTTON, self.load(onWrite=fPrimerOnWrite, nameCtrl=fPrimerFileName))
fPrimerSaveButton.Bind(wx.EVT_BUTTON, self.save(dataCtrl=fPrimerData))
fPrimerSizer = wx.StaticBoxSizer(wx.StaticBox(self, label="Initialization Primer", style=wx.ALIGN_CENTER), wx.VERTICAL)
fPrimerSizer.Add(fPrimerLoadButton, 0, wx.EXPAND)
fPrimerSizer.Add(fPrimerSaveButton, 0, wx.EXPAND)
fPrimerSizer.Add(fPrimerFileName, 0, wx.EXPAND)
fPrimerSizer.Add(fPrimerLength, 0, wx.EXPAND)
fPrimerSizer.Add(fPrimerACount, 0, wx.EXPAND)
fPrimerSizer.Add(fPrimerCCount, 0, wx.EXPAND)
fPrimerSizer.Add(fPrimerData, 1, wx.EXPAND)
rPrimerLoadButton = wx.Button(self, label="Load Termination Primer")
rPrimerSaveButton = wx.Button(self, label="Save Termination Primer")
rPrimerFileName = wx.StaticText(self, label="no file", style=wx.ALIGN_CENTER|wx.ST_NO_AUTORESIZE)
rPrimerLength = wx.StaticText(self, label="0 base pairs", style=wx.ALIGN_CENTER|wx.ST_NO_AUTORESIZE)
rPrimerACount = wx.StaticText(self, label="0 adenine", style=wx.ALIGN_CENTER|wx.ST_NO_AUTORESIZE)
rPrimerCCount = wx.StaticText(self, label="0 cytosine", style=wx.ALIGN_CENTER|wx.ST_NO_AUTORESIZE)
rPrimerData = wx.TextCtrl(self, style=wx.TE_READONLY|wx.TE_MULTILINE|wx.TE_CHARWRAP)
rPrimerOnWrite = self.writeCtrl(dataCtrl=rPrimerData, nameCtrl=rPrimerFileName, lengthCtrl=rPrimerLength, aCtrl=rPrimerACount, cCtrl=rPrimerCCount)
rPrimerLoadButton.Bind(wx.EVT_BUTTON, self.load(onWrite=rPrimerOnWrite, nameCtrl=rPrimerFileName))
rPrimerSaveButton.Bind(wx.EVT_BUTTON, self.save(dataCtrl=rPrimerData))
rPrimerSizer = wx.StaticBoxSizer(wx.StaticBox(self, label="Termination Primer", style=wx.ALIGN_CENTER), wx.VERTICAL)
rPrimerSizer.Add(rPrimerLoadButton, 0, wx.EXPAND)
rPrimerSizer.Add(rPrimerSaveButton, 0, wx.EXPAND)
rPrimerSizer.Add(rPrimerFileName, 0, wx.EXPAND)
rPrimerSizer.Add(rPrimerLength, 0, wx.EXPAND)
rPrimerSizer.Add(rPrimerACount, 0, wx.EXPAND)
rPrimerSizer.Add(rPrimerCCount, 0, wx.EXPAND)
rPrimerSizer.Add(rPrimerData, 1, wx.EXPAND)
outputLoadButton = wx.Button(self, label="Load PCR Output")
outputSaveButton = wx.Button(self, label="Save PCR Output")
outputFileName = wx.StaticText(self, label="no file", style=wx.ALIGN_CENTER|wx.ST_NO_AUTORESIZE)
outputLength = wx.StaticText(self, label="0 base pairs", style=wx.ALIGN_CENTER|wx.ST_NO_AUTORESIZE)
outputACount = wx.StaticText(self, label="0 adenine", style=wx.ALIGN_CENTER|wx.ST_NO_AUTORESIZE)
outputCCount = wx.StaticText(self, label="0 cytosine", style=wx.ALIGN_CENTER|wx.ST_NO_AUTORESIZE)
outputData = wx.TextCtrl(self, style=wx.TE_READONLY|wx.TE_MULTILINE|wx.TE_CHARWRAP)
outputOnWrite = self.writeCtrl(dataCtrl=outputData, nameCtrl=outputFileName, lengthCtrl=outputLength, aCtrl=outputACount, cCtrl=outputCCount)
outputLoadButton.Bind(wx.EVT_BUTTON, self.load(onWrite=outputOnWrite, nameCtrl=outputFileName))
outputSaveButton.Bind(wx.EVT_BUTTON, self.save(dataCtrl=outputData))
outputSizer = wx.StaticBoxSizer(wx.StaticBox(self, label="PCR Output Strand", style=wx.ALIGN_CENTER), wx.VERTICAL)
outputSizer.Add(outputLoadButton, 0, wx.EXPAND)
outputSizer.Add(outputSaveButton, 0, wx.EXPAND)
outputSizer.Add(outputFileName, 0, wx.EXPAND)
outputSizer.Add(outputLength, 0, wx.EXPAND)
outputSizer.Add(outputACount, 0, wx.EXPAND)
outputSizer.Add(outputCCount, 0, wx.EXPAND)
outputSizer.Add(outputData, 1, wx.EXPAND)
generatePrimers = wx.Button(self, label="Generate Primers")
simulatePCR = wx.Button(self, label="Simulate PCR")
verifyPrimers = wx.Button(self, label="Verify Primers")
generatePrimers.Bind(wx.EVT_BUTTON, self.generatePrimers(templateDataCtrl=templateData, outputDataCtrl=outputData, fPrimerOnWrite=fPrimerOnWrite, rPrimerOnWrite=rPrimerOnWrite))
simulatePCR.Bind(wx.EVT_BUTTON, self.simulatePCR(templateDataCtrl=templateData, fPrimerDataCtrl=fPrimerData, rPrimerDataCtrl=rPrimerData, outputOnWrite=outputOnWrite))
verifyPrimers.Bind(wx.EVT_BUTTON, self.verifyPrimers(templateDataCtrl=templateData, fPrimerDataCtrl=fPrimerData, rPrimerDataCtrl=rPrimerData, outputDataCtrl=outputData))
actionSizer = wx.BoxSizer(wx.VERTICAL)
actionSizer.AddStretchSpacer()
actionSizer.Add(generatePrimers, 1, wx.EXPAND)
actionSizer.Add(simulatePCR, 1, wx.EXPAND)
actionSizer.Add(verifyPrimers, 1, wx.EXPAND)
sizer = wx.FlexGridSizer(1, 5, 5, 5)
sizer.AddGrowableRow(0)
sizer.AddGrowableCol(0)
sizer.AddGrowableCol(1)
sizer.AddGrowableCol(2)
sizer.AddGrowableCol(3)
sizer.Add(templateSizer, 1, wx.EXPAND)
sizer.Add(fPrimerSizer, 1, wx.EXPAND)
sizer.Add(rPrimerSizer, 1, wx.EXPAND)
sizer.Add(outputSizer, 1, wx.EXPAND)
sizer.Add(actionSizer, 1)
self.SetSizer(sizer)
class PCRSimulatorApp(wx.App):
def OnInit(self):
self.frame = PCRSimulatorFrame()
self.frame.Show()
self.SetTopWindow(self.frame)
return True | tricorder42/pcr-simulator-2015 | interface.py | Python | mit | 13,594 |
# -*- coding: latin-1 -*-
##
## Copyright (c) 2000, 2001, 2002, 2003 Thomas Heller
##
## Permission is hereby granted, free of charge, to any person obtaining
## a copy of this software and associated documentation files (the
## "Software"), to deal in the Software without restriction, including
## without limitation the rights to use, copy, modify, merge, publish,
## distribute, sublicense, and/or sell copies of the Software, and to
## permit persons to whom the Software is furnished to do so, subject to
## the following conditions:
##
## The above copyright notice and this permission notice shall be
## included in all copies or substantial portions of the Software.
##
## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
## EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
## MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
## NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
## LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
## OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
## WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
##
#
# $Id: VersionInfo.py 392 2004-03-12 17:00:21Z theller $
#
# $Log$
# Revision 1.3 2004/01/16 10:45:31 theller
# Move py2exe from the sandbox directory up to the root dir.
#
# Revision 1.3 2003/12/29 13:44:57 theller
# Adapt for Python 2.3.
#
# Revision 1.2 2003/09/18 20:19:57 theller
# Remove a 2.3 warning, but mostly this checkin is to test the brand new
# py2exe-checkins mailing list.
#
# Revision 1.1 2003/08/29 12:30:52 mhammond
# New py2exe now uses the old resource functions :)
#
# Revision 1.1 2002/01/29 09:30:55 theller
# version 0.3.0
#
# Revision 1.2 2002/01/14 19:08:05 theller
# Better (?) Unicode handling.
#
# Revision 1.1 2002/01/07 10:30:32 theller
# Create a version resource.
#
#
import struct
VOS_NT_WINDOWS32 = 0x00040004
VFT_APP = 0x00000001
RT_VERSION = 16
class VersionError(Exception):
pass
def w32_uc(text):
"""convert a string into unicode, then encode it into UTF-16
little endian, ready to use for win32 apis"""
if type(text) is str:
return unicode(text, "unicode-escape").encode("utf-16-le")
return unicode(text).encode("utf-16-le")
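# Illustrative example (added, not part of the original module): w32_uc("abc")
# returns 'a\x00b\x00c\x00' -- each character followed by a zero byte and no
# BOM, which is what the wide-character win32 resource APIs expect.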
class VS_FIXEDFILEINFO:
dwSignature = 0xFEEF04BDL
dwStrucVersion = 0x00010000
dwFileVersionMS = 0x00010000
dwFileVersionLS = 0x00000001
dwProductVersionMS = 0x00010000
dwProductVersionLS = 0x00000001
dwFileFlagsMask = 0x3F
dwFileFlags = 0
dwFileOS = VOS_NT_WINDOWS32
dwFileType = VFT_APP
dwFileSubtype = 0
dwFileDateMS = 0
dwFileDateLS = 0
fmt = "13L"
def __init__(self, version):
import string
version = string.replace(version, ",", ".")
fields = string.split(version + '.0.0.0.0', ".")[:4]
fields = map(string.strip, fields)
try:
self.dwFileVersionMS = int(fields[0]) * 65536 + int(fields[1])
self.dwFileVersionLS = int(fields[2]) * 65536 + int(fields[3])
except ValueError:
raise VersionError, "could not parse version number '%s'" % version
def __str__(self):
return struct.pack(self.fmt,
self.dwSignature,
self.dwStrucVersion,
self.dwFileVersionMS,
self.dwFileVersionLS,
self.dwProductVersionMS,
self.dwProductVersionLS,
self.dwFileFlagsMask,
self.dwFileFlags,
self.dwFileOS,
self.dwFileType,
self.dwFileSubtype,
self.dwFileDateMS,
self.dwFileDateLS)
def align(data):
pad = - len(data) % 4
return data + '\000' * pad
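# Example: a 6-byte payload gets 2 NUL bytes appended so that the next
# structure starts on a 32-bit (DWORD) boundary, as the VERSIONINFO resource
# format requires.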
class VS_STRUCT:
items = ()
def __str__(self):
szKey = w32_uc(self.name)
ulen = len(szKey)+2
value = self.get_value()
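        # Format "h%ss0i": a native short for wType, ulen bytes for the
        # UTF-16 key, and a trailing "0i" that pads the packed data to the
        # alignment of an int (normally 4 bytes) without consuming arguments.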
data = struct.pack("h%ss0i" % ulen, self.wType, szKey) + value
data = align(data)
for item in self.items:
data = data + str(item)
wLength = len(data) + 4 # 4 bytes for wLength and wValueLength
wValueLength = len(value)
return self.pack("hh", wLength, wValueLength, data)
def pack(self, fmt, len, vlen, data):
return struct.pack(fmt, len, vlen) + data
def get_value(self):
return ""
class String(VS_STRUCT):
wType = 1
items = ()
def __init__(self, (name, value)):
self.name = name
if value:
self.value = value + '\000' # strings must be zero terminated
else:
self.value = value
def pack(self, fmt, len, vlen, data):
# ValueLength is measured in WORDS, not in BYTES!
return struct.pack(fmt, len, vlen/2) + data
def get_value(self):
return w32_uc(self.value)
class StringTable(VS_STRUCT):
wType = 1
def __init__(self, name, strings):
self.name = name
self.items = map(String, strings)
class StringFileInfo(VS_STRUCT):
wType = 1
name = "StringFileInfo"
def __init__(self, name, strings):
self.items = [StringTable(name, strings)]
class Var(VS_STRUCT):
# MSDN says:
# If you use the Var structure to list the languages your
# application or DLL supports instead of using multiple version
# resources, use the Value member to contain an array of DWORD
# values indicating the language and code page combinations
# supported by this file. The low-order word of each DWORD must
# contain a Microsoft language identifier, and the high-order word
# must contain the IBM® code page number. Either high-order or
# low-order word can be zero, indicating that the file is language
# or code page independent. If the Var structure is omitted, the
# file will be interpreted as both language and code page
# independent.
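    # Illustrative note (added, not from MSDN): the 0x04B00409 value used at
    # the bottom of this module packs code page 0x04B0 (1200, Unicode) in the
    # high-order word and language ID 0x0409 (U.S. English) in the low-order
    # word, matching the "040904B0" StringTable key.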
wType = 0
name = "Translation"
def __init__(self, value):
self.value = value
def get_value(self):
return struct.pack("l", self.value)
class VarFileInfo(VS_STRUCT):
wType = 1
name = "VarFileInfo"
def __init__(self, *names):
self.items = map(Var, names)
def get_value(self):
return ""
class VS_VERSIONINFO(VS_STRUCT):
wType = 0 # 0: binary data, 1: text data
name = "VS_VERSION_INFO"
def __init__(self, version, items):
self.value = VS_FIXEDFILEINFO(version)
self.items = items
def get_value(self):
return str(self.value)
class Version(object):
def __init__(self,
version,
comments = None,
company_name = None,
file_description = None,
internal_name = None,
legal_copyright = None,
legal_trademarks = None,
original_filename = None,
private_build = None,
product_name = None,
product_version = None,
special_build = None):
self.version = version
strings = []
if comments is not None:
strings.append(("Comments", comments))
if company_name is not None:
strings.append(("CompanyName", company_name))
if file_description is not None:
strings.append(("FileDescription", file_description))
strings.append(("FileVersion", version))
if internal_name is not None:
strings.append(("InternalName", internal_name))
if legal_copyright is not None:
strings.append(("LegalCopyright", legal_copyright))
if legal_trademarks is not None:
strings.append(("LegalTrademarks", legal_trademarks))
if original_filename is not None:
strings.append(("OriginalFilename", original_filename))
if private_build is not None:
strings.append(("PrivateBuild", private_build))
if product_name is not None:
strings.append(("ProductName", product_name))
strings.append(("ProductVersion", product_version or version))
if special_build is not None:
strings.append(("SpecialBuild", special_build))
self.strings = strings
def resource_bytes(self):
vs = VS_VERSIONINFO(self.version,
[StringFileInfo("040904B0",
self.strings),
VarFileInfo(0x04B00409)])
return str(vs)
def test():
import sys
sys.path.append("c:/tmp")
from hexdump import hexdump
version = Version("1, 0, 0, 1",
comments = "ümläut comments",
company_name = "No Company",
file_description = "silly application",
internal_name = "silly",
legal_copyright = u"Copyright © 2003",
## legal_trademark = "",
original_filename = "silly.exe",
private_build = "test build",
product_name = "silly product",
product_version = None,
## special_build = ""
)
hexdump(version.resource_bytes())
if __name__ == '__main__':
import sys
sys.path.append("d:/nbalt/tmp")
from hexdump import hexdump
test()
| skeenp/Roam | libs/py2exe-0.6.9/py2exe/resources/VersionInfo.py | Python | gpl-2.0 | 9,595 |
from captcha.fields import ReCaptchaField
from django import forms
from .models import Url
def get_client_ip(meta):
x_forwarded_for = meta.get('HTTP_X_FORWARDED_FOR')
if x_forwarded_for:
ip = x_forwarded_for.split(',')[0]
else:
ip = meta.get('REMOTE_ADDR')
return ip
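# Example (illustrative addresses): with HTTP_X_FORWARDED_FOR set to
# "203.0.113.7, 10.0.0.1" this returns "203.0.113.7", the left-most (original
# client) address; without the header it falls back to REMOTE_ADDR.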
class UrlForm(forms.ModelForm):
address = forms.URLField()
name = forms.CharField()
captcha = ReCaptchaField()
class Meta:
model = Url
fields = ('address', 'name', 'captcha', 'user_ip')
def __init__(self, *args, **kwargs):
        self.meta = kwargs.pop('meta', None)  # the view passes request.META in as 'meta'; save() reads the client IP from it
super(UrlForm, self).__init__(*args, **kwargs)
def save(self, commit=True):
instance = super(UrlForm, self).save(commit=False)
instance.user_ip = get_client_ip(self.meta)
if commit:
instance.save()
return instance
| andrewnsk/dorokhin.moscow | urlshortener/forms.py | Python | mit | 934 |
###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Tavendo GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
import sys
import json
import urllib
import Cookie
from twisted.internet import reactor
from twisted.python import log
from twisted.web.server import Site
from twisted.web.static import File
import autobahn
from autobahn.util import newid, utcnow
from autobahn.websocket import http
from autobahn.twisted.websocket import WebSocketServerFactory, \
WebSocketServerProtocol
from autobahn.twisted.resource import WebSocketResource
class PersonaServerProtocol(WebSocketServerProtocol):
"""
WebSocket server protocol that tracks WebSocket connections using HTTP cookies,
and authenticates WebSocket connections using Mozilla Persona.
"""
def onConnect(self, request):
# This is called during the initial WebSocket opening handshake.
protocol, headers = None, {}
# our cookie tracking ID
self._cbtid = None
# see if there already is a cookie set ..
if 'cookie' in request.headers:
try:
cookie = Cookie.SimpleCookie()
cookie.load(str(request.headers['cookie']))
except Cookie.CookieError:
pass
else:
if 'cbtid' in cookie:
cbtid = cookie['cbtid'].value
if cbtid in self.factory._cookies:
self._cbtid = cbtid
log.msg("Cookie already set: %s" % self._cbtid)
# if no cookie is set, create a new one ..
if self._cbtid is None:
self._cbtid = newid()
maxAge = 86400
cbtData = {'created': utcnow(),
'authenticated': None,
'maxAge': maxAge,
'connections': set()}
self.factory._cookies[self._cbtid] = cbtData
# do NOT add the "secure" cookie attribute! "secure" refers to the
# scheme of the Web page that triggered the WS, not WS itself!!
##
headers['Set-Cookie'] = 'cbtid=%s;max-age=%d' % (self._cbtid, maxAge)
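            # The resulting header has the form "cbtid=<random id>;max-age=86400".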
log.msg("Setting new cookie: %s" % self._cbtid)
# add this WebSocket connection to the set of connections
# associated with the same cookie
self.factory._cookies[self._cbtid]['connections'].add(self)
# accept the WebSocket connection, speaking subprotocol `protocol`
# and setting HTTP headers `headers`
return (protocol, headers)
def onOpen(self):
# This is called when initial WebSocket opening handshake has
# been completed.
# see if we are authenticated ..
authenticated = self.factory._cookies[self._cbtid]['authenticated']
if not authenticated:
# .. if not, send authentication request
self.sendMessage(json.dumps({'cmd': 'AUTHENTICATION_REQUIRED'}))
else:
# .. if yes, send info on authenticated user
self.sendMessage(json.dumps({'cmd': 'AUTHENTICATED', 'email': authenticated}))
def onClose(self, wasClean, code, reason):
# This is called when WebSocket connection is gone
# remove this connection from list of connections associated with
# same cookie
self.factory._cookies[self._cbtid]['connections'].remove(self)
# if list gets empty, possibly do something ..
if not self.factory._cookies[self._cbtid]['connections']:
log.msg("All connections for {} gone".format(self._cbtid))
def onMessage(self, payload, isBinary):
# This is called when we receive a WebSocket message
if not isBinary:
msg = json.loads(payload)
if msg['cmd'] == 'AUTHENTICATE':
                # The client did its Mozilla Persona authentication thing
# and now wants to verify the authentication and login.
assertion = msg.get('assertion')
audience = msg.get('audience')
# To verify the authentication, we need to send a HTTP/POST
# to Mozilla Persona. When successful, Persona will send us
# back something like:
# {
# "audience": "http://192.168.1.130:8080/",
# "expires": 1393681951257,
# "issuer": "gmail.login.persona.org",
# "email": "tobias.oberstein@gmail.com",
# "status": "okay"
# }
headers = {'Content-Type': 'application/x-www-form-urlencoded'}
body = urllib.urlencode({'audience': audience, 'assertion': assertion})
from twisted.web.client import getPage
d = getPage(url="https://verifier.login.persona.org/verify",
method='POST',
postdata=body,
headers=headers)
log.msg("Authentication request sent.")
def done(res):
res = json.loads(res)
if res['status'] == 'okay':
# Mozilla Persona successfully authenticated the user
# remember the user's email address. this marks the cookie as
# authenticated
self.factory._cookies[self._cbtid]['authenticated'] = res['email']
# inform _all_ WebSocket connections of the successful auth.
msg = json.dumps({'cmd': 'AUTHENTICATED', 'email': res['email']})
for proto in self.factory._cookies[self._cbtid]['connections']:
proto.sendMessage(msg)
log.msg("Authenticated user {}".format(res['email']))
else:
log.msg("Authentication failed: {}".format(res.get('reason')))
self.sendMessage(json.dumps({'cmd': 'AUTHENTICATION_FAILED', 'reason': res.get('reason')}))
self.sendClose()
def error(err):
log.msg("Authentication request failed: {}".format(err.value))
self.sendMessage(json.dumps({'cmd': 'AUTHENTICATION_FAILED', 'reason': str(err.value)}))
self.sendClose()
d.addCallbacks(done, error)
elif msg['cmd'] == 'LOGOUT':
# user wants to logout ..
if self.factory._cookies[self._cbtid]['authenticated']:
self.factory._cookies[self._cbtid]['authenticated'] = False
# inform _all_ WebSocket connections of the logout
msg = json.dumps({'cmd': 'LOGGED_OUT'})
for proto in self.factory._cookies[self._cbtid]['connections']:
proto.sendMessage(msg)
else:
log.msg("unknown command {}".format(msg))
class PersonaServerFactory(WebSocketServerFactory):
"""
WebSocket server factory with cookie/sessions map.
"""
protocol = PersonaServerProtocol
def __init__(self, url):
WebSocketServerFactory.__init__(self, url, debug=False)
# map of cookies
self._cookies = {}
if __name__ == '__main__':
log.startLogging(sys.stdout)
print("Running Autobahn|Python {}".format(autobahn.version))
# our WebSocket server factory
factory = PersonaServerFactory("ws://localhost:8080")
# we serve static files under "/" ..
root = File(".")
# .. and our WebSocket server under "/ws"
resource = WebSocketResource(factory)
root.putChild("ws", resource)
# run both under one Twisted Web Site
site = Site(root)
site.log = lambda _: None # disable any logging
reactor.listenTCP(8080, site)
reactor.run()
| dash-dash/AutobahnPython | examples/twisted/websocket/auth_persona/server.py | Python | mit | 9,063 |
# -*- coding: utf-8 -*-
# __
# /__) _ _ _ _ _/ _
# / ( (- (/ (/ (- _) / _)
# /
"""
requests HTTP library
~~~~~~~~~~~~~~~~~~~~~
Requests is an HTTP library, written in Python, for human beings. Basic GET
usage:
>>> import requests
>>> r = requests.get('https://www.python.org')
>>> r.status_code
200
>>> 'Python is a programming language' in r.content
True
... or POST:
>>> payload = dict(key1='value1', key2='value2')
>>> r = requests.post('http://httpbin.org/post', data=payload)
>>> print(r.text)
{
...
"form": {
"key2": "value2",
"key1": "value1"
},
...
}
The other HTTP methods are supported - see `requests.api`. Full documentation
is at <http://python-requests.org>.
:copyright: (c) 2015 by Kenneth Reitz.
:license: Apache 2.0, see LICENSE for more details.
"""
__title__ = 'requests'
__version__ = '2.6.0'
__build__ = 0x020503
__author__ = 'Kenneth Reitz'
__license__ = 'Apache 2.0'
__copyright__ = 'Copyright 2015 Kenneth Reitz'
# Attempt to enable urllib3's SNI support, if possible
try:
from .packages.urllib3.contrib import pyopenssl
pyopenssl.inject_into_urllib3()
except ImportError:
pass
from . import utils
from .models import Request, Response, PreparedRequest
from .api import request, get, head, post, patch, put, delete, options
from .sessions import session, Session
from .status_codes import codes
from .exceptions import (
RequestException, Timeout, URLRequired,
TooManyRedirects, HTTPError, ConnectionError
)
# Set default logging handler to avoid "No handler found" warnings.
import logging
try: # Python 2.7+
from logging import NullHandler
except ImportError:
class NullHandler(logging.Handler):
def emit(self, record):
pass
logging.getLogger(__name__).addHandler(NullHandler())
| gandarez/wakatime | wakatime/packages/requests/__init__.py | Python | bsd-3-clause | 1,861 |
ACCOUNT_NAME = 'Lego'
| 0--key/lib | portfolio/Python/scrapy/lego/__init__.py | Python | apache-2.0 | 22 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import pytest
import os
collect_ignore = [
"setup.py",
".pythonrc.py"
]
# Also ignore everything that git ignores.
git_ignore = os.path.join(os.path.dirname(__file__), '.gitignore')
collect_ignore += list(filter(None, open(git_ignore).read().split('\n')))
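# filter(None, ...) drops the empty strings produced by blank lines and the
# trailing newline of .gitignore.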
# Run slow tests separately with command-line option, filter tests
# ================================================================
def pytest_addoption(parser):
parser.addoption("--filter", action="store",
help="only run tests with the given mark")
parser.addoption("--slow", action="store_true", help="run slow tests")
parser.addoption("--veryslow", action="store_true",
help="run very slow tests")
def pytest_runtest_setup(item):
filt = item.config.getoption("--filter")
if filt:
if filt not in item.keywords:
pytest.skip("only running tests with the '{}' mark".format(filt))
else:
if 'slow' in item.keywords and not item.config.getoption("--slow"):
pytest.skip("need --slow option to run")
if ('veryslow' in item.keywords and
not item.config.getoption("--veryslow")):
pytest.skip("need --veryslow option to run")
| slipperyhank/pyphi | conftest.py | Python | gpl-3.0 | 1,279 |
#This Source Code Form is subject to the terms of the Mozilla Public
#License, v. 2.0. If a copy of the MPL was not distributed with this
#file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#Created by Alexandre GAUTHIER-FOICHAT on 01/27/2015.
#To import the variable "natron"
import NatronEngine
def addFormats(app):
app.addFormat ("720p 1280x720 1.0")
app.addFormat ("2k_185 2048x1108 1.0")
def afterNodeCreatedCallback(thisNode, app, userEdited):
    #Turn off the Clamp black for new Grade nodes
if thisNode.getPluginID() == "net.sf.openfx.GradePlugin":
thisNode.clampBlack.setDefaultValue(False)
#Set the blur size to (3,3) upon creation
elif thisNode.getPluginID() == "net.sf.cimg.CImgBlur":
thisNode.size.setDefaultValue(3,0)
thisNode.size.setDefaultValue(3,1)
#This will set the After Node Created callback on the project to tweak default values for parameters
def setNodeDefaults(app):
app.afterNodeCreated.set("afterNodeCreatedCallback")
def myCallback(app):
addFormats(app)
setNodeDefaults(app)
#Set the After Project Created/Loaded callbacks
NatronEngine.natron.setOnProjectCreatedCallback("myCallback")
NatronEngine.natron.setOnProjectLoadedCallback("myCallback")
#Add this path to the Natron search paths so that our PyPlug can be found.
#Note that we could also set this from the NATRON_PLUGIN_PATH environment variable
#or even in the Preferences panel, Plug-ins tab, with the "Pyplugs search path"
NatronEngine.natron.appendToNatronPath("/Library/Natron/PyPlugs")
| AxelAF/Natron | Documentation/source/init.py | Python | gpl-2.0 | 1,571 |
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2006, 2007, 2008, 2010, 2011, 2013 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""BibAuthority Regression Test Suite."""
__revision__ = "$Id$"
from invenio.legacy.bibauthority.config import \
CFG_BIBAUTHORITY_RECORD_CONTROL_NUMBER_FIELD, \
CFG_BIBAUTHORITY_TYPE_NAMES, \
CFG_BIBAUTHORITY_PREFIX_SEP
from invenio.testsuite import make_test_suite, run_test_suite, \
InvenioTestCase
from invenio.base.wrappers import lazy_import
is_authority_record = lazy_import('invenio.legacy.bibauthority.engine:is_authority_record')
get_dependent_records_for_control_no = lazy_import('invenio.legacy.bibauthority.engine:get_dependent_records_for_control_no')
get_dependent_records_for_recID = lazy_import('invenio.legacy.bibauthority.engine:get_dependent_records_for_recID')
guess_authority_types = lazy_import('invenio.legacy.bibauthority.engine:guess_authority_types')
get_low_level_recIDs_from_control_no = lazy_import('invenio.legacy.bibauthority.engine:get_low_level_recIDs_from_control_no')
get_control_nos_from_recID = lazy_import('invenio.legacy.bibauthority.engine:get_control_nos_from_recID')
get_index_strings_by_control_no = lazy_import('invenio.legacy.bibauthority.engine:get_index_strings_by_control_no')
guess_main_name_from_authority_recID = lazy_import('invenio.legacy.bibauthority.engine:guess_main_name_from_authority_recID')
get_fieldvalues = lazy_import('invenio.legacy.bibrecord:get_fieldvalues')
class BibAuthorityEngineTest(InvenioTestCase):
    """Regression tests for the BibAuthority engine helper functions."""
def test_bibauthority_is_authority_record(self):
"""bibauthority - test is_authority_record()"""
self.assertFalse(is_authority_record(1))
self.assertTrue(is_authority_record(118))
def test_bibauthority_get_dependent_records_for_control_no(self):
"""bibauthority - test get_dependent_records_for_control_no()"""
control_no_field = CFG_BIBAUTHORITY_RECORD_CONTROL_NUMBER_FIELD
control_nos = get_fieldvalues(118, control_no_field)
count = 0
for control_no in control_nos:
count += len(get_dependent_records_for_control_no(control_no))
self.assertTrue(count)
def test_bibauthority_get_dependent_records_for_recID(self):
"""bibauthority - test get_dependent_records_for_recID()"""
self.assertTrue(len(get_dependent_records_for_recID(118)))
def test_bibauthority_guess_authority_types(self):
"""bibauthority - test guess_authority_types()"""
_type = CFG_BIBAUTHORITY_TYPE_NAMES['AUTHOR']
self.assertEqual(guess_authority_types(118), [_type])
def test_bibauthority_get_low_level_recIDs(self):
"""bibauthority - test get_low_level_recIDs_from_control_no()"""
_type = CFG_BIBAUTHORITY_TYPE_NAMES['INSTITUTION']
control_no = _type + CFG_BIBAUTHORITY_PREFIX_SEP + "(SzGeCERN)iii0002"
recIDs = [121]
self.assertEqual(get_low_level_recIDs_from_control_no(control_no),
recIDs)
def test_bibauthority_get_control_nos_from_recID(self):
"""bibauthority - test get_control_nos_from_recID()"""
self.assertTrue(len(get_control_nos_from_recID(118)))
def test_bibauthority_guess_main_name(self):
"""bibauthority - test guess_main_name_from_authority_recID()"""
recID = 118
main_name = 'Ellis, John'
self.assertEqual(guess_main_name_from_authority_recID(recID),
main_name)
def test_authority_record_string_by_control_no(self):
"""bibauthority - simple test of get_index_strings_by_control_no()"""
# vars
_type = CFG_BIBAUTHORITY_TYPE_NAMES['AUTHOR']
control_no = _type + CFG_BIBAUTHORITY_PREFIX_SEP + '(SzGeCERN)aaa0005'
string = 'Ellis, Jonathan Richard'
# run test
self.assertTrue(string in get_index_strings_by_control_no(control_no))
TEST_SUITE = make_test_suite(
BibAuthorityEngineTest,
)
if __name__ == "__main__":
run_test_suite(TEST_SUITE, warn_user=True)
| jirikuncar/invenio-demosite | invenio_demosite/testsuite/regression/test_bibauthority.py | Python | gpl-2.0 | 4,781 |
"""
Volume driver exposing Ceph RBD volumes over iSCSI.
"""
import time
from cinder.i18n import _
from cinder import exception
from cinder.openstack.common import log as logging
from cinder.volume import driver
from oslo.config import cfg
import cinder.context
import io
import subprocess
import math
from cinder.openstack.common import units
from cinder.openstack.common import strutils
try:
import rados
import rbd
except ImportError:
rados = None
rbd = None
LOG = logging.getLogger(__name__)
rbd_opts = [
cfg.StrOpt('rbd_pool',
default='rbd',
help='The RADOS pool where rbd volumes are stored'),
cfg.StrOpt('rbd_user',
default=None,
help='The RADOS client name for accessing rbd volumes '
'- only set when using cephx authentication'),
cfg.StrOpt('rbd_ceph_conf',
default='', # default determined by librados
help='Path to the ceph configuration file'),
cfg.BoolOpt('rbd_flatten_volume_from_snapshot',
default=False,
help='Flatten volumes created from snapshots to remove '
'dependency from volume to snapshot'),
cfg.StrOpt('rbd_secret_uuid',
default=None,
help='The libvirt uuid of the secret for the rbd_user '
'volumes'),
cfg.StrOpt('volume_tmp_dir',
default=None,
help='Directory where temporary image files are stored '
'when the volume driver does not write them directly '
'to the volume.'),
cfg.IntOpt('rbd_max_clone_depth',
default=5,
help='Maximum number of nested volume clones that are '
'taken before a flatten occurs. Set to 0 to disable '
'cloning.'),
cfg.IntOpt('rbd_store_chunk_size', default=4,
help=_('Volumes will be chunked into objects of this size '
'(in megabytes).')),
cfg.IntOpt('rados_connect_timeout', default=-1,
help=_('Timeout value (in seconds) used when connecting to '
'ceph cluster. If value < 0, no timeout is set and '
'default librados value is used.'))
]
ceph_iscsi_opts = [
cfg.StrOpt('iscsi_server_ip',
default='',
help=''),
cfg.StrOpt('iscsi_server_user',
default='',
help=''),
cfg.StrOpt('iscsi_server_pem',
default='',
help='')
]
CONF = cfg.CONF
CONF.register_opts(rbd_opts)
CONF.register_opts(ceph_iscsi_opts)
IMAGE_SCAN_ATTEMPTS_DEFAULT = 5
class RBDImageMetadata(object):
"""RBD image metadata to be used with RBDImageIOWrapper."""
def __init__(self, image, pool, user, conf):
self.image = image
self.pool = strutils.safe_encode(pool)
self.user = strutils.safe_encode(user)
self.conf = strutils.safe_encode(conf)
class RBDImageIOWrapper(io.RawIOBase):
"""Enables LibRBD.Image objects to be treated as Python IO objects.
Calling unimplemented interfaces will raise IOError.
"""
def __init__(self, rbd_meta):
super(RBDImageIOWrapper, self).__init__()
self._rbd_meta = rbd_meta
self._offset = 0
def _inc_offset(self, length):
self._offset += length
@property
def rbd_image(self):
return self._rbd_meta.image
@property
def rbd_user(self):
return self._rbd_meta.user
@property
def rbd_pool(self):
return self._rbd_meta.pool
@property
def rbd_conf(self):
return self._rbd_meta.conf
def read(self, length=None):
offset = self._offset
total = self._rbd_meta.image.size()
# NOTE(dosaboy): posix files do not barf if you read beyond their
# length (they just return nothing) but rbd images do so we need to
# return empty string if we have reached the end of the image.
if (offset >= total):
return ''
if length is None:
length = total
if (offset + length) > total:
length = total - offset
self._inc_offset(length)
return self._rbd_meta.image.read(int(offset), int(length))
def write(self, data):
self._rbd_meta.image.write(data, self._offset)
self._inc_offset(len(data))
def seekable(self):
return True
def seek(self, offset, whence=0):
if whence == 0:
new_offset = offset
elif whence == 1:
new_offset = self._offset + offset
elif whence == 2:
new_offset = self._rbd_meta.image.size()
new_offset += offset
else:
raise IOError(_("Invalid argument - whence=%s not supported") %
(whence))
if (new_offset < 0):
raise IOError(_("Invalid argument"))
self._offset = new_offset
def tell(self):
return self._offset
def flush(self):
try:
self._rbd_meta.image.flush()
except AttributeError:
LOG.warning(_("flush() not supported in this version of librbd"))
def fileno(self):
"""RBD does not have support for fileno() so we raise IOError.
        Raising IOError is the recommended way to notify the caller that the
        interface is not supported - see
        http://docs.python.org/2/library/io.html#io.IOBase
"""
raise IOError(_("fileno() not supported by RBD()"))
# NOTE(dosaboy): if IO object is not closed explicitly, Python auto closes
# it which, if this is not overridden, calls flush() prior to close which
# in this case is unwanted since the rbd image may have been closed prior
# to the autoclean - currently triggering a segfault in librbd.
def close(self):
pass
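# Illustrative usage sketch (added; names and paths are placeholders, not part
# of the original module): the wrapper turns an open librbd image into a
# file-like object that backup services can read from or write to.
#
#   meta = RBDImageMetadata(image, 'rbd', 'cinder', '/etc/ceph/ceph.conf')
#   rbd_fd = RBDImageIOWrapper(meta)
#   chunk = rbd_fd.read(4 * 1024 * 1024)   # read 4 MiB starting at offset 0
#   rbd_fd.seek(0)                         # rewind, like a regular file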
class RBDVolumeProxy(object):
"""Context manager for dealing with an existing rbd volume.
This handles connecting to rados and opening an ioctx automatically, and
otherwise acts like a librbd Image object.
The underlying librados client and ioctx can be accessed as the attributes
'client' and 'ioctx'.
"""
def __init__(self, driver, name, pool=None, snapshot=None,
read_only=False):
client, ioctx = driver._connect_to_rados(pool)
if snapshot is not None:
snapshot = strutils.safe_encode(snapshot)
try:
self.volume = driver.rbd.Image(ioctx, strutils.safe_encode(name),
snapshot=snapshot,
read_only=read_only)
except driver.rbd.Error:
LOG.exception(_("error opening rbd image %s"), name)
driver._disconnect_from_rados(client, ioctx)
raise
self.driver = driver
self.client = client
self.ioctx = ioctx
def __enter__(self):
return self
def __exit__(self, type_, value, traceback):
try:
self.volume.close()
finally:
self.driver._disconnect_from_rados(self.client, self.ioctx)
def __getattr__(self, attrib):
return getattr(self.volume, attrib)
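# Illustrative usage sketch (added; the volume name is a placeholder):
#
#   with RBDVolumeProxy(self, 'volume-1234') as vol:
#       size_bytes = vol.size()   # attribute access is proxied to rbd.Image
#
# The rados connection is opened in __init__ and torn down in __exit__, so the
# caller never has to manage the client or ioctx explicitly.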
class RADOSClient(object):
"""Context manager to simplify error handling for connecting to ceph."""
def __init__(self, driver, pool=None):
self.driver = driver
self.cluster, self.ioctx = driver._connect_to_rados(pool)
def __enter__(self):
return self
def __exit__(self, type_, value, traceback):
self.driver._disconnect_from_rados(self.cluster, self.ioctx)
class CephIscsiDriver(driver.ISCSIDriver):
VERSION = "1.0"
def __init__(self, *args, **kwargs):
super(CephIscsiDriver, self).__init__(*args, **kwargs)
self.configuration.append_config_values(rbd_opts)
self.configuration.append_config_values(ceph_iscsi_opts)
self.rados = kwargs.get('rados', rados)
self.rbd = kwargs.get('rbd', rbd)
self._stats = {}
self.image_scan_attempts = IMAGE_SCAN_ATTEMPTS_DEFAULT
for attr in ['rbd_user', 'rbd_ceph_conf', 'rbd_pool']:
val = getattr(self.configuration, attr)
if val is not None:
setattr(self.configuration, attr, strutils.safe_encode(val))
def _connect_to_rados(self, pool=None):
LOG.debug("opening connection to ceph cluster (timeout=%s)." %
(self.configuration.rados_connect_timeout))
client = self.rados.Rados(rados_id=self.configuration.rbd_user,
conffile=self.configuration.rbd_ceph_conf)
if pool is not None:
pool = strutils.safe_encode(pool)
else:
pool = self.configuration.rbd_pool
try:
if self.configuration.rados_connect_timeout >= 0:
client.connect(timeout=
self.configuration.rados_connect_timeout)
else:
client.connect()
ioctx = client.open_ioctx(pool)
return client, ioctx
except self.rados.Error as exc:
LOG.error("error connecting to ceph cluster.")
# shutdown cannot raise an exception
client.shutdown()
raise exception.VolumeBackendAPIException(data=str(exc))
def _delete_backup_snaps(self, rbd_image):
backup_snaps = self._get_backup_snaps(rbd_image)
if backup_snaps:
for snap in backup_snaps:
rbd_image.remove_snap(snap['name'])
else:
LOG.debug("volume has no backup snaps")
def _get_backup_snaps(self, rbd_image):
"""Get list of any backup snapshots that exist on this volume.
There should only ever be one but accept all since they need to be
deleted before the volume can be.
"""
# NOTE(dosaboy): we do the import here otherwise we get import conflict
# issues between the rbd driver and the ceph backup driver. These
# issues only seem to occur when NOT using them together and are
# triggered when the ceph backup driver imports the rbd volume driver.
from cinder.backup.drivers import ceph
return ceph.CephBackupDriver.get_backup_snaps(rbd_image)
def _get_clone_info(self, volume, volume_name, snap=None):
"""If volume is a clone, return its parent info.
Returns a tuple of (pool, parent, snap). A snapshot may optionally be
provided for the case where a cloned volume has been flattened but it's
snapshot still depends on the parent.
"""
try:
snap and volume.set_snap(snap)
pool, parent, parent_snap = tuple(volume.parent_info())
snap and volume.set_snap(None)
# Strip the tag off the end of the volume name since it will not be
# in the snap name.
if volume_name.endswith('.deleted'):
volume_name = volume_name[:-len('.deleted')]
# Now check the snap name matches.
if parent_snap == "%s.clone_snap" % volume_name:
return pool, parent, parent_snap
except self.rbd.ImageNotFound:
LOG.debug("volume %s is not a clone" % volume_name)
volume.set_snap(None)
return (None, None, None)
def _disconnect_from_rados(self, client, ioctx):
# closing an ioctx cannot raise an exception
ioctx.close()
client.shutdown()
def _delete_clone_parent_refs(self, client, parent_name, parent_snap):
"""Walk back up the clone chain and delete references.
Deletes references i.e. deleted parent volumes and snapshots.
"""
parent_rbd = self.rbd.Image(client.ioctx, parent_name)
parent_has_snaps = False
try:
# Check for grandparent
_pool, g_parent, g_parent_snap = self._get_clone_info(parent_rbd,
parent_name,
parent_snap)
LOG.debug("deleting parent snapshot %s" % (parent_snap))
parent_rbd.unprotect_snap(parent_snap)
parent_rbd.remove_snap(parent_snap)
parent_has_snaps = bool(list(parent_rbd.list_snaps()))
finally:
parent_rbd.close()
def _update_volume_stats(self):
stats = {
'vendor_name': 'Open Source',
'driver_version': self.VERSION,
'storage_protocol': 'iSCSI',
'total_capacity_gb': 'unknown',
'free_capacity_gb': 'unknown',
'reserved_percentage': 0,
}
backend_name = self.configuration.safe_get('volume_backend_name')
stats['volume_backend_name'] = backend_name
try:
with RADOSClient(self) as client:
new_stats = client.cluster.get_cluster_stats()
stats['total_capacity_gb'] = new_stats['kb'] / units.Mi
stats['free_capacity_gb'] = new_stats['kb_avail'] / units.Mi
except self.rados.Error:
# just log and return unknown capacities
LOG.exception(_('error refreshing volume stats'))
self._stats = stats
def _supports_layering(self):
return hasattr(self.rbd, 'RBD_FEATURE_LAYERING')
def backup_volume(self, context, backup, backup_service):
"""Create a new backup from an existing volume."""
backup_des = backup.get('display_description', None)
if backup_des.find('cross_az') >= 0:
return
volume = self.db.volume_get(context, backup['volume_id'])
with RBDVolumeProxy(self, volume['name'],
self.configuration.rbd_pool) as rbd_image:
rbd_meta = RBDImageMetadata(rbd_image, self.configuration.rbd_pool,
self.configuration.rbd_user,
self.configuration.rbd_ceph_conf)
rbd_fd = RBDImageIOWrapper(rbd_meta)
backup_service.backup(backup, rbd_fd)
LOG.debug("volume backup complete.")
def restore_backup(self, context, backup, volume, backup_service):
"""Restore an existing backup to a new or existing volume."""
backup_des = backup.get('display_description', None)
if backup_des and 'cross_az' in backup_des:
res = backup_des.split(':')
backup['volume_id'] = res[-1]
backup['id'] = res[-2]
LOG.info(_("ceph iscsi driver, got backup_id:%(backup_id)s,"
"%(source_volume_id)s, backup_des:%(backup_des)s") %
{'backup_id': backup['id'],
'source_volume_id': backup['volume_id'],
'backup_des': backup_des})
with RBDVolumeProxy(self, volume['name'],
self.configuration.rbd_pool) as rbd_image:
rbd_meta = RBDImageMetadata(rbd_image, self.configuration.rbd_pool,
self.configuration.rbd_user,
self.configuration.rbd_ceph_conf)
rbd_fd = RBDImageIOWrapper(rbd_meta)
backup_service.restore(backup, volume['id'], rbd_fd)
LOG.debug("volume restore complete.")
def do_setup(self, context):
"""Instantiate common class and log in storage system."""
pass
def check_for_setup_error(self):
"""Check configuration file."""
pass
def create_volume(self, volume):
"""Create a volume."""
ctx = cinder.context.get_admin_context()
if ctx:
volume_metadata = self.db.volume_metadata_get(ctx, volume['id'])
if volume_metadata:
identify_flag = volume_metadata.get('cross_az', None)
if identify_flag:
model_update = {'provider_location': 'fake_flag'}
return model_update
if int(volume['size']) == 0:
size = 100 * units.Mi
else:
size = int(volume['size']) * units.Gi
LOG.debug("creating volume '%s'" % (volume['name']))
old_format = True
features = 0
chunk_size = CONF.rbd_store_chunk_size * units.Mi
order = int(math.log(chunk_size, 2))
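        # With the default rbd_store_chunk_size of 4, objects are 4 MiB and
        # the resulting RBD order is 22 (2**22 bytes).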
if self._supports_layering():
old_format = False
features = self.rbd.RBD_FEATURE_LAYERING
with RADOSClient(self) as client:
self.rbd.RBD().create(client.ioctx,
strutils.safe_encode(volume['name']),
size,
order,
old_format=old_format,
features=features)
if self.image_found(volume['name']):
command = "ssh -i %s %s@%s sudo bash /home/%s/ceph_iscsi.sh %s %s %s" % \
(self.configuration.iscsi_server_pem,
self.configuration.iscsi_server_user,
self.configuration.iscsi_server_ip,
self.configuration.iscsi_server_user,
'create', self.configuration.rbd_pool, volume['name'])
result = subprocess.call([command], shell=True)
if result != 0:
LOG.debug("create iscsi target failed '%s'" % (volume['id']))
else:
LOG.debug("can not find rbd image,create failed")
def create_volume_from_snapshot(self, volume, snapshot):
"""Create a volume from a snapshot."""
pass
def create_cloned_volume(self, volume, src_vref):
"""Create a clone of the specified volume."""
pass
def extend_volume(self, volume, new_size):
"""Extend a volume."""
pass
def delete_volume(self, volume):
"""Delete a volume."""
"""Deletes a logical volume."""
# NOTE(dosaboy): this was broken by commit cbe1d5f. Ensure names are
# utf-8 otherwise librbd will barf.
if 'fake_flag' == volume.get('provider_location', None):
return
volume_name = strutils.safe_encode(volume['name'])
with RADOSClient(self) as client:
try:
rbd_image = self.rbd.Image(client.ioctx, volume_name)
except self.rbd.ImageNotFound:
LOG.info(_("volume %s no longer exists in backend")
% (volume_name))
return
clone_snap = None
parent = None
# Ensure any backup snapshots are deleted
self._delete_backup_snaps(rbd_image)
# If the volume has non-clone snapshots this delete is expected to
# raise VolumeIsBusy so do so straight away.
try:
snaps = rbd_image.list_snaps()
for snap in snaps:
if snap['name'].endswith('.clone_snap'):
LOG.debug("volume has clone snapshot(s)")
# We grab one of these and use it when fetching parent
# info in case the volume has been flattened.
clone_snap = snap['name']
break
raise exception.VolumeIsBusy(volume_name=volume_name)
# Determine if this volume is itself a clone
pool, parent, parent_snap = self._get_clone_info(rbd_image,
volume_name,
clone_snap)
finally:
rbd_image.close()
if clone_snap is None:
LOG.debug("deleting rbd volume %s" % (volume_name))
try:
self.rbd.RBD().remove(client.ioctx, volume_name)
except self.rbd.ImageBusy:
msg = (_("ImageBusy error raised while deleting rbd "
"volume. This may have been caused by a "
"connection from a client that has crashed and, "
"if so, may be resolved by retrying the delete "
"after 30 seconds has elapsed."))
LOG.warn(msg)
                    # Now raise this so that the volume stays available and
                    # the delete can be retried.
raise exception.VolumeIsBusy(msg, volume_name=volume_name)
# If it is a clone, walk back up the parent chain deleting
# references.
if parent:
LOG.debug("volume is a clone so cleaning references")
self._delete_clone_parent_refs(client, parent, parent_snap)
else:
# If the volume has copy-on-write clones we will not be able to
# delete it. Instead we will keep it as a silent volume which
                # will be deleted when its snapshot and clones are deleted.
new_name = "%s.deleted" % (volume_name)
self.rbd.RBD().rename(client.ioctx, volume_name, new_name)
command = "ssh -i %s %s@%s sudo bash /home/%s/ceph_iscsi.sh %s %s %s" % \
(self.configuration.iscsi_server_pem,
self.configuration.iscsi_server_user,
self.configuration.iscsi_server_ip,
self.configuration.iscsi_server_user,
'delete', self.configuration.rbd_pool, volume['name'])
result = subprocess.call([command], shell=True)
if result != 0:
LOG.debug("delete iscsi target failed '%s'" % (volume['id']))
def create_snapshot(self, snapshot):
"""Create a snapshot."""
pass
def delete_snapshot(self, snapshot):
"""Delete a snapshot."""
pass
def get_volume_stats(self, refresh=False):
"""Get volume stats."""
self._update_volume_stats()
return self._stats
def image_found(self, image_name):
try_count = 0
while try_count < self.image_scan_attempts:
image_list = self.rbd.RBD().list(RADOSClient(self).ioctx)
for image in image_list:
if image == image_name:
return True
try_count = try_count + 1
time.sleep(try_count ** 2)
return False
def initialize_connection(self, volume, connector):
"""Map a volume to a host."""
        LOG.info("attach volume: %s; volume_name: %s " % (volume['id'], volume['name']))
properties = {}
properties['target_discovered'] = False
properties['target_portal'] = ('%s:%s' % (self.configuration.iscsi_server_ip, '3260'))
properties['target_iqn'] = 'iqn.2015-08.rbdstore.' + volume['name'] + '.com:iscsi'
properties['volume_id'] = volume['id']
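        # Example (volume name is illustrative): a volume named 'volume-1234'
        # is exposed as 'iqn.2015-08.rbdstore.volume-1234.com:iscsi' on port
        # 3260 of the configured iscsi_server_ip.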
LOG.info("initialize_connection_iscsi success. Return data: %s."
% properties)
return {'driver_volume_type': 'iscsi', 'data': properties}
def terminate_connection(self, volume, connector, **kwargs):
pass
def create_export(self, context, volume):
"""Export the volume."""
pass
def ensure_export(self, context, volume):
"""Synchronously recreate an export for a volume."""
pass
def remove_export(self, context, volume):
"""Remove an export for a volume."""
pass
| Hybrid-Cloud/badam | fs_patches_of_hybrid_cloud/cherry_for_111T/cinder_cascading_proxy_normal/cinder/volume/drivers/cephiscsi/cephiscsi.py | Python | apache-2.0 | 23,563 |
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2021 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
##############################################################################
from django.test import SimpleTestCase
from base.utils.string import unaccent
class TestUnaccent(SimpleTestCase):
def setUp(self):
pass
def test_when_uppercased_character_should_lowercased_them(self):
self.assert_unaccent_equal(
"Hello",
"hello"
)
def test_when_special_characters_should_remove_them_from_string(self):
self.assert_unaccent_equal(
"Hello-World Haha'Test",
"helloworldhahatest"
)
def test_when_accent_character_should_replace_them_to_unaccent_character(self):
self.assert_unaccent_equal(
"François",
"francois"
)
self.assert_unaccent_equal(
"Déflander",
"deflander"
)
def assert_unaccent_equal(self, s, expected):
result = unaccent(s)
self.assertEqual(expected, result)
| uclouvain/osis | base/tests/utils/test_string.py | Python | agpl-3.0 | 2,098 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test configs for unique."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
from tensorflow.lite.testing.zip_test_utils import create_tensor_data
from tensorflow.lite.testing.zip_test_utils import make_zip_of_tests
from tensorflow.lite.testing.zip_test_utils import register_make_test_function
@register_make_test_function()
def make_unique_tests(options):
"""Make a set of tests for Unique op."""
test_parameters = [{
"input_shape": [[1]],
"index_type": [tf.int32, tf.int64, None],
"input_values": [3]
}, {
"input_shape": [[5]],
"index_type": [tf.int32, tf.int64],
"input_values": [[3, 2, 1, 2, 3]]
}, {
"input_shape": [[7]],
"index_type": [tf.int32, tf.int64],
"input_values": [[1, 1, 1, 1, 1, 1, 1]]
}, {
"input_shape": [[5]],
"index_type": [tf.int32, tf.int64],
"input_values": [[3, 2, 1, 0, -1]]
}]
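  # Each dict above describes one family of cases; make_zip_of_tests expands
  # the option lists into one test per combination of values.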
def build_graph(parameters):
"""Build the graph for the test case."""
input_tensor = tf.compat.v1.placeholder(
dtype=tf.int32, name="input", shape=parameters["input_shape"])
if parameters["index_type"] is None:
output = tf.unique(input_tensor)
else:
output = tf.unique(input_tensor, parameters["index_type"])
return [input_tensor], output
def build_inputs(parameters, sess, inputs, outputs):
input_values = [create_tensor_data(tf.int32, parameters["input_shape"])]
return input_values, sess.run(
outputs, feed_dict=dict(zip(inputs, input_values)))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
| karllessard/tensorflow | tensorflow/lite/testing/op_tests/unique.py | Python | apache-2.0 | 2,365 |
from mininet.net import Mininet
from mininet.node import Node, Switch
from mininet.link import Link, Intf
from mininet.log import setLogLevel, info
from mininet.cli import CLI
import mininet.ns3
from mininet.ns3 import WIFISegment
import ns.core
import ns.wifi
if __name__ == '__main__':
setLogLevel( 'info' )
info( '*** ns-3 network demo\n' )
net = Mininet()
info( '*** Creating Network\n' )
h0 = net.addHost( 'h0' )
h1 = net.addHost( 'h1' )
h2 = net.addHost( 'h2' )
wifi = WIFISegment()
wifi.machelper.SetType ( "ns3::AdhocWifiMac" )
# set datarate for node h0
wifi.wifihelper.SetRemoteStationManager( "ns3::ConstantRateWifiManager",
"DataMode", ns.core.StringValue( "OfdmRate54Mbps" ) )
wifi.add( h0 )
# set datarate for node h1
wifi.wifihelper.SetRemoteStationManager( "ns3::ConstantRateWifiManager",
"DataMode", ns.core.StringValue( "OfdmRate6Mbps" ) )
wifi.add( h1 )
# set datarate for node h2
wifi.wifihelper.SetRemoteStationManager( "ns3::ConstantRateWifiManager",
"DataMode", ns.core.StringValue( "OfdmRate54Mbps" ) )
wifi.add( h2 )
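    # h0 and h2 transmit at 54 Mbps while h1 is pinned to 6 Mbps, so the iperf
    # runs below illustrate the 802.11 rate anomaly: one slow sender on the
    # shared channel drags down the throughput of the fast pair.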
info( '*** Configuring hosts\n' )
h0.setIP( '192.168.123.1/24' )
h1.setIP( '192.168.123.2/24')
h2.setIP( '192.168.123.3/24')
mininet.ns3.start()
info( '*** Testing network connectivity\n' )
net.pingAll()
info( '*** Starting TCP iperf server on h2\n' )
h2.sendCmd( "iperf -s" )
info( '*** Testing bandwidth between h0 and h2 while h1 is not transmitting\n' )
h0.cmdPrint( "iperf -c 192.168.123.3" )
info( '*** Testing bandwidth between h0 and h2 while h1 is transmitting at 6Mbps\n' )
h1.sendCmd( "iperf -c 192.168.123.3" )
h0.cmdPrint( "iperf -c 192.168.123.3" )
CLI(net)
| pichuang/OpenNet | mininet-patch/examples/ns3/wifi-rate-anomaly.py | Python | gpl-2.0 | 1,903 |
##
# \namespace reviewTool.gui.delegates
#
# \remarks Contains reusable delegates for the Review Tool system
#
# \author Dr. D Studios
# \date 08/03/11
#
# Copyright 2008-2012 Dr D Studios Pty Limited (ACN 127 184 954) (Dr. D Studios)
#
# This file is part of anim-studio-tools.
#
# anim-studio-tools is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# anim-studio-tools is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with anim-studio-tools. If not, see <http://www.gnu.org/licenses/>.
| xxxIsaacPeralxxx/anim-studio-tools | review_tool/sources/reviewTool/gui/delegates/__init__.py | Python | gpl-3.0 | 970 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-04-16 00:51
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Configure',
fields=[
('key', models.CharField(max_length=20, primary_key=True, serialize=False)),
('value', models.CharField(default='', max_length=20)),
],
),
]
| 827992983/yue | admin/migrations/0001_initial.py | Python | gpl-3.0 | 543 |
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from kolibri.core.webpack.hooks import WebpackInclusionHook
class DeviceManagementSyncHook(WebpackInclusionHook):
"""
    Inherit a hook defining assets to be loaded synchronously in the template
"""
class Meta:
abstract = True
| lyw07/kolibri | kolibri/plugins/device_management/hooks.py | Python | mit | 368 |
# -*- coding: utf-8 -*-
#
# This file is part of reprints released under the AGPLv3 license.
# See the NOTICE for more information.
# future compatibility
from __future__ import absolute_import
# standard
import unittest
# local
from reprints import response
ONExONE_PNG = b'\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00\x00\x01\x08\x06\x00' + \
b'\x00\x00\x1f\x15\xc4\x89\x00\x00\x00\rIDATx\x9cc````\x00\x00\x00\x05\x00\x01' + \
b'\xa5\xf6E@\x00\x00\x00\x00IEND\xaeB`\x82'
class TestResponse(unittest.TestCase):
def test_get_data(self):
resp = response.SuccessResponse(data=ONExONE_PNG, format='PNG')
self.assertEqual(resp.get_data(), ONExONE_PNG)
resp = response.BadRequestResponse()
self.assertEqual(resp.get_data(), '400 BAD REQUEST')
resp = response.NotFoundResponse()
self.assertEqual(resp.get_data(), '404 NOT FOUND')
resp = response.UnsupportedMediaTypeResponse()
self.assertEqual(resp.get_data(), '415 UNSUPPORTED MEDIA TYPE')
resp = response.FatalResponse()
self.assertEqual(resp.get_data(), '500 INTERNAL SERVER ERROR')
def test_mime_type(self):
resp = response.SuccessResponse(data=ONExONE_PNG, format='PNG')
self.assertEqual(resp.mime_type, 'image/png')
resp = response.BadRequestResponse()
self.assertEqual(resp.mime_type, 'text/plain')
resp = response.NotFoundResponse()
self.assertEqual(resp.mime_type, 'text/plain')
resp = response.UnsupportedMediaTypeResponse()
self.assertEqual(resp.mime_type, 'text/plain')
resp = response.FatalResponse()
self.assertEqual(resp.mime_type, 'text/plain')
def test_get_headers(self):
resp = response.SuccessResponse(data=ONExONE_PNG, format='PNG')
self.assertEqual(resp.get_headers(),
[('Content-type', 'image/png'), ('Content-length', '70')])
resp = response.BadRequestResponse()
self.assertEqual(resp.get_headers(),
[('Content-type', 'text/plain'), ('Content-length', '15')])
resp = response.NotFoundResponse()
self.assertEqual(resp.get_headers(),
[('Content-type', 'text/plain'), ('Content-length', '13')])
resp = response.UnsupportedMediaTypeResponse()
self.assertEqual(resp.get_headers(),
[('Content-type', 'text/plain'), ('Content-length', '26')])
resp = response.FatalResponse()
self.assertEqual(resp.get_headers(),
[('Content-type', 'text/plain'), ('Content-length', '25'),
('X-Error-Detail', 'The server received a fatal error.')])
def test_get_status(self):
resp = response.SuccessResponse(data=ONExONE_PNG, format='PNG')
self.assertEqual(resp.get_status(), '200 OK')
resp = response.BadRequestResponse()
self.assertEqual(resp.get_status(), '400 BAD REQUEST')
resp = response.NotFoundResponse()
self.assertEqual(resp.get_status(), '404 NOT FOUND')
resp = response.UnsupportedMediaTypeResponse()
self.assertEqual(resp.get_status(), '415 UNSUPPORTED MEDIA TYPE')
resp = response.FatalResponse()
self.assertEqual(resp.get_status(), '500 INTERNAL SERVER ERROR')
if __name__ == "__main__":
unittest.main()
| gaelenh/python-reprints | reprints/response_test.py | Python | agpl-3.0 | 3,420 |
# Copyright (c) 2012-2015 Netforce Co. Ltd.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
from netforce.model import Model, fields, get_model
from datetime import *
from dateutil.relativedelta import *
from netforce.access import get_active_company
def deduct_period(date, num, period):
d = datetime.strptime(date, "%Y-%m-%d")
if period == "month":
if (d + timedelta(days=1)).month != d.month:
d -= relativedelta(months=num, day=31)
else:
d -= relativedelta(months=num)
elif period == "year":
d -= relativedelta(years=num)
else:
raise Exception("Invalid period")
return d.strftime("%Y-%m-%d")
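# Illustrative examples (dates are made up):
#   deduct_period("2015-03-31", 1, "month") -> "2015-02-28" (snaps to month end)
#   deduct_period("2015-03-15", 1, "month") -> "2015-02-15"
#   deduct_period("2016-02-29", 1, "year") -> "2015-02-28"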
class ReportBalanceSheet(Model):
_name = "report.balance.sheet"
_transient = True
_fields = {
"date": fields.Date("Balance Date"),
"compare_with": fields.Selection([["month", "Previous Month"], ["year", "Previous Year"]], "Compare With"),
"compare_periods": fields.Selection([["1", "Previous 1 Period"], ["2", "Previous 2 Periods"], ["3", "Previous 3 Periods"], ["4", "Previous 4 Periods"], ["5", "Previous 5 Periods"], ["6", "Previous 6 Periods"], ["7", "Previous 7 Periods"], ["8", "Previous 8 Periods"], ["9", "Previous 9 Periods"], ["10", "Previous 10 Periods"], ["11", "Previous 11 Periods"]], "Compare Periods"),
"track_id": fields.Many2One("account.track.categ", "Tracking"),
"track2_id": fields.Many2One("account.track.categ", "Tracking"),
"convert_currency": fields.Boolean("Convert Currency"),
}
_defaults = {
"date": lambda *a: (date.today() + relativedelta(day=31)).strftime("%Y-%m-%d"),
}
def get_report_data(self, ids, context={}):
company_id = get_active_company()
comp = get_model("company").browse(company_id)
if ids:
params = self.read(ids, load_m2o=False)[0]
else:
params = self.default_get(load_m2o=False, context=context)
settings = get_model("settings").browse(1)
date_to = params.get("date")
if not date_to:
date_to = (date.today() + relativedelta(day=31)).strftime("%Y-%m-%d")
compare_with = params.get("compare_with")
compare_periods = params.get("compare_periods")
if compare_periods:
compare_periods = int(compare_periods)
else:
compare_periods = 0
if not compare_with:
compare_periods = 0
track_id = params.get("track_id")
if track_id:
track_id = int(track_id)
track2_id = params.get("track2_id")
if track2_id:
track2_id = int(track2_id)
convert_currency = params.get("convert_currency")
bs_types = ["bank", "cash", "cheque", "receivable", "cur_asset", "noncur_asset",
"fixed_asset", "payable", "cust_deposit", "cur_liability", "noncur_liability", "equity"]
ctx = {
"date_to": date_to,
"track_id": track_id,
"track2_id": track2_id,
"active_test": False,
}
if convert_currency:
ctx["currency_id"]=settings.currency_id.id
res = get_model("account.account").search_read(
["type", "in", bs_types], ["code", "name", "balance", "balance_cur", "parent_id", "type"], order="code", context=ctx)
accounts = {}
parent_ids = []
for r in res:
r["balance"] = r["balance_cur"] if convert_currency else r["balance"]
accounts[r["id"]] = r
if r["parent_id"]:
parent_ids.append(r["parent_id"][0])
compare = {}
for i in range(1, compare_periods + 1):
date_to_c = deduct_period(date_to, i, compare_with)
compare[i] = {
"date_to": date_to_c,
}
ctx["date_to"] = date_to_c
res = get_model("account.account").search_read(["type", "in", bs_types], ["balance", "balance_cur"], context=ctx)
for r in res:
accounts[r["id"]]["balance%d" % i] = r["balance_cur"] if convert_currency else r["balance"]
i = 0
while parent_ids:
i += 1
if i > 100:
raise Exception("Cycle detected!")
parent_ids = list(set(parent_ids))
res = get_model("account.account").read(parent_ids, ["name", "parent_id", "type", "code"])
parent_ids = []
for r in res:
accounts[r["id"]] = r
if r["parent_id"]:
parent_ids.append(r["parent_id"][0])
root_accounts = []
for acc in accounts.values():
if not acc["parent_id"]:
root_accounts.append(acc)
continue
parent_id = acc["parent_id"][0]
parent = accounts[parent_id]
parent.setdefault("children", []).append(acc)
assets = {
"name": "Assets",
"types": ["bank", "cash", "cheque", "receivable", "cur_asset", "noncur_asset", "fixed_asset"],
}
liabilities = {
"name": "Liabilities",
"types": ["payable", "cust_deposit", "cur_liability", "noncur_liability"],
}
net_assets = {
"summary": "Net Assets",
"children": [assets, liabilities],
"separator": "single",
}
equity = {
"name": "Equity",
"types": ["equity"],
}
        # Recursively build report groups from the account tree, keeping only leaf
        # accounts of the requested types with a non-zero balance in some period.
        def _make_groups(accs, types):
groups = []
for acc in accs:
if acc["type"] == "view":
children = _make_groups(acc["children"], types)
if children:
group = {
"code": acc["code"],
"name": acc["name"],
"children": children,
"id": acc["id"],
}
groups.append(group)
elif acc["type"] in types:
if acc.get("balance") or any(acc.get("balance%d" % i) for i in range(1, compare_periods + 1)):
group = {
"code": acc["code"],
"name": acc["name"],
"balance": acc["balance"],
"id": acc["id"],
}
for i in range(1, compare_periods + 1):
group["balance%d" % i] = acc["balance%d" % i]
groups.append(group)
return groups
for group in [assets, liabilities, equity]:
types = group["types"]
group["children"] = _make_groups(root_accounts, types)
net_profit = {
"name": "Current Year Earnings",
"balance": -get_model("report.profit.loss").get_net_profit(date_to, track_id=track_id, track2_id=track2_id, convert_currency=convert_currency, context=context),
}
for i in range(1, compare_periods + 1):
date_to_c = compare[i]["date_to"]
net_profit["balance%d" % i] = - \
get_model("report.profit.loss").get_net_profit(
date_to_c, track_id=track_id, track2_id=track2_id, convert_currency=convert_currency, context=context)
equity["children"].append(net_profit)
        # Roll up balances (current and compared periods) from children to parents.
        def _set_totals(acc):
children = acc.get("children")
if not children:
return
total = 0
comp_totals = {i: 0 for i in range(1, compare_periods + 1)}
for child in children:
_set_totals(child)
total += child.get("balance", 0)
for i in range(1, compare_periods + 1):
comp_totals[i] += child.get("balance%d" % i, 0)
acc["balance"] = total
for i in range(1, compare_periods + 1):
acc["balance%d" % i] = comp_totals[i]
_set_totals(net_assets)
_set_totals(equity)
        # Collapse children whose name repeats the parent's, promoting their own children.
        def _remove_dup_parents(group):
if not group.get("children"):
return
children = []
for c in group["children"]:
_remove_dup_parents(c)
if c["name"] == group["name"]:
if c.get("children"):
children += c["children"]
else:
children.append(c)
group["children"] = children
_remove_dup_parents(assets)
_remove_dup_parents(liabilities)
_remove_dup_parents(equity)
        # Merge sibling groups sharing the same code and name, then sort them by key.
        def _join_groups(group):
if not group.get("children"):
return
child_names = {}
for c in group["children"]:
k = (c.get("code", ""), c["name"])
if k in child_names:
c2 = child_names[k]
if c2.get("children") and c.get("children"):
c2["children"] += c["children"]
c2["balance"] += c["balance"]
for i in range(1, compare_periods + 1):
c2["balance%d" % i] += c["balance%d" % i]
else:
child_names[k] = c
group["children"] = []
for k in sorted(child_names):
c = child_names[k]
group["children"].append(c)
for c in group["children"]:
_join_groups(c)
_join_groups(assets)
_join_groups(liabilities)
_join_groups(equity)
lines = []
        # Flatten the group tree into report lines (account rows, group headers and footers).
        def _add_lines(group, depth=0, max_depth=None, sign=1):
if max_depth is not None and depth > max_depth:
return
children = group.get("children")
if children is None:
line_vals = {
"type": "account",
"string": group.get("code") and "[%s] %s" % (group["code"], group["name"]) or group["name"],
"amount": group["balance"] * sign,
"padding": 20 * depth,
"id": group.get("id"),
}
for i in range(1, compare_periods + 1):
line_vals["amount%d" % i] = group.get("balance%d" % i, 0) * sign
lines.append(line_vals)
return
name = group.get("name")
if name:
lines.append({
"type": "group_header",
"string": name,
"padding": 20 * depth,
})
for child in children:
_add_lines(child, depth + 1, max_depth=max_depth, sign=sign)
summary = group.get("summary")
if not summary:
summary = "Total " + name
line_vals = ({
"type": "group_footer",
"string": summary,
"padding": 20 * (depth + 1),
"amount": group.get("balance", 0) * sign,
"separator": group.get("separator"),
})
for i in range(1, compare_periods + 1):
line_vals["amount%d" % i] = group.get("balance%d" % i, 0) * sign
lines.append(line_vals)
_add_lines(assets)
_add_lines(liabilities, sign=-1)
_add_lines(net_assets, depth=-1, max_depth=-1)
_add_lines(equity, sign=-1)
data = {
"date": date_to,
"track_id": track_id,
"track2_id": track2_id,
"col0": date_to,
"lines": lines,
"company_name": comp.name,
}
for i, comp in compare.items():
data["date%d" % i] = comp["date_to"]
data["col%d" % i] = comp["date_to"]
return data
def get_report_data_custom(self, ids, context={}):
company_id = get_active_company()
comp = get_model("company").browse(company_id)
if ids:
params = self.read(ids, load_m2o=False)[0]
else:
params = self.default_get(load_m2o=False, context=context)
settings = get_model("settings").browse(1)
date_to = params.get("date")
d0 = datetime.strptime(date_to, "%Y-%m-%d")
prev_month_date_to = (d0 - relativedelta(day=1) - timedelta(days=1)).strftime("%Y-%m-%d")
prev_year_date_to = (d0 - relativedelta(years=1)).strftime("%Y-%m-%d")
data = {
"date_to": date_to,
"prev_month_date_to": prev_month_date_to,
"prev_year_date_to": prev_year_date_to,
"company_name": comp.name,
}
print("data", data)
return data
ReportBalanceSheet.register()
| anastue/netforce | netforce_account_report/netforce_account_report/models/report_balance_sheet.py | Python | mit | 13,744 |
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import module as mojom
# This module provides a mechanism for determining the packed order and offsets
# of a mojom.Struct.
#
# ps = pack.PackedStruct(struct)
# ps.packed_fields will access a list of PackedField objects, each of which
# will have an offset, a size and a bit (for mojom.BOOLs).
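#
# A hedged usage sketch (hypothetical; assumes |my_struct| is a mojom.Struct with a
# BOOL field followed by an INT32 field):
#
#   ps = pack.PackedStruct(my_struct)
#   for pf in ps.packed_fields:
#     print pf.field.name, pf.offset, pf.size, pf.bit
#
# The BOOL would be packed at offset 0, bit 0, and the INT32 at offset 4, leaving
# three bytes of padding in between.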
# Size of struct header in bytes: num_bytes [4B] + version [4B].
HEADER_SIZE = 8
class PackedField(object):
kind_to_size = {
mojom.BOOL: 1,
mojom.INT8: 1,
mojom.UINT8: 1,
mojom.INT16: 2,
mojom.UINT16: 2,
mojom.INT32: 4,
mojom.UINT32: 4,
mojom.FLOAT: 4,
mojom.HANDLE: 4,
mojom.MSGPIPE: 4,
mojom.SHAREDBUFFER: 4,
mojom.DCPIPE: 4,
mojom.DPPIPE: 4,
mojom.NULLABLE_HANDLE: 4,
mojom.NULLABLE_MSGPIPE: 4,
mojom.NULLABLE_SHAREDBUFFER: 4,
mojom.NULLABLE_DCPIPE: 4,
mojom.NULLABLE_DPPIPE: 4,
mojom.INT64: 8,
mojom.UINT64: 8,
mojom.DOUBLE: 8,
mojom.STRING: 8,
mojom.NULLABLE_STRING: 8
}
@classmethod
def GetSizeForKind(cls, kind):
if isinstance(kind, (mojom.Array, mojom.Map, mojom.Struct,
mojom.Interface)):
return 8
if isinstance(kind, mojom.Union):
return 16
if isinstance(kind, mojom.InterfaceRequest):
kind = mojom.MSGPIPE
if isinstance(kind, mojom.Enum):
# TODO(mpcomplete): what about big enums?
return cls.kind_to_size[mojom.INT32]
if not kind in cls.kind_to_size:
raise Exception("Invalid kind: %s" % kind.spec)
return cls.kind_to_size[kind]
@classmethod
def GetAlignmentForKind(cls, kind):
if isinstance(kind, mojom.Interface):
return 4
return cls.GetSizeForKind(kind)
def __init__(self, field, index, ordinal):
"""
Args:
field: the original field.
index: the position of the original field in the struct.
ordinal: the ordinal of the field for serialization.
"""
self.field = field
self.index = index
self.ordinal = ordinal
self.size = self.GetSizeForKind(field.kind)
self.alignment = self.GetAlignmentForKind(field.kind)
self.offset = None
self.bit = None
self.min_version = None
def GetPad(offset, alignment):
  """Returns the pad necessary to reserve space so that |offset + pad| equals
  some multiple of |alignment|."""
return (alignment - (offset % alignment)) % alignment
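# Worked examples (illustrative): GetPad(1, 4) == 3 and GetPad(8, 8) == 0, so a
# field already sitting on its alignment boundary needs no padding.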
def GetFieldOffset(field, last_field):
"""Returns a 2-tuple of the field offset and bit (for BOOLs)."""
if (field.field.kind == mojom.BOOL and
last_field.field.kind == mojom.BOOL and
last_field.bit < 7):
return (last_field.offset, last_field.bit + 1)
offset = last_field.offset + last_field.size
pad = GetPad(offset, field.alignment)
return (offset + pad, 0)
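# For instance (illustrative): two consecutive BOOL fields share a byte, the second
# one taking the next bit; any other field is placed at the next offset that is a
# multiple of its alignment.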
def GetPayloadSizeUpToField(field):
"""Returns the payload size (not including struct header) if |field| is the
last field.
"""
if not field:
return 0
offset = field.offset + field.size
pad = GetPad(offset, 8)
return offset + pad
class PackedStruct(object):
def __init__(self, struct):
self.struct = struct
# |packed_fields| contains all the fields, in increasing offset order.
self.packed_fields = []
# |packed_fields_in_ordinal_order| refers to the same fields as
# |packed_fields|, but in ordinal order.
self.packed_fields_in_ordinal_order = []
# No fields.
if (len(struct.fields) == 0):
return
# Start by sorting by ordinal.
src_fields = self.packed_fields_in_ordinal_order
ordinal = 0
for index, field in enumerate(struct.fields):
if field.ordinal is not None:
ordinal = field.ordinal
src_fields.append(PackedField(field, index, ordinal))
ordinal += 1
src_fields.sort(key=lambda field: field.ordinal)
# Set |min_version| for each field.
next_min_version = 0
for packed_field in src_fields:
if packed_field.field.min_version is None:
assert next_min_version == 0
else:
assert packed_field.field.min_version >= next_min_version
next_min_version = packed_field.field.min_version
packed_field.min_version = next_min_version
if (packed_field.min_version != 0 and
mojom.IsReferenceKind(packed_field.field.kind) and
not packed_field.field.kind.is_nullable):
raise Exception("Non-nullable fields are only allowed in version 0 of "
"a struct. %s.%s is defined with [MinVersion=%d]."
% (self.struct.name, packed_field.field.name,
packed_field.min_version))
src_field = src_fields[0]
src_field.offset = 0
src_field.bit = 0
dst_fields = self.packed_fields
dst_fields.append(src_field)
    # Then find the first slot into which each field will fit.
for src_field in src_fields[1:]:
last_field = dst_fields[0]
for i in xrange(1, len(dst_fields)):
next_field = dst_fields[i]
offset, bit = GetFieldOffset(src_field, last_field)
if offset + src_field.size <= next_field.offset:
# Found hole.
src_field.offset = offset
src_field.bit = bit
dst_fields.insert(i, src_field)
break
last_field = next_field
if src_field.offset is None:
# Add to end
src_field.offset, src_field.bit = GetFieldOffset(src_field, last_field)
dst_fields.append(src_field)
class ByteInfo(object):
def __init__(self):
self.is_padding = False
self.packed_fields = []
def GetByteLayout(packed_struct):
total_payload_size = GetPayloadSizeUpToField(
packed_struct.packed_fields[-1] if packed_struct.packed_fields else None)
bytes = [ByteInfo() for i in xrange(total_payload_size)]
limit_of_previous_field = 0
for packed_field in packed_struct.packed_fields:
for i in xrange(limit_of_previous_field, packed_field.offset):
bytes[i].is_padding = True
bytes[packed_field.offset].packed_fields.append(packed_field)
limit_of_previous_field = packed_field.offset + packed_field.size
for i in xrange(limit_of_previous_field, len(bytes)):
bytes[i].is_padding = True
for byte in bytes:
    # A given byte cannot both be padding and have fields packed into it.
assert not (byte.is_padding and byte.packed_fields)
return bytes
class VersionInfo(object):
def __init__(self, version, num_fields, num_bytes):
self.version = version
self.num_fields = num_fields
self.num_bytes = num_bytes
def GetVersionInfo(packed_struct):
"""Get version information for a struct.
Args:
packed_struct: A PackedStruct instance.
Returns:
A non-empty list of VersionInfo instances, sorted by version in increasing
order.
Note: The version numbers may not be consecutive.
"""
versions = []
last_version = 0
last_num_fields = 0
last_payload_size = 0
for packed_field in packed_struct.packed_fields_in_ordinal_order:
if packed_field.min_version != last_version:
versions.append(
VersionInfo(last_version, last_num_fields,
last_payload_size + HEADER_SIZE))
last_version = packed_field.min_version
last_num_fields += 1
# The fields are iterated in ordinal order here. However, the size of a
# version is determined by the last field of that version in pack order,
# instead of ordinal order. Therefore, we need to calculate the max value.
last_payload_size = max(GetPayloadSizeUpToField(packed_field),
last_payload_size)
assert len(versions) == 0 or last_num_fields != versions[-1].num_fields
versions.append(VersionInfo(last_version, last_num_fields,
last_payload_size + HEADER_SIZE))
return versions
| guorendong/iridium-browser-ubuntu | third_party/mojo/src/mojo/public/tools/bindings/pylib/mojom/generate/pack.py | Python | bsd-3-clause | 8,182 |
#!/usr/bin/env python
########################################################################
# $HeadURL$
# File : dirac-configure
# Author : Ricardo Graciani
########################################################################
"""
Main script to write dirac.cfg for a new DIRAC installation and initial download of CAs and CRLs
if necessary.
To be used by VO specific scripts to configure new DIRAC installations
There are 2 mandatory arguments:
-S --Setup=<setup> To define the DIRAC setup for the current installation
-C --ConfigurationServer=<server>|-W --Gateway To define the reference Configuration Servers/Gateway for the current installation
others are optional
-I --IncludeAllServers To include all Configuration Servers (by default only those in -C option are included)
-n --SiteName=<sitename> To define the DIRAC Site Name for the installation
-N --CEName=<cename> To determine the DIRAC Site Name from the CE Name
-V --VO=<vo> To define the VO for the installation
-U --UseServerCertificate To use Server Certificate for all clients
-H --SkipCAChecks To skip check of CAs for all clients
-D --SkipCADownload To skip download of CAs
-M --SkipVOMSDownload To skip download of VOMS info
-v --UseVersionsDir Use versions directory (This option will properly define RootPath and InstancePath)
-A --Architecture=<architecture> To define /LocalSite/Architecture=<architecture>
-L --LocalSE=<localse> To define /LocalSite/LocalSE=<localse>
-F --ForceUpdate Forces the update of cfg file (i.e. dirac.cfg), even if it does already exists (use with care)
-O --Output define output configuration file
Other arguments will take proper defaults if not defined.
Additionally, all options can be passed inside a .cfg file given as argument. The following options are recognized:
Setup
ConfigurationServer
IncludeAllServers
Gateway
SiteName
CEName
VirtualOrganization
UseServerCertificate
SkipCAChecks
SkipCADownload
UseVersionsDir
Architecture
LocalSE
LogLevel
As in any other script, command line options take precedence over .cfg files passed as arguments.
The combination of both is written into the installed dirac.cfg.
Notice: It will not overwrite existing info in the current dirac.cfg if it exists.
Example: dirac-configure -d -S LHCb-Development -C 'dips://lhcbprod.pic.es:9135/Configuration/Server' -W 'dips://lhcbprod.pic.es:9135' --SkipCAChecks
"""
__RCSID__ = "$Id$"
import DIRAC
from DIRAC.Core.Base import Script
from DIRAC.Core.Security.ProxyInfo import getProxyInfo
from DIRAC.ConfigurationSystem.Client.Helpers import cfgInstallPath, cfgPath, Registry
from DIRAC.Core.Utilities.SiteSEMapping import getSEsForSite
import sys, os
logLevel = None
setup = None
configurationServer = None
includeAllServers = False
gatewayServer = None
siteName = None
useServerCert = False
skipCAChecks = False
skipCADownload = False
useVersionsDir = False
architecture = None
localSE = None
ceName = None
vo = None
update = False
outputFile = ''
skipVOMSDownload = False
def setGateway( optionValue ):
global gatewayServer
gatewayServer = optionValue
setServer( gatewayServer + '/Configuration/Server' )
DIRAC.gConfig.setOptionValue( cfgInstallPath( 'Gateway' ), gatewayServer )
return DIRAC.S_OK()
def setOutput( optionValue ):
global outputFile
outputFile = optionValue
return DIRAC.S_OK()
def setServer( optionValue ):
global configurationServer
configurationServer = optionValue
Script.localCfg.addDefaultEntry( '/DIRAC/Configuration/Servers', configurationServer )
DIRAC.gConfig.setOptionValue( cfgInstallPath( 'ConfigurationServer' ), configurationServer )
return DIRAC.S_OK()
def setAllServers( optionValue ):
  global includeAllServers
  includeAllServers = True
  return DIRAC.S_OK()
def setSetup( optionValue ):
global setup
setup = optionValue
DIRAC.gConfig.setOptionValue( '/DIRAC/Setup', setup )
DIRAC.gConfig.setOptionValue( cfgInstallPath( 'Setup' ), setup )
return DIRAC.S_OK()
def setSiteName( optionValue ):
global siteName
siteName = optionValue
Script.localCfg.addDefaultEntry( '/LocalSite/Site', siteName )
DIRAC.__siteName = False
DIRAC.gConfig.setOptionValue( cfgInstallPath( 'SiteName' ), siteName )
return DIRAC.S_OK()
def setCEName( optionValue ):
global ceName
ceName = optionValue
DIRAC.gConfig.setOptionValue( cfgInstallPath( 'CEName' ), ceName )
return DIRAC.S_OK()
def setServerCert( optionValue ):
global useServerCert
useServerCert = True
DIRAC.gConfig.setOptionValue( cfgInstallPath( 'UseServerCertificate' ), useServerCert )
return DIRAC.S_OK()
def setSkipCAChecks( optionValue ):
global skipCAChecks
skipCAChecks = True
DIRAC.gConfig.setOptionValue( cfgInstallPath( 'SkipCAChecks' ), skipCAChecks )
return DIRAC.S_OK()
def setSkipCADownload( optionValue ):
global skipCADownload
skipCADownload = True
DIRAC.gConfig.setOptionValue( cfgInstallPath( 'SkipCADownload' ), skipCADownload )
return DIRAC.S_OK()
def setSkipVOMSDownload( optionValue ):
global skipVOMSDownload
skipVOMSDownload = True
DIRAC.gConfig.setOptionValue( cfgInstallPath( 'SkipVOMSDownload' ), skipVOMSDownload )
return DIRAC.S_OK()
def setUseVersionsDir( optionValue ):
global useVersionsDir
useVersionsDir = True
DIRAC.gConfig.setOptionValue( cfgInstallPath( 'UseVersionsDir' ), useVersionsDir )
return DIRAC.S_OK()
def setArchitecture( optionValue ):
global architecture
architecture = optionValue
Script.localCfg.addDefaultEntry( '/LocalSite/Architecture', architecture )
DIRAC.gConfig.setOptionValue( cfgInstallPath( 'Architecture' ), architecture )
return DIRAC.S_OK()
def setLocalSE( optionValue ):
global localSE
localSE = optionValue
Script.localCfg.addDefaultEntry( '/LocalSite/LocalSE', localSE )
DIRAC.gConfig.setOptionValue( cfgInstallPath( 'LocalSE' ), localSE )
return DIRAC.S_OK()
def setVO( optionValue ):
global vo
vo = optionValue
Script.localCfg.addDefaultEntry( '/DIRAC/VirtualOrganization', vo )
DIRAC.gConfig.setOptionValue( cfgInstallPath( 'VirtualOrganization' ), vo )
return DIRAC.S_OK()
def forceUpdate( optionValue ):
global update
update = True
return DIRAC.S_OK()
Script.disableCS()
Script.registerSwitch( "S:", "Setup=", "Set <setup> as DIRAC setup", setSetup )
Script.registerSwitch( "C:", "ConfigurationServer=", "Set <server> as DIRAC configuration server", setServer )
Script.registerSwitch( "I", "IncludeAllServers", "include all Configuration Servers", setAllServers )
Script.registerSwitch( "n:", "SiteName=", "Set <sitename> as DIRAC Site Name", setSiteName )
Script.registerSwitch( "N:", "CEName=", "Determine <sitename> from <cename>", setCEName )
Script.registerSwitch( "V:", "VO=", "Set the VO name", setVO )
Script.registerSwitch( "W:", "gateway=", "Configure <gateway> as DIRAC Gateway for the site", setGateway )
Script.registerSwitch( "U", "UseServerCertificate", "Configure to use Server Certificate", setServerCert )
Script.registerSwitch( "H", "SkipCAChecks", "Configure to skip check of CAs", setSkipCAChecks )
Script.registerSwitch( "D", "SkipCADownload", "Configure to skip download of CAs", setSkipCADownload )
Script.registerSwitch( "M", "SkipVOMSDownload", "Configure to skip download of VOMS info", setSkipVOMSDownload )
Script.registerSwitch( "v", "UseVersionsDir", "Use versions directory", setUseVersionsDir )
Script.registerSwitch( "A:", "Architecture=", "Configure /Architecture=<architecture>", setArchitecture )
Script.registerSwitch( "L:", "LocalSE=", "Configure LocalSite/LocalSE=<localse>", setLocalSE )
Script.registerSwitch( "F", "ForceUpdate", "Force Update of cfg file (i.e. dirac.cfg) (otherwise nothing happens if dirac.cfg already exists)", forceUpdate )
Script.registerSwitch ( "O:", "output=", "output configuration file", setOutput )
Script.setUsageMessage( '\n'.join( [ __doc__.split( '\n' )[1],
'\nUsage:',
' %s [option|cfgfile] ...\n' % Script.scriptName ] ) )
Script.parseCommandLine( ignoreErrors = True )
args = Script.getExtraCLICFGFiles()
if not logLevel:
logLevel = DIRAC.gConfig.getValue( cfgInstallPath( 'LogLevel' ), '' )
if logLevel:
DIRAC.gLogger.setLevel( logLevel )
else:
DIRAC.gConfig.setOptionValue( cfgInstallPath( 'LogLevel' ), logLevel )
if not gatewayServer:
newGatewayServer = DIRAC.gConfig.getValue( cfgInstallPath( 'Gateway' ), '' )
if newGatewayServer:
setGateway( newGatewayServer )
if not configurationServer:
newConfigurationServer = DIRAC.gConfig.getValue( cfgInstallPath( 'ConfigurationServer' ), '' )
if newConfigurationServer:
setServer( newConfigurationServer )
if not includeAllServers:
newIncludeAllServer = DIRAC.gConfig.getValue( cfgInstallPath( 'IncludeAllServers' ), False )
if newIncludeAllServer:
setAllServers( True )
if not setup:
newSetup = DIRAC.gConfig.getValue( cfgInstallPath( 'Setup' ), '' )
if newSetup:
setSetup( newSetup )
if not siteName:
newSiteName = DIRAC.gConfig.getValue( cfgInstallPath( 'SiteName' ), '' )
if newSiteName:
setSiteName( newSiteName )
if not ceName:
newCEName = DIRAC.gConfig.getValue( cfgInstallPath( 'CEName' ), '' )
if newCEName:
setCEName( newCEName )
if not useServerCert:
newUserServerCert = DIRAC.gConfig.getValue( cfgInstallPath( 'UseServerCertificate' ), False )
if newUserServerCert:
setServerCert( newUserServerCert )
if not skipCAChecks:
newSkipCAChecks = DIRAC.gConfig.getValue( cfgInstallPath( 'SkipCAChecks' ), False )
if newSkipCAChecks:
setSkipCAChecks( newSkipCAChecks )
if not skipCADownload:
newSkipCADownload = DIRAC.gConfig.getValue( cfgInstallPath( 'SkipCADownload' ), False )
if newSkipCADownload:
setSkipCADownload( newSkipCADownload )
if not useVersionsDir:
newUseVersionsDir = DIRAC.gConfig.getValue( cfgInstallPath( 'UseVersionsDir' ), False )
if newUseVersionsDir:
setUseVersionsDir( newUseVersionsDir )
# Set proper defaults in configuration (even if they will be properly overwritten by InstallTools)
instancePath = os.path.dirname( os.path.dirname( DIRAC.rootPath ) )
rootPath = os.path.join( instancePath, 'pro' )
DIRAC.gConfig.setOptionValue( cfgInstallPath( 'InstancePath' ), instancePath )
DIRAC.gConfig.setOptionValue( cfgInstallPath( 'RootPath' ), rootPath )
if not architecture:
newArchitecture = DIRAC.gConfig.getValue( cfgInstallPath( 'Architecture' ), '' )
if newArchitecture:
setArchitecture( newArchitecture )
if not vo:
newVO = DIRAC.gConfig.getValue( cfgInstallPath( 'VirtualOrganization' ), '' )
if newVO:
setVO( newVO )
DIRAC.gLogger.notice( 'Executing: %s ' % ( ' '.join( sys.argv ) ) )
DIRAC.gLogger.notice( 'Checking DIRAC installation at "%s"' % DIRAC.rootPath )
if update:
if outputFile:
DIRAC.gLogger.notice( 'Will update the output file %s' % outputFile )
else:
DIRAC.gLogger.notice( 'Will update %s' % DIRAC.gConfig.diracConfigFilePath )
if setup:
DIRAC.gLogger.verbose( '/DIRAC/Setup =', setup )
if vo:
DIRAC.gLogger.verbose( '/DIRAC/VirtualOrganization =', vo )
if configurationServer:
DIRAC.gLogger.verbose( '/DIRAC/Configuration/Servers =', configurationServer )
if siteName:
DIRAC.gLogger.verbose( '/LocalSite/Site =', siteName )
if architecture:
DIRAC.gLogger.verbose( '/LocalSite/Architecture =', architecture )
if localSE:
DIRAC.gLogger.verbose( '/LocalSite/localSE =', localSE )
if not useServerCert:
DIRAC.gLogger.verbose( '/DIRAC/Security/UseServerCertificate =', 'no' )
#Being sure it was not there before
Script.localCfg.deleteOption( '/DIRAC/Security/UseServerCertificate' )
Script.localCfg.addDefaultEntry( '/DIRAC/Security/UseServerCertificate', 'no' )
else:
DIRAC.gLogger.verbose( '/DIRAC/Security/UseServerCertificate =', 'yes' )
#Being sure it was not there before
Script.localCfg.deleteOption( '/DIRAC/Security/UseServerCertificate' )
Script.localCfg.addDefaultEntry( '/DIRAC/Security/UseServerCertificate', 'yes' )
host = DIRAC.gConfig.getValue( cfgInstallPath( "Host" ), "" )
if host:
DIRAC.gConfig.setOptionValue( cfgPath( "DIRAC", "Hostname" ), host )
if skipCAChecks:
DIRAC.gLogger.verbose( '/DIRAC/Security/SkipCAChecks =', 'yes' )
#Being sure it was not there before
Script.localCfg.deleteOption( '/DIRAC/Security/SkipCAChecks' )
Script.localCfg.addDefaultEntry( '/DIRAC/Security/SkipCAChecks', 'yes' )
else:
# Necessary to allow initial download of CA's
if not skipCADownload:
DIRAC.gConfig.setOptionValue( '/DIRAC/Security/SkipCAChecks', 'yes' )
if not skipCADownload:
Script.enableCS()
try:
dirName = os.path.join( DIRAC.rootPath, 'etc', 'grid-security', 'certificates' )
if not os.path.exists( dirName ):
os.makedirs( dirName )
except:
DIRAC.gLogger.exception()
    DIRAC.gLogger.fatal( 'Failed to create directory:', dirName )
DIRAC.exit( -1 )
try:
from DIRAC.FrameworkSystem.Client.BundleDeliveryClient import BundleDeliveryClient
bdc = BundleDeliveryClient()
result = bdc.syncCAs()
if result[ 'OK' ]:
result = bdc.syncCRLs()
except:
DIRAC.gLogger.exception( 'Could not import BundleDeliveryClient' )
pass
if not skipCAChecks:
Script.localCfg.deleteOption( '/DIRAC/Security/SkipCAChecks' )
if ceName or siteName:
# This is used in the pilot context, we should have a proxy, or a certificate, and access to CS
if useServerCert:
# Being sure it was not there before
Script.localCfg.deleteOption( '/DIRAC/Security/UseServerCertificate' )
Script.localCfg.addDefaultEntry( '/DIRAC/Security/UseServerCertificate', 'yes' )
Script.enableCS()
# Get the site resource section
gridSections = DIRAC.gConfig.getSections( '/Resources/Sites/' )
if not gridSections['OK']:
DIRAC.gLogger.warn( 'Could not get grid sections list' )
grids = []
else:
grids = gridSections['Value']
# try to get siteName from ceName or Local SE from siteName using Remote Configuration
for grid in grids:
siteSections = DIRAC.gConfig.getSections( '/Resources/Sites/%s/' % grid )
if not siteSections['OK']:
DIRAC.gLogger.warn( 'Could not get %s site list' % grid )
sites = []
else:
sites = siteSections['Value']
if not siteName:
if ceName:
for site in sites:
siteCEs = DIRAC.gConfig.getValue( '/Resources/Sites/%s/%s/CE' % ( grid, site ), [] )
if ceName in siteCEs:
siteName = site
break
if siteName:
DIRAC.gLogger.notice( 'Setting /LocalSite/Site = %s' % siteName )
Script.localCfg.addDefaultEntry( '/LocalSite/Site', siteName )
DIRAC.__siteName = False
if ceName:
DIRAC.gLogger.notice( 'Setting /LocalSite/GridCE = %s' % ceName )
Script.localCfg.addDefaultEntry( '/LocalSite/GridCE', ceName )
if not localSE and siteName in sites:
localSE = getSEsForSite( siteName )
if localSE['OK'] and localSE['Value']:
localSE = ','.join( localSE['Value'] )
DIRAC.gLogger.notice( 'Setting /LocalSite/LocalSE =', localSE )
Script.localCfg.addDefaultEntry( '/LocalSite/LocalSE', localSE )
break
if gatewayServer:
DIRAC.gLogger.verbose( '/DIRAC/Gateways/%s =' % DIRAC.siteName(), gatewayServer )
Script.localCfg.addDefaultEntry( '/DIRAC/Gateways/%s' % DIRAC.siteName(), gatewayServer )
# Create the local cfg if it is not yet there
if not outputFile:
outputFile = DIRAC.gConfig.diracConfigFilePath
outputFile = os.path.abspath( outputFile )
if not os.path.exists( outputFile ):
configDir = os.path.dirname( outputFile )
if not os.path.exists( configDir ):
os.makedirs( configDir )
update = True
DIRAC.gConfig.dumpLocalCFGToFile( outputFile )
# We need user proxy or server certificate to continue
if not useServerCert:
Script.enableCS()
result = getProxyInfo()
if not result['OK']:
DIRAC.gLogger.notice( 'Configuration is not completed because no user proxy is available' )
DIRAC.gLogger.notice( 'Create one using dirac-proxy-init and execute again with -F option' )
sys.exit( 1 )
else:
Script.localCfg.deleteOption( '/DIRAC/Security/UseServerCertificate' )
# When using Server Certs CA's will be checked, the flag only disables initial download
# this will be replaced by the use of SkipCADownload
Script.localCfg.deleteOption( '/DIRAC/Security/UseServerCertificate' )
Script.localCfg.addDefaultEntry( '/DIRAC/Security/UseServerCertificate', 'yes' )
Script.enableCS()
if includeAllServers:
DIRAC.gConfig.setOptionValue( '/DIRAC/Configuration/Servers', ','.join( DIRAC.gConfig.getServersList() ) )
DIRAC.gLogger.verbose( '/DIRAC/Configuration/Servers =', ','.join( DIRAC.gConfig.getServersList() ) )
if useServerCert:
# always removing before dumping
Script.localCfg.deleteOption( '/DIRAC/Security/UseServerCertificate' )
Script.localCfg.deleteOption( '/DIRAC/Security/SkipCAChecks' )
Script.localCfg.deleteOption( '/DIRAC/Security/SkipVOMSDownload' )
if update:
DIRAC.gConfig.dumpLocalCFGToFile( outputFile )
# ## LAST PART: do the vomsdir/vomses magic
# This has to be done for all VOs in the installation
if skipVOMSDownload:
# We stop here
sys.exit( 0 )
result = Registry.getVOMSServerInfo()
if not result['OK']:
sys.exit( 1 )
error = ''
vomsDict = result['Value']
for vo in vomsDict:
voName = vomsDict[vo]['VOMSName']
vomsDirHosts = vomsDict[vo]['Servers'].keys()
vomsDirPath = os.path.join( DIRAC.rootPath, 'etc', 'grid-security', 'vomsdir', voName )
vomsesDirPath = os.path.join( DIRAC.rootPath, 'etc', 'grid-security', 'vomses' )
for path in ( vomsDirPath, vomsesDirPath ):
if not os.path.isdir( path ):
try:
os.makedirs( path )
except Exception, e:
DIRAC.gLogger.error( "Could not create directory", str( e ) )
sys.exit( 1 )
vomsesLines = []
for vomsHost in vomsDirHosts:
hostFilePath = os.path.join( vomsDirPath, "%s.lsc" % vomsHost )
if "Servers" in vomsDict[vo]:
try:
DN = vomsDict[vo]['Servers'][vomsHost]['DN']
CA = vomsDict[vo]['Servers'][vomsHost]['CA']
port = vomsDict[vo]['Servers'][vomsHost]['Port']
if not DN or not CA or not port:
DIRAC.gLogger.error( 'DN = %s' % DN )
DIRAC.gLogger.error( 'CA = %s' % CA )
DIRAC.gLogger.error( 'Port = %s' % port )
DIRAC.gLogger.error( 'Missing Parameter for %s' % vomsHost )
continue
fd = open( hostFilePath, "wb" )
fd.write( "%s\n%s\n" % ( DN, CA ) )
fd.close()
vomsesLines.append( '"%s" "%s" "%s" "%s" "%s" "24"' % ( voName, vomsHost, port, DN, voName ) )
DIRAC.gLogger.notice( "Created vomsdir file %s" % hostFilePath )
except:
DIRAC.gLogger.exception( "Could not generate vomsdir file for host", vomsHost )
error = "Could not generate vomsdir file for VO %s, host %s" % (voName, vomsHost)
try:
vomsesFilePath = os.path.join( vomsesDirPath, voName )
fd = open( vomsesFilePath, "wb" )
fd.write( "%s\n" % "\n".join( vomsesLines ) )
fd.close()
DIRAC.gLogger.notice( "Created vomses file %s" % vomsesFilePath )
except:
DIRAC.gLogger.exception( "Could not generate vomses file" )
error = "Could not generate vomses file for VO %s" % voName
if useServerCert:
Script.localCfg.deleteOption( '/DIRAC/Security/UseServerCertificate' )
# When using Server Certs CA's will be checked, the flag only disables initial download
# this will be replaced by the use of SkipCADownload
Script.localCfg.deleteOption( '/DIRAC/Security/SkipCAChecks' )
if error:
sys.exit( 1 )
sys.exit( 0 )
| vmendez/DIRAC | Core/scripts/dirac-configure.py | Python | gpl-3.0 | 20,061 |
import unittest
import os
import asyncio
from openhomedevice.device import Device
from aioresponses import aioresponses
def async_test(coro):
def wrapper(*args, **kwargs):
loop = asyncio.new_event_loop()
try:
return loop.run_until_complete(coro(*args, **kwargs))
finally:
loop.close()
return wrapper
class DeviceWithNoPinsTests(unittest.TestCase):
@async_test
@aioresponses()
async def setUp(self, mocked):
LOCATION = "http://mydevice:12345/desc.xml"
with open(
os.path.join(os.path.dirname(__file__), "data/nopinsdevice.xml")
) as file:
mocked.get(LOCATION, body=file.read())
        # Mock every service description referenced by the device XML with an
        # empty body so Device.init() can fetch them.
        device_base = "http://mydevice:12345/4c494e4e-1234-ab12-abcd-01234567819f/Upnp"
        services = [
            "av.openhome.org-ConfigApp-1",
            "av.openhome.org-Product-3",
            "av.openhome.org-Volume-4",
            "av.openhome.org-Credentials-1",
            "av.openhome.org-Time-1",
            "av.openhome.org-Info-1",
            "av.openhome.org-Config-2",
            "av.openhome.org-Transport-1",
            "av.openhome.org-Pins-1",
            "linn.co.uk-Update-2",
            "linn.co.uk-Diagnostics-1",
            "linn.co.uk-Volkano-1",
            "linn.co.uk-Privacy-1",
            "av.openhome.org-Exakt-4",
            "linn.co.uk-Configuration-1",
            "linn.co.uk-Exakt2-1",
            "linn.co.uk-ExaktInputs-1",
            "linn.co.uk-Cloud-1",
            "av.openhome.org-Playlist-1",
            "av.openhome.org-Radio-1",
            "av.openhome.org-Receiver-1",
            "av.openhome.org-Sender-2",
            "linn.co.uk-LipSync-1",
            "av.openhome.org-Debug-1",
        ]
        for service in services:
            mocked.get("{0}/{1}/service.xml".format(device_base, service), body="")
self.sut = Device(LOCATION)
await self.sut.init()
soap_request_calls = []
return super().setUp()
@async_test
async def test_pins(self):
result = await self.sut.pins()
self.assertListEqual(result, list())
@async_test
async def test_invoke_pin(self):
await self.sut.invoke_pin(42)
| bazwilliams/openhomedevice | tests/DeviceNoPinsTest.py | Python | mit | 5,425 |
# META: timeout=long
import pytest
from webdriver import error
from tests.support.asserts import assert_success
def execute_script(session, script, args=None):
if args is None:
args = []
body = {"script": script, "args": args}
return session.transport.send(
"POST", "/session/{session_id}/execute/sync".format(
session_id=session.session_id),
body)
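# For example, execute_script(session, "window.alert('Hello');") posts the body
# {"script": "window.alert('Hello');", "args": []} to the execute/sync endpoint.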
@pytest.mark.capabilities({"unhandledPromptBehavior": "accept"})
@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
def test_handle_prompt_accept(session, dialog_type):
response = execute_script(session, "window.{}('Hello');".format(dialog_type))
assert_success(response, None)
session.title
with pytest.raises(error.NoSuchAlertException):
session.alert.accept()
@pytest.mark.capabilities({"unhandledPromptBehavior": "accept and notify"})
@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
def test_handle_prompt_accept_and_notify(session, dialog_type):
response = execute_script(session, "window.{}('Hello');".format(dialog_type))
assert_success(response, None)
with pytest.raises(error.UnexpectedAlertOpenException):
session.title
with pytest.raises(error.NoSuchAlertException):
session.alert.accept()
@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss"})
@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
def test_handle_prompt_dismiss(session, dialog_type):
response = execute_script(session, "window.{}('Hello');".format(dialog_type))
assert_success(response, None)
session.title
with pytest.raises(error.NoSuchAlertException):
session.alert.dismiss()
@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss and notify"})
@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
def test_handle_prompt_dismiss_and_notify(session, dialog_type):
response = execute_script(session, "window.{}('Hello');".format(dialog_type))
assert_success(response, None)
with pytest.raises(error.UnexpectedAlertOpenException):
session.title
with pytest.raises(error.NoSuchAlertException):
session.alert.dismiss()
@pytest.mark.capabilities({"unhandledPromptBehavior": "ignore"})
@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
def test_handle_prompt_ignore(session, dialog_type):
response = execute_script(session, "window.{}('Hello');".format(dialog_type))
assert_success(response, None)
with pytest.raises(error.UnexpectedAlertOpenException):
session.title
session.alert.dismiss()
@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
def test_handle_prompt_default(session, dialog_type):
response = execute_script(session, "window.{}('Hello');".format(dialog_type))
assert_success(response, None)
with pytest.raises(error.UnexpectedAlertOpenException):
session.title
with pytest.raises(error.NoSuchAlertException):
session.alert.dismiss()
@pytest.mark.capabilities({"unhandledPromptBehavior": "accept"})
@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
def test_handle_prompt_twice(session, dialog_type):
response = execute_script(
session, "window.{0}('Hello');window.{0}('Bye');".format(dialog_type))
assert_success(response, None)
session.alert.dismiss()
# The first alert has been accepted by the user prompt handler, the second one remains.
# FIXME: this is how browsers currently work, but the spec should clarify if this is the
# expected behavior, see https://github.com/w3c/webdriver/issues/1153.
assert session.alert.text == "Bye"
session.alert.dismiss()
| dati91/servo | tests/wpt/web-platform-tests/webdriver/tests/execute_script/user_prompts.py | Python | mpl-2.0 | 3,756 |
#!/usr/bin/env python
# -*- coding: utf8 -*-
import unittest
from datetime import datetime, timedelta
from yamlns.dateutils import Date
from yamlns import namespace as ns
from .scheduling import (
weekday,
weekstart,
nextweek,
choosers,
Scheduling,
)
class Scheduling_Test(unittest.TestCase):
def setUp(self):
self.maxDiff = None
def assertNsEqual(self, dict1, dict2):
def parseIfString(nsOrString):
if type(nsOrString) in (dict, ns):
return nsOrString
return ns.loads(nsOrString)
def sorteddict(d):
if type(d) not in (dict, ns):
return d
return ns(sorted(
(k, sorteddict(v))
for k,v in d.items()
))
dict1 = sorteddict(parseIfString(dict1))
dict2 = sorteddict(parseIfString(dict2))
return self.assertMultiLineEqual(dict1.dump(), dict2.dump())
# weekday
def test_weekday_withSunday(self):
self.assertEqual(
'dg', weekday(Date("2017-10-01")))
def test_weekday_withMonday(self):
self.assertEqual(
'dl', weekday(Date("2017-10-02")))
def test_weekday_withWenesday(self):
self.assertEqual(
'dx', weekday(Date("2017-10-04")))
# weekstart
def test_weekstart_withMonday(self):
self.assertEqual(
weekstart(Date("2017-10-02")),
Date("2017-10-02"))
def test_weekstart_withFriday(self):
self.assertEqual(
weekstart(Date("2017-10-06")),
Date("2017-10-02"))
# nextweek
def test_nextweek_withMonday(self):
self.assertEqual(
nextweek(Date("2017-10-02")),
Date("2017-10-09"))
def test_nextweek_withFriday(self):
self.assertEqual(
nextweek(Date("2017-10-06")),
Date("2017-10-09"))
# Scheduling.extension
def test_extension_existing(self):
schedule = Scheduling("""\
extensions:
cesar: 200
""")
self.assertEqual(
schedule.extension('cesar'),
'200')
def test_extension_badExtension(self):
schedule = Scheduling("""\
extensions:
cesar: 200
""")
self.assertEqual(
schedule.extension('notExisting'),
None)
# extensionToName
def test_extensionToName_stringExtension(self):
schedule = Scheduling("""\
extensions:
cesar: 200
""")
self.assertEqual(
schedule.extensionToName('200'),
'cesar')
def test_extensionToName_intExtension(self):
schedule = Scheduling("""\
extensions:
cesar: 200
""")
self.assertEqual(
schedule.extensionToName(200),
'cesar')
def test_extensionToName_missingExtensionReturnsExtension(self):
schedule = Scheduling("""\
extensions:
cesar: 200
""")
self.assertEqual(
schedule.extensionToName('100'),
'100')
# Scheduling.properName
def test_properName_whenPresent(self):
schedule = Scheduling("""\
names:
cesar: César
""")
self.assertEqual(
schedule.properName('cesar'),
u'César')
def test_properName_missing_usesTitle(self):
schedule = Scheduling("""\
names:
cesar: César
""")
self.assertEqual(
schedule.properName('perico'),
u'Perico')
def test_properName_noNamesAtAll(self):
schedule = Scheduling("""\
otherkey:
""")
self.assertEqual(
schedule.properName('perico'),
u'Perico')
# Scheduling.intervals
def test_intervals_withOneDate_notEnough(self):
schedule = Scheduling("""\
hours:
- '09:00'
""")
self.assertEqual(
schedule.intervals(), [
])
def test_intervals_withTwoDates(self):
schedule = Scheduling("""\
hours:
- '09:00'
- '10:15'
""")
self.assertEqual(
schedule.intervals(), [
'09:00-10:15',
])
def test_intervals_withMoreThanTwo(self):
schedule = Scheduling("""\
hours:
- '09:00'
- '10:15'
- '11:30'
""")
self.assertEqual(
schedule.intervals(), [
'09:00-10:15',
'10:15-11:30',
])
# Scheduling.peekInterval
def test_peekInterval_beforeAnyInterval(self):
schedule = Scheduling("""\
hours:
- '09:00'
- '10:15'
- '11:30'
""")
self.assertEqual(
schedule.peekInterval("08:59"),None)
def test_peekInterval_justInFirstInterval(self):
schedule = Scheduling("""\
hours:
- '09:00'
- '10:15'
- '11:30'
""")
self.assertEqual(
schedule.peekInterval("09:00"),0)
def test_peekInterval_justBeforeNextInterval(self):
schedule = Scheduling("""\
hours:
- '09:00'
- '10:15'
- '11:30'
""")
self.assertEqual(
schedule.peekInterval("10:14"),0)
def test_peekInterval_justInNextInterval(self):
schedule = Scheduling("""\
hours:
- '09:00'
- '10:15'
- '11:30'
""")
self.assertEqual(
schedule.peekInterval("10:15"),1)
def test_peekInterval_justAtTheEndOfLastInterval(self):
schedule = Scheduling("""\
hours:
- '09:00'
- '10:15'
- '11:30'
""")
self.assertEqual(
schedule.peekInterval("11:29"),1)
def test_peekInterval_pastLastInterval(self):
schedule = Scheduling("""\
hours:
- '09:00'
- '10:15'
- '11:30'
""")
self.assertEqual(
schedule.peekInterval("11:30"),None)
def test_peekInterval_withNoHours(self):
schedule = Scheduling("""\
other:
""")
with self.assertRaises(Exception) as ctx:
schedule.peekInterval("11:30")
self.assertEqual(str(ctx.exception),
"Schedule with no hours attribute")
# choosers
def test_choosers(self):
now = datetime(2017,10,20,15,25,35)
self.assertEqual(
choosers(now),
("2017-10-16", 'dv', "15:25"))
# Scheduling.peekQueue
def test_peekQueue_oneSlot_oneTurn(self):
schedule = Scheduling(u"""\
timetable:
dl:
-
- cesar
hours:
- '00:00'
- '23:59'
extensions:
cesar: 200
""")
self.assertEqual(schedule.peekQueue('dl','12:00'), [
'cesar',
])
def test_peekQueue_oneSlot_twoTurns(self):
schedule = Scheduling(u"""\
timetable:
'dl':
-
- cesar
- eduard
hours:
- '00:00'
- '23:59'
extensions:
cesar: 200
""")
self.assertEqual(schedule.peekQueue('dl','12:00'), [
'cesar',
'eduard',
])
def test_peekQueue_twoTimes(self):
schedule = Scheduling(u"""\
timetable:
dl:
-
- cesar
-
- eduard
hours:
- '00:00'
- '12:00'
- '23:59'
extensions:
cesar: 200
eduard: 201
""")
self.assertEqual(schedule.peekQueue('dl','12:00'), [
'eduard',
])
def test_peekQueue_beforeTime(self):
schedule = Scheduling(u"""\
timetable:
dl:
-
- cesar
hours:
- '23:58'
- '23:59'
extensions:
cesar: 200
""")
self.assertEqual(schedule.peekQueue('dl', '12:00'), [
])
def test_peekQueue_afterTime(self):
schedule = Scheduling(u"""\
timetable:
dl:
-
- cesar
hours:
- '00:00'
- '00:01'
extensions:
cesar: 200
""")
self.assertEqual(schedule.peekQueue('dl','12:00'), [
])
def test_peekQueue_holiday(self):
schedule = Scheduling(u"""\
timetable:
dm:
-
- cesar
hours:
- '00:00'
- '23:59'
extensions:
cesar: 200
""")
self.assertEqual(schedule.peekQueue('dl','12:00'), [
])
def test_peekQueue_aDifferentDay(self):
schedule = Scheduling(u"""\
timetable:
dm:
-
- cesar
hours:
- '00:00'
- '23:59'
extensions:
cesar: 200
""")
self.assertEqual(schedule.peekQueue('dm','12:00'), [
'cesar',
])
def test_peekQueue_dictFormat(self):
schedule = Scheduling(u"""\
timetable:
dl:
1:
- cesar
hours:
- '00:00'
- '24:00'
extensions:
cesar: 200
""")
self.assertEqual(schedule.peekQueue('dl','12:00'), [
'cesar',
])
@unittest.skip("TODO")
def test_peekQueue_withNobodySlots(self): pass
# Scheduling.fromSolution
def config_singleSlot(self):
return ns.loads("""\
nTelefons: 1
diesVisualitzacio: ['dl']
            hours: # The last one is the closing time
- '09:00'
- '10:15'
colors:
ana: aa11aa
belen: bb22bb
extensions:
ana: 1001
belen: 1002
names: {}
""")
def config_twoLines(self):
c = self.config_singleSlot()
c.nTelefons = 2
return c
def config_twoDays(self):
c = self.config_singleSlot()
c.diesVisualitzacio.append('dm')
return c
def config_twoTimes(self):
c = self.config_singleSlot()
c.hours.append('11:30')
return c
def config_twoEverything(self):
c = self.config_singleSlot()
c.diesVisualitzacio.append('dm')
c.hours.append('11:30')
c.nTelefons = 2
return c
def test_fromSolution_oneholiday(self):
config = self.config_singleSlot()
result=Scheduling.fromSolution(
date=datetime.strptime(
'2016-07-11','%Y-%m-%d').date(),
config=config,
solution={},
)
self.assertNsEqual(result.data(), """\
week: '2016-07-11'
days:
- dl
hours:
- '09:00'
- '10:15'
turns:
- 'L1'
timetable:
dl:
- - festiu
colors:
ana: aa11aa
belen: bb22bb
extensions:
ana: 1001
belen: 1002
names: {}
""")
def test_fromSolution_noWeekNextMonday(self):
config = self.config_singleSlot()
result=Scheduling.fromSolution(
# no date specified
config=config,
solution={},
)
today = Date.today()
week=datetime.strptime(result.data().week, "%Y-%m-%d").date()
self.assertEqual(week.weekday(),0) # Monday
self.assertTrue(week > today) # in the future
self.assertTrue(week <= today+timedelta(days=7)) # A week at most
def test_fromSolution_oneslot(self):
config = self.config_singleSlot()
result=Scheduling.fromSolution(
date=datetime.strptime(
'2016-07-18','%Y-%m-%d').date(),
config=config,
solution={
('dl',0,0):'ana',
},
)
self.assertNsEqual( result.data(), """
week: '2016-07-18'
days:
- dl
hours:
- '09:00'
- '10:15'
turns:
- 'L1'
timetable:
dl:
- - ana
colors:
ana: aa11aa
belen: bb22bb
extensions:
ana: 1001
belen: 1002
names: {}
""")
def test_fromSolution_manyLines(self):
config = self.config_twoLines()
result=Scheduling.fromSolution(
date=datetime.strptime(
'2016-07-18','%Y-%m-%d').date(),
config=config,
solution={
('dl',0,0):'ana',
('dl',0,1):'belen',
},
)
self.assertNsEqual( result.data(), """
week: '2016-07-18'
days:
- dl
hours:
- '09:00'
- '10:15'
turns:
- 'L1'
- 'L2'
timetable:
dl:
- - ana
- belen
colors:
ana: 'aa11aa'
belen: 'bb22bb'
extensions:
ana: 1001
belen: 1002
names: {}
""")
def test_fromSolution_manyTimes(self):
config = self.config_twoTimes()
result=Scheduling.fromSolution(
date=datetime.strptime(
'2016-07-18','%Y-%m-%d').date(),
config=config,
solution={
('dl',0,0):'ana',
('dl',1,0):'belen',
},
)
self.assertNsEqual( result.data(), """
week: '2016-07-18'
days:
- dl
hours:
- '09:00'
- '10:15'
- '11:30'
turns:
- 'L1'
timetable:
dl:
- - ana
- - belen
colors:
ana: 'aa11aa'
belen: 'bb22bb'
extensions:
ana: 1001
belen: 1002
names: {}
""")
def test_fromSolution_manyDays(self):
config = self.config_twoDays()
result=Scheduling.fromSolution(
date=datetime.strptime(
'2016-07-18','%Y-%m-%d').date(),
config=config,
solution={
('dl',0,0):'ana',
('dm',0,0):'belen',
},
)
self.assertNsEqual(result.data(), """
week: '2016-07-18'
days:
- dl
- dm
hours:
- '09:00'
- '10:15'
turns:
- 'L1'
timetable:
dl:
- - ana
dm:
- - belen
colors:
ana: 'aa11aa'
belen: 'bb22bb'
extensions:
ana: 1001
belen: 1002
names: {}
""")
def test_fromSolution_manyEverything(self):
config = self.config_twoEverything()
result=Scheduling.fromSolution(
date=datetime.strptime(
'2016-07-18','%Y-%m-%d').date(),
config=config,
solution={
('dl',0,0):'ana',
('dl',1,0):'belen',
('dm',0,1):'carla',
('dm',1,1):'diana',
},
)
self.assertNsEqual(result.data(), """
week: '2016-07-18'
days:
- dl
- dm
hours:
- '09:00'
- '10:15'
- '11:30'
turns:
- 'L1'
- 'L2'
timetable:
dl:
- - ana
- festiu
- - belen
- festiu
dm:
- - festiu
- carla
- - festiu
- diana
colors:
ana: 'aa11aa'
belen: 'bb22bb'
extensions:
ana: 1001
belen: 1002
names: {}
""")
completeConfig="""\
nTelefons: 3
    diesCerca: ['dx','dm','dj', 'dl', 'dv',] # The most conflictive days go first
diesVisualitzacio: ['dl','dm','dx','dj','dv']
    hours: # The last one is the closing time
- '09:00'
- '10:15'
- '11:30'
- '12:45'
- '14:00'
    randomColors: false # Whether to generate random colors or use the 'colors' mapping
colors:
marc: 'fbe8bc'
eduard: 'd8b9c5'
pere: '8f928e'
david: 'ffd3ac'
aleix: 'eed0eb'
carles: 'c98e98'
marta: 'eb9481'
monica: '7fada0'
yaiza: '90cdb9'
erola: '8789c8'
manel: '88dfe3'
tania: 'c8abf4'
judit: 'e781e8'
silvia: '8097fa'
joan: 'fae080'
ana: 'aa11aa'
victor: 'ff3333'
jordi: 'ff9999'
cesar: '889988'
extensions:
marta: 3040
monica: 3041
manel: 3042
erola: 3043
yaiza: 3044
eduard: 3045
marc: 3046
judit: 3047
judith: 3057
tania: 3048
carles: 3051
pere: 3052
aleix: 3053
david: 3054
silvia: 3055
joan: 3056
ana: 1001
victor: 3182
jordi: 3183
    names: # Only the ones that need more than simple capitalization
silvia: Sílvia
monica: Mònica
tania: Tània
cesar: César
victor: Víctor
"""
def test_fromSolution_completeTimetable(self):
result=Scheduling.fromSolution(
config=ns.loads(self.completeConfig),
solution={
('dl',0,0):'jordi',
('dl',0,1):'marta',
('dl',0,2):'tania',
('dl',1,0):'tania',
('dl',1,1):'yaiza',
('dl',1,2):'silvia',
('dl',2,0):'judith',
('dl',2,1):'pere',
('dl',2,2):'ana',
('dl',3,0):'ana',
('dl',3,1):'judith',
('dl',3,2):'erola',
('dm',0,0):'pere',
('dm',0,1):'jordi',
('dm',0,2):'victor',
('dm',1,0):'carles',
('dm',1,1):'victor',
('dm',1,2):'ana',
('dm',2,0):'joan',
('dm',2,1):'silvia',
('dm',2,2):'eduard',
('dm',3,0):'david',
('dm',3,1):'joan',
('dm',3,2):'monica',
('dx',0,0):'yaiza',
('dx',0,1):'monica',
('dx',0,2):'pere',
('dx',1,0):'erola',
('dx',1,1):'joan',
('dx',1,2):'marta',
('dx',2,0):'victor',
('dx',2,1):'eduard',
('dx',2,2):'jordi',
('dx',3,0):'eduard',
('dx',3,1):'david',
('dx',3,2):'victor',
('dj',0,0):'judith',
('dj',0,1):'jordi',
('dj',0,2):'carles',
('dj',1,0):'silvia',
('dj',1,1):'tania',
('dj',1,2):'judith',
('dj',2,0):'monica',
('dj',2,1):'ana',
('dj',2,2):'judit',
('dj',3,0):'judit',
('dj',3,1):'erola',
('dj',3,2):'joan',
('dv',0,0):'ana',
('dv',0,1):'judith',
('dv',0,2):'jordi',
('dv',1,0):'jordi',
('dv',1,1):'ana',
('dv',1,2):'judith',
('dv',2,0):'victor',
('dv',2,1):'carles',
('dv',2,2):'yaiza',
('dv',3,0):'marta',
('dv',3,1):'victor',
('dv',3,2):'silvia',
},
date=datetime.strptime(
'2016-07-11','%Y-%m-%d').date(),
)
self.assertNsEqual(result.data(), """\
week: '2016-07-11'
days:
- dl
- dm
- dx
- dj
- dv
hours:
- '09:00'
- '10:15'
- '11:30'
- '12:45'
- '14:00'
turns:
- L1
- L2
- L3
timetable:
dl:
- - jordi
- marta
- tania
- - tania
- yaiza
- silvia
- - judith
- pere
- ana
- - ana
- judith
- erola
dm:
- - pere
- jordi
- victor
- - carles
- victor
- ana
- - joan
- silvia
- eduard
- - david
- joan
- monica
dx:
- - yaiza
- monica
- pere
- - erola
- joan
- marta
- - victor
- eduard
- jordi
- - eduard
- david
- victor
dj:
- - judith
- jordi
- carles
- - silvia
- tania
- judith
- - monica
- ana
- judit
- - judit
- erola
- joan
dv:
- - ana
- judith
- jordi
- - jordi
- ana
- judith
- - victor
- carles
- yaiza
- - marta
- victor
- silvia
colors:
marc: 'fbe8bc'
eduard: 'd8b9c5'
pere: '8f928e'
david: 'ffd3ac'
aleix: 'eed0eb'
carles: 'c98e98'
marta: 'eb9481'
monica: '7fada0'
yaiza: '90cdb9'
erola: '8789c8'
manel: '88dfe3'
tania: 'c8abf4'
judit: 'e781e8'
silvia: '8097fa'
joan: 'fae080'
ana: 'aa11aa'
victor: 'ff3333'
jordi: 'ff9999'
cesar: '889988'
extensions:
marta: 3040
monica: 3041
manel: 3042
erola: 3043
yaiza: 3044
eduard: 3045
marc: 3046
judit: 3047
judith: 3057
tania: 3048
carles: 3051
pere: 3052
aleix: 3053
david: 3054
silvia: 3055
joan: 3056
ana: 1001
victor: 3182
jordi: 3183
names:
silvia: Sílvia
monica: Mònica
tania: Tània
cesar: César
victor: Víctor
""")
unittest.TestCase.__str__ = unittest.TestCase.id
# vim: ts=4 sw=4 et
| Som-Energia/somenergia-tomatic | tomatic/scheduling_test.py | Python | gpl-3.0 | 24,860 |
import re
from thefuck.utils import get_closest, for_app
def extract_possibilities(command):
possib = re.findall(r'\n\(did you mean one of ([^\?]+)\?\)', command.stderr)
if possib:
return possib[0].split(', ')
possib = re.findall(r'\n ([^$]+)$', command.stderr)
if possib:
return possib[0].split(' ')
return possib
@for_app('hg')
def match(command):
return ('hg: unknown command' in command.stderr
and '(did you mean one of ' in command.stderr
or "hg: command '" in command.stderr
and "' is ambiguous:" in command.stderr)
def get_new_command(command):
script = command.script_parts[:]
possibilities = extract_possibilities(command)
script[1] = get_closest(script[1], possibilities)
return ' '.join(script)
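# Illustrative behaviour (hypothetical command, not a recorded fixture): for
# `hg sumary` failing with
#   hg: unknown command 'sumary'
#   (did you mean one of summary?)
# match() is true and get_new_command() rewrites the command to `hg summary`.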
| PLNech/thefuck | thefuck/rules/mercurial.py | Python | mit | 807 |
from functools import reduce
from itertools import chain, combinations, product, permutations
# This class is used to represent and examine algebras on atom tables.
# It is intended to be used for nonassociative algebras, but this is not assumed.
class AtomicAlgebra:
# Create an algebra from a table of atoms, which gives compositions, and a converse structure.
    # An atom table is a list of lists, with each entry a set of atoms.
# The set of atoms is interpreted as a union. Atoms are 'a', 'b', 'c', etc.
# The converse pair is a list of 2-tuples of atoms.
# If 'a' is converse to 'b', write as ('a','b').
# If 'a' is symmetric, write as ('a', 'a').
# Can also give converses as a dictionary.
    # The algebra need not satisfy all the axioms.
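    # Illustrative construction (a made-up two-atom algebra, not taken from the
    # original source):
    #   A = AtomicAlgebra("[[a,b],[b,a+b]]", [('a', 'a'), ('b', 'b')])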
def __init__(self, atom_table, converse = None):
if type(atom_table) == str:
atom_table = self._string_to_atom_table(atom_table)
self.n_atoms = len(atom_table[0])
self.atoms = [set([chr(i + 97)]) for i in range(self.n_atoms)]
self.atom_table = atom_table
# If no converses given assume all atoms are symmetric.
if converse == None:
self.converse = [(x,x) for x in [chr(i + 97) for i in range(self.n_atoms)]]
        # Converses can be given as a dictionary on atoms...
if type(converse) is dict:
self.converse_pairs = self.converse_dict_to_pairs(converse)
self.converse_dict = converse
# ... or as a list of tuples.
else:
self.converse_pairs = converse
self.converse_dict = self.converse_pairs_to_dict(converse)
# set up the basic properties of the algebra.
self._non_identity_atoms = None
self.top = reduce(lambda x, y : x | y, self.atoms)
self.zero = set()
# The elements are the power set of the atoms.
self.elements = [combinations(list(self.top), n) for n in range(self.n_atoms + 1)]
self.elements = list(chain.from_iterable(self.elements))
self.elements = [set(element) for element in self.elements]
self.n_elements = 2**self.n_atoms
self.n_non_zero_elements = self.n_elements - 1
self.symmetric_atoms = [x[0] for x in self.converse_pairs if x[0] == x[1]]
self.non_symmetric_pairs = [x for x in self.converse_pairs if x[0] != x[1]]
self._cyclePartition = self.cycle_partition(self.converse_dict, self.n_atoms)
self._identity = None
self._semigroup = None
# properties
self._is_NA = None
self._satisfies_WA_axiom = None
self._is_WA = None
self._satisfies_SA_axiom = None
self._is_SA = None
self._is_associative = None
self._is_RA = None
# A human-readable description of each relation algebra axiom.
AXIOMS = {
"R01": "+-commutativity: x + y = y + x",
"R02": "+-associativity: x + (y + z) = (x + y) + z",
"R03": "Huntington's axiom: -(-x + -y) + -(-x + y) = x",
"R04": ";-associativity: x;(y;z) = (x;y);z",
"R05": ";-distributivity: (x + y);z = x;z + y;z",
"R06": "identity law: x;1' = x",
"R07": "converse-involution: con(con(x)) = x",
"R08": "converse-distributivity: con(x + y) = con(x) + con(y)",
"R09": "converse-involutive distributivity: con(x;y) = con(y);con(x)",
"R10": "Tarski/De Morgan axiom: con(x); -(x;y) + -y = y",
"WA" : "((id . x) . top) . top = (id . x) . (top . top)",
"SA" : "(x . top) . top = x . (top . top)"
}
# Given an atom table as a string, convert it to a matrix (list of lists).
# This method seems to be powered by magic, and should be redone.
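    # For example (an added illustration), the string "[[a,b],[b,a+b]]" is parsed
    # into [[{'a'}, {'b'}], [{'b'}, {'a', 'b'}]]; a '0' entry becomes the empty set.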
@staticmethod
def _string_to_atom_table(matrix_string):
M0 = matrix_string.replace(" ", "")
M1 = M0.strip()[1:-1]
M2 = M1.strip()[1:-1]
M3 = [line.split(',') for line in M2.split('],[')]
M4 = [[set(entry.split("+"))-set(['0']) for entry in line] for line in M3]
return M4
# Converses can be given as a list of tuples [('a', 'a'), ('b', 'c')] or a
    # dictionary on atoms {'a': 'a', 'b': 'c', 'c': 'b'}. The following
# methods convert between the two.
@staticmethod
def converse_pairs_to_dict(converse_pairs):
converse_dict = dict()
for converse_pair in converse_pairs:
if converse_pair[0] == converse_pair[1]: # symmetric atom
converse_dict[converse_pair[0]] = converse_pair[0]
else: # non-symmetric atoms
converse_dict[converse_pair[0]] = converse_pair[1]
converse_dict[converse_pair[1]] = converse_pair[0]
return converse_dict
@staticmethod
def converse_dict_to_pairs(converse_dict):
converse_pairs = []
for pair in converse_dict.items():
if pair not in converse_pairs and pair[::-1] not in converse_pairs:
converse_pairs.append(pair)
return converse_pairs
# Given a triple and a converse structure, generate the cycle including that triple.
# This is an implementation of the relation algebra concept of a Peircean transform.
# Cycle generated by (x,y,z) is:
# [ (x,y,z), (con(x),z,y), (y,con(z),con(x)),
# (con(y),con(x),con(z)),(con(z),x,con(y)), (z,con(y),x) ]
# A triple in a cycle is consistent if and only if all triples in the cycle are consistent.
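    # For example (an added illustration), with every atom symmetric,
    # cycle(('a', 'b', 'c'), {'a': 'a', 'b': 'b', 'c': 'c'}) returns all six
    # permutations of ('a', 'b', 'c'), in no guaranteed order because the list
    # is passed through a set before being returned.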
@staticmethod
def cycle(triple, converse_dict):
if type(converse_dict) is not dict:
converse_dict = AtomicAlgebra.converse_pairs_to_dict(converse_dict)
x, y, z = triple
cycle = []
cycle.append(triple)
cycle.append((converse_dict[x], z, y))
cycle.append((y, converse_dict[z], converse_dict[x]))
cycle.append((converse_dict[y], converse_dict[x], converse_dict[z]))
cycle.append((converse_dict[z], x, converse_dict[y]))
cycle.append((z, converse_dict[y], x))
cycle.sort() # Prevents duplicates when using cycle_partition
return list(set(cycle)) # Remove duplicates.
# Given a converse structure, partition the triples of elements into cycles.
@staticmethod
def cycle_partition(converse_dict, n_atoms):
if type(converse_dict) is not dict:
converse_dict = AtomicAlgebra.converse_pairs_to_dict(converse_dict)
atoms = [chr(i + 97) for i in range(n_atoms)]
parts = []
for triple in product(atoms, repeat = 3):
cycle = AtomicAlgebra.cycle(triple, converse_dict)
if cycle not in parts: parts.append(cycle)
return parts
# Give a human readable report on a list of failed axioms, eg. ["R01", "R02", "R07"].
@staticmethod
def report_failed_axioms(failed_axioms):
if type(failed_axioms) is not list: failed_axioms = [failed_axioms]
for axiom in failed_axioms:
print("Fails axiom " + axiom + ": " + AtomicAlgebra.AXIOMS[axiom] + ".")
# Through unions, we can extend any map between atoms to a map between
# elements of algebras. For example, if 'a' -> 'b' and 'c' -> 'd', then
# {'a', 'b'} -> {'c', 'd'}. Thus, every map between atoms uniquely defines
# a map between elements. In practice we always define maps on atoms only.
# We use the term "function" in reference to a map between elements.
@staticmethod
def atom_function(atom_map, element):
if type(element) is str:
return atom_map[element]
else:
return set([AtomicAlgebra.atom_function(atom_map, x) for x in element])
# Turns a single atom 'a' into a set(['a']).
@staticmethod
def make_set(x):
if type(x) == str:
x = set([x])
if type(x) != type(set()):
raise TypeError('An element of the algebra needs to be either a set of atoms or a string representing a single atom.')
return x
# Check if a map between atom structures preserves composition.
# This is a necessary, but not sufficient condition, for an atom_map or
# atom_function to be an isomorphism.
def preserves_composition(self, other, atom_map):
preserves_composition = True
for x, y in product(self.atoms, repeat = 2):
if AtomicAlgebra.atom_function(atom_map, self.compose(x, y)) != other.compose(AtomicAlgebra.atom_function(atom_map, x), AtomicAlgebra.atom_function(atom_map, y)):
preserves_composition = False
break
return preserves_composition
# Checks if a given algebra is isomorphic to the instance being called from.
# Can also return an isomorphism, if one exists.
def is_isomorphic(self, other, return_isomorphism = False):
# First we check that the algebras are the same size, and that the
# number of atoms in the identity is the same.
# These are necessary conditions for an isomorphism, so can save some time.
if self.n_atoms != other.n_atoms: return False
if len(self.identity) != len(other.identity): return False
# Next we check that the converse pairs match in number and structure.
# This is a necessary condition for isomorphism, so can save some time.
if len(self.symmetric_atoms) != len(other.symmetric_atoms):
return False
# Enumerate all possible functions respecting converse.
# First enumerate all possible ways to map symmetric atoms from
        # the first algebra to symmetric atoms from the second algebra.
possible_symmetric_maps = []
for perm in permutations(other.symmetric_atoms):
possible_symmetric_maps.append(zip(self.symmetric_atoms, perm))
possible_symmetric_maps = [list(p) for p in possible_symmetric_maps]
# Now enumerate all possible ways to map converse pairs from the
# first algebra to converse pairs from the second algebra.
possible_converse_pair_maps = []
for perm1 in list(product(*[[x,x[::-1]] for x in other.non_symmetric_pairs])):
for perm2 in permutations(perm1):
map = []
pairing = zip(self.non_symmetric_pairs, perm2)
for pair in pairing:
map.append((pair[0][0], pair[1][0]))
map.append((pair[0][1], pair[1][1]))
possible_converse_pair_maps.append(map)
# Now combine them to generate all maps respecting the converse structure.
possible_isomorphisms = []
for symmetric_map, converse_pair_map in product(possible_symmetric_maps, possible_converse_pair_maps):
possible_isomorphisms.append(symmetric_map + converse_pair_map)
possible_isomorphisms = [dict(x) for x in possible_isomorphisms]
# We can reduce the search space by exploiting the fact that an
# isomorphism will always map the identity of one algebra to the identity
# of the target algebra. We generate all possible maps from atoms in the
# identity of the first algebra to atoms in the identity of the second
# algebra, and then restrict the possible_isomorphisms to those that
# "agree" with one of the identity-preserving maps.
other_identity_permutations = [p for p in permutations(list(other.identity))]
possible_identity_maps = [dict((list(self.identity)[i], y[i])
for i in range(len(self.identity)))
for y in other_identity_permutations]
possible_isomorphisms = [iso for iso in possible_isomorphisms
if {k: iso[k] for k in list(self.identity)} in possible_identity_maps]
# Now we search through the possible isomorphisms.
# Our final search space includes only those that respect converse and
# identity. We now need to search through these for maps that respect
# composition. Break if an isomorphism is found, to save time.
is_isomorphic = False
for possible_isomorphism in possible_isomorphisms:
if self.preserves_composition(other, possible_isomorphism):
is_isomorphic = True
isomorphism = possible_isomorphism
break
if is_isomorphic and return_isomorphism:
return is_isomorphic, isomorphism
else:
return is_isomorphic
# Define composition of atoms or sets of atoms using the atom table.
# We allow for inputs of single atoms, but every element is properly
# viewed as a set of atoms.
def compose(self, x, y):
x = self.make_set(x)
y = self.make_set(y)
# Composition with the 0 element
if x == set() or y == set():
output = set()
else:
output = set()
for i, j in product(x, y):
row_pos = ord(i) - 97
col_pos = ord(j) - 97
try:
output = output.union(self.atom_table[row_pos][col_pos])
                except IndexError:
                    raise IndexError("Out of bounds: composition " + str(x) + "*" + str(y) + " contains a non-atomic element.")
return output
# Define intersection as set intersection.
def intersection(self, x, y):
x = self.make_set(x)
y = self.make_set(y)
return x.intersection(y)
# Define union as set union.
def union(self, x, y):
x = self.make_set(x)
y = self.make_set(y)
return x.union(y)
# Define converse using the converse dictionary we made earlier.
def converse(self, x):
x = self.make_set(x)
return set([self.converse_dict[atom] for atom in x])
# Define complement as set complement relative to the top elemenet (set of all atoms).
def complement(self, x):
x = self.make_set(x)
return self.top.difference(x)
# Return the identity of an algebra if it exists, otherwise returns None
# If the identity element is not already recorded, will run through all
# elements and check for identity property.
@property
def identity(self):
if self._identity == None:
for candidate_identity in self.elements:
isId = True
for atom in self.atoms:
if self.compose(candidate_identity, atom) != atom or self.compose(atom, candidate_identity) != atom:
isId = False
break
if isId:
self._identity = candidate_identity
break
return self._identity
    # All non-identity atoms.
    # Return a list of atoms which are not the identity atom.
    @property
    def non_identity_atoms(self):
if self._non_identity_atoms == None:
if self.identity == None:
return self.atoms
else:
self._non_identity_atoms = [x for x in self.atoms if x != self.identity]
return self._non_identity_atoms
# Determines if the algebra generated by the atom table is a nonassociative algebra.
# Due to the construction, not all axioms need to be checked.
# Can control the amount of reporting done on failed axioms, if any.
def is_NA(self, what_fails = False, report = False):
if report:
what_fails = True
if self._is_NA == None or what_fails == True:
self._is_NA = True
failed_axioms = []
# Axiom R01: +-commutativity: x + y = y + x
# Axiom R02: +-associativity: x + (y + z) = (x + y) + z
# Axiom R03: Huntington's axiom: -(-x + -y) + -(-x + y) = x
for x,y in product(self.atoms, repeat = 2):
first_term = self.complement(self.union(self.complement(x), self.complement(y)))
second_term = self.complement(self.union(self.complement(x), y))
if self.union(first_term, second_term) != x:
failed_axioms.append("R03")
break
# Axiom R05: ;-distributivity: (x + y);z = x;z + y;z
# Axiom R06: identity law: x;1' = x
if self.identity == None:
failed_axioms.append("R06")
# Axiom R07: converse-involution: con(con(x)) = x
# - should not be needed if converse pairs are correctly defined.
for x in self.atoms:
if self.converse(self.converse(x)) != x:
failed_axioms.append("R07")
break
# Axiom R08: converse-distributivity: con(x + y) = con(x) + con(y)
for x,y in product(self.atoms, repeat = 2):
if self.converse(self.union(x,y)) != self.union(self.converse(x), self.converse(y)):
failed_axioms.append("R09")
break
# Axiom R09: converse-involutive distributivity: con(x;y) = con(y);con(x)
for x,y in product(self.atoms, repeat = 2):
if self.converse(self.compose(x,y)) != self.compose(self.converse(y), self.converse(x)):
failed_axioms.append("R09")
break
# Axiom R10: Tarski/De Morgan axiom: con(x); -(x;y) + -y = y
for x,y in product(self.atoms, repeat = 2):
if self.union(self.compose(self.converse(x), self.complement(self.compose(x,y))), self.complement(y)) != self.complement(y):
failed_axioms.append("R10")
break
if len(failed_axioms) > 0:
self._is_NA = False
        if report:
            self.report_failed_axioms(failed_axioms)
            return self._is_NA
elif what_fails and not report:
return (self._is_NA, failed_axioms)
else:
return self._is_NA
# Determines if the algebra generated by the atom table satisfies the weakly associative axiom.
# Axiom WA: ((id . x) . top) . top = (id . x) . (top . top)
@property
def satisfies_WA_axiom(self):
if self._satisfies_WA_axiom == None:
if self.identity == None:
self._satisfies_WA_axiom = False
else:
self._satisfies_WA_axiom = True
for x in self.atoms:
LHS = self.compose(self.compose(
self.intersection(self.identity, x), self.top), self.top)
                    RHS = self.compose(
                        self.intersection(self.identity, x), self.compose(self.top, self.top))
if LHS != RHS:
self._satisfies_WA_axiom = False
break
return self._satisfies_WA_axiom
# Determines if the algebra generated by the atom table is a weakly associative algebra.
    # The algebra must be a nonassociative algebra and satisfy the weakly associative axiom.
def is_WA(self, what_fails = False, report = False):
if report:
what_fails = True
if what_fails == True:
self._is_WA = True
failed_axioms = []
failed_axioms.extend(self.is_NA(True,False)[1])
if self.satisfies_WA_axiom == False:
failed_axioms.append("WA")
if len(failed_axioms) > 0:
self._is_WA = False
elif self._is_WA == None:
self._is_WA = (self.is_NA() and self.satisfies_WA_axiom)
if report:
self.report_failed_axioms(failed_axioms)
return self._is_WA
elif what_fails and not report:
return (self._is_WA, failed_axioms)
else:
return self._is_WA
# Determines if the algebra generated by the atom table satisfies the semiassociative axiom.
# Axiom SA: (x . top) . top = x . (top . top)"
@property
def satisfies_SA_axiom(self):
if self._satisfies_SA_axiom == None:
self._satisfies_SA_axiom = True
for x in self.atoms:
                if self.compose(self.compose(x, self.top), self.top) != self.compose(x, self.compose(self.top, self.top)):
self._satisfies_SA_axiom = False
break
return self._satisfies_SA_axiom
# Determines if the algebra generated by the atom table is a semiassociative algebra.
    # The algebra must be a nonassociative algebra and satisfy the semiassociative axiom.
def is_SA(self, what_fails = False, report = False):
if report:
what_fails = True
if what_fails == True:
self._is_SA = True
failed_axioms = []
failed_axioms.extend(self.is_WA(True,False)[1])
if self.satisfies_SA_axiom == False:
failed_axioms.append("SA")
if len(failed_axioms) > 0:
self._is_SA = False
elif self._is_SA == None:
self._is_SA = (self.is_NA() and self.satisfies_SA_axiom)
if report:
self.report_failed_axioms(failed_axioms)
return self._is_SA
elif what_fails and not report:
return (self._is_SA, failed_axioms)
else:
return self._is_SA
# Determines if the algebra generated by the atom table has an associative composition operation.
# Axiom R04: ;-associativity: x;(y;z) = (x;y);z."
@property
def is_associative(self):
if self._is_associative == None:
self._is_associative = True
for i, j, k in product(self.elements, repeat = 3):
if self.compose(self.compose(i,j), k) != self.compose(i, self.compose(j,k)):
self._is_associative = False
break
return self._is_associative
# Determines if the algebra generated by the atom table is a relation algebra.
    # Must be a nonassociative algebra whose composition operation is associative.
# If what_fails = True, will return a list of RA axioms that are not
# satisfied by the algebra.
# If report = True, a human-readable version of the failed axioms will
# instead be returned.
def is_RA(self, what_fails = False, report = False):
if report:
what_fails = True
if what_fails == True:
self._is_RA = True
failed_axioms = []
failed_axioms.extend(self.is_SA(True, False)[1])
if self.is_associative == False:
failed_axioms.append("R04")
if len(failed_axioms) > 0:
self._is_RA = False
elif self._is_RA == None:
self._is_RA = (self.is_NA() and self.is_associative)
if report:
self.report_failed_axioms(failed_axioms)
return self._is_RA
elif what_fails and not report:
return (self._is_RA, failed_axioms)
else:
return self._is_RA
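# A minimal usage sketch (an editorial addition, not part of the original module).
# The atom table below is an illustrative assumption: a two-atom algebra with an
# identity atom 'a' and a symmetric diversity atom 'b' satisfying b;b = a + b.
if __name__ == '__main__':
    example = AtomicAlgebra([[{'a'}, {'b'}],
                             [{'b'}, {'a', 'b'}]],
                            [('a', 'a'), ('b', 'b')])
    print(example.identity)  # expected: {'a'}
    print(example.is_RA())   # expected: True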
| mdneuzerling/AtomicAlgebra | AtomicAlgebra.py | Python | gpl-3.0 | 23,406 |
import unittest
import numpy
import chainer
from chainer import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
@testing.parameterize(*testing.product({
'shape': [(3,), (3, 4)],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
class TestFlipUD(unittest.TestCase):
shape = (3, 4)
dtype = numpy.float32
def setUp(self):
self.x = numpy.random.uniform(0, 1, self.shape).astype(self.dtype)
self.g = numpy.random.uniform(0, 1, self.shape).astype(self.dtype)
def check_forward(self, x_data):
x = chainer.Variable(x_data)
y = functions.flipud(x)
testing.assert_allclose(y.data, numpy.flipud(self.x))
def test_forward_cpu(self):
self.check_forward(self.x)
@attr.gpu
def test_forward_gpu(self):
self.check_forward(cuda.to_gpu(self.x))
def check_backward(self, x_data, y_grad):
gradient_check.check_backward(
functions.FlipUD(), x_data, y_grad, dtype=numpy.float64)
def test_backward_cpu(self):
self.check_backward(self.x, self.g)
@attr.gpu
def test_backward_gpu(self):
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.g))
testing.run_module(__name__, __file__)
| kiyukuta/chainer | tests/chainer_tests/functions_tests/array_tests/test_flipud.py | Python | mit | 1,313 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from distutils.core import setup
setup(
name="pppcap",
version="0.1",
author="Ruy",
author_email="ruy.suzu7(at)gmail.com",
url="https://github.com/ainoniwa/pppcap",
description="Pppcap: pure python wrapper for libpcap/winpcap",
license="MIT",
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Topic :: System :: Networking",
],
packages=['']
) | ainoniwa/pppcap | setup.py | Python | mit | 627 |
from django.utils.encoding import python_2_unicode_compatible
from ..models import models
@python_2_unicode_compatible
class NamedModel(models.Model):
name = models.CharField(max_length=30)
objects = models.GeoManager()
class Meta:
abstract = True
required_db_features = ['gis_enabled']
def __str__(self):
return self.name
class City(NamedModel):
point = models.PointField(geography=True)
class Meta:
app_label = 'geogapp'
required_db_features = ['gis_enabled']
class Zipcode(NamedModel):
code = models.CharField(max_length=10)
poly = models.PolygonField(geography=True)
class County(NamedModel):
state = models.CharField(max_length=20)
mpoly = models.MultiPolygonField(geography=True)
class Meta:
app_label = 'geogapp'
required_db_features = ['gis_enabled']
def __str__(self):
return ' County, '.join([self.name, self.state])
| DONIKAN/django | tests/gis_tests/geogapp/models.py | Python | bsd-3-clause | 954 |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: MIT. See LICENSE
import frappe
from frappe import _
from frappe.model.document import Document
from frappe.model import no_value_fields
class Workflow(Document):
def validate(self):
self.set_active()
self.create_custom_field_for_workflow_state()
self.update_default_workflow_status()
self.validate_docstatus()
def on_update(self):
self.update_doc_status()
frappe.clear_cache(doctype=self.document_type)
frappe.cache().delete_key('workflow_' + self.name) # clear cache created in model/workflow.py
def create_custom_field_for_workflow_state(self):
frappe.clear_cache(doctype=self.document_type)
meta = frappe.get_meta(self.document_type)
if not meta.get_field(self.workflow_state_field):
# create custom field
frappe.get_doc({
"doctype":"Custom Field",
"dt": self.document_type,
"__islocal": 1,
"fieldname": self.workflow_state_field,
"label": self.workflow_state_field.replace("_", " ").title(),
"hidden": 1,
"allow_on_submit": 1,
"no_copy": 1,
"fieldtype": "Link",
"options": "Workflow State",
"owner": "Administrator"
}).save()
frappe.msgprint(_("Created Custom Field {0} in {1}").format(self.workflow_state_field,
self.document_type))
def update_default_workflow_status(self):
docstatus_map = {}
states = self.get("states")
for d in states:
if not d.doc_status in docstatus_map:
frappe.db.sql("""
UPDATE `tab{doctype}`
SET `{field}` = %s
WHERE ifnull(`{field}`, '') = ''
AND `docstatus` = %s
""".format(doctype=self.document_type, field=self.workflow_state_field),
(d.state, d.doc_status))
docstatus_map[d.doc_status] = d.state
def update_doc_status(self):
'''
Checks if the docstatus of a state was updated.
		If yes, then the docstatus of documents with the same state will be updated.
'''
doc_before_save = self.get_doc_before_save()
before_save_states, new_states = {}, {}
if doc_before_save:
for d in doc_before_save.states:
before_save_states[d.state] = d
for d in self.states:
new_states[d.state] = d
for key in new_states:
if key in before_save_states:
if not new_states[key].doc_status == before_save_states[key].doc_status:
frappe.db.set_value(self.document_type, {
self.workflow_state_field: before_save_states[key].state
},
'docstatus',
new_states[key].doc_status,
update_modified = False)
def validate_docstatus(self):
def get_state(state):
for s in self.states:
if s.state==state:
return s
frappe.throw(frappe._("{0} not a valid State").format(state))
for t in self.transitions:
state = get_state(t.state)
next_state = get_state(t.next_state)
if state.doc_status=="2":
frappe.throw(frappe._("Cannot change state of Cancelled Document. Transition row {0}").format(t.idx))
if state.doc_status=="1" and next_state.doc_status=="0":
frappe.throw(frappe._("Submitted Document cannot be converted back to draft. Transition row {0}").format(t.idx))
if state.doc_status=="0" and next_state.doc_status=="2":
frappe.throw(frappe._("Cannot cancel before submitting. See Transition {0}").format(t.idx))
def set_active(self):
if int(self.is_active or 0):
# clear all other
frappe.db.sql("""UPDATE `tabWorkflow` SET `is_active`=0
WHERE `document_type`=%s""",
self.document_type)
@frappe.whitelist()
def get_fieldnames_for(doctype):
return [f.fieldname for f in frappe.get_meta(doctype).fields \
if f.fieldname not in no_value_fields]
@frappe.whitelist()
def get_workflow_state_count(doctype, workflow_state_field, states):
states = frappe.parse_json(states)
result = frappe.get_all(
doctype,
fields=[workflow_state_field, 'count(*) as count', 'docstatus'],
filters = {
workflow_state_field: ['not in', states]
},
group_by = workflow_state_field
)
return [r for r in result if r[workflow_state_field]]
| frappe/frappe | frappe/workflow/doctype/workflow/workflow.py | Python | mit | 3,996 |
r"""
Nonlinear solvers
-----------------
.. currentmodule:: scipy.optimize
This is a collection of general-purpose nonlinear multidimensional
solvers. These solvers find *x* for which *F(x) = 0*. Both *x*
and *F* can be multidimensional.
Routines
~~~~~~~~
Large-scale nonlinear solvers:
.. autosummary::
newton_krylov
anderson
General nonlinear solvers:
.. autosummary::
broyden1
broyden2
Simple iterations:
.. autosummary::
excitingmixing
linearmixing
diagbroyden
Examples
~~~~~~~~
**Small problem**
>>> def F(x):
... return np.cos(x) + x[::-1] - [1, 2, 3, 4]
>>> import scipy.optimize
>>> x = scipy.optimize.broyden1(F, [1,1,1,1], f_tol=1e-14)
>>> x
array([ 4.04674914, 3.91158389, 2.71791677, 1.61756251])
>>> np.cos(x) + x[::-1]
array([ 1., 2., 3., 4.])
**Large problem**
Suppose that we needed to solve the following integrodifferential
equation on the square :math:`[0,1]\times[0,1]`:
.. math::
\nabla^2 P = 10 \left(\int_0^1\int_0^1\cosh(P)\,dx\,dy\right)^2
with :math:`P(x,1) = 1` and :math:`P=0` elsewhere on the boundary of
the square.
The solution can be found using the `newton_krylov` solver:
.. plot::
import numpy as np
from scipy.optimize import newton_krylov
from numpy import cosh, zeros_like, mgrid, zeros
# parameters
nx, ny = 75, 75
hx, hy = 1./(nx-1), 1./(ny-1)
P_left, P_right = 0, 0
P_top, P_bottom = 1, 0
def residual(P):
d2x = zeros_like(P)
d2y = zeros_like(P)
d2x[1:-1] = (P[2:] - 2*P[1:-1] + P[:-2]) / hx/hx
d2x[0] = (P[1] - 2*P[0] + P_left)/hx/hx
d2x[-1] = (P_right - 2*P[-1] + P[-2])/hx/hx
d2y[:,1:-1] = (P[:,2:] - 2*P[:,1:-1] + P[:,:-2])/hy/hy
d2y[:,0] = (P[:,1] - 2*P[:,0] + P_bottom)/hy/hy
d2y[:,-1] = (P_top - 2*P[:,-1] + P[:,-2])/hy/hy
return d2x + d2y - 10*cosh(P).mean()**2
# solve
guess = zeros((nx, ny), float)
sol = newton_krylov(residual, guess, method='lgmres', verbose=1)
print('Residual: %g' % abs(residual(sol)).max())
# visualize
import matplotlib.pyplot as plt
x, y = mgrid[0:1:(nx*1j), 0:1:(ny*1j)]
plt.pcolor(x, y, sol)
plt.colorbar()
plt.show()
"""
# Copyright (C) 2009, Pauli Virtanen <pav@iki.fi>
# Distributed under the same license as SciPy.
from __future__ import division, print_function, absolute_import
import sys
import numpy as np
from scipy.linalg import norm, solve, inv, qr, svd, LinAlgError
from numpy import asarray, dot, vdot
import scipy.sparse.linalg
import scipy.sparse
from scipy.linalg import get_blas_funcs
import inspect
from scipy._lib._util import getfullargspec_no_self as _getfullargspec
from .linesearch import scalar_search_wolfe1, scalar_search_armijo
__all__ = [
'broyden1', 'broyden2', 'anderson', 'linearmixing',
'diagbroyden', 'excitingmixing', 'newton_krylov']
#------------------------------------------------------------------------------
# Utility functions
#------------------------------------------------------------------------------
class NoConvergence(Exception):
pass
def maxnorm(x):
return np.absolute(x).max()
def _as_inexact(x):
"""Return `x` as an array, of either floats or complex floats"""
x = asarray(x)
if not np.issubdtype(x.dtype, np.inexact):
return asarray(x, dtype=np.float_)
return x
def _array_like(x, x0):
"""Return ndarray `x` as same array subclass and shape as `x0`"""
x = np.reshape(x, np.shape(x0))
wrap = getattr(x0, '__array_wrap__', x.__array_wrap__)
return wrap(x)
def _safe_norm(v):
if not np.isfinite(v).all():
return np.array(np.inf)
return norm(v)
#------------------------------------------------------------------------------
# Generic nonlinear solver machinery
#------------------------------------------------------------------------------
_doc_parts = dict(
params_basic="""
F : function(x) -> f
Function whose root to find; should take and return an array-like
object.
xin : array_like
Initial guess for the solution
""".strip(),
params_extra="""
iter : int, optional
Number of iterations to make. If omitted (default), make as many
as required to meet tolerances.
verbose : bool, optional
Print status to stdout on every iteration.
maxiter : int, optional
Maximum number of iterations to make. If more are needed to
meet convergence, `NoConvergence` is raised.
f_tol : float, optional
Absolute tolerance (in max-norm) for the residual.
If omitted, default is 6e-6.
f_rtol : float, optional
Relative tolerance for the residual. If omitted, not used.
x_tol : float, optional
Absolute minimum step size, as determined from the Jacobian
approximation. If the step size is smaller than this, optimization
is terminated as successful. If omitted, not used.
x_rtol : float, optional
Relative minimum step size. If omitted, not used.
tol_norm : function(vector) -> scalar, optional
Norm to use in convergence check. Default is the maximum norm.
line_search : {None, 'armijo' (default), 'wolfe'}, optional
Which type of a line search to use to determine the step size in the
direction given by the Jacobian approximation. Defaults to 'armijo'.
callback : function, optional
Optional callback function. It is called on every iteration as
``callback(x, f)`` where `x` is the current solution and `f`
the corresponding residual.
Returns
-------
sol : ndarray
An array (of similar array type as `x0`) containing the final solution.
Raises
------
NoConvergence
When a solution was not found.
""".strip()
)
def _set_doc(obj):
if obj.__doc__:
obj.__doc__ = obj.__doc__ % _doc_parts
def nonlin_solve(F, x0, jacobian='krylov', iter=None, verbose=False,
maxiter=None, f_tol=None, f_rtol=None, x_tol=None, x_rtol=None,
tol_norm=None, line_search='armijo', callback=None,
full_output=False, raise_exception=True):
"""
Find a root of a function, in a way suitable for large-scale problems.
Parameters
----------
%(params_basic)s
jacobian : Jacobian
A Jacobian approximation: `Jacobian` object or something that
`asjacobian` can transform to one. Alternatively, a string specifying
which of the builtin Jacobian approximations to use:
krylov, broyden1, broyden2, anderson
diagbroyden, linearmixing, excitingmixing
%(params_extra)s
full_output : bool
If true, returns a dictionary `info` containing convergence
information.
raise_exception : bool
If True, a `NoConvergence` exception is raise if no solution is found.
See Also
--------
asjacobian, Jacobian
Notes
-----
This algorithm implements the inexact Newton method, with
backtracking or full line searches. Several Jacobian
approximations are available, including Krylov and Quasi-Newton
methods.
References
----------
.. [KIM] C. T. Kelley, \"Iterative Methods for Linear and Nonlinear
Equations\". Society for Industrial and Applied Mathematics. (1995)
https://archive.siam.org/books/kelley/fr16/
"""
# Can't use default parameters because it's being explicitly passed as None
# from the calling function, so we need to set it here.
tol_norm = maxnorm if tol_norm is None else tol_norm
condition = TerminationCondition(f_tol=f_tol, f_rtol=f_rtol,
x_tol=x_tol, x_rtol=x_rtol,
iter=iter, norm=tol_norm)
x0 = _as_inexact(x0)
func = lambda z: _as_inexact(F(_array_like(z, x0))).flatten()
x = x0.flatten()
dx = np.inf
Fx = func(x)
Fx_norm = norm(Fx)
jacobian = asjacobian(jacobian)
jacobian.setup(x.copy(), Fx, func)
if maxiter is None:
if iter is not None:
maxiter = iter + 1
else:
maxiter = 100*(x.size+1)
if line_search is True:
line_search = 'armijo'
elif line_search is False:
line_search = None
if line_search not in (None, 'armijo', 'wolfe'):
raise ValueError("Invalid line search")
# Solver tolerance selection
gamma = 0.9
eta_max = 0.9999
    eta_threshold = 0.1
eta = 1e-3
for n in range(maxiter):
status = condition.check(Fx, x, dx)
if status:
break
# The tolerance, as computed for scipy.sparse.linalg.* routines
tol = min(eta, eta*Fx_norm)
dx = -jacobian.solve(Fx, tol=tol)
if norm(dx) == 0:
raise ValueError("Jacobian inversion yielded zero vector. "
"This indicates a bug in the Jacobian "
"approximation.")
# Line search, or Newton step
if line_search:
s, x, Fx, Fx_norm_new = _nonlin_line_search(func, x, Fx, dx,
line_search)
else:
s = 1.0
x = x + dx
Fx = func(x)
Fx_norm_new = norm(Fx)
jacobian.update(x.copy(), Fx)
if callback:
callback(x, Fx)
# Adjust forcing parameters for inexact methods
eta_A = gamma * Fx_norm_new**2 / Fx_norm**2
        if gamma * eta**2 < eta_threshold:
eta = min(eta_max, eta_A)
else:
eta = min(eta_max, max(eta_A, gamma*eta**2))
Fx_norm = Fx_norm_new
# Print status
if verbose:
sys.stdout.write("%d: |F(x)| = %g; step %g\n" % (
n, tol_norm(Fx), s))
sys.stdout.flush()
else:
if raise_exception:
raise NoConvergence(_array_like(x, x0))
else:
status = 2
if full_output:
info = {'nit': condition.iteration,
'fun': Fx,
'status': status,
'success': status == 1,
'message': {1: 'A solution was found at the specified '
'tolerance.',
2: 'The maximum number of iterations allowed '
'has been reached.'
}[status]
}
return _array_like(x, x0), info
else:
return _array_like(x, x0)
_set_doc(nonlin_solve)
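# An illustrative note (an editorial addition, not scipy source): `nonlin_solve`
# can also be called directly with any of the Jacobian names listed in its
# docstring, e.g.
#
#     sol, info = nonlin_solve(lambda x: np.cos(x) + x[::-1] - [1, 2, 3, 4],
#                              np.ones(4), jacobian='broyden1', f_tol=1e-14,
#                              full_output=True)
#
# where ``info['success']`` reports whether the requested tolerance was met.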
def _nonlin_line_search(func, x, Fx, dx, search_type='armijo', rdiff=1e-8,
smin=1e-2):
tmp_s = [0]
tmp_Fx = [Fx]
tmp_phi = [norm(Fx)**2]
s_norm = norm(x) / norm(dx)
def phi(s, store=True):
if s == tmp_s[0]:
return tmp_phi[0]
xt = x + s*dx
v = func(xt)
p = _safe_norm(v)**2
if store:
tmp_s[0] = s
tmp_phi[0] = p
tmp_Fx[0] = v
return p
def derphi(s):
ds = (abs(s) + s_norm + 1) * rdiff
return (phi(s+ds, store=False) - phi(s)) / ds
if search_type == 'wolfe':
s, phi1, phi0 = scalar_search_wolfe1(phi, derphi, tmp_phi[0],
xtol=1e-2, amin=smin)
elif search_type == 'armijo':
s, phi1 = scalar_search_armijo(phi, tmp_phi[0], -tmp_phi[0],
amin=smin)
if s is None:
# XXX: No suitable step length found. Take the full Newton step,
# and hope for the best.
s = 1.0
x = x + s*dx
if s == tmp_s[0]:
Fx = tmp_Fx[0]
else:
Fx = func(x)
Fx_norm = norm(Fx)
return s, x, Fx, Fx_norm
class TerminationCondition(object):
"""
Termination condition for an iteration. It is terminated if
- |F| < f_rtol*|F_0|, AND
- |F| < f_tol
AND
- |dx| < x_rtol*|x|, AND
- |dx| < x_tol
"""
def __init__(self, f_tol=None, f_rtol=None, x_tol=None, x_rtol=None,
iter=None, norm=maxnorm):
if f_tol is None:
f_tol = np.finfo(np.float_).eps ** (1./3)
if f_rtol is None:
f_rtol = np.inf
if x_tol is None:
x_tol = np.inf
if x_rtol is None:
x_rtol = np.inf
self.x_tol = x_tol
self.x_rtol = x_rtol
self.f_tol = f_tol
self.f_rtol = f_rtol
self.norm = norm
self.iter = iter
self.f0_norm = None
self.iteration = 0
def check(self, f, x, dx):
self.iteration += 1
f_norm = self.norm(f)
x_norm = self.norm(x)
dx_norm = self.norm(dx)
if self.f0_norm is None:
self.f0_norm = f_norm
if f_norm == 0:
return 1
if self.iter is not None:
# backwards compatibility with SciPy 0.6.0
return 2 * (self.iteration > self.iter)
# NB: condition must succeed for rtol=inf even if norm == 0
return int((f_norm <= self.f_tol
and f_norm/self.f_rtol <= self.f0_norm)
and (dx_norm <= self.x_tol
and dx_norm/self.x_rtol <= x_norm))
#------------------------------------------------------------------------------
# Generic Jacobian approximation
#------------------------------------------------------------------------------
class Jacobian(object):
"""
Common interface for Jacobians or Jacobian approximations.
The optional methods come useful when implementing trust region
etc., algorithms that often require evaluating transposes of the
Jacobian.
Methods
-------
solve
Returns J^-1 * v
update
Updates Jacobian to point `x` (where the function has residual `Fx`)
matvec : optional
Returns J * v
rmatvec : optional
        Returns J^H * v
    rsolve : optional
        Returns J^-H * v
    matmat : optional
        Returns J * V, where V is a dense matrix with dimensions (N,K).
todense : optional
Form the dense Jacobian matrix. Necessary for dense trust region
algorithms, and useful for testing.
Attributes
----------
shape
Matrix dimensions (M, N)
dtype
Data type of the matrix.
func : callable, optional
Function the Jacobian corresponds to
"""
def __init__(self, **kw):
names = ["solve", "update", "matvec", "rmatvec", "rsolve",
"matmat", "todense", "shape", "dtype"]
for name, value in kw.items():
if name not in names:
raise ValueError("Unknown keyword argument %s" % name)
if value is not None:
setattr(self, name, kw[name])
if hasattr(self, 'todense'):
self.__array__ = lambda: self.todense()
def aspreconditioner(self):
return InverseJacobian(self)
def solve(self, v, tol=0):
raise NotImplementedError
def update(self, x, F):
pass
def setup(self, x, F, func):
self.func = func
self.shape = (F.size, x.size)
self.dtype = F.dtype
if self.__class__.setup is Jacobian.setup:
# Call on the first point unless overridden
self.update(x, F)
class InverseJacobian(object):
def __init__(self, jacobian):
self.jacobian = jacobian
self.matvec = jacobian.solve
self.update = jacobian.update
if hasattr(jacobian, 'setup'):
self.setup = jacobian.setup
if hasattr(jacobian, 'rsolve'):
self.rmatvec = jacobian.rsolve
@property
def shape(self):
return self.jacobian.shape
@property
def dtype(self):
return self.jacobian.dtype
def asjacobian(J):
"""
Convert given object to one suitable for use as a Jacobian.
"""
spsolve = scipy.sparse.linalg.spsolve
if isinstance(J, Jacobian):
return J
elif inspect.isclass(J) and issubclass(J, Jacobian):
return J()
elif isinstance(J, np.ndarray):
if J.ndim > 2:
raise ValueError('array must have rank <= 2')
J = np.atleast_2d(np.asarray(J))
if J.shape[0] != J.shape[1]:
raise ValueError('array must be square')
return Jacobian(matvec=lambda v: dot(J, v),
rmatvec=lambda v: dot(J.conj().T, v),
solve=lambda v: solve(J, v),
rsolve=lambda v: solve(J.conj().T, v),
dtype=J.dtype, shape=J.shape)
elif scipy.sparse.isspmatrix(J):
if J.shape[0] != J.shape[1]:
raise ValueError('matrix must be square')
return Jacobian(matvec=lambda v: J*v,
rmatvec=lambda v: J.conj().T * v,
solve=lambda v: spsolve(J, v),
rsolve=lambda v: spsolve(J.conj().T, v),
dtype=J.dtype, shape=J.shape)
elif hasattr(J, 'shape') and hasattr(J, 'dtype') and hasattr(J, 'solve'):
return Jacobian(matvec=getattr(J, 'matvec'),
rmatvec=getattr(J, 'rmatvec'),
solve=J.solve,
rsolve=getattr(J, 'rsolve'),
update=getattr(J, 'update'),
setup=getattr(J, 'setup'),
dtype=J.dtype,
shape=J.shape)
elif callable(J):
# Assume it's a function J(x) that returns the Jacobian
class Jac(Jacobian):
def update(self, x, F):
self.x = x
def solve(self, v, tol=0):
m = J(self.x)
if isinstance(m, np.ndarray):
return solve(m, v)
elif scipy.sparse.isspmatrix(m):
return spsolve(m, v)
else:
raise ValueError("Unknown matrix type")
def matvec(self, v):
m = J(self.x)
if isinstance(m, np.ndarray):
return dot(m, v)
elif scipy.sparse.isspmatrix(m):
return m*v
else:
raise ValueError("Unknown matrix type")
def rsolve(self, v, tol=0):
m = J(self.x)
if isinstance(m, np.ndarray):
return solve(m.conj().T, v)
elif scipy.sparse.isspmatrix(m):
return spsolve(m.conj().T, v)
else:
raise ValueError("Unknown matrix type")
def rmatvec(self, v):
m = J(self.x)
if isinstance(m, np.ndarray):
return dot(m.conj().T, v)
elif scipy.sparse.isspmatrix(m):
return m.conj().T * v
else:
raise ValueError("Unknown matrix type")
return Jac()
elif isinstance(J, str):
return dict(broyden1=BroydenFirst,
broyden2=BroydenSecond,
anderson=Anderson,
diagbroyden=DiagBroyden,
linearmixing=LinearMixing,
excitingmixing=ExcitingMixing,
krylov=KrylovJacobian)[J]()
else:
raise TypeError('Cannot convert object to a Jacobian')
#------------------------------------------------------------------------------
# Broyden
#------------------------------------------------------------------------------
class GenericBroyden(Jacobian):
def setup(self, x0, f0, func):
Jacobian.setup(self, x0, f0, func)
self.last_f = f0
self.last_x = x0
if hasattr(self, 'alpha') and self.alpha is None:
# Autoscale the initial Jacobian parameter
# unless we have already guessed the solution.
normf0 = norm(f0)
if normf0:
self.alpha = 0.5*max(norm(x0), 1) / normf0
else:
self.alpha = 1.0
def _update(self, x, f, dx, df, dx_norm, df_norm):
raise NotImplementedError
def update(self, x, f):
df = f - self.last_f
dx = x - self.last_x
self._update(x, f, dx, df, norm(dx), norm(df))
self.last_f = f
self.last_x = x
class LowRankMatrix(object):
r"""
A matrix represented as
.. math:: \alpha I + \sum_{n=0}^{n=M} c_n d_n^\dagger
However, if the rank of the matrix reaches the dimension of the vectors,
full matrix representation will be used thereon.
"""
def __init__(self, alpha, n, dtype):
self.alpha = alpha
self.cs = []
self.ds = []
self.n = n
self.dtype = dtype
self.collapsed = None
@staticmethod
def _matvec(v, alpha, cs, ds):
axpy, scal, dotc = get_blas_funcs(['axpy', 'scal', 'dotc'],
cs[:1] + [v])
w = alpha * v
for c, d in zip(cs, ds):
a = dotc(d, v)
w = axpy(c, w, w.size, a)
return w
@staticmethod
def _solve(v, alpha, cs, ds):
"""Evaluate w = M^-1 v"""
if len(cs) == 0:
return v/alpha
# (B + C D^H)^-1 = B^-1 - B^-1 C (I + D^H B^-1 C)^-1 D^H B^-1
axpy, dotc = get_blas_funcs(['axpy', 'dotc'], cs[:1] + [v])
c0 = cs[0]
A = alpha * np.identity(len(cs), dtype=c0.dtype)
for i, d in enumerate(ds):
for j, c in enumerate(cs):
A[i,j] += dotc(d, c)
q = np.zeros(len(cs), dtype=c0.dtype)
for j, d in enumerate(ds):
q[j] = dotc(d, v)
q /= alpha
q = solve(A, q)
w = v/alpha
for c, qc in zip(cs, q):
w = axpy(c, w, w.size, -qc)
return w
def matvec(self, v):
"""Evaluate w = M v"""
if self.collapsed is not None:
return np.dot(self.collapsed, v)
return LowRankMatrix._matvec(v, self.alpha, self.cs, self.ds)
def rmatvec(self, v):
"""Evaluate w = M^H v"""
if self.collapsed is not None:
return np.dot(self.collapsed.T.conj(), v)
return LowRankMatrix._matvec(v, np.conj(self.alpha), self.ds, self.cs)
def solve(self, v, tol=0):
"""Evaluate w = M^-1 v"""
if self.collapsed is not None:
return solve(self.collapsed, v)
return LowRankMatrix._solve(v, self.alpha, self.cs, self.ds)
def rsolve(self, v, tol=0):
"""Evaluate w = M^-H v"""
if self.collapsed is not None:
return solve(self.collapsed.T.conj(), v)
return LowRankMatrix._solve(v, np.conj(self.alpha), self.ds, self.cs)
def append(self, c, d):
if self.collapsed is not None:
self.collapsed += c[:,None] * d[None,:].conj()
return
self.cs.append(c)
self.ds.append(d)
if len(self.cs) > c.size:
self.collapse()
def __array__(self):
if self.collapsed is not None:
return self.collapsed
Gm = self.alpha*np.identity(self.n, dtype=self.dtype)
for c, d in zip(self.cs, self.ds):
Gm += c[:,None]*d[None,:].conj()
return Gm
def collapse(self):
"""Collapse the low-rank matrix to a full-rank one."""
self.collapsed = np.array(self)
self.cs = None
self.ds = None
self.alpha = None
def restart_reduce(self, rank):
"""
Reduce the rank of the matrix by dropping all vectors.
"""
if self.collapsed is not None:
return
assert rank > 0
if len(self.cs) > rank:
del self.cs[:]
del self.ds[:]
def simple_reduce(self, rank):
"""
Reduce the rank of the matrix by dropping oldest vectors.
"""
if self.collapsed is not None:
return
assert rank > 0
while len(self.cs) > rank:
del self.cs[0]
del self.ds[0]
def svd_reduce(self, max_rank, to_retain=None):
"""
Reduce the rank of the matrix by retaining some SVD components.
This corresponds to the \"Broyden Rank Reduction Inverse\"
algorithm described in [1]_.
Note that the SVD decomposition can be done by solving only a
problem whose size is the effective rank of this matrix, which
is viable even for large problems.
Parameters
----------
max_rank : int
Maximum rank of this matrix after reduction.
to_retain : int, optional
Number of SVD components to retain when reduction is done
(ie. rank > max_rank). Default is ``max_rank - 2``.
References
----------
.. [1] B.A. van der Rotten, PhD thesis,
\"A limited memory Broyden method to solve high-dimensional
systems of nonlinear equations\". Mathematisch Instituut,
Universiteit Leiden, The Netherlands (2003).
https://web.archive.org/web/20161022015821/http://www.math.leidenuniv.nl/scripties/Rotten.pdf
"""
if self.collapsed is not None:
return
p = max_rank
if to_retain is not None:
q = to_retain
else:
q = p - 2
if self.cs:
p = min(p, len(self.cs[0]))
q = max(0, min(q, p-1))
m = len(self.cs)
if m < p:
# nothing to do
return
C = np.array(self.cs).T
D = np.array(self.ds).T
D, R = qr(D, mode='economic')
C = dot(C, R.T.conj())
U, S, WH = svd(C, full_matrices=False, compute_uv=True)
C = dot(C, inv(WH))
D = dot(D, WH.T.conj())
for k in range(q):
self.cs[k] = C[:,k].copy()
self.ds[k] = D[:,k].copy()
del self.cs[q:]
del self.ds[q:]
_doc_parts['broyden_params'] = """
alpha : float, optional
Initial guess for the Jacobian is ``(-1/alpha)``.
reduction_method : str or tuple, optional
Method used in ensuring that the rank of the Broyden matrix
stays low. Can either be a string giving the name of the method,
or a tuple of the form ``(method, param1, param2, ...)``
that gives the name of the method and values for additional parameters.
Methods available:
- ``restart``: drop all matrix columns. Has no extra parameters.
- ``simple``: drop oldest matrix column. Has no extra parameters.
- ``svd``: keep only the most significant SVD components.
Takes an extra parameter, ``to_retain``, which determines the
number of SVD components to retain when rank reduction is done.
Default is ``max_rank - 2``.
max_rank : int, optional
Maximum rank for the Broyden matrix.
Default is infinity (i.e., no rank reduction).
""".strip()
class BroydenFirst(GenericBroyden):
r"""
Find a root of a function, using Broyden's first Jacobian approximation.
This method is also known as \"Broyden's good method\".
Parameters
----------
%(params_basic)s
%(broyden_params)s
%(params_extra)s
See Also
--------
root : Interface to root finding algorithms for multivariate
functions. See ``method=='broyden1'`` in particular.
Notes
-----
This algorithm implements the inverse Jacobian Quasi-Newton update
.. math:: H_+ = H + (dx - H df) dx^\dagger H / ( dx^\dagger H df)
which corresponds to Broyden's first Jacobian update
.. math:: J_+ = J + (df - J dx) dx^\dagger / dx^\dagger dx
References
----------
.. [1] B.A. van der Rotten, PhD thesis,
\"A limited memory Broyden method to solve high-dimensional
systems of nonlinear equations\". Mathematisch Instituut,
Universiteit Leiden, The Netherlands (2003).
https://web.archive.org/web/20161022015821/http://www.math.leidenuniv.nl/scripties/Rotten.pdf
"""
def __init__(self, alpha=None, reduction_method='restart', max_rank=None):
GenericBroyden.__init__(self)
self.alpha = alpha
self.Gm = None
if max_rank is None:
max_rank = np.inf
self.max_rank = max_rank
if isinstance(reduction_method, str):
reduce_params = ()
else:
reduce_params = reduction_method[1:]
reduction_method = reduction_method[0]
reduce_params = (max_rank - 1,) + reduce_params
if reduction_method == 'svd':
self._reduce = lambda: self.Gm.svd_reduce(*reduce_params)
elif reduction_method == 'simple':
self._reduce = lambda: self.Gm.simple_reduce(*reduce_params)
elif reduction_method == 'restart':
self._reduce = lambda: self.Gm.restart_reduce(*reduce_params)
else:
raise ValueError("Unknown rank reduction method '%s'" %
reduction_method)
def setup(self, x, F, func):
GenericBroyden.setup(self, x, F, func)
self.Gm = LowRankMatrix(-self.alpha, self.shape[0], self.dtype)
def todense(self):
return inv(self.Gm)
def solve(self, f, tol=0):
r = self.Gm.matvec(f)
if not np.isfinite(r).all():
# singular; reset the Jacobian approximation
self.setup(self.last_x, self.last_f, self.func)
return self.Gm.matvec(f)
def matvec(self, f):
return self.Gm.solve(f)
def rsolve(self, f, tol=0):
return self.Gm.rmatvec(f)
def rmatvec(self, f):
return self.Gm.rsolve(f)
def _update(self, x, f, dx, df, dx_norm, df_norm):
self._reduce() # reduce first to preserve secant condition
v = self.Gm.rmatvec(dx)
c = dx - self.Gm.matvec(df)
d = v / vdot(df, v)
self.Gm.append(c, d)
class BroydenSecond(BroydenFirst):
"""
Find a root of a function, using Broyden\'s second Jacobian approximation.
This method is also known as \"Broyden's bad method\".
Parameters
----------
%(params_basic)s
%(broyden_params)s
%(params_extra)s
See Also
--------
root : Interface to root finding algorithms for multivariate
functions. See ``method=='broyden2'`` in particular.
Notes
-----
This algorithm implements the inverse Jacobian Quasi-Newton update
.. math:: H_+ = H + (dx - H df) df^\\dagger / ( df^\\dagger df)
corresponding to Broyden's second method.
References
----------
.. [1] B.A. van der Rotten, PhD thesis,
\"A limited memory Broyden method to solve high-dimensional
systems of nonlinear equations\". Mathematisch Instituut,
Universiteit Leiden, The Netherlands (2003).
https://web.archive.org/web/20161022015821/http://www.math.leidenuniv.nl/scripties/Rotten.pdf
"""
def _update(self, x, f, dx, df, dx_norm, df_norm):
self._reduce() # reduce first to preserve secant condition
v = df
c = dx - self.Gm.matvec(df)
d = v / df_norm**2
self.Gm.append(c, d)
#------------------------------------------------------------------------------
# Broyden-like (restricted memory)
#------------------------------------------------------------------------------
class Anderson(GenericBroyden):
"""
Find a root of a function, using (extended) Anderson mixing.
    The Jacobian is formed by searching for a 'best' solution in the space
    spanned by the last `M` vectors. As a result, only MxM matrix
    inversions and MxN multiplications are required. [Ey]_
Parameters
----------
%(params_basic)s
alpha : float, optional
Initial guess for the Jacobian is (-1/alpha).
M : float, optional
Number of previous vectors to retain. Defaults to 5.
w0 : float, optional
Regularization parameter for numerical stability.
Compared to unity, good values of the order of 0.01.
%(params_extra)s
See Also
--------
root : Interface to root finding algorithms for multivariate
functions. See ``method=='anderson'`` in particular.
References
----------
.. [Ey] V. Eyert, J. Comp. Phys., 124, 271 (1996).
"""
# Note:
#
# Anderson method maintains a rank M approximation of the inverse Jacobian,
#
# J^-1 v ~ -v*alpha + (dX + alpha dF) A^-1 dF^H v
# A = W + dF^H dF
# W = w0^2 diag(dF^H dF)
#
# so that for w0 = 0 the secant condition applies for last M iterates, i.e.,
#
# J^-1 df_j = dx_j
#
# for all j = 0 ... M-1.
#
# Moreover, (from Sherman-Morrison-Woodbury formula)
#
# J v ~ [ b I - b^2 C (I + b dF^H A^-1 C)^-1 dF^H ] v
# C = (dX + alpha dF) A^-1
# b = -1/alpha
#
# and after simplification
#
# J v ~ -v/alpha + (dX/alpha + dF) (dF^H dX - alpha W)^-1 dF^H v
#
def __init__(self, alpha=None, w0=0.01, M=5):
GenericBroyden.__init__(self)
self.alpha = alpha
self.M = M
self.dx = []
self.df = []
self.gamma = None
self.w0 = w0
def solve(self, f, tol=0):
dx = -self.alpha*f
n = len(self.dx)
if n == 0:
return dx
df_f = np.empty(n, dtype=f.dtype)
for k in range(n):
df_f[k] = vdot(self.df[k], f)
try:
gamma = solve(self.a, df_f)
except LinAlgError:
# singular; reset the Jacobian approximation
del self.dx[:]
del self.df[:]
return dx
for m in range(n):
dx += gamma[m]*(self.dx[m] + self.alpha*self.df[m])
return dx
def matvec(self, f):
dx = -f/self.alpha
n = len(self.dx)
if n == 0:
return dx
df_f = np.empty(n, dtype=f.dtype)
for k in range(n):
df_f[k] = vdot(self.df[k], f)
b = np.empty((n, n), dtype=f.dtype)
for i in range(n):
for j in range(n):
b[i,j] = vdot(self.df[i], self.dx[j])
if i == j and self.w0 != 0:
b[i,j] -= vdot(self.df[i], self.df[i])*self.w0**2*self.alpha
gamma = solve(b, df_f)
for m in range(n):
dx += gamma[m]*(self.df[m] + self.dx[m]/self.alpha)
return dx
def _update(self, x, f, dx, df, dx_norm, df_norm):
if self.M == 0:
return
self.dx.append(dx)
self.df.append(df)
while len(self.dx) > self.M:
self.dx.pop(0)
self.df.pop(0)
n = len(self.dx)
a = np.zeros((n, n), dtype=f.dtype)
for i in range(n):
for j in range(i, n):
if i == j:
wd = self.w0**2
else:
wd = 0
a[i,j] = (1+wd)*vdot(self.df[i], self.df[j])
a += np.triu(a, 1).T.conj()
self.a = a
#------------------------------------------------------------------------------
# Simple iterations
#------------------------------------------------------------------------------
class DiagBroyden(GenericBroyden):
"""
Find a root of a function, using diagonal Broyden Jacobian approximation.
The Jacobian approximation is derived from previous iterations, by
retaining only the diagonal of Broyden matrices.
.. warning::
This algorithm may be useful for specific problems, but whether
it will work may depend strongly on the problem.
Parameters
----------
%(params_basic)s
alpha : float, optional
Initial guess for the Jacobian is (-1/alpha).
%(params_extra)s
See Also
--------
root : Interface to root finding algorithms for multivariate
functions. See ``method=='diagbroyden'`` in particular.
"""
def __init__(self, alpha=None):
GenericBroyden.__init__(self)
self.alpha = alpha
def setup(self, x, F, func):
GenericBroyden.setup(self, x, F, func)
self.d = np.full((self.shape[0],), 1 / self.alpha, dtype=self.dtype)
def solve(self, f, tol=0):
return -f / self.d
def matvec(self, f):
return -f * self.d
def rsolve(self, f, tol=0):
return -f / self.d.conj()
def rmatvec(self, f):
return -f * self.d.conj()
def todense(self):
return np.diag(-self.d)
def _update(self, x, f, dx, df, dx_norm, df_norm):
self.d -= (df + self.d*dx)*dx/dx_norm**2
class LinearMixing(GenericBroyden):
"""
Find a root of a function, using a scalar Jacobian approximation.
.. warning::
This algorithm may be useful for specific problems, but whether
it will work may depend strongly on the problem.
Parameters
----------
%(params_basic)s
alpha : float, optional
The Jacobian approximation is (-1/alpha).
%(params_extra)s
See Also
--------
root : Interface to root finding algorithms for multivariate
functions. See ``method=='linearmixing'`` in particular.
"""
def __init__(self, alpha=None):
GenericBroyden.__init__(self)
self.alpha = alpha
def solve(self, f, tol=0):
return -f*self.alpha
def matvec(self, f):
return -f/self.alpha
def rsolve(self, f, tol=0):
return -f*np.conj(self.alpha)
def rmatvec(self, f):
return -f/np.conj(self.alpha)
def todense(self):
return np.diag(np.full(self.shape[0], -1/self.alpha))
def _update(self, x, f, dx, df, dx_norm, df_norm):
pass
class ExcitingMixing(GenericBroyden):
"""
Find a root of a function, using a tuned diagonal Jacobian approximation.
The Jacobian matrix is diagonal and is tuned on each iteration.
.. warning::
This algorithm may be useful for specific problems, but whether
it will work may depend strongly on the problem.
See Also
--------
root : Interface to root finding algorithms for multivariate
functions. See ``method=='excitingmixing'`` in particular.
Parameters
----------
%(params_basic)s
alpha : float, optional
Initial Jacobian approximation is (-1/alpha).
alphamax : float, optional
The entries of the diagonal Jacobian are kept in the range
``[alpha, alphamax]``.
%(params_extra)s
"""
def __init__(self, alpha=None, alphamax=1.0):
GenericBroyden.__init__(self)
self.alpha = alpha
self.alphamax = alphamax
self.beta = None
def setup(self, x, F, func):
GenericBroyden.setup(self, x, F, func)
self.beta = np.full((self.shape[0],), self.alpha, dtype=self.dtype)
def solve(self, f, tol=0):
return -f*self.beta
def matvec(self, f):
return -f/self.beta
def rsolve(self, f, tol=0):
return -f*self.beta.conj()
def rmatvec(self, f):
return -f/self.beta.conj()
def todense(self):
return np.diag(-1/self.beta)
def _update(self, x, f, dx, df, dx_norm, df_norm):
incr = f*self.last_f > 0
self.beta[incr] += self.alpha
self.beta[~incr] = self.alpha
np.clip(self.beta, 0, self.alphamax, out=self.beta)
#------------------------------------------------------------------------------
# Iterative/Krylov approximated Jacobians
#------------------------------------------------------------------------------
class KrylovJacobian(Jacobian):
r"""
Find a root of a function, using Krylov approximation for inverse Jacobian.
This method is suitable for solving large-scale problems.
Parameters
----------
%(params_basic)s
rdiff : float, optional
Relative step size to use in numerical differentiation.
method : {'lgmres', 'gmres', 'bicgstab', 'cgs', 'minres'} or function
Krylov method to use to approximate the Jacobian.
Can be a string, or a function implementing the same interface as
the iterative solvers in `scipy.sparse.linalg`.
The default is `scipy.sparse.linalg.lgmres`.
inner_M : LinearOperator or InverseJacobian
Preconditioner for the inner Krylov iteration.
Note that you can use also inverse Jacobians as (adaptive)
preconditioners. For example,
>>> from scipy.optimize.nonlin import BroydenFirst, KrylovJacobian
>>> from scipy.optimize.nonlin import InverseJacobian
>>> jac = BroydenFirst()
>>> kjac = KrylovJacobian(inner_M=InverseJacobian(jac))
If the preconditioner has a method named 'update', it will be called
as ``update(x, f)`` after each nonlinear step, with ``x`` giving
the current point, and ``f`` the current function value.
inner_tol, inner_maxiter, ...
Parameters to pass on to the \"inner\" Krylov solver.
See `scipy.sparse.linalg.gmres` for details.
outer_k : int, optional
Size of the subspace kept across LGMRES nonlinear iterations.
See `scipy.sparse.linalg.lgmres` for details.
%(params_extra)s
See Also
--------
root : Interface to root finding algorithms for multivariate
functions. See ``method=='krylov'`` in particular.
scipy.sparse.linalg.gmres
scipy.sparse.linalg.lgmres
Notes
-----
This function implements a Newton-Krylov solver. The basic idea is
to compute the inverse of the Jacobian with an iterative Krylov
method. These methods require only evaluating the Jacobian-vector
products, which are conveniently approximated by a finite difference:
.. math:: J v \approx (f(x + \omega*v/|v|) - f(x)) / \omega
Due to the use of iterative matrix inverses, these methods can
deal with large nonlinear problems.
SciPy's `scipy.sparse.linalg` module offers a selection of Krylov
solvers to choose from. The default here is `lgmres`, which is a
variant of restarted GMRES iteration that reuses some of the
information obtained in the previous Newton steps to invert
Jacobians in subsequent steps.
For a review on Newton-Krylov methods, see for example [1]_,
and for the LGMRES sparse inverse method, see [2]_.
References
----------
.. [1] D.A. Knoll and D.E. Keyes, J. Comp. Phys. 193, 357 (2004).
:doi:`10.1016/j.jcp.2003.08.010`
.. [2] A.H. Baker and E.R. Jessup and T. Manteuffel,
SIAM J. Matrix Anal. Appl. 26, 962 (2005).
:doi:`10.1137/S0895479803422014`
"""
def __init__(self, rdiff=None, method='lgmres', inner_maxiter=20,
inner_M=None, outer_k=10, **kw):
self.preconditioner = inner_M
self.rdiff = rdiff
self.method = dict(
bicgstab=scipy.sparse.linalg.bicgstab,
gmres=scipy.sparse.linalg.gmres,
lgmres=scipy.sparse.linalg.lgmres,
cgs=scipy.sparse.linalg.cgs,
minres=scipy.sparse.linalg.minres,
).get(method, method)
self.method_kw = dict(maxiter=inner_maxiter, M=self.preconditioner)
if self.method is scipy.sparse.linalg.gmres:
# Replace GMRES's outer iteration with Newton steps
self.method_kw['restrt'] = inner_maxiter
self.method_kw['maxiter'] = 1
self.method_kw.setdefault('atol', 0)
elif self.method is scipy.sparse.linalg.gcrotmk:
self.method_kw.setdefault('atol', 0)
elif self.method is scipy.sparse.linalg.lgmres:
self.method_kw['outer_k'] = outer_k
# Replace LGMRES's outer iteration with Newton steps
self.method_kw['maxiter'] = 1
# Carry LGMRES's `outer_v` vectors across nonlinear iterations
self.method_kw.setdefault('outer_v', [])
self.method_kw.setdefault('prepend_outer_v', True)
# But don't carry the corresponding Jacobian*v products, in case
# the Jacobian changes a lot in the nonlinear step
#
# XXX: some trust-region inspired ideas might be more efficient...
# See e.g., Brown & Saad. But needs to be implemented separately
# since it's not an inexact Newton method.
self.method_kw.setdefault('store_outer_Av', False)
self.method_kw.setdefault('atol', 0)
for key, value in kw.items():
if not key.startswith('inner_'):
raise ValueError("Unknown parameter %s" % key)
self.method_kw[key[6:]] = value
def _update_diff_step(self):
mx = abs(self.x0).max()
mf = abs(self.f0).max()
self.omega = self.rdiff * max(1, mx) / max(1, mf)
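    # NOTE (added comment): matvec below approximates the Jacobian-vector
    # product J*v with a forward difference, (F(x0 + sc*v) - F(x0)) / sc where
    # sc = omega/|v|, using the differencing step omega computed above.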
def matvec(self, v):
nv = norm(v)
if nv == 0:
return 0*v
sc = self.omega / nv
r = (self.func(self.x0 + sc*v) - self.f0) / sc
if not np.all(np.isfinite(r)) and np.all(np.isfinite(v)):
raise ValueError('Function returned non-finite results')
return r
def solve(self, rhs, tol=0):
if 'tol' in self.method_kw:
sol, info = self.method(self.op, rhs, **self.method_kw)
else:
sol, info = self.method(self.op, rhs, tol=tol, **self.method_kw)
return sol
def update(self, x, f):
self.x0 = x
self.f0 = f
self._update_diff_step()
# Update also the preconditioner, if possible
if self.preconditioner is not None:
if hasattr(self.preconditioner, 'update'):
self.preconditioner.update(x, f)
def setup(self, x, f, func):
Jacobian.setup(self, x, f, func)
self.x0 = x
self.f0 = f
self.op = scipy.sparse.linalg.aslinearoperator(self)
if self.rdiff is None:
self.rdiff = np.finfo(x.dtype).eps ** (1./2)
self._update_diff_step()
# Setup also the preconditioner, if possible
if self.preconditioner is not None:
if hasattr(self.preconditioner, 'setup'):
self.preconditioner.setup(x, f, func)
#------------------------------------------------------------------------------
# Wrapper functions
#------------------------------------------------------------------------------
def _nonlin_wrapper(name, jac):
"""
Construct a solver wrapper with given name and Jacobian approx.
It inspects the keyword arguments of ``jac.__init__``, and allows to
use the same arguments in the wrapper function, in addition to the
keyword arguments of `nonlin_solve`
"""
signature = _getfullargspec(jac.__init__)
args, varargs, varkw, defaults, kwonlyargs, kwdefaults, _ = signature
kwargs = list(zip(args[-len(defaults):], defaults))
kw_str = ", ".join(["%s=%r" % (k, v) for k, v in kwargs])
if kw_str:
kw_str = ", " + kw_str
kwkw_str = ", ".join(["%s=%s" % (k, k) for k, v in kwargs])
if kwkw_str:
kwkw_str = kwkw_str + ", "
if kwonlyargs:
raise ValueError('Unexpected signature %s' % signature)
# Construct the wrapper function so that its keyword arguments
# are visible in pydoc.help etc.
wrapper = """
def %(name)s(F, xin, iter=None %(kw)s, verbose=False, maxiter=None,
f_tol=None, f_rtol=None, x_tol=None, x_rtol=None,
tol_norm=None, line_search='armijo', callback=None, **kw):
jac = %(jac)s(%(kwkw)s **kw)
return nonlin_solve(F, xin, jac, iter, verbose, maxiter,
f_tol, f_rtol, x_tol, x_rtol, tol_norm, line_search,
callback)
"""
wrapper = wrapper % dict(name=name, kw=kw_str, jac=jac.__name__,
kwkw=kwkw_str)
ns = {}
ns.update(globals())
exec(wrapper, ns)
func = ns[name]
func.__doc__ = jac.__doc__
_set_doc(func)
return func
broyden1 = _nonlin_wrapper('broyden1', BroydenFirst)
broyden2 = _nonlin_wrapper('broyden2', BroydenSecond)
anderson = _nonlin_wrapper('anderson', Anderson)
linearmixing = _nonlin_wrapper('linearmixing', LinearMixing)
diagbroyden = _nonlin_wrapper('diagbroyden', DiagBroyden)
excitingmixing = _nonlin_wrapper('excitingmixing', ExcitingMixing)
newton_krylov = _nonlin_wrapper('newton_krylov', KrylovJacobian)
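# A minimal usage sketch (not part of the original module; illustrative only):
# all wrappers generated above share the nonlin_solve interface, e.g.
#
#     >>> import numpy as np
#     >>> def residual(x):
#     ...     return np.cos(x) - x          # root at the fixed point of cos
#     >>> x = newton_krylov(residual, np.ones(3), f_tol=1e-10)
#     >>> np.allclose(np.cos(x), x)
#     True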
| arokem/scipy | scipy/optimize/nonlin.py | Python | bsd-3-clause | 48,377 |
# TmLibrary - TissueMAPS library for distibuted image analysis routines.
# Copyright (C) 2016 Markus D. Herrmann, University of Zurich and Robin Hafen
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import json
import os
import csv
import logging
import random
import collections
import pandas as pd
from cStringIO import StringIO
from sqlalchemy import func, case
from geoalchemy2 import Geometry
from geoalchemy2.shape import to_shape
from sqlalchemy.orm import Session
from sqlalchemy import (
Column, String, Integer, BigInteger, Boolean, ForeignKey, not_, Index,
UniqueConstraint, PrimaryKeyConstraint, ForeignKeyConstraint
)
from sqlalchemy.orm import relationship, backref
from sqlalchemy.ext.hybrid import hybrid_property
from tmlib import cfg
from tmlib.models.dialect import _compile_distributed_query
from tmlib.models.result import ToolResult, LabelValues
from tmlib.models.base import (
ExperimentModel, DistributedExperimentModel, DateMixIn, IdMixIn
)
from tmlib.models.feature import Feature, FeatureValues
from tmlib.models.types import ST_SimplifyPreserveTopology
from tmlib.models.site import Site
from tmlib.utils import autocreate_directory_property, create_partitions
logger = logging.getLogger(__name__)
class MapobjectType(ExperimentModel, IdMixIn):
'''A *mapobject type* represents a conceptual group of *mapobjects*
(segmented objects) that reflect different biological entities,
such as "cells" or "nuclei" for example.
Attributes
----------
records: List[tmlib.models.result.ToolResult]
records belonging to the mapobject type
features: List[tmlib.models.feature.Feature]
features belonging to the mapobject type
'''
__tablename__ = 'mapobject_types'
__table_args__ = (UniqueConstraint('name'), )
#: str: name given by user
name = Column(String(50), index=True, nullable=False)
#: str: name of another type that serves as a reference for "static"
#: mapobjects, i.e. objects that are pre-defined through the experiment
#: layout and independent of image segmentation (e.g. "Plate" or "Well")
ref_type = Column(String(50))
#: int: ID of parent experiment
experiment_id = Column(
Integer,
ForeignKey('experiment.id', onupdate='CASCADE', ondelete='CASCADE'),
index=True
)
#: tmlib.models.experiment.Experiment: parent experiment
experiment = relationship(
'Experiment',
backref=backref('mapobject_types', cascade='all, delete-orphan')
)
def __init__(self, name, experiment_id, ref_type=None):
'''
Parameters
----------
name: str
name of the map objects type, e.g. "cells"
experiment_id: int
ID of the parent
:class:`Experiment <tmlib.models.experiment.Experiment>`
ref_type: str, optional
name of another reference type (default: ``None``)
'''
self.name = name
self.ref_type = ref_type
self.experiment_id = experiment_id
@classmethod
def delete_cascade(cls, connection, static=None):
'''Deletes all instances as well as "children"
instances of :class:`Mapobject <tmlib.models.mapobject.Mapobject>`,
:class:`MapobjectSegmentation <tmlib.models.mapobject.MapobjectSegmentation>`,
:class:`Feature <tmlib.models.feature.Feature>`,
:class:`FeatureValues <tmlib.models.feature.FeatureValues>`,
:class:`ToolResult <tmlib.models.result.ToolResult>`,
:class:`LabelLayer <tmlib.models.layer.LabelLayer>` and
:class:`LabelValues <tmlib.models.feature.LabelValues>`.
Parameters
----------
connection: tmlib.models.utils.ExperimentConnection
experiment-specific database connection
static: bool, optional
if ``True`` static types ("Plates", "Wells", "Sites") will be
            deleted, if ``False`` non-static types will be deleted, if ``None``
all types will be deleted (default: ``None``)
'''
ids = list()
if static is not None:
if static:
logger.debug('delete static mapobjects')
connection.execute('''
SELECT id FROM mapobject_types
WHERE name IN ('Plates', 'Wells', 'Sites')
''')
else:
                logger.debug('delete non-static mapobjects')
connection.execute('''
SELECT id FROM mapobject_types
WHERE name NOT IN ('Plates', 'Wells', 'Sites')
''')
else:
connection.execute('''
SELECT id FROM mapobject_types
''')
records = connection.fetchall()
ids.extend([r.id for r in records])
for id in ids:
logger.debug('delete mapobjects of type %d', id)
Mapobject.delete_cascade(connection, id)
logger.debug('delete mapobject type %d', id)
connection.execute('''
DELETE FROM mapobject_types WHERE id = %(id)s;
''', {
'id': id
})
def get_site_geometry(self, site_id):
'''Gets the geometric representation of a
        :class:`Site <tmlib.models.site.Site>`
        in the form of a
:class:`MapobjectSegmentation <tmlib.models.mapobject.MapobjectSegmentation>`.
Parameters
----------
site_id: int
ID of the :class:`Site <tmlib.models.site.Site>`
Returns
-------
geoalchemy2.elements.WKBElement
'''
session = Session.object_session(self)
mapobject_type = session.query(MapobjectType.id).\
filter_by(ref_type=Site.__name__).\
one()
segmentation = session.query(MapobjectSegmentation.geom_polygon).\
join(Mapobject).\
filter(
Mapobject.partition_key == site_id,
Mapobject.mapobject_type_id == mapobject_type.id
).\
one()
return segmentation.geom_polygon
def get_segmentations_per_site(self, site_id, tpoint, zplane,
as_polygons=True):
'''Gets each
:class:`MapobjectSegmentation <tmlib.models.mapobject.MapobjectSegmentation>`
that intersects with the geometric representation of a given
:class:`Site <tmlib.models.site.Site>`.
Parameters
----------
site_id: int
ID of a :class:`Site <tmlib.models.site.Site>` for which
objects should be spatially filtered
tpoint: int
time point for which objects should be filtered
zplane: int
z-plane for which objects should be filtered
as_polygons: bool, optional
whether segmentations should be returned as polygons;
if ``False`` segmentations will be returned as centroid points
(default: ``True``)
Returns
-------
Tuple[Union[int, geoalchemy2.elements.WKBElement]]
label and geometry for each segmented object
'''
session = Session.object_session(self)
layer = session.query(SegmentationLayer.id).\
filter_by(mapobject_type_id=self.id, tpoint=tpoint, zplane=zplane).\
one()
if as_polygons:
segmentations = session.query(
MapobjectSegmentation.label,
MapobjectSegmentation.geom_polygon
)
else:
segmentations = session.query(
MapobjectSegmentation.label,
MapobjectSegmentation.geom_centroid
)
segmentations = segmentations.\
filter_by(segmentation_layer_id=layer.id, partition_key=site_id).\
order_by(MapobjectSegmentation.mapobject_id).\
all()
return segmentations
def get_feature_values_per_site(self, site_id, tpoint, feature_ids=None):
'''Gets all
:class:`FeatureValues <tmlib.models.feature.FeatureValues>`
        for each :class:`Mapobject <tmlib.models.mapobject.Mapobject>`
where the corresponding
:class:`MapobjectSegmentation <tmlib.models.mapobject.MapobjectSegmentation>`
intersects with the geometric representation of a given
:class:`Site <tmlib.models.site.Site>`.
Parameters
----------
site_id: int
ID of a :class:`Site <tmlib.models.site.Site>` for which
objects should be spatially filtered
tpoint: int
time point for which objects should be filtered
feature_ids: List[int], optional
ID of each :class:`Feature <tmlib.models.feature.Feature>` for
which values should be selected; by default all features will be
selected
Returns
-------
pandas.DataFrame[numpy.float]
feature values for each mapobject
'''
session = Session.object_session(self)
features = session.query(Feature.id, Feature.name).\
filter_by(mapobject_type_id=self.id)
if feature_ids is not None:
features = features.filter(Feature.id.in_(feature_ids))
features = features.all()
feature_map = {str(id): name for id, name in features}
if feature_ids is not None:
records = session.query(
FeatureValues.mapobject_id,
FeatureValues.values.slice(feature_map.keys()).label('values')
)
else:
records = session.query(
FeatureValues.mapobject_id,
FeatureValues.values
)
records = records.\
join(Mapobject).\
join(MapobjectSegmentation).\
filter(
Mapobject.mapobject_type_id == self.id,
FeatureValues.tpoint == tpoint,
FeatureValues.partition_key == site_id
).\
order_by(Mapobject.id).\
all()
values = [r.values for r in records]
mapobject_ids = [r.mapobject_id for r in records]
df = pd.DataFrame(values, index=mapobject_ids)
df.rename(columns=feature_map, inplace=True)
return df
def get_label_values_per_site(self, site_id, tpoint):
'''Gets all :class:`LabelValues <tmlib.models.result.LabelValues>`
        for each :class:`Mapobject <tmlib.models.mapobject.Mapobject>`
where the corresponding
:class:`MapobjectSegmentation <tmlib.models.mapobject.MapobjectSegmentation>`
intersects with the geometric representation of a given
:class:`Site <tmlib.models.site.Site>`.
Parameters
----------
site_id: int
ID of a :class:`Site <tmlib.models.site.Site>` for which
objects should be spatially filtered
        tpoint: int
time point for which objects should be filtered
Returns
-------
pandas.DataFrame[numpy.float]
label values for each mapobject
'''
session = Session.object_session(self)
labels = session.query(ToolResult.id, ToolResult.name).\
filter_by(mapobject_type_id=self.id).\
all()
label_map = {str(id): name for id, name in labels}
records = session.query(
LabelValues.mapobject_id, LabelValues.values
).\
join(Mapobject).\
join(MapobjectSegmentation).\
filter(
Mapobject.mapobject_type_id == self.id,
LabelValues.tpoint == tpoint,
LabelValues.partition_key == site_id
).\
order_by(Mapobject.id).\
all()
values = [r.values for r in records]
mapobject_ids = [r.mapobject_id for r in records]
df = pd.DataFrame(values, index=mapobject_ids)
df.rename(columns=label_map, inplace=True)
return df
def identify_border_objects_per_site(self, site_id, tpoint, zplane):
        '''Determines for each :class:`Mapobject <tmlib.models.mapobject.Mapobject>`
        where the corresponding
        :class:`MapobjectSegmentation <tmlib.models.mapobject.MapobjectSegmentation>`
        intersects with the geometric representation of a given
        :class:`Site <tmlib.models.site.Site>`, whether the object touches
        the border of the site.
Parameters
----------
site_id: int
ID of a :class:`Site <tmlib.models.site.Site>` for which
objects should be spatially filtered
tpoint: int
time point for which objects should be filtered
zplane: int
z-plane for which objects should be filtered
Returns
-------
pandas.Series[numpy.bool]
``True`` if the mapobject touches the border of the site and
``False`` otherwise
'''
session = Session.object_session(self)
site_geometry = self.get_site_geometry(site_id)
layer = session.query(SegmentationLayer.id).\
filter_by(mapobject_type_id=self.id, tpoint=tpoint, zplane=zplane).\
one()
records = session.query(
MapobjectSegmentation.mapobject_id,
case([(
MapobjectSegmentation.geom_polygon.ST_Intersects(
site_geometry.ST_Boundary()
)
, True
)], else_=False).label('is_border')
).\
filter(
MapobjectSegmentation.segmentation_layer_id == layer.id,
MapobjectSegmentation.partition_key == site_id
).\
order_by(MapobjectSegmentation.mapobject_id).\
all()
values = [r.is_border for r in records]
mapobject_ids = [r.mapobject_id for r in records]
s = pd.Series(values, index=mapobject_ids)
return s
def __repr__(self):
return '<MapobjectType(id=%d, name=%r)>' % (self.id, self.name)
class Mapobject(DistributedExperimentModel):
'''A *mapobject* represents a connected pixel component in an
image. It has one or more 2D segmentations that can be used to represent
the object on the map and may also be associated with measurements
(*features*), which can be queried or used for further analysis.
'''
#: str: name of the corresponding database table
__tablename__ = 'mapobjects'
__table_args__ = (
PrimaryKeyConstraint('id', 'partition_key'),
)
__distribute_by__ = 'partition_key'
__distribution_method__ = 'hash'
partition_key = Column(Integer, nullable=False)
id = Column(BigInteger, unique=True, autoincrement=True)
#: int: ID of another record to which the object is related.
#: This could refer to another mapobject in the same table, e.g. in order
#: to track proliferating cells, or a record in another reference table,
#: e.g. to identify the corresponding record of a "Well".
ref_id = Column(BigInteger, index=True)
#: int: ID of parent mapobject type
mapobject_type_id = Column(Integer, index=True, nullable=False)
def __init__(self, partition_key, mapobject_type_id, ref_id=None):
'''
Parameters
----------
partition_key: int
key that determines on which shard the object will be stored
mapobject_type_id: int
ID of parent
:class:`MapobjectType <tmlib.models.mapobject.MapobjectType>`
ref_id: int, optional
ID of the referenced record
See also
--------
:attr:`tmlib.models.mapobject.MapobjectType.ref_type`
'''
self.partition_key = partition_key
self.mapobject_type_id = mapobject_type_id
self.ref_id = ref_id
@classmethod
def _delete_cascade(cls, connection, mapobject_ids):
logger.debug('delete mapobjects')
# NOTE: Using ANY with an ARRAY is more performant than using IN.
# TODO: Ideally we would like to join with mapobject_types.
# However, at the moment there seems to be no way to DELETE entries
# from a distributed table with a complex WHERE clause.
# If the number of objects is too large this will lead to issues.
# Therefore, we delete rows in batches.
mapobject_id_partitions = create_partitions(mapobject_ids, 100000)
# This will DELETE all records of referenced tables as well.
# FIXME: How to cast to correct BigInteger type in $$ escaped query?
sql = '''
DELETE FROM mapobjects
WHERE id = ANY(%(mapobject_ids)s)
'''
for mids in mapobject_id_partitions:
connection.execute(
_compile_distributed_query(sql), {'mapobject_ids': mids}
)
@classmethod
def delete_objects_with_invalid_segmentation(cls, connection):
'''Deletes all instances with invalid segmentations as well as all
"children" instances of
:class:`MapobjectSegmentation <tmlib.models.mapobject.MapobjectSegmentation>`
:class:`FeatureValues <tmlib.models.feature.FeatureValues>`,
:class:`LabelValues <tmlib.models.feature.LabelValues>`.
Parameters
----------
connection: tmlib.models.utils.ExperimentConnection
experiment-specific database connection
'''
connection.execute('''
SELECT mapobject_id FROM mapobject_segmentations
WHERE NOT ST_IsValid(geom_polygon);
''')
mapobject_segm = connection.fetchall()
mapobject_ids = [s.mapobject_id for s in mapobject_segm]
if mapobject_ids:
cls._delete_cascade(connection, mapobject_ids)
@classmethod
def delete_objects_with_missing_segmentations(cls, connection):
'''Deletes all instances that don't have a
:class:`MapobjectSegmentation <tmlib.models.mapobject.MapobjectSegmentation>`
as well as their "children" instances of
:class:`FeatureValues <tmlib.models.feature.FeatureValues>`
and :class:`LabelValues <tmlib.models.feature.LabelValues>`.
Parameters
----------
connection: tmlib.models.utils.ExperimentConnection
experiment-specific database connection
'''
connection.execute('''
SELECT m.id FROM mapobjects m
LEFT OUTER JOIN mapobject_segmentations s
ON m.id = s.mapobject_id AND m.partition_key = s.partition_key
WHERE s.mapobject_id IS NULL
''')
mapobjects = connection.fetchall()
missing_ids = [s.id for s in mapobjects]
if missing_ids:
logger.info(
'delete %d mapobjects with missing segmentations',
len(missing_ids)
)
cls._delete_cascade(connection, missing_ids)
@classmethod
def delete_objects_with_missing_feature_values(cls, connection):
'''Deletes all instances that don't have
:class:`FeatureValues <tmlib.models.feature.FeatureValues>`
as well as their "children" instances of
:class:`MapobjectSegmentation <tmlib.models.mapobject.MapobjectSegmentation>`
and :class:`LabelValues <tmlib.models.feature.LabelValues>`.
Parameters
----------
connection: tmlib.models.utils.ExperimentConnection
experiment-specific database connection
'''
# Make sure only mapobject types are selected that have any features,
# otherwise all mapobjects of that type would be deleted.
connection.execute('''
SELECT m.mapobject_type_id, count(v.mapobject_id)
FROM feature_values AS v
JOIN mapobjects AS m
ON m.id = v.mapobject_id AND m.partition_key = v.partition_key
GROUP BY m.mapobject_type_id
''')
results = connection.fetchall()
missing_ids = []
for mapobject_type_id, count in results:
connection.execute('''
SELECT m.id FROM mapobjects AS m
LEFT OUTER JOIN feature_values AS v
ON m.id = v.mapobject_id AND m.partition_key = v.partition_key
WHERE m.mapobject_type_id = %(mapobject_type_id)s
AND v.mapobject_id IS NULL
''', {
'mapobject_type_id': mapobject_type_id
})
mapobjects = connection.fetchall()
missing_ids.extend([s.id for s in mapobjects])
if missing_ids:
logger.info(
'delete %d mapobjects of type %d with missing feature '
'values', len(missing_ids), mapobject_type_id
)
cls._delete_cascade(connection, missing_ids)
@classmethod
def _add(cls, connection, instance):
if not isinstance(instance, cls):
raise TypeError('Object must have type %s' % cls.__name__)
instance.id = cls.get_unique_ids(connection, 1)[0]
connection.execute('''
INSERT INTO mapobjects (
partition_key, id, mapobject_type_id, ref_id
)
VALUES (
%(partition_key)s, %(id)s, %(mapobject_type_id)s, %(ref_id)s
)
''', {
'id': instance.id,
'partition_key': instance.partition_key,
'mapobject_type_id': instance.mapobject_type_id,
'ref_id': instance.ref_id
})
return instance
@classmethod
def _bulk_ingest(cls, connection, instances):
if not instances:
return []
f = StringIO()
w = csv.writer(f, delimiter=';')
ids = cls.get_unique_ids(connection, len(instances))
for i, obj in enumerate(instances):
if not isinstance(obj, cls):
raise TypeError('Object must have type %s' % cls.__name__)
obj.id = ids[i]
w.writerow((
obj.partition_key, obj.id, obj.mapobject_type_id, obj.ref_id
))
columns = ('partition_key', 'id', 'mapobject_type_id', 'ref_id')
f.seek(0)
connection.copy_from(
f, cls.__table__.name, sep=';', columns=columns, null=''
)
f.close()
return instances
def __repr__(self):
return '<%s(id=%r, mapobject_type_id=%r)>' % (
self.__class__.__name__, self.id, self.mapobject_type_id
)
class MapobjectSegmentation(DistributedExperimentModel):
'''A *segmentation* provides the geometric representation
of a :class:`Mapobject <tmlib.models.mapobject.Mapobject>`.
'''
__tablename__ = 'mapobject_segmentations'
__table_args__ = (
PrimaryKeyConstraint(
'mapobject_id', 'partition_key', 'segmentation_layer_id'
),
ForeignKeyConstraint(
['mapobject_id', 'partition_key'],
['mapobjects.id', 'mapobjects.partition_key'],
ondelete='CASCADE'
)
)
__distribution_method__ = 'hash'
__distribute_by__ = 'partition_key'
__colocate_with__ = 'mapobjects'
partition_key = Column(Integer, nullable=False)
#: str: EWKT POLYGON geometry
geom_polygon = Column(Geometry('POLYGON'))
#: str: EWKT POINT geometry
geom_centroid = Column(Geometry('POINT'), nullable=False)
#: int: label assigned to the object upon segmentation
label = Column(Integer, index=True)
#: int: ID of parent mapobject
mapobject_id = Column(BigInteger)
#: int: ID of parent segmentation layer
segmentation_layer_id = Column(Integer)
def __init__(self, partition_key, geom_polygon, geom_centroid, mapobject_id,
segmentation_layer_id, label=None):
'''
Parameters
----------
partition_key: int
key that determines on which shard the object will be stored
geom_polygon: shapely.geometry.polygon.Polygon
polygon geometry of the mapobject contour
geom_centroid: shapely.geometry.point.Point
point geometry of the mapobject centroid
mapobject_id: int
ID of parent :class:`Mapobject <tmlib.models.mapobject.Mapobject>`
segmentation_layer_id: int
ID of parent
:class:`SegmentationLayer <tmlib.models.layer.SegmentationLayer>`
label: int, optional
label assigned to the segmented object
'''
self.partition_key = partition_key
self.geom_polygon = getattr(geom_polygon, 'wkt', None)
self.geom_centroid = geom_centroid.wkt
self.mapobject_id = mapobject_id
self.segmentation_layer_id = segmentation_layer_id
self.label = label
@classmethod
def _add(cls, connection, instance):
if not isinstance(instance, cls):
raise TypeError('Object must have type %s' % cls.__name__)
connection.execute('''
INSERT INTO mapobject_segmentations AS s (
partition_key, mapobject_id, segmentation_layer_id,
geom_polygon, geom_centroid, label
)
VALUES (
%(partition_key)s, %(mapobject_id)s, %(segmentation_layer_id)s,
%(geom_polygon)s, %(geom_centroid)s, %(label)s
)
ON CONFLICT
ON CONSTRAINT mapobject_segmentations_pkey
DO UPDATE
SET geom_polygon = %(geom_polygon)s, geom_centroid = %(geom_centroid)s
WHERE s.mapobject_id = %(mapobject_id)s
AND s.partition_key = %(partition_key)s
AND s.segmentation_layer_id = %(segmentation_layer_id)s
''', {
'partition_key': instance.partition_key,
'mapobject_id': instance.mapobject_id,
'segmentation_layer_id': instance.segmentation_layer_id,
'geom_polygon': instance.geom_polygon,
'geom_centroid': instance.geom_centroid,
'label': instance.label
})
@classmethod
def _bulk_ingest(cls, connection, instances):
if not instances:
return
f = StringIO()
w = csv.writer(f, delimiter=';')
for obj in instances:
if not isinstance(obj, cls):
raise TypeError('Object must have type %s' % cls.__name__)
w.writerow((
obj.partition_key,
obj.geom_polygon,
obj.geom_centroid,
obj.mapobject_id, obj.segmentation_layer_id, obj.label
))
columns = (
'partition_key', 'geom_polygon', 'geom_centroid', 'mapobject_id',
'segmentation_layer_id', 'label'
)
f.seek(0)
connection.copy_from(
f, cls.__table__.name, sep=';', columns=columns, null=''
)
f.close()
def __repr__(self):
return '<%s(id=%r, mapobject_id=%r, segmentation_layer_id=%r)>' % (
self.__class__.__name__, self.id, self.mapobject_id,
self.segmentation_layer_id
)
class SegmentationLayer(ExperimentModel, IdMixIn):
__tablename__ = 'segmentation_layers'
__table_args__ = (
UniqueConstraint('tpoint', 'zplane', 'mapobject_type_id'),
)
#: int: zero-based index in time series
tpoint = Column(Integer, index=True)
#: int: zero-based index in z stack
zplane = Column(Integer, index=True)
#: int: zoom level threshold below which polygons will not be visualized
polygon_thresh = Column(Integer)
#: int: zoom level threshold below which centroids will not be visualized
centroid_thresh = Column(Integer)
#: int: ID of parent channel
mapobject_type_id = Column(
Integer,
ForeignKey('mapobject_types.id', onupdate='CASCADE', ondelete='CASCADE'),
index=True
)
#: tmlib.models.mapobject.MapobjectType: parent mapobject type
mapobject_type = relationship(
'MapobjectType',
backref=backref('layers', cascade='all, delete-orphan'),
)
def __init__(self, mapobject_type_id, tpoint=None, zplane=None):
'''
Parameters
----------
mapobject_type_id: int
ID of parent
:class:`MapobjectType <tmlib.models.mapobject.MapobjectType>`
tpoint: int, optional
zero-based time point index
zplane: int, optional
zero-based z-resolution index
'''
self.tpoint = tpoint
self.zplane = zplane
self.mapobject_type_id = mapobject_type_id
@classmethod
def get_tile_bounding_box(cls, x, y, z, maxzoom):
'''Calculates the bounding box of a layer tile.
Parameters
----------
x: int
horizontal tile coordinate
y: int
vertical tile coordinate
z: int
zoom level
maxzoom: int
maximal zoom level of layers belonging to the visualized experiment
Returns
-------
Tuple[int]
            bounding box coordinates (minx, miny, maxx, maxy)
'''
# The extent of a tile of the current zoom level in mapobject
# coordinates (i.e. coordinates on the highest zoom level)
size = 256 * 2 ** (maxzoom - z)
# Coordinates of the top-left corner of the tile
x0 = x * size
y0 = y * size
# Coordinates with which to specify all corners of the tile
# NOTE: y-axis is inverted!
minx = x0
maxx = x0 + size
miny = -y0
maxy = -y0 - size
return (minx, miny, maxx, maxy)
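    # Worked example (hypothetical numbers, added for illustration): with
    # maxzoom=6, the tile at x=2, y=1, z=4 spans 256 * 2**2 = 1024 pixels per
    # side, so get_tile_bounding_box(2, 1, 4, 6) == (2048, -1024, 3072, -2048).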
def calculate_zoom_thresholds(self, maxzoom_level, represent_as_polygons):
'''Calculates the zoom level below which mapobjects are
represented on the map as centroids rather than polygons and the
zoom level below which mapobjects are no longer visualized at all.
        These thresholds are necessary because rendering every object would
        result in too much network traffic and the client would be overwhelmed
        by the large number of objects.
Parameters
----------
maxzoom_level: int
maximum zoom level of the pyramid
represent_as_polygons: bool
whether the objects should be represented as polygons or only as
centroid points
Returns
-------
Tuple[int]
threshold zoom levels for visualization of polygons and centroids
Note
----
The optimal threshold levels depend on the number of points on the
contour of objects, but also the size of the browser window and the
resolution settings of the browser.
'''
# TODO: This is a bit too simplistic. Ideally, we would calculate
# the optimal zoom level by sampling mapobjects at the highest
        # resolution level and approximate the number of points that would be
        # sent to the client. This is tricky, however, because the current view
        # and thus the number of requested mapobject segmentations depends
        # on the size of the monitor.
if self.tpoint is None and self.zplane is None:
if self.mapobject_type.ref_type == 'Plate':
polygon_thresh = 0
centroid_thresh = 0
elif self.mapobject_type.ref_type == 'Well':
polygon_thresh = maxzoom_level - 11
centroid_thresh = 0
elif self.mapobject_type.ref_type == 'Site':
polygon_thresh = maxzoom_level - 8
centroid_thresh = 0
else:
if represent_as_polygons:
polygon_thresh = maxzoom_level - 4
else:
polygon_thresh = maxzoom_level + 1
centroid_thresh = polygon_thresh - 2
polygon_thresh = 0 if polygon_thresh < 0 else polygon_thresh
centroid_thresh = 0 if centroid_thresh < 0 else centroid_thresh
return (polygon_thresh, centroid_thresh)
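    # Worked example (hypothetical numbers, added for illustration): for a
    # segmented (non-static) layer with maxzoom_level=12 and
    # represent_as_polygons=True this returns (8, 6), i.e. polygons are shown
    # from zoom level 8 upwards, centroids at levels 6 and 7, nothing below 6.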
def get_segmentations(self, x, y, z, tolerance=2):
'''Get outlines of each
:class:`Mapobject <tmlib.models.mapobject.Mapobject>`
contained by a given pyramid tile.
Parameters
----------
x: int
zero-based column map coordinate at the given `z` level
y: int
zero-based row map coordinate at the given `z` level
(negative integer values due to inverted *y*-axis)
z: int
zero-based zoom level index
tolerance: int, optional
maximum distance in pixels between points on the contour of
original polygons and simplified polygons;
            the higher the `tolerance`, the fewer coordinates will be used to
            describe the polygon and the less accurately it will be
            approximated; if ``0``, the original polygon is used
(default: ``2``)
Returns
-------
List[Tuple[int, str]]
GeoJSON representation of each selected mapobject
Note
----
        If *z* >= `polygon_thresh` mapobjects are represented by polygons, if
        `centroid_thresh` <= *z* < `polygon_thresh`,
        mapobjects are represented by centroid points and if *z* < `centroid_thresh`
        they are not represented at all.
'''
logger.debug('get mapobject outlines falling into tile')
session = Session.object_session(self)
maxzoom = self.mapobject_type.experiment.pyramid_depth - 1
minx, miny, maxx, maxy = self.get_tile_bounding_box(x, y, z, maxzoom)
tile = (
'POLYGON(('
'{maxx} {maxy}, {minx} {maxy}, {minx} {miny}, {maxx} {miny}, '
'{maxx} {maxy}'
'))'.format(minx=minx, maxx=maxx, miny=miny, maxy=maxy)
)
do_simplify = self.centroid_thresh <= z < self.polygon_thresh
do_nothing = z < self.centroid_thresh
if do_nothing:
            logger.debug('don\'t represent objects')
return list()
elif do_simplify:
logger.debug('represent objects by centroids')
query = session.query(
MapobjectSegmentation.mapobject_id,
MapobjectSegmentation.geom_centroid.ST_AsGeoJSON()
)
outlines = query.filter(
MapobjectSegmentation.segmentation_layer_id == self.id,
MapobjectSegmentation.geom_centroid.ST_Intersects(tile)
).\
all()
else:
logger.debug('represent objects by polygons')
tolerance = (maxzoom - z) ** 2 + 1
logger.debug('simplify polygons using tolerance %d', tolerance)
query = session.query(
MapobjectSegmentation.mapobject_id,
MapobjectSegmentation.geom_polygon.
ST_SimplifyPreserveTopology(tolerance).ST_AsGeoJSON()
)
outlines = query.filter(
MapobjectSegmentation.segmentation_layer_id == self.id,
MapobjectSegmentation.geom_polygon.ST_Intersects(tile)
).\
all()
if len(outlines) == 0:
logger.warn(
'no outlines found for objects of type "%s" within tile: '
'x=%d, y=%d, z=%d', self.mapobject_type.name, x, y, z
)
return outlines
def __repr__(self):
return (
'<%s(id=%d, mapobject_type_id=%r)>'
% (self.__class__.__name__, self.id, self.mapobject_type_id)
)
| TissueMAPS/TmLibrary | tmlib/models/mapobject.py | Python | agpl-3.0 | 36,238 |
from couchpotato import get_session
from couchpotato.core.event import addEvent, fireEventAsync, fireEvent
from couchpotato.core.helpers.encoding import toUnicode, simplifyString
from couchpotato.core.logger import CPLog
from couchpotato.core.media._base.library import LibraryBase
from couchpotato.core.settings.model import Library, LibraryTitle, File
from string import ascii_letters
import time
import traceback
import six
log = CPLog(__name__)
class MovieLibraryPlugin(LibraryBase):
default_dict = {'titles': {}, 'files': {}}
def __init__(self):
addEvent('library.add.movie', self.add)
addEvent('library.update.movie', self.update)
addEvent('library.update.movie.release_date', self.updateReleaseDate)
def add(self, attrs = None, update_after = True):
if not attrs: attrs = {}
primary_provider = attrs.get('primary_provider', 'imdb')
try:
db = get_session()
l = db.query(Library).filter_by(identifier = attrs.get('identifier')).first()
if not l:
status = fireEvent('status.get', 'needs_update', single = True)
l = Library(
year = attrs.get('year'),
identifier = attrs.get('identifier'),
plot = toUnicode(attrs.get('plot')),
tagline = toUnicode(attrs.get('tagline')),
status_id = status.get('id'),
info = {}
)
title = LibraryTitle(
title = toUnicode(attrs.get('title')),
simple_title = self.simplifyTitle(attrs.get('title')),
)
l.titles.append(title)
db.add(l)
db.commit()
# Update library info
if update_after is not False:
                handle = fireEventAsync if update_after == 'async' else fireEvent
handle('library.update.movie', identifier = l.identifier, default_title = toUnicode(attrs.get('title', '')))
library_dict = l.to_dict(self.default_dict)
return library_dict
except:
log.error('Failed adding media: %s', traceback.format_exc())
db.rollback()
finally:
db.close()
return {}
def update(self, identifier, default_title = '', extended = False):
if self.shuttingDown():
return
try:
db = get_session()
library = db.query(Library).filter_by(identifier = identifier).first()
done_status = fireEvent('status.get', 'done', single = True)
info = fireEvent('movie.info', merge = True, extended = extended, identifier = identifier)
# Don't need those here
try: del info['in_wanted']
except: pass
try: del info['in_library']
except: pass
if not info or len(info) == 0:
log.error('Could not update, no movie info to work with: %s', identifier)
return False
# Main info
library.plot = toUnicode(info.get('plot', ''))
library.tagline = toUnicode(info.get('tagline', ''))
library.year = info.get('year', 0)
library.status_id = done_status.get('id')
library.info.update(info)
db.commit()
# Titles
[db.delete(title) for title in library.titles]
db.commit()
titles = info.get('titles', [])
log.debug('Adding titles: %s', titles)
counter = 0
def_title = None
for title in titles:
if (len(default_title) == 0 and counter == 0) or len(titles) == 1 or title.lower() == toUnicode(default_title.lower()) or (toUnicode(default_title) == six.u('') and toUnicode(titles[0]) == title):
def_title = toUnicode(title)
break
counter += 1
if not def_title:
def_title = toUnicode(titles[0])
for title in titles:
if not title:
continue
title = toUnicode(title)
t = LibraryTitle(
title = title,
simple_title = self.simplifyTitle(title),
default = title == def_title
)
library.titles.append(t)
db.commit()
# Files
images = info.get('images', [])
for image_type in ['poster']:
for image in images.get(image_type, []):
if not isinstance(image, (str, unicode)):
continue
file_path = fireEvent('file.download', url = image, single = True)
if file_path:
file_obj = fireEvent('file.add', path = file_path, type_tuple = ('image', image_type), single = True)
try:
file_obj = db.query(File).filter_by(id = file_obj.get('id')).one()
library.files.append(file_obj)
db.commit()
break
except:
log.debug('Failed to attach to library: %s', traceback.format_exc())
db.rollback()
library_dict = library.to_dict(self.default_dict)
return library_dict
except:
log.error('Failed update media: %s', traceback.format_exc())
db.rollback()
finally:
db.close()
return {}
def updateReleaseDate(self, identifier):
try:
db = get_session()
library = db.query(Library).filter_by(identifier = identifier).first()
if not library.info:
library_dict = self.update(identifier)
dates = library_dict.get('info', {}).get('release_date')
else:
dates = library.info.get('release_date')
if dates and (dates.get('expires', 0) < time.time() or dates.get('expires', 0) > time.time() + (604800 * 4)) or not dates:
dates = fireEvent('movie.release_date', identifier = identifier, merge = True)
library.info.update({'release_date': dates})
db.commit()
return dates
except:
log.error('Failed updating release dates: %s', traceback.format_exc())
db.rollback()
finally:
db.close()
return {}
def simplifyTitle(self, title):
title = toUnicode(title)
nr_prefix = '' if title[0] in ascii_letters else '#'
title = simplifyString(title)
for prefix in ['the ']:
if prefix == title[:len(prefix)]:
title = title[len(prefix):]
break
return nr_prefix + title
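    # Illustrative example (added; assumes simplifyString lower-cases and
    # strips punctuation): simplifyTitle('The Matrix') -> 'matrix', while
    # simplifyTitle('300') -> '#300' because of the leading non-letter.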
| entomb/CouchPotatoServer | couchpotato/core/media/movie/library/movie/main.py | Python | gpl-3.0 | 6,972 |
# -*- coding: utf-8 -*-
from django import forms
from django.core.urlresolvers import reverse
from django.core.exceptions import ImproperlyConfigured
from django.forms.models import ModelForm
from django.template import TemplateSyntaxError
from django.test.utils import override_settings
from django.utils.encoding import smart_text
from django.utils.translation import ugettext as _
from cms.api import create_page, publish_page
from cms.cms_wizards import CMSPageWizard
from cms.constants import TEMPLATE_INHERITANCE_MAGIC
from cms.forms.wizards import CreateCMSPageForm, CreateCMSSubPageForm
from cms.models import Page, PageType, UserSettings
from cms.test_utils.testcases import CMSTestCase, TransactionCMSTestCase
from cms.utils import get_current_site
from cms.utils.conf import get_cms_setting
from cms.wizards.forms import step2_form_factory, WizardStep2BaseForm
from cms.wizards.wizard_base import Wizard
from cms.wizards.wizard_pool import wizard_pool, AlreadyRegisteredException
CreateCMSPageForm = step2_form_factory(
mixin_cls=WizardStep2BaseForm,
entry_form_class=CreateCMSPageForm,
)
CreateCMSSubPageForm = step2_form_factory(
mixin_cls=WizardStep2BaseForm,
entry_form_class=CreateCMSSubPageForm,
)
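# NOTE (added comment): the two assignments above intentionally shadow the
# imported CreateCMSPageForm/CreateCMSSubPageForm with composed wizard
# "step 2" forms (entry form + WizardStep2BaseForm), so the tests can pass the
# wizard_page/wizard_user/wizard_language keyword arguments seen below.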
class WizardForm(forms.Form):
pass
class ModelWizardForm(ModelForm):
class Meta:
model = UserSettings
exclude = []
class BadModelForm(ModelForm):
class Meta:
pass
class WizardTestMixin(object):
page_wizard = None
title_wizard = None
def assertSequencesEqual(self, seq_a, seq_b):
seq_a = list(seq_a)
seq_b = list(seq_b)
zipped = list(zip(seq_a, seq_b))
if len(zipped) < len(seq_a) or len(zipped) < len(seq_b):
self.fail("Sequence lengths are not the same.")
for idx, (a, b) in enumerate(zipped):
if a != b:
self.fail("Sequences differ at index {0}".format(idx))
@classmethod
def setUpClass(cls):
super(WizardTestMixin, cls).setUpClass()
# This prevents auto-discovery, which would otherwise occur as soon as
# tests start, creating unexpected starting conditions.
wizard_pool._discovered = True
class PageWizard(Wizard):
pass
# This is a basic Wizard
cls.page_wizard = PageWizard(
title=_(u"Page"),
weight=100,
form=WizardForm,
model=Page,
template_name='my_template.html', # This doesn't exist anywhere
)
class SettingsWizard(Wizard):
pass
# This is a Wizard that uses a ModelForm to define the model
cls.user_settings_wizard = SettingsWizard(
title=_(u"UserSettings"),
weight=200,
form=ModelWizardForm,
)
class TitleWizard(Wizard):
pass
# This is a bad wizard definition as it neither defines a model, nor
# uses a ModelForm that has model defined in Meta
cls.title_wizard = TitleWizard(
title=_(u"Page"),
weight=100,
form=BadModelForm,
template_name='my_template.html', # This doesn't exist anywhere
)
class TestWizardBase(WizardTestMixin, TransactionCMSTestCase):
def test_user_has_add_permission(self):
# Test does not have permission
user = self.get_staff_user_with_no_permissions()
self.assertFalse(self.page_wizard.user_has_add_permission(user))
# Test has permission
user = self.get_superuser()
self.assertTrue(self.page_wizard.user_has_add_permission(user))
def test_get_success_url(self):
user = self.get_superuser()
page = create_page(
title="Sample Page",
template=TEMPLATE_INHERITANCE_MAGIC,
language="en",
created_by=smart_text(user),
parent=None,
in_navigation=True,
published=False
)
url = "{0}?edit".format(page.get_absolute_url(language="en"))
self.assertEqual(self.page_wizard.get_success_url(
page, language="en"), url)
# Now again without a language code
url = "{0}?edit".format(page.get_absolute_url())
self.assertEqual(self.page_wizard.get_success_url(page), url)
def test_get_model(self):
self.assertEqual(self.page_wizard.get_model(), Page)
self.assertEqual(self.user_settings_wizard.get_model(), UserSettings)
with self.assertRaises(ImproperlyConfigured):
self.title_wizard.get_model()
def test_endpoint_auth_required(self):
endpoint = reverse('cms_wizard_create')
staff_active = self._create_user("staff-active", is_staff=True, is_superuser=False, is_active=True)
response = self.client.get(endpoint)
self.assertEqual(response.status_code, 403)
with self.login_user_context(staff_active):
response = self.client.get(endpoint)
self.assertEqual(response.status_code, 200)
class TestWizardPool(WizardTestMixin, CMSTestCase):
def test_discover(self):
wizard_pool._reset()
self.assertFalse(wizard_pool._discovered)
self.assertEqual(len(wizard_pool._entries), 0)
wizard_pool._discover()
self.assertTrue(wizard_pool._discovered)
def test_register_unregister_isregistered(self):
wizard_pool._clear()
self.assertEqual(len(wizard_pool._entries), 0)
wizard_pool.register(self.page_wizard)
# Now, try to register the same thing
with self.assertRaises(AlreadyRegisteredException):
wizard_pool.register(self.page_wizard)
self.assertEqual(len(wizard_pool._entries), 1)
self.assertTrue(wizard_pool.is_registered(self.page_wizard))
self.assertTrue(wizard_pool.unregister(self.page_wizard))
self.assertEqual(len(wizard_pool._entries), 0)
# Now, try to unregister something that is not registered
self.assertFalse(wizard_pool.unregister(self.user_settings_wizard))
def test_get_entry(self):
wizard_pool._clear()
wizard_pool.register(self.page_wizard)
entry = wizard_pool.get_entry(self.page_wizard)
self.assertEqual(entry, self.page_wizard)
def test_get_entries(self):
"""
Test that the registered entries are returned in weight-order, no matter
which order they were added.
"""
wizard_pool._clear()
wizard_pool.register(self.page_wizard)
wizard_pool.register(self.user_settings_wizard)
wizards = [self.page_wizard, self.user_settings_wizard]
wizards = sorted(wizards, key=lambda e: getattr(e, 'weight'))
entries = wizard_pool.get_entries()
self.assertSequencesEqual(entries, wizards)
wizard_pool._clear()
wizard_pool.register(self.user_settings_wizard)
wizard_pool.register(self.page_wizard)
wizards = [self.page_wizard, self.user_settings_wizard]
wizards = sorted(wizards, key=lambda e: getattr(e, 'weight'))
entries = wizard_pool.get_entries()
self.assertSequencesEqual(entries, wizards)
class TestPageWizard(WizardTestMixin, CMSTestCase):
def test_str(self):
page_wizard = [
entry for entry in wizard_pool.get_entries()
if isinstance(entry, CMSPageWizard)
][0]
self.assertEqual(str(page_wizard), page_wizard.title)
def test_repr(self):
page_wizard = [
entry for entry in wizard_pool.get_entries()
if isinstance(entry, CMSPageWizard)
][0]
self.assertIn("cms.cms_wizards.CMSPageWizard", repr(page_wizard))
self.assertIn("id={}".format(page_wizard.id), repr(page_wizard))
self.assertIn(hex(id(page_wizard)), repr(page_wizard))
def test_wizard_first_page_published(self):
superuser = self.get_superuser()
data = {
'title': 'page 1',
'slug': 'page_1',
'page_type': None,
}
form = CreateCMSPageForm(
data=data,
wizard_page=None,
wizard_user=superuser,
wizard_language='en',
)
self.assertTrue(form.is_valid())
page = form.save()
self.assertTrue(page.is_published('en'))
with self.login_user_context(superuser):
url = page.get_absolute_url('en')
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
def test_wizard_create_child_page(self):
superuser = self.get_superuser()
parent_page = create_page(
title="Parent",
template=TEMPLATE_INHERITANCE_MAGIC,
language="en",
)
data = {
'title': 'Child',
'slug': 'child',
'page_type': None,
}
form = CreateCMSSubPageForm(
data=data,
wizard_page=parent_page,
wizard_user=superuser,
wizard_language='en',
)
self.assertTrue(form.is_valid())
child_page = form.save()
self.assertEqual(child_page.node.depth, 2)
self.assertEqual(child_page.parent_page, parent_page)
self.assertEqual(child_page.get_title('en'), 'Child')
self.assertEqual(child_page.get_path('en'), 'parent/child')
def test_wizard_create_child_page_under_page_type(self):
"""
When a user creates a child page through the wizard,
if the parent page is a page-type, the child page should
also be a page-type.
"""
site = get_current_site()
superuser = self.get_superuser()
source_page = create_page(
title="Source",
template=TEMPLATE_INHERITANCE_MAGIC,
language="en",
)
with self.login_user_context(superuser):
self.client.post(
self.get_admin_url(PageType, 'add'),
data={'source': source_page.pk, 'title': 'type1', 'slug': 'type1', '_save': 1},
)
types_root = PageType.get_root_page(site)
parent_page = types_root.get_child_pages()[0]
data = {
'title': 'page-type-child',
'slug': 'page-type-child',
'page_type': None,
}
form = CreateCMSSubPageForm(
data=data,
wizard_page=parent_page,
wizard_user=superuser,
wizard_language='en',
)
self.assertTrue(form.is_valid())
child_page = form.save()
self.assertTrue(child_page.is_page_type)
self.assertFalse(child_page.in_navigation)
self.assertEqual(child_page.node.depth, 3)
self.assertEqual(child_page.parent_page, parent_page)
self.assertEqual(child_page.get_title('en'), 'page-type-child')
self.assertEqual(child_page.get_path('en'), 'page_types/type1/page-type-child')
def test_wizard_create_atomic(self):
# Ref: https://github.com/divio/django-cms/issues/5652
# We'll simulate a scenario where a user creates a page with an
# invalid template which causes Django to throw an error when the
# template is scanned for placeholders and thus short circuits the
# creation mechanism.
superuser = self.get_superuser()
data = {
'title': 'page 1',
'slug': 'page_1',
'page_type': None,
}
form = CreateCMSPageForm(
data=data,
wizard_page=None,
wizard_user=superuser,
wizard_language='en',
)
self.assertTrue(form.is_valid())
self.assertFalse(Page.objects.filter(template=TEMPLATE_INHERITANCE_MAGIC).exists())
with self.settings(CMS_TEMPLATES=[("col_invalid.html", "notvalid")]):
self.assertRaises(TemplateSyntaxError, form.save)
# The template raised an exception which should cause the database to roll back
# instead of committing a page in a partial state.
self.assertFalse(Page.objects.filter(template=TEMPLATE_INHERITANCE_MAGIC).exists())
def test_wizard_content_placeholder_setting(self):
"""
Tests that the PageWizard respects the
CMS_PAGE_WIZARD_CONTENT_PLACEHOLDER setting.
"""
templates = get_cms_setting('TEMPLATES')
# NOTE, there are 4 placeholders on this template, defined in this
# order: 'header', 'content', 'sub-content', 'footer'.
# 'footer' is a static-placeholder.
templates.append(('page_wizard.html', 'page_wizard.html', ))
settings = {
'CMS_TEMPLATES': templates,
'CMS_PAGE_WIZARD_DEFAULT_TEMPLATE': 'page_wizard.html',
'CMS_PAGE_WIZARD_CONTENT_PLACEHOLDER': 'sub-content',
}
with override_settings(**settings):
superuser = self.get_superuser()
page = create_page("wizard home", "page_wizard.html", "en")
publish_page(page, superuser, "en")
content = '<p>sub-content content.</p>'
data = {
'title': 'page 1',
'slug': 'page_1',
'page_type': None,
'content': content,
}
form = CreateCMSPageForm(
data=data,
wizard_page=page,
wizard_user=superuser,
wizard_language='en',
)
self.assertTrue(form.is_valid())
page = form.save()
page.publish('en')
with self.login_user_context(superuser):
url = page.get_absolute_url('en')
expected = '<div class="sub-content">{0}</div>'.format(content)
unexpected = '<div class="content">{0}</div>'.format(content)
response = self.client.get(url)
self.assertContains(response, expected, status_code=200)
self.assertNotContains(response, unexpected, status_code=200)
def test_wizard_content_placeholder_bad_setting(self):
"""
Tests that the PageWizard won't respect a 'bad' setting such as
targeting a static-placeholder. In this case, will just fail to
add the content (without error).
"""
templates = get_cms_setting('TEMPLATES')
# NOTE, there are 4 placeholders on this template, defined in this
# order: 'header', 'content', 'sub-content', 'footer'.
# 'footer' is a static-placeholder.
templates.append(('page_wizard.html', 'page_wizard.html', ))
settings = {
'CMS_TEMPLATES': templates,
'CMS_PAGE_WIZARD_DEFAULT_TEMPLATE': 'page_wizard.html',
# This is a bad setting.
'CMS_PAGE_WIZARD_CONTENT_PLACEHOLDER': 'footer',
}
with override_settings(**settings):
superuser = self.get_superuser()
page = create_page("wizard home", "page_wizard.html", "en")
publish_page(page, superuser, "en")
content = '<p>footer content.</p>'
data = {
'title': 'page 1',
'slug': 'page_1',
'page_type': None,
'content': content,
}
form = CreateCMSPageForm(
data=data,
wizard_page=page,
wizard_user=superuser,
wizard_language='en',
)
self.assertTrue(form.is_valid())
page = form.save()
page.publish('en')
with self.login_user_context(superuser):
url = page.get_absolute_url('en')
response = self.client.get(url)
self.assertNotContains(response, content, status_code=200)
def test_create_page_with_empty_fields(self):
superuser = self.get_superuser()
data = {
'title': '',
'slug': '',
'page_type': None,
}
form = CreateCMSPageForm(
data=data,
wizard_page=None,
wizard_user=superuser,
wizard_language='en',
)
self.assertFalse(form.is_valid())
def test_create_page_with_existing_slug(self):
superuser = self.get_superuser()
data = {
'title': 'page',
'slug': 'page',
'page_type': None,
}
create_page(
'page',
'nav_playground.html',
language='en',
published=True,
slug='page'
)
        # slug -> page-2
form = CreateCMSPageForm(
data=data,
wizard_page=None,
wizard_user=superuser,
wizard_language='en',
)
self.assertTrue(form.is_valid())
self.assertTrue(form.save().title_set.filter(slug='page-2'))
        # slug -> page-3
form = CreateCMSPageForm(
data=data,
wizard_page=None,
wizard_user=superuser,
wizard_language='en',
)
self.assertTrue(form.is_valid())
self.assertTrue(form.save().title_set.filter(slug='page-3'))
# Now explicitly request the page-2 slug
data['slug'] = 'page-2'
# slug -> page-2-2
form = CreateCMSPageForm(
data=data,
wizard_page=None,
wizard_user=superuser,
wizard_language='en',
)
self.assertTrue(form.is_valid())
self.assertTrue(form.save().title_set.filter(slug='page-2-2'))
# slug -> page-2-3
form = CreateCMSPageForm(
data=data,
wizard_page=None,
wizard_user=superuser,
wizard_language='en',
)
self.assertTrue(form.is_valid())
self.assertTrue(form.save().title_set.filter(slug='page-2-3'))
| czpython/django-cms | cms/tests/test_wizards.py | Python | bsd-3-clause | 17,882 |
from django.conf.urls import patterns, include, url
from django.contrib import admin
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'django_project.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^$', include('apps.home.urls', namespace='home', app_name='home')),
url(r'^app/', include('apps.gen.urls', namespace='gen', app_name='gen')),
url(r'^admin/', include(admin.site.urls)),
)
| MDA2014/django-xpand | django_project/django_project/urls.py | Python | mit | 439 |
# this will go to src/common/xmpp later, for now it is in src/common
# -*- coding:utf-8 -*-
## src/common/dataforms.py
##
## Copyright (C) 2006-2007 Tomasz Melcer <liori AT exroot.org>
## Copyright (C) 2006-2017 Yann Leboulanger <asterix AT lagaule.org>
## Copyright (C) 2007 Stephan Erb <steve-e AT h3c.de>
##
## This file is part of Gajim.
##
## Gajim is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published
## by the Free Software Foundation; version 3 only.
##
## Gajim is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Gajim. If not, see <http://www.gnu.org/licenses/>.
##
"""
This module contains wrappers for different parts of data forms (JEP 0004). For
information on how to use them, read the documentation.
"""
import nbxmpp
import helpers
# exceptions used in this module
# base class
class Error(Exception): pass
# when we get nbxmpp.Node which we do not understand
class UnknownDataForm(Error): pass
# when we get nbxmpp.Node which contains bad fields
class WrongFieldValue(Error): pass
# helper class to change class of already existing object
class ExtendedNode(nbxmpp.Node, object):
@classmethod
def __new__(cls, *a, **b):
if 'extend' not in b.keys() or not b['extend']:
return object.__new__(cls)
extend = b['extend']
assert issubclass(cls, extend.__class__)
extend.__class__ = cls
return extend
# helper decorator to create properties in cleaner way
def nested_property(f):
ret = f()
p = {'doc': f.__doc__}
for v in ('fget', 'fset', 'fdel', 'doc'):
if v in ret.keys(): p[v]=ret[v]
return property(**p)
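# Illustrative sketch (added comment): a function decorated with
# @nested_property returns locals(), from which 'fget', 'fset', 'fdel' and the
# docstring are picked up, e.g.
#
#   @nested_property
#   def value():
#       def fget(self): ...
#       def fset(self, value): ...
#       return locals()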
# helper to create fields from scratch
def Field(typ, **attrs):
''' Helper function to create a field of given type. '''
f = {
'boolean': BooleanField,
'fixed': StringField,
'hidden': StringField,
'text-private': StringField,
'text-single': StringField,
'jid-multi': JidMultiField,
'jid-single': JidSingleField,
'list-multi': ListMultiField,
'list-single': ListSingleField,
'text-multi': TextMultiField,
}[typ](typ=typ, **attrs)
return f
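# Example (illustrative only): Field('boolean', var='public', value=True,
# label='Public?') builds a BooleanField whose <value> child is set to '1'.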
def ExtendField(node):
"""
Helper function to extend a node to field of appropriate type
"""
# when validation (XEP-122) will go in, we could have another classes
# like DateTimeField - so that dicts in Field() and ExtendField() will
# be different...
typ=node.getAttr('type')
f = {
'boolean': BooleanField,
'fixed': StringField,
'hidden': StringField,
'text-private': StringField,
'text-single': StringField,
'jid-multi': JidMultiField,
'jid-single': JidSingleField,
'list-multi': ListMultiField,
'list-single': ListSingleField,
'text-multi': TextMultiField,
}
if typ not in f:
typ = 'text-single'
return f[typ](extend=node)
def ExtendForm(node):
"""
Helper function to extend a node to form of appropriate type
"""
if node.getTag('reported') is not None:
return MultipleDataForm(extend=node)
else:
return SimpleDataForm(extend=node)
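# Example (illustrative only): given an incoming data form node, ExtendForm()
# returns a MultipleDataForm when a <reported> child is present (multi-item
# results, e.g. search results) and a SimpleDataForm otherwise.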
class DataField(ExtendedNode):
"""
Keeps data about one field - var, field type, labels, instructions... Base
    class for different kinds of fields. Use the Field() function to construct one
of these
"""
def __init__(self, typ=None, var=None, value=None, label=None, desc=None,
required=False, options=None, extend=None):
if extend is None:
ExtendedNode.__init__(self, 'field')
self.type_ = typ
self.var = var
if value is not None:
self.value = value
if label is not None:
self.label = label
if desc is not None:
self.desc = desc
self.required = required
self.options = options
@nested_property
def type_():
"""
Type of field. Recognized values are: 'boolean', 'fixed', 'hidden',
'jid-multi', 'jid-single', 'list-multi', 'list-single', 'text-multi',
'text-private', 'text-single'. If you set this to something different,
DataField will store given name, but treat all data as text-single
"""
def fget(self):
t = self.getAttr('type')
if t is None:
return 'text-single'
return t
def fset(self, value):
assert isinstance(value, basestring)
self.setAttr('type', value)
return locals()
@nested_property
def var():
"""
Field identifier
"""
def fget(self):
return self.getAttr('var')
def fset(self, value):
assert isinstance(value, basestring)
self.setAttr('var', value)
def fdel(self):
self.delAttr('var')
return locals()
@nested_property
def label():
"""
Human-readable field name
"""
def fget(self):
l = self.getAttr('label')
if not l:
l = self.var
return l
def fset(self, value):
assert isinstance(value, basestring)
self.setAttr('label', value)
def fdel(self):
if self.getAttr('label'):
self.delAttr('label')
return locals()
@nested_property
def description():
"""
Human-readable description of field meaning
"""
def fget(self):
return self.getTagData('desc') or u''
def fset(self, value):
assert isinstance(value, basestring)
if value == '':
fdel(self)
else:
self.setTagData('desc', value)
def fdel(self):
t = self.getTag('desc')
if t is not None:
self.delChild(t)
return locals()
@nested_property
def required():
"""
        Controls whether this field is required to be filled in. Boolean
"""
def fget(self):
return bool(self.getTag('required'))
def fset(self, value):
t = self.getTag('required')
if t and not value:
self.delChild(t)
elif not t and value:
self.addChild('required')
return locals()
@nested_property
def media():
"""
Media data
"""
def fget(self):
media = self.getTag('media', namespace=nbxmpp.NS_DATA_MEDIA)
if media:
return Media(media)
def fset(self, value):
fdel(self)
self.addChild(node=value)
def fdel(self):
t = self.getTag('media')
if t is not None:
self.delChild(t)
return locals()
def is_valid(self):
return True
class Uri(nbxmpp.Node):
def __init__(self, uri_tag):
nbxmpp.Node.__init__(self, node=uri_tag)
@nested_property
def type_():
"""
uri type
"""
def fget(self):
return self.getAttr('type')
def fset(self, value):
self.setAttr('type', value)
def fdel(self):
self.delAttr('type')
return locals()
@nested_property
def uri_data():
"""
uri data
"""
def fget(self):
return self.getData()
def fset(self, value):
self.setData(value)
def fdel(self):
self.setData(None)
return locals()
class Media(nbxmpp.Node):
def __init__(self, media_tag):
nbxmpp.Node.__init__(self, node=media_tag)
@nested_property
def uris():
"""
URIs of the media element.
"""
def fget(self):
return map(Uri, self.getTags('uri'))
def fset(self, values):
fdel(self)
for uri in values:
self.addChild(node=uri)
def fdel(self):
for element in self.getTags('uri'):
self.delChild(element)
return locals()
class BooleanField(DataField):
@nested_property
def value():
"""
Value of field. May contain True, False or None
"""
def fget(self):
v = self.getTagData('value')
if v in ('0', 'false'):
return False
if v in ('1', 'true'):
return True
if v is None:
return False # default value is False
raise WrongFieldValue
def fset(self, value):
self.setTagData('value', value and '1' or '0')
        def fdel(self):
t = self.getTag('value')
if t is not None:
self.delChild(t)
return locals()
class StringField(DataField):
"""
Covers fields of types: fixed, hidden, text-private, text-single
"""
@nested_property
def value():
"""
Value of field. May be any unicode string
"""
def fget(self):
return self.getTagData('value') or u''
def fset(self, value):
assert isinstance(value, basestring)
if value == '' and not self.required:
return fdel(self)
self.setTagData('value', value)
def fdel(self):
try:
self.delChild(self.getTag('value'))
except ValueError: # if there already were no value tag
pass
return locals()
class ListField(DataField):
"""
Covers fields of types: jid-multi, jid-single, list-multi, list-single
"""
@nested_property
def options():
"""
Options
"""
def fget(self):
options = []
for element in self.getTags('option'):
v = element.getTagData('value')
if v is None:
raise WrongFieldValue
l = element.getAttr('label')
if not l:
l = v
options.append((l, v))
return options
def fset(self, values):
fdel(self)
for value, label in values:
self.addChild('option', {'label': label}).setTagData('value', value)
def fdel(self):
for element in self.getTags('option'):
self.delChild(element)
return locals()
def iter_options(self):
for element in self.iterTags('option'):
v = element.getTagData('value')
if v is None:
raise WrongFieldValue
l = element.getAttr('label')
if not l:
l = v
yield (v, l)
class ListSingleField(ListField, StringField):
"""
Covers list-single field
"""
def is_valid(self):
if not self.required:
return True
if not self.value:
return False
return True
class JidSingleField(ListSingleField):
"""
Covers jid-single fields
"""
def is_valid(self):
if self.value:
try:
helpers.parse_jid(self.value)
return True
except:
return False
if self.required:
return False
return True
class ListMultiField(ListField):
"""
Covers list-multi fields
"""
@nested_property
def values():
"""
Values held in field
"""
def fget(self):
values = []
for element in self.getTags('value'):
values.append(element.getData())
return values
def fset(self, values):
fdel(self)
for value in values:
self.addChild('value').setData(value)
def fdel(self):
for element in self.getTags('value'):
self.delChild(element)
return locals()
def iter_values(self):
for element in self.getTags('value'):
yield element.getData()
def is_valid(self):
if not self.required:
return True
if not self.values:
return False
return True
class JidMultiField(ListMultiField):
"""
Covers jid-multi fields
"""
def is_valid(self):
if len(self.values):
for value in self.values:
try:
helpers.parse_jid(value)
except:
return False
return True
if self.required:
return False
return True
class TextMultiField(DataField):
@nested_property
def value():
"""
Value held in field
"""
def fget(self):
value = u''
for element in self.iterTags('value'):
value += '\n' + element.getData()
return value[1:]
def fset(self, value):
fdel(self)
if value == '':
return
for line in value.split('\n'):
self.addChild('value').setData(line)
def fdel(self):
for element in self.getTags('value'):
self.delChild(element)
return locals()
class DataRecord(ExtendedNode):
"""
The container for data fields - an xml element which has DataField elements
as children
"""
def __init__(self, fields=None, associated=None, extend=None):
self.associated = associated
self.vars = {}
if extend is None:
# we have to build this object from scratch
nbxmpp.Node.__init__(self)
if fields is not None:
self.fields = fields
else:
# we already have nbxmpp.Node inside - try to convert all
# fields into DataField objects
if fields is None:
for field in self.iterTags('field'):
if not isinstance(field, DataField):
ExtendField(field)
self.vars[field.var] = field
else:
for field in self.getTags('field'):
self.delChild(field)
self.fields = fields
@nested_property
def fields():
"""
List of fields in this record
"""
def fget(self):
return self.getTags('field')
def fset(self, fields):
fdel(self)
for field in fields:
if not isinstance(field, DataField):
                    ExtendField(field)
self.addChild(node=field)
def fdel(self):
for element in self.getTags('field'):
self.delChild(element)
return locals()
def iter_fields(self):
"""
Iterate over fields in this record. Do not take associated into account
"""
for field in self.iterTags('field'):
yield field
def iter_with_associated(self):
"""
Iterate over associated, yielding both our field and associated one
together
"""
for field in self.associated.iter_fields():
yield self[field.var], field
def __getitem__(self, item):
return self.vars[item]
def is_valid(self):
for f in self.iter_fields():
if not f.is_valid():
return False
return True
class DataForm(ExtendedNode):
def __init__(self, type_=None, title=None, instructions=None, extend=None):
if extend is None:
# we have to build form from scratch
nbxmpp.Node.__init__(self, 'x', attrs={'xmlns': nbxmpp.NS_DATA})
if type_ is not None:
self.type_=type_
if title is not None:
self.title=title
if instructions is not None:
self.instructions=instructions
@nested_property
def type_():
"""
Type of the form. Must be one of: 'form', 'submit', 'cancel', 'result'.
        'form' - this form is to be filled in; you will soon be able to do:
filledform = DataForm(replyto=thisform)
"""
def fget(self):
return self.getAttr('type')
def fset(self, type_):
assert type_ in ('form', 'submit', 'cancel', 'result')
self.setAttr('type', type_)
return locals()
@nested_property
def title():
"""
Title of the form
Human-readable, should not contain any \\r\\n.
"""
def fget(self):
return self.getTagData('title')
def fset(self, title):
self.setTagData('title', title)
def fdel(self):
try:
self.delChild('title')
except ValueError:
pass
return locals()
@nested_property
def instructions():
"""
Instructions for this form
Human-readable, may contain \\r\\n.
"""
# TODO: the same code is in TextMultiField. join them
def fget(self):
value = u''
for valuenode in self.getTags('instructions'):
value += '\n' + valuenode.getData()
return value[1:]
def fset(self, value):
fdel(self)
if value == '': return
for line in value.split('\n'):
self.addChild('instructions').setData(line)
def fdel(self):
for value in self.getTags('instructions'):
self.delChild(value)
return locals()
class SimpleDataForm(DataForm, DataRecord):
def __init__(self, type_=None, title=None, instructions=None, fields=None, \
extend=None):
DataForm.__init__(self, type_=type_, title=title,
instructions=instructions, extend=extend)
DataRecord.__init__(self, fields=fields, extend=self, associated=self)
def get_purged(self):
c = SimpleDataForm(extend=self)
del c.title
c.instructions = ''
to_be_removed = []
for f in c.iter_fields():
if f.required:
# add <value> if there is not
if hasattr(f, 'value') and not f.value:
f.value = ''
# Keep all required fields
continue
if (hasattr(f, 'value') and not f.value and f.value != 0) or (
hasattr(f, 'values') and len(f.values) == 0):
to_be_removed.append(f)
else:
del f.label
del f.description
del f.media
for f in to_be_removed:
c.delChild(f)
return c
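# A sketch of how get_purged() above is typically used when submitting a form
# (the surrounding send logic is omitted):
#
#   form.type_ = 'submit'
#   payload = form.get_purged()   # form stripped of labels, descriptions,
#                                 # media and empty optional fields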
class MultipleDataForm(DataForm):
def __init__(self, type_=None, title=None, instructions=None, items=None,
extend=None):
DataForm.__init__(self, type_=type_, title=title,
instructions=instructions, extend=extend)
# all records, recorded into DataRecords
if extend is None:
if items is not None:
self.items = items
else:
# we already have nbxmpp.Node inside - try to convert all
# fields into DataField objects
if items is None:
self.items = list(self.iterTags('item'))
else:
for item in self.getTags('item'):
self.delChild(item)
self.items = items
reported_tag = self.getTag('reported')
self.reported = DataRecord(extend=reported_tag)
@nested_property
def items():
"""
A list of all records
"""
def fget(self):
return list(self.iter_records())
def fset(self, records):
fdel(self)
for record in records:
if not isinstance(record, DataRecord):
DataRecord(extend=record)
self.addChild(node=record)
def fdel(self):
for record in self.getTags('item'):
self.delChild(record)
return locals()
def iter_records(self):
for record in self.getTags('item'):
yield record
# @nested_property
# def reported():
# """
# DataRecord that contains descriptions of fields in records
# """
# def fget(self):
# return self.getTag('reported')
# def fset(self, record):
# try:
# self.delChild('reported')
# except:
# pass
#
# record.setName('reported')
# self.addChild(node=record)
# return locals()
| jabber-at/gajim | src/common/dataforms.py | Python | gpl-3.0 | 21,186 |
# SPDX-License-Identifier: GPL-2.0+
# Copyright (c) 2016 Google, Inc
# Written by Simon Glass <sjg@chromium.org>
#
# Test for the fdt modules
import os
import sys
import tempfile
import unittest
from dtoc import fdt
from dtoc import fdt_util
from dtoc.fdt import FdtScan
from patman import tools
class TestFdt(unittest.TestCase):
@classmethod
def setUpClass(self):
self._binman_dir = os.path.dirname(os.path.realpath(sys.argv[0]))
self._indir = tempfile.mkdtemp(prefix='binmant.')
tools.PrepareOutputDir(self._indir, True)
@classmethod
def tearDownClass(self):
tools._FinaliseForTest()
def TestFile(self, fname):
return os.path.join(self._binman_dir, 'test', fname)
def GetCompiled(self, fname):
return fdt_util.EnsureCompiled(self.TestFile(fname))
def _DeleteProp(self, dt):
node = dt.GetNode('/microcode/update@0')
node.DeleteProp('data')
def testFdtNormal(self):
fname = self.GetCompiled('034_x86_ucode.dts')
dt = FdtScan(fname)
self._DeleteProp(dt)
def testFdtNormalProp(self):
fname = self.GetCompiled('045_prop_test.dts')
dt = FdtScan(fname)
node = dt.GetNode('/binman/intel-me')
self.assertEquals('intel-me', node.name)
val = fdt_util.GetString(node, 'filename')
self.assertEquals(str, type(val))
self.assertEquals('me.bin', val)
prop = node.props['intval']
self.assertEquals(fdt.TYPE_INT, prop.type)
self.assertEquals(3, fdt_util.GetInt(node, 'intval'))
prop = node.props['intarray']
self.assertEquals(fdt.TYPE_INT, prop.type)
self.assertEquals(list, type(prop.value))
self.assertEquals(2, len(prop.value))
self.assertEquals([5, 6],
[fdt_util.fdt32_to_cpu(val) for val in prop.value])
prop = node.props['byteval']
self.assertEquals(fdt.TYPE_BYTE, prop.type)
self.assertEquals(chr(8), prop.value)
prop = node.props['bytearray']
self.assertEquals(fdt.TYPE_BYTE, prop.type)
self.assertEquals(list, type(prop.value))
self.assertEquals(str, type(prop.value[0]))
self.assertEquals(3, len(prop.value))
self.assertEquals([chr(1), '#', '4'], prop.value)
prop = node.props['longbytearray']
self.assertEquals(fdt.TYPE_INT, prop.type)
self.assertEquals(0x090a0b0c, fdt_util.GetInt(node, 'longbytearray'))
prop = node.props['stringval']
self.assertEquals(fdt.TYPE_STRING, prop.type)
self.assertEquals('message2', fdt_util.GetString(node, 'stringval'))
prop = node.props['stringarray']
self.assertEquals(fdt.TYPE_STRING, prop.type)
self.assertEquals(list, type(prop.value))
self.assertEquals(3, len(prop.value))
self.assertEquals(['another', 'multi-word', 'message'], prop.value)
| Digilent/u-boot-digilent | tools/binman/fdt_test.py | Python | gpl-2.0 | 2,920 |
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import unittest
from jnius import autoclass
class PassByReferenceOrValueTest(unittest.TestCase):
def _verify(self, numbers, changed):
for i in range(len(numbers)):
self.assertEqual(numbers[i], i * i if changed else i)
def _verify_all(self, numbers, changed):
for n, c in zip(numbers, changed):
self._verify(n, c)
def test_single_param_static(self):
VariablePassing = autoclass('org.jnius.VariablePassing')
# passed by reference (default), numbers should change
numbers = list(range(10))
VariablePassing.singleParamStatic(numbers)
self._verify(numbers, True)
# passed by reference, numbers should change
numbers = list(range(10))
VariablePassing.singleParamStatic(numbers, pass_by_reference=True)
self._verify(numbers, True)
# passed by value, numbers should not change
numbers = list(range(10))
VariablePassing.singleParamStatic(numbers, pass_by_reference=False)
self._verify(numbers, False)
def test_single_param(self):
VariablePassing = autoclass('org.jnius.VariablePassing')
variablePassing = VariablePassing()
# passed by reference (default), numbers should change
numbers = list(range(10))
variablePassing.singleParam(numbers)
self._verify(numbers, True)
# passed by reference, numbers should change
numbers = list(range(10))
variablePassing.singleParam(numbers, pass_by_reference=True)
self._verify(numbers, True)
# passed by value, numbers should not change
numbers = list(range(10))
variablePassing.singleParam(numbers, pass_by_reference=False)
self._verify(numbers, False)
def test_multiple_params_static(self):
VariablePassing = autoclass('org.jnius.VariablePassing')
# passed by reference (default), all numbers should change
numbers = [list(range(10)) for _ in range(4)]
VariablePassing.multipleParamsStatic(*numbers)
self._verify_all(numbers, [True] * 4)
# passed by reference, all numbers should change
numbers = [list(range(10)) for _ in range(4)]
VariablePassing.multipleParamsStatic(*numbers, pass_by_reference=True)
self._verify_all(numbers, [True] * 4)
# passed by value, no numbers should change
numbers = [list(range(10)) for _ in range(4)]
VariablePassing.multipleParamsStatic(*numbers, pass_by_reference=False)
self._verify_all(numbers, [False] * 4)
# only the first set of numbers should change
numbers = [list(range(10)) for _ in range(4)]
VariablePassing.multipleParamsStatic(*numbers, pass_by_reference=[True, False])
self._verify_all(numbers, [True, False, False, False])
# only the first set of numbers should not change
numbers = [list(range(10)) for _ in range(4)]
VariablePassing.multipleParamsStatic(*numbers, pass_by_reference=[False, True])
self._verify_all(numbers, [False, True, True, True])
# only the odd sets of numbers should change
numbers = [list(range(10)) for _ in range(4)]
changed = (True, False, True, False)
VariablePassing.multipleParamsStatic(*numbers, pass_by_reference=changed)
self._verify_all(numbers, changed)
# only the even sets of numbers should change
numbers = [list(range(10)) for _ in range(4)]
changed = (False, True, False, True)
VariablePassing.multipleParamsStatic(*numbers, pass_by_reference=changed)
self._verify_all(numbers, changed)
def test_multiple_params(self):
VariablePassing = autoclass('org.jnius.VariablePassing')
variablePassing = VariablePassing()
# passed by reference (default), all numbers should change
numbers = [list(range(10)) for _ in range(4)]
variablePassing.multipleParams(*numbers)
self._verify_all(numbers, [True] * 4)
# passed by reference, all numbers should change
numbers = [list(range(10)) for _ in range(4)]
variablePassing.multipleParams(*numbers, pass_by_reference=True)
self._verify_all(numbers, [True] * 4)
# passed by value, no numbers should change
numbers = [list(range(10)) for _ in range(4)]
variablePassing.multipleParams(*numbers, pass_by_reference=False)
self._verify_all(numbers, [False] * 4)
# only the first set of numbers should change
numbers = [list(range(10)) for _ in range(4)]
variablePassing.multipleParams(*numbers, pass_by_reference=[True, False])
self._verify_all(numbers, [True, False, False, False])
# only the first set of numbers should not change
numbers = [list(range(10)) for _ in range(4)]
variablePassing.multipleParams(*numbers, pass_by_reference=[False, True])
self._verify_all(numbers, [False, True, True, True])
# only the odd sets of numbers should change
numbers = [list(range(10)) for _ in range(4)]
changed = (True, False, True, False)
variablePassing.multipleParams(*numbers, pass_by_reference=changed)
self._verify_all(numbers, changed)
# only the even sets of numbers should change
numbers = [list(range(10)) for _ in range(4)]
changed = (False, True, False, True)
variablePassing.multipleParams(*numbers, pass_by_reference=changed)
self._verify_all(numbers, changed)
    def test_constructor_single_param(self):
VariablePassing = autoclass('org.jnius.VariablePassing')
# passed by reference (default), numbers should change
numbers = list(range(10))
variablePassing = VariablePassing(numbers)
self._verify(numbers, True)
# passed by reference, numbers should change
numbers = list(range(10))
variablePassing = VariablePassing(numbers, pass_by_reference=True)
self._verify(numbers, True)
# passed by value, numbers should not change
numbers = list(range(10))
variablePassing = VariablePassing(numbers, pass_by_reference=False)
self._verify(numbers, False)
    def test_constructor_multiple_params(self):
VariablePassing = autoclass('org.jnius.VariablePassing')
# passed by reference (default), all numbers should change
numbers = [list(range(10)) for _ in range(4)]
variablePassing = VariablePassing(*numbers)
self._verify_all(numbers, [True] * 4)
# passed by reference, all numbers should change
numbers = [list(range(10)) for _ in range(4)]
variablePassing = VariablePassing(*numbers, pass_by_reference=True)
self._verify_all(numbers, [True] * 4)
# passed by value, no numbers should change
numbers = [list(range(10)) for _ in range(4)]
variablePassing = VariablePassing(*numbers, pass_by_reference=False)
self._verify_all(numbers, [False] * 4)
# only the first set of numbers should change
numbers = [list(range(10)) for _ in range(4)]
variablePassing = VariablePassing(*numbers, pass_by_reference=[True, False])
self._verify_all(numbers, [True, False, False, False])
# only the first set of numbers should not change
numbers = [list(range(10)) for _ in range(4)]
variablePassing = VariablePassing(*numbers, pass_by_reference=[False, True])
self._verify_all(numbers, [False, True, True, True])
# only the odd sets of numbers should change
numbers = [list(range(10)) for _ in range(4)]
changed = (True, False, True, False)
variablePassing = VariablePassing(*numbers, pass_by_reference=changed)
self._verify_all(numbers, changed)
# only the even sets of numbers should change
numbers = [list(range(10)) for _ in range(4)]
changed = (False, True, False, True)
variablePassing = VariablePassing(*numbers, pass_by_reference=changed)
self._verify_all(numbers, changed)
| kivy/pyjnius | tests/test_pass_by_reference_or_value.py | Python | mit | 8,201 |
from setuptools import setup
setup(
name = 'mcnotify',
version = '1.0.1',
author = 'Shusui Moyatani',
author_email = 'syusui.s@gmail.com',
url = 'https://github.com/syusui-s/mcnotify',
license = ['MIT'],
description = 'Minecraft status notifier',
scripts = ['scripts/mcnotify_update'],
install_requires = ['mcstatus>=2.1'],
)
| syusui-s/mcnotify | setup.py | Python | mit | 364 |
import collections
import nltk.classify.util, nltk.metrics
from nltk.classify import NaiveBayesClassifier
from nltk.classify import DecisionTreeClassifier
from nltk.corpus import CategorizedPlaintextCorpusReader
from sklearn import svm
from sklearn.svm import LinearSVC
import string
from tabulate import tabulate
corpus_root1='/Users/tianhan/Dropbox/Advanced_big_data_Project/aclImdb/train'
train=CategorizedPlaintextCorpusReader(corpus_root1,r'(pos|neg)/.*\.txt',cat_pattern=r'(pos|neg)/.*\.txt')
corpus_root2='/Users/tianhan/Dropbox/Advanced_big_data_Project/aclImdb/test'
test=CategorizedPlaintextCorpusReader(corpus_root2,r'(pos|neg)/.*\.txt',cat_pattern=r'(pos|neg)/.*\.txt')
def evaluate_classifier_Naive(featx):
train_negids = train.fileids('neg')
train_posids = train.fileids('pos')
test_negids = test.fileids('neg')
test_posids = test.fileids('pos')
train_negfeats = [(featx(train.words(fileids=[f])), 'neg') for f in train_negids]
train_posfeats = [(featx(train.words(fileids=[f])), 'pos') for f in train_posids]
test_negfeats = [(featx(test.words(fileids=[f])), 'neg') for f in test_negids]
test_posfeats = [(featx(test.words(fileids=[f])), 'pos') for f in test_posids]
trainfeats = train_negfeats + train_posfeats
testfeats = test_negfeats + test_posfeats
Naive_classifier = NaiveBayesClassifier.train(trainfeats)
refsets = collections.defaultdict(set)
testsets_Naive = collections.defaultdict(set)
for i, (feats, label) in enumerate(testfeats):
refsets[label].add(i)
observed_Naive = Naive_classifier.classify(feats)
testsets_Naive[observed_Naive].add(i)
accuracy1 = nltk.classify.util.accuracy(Naive_classifier, testfeats)
pos_precision1 = nltk.metrics.precision(refsets['pos'], testsets_Naive['pos'])
pos_recall1 = nltk.metrics.recall(refsets['pos'], testsets_Naive['pos'])
neg_precision1 = nltk.metrics.precision(refsets['neg'], testsets_Naive['neg'])
neg_recall1 = nltk.metrics.recall(refsets['neg'], testsets_Naive['neg'])
Naive_classifier.show_most_informative_features(50)
return(['NaiveBayes',accuracy1,pos_precision1,pos_recall1,neg_precision1,neg_recall1])
def evaluate_classifier_SVM(featx):
train_negids = train.fileids('neg')
train_posids = train.fileids('pos')
test_negids = test.fileids('neg')
test_posids = test.fileids('pos')
train_negfeats = [(featx(train.words(fileids=[f])), 'neg') for f in train_negids]
train_posfeats = [(featx(train.words(fileids=[f])), 'pos') for f in train_posids]
test_negfeats = [(featx(test.words(fileids=[f])), 'neg') for f in test_negids]
test_posfeats = [(featx(test.words(fileids=[f])), 'pos') for f in test_posids]
trainfeats = train_negfeats + train_posfeats
testfeats = test_negfeats + test_posfeats
classifier = nltk.classify.SklearnClassifier(LinearSVC())
SVM_classifier = classifier.train(trainfeats)
refsets = collections.defaultdict(set)
testsets_SVM = collections.defaultdict(set)
for i, (feats, label) in enumerate(testfeats):
refsets[label].add(i)
observed_SVM = classifier.classify(feats)
testsets_SVM[observed_SVM].add(i)
accuracy2 = nltk.classify.util.accuracy(classifier, testfeats)
pos_precision2 = nltk.metrics.precision(refsets['pos'], testsets_SVM['pos'])
pos_recall2 = nltk.metrics.recall(refsets['pos'], testsets_SVM['pos'])
neg_precision2 = nltk.metrics.precision(refsets['neg'], testsets_SVM['neg'])
neg_recall2 = nltk.metrics.recall(refsets['neg'], testsets_SVM['neg'])
return(['SVM',accuracy2,pos_precision2,pos_recall2,neg_precision2,neg_recall2])
def evaluate_classifier_Decision(featx):
train_negids = train.fileids('neg')
train_posids = train.fileids('pos')
test_negids = test.fileids('neg')
test_posids = test.fileids('pos')
train_negfeats = [(featx(train.words(fileids=[f])), 'neg') for f in train_negids]
train_posfeats = [(featx(train.words(fileids=[f])), 'pos') for f in train_posids]
test_negfeats = [(featx(test.words(fileids=[f])), 'neg') for f in test_negids]
test_posfeats = [(featx(test.words(fileids=[f])), 'pos') for f in test_posids]
trainfeats = train_negfeats + train_posfeats
testfeats = test_negfeats + test_posfeats
    # train the decision tree on only 1% of the data to keep it tractable
    train_negcutoff = len(train_negfeats) // 100
    train_poscutoff = len(train_posfeats) // 100
trainfeats_Decision = train_negfeats[:train_negcutoff] + train_posfeats[:train_poscutoff]
DecisionTree_classifier = DecisionTreeClassifier.train(trainfeats_Decision)
refsets = collections.defaultdict(set)
testsets_Decision = collections.defaultdict(set)
for i, (feats, label) in enumerate(testfeats):
refsets[label].add(i)
observed_Decision = DecisionTree_classifier.classify(feats)
testsets_Decision[observed_Decision].add(i)
accuracy3 = nltk.classify.util.accuracy(DecisionTree_classifier, testfeats)
pos_precision3 = nltk.metrics.precision(refsets['pos'], testsets_Decision['pos'])
pos_recall3 = nltk.metrics.recall(refsets['pos'], testsets_Decision['pos'])
neg_precision3 = nltk.metrics.precision(refsets['neg'], testsets_Decision['neg'])
neg_recall3 = nltk.metrics.recall(refsets['neg'], testsets_Decision['neg'])
return(['DecisionTree',accuracy3,pos_precision3,pos_recall3,neg_precision3,neg_recall3])
def word_feats(words):
return dict([(word, True) for word in words])
table1 = []
table1.append(evaluate_classifier_Naive(word_feats))
table1.append(evaluate_classifier_SVM(word_feats))
table1.append(evaluate_classifier_Decision(word_feats))
print('Single word features:')
print(tabulate(table1, headers=["Classifier","Accuracy","Positive precision", "Positive recall", "Negative precision", "Negative recall"]))
import itertools
from nltk.collocations import BigramCollocationFinder
from nltk.metrics import BigramAssocMeasures
def bigram_word_feats(words, score_fn=BigramAssocMeasures.chi_sq, n=200):
words_nopunc = [word for word in words if word not in string.punctuation]
bigram_finder = BigramCollocationFinder.from_words(words_nopunc)
bigrams = bigram_finder.nbest(score_fn, n)
return dict([(ngram, True) for ngram in itertools.chain(words_nopunc, bigrams)])
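# For instance, bigram_word_feats("the movie was not good".split()) returns a
# dict keyed by the unigrams plus the selected bigram tuples, roughly:
#   {'the': True, 'movie': True, ..., ('not', 'good'): True}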
table2 = []
table2.append(evaluate_classifier_Naive(bigram_word_feats))
table2.append(evaluate_classifier_SVM(bigram_word_feats))
table2.append(evaluate_classifier_Decision(bigram_word_feats))
print('Bigram word features:')
print(tabulate(table2, headers=["Classifier","Accuracy","Positive precision", "Positive recall", "Negative precision", "Negative recall"]))
| Sapphirine/Movie-Review-Sentiment-Analysis | Sentiment Analysis /evaluation.py | Python | mit | 6,756 |
'''
Created on Jun 12, 2014
Modified on Jun 16, 2014
Version 0.03
@author: rainier.madruga@gmail.com
A simple Python Program to scrape the BBC News website for content.
'''
from bs4 import BeautifulSoup
import os, sys, csv
import urllib2
import datetime
# Define current path for the Script
currentPath = os.path.dirname(os.path.abspath("__file__"))
# Get the BBC News Website to a Variable
webpage = urllib2.urlopen("http://www.bbc.com/news/world/latin_america/")
soup = BeautifulSoup(webpage)
# Define and output the raw HTML to a local file for reference
outputTxt = currentPath + '/BBC_news.html'
with open(outputTxt, "w") as f:
f.write(soup.prettify("utf-8"))
f.close()
# Create writer object
# writer = csv.writer(outputTxt, delimiter='|')
# Find the Block Main Navigation
divNavMain = soup.find("ul", {"id":"blq-nav-main"})
#print divNavMain.prettify("utf-8")
# Pull out the navigation elements from the blq-nav-main list
divElements = divNavMain.find_all("a")
# for i in divElements:
# print i.a
# print i.href
divListMain = divNavMain.find_all("li")
for i in divListMain:
print i.a
# print i.prettify("utf-8")
# Get Main Contents of the Page
divMain = soup.find("div", {"id":"main-content"})
# Get Page Last Update Date & Time
divUpdateSpan = soup.find("div", {"class":"index-date"})
divUpdateDate = divUpdateSpan.find("span", {"class":"date"})
divUpdateTime = divUpdateSpan.find("span", {"class":"time"})
print divUpdateDate.get_text() + ' ' + divUpdateTime.get_text()
print outputTxt
ts = datetime.datetime.now().strftime("%H:%M:%S")
ds = datetime.datetime.now().strftime("%Y-%m-%d")
# Output to File Header
with open('Update.txt', "a") as f:
f.write(ds + '|' + ts + '|' + divUpdateDate.get_text() + '|' + divUpdateTime.get_text() + '|' + soup.title.get_text() +'\n')
f.close()
| rainier-m/python-soccer | parseBBC.py | Python | gpl-2.0 | 1,877 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Phillip Gentry <phillip@cx.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: github_hooks
short_description: Manages github service hooks.
description:
- Adds service hooks and removes service hooks that have an error status.
version_added: "1.4"
options:
user:
description:
- Github username.
required: true
oauthkey:
description:
- The oauth key provided by github. It can be found/generated on github under "Edit Your Profile" >> "Applications" >> "Personal Access Tokens"
required: true
repo:
description:
      - "This is the API url for the repository you want to manage hooks for. It should be in the form of: https://api.github.com/repos/user:/repo:. Note this is different from the normal repo url."
required: true
hookurl:
description:
- When creating a new hook, this is the url that you want github to post to. It is only required when creating a new hook.
required: false
action:
description:
- This tells the githooks module what you want it to do.
required: true
choices: [ "create", "cleanall", "list", "clean504" ]
validate_certs:
description:
- If C(no), SSL certificates for the target repo will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
required: false
default: 'yes'
choices: ['yes', 'no']
content_type:
description:
- Content type to use for requests made to the webhook
required: false
default: 'json'
choices: ['json', 'form']
author: "Phillip Gentry, CX Inc (@pcgentry)"
'''
EXAMPLES = '''
# Example creating a new service hook. It ignores duplicates.
- github_hooks:
action: create
hookurl: http://11.111.111.111:2222
user: '{{ gituser }}'
oauthkey: '{{ oauthkey }}'
repo: https://api.github.com/repos/pcgentry/Github-Auto-Deploy
# Cleaning all hooks for this repo that had an error on the last update. Since this works for all hooks in a repo it is probably best that this would be called from a handler.
- github_hooks:
action: cleanall
user: '{{ gituser }}'
oauthkey: '{{ oauthkey }}'
repo: '{{ repo }}'
delegate_to: localhost
'''
try:
import json
except ImportError:
try:
import simplejson as json
except ImportError:
# Let snippet from module_utils/basic.py return a proper error in this case
pass
import base64
def _list(module, hookurl, oauthkey, repo, user):
url = "%s/hooks" % repo
auth = base64.encodestring('%s:%s' % (user, oauthkey)).replace('\n', '')
headers = {
'Authorization': 'Basic %s' % auth,
}
response, info = fetch_url(module, url, headers=headers)
if info['status'] != 200:
return False, ''
else:
return False, response.read()
def _clean504(module, hookurl, oauthkey, repo, user):
    current_hooks = _list(module, hookurl, oauthkey, repo, user)[1]
decoded = json.loads(current_hooks)
for hook in decoded:
if hook['last_response']['code'] == 504:
# print "Last response was an ERROR for hook:"
# print hook['id']
_delete(module, hookurl, oauthkey, repo, user, hook['id'])
return 0, current_hooks
def _cleanall(module, hookurl, oauthkey, repo, user):
    current_hooks = _list(module, hookurl, oauthkey, repo, user)[1]
decoded = json.loads(current_hooks)
for hook in decoded:
if hook['last_response']['code'] != 200:
# print "Last response was an ERROR for hook:"
# print hook['id']
_delete(module, hookurl, oauthkey, repo, user, hook['id'])
return 0, current_hooks
def _create(module, hookurl, oauthkey, repo, user, content_type):
url = "%s/hooks" % repo
values = {
"active": True,
"name": "web",
"config": {
"url": "%s" % hookurl,
"content_type": "%s" % content_type
}
}
data = json.dumps(values)
auth = base64.encodestring('%s:%s' % (user, oauthkey)).replace('\n', '')
headers = {
'Authorization': 'Basic %s' % auth,
}
response, info = fetch_url(module, url, data=data, headers=headers)
if info['status'] != 200:
return 0, '[]'
else:
return 0, response.read()
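# For reference, with the example values from EXAMPLES above, the JSON body
# posted by _create() looks roughly like:
#   {"active": true, "name": "web",
#    "config": {"url": "http://11.111.111.111:2222", "content_type": "json"}}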
def _delete(module, hookurl, oauthkey, repo, user, hookid):
url = "%s/hooks/%s" % (repo, hookid)
auth = base64.encodestring('%s:%s' % (user, oauthkey)).replace('\n', '')
headers = {
'Authorization': 'Basic %s' % auth,
}
    response, info = fetch_url(module, url, headers=headers, method='DELETE')
return response.read()
def main():
module = AnsibleModule(
argument_spec=dict(
action=dict(required=True, choices=['list','clean504','cleanall','create']),
hookurl=dict(required=False),
oauthkey=dict(required=True, no_log=True),
repo=dict(required=True),
user=dict(required=True),
validate_certs=dict(default='yes', type='bool'),
content_type=dict(default='json', choices=['json', 'form']),
)
)
action = module.params['action']
hookurl = module.params['hookurl']
oauthkey = module.params['oauthkey']
repo = module.params['repo']
user = module.params['user']
content_type = module.params['content_type']
if action == "list":
(rc, out) = _list(module, hookurl, oauthkey, repo, user)
if action == "clean504":
(rc, out) = _clean504(module, hookurl, oauthkey, repo, user)
if action == "cleanall":
(rc, out) = _cleanall(module, hookurl, oauthkey, repo, user)
if action == "create":
(rc, out) = _create(module, hookurl, oauthkey, repo, user, content_type)
if rc != 0:
module.fail_json(msg="failed", result=out)
module.exit_json(msg="success", result=out)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
if __name__ == '__main__':
main()
| GustavoHennig/ansible | lib/ansible/modules/source_control/github_hooks.py | Python | gpl-3.0 | 6,852 |
# basic plugin
# example by Ryan Gaus
# from
from base import *
"""
A sample parser, that should explain everything nicely
"""
class sample_parser(parser):
# tells the main program if it should use this plugin to parse its query
# the query is contained within self.query
def validate(self):
return "sample" in self.query
# the real code of the plugin, this should parse the incoming
# query (again, self.query) and return status information
def parse(self, parent):
# the response
self.resp["text"] = "This is a sample response!"
# an id: give your plugin a unique one, it is used to distinguish
# which plugins made queries
self.resp["type"] = "sample"
# the query went fine! Also, STATUS_ERR for a general error, or
# STATUS_NO_HIT if the program couldn't find what it was looking for
self.resp["status"] = STATUS_OK
# return the query
        return self.resp
| 1egoman/qmcstats | sample.py | Python | mit | 923
import warnings
from chempy.util.testing import requires
from chempy.units import units_library
from ..water_diffusivity_holz_2000 import water_self_diffusion_coefficient as w_sd
def test_water_self_diffusion_coefficient():
warnings.filterwarnings("error")
assert abs(w_sd(273.15 + 0.0) - 1.099e-9) < 0.027e-9
assert abs(w_sd(273.15 + 4.0) - 1.261e-9) < 0.011e-9
assert abs(w_sd(273.15 + 10) - 1.525e-9) < 0.007e-9
assert abs(w_sd(273.15 + 15) - 1.765e-9) < 0.006e-9
assert abs(w_sd(273.15 + 20) - 2.023e-9) < 0.001e-9
assert abs(w_sd(273.15 + 25) - 2.299e-9) < 0.001e-9
assert abs(w_sd(273.15 + 30) - 2.594e-9) < 0.001e-9
assert abs(w_sd(273.15 + 35) - 2.907e-9) < 0.004e-9
try:
w_sd(1)
except UserWarning:
pass # good warning raised
else:
raise
warnings.resetwarnings()
@requires(units_library)
def test_water_self_diffusion_coefficient__units():
from chempy.units import allclose, linspace, default_units as u
unit = u.m ** 2 / u.s
assert allclose(
1e9 * w_sd(298.15 * u.K, units=u), 2.299 * unit, rtol=1e-3, atol=1e-8 * unit
)
assert allclose(
1e9 * w_sd(linspace(297, 299) * u.K, units=u),
2.299 * u.m ** 2 / u.s,
rtol=5e-2,
atol=1e-2 * unit,
)
| bjodah/aqchem | chempy/properties/tests/test_water_diffusivity_holz_2000.py | Python | bsd-2-clause | 1,297 |
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Build input pipelines that span TPU pods for optimal performance.
It's common to batch sequences according to their length. Unfortunately, a
naive scaling of such an input pipeline across a pod will result in each host
choosing the sequence length bucket independently. Concretely, host A may select
sequences of a short length, while host B may select sequences of a very long
length. Because every step involves a blocking all-reduce phase, host A must
wait for host B.
The input pipeline designed within synchronizes the hosts such that they all
select a sequence length bucket of the same length, resulting in up to 50%
performance improvements across large TPU pod slices.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import tensorflow as tf
from tensorflow.python.data.ops import multi_device_iterator_ops
from tpu import low_level_runner
from tpu.utils import vocab_utils
class DistributedPipeline(tf.train.SessionRunHook):
"""DistributedPipeline encapsulates constructing the distributed pipeline.
  We use a class for two reasons. First, the pipeline must be constructed in a
  graph managed by [TPU]Estimator, so we cannot pre-construct it with a normal
  function - Estimator wants to manage the graph itself. Second, we need to
  capture the iterator initializer and pass it to the train call to
  TPUEstimator while simultaneously passing ourselves as the input function.
"""
def __init__(self, hparams, num_hosts):
"""Constructs a DistributedPipeline.
Args:
hparams: The hparams object for this model.
num_hosts: The number of hosts in the slice of the TPU pod.
    Raises:
ValueError: If the passed values are invalid.
"""
self._hparams = hparams
self._num_hosts = num_hosts
self._iterator = None
self._outputs = None
global_batch_size = hparams.batch_size
if global_batch_size % num_hosts != 0:
raise ValueError(
"global_batch_size (%s) must be a multiple of num_hosts (%s)" %
(global_batch_size, num_hosts))
def after_create_session(self, session, coord):
del coord
start = time.time()
session.run(self._iterator.initializer)
tf.logging.info("Initialized multi-host dataset iterators in %d seconds",
time.time() - start)
def iterator(self):
return self._iterator
def __call__(self, params):
if not self._outputs:
self._iterator = _make_distributed_pipeline(self._hparams,
self._num_hosts)
self._outputs = self._iterator.get_next()
if "context" in params:
current_host = params["context"].current_input_fn_deployment()[1]
elif "dataset_index" in params:
current_host = params["dataset_index"]
else:
raise ValueError('Expect "context" or "dataset_index" in params.')
return self._outputs[current_host]
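# A rough sketch of how this class is meant to be wired up (estimator creation
# and hparams are assumed to exist elsewhere): the same object serves as both
# the input_fn and a training hook, so that after_create_session() can
# initialize the multi-host iterator that __call__() reads from.
#
#   pipeline = DistributedPipeline(hparams, num_hosts)
#   estimator.train(input_fn=pipeline, hooks=[pipeline], max_steps=train_steps)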
def _make_distributed_pipeline(hparams, num_hosts):
"""Makes the distributed input pipeline.
make_distributed_pipeline must be used in the PER_HOST_V1 configuration.
Note: we return both the input function and the hook because
MultiDeviceIterator is not compatible with Estimator / TPUEstimator.
Args:
hparams: The hyperparameters to use.
num_hosts: The number of hosts we're running across.
Returns:
A MultiDeviceIterator.
"""
# TODO: Merge with the original copy in iterator_utils.py.
# pylint: disable=g-long-lambda,line-too-long
global_batch_size = hparams.batch_size
if global_batch_size % num_hosts != 0:
raise ValueError(
"global_batch_size (%s) must be a multiple of num_hosts (%s)" %
(global_batch_size, num_hosts))
# Optionally choose from `choose_buckets` buckets simultaneously.
if hparams.choose_buckets:
window_batch_size = int(global_batch_size / hparams.choose_buckets)
else:
window_batch_size = global_batch_size
per_host_batch_size = global_batch_size / num_hosts
output_buffer_size = global_batch_size * 50
resolver = low_level_runner.get_resolver(hparams)
if resolver:
job_name = resolver.get_job_name() or hparams.tpu_job_name or "tpu_worker"
if hparams.master == "local":
job_name = "localhost"
with tf.device("/job:%s/task:0/cpu:0" % job_name):
# From estimator.py
src_file = "%s.%s" % (hparams.train_prefix, hparams.src)
tgt_file = "%s.%s" % (hparams.train_prefix, hparams.tgt)
src_vocab_file = hparams.src_vocab_file
src_vocab_table, tgt_vocab_table = vocab_utils.create_vocab_tables(
src_vocab_file)
src_dataset = tf.data.TextLineDataset(src_file).prefetch(output_buffer_size)
tgt_dataset = tf.data.TextLineDataset(tgt_file).prefetch(output_buffer_size)
# Define local variables that are parameters in iterator_utils.make_input_fn
sos = hparams.sos
eos = hparams.eos
random_seed = hparams.random_seed
num_buckets = hparams.num_buckets
src_max_len = hparams.src_max_len
tgt_max_len = hparams.tgt_max_len
num_parallel_calls = 100 # constant in iterator_utils.py
skip_count = None # constant in estimator.py
reshuffle_each_iteration = True # constant in estimator.py
filter_oversized_sequences = True # constant in estimator.py
# From iterator_utils.py
src_eos_id = tf.cast(src_vocab_table.lookup(tf.constant(eos)), tf.int32)
tgt_sos_id = tf.cast(tgt_vocab_table.lookup(tf.constant(sos)), tf.int32)
tgt_eos_id = tf.cast(tgt_vocab_table.lookup(tf.constant(eos)), tf.int32)
src_tgt_dataset = tf.data.Dataset.zip((src_dataset, tgt_dataset))
if skip_count is not None:
src_tgt_dataset = src_tgt_dataset.skip(skip_count)
def map_fn_1(src, tgt):
src = tf.string_split([src]).values
tgt = tf.string_split([tgt]).values
src_size = tf.size(src)
tgt_size = tf.size(tgt)
size_ok_bool = tf.logical_and(src_size > 0, tgt_size > 0)
if filter_oversized_sequences:
oversized = tf.logical_and(src_size < src_max_len,
tgt_size < tgt_max_len)
size_ok_bool = tf.logical_and(size_ok_bool, oversized)
if src_max_len:
src = src[:src_max_len]
if tgt_max_len:
tgt = tgt[:tgt_max_len]
return (src, tgt, size_ok_bool)
src_tgt_bool_dataset = src_tgt_dataset.map(
map_fn_1, num_parallel_calls=num_parallel_calls)
src_tgt_bool_dataset = src_tgt_bool_dataset.filter(
lambda src, tgt, filter_bool: filter_bool)
def map_fn_2(src, tgt, unused_filter_bool):
src = tf.cast(src_vocab_table.lookup(src), tf.int32)
tgt = tf.cast(tgt_vocab_table.lookup(tgt), tf.int32)
# Create a tgt_input prefixed with <sos> and a tgt_output suffixed with <eos>.
tgt_in = tf.concat(([tgt_sos_id], tgt), 0)
tgt_out = tf.concat((tgt, [tgt_eos_id]), 0)
# Add in sequence lengths.
src_len = tf.size(src)
tgt_len = tf.size(tgt_in)
return src, tgt_in, tgt_out, src_len, tgt_len
# Convert the word strings to ids. Word strings that are not in the
# vocab get the lookup table's default_value integer.
src_tgt_dataset = src_tgt_bool_dataset.map(
map_fn_2, num_parallel_calls=num_parallel_calls)
def map_fn_3(src, tgt_in, tgt_out, src_len, tgt_len): # pylint: disable=missing-docstring
# Pairs with length [0, bucket_width) go to bucket 0, length
# [bucket_width, 2 * bucket_width) go to bucket 1, etc. Pairs with length
# over ((num_bucket-1) * bucket_width) words all go into the last bucket.
if src_max_len:
bucket_width = (src_max_len + num_buckets - 1) // num_buckets
else:
bucket_width = 10
# Bucket sentence pairs by the length of their source sentence and target
# sentence.
bucket_id = tf.maximum(src_len // bucket_width, tgt_len // bucket_width)
return tf.to_int64(tf.minimum(
num_buckets, bucket_id)), src, tgt_in, tgt_out, src_len, tgt_len
src_tgt_dataset = src_tgt_dataset.map(
map_fn_3, num_parallel_calls=num_parallel_calls)
src_tgt_dataset = src_tgt_dataset.prefetch(output_buffer_size)
src_tgt_dataset = src_tgt_dataset.cache()
src_tgt_dataset = src_tgt_dataset.shuffle(
output_buffer_size, random_seed, reshuffle_each_iteration).repeat()
# Bucket by source sequence length (buckets for lengths 0-9, 10-19, ...)
def batching_func(x):
return x.padded_batch(
window_batch_size,
# The first three entries are the source and target line rows;
# these have unknown-length vectors. The last two entries are
# the source and target row sizes; these are scalars.
padded_shapes=(
tf.TensorShape([]), # key
tf.TensorShape([src_max_len]), # src
tf.TensorShape([tgt_max_len]), # tgt_input
tf.TensorShape([tgt_max_len]), # tgt_output
tf.TensorShape([]), # src_len
tf.TensorShape([])), # tgt_len
# Pad the source and target sequences with eos tokens.
# (Though notice we don't generally need to do this since
# later on we will be masking out calculations past the true sequence.
padding_values=(
tf.to_int64(0), # key
src_eos_id, # src
tgt_eos_id, # tgt_input
tgt_eos_id, # tgt_output
0, # src_len -- unused
0),
# For TPU, must set drop_remainder to True or batch size will be None
drop_remainder=True) # tgt_len -- unused
def key_func(key, unused_1, unused_2, unused_3, unused_4, unused_5):
return key
def reduce_func(unused_key, windowed_data):
return batching_func(windowed_data)
if num_buckets > 1:
batched_dataset = src_tgt_dataset.apply(
tf.contrib.data.group_by_window(
key_func=key_func,
reduce_func=reduce_func,
window_size=window_batch_size))
else:
batched_dataset = batching_func(src_tgt_dataset)
batched_dataset = batched_dataset.map(
lambda unused_key, src, tgt_in, tgt_out, source_size, tgt_in_size: (
{"source": src,
"target_input": tgt_in,
"target_output": tgt_out,
"source_sequence_length": source_size,
"target_sequence_length": tgt_in_size}))
re_batched_dataset = batched_dataset.unbatch().batch(
int(per_host_batch_size), drop_remainder=True)
output_devices = [
"/job:%s/task:%d/cpu:0" % (job_name, i) for i in range(num_hosts)
]
options = tf.data.Options()
options.experimental_optimization.filter_fusion = True
options.experimental_optimization.map_and_filter_fusion = True
re_batched_dataset = re_batched_dataset.with_options(options)
multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator(
dataset=re_batched_dataset,
devices=output_devices,
max_buffer_size=10,
prefetch_buffer_size=10,
source_device=("/job:%s/task:0/cpu:0" % job_name))
return multi_device_iterator
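# A framework-free sketch of the idea implemented above: one central pipeline
# picks the sequence-length bucket for each step, and every host receives a
# shard of that same global batch, so no host is left waiting on a longer
# bucket during the all-reduce. All names below are illustrative only; the
# sketch itself uses nothing beyond the standard library.
if __name__ == "__main__":
  import random
  def _toy_central_batches(examples, num_buckets, global_batch, seed=0):
    """Yield (bucket_id, batch) pairs chosen once for every host."""
    rng = random.Random(seed)
    buckets = [[] for _ in range(num_buckets)]
    for ex in examples:
      buckets[min(len(ex) // 10, num_buckets - 1)].append(ex)
    while True:
      bucket_id = rng.randrange(num_buckets)
      if len(buckets[bucket_id]) >= global_batch:
        batch = buckets[bucket_id][:global_batch]
        buckets[bucket_id] = buckets[bucket_id][global_batch:]
        yield bucket_id, batch
  def _toy_shard(batch, num_hosts, host_id):
    per_host = len(batch) // num_hosts
    return batch[host_id * per_host:(host_id + 1) * per_host]
  data_rng = random.Random(42)
  toy_examples = [[0] * data_rng.randint(1, 40) for _ in range(512)]
  batches = _toy_central_batches(toy_examples, num_buckets=4, global_batch=8)
  bucket_id, batch = next(batches)
  for host_id in range(2):
    shard = _toy_shard(batch, num_hosts=2, host_id=host_id)
    print("host", host_id, "got bucket", bucket_id, "with", len(shard), "examples")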
| mlperf/inference_results_v0.5 | closed/Google/code/gnmt/tpu-gnmt/home/kbuilder/mlperf-inference/google3/third_party/mlperf/inference/gnmt/nmt/tpu/distributed_iterator_utils.py | Python | apache-2.0 | 11,852 |
# ############################################################################
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2020 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
# ############################################################################
from django.test import SimpleTestCase
from base.utils import operator
class TestIsYearLower(SimpleTestCase):
def test_should_return_false_when_base_year_is_none(self):
self.assertFalse(
operator.is_year_lower(None, 2025)
)
def test_should_return_true_when_year_to_compare_to_is_none(self):
self.assertTrue(
operator.is_year_lower(2029, None)
)
def test_should_return_true_when_base_year_is_inferior_to_other_year(self):
self.assertTrue(
operator.is_year_lower(2017, 2029)
)
def test_should_return_false_when_base_year_is_equal_to_other_year(self):
self.assertFalse(
operator.is_year_lower(2017, 2017)
)
def test_should_return_false_when_base_year_is_greater_to_other_year(self):
self.assertFalse(
operator.is_year_lower(2019, 2017)
)
| uclouvain/osis | base/tests/utils/test_operator.py | Python | agpl-3.0 | 2,143 |
import glob
import os
import sys
from typing import Any, Dict, Iterator, List
from warnings import warn
import setuptools # type: ignore
from . import api
from .settings import DEFAULT_CONFIG
class ISortCommand(setuptools.Command): # type: ignore
"""The :class:`ISortCommand` class is used by setuptools to perform
    import checks on registered modules.
"""
description = "Run isort on modules registered in setuptools"
user_options: List[Any] = []
def initialize_options(self) -> None:
default_settings = vars(DEFAULT_CONFIG).copy()
for key, value in default_settings.items():
setattr(self, key, value)
def finalize_options(self) -> None:
"""Get options from config files."""
self.arguments: Dict[str, Any] = {} # skipcq: PYL-W0201
self.arguments["settings_path"] = os.getcwd()
def distribution_files(self) -> Iterator[str]:
"""Find distribution packages."""
# This is verbatim from flake8
if self.distribution.packages: # pragma: no cover
package_dirs = self.distribution.package_dir or {}
for package in self.distribution.packages:
pkg_dir = package
if package in package_dirs:
pkg_dir = package_dirs[package]
elif "" in package_dirs: # pragma: no cover
pkg_dir = package_dirs[""] + os.path.sep + pkg_dir
yield pkg_dir.replace(".", os.path.sep)
if self.distribution.py_modules:
for filename in self.distribution.py_modules:
yield "%s.py" % filename
# Don't miss the setup.py file itself
yield "setup.py"
def run(self) -> None:
arguments = self.arguments
wrong_sorted_files = False
for path in self.distribution_files():
for python_file in glob.iglob(os.path.join(path, "*.py")):
try:
if not api.check_file(python_file, **arguments):
wrong_sorted_files = True # pragma: no cover
except OSError as error: # pragma: no cover
warn(f"Unable to parse file {python_file} due to {error}")
if wrong_sorted_files:
sys.exit(1) # pragma: no cover
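# A sketch of one way a downstream project could wire this command into its own
# setup.py; the project name and module below are placeholders:
#
#   from setuptools import setup
#   from isort.setuptools_commands import ISortCommand
#
#   setup(
#       name="myproject",
#       py_modules=["mymodule"],
#       cmdclass={"isort": ISortCommand},
#   )
#
# after which `python setup.py isort` runs the check over the files reported by
# distribution_files().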
| PyCQA/isort | isort/setuptools_commands.py | Python | mit | 2,299 |
from ..broker import Broker
class FailOverConfigurationBroker(Broker):
controller = "fail_over_configurations"
def get_config(self, **kwargs):
"""Get the failover configuration for the specified unit.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
        :param unit_id: Unit ID. When not set in an OC environment, the API request returns the failover configuration of all units.
:type unit_id: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return failover_progress: The id of the failover action output file.
:rtype failover_progress: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return fail_over_configuration: Text (json,xml or csv) interpretation of current failover configuration.
:rtype fail_over_configuration: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return sync_ok: Success indicator of sync to neighbour operation.
:rtype sync_ok: Boolean
"""
return self.api_request(self._get_method_fullname("get_config"), kwargs)
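    # A sketch of a typical call, assuming `broker` is an instance of this
    # class obtained from an already authenticated NetMRI API client (client
    # construction is omitted because it depends on the client library in use):
    #
    #   config = broker.get_config(unit_id=0)
    #   print(config)   # failover configuration plus the sync_ok indicator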
def action_status(self, **kwargs):
"""Shows failover action progress for specified unit.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param unit_id: Unit ID. Should be specified in OC/Collector environment. Default value is 0.
:type unit_id: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param id: The id of the session output file.
:type id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param read: File offset to show
:type read: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return read: Offset in bytes from the start of the file, to be used in the next get_progress call, in order to retrieve the next lines of the output.
:rtype read: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return output: Result of the failover action.
:rtype output: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return status: Status of the remaining output data to dump: 0 - no data to dump, 1 - more data is available
:rtype status: Integer
"""
return self.api_request(self._get_method_fullname("action_status"), kwargs)
def action(self, **kwargs):
"""Performs the failover action (enable or disable) for the specified unit.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param unit_id: Unit ID. Should be specified in OC/Collector environment. Default value is 0.
:type unit_id: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param name: Failover action name, possible values: 'enable', 'disable'
:type name: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return failover_progress: The internal id of the failover action progress.
:rtype failover_progress: String
"""
return self.api_request(self._get_method_fullname("action"), kwargs)
def failover(self, **kwargs):
"""Switches the specified unit to the secondary role.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param unit_id: Unit ID. Should be specified in OC/Collector environment. Default value is 0.
:type unit_id: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return status: Text (json,xml or csv) interpretation of the operation result. Contains just unit_id and current status.
:rtype status: String
"""
return self.api_request(self._get_method_fullname("failover"), kwargs)
def set_config(self, **kwargs):
"""Sets the failover configuration for the specified unit.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param unit_id: Unit ID. Should be specified in OC/Collector environment. Default value is 0.
:type unit_id: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param primary_index: Primary index. It indicates who is primary now (1-first, 2-second).
:type primary_index: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param virtual_ip: Virtual IP address.
:type virtual_ip: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param virtual_hostname: Virtual hostname.
:type virtual_hostname: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param replication_direct_connect: Indicates if replication uses a direct connection through HA port. Default value is true.
:type replication_direct_connect: Boolean
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param replication_port: Replication port. Required for non direct connection replication.
:type replication_port: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param first_replication_ip: First replication IP. Required for non direct connection replication.
:type first_replication_ip: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param first_management_ip: First management IP. Required for secondary peer.
:type first_management_ip: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param first_management_hostname: First management hostname. Required for secondary peer.
:type first_management_hostname: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param first_replication_subnet: First replication subnet. Required for non direct connection replication.
:type first_replication_subnet: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param second_replication_ip: Second replication IP. Required for non direct connection replication.
:type second_replication_ip: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param second_management_ip: Second management IP. Required for secondary peer.
:type second_management_ip: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param second_management_hostname: Second management hostname. Required for secondary peer.
:type second_management_hostname: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param second_replication_subnet: Second replication subnet. Required for non direct connection replication.
:type second_replication_subnet: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return fail_over_configuration: Text (json,xml or csv) interpretation of current failover configuration for the specified unit.
:rtype fail_over_configuration: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return sync_ok: Success indicator of sync to neighbour operation.
:rtype sync_ok: Boolean
"""
return self.api_request(self._get_method_fullname("set_config"), kwargs)
def status(self, **kwargs):
"""Get detailed failover replication and connection status for the specified unit.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param unit_id: Unit ID. Should be specified in OC/Collector environment. Default value is 0.
:type unit_id: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return status: JSON structure with the status information.
:rtype status: String
"""
return self.api_request(self._get_method_fullname("status"), kwargs)
    def reset_config_for_collector(self, **kwargs):
        """Drops the failover configuration stored on the collector so that it is re-fetched the next time the failover preferences are opened.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param unit_id: Unit ID. Should be specified in OC/Collector environment
:type unit_id: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return status: Status of the operation
:rtype status: String
"""
return self.api_request(self._get_method_fullname("reset_config_for_collector"), kwargs)
| infobloxopen/infoblox-netmri | infoblox_netmri/api/broker/v3_8_0/fail_over_configuration_broker.py | Python | apache-2.0 | 12,204 |
import libtorrent as lt
from core.torrent import Torrent
from core.export import create_exporter
class Session(lt.session):
"""Libtorrent Session wrapper.
This class provides several methods around the libtorrent session class.
"""
# Create export decorator
export = create_exporter()
def __init__(self, config):
"""Constructs a torrent session from a ConfigObj."""
super(Session, self).__init__(config["libtorrent"])
@export
def listen_on(self, port_min, port_max):
'''
Instructs libtorrent to listen on a port.
Params:
- port_min
- port_max
'''
if port_min > port_max:
            raise ValueError("Min port has to be lower than or equal to max port")
        super(Session, self).listen_on(port_min, port_max)
def find_torrent(self, info_hash):
"""Looks for a torrent with the given info-hash.
Returns None otherwise.
"""
info_hash = string_to_hash(info_hash)
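        # NOTE: string_to_hash (used above) is assumed to be a project-level
        # helper (it is not imported in this module) that converts a hex
        # digest string into the lt.sha1_hash object libtorrent expects.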
torrent = super(Session, self).find_torrent(info_hash)
if torrent.is_valid():
return Torrent.create(torrent)
else:
return None
def get_torrents(self):
"""Returns a list of all the torrents currently in the session."""
torrents = super(Session,self).get_torrents()
return [ Torrent.create(t) for t in torrents ]
@export
def get_torrents_export(self):
"""Returns a list of the info-hashes of all the torrents currently in the session."""
torrents = self.get_torrents()
return [ t.info_hash() for t in torrents ]
def add_torrent(self, params):
"""Add a torrent to the session.
Params:
- params: dictionary containing all parameters
"""
torrent = super(Session, self).add_torrent(params)
if torrent.is_valid():
return Torrent.create(torrent)
else:
return None
@export
def add_torrent_export(self, params):
"""See add_torrent."""
torrent = self.add_torrent(params)
if torrent:
return torrent.info_hash()
else:
return None
@export
    def is_listening(self):
"""Checks whether or not the session has successfully opened a listening port."""
return super(Session,self).is_listening()
@export
    def listen_port(self):
"""Returns the port we ended up listening on."""
return super(Session,self).listen_port()
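# Illustrative usage (a sketch; the config layout and the add_torrent parameter
# names follow common libtorrent conventions and are assumptions, not part of
# this module):
#
#   session = Session({"libtorrent": {}})
#   session.listen_on(6881, 6891)
#   info = lt.torrent_info("example.torrent")
#   torrent = session.add_torrent({"ti": info, "save_path": "./downloads"})
#   print(session.get_torrents_export())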
| DenBrahe/wetsuit | wetsuit/core/session.py | Python | apache-2.0 | 2,514 |
"""
Objects for dealing with Laguerre series.
This module provides a number of objects (mostly functions) useful for
dealing with Laguerre series, including a `Laguerre` class that
encapsulates the usual arithmetic operations. (General information
on how this module represents and works with such polynomials is in the
docstring for its "parent" sub-package, `numpy.polynomial`).
Constants
---------
- `lagdomain` -- Laguerre series default domain, [0,1].
- `lagzero` -- Laguerre series that evaluates identically to 0.
- `lagone` -- Laguerre series that evaluates identically to 1.
- `lagx` -- Laguerre series for the identity map, ``f(x) = x``.
Arithmetic
----------
- `lagmulx` -- multiply a Laguerre series in ``P_i(x)`` by ``x``.
- `lagadd` -- add two Laguerre series.
- `lagsub` -- subtract one Laguerre series from another.
- `lagmul` -- multiply two Laguerre series.
- `lagdiv` -- divide one Laguerre series by another.
- `lagval` -- evaluate a Laguerre series at given points.
- `lagval2d` -- evaluate a 2D Laguerre series at given points.
- `lagval3d` -- evaluate a 3D Laguerre series at given points.
- `laggrid2d` -- evaluate a 2D Laguerre series on a Cartesian product.
- `laggrid3d` -- evaluate a 3D Laguerre series on a Cartesian product.
Calculus
--------
- `lagder` -- differentiate a Laguerre series.
- `lagint` -- integrate a Laguerre series.
Misc Functions
--------------
- `lagfromroots` -- create a Laguerre series with specified roots.
- `lagroots` -- find the roots of a Laguerre series.
- `lagvander` -- Vandermonde-like matrix for Laguerre polynomials.
- `lagvander2d` -- Vandermonde-like matrix for 2D power series.
- `lagvander3d` -- Vandermonde-like matrix for 3D power series.
- `laggauss` -- Gauss-Laguerre quadrature, points and weights.
- `lagweight` -- Laguerre weight function.
- `lagcompanion` -- symmetrized companion matrix in Laguerre form.
- `lagfit` -- least-squares fit returning a Laguerre series.
- `lagtrim` -- trim leading coefficients from a Laguerre series.
- `lagline` -- Laguerre series of given straight line.
- `lag2poly` -- convert a Laguerre series to a polynomial.
- `poly2lag` -- convert a polynomial to a Laguerre series.
Classes
-------
- `Laguerre` -- A Laguerre series class.
See also
--------
`numpy.polynomial`
"""
import numpy as np
import numpy.linalg as la
from . import polyutils as pu
import warnings
from .polytemplate import polytemplate
__all__ = ['lagzero', 'lagone', 'lagx', 'lagdomain', 'lagline',
'lagadd', 'lagsub', 'lagmulx', 'lagmul', 'lagdiv', 'lagpow',
'lagval', 'lagder', 'lagint', 'lag2poly', 'poly2lag', 'lagfromroots',
'lagvander', 'lagfit', 'lagtrim', 'lagroots', 'Laguerre', 'lagval2d',
'lagval3d', 'laggrid2d', 'laggrid3d', 'lagvander2d', 'lagvander3d',
'lagcompanion', 'laggauss', 'lagweight']
lagtrim = pu.trimcoef
def poly2lag(pol) :
"""
poly2lag(pol)
Convert a polynomial to a Laguerre series.
Convert an array representing the coefficients of a polynomial (relative
to the "standard" basis) ordered from lowest degree to highest, to an
array of the coefficients of the equivalent Laguerre series, ordered
from lowest to highest degree.
Parameters
----------
pol : array_like
1-D array containing the polynomial coefficients
Returns
-------
c : ndarray
1-D array containing the coefficients of the equivalent Laguerre
series.
See Also
--------
lag2poly
Notes
-----
The easy way to do conversions between polynomial basis sets
is to use the convert method of a class instance.
Examples
--------
>>> from numpy.polynomial.laguerre import poly2lag
>>> poly2lag(np.arange(4))
array([ 23., -63., 58., -18.])
"""
[pol] = pu.as_series([pol])
deg = len(pol) - 1
res = 0
for i in range(deg, -1, -1) :
res = lagadd(lagmulx(res), pol[i])
return res
def lag2poly(c) :
"""
Convert a Laguerre series to a polynomial.
Convert an array representing the coefficients of a Laguerre series,
ordered from lowest degree to highest, to an array of the coefficients
of the equivalent polynomial (relative to the "standard" basis) ordered
from lowest to highest degree.
Parameters
----------
c : array_like
1-D array containing the Laguerre series coefficients, ordered
from lowest order term to highest.
Returns
-------
pol : ndarray
1-D array containing the coefficients of the equivalent polynomial
(relative to the "standard" basis) ordered from lowest order term
to highest.
See Also
--------
poly2lag
Notes
-----
The easy way to do conversions between polynomial basis sets
is to use the convert method of a class instance.
Examples
--------
>>> from numpy.polynomial.laguerre import lag2poly
>>> lag2poly([ 23., -63., 58., -18.])
array([ 0., 1., 2., 3.])
"""
from .polynomial import polyadd, polysub, polymulx
[c] = pu.as_series([c])
n = len(c)
if n == 1:
return c
else:
c0 = c[-2]
c1 = c[-1]
# i is the current degree of c1
for i in range(n - 1, 1, -1):
tmp = c0
c0 = polysub(c[i - 2], (c1*(i - 1))/i)
c1 = polyadd(tmp, polysub((2*i - 1)*c1, polymulx(c1))/i)
return polyadd(c0, polysub(c1, polymulx(c1)))
#
# These constant arrays are of integer type so as to be compatible
# with the widest range of other types, such as Decimal.
#
# Laguerre
lagdomain = np.array([0,1])
# Laguerre coefficients representing zero.
lagzero = np.array([0])
# Laguerre coefficients representing one.
lagone = np.array([1])
# Laguerre coefficients representing the identity x.
lagx = np.array([1, -1])
def lagline(off, scl) :
"""
Laguerre series whose graph is a straight line.
Parameters
----------
off, scl : scalars
The specified line is given by ``off + scl*x``.
Returns
-------
y : ndarray
This module's representation of the Laguerre series for
``off + scl*x``.
See Also
--------
polyline, chebline
Examples
--------
>>> from numpy.polynomial.laguerre import lagline, lagval
>>> lagval(0,lagline(3, 2))
3.0
>>> lagval(1,lagline(3, 2))
5.0
"""
if scl != 0 :
return np.array([off + scl, -scl])
else :
return np.array([off])
def lagfromroots(roots) :
"""
Generate a Laguerre series with given roots.
The function returns the coefficients of the polynomial
.. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n),
in Laguerre form, where the `r_n` are the roots specified in `roots`.
If a zero has multiplicity n, then it must appear in `roots` n times.
For instance, if 2 is a root of multiplicity three and 3 is a root of
multiplicity 2, then `roots` looks something like [2, 2, 2, 3, 3]. The
roots can appear in any order.
If the returned coefficients are `c`, then
.. math:: p(x) = c_0 + c_1 * L_1(x) + ... + c_n * L_n(x)
The coefficient of the last term is not generally 1 for monic
polynomials in Laguerre form.
Parameters
----------
roots : array_like
Sequence containing the roots.
Returns
-------
out : ndarray
1-D array of coefficients. If all roots are real then `out` is a
real array, if some of the roots are complex, then `out` is complex
even if all the coefficients in the result are real (see Examples
below).
See Also
--------
polyfromroots, legfromroots, chebfromroots, hermfromroots,
hermefromroots.
Examples
--------
>>> from numpy.polynomial.laguerre import lagfromroots, lagval
>>> coef = lagfromroots((-1, 0, 1))
>>> lagval((-1, 0, 1), coef)
array([ 0., 0., 0.])
>>> coef = lagfromroots((-1j, 1j))
>>> lagval((-1j, 1j), coef)
array([ 0.+0.j, 0.+0.j])
"""
if len(roots) == 0 :
return np.ones(1)
else :
[roots] = pu.as_series([roots], trim=False)
roots.sort()
p = [lagline(-r, 1) for r in roots]
n = len(p)
while n > 1:
m, r = divmod(n, 2)
tmp = [lagmul(p[i], p[i+m]) for i in range(m)]
if r:
tmp[0] = lagmul(tmp[0], p[-1])
p = tmp
n = m
return p[0]
def lagadd(c1, c2):
"""
Add one Laguerre series to another.
Returns the sum of two Laguerre series `c1` + `c2`. The arguments
are sequences of coefficients ordered from lowest order term to
highest, i.e., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Laguerre series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Array representing the Laguerre series of their sum.
See Also
--------
lagsub, lagmul, lagdiv, lagpow
Notes
-----
Unlike multiplication, division, etc., the sum of two Laguerre series
is a Laguerre series (without having to "reproject" the result onto
the basis set) so addition, just like that of "standard" polynomials,
is simply "component-wise."
Examples
--------
>>> from numpy.polynomial.laguerre import lagadd
>>> lagadd([1, 2, 3], [1, 2, 3, 4])
array([ 2., 4., 6., 4.])
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if len(c1) > len(c2) :
c1[:c2.size] += c2
ret = c1
else :
c2[:c1.size] += c1
ret = c2
return pu.trimseq(ret)
def lagsub(c1, c2):
"""
Subtract one Laguerre series from another.
Returns the difference of two Laguerre series `c1` - `c2`. The
sequences of coefficients are from lowest order term to highest, i.e.,
[1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Laguerre series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Of Laguerre series coefficients representing their difference.
See Also
--------
lagadd, lagmul, lagdiv, lagpow
Notes
-----
Unlike multiplication, division, etc., the difference of two Laguerre
series is a Laguerre series (without having to "reproject" the result
onto the basis set) so subtraction, just like that of "standard"
polynomials, is simply "component-wise."
Examples
--------
>>> from numpy.polynomial.laguerre import lagsub
>>> lagsub([1, 2, 3, 4], [1, 2, 3])
array([ 0., 0., 0., 4.])
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if len(c1) > len(c2) :
c1[:c2.size] -= c2
ret = c1
else :
c2 = -c2
c2[:c1.size] += c1
ret = c2
return pu.trimseq(ret)
def lagmulx(c):
"""Multiply a Laguerre series by x.
Multiply the Laguerre series `c` by x, where x is the independent
variable.
Parameters
----------
c : array_like
1-D array of Laguerre series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Array representing the result of the multiplication.
Notes
-----
The multiplication uses the recursion relationship for Laguerre
polynomials in the form
.. math::
xP_i(x) = (-(i + 1)*P_{i + 1}(x) + (2i + 1)P_{i}(x) - iP_{i - 1}(x))
Examples
--------
>>> from numpy.polynomial.laguerre import lagmulx
>>> lagmulx([1, 2, 3])
array([ -1., -1., 11., -9.])
"""
# c is a trimmed copy
[c] = pu.as_series([c])
# The zero series needs special treatment
if len(c) == 1 and c[0] == 0:
return c
prd = np.empty(len(c) + 1, dtype=c.dtype)
prd[0] = c[0]
prd[1] = -c[0]
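    # Distribute each c[i]*x*L_i(x) over the neighbouring coefficients using
    # the recurrence quoted in the Notes: -(i + 1) goes to degree i + 1,
    # (2*i + 1) stays at degree i, and -i goes to degree i - 1.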
for i in range(1, len(c)):
prd[i + 1] = -c[i]*(i + 1)
prd[i] += c[i]*(2*i + 1)
prd[i - 1] -= c[i]*i
return prd
def lagmul(c1, c2):
"""
Multiply one Laguerre series by another.
Returns the product of two Laguerre series `c1` * `c2`. The arguments
are sequences of coefficients, from lowest order "term" to highest,
e.g., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Laguerre series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Of Laguerre series coefficients representing their product.
See Also
--------
lagadd, lagsub, lagdiv, lagpow
Notes
-----
In general, the (polynomial) product of two C-series results in terms
that are not in the Laguerre polynomial basis set. Thus, to express
the product as a Laguerre series, it is necessary to "reproject" the
product onto said basis set, which may produce "unintuitive" (but
correct) results; see Examples section below.
Examples
--------
>>> from numpy.polynomial.laguerre import lagmul
>>> lagmul([1, 2, 3], [0, 1, 2])
array([ 8., -13., 38., -51., 36.])
"""
# s1, s2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if len(c1) > len(c2):
c = c2
xs = c1
else:
c = c1
xs = c2
if len(c) == 1:
c0 = c[0]*xs
c1 = 0
elif len(c) == 2:
c0 = c[0]*xs
c1 = c[1]*xs
else :
nd = len(c)
c0 = c[-2]*xs
c1 = c[-1]*xs
for i in range(3, len(c) + 1) :
tmp = c0
nd = nd - 1
c0 = lagsub(c[-i]*xs, (c1*(nd - 1))/nd)
c1 = lagadd(tmp, lagsub((2*nd - 1)*c1, lagmulx(c1))/nd)
return lagadd(c0, lagsub(c1, lagmulx(c1)))
def lagdiv(c1, c2):
"""
Divide one Laguerre series by another.
Returns the quotient-with-remainder of two Laguerre series
`c1` / `c2`. The arguments are sequences of coefficients from lowest
order "term" to highest, e.g., [1,2,3] represents the series
``P_0 + 2*P_1 + 3*P_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Laguerre series coefficients ordered from low to
high.
Returns
-------
[quo, rem] : ndarrays
Of Laguerre series coefficients representing the quotient and
remainder.
See Also
--------
lagadd, lagsub, lagmul, lagpow
Notes
-----
In general, the (polynomial) division of one Laguerre series by another
results in quotient and remainder terms that are not in the Laguerre
polynomial basis set. Thus, to express these results as a Laguerre
series, it is necessary to "reproject" the results onto the Laguerre
basis set, which may produce "unintuitive" (but correct) results; see
Examples section below.
Examples
--------
>>> from numpy.polynomial.laguerre import lagdiv
>>> lagdiv([ 8., -13., 38., -51., 36.], [0, 1, 2])
(array([ 1., 2., 3.]), array([ 0.]))
>>> lagdiv([ 9., -12., 38., -51., 36.], [0, 1, 2])
(array([ 1., 2., 3.]), array([ 1., 1.]))
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if c2[-1] == 0 :
raise ZeroDivisionError()
lc1 = len(c1)
lc2 = len(c2)
if lc1 < lc2 :
return c1[:1]*0, c1
elif lc2 == 1 :
return c1/c2[-1], c1[:1]*0
else :
quo = np.empty(lc1 - lc2 + 1, dtype=c1.dtype)
rem = c1
for i in range(lc1 - lc2, - 1, -1):
p = lagmul([0]*i + [1], c2)
q = rem[-1]/p[-1]
rem = rem[:-1] - q*p[:-1]
quo[i] = q
return quo, pu.trimseq(rem)
def lagpow(c, pow, maxpower=16) :
"""Raise a Laguerre series to a power.
Returns the Laguerre series `c` raised to the power `pow`. The
argument `c` is a sequence of coefficients ordered from low to high.
i.e., [1,2,3] is the series ``P_0 + 2*P_1 + 3*P_2.``
Parameters
----------
c : array_like
1-D array of Laguerre series coefficients ordered from low to
high.
pow : integer
Power to which the series will be raised
maxpower : integer, optional
Maximum power allowed. This is mainly to limit growth of the series
to unmanageable size. Default is 16
Returns
-------
coef : ndarray
Laguerre series of power.
See Also
--------
lagadd, lagsub, lagmul, lagdiv
Examples
--------
>>> from numpy.polynomial.laguerre import lagpow
>>> lagpow([1, 2, 3], 2)
array([ 14., -16., 56., -72., 54.])
"""
# c is a trimmed copy
[c] = pu.as_series([c])
power = int(pow)
if power != pow or power < 0 :
raise ValueError("Power must be a non-negative integer.")
elif maxpower is not None and power > maxpower :
raise ValueError("Power is too large")
elif power == 0 :
return np.array([1], dtype=c.dtype)
elif power == 1 :
return c
else :
# This can be made more efficient by using powers of two
# in the usual way.
prd = c
for i in range(2, power + 1) :
prd = lagmul(prd, c)
return prd
def lagder(c, m=1, scl=1, axis=0) :
"""
Differentiate a Laguerre series.
Returns the Laguerre series coefficients `c` differentiated `m` times
along `axis`. At each iteration the result is multiplied by `scl` (the
scaling factor is for use in a linear change of variable). The argument
`c` is an array of coefficients from low to high degree along each
axis, e.g., [1,2,3] represents the series ``1*L_0 + 2*L_1 + 3*L_2``
while [[1,2],[1,2]] represents ``1*L_0(x)*L_0(y) + 1*L_1(x)*L_0(y) +
2*L_0(x)*L_1(y) + 2*L_1(x)*L_1(y)`` if axis=0 is ``x`` and axis=1 is
``y``.
Parameters
----------
c: array_like
Array of Laguerre series coefficients. If `c` is multidimensional
the different axis correspond to different variables with the
degree in each axis given by the corresponding index.
m : int, optional
Number of derivatives taken, must be non-negative. (Default: 1)
scl : scalar, optional
Each differentiation is multiplied by `scl`. The end result is
multiplication by ``scl**m``. This is for use in a linear change of
variable. (Default: 1)
axis : int, optional
Axis over which the derivative is taken. (Default: 0).
.. versionadded:: 1.7.0
Returns
-------
der : ndarray
Laguerre series of the derivative.
See Also
--------
lagint
Notes
-----
In general, the result of differentiating a Laguerre series does not
resemble the same operation on a power series. Thus the result of this
function may be "unintuitive," albeit correct; see Examples section
below.
Examples
--------
>>> from numpy.polynomial.laguerre import lagder
>>> lagder([ 1., 1., 1., -3.])
array([ 1., 2., 3.])
>>> lagder([ 1., 0., 0., -4., 3.], m=2)
array([ 1., 2., 3.])
"""
c = np.array(c, ndmin=1, copy=1)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
cnt, iaxis = [int(t) for t in [m, axis]]
if cnt != m:
raise ValueError("The order of derivation must be integer")
if cnt < 0:
raise ValueError("The order of derivation must be non-negative")
if iaxis != axis:
raise ValueError("The axis must be integer")
if not -c.ndim <= iaxis < c.ndim:
raise ValueError("The axis is out of range")
if iaxis < 0:
iaxis += c.ndim
if cnt == 0:
return c
c = np.rollaxis(c, iaxis)
n = len(c)
if cnt >= n:
c = c[:1]*0
else :
for i in range(cnt):
n = n - 1
c *= scl
der = np.empty((n,) + c.shape[1:], dtype=c.dtype)
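            # The loop below uses the derivative identity
            # L_j'(x) = L_{j-1}'(x) - L_{j-1}(x): the (accumulated) c[j]
            # contributes -c[j] at degree j - 1 of the derivative and is
            # folded into c[j - 1] so its own derivative part propagates down.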
for j in range(n, 1, -1):
der[j - 1] = -c[j]
c[j - 1] += c[j]
der[0] = -c[1]
c = der
c = np.rollaxis(c, 0, iaxis + 1)
return c
def lagint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
"""
Integrate a Laguerre series.
Returns the Laguerre series coefficients `c` integrated `m` times from
`lbnd` along `axis`. At each iteration the resulting series is
**multiplied** by `scl` and an integration constant, `k`, is added.
The scaling factor is for use in a linear change of variable. ("Buyer
beware": note that, depending on what one is doing, one may want `scl`
to be the reciprocal of what one might expect; for more information,
see the Notes section below.) The argument `c` is an array of
coefficients from low to high degree along each axis, e.g., [1,2,3]
represents the series ``L_0 + 2*L_1 + 3*L_2`` while [[1,2],[1,2]]
represents ``1*L_0(x)*L_0(y) + 1*L_1(x)*L_0(y) + 2*L_0(x)*L_1(y) +
2*L_1(x)*L_1(y)`` if axis=0 is ``x`` and axis=1 is ``y``.
Parameters
----------
c : array_like
Array of Laguerre series coefficients. If `c` is multidimensional
the different axis correspond to different variables with the
degree in each axis given by the corresponding index.
m : int, optional
Order of integration, must be positive. (Default: 1)
k : {[], list, scalar}, optional
Integration constant(s). The value of the first integral at
``lbnd`` is the first value in the list, the value of the second
integral at ``lbnd`` is the second value, etc. If ``k == []`` (the
default), all constants are set to zero. If ``m == 1``, a single
scalar can be given instead of a list.
lbnd : scalar, optional
The lower bound of the integral. (Default: 0)
scl : scalar, optional
Following each integration the result is *multiplied* by `scl`
before the integration constant is added. (Default: 1)
axis : int, optional
Axis over which the integral is taken. (Default: 0).
.. versionadded:: 1.7.0
Returns
-------
S : ndarray
Laguerre series coefficients of the integral.
Raises
------
ValueError
If ``m < 0``, ``len(k) > m``, ``np.isscalar(lbnd) == False``, or
``np.isscalar(scl) == False``.
See Also
--------
lagder
Notes
-----
Note that the result of each integration is *multiplied* by `scl`.
Why is this important to note? Say one is making a linear change of
variable :math:`u = ax + b` in an integral relative to `x`. Then
    :math:`dx = du/a`, so one will need to set `scl` equal to
:math:`1/a` - perhaps not what one would have first thought.
Also note that, in general, the result of integrating a C-series needs
to be "reprojected" onto the C-series basis set. Thus, typically,
the result of this function is "unintuitive," albeit correct; see
Examples section below.
Examples
--------
>>> from numpy.polynomial.laguerre import lagint
>>> lagint([1,2,3])
array([ 1., 1., 1., -3.])
>>> lagint([1,2,3], m=2)
array([ 1., 0., 0., -4., 3.])
>>> lagint([1,2,3], k=1)
array([ 2., 1., 1., -3.])
>>> lagint([1,2,3], lbnd=-1)
array([ 11.5, 1. , 1. , -3. ])
>>> lagint([1,2], m=2, k=[1,2], lbnd=-1)
array([ 11.16666667, -5. , -3. , 2. ])
"""
c = np.array(c, ndmin=1, copy=1)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
if not np.iterable(k):
k = [k]
cnt, iaxis = [int(t) for t in [m, axis]]
if cnt != m:
raise ValueError("The order of integration must be integer")
if cnt < 0 :
raise ValueError("The order of integration must be non-negative")
if len(k) > cnt :
raise ValueError("Too many integration constants")
if iaxis != axis:
raise ValueError("The axis must be integer")
if not -c.ndim <= iaxis < c.ndim:
raise ValueError("The axis is out of range")
if iaxis < 0:
iaxis += c.ndim
if cnt == 0:
return c
c = np.rollaxis(c, iaxis)
k = list(k) + [0]*(cnt - len(k))
for i in range(cnt) :
n = len(c)
c *= scl
if n == 1 and np.all(c[0] == 0):
c[0] += k[i]
else:
tmp = np.empty((n + 1,) + c.shape[1:], dtype=c.dtype)
tmp[0] = c[0]
tmp[1] = -c[0]
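            # The loop below applies the antiderivative identity
            # integral(L_j(x) dx) = L_j(x) - L_{j+1}(x) (up to a constant):
            # each c[j] is added at degree j and subtracted at degree j + 1.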
for j in range(1, n):
tmp[j] += c[j]
tmp[j + 1] = -c[j]
tmp[0] += k[i] - lagval(lbnd, tmp)
c = tmp
c = np.rollaxis(c, 0, iaxis + 1)
return c
def lagval(x, c, tensor=True):
"""
Evaluate a Laguerre series at points x.
If `c` is of length `n + 1`, this function returns the value:
.. math:: p(x) = c_0 * L_0(x) + c_1 * L_1(x) + ... + c_n * L_n(x)
The parameter `x` is converted to an array only if it is a tuple or a
list, otherwise it is treated as a scalar. In either case, either `x`
or its elements must support multiplication and addition both with
themselves and with the elements of `c`.
If `c` is a 1-D array, then `p(x)` will have the same shape as `x`. If
`c` is multidimensional, then the shape of the result depends on the
value of `tensor`. If `tensor` is true the shape will be c.shape[1:] +
x.shape. If `tensor` is false the shape will be c.shape[1:]. Note that
    scalars have shape ().
Trailing zeros in the coefficients will be used in the evaluation, so
they should be avoided if efficiency is a concern.
Parameters
----------
x : array_like, compatible object
If `x` is a list or tuple, it is converted to an ndarray, otherwise
it is left unchanged and treated as a scalar. In either case, `x`
or its elements must support addition and multiplication with
        themselves and with the elements of `c`.
c : array_like
Array of coefficients ordered so that the coefficients for terms of
degree n are contained in c[n]. If `c` is multidimensional the
remaining indices enumerate multiple polynomials. In the two
dimensional case the coefficients may be thought of as stored in
the columns of `c`.
tensor : boolean, optional
If True, the shape of the coefficient array is extended with ones
on the right, one for each dimension of `x`. Scalars have dimension 0
for this action. The result is that every column of coefficients in
`c` is evaluated for every element of `x`. If False, `x` is broadcast
over the columns of `c` for the evaluation. This keyword is useful
when `c` is multidimensional. The default value is True.
.. versionadded:: 1.7.0
Returns
-------
values : ndarray, algebra_like
The shape of the return value is described above.
See Also
--------
lagval2d, laggrid2d, lagval3d, laggrid3d
Notes
-----
The evaluation uses Clenshaw recursion, aka synthetic division.
Examples
--------
>>> from numpy.polynomial.laguerre import lagval
>>> coef = [1,2,3]
>>> lagval(1, coef)
-0.5
>>> lagval([[1,2],[3,4]], coef)
array([[-0.5, -4. ],
[-4.5, -2. ]])
"""
c = np.array(c, ndmin=1, copy=0)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
if isinstance(x, (tuple, list)):
x = np.asarray(x)
if isinstance(x, np.ndarray) and tensor:
c = c.reshape(c.shape + (1,)*x.ndim)
if len(c) == 1 :
c0 = c[0]
c1 = 0
elif len(c) == 2 :
c0 = c[0]
c1 = c[1]
else :
nd = len(c)
c0 = c[-2]
c1 = c[-1]
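        # Clenshaw-style downward recurrence built on the three-term relation
        # (k + 1)*L_{k+1}(x) = (2*k + 1 - x)*L_k(x) - k*L_{k-1}(x); c0 and c1
        # accumulate the series so that the final value returned below is
        # c0*L_0(x) + c1*L_1(x) = c0 + c1*(1 - x).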
for i in range(3, len(c) + 1) :
tmp = c0
nd = nd - 1
c0 = c[-i] - (c1*(nd - 1))/nd
c1 = tmp + (c1*((2*nd - 1) - x))/nd
return c0 + c1*(1 - x)
def lagval2d(x, y, c):
"""
Evaluate a 2-D Laguerre series at points (x, y).
This function returns the values:
.. math:: p(x,y) = \\sum_{i,j} c_{i,j} * L_i(x) * L_j(y)
The parameters `x` and `y` are converted to arrays only if they are
tuples or a lists, otherwise they are treated as a scalars and they
must have the same shape after conversion. In either case, either `x`
and `y` or their elements must support multiplication and addition both
with themselves and with the elements of `c`.
If `c` is a 1-D array a one is implicitly appended to its shape to make
it 2-D. The shape of the result will be c.shape[2:] + x.shape.
Parameters
----------
x, y : array_like, compatible objects
The two dimensional series is evaluated at the points `(x, y)`,
where `x` and `y` must have the same shape. If `x` or `y` is a list
or tuple, it is first converted to an ndarray, otherwise it is left
unchanged and if it isn't an ndarray it is treated as a scalar.
c : array_like
Array of coefficients ordered so that the coefficient of the term
of multi-degree i,j is contained in ``c[i,j]``. If `c` has
dimension greater than two the remaining indices enumerate multiple
sets of coefficients.
Returns
-------
values : ndarray, compatible object
The values of the two dimensional polynomial at points formed with
pairs of corresponding values from `x` and `y`.
See Also
--------
lagval, laggrid2d, lagval3d, laggrid3d
Notes
-----
.. versionadded::1.7.0
"""
try:
x, y = np.array((x, y), copy=0)
except:
raise ValueError('x, y are incompatible')
c = lagval(x, c)
c = lagval(y, c, tensor=False)
return c
def laggrid2d(x, y, c):
"""
Evaluate a 2-D Laguerre series on the Cartesian product of x and y.
This function returns the values:
.. math:: p(a,b) = \sum_{i,j} c_{i,j} * L_i(a) * L_j(b)
where the points `(a, b)` consist of all pairs formed by taking
`a` from `x` and `b` from `y`. The resulting points form a grid with
`x` in the first dimension and `y` in the second.
The parameters `x` and `y` are converted to arrays only if they are
tuples or a lists, otherwise they are treated as a scalars. In either
case, either `x` and `y` or their elements must support multiplication
and addition both with themselves and with the elements of `c`.
If `c` has fewer than two dimensions, ones are implicitly appended to
its shape to make it 2-D. The shape of the result will be c.shape[2:] +
x.shape + y.shape.
Parameters
----------
x, y : array_like, compatible objects
The two dimensional series is evaluated at the points in the
Cartesian product of `x` and `y`. If `x` or `y` is a list or
tuple, it is first converted to an ndarray, otherwise it is left
unchanged and, if it isn't an ndarray, it is treated as a scalar.
c : array_like
Array of coefficients ordered so that the coefficient of the term of
multi-degree i,j is contained in `c[i,j]`. If `c` has dimension
greater than two the remaining indices enumerate multiple sets of
coefficients.
Returns
-------
values : ndarray, compatible object
        The values of the two dimensional Laguerre series at points in the
Cartesian product of `x` and `y`.
See Also
--------
lagval, lagval2d, lagval3d, laggrid3d
Notes
-----
.. versionadded::1.7.0
"""
c = lagval(x, c)
c = lagval(y, c)
return c
def lagval3d(x, y, z, c):
"""
Evaluate a 3-D Laguerre series at points (x, y, z).
This function returns the values:
.. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * L_i(x) * L_j(y) * L_k(z)
The parameters `x`, `y`, and `z` are converted to arrays only if
they are tuples or a lists, otherwise they are treated as a scalars and
they must have the same shape after conversion. In either case, either
`x`, `y`, and `z` or their elements must support multiplication and
addition both with themselves and with the elements of `c`.
If `c` has fewer than 3 dimensions, ones are implicitly appended to its
shape to make it 3-D. The shape of the result will be c.shape[3:] +
x.shape.
Parameters
----------
x, y, z : array_like, compatible object
The three dimensional series is evaluated at the points
`(x, y, z)`, where `x`, `y`, and `z` must have the same shape. If
any of `x`, `y`, or `z` is a list or tuple, it is first converted
to an ndarray, otherwise it is left unchanged and if it isn't an
ndarray it is treated as a scalar.
c : array_like
Array of coefficients ordered so that the coefficient of the term of
multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension
greater than 3 the remaining indices enumerate multiple sets of
coefficients.
Returns
-------
values : ndarray, compatible object
        The values of the multidimensional polynomial at points formed with
triples of corresponding values from `x`, `y`, and `z`.
See Also
--------
lagval, lagval2d, laggrid2d, laggrid3d
Notes
-----
.. versionadded::1.7.0
"""
try:
x, y, z = np.array((x, y, z), copy=0)
except:
raise ValueError('x, y, z are incompatible')
c = lagval(x, c)
c = lagval(y, c, tensor=False)
c = lagval(z, c, tensor=False)
return c
def laggrid3d(x, y, z, c):
"""
Evaluate a 3-D Laguerre series on the Cartesian product of x, y, and z.
This function returns the values:
.. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * L_i(a) * L_j(b) * L_k(c)
where the points `(a, b, c)` consist of all triples formed by taking
`a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form
a grid with `x` in the first dimension, `y` in the second, and `z` in
the third.
The parameters `x`, `y`, and `z` are converted to arrays only if they
are tuples or a lists, otherwise they are treated as a scalars. In
either case, either `x`, `y`, and `z` or their elements must support
multiplication and addition both with themselves and with the elements
of `c`.
If `c` has fewer than three dimensions, ones are implicitly appended to
its shape to make it 3-D. The shape of the result will be c.shape[3:] +
x.shape + y.shape + z.shape.
Parameters
----------
x, y, z : array_like, compatible objects
The three dimensional series is evaluated at the points in the
Cartesian product of `x`, `y`, and `z`. If `x`,`y`, or `z` is a
list or tuple, it is first converted to an ndarray, otherwise it is
left unchanged and, if it isn't an ndarray, it is treated as a
scalar.
c : array_like
        Array of coefficients ordered so that the coefficient of the term of
        multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension
        greater than three the remaining indices enumerate multiple sets of
coefficients.
Returns
-------
values : ndarray, compatible object
        The values of the three dimensional polynomial at points in the
        Cartesian product of `x`, `y`, and `z`.
See Also
--------
lagval, lagval2d, laggrid2d, lagval3d
Notes
-----
.. versionadded::1.7.0
"""
c = lagval(x, c)
c = lagval(y, c)
c = lagval(z, c)
return c
def lagvander(x, deg) :
"""Pseudo-Vandermonde matrix of given degree.
Returns the pseudo-Vandermonde matrix of degree `deg` and sample points
`x`. The pseudo-Vandermonde matrix is defined by
.. math:: V[..., i] = L_i(x)
where `0 <= i <= deg`. The leading indices of `V` index the elements of
`x` and the last index is the degree of the Laguerre polynomial.
If `c` is a 1-D array of coefficients of length `n + 1` and `V` is the
array ``V = lagvander(x, n)``, then ``np.dot(V, c)`` and
``lagval(x, c)`` are the same up to roundoff. This equivalence is
useful both for least squares fitting and for the evaluation of a large
number of Laguerre series of the same degree and sample points.
Parameters
----------
x : array_like
Array of points. The dtype is converted to float64 or complex128
depending on whether any of the elements are complex. If `x` is
scalar it is converted to a 1-D array.
deg : int
Degree of the resulting matrix.
Returns
-------
vander: ndarray
The pseudo-Vandermonde matrix. The shape of the returned matrix is
        ``x.shape + (deg + 1,)``, where the last index is the degree of the
corresponding Laguerre polynomial. The dtype will be the same as
the converted `x`.
Examples
--------
>>> from numpy.polynomial.laguerre import lagvander
>>> x = np.array([0, 1, 2])
>>> lagvander(x, 3)
array([[ 1. , 1. , 1. , 1. ],
[ 1. , 0. , -0.5 , -0.66666667],
[ 1. , -1. , -1. , -0.33333333]])
"""
ideg = int(deg)
if ideg != deg:
raise ValueError("deg must be integer")
if ideg < 0:
raise ValueError("deg must be non-negative")
x = np.array(x, copy=0, ndmin=1) + 0.0
dims = (ideg + 1,) + x.shape
dtyp = x.dtype
v = np.empty(dims, dtype=dtyp)
v[0] = x*0 + 1
if ideg > 0 :
v[1] = 1 - x
for i in range(2, ideg + 1) :
v[i] = (v[i-1]*(2*i - 1 - x) - v[i-2]*(i - 1))/i
return np.rollaxis(v, 0, v.ndim)
def lagvander2d(x, y, deg) :
"""Pseudo-Vandermonde matrix of given degrees.
Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
points `(x, y)`. The pseudo-Vandermonde matrix is defined by
.. math:: V[..., deg[1]*i + j] = L_i(x) * L_j(y),
where `0 <= i <= deg[0]` and `0 <= j <= deg[1]`. The leading indices of
`V` index the points `(x, y)` and the last index encodes the degrees of
the Laguerre polynomials.
If ``V = lagvander2d(x, y, [xdeg, ydeg])``, then the columns of `V`
correspond to the elements of a 2-D coefficient array `c` of shape
(xdeg + 1, ydeg + 1) in the order
.. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ...
and ``np.dot(V, c.flat)`` and ``lagval2d(x, y, c)`` will be the same
up to roundoff. This equivalence is useful both for least squares
fitting and for the evaluation of a large number of 2-D Laguerre
series of the same degrees and sample points.
Parameters
----------
x, y : array_like
Arrays of point coordinates, all of the same shape. The dtypes
will be converted to either float64 or complex128 depending on
whether any of the elements are complex. Scalars are converted to
1-D arrays.
deg : list of ints
List of maximum degrees of the form [x_deg, y_deg].
Returns
-------
vander2d : ndarray
The shape of the returned matrix is ``x.shape + (order,)``, where
        :math:`order = (deg[0]+1)*(deg[1]+1)`. The dtype will be the same
as the converted `x` and `y`.
See Also
--------
    lagvander, lagvander3d, lagval2d, lagval3d
Notes
-----
.. versionadded::1.7.0
"""
ideg = [int(d) for d in deg]
is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)]
if is_valid != [1, 1]:
raise ValueError("degrees must be non-negative integers")
degx, degy = ideg
x, y = np.array((x, y), copy=0) + 0.0
vx = lagvander(x, degx)
vy = lagvander(y, degy)
v = vx[..., None]*vy[..., None, :]
return v.reshape(v.shape[:-2] + (-1,))
def lagvander3d(x, y, z, deg) :
"""Pseudo-Vandermonde matrix of given degrees.
Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
points `(x, y, z)`. If `l, m, n` are the given degrees in `x, y, z`,
then The pseudo-Vandermonde matrix is defined by
.. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = L_i(x)*L_j(y)*L_k(z),
    where `0 <= i <= l`, `0 <= j <= m`, and `0 <= k <= n`. The leading
indices of `V` index the points `(x, y, z)` and the last index encodes
the degrees of the Laguerre polynomials.
If ``V = lagvander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the columns
of `V` correspond to the elements of a 3-D coefficient array `c` of
shape (xdeg + 1, ydeg + 1, zdeg + 1) in the order
.. math:: c_{000}, c_{001}, c_{002},... , c_{010}, c_{011}, c_{012},...
and ``np.dot(V, c.flat)`` and ``lagval3d(x, y, z, c)`` will be the
same up to roundoff. This equivalence is useful both for least squares
fitting and for the evaluation of a large number of 3-D Laguerre
series of the same degrees and sample points.
Parameters
----------
x, y, z : array_like
Arrays of point coordinates, all of the same shape. The dtypes will
be converted to either float64 or complex128 depending on whether
any of the elements are complex. Scalars are converted to 1-D
arrays.
deg : list of ints
List of maximum degrees of the form [x_deg, y_deg, z_deg].
Returns
-------
vander3d : ndarray
The shape of the returned matrix is ``x.shape + (order,)``, where
        :math:`order = (deg[0]+1)*(deg[1]+1)*(deg[2]+1)`. The dtype will
be the same as the converted `x`, `y`, and `z`.
See Also
--------
    lagvander, lagvander2d, lagval2d, lagval3d
Notes
-----
.. versionadded::1.7.0
"""
ideg = [int(d) for d in deg]
is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)]
if is_valid != [1, 1, 1]:
raise ValueError("degrees must be non-negative integers")
degx, degy, degz = ideg
x, y, z = np.array((x, y, z), copy=0) + 0.0
vx = lagvander(x, degx)
vy = lagvander(y, degy)
vz = lagvander(z, degz)
v = vx[..., None, None]*vy[..., None, :, None]*vz[..., None, None, :]
return v.reshape(v.shape[:-3] + (-1,))
def lagfit(x, y, deg, rcond=None, full=False, w=None):
"""
Least squares fit of Laguerre series to data.
Return the coefficients of a Laguerre series of degree `deg` that is the
least squares fit to the data values `y` given at points `x`. If `y` is
1-D the returned coefficients will also be 1-D. If `y` is 2-D multiple
fits are done, one for each column of `y`, and the resulting
coefficients are stored in the corresponding columns of a 2-D return.
The fitted polynomial(s) are in the form
.. math:: p(x) = c_0 + c_1 * L_1(x) + ... + c_n * L_n(x),
where `n` is `deg`.
Since numpy version 1.7.0, lagfit also supports NA. If any of the
elements of `x`, `y`, or `w` are NA, then the corresponding rows of the
linear least squares problem (see Notes) are set to 0. If `y` is 2-D,
then an NA in any row of `y` invalidates that whole row.
Parameters
----------
x : array_like, shape (M,)
x-coordinates of the M sample points ``(x[i], y[i])``.
y : array_like, shape (M,) or (M, K)
y-coordinates of the sample points. Several data sets of sample
points sharing the same x-coordinates can be fitted at once by
passing in a 2D-array that contains one dataset per column.
deg : int
Degree of the fitting polynomial
rcond : float, optional
Relative condition number of the fit. Singular values smaller than
this relative to the largest singular value will be ignored. The
default value is len(x)*eps, where eps is the relative precision of
the float type, about 2e-16 in most cases.
full : bool, optional
Switch determining nature of return value. When it is False (the
default) just the coefficients are returned, when True diagnostic
information from the singular value decomposition is also returned.
w : array_like, shape (`M`,), optional
Weights. If not None, the contribution of each point
``(x[i],y[i])`` to the fit is weighted by `w[i]`. Ideally the
weights are chosen so that the errors of the products ``w[i]*y[i]``
all have the same variance. The default value is None.
Returns
-------
coef : ndarray, shape (M,) or (M, K)
Laguerre coefficients ordered from low to high. If `y` was 2-D,
the coefficients for the data in column k of `y` are in column
`k`.
[residuals, rank, singular_values, rcond] : present when `full` = True
Residuals of the least-squares fit, the effective rank of the
scaled Vandermonde matrix and its singular values, and the
specified value of `rcond`. For more details, see `linalg.lstsq`.
Warns
-----
RankWarning
The rank of the coefficient matrix in the least-squares fit is
deficient. The warning is only raised if `full` = False. The
warnings can be turned off by
>>> import warnings
>>> warnings.simplefilter('ignore', RankWarning)
See Also
--------
chebfit, legfit, polyfit, hermfit, hermefit
lagval : Evaluates a Laguerre series.
lagvander : pseudo Vandermonde matrix of Laguerre series.
lagweight : Laguerre weight function.
linalg.lstsq : Computes a least-squares fit from the matrix.
scipy.interpolate.UnivariateSpline : Computes spline fits.
Notes
-----
The solution is the coefficients of the Laguerre series `p` that
minimizes the sum of the weighted squared errors
.. math:: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2,
where the :math:`w_j` are the weights. This problem is solved by
setting up as the (typically) overdetermined matrix equation
.. math:: V(x) * c = w * y,
where `V` is the weighted pseudo Vandermonde matrix of `x`, `c` are the
coefficients to be solved for, `w` are the weights, and `y` are the
observed values. This equation is then solved using the singular value
decomposition of `V`.
If some of the singular values of `V` are so small that they are
neglected, then a `RankWarning` will be issued. This means that the
coefficient values may be poorly determined. Using a lower order fit
will usually get rid of the warning. The `rcond` parameter can also be
set to a value smaller than its default, but the resulting fit may be
spurious and have large contributions from roundoff error.
Fits using Laguerre series are probably most useful when the data can
be approximated by ``sqrt(w(x)) * p(x)``, where `w(x)` is the Laguerre
    weight. In that case the weight ``sqrt(w(x[i]))`` should be used
    together with data values ``y[i]/sqrt(w(x[i]))``. The weight function is
available as `lagweight`.
References
----------
.. [1] Wikipedia, "Curve fitting",
http://en.wikipedia.org/wiki/Curve_fitting
Examples
--------
>>> from numpy.polynomial.laguerre import lagfit, lagval
>>> x = np.linspace(0, 10)
>>> err = np.random.randn(len(x))/10
>>> y = lagval(x, [1, 2, 3]) + err
>>> lagfit(x, y, 2)
array([ 0.96971004, 2.00193749, 3.00288744])
"""
order = int(deg) + 1
x = np.asarray(x) + 0.0
y = np.asarray(y) + 0.0
# check arguments.
if deg < 0 :
raise ValueError("expected deg >= 0")
if x.ndim != 1:
raise TypeError("expected 1D vector for x")
if x.size == 0:
raise TypeError("expected non-empty vector for x")
if y.ndim < 1 or y.ndim > 2 :
raise TypeError("expected 1D or 2D array for y")
if len(x) != len(y):
raise TypeError("expected x and y to have same length")
# set up the least squares matrices in transposed form
lhs = lagvander(x, deg).T
rhs = y.T
if w is not None:
w = np.asarray(w) + 0.0
if w.ndim != 1:
raise TypeError("expected 1D vector for w")
if len(x) != len(w):
raise TypeError("expected x and w to have same length")
# apply weights. Don't use inplace operations as they
# can cause problems with NA.
lhs = lhs * w
rhs = rhs * w
# set rcond
if rcond is None :
rcond = len(x)*np.finfo(x.dtype).eps
# scale the design matrix and solve the least squares equation
scl = np.sqrt((lhs*lhs).sum(1))
c, resids, rank, s = la.lstsq(lhs.T/scl, rhs.T, rcond)
c = (c.T/scl).T
# warn on rank reduction
if rank != order and not full:
msg = "The fit may be poorly conditioned"
warnings.warn(msg, pu.RankWarning)
if full :
return c, [resids, rank, s, rcond]
else :
return c
def lagcompanion(c):
"""
Return the companion matrix of c.
The usual companion matrix of the Laguerre polynomials is already
symmetric when `c` is a basis Laguerre polynomial, so no scaling is
applied.
Parameters
----------
c : array_like
1-D array of Laguerre series coefficients ordered from low to high
degree.
Returns
-------
mat : ndarray
Companion matrix of dimensions (deg, deg).
Notes
-----
.. versionadded::1.7.0
"""
accprod = np.multiply.accumulate
# c is a trimmed copy
[c] = pu.as_series([c])
if len(c) < 2:
raise ValueError('Series must have maximum degree of at least 1.')
if len(c) == 2:
        return np.array([[1 + c[0]/c[1]]])
n = len(c) - 1
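    # Assemble the tridiagonal companion matrix through flat views of `mat`:
    # the main diagonal carries 2*k + 1 and both off-diagonals carry -(k + 1)
    # for k = 0, ..., n-2 (so the matrix is symmetric), after which the last
    # column is shifted by the scaled series coefficients.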
mat = np.zeros((n, n), dtype=c.dtype)
top = mat.reshape(-1)[1::n+1]
mid = mat.reshape(-1)[0::n+1]
bot = mat.reshape(-1)[n::n+1]
top[...] = -np.arange(1,n)
mid[...] = 2.*np.arange(n) + 1.
bot[...] = top
mat[:,-1] += (c[:-1]/c[-1])*n
return mat
def lagroots(c):
"""
Compute the roots of a Laguerre series.
Return the roots (a.k.a. "zeros") of the polynomial
.. math:: p(x) = \\sum_i c[i] * L_i(x).
Parameters
----------
c : 1-D array_like
1-D array of coefficients.
Returns
-------
out : ndarray
Array of the roots of the series. If all the roots are real,
then `out` is also real, otherwise it is complex.
See Also
--------
polyroots, legroots, chebroots, hermroots, hermeroots
Notes
-----
The root estimates are obtained as the eigenvalues of the companion
    matrix. Roots far from the origin of the complex plane may have large
errors due to the numerical instability of the series for such
values. Roots with multiplicity greater than 1 will also show larger
errors as the value of the series near such points is relatively
insensitive to errors in the roots. Isolated roots near the origin can
be improved by a few iterations of Newton's method.
The Laguerre series basis polynomials aren't powers of `x` so the
results of this function may seem unintuitive.
Examples
--------
>>> from numpy.polynomial.laguerre import lagroots, lagfromroots
>>> coef = lagfromroots([0, 1, 2])
>>> coef
array([ 2., -8., 12., -6.])
>>> lagroots(coef)
array([ -4.44089210e-16, 1.00000000e+00, 2.00000000e+00])
"""
# c is a trimmed copy
[c] = pu.as_series([c])
if len(c) <= 1 :
return np.array([], dtype=c.dtype)
if len(c) == 2 :
return np.array([1 + c[0]/c[1]])
m = lagcompanion(c)
r = la.eigvals(m)
r.sort()
return r
def laggauss(deg):
"""
Gauss-Laguerre quadrature.
Computes the sample points and weights for Gauss-Laguerre quadrature.
These sample points and weights will correctly integrate polynomials of
    degree :math:`2*deg - 1` or less over the interval :math:`[0, \infty]` with the
weight function :math:`f(x) = \exp(-x)`.
Parameters
----------
deg : int
Number of sample points and weights. It must be >= 1.
Returns
-------
x : ndarray
1-D ndarray containing the sample points.
y : ndarray
1-D ndarray containing the weights.
Notes
-----
.. versionadded::1.7.0
    The results have only been tested up to degree 100; higher degrees may
be problematic. The weights are determined by using the fact that
.. math:: w_k = c / (L'_n(x_k) * L_{n-1}(x_k))
where :math:`c` is a constant independent of :math:`k` and :math:`x_k`
is the k'th root of :math:`L_n`, and then scaling the results to get
the right value when integrating 1.
"""
ideg = int(deg)
if ideg != deg or ideg < 1:
        raise ValueError("deg must be a positive integer")
# first approximation of roots. We use the fact that the companion
# matrix is symmetric in this case in order to obtain better zeros.
c = np.array([0]*deg + [1])
m = lagcompanion(c)
x = la.eigvals(m)
x.sort()
# improve roots by one application of Newton
dy = lagval(x, c)
df = lagval(x, lagder(c))
x -= dy/df
# compute the weights. We scale the factor to avoid possible numerical
# overflow.
fm = lagval(x, c[1:])
fm /= np.abs(fm).max()
df /= np.abs(df).max()
w = 1/(fm * df)
# scale w to get the right value, 1 in this case
w /= w.sum()
return x, w
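# Illustrative check (doctest-style sketch, not part of the original module):
# the exp(-x) weight is already folded into the returned weights, so
# sum(w*f(x)) approximates integral_0^inf f(x)*exp(-x) dx.  For f(x) = x**2
# the exact value is Gamma(3) = 2.
#
#   >>> x, w = laggauss(10)
#   >>> round(np.sum(w * x**2), 8)
#   2.0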
def lagweight(x):
"""Weight function of the Laguerre polynomials.
The weight function is :math:`exp(-x)` and the interval of integration
    is :math:`[0, \infty]`. The Laguerre polynomials are orthogonal, but not
normalized, with respect to this weight function.
Parameters
----------
x : array_like
Values at which the weight function will be computed.
Returns
-------
w : ndarray
The weight function at `x`.
Notes
-----
.. versionadded::1.7.0
"""
w = np.exp(-x)
return w
#
# Laguerre series class
#
exec(polytemplate.substitute(name='Laguerre', nick='lag', domain='[0,1]'))
| cschenck/blender_sim | fluid_sim_deps/blender-2.69/2.69/python/lib/python3.3/site-packages/numpy/polynomial/laguerre.py | Python | gpl-3.0 | 54,045 |
__author__ = 'kiruba'
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import itertools
from matplotlib import rc
from spread import spread
import meteolib as met
import evaplib
from bisect import bisect_left, bisect_right
from scipy.optimize import curve_fit
import math
import scipy as sp
from datetime import timedelta
import matplotlib as mpl
# latex parameters
rc('font', **{'family': 'sans-serif', 'sans-serif': ['Helvetica']})
rc('text', usetex=True)
plt.rc('text', usetex=True)
plt.rc('font', family='serif', size=18)
# Weather file
weather_file = '/media/kiruba/New Volume/ACCUWA_Data/weather_station/smgollahalli/corrected_weather.csv'
# Rain file
rain_file = '/media/kiruba/New Volume/ACCUWA_Data/weather_station/smgollahalli/corrected_rain.csv'
# convert to pandas dataframe
weather_df = pd.read_csv(weather_file, sep=',', header=0)
# set index
date_format = '%Y-%m-%d %H:%M:%S'
weather_df['Date_Time'] = pd.to_datetime(weather_df['Date_Time'], format=date_format)
weather_df.set_index(weather_df['Date_Time'], inplace=True)
# sort based on index
weather_df.sort_index(inplace=True)
# drop date time column
weather_df = weather_df.drop('Date_Time', 1)
# print weather_df.head()
# Rain data frame
rain_df = pd.read_csv(rain_file, sep=',', header=0)
# set index
rain_df['Date_Time'] = pd.to_datetime(rain_df['Date_Time'], format=date_format)
rain_df.set_index(rain_df['Date_Time'], inplace=True)
# sort based on index
rain_df.sort_index(inplace=True)
# drop date time column
rain_df = rain_df.drop('Date_Time', 1)
# print rain_df.head()
# rain_df.to_csv('/media/kiruba/New Volume/ACCUWA_Data/weather_station/smgollahalli/rain_corr.csv')
# create daily value
rain_daily_df = rain_df.resample('D', how=np.sum)
rain_daily_df.to_csv('/media/kiruba/New Volume/ACCUWA_Data/weather_station/smgollahalli/rain_daily_corr.csv')
# create average daily weather
weather_daily_df = weather_df.resample('D', how=np.mean)
weather_daily_df = weather_daily_df.join(rain_daily_df, how='left')
"""
Evaporation from open water
Equation according to J.D. Valiantzas (2006). Simplified versions
for the Penman evaporation equation using routine weather data.
J. Hydrology 331: 690-702. Following Penman (1948,1956). Albedo set
at 0.06 for open water.
Input (measured at 2 m height):
- airtemp: (array of) daily average air temperatures [Celsius]
- rh: (array of) daily average relative humidity [%]
- airpress: (array of) daily average air pressure data [Pa]
- Rs: (array of) daily incoming solar radiation [J/m2/day]
- N: (array of) maximum daily sunshine hours [h]
- Rext: (array of) daily extraterrestrial radiation [J/m2/day]
- u: (array of) daily average wind speed at 2 m [m/s]
- Z: (array of) site elevation [m a.s.l.], default is zero...
Output:
- E0: (array of) Penman open water evaporation values [mm/day]
"""
"""
air pressure (Pa) = 101325(1-2.25577 10^-5 h)^5.25588
h = altitude above sea level (m)
http://www.engineeringtoolbox.com/air-altitude-pressure-d_462.html
mean elevation over watershed = 803.441589 m
Elevation at the check dam = 799 m
"""
z = 799
p = (1-(2.25577*(10**-5)*z))
air_p_pa = 101325*(p**5.25588)
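# Sanity check (illustrative): for z = 799 m the barometric formula above gives
# roughly 92.1 kPa, i.e. about 9 % below the standard sea-level value of
# 101.325 kPa.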
# give air pressure value
weather_daily_df['AirPr(Pa)'] = air_p_pa
"""
Sunshine hours calculation
591 calculation - lat = 13.260196, long = 77.5120849
floor division(//): http://www.tutorialspoint.com/python/python_basic_operators.htm
"""
#select radiation data separately
sunshine_df = weather_df[['Solar Radiation (W/mm2)']]
# give value 1 if there is radiation and rest 0
sunshine_df['sunshine hours (h)'] = (sunshine_df['Solar Radiation (W/mm2)'] != 0).astype(int)
# gives 1 for true and 0 for false
# print sunshine_df.head()
#aggregate the flags to daily totals and floor-divide by 2 to get daily sunshine hours
sunshine_daily_df = sunshine_df.resample('D', how=np.sum) // 2 # // floor division
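# Illustrative example (assumes half-hourly records, consistent with the comment above):
# a day with 15 half-hour intervals showing radiation gives 15 // 2 = 7 sunshine hours;
# the leftover half hour is dropped by the floor division.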
sunshine_daily_df = sunshine_daily_df.drop(sunshine_daily_df.columns.values[0], 1)
# print sunshine_daily_df.head()
weather_daily_df = weather_daily_df.join(sunshine_daily_df, how='left')
"""
Daily Extraterrestrial Radiation Calculation(J/m2/day)
"""
def rext_calc(df, lat=float):
"""
Function to calculate extraterrestrial radiation output in J/m2/day
Ref:http://www.fao.org/docrep/x0490e/x0490e07.htm
:param df: dataframe with datetime index
:param lat: latitude (negative for Southern hemisphere)
:return: Rext (J/m2)
"""
# set solar constant [MJ m^-2 min^-1]
s = 0.08166
#convert latitude [degrees] to radians
latrad = lat*math.pi / 180.0
#have to add in function for calculating single value here
# extract date, month, year from index
date = pd.DatetimeIndex(df.index).day
month = pd.DatetimeIndex(df.index).month
year = pd.DatetimeIndex(df.index).year
    doy = met.date2doy(dd=date, mm=month, yyyy=year)  # day of year (1-366) according to the date
l = sp.size(doy)
if l < 2:
dt = 0.409 * math.sin(2 * math.pi / 365 * doy - 1.39)
ws = sp.arccos(-math.tan(latrad) * math.tan(dt))
j = 2 * math.pi / 365.25 * doy
dr = 1.0 + 0.03344 * math.cos(j - 0.048869)
rext = s * 1440 / math.pi * dr * (ws * math.sin(latrad) * math.sin(dt) + math.sin(ws) * math.cos(latrad) * math.cos(dt))
#Create dummy output arrays sp refers to scipy
else:
rext = sp.zeros(l)
dt = sp.zeros(l)
ws = sp.zeros(l)
j = sp.zeros(l)
dr = sp.zeros(l)
#calculate Rext
for i in range(0, l):
            #Calculate solar declination dt (d in FAO) [rad]
dt[i] = 0.409 * math.sin(2 * math.pi / 365 * doy[i] - 1.39)
#calculate sunset hour angle [rad]
ws[i] = sp.arccos(-math.tan(latrad) * math.tan(dt[i]))
# calculate day angle j [radians]
j[i] = 2 * math.pi / 365.25 * doy[i]
# calculate relative distance to sun
dr[i] = 1.0 + 0.03344 * math.cos(j[i] - 0.048869)
#calculate Rext dt = d(FAO) and latrad = j(FAO)
rext[i] = (s * 1440.0 / math.pi) * dr[i] * (ws[i] * math.sin(latrad) * math.sin(dt[i]) + math.sin(ws[i])* math.cos(latrad) * math.cos(dt[i]))
rext = sp.array(rext) * 1000000
return rext
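# Rough magnitude check (from general FAO-56 tables, not computed from this data):
# at ~13 deg N latitude daily extraterrestrial radiation is typically ~25-40 MJ/m2/day,
# so the values returned here (J/m2/day after the *1e6 conversion) should fall roughly
# in the range 2.5e7-4.0e7.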
weather_daily_df['Rext (J/m2)'] = rext_calc(weather_daily_df, lat=13.260196)
"""
wind speed from km/h to m/s
1 kmph = 0.277778 m/s
"""
weather_daily_df['Wind Speed (mps)'] = weather_daily_df['Wind Speed (kmph)'] * 0.277778
"""
Radiation unit conversion
"""
weather_daily_df['Solar Radiation (J/m2/day)'] = weather_daily_df['Solar Radiation (W/mm2)'] * 86400
"""
Pot Evaporation calculation
"""
airtemp = weather_daily_df['Air Temperature (C)']
hum = weather_daily_df['Humidity (%)']
airpress = weather_daily_df['AirPr(Pa)']
rs = weather_daily_df['Solar Radiation (J/m2/day)']
sun_hr = weather_daily_df['sunshine hours (h)']
rext = weather_daily_df['Rext (J/m2)']
wind_speed = weather_daily_df['Wind Speed (mps)']
weather_daily_df['Evaporation (mm/day)'] = evaplib.E0(airtemp=airtemp, rh=hum, airpress=airpress, Rs=rs, N=sun_hr, Rext=rext, u=wind_speed, Z=z )
"""
Plot Evaporation
"""
fig = plt.figure(figsize=(11.69, 8.27))
plt.plot_date(weather_daily_df.index, weather_daily_df['Evaporation (mm/day)'], '-g', label='Evaporation (mm/day)')
plt.ylabel(r'\textbf{Evaporation ($mm/day$)}')
fig.autofmt_xdate(rotation=90)
plt.title(r"Daily Evaporation for Check Dam - 591", fontsize=20)
plt.savefig('/media/kiruba/New Volume/ACCUWA_Data/python_plots/check_dam_591/evaporation_591')
"""
Check dam calibration
"""
# Polynomial fitting function
def polyfit(x, y, degree):
results = {}
coeffs = np.polyfit(x, y, degree)
results['polynomial'] = coeffs.tolist()
#r squared
p = np.poly1d(coeffs)
yhat = p(x)
ybar = np.sum(y)/len(y)
ssreg = np.sum((yhat-ybar)**2)
sstot = np.sum((y-ybar)**2)
results['determination'] = ssreg/sstot
return results
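# Illustrative example (hypothetical numbers): polyfit([0, 1, 2], [0, 2, 4], 1) returns
# approximately {'polynomial': [2.0, 0.0], 'determination': 1.0}, i.e. a perfect linear
# fit with coefficient of determination R^2 = 1.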
#check dam calibration values
y_cal = [10, 40, 100, 160, 225, 275, 300]
x_cal = [2036, 2458, 3025, 4078, 5156, 5874, 6198]
a_stage = polyfit(x_cal, y_cal, 1)
# coefficients of polynomial are stored in following list
coeff_cal = a_stage['polynomial']
## Read check dam data
block_1 = '/media/kiruba/New Volume/ACCUWA_Data/check_dam_water_level/2525_008_001.CSV'
water_level_1 = pd.read_csv(block_1, skiprows=9, sep=',', header=0, names=['scan no', 'date',
'time', 'raw value', 'calibrated value'])
water_level_1['calibrated value'] = (water_level_1['raw value']*coeff_cal[0]) + coeff_cal[1] # in cm
# convert to metre
water_level_1['calibrated value'] /= 100
#change the column name
water_level_1.columns.values[4] = 'stage(m)'
# create date time index
format = '%d/%m/%Y %H:%M:%S'
# change 24:00:00 to 23:59:59
water_level_1['time'] = water_level_1['time'].replace(' 24:00:00', ' 23:59:59')
water_level_1['date_time'] = pd.to_datetime(water_level_1['date'] + water_level_1['time'], format=format)
water_level_1.set_index(water_level_1['date_time'], inplace=True)
# drop unnecessary columns before datetime aggregation
water_level_1.drop(['scan no', 'date', 'time', 'raw value'], inplace=True, axis=1)
#aggregate daily
water_level_1 = water_level_1.resample('D', how=np.mean)
# print water_level_1
block_2 = '/media/kiruba/New Volume/ACCUWA_Data/check_dam_water_level/2525_008_002.CSV'
water_level_2 = pd.read_csv(block_2, skiprows=9, sep=',', header=0, names=['scan no', 'date', 'time', 'raw value', 'calibrated value'])
water_level_2['calibrated value'] = (water_level_2['raw value']*coeff_cal[0]) + coeff_cal[1] # in cm
# convert to metre
water_level_2['calibrated value'] /= 100
#change the column name
water_level_2.columns.values[4] = 'stage(m)'
# create date time index
format = '%d/%m/%Y %H:%M:%S'
# change 24:00:00 to 23:59:59
water_level_2['time'] = water_level_2['time'].replace(' 24:00:00', ' 23:59:59')
water_level_2['date_time'] = pd.to_datetime(water_level_2['date'] + water_level_2['time'], format=format)
water_level_2.set_index(water_level_2['date_time'], inplace=True)
# drop unnecessary columns before datetime aggregation
water_level_2.drop(['scan no', 'date', 'time', 'raw value'], inplace=True, axis=1)
#aggregate daily
water_level_2 = water_level_2.resample('D', how=np.mean)
# print water_level_2
block_3 = '/media/kiruba/New Volume/ACCUWA_Data/check_dam_water_level/2525_008_003.CSV'
water_level_3 = pd.read_csv(block_3, skiprows=9, sep=',', header=0, names=['scan no', 'date', 'time', 'raw value', 'calibrated value'])
water_level_3['calibrated value'] = (water_level_3['raw value']*coeff_cal[0]) + coeff_cal[1] # in cm
# convert to metre
water_level_3['calibrated value'] /= 100
#change the column name
water_level_3.columns.values[4] = 'stage(m)'
# create date time index
format = '%d/%m/%Y %H:%M:%S'
# change 24:00:00 to 23:59:59
water_level_3['time'] = water_level_3['time'].replace(' 24:00:00', ' 23:59:59')
water_level_3['date_time'] = pd.to_datetime(water_level_3['date'] + water_level_3['time'], format=format)
water_level_3.set_index(water_level_3['date_time'], inplace=True)
# drop unnecessary columns before datetime aggregation
water_level_3.drop(['scan no', 'date', 'time', 'raw value'], inplace=True, axis=1)
#aggregate daily
water_level_3 = water_level_3.resample('D', how=np.mean)
# print water_level_3
# block 4
block_4 = '/media/kiruba/New Volume/ACCUWA_Data/check_dam_water_level/2525_008_004.CSV'
water_level_4 = pd.read_csv(block_4, skiprows=9, sep=',', header=0, names=['scan no', 'date',
'time', 'raw value', 'calibrated value'])
water_level_4['calibrated value'] = (water_level_4['raw value']*coeff_cal[0]) + coeff_cal[1] # in cm
# convert to metre
water_level_4['calibrated value'] /= 100
#change the column name
water_level_4.columns.values[4] = 'stage(m)'
# create date time index
format = '%d/%m/%Y %H:%M:%S'
# change 24:00:00 to 23:59:59
water_level_4['time'] = water_level_4['time'].replace(' 24:00:00', ' 23:59:59')
water_level_4['date_time'] = pd.to_datetime(water_level_4['date'] + water_level_4['time'], format=format)
water_level_4.set_index(water_level_4['date_time'], inplace=True)
# drop unnecessary columns before datetime aggregation
water_level_4.drop(['scan no', 'date', 'time', 'raw value'], inplace=True, axis=1)
#aggregate daily
water_level_4 = water_level_4.resample('D', how=np.mean)
water_level = pd.concat([water_level_1, water_level_2, water_level_3, water_level_4], axis=0)
weather_daily_df = weather_daily_df.join(water_level, how='left')
"""
Remove Duplicates
"""
# check for duplicates
# df2 = dry_weather.groupby(level=0).filter(lambda x: len(x) > 1)
# print(df2)
weather_daily_df['index'] = weather_daily_df.index
weather_daily_df.drop_duplicates(subset='index', take_last=True, inplace=True)
del weather_daily_df['index']
weather_daily_df = weather_daily_df.sort()
"""
Stage Volume relation estimation from survey data
"""
# necessary functions
def pairwise(iterable):
"""s -> (s0,s1), (s1,s2), (s2,s3), ..."""
a, b = itertools.tee(iterable)
next(b, None)
return itertools.izip(a, b)
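# Example (not from the source): list(pairwise([1, 2, 3, 4])) -> [(1, 2), (2, 3), (3, 4)]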
#function to create stage volume output
def calcvolume(profile, order, dy):
"""
Profile = df.Y1,df.Y2,.. and order = 1,2,3
:param profile: series of Z values
:param order: distance from origin
:param dy: thickness of profile in m
:return: output: pandas dataframe volume for profile
"""
# print 'profile length = %s' % len(profile)
results = []
for stage in dz:
water_area = 0
for z1, z2 in pairwise(profile):
delev = (z2 - z1) / 10
elev = z1
for b in range(1, 11, 1):
elev += delev
if stage > elev:
# print 'elev = %s' % elev
water_area += (0.1 * (stage-elev))
# print 'order = %s and dy = %s' %(order, dy)
# print 'area = %s' % water_area
calc_vol = water_area * dy
# print 'calc vol = %s' % calc_vol
results.append(calc_vol)
# print 'results = %s' % results
# print 'results length = %s' % len(results)
output[('Volume_%s' % order)] = results
#input parameters
base_file_591 = '/media/kiruba/New Volume/r/r_dir/stream_profile/new_code/591/base_profile_591.csv'
check_dam_no = 591
check_dam_height = 1.9 # m
df_591 = pd.read_csv(base_file_591, sep=',')
df_591_trans = df_591.T # Transpose
no_of_stage_interval = check_dam_height/.05
dz = list((spread(0.00, check_dam_height, int(no_of_stage_interval), mode=3)))
index = [range(len(dz))] # no of stage intervals
columns = ['stage_m']
data = np.array(dz)
output = pd.DataFrame(data, index=index, columns=columns)
# print(df_591_trans)
# print len(df_591_trans.ix[1:, 0])
### Renaming the column and dropping y values
y_name_list = []
for y_value in df_591_trans.ix[0, 0:]:
y_name_list.append(('Y_%d' %y_value))
df_591_trans.columns = y_name_list
# print df_591_trans
y_value_list = df_591_trans.ix[0, 0:]
# print y_value_list
# drop the y values from data
final_data = df_591_trans.ix[1:, 0:]
# print final_data
#volume calculation
for l1, l2 in pairwise(y_value_list):
calcvolume(profile=final_data["Y_%d" % l1], order=l1, dy=int(l2-l1))
output_series = output.filter(regex="Volume_") # filter the columns that have Volume_
output["total_vol_cu_m"] = output_series.sum(axis=1) # get total volume
# print output
# select only stage and total volume
stage_vol_df = output[['stage_m', "total_vol_cu_m"]]
"""
Select data where stage is available, Remove Overflowing days
"""
weather_stage_avl_df = weather_daily_df[min(water_level.index):max(water_level.index)]
# weather_stage_avl_df = weather_stage_avl_df[weather_stage_avl_df['stage(m)'] < 1.9]
# assumption: below a cutoff stage of 14 cm the data is not considered reliable
# weather_stage_avl_df = weather_stage_avl_df[weather_stage_avl_df['stage(m)'] > 0.05]
# weather_stage_avl_df = weather_stage_avl_df[weather_stage_avl_df['change_storage(cu.m)'] > 0]
# print weather_stage_avl_df['stage(m)']
"""
Convert observed stage to volume by linear interpolation
"""
# set stage as index
stage_vol_df.set_index(stage_vol_df['stage_m'], inplace=True)
# function to find containing intervals
def find_range(array, ab):
if ab < max(array):
start = bisect_left(array, ab)
return array[start-1], array[start]
else:
return min(array), max(array)
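# Example with hypothetical values: find_range([0.00, 0.05, 0.10], 0.07) -> (0.05, 0.10);
# for a value at or above the maximum the whole (min, max) interval is returned.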
# print weather_stage_avl_df.head()
water_balance_df = weather_stage_avl_df[['Rain Collection (mm)', 'Evaporation (mm/day)', 'stage(m)']]
# print find_range(stage_vol_df['stage_m'].tolist(), max(water_balance_df['stage(m)']))
water_balance_df['volume (cu.m)'] = 0.000
for index, row in water_balance_df.iterrows():
obs_stage = row['stage(m)'] # observed stage
x1, x2 = find_range(stage_vol_df['stage_m'].tolist(), obs_stage)
x_diff = x2-x1
y1 = stage_vol_df['total_vol_cu_m'][x1]
y2 = stage_vol_df['total_vol_cu_m'][x2]
y_diff = y2 - y1
slope = y_diff/x_diff
y_intercept = y2 - (slope*x2)
water_balance_df['volume (cu.m)'][index.strftime('%Y-%m-%d')] = (slope*obs_stage) + y_intercept
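# Worked example with hypothetical numbers: if the observed stage 0.52 m falls between
# table stages x1 = 0.50 (y1 = 100 cu.m) and x2 = 0.55 (y2 = 110 cu.m), then
# slope = 10/0.05 = 200, intercept = 110 - 200*0.55 = 0, and the interpolated
# volume is 200*0.52 + 0 = 104 cu.m.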
# fig = plt.figure(figsize=(11.69, 8.27))
# plt.plot_date(water_balance_df.index, water_balance_df['volume (cu.m)'], '-g')
# plt.hlines(stage_vol_df['total_vol_cu_m'][1.9], min(water_balance_df.index), max(water_balance_df.index))
# plt.title('before overflow correction')
"""
Overflow
"""
full_vol = stage_vol_df['total_vol_cu_m'][1.9]
print full_vol
water_balance_df['overflow(cu.m)'] = 0.000
for index, row in water_balance_df.iterrows():
obs_vol = row['volume (cu.m)']
if obs_vol > full_vol:
# print obs_vol
water_balance_df['overflow(cu.m)'][index.strftime('%Y-%m-%d')] = obs_vol - full_vol
water_balance_df['volume (cu.m)'][index.strftime('%Y-%m-%d')] = full_vol
water_balance_df['stage(m)'][index.strftime('%Y-%m-%d')] = 1.9
# fig = plt.figure(figsize=(11.69, 8.27))
# plt.plot_date(water_balance_df.index, water_balance_df['volume (cu.m)'], '-g')
# plt.hlines(stage_vol_df['total_vol_cu_m'][1.9], min(water_balance_df.index), max(water_balance_df.index))
# plt.title('after overflow correction')
"""
Stage vs area linear relationship
"""wrong_date_time.append(wrong_time)
stage_area_df = pd.read_csv('/media/kiruba/New Volume/ACCUWA_Data/Checkdam_water_balance/591/cont_area.csv',
sep=',', header=0, names=['sno', 'stage_m', 'total_area_sq_m'])
stage_area_df.drop('sno', inplace=True, axis=1)
# set stage as index
stage_area_df.set_index(stage_area_df['stage_m'], inplace=True)
# print max(water_balance_df['stage(m)'])
# print find_range(stage_area_df['stage_m'].tolist(), max(water_balance_df['stage(m)']))
#create empty column
water_balance_df['ws_area(sq.m)'] = 0.000
for index, row in water_balance_df.iterrows():
obs_stage = row['stage(m)'] # observed stage
x1, x2 = find_range(stage_area_df['stage_m'].tolist(), obs_stage)
x_diff = x2-x1
y1 = stage_area_df['total_area_sq_m'][x1]
y2 = stage_area_df['total_area_sq_m'][x2]
y_diff = y2 - y1
slope = y_diff/x_diff
y_intercept = y2 - (slope*x2)
water_balance_df['ws_area(sq.m)'][index.strftime('%Y-%m-%d')] = (slope*obs_stage) + y_intercept
"""
Evaporation Volume estimation
"""
water_balance_df['Evaporation (cu.m)'] = (water_balance_df['Evaporation (mm/day)'] * 0.001) * water_balance_df['ws_area(sq.m)']
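# Illustrative check (hypothetical numbers): an evaporation of 5 mm/day over a water
# spread area of 1000 sq.m gives 5 * 0.001 * 1000 = 5 cu.m/day.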
"""
change in storage
"""
#assume 0 initially
water_balance_df['change_storage(cu.m)'] = 0.000
#change in storage is today minus yesterday volume
for d1, d2 in pairwise(water_balance_df.index):
if d2 > d1:
diff = (d2-d1).days
if diff == 1:
water_balance_df['change_storage(cu.m)'][d2.strftime('%Y-%m-%d')] = water_balance_df['volume (cu.m)'][d2.strftime('%Y-%m-%d')] - water_balance_df['volume (cu.m)'][d1.strftime('%Y-%m-%d')]
# print water_balance_df
# water_balance_df = water_balance_df[water_balance_df['change_storage(cu.m)'] < 0]
def plot_date(dataframe, column_name):
"""
:param dataframe:
:param column_name:
:type column_name:str
:return:
"""
fig = plt.figure(figsize=(11.69, 8.27))
p = plt.plot(dataframe.index, dataframe[column_name], 'b-', label=r"%s" % column_name)
plt.hlines(0, min(dataframe.index), max(dataframe.index), 'r')
plt.legend(loc='best')
fig.autofmt_xdate(rotation=90)
return p
a = plot_date(water_balance_df, 'change_storage(cu.m)')
# plt.show()
#create average stage for two days
water_balance_df['average_stage_m'] = 0.000
for d1, d2 in pairwise(water_balance_df.index):
diff = abs((d2-d1).days)
if diff == 1:
water_balance_df['average_stage_m'][d2.strftime('%Y-%m-%d')] = (water_balance_df['stage(m)']
[d2.strftime('%Y-%m-%d')]
+ water_balance_df['stage(m)']
[d1.strftime('%Y-%m-%d')])/2
# print water_balance_df.head()
# print water_balance_df
"""
Separate inflow and no inflow days
"""
dry_water_balance_df = water_balance_df[water_balance_df['change_storage(cu.m)'] < 0]
rain_water_balance_df = water_balance_df[water_balance_df['change_storage(cu.m)'] > 0]
# b = plot_date(dry_water_balance_df, 'change_storage(cu.m)')
"""
Calculate infiltration
"""
# calculate infiltration
dry_water_balance_df.to_csv('/media/kiruba/New Volume/ACCUWA_Data/Checkdam_water_balance/591/dry_wb_check.CSV')
# print dry_water_balance_df.head()
# dry_water_balance_df['infiltration(cu.m)'] = 0.000
delta_s = dry_water_balance_df['change_storage(cu.m)']
evap = dry_water_balance_df['Evaporation (cu.m)']
outflow = dry_water_balance_df['overflow(cu.m)']
# for t1, t2 in pairwise(dry_water_balance_df.index):
# diff = abs((t2-t1).days)
# if diff == 1:
# print t1, t2
# dry_water_balance_df['infiltration(cu.m)'][t1.strftime('%Y-%m-%d')] = -1*(delta_s[t2.strftime('%Y-%m-%d')] + evap[t2.strftime('%Y-%m-%d')] + outflow[t2.strftime('%Y-%m-%d')])
# # for index, row in dry_water_balance_df.iterrows():
# if index > min(dry_water_balance_df.index):
# t_1 = index - timedelta(days=1)
# if t_1 < max(dry_water_balance_df.index):
# diff = abs((index-t_1).days)
# if diff == 1:
# print index
# # print t_1
# dry_water_balance_df['infiltration(cu.m)'][index.strftime('%Y-%m-%d')] = -1*(delta_s[index.strftime('%Y-%m-%d')] + evap[t_1.strftime('%Y-%m-%d')] + outflow[t_1.strftime('%Y-%m-%d')])
# print row
dry_water_balance_df['infiltration(cu.m)'] = -1.0*(evap + outflow + delta_s)
# print dry_water_balance_df.head()
# fig = plt.figure(figsize=(11.69, 8.27))
# plt.plot(dry_water_balance_df['average_stage_m'], dry_water_balance_df['infiltration(cu.m)'], 'bo')
# plt.show()
"""
Dry infiltration vs rainfall
"""
fig, ax1 = plt.subplots(nrows=1, ncols=1, figsize=(11.69, 8.27))
# fig.subplots_adjust(right=0.8)
line1 = ax1.bar(water_balance_df.index, water_balance_df['Rain Collection (mm)'], 0.35, label=r'Rainfall(mm)')
plt.gca().invert_yaxis()
ax1.xaxis.tick_bottom()
ax1.yaxis.tick_left()
for t1 in ax1.get_yticklabels():
t1.set_color('b')
# plt.legend(loc='upper left')
ax2 = ax1.twinx()
cmap, norm = mpl.colors.from_levels_and_colors([0, 0.05, 1, 1.5, 2.0], ['red', 'yellow', 'green', 'blue'])
line2 = ax2.scatter(dry_water_balance_df.index, dry_water_balance_df['infiltration(cu.m)'], label='Infiltration (cu.m)', c=dry_water_balance_df['stage(m)'], cmap=cmap, norm=norm)
plt.hlines(0, min(dry_water_balance_df.index), max(dry_water_balance_df.index))
ax2.xaxis.tick_bottom()
ax2.yaxis.tick_right()
for t1 in ax2.get_yticklabels():
t1.set_color('r')
# plt.legend(loc='upper right')
# fig.autofmt_xdate(rotation=90)
# fig.subplots_adjust(right=0.8)
ax3 = ax2.twiny()
line3 = ax3.plot(water_balance_df.index, water_balance_df['Evaporation (cu.m)'], '-g', label='Evaporation (cu.m)' )
ax3.tick_params(axis='x',
which='both',
top='off',
bottom='off',
labeltop='off')
# ax3.xaxis.tick_bottom()
ax3.yaxis.tick_right()
fig.autofmt_xdate(rotation=90)
# lns = line1+line3
# labs = [l.get_label() for l in lns]
# ax3.legend(lns, labs, loc='upper center', fancybox=True, ncol=3, bbox_to_anchor=(0.5, 1.15))
# ax3.set_xlim([min(dry_water_balance_df.index), max(dry_water_balance_df.index)])
fig.subplots_adjust(right=0.8)
cbar_ax = fig.add_axes([0.85, 0.50, 0.05, 0.3])  # [left, bottom, width, height] as fractions of the figure
# cax, kw = mpl.colorbar.make_axes([ax for ax in ax1.flat()])
cbar = fig.colorbar(line2, cax=cbar_ax)
cbar.ax.set_ylabel('Stage (m)')
plt.savefig('/media/kiruba/New Volume/ACCUWA_Data/python_plots/check_dam_591/dry_rain_infiltration_stage_591')
plt.show()
"""
Fitting exponential function
"""
# stage_cal = dry_water_balance_df['stage(m)']
stage_cal = dry_water_balance_df['average_stage_m']
inf_cal = dry_water_balance_df['infiltration(cu.m)']
def func(h, alpha, beta):
return alpha*(h**beta)
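# Hedged example of the power-law form (hypothetical parameters, not fitted values):
# func(4.0, 5.0, 0.5) = 5.0 * 4.0**0.5 = 10.0.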
popt, pcov = curve_fit(func, stage_cal, inf_cal)
print popt
print pcov
# print np.diag(pcov)
print np.sqrt(np.diag(pcov))
# plot
stage_cal_new = np.linspace(min(stage_cal), max(stage_cal), 50)
inf_cal_new = func(stage_cal_new, *popt)
fig = plt.figure(figsize=(11.69, 8.27))
plt.plot(stage_cal, inf_cal, 'bo', label=r'Observation')
plt.plot(stage_cal_new, inf_cal_new, 'r-', label='Prediction')
plt.vlines(1.9, 0, max(inf_cal), 'g')
plt.hlines(0, min(stage_cal), max(stage_cal), 'y')
plt.legend(loc='upper left')
plt.xlabel(r'\textbf{Stage} (m)')
plt.ylabel(r'\textbf{Infiltration} ($m^3/day$)')
plt.title(r"No inflow day's stage - infiltration relationship for 591 check dam")
plt.text(x=0.15, y=40, fontsize=15, s=r'$Infiltration = {0:.2f}{{h_{{av}}}}^{{{1:.2f}}}$'.format(popt[0], popt[1]))
plt.savefig('/media/kiruba/New Volume/ACCUWA_Data/python_plots/check_dam_591/stage_inf_exp_dry_591')
# plt.show()
# print dry_water_balance_df
# print dry_water_balance_df[dry_water_balance_df['infiltration(cu.m)'] < 0]
# plot rainfall vs stage
fig, ax1 = plt.subplots(figsize=(11.69, 8.27))
ax1.bar(water_balance_df.index, water_balance_df['Rain Collection (mm)'], 0.35, color='b', label=r'Rainfall(mm)')
plt.gca().invert_yaxis()
for t1 in ax1.get_yticklabels():
t1.set_color('b')
ax1.set_ylabel('Rainfall(mm)')
plt.legend(loc='upper left')
ax2 = ax1.twinx()
ax2.plot_date(water_balance_df.index, water_balance_df['stage(m)'], 'r', label='stage (m)')
for t1 in ax2.get_yticklabels():
t1.set_color('r')
plt.legend(loc='upper right')
fig.autofmt_xdate(rotation=90)
# plt.show()
"""
Rainy day infiltration
"""
rain_water_balance_df['infiltration(cu.m)'] = popt[0]*(rain_water_balance_df['average_stage_m']**popt[1])
fig = plt.figure(figsize=(11.69, 8.27))
plt.plot(rain_water_balance_df['average_stage_m'], rain_water_balance_df['infiltration(cu.m)'], 'bo', label='Predicted Infiltration' )
# plt.vlines(1.9, 0, 100, 'g')
# plt.xlim([-1, 2.0])
# plt.legend(loc='upper left')
plt.xlabel(r'\textbf{Stage} (m)')
plt.ylabel(r'\textbf{Infiltration} ($m^3/day$)')
plt.title(r"Inflow day's stage - infiltration relationship for 591 check dam")
plt.show()
"""
Inflow calculation
"""
# print dry_water_balance_df.head()
dry_water_balance_df['status'] = 'D'
rain_water_balance_df['status'] = 'R'
# dry_water_balance_df = dry_water_balance_df.drop(['Evaporation (mm/day)', 'ws_area(sq.m)'], inplace=True, axis=1)
# rain_water_balance_df = rain_water_balance_df.drop(['Evaporation (mm/day)', 'ws_area(sq.m)'], inplace=True, axis=1)
# merged_table = dry_water_balance_df.join(rain_water_balance_df, how='right')
# print rain_water_balance_df.head()
dry_water_balance_df.to_csv('/media/kiruba/New Volume/ACCUWA_Data/Checkdam_water_balance/591/dry_wb.CSV')
rain_water_balance_df.to_csv('/media/kiruba/New Volume/ACCUWA_Data/Checkdam_water_balance/591/rain_wb.CSV') | tejasckulkarni/hydrology | ch_591/ch_591_water_balance.py | Python | gpl-3.0 | 28,192 |
from __future__ import absolute_import, print_function
import logging
logger = logging.getLogger(__name__)
from . import _glyph_functions as gf
from .models import Axis, Grid, GridPlot, Legend, LogAxis, Plot
from .plotting_helpers import (
_list_attr_splat, _get_range, _get_axis_class, _get_num_minor_ticks, _process_tools_arg
)
# extra imports -- just things to add to 'from plotting import *'
from .document import Document
from .models import ColumnDataSource
from .session import Session
from .io import (
curdoc, cursession, output_file, output_notebook, output_server, push,
reset_output, save, show, gridplot, hplot, vplot)
# Names that we want in this namespace (fool pyflakes)
(GridPlot, Document, ColumnDataSource, Session, cursession, gridplot,
show, save, reset_output, push, output_file, output_notebook,
output_server, vplot, hplot)
DEFAULT_TOOLS = "pan,wheel_zoom,box_zoom,save,resize,reset,help"
class Figure(Plot):
''' A subclass of :class:`~bokeh.models.plots.Plot` that simplifies plot
creation with default axes, grids, tools, etc.
'''
__subtype__ = "Figure"
__view_model__ = "Plot"
def __init__(self, *arg, **kw):
tools = kw.pop("tools", DEFAULT_TOOLS)
x_range = kw.pop("x_range", None)
y_range = kw.pop("y_range", None)
x_axis_type = kw.pop("x_axis_type", "auto")
y_axis_type = kw.pop("y_axis_type", "auto")
x_minor_ticks = kw.pop('x_minor_ticks', 'auto')
y_minor_ticks = kw.pop('y_minor_ticks', 'auto')
x_axis_location = kw.pop("x_axis_location", "below")
y_axis_location = kw.pop("y_axis_location", "left")
x_axis_label = kw.pop("x_axis_label", "")
y_axis_label = kw.pop("y_axis_label", "")
super(Figure, self).__init__(*arg, **kw)
self.x_range = _get_range(x_range)
self.y_range = _get_range(y_range)
x_axiscls = _get_axis_class(x_axis_type, self.x_range)
if x_axiscls:
if x_axiscls is LogAxis:
self.x_mapper_type = 'log'
xaxis = x_axiscls(plot=self)
xaxis.ticker.num_minor_ticks = _get_num_minor_ticks(x_axiscls, x_minor_ticks)
axis_label = x_axis_label
if axis_label:
xaxis.axis_label = axis_label
xgrid = Grid(plot=self, dimension=0, ticker=xaxis.ticker); xgrid
if x_axis_location == "above":
self.above.append(xaxis)
elif x_axis_location == "below":
self.below.append(xaxis)
y_axiscls = _get_axis_class(y_axis_type, self.y_range)
if y_axiscls:
if y_axiscls is LogAxis:
self.y_mapper_type = 'log'
yaxis = y_axiscls(plot=self)
yaxis.ticker.num_minor_ticks = _get_num_minor_ticks(y_axiscls, y_minor_ticks)
axis_label = y_axis_label
if axis_label:
yaxis.axis_label = axis_label
ygrid = Grid(plot=self, dimension=1, ticker=yaxis.ticker); ygrid
if y_axis_location == "left":
self.left.append(yaxis)
elif y_axis_location == "right":
self.right.append(yaxis)
tool_objs = _process_tools_arg(self, tools)
self.add_tools(*tool_objs)
def _axis(self, *sides):
objs = []
for s in sides:
objs.extend(getattr(self, s, []))
axis = [obj for obj in objs if isinstance(obj, Axis)]
return _list_attr_splat(axis)
@property
def xaxis(self):
""" Splattable list of :class:`~bokeh.models.axes.Axis` objects for the x dimension.
"""
return self._axis("above", "below")
@property
def yaxis(self):
""" Splattable list of :class:`~bokeh.models.axes.Axis` objects for the y dimension.
"""
return self._axis("left", "right")
@property
def axis(self):
""" Splattable list of :class:`~bokeh.models.axes.Axis` objects.
"""
return _list_attr_splat(self.xaxis + self.yaxis)
@property
def legend(self):
"""Splattable list of :class:`~bokeh.models.annotations.Legend` objects.
"""
legends = [obj for obj in self.renderers if isinstance(obj, Legend)]
return _list_attr_splat(legends)
def _grid(self, dimension):
grid = [obj for obj in self.renderers if isinstance(obj, Grid) and obj.dimension==dimension]
return _list_attr_splat(grid)
@property
def xgrid(self):
""" Splattable list of :class:`~bokeh.models.grids.Grid` objects for the x dimension.
"""
return self._grid(0)
@property
def ygrid(self):
""" Splattable list of :class:`~bokeh.models.grids.Grid` objects for the y dimension.
"""
return self._grid(1)
@property
def grid(self):
""" Splattable list of :class:`~bokeh.models.grids.Grid` objects.
"""
return _list_attr_splat(self.xgrid + self.ygrid)
annular_wedge = gf.annular_wedge
annulus = gf.annulus
arc = gf.arc
asterisk = gf.asterisk
bezier = gf.bezier
circle = gf.circle
circle_cross = gf.circle_cross
circle_x = gf.circle_x
cross = gf.cross
diamond = gf.diamond
diamond_cross = gf.diamond_cross
image = gf.image
image_rgba = gf.image_rgba
image_url = gf.image_url
inverted_triangle = gf.inverted_triangle
line = gf.line
multi_line = gf.multi_line
oval = gf.oval
patch = gf.patch
patches = gf.patches
quad = gf.quad
quadratic = gf.quadratic
ray = gf.ray
rect = gf.rect
segment = gf.segment
square = gf.square
square_cross = gf.square_cross
square_x = gf.square_x
text = gf.text
triangle = gf.triangle
wedge = gf.wedge
x = gf.x
def scatter(self, *args, **kwargs):
""" Creates a scatter plot of the given x and y items.
Args:
x (str or seq[float]) : values or field names of center x coordinates
y (str or seq[float]) : values or field names of center y coordinates
size (str or list[float]) : values or field names of sizes in screen units
marker (str, optional): a valid marker_type, defaults to "circle"
color (color value, optional): shorthand to set both fill and line color
source (:class:`~bokeh.models.sources.ColumnDataSource`) : a user-supplied data source.
If none is supplied, one is created for the user automatically.
**kwargs: :ref:`userguide_styling_line_properties` and :ref:`userguide_styling_fill_properties`
Examples:
>>> p.scatter([1,2,3],[4,5,6], fill_color="red")
>>> p.scatter("data1", "data2", source=data_source, ...)
"""
markertype = kwargs.pop("marker", "circle")
if markertype not in _marker_types:
raise ValueError("Invalid marker type '%s'. Use markers() to see a list of valid marker types." % markertype)
# TODO (bev) make better when plotting.scatter is removed
conversions = {
"*": "asterisk",
"+": "cross",
"o": "circle",
"ox": "circle_x",
"o+": "circle_cross"
}
if markertype in conversions:
markertype = conversions[markertype]
return getattr(self, markertype)(*args, **kwargs)
def figure(**kwargs):
''' Create a new :class:`~bokeh.plotting.Figure` for plotting, and add it to
the current document.
Returns:
Figure
'''
if 'plot_width' in kwargs and 'width' in kwargs:
raise ValueError("figure() called but both plot_width and width supplied, supply only one")
if 'plot_height' in kwargs and 'height' in kwargs:
raise ValueError("figure() called but both plot_height and height supplied, supply only one")
if 'height' in kwargs:
kwargs['plot_height'] = kwargs.pop('height')
if 'width' in kwargs:
kwargs['plot_width'] = kwargs.pop('width')
fig = Figure(**kwargs)
curdoc()._current_plot = fig
if curdoc().autoadd:
curdoc().add(fig)
return fig
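# Minimal usage sketch (illustrative, matching the aliasing handled above):
# figure(width=300, height=200) is treated the same as
# figure(plot_width=300, plot_height=200); passing both forms of the same
# dimension raises ValueError.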
_marker_types = [
"asterisk",
"circle",
"circle_cross",
"circle_x",
"cross",
"diamond",
"diamond_cross",
"inverted_triangle",
"square",
"square_x",
"square_cross",
"triangle",
"x",
"*",
"+",
"o",
"ox",
"o+",
]
def markers():
""" Prints a list of valid marker types for scatter()
Returns:
None
"""
print("Available markers: \n - " + "\n - ".join(_marker_types))
_color_fields = set(["color", "fill_color", "line_color"])
_alpha_fields = set(["alpha", "fill_alpha", "line_alpha"])
| khkaminska/bokeh | bokeh/plotting.py | Python | bsd-3-clause | 9,064 |
from __future__ import print_function
# Functions to import and export MT EDI files.
from SimPEG import mkvc
from scipy.constants import mu_0
from numpy.lib import recfunctions as recFunc
from .data_utils import rec_to_ndarr
# Import modules
import numpy as np
import os, sys, re
class EDIimporter:
"""
    A class to import EDI files.
"""
# Define data converters
# Convert Z[mV/km/nT] (as in EDI)to Z[V/A] SI unit
_impUnitEDI2SI = 4 * np.pi * 1e-4
    # Convert Z[V/A] SI unit to Z[mV/km/nT] (as in EDI)
_impUnitSI2EDI = 1.0 / _impUnitEDI2SI
# Properties
filesList = None
comps = None
# Hidden properties
_outEPSG = None # Project info
_2out = None # The projection operator
def __init__(self, EDIfilesList, compList=None, outEPSG=None):
# Set the fileList
self.filesList = EDIfilesList
# Set the components to import
if compList is None:
self.comps = [
"ZXXR",
"ZXYR",
"ZYXR",
"ZYYR",
"ZXXI",
"ZXYI",
"ZYXI",
"ZYYI",
"ZXX.VAR",
"ZXY.VAR",
"ZYX.VAR",
"ZYY.VAR",
]
else:
self.comps = compList
if outEPSG is not None:
self._outEPSG = outEPSG
def __call__(self, comps=None):
if comps is None:
return self._data
return self._data[comps]
def importFiles(self):
"""
        Function to import EDI files into an object.
"""
        # Constants that are needed for conversion of units
# Temp lists
tmpStaList = []
tmpCompList = ["freq", "x", "y", "z"]
tmpCompList.extend(self.comps)
# List of how to "rotate/shift" the data to comply with
shift_list = [["xx", "yy"], ["xy", "yx"], ["yx", "xy"], ["yy", "xx"]]
# Make the outarray
dtRI = [(compS.lower().replace(".", ""), float) for compS in tmpCompList]
# Loop through all the files
for nrEDI, EDIfile in enumerate(self.filesList):
# Read the file into a list of the lines
with open(EDIfile, "r") as fid:
EDIlines = fid.readlines()
# Find the location
latD, longD, elevM = _findLatLong(EDIlines)
            # Transform coordinates
transCoord = self._transfromPoints(longD, latD)
# Extract the name of the file (station)
EDIname = EDIfile.split(os.sep)[-1].split(".")[0]
# Arrange the data
staList = [EDIname, EDIfile, transCoord[0], transCoord[1], elevM[0]]
# Add to the station list
tmpStaList.extend(staList)
# Read the frequency data
freq = _findEDIcomp(">FREQ", EDIlines)
# Make the temporary rec array.
tArrRec = (np.nan * np.ones((len(freq), len(dtRI)))).view(dtRI)
tArrRec["freq"] = mkvc(freq, 2)
tArrRec["x"] = mkvc(np.ones((len(freq), 1)) * transCoord[0], 2)
tArrRec["y"] = mkvc(np.ones((len(freq), 1)) * transCoord[1], 2)
tArrRec["z"] = mkvc(np.ones((len(freq), 1)) * elevM[0], 2)
for comp in self.comps:
# Deal with converting units of the impedance tensor
if "Z" in comp:
unitConvert = self._impUnitEDI2SI
else:
unitConvert = 1
# Rotate the data since EDI x is *north, y *east but Simpeg
# uses x *east, y *north (* means internal reference frame)
key = [
comp.lower().replace(".", "").replace(s, t)
for s, t in shift_list
if s in comp.lower()
][0]
tArrRec[key] = mkvc(unitConvert * _findEDIcomp(">" + comp, EDIlines), 2)
# Make a masked array
mArrRec = np.ma.MaskedArray(
rec_to_ndarr(tArrRec), mask=np.isnan(rec_to_ndarr(tArrRec))
).view(dtype=tArrRec.dtype)
try:
outTemp = recFunc.stack_arrays((outTemp, mArrRec))
except NameError:
outTemp = mArrRec
# Assign the data
self._data = outTemp
# % Assign the data to the obj
# nOutData=length(obj.data);
# obj.data(nOutData+1:nOutData+length(TEMP.data),:) = TEMP.data;
def _transfromPoints(self, longD, latD):
# Import the coordinate projections
try:
import osr
except ImportError as e:
print(
(
"Could not import osr, missing the gdal"
+ "package\nCan not project coordinates"
)
)
raise e
        # Coordinate converter
if self._2out is None:
src = osr.SpatialReference()
src.ImportFromEPSG(4326)
out = osr.SpatialReference()
if self._outEPSG is None:
# Find the UTM EPSG number
Nnr = 700 if latD < 0.0 else 600
utmZ = int(1 + (longD + 180.0) / 6.0)
self._outEPSG = 32000 + Nnr + utmZ
out.ImportFromEPSG(self._outEPSG)
self._2out = osr.CoordinateTransformation(src, out)
        # Return the transformed point
return self._2out.TransformPoint(longD, latD)
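# Hedged worked example of the UTM EPSG choice above (hypothetical coordinates):
# longD = 77.5, latD = 13.3 -> utmZ = int(1 + (77.5 + 180.0)/6.0) = 43, Nnr = 600
# (northern hemisphere), so _outEPSG = 32000 + 600 + 43 = 32643 (WGS 84 / UTM zone 43N).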
# Hidden functions
def _findLatLong(fileLines):
latDMS = np.array(
fileLines[_findLine("LAT=", fileLines)[0]].split("=")[1].split()[0].split(":"),
float,
)
longDMS = np.array(
fileLines[_findLine("LONG=", fileLines)[0]].split("=")[1].split()[0].split(":"),
float,
)
elevM = np.array(
[fileLines[_findLine("ELEV=", fileLines)[0]].split("=")[1].split()[0]], float
)
# Convert to D.ddddd values
latS = np.sign(latDMS[0])
longS = np.sign(longDMS[0])
latD = latDMS[0] + latS * latDMS[1] / 60 + latS * latDMS[2] / 3600
longD = longDMS[0] + longS * longDMS[1] / 60 + longS * longDMS[2] / 3600
return latD, longD, elevM
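# Example conversion (hypothetical values): LAT=13:15:36.7 parses to
# 13 + 15/60 + 36.7/3600 ~ 13.2602 deg; for negative degrees the sign is propagated
# to the minutes and seconds terms, e.g. -13:15:36.7 -> ~ -13.2602 deg.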
def _findLine(comp, fileLines):
""" Find a line number in the file"""
# Line counter
c = 0
# List of indices for found lines
found = []
# Loop through all the lines
for line in fileLines:
if comp in line:
# Append if found
found.append(c)
        # Increase the counter
c += 1
# Return the found indices
return found
def _findEDIcomp(comp, fileLines, dt=float):
"""
Extract the data vector.
Returns a list of the data.
"""
# Find the data
headLine, indHead = [
(st, nr) for nr, st in enumerate(fileLines) if re.search(comp, st)
][0]
# Extract the data
nrVec = int(headLine.split("//")[-1])
c = 0
dataList = []
while c < nrVec:
indHead += 1
dataList.extend(fileLines[indHead].split())
c = len(dataList)
return np.array(dataList, dt)
| simpeg/simpeg | SimPEG/electromagnetics/natural_source/utils/edi_files_utils.py | Python | mit | 7,066 |
# -*- coding: utf-8 -*-
###############################################################################
#
# GetTags
# Retrieves a list of all tags and the number of bookmarks that use them.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class GetTags(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the GetTags Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(GetTags, self).__init__(temboo_session, '/Library/Delicious/GetTags')
def new_input_set(self):
return GetTagsInputSet()
def _make_result_set(self, result, path):
return GetTagsResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return GetTagsChoreographyExecution(session, exec_id, path)
class GetTagsInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the GetTags
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_Password(self, value):
"""
Set the value of the Password input for this Choreo. ((required, password) The password that corresponds to the specified Delicious account username.)
"""
super(GetTagsInputSet, self)._set_input('Password', value)
def set_Username(self, value):
"""
Set the value of the Username input for this Choreo. ((required, string) A valid Delicious account username.)
"""
super(GetTagsInputSet, self)._set_input('Username', value)
class GetTagsResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the GetTags Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. ((xml) The response returned from Delicious.)
"""
return self._output.get('Response', None)
class GetTagsChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return GetTagsResultSet(response, path)
| jordanemedlock/psychtruths | temboo/core/Library/Delicious/GetTags.py | Python | apache-2.0 | 3,138 |
import datetime
import lxml.etree
import lxml.builder
NS = {
'container': 'urn:oasis:names:tc:opendocument:xmlns:container',
'opf': 'http://www.idpf.org/2007/opf',
'ops': 'http://www.idpf.org/2007/ops',
'dc': 'http://purl.org/dc/elements/1.1/',
'ncx': 'http://www.daisy.org/z3986/2005/ncx/',
'html': 'http://www.w3.org/1999/xhtml',
}
RNS = {v: k for k, v in NS.items()}
E = {k: lxml.builder.ElementMaker(namespace=v, nsmap=NS) for k, v in NS.items()}
def getxmlattr(tag, attr):
if ':' in attr:
ns, attr = attr.split(':', 1)
return tag.get('{' + NS[ns] + '}' + attr)
else:
try:
return tag.attrib[attr]
except KeyError:
qname = lxml.etree.QName(tag.tag)
return tag.get('{' + RNS[qname.namespace] + '}' + attr)
def ns(name):
if ':' in name:
ns, name = name.split(':', 1)
return '{' + NS[ns] + '}' + name
else:
return name
def parse_date(d):
for p, l in (
('%Y-%m-%dT%H:%M:%SZ', 20),
('%Y-%m-%d', 10),
('%Y-%m', 7),
('%Y', 4),
):
if len(d) == l:
return datetime.datetime.strptime(d, p)
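# Illustrative examples (not from the source): parse_date('2014-07') matches the
# '%Y-%m' pattern (length 7) and returns datetime(2014, 7, 1); a string whose length
# matches no pattern falls through and the function returns None.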
| Glose/dawn | dawn/utils.py | Python | mit | 1,037 |
"""
NOTE: Since a documented API is nowhere to be found for Huomao, this plugin
simply extracts the video's stream_id, stream_url and stream_quality by
scraping the HTML and JS of one of Huomao's mobile webpages.
When viewing a stream on huomao.com, the base URL references a room_id. This
room_id is mapped one-to-one to a stream_id which references the actual .flv
video. Both stream_id, stream_url and stream_quality can be found in the
HTML and JS source of the mobile_page. Since one stream can occur in many
different qualities, we scrape all stream_url and stream_quality occurrences
and return each option to the user.
"""
import re
from streamlink.plugin import Plugin
from streamlink.plugin.api import http
from streamlink.stream import HTTPStream
# URL pattern for recognizing an input Huomao.tv / Huomao.com URL.
url_re = re.compile(r"""
(http(s)?://)?
(www\.)?
huomao
(\.tv|\.com)
/(?P<room_id>\d+)
""", re.VERBOSE)
# URL used to retrieve the stream_id, stream_url and stream_quality based on
# a room_id.
mobile_url = "http://www.huomao.com/mobile/mob_live/{0}"
# Pattern for extracting the stream_id from the mobile_url HTML.
#
# Example from HTML:
# <input id="html_stream" value="efmrCH" type="hidden">
stream_id_pattern = re.compile(r'id=\"html_stream\" value=\"(?P<stream_id>\w+)\"')
# Pattern for extracting each stream_url, stream_quality_url and a prettified
# stream_quality_name used for quality naming.
#
# Example from HTML:
# "2: 'http://live-ws.huomaotv.cn/live/'+stream+'_720/playlist.m3u8'"
stream_info_pattern = re.compile(r"""
[1-9]:
\s+
'(?P<stream_url>(?:\w|\.|:|-|/)+)
'\+stream\+'
(?P<stream_quality_url>_?(?P<stream_quality_name>\d*))
/playlist.m3u8'
""", re.VERBOSE)
class Huomao(Plugin):
@classmethod
def can_handle_url(self, url):
return url_re.match(url)
def get_stream_id(self, html):
"""Returns the stream_id contained in the HTML."""
stream_id = stream_id_pattern.search(html)
if not stream_id:
self.logger.error("Failed to extract stream_id.")
return stream_id.group("stream_id")
def get_stream_info(self, html):
"""Returns a nested list of different stream options.
Each entry in the list will contain a stream_url, stream_quality_url
and stream_quality_name for each stream occurence that was found in
the JS.
"""
stream_info = stream_info_pattern.findall(html)
if not stream_info:
self.logger.error("Failed to extract stream_info.")
# Rename the "" quality to "source" by transforming the tuples to a
# list and reassigning.
stream_info_list = []
for info in stream_info:
if not info[2]:
stream_info_list.append([info[0], info[1], "source"])
else:
stream_info_list.append(list(info))
return stream_info_list
def _get_streams(self):
room_id = url_re.search(self.url).group("room_id")
html = http.get(mobile_url.format(room_id))
stream_id = self.get_stream_id(html.text)
stream_info = self.get_stream_info(html.text)
streams = {}
for info in stream_info:
streams[info[2]] = HTTPStream(self.session,
info[0] + stream_id + info[1] + ".flv")
return streams
__plugin__ = Huomao
| mmetak/streamlink | src/streamlink/plugins/huomao.py | Python | bsd-2-clause | 3,403 |
# -*- coding: utf-8 -*-
"""Process tasks in several modes."""
import logging
from .. import config
LOGGER = logging.getLogger(__name__)
def serial(named_tasks, arg):
"""Serial calls."""
results = {}
for name, task in named_tasks:
try:
results[name] = task(arg)
except: # pragma: no cover
LOGGER.debug("No result in 'serial' for %s[%s](%s)",
task, name, arg)
results[name] = None
return results
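# Hedged usage sketch (hypothetical tasks): serial([('len', len), ('up', str.upper)], 'isbn')
# -> {'len': 4, 'up': 'ISBN'}; parallel() and multi() below return the same mapping but
# run the tasks in threads and processes respectively.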
def parallel(named_tasks, arg):
"""Threaded calls."""
from threading import Thread
results = {}
def _worker(name, task, arg):
try:
results[name] = task(arg)
except: # pragma: no cover
LOGGER.debug("No result in 'parallel' for %s[%s](%s)",
task, name, arg)
results[name] = None
for name, task in named_tasks:
t = Thread(target=_worker, args=(name, task, arg))
t.start()
t.join(config.THREADS_TIMEOUT)
return results
def multi(named_tasks, arg):
"""Multiprocessing: using several cores (if available)."""
from multiprocessing import Process, Queue
results = {}
q = Queue()
def _worker(name, task, arg, q):
try: # pragma: no cover
q.put((name, task(arg)))
except: # pragma: no cover
LOGGER.debug("No result in 'multi' for %s[%s](%s)",
task, name, arg)
q.put((name, None))
for name, task in named_tasks:
p = Process(target=_worker, args=(name, task, arg, q))
p.start()
p.join(config.THREADS_TIMEOUT)
q.put('STOP')
while True:
el = q.get()
if el == 'STOP':
break
results[el[0]] = el[1]
return results
| jonasjberg/autonameow | autonameow/vendor/isbnlib/dev/vias.py | Python | gpl-2.0 | 1,808 |
__all__ = ['savetxt', 'loadtxt',
'genfromtxt', 'ndfromtxt', 'mafromtxt', 'recfromtxt', 'recfromcsv',
'load', 'loads',
'save', 'savez',
'packbits', 'unpackbits',
'fromregex',
'DataSource']
import numpy as np
import format
import cStringIO
import os
import itertools
import warnings
from operator import itemgetter
from cPickle import load as _cload, loads
from _datasource import DataSource
from _compiled_base import packbits, unpackbits
from _iotools import LineSplitter, NameValidator, StringConverter, \
ConverterError, ConverterLockError, ConversionWarning, \
_is_string_like, has_nested_fields, flatten_dtype, \
easy_dtype
_file = file
_string_like = _is_string_like
def seek_gzip_factory(f):
"""Use this factory to produce the class so that we can do a lazy
import on gzip.
"""
import gzip, new
def seek(self, offset, whence=0):
# figure out new position (we can only seek forwards)
if whence == 1:
offset = self.offset + offset
if whence not in [0, 1]:
raise IOError, "Illegal argument"
if offset < self.offset:
# for negative seek, rewind and do positive seek
self.rewind()
count = offset - self.offset
for i in range(count // 1024):
self.read(1024)
self.read(count % 1024)
def tell(self):
return self.offset
if isinstance(f, str):
f = gzip.GzipFile(f)
f.seek = new.instancemethod(seek, f)
f.tell = new.instancemethod(tell, f)
return f
class BagObj(object):
"""
BagObj(obj)
Convert attribute lookups to getitems on the object passed in.
Parameters
----------
obj : class instance
Object on which attribute lookup is performed.
Examples
--------
>>> class BagDemo(object):
... def __getitem__(self, key):
... return key
...
>>> demo_obj = BagDemo()
>>> bagobj = np.lib.io.BagObj(demo_obj)
>>> bagobj.some_item
'some_item'
"""
def __init__(self, obj):
self._obj = obj
def __getattribute__(self, key):
try:
return object.__getattribute__(self, '_obj')[key]
except KeyError:
raise AttributeError, key
class NpzFile(object):
"""
NpzFile(fid)
A dictionary-like object with lazy-loading of files in the zipped
archive provided on construction.
`NpzFile` is used to load files in the NumPy ``.npz`` data archive
format. It assumes that files in the archive have a ".npy" extension,
other files are ignored.
The arrays and file strings are lazily loaded on either
getitem access using ``obj['key']`` or attribute lookup using
``obj.f.key``. A list of all files (without ".npy" extensions) can
be obtained with ``obj.files`` and the ZipFile object itself using
``obj.zip``.
Attributes
----------
files : list of str
List of all files in the archive with a ".npy" extension.
zip : ZipFile instance
The ZipFile object initialized with the zipped archive.
f : BagObj instance
An object on which attribute can be performed as an alternative
to getitem access on the `NpzFile` instance itself.
Parameters
----------
fid : file or str
The zipped archive to open. This is either a file-like object
or a string containing the path to the archive.
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> y = np.sin(x)
>>> np.savez(outfile, x=x, y=y)
>>> outfile.seek(0)
>>> npz = np.load(outfile)
>>> isinstance(npz, np.lib.io.NpzFile)
True
>>> npz.files
['y', 'x']
>>> npz['x'] # getitem access
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> npz.f.x # attribute lookup
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
def __init__(self, fid):
# Import is postponed to here since zipfile depends on gzip, an optional
# component of the so-called standard library.
import zipfile
_zip = zipfile.ZipFile(fid)
self._files = _zip.namelist()
self.files = []
for x in self._files:
if x.endswith('.npy'):
self.files.append(x[:-4])
else:
self.files.append(x)
self.zip = _zip
self.f = BagObj(self)
def __getitem__(self, key):
# FIXME: This seems like it will copy strings around
# more than is strictly necessary. The zipfile
# will read the string and then
# the format.read_array will copy the string
# to another place in memory.
# It would be better if the zipfile could read
# (or at least uncompress) the data
# directly into the array memory.
member = 0
if key in self._files:
member = 1
elif key in self.files:
member = 1
key += '.npy'
if member:
bytes = self.zip.read(key)
if bytes.startswith(format.MAGIC_PREFIX):
value = cStringIO.StringIO(bytes)
return format.read_array(value)
else:
return bytes
else:
raise KeyError, "%s is not a file in the archive" % key
def __iter__(self):
return iter(self.files)
def items(self):
"""
Return a list of tuples, with each tuple (filename, array in file).
"""
return [(f, self[f]) for f in self.files]
def iteritems(self):
"""Generator that returns tuples (filename, array in file)."""
for f in self.files:
yield (f, self[f])
def keys(self):
"""Return files in the archive with a ".npy" extension."""
return self.files
def iterkeys(self):
"""Return an iterator over the files in the archive."""
return self.__iter__()
def __contains__(self, key):
return self.files.__contains__(key)
def load(file, mmap_mode=None):
"""
Load a pickled, ``.npy``, or ``.npz`` binary file.
Parameters
----------
file : file-like object or string
The file to read. It must support ``seek()`` and ``read()`` methods.
If the filename extension is ``.gz``, the file is first decompressed.
mmap_mode: {None, 'r+', 'r', 'w+', 'c'}, optional
If not None, then memory-map the file, using the given mode
(see `numpy.memmap`). The mode has no effect for pickled or
zipped files.
A memory-mapped array is stored on disk, and not directly loaded
into memory. However, it can be accessed and sliced like any
ndarray. Memory mapping is especially useful for accessing
small fragments of large files without reading the entire file
into memory.
Returns
-------
result : array, tuple, dict, etc.
Data stored in the file.
Raises
------
IOError
If the input file does not exist or cannot be read.
See Also
--------
save, savez, loadtxt
memmap : Create a memory-map to an array stored in a file on disk.
Notes
-----
- If the file contains pickle data, then whatever is stored in the
pickle is returned.
- If the file is a ``.npy`` file, then an array is returned.
- If the file is a ``.npz`` file, then a dictionary-like object is
returned, containing ``{filename: array}`` key-value pairs, one for
each file in the archive.
Examples
--------
Store data to disk, and load it again:
>>> np.save('/tmp/123', np.array([[1, 2, 3], [4, 5, 6]]))
>>> np.load('/tmp/123.npy')
array([[1, 2, 3],
[4, 5, 6]])
Mem-map the stored array, and then access the second row
directly from disk:
>>> X = np.load('/tmp/123.npy', mmap_mode='r')
>>> X[1, :]
memmap([4, 5, 6])
"""
import gzip
if isinstance(file, basestring):
fid = _file(file, "rb")
elif isinstance(file, gzip.GzipFile):
fid = seek_gzip_factory(file)
else:
fid = file
# Code to distinguish from NumPy binary files and pickles.
_ZIP_PREFIX = 'PK\x03\x04'
N = len(format.MAGIC_PREFIX)
magic = fid.read(N)
fid.seek(-N, 1) # back-up
if magic.startswith(_ZIP_PREFIX): # zip-file (assume .npz)
return NpzFile(fid)
elif magic == format.MAGIC_PREFIX: # .npy file
if mmap_mode:
return format.open_memmap(file, mode=mmap_mode)
else:
return format.read_array(fid)
else: # Try a pickle
try:
return _cload(fid)
except:
raise IOError, \
"Failed to interpret file %s as a pickle" % repr(file)
def save(file, arr):
"""
Save an array to a binary file in NumPy ``.npy`` format.
Parameters
----------
file : file or string
File or filename to which the data is saved. If the filename
does not already have a ``.npy`` extension, it is added.
arr : array_like
Array data to be saved.
See Also
--------
savez : Save several arrays into a ``.npz`` compressed archive
savetxt, load
Notes
-----
For a description of the ``.npy`` format, see `format`.
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> np.save(outfile, x)
>>> outfile.seek(0) # only necessary in this example (with tempfile)
>>> np.load(outfile)
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
if isinstance(file, basestring):
if not file.endswith('.npy'):
file = file + '.npy'
fid = open(file, "wb")
else:
fid = file
arr = np.asanyarray(arr)
format.write_array(fid, arr)
def savez(file, *args, **kwds):
"""
Save several arrays into a single, compressed file in ``.npz`` format.
If keyword arguments are given, the names for variables assigned to the
keywords are the keyword names (not the variable names in the caller).
If arguments are passed in with no keywords, the corresponding variable
names are arr_0, arr_1, etc.
Parameters
----------
file : str or file
Either the file name (string) or an open file (file-like object)
If file is a string, it names the output file. ".npz" will be appended
to the file name if it is not already there.
args : Arguments
Any function arguments other than the file name are variables to save.
Since it is not possible for Python to know their names outside
`savez`, they will be saved with names "arr_0", "arr_1", and so on.
These arguments can be any expression.
kwds : Keyword arguments
All keyword=value pairs cause the value to be saved with the name of
the keyword.
See Also
--------
save : Save a single array to a binary file in NumPy format.
savetxt : Save an array to a file as plain text.
Notes
-----
The ``.npz`` file format is a zipped archive of files named after the
variables they contain. Each file contains one variable in ``.npy``
format. For a description of the ``.npy`` format, see `format`.
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> y = np.sin(x)
Using `savez` with \\*args, the arrays are saved with default names.
>>> np.savez(outfile, x, y)
>>> outfile.seek(0) # only necessary in this example (with tempfile)
>>> npz = np.load(outfile)
>>> npz.files
['arr_1', 'arr_0']
>>> npz['arr_0']
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
Using `savez` with \\*\\*kwds, the arrays are saved with the keyword names.
>>> outfile = TemporaryFile()
>>> np.savez(outfile, x=x, y=y)
>>> outfile.seek(0)
>>> npz = np.load(outfile)
>>> npz.files
['y', 'x']
>>> npz['x']
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
# Import is postponed to here since zipfile depends on gzip, an optional
# component of the so-called standard library.
import zipfile
# Import deferred for startup time improvement
import tempfile
if isinstance(file, basestring):
if not file.endswith('.npz'):
file = file + '.npz'
namedict = kwds
for i, val in enumerate(args):
key = 'arr_%d' % i
if key in namedict.keys():
raise ValueError, "Cannot use un-named variables and keyword %s" % key
namedict[key] = val
zip = zipfile.ZipFile(file, mode="w")
# Stage arrays in a temporary file on disk, before writing to zip.
fd, tmpfile = tempfile.mkstemp(suffix='-numpy.npy')
os.close(fd)
try:
for key, val in namedict.iteritems():
fname = key + '.npy'
fid = open(tmpfile, 'wb')
try:
format.write_array(fid, np.asanyarray(val))
fid.close()
fid = None
zip.write(tmpfile, arcname=fname)
finally:
if fid:
fid.close()
finally:
os.remove(tmpfile)
zip.close()
# Adapted from matplotlib
def _getconv(dtype):
typ = dtype.type
if issubclass(typ, np.bool_):
return lambda x: bool(int(x))
if issubclass(typ, np.integer):
return lambda x: int(float(x))
elif issubclass(typ, np.floating):
return float
elif issubclass(typ, np.complex):
return complex
else:
return str
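# Illustrative behaviour (not from the docstring): _getconv(np.dtype(int))('3.0') -> 3,
# _getconv(np.dtype(float))('1e-3') -> 0.001, and _getconv(np.dtype(bool))('0') -> False.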
def loadtxt(fname, dtype=float, comments='#', delimiter=None, converters=None,
skiprows=0, usecols=None, unpack=False):
"""
Load data from a text file.
Each row in the text file must have the same number of values.
Parameters
----------
fname : file or str
File or filename to read. If the filename extension is ``.gz`` or
``.bz2``, the file is first decompressed.
dtype : dtype, optional
Data type of the resulting array. If this is a record data-type,
the resulting array will be 1-dimensional, and each row will be
interpreted as an element of the array. In this case, the number
of columns used must match the number of fields in the data-type.
comments : str, optional
The character used to indicate the start of a comment.
delimiter : str, optional
The string used to separate values. By default, this is any
whitespace.
converters : dict, optional
A dictionary mapping column number to a function that will convert
that column to a float. E.g., if column 0 is a date string:
``converters = {0: datestr2num}``. Converters can also be used to
provide a default value for missing data:
``converters = {3: lambda s: float(s or 0)}``.
skiprows : int, optional
Skip the first `skiprows` lines.
usecols : sequence, optional
Which columns to read, with 0 being the first. For example,
``usecols = (1,4,5)`` will extract the 2nd, 5th and 6th columns.
unpack : bool, optional
If True, the returned array is transposed, so that arguments may be
unpacked using ``x, y, z = loadtxt(...)``. Default is False.
Returns
-------
out : ndarray
Data read from the text file.
See Also
--------
load, fromstring, fromregex
scipy.io.loadmat : reads Matlab(R) data files
Examples
--------
>>> from StringIO import StringIO # StringIO behaves like a file object
>>> c = StringIO("0 1\\n2 3")
>>> np.loadtxt(c)
array([[ 0., 1.],
[ 2., 3.]])
>>> d = StringIO("M 21 72\\nF 35 58")
>>> np.loadtxt(d, dtype={'names': ('gender', 'age', 'weight'),
... 'formats': ('S1', 'i4', 'f4')})
array([('M', 21, 72.0), ('F', 35, 58.0)],
dtype=[('gender', '|S1'), ('age', '<i4'), ('weight', '<f4')])
>>> c = StringIO("1,0,2\\n3,0,4")
>>> x, y = np.loadtxt(c, delimiter=',', usecols=(0, 2), unpack=True)
>>> x
array([ 1., 3.])
>>> y
array([ 2., 4.])
"""
user_converters = converters
if usecols is not None:
usecols = list(usecols)
isstring = False
if _is_string_like(fname):
isstring = True
if fname.endswith('.gz'):
import gzip
fh = seek_gzip_factory(fname)
elif fname.endswith('.bz2'):
import bz2
fh = bz2.BZ2File(fname)
else:
fh = file(fname)
elif hasattr(fname, 'readline'):
fh = fname
else:
raise ValueError('fname must be a string or file handle')
X = []
def flatten_dtype(dt):
"""Unpack a structured data-type."""
if dt.names is None:
# If the dtype is flattened, return.
# If the dtype has a shape, the dtype occurs
# in the list more than once.
return [dt.base] * int(np.prod(dt.shape))
else:
types = []
for field in dt.names:
tp, bytes = dt.fields[field]
flat_dt = flatten_dtype(tp)
types.extend(flat_dt)
return types
def split_line(line):
"""Chop off comments, strip, and split at delimiter."""
line = line.split(comments)[0].strip()
if line:
return line.split(delimiter)
else:
return []
try:
# Make sure we're dealing with a proper dtype
dtype = np.dtype(dtype)
defconv = _getconv(dtype)
# Skip the first `skiprows` lines
for i in xrange(skiprows):
fh.readline()
# Read until we find a line with some values, and use
# it to estimate the number of columns, N.
first_vals = None
while not first_vals:
first_line = fh.readline()
if first_line == '': # EOF reached
raise IOError('End-of-file reached before encountering data.')
first_vals = split_line(first_line)
N = len(usecols or first_vals)
dtype_types = flatten_dtype(dtype)
if len(dtype_types) > 1:
# We're dealing with a structured array, each field of
# the dtype matches a column
converters = [_getconv(dt) for dt in dtype_types]
else:
# All fields have the same dtype
converters = [defconv for i in xrange(N)]
# By preference, use the converters specified by the user
for i, conv in (user_converters or {}).iteritems():
if usecols:
try:
i = usecols.index(i)
except ValueError:
# Unused converter specified
continue
converters[i] = conv
# Parse each line, including the first
for i, line in enumerate(itertools.chain([first_line], fh)):
vals = split_line(line)
if len(vals) == 0:
continue
if usecols:
vals = [vals[i] for i in usecols]
# Convert each value according to its column and store
X.append(tuple([conv(val) for (conv, val) in zip(converters, vals)]))
finally:
if isstring:
fh.close()
if len(dtype_types) > 1:
# We're dealing with a structured array, with a dtype such as
# [('x', int), ('y', [('s', int), ('t', float)])]
#
# First, create the array using a flattened dtype:
# [('x', int), ('s', int), ('t', float)]
#
# Then, view the array using the specified dtype.
try:
X = np.array(X, dtype=np.dtype([('', t) for t in dtype_types]))
X = X.view(dtype)
except TypeError:
# In the case we have an object dtype
X = np.array(X, dtype=dtype)
else:
X = np.array(X, dtype)
X = np.squeeze(X)
if unpack:
return X.T
else:
return X
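# A brief sketch of the `converters` hook documented above, assuming the same
# StringIO import used in the docstring examples; the default of 0 for the
# missing field is an arbitrary choice:
# >>> s = StringIO("1, ,3\n4,5,6")
# >>> np.loadtxt(s, delimiter=',', converters={1: lambda v: float(v.strip() or 0)})
# array([[ 1.,  0.,  3.],
#        [ 4.,  5.,  6.]])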
def savetxt(fname, X, fmt='%.18e', delimiter=' '):
"""
Save an array to a text file.
Parameters
----------
fname : filename or file handle
If the filename ends in ``.gz``, the file is automatically saved in
compressed gzip format. `loadtxt` understands gzipped files
transparently.
X : array_like
Data to be saved to a text file.
fmt : str or sequence of strs
A single format (%10.5f), a sequence of formats, or a
multi-format string, e.g. 'Iteration %d -- %10.5f', in which
case `delimiter` is ignored.
delimiter : str
Character separating columns.
See Also
--------
save : Save an array to a binary file in NumPy ``.npy`` format
savez : Save several arrays into a ``.npz`` compressed archive
Notes
-----
Further explanation of the `fmt` parameter
(``%[flag]width[.precision]specifier``):
flags:
``-`` : left justify
        ``+`` : Forces the result to be preceded by + or -.
``0`` : Left pad the number with zeros instead of space (see width).
width:
Minimum number of characters to be printed. The value is not truncated
if it has more characters.
precision:
        - For integer specifiers (e.g. ``d,i,o,x``), the minimum number of
digits.
- For ``e, E`` and ``f`` specifiers, the number of digits to print
after the decimal point.
- For ``g`` and ``G``, the maximum number of significant digits.
- For ``s``, the maximum number of characters.
specifiers:
``c`` : character
``d`` or ``i`` : signed decimal integer
``e`` or ``E`` : scientific notation with ``e`` or ``E``.
``f`` : decimal floating point
``g,G`` : use the shorter of ``e,E`` or ``f``
``o`` : signed octal
``s`` : string of characters
``u`` : unsigned decimal integer
``x,X`` : unsigned hexadecimal integer
    This explanation of ``fmt`` is not complete; for an exhaustive
    specification see [1]_.
References
----------
.. [1] `Format Specification Mini-Language
<http://docs.python.org/library/string.html#
format-specification-mini-language>`_, Python Documentation.
Examples
--------
    >>> np.savetxt('test.out', x, delimiter=',')   # x is an array
    >>> np.savetxt('test.out', (x,y,z))   # x,y,z equal sized 1D arrays
    >>> np.savetxt('test.out', x, fmt='%1.4e')   # use exponential notation
"""
if _is_string_like(fname):
if fname.endswith('.gz'):
import gzip
fh = gzip.open(fname, 'wb')
else:
fh = file(fname, 'w')
elif hasattr(fname, 'seek'):
fh = fname
else:
raise ValueError('fname must be a string or file handle')
X = np.asarray(X)
# Handle 1-dimensional arrays
if X.ndim == 1:
# Common case -- 1d array of numbers
if X.dtype.names is None:
X = np.atleast_2d(X).T
ncol = 1
# Complex dtype -- each field indicates a separate column
else:
ncol = len(X.dtype.descr)
else:
ncol = X.shape[1]
# `fmt` can be a string with multiple insertion points or a list of formats.
    # E.g. '%10.5f\t%10d' or ('%10.5f', '%10d')
if type(fmt) in (list, tuple):
if len(fmt) != ncol:
raise AttributeError('fmt has wrong shape. %s' % str(fmt))
format = delimiter.join(fmt)
elif type(fmt) is str:
if fmt.count('%') == 1:
fmt = [fmt, ]*ncol
format = delimiter.join(fmt)
elif fmt.count('%') != ncol:
raise AttributeError('fmt has wrong number of %% formats. %s'
% fmt)
else:
format = fmt
for row in X:
fh.write(format % tuple(row) + '\n')
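# A minimal sketch of the per-column format handling above; the file name and
# formats are illustrative assumptions:
# >>> data = np.array([[1, 2.5], [3, 4.75]])
# >>> np.savetxt('mixed.txt', data, fmt=('%d', '%.2f'), delimiter=';')
# # each row is written as "<int>;<float>", e.g. "1;2.50"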
import re
def fromregex(file, regexp, dtype):
"""
Construct an array from a text file, using regular expression parsing.
The returned array is always a structured array, and is constructed from
all matches of the regular expression in the file. Groups in the regular
expression are converted to fields of the structured array.
Parameters
----------
file : str or file
File name or file object to read.
regexp : str or regexp
Regular expression used to parse the file.
Groups in the regular expression correspond to fields in the dtype.
dtype : dtype or list of dtypes
Dtype for the structured array.
Returns
-------
output : ndarray
The output array, containing the part of the content of `file` that
was matched by `regexp`. `output` is always a structured array.
Raises
------
TypeError
When `dtype` is not a valid dtype for a structured array.
See Also
--------
fromstring, loadtxt
Notes
-----
Dtypes for structured arrays can be specified in several forms, but all
forms specify at least the data type and field name. For details see
`doc.structured_arrays`.
Examples
--------
>>> f = open('test.dat', 'w')
>>> f.write("1312 foo\\n1534 bar\\n444 qux")
>>> f.close()
>>> regexp = r"(\\d+)\\s+(...)" # match [digits, whitespace, anything]
>>> output = np.fromregex('test.dat', regexp,
    ...                       [('num', np.int64), ('key', 'S3')])
>>> output
array([(1312L, 'foo'), (1534L, 'bar'), (444L, 'qux')],
dtype=[('num', '<i8'), ('key', '|S3')])
>>> output['num']
array([1312, 1534, 444], dtype=int64)
"""
if not hasattr(file, "read"):
file = open(file, 'r')
if not hasattr(regexp, 'match'):
regexp = re.compile(regexp)
if not isinstance(dtype, np.dtype):
dtype = np.dtype(dtype)
seq = regexp.findall(file.read())
if seq and not isinstance(seq[0], tuple):
# Only one group is in the regexp.
# Create the new array as a single data-type and then
# re-interpret as a single-field structured array.
newdtype = np.dtype(dtype[dtype.names[0]])
output = np.array(seq, dtype=newdtype)
output.dtype = dtype
else:
output = np.array(seq, dtype=dtype)
return output
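# A compact sketch of the single-group branch above; the file name, pattern
# and field name are illustrative assumptions:
# >>> # with 'ids.txt' containing "id1 id2 id3"
# >>> np.fromregex('ids.txt', r"(id\d+)", [('label', 'S8')])
# array([('id1',), ('id2',), ('id3',)],
#       dtype=[('label', '|S8')])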
#####--------------------------------------------------------------------------
#---- --- ASCII functions ---
#####--------------------------------------------------------------------------
def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
skiprows=0, skip_header=0, skip_footer=0, converters=None,
missing='', missing_values=None, filling_values=None,
usecols=None, names=None, excludelist=None, deletechars=None,
autostrip=False, case_sensitive=True, defaultfmt="f%i",
unpack=None, usemask=False, loose=True, invalid_raise=True):
"""
Load data from a text file, with missing values handled as specified.
Each line past the first `skiprows` lines is split at the `delimiter`
character, and characters following the `comments` character are discarded.
Parameters
----------
fname : file or str
File or filename to read. If the filename extension is `.gz` or
`.bz2`, the file is first decompressed.
dtype : dtype, optional
Data type of the resulting array.
If None, the dtypes will be determined by the contents of each
column, individually.
comments : str, optional
The character used to indicate the start of a comment.
        All the characters occurring on a line after a comment are discarded.
delimiter : str, int, or sequence, optional
The string used to separate values. By default, any consecutive
whitespaces act as delimiter. An integer or sequence of integers
can also be provided as width(s) of each field.
    skip_header : int, optional
        The number of lines to skip at the beginning of the file.
    skip_footer : int, optional
        The number of lines to skip at the end of the file.
converters : variable or None, optional
The set of functions that convert the data of a column to a value.
The converters can also be used to provide a default value
for missing data: ``converters = {3: lambda s: float(s or 0)}``.
missing_values : variable or None, optional
The set of strings corresponding to missing data.
filling_values : variable or None, optional
The set of values to be used as default when the data are missing.
usecols : sequence or None, optional
Which columns to read, with 0 being the first. For example,
``usecols = (1, 4, 5)`` will extract the 2nd, 5th and 6th columns.
names : {None, True, str, sequence}, optional
If `names` is True, the field names are read from the first valid line
after the first `skiprows` lines.
        If `names` is a sequence or a single string of comma-separated names,
the names will be used to define the field names in a structured dtype.
If `names` is None, the names of the dtype fields will be used, if any.
excludelist : sequence, optional
A list of names to exclude. This list is appended to the default list
        ['return','file','print']. Excluded names have an underscore appended:
for example, `file` would become `file_`.
deletechars : str, optional
A string combining invalid characters that must be deleted from the
names.
defaultfmt : str, optional
A format used to define default field names, such as "f%i" or "f_%02i".
autostrip : bool, optional
Whether to automatically strip white spaces from the variables.
case_sensitive : {True, False, 'upper', 'lower'}, optional
If True, field names are case sensitive.
If False or 'upper', field names are converted to upper case.
If 'lower', field names are converted to lower case.
unpack : bool, optional
If True, the returned array is transposed, so that arguments may be
unpacked using ``x, y, z = loadtxt(...)``
usemask : bool, optional
If True, return a masked array.
If False, return a regular array.
invalid_raise : bool, optional
If True, an exception is raised if an inconsistency is detected in the
number of columns.
If False, a warning is emitted and the offending lines are skipped.
Returns
-------
out : ndarray
Data read from the text file. If `usemask` is True, this is a
masked array.
See Also
--------
numpy.loadtxt : equivalent function when no data is missing.
Notes
-----
* When spaces are used as delimiters, or when no delimiter has been given
as input, there should not be any missing data between two fields.
    * When the variables are named (either by a flexible dtype or with `names`),
there must not be any header in the file (else a ValueError
exception is raised).
* Individual values are not stripped of spaces by default.
When using a custom converter, make sure the function does remove spaces.
Examples
    --------
>>> from StringIO import StringIO
>>> import numpy as np
Comma delimited file with mixed dtype
>>> s = StringIO("1,1.3,abcde")
>>> data = np.genfromtxt(s, dtype=[('myint','i8'),('myfloat','f8'),
    ...     ('mystring','S5')], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
Using dtype = None
>>> s.seek(0) # needed for StringIO example only
>>> data = np.genfromtxt(s, dtype=None,
    ...     names = ['myint','myfloat','mystring'], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
Specifying dtype and names
>>> s.seek(0)
>>> data = np.genfromtxt(s, dtype="i8,f8,S5",
    ...     names=['myint','myfloat','mystring'], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
An example with fixed-width columns
>>> s = StringIO("11.3abcde")
>>> data = np.genfromtxt(s, dtype=None, names=['intvar','fltvar','strvar'],
    ...     delimiter=[1,3,5])
>>> data
array((1, 1.3, 'abcde'),
dtype=[('intvar', '<i8'), ('fltvar', '<f8'), ('strvar', '|S5')])
"""
#
if usemask:
from numpy.ma import MaskedArray, make_mask_descr
# Check the input dictionary of converters
user_converters = converters or {}
if not isinstance(user_converters, dict):
errmsg = "The input argument 'converter' should be a valid dictionary "\
"(got '%s' instead)"
raise TypeError(errmsg % type(user_converters))
# Initialize the filehandle, the LineSplitter and the NameValidator
if isinstance(fname, basestring):
fhd = np.lib._datasource.open(fname)
elif not hasattr(fname, 'read'):
raise TypeError("The input should be a string or a filehandle. "\
"(got %s instead)" % type(fname))
else:
fhd = fname
split_line = LineSplitter(delimiter=delimiter, comments=comments,
autostrip=autostrip)._handyman
validate_names = NameValidator(excludelist=excludelist,
deletechars=deletechars,
case_sensitive=case_sensitive)
# Get the first valid lines after the first skiprows ones ..
if skiprows:
warnings.warn("The use of `skiprows` is deprecated.\n"\
"Please use `skip_header` instead.",
DeprecationWarning)
skip_header = skiprows
# Skip the first `skip_header` rows
for i in xrange(skip_header):
fhd.readline()
# Keep on until we find the first valid values
first_values = None
while not first_values:
first_line = fhd.readline()
if first_line == '':
raise IOError('End-of-file reached before encountering data.')
if names is True:
if comments in first_line:
first_line = ''.join(first_line.split(comments)[1])
first_values = split_line(first_line)
# Should we take the first values as names ?
if names is True:
fval = first_values[0].strip()
if fval in comments:
del first_values[0]
# Check the columns to use
if usecols is not None:
try:
usecols = [_.strip() for _ in usecols.split(",")]
except AttributeError:
try:
usecols = list(usecols)
except TypeError:
usecols = [usecols, ]
nbcols = len(usecols or first_values)
# Check the names and overwrite the dtype.names if needed
if names is True:
names = validate_names([_.strip() for _ in first_values])
first_line = ''
elif _is_string_like(names):
names = validate_names([_.strip() for _ in names.split(',')])
elif names:
names = validate_names(names)
# Get the dtype
if dtype is not None:
dtype = easy_dtype(dtype, defaultfmt=defaultfmt, names=names)
names = dtype.names
# Make sure the names is a list (for 2.5)
if names is not None:
names = list(names)
if usecols:
for (i, current) in enumerate(usecols):
# if usecols is a list of names, convert to a list of indices
if _is_string_like(current):
usecols[i] = names.index(current)
elif current < 0:
usecols[i] = current + len(first_values)
# If the dtype is not None, make sure we update it
if (dtype is not None) and (len(dtype) > nbcols):
descr = dtype.descr
dtype = np.dtype([descr[_] for _ in usecols])
names = list(dtype.names)
# If `names` is not None, update the names
elif (names is not None) and (len(names) > nbcols):
names = [names[_] for _ in usecols]
# Process the missing values ...............................
# Rename missing_values for convenience
user_missing_values = missing_values or ()
# Define the list of missing_values (one column: one list)
missing_values = [list(['']) for _ in range(nbcols)]
# We have a dictionary: process it field by field
if isinstance(user_missing_values, dict):
# Loop on the items
for (key, val) in user_missing_values.items():
# Is the key a string ?
if _is_string_like(key):
try:
# Transform it into an integer
key = names.index(key)
except ValueError:
# We couldn't find it: the name must have been dropped, then
continue
# Redefine the key as needed if it's a column number
if usecols:
try:
key = usecols.index(key)
except ValueError:
pass
# Transform the value as a list of string
if isinstance(val, (list, tuple)):
val = [str(_) for _ in val]
else:
val = [str(val), ]
# Add the value(s) to the current list of missing
if key is None:
# None acts as default
for miss in missing_values:
miss.extend(val)
else:
missing_values[key].extend(val)
# We have a sequence : each item matches a column
elif isinstance(user_missing_values, (list, tuple)):
for (value, entry) in zip(user_missing_values, missing_values):
value = str(value)
if value not in entry:
entry.append(value)
# We have a string : apply it to all entries
elif isinstance(user_missing_values, basestring):
user_value = user_missing_values.split(",")
for entry in missing_values:
entry.extend(user_value)
# We have something else: apply it to all entries
else:
for entry in missing_values:
entry.extend([str(user_missing_values)])
# Process the deprecated `missing`
if missing != '':
warnings.warn("The use of `missing` is deprecated.\n"\
"Please use `missing_values` instead.",
DeprecationWarning)
values = [str(_) for _ in missing.split(",")]
for entry in missing_values:
entry.extend(values)
# Process the filling_values ...............................
# Rename the input for convenience
user_filling_values = filling_values or []
# Define the default
filling_values = [None] * nbcols
# We have a dictionary : update each entry individually
if isinstance(user_filling_values, dict):
for (key, val) in user_filling_values.items():
if _is_string_like(key):
try:
# Transform it into an integer
key = names.index(key)
except ValueError:
# We couldn't find it: the name must have been dropped, then
continue
# Redefine the key if it's a column number and usecols is defined
if usecols:
try:
key = usecols.index(key)
except ValueError:
pass
# Add the value to the list
filling_values[key] = val
# We have a sequence : update on a one-to-one basis
elif isinstance(user_filling_values, (list, tuple)):
n = len(user_filling_values)
if (n <= nbcols):
filling_values[:n] = user_filling_values
else:
filling_values = user_filling_values[:nbcols]
# We have something else : use it for all entries
else:
filling_values = [user_filling_values] * nbcols
# Initialize the converters ................................
if dtype is None:
# Note: we can't use a [...]*nbcols, as we would have 3 times the same
# ... converter, instead of 3 different converters.
converters = [StringConverter(None, missing_values=miss, default=fill)
for (miss, fill) in zip(missing_values, filling_values)]
else:
dtype_flat = flatten_dtype(dtype, flatten_base=True)
# Initialize the converters
if len(dtype_flat) > 1:
# Flexible type : get a converter from each dtype
zipit = zip(dtype_flat, missing_values, filling_values)
converters = [StringConverter(dt, locked=True,
missing_values=miss, default=fill)
for (dt, miss, fill) in zipit]
else:
# Set to a default converter (but w/ different missing values)
zipit = zip(missing_values, filling_values)
converters = [StringConverter(dtype, locked=True,
missing_values=miss, default=fill)
for (miss, fill) in zipit]
# Update the converters to use the user-defined ones
uc_update = []
for (i, conv) in user_converters.items():
# If the converter is specified by column names, use the index instead
if _is_string_like(i):
try:
i = names.index(i)
except ValueError:
continue
elif usecols:
try:
i = usecols.index(i)
except ValueError:
# Unused converter specified
continue
converters[i].update(conv, locked=True,
default=filling_values[i],
missing_values=missing_values[i],)
uc_update.append((i, conv))
# Make sure we have the corrected keys in user_converters...
user_converters.update(uc_update)
miss_chars = [_.missing_values for _ in converters]
# Initialize the output lists ...
# ... rows
rows = []
append_to_rows = rows.append
# ... masks
if usemask:
masks = []
append_to_masks = masks.append
# ... invalid
invalid = []
append_to_invalid = invalid.append
# Parse each line
for (i, line) in enumerate(itertools.chain([first_line, ], fhd)):
values = split_line(line)
nbvalues = len(values)
# Skip an empty line
if nbvalues == 0:
continue
# Select only the columns we need
if usecols:
try:
values = [values[_] for _ in usecols]
except IndexError:
append_to_invalid((i, nbvalues))
continue
elif nbvalues != nbcols:
append_to_invalid((i, nbvalues))
continue
# Store the values
append_to_rows(tuple(values))
if usemask:
append_to_masks(tuple([v.strip() in m
for (v, m) in zip(values, missing_values)]))
# Strip the last skip_footer data
if skip_footer > 0:
rows = rows[:-skip_footer]
if usemask:
masks = masks[:-skip_footer]
# Upgrade the converters (if needed)
if dtype is None:
for (i, converter) in enumerate(converters):
current_column = map(itemgetter(i), rows)
try:
converter.iterupgrade(current_column)
except ConverterLockError:
errmsg = "Converter #%i is locked and cannot be upgraded: " % i
current_column = itertools.imap(itemgetter(i), rows)
for (j, value) in enumerate(current_column):
try:
converter.upgrade(value)
except (ConverterError, ValueError):
errmsg += "(occurred line #%i for value '%s')"
errmsg %= (j + 1 + skip_header, value)
raise ConverterError(errmsg)
# Check that we don't have invalid values
if len(invalid) > 0:
nbrows = len(rows)
# Construct the error message
template = " Line #%%i (got %%i columns instead of %i)" % nbcols
if skip_footer > 0:
nbrows -= skip_footer
errmsg = [template % (i + skip_header + 1, nb)
for (i, nb) in invalid if i < nbrows]
else:
errmsg = [template % (i + skip_header + 1, nb)
for (i, nb) in invalid]
if len(errmsg):
errmsg.insert(0, "Some errors were detected !")
errmsg = "\n".join(errmsg)
# Raise an exception ?
if invalid_raise:
raise ValueError(errmsg)
# Issue a warning ?
else:
warnings.warn(errmsg, ConversionWarning)
# Convert each value according to the converter:
# We want to modify the list in place to avoid creating a new one...
# if loose:
# conversionfuncs = [conv._loose_call for conv in converters]
# else:
# conversionfuncs = [conv._strict_call for conv in converters]
# for (i, vals) in enumerate(rows):
# rows[i] = tuple([convert(val)
# for (convert, val) in zip(conversionfuncs, vals)])
if loose:
rows = zip(*(map(converter._loose_call, map(itemgetter(i), rows))
for (i, converter) in enumerate(converters)))
else:
rows = zip(*(map(converter._strict_call, map(itemgetter(i), rows))
for (i, converter) in enumerate(converters)))
# Reset the dtype
data = rows
if dtype is None:
# Get the dtypes from the types of the converters
column_types = [conv.type for conv in converters]
# Find the columns with strings...
strcolidx = [i for (i, v) in enumerate(column_types)
if v in (type('S'), np.string_)]
# ... and take the largest number of chars.
for i in strcolidx:
column_types[i] = "|S%i" % max(len(row[i]) for row in data)
#
if names is None:
# If the dtype is uniform, don't define names, else use ''
base = set([c.type for c in converters if c._checked])
if len(base) == 1:
(ddtype, mdtype) = (list(base)[0], np.bool)
else:
ddtype = [(defaultfmt % i, dt)
for (i, dt) in enumerate(column_types)]
if usemask:
mdtype = [(defaultfmt % i, np.bool)
for (i, dt) in enumerate(column_types)]
else:
ddtype = zip(names, column_types)
mdtype = zip(names, [np.bool] * len(column_types))
output = np.array(data, dtype=ddtype)
if usemask:
outputmask = np.array(masks, dtype=mdtype)
else:
# Overwrite the initial dtype names if needed
if names and dtype.names:
dtype.names = names
# Case 1. We have a structured type
if len(dtype_flat) > 1:
# Nested dtype, eg [('a', int), ('b', [('b0', int), ('b1', 'f4')])]
# First, create the array using a flattened dtype:
# [('a', int), ('b1', int), ('b2', float)]
# Then, view the array using the specified dtype.
if 'O' in (_.char for _ in dtype_flat):
if has_nested_fields(dtype):
errmsg = "Nested fields involving objects "\
"are not supported..."
raise NotImplementedError(errmsg)
else:
output = np.array(data, dtype=dtype)
else:
rows = np.array(data, dtype=[('', _) for _ in dtype_flat])
output = rows.view(dtype)
# Now, process the rowmasks the same way
if usemask:
rowmasks = np.array(masks,
dtype=np.dtype([('', np.bool)
for t in dtype_flat]))
# Construct the new dtype
mdtype = make_mask_descr(dtype)
outputmask = rowmasks.view(mdtype)
# Case #2. We have a basic dtype
else:
# We used some user-defined converters
if user_converters:
ishomogeneous = True
descr = []
for (i, ttype) in enumerate([conv.type for conv in converters]):
# Keep the dtype of the current converter
if i in user_converters:
ishomogeneous &= (ttype == dtype.type)
if ttype == np.string_:
ttype = "|S%i" % max(len(row[i]) for row in data)
descr.append(('', ttype))
else:
descr.append(('', dtype))
# So we changed the dtype ?
if not ishomogeneous:
# We have more than one field
if len(descr) > 1:
dtype = np.dtype(descr)
# We have only one field: drop the name if not needed.
else:
dtype = np.dtype(ttype)
#
output = np.array(data, dtype)
if usemask:
if dtype.names:
mdtype = [(_, np.bool) for _ in dtype.names]
else:
mdtype = np.bool
outputmask = np.array(masks, dtype=mdtype)
# Try to take care of the missing data we missed
if usemask and output.dtype.names:
for (name, conv) in zip(names or (), converters):
missing_values = [conv(_) for _ in conv.missing_values if _ != '']
for mval in missing_values:
outputmask[name] |= (output[name] == mval)
# Construct the final array
if usemask:
output = output.view(MaskedArray)
output._mask = outputmask
if unpack:
return output.squeeze().T
return output.squeeze()
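# A minimal sketch of the missing/filling machinery implemented above; the
# column names and the fill value of -1 are illustrative assumptions:
# >>> s = StringIO("1,,3\n4,5,")
# >>> np.genfromtxt(s, delimiter=',', names=['a', 'b', 'c'],
# ...               missing_values='', filling_values=-1)
# array([(1.0, -1.0, 3.0), (4.0, 5.0, -1.0)],
#       dtype=[('a', '<f8'), ('b', '<f8'), ('c', '<f8')])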
def ndfromtxt(fname, **kwargs):
"""
Load ASCII data stored in a file and return it as a single array.
Complete description of all the optional input parameters is available in
the docstring of the `genfromtxt` function.
See Also
--------
numpy.genfromtxt : generic function.
"""
kwargs['usemask'] = False
return genfromtxt(fname, **kwargs)
def mafromtxt(fname, **kwargs):
"""
Load ASCII data stored in a text file and return a masked array.
For a complete description of all the input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function to load ASCII data.
"""
kwargs['usemask'] = True
return genfromtxt(fname, **kwargs)
def recfromtxt(fname, **kwargs):
"""
Load ASCII data from a file and return it in a record array.
If ``usemask=False`` a standard `recarray` is returned,
if ``usemask=True`` a MaskedRecords array is returned.
Complete description of all the optional input parameters is available in
the docstring of the `genfromtxt` function.
See Also
--------
numpy.genfromtxt : generic function
Notes
-----
By default, `dtype` is None, which means that the data-type of the output
array will be determined from the data.
"""
kwargs.update(dtype=kwargs.get('dtype', None))
usemask = kwargs.get('usemask', False)
output = genfromtxt(fname, **kwargs)
if usemask:
from numpy.ma.mrecords import MaskedRecords
output = output.view(MaskedRecords)
else:
output = output.view(np.recarray)
return output
def recfromcsv(fname, **kwargs):
"""
Load ASCII data stored in a comma-separated file.
The returned array is a record array (if ``usemask=False``, see
`recarray`) or a masked record array (if ``usemask=True``,
see `ma.mrecords.MaskedRecords`).
For a complete description of all the input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function to load ASCII data.
"""
case_sensitive = kwargs.get('case_sensitive', "lower") or "lower"
names = kwargs.get('names', True)
if names is None:
names = True
    kwargs.update(dtype=kwargs.get('dtype', None),
delimiter=kwargs.get('delimiter', ",") or ",",
names=names,
case_sensitive=case_sensitive)
usemask = kwargs.get("usemask", False)
output = genfromtxt(fname, **kwargs)
if usemask:
from numpy.ma.mrecords import MaskedRecords
output = output.view(MaskedRecords)
else:
output = output.view(np.recarray)
return output
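# A short usage sketch for recfromcsv; the header line and values are
# illustrative assumptions:
# >>> s = StringIO("age,height\n21,1.82\n35,1.70")
# >>> r = recfromcsv(s)
# >>> r.age, r.height
# (array([21, 35]), array([ 1.82,  1.7 ]))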
| plaes/numpy | numpy/lib/io.py | Python | bsd-3-clause | 53,226 |
#!/usr/bin/env python2
from __future__ import print_function
import os
import os.path
import pkgutil
import shutil
import sys
import tempfile
__all__ = ["version", "bootstrap"]
_SETUPTOOLS_VERSION = "18.2"
_PIP_VERSION = "7.1.2"
# pip currently requires ssl support, so we try to provide a nicer
# error message when that is missing (http://bugs.python.org/issue19744)
_MISSING_SSL_MESSAGE = ("pip {} requires SSL/TLS".format(_PIP_VERSION))
try:
import ssl
except ImportError:
ssl = None
def _require_ssl_for_pip():
raise RuntimeError(_MISSING_SSL_MESSAGE)
else:
def _require_ssl_for_pip():
pass
_PROJECTS = [
("setuptools", _SETUPTOOLS_VERSION),
("pip", _PIP_VERSION),
]
def _run_pip(args, additional_paths=None):
# Add our bundled software to the sys.path so we can import it
if additional_paths is not None:
sys.path = additional_paths + sys.path
# Install the bundled software
import pip
pip.main(args)
def version():
"""
Returns a string specifying the bundled version of pip.
"""
return _PIP_VERSION
def _disable_pip_configuration_settings():
# We deliberately ignore all pip environment variables
# when invoking pip
# See http://bugs.python.org/issue19734 for details
keys_to_remove = [k for k in os.environ if k.startswith("PIP_")]
for k in keys_to_remove:
del os.environ[k]
# We also ignore the settings in the default pip configuration file
# See http://bugs.python.org/issue20053 for details
os.environ['PIP_CONFIG_FILE'] = os.devnull
def bootstrap(root=None, upgrade=False, user=False,
altinstall=False, default_pip=True,
verbosity=0):
"""
Bootstrap pip into the current Python installation (or the given root
directory).
Note that calling this function will alter both sys.path and os.environ.
"""
if altinstall and default_pip:
raise ValueError("Cannot use altinstall and default_pip together")
_require_ssl_for_pip()
_disable_pip_configuration_settings()
# By default, installing pip and setuptools installs all of the
# following scripts (X.Y == running Python version):
#
# pip, pipX, pipX.Y, easy_install, easy_install-X.Y
#
# pip 1.5+ allows ensurepip to request that some of those be left out
if altinstall:
# omit pip, pipX and easy_install
os.environ["ENSUREPIP_OPTIONS"] = "altinstall"
elif not default_pip:
# omit pip and easy_install
os.environ["ENSUREPIP_OPTIONS"] = "install"
tmpdir = tempfile.mkdtemp()
try:
# Put our bundled wheels into a temporary directory and construct the
# additional paths that need added to sys.path
additional_paths = []
for project, version in _PROJECTS:
wheel_name = "{}-{}-py2.py3-none-any.whl".format(project, version)
whl = pkgutil.get_data(
"ensurepip",
"_bundled/{}".format(wheel_name),
)
with open(os.path.join(tmpdir, wheel_name), "wb") as fp:
fp.write(whl)
additional_paths.append(os.path.join(tmpdir, wheel_name))
# Construct the arguments to be passed to the pip command
args = ["install", "--no-index", "--find-links", tmpdir]
if root:
args += ["--root", root]
if upgrade:
args += ["--upgrade"]
if user:
args += ["--user"]
if verbosity:
args += ["-" + "v" * verbosity]
_run_pip(args + [p[0] for p in _PROJECTS], additional_paths)
finally:
shutil.rmtree(tmpdir, ignore_errors=True)
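# A hedged example of driving the bootstrap from code instead of the
# "python -m ensurepip" command line; the root path is an illustrative
# assumption:
#
#   import ensurepip
#   ensurepip.bootstrap(root="/tmp/pip-root", upgrade=True, verbosity=1)
#
# which corresponds roughly to:
#
#   python -m ensurepip --root /tmp/pip-root --upgrade -v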
def _uninstall_helper(verbosity=0):
"""Helper to support a clean default uninstall process on Windows
Note that calling this function may alter os.environ.
"""
# Nothing to do if pip was never installed, or has been removed
try:
import pip
except ImportError:
return
# If the pip version doesn't match the bundled one, leave it alone
if pip.__version__ != _PIP_VERSION:
msg = ("ensurepip will only uninstall a matching version "
"({!r} installed, {!r} bundled)")
print(msg.format(pip.__version__, _PIP_VERSION), file=sys.stderr)
return
_require_ssl_for_pip()
_disable_pip_configuration_settings()
# Construct the arguments to be passed to the pip command
args = ["uninstall", "-y", "--disable-pip-version-check"]
if verbosity:
args += ["-" + "v" * verbosity]
_run_pip(args + [p[0] for p in reversed(_PROJECTS)])
def _main(argv=None):
if ssl is None:
print("Ignoring ensurepip failure: {}".format(_MISSING_SSL_MESSAGE),
file=sys.stderr)
return
import argparse
parser = argparse.ArgumentParser(prog="python -m ensurepip")
parser.add_argument(
"--version",
action="version",
version="pip {}".format(version()),
help="Show the version of pip that is bundled with this Python.",
)
parser.add_argument(
"-v", "--verbose",
action="count",
default=0,
dest="verbosity",
help=("Give more output. Option is additive, and can be used up to 3 "
"times."),
)
parser.add_argument(
"-U", "--upgrade",
action="store_true",
default=False,
help="Upgrade pip and dependencies, even if already installed.",
)
parser.add_argument(
"--user",
action="store_true",
default=False,
help="Install using the user scheme.",
)
parser.add_argument(
"--root",
default=None,
help="Install everything relative to this alternate root directory.",
)
parser.add_argument(
"--altinstall",
action="store_true",
default=False,
help=("Make an alternate install, installing only the X.Y versioned"
"scripts (Default: pipX, pipX.Y, easy_install-X.Y)"),
)
parser.add_argument(
"--default-pip",
action="store_true",
default=True,
dest="default_pip",
help=argparse.SUPPRESS,
)
parser.add_argument(
"--no-default-pip",
action="store_false",
dest="default_pip",
help=("Make a non default install, installing only the X and X.Y "
"versioned scripts."),
)
args = parser.parse_args(argv)
bootstrap(
root=args.root,
upgrade=args.upgrade,
user=args.user,
verbosity=args.verbosity,
altinstall=args.altinstall,
default_pip=args.default_pip,
)
| nmercier/linux-cross-gcc | win32/bin/Lib/ensurepip/__init__.py | Python | bsd-3-clause | 6,913 |
from .predicate import Predicate
from .report import Report
__author__ = 'Mikael Vind Mikkelsen'
__maintainer__ = 'Alexander Brandborg'
class ColumnNotNullPredicate(Predicate):
"""
Predicate for asserting that nulls do not exist in the columns of a table
"""
def __init__(self, table_name, column_names=None,
column_names_exclude=False):
"""
:param table_name: name of the table we are testing.
Can be given as a list of tables if we want a join.
:param column_names: set of column names
:param column_names_exclude: bool indicating if all columns not in
column_names should instead be used in the assertion.
"""
if isinstance(table_name, str):
self.table_name = [table_name]
else:
self.table_name = table_name
self.column_names = column_names
self.column_names_exclude = column_names_exclude
def run(self, dw_rep):
"""
Runs SQL to return all rows containing null.
:param dw_rep: A DWRepresentation object allowing us to access DW
:return: Report object to inform whether assertion held
"""
# Gets the columns to concern
chosen_columns = self.setup_columns(dw_rep, self.table_name,
self.column_names,
self.column_names_exclude)
# Generates and runs SQL for finding rows with null
null_condition_sql = (x + " IS NULL" for x in chosen_columns)
pred_sql = " SELECT * " + \
" FROM " + " NATURAL JOIN ".join(self.table_name) + \
" WHERE " + " OR ".join(null_condition_sql)
        cursor = dw_rep.connection.cursor()
        cursor.execute(pred_sql)
        query_result = cursor.fetchall()
        # Create dicts, so that attributes have names.
        # The column names must be read from the cursor before it is closed.
        names = [t[0] for t in cursor.description]
        cursor.close()
        dict_result = []
        for row in query_result:
            dict_result.append(dict(zip(names, row)))
        # The assertion holds exactly when no rows containing nulls were fetched
        self.__result__ = not dict_result
return Report(result=self.__result__,
predicate=self,
tables=self.table_name,
elements=dict_result,
msg=None)
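# A minimal sketch of how this predicate is meant to be driven; `dw_rep`
# (a DWRepresentation) and the table/column names are illustrative assumptions:
#
#   pred = ColumnNotNullPredicate('customer_dim', column_names=['name', 'email'])
#   report = pred.run(dw_rep)  # Report stating whether the no-null assertion held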
| Betaboxguugi/P6 | code/framework/predicates/column_not_null_predicate.py | Python | gpl-3.0 | 2,415 |
#!/usr/bin/env python
import unittest
import shutil
import os
import subprocess
import sys
from distutils.version import StrictVersion
def run(command):
""" run shell command & return unix exit code """
process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = process.communicate()
return(err, process.returncode)
class _01_CheckEnv(unittest.TestCase):
def setUp(self):
self.path_contents = []
for _ in os.environ['PATH'].strip(':').split(':'):
if os.path.isdir(_): self.path_contents += os.listdir(_)
self.python_contents = []
for _ in os.environ['PYTHONPATH'].strip(':').split(':'):
if os.path.isdir(_): self.python_contents += os.listdir(_)
def test_class(self):
self.assertTrue(
'run_midas.py' in self.path_contents,
msg="""\n\n'run_midas.py' not found in PATH environmental variable.\nMake sure '/path/to/MIDAS/scripts' has been added to your PATH:\nexport PATH=$PATH:/path/to/MIDAS/scripts"""
)
self.assertTrue(
'midas' in self.python_contents,
msg="""\n\n'midas' not found in PYTHONPATH environmental variable.\nMake sure '/path/to/MIDAS' has been added to your PYTHONPATH:\nexport PYTHONPATH=$PYTHONPATH:/path/to/MIDAS"""
)
self.assertTrue(
'MIDAS_DB' in os.environ,
msg="""\n\n'MIDAS_DB' environmental variable not set.\nSet this variable and rerun the test:\nexport MIDAS_DB=/path/to/midas_db_v1.1"""
)
class _02_ImportDependencies(unittest.TestCase):
def setUp(self):
self.failures = []
try: import numpy
except Exception: self.failures.append('numpy')
try: import pandas
except Exception: self.failures.append('pandas')
try: import pysam
except Exception: self.failures.append('pysam')
try: import midas
except Exception: self.failures.append('midas')
try: import Bio.SeqIO
except Exception: self.failures.append('Bio.SeqIO')
def test_class(self):
self.assertTrue(len(self.failures)==0,
msg="""\n\nThe following dependencies failed to import: %s.\nMake sure that dependencies have been properly installed""" % str(self.failures))
class _03_CheckVersions(unittest.TestCase):
def setUp(self):
self.modules = ['numpy', 'pandas', 'pysam', 'Bio.SeqIO']
self.installeds = [module.__version__ for module in map(__import__, self.modules)]
self.requireds = ['1.7.0', '0.17.1', '0.8.1', '1.6.2']
def test_class(self):
for module, installed, required in zip(self.modules, self.installeds, self.requireds):
if len(installed.split('.')) > 3:
installed = '.'.join(installed.split('.')[0:3])
self.assertTrue(
StrictVersion(installed) >= StrictVersion(required),
msg="""\n\nImported library '%s %s' is out of date. Required version is >= %s""" % (module, installed, required) )
class _04_HelpText(unittest.TestCase):
def test_class(self):
commands = [
'run_midas.py -h',
'run_midas.py species -h',
'run_midas.py genes -h',
'run_midas.py snps -h',
'merge_midas.py -h',
'merge_midas.py species -h',
'merge_midas.py genes -h',
'merge_midas.py snps -h']
for cmd in commands:
err, code = run(cmd)
self.assertTrue(code==0, msg=err)
class _05_RunSpecies(unittest.TestCase):
def test_class(self):
command = 'run_midas.py species ./sample -1 ./test.fq.gz -n 100'
err, code = run(command)
self.assertTrue(code==0, msg=err)
class _06_RunGenes(unittest.TestCase):
def test_class(self):
command = 'run_midas.py genes ./sample -1 ./test.fq.gz -n 100 --species_id Bacteroides_vulgatus_57955'
err, code = run(command)
self.assertTrue(code==0, msg=err)
class _07_RunSNPs(unittest.TestCase):
def test_class(self):
command = 'run_midas.py snps ./sample -1 ./test.fq.gz -n 100 --species_id Bacteroides_vulgatus_57955'
err, code = run(command)
self.assertTrue(code==0, msg=err)
class _08_MergeSpecies(unittest.TestCase):
def test_class(self):
command = 'merge_midas.py species ./species -i ./sample -t list'
err, code = run(command)
self.assertTrue(code==0, msg=err)
class _09_MergeGenes(unittest.TestCase):
def test_class(self):
command = 'merge_midas.py genes ./genes -i ./sample -t list --species_id Bacteroides_vulgatus_57955 --sample_depth 0.0'
err, code = run(command)
self.assertTrue(code==0, msg=err)
class _10_MergeSNPs(unittest.TestCase):
def test_class(self):
command = 'merge_midas.py snps ./snps -i ./sample -t list --species_id Bacteroides_vulgatus_57955 --all_samples --all_sites --max_sites 10000'
err, code = run(command)
self.assertTrue(code==0, msg=err)
class _11_SNPdiversity(unittest.TestCase):
def test_class(self):
command = 'snp_diversity.py snps/Bacteroides_vulgatus_57955'
err, code = run(command)
self.assertTrue(code==0, msg=err)
class _12_CallConsensus(unittest.TestCase):
def test_class(self):
command = 'call_consensus.py snps/Bacteroides_vulgatus_57955'
err, code = run(command)
self.assertTrue(code==0, msg=err)
class _13_CompareGeneContent(unittest.TestCase):
def test_class(self):
command = 'compare_genes.py genes/Bacteroides_vulgatus_57955'
err, code = run(command)
self.assertTrue(code==0, msg=err)
class _14_QueryByCompound(unittest.TestCase):
def test_class(self):
command = 'query_by_compound.py -i sample -t list -c C00312'
err, code = run(command)
self.assertTrue(code==0, msg=err)
class _15_BuildDB(unittest.TestCase):
def test_class(self):
command = 'tar -zxvf genomes.tar.gz'
err, code = run(command)
command = 'build_midas_db.py genomes genomes.mapfile db --threads 10'
err, code = run(command)
self.assertTrue(code==0, msg=err)
if __name__ == '__main__':
try:
dir_name = os.path.dirname(os.path.abspath(__file__))
os.chdir(dir_name)
unittest.main(exit=False)
for dir in ['sample', 'species', 'genes', 'snps', 'genomes', 'db']:
shutil.rmtree(dir)
except:
print("")
for dir in ['sample', 'species', 'genes', 'snps', 'genomes', 'db']:
if os.path.exists(dir): shutil.rmtree(dir)
| snayfach/PhyloCNV | test/test_midas.py | Python | gpl-3.0 | 5,928 |
"""
We are going to build a LIFO (last in, first out) stack and a set of functions to manage it.
These functions are:
introduce(pila, x)  Appends element x to the end of the stack pila.
saca(pila)          Returns and removes the last element of the stack (returns None if the stack is empty).
vacia(pila)         Returns a boolean, True = the stack is empty.
"""
def introduce(lifo, elem):
"""
    Append elem to the end of lifo.
:param lifo:
:param elem:
:return:
"""
lifo.append(elem)
def saca(lifo):
"""
    Return and remove the last element of lifo. If it is empty, return None.
:param lifo:
:return:
"""
    if vacia(lifo): # Check whether lifo is empty
return None
else:
return lifo.pop()
def vacia(lifo):
"""
    Return True if lifo is empty, otherwise return False.
:param lifo:
:return:
"""
    return len(lifo) == 0 # True if the length is 0, False otherwise
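if __name__ == '__main__':
    # Small self-contained demo of the three helpers (values are arbitrary)
    pila = []
    introduce(pila, 1)
    introduce(pila, 2)
    introduce(pila, 3)
    print(saca(pila))   # 3 -> last in, first out
    print(saca(pila))   # 2
    print(vacia(pila))  # False, one element is still stored
    print(saca(pila))   # 1
    print(saca(pila))   # None, the stack is now empty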
| IhToN/DAW1-PRG | Ejercicios/PrimTrim/Ejercicio22.py | Python | apache-2.0 | 1,016 |
#!/usr/bin/env python
'''
Webcam display
Usage:
affichage_webcam.py [<video source>]
'''
# Libraries
import cv2
import sys
# Program
# Retrieve the command-line argument (video source), defaulting to camera 0
try:
fn = sys.argv[1]
except IndexError:
fn = 0
# Create the window
cv2.namedWindow('Webcam')
# Start the webcam
cap = cv2.VideoCapture(fn)
# Capture loop
while True:
    flag, img = cap.read()
    if not flag: # stop when no frame could be read
        break
    cv2.imshow('Webcam', img)
ch = cv2.waitKey(5)
if ch == 27:
break
# End of program
cv2.destroyAllWindows()
| vportascarta/UQAC-8INF844-SPHERO | tracking/affichage_webcam.py | Python | gpl-3.0 | 510 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=protected-access
"""Functions that save the model's config into different formats.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.keras.saving.saved_model import json_utils
from tensorflow.python.util.tf_export import keras_export
# pylint: disable=g-import-not-at-top
try:
import yaml
except ImportError:
yaml = None
# pylint: enable=g-import-not-at-top
@keras_export('keras.models.model_from_config')
def model_from_config(config, custom_objects=None):
"""Instantiates a Keras model from its config.
Usage:
```
# for a Functional API model
tf.keras.Model().from_config(model.get_config())
# for a Sequential model
tf.keras.Sequential().from_config(model.get_config())
```
Args:
config: Configuration dictionary.
custom_objects: Optional dictionary mapping names
(strings) to custom classes or functions to be
considered during deserialization.
Returns:
A Keras model instance (uncompiled).
Raises:
TypeError: if `config` is not a dictionary.
"""
if isinstance(config, list):
raise TypeError('`model_from_config` expects a dictionary, not a list. '
'Maybe you meant to use '
'`Sequential.from_config(config)`?')
from tensorflow.python.keras.layers import deserialize # pylint: disable=g-import-not-at-top
return deserialize(config, custom_objects=custom_objects)
@keras_export('keras.models.model_from_yaml')
def model_from_yaml(yaml_string, custom_objects=None):
"""Parses a yaml model configuration file and returns a model instance.
Usage:
>>> model = tf.keras.Sequential([
... tf.keras.layers.Dense(5, input_shape=(3,)),
... tf.keras.layers.Softmax()])
>>> try:
... import yaml
... config = model.to_yaml()
... loaded_model = tf.keras.models.model_from_yaml(config)
... except ImportError:
... pass
Args:
yaml_string: YAML string or open file encoding a model configuration.
custom_objects: Optional dictionary mapping names
(strings) to custom classes or functions to be
considered during deserialization.
Returns:
A Keras model instance (uncompiled).
Raises:
ImportError: if yaml module is not found.
"""
if yaml is None:
raise ImportError('Requires yaml module installed (`pip install pyyaml`).')
# The method unsafe_load only exists in PyYAML 5.x+, so which branch of the
# try block is covered by tests depends on the installed version of PyYAML.
try:
# PyYAML 5.x+
config = yaml.unsafe_load(yaml_string)
except AttributeError:
config = yaml.load(yaml_string)
from tensorflow.python.keras.layers import deserialize # pylint: disable=g-import-not-at-top
return deserialize(config, custom_objects=custom_objects)
@keras_export('keras.models.model_from_json')
def model_from_json(json_string, custom_objects=None):
"""Parses a JSON model configuration string and returns a model instance.
Usage:
>>> model = tf.keras.Sequential([
... tf.keras.layers.Dense(5, input_shape=(3,)),
... tf.keras.layers.Softmax()])
>>> config = model.to_json()
>>> loaded_model = tf.keras.models.model_from_json(config)
Args:
json_string: JSON string encoding a model configuration.
custom_objects: Optional dictionary mapping names
(strings) to custom classes or functions to be
considered during deserialization.
Returns:
A Keras model instance (uncompiled).
"""
config = json_utils.decode(json_string)
from tensorflow.python.keras.layers import deserialize # pylint: disable=g-import-not-at-top
return deserialize(config, custom_objects=custom_objects)
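# A hedged round-trip sketch combining the helpers above; the custom layer
# name used with `custom_objects` is an illustrative assumption:
#
#   model = tf.keras.Sequential([tf.keras.layers.Dense(5, input_shape=(3,))])
#   json_config = model.to_json()
#   restored = model_from_json(json_config)
#   # models containing user-defined classes need them passed explicitly:
#   # restored = model_from_json(json_config, custom_objects={'MyLayer': MyLayer})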
| annarev/tensorflow | tensorflow/python/keras/saving/model_config.py | Python | apache-2.0 | 4,477 |
from django.contrib import admin
import models
# Register your models here.
admin.site.register(models.UserProfile)
admin.site.register(models.Event)
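# A possible refinement (sketch only): register through a ModelAdmin instead of
# the plain calls above to customise the change list; the option shown is an
# illustrative assumption.
#
#   class EventAdmin(admin.ModelAdmin):
#       list_display = ('id',)
#
#   admin.site.register(models.Event, EventAdmin)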
| zhaogaolong/oneFinger | alarm/admin.py | Python | apache-2.0 | 150 |
import logging
from collections import defaultdict
from django.utils import six
from django.utils.safestring import mark_safe
from .base import (
Node, Template, TemplateSyntaxError, TextNode, Variable, token_kwargs,
)
from .library import Library
register = Library()
BLOCK_CONTEXT_KEY = 'block_context'
logger = logging.getLogger('django.template')
class ExtendsError(Exception):
pass
class BlockContext(object):
def __init__(self):
# Dictionary of FIFO queues.
self.blocks = defaultdict(list)
def add_blocks(self, blocks):
for name, block in six.iteritems(blocks):
self.blocks[name].insert(0, block)
def pop(self, name):
try:
return self.blocks[name].pop()
except IndexError:
return None
def push(self, name, block):
self.blocks[name].append(block)
def get_block(self, name):
try:
return self.blocks[name][-1]
except IndexError:
return None
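# A small sketch of the ordering semantics above; the block names and node
# objects are illustrative placeholders:
#
#   bc = BlockContext()
#   bc.add_blocks({'content': child_block})   # discovered first (child template)
#   bc.add_blocks({'content': parent_block})  # discovered later (parent template)
#   bc.pop('content')                         # returns child_block: the most
#                                             # derived override wins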
class BlockNode(Node):
def __init__(self, name, nodelist, parent=None):
self.name, self.nodelist, self.parent = name, nodelist, parent
def __repr__(self):
return "<Block Node: %s. Contents: %r>" % (self.name, self.nodelist)
def render(self, context):
block_context = context.render_context.get(BLOCK_CONTEXT_KEY)
with context.push():
if block_context is None:
context['block'] = self
result = self.nodelist.render(context)
else:
push = block = block_context.pop(self.name)
if block is None:
block = self
# Create new block so we can store context without thread-safety issues.
block = type(self)(block.name, block.nodelist)
block.context = context
context['block'] = block
result = block.nodelist.render(context)
if push is not None:
block_context.push(self.name, push)
return result
def super(self):
if not hasattr(self, 'context'):
raise TemplateSyntaxError(
"'%s' object has no attribute 'context'. Did you use "
"{{ block.super }} in a base template?" % self.__class__.__name__
)
render_context = self.context.render_context
if (BLOCK_CONTEXT_KEY in render_context and
render_context[BLOCK_CONTEXT_KEY].get_block(self.name) is not None):
return mark_safe(self.render(self.context))
return ''
class ExtendsNode(Node):
must_be_first = True
context_key = 'extends_context'
def __init__(self, nodelist, parent_name, template_dirs=None):
self.nodelist = nodelist
self.parent_name = parent_name
self.template_dirs = template_dirs
self.blocks = {n.name: n for n in nodelist.get_nodes_by_type(BlockNode)}
def __repr__(self):
return '<ExtendsNode: extends %s>' % self.parent_name.token
def find_template(self, template_name, context):
"""
This is a wrapper around engine.find_template(). A history is kept in
the render_context attribute between successive extends calls and
passed as the skip argument. This enables extends to work recursively
without extending the same template twice.
"""
# RemovedInDjango20Warning: If any non-recursive loaders are installed
# do a direct template lookup. If the same template name appears twice,
# raise an exception to avoid system recursion.
for loader in context.template.engine.template_loaders:
if not loader.supports_recursion:
history = context.render_context.setdefault(
self.context_key, [context.template.origin.template_name],
)
if template_name in history:
raise ExtendsError(
"Cannot extend templates recursively when using "
"non-recursive template loaders",
)
template = context.template.engine.get_template(template_name)
history.append(template_name)
return template
history = context.render_context.setdefault(
self.context_key, [context.template.origin],
)
template, origin = context.template.engine.find_template(
template_name, skip=history,
)
history.append(origin)
return template
def get_parent(self, context):
parent = self.parent_name.resolve(context)
if not parent:
error_msg = "Invalid template name in 'extends' tag: %r." % parent
if self.parent_name.filters or\
isinstance(self.parent_name.var, Variable):
error_msg += " Got this from the '%s' variable." %\
self.parent_name.token
raise TemplateSyntaxError(error_msg)
if isinstance(parent, Template):
# parent is a django.template.Template
return parent
if isinstance(getattr(parent, 'template', None), Template):
# parent is a django.template.backends.django.Template
return parent.template
return self.find_template(parent, context)
def render(self, context):
compiled_parent = self.get_parent(context)
if BLOCK_CONTEXT_KEY not in context.render_context:
context.render_context[BLOCK_CONTEXT_KEY] = BlockContext()
block_context = context.render_context[BLOCK_CONTEXT_KEY]
# Add the block nodes from this node to the block context
block_context.add_blocks(self.blocks)
# If this block's parent doesn't have an extends node it is the root,
# and its block nodes also need to be added to the block context.
for node in compiled_parent.nodelist:
# The ExtendsNode has to be the first non-text node.
if not isinstance(node, TextNode):
if not isinstance(node, ExtendsNode):
blocks = {n.name: n for n in
compiled_parent.nodelist.get_nodes_by_type(BlockNode)}
block_context.add_blocks(blocks)
break
# Call Template._render explicitly so the parser context stays
# the same.
return compiled_parent._render(context)
class IncludeNode(Node):
context_key = '__include_context'
def __init__(self, template, *args, **kwargs):
self.template = template
self.extra_context = kwargs.pop('extra_context', {})
self.isolated_context = kwargs.pop('isolated_context', False)
super(IncludeNode, self).__init__(*args, **kwargs)
def render(self, context):
"""
Render the specified template and context. Cache the template object
in render_context to avoid reparsing and loading when used in a for
loop.
"""
try:
template = self.template.resolve(context)
# Does this quack like a Template?
if not callable(getattr(template, 'render', None)):
# If not, we'll try our cache, and get_template()
template_name = template
cache = context.render_context.setdefault(self.context_key, {})
template = cache.get(template_name)
if template is None:
template = context.template.engine.get_template(template_name)
cache[template_name] = template
values = {
name: var.resolve(context)
for name, var in six.iteritems(self.extra_context)
}
if self.isolated_context:
return template.render(context.new(values))
with context.push(**values):
return template.render(context)
except Exception:
if context.template.engine.debug:
raise
template_name = getattr(context, 'template_name', None) or 'unknown'
logger.warning(
"Exception raised while rendering {%% include %%} for "
"template '%s'. Empty string rendered instead.",
template_name,
exc_info=True,
)
return ''
@register.tag('block')
def do_block(parser, token):
"""
Define a block that can be overridden by child templates.
"""
    # token.split_contents() isn't useful here because this tag doesn't accept variables as arguments
bits = token.contents.split()
if len(bits) != 2:
raise TemplateSyntaxError("'%s' tag takes only one argument" % bits[0])
block_name = bits[1]
# Keep track of the names of BlockNodes found in this template, so we can
# check for duplication.
try:
if block_name in parser.__loaded_blocks:
raise TemplateSyntaxError("'%s' tag with name '%s' appears more than once" % (bits[0], block_name))
parser.__loaded_blocks.append(block_name)
except AttributeError: # parser.__loaded_blocks isn't a list yet
parser.__loaded_blocks = [block_name]
nodelist = parser.parse(('endblock',))
# This check is kept for backwards-compatibility. See #3100.
endblock = parser.next_token()
acceptable_endblocks = ('endblock', 'endblock %s' % block_name)
if endblock.contents not in acceptable_endblocks:
parser.invalid_block_tag(endblock, 'endblock', acceptable_endblocks)
return BlockNode(block_name, nodelist)
@register.tag('extends')
def do_extends(parser, token):
"""
Signal that this template extends a parent template.
This tag may be used in two ways: ``{% extends "base" %}`` (with quotes)
uses the literal value "base" as the name of the parent template to extend,
or ``{% extends variable %}`` uses the value of ``variable`` as either the
name of the parent template to extend (if it evaluates to a string) or as
the parent template itself (if it evaluates to a Template object).
"""
bits = token.split_contents()
if len(bits) != 2:
raise TemplateSyntaxError("'%s' takes one argument" % bits[0])
parent_name = parser.compile_filter(bits[1])
nodelist = parser.parse()
if nodelist.get_nodes_by_type(ExtendsNode):
raise TemplateSyntaxError("'%s' cannot appear more than once in the same template" % bits[0])
return ExtendsNode(nodelist, parent_name)
@register.tag('include')
def do_include(parser, token):
"""
Loads a template and renders it with the current context. You can pass
additional context using keyword arguments.
Example::
{% include "foo/some_include" %}
{% include "foo/some_include" with bar="BAZZ!" baz="BING!" %}
Use the ``only`` argument to exclude the current context when rendering
the included template::
{% include "foo/some_include" only %}
{% include "foo/some_include" with bar="1" only %}
"""
bits = token.split_contents()
if len(bits) < 2:
raise TemplateSyntaxError(
"%r tag takes at least one argument: the name of the template to "
"be included." % bits[0]
)
options = {}
remaining_bits = bits[2:]
while remaining_bits:
option = remaining_bits.pop(0)
if option in options:
raise TemplateSyntaxError('The %r option was specified more '
'than once.' % option)
if option == 'with':
value = token_kwargs(remaining_bits, parser, support_legacy=False)
if not value:
raise TemplateSyntaxError('"with" in %r tag needs at least '
'one keyword argument.' % bits[0])
elif option == 'only':
value = True
else:
raise TemplateSyntaxError('Unknown argument for %r tag: %r.' %
(bits[0], option))
options[option] = value
isolated_context = options.get('only', False)
namemap = options.get('with', {})
return IncludeNode(parser.compile_filter(bits[1]), extra_context=namemap,
isolated_context=isolated_context)
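# Illustrative sketch (not part of Django itself) of how the three tags defined
# above typically compose in templates; the file names are hypothetical:
#
#   base.html:   {% block content %}default text{% endblock %}
#   child.html:  {% extends "base.html" %}
#                {% block content %}
#                    {% include "snippet.html" with user=request.user only %}
#                {% endblock %}
#
# Rendering child.html first resolves the ExtendsNode (which must be the first
# non-text node), collects BlockNode overrides into the BlockContext, and the
# IncludeNode then renders snippet.html with an isolated context containing only
# ``user`` because of the ``only`` option.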
| yephper/django | django/template/loader_tags.py | Python | bsd-3-clause | 12,647 |
import oyaml as yaml
import sys
import configargparse
parser = configargparse.ArgumentParser(auto_env_var_prefix="INIT_")
parser.add_argument("--config-path", help="path to the configuration.yml file", default="/opt/opencga/conf/configuration.yml")
parser.add_argument("--client-config-path", help="path to the client-configuration.yml file", default="/opt/opencga/conf/client-configuration.yml")
parser.add_argument("--storage-config-path", help="path to the storage-configuration.yml file", default="/opt/opencga/conf/storage-configuration.yml")
parser.add_argument("--search-hosts", required=True)
parser.add_argument("--clinical-hosts", required=True)
parser.add_argument("--cellbase-mongo-hosts", required=False, help="A CSV list of mongodb hosts which are running the cellbase database")
parser.add_argument("--cellbase-mongo-hosts-password", required=False, help="The password for the cellbase mongo server provided in '--cellbase-mongo-hosts'")
parser.add_argument("--cellbase-mongo-hosts-user", required=False, help="The username for the cellbase mongo server provided in '--cellbase-mongo-hosts'")
parser.add_argument("--cellbase-rest-urls", required=False, help="A CSV list of cellbase rest servers hosting the cellbase service")
parser.add_argument("--catalog-database-hosts", required=True)
parser.add_argument("--catalog-database-user", required=True)
parser.add_argument("--catalog-database-password", required=True)
parser.add_argument("--catalog-search-hosts", required=True)
parser.add_argument("--catalog-search-user", required=True)
parser.add_argument("--catalog-search-password", required=True)
parser.add_argument("--rest-host", required=True)
parser.add_argument("--grpc-host", required=True)
parser.add_argument("--batch-execution-mode", required=True)
parser.add_argument("--batch-account-name", required=True)
parser.add_argument("--batch-account-key", required=True)
parser.add_argument("--batch-endpoint", required=True)
parser.add_argument("--batch-pool-id", required=True)
parser.add_argument("--batch-docker-args", required=True)
parser.add_argument("--batch-docker-image", required=True)
parser.add_argument("--batch-max-concurrent-jobs", required=True)
parser.add_argument("--hbase-ssh-dns", required=True)
parser.add_argument("--hbase-ssh-user", required=True)
parser.add_argument("--hbase-ssh-pass", required=True)
parser.add_argument("--hbase-ssh-remote-opencga-home", required=True)
parser.add_argument("--health-check-interval", required=True)
parser.add_argument("--save", help="save update to source configuration files (default: false)", default=False, action='store_true')
args = parser.parse_args()
##############################################################################################################
# Load storage configuration yaml
##############################################################################################################
with open(args.storage_config_path) as f:
storage_config = yaml.safe_load(f)
# Inject search hosts
search_hosts = args.search_hosts.replace('\"','').split(",")
for i, search_host in enumerate(search_hosts):
if i == 0:
# If we are overriding the default hosts,
# clear them only on the first iteration
storage_config["search"]["hosts"].clear()
storage_config["search"]["hosts"].insert(i, search_host.strip())
# Inject clinical hosts
clinical_hosts = args.clinical_hosts.replace('\"','').split(",")
for i, clinical_host in enumerate(clinical_hosts):
if i == 0:
# If we are overriding the default hosts,
# clear them only on the first iteration
storage_config["clinical"]["hosts"].clear()
storage_config["clinical"]["hosts"].insert(i, clinical_host.strip())
# Inject cellbase database
has_cellbase_mongo_hosts = args.cellbase_mongo_hosts is not None and args.cellbase_mongo_hosts != ""
if has_cellbase_mongo_hosts:
cellbase_mongo_hosts = args.cellbase_mongo_hosts.replace('\"','').split(",")
for i, cellbase_mongo_host in enumerate(cellbase_mongo_hosts):
if i == 0:
# If we are overriding the default hosts,
# clear them only on the first iteration
storage_config["cellbase"]["database"]["hosts"].clear()
storage_config["cellbase"]["database"]["hosts"].insert(i, cellbase_mongo_host.strip())
storage_config["cellbase"]["database"]["options"]["authenticationDatabase"] = "admin"
storage_config["cellbase"]["database"]["options"]["enableSSL"] = True
storage_config["cellbase"]["database"]["user"] = args.cellbase_mongo_hosts_user
storage_config["cellbase"]["database"]["password"] = args.cellbase_mongo_hosts_password
storage_config["cellbase"]["preferred"] = "local"
# Inject cellbase rest host, if set
if args.cellbase_rest_urls is not None and args.cellbase_rest_urls != "":
cellbase_rest_urls = args.cellbase_rest_urls.replace('\"', '').split(",")
if len(cellbase_rest_urls) > 0:
for i, cellbase_url in enumerate(cellbase_rest_urls):
if i == 0:
# If we are overriding the default hosts,
# clear them only on the first iteration
storage_config["cellbase"]["hosts"].clear()
storage_config["cellbase"]["hosts"].insert(i, cellbase_url.strip())
# If we have cellbase hosts set the annotator to the DB Adaptor
if has_cellbase_mongo_hosts:
storage_config["variant"]["options"]["annotator"] = "cellbase_db_adaptor"
else :
storage_config["variant"]["options"]["annotator"] = "cellbase_rest"
# Inject Hadoop ssh configuration
for _, storage_engine in enumerate(storage_config["variant"]["engines"]):
if storage_engine["id"] == "hadoop":
storage_engine["options"]["storage.hadoop.mr.executor"] = "ssh"
storage_engine["options"]["storage.hadoop.mr.executor.ssh.host"] = args.hbase_ssh_dns
storage_engine["options"]["storage.hadoop.mr.executor.ssh.user"] = args.hbase_ssh_user
storage_engine["options"]["storage.hadoop.mr.executor.ssh.password"] = args.hbase_ssh_pass
#storage_engine["options"]["storage.hadoop.mr.executor.ssh.key"] = args.hadoop_ssh_key # TODO instead of password
storage_engine["options"]["storage.hadoop.mr.executor.ssh.remoteOpenCgaHome"] = args.hbase_ssh_remote_opencga_home
##############################################################################################################
# Load configuration yaml
##############################################################################################################
with open(args.config_path) as f:
config = yaml.safe_load(f)
# Inject catalog database
catalog_hosts = args.catalog_database_hosts.replace('\"','').split(",")
for i, catalog_host in enumerate(catalog_hosts):
if i == 0:
# If we are overriding the default hosts,
# clear them only on the first iteration
config["catalog"]["database"]["hosts"].clear()
config["catalog"]["database"]["hosts"].insert(i, catalog_host.strip())
config["catalog"]["database"]["user"] = args.catalog_database_user
config["catalog"]["database"]["password"] = args.catalog_database_password
config["catalog"]["database"]["options"]["enableSSL"] = True
config["catalog"]["database"]["options"]["authenticationDatabase"] = "admin"
# Inject search database
catalog_search_hosts = args.catalog_search_hosts.replace('\"','').split(",")
for i, catalog_search_host in enumerate(catalog_search_hosts):
if i == 0:
# If we are overriding the default hosts,
# clear them only on the first iteration
config["catalog"]["searchEngine"]["hosts"].clear()
config["catalog"]["searchEngine"]["hosts"].insert(i, catalog_search_host.strip())
config["catalog"]["searchEngine"]["user"] = args.catalog_search_user
config["catalog"]["searchEngine"]["password"] = args.catalog_search_password
# Inject execution settings
config["analysis"]["index"]["variant"]["maxConcurrentJobs"] = int(args.batch_max_concurrent_jobs)
config["analysis"]["execution"]["id"] = args.batch_execution_mode
config["analysis"]["execution"]["options"] = {}
config["analysis"]["execution"]["options"]["azure.batchAccount"] = args.batch_account_name
config["analysis"]["execution"]["options"]["azure.batchKey"] = args.batch_account_key
config["analysis"]["execution"]["options"]["azure.batchUri"] = args.batch_endpoint
config["analysis"]["execution"]["options"]["azure.batchPoolId"] = args.batch_pool_id
config["analysis"]["execution"]["options"]["azure.dockerImageName"] = args.batch_docker_image
config["analysis"]["execution"]["options"]["azure.dockerArgs"] = args.batch_docker_args
# Inject healthCheck interval
config["healthCheck"]["interval"] = args.health_check_interval
##############################################################################################################
# Load client configuration yaml
##############################################################################################################
with open(args.client_config_path) as f:
client_config = yaml.safe_load(f)
# Inject grpc and rest host
client_config["rest"]["host"] = args.rest_host.replace('"','')
client_config["grpc"]["host"] = args.grpc_host.replace('"','')
# Running with --save will update the configuration files inplace.
# Without --save will simply dump the update YAML to stdout so that
# the caller can handle it.
# Note: The dump will use the safe representation so there is likely
# to be format diffs between the original input and the output as well
# as value changes.
if not args.save:
yaml.dump(storage_config, sys.stdout, default_flow_style=False, allow_unicode=True)
print("---") # Add yaml delimiter
yaml.dump(config, sys.stdout, default_flow_style=False, allow_unicode=True)
print("---") # Add yaml delimiter
yaml.dump(client_config, sys.stdout, default_flow_style=False, allow_unicode=True)
else:
with open(args.storage_config_path, "w") as f:
yaml.dump(storage_config, f, default_flow_style=False)
with open(args.config_path, "w") as f:
yaml.dump(config, f, default_flow_style=False)
with open(args.client_config_path, "w") as f:
yaml.dump(client_config, f, default_flow_style=False)
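# Illustrative invocation (values are hypothetical; only a subset of the required
# options is shown). Without --save the merged YAML documents go to stdout
# separated by '---'; with --save the three configuration files are updated in
# place. Because the parser uses auto_env_var_prefix="INIT_", each option should
# also be readable from an environment variable, e.g. INIT_SEARCH_HOSTS in place
# of --search-hosts.
#
#   python override-yaml.py \
#       --search-hosts "http://solr:8983/solr" \
#       --clinical-hosts "http://solr:8983/solr" \
#       --catalog-database-hosts "mongo:27017" \
#       --catalog-database-user opencga --catalog-database-password secret \
#       --catalog-search-hosts "http://solr:8983/solr" \
#       --catalog-search-user admin --catalog-search-password secret \
#       --rest-host "https://opencga.example.org" \
#       --grpc-host "grpc.example.org" \
#       ... plus the remaining required --batch-*, --hbase-ssh-* and
#       --health-check-interval options ...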
| j-coll/opencga | opencga-app/app/cloud/docker/old/opencga-init/override-yaml.py | Python | apache-2.0 | 10,214 |
__doc__ = """Random number array generators for numarray.
This package was ported to numarray from Numeric's RandomArray and
provides functions to generate numarray arrays of random numbers.
"""
from RandomArray2 import *
| fxia22/ASM_xf | PythonD/site_python/numarray/random_array/__init__.py | Python | gpl-2.0 | 218 |
#!/usr/bin/env python3
import urllib.request
from urllib.request import Request
req = Request('http://10.192.40.29',
headers = {},
method = 'GET'
)
with urllib.request.urlopen(req) as f:
    print(f.read().decode('utf-8'))
| IvanJobs/play | ceph/s3/tpl.py | Python | mit | 243 |
# Download the Python helper library from twilio.com/docs/python/install
from twilio.rest import TwilioTaskRouterClient
# Your Account Sid and Auth Token from twilio.com/user/account
account_sid = "ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
auth_token = "your_auth_token"
workspace_sid = "WSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
taskqueue_sid = "WQXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
client = TwilioTaskRouterClient(account_sid, auth_token)
taskqueue = client.task_queues(workspace_sid).get(taskqueue_sid)
print(taskqueue.friendly_name)
| teoreteetik/api-snippets | rest/taskrouter/taskqueues/instance/get/example-1/example-1.5.x.py | Python | mit | 527 |
from math import log
"""
-- Various average (means) algorithms implementation
-- See: http://en.wikipedia.org/wiki/Average
-- Returns the sum of a sequence of values
"""
#Calculates the arithmetic mean of the list numbers and returns the result
def arithmetic_mean(numbers): return float(sum(numbers))/len(numbers)
#Calculates the geometric mean of the list numbers
def geometric_mean(numbers):
product=1
for num in numbers:
product*=num
return pow(product,1.0/len(numbers))
#Calculates the harmonic mean of the list numbers
def harmonic_mean(numbers):
return float(len(numbers))/sum([1.0/num for num in numbers])
#Calculates the quadratic mean of the list numbers
def quadratic_mean(numbers):
return ((1.0/len(numbers))*sum([num*num for num in numbers]))**.5
# Calculates the generalized mean of the list numbers according to the
# given power
def generalized_mean(numbers,power):
return (( 1.0/len(numbers))*sum([num**power for num in numbers]))**(1.0/power)
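# Note (illustrative): the generalized (power) mean above reduces to the other
# means for particular exponents; e.g. for numbers = [1, 2, 4] (up to floating
# point rounding):
#   generalized_mean(numbers, 1)  == arithmetic_mean(numbers)   (= 7/3)
#   generalized_mean(numbers, -1) == harmonic_mean(numbers)     (= 12/7)
#   generalized_mean(numbers, 2)  == quadratic_mean(numbers)    (= sqrt(7))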
# Calculates the weighted mean, where values and weights are two lists with an
# equal number of elements
def weighted_mean(values, weights):
    if len(values) != len(weights):
        print("List lengths don't match")
        return None
    sumup = 0
    for counter, number in enumerate(values):
        sumup += number*weights[counter]
    return float(sumup)/sum(weights)
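# Worked example (matches the check in main() below):
#   weighted_mean([1, 2, 3, 4, 5], [0.1, 0.2, 0.2, 0.3, 0.2])
#     = (1*0.1 + 2*0.2 + 3*0.2 + 4*0.3 + 5*0.2) / (0.1 + 0.2 + 0.2 + 0.3 + 0.2)
#     = 3.3 / 1.0 = 3.3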
# Calculates midrange_mean
def midrange_mean(values):
return 0.5*(min(values)+max(values))
# Calculates the energetic mean (values are treated as decibel levels)
def energatic_mean(values):
sumup=sum([10**(num/10.0) for num in values])
return 10*log((1.0/len(values))*sumup, 10)
def main():
values =[1,2,3,4,5]
weights = [0.1, 0.2, 0.2, 0.3, 0.2]
flag =True
if(arithmetic_mean(values)!=3):
flag = False
elif(geometric_mean(values)!=2.605171084697352):
flag = False
elif(harmonic_mean(values)!=2.18978102189781):
flag = False
elif(quadratic_mean(values)!=3.3166247903554):
flag = False
elif(generalized_mean(values,1)!=arithmetic_mean(values)):
flag = False
elif(weighted_mean(values,weights) != 3.3):
flag = False
elif(midrange_mean(values)!=3):
flag = False
elif(energatic_mean(values)!= 3.2276678141732704):
flag = False
print("Test is"+(" Successful"if(flag) else" Unsuccessful"))
if __name__ == '__main__':
main()
| jiang42/Algorithm-Implementations | Average/Python/wasi0013/Average.py | Python | mit | 2,303 |
# (C) British Crown Copyright 2013 - 2018, Met Office
#
# This file is part of cartopy.
#
# cartopy is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# cartopy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with cartopy. If not, see <https://www.gnu.org/licenses/>.
"""
Tests for the Transverse Mercator projection, including OSGB and OSNI.
"""
from __future__ import (absolute_import, division, print_function)
import numpy as np
import cartopy.crs as ccrs
class TestTransverseMercator(object):
def setup_class(self):
self.point_a = (-3.474083, 50.727301)
self.point_b = (0.5, 50.5)
self.src_crs = ccrs.PlateCarree()
def test_default(self):
proj = ccrs.TransverseMercator()
res = proj.transform_point(*self.point_a, src_crs=self.src_crs)
np.testing.assert_array_almost_equal(res, (-245269.53180633,
5627508.74354959))
res = proj.transform_point(*self.point_b, src_crs=self.src_crs)
np.testing.assert_array_almost_equal(res, (35474.63566645,
5596583.41949901))
def test_osgb_vals(self):
proj = ccrs.TransverseMercator(central_longitude=-2,
central_latitude=49,
scale_factor=0.9996012717,
false_easting=400000,
false_northing=-100000,
globe=ccrs.Globe(datum='OSGB36',
ellipse='airy'))
res = proj.transform_point(*self.point_a, src_crs=self.src_crs)
np.testing.assert_array_almost_equal(res, (295971.28667707,
93064.27666368))
res = proj.transform_point(*self.point_b, src_crs=self.src_crs)
np.testing.assert_array_almost_equal(res, (577274.98380140,
69740.49227181))
def test_nan(self):
proj = ccrs.TransverseMercator()
res = proj.transform_point(0.0, float('nan'), src_crs=self.src_crs)
assert np.all(np.isnan(res))
res = proj.transform_point(float('nan'), 0.0, src_crs=self.src_crs)
assert np.all(np.isnan(res))
class TestOSGB(object):
def setup_class(self):
self.point_a = (-3.474083, 50.727301)
self.point_b = (0.5, 50.5)
self.src_crs = ccrs.PlateCarree()
self.nan = float('nan')
def test_default(self):
proj = ccrs.OSGB()
res = proj.transform_point(*self.point_a, src_crs=self.src_crs)
np.testing.assert_array_almost_equal(res, (295971.28667707,
93064.27666368))
res = proj.transform_point(*self.point_b, src_crs=self.src_crs)
np.testing.assert_array_almost_equal(res, (577274.98380140,
69740.49227181))
def test_nan(self):
proj = ccrs.OSGB()
res = proj.transform_point(0.0, float('nan'), src_crs=self.src_crs)
assert np.all(np.isnan(res))
res = proj.transform_point(float('nan'), 0.0, src_crs=self.src_crs)
assert np.all(np.isnan(res))
class TestOSNI(object):
def setup_class(self):
self.point_a = (-6.826286, 54.725116)
self.src_crs = ccrs.PlateCarree()
self.nan = float('nan')
def test_default(self):
proj = ccrs.OSNI()
res = proj.transform_point(*self.point_a, src_crs=self.src_crs)
np.testing.assert_array_almost_equal(
res, (275614.26762651594, 386984.206429612),
decimal=0 if ccrs.PROJ4_VERSION < (5, 0, 0) else 6)
def test_nan(self):
proj = ccrs.OSNI()
res = proj.transform_point(0.0, float('nan'), src_crs=self.src_crs)
assert np.all(np.isnan(res))
res = proj.transform_point(float('nan'), 0.0, src_crs=self.src_crs)
assert np.all(np.isnan(res))
| pelson/cartopy | lib/cartopy/tests/crs/test_transverse_mercator.py | Python | lgpl-3.0 | 4,506 |
# -*- coding:utf-8 -*-
import requests, bs4
ROOT_URL = "http://item.jd.com/%s.html"
def get_response(id):#str
rs = requests.get(ROOT_URL%id)
if rs.ok:
return bs4.BeautifulSoup(rs.text)
return None
def parser_dns_prefetch(bs):#BeautifulSoup
all_dns = bs.find_all('link', rel = 'dns-prefetch')
    if len(all_dns) == 0:
return None
rs = []
for dns_tag in all_dns:
        if 'href' not in dns_tag.attrs:
continue
rs.append('http:' + dns_tag.get('href'))
return rs
def parser_urls(bs):#BeautifulSoup
rs_spec_items = bs.find_all('div', 'spec-items')
    if len(rs_spec_items) == 0:
return {}
spec_items = rs_spec_items[0]
rs_ele = spec_items.find_all('img')
images = {}
for ele in rs_ele:
        if 'data-url' in ele.attrs and 'data-img' in ele.attrs:
images[ele.attrs.get('data-url')] = ele.attrs.get('data-img')
return images
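# Illustrative usage (the product id below is hypothetical):
# if __name__ == '__main__':
#     soup = get_response('1217499')
#     if soup is not None:
#         print parser_dns_prefetch(soup)  # list of http://... prefetch hosts
#         print parser_urls(soup)          # {data-url: data-img} mapping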
| Tary/Python-Tool | PictureFetch/jdimage.py | Python | mit | 894 |
from aimacode.planning import Action
from aimacode.search import Problem
from aimacode.utils import expr
from lp_utils import decode_state
class PgNode():
''' Base class for planning graph nodes.
includes instance sets common to both types of nodes used in a planning graph
parents: the set of nodes in the previous level
children: the set of nodes in the subsequent level
mutex: the set of sibling nodes that are mutually exclusive with this node
'''
def __init__(self):
self.parents = set()
self.children = set()
self.mutex = set()
def is_mutex(self, other) -> bool:
''' Boolean test for mutual exclusion
:param other: PgNode
the other node to compare with
:return: bool
True if this node and the other are marked mutually exclusive (mutex)
'''
if other in self.mutex:
return True
return False
def show(self):
''' helper print for debugging shows counts of parents, children, siblings
:return:
print only
'''
print("{} parents".format(len(self.parents)))
print("{} children".format(len(self.children)))
print("{} mutex".format(len(self.mutex)))
class PgNode_s(PgNode):
'''
A planning graph node representing a state (literal fluent) from a planning
problem.
Args:
----------
symbol : str
A string representing a literal expression from a planning problem
domain.
is_pos : bool
Boolean flag indicating whether the literal expression is positive or
negative.
'''
def __init__(self, symbol: str, is_pos: bool):
''' S-level Planning Graph node constructor
:param symbol: expr
:param is_pos: bool
Instance variables calculated:
literal: expr
fluent in its literal form including negative operator if applicable
Instance variables inherited from PgNode:
parents: set of nodes connected to this node in previous A level; initially empty
children: set of nodes connected to this node in next A level; initially empty
mutex: set of sibling S-nodes that this node has mutual exclusion with; initially empty
'''
PgNode.__init__(self)
self.symbol = symbol
self.is_pos = is_pos
self.literal = expr(self.symbol)
if not self.is_pos:
self.literal = expr('~{}'.format(self.symbol))
def show(self):
'''helper print for debugging shows literal plus counts of parents, children, siblings
:return:
print only
'''
print("\n*** {}".format(self.literal))
PgNode.show(self)
def __eq__(self, other):
'''equality test for nodes - compares only the literal for equality
:param other: PgNode_s
:return: bool
'''
if isinstance(other, self.__class__):
return (self.symbol == other.symbol) \
and (self.is_pos == other.is_pos)
def __hash__(self):
return hash(self.symbol) ^ hash(self.is_pos)
class PgNode_a(PgNode):
'''A-type (action) Planning Graph node - inherited from PgNode
'''
def __init__(self, action: Action):
'''A-level Planning Graph node constructor
:param action: Action
a ground action, i.e. this action cannot contain any variables
Instance variables calculated:
An A-level will always have an S-level as its parent and an S-level as its child.
The preconditions and effects will become the parents and children of the A-level node
However, when this node is created, it is not yet connected to the graph
prenodes: set of *possible* parent S-nodes
effnodes: set of *possible* child S-nodes
is_persistent: bool True if this is a persistence action, i.e. a no-op action
Instance variables inherited from PgNode:
parents: set of nodes connected to this node in previous S level; initially empty
children: set of nodes connected to this node in next S level; initially empty
mutex: set of sibling A-nodes that this node has mutual exclusion with; initially empty
'''
PgNode.__init__(self)
self.action = action
self.prenodes = self.precond_s_nodes()
self.effnodes = self.effect_s_nodes()
self.is_persistent = False
if self.prenodes == self.effnodes:
self.is_persistent = True
def show(self):
'''helper print for debugging shows action plus counts of parents, children, siblings
:return:
print only
'''
print("\n*** {}{}".format(self.action.name, self.action.args))
PgNode.show(self)
def precond_s_nodes(self):
'''precondition literals as S-nodes (represents possible parents for this node).
It is computationally expensive to call this function; it is only called by the
class constructor to populate the `prenodes` attribute.
:return: set of PgNode_s
'''
nodes = set()
for p in self.action.precond_pos:
n = PgNode_s(p, True)
nodes.add(n)
for p in self.action.precond_neg:
n = PgNode_s(p, False)
nodes.add(n)
return nodes
def effect_s_nodes(self):
'''effect literals as S-nodes (represents possible children for this node).
It is computationally expensive to call this function; it is only called by the
class constructor to populate the `effnodes` attribute.
:return: set of PgNode_s
'''
nodes = set()
for e in self.action.effect_add:
n = PgNode_s(e, True)
nodes.add(n)
for e in self.action.effect_rem:
n = PgNode_s(e, False)
nodes.add(n)
return nodes
def __eq__(self, other):
'''equality test for nodes - compares only the action name for equality
:param other: PgNode_a
:return: bool
'''
if isinstance(other, self.__class__):
return (self.action.name == other.action.name) \
and (self.action.args == other.action.args)
def __hash__(self):
return hash(self.action.name) ^ hash(self.action.args)
def mutexify(node1: PgNode, node2: PgNode):
''' adds sibling nodes to each other's mutual exclusion (mutex) set. These should be sibling nodes!
:param node1: PgNode (or inherited PgNode_a, PgNode_s types)
:param node2: PgNode (or inherited PgNode_a, PgNode_s types)
:return:
node mutex sets modified
'''
if type(node1) != type(node2):
raise TypeError('Attempted to mutex two nodes of different types')
node1.mutex.add(node2)
node2.mutex.add(node1)
class PlanningGraph():
'''
    A planning graph as described in chapter 10 of the AIMA text. The planning
    graph can be used to reason about which literals are reachable from a given
    state and to compute heuristics (such as level sum) for planning search.
'''
def __init__(self, problem: Problem, state: str, serial_planning=True):
'''
:param problem: PlanningProblem (or subclass such as AirCargoProblem or HaveCakeProblem)
:param state: str (will be in form TFTTFF... representing fluent states)
:param serial_planning: bool (whether or not to assume that only one action can occur at a time)
Instance variable calculated:
fs: FluentState
the state represented as positive and negative fluent literal lists
all_actions: list of the PlanningProblem valid ground actions combined with calculated no-op actions
s_levels: list of sets of PgNode_s, where each set in the list represents an S-level in the planning graph
a_levels: list of sets of PgNode_a, where each set in the list represents an A-level in the planning graph
'''
self.problem = problem
self.fs = decode_state(state, problem.state_map)
self.serial = serial_planning
self.all_actions = self.problem.actions_list + self.noop_actions(self.problem.state_map)
self.s_levels = []
self.a_levels = []
self.create_graph()
def noop_actions(self, literal_list):
'''create persistent action for each possible fluent
"No-Op" actions are virtual actions (i.e., actions that only exist in
the planning graph, not in the planning problem domain) that operate
on each fluent (literal expression) from the problem domain. No op
actions "pass through" the literal expressions from one level of the
planning graph to the next.
The no-op action list requires both a positive and a negative action
for each literal expression. Positive no-op actions require the literal
as a positive precondition and add the literal expression as an effect
in the output, and negative no-op actions require the literal as a
negative precondition and remove the literal expression as an effect in
the output.
This function should only be called by the class constructor.
:param literal_list:
:return: list of Action
'''
action_list = []
for fluent in literal_list:
act1 = Action(expr("Noop_pos({})".format(fluent)), ([fluent], []), ([fluent], []))
action_list.append(act1)
act2 = Action(expr("Noop_neg({})".format(fluent)), ([], [fluent]), ([], [fluent]))
action_list.append(act2)
return action_list
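    # Illustrative example (the fluent is hypothetical): for the literal
    # expr('At(C1, SFO)') the loop above produces two persistence actions:
    #   Noop_pos(At(C1, SFO)): precond_pos=[At(C1, SFO)], effect_add=[At(C1, SFO)]
    #   Noop_neg(At(C1, SFO)): precond_neg=[At(C1, SFO)], effect_rem=[At(C1, SFO)]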
def create_graph(self):
''' build a Planning Graph as described in Russell-Norvig 3rd Ed 10.3 or 2nd Ed 11.4
The S0 initial level has been implemented for you. It has no parents and includes all of
the literal fluents that are part of the initial state passed to the constructor. At the start
of a problem planning search, this will be the same as the initial state of the problem. However,
the planning graph can be built from any state in the Planning Problem
This function should only be called by the class constructor.
:return:
builds the graph by filling s_levels[] and a_levels[] lists with node sets for each level
'''
# the graph should only be built during class construction
if (len(self.s_levels) != 0) or (len(self.a_levels) != 0):
raise Exception(
'Planning Graph already created; construct a new planning graph for each new state in the planning sequence')
# initialize S0 to literals in initial state provided.
leveled = False
level = 0
self.s_levels.append(set()) # S0 set of s_nodes - empty to start
# for each fluent in the initial state, add the correct literal PgNode_s
for literal in self.fs.pos:
self.s_levels[level].add(PgNode_s(literal, True))
for literal in self.fs.neg:
self.s_levels[level].add(PgNode_s(literal, False))
# no mutexes at the first level
# continue to build the graph alternating A, S levels until last two S levels contain the same literals,
# i.e. until it is "leveled"
while not leveled:
self.add_action_level(level)
self.update_a_mutex(self.a_levels[level])
level += 1
self.add_literal_level(level)
self.update_s_mutex(self.s_levels[level])
if self.s_levels[level] == self.s_levels[level - 1]:
leveled = True
def add_action_level(self, level):
''' add an A (action) level to the Planning Graph
:param level: int
the level number alternates S0, A0, S1, A1, S2, .... etc the level number is also used as the
index for the node set lists self.a_levels[] and self.s_levels[]
:return:
adds A nodes to the current level in self.a_levels[level]
'''
# TODO add action A level to the planning graph as described in the Russell-Norvig text
# 1. determine what actions to add and create those PgNode_a objects
# 2. connect the nodes to the previous S literal level
# for example, the A0 level will iterate through all possible actions for the problem and add a PgNode_a to a_levels[0]
# set iff all prerequisite literals for the action hold in S0. This can be accomplished by testing
# to see if a proposed PgNode_a has prenodes that are a subset of the previous S level. Once an
# action node is added, it MUST be connected to the S node instances in the appropriate s_level set.
self.a_levels.append(set())
for a in self.all_actions:
node = PgNode_a(a)
parents = set()
satisfiable = [ False for n in range(len(node.prenodes)) ]
ii = 0
for p in node.prenodes:
for s in self.s_levels[level]:
if p == s:
satisfiable[ii] = True
parents.add(s)
break
ii = ii+1
if all(satisfiable):
                self.a_levels[level].add(node)
                node.parents = parents
                # connect in both directions: each satisfying S-node also gains
                # this action node as a child
                for s in parents:
                    s.children.add(node)
def add_literal_level(self, level):
''' add an S (literal) level to the Planning Graph
:param level: int
the level number alternates S0, A0, S1, A1, S2, .... etc the level number is also used as the
index for the node set lists self.a_levels[] and self.s_levels[]
:return:
adds S nodes to the current level in self.s_levels[level]
'''
# TODO add literal S level to the planning graph as described in the Russell-Norvig text
# 1. determine what literals to add
# 2. connect the nodes
# for example, every A node in the previous level has a list of S nodes in effnodes that represent the effect
# produced by the action. These literals will all be part of the new S level. Since we are working with sets, they
# may be "added" to the set without fear of duplication. However, it is important to then correctly create and connect
# all of the new S nodes as children of all the A nodes that could produce them, and likewise add the A nodes to the
# parent sets of the S nodes
self.s_levels.append(set())
for a in self.a_levels[level-1]:
for p in a.action.effect_add:
self.s_levels[level].add(PgNode_s(p,True))
for n in a.action.effect_rem:
self.s_levels[level].add(PgNode_s(n,False))
        for a in self.a_levels[level-1]:
            children = set()
            for s in self.s_levels[level]:
                if s in a.effnodes:
                    children.add(s)
                    # connect in both directions so that S-node parents are
                    # populated (inconsistent_support_mutex relies on them)
                    s.parents.add(a)
            a.children = children
def update_a_mutex(self, nodeset):
''' Determine and update sibling mutual exclusion for A-level nodes
Mutex action tests section from 3rd Ed. 10.3 or 2nd Ed. 11.4
A mutex relation holds between two actions a given level
if the planning graph is a serial planning graph and the pair are nonpersistence actions
or if any of the three conditions hold between the pair:
Inconsistent Effects
Interference
Competing needs
:param nodeset: set of PgNode_a (siblings in the same level)
:return:
mutex set in each PgNode_a in the set is appropriately updated
'''
nodelist = list(nodeset)
for i, n1 in enumerate(nodelist[:-1]):
for n2 in nodelist[i + 1:]:
if (self.serialize_actions(n1, n2) or
self.inconsistent_effects_mutex(n1, n2) or
self.interference_mutex(n1, n2) or
self.competing_needs_mutex(n1, n2)):
mutexify(n1, n2)
def serialize_actions(self, node_a1: PgNode_a, node_a2: PgNode_a) -> bool:
'''
Test a pair of actions for mutual exclusion, returning True if the
planning graph is serial, and if either action is persistent; otherwise
return False. Two serial actions are mutually exclusive if they are
both non-persistent.
:param node_a1: PgNode_a
:param node_a2: PgNode_a
:return: bool
'''
if not self.serial:
return False
if node_a1.is_persistent or node_a2.is_persistent:
return False
return True
def inconsistent_effects_mutex(self, node_a1: PgNode_a, node_a2: PgNode_a) -> bool:
'''
Test a pair of actions for inconsistent effects, returning True if
one action negates an effect of the other, and False otherwise.
HINT: The Action instance associated with an action node is accessible
through the PgNode_a.action attribute. See the Action class
documentation for details on accessing the effects and preconditions of
an action.
:param node_a1: PgNode_a
:param node_a2: PgNode_a
:return: bool
'''
# TODO test for Inconsistent Effects between nodes
for e1 in node_a1.effnodes:
for e2 in node_a2.effnodes:
if e1.symbol == e2.symbol and e1.is_pos != e2.is_pos:
return True
return False
def interference_mutex(self, node_a1: PgNode_a, node_a2: PgNode_a) -> bool:
'''
Test a pair of actions for mutual exclusion, returning True if the
effect of one action is the negation of a precondition of the other.
HINT: The Action instance associated with an action node is accessible
through the PgNode_a.action attribute. See the Action class
documentation for details on accessing the effects and preconditions of
an action.
:param node_a1: PgNode_a
:param node_a2: PgNode_a
:return: bool
'''
# TODO test for Interference between nodes
for e1 in node_a1.effnodes:
for e2 in node_a2.prenodes:
if e1.symbol == e2.symbol and e1.is_pos != e2.is_pos:
return True
for e1 in node_a1.prenodes:
for e2 in node_a2.effnodes:
if e1.symbol == e2.symbol and e1.is_pos != e2.is_pos:
return True
return False
def competing_needs_mutex(self, node_a1: PgNode_a, node_a2: PgNode_a) -> bool:
'''
Test a pair of actions for mutual exclusion, returning True if one of
the precondition of one action is mutex with a precondition of the
other action.
:param node_a1: PgNode_a
:param node_a2: PgNode_a
:return: bool
'''
# TODO test for Competing Needs between nodes
for e1 in node_a1.parents:
for e2 in node_a2.parents:
if e1.is_mutex(e2):
return True
return False
def update_s_mutex(self, nodeset: set):
''' Determine and update sibling mutual exclusion for S-level nodes
Mutex action tests section from 3rd Ed. 10.3 or 2nd Ed. 11.4
A mutex relation holds between literals at a given level
if either of the two conditions hold between the pair:
Negation
Inconsistent support
:param nodeset: set of PgNode_a (siblings in the same level)
:return:
mutex set in each PgNode_a in the set is appropriately updated
'''
nodelist = list(nodeset)
for i, n1 in enumerate(nodelist[:-1]):
for n2 in nodelist[i + 1:]:
if self.negation_mutex(n1, n2) or self.inconsistent_support_mutex(n1, n2):
mutexify(n1, n2)
def negation_mutex(self, node_s1: PgNode_s, node_s2: PgNode_s) -> bool:
'''
Test a pair of state literals for mutual exclusion, returning True if
one node is the negation of the other, and False otherwise.
HINT: Look at the PgNode_s.__eq__ defines the notion of equivalence for
literal expression nodes, and the class tracks whether the literal is
positive or negative.
:param node_s1: PgNode_s
:param node_s2: PgNode_s
:return: bool
'''
# TODO test for negation between nodes
return (node_s1.symbol == node_s2.symbol) and (node_s1.is_pos != node_s2.is_pos)
def inconsistent_support_mutex(self, node_s1: PgNode_s, node_s2: PgNode_s):
'''
Test a pair of state literals for mutual exclusion, returning True if
there are no actions that could achieve the two literals at the same
time, and False otherwise. In other words, the two literal nodes are
mutex if all of the actions that could achieve the first literal node
are pairwise mutually exclusive with all of the actions that could
achieve the second literal node.
HINT: The PgNode.is_mutex method can be used to test whether two nodes
are mutually exclusive.
:param node_s1: PgNode_s
:param node_s2: PgNode_s
:return: bool
'''
# TODO test for Inconsistent Support between nodes
for a1 in node_s1.parents:
for a2 in node_s2.parents:
if not a1.is_mutex(a2):
return False
return True
def h_levelsum(self) -> int:
'''The sum of the level costs of the individual goals (admissible if goals independent)
:return: int
'''
level_sum = 0
# TODO implement
goals = { }
for g in self.problem.goal:
goals[PgNode_s(g,True)] = -1
for ii in range(len(self.s_levels)):
for g in goals:
if g in self.s_levels[ii] and goals[g] == -1:
goals[g] = ii
for g in goals:
level_sum = level_sum + goals[g]
if goals[g] == -1:
return float("inf")
return level_sum
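# Illustrative usage (problem construction is assumed to come from the rest of
# this project, e.g. an air-cargo problem; the names are hypothetical):
#   pg = PlanningGraph(problem, node.state)   # state string such as "TFFT..."
#   h  = pg.h_levelsum()
# h_levelsum() adds up, for every goal literal, the index of the first S-level
# in which that literal appears, and returns float("inf") when a goal never
# becomes reachable.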
| fbrei/aind | planning/my_planning_graph.py | Python | mit | 22,173 |
"""tf.size(input, name = None)
解释:这个函数是返回input中一共有多少个元素。
使用例子:"""
import tensorflow as tf
sess = tf.Session()
data = tf.constant([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]])
print(sess.run(data))
d = tf.size(data)
print(sess.run(d)) # 12
"""输入参数:
● input: 一个Tensor。
● name:(可选)为这个操作取一个名字。
输出参数:
● 一个Tensor,数据类型是int32。"""
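# For comparison (illustrative): tf.shape(data) evaluates to [2, 2, 3] for the
# tensor above, while tf.size(data) is the product of those dimensions, 12.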
| Asurada2015/TFAPI_translation | array_ops/tf_size.py | Python | apache-2.0 | 465 |
import pytest
from transporter import create_app, redis_store
@pytest.fixture(scope='function')
def app(request):
"""Session-wide test `Flask` application."""
settings_override = {
'TESTING': True,
# 'SQLALCHEMY_DATABASE_URI': TEST_DATABASE_URI,
}
app = create_app(__name__, config=settings_override,
template_folder='../templates')
redis_store.init_app(app)
# Establish an application context before running the tests.
ctx = app.app_context()
ctx.push()
def teardown():
ctx.pop()
request.addfinalizer(teardown)
return app
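# Illustrative test that would consume the fixture above (hypothetical, not part
# of this conftest):
# def test_app_is_in_testing_mode(app):
#     assert app.config['TESTING'] is True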
# @pytest.fixture(scope='session')
# def db(app, request):
# """Session-wide test database."""
# if os.path.exists(TESTDB_PATH):
# os.unlink(TESTDB_PATH)
#
# def teardown():
# _db.drop_all()
# os.unlink(TESTDB_PATH)
#
# _db.app = app
# _db.create_all()
#
# request.addfinalizer(teardown)
# return _db
| suminb/transporter | tests/conftest.py | Python | mit | 974 |