| content | avg_line_length | max_line_length | alphanum_fraction | licenses | repository_name | path | size | lang |
|---|---|---|---|---|---|---|---|---|
| string (length 7-928k) | float64 (3.5-33.8k) | int64 (6-139k) | float64 (0.08-0.96) | sequence | string (length 7-104) | string (length 4-230) | int64 (7-928k) | string (1 class: Python) |
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import logging
from django.conf import settings
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _
from openstack_auth import utils
from horizon import exceptions
from horizon import forms
from horizon import messages
from horizon.utils import memoized
from horizon import workflows
from openstack_dashboard import api
from openstack_dashboard.api import cinder
from openstack_dashboard.api import keystone
from openstack_dashboard.api import nova
from openstack_dashboard.usage import quotas
from openstack_dashboard.utils import identity
LOG = logging.getLogger(__name__)
INDEX_URL = "horizon:identity:projects:index"
ADD_USER_URL = "horizon:identity:projects:create_user"
PROJECT_USER_MEMBER_SLUG = "update_members"
PROJECT_GROUP_MEMBER_SLUG = "update_group_members"
COMMON_HORIZONTAL_TEMPLATE = "identity/projects/_common_horizontal_form.html"
class CommonQuotaAction(workflows.Action):
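    """Base action shared by the per-service quota steps.

    Hides quota fields that are disabled for this deployment, rejects new
    limits that fall below current usage, and delegates the actual update
    to the service-specific _tenant_quota_update() implementation.
    """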
_quota_fields = None
def __init__(self, request, *args, **kwargs):
super(CommonQuotaAction, self).__init__(request, *args, **kwargs)
disabled_quotas = self.initial['disabled_quotas']
for field in disabled_quotas:
if field in self.fields:
self.fields[field].required = False
self.fields[field].widget = forms.HiddenInput()
def clean(self):
cleaned_data = super(CommonQuotaAction, self).clean()
usages = quotas.tenant_quota_usages(
self.request, tenant_id=self.initial['project_id'],
targets=tuple(self._quota_fields))
# Validate the quota values before updating quotas.
bad_values = []
for key, value in cleaned_data.items():
used = usages[key].get('used', 0)
if value is not None and 0 <= value < used:
bad_values.append(_('%(used)s %(key)s used') %
{'used': used,
'key': quotas.QUOTA_NAMES.get(key, key)})
if bad_values:
value_str = ", ".join(bad_values)
msg = (_('Quota value(s) cannot be less than the current usage '
'value(s): %s.') %
value_str)
raise forms.ValidationError(msg)
return cleaned_data
def handle(self, request, context):
project_id = context['project_id']
disabled_quotas = context['disabled_quotas']
data = {key: context[key] for key in
self._quota_fields - disabled_quotas}
if data:
self._tenant_quota_update(request, project_id, data)
@abc.abstractmethod
def _tenant_quota_update(self, request, project_id, data):
pass
class ComputeQuotaAction(CommonQuotaAction):
instances = forms.IntegerField(min_value=-1, label=_("Instances"))
cores = forms.IntegerField(min_value=-1, label=_("VCPUs"))
ram = forms.IntegerField(min_value=-1, label=_("RAM (MB)"))
metadata_items = forms.IntegerField(min_value=-1,
label=_("Metadata Items"))
key_pairs = forms.IntegerField(min_value=-1, label=_("Key Pairs"))
server_groups = forms.IntegerField(min_value=-1, label=_("Server Groups"))
server_group_members = forms.IntegerField(
min_value=-1, label=_("Server Group Members"))
injected_files = forms.IntegerField(min_value=-1,
label=_("Injected Files"))
injected_file_content_bytes = forms.IntegerField(
min_value=-1,
label=_("Injected File Content (Bytes)"))
injected_file_path_bytes = forms.IntegerField(
min_value=-1,
label=_("Length of Injected File Path"))
_quota_fields = quotas.NOVA_QUOTA_FIELDS
def _tenant_quota_update(self, request, project_id, data):
nova.tenant_quota_update(request, project_id, **data)
class Meta(object):
name = _("Compute")
slug = 'update_compute_quotas'
help_text = _("Set maximum quotas for the project.")
permissions = ('openstack.roles.admin', 'openstack.services.compute')
class VolumeQuotaAction(CommonQuotaAction):
volumes = forms.IntegerField(min_value=-1, label=_("Volumes"))
snapshots = forms.IntegerField(min_value=-1, label=_("Volume Snapshots"))
gigabytes = forms.IntegerField(
min_value=-1, label=_("Total Size of Volumes and Snapshots (GiB)"))
_quota_fields = quotas.CINDER_QUOTA_FIELDS
def _tenant_quota_update(self, request, project_id, data):
cinder.tenant_quota_update(request, project_id, **data)
class Meta(object):
name = _("Volume")
slug = 'update_volume_quotas'
help_text = _("Set maximum quotas for the project.")
permissions = ('openstack.roles.admin', 'openstack.services.compute')
class NetworkQuotaAction(CommonQuotaAction):
network = forms.IntegerField(min_value=-1, label=_("Networks"))
subnet = forms.IntegerField(min_value=-1, label=_("Subnets"))
port = forms.IntegerField(min_value=-1, label=_("Ports"))
router = forms.IntegerField(min_value=-1, label=_("Routers"))
floatingip = forms.IntegerField(min_value=-1, label=_("Floating IPs"))
security_group = forms.IntegerField(min_value=-1,
label=_("Security Groups"))
security_group_rule = forms.IntegerField(min_value=-1,
label=_("Security Group Rules"))
_quota_fields = quotas.NEUTRON_QUOTA_FIELDS
def _tenant_quota_update(self, request, project_id, data):
api.neutron.tenant_quota_update(request, project_id, **data)
class Meta(object):
name = _("Network")
slug = 'update_network_quotas'
help_text = _("Set maximum quotas for the project.")
permissions = ('openstack.roles.admin', 'openstack.services.compute')
class UpdateComputeQuota(workflows.Step):
action_class = ComputeQuotaAction
template_name = COMMON_HORIZONTAL_TEMPLATE
depends_on = ("project_id", "disabled_quotas")
contributes = quotas.NOVA_QUOTA_FIELDS
def allowed(self, request):
return api.base.is_service_enabled(request, 'compute')
class UpdateVolumeQuota(workflows.Step):
action_class = VolumeQuotaAction
template_name = COMMON_HORIZONTAL_TEMPLATE
depends_on = ("project_id", "disabled_quotas")
contributes = quotas.CINDER_QUOTA_FIELDS
def allowed(self, request):
return cinder.is_volume_service_enabled(request)
class UpdateNetworkQuota(workflows.Step):
action_class = NetworkQuotaAction
template_name = COMMON_HORIZONTAL_TEMPLATE
depends_on = ("project_id", "disabled_quotas")
contributes = quotas.NEUTRON_QUOTA_FIELDS
def allowed(self, request):
return (api.base.is_service_enabled(request, 'network') and
api.neutron.is_quotas_extension_supported(request))
class UpdateQuota(workflows.Workflow):
slug = "update_quotas"
name = _("Edit Quotas")
finalize_button_name = _("Save")
success_message = _('Modified quotas of project')
failure_message = _('Unable to modify quotas of project')
success_url = "horizon:identity:projects:index"
default_steps = (UpdateComputeQuota,
UpdateVolumeQuota,
UpdateNetworkQuota)
class CreateProjectInfoAction(workflows.Action):
# Hide the domain_id and domain_name by default
domain_id = forms.CharField(label=_("Domain ID"),
required=False,
widget=forms.HiddenInput())
domain_name = forms.CharField(label=_("Domain Name"),
required=False,
widget=forms.HiddenInput())
name = forms.CharField(label=_("Name"),
max_length=64)
description = forms.CharField(widget=forms.widgets.Textarea(
attrs={'rows': 4}),
label=_("Description"),
required=False)
enabled = forms.BooleanField(label=_("Enabled"),
required=False,
initial=True)
def __init__(self, request, *args, **kwargs):
super(CreateProjectInfoAction, self).__init__(request,
*args,
**kwargs)
readonlyInput = forms.TextInput(attrs={'readonly': 'readonly'})
self.fields["domain_id"].widget = readonlyInput
self.fields["domain_name"].widget = readonlyInput
self.add_extra_fields()
def add_extra_fields(self):
# add extra column defined by setting
EXTRA_INFO = settings.PROJECT_TABLE_EXTRA_INFO
for key, value in EXTRA_INFO.items():
form = forms.CharField(label=value, required=False,)
self.fields[key] = form
class Meta(object):
name = _("Project Information")
help_text = _("Create a project to organize users.")
class CreateProjectInfo(workflows.Step):
action_class = CreateProjectInfoAction
template_name = COMMON_HORIZONTAL_TEMPLATE
contributes = ("domain_id",
"domain_name",
"project_id",
"name",
"description",
"enabled")
def __init__(self, workflow):
super(CreateProjectInfo, self).__init__(workflow)
EXTRA_INFO = settings.PROJECT_TABLE_EXTRA_INFO
self.contributes += tuple(EXTRA_INFO.keys())
class UpdateProjectMembersAction(workflows.MembershipAction):
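    """Builds one multiple-choice field of users per Keystone role.

    The choices are all users in the project's domain; for an existing
    project, each field is pre-populated with the users that currently
    hold that role.
    """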
def __init__(self, request, *args, **kwargs):
super(UpdateProjectMembersAction, self).__init__(request,
*args,
**kwargs)
err_msg = _('Unable to retrieve user list. Please try again later.')
# Use the domain_id from the project
domain_id = self.initial.get("domain_id", None)
project_id = ''
if 'project_id' in self.initial:
project_id = self.initial['project_id']
# Get the default role
try:
default_role = keystone.get_default_role(self.request)
# Default role is necessary to add members to a project
if default_role is None:
default = settings.OPENSTACK_KEYSTONE_DEFAULT_ROLE
msg = (_('Could not find default role "%s" in Keystone') %
default)
raise exceptions.NotFound(msg)
except Exception:
exceptions.handle(self.request,
err_msg,
redirect=reverse(INDEX_URL))
default_role_name = self.get_default_role_field_name()
self.fields[default_role_name] = forms.CharField(required=False)
self.fields[default_role_name].initial = default_role.id
# Get list of available users
all_users = []
try:
all_users = api.keystone.user_list(request,
domain=domain_id)
except Exception:
exceptions.handle(request, err_msg)
users_list = [(user.id, user.name) for user in all_users]
# Get list of roles
role_list = []
try:
role_list = api.keystone.role_list(request)
except Exception:
exceptions.handle(request,
err_msg,
redirect=reverse(INDEX_URL))
for role in role_list:
field_name = self.get_member_field_name(role.id)
label = role.name
self.fields[field_name] = forms.MultipleChoiceField(required=False,
label=label)
self.fields[field_name].choices = users_list
self.fields[field_name].initial = []
# Figure out users & roles
if project_id:
try:
users_roles = api.keystone.get_project_users_roles(request,
project_id)
except Exception:
exceptions.handle(request,
err_msg,
redirect=reverse(INDEX_URL))
for user_id in users_roles:
roles_ids = users_roles[user_id]
for role_id in roles_ids:
field_name = self.get_member_field_name(role_id)
self.fields[field_name].initial.append(user_id)
class Meta(object):
name = _("Project Members")
slug = PROJECT_USER_MEMBER_SLUG
class UpdateProjectMembers(workflows.UpdateMembersStep):
action_class = UpdateProjectMembersAction
available_list_title = _("All Users")
members_list_title = _("Project Members")
no_available_text = _("No users found.")
no_members_text = _("No users.")
def contribute(self, data, context):
if data:
try:
roles = api.keystone.role_list(self.workflow.request)
except Exception:
exceptions.handle(self.workflow.request,
_('Unable to retrieve user list.'))
post = self.workflow.request.POST
for role in roles:
field = self.get_member_field_name(role.id)
context[field] = post.getlist(field)
return context
class UpdateProjectGroupsAction(workflows.MembershipAction):
def __init__(self, request, *args, **kwargs):
super(UpdateProjectGroupsAction, self).__init__(request,
*args,
**kwargs)
err_msg = _('Unable to retrieve group list. Please try again later.')
# Use the domain_id from the project
domain_id = self.initial.get("domain_id", None)
project_id = ''
if 'project_id' in self.initial:
project_id = self.initial['project_id']
# Get the default role
try:
default_role = api.keystone.get_default_role(self.request)
# Default role is necessary to add members to a project
if default_role is None:
default = settings.OPENSTACK_KEYSTONE_DEFAULT_ROLE
msg = (_('Could not find default role "%s" in Keystone') %
default)
raise exceptions.NotFound(msg)
except Exception:
exceptions.handle(self.request,
err_msg,
redirect=reverse(INDEX_URL))
default_role_name = self.get_default_role_field_name()
self.fields[default_role_name] = forms.CharField(required=False)
self.fields[default_role_name].initial = default_role.id
# Get list of available groups
all_groups = []
try:
all_groups = api.keystone.group_list(request,
domain=domain_id)
except Exception:
exceptions.handle(request, err_msg)
# some backends (e.g. LDAP) do not provide group names
groups_list = [
(group.id, getattr(group, 'name', group.id))
for group in all_groups]
# Get list of roles
role_list = []
try:
role_list = api.keystone.role_list(request)
except Exception:
exceptions.handle(request,
err_msg,
redirect=reverse(INDEX_URL))
for role in role_list:
field_name = self.get_member_field_name(role.id)
label = role.name
self.fields[field_name] = forms.MultipleChoiceField(required=False,
label=label)
self.fields[field_name].choices = groups_list
self.fields[field_name].initial = []
# Figure out groups & roles
if project_id:
try:
groups_roles = api.keystone.get_project_groups_roles(
request, project_id)
except Exception:
exceptions.handle(request,
err_msg,
redirect=reverse(INDEX_URL))
for group_id in groups_roles:
roles_ids = groups_roles[group_id]
for role_id in roles_ids:
field_name = self.get_member_field_name(role_id)
self.fields[field_name].initial.append(group_id)
class Meta(object):
name = _("Project Groups")
slug = PROJECT_GROUP_MEMBER_SLUG
class UpdateProjectGroups(workflows.UpdateMembersStep):
action_class = UpdateProjectGroupsAction
available_list_title = _("All Groups")
members_list_title = _("Project Groups")
no_available_text = _("No groups found.")
no_members_text = _("No groups.")
def contribute(self, data, context):
if data:
try:
roles = api.keystone.role_list(self.workflow.request)
except Exception:
exceptions.handle(self.workflow.request,
_('Unable to retrieve role list.'))
post = self.workflow.request.POST
for role in roles:
field = self.get_member_field_name(role.id)
context[field] = post.getlist(field)
return context
class CreateProject(workflows.Workflow):
slug = "create_project"
name = _("Create Project")
finalize_button_name = _("Create Project")
success_message = _('Created new project "%s".')
failure_message = _('Unable to create project "%s".')
success_url = "horizon:identity:projects:index"
default_steps = (CreateProjectInfo,
UpdateProjectMembers)
def __init__(self, request=None, context_seed=None, entry_point=None,
*args, **kwargs):
self.default_steps = (CreateProjectInfo,
UpdateProjectMembers,
UpdateProjectGroups)
super(CreateProject, self).__init__(request=request,
context_seed=context_seed,
entry_point=entry_point,
*args,
**kwargs)
def format_status_message(self, message):
if "%s" in message:
return message % self.context.get('name', 'unknown project')
else:
return message
def _create_project(self, request, data):
# create the project
domain_id = data['domain_id']
try:
# add extra information
EXTRA_INFO = settings.PROJECT_TABLE_EXTRA_INFO
kwargs = dict((key, data.get(key)) for key in EXTRA_INFO)
desc = data['description']
self.object = api.keystone.tenant_create(request,
name=data['name'],
description=desc,
enabled=data['enabled'],
domain=domain_id,
**kwargs)
return self.object
except exceptions.Conflict:
msg = _('Project name "%s" is already used.') % data['name']
self.failure_message = msg
return
except Exception:
exceptions.handle(request, ignore=True)
return
def _update_project_members(self, request, data, project_id):
# update project members
users_to_add = 0
try:
available_roles = api.keystone.role_list(request)
member_step = self.get_step(PROJECT_USER_MEMBER_SLUG)
# count how many users are to be added
for role in available_roles:
field_name = member_step.get_member_field_name(role.id)
role_list = data[field_name]
users_to_add += len(role_list)
# add new users to project
for role in available_roles:
field_name = member_step.get_member_field_name(role.id)
role_list = data[field_name]
users_added = 0
for user in role_list:
api.keystone.add_tenant_user_role(request,
project=project_id,
user=user,
role=role.id)
users_added += 1
users_to_add -= users_added
except Exception:
group_msg = _(", add project groups")
exceptions.handle(request,
_('Failed to add %(users_to_add)s project '
'members%(group_msg)s and set project quotas.')
% {'users_to_add': users_to_add,
'group_msg': group_msg})
def _update_project_groups(self, request, data, project_id):
# update project groups
groups_to_add = 0
try:
available_roles = api.keystone.role_list(request)
member_step = self.get_step(PROJECT_GROUP_MEMBER_SLUG)
# count how many groups are to be added
for role in available_roles:
field_name = member_step.get_member_field_name(role.id)
role_list = data[field_name]
groups_to_add += len(role_list)
# add new groups to project
for role in available_roles:
field_name = member_step.get_member_field_name(role.id)
role_list = data[field_name]
groups_added = 0
for group in role_list:
api.keystone.add_group_role(request,
role=role.id,
group=group,
project=project_id)
groups_added += 1
groups_to_add -= groups_added
except Exception:
exceptions.handle(request,
_('Failed to add %s project groups '
'and update project quotas.')
% groups_to_add)
def handle(self, request, data):
project = self._create_project(request, data)
if not project:
return False
project_id = project.id
self._update_project_members(request, data, project_id)
self._update_project_groups(request, data, project_id)
return True
class UpdateProjectInfoAction(CreateProjectInfoAction):
enabled = forms.BooleanField(required=False, label=_("Enabled"))
domain_name = forms.CharField(label=_("Domain Name"),
required=False,
widget=forms.HiddenInput())
def __init__(self, request, initial, *args, **kwargs):
super(UpdateProjectInfoAction, self).__init__(
request, initial, *args, **kwargs)
if initial['project_id'] == request.user.project_id:
self.fields['enabled'].widget.attrs['disabled'] = True
self.fields['enabled'].help_text = _(
'You cannot disable your current project')
def clean(self):
cleaned_data = super(UpdateProjectInfoAction, self).clean()
# NOTE(tsufiev): in case the current project is being edited, its
# 'enabled' field is disabled to prevent changing the field value
# which is always `True` for the current project (because the user
# logged in it). Since Django treats disabled checkbox as providing
# `False` value even if its initial value is `True`, we need to
# restore the original `True` value of 'enabled' field here.
if self.fields['enabled'].widget.attrs.get('disabled', False):
cleaned_data['enabled'] = True
return cleaned_data
class Meta(object):
name = _("Project Information")
slug = 'update_info'
help_text = _("Edit the project details.")
class UpdateProjectInfo(workflows.Step):
action_class = UpdateProjectInfoAction
template_name = COMMON_HORIZONTAL_TEMPLATE
depends_on = ("project_id",)
contributes = ("domain_id",
"domain_name",
"name",
"description",
"enabled")
def __init__(self, workflow):
super(UpdateProjectInfo, self).__init__(workflow)
EXTRA_INFO = settings.PROJECT_TABLE_EXTRA_INFO
self.contributes += tuple(EXTRA_INFO.keys())
class UpdateProject(workflows.Workflow):
slug = "update_project"
name = _("Edit Project")
finalize_button_name = _("Save")
success_message = _('Modified project "%s".')
failure_message = _('Unable to modify project "%s".')
success_url = "horizon:identity:projects:index"
default_steps = (UpdateProjectInfo,
UpdateProjectMembers)
def __init__(self, request=None, context_seed=None, entry_point=None,
*args, **kwargs):
self.default_steps = (UpdateProjectInfo,
UpdateProjectMembers,
UpdateProjectGroups)
super(UpdateProject, self).__init__(request=request,
context_seed=context_seed,
entry_point=entry_point,
*args,
**kwargs)
def format_status_message(self, message):
if "%s" in message:
return message % self.context.get('name', 'unknown project')
else:
return message
@memoized.memoized_method
def _get_available_roles(self, request):
return api.keystone.role_list(request)
def _update_project(self, request, data):
"""Update project info"""
domain_id = identity.get_domain_id_for_operation(request)
try:
project_id = data['project_id']
# add extra information
EXTRA_INFO = settings.PROJECT_TABLE_EXTRA_INFO
kwargs = dict((key, data.get(key)) for key in EXTRA_INFO)
return api.keystone.tenant_update(
request,
project_id,
name=data['name'],
description=data['description'],
enabled=data['enabled'],
domain=domain_id,
**kwargs)
except exceptions.Conflict:
msg = _('Project name "%s" is already used.') % data['name']
self.failure_message = msg
return
except Exception as e:
LOG.debug('Project update failed: %s', e)
exceptions.handle(request, ignore=True)
return
def _add_roles_to_users(self, request, data, project_id, user_id,
role_ids, available_roles):
member_step = self.get_step(PROJECT_USER_MEMBER_SLUG)
current_role_ids = list(role_ids)
for role in available_roles:
field_name = member_step.get_member_field_name(role.id)
# Check if the user is in the list of users with this role.
if user_id in data[field_name]:
# Add it if necessary
if role.id not in current_role_ids:
# user role has changed
api.keystone.add_tenant_user_role(
request,
project=project_id,
user=user_id,
role=role.id)
else:
# User role is unchanged, so remove it from the
# remaining roles list to avoid removing it later.
index = current_role_ids.index(role.id)
current_role_ids.pop(index)
return current_role_ids
def _remove_roles_from_user(self, request, project_id, user_id,
current_role_ids):
for id_to_delete in current_role_ids:
api.keystone.remove_tenant_user_role(
request,
project=project_id,
user=user_id,
role=id_to_delete)
def _is_removing_self_admin_role(self, request, project_id, user_id,
available_roles, current_role_ids):
is_current_user = user_id == request.user.id
is_current_project = project_id == request.user.tenant_id
_admin_roles = utils.get_admin_roles()
available_admin_role_ids = [role.id for role in available_roles
if role.name.lower() in _admin_roles]
admin_roles = [role for role in current_role_ids
if role in available_admin_role_ids]
if admin_roles:
removing_admin = any([role in current_role_ids
for role in admin_roles])
else:
removing_admin = False
if is_current_user and is_current_project and removing_admin:
# Cannot remove "admin" role on current(admin) project
msg = _('You cannot revoke your administrative privileges '
'from the project you are currently logged into. '
'Please switch to another project with '
'administrative privileges or remove the '
'administrative role manually via the CLI.')
messages.warning(request, msg)
return True
else:
return False
def _update_project_members(self, request, data, project_id):
# update project members
users_to_modify = 0
# Project-user member step
member_step = self.get_step(PROJECT_USER_MEMBER_SLUG)
try:
# Get our role options
available_roles = self._get_available_roles(request)
# Get the users currently associated with this project so we
# can diff against it.
users_roles = api.keystone.get_project_users_roles(
request, project=project_id)
users_to_modify = len(users_roles)
# TODO(bpokorny): The following lines are needed to make sure we
# only modify roles for users who are in the current domain.
# Otherwise, we'll end up removing roles for users who have roles
# on the project but aren't in the domain. For now, Horizon won't
# support managing roles across domains. The Keystone CLI
# supports it, so we may want to add that in the future.
all_users = api.keystone.user_list(request,
domain=data['domain_id'])
users_dict = {user.id: user.name for user in all_users}
for user_id in users_roles:
# Don't remove roles if the user isn't in the domain
if user_id not in users_dict:
users_to_modify -= 1
continue
                # Check if there have been any changes in the roles of
                # existing project members.
current_role_ids = list(users_roles[user_id])
modified_role_ids = self._add_roles_to_users(
request, data, project_id, user_id,
current_role_ids, available_roles)
# Prevent admins from doing stupid things to themselves.
removing_admin = self._is_removing_self_admin_role(
request, project_id, user_id, available_roles,
modified_role_ids)
# Otherwise go through and revoke any removed roles.
if not removing_admin:
self._remove_roles_from_user(request, project_id, user_id,
modified_role_ids)
users_to_modify -= 1
# Grant new roles on the project.
for role in available_roles:
field_name = member_step.get_member_field_name(role.id)
# Count how many users may be added for exception handling.
users_to_modify += len(data[field_name])
for role in available_roles:
users_added = 0
field_name = member_step.get_member_field_name(role.id)
for user_id in data[field_name]:
if user_id not in users_roles:
api.keystone.add_tenant_user_role(request,
project=project_id,
user=user_id,
role=role.id)
users_added += 1
users_to_modify -= users_added
return True
except Exception:
group_msg = _(", update project groups")
exceptions.handle(request,
_('Failed to modify %(users_to_modify)s'
' project members%(group_msg)s and '
'update project quotas.')
% {'users_to_modify': users_to_modify,
'group_msg': group_msg})
return False
def _update_project_groups(self, request, data, project_id, domain_id):
# update project groups
groups_to_modify = 0
member_step = self.get_step(PROJECT_GROUP_MEMBER_SLUG)
try:
available_roles = self._get_available_roles(request)
# Get the groups currently associated with this project so we
# can diff against it.
project_groups = api.keystone.group_list(request,
domain=domain_id,
project=project_id)
groups_to_modify = len(project_groups)
for group in project_groups:
                # Check if there have been any changes in the roles of
                # existing project members.
current_roles = api.keystone.roles_for_group(
self.request,
group=group.id,
project=project_id)
current_role_ids = [role.id for role in current_roles]
for role in available_roles:
# Check if the group is in the list of groups with
# this role.
field_name = member_step.get_member_field_name(role.id)
if group.id in data[field_name]:
# Add it if necessary
if role.id not in current_role_ids:
# group role has changed
api.keystone.add_group_role(
request,
role=role.id,
group=group.id,
project=project_id)
else:
# Group role is unchanged, so remove it from
# the remaining roles list to avoid removing it
# later.
index = current_role_ids.index(role.id)
current_role_ids.pop(index)
# Revoke any removed roles.
for id_to_delete in current_role_ids:
api.keystone.remove_group_role(request,
role=id_to_delete,
group=group.id,
project=project_id)
groups_to_modify -= 1
# Grant new roles on the project.
for role in available_roles:
field_name = member_step.get_member_field_name(role.id)
# Count how many groups may be added for error handling.
groups_to_modify += len(data[field_name])
for role in available_roles:
groups_added = 0
field_name = member_step.get_member_field_name(role.id)
project_group_ids = [x.id for x in project_groups]
for group_id in data[field_name]:
if group_id not in project_group_ids:
api.keystone.add_group_role(request,
role=role.id,
group=group_id,
project=project_id)
groups_added += 1
groups_to_modify -= groups_added
return True
except Exception:
exceptions.handle(request,
_('Failed to modify %s project '
'members, update project groups '
'and update project quotas.')
% groups_to_modify)
return False
def handle(self, request, data):
# FIXME(gabriel): This should be refactored to use Python's built-in
# sets and do this all in a single "roles to add" and "roles to remove"
# pass instead of the multi-pass thing happening now.
project = self._update_project(request, data)
if not project:
return False
project_id = data['project_id']
# Use the domain_id from the project if available
domain_id = getattr(project, "domain_id", '')
ret = self._update_project_members(request, data, project_id)
if not ret:
return False
ret = self._update_project_groups(request, data,
project_id, domain_id)
if not ret:
return False
return True
| 42.520563 | 79 | 0.565018 | ["Apache-2.0"] | LinkleYping/horizon-vul | openstack_dashboard/dashboards/identity/projects/workflows.py | 39,289 | Python |
import os
import sys
import subprocess
from subprocess import CalledProcessError
from subprocess import TimeoutExpired
import time
import re
import statistics
class MemPoint:
def __init__(self, time, heap, heap_extra, stack, heap_tree):
self.time = int(time.split("=")[1])
self.heap = int(heap.split("=")[1])
self.heap_extra = int(heap_extra.split("=")[1])
self.stack = int(stack.split("=")[1])
def get_sum_memusage(self):
return self.heap + self.heap_extra + self.stack
def get_mem_usage(filename):
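    # A massif output file repeats blocks of the form:
    #   snapshot=N / time=... / mem_heap_B=... / mem_heap_extra_B=... /
    #   mem_stacks_B=... / heap_tree=...
    # Each numeric line is split on '=' below, and the peak of
    # heap + heap_extra + stack across all snapshots is returned.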
with open(filename) as file:
contents = file.readlines()
memory_points = []
for index in range(len(contents)):
if("snapshot" in contents[index]):
emptyLine = contents[index+1] # not used
time = contents[index+2] # not used
mem_heap = contents[index+3]
mem_heap_extra = contents[index+4]
mem_stacks = contents[index+5]
heap_tree = contents[index+6] #not used
memory_points.append(MemPoint(time, mem_heap, mem_heap_extra, mem_stacks, heap_tree))
maxUsage = max(value.get_sum_memusage() for value in memory_points)
return maxUsage
def getFileSize(filename):
return os.path.getsize(filename)
def purge(dir):
for f in os.listdir(dir):
if "massif.out." in f:
os.remove(os.path.join(dir, f))
def getRam(matrix, filename):
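    # Run the solver under valgrind's massif tool, locate the massif.out.<pid>
    # file it wrote to the current directory, and return the peak memory
    # usage recorded in it.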
purge(".")
subprocess.run(["valgrind", "--tool=massif", "--stacks=yes",
"--pages-as-heap=no", filename, matrix])
ps = subprocess.Popen(('ls'), stdout=subprocess.PIPE)
resultFilename = subprocess.check_output(
('grep', 'massif.out'), stdin=ps.stdout).decode(sys.stdout.encoding).strip()
ps.wait()
maxUsage = get_mem_usage(resultFilename)
return maxUsage
def getSpeed(matrix, filename):
start = time.time()
resultLines = subprocess.check_output((filename, matrix), stderr=subprocess.STDOUT, timeout=5).decode(sys.stdout.encoding)
end = time.time() - start
return end
def getSolutionFromString(string):
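    # The solver's output is expected to be exactly two non-empty lines:
    # the selected row indices, then the score.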
string = string.split('\n')
string = [x.strip() for x in string]
string = list(filter(lambda a: a != '', string))
if len(string) != 2:
raise Exception("Wrong number of lines in outout")
output = {}
output["rows"] = [int(x) for x in string[0].split()]
output["score"] = int(string[1])
return output
def isValid(matrix, filename, solutionFilename):
try:
resultLines = subprocess.check_output((filename, matrix), stderr=subprocess.STDOUT, timeout=5).decode(sys.stdout.encoding)
except CalledProcessError as e:
print("The application did not exit cleanly on: " + matrix)
return False
except TimeoutExpired as e:
print("The application exceeded the allowed time on: " + matrix)
return False
result = getSolutionFromString(resultLines)
with open(solutionFilename) as solutionFile:
solutionLines = solutionFile.read()
solution = getSolutionFromString(solutionLines)
    if solution != result:
        print('Incorrect solution.')
        print('Expected: ' + str(solution))
        print('Received: ' + str(result))
        return False
    return True
def tryInt(s):
try:
return int(s)
except:
return s
def alphanum_key(s):
""" Turn a string into a list of string and number chunks.
"z23a" -> ["z", 23, "a"]
"""
return [tryInt(c) for c in re.split('([0-9]+)', s) ]
def sort_nicely(l):
""" Sort the given list in the way that humans expect.
"""
l.sort(key=alphanum_key)
def printHelpInfo():
print("")
print("Usage: python3 path/to/executable path/to/directory/with/matrices/and/solutions")
print("")
if __name__ == "__main__":
if len(sys.argv) != 3:
print("Wrong number of arguments!")
printHelpInfo()
sys.exit()
filename = sys.argv[1]
myDir = os.path.dirname(__file__)
if not os.path.exists(filename):
print("Executable file \"" + filename + "\" not found")
printHelpInfo()
sys.exit()
directory = sys.argv[2]
if not os.path.exists(directory) or not os.path.isdir(directory):
print("Matrix directory \"" + directory + "\" is not a valid path")
printHelpInfo()
sys.exit()
fileSize = getFileSize(filename)
matrices = [os.path.join(directory, f) for f in os.listdir(directory) if "matrix" in f]
matrices.sort(key=alphanum_key)
solutions = [os.path.join(directory, f) for f in os.listdir(directory) if "solution" in f]
solutions.sort(key=alphanum_key)
timedout = []
speeds = []
rams = []
for index in range(len(matrices)):
matrix = matrices[index]
solution = solutions[index]
valid = isValid(matrix, filename, solution)
if not valid:
break
speed = getSpeed(matrix, filename)
ram = getRam(matrix, filename)
speeds.append(speed)
rams.append(ram)
print("Filesize is " + str(fileSize))
print("")
print("Speeds are " + str(speeds))
print("")
print("Average Speed is: " + str(statistics.mean(speeds)))
print("")
print("Rams are " + str(rams))
print("")
print("Average Ram is: " + str(round(statistics.mean(rams))))
| 32.634731 | 130 | 0.629541 | ["MIT"] | Saqqe/Cream | firstsession/measurement/measure_program.py | 5,450 | Python |
import numpy as np
import matplotlib.pyplot as plt
import pprint
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from matplotlib.cm import ScalarMappable
results = np.load('results.npy')
ini_p1, ini_p2, final_p1, final_p2, loss = results.T
param1_lab = 'Stiffness'
#param2_lab = 'Mass'
param2_lab = 'Gravity'
def normalize(arr):
return (arr - np.min(arr))/np.ptp(arr)
fig, axes = plt.subplots(2, figsize=(8, 7))
ax = axes[0]
ax2 = axes[1]
#ax.set_aspect("equal")
ini_p1_norm = normalize(ini_p1)
ini_p2_norm = normalize(ini_p2)
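# Encode each run's initial guess as an RGB colour: the red channel tracks the
# normalized initial stiffness and the blue channel the normalized initial gravity.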
cmap = lambda p1,p2 : (p1, 0, p2)
cmap_loss = plt.get_cmap('RdYlGn_r')
norm = plt.Normalize(loss.min(), loss.max())
loss_min, loss_max = np.amin(loss), np.amax(loss)
for i in range(len(final_p1)):
ax.scatter(final_p1[i], final_p2[i], color=cmap(ini_p1_norm[i],ini_p2_norm[i]))
#sc = ax2.scatter(final_p1[i], final_p2[i], color=plt.get_cmap('RdYlGn')(1-(loss[i] - loss_min)/(loss_max - loss_min)))
sc = ax2.scatter(final_p1[i], final_p2[i], color=cmap_loss(norm(loss[i])))
ax.set_xlabel("Final Estimated %s Multiplier"%param1_lab)
ax.set_ylabel("Final Estimated %s Multiplier"%param2_lab)
ax.set_xlim(0,np.amax(final_p1)+(np.amax(final_p1) - np.amin(final_p1))/10)
ax.set_ylim(0,np.amax(final_p2)+(np.amax(final_p2) - np.amin(final_p2))/10)
ax2.set_xlim(0,np.amax(final_p1)+(np.amax(final_p1) - np.amin(final_p1))/10)
ax2.set_ylim(0,np.amax(final_p2)+(np.amax(final_p2) - np.amin(final_p2))/10)
sm = ScalarMappable(norm=norm, cmap=cmap_loss)
sm.set_array([])
cbar = fig.colorbar(sm, ax=ax2)
cbar.ax.set_title("Loss", fontsize=10)
fig.suptitle("Est. Cloth Params vs. Initial Guesses")
plt.subplots_adjust(left=0.1, right=0.625, top=0.9)
cax = fig.add_axes([0.7,0.55,0.3,0.3])
cp1 = np.linspace(0,1)
cp2 = np.linspace(0,1)
Cp1, Cp2 = np.meshgrid(cp1,cp2)
C0 = np.zeros_like(Cp1)
# make RGB image, p1 to red channel, p2 to blue channel
Legend = np.dstack((Cp1, C0, Cp2))
# parameters range between 0 and 1
cax.imshow(Legend, origin="lower", extent=[0,1,0,1])
cax.set_xlabel("p1: %s"%param1_lab.lower())
cax.set_xticklabels(np.around(np.linspace(ini_p1[0], ini_p1[-1], 6),2))
cax.set_yticklabels(np.around(np.linspace(ini_p2[0], ini_p2[-1], 6),2))
cax.set_ylabel("p2: %s"%param2_lab.lower())
cax.set_title("Initial Guess Legend", fontsize=10)
plt.savefig('cloth_params.png')
plt.show()
| 35.134328 | 123 | 0.718777 | ["ECL-2.0", "Apache-2.0"] | priyasundaresan/kaolin | diffsim_torch3d/pysim/plot_cloth_params_results.py | 2,354 | Python |
#!/usr/bin/python
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TR-069 has mandatory attribute names that don't comply with policy
# pylint:disable=invalid-name
#
"""Implement the inner handling for tr-98/181 ManagementServer."""
__author__ = 'dgentry@google.com (Denton Gentry)'
import datetime
import math
import os
import random
import re
import sys
import time
import urlparse
import google3
import tornado.ioloop
import helpers
import cwmptypes
# Allow unit tests to override with a mock
PERIODIC_CALLBACK = tornado.ioloop.PeriodicCallback
SESSIONTIMEOUTFILE = '/tmp/cwmp/session_timeout'
class CpeManagementServer(object):
"""Inner class implementing tr-98 & 181 ManagementServer."""
# The default password is trivial. In the initial Inform exchange
# the ACS generally sets ConnectionRequest{Username,Password}
# to values which only it knows. If something goes wrong, we want
# the password to be well known so the ACS can wake us up and
# try again.
ConnectionRequestPassword = cwmptypes.TriggerString('cwmp')
ConnectionRequestUsername = cwmptypes.TriggerString('catawampus')
CWMPRetryMinimumWaitInterval = cwmptypes.TriggerUnsigned(5)
CWMPRetryIntervalMultiplier = cwmptypes.TriggerUnsigned(2000)
DefaultActiveNotificationThrottle = cwmptypes.TriggerUnsigned(0)
EnableCWMP = cwmptypes.ReadOnlyBool(True)
PeriodicInformEnable = cwmptypes.TriggerBool(True)
PeriodicInformTime = cwmptypes.TriggerDate(0)
Password = cwmptypes.TriggerString('')
STUNEnable = cwmptypes.ReadOnlyBool(False)
UpgradesManaged = cwmptypes.ReadOnlyBool(True)
Username = cwmptypes.TriggerString('')
def __init__(self, acs_config, port, ping_path,
acs_url=None, get_parameter_key=None,
start_periodic_session=None, ioloop=None,
restrict_acs_hosts=None, conman_dir='/tmp/conman'):
self._acs_config = acs_config
self.acs_url = acs_url
self.get_parameter_key = get_parameter_key
self.ioloop = ioloop or tornado.ioloop.IOLoop.instance()
self.my_ip = None
self._periodic_callback = None
self._periodic_interval = 0
self._periodic_intervals_startup = 5
self.ping_path = ping_path
self.port = port
self.restrict_acs_hosts = restrict_acs_hosts
self.start_periodic_session = start_periodic_session
self._start_periodic_timeout = None
self._conman_dir = conman_dir
self.ConfigurePeriodicInform()
def Triggered(self):
self.ConfigurePeriodicInform()
def SuccessfulSession(self):
"""Called when we successfully terminate a CWMP session."""
if self._periodic_intervals_startup > 0:
self._periodic_intervals_startup -= 1
def GetPeriodicInformInterval(self):
if self._periodic_interval:
return self._periodic_interval
# checkin the first few times on a short interval, to give
# the ACS several opportunities to set PeriodicInformInterval.
return 60 if self._periodic_intervals_startup > 0 else (15 * 60)
def SetPeriodicInformInterval(self, value):
self._periodic_interval = int(value)
PeriodicInformInterval = property(
GetPeriodicInformInterval,
SetPeriodicInformInterval, None,
'tr-98/181 ManagementServer.PeriodicInformInterval')
def ValidateAcsUrl(self, value):
"""Checks if the URL passed is acceptable. If not raises an exception."""
if not self.restrict_acs_hosts or not value:
return
# Require https for the url scheme.
split_url = urlparse.urlsplit(value)
if split_url.scheme != 'https':
raise ValueError('The ACS Host must be https: %r' % (value,))
# Iterate over the restrict domain name list and see if one of
# the restricted domain names matches the supplied url host name.
restrict_hosts = re.split(r'[\s,]+', self.restrict_acs_hosts)
for host in restrict_hosts:
# Check the full hostname.
if split_url.hostname == host:
return
# Check against the restrict host of form '.foo.com'
if not host.startswith('.'):
dotted_host = '.' + host
else:
dotted_host = host
if split_url.hostname.endswith(dotted_host):
return
# If we don't find a valid host, raise an exception.
raise ValueError('The ACS Host is not permissible: %r' % (value,))
def WantACSAutoprovisioning(self):
"""Whether to enable ACS autoprovisioning."""
# Defaults to off, since that's the safest failure mode. We'd rather
# fail to autoprovision when there's a bug (easy to detect the bug)
# rather than accidentally autoprovisioning when we don't want it (weird
# edge cases that are hard to detect).
return os.path.exists(os.path.join(self._conman_dir,
'acs_autoprovisioning'))
def _GetURL(self):
"""Return the ACS URL to use (internal only)."""
if self.acs_url:
try:
self.ValidateAcsUrl(self.acs_url)
return self.acs_url
except ValueError as e:
print 'Supplied acs_url %r is invalid (%s)' % (self.acs_url, e)
url = self._acs_config.GetAcsUrl()
max_attempts = 20
while url and max_attempts:
try:
self.ValidateAcsUrl(url)
self.MostRecentURL = url
return url
except ValueError as e:
print 'Invalidating url %r (%s)' % (url, e)
if not self._acs_config.InvalidateAcsUrl(url):
print ('set-acs failed to invalidate url!'
'Something is extremely broken.')
sys.exit(100)
url = None
url = self._acs_config.GetAcsUrl()
max_attempts -= 1
# If we get here, there is no valid platform url.
return None
def GetURL(self):
"""Return the ACS URL to use."""
url = self._GetURL()
# All assignments could trigger callbacks, so don't assign unless the
# value has changed.
if url and self.MostRecentURL != url:
self.MostRecentURL = url
return url
def SetURL(self, value):
self.ValidateAcsUrl(value)
if self.acs_url:
self.acs_url = value
else:
self._acs_config.SetAcsUrl(value)
self.MostRecentURL = value
URL = property(GetURL, SetURL, None, 'tr-98/181 ManagementServer.URL')
# This is mainly to allow other code to register callbacks.
# TODO(apenwarr): convert URL to use tr.cwmptypes someday.
MostRecentURL = cwmptypes.String()
def _formatIP(self, ip):
return '[' + ip + ']' if helpers.IsIP6Addr(ip) else ip
def GetConnectionRequestURL(self):
if self.my_ip and self.port and self.ping_path:
path = self.ping_path if self.ping_path[0] != '/' else self.ping_path[1:]
ip = self._formatIP(self.my_ip)
return 'http://%s:%d/%s' % (ip, self.port, path)
else:
return ''
ConnectionRequestURL = property(
GetConnectionRequestURL, None, None,
'tr-98/181 ManagementServer.ConnectionRequestURL')
def GetParameterKey(self):
if self.get_parameter_key is not None:
return self.get_parameter_key()
else:
return ''
ParameterKey = property(GetParameterKey, None, None,
'tr-98/181 ManagementServer.ParameterKey')
def ConfigurePeriodicInform(self):
"""Commit changes to PeriodicInform parameters."""
if self._periodic_callback:
self._periodic_callback.stop()
self._periodic_callback = None
if self._start_periodic_timeout:
self.ioloop.remove_timeout(self._start_periodic_timeout)
self._start_periodic_timeout = None
# Delete the old periodic callback.
if self._periodic_callback:
self._periodic_callback.stop()
self._periodic_callback = None
if self.PeriodicInformEnable and self.PeriodicInformInterval > 0:
msec = self.PeriodicInformInterval * 1000
self._periodic_callback = PERIODIC_CALLBACK(self.start_periodic_session,
msec, self.ioloop)
if self.PeriodicInformTime:
# PeriodicInformTime is just meant as an offset, not an actual time.
# So if it's 25.5 hours in the future and the interval is 1 hour, then
# the interesting part is the 0.5 hours, not the 25.
#
# timetuple might be in the past, but that's okay; the modulus
# makes sure it's never negative. (ie. (-3 % 5) == 2, in python)
timetuple = self.PeriodicInformTime.timetuple()
offset = ((time.mktime(timetuple) - time.time())
% float(self.PeriodicInformInterval))
else:
offset = 0.0
self._start_periodic_timeout = self.ioloop.add_timeout(
datetime.timedelta(seconds=offset), self.StartPeriodicInform)
def StartPeriodicInform(self):
self._periodic_callback.start()
def SessionRetryWait(self, retry_count):
"""Calculate wait time before next session retry.
See $SPEC3 section 3.2.1 for a description of the algorithm.
Args:
retry_count: integer number of retries attempted so far.
Returns:
Number of seconds to wait before initiating next session.
"""
if retry_count == 0:
return 0
periodic_interval = self.PeriodicInformInterval
if self.PeriodicInformInterval <= 0:
periodic_interval = 30
c = 10 if retry_count >= 10 else retry_count
m = float(self.CWMPRetryMinimumWaitInterval)
k = float(self.CWMPRetryIntervalMultiplier) / 1000.0
start = m * math.pow(k, c - 1)
stop = start * k
# pin start/stop to have a maximum value of PeriodicInformInterval
start = int(min(start, periodic_interval / k))
stop = int(min(stop, periodic_interval))
randomwait = random.randrange(start, stop)
return self.GetTimeout(SESSIONTIMEOUTFILE, randomwait)
def GetTimeout(self, filename, default=60):
"""Get timeout value from file for testing."""
try:
return int(open(filename).readline().strip())
except (IOError, ValueError):
pass
return default
| 36.142361 | 79 | 0.698626 | ["Apache-2.0"] | DentonGentry/gfiber-catawampus | tr/cpe_management_server.py | 10,409 | Python |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.service_client import SDKClient
from msrest import Serializer, Deserializer
from ._configuration import StorageManagementClientConfiguration
from .operations import Operations
from .operations import SkusOperations
from .operations import StorageAccountsOperations
from .operations import UsageOperations
from . import models
class StorageManagementClient(SDKClient):
"""The Azure Storage Management API.
:ivar config: Configuration for client.
:vartype config: StorageManagementClientConfiguration
:ivar operations: Operations operations
:vartype operations: azure.mgmt.storage.v2017_10_01.operations.Operations
:ivar skus: Skus operations
:vartype skus: azure.mgmt.storage.v2017_10_01.operations.SkusOperations
:ivar storage_accounts: StorageAccounts operations
:vartype storage_accounts: azure.mgmt.storage.v2017_10_01.operations.StorageAccountsOperations
:ivar usage: Usage operations
:vartype usage: azure.mgmt.storage.v2017_10_01.operations.UsageOperations
:param credentials: Credentials needed for the client to connect to Azure.
:type credentials: :mod:`A msrestazure Credentials
object<msrestazure.azure_active_directory>`
:param subscription_id: Gets subscription credentials which uniquely
identify the Microsoft Azure subscription. The subscription ID forms part
of the URI for every service call.
:type subscription_id: str
:param str base_url: Service URL
"""
def __init__(
self, credentials, subscription_id, base_url=None):
self.config = StorageManagementClientConfiguration(credentials, subscription_id, base_url)
super(StorageManagementClient, self).__init__(self.config.credentials, self.config)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self.api_version = '2017-10-01'
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
self.operations = Operations(
self._client, self.config, self._serialize, self._deserialize)
self.skus = SkusOperations(
self._client, self.config, self._serialize, self._deserialize)
self.storage_accounts = StorageAccountsOperations(
self._client, self.config, self._serialize, self._deserialize)
self.usage = UsageOperations(
self._client, self.config, self._serialize, self._deserialize)
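
# A minimal usage sketch (not part of the generated client): assuming Azure
# credentials and a subscription id are already available, the client exposes
# grouped operations such as client.storage_accounts:
#
#   credentials = ServicePrincipalCredentials(client_id, secret, tenant=tenant)
#   client = StorageManagementClient(credentials, subscription_id)
#   for account in client.storage_accounts.list():
#       print(account.name)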
| 43.820896 | 98 | 0.718665 | ["MIT"] | 16pierre/azure-sdk-for-python | sdk/storage/azure-mgmt-storage/azure/mgmt/storage/v2017_10_01/_storage_management_client.py | 2,936 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
from pyowm.commons.databoxes import ImageType, Satellite, SubscriptionType
class TestImageType(unittest.TestCase):
def test_repr(self):
instance = ImageType('PDF', 'application/pdf')
repr(instance)
class TestSatellite(unittest.TestCase):
def test_repr(self):
instance = Satellite('Terrasat', 'tst')
repr(instance)
class TestSubscriptionType(unittest.TestCase):
def test_repr(self):
instance = SubscriptionType('startup', 'pro', True)
        repr(instance)
| 22.307692 | 74 | 0.684483 | ["MIT"] | Ankuraxz/pyowm | tests/unit/commons/test_databoxes.py | 580 | Python |
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules.loss import _Loss
from torch.autograd import Function, Variable
import numpy as np
def cross_entropy_2D(input, target, weight=None, size_average=True):
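    # Move the class dimension last, flatten the logits to (N*H*W, C)
    # log-probabilities, and score them against the flattened per-pixel labels.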
n, c, h, w = input.size()
log_p = F.log_softmax(input, dim=1)
log_p = log_p.transpose(1, 2).transpose(2, 3).contiguous().view(-1, c)
target = target.view(target.numel())
loss = F.nll_loss(log_p, target, weight=weight, size_average=False)
if size_average:
loss /= float(target.numel())
return loss
def cross_entropy_3D(input, target, weight=None, size_average=True):
n, c, h, w, s = input.size()
log_p = F.log_softmax(input, dim=1)
log_p = log_p.transpose(1, 2).transpose(2, 3).transpose(3, 4).contiguous().view(-1, c)
target = target.view(target.numel())
loss = F.nll_loss(log_p, target, weight=weight, size_average=False)
if size_average:
loss /= float(target.numel())
return loss
class SoftDiceLoss(nn.Module):
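    """Soft (differentiable) Dice loss computed from softmax probabilities and
    one-hot encoded targets, averaged over batch and classes."""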
def __init__(self, n_classes):
super(SoftDiceLoss, self).__init__()
self.one_hot_encoder = One_Hot(n_classes).forward
self.n_classes = n_classes
def forward(self, input, target):
smooth = 0.01
batch_size = input.size(0)
input = F.softmax(input, dim=1)
# print("In Loss Sum 0 :",np.sum(input.cpu().detach().numpy()[:,0,...]))
# print("In Loss Sum 1 :",np.sum(input.cpu().detach().numpy()[:,1,...]))
input = input.view(batch_size, self.n_classes, -1)
target = self.one_hot_encoder(target).contiguous().view(batch_size, self.n_classes, -1)
inter = torch.sum(input * target, 2) + smooth
union = torch.sum(input, 2) + torch.sum(target, 2) + smooth
score = torch.sum(2.0 * inter / union)
score = 1.0 - score / (float(batch_size) * float(self.n_classes))
return score
class CustomSoftDiceLoss(nn.Module):
def __init__(self, n_classes, class_ids):
super(CustomSoftDiceLoss, self).__init__()
self.one_hot_encoder = One_Hot(n_classes).forward
self.n_classes = n_classes
self.class_ids = class_ids
def forward(self, input, target):
smooth = 0.01
batch_size = input.size(0)
input = F.softmax(input[:,self.class_ids], dim=1).view(batch_size, len(self.class_ids), -1)
target = self.one_hot_encoder(target).contiguous().view(batch_size, self.n_classes, -1)
target = target[:, self.class_ids, :]
inter = torch.sum(input * target, 2) + smooth
union = torch.sum(input, 2) + torch.sum(target, 2) + smooth
score = torch.sum(2.0 * inter / union)
score = 1.0 - score / (float(batch_size) * float(self.n_classes))
return score
class One_Hot(nn.Module):
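    """One-hot encode an integer label tensor by indexing rows of an identity
    matrix on the GPU, returning the class dimension as dimension 1."""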
def __init__(self, depth):
super(One_Hot, self).__init__()
self.depth = depth
self.ones = torch.sparse.torch.eye(depth).cuda()
def forward(self, X_in):
n_dim = X_in.dim()
output_size = X_in.size() + torch.Size([self.depth])
num_element = X_in.numel()
X_in = X_in.data.long().view(num_element)
out = Variable(self.ones.index_select(0, X_in)).view(output_size)
return out.permute(0, -1, *range(1, n_dim)).squeeze(dim=2).float()
def __repr__(self):
return self.__class__.__name__ + "({})".format(self.depth)
if __name__ == '__main__':
from torch.autograd import Variable
depth=3
batch_size=2
encoder = One_Hot(depth=depth).forward
y = Variable(torch.LongTensor(batch_size, 1, 1, 2 ,2).random_() % depth).cuda() # 4 classes,1x3x3 img
y_onehot = encoder(y)
x = Variable(torch.randn(y_onehot.size()).float())#.cuda()
dicemetric = SoftDiceLoss(n_classes=depth)
    dicemetric(x,y)
| 36.419048 | 106 | 0.640952 | ["MIT"] | Myyyr/segmentation | models/layers/loss.py | 3,824 | Python |
#!/usr/bin/env python
# Copyright (c) 2017-2018 The IchibaCoin developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import os, sys
from subprocess import check_output
def countRelevantCommas(line):
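    """Count the commas that separate top-level arguments of the first
    parenthesised call on the line (commas inside nested calls are ignored)."""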
openParensPosStack = []
openParensPos = 0
charCounter = 0
numRelevantCommas = 0
firstOpenParensIndex = line.find("(")
for char in line:
if char == '(':
openParensPosStack.append(charCounter)
if char == ')':
openParensPosStack.pop()
if char == "," and openParensPosStack[-1] == firstOpenParensIndex:
numRelevantCommas += 1
charCounter += 1
return numRelevantCommas
if __name__ == "__main__":
out = check_output(["git", "rev-parse", "--show-toplevel"])
srcDir = out.rstrip() + "/src/"
filelist = [os.path.join(dp, f) for dp, dn, filenames in os.walk(srcDir) for f in filenames if os.path.splitext(f)[1] == '.cpp' or os.path.splitext(f)[1] == '.h' ]
incorrectInstanceCounter = 0
for file in filelist:
f = open(file,"r")
data = f.read()
rows = data.split("\n")
count = 0
full_data = []
lineCounter = 1
tempLine = ""
tempCount = 0
for row in rows:
# Collapse multiple lines into one
tempLine += row
# Line contains LogPrint or LogPrintf
if tempLine.find("LogPrint") != -1:
if tempLine.count("(") == tempLine.count(")"):
havePercents = tempLine.count('%') > 0
if havePercents:
# This line of code has a format specifier that requires checking number of associated arguments
# Determine the number of arguments provided, see if that matches the number of format specifiers
# Count the number of commas after the format specifier string. Check to see if it matches the number of format specifiers.
# Assumes quotes are not escaped in the specifier string and there are no percent signs when specifying the debug level.
# First, determine the position of the comma after the format specifier section, named commaAfterEndSpecifierStringIndex
firstSpecifierIndex = tempLine.find('%')
startSpecifierStringIndex = tempLine.rfind('"',firstSpecifierIndex)
endSpecifierStringIndex = tempLine.find('"',firstSpecifierIndex)
commaAfterEndSpecifierStringIndex = tempLine.find(',',endSpecifierStringIndex)
# Count the number of commas after the specifier string
line = "(" + tempLine[commaAfterEndSpecifierStringIndex:-1]
numCommas = countRelevantCommas(line)
# Determine number of extra percents after specifier string
numExtraPercents = tempLine.count('%', commaAfterEndSpecifierStringIndex)
# Subtract extra from total count. This is the number of expected specifiers
# ignore %%
numPercents = tempLine.count('%') - numExtraPercents - 2*tempLine.count('%%')
if numPercents != numCommas:
print "Incorrect number of arguments for LogPrint(f) statement found."
print(str(file) + ":" + str(lineCounter - tempCount))
print "Line = " + tempLine
print("numRelevantCommas = " + str(numCommas) + ", numRelevantPercents = " + str(numPercents))
print ""
incorrectInstanceCounter += 1
# Done with this multiline, clear tempLine
tempLine = ""
tempCount = 0
else:
tempCount += 1
else:
# No LogPrint, clear tempLine
tempLine = ""
tempCount = 0
lineCounter += 1
print("# of incorrect instances: " + str(incorrectInstanceCounter))
sys.exit(incorrectInstanceCounter)
| 41.912621 | 167 | 0.565207 | ["MIT"] | LordSoylent/ICHIBA | contrib/devtools/logprint-scanner.py | 4,317 | Python |
"""pytest style fixtures for use in Virtool Workflows.""" | 57 | 57 | 0.754386 | [
"MIT"
] | igboyes/virtool-workflow | virtool_workflow/fixtures/__init__.py | 57 | Python |
# -*- coding: utf-8 -*-
import json
import logging
from collections import defaultdict
from functools import wraps
from logging.config import dictConfig
from subprocess import call
import redis
import requests
from flask import Flask, Response, redirect, render_template, request, session, url_for
from flask_migrate import Migrate
from pylti.flask import lti
from redis.exceptions import ConnectionError
from rq import Queue, get_current_job
from rq.exceptions import NoSuchJobError
from rq.job import Job
from sqlalchemy.sql import text
import config
from models import Course, Extension, Quiz, User, db
from utils import (
extend_quiz,
get_course,
get_or_create,
get_quizzes,
get_user,
missing_and_stale_quizzes,
search_students,
update_job,
)
conn = redis.from_url(config.REDIS_URL)
q = Queue("quizext", connection=conn)
app = Flask(__name__)
app.config.from_object("config")
dictConfig(config.LOGGING_CONFIG)
logger = logging.getLogger("app")
db.init_app(app)
migrate = Migrate(app, db)
json_headers = {
"Authorization": "Bearer " + config.API_KEY,
"Content-type": "application/json",
}
def check_valid_user(f):
@wraps(f)
def decorated_function(*args, **kwargs):
"""
Decorator to check if the user is allowed access to the app.
If user is allowed, return the decorated function.
Otherwise, return an error page with corresponding message.
"""
canvas_user_id = session.get("canvas_user_id")
lti_logged_in = session.get("lti_logged_in", False)
if not lti_logged_in or not canvas_user_id:
return render_template("error.html", message="Not allowed!")
if "course_id" not in kwargs.keys():
return render_template("error.html", message="No course_id provided.")
course_id = int(kwargs.get("course_id"))
if not session.get("is_admin", False):
enrollments_url = "{}courses/{}/enrollments".format(
config.API_URL, course_id
)
payload = {
"user_id": canvas_user_id,
"type": ["TeacherEnrollment", "TaEnrollment", "DesignerEnrollment"],
}
user_enrollments_response = requests.get(
enrollments_url, data=json.dumps(payload), headers=json_headers
)
user_enrollments = user_enrollments_response.json()
if not user_enrollments or "errors" in user_enrollments:
message = (
"You are not enrolled in this course as a Teacher, "
"TA, or Designer."
)
return render_template("error.html", message=message)
return f(*args, **kwargs)
return decorated_function
def error(exception=None):
return Response(
render_template(
"error.html",
message=exception.get(
"exception", "Please contact your System Administrator."
),
)
)
@app.context_processor
def add_google_analytics_id():
return dict(GOOGLE_ANALYTICS=config.GOOGLE_ANALYTICS)
@app.route("/", methods=["POST", "GET"])
def index():
"""
Default app index.
"""
return "Please contact your System Administrator."
@app.route("/status", methods=["GET"])
def status(): # pragma: no cover
"""
Runs smoke tests and reports status
"""
try:
job_queue_length = len(q.jobs)
except ConnectionError:
job_queue_length = -1
status = {
"tool": "Quiz Extensions",
"checks": {
"index": False,
"xml": False,
"api_key": False,
"redis": False,
"db": False,
"worker": False,
},
"url": url_for("index", _external=True),
"api_url": config.API_URL,
"debug": app.debug,
"xml_url": url_for("xml", _external=True),
"job_queue": job_queue_length,
}
# Check index
try:
response = requests.get(url_for("index", _external=True), verify=False)
status["checks"]["index"] = (
response.text == "Please contact your System Administrator."
)
except Exception:
logger.exception("Index check failed.")
# Check xml
try:
response = requests.get(url_for("xml", _external=True), verify=False)
status["checks"]["xml"] = "application/xml" in response.headers.get(
"Content-Type"
)
except Exception:
logger.exception("XML check failed.")
# Check API Key
try:
response = requests.get(
"{}users/self".format(config.API_URL),
headers={"Authorization": "Bearer " + config.API_KEY},
)
status["checks"]["api_key"] = response.status_code == 200
except Exception:
logger.exception("API Key check failed.")
# Check redis
try:
response = conn.echo("test")
status["checks"]["redis"] = response == b"test"
except ConnectionError:
logger.exception("Redis connection failed.")
# Check DB connection
try:
db.session.query(text("1")).all()
status["checks"]["db"] = True
except Exception:
logger.exception("DB connection failed.")
# Check RQ Worker
status["checks"]["worker"] = (
call('ps aux | grep "rq worker" | grep "quizext" | grep -v grep', shell=True)
== 0
)
# Overall health check - if all checks are True
status["healthy"] = all(v is True for k, v in status["checks"].items())
return Response(json.dumps(status), mimetype="application/json")
@app.route("/lti.xml", methods=["GET"])
def xml():
"""
Returns the lti.xml file for the app.
"""
from urllib.parse import urlparse
domain = urlparse(request.url_root).netloc
return Response(
render_template("lti.xml", tool_id=config.LTI_TOOL_ID, domain=domain),
mimetype="application/xml",
)
@app.route("/quiz/<course_id>/", methods=["GET"])
@check_valid_user
@lti(error=error, request="session", role="staff", app=app)
def quiz(lti=lti, course_id=None):
"""
Main landing page for the app.
Displays a page to the user that allows them to select students
to moderate quizzes for.
"""
return render_template(
"userselect.html", course_id=course_id, current_page_number=1
)
@app.route("/refresh/<course_id>/", methods=["POST"])
def refresh(course_id=None):
"""
Creates a new `refresh_background` job.
:param course_id: The Canvas ID of the Course.
:type course_id: int
:rtype: flask.Response
:returns: A JSON-formatted response containing a url for the started job.
"""
job = q.enqueue_call(func=refresh_background, args=(course_id,))
return Response(
json.dumps({"refresh_job_url": url_for("job_status", job_key=job.get_id())}),
mimetype="application/json",
status=202,
)
@app.route("/update/<course_id>/", methods=["POST"])
@check_valid_user
@lti(error=error, request="session", role="staff", app=app)
def update(lti=lti, course_id=None):
"""
Creates a new `update_background` job.
:param course_id: The Canvas ID of the Course.
:type course_id: int
:rtype: flask.Response
:returns: A JSON-formatted response containing urls for the started jobs.
"""
refresh_job = q.enqueue_call(func=refresh_background, args=(course_id,))
update_job = q.enqueue_call(
func=update_background,
args=(course_id, request.get_json()),
depends_on=refresh_job,
)
return Response(
json.dumps(
{
"refresh_job_url": url_for("job_status", job_key=refresh_job.get_id()),
"update_job_url": url_for("job_status", job_key=update_job.get_id()),
}
),
mimetype="application/json",
status=202,
)
@app.route("/jobs/<job_key>/", methods=["GET"])
def job_status(job_key):
try:
job = Job.fetch(job_key, connection=conn)
except NoSuchJobError:
return Response(
json.dumps(
{
"error": True,
"status_msg": "{} is not a valid job key.".format(job_key),
}
),
mimetype="application/json",
status=404,
)
if job.is_finished:
return Response(json.dumps(job.result), mimetype="application/json", status=200)
elif job.is_failed:
logger.error("Job {} failed.\n{}".format(job_key, job.exc_info))
return Response(
json.dumps(
{
"error": True,
"status_msg": "Job {} failed to complete.".format(job_key),
}
),
mimetype="application/json",
status=500,
)
else:
return Response(json.dumps(job.meta), mimetype="application/json", status=202)
def update_background(course_id, extension_dict):
"""
Update time on selected students' quizzes to a specified percentage.
:param course_id: The Canvas ID of the Course to update in
:type course_id: int
:param extension_dict: A dictionary that includes the percent of
time and a list of canvas user ids.
Example:
{
'percent': '300',
'user_ids': [
'0123456',
'1234567',
'9867543',
'5555555'
]
}
:type extension_dict: dict
"""
job = get_current_job()
update_job(job, 0, "Starting...", "started")
with app.app_context():
if not extension_dict:
update_job(job, 0, "Invalid Request", "failed", error=True)
logger.warning("Invalid Request: {}".format(extension_dict))
return job.meta
try:
course_json = get_course(course_id)
except requests.exceptions.HTTPError:
update_job(job, 0, "Course not found.", "failed", error=True)
logger.exception("Unable to find course #{}".format(course_id))
return job.meta
course_name = course_json.get("name", "<UNNAMED COURSE>")
user_ids = extension_dict.get("user_ids", [])
percent = extension_dict.get("percent", None)
if not percent:
update_job(job, 0, "`percent` field required.", "failed", error=True)
logger.warning(
"Percent field not provided. Request: {}".format(extension_dict)
)
return job.meta
course, created = get_or_create(db.session, Course, canvas_id=course_id)
course.course_name = course_name
db.session.commit()
for user_id in user_ids:
try:
canvas_user = get_user(course_id, user_id)
sortable_name = canvas_user.get("sortable_name", "<MISSING NAME>")
sis_id = canvas_user.get("sis_user_id")
except requests.exceptions.HTTPError:
# Unable to find user. Log and skip them.
logger.warning(
"Unable to find user #{} in course #{}".format(user_id, course_id)
)
continue
user, created = get_or_create(db.session, User, canvas_id=user_id)
user.sortable_name = sortable_name
user.sis_id = sis_id
db.session.commit()
# create/update extension
extension, created = get_or_create(
db.session, Extension, course_id=course.id, user_id=user.id
)
extension.percent = percent
db.session.commit()
quizzes = get_quizzes(course_id)
num_quizzes = len(quizzes)
quiz_time_list = []
unchanged_quiz_time_list = []
if num_quizzes < 1:
update_job(
job,
0,
"Sorry, there are no quizzes for this course.",
"failed",
error=True,
)
logger.warning(
"No quizzes found for course {}. Unable to update.".format(course_id)
)
return job.meta
for index, quiz in enumerate(quizzes):
quiz_id = quiz.get("id", None)
quiz_title = quiz.get("title", "[UNTITLED QUIZ]")
comp_perc = int(((float(index)) / float(num_quizzes)) * 100)
updating_str = "Updating quiz #{} - {} [{} of {}]"
update_job(
job,
comp_perc,
updating_str.format(quiz_id, quiz_title, index + 1, num_quizzes),
"processing",
error=False,
)
extension_response = extend_quiz(course_id, quiz, percent, user_ids)
if extension_response.get("success", False) is True:
# add/update quiz
quiz_obj, created = get_or_create(
db.session, Quiz, canvas_id=quiz_id, course_id=course.id
)
quiz_obj.title = quiz_title
quiz_obj.time_limit = quiz.get("time_limit")
db.session.commit()
added_time = extension_response.get("added_time", None)
if added_time is not None:
quiz_time_list.append(
{"title": quiz_title, "added_time": added_time}
)
else:
unchanged_quiz_time_list.append({"title": quiz_title})
else:
update_job(
job,
comp_perc,
extension_response.get("message", "An unknown error occurred."),
"failed",
error=True,
)
logger.error("Extension failed: {}".format(extension_response))
return job.meta
msg_str = (
"Success! {} {} been updated for {} student(s) to have {}% time. "
"{} {} no time limit and were left unchanged."
)
message = msg_str.format(
len(quiz_time_list),
"quizzes have" if len(quiz_time_list) != 1 else "quiz has",
len(user_ids),
percent,
len(unchanged_quiz_time_list),
"quizzes have" if len(unchanged_quiz_time_list) != 1 else "quiz has",
)
update_job(job, 100, message, "complete", error=False)
job.meta["quiz_list"] = quiz_time_list
job.meta["unchanged_list"] = unchanged_quiz_time_list
job.save()
return job.meta
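# Illustrative sketch (not part of the original module): enqueueing
# update_background directly with the payload shape documented in its
# docstring above. The course id and user ids are assumptions.
def _example_enqueue_update():
    extension_dict = {"percent": "300", "user_ids": ["0123456", "1234567"]}
    return q.enqueue_call(func=update_background, args=(12345, extension_dict))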
def refresh_background(course_id):
"""
Look up existing extensions and apply them to new quizzes.
:param course_id: The Canvas ID of the Course.
:type course_id: int
:rtype: dict
:returns: A dictionary containing two parts:
- success `bool` False if there was an error, True otherwise.
- message `str` A long description of success or failure.
"""
job = get_current_job()
update_job(job, 0, "Starting...", "started")
with app.app_context():
course, created = get_or_create(db.session, Course, canvas_id=course_id)
try:
course_name = get_course(course_id).get("name", "<UNNAMED COURSE>")
course.course_name = course_name
db.session.commit()
except requests.exceptions.HTTPError:
update_job(job, 0, "Course not found.", "failed", error=True)
logger.exception("Unable to find course #{}".format(course_id))
return job.meta
quizzes = missing_and_stale_quizzes(course_id)
num_quizzes = len(quizzes)
if num_quizzes < 1:
update_job(
job,
100,
"Complete. No quizzes required updates.",
"complete",
error=False,
)
return job.meta
percent_user_map = defaultdict(list)
inactive_list = []
update_job(job, 0, "Getting past extensions.", "processing", False)
for extension in course.extensions:
# If extension is inactive, ignore.
if not extension.active:
inactive_list.append(extension.user.sortable_name)
logger.debug("Extension #{} is inactive.".format(extension.id))
continue
user_canvas_id = (
User.query.filter_by(id=extension.user_id).first().canvas_id
)
# Check if user is in course. If not, deactivate extension.
try:
canvas_user = get_user(course_id, user_canvas_id)
# Skip user if not a student. Fixes an edge case where a
# student that previously received an extension changes roles.
enrolls = canvas_user.get("enrollments", [])
type_list = [
e["type"] for e in enrolls if e["enrollment_state"] == "active"
]
if not any(t == "StudentEnrollment" for t in type_list):
logger.info(
(
"User #{} was found in course #{}, but is not an "
"active student. Deactivating extension #{}. Roles "
"found: {}"
).format(
user_canvas_id,
course_id,
extension.id,
", ".join(type_list) if len(enrolls) > 0 else None,
)
)
extension.active = False
db.session.commit()
inactive_list.append(extension.user.sortable_name)
continue
except requests.exceptions.HTTPError:
log_str = "User #{} not in course #{}. Deactivating extension #{}."
logger.info(log_str.format(user_canvas_id, course_id, extension.id))
extension.active = False
db.session.commit()
inactive_list.append(extension.user.sortable_name)
continue
percent_user_map[extension.percent].append(user_canvas_id)
if len(percent_user_map) < 1:
msg_str = "No active extensions were found.<br>"
if len(inactive_list) > 0:
msg_str += " Extensions for the following students are inactive:<br>{}"
msg_str = msg_str.format("<br>".join(inactive_list))
update_job(job, 100, msg_str, "complete", error=False)
return job.meta
for index, quiz in enumerate(quizzes):
quiz_id = quiz.get("id", None)
quiz_title = quiz.get("title", "[UNTITLED QUIZ]")
comp_perc = int(((float(index)) / float(num_quizzes)) * 100)
refreshing_str = "Refreshing quiz #{} - {} [{} of {}]"
update_job(
job,
comp_perc,
refreshing_str.format(quiz_id, quiz_title, index + 1, num_quizzes),
"processing",
error=False,
)
for percent, user_list in percent_user_map.items():
extension_response = extend_quiz(course_id, quiz, percent, user_list)
if extension_response.get("success", False) is True:
# add/update quiz
quiz_obj, created = get_or_create(
db.session, Quiz, canvas_id=quiz_id, course_id=course.id
)
quiz_obj.title = quiz_title
quiz_obj.time_limit = quiz.get("time_limit")
db.session.commit()
else:
error_message = "Some quizzes couldn't be updated. "
error_message += extension_response.get("message", "")
update_job(job, comp_perc, error_message, "failed", error=True)
return job.meta
msg = "{} quizzes have been updated.".format(len(quizzes))
update_job(job, 100, msg, "complete", error=False)
return job.meta
@app.route("/missing_and_stale_quizzes/<course_id>/", methods=["GET"])
def missing_and_stale_quizzes_check(course_id):
"""
Check if there are missing quizzes.
:param course_id: The Canvas ID of the Course.
:type course_id: int
:rtype: str
:returns: A JSON-formatted string representation of a boolean.
"true" if there are missing quizzes, "false" if there are not.
"""
course = Course.query.filter_by(canvas_id=course_id).first()
if course is None:
# No record of this course. No need to update yet.
return "false"
num_extensions = Extension.query.filter_by(course_id=course.id).count()
if num_extensions == 0:
# There are no extensions for this course yet. No need to update.
return "false"
missing = len(missing_and_stale_quizzes(course_id, True)) > 0
return json.dumps(missing)
@app.route("/filter/<course_id>/", methods=["GET"])
@check_valid_user
@lti(error=error, request="session", role="staff", app=app)
def filter(lti=lti, course_id=None):
"""
Display a filtered and paginated list of students in the course.
:param course_id:
:type: int
:rtype: str
:returns: A list of students in the course using the template
user_list.html.
"""
query = request.args.get("query", "").lower()
page = int(request.args.get("page", 1))
per_page = int(request.args.get("per_page", config.DEFAULT_PER_PAGE))
user_list, max_pages = search_students(
course_id, per_page=per_page, page=page, search_term=query
)
if not user_list or max_pages < 1:
user_list = []
max_pages = 1
return render_template(
"user_list.html", users=user_list, current_page_number=page, max_pages=max_pages
)
@app.route("/launch", methods=["POST"])
@lti(error=error, request="initial", role="staff", app=app)
def lti_tool(lti=lti):
"""
Bootstrapper for lti.
"""
course_id = request.values.get("custom_canvas_course_id")
canvas_user_id = request.values.get("custom_canvas_user_id")
canvas_domain = request.values.get("custom_canvas_api_domain")
if canvas_domain not in config.ALLOWED_CANVAS_DOMAINS:
msg = (
"<p>This tool is only available from the following domain(s):<br/>{}</p>"
"<p>You attempted to access from this domain:<br/>{}</p>"
)
return render_template(
"error.html",
message=msg.format(", ".join(config.ALLOWED_CANVAS_DOMAINS), canvas_domain),
)
roles = request.values.get("roles", [])
session["is_admin"] = "Administrator" in roles
session["canvas_user_id"] = canvas_user_id
session["lti_logged_in"] = True
return redirect(url_for("quiz", course_id=course_id))
| 32.554455 | 88 | 0.57586 | [
"MIT"
] | ayushrusiya47/quiz-extensions | views.py | 23,016 | Python |
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Bwtool(AutotoolsPackage):
"""bwtool is a command-line utility for bigWig files."""
homepage = "https://github.com/CRG-Barcelona/bwtool"
url = "https://github.com/CRG-Barcelona/bwtool/archive/1.0.tar.gz"
version('1.0', 'cdd7a34ae457b587edfe7dc8a0bdbedd')
depends_on('libbeato')
| 29.5 | 75 | 0.725047 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1nf1n1t3l00p/spack | var/spack/repos/builtin/packages/bwtool/package.py | 531 | Python |
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Extended thread dispatching support.
For basic support see reactor threading API docs.
"""
from twisted.python.compat import _PY3
if not _PY3:
import Queue
else:
import queue as Queue
from twisted.python import failure
from twisted.internet import defer
def deferToThreadPool(reactor, threadpool, f, *args, **kwargs):
"""
Call the function C{f} using a thread from the given threadpool and return
the result as a Deferred.
This function is only used by client code which is maintaining its own
threadpool. To run a function in the reactor's threadpool, use
C{deferToThread}.
@param reactor: The reactor in whose main thread the Deferred will be
invoked.
@param threadpool: An object which supports the C{callInThreadWithCallback}
method of C{twisted.python.threadpool.ThreadPool}.
@param f: The function to call.
@param *args: positional arguments to pass to f.
@param **kwargs: keyword arguments to pass to f.
@return: A Deferred which fires a callback with the result of f, or an
errback with a L{twisted.python.failure.Failure} if f throws an
exception.
"""
d = defer.Deferred()
def onResult(success, result):
if success:
reactor.callFromThread(d.callback, result)
else:
reactor.callFromThread(d.errback, result)
threadpool.callInThreadWithCallback(onResult, f, *args, **kwargs)
return d
def deferToThread(f, *args, **kwargs):
"""
Run a function in a thread and return the result as a Deferred.
@param f: The function to call.
@param *args: positional arguments to pass to f.
@param **kwargs: keyword arguments to pass to f.
@return: A Deferred which fires a callback with the result of f,
or an errback with a L{twisted.python.failure.Failure} if f throws
an exception.
"""
from twisted.internet import reactor
return deferToThreadPool(reactor, reactor.getThreadPool(),
f, *args, **kwargs)
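# Minimal usage sketch for deferToThread (illustrative only, not part of the
# original module). The helper names below are assumptions for demonstration.
def _exampleDeferToThread():
    from twisted.internet import reactor
    from twisted.python import log

    def blockingWork(n):
        # Stands in for any blocking call that must stay off the reactor thread.
        return sum(range(n))

    d = deferToThread(blockingWork, 1000000)
    d.addCallback(lambda total: log.msg("sum computed: %d" % (total,)))
    d.addBoth(lambda _: reactor.stop())
    reactor.run()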
def _runMultiple(tupleList):
"""
Run a list of functions.
"""
for f, args, kwargs in tupleList:
f(*args, **kwargs)
def callMultipleInThread(tupleList):
"""
Run a list of functions in the same thread.
tupleList should be a list of (function, argsList, kwargsDict) tuples.
"""
from twisted.internet import reactor
reactor.callInThread(_runMultiple, tupleList)
def blockingCallFromThread(reactor, f, *a, **kw):
"""
Run a function in the reactor from a thread, and wait for the result
synchronously. If the function returns a L{Deferred}, wait for its
result and return that.
@param reactor: The L{IReactorThreads} provider which will be used to
schedule the function call.
@param f: the callable to run in the reactor thread
@type f: any callable.
@param a: the arguments to pass to C{f}.
@param kw: the keyword arguments to pass to C{f}.
@return: the result of the L{Deferred} returned by C{f}, or the result
of C{f} if it returns anything other than a L{Deferred}.
@raise: If C{f} raises a synchronous exception,
C{blockingCallFromThread} will raise that exception. If C{f}
returns a L{Deferred} which fires with a L{Failure},
C{blockingCallFromThread} will raise that failure's exception (see
L{Failure.raiseException}).
"""
queue = Queue.Queue()
def _callFromThread():
result = defer.maybeDeferred(f, *a, **kw)
result.addBoth(queue.put)
reactor.callFromThread(_callFromThread)
result = queue.get()
if isinstance(result, failure.Failure):
result.raiseException()
return result
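# Minimal usage sketch for blockingCallFromThread (illustrative only, not part
# of the original module): it must run in a non-reactor thread, e.g. started
# with reactor.callInThread. The helper names below are assumptions.
def _exampleBlockingCall():
    from twisted.internet import reactor

    def firesLater():
        d = defer.Deferred()
        reactor.callLater(0.1, d.callback, "done")
        return d

    # Blocks this worker thread until the Deferred fires on the reactor thread.
    return blockingCallFromThread(reactor, firesLater)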
__all__ = ["deferToThread", "deferToThreadPool", "callMultipleInThread",
"blockingCallFromThread"]
| 30.740157 | 79 | 0.681352 | [
"Unlicense",
"MIT"
] | adamtheturtle/twisted | src/twisted/internet/threads.py | 3,904 | Python |
from datetime import datetime
from astropy.time import Time
def read_tle_file(tlefile, **kwargs):
"""
Read in a TLE file and return the epoch, line 1, and line 2 of every TLE entry
it contains. Use get_epoch_tle() to pick the entry closest to the date you
want to propagate the orbit to.
"""
times = []
line1 = []
line2 = []
from os import path
from datetime import datetime
# Catch if the file can't be opened:
try:
f = open(tlefile, 'r')
except FileNotFoundError:
print("Unable to open: "+tlefile)
ln=0
for line in f:
# print(line)
if (ln == 0):
year= int(line[18:20])
day = int(line[20:23])
times.extend([datetime.strptime("{}:{}".format(year, day), "%y:%j")])
line1.extend([line.strip()])
ln=1
else:
ln=0
line2.extend([line.strip()])
f.close()
return times, line1, line2
def get_epoch_tle(epoch, tlefile):
"""
Find the TLE that is closest to the epoch you want to search.
epoch is a datetime object, tlefile is the file you want to search through.
"""
times, line1, line2 = read_tle_file(tlefile)
from datetime import datetime
from astropy.time import Time
# Allow astropy Time objects
if type(epoch) is Time:
epoch = epoch.datetime
mindt = 100.
min_ind = 0
for ind, t in enumerate(times):
dt = abs((epoch -t).days)
if dt < mindt:
min_ind = ind
mindt = dt
good_line1 = line1[min_ind]
good_line2 = line2[min_ind]
return mindt, good_line1, good_line2
def convert_nustar_time(t, leap=5):
'''
Converts MET seconds to a datetime object.
Default is to subtract off 5 leap seconds.
'''
import astropy.units as u
mjdref = 55197*u.d
met = (t - leap)* u.s + mjdref
met_datetime = Time(met.to(u.d), format = 'mjd').datetime
return met_datetime
def get_nustar_location(checktime, line1, line2):
'''
Code to determine the spacecraft location from the TLE.
Inputs are a datetime object and the two lines of the TLE you want to use.
Returns a tuple that has the X, Y, and Z geocentric coordinates (in km).
'''
from sgp4.earth_gravity import wgs72
from sgp4.io import twoline2rv
from astropy.coordinates import EarthLocation
satellite = twoline2rv(line1, line2, wgs72)
position, velocity = satellite.propagate(
checktime.year, checktime.month, checktime.day,
checktime.hour, checktime.minute, checktime.second)
return position
def eci2el(x,y,z,dt):
"""
Convert Earth-Centered Inertial (ECI) cartesian coordinates to ITRS for astropy EarthLocation object.
Inputs :
x = ECI X-coordinate
y = ECI Y-coordinate
z = ECI Z-coordinate
dt = UTC time (datetime object)
"""
from astropy.coordinates import GCRS, ITRS, EarthLocation, CartesianRepresentation
import astropy.units as u
# convert datetime object to astropy time object
tt=Time(dt,format='datetime')
# Read the coordinates in the Geocentric Celestial Reference System
gcrs = GCRS(CartesianRepresentation(x=x, y=y,z=z), obstime=tt)
# Convert it to an Earth-fixed frame
itrs = gcrs.transform_to(ITRS(obstime=tt))
el = EarthLocation.from_geocentric(itrs.x, itrs.y, itrs.z)
return el
def get_moon_j2000(epoch, line1, line2, position = None):
'''
Code to determine the apparent J2000 position for a given
time and at a given position for the observatory.
epoch needs to be a datetime or Time object.
position is a list/tuple of X/Y/Z positions
'''
from astropy.time import Time
from astropy.coordinates import get_moon, EarthLocation
import astropy.units as u
import sys
from datetime import datetime
if type(epoch) is Time:
epoch = epoch.datetime
if position is None:
position = get_nustar_location(epoch, line1, line2) # position in ECI coords
t=Time(epoch)
loc = eci2el(*position*u.km,t)
moon_coords = get_moon(t,loc)
# Get just the coordinates in degrees
ra_moon, dec_moon = moon_coords.ra.degree * u.deg, moon_coords.dec.degree*u.deg
return ra_moon, dec_moon
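# Illustrative end-to-end sketch (not part of the original module). The TLE
# file path and epoch below are assumptions; any archive of NuSTAR TLEs in the
# standard two-line format should work.
def _example_moon_position(tlefile="nustar_tle_archive.txt"):
    from datetime import datetime

    epoch = datetime(2017, 3, 21, 12, 0, 0)
    mindt, line1, line2 = get_epoch_tle(epoch, tlefile)
    ra_moon, dec_moon = get_moon_j2000(epoch, line1, line2)
    return ra_moon, dec_moon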
| 25.818182 | 106 | 0.602553 | [
"MIT"
] | bwgref/nustar_lunar_pointing | nustar_lunar_pointing/tracking.py | 4,544 | Python |
from uuid import uuid4
from django.db import models
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
try:
from django.contrib.auth import get_user_model
User = get_user_model()
except:
from django.apps import apps
user_app, user_model = settings.AUTH_USER_MODEL.split('.')
User = apps.get_app_config(user_app).get_model(user_model)
from django.utils.timezone import now
LOT_SETTINGS = getattr(settings, 'LOT', {
'fast-login': {
'name': _(u'Fast login'),
'duration': 60,
'one-time': True,
},
'slow-login': {
'name': _(u'Slow login'),
'duration': 60*60*24,
'one-time': False
},
'always-login': {
'name': _(u'Always login'),
'one-time': False,
'duration': None,
},
})
LOT_TYPE_CHOICES = [
(key, value['name'])
for key, value in LOT_SETTINGS.items()
]
class LOT(models.Model):
uuid = models.CharField(_('UUID'), max_length=50)
type = models.SlugField(_('LOT type'), max_length=50,
choices=LOT_TYPE_CHOICES)
user = models.ForeignKey(User, verbose_name=_('user'))
session_data = models.TextField(_('Jsoned Session Data'), blank=True)
created = models.DateTimeField(_('Creation date'), auto_now_add=True)
next_url = models.URLField(blank=True)
def verify(self):
if self.type not in LOT_SETTINGS:
return False
verify_setting = LOT_SETTINGS[self.type]
duration = verify_setting.get('duration', None)
verify_func = verify_setting.get('verify-func', lambda x: True)
if not verify_func(self):
return False
if duration is None:
return True
return (now() - self.created).total_seconds() < duration
def delete_on_fail(self):
if self.type not in LOT_SETTINGS:
return True
return LOT_SETTINGS[self.type].get('delete-on-fail', True)
def is_one_time(self):
return LOT_SETTINGS.get(self.type, {}).get('one-time', False)
def save(self, *args, **kwargs):
if self.id and not kwargs.pop('force_modification', False):
raise Exception('Modification not allowed without '
'force_modification parameter on save.')
self.uuid = uuid4()
super(LOT, self).save(*args, **kwargs)
def __unicode__(self):
return u"{0} ({1})".format(self.get_type_display(), self.uuid)
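# Illustrative sketch (not part of the original module): a project-level
# settings.LOT entry using the optional 'verify-func' and 'delete-on-fail'
# keys read by verify() and delete_on_fail() above. The key name and values
# are assumptions for demonstration only.
EXAMPLE_LOT_SETTINGS = {
    'password-reset': {
        'name': _('Password reset'),
        'duration': 60 * 15,      # valid for 15 minutes
        'one-time': True,         # consumed on first successful use
        'delete-on-fail': True,   # drop the token if verification fails
        'verify-func': lambda lot: lot.user.is_active,
    },
}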
| 29.235294 | 73 | 0.622938 | [
"BSD-3-Clause"
] | jespino/django-lot | lot/models.py | 2,485 | Python |
from __future__ import unicode_literals
from future.builtins import int, range, str
from datetime import date, datetime
from os.path import join, split
from uuid import uuid4
from django import forms
from django.forms.extras import SelectDateWidget
from django.core.files.storage import FileSystemStorage
from django.core.urlresolvers import reverse
from django.template import Template
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _
from django.utils.timezone import now
from zhiliao.conf import settings
from zhiliao.forms import fields
from zhiliao.forms.models import FormEntry, FieldEntry
from zhiliao.utils.email import split_addresses as split_choices
fs = FileSystemStorage(location=settings.FORMS_UPLOAD_ROOT)
##############################
# Each type of export filter #
##############################
# Text matches
FILTER_CHOICE_CONTAINS = "1"
FILTER_CHOICE_DOESNT_CONTAIN = "2"
# Exact matches
FILTER_CHOICE_EQUALS = "3"
FILTER_CHOICE_DOESNT_EQUAL = "4"
# Greater/less than
FILTER_CHOICE_BETWEEN = "5"
# Multiple values
FILTER_CHOICE_CONTAINS_ANY = "6"
FILTER_CHOICE_CONTAINS_ALL = "7"
FILTER_CHOICE_DOESNT_CONTAIN_ANY = "8"
FILTER_CHOICE_DOESNT_CONTAIN_ALL = "9"
##########################
# Export filters grouped #
##########################
# Text fields
TEXT_FILTER_CHOICES = (
("", _("Nothing")),
(FILTER_CHOICE_CONTAINS, _("Contains")),
(FILTER_CHOICE_DOESNT_CONTAIN, _("Doesn't contain")),
(FILTER_CHOICE_EQUALS, _("Equals")),
(FILTER_CHOICE_DOESNT_EQUAL, _("Doesn't equal")),
)
# Choices with single value entries
CHOICE_FILTER_CHOICES = (
("", _("Nothing")),
(FILTER_CHOICE_CONTAINS_ANY, _("Equals any")),
(FILTER_CHOICE_DOESNT_CONTAIN_ANY, _("Doesn't equal any")),
)
# Choices with multiple value entries
MULTIPLE_FILTER_CHOICES = (
("", _("Nothing")),
(FILTER_CHOICE_CONTAINS_ANY, _("Contains any")),
(FILTER_CHOICE_CONTAINS_ALL, _("Contains all")),
(FILTER_CHOICE_DOESNT_CONTAIN_ANY, _("Doesn't contain any")),
(FILTER_CHOICE_DOESNT_CONTAIN_ALL, _("Doesn't contain all")),
)
# Dates
DATE_FILTER_CHOICES = (
("", _("Nothing")),
(FILTER_CHOICE_BETWEEN, _("Is between")),
)
# The filter function for each filter type
FILTER_FUNCS = {
FILTER_CHOICE_CONTAINS:
lambda val, field: val.lower() in field.lower(),
FILTER_CHOICE_DOESNT_CONTAIN:
lambda val, field: val.lower() not in field.lower(),
FILTER_CHOICE_EQUALS:
lambda val, field: val.lower() == field.lower(),
FILTER_CHOICE_DOESNT_EQUAL:
lambda val, field: val.lower() != field.lower(),
FILTER_CHOICE_BETWEEN:
lambda val_from, val_to, field: (
(not val_from or val_from <= field) and
(not val_to or val_to >= field)
),
FILTER_CHOICE_CONTAINS_ANY:
lambda val, field: set(val) & set(split_choices(field)),
FILTER_CHOICE_CONTAINS_ALL:
lambda val, field: set(val) == set(split_choices(field)),
FILTER_CHOICE_DOESNT_CONTAIN_ANY:
lambda val, field: not set(val) & set(split_choices(field)),
FILTER_CHOICE_DOESNT_CONTAIN_ALL:
lambda val, field: set(val) != set(split_choices(field)),
}
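# Worked sketch (not part of the original module) of how FILTER_FUNCS is used
# later in EntriesForm.rows(): the selected filter type picks a predicate, the
# submitted value(s) come first and the stored field value is appended last.
# The literal choices below are assumptions.
def _example_filter_match():
    filter_func = FILTER_FUNCS[FILTER_CHOICE_CONTAINS_ANY]
    submitted = ["Red", "Blue"]        # choices ticked in the export form
    stored = "Blue, Green"             # FieldEntry.value as saved
    return bool(filter_func(submitted, stored))  # True: "Blue" appears in both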
# Export form fields for each filter type grouping
text_filter_field = forms.ChoiceField(label=" ", required=False,
choices=TEXT_FILTER_CHOICES)
choice_filter_field = forms.ChoiceField(label=" ", required=False,
choices=CHOICE_FILTER_CHOICES)
multiple_filter_field = forms.ChoiceField(label=" ", required=False,
choices=MULTIPLE_FILTER_CHOICES)
date_filter_field = forms.ChoiceField(label=" ", required=False,
choices=DATE_FILTER_CHOICES)
class FormForForm(forms.ModelForm):
"""
Form with a set of fields dynamically assigned, directly based on the
given ``forms.models.Form`` instance.
"""
class Meta:
model = FormEntry
exclude = ("form", "entry_time")
def __init__(self, form, context, *args, **kwargs):
"""
Dynamically add each of the form fields for the given form model
instance and its related field model instances.
"""
self.form = form
self.form_fields = form.fields.visible()
initial = kwargs.pop("initial", {})
# If a FormEntry instance is given to edit, populate initial
# with its field values.
field_entries = {}
if kwargs.get("instance"):
for field_entry in kwargs["instance"].fields.all():
field_entries[field_entry.field_id] = field_entry.value
super(FormForForm, self).__init__(*args, **kwargs)
# Create the form fields.
for field in self.form_fields:
field_key = "field_%s" % field.id
field_class = fields.CLASSES[field.field_type]
field_widget = fields.WIDGETS.get(field.field_type)
field_args = {"label": field.label, "required": field.required,
"help_text": field.help_text}
if field.required and not field.help_text:
field_args["help_text"] = _("required")
arg_names = field_class.__init__.__code__.co_varnames
if "max_length" in arg_names:
field_args["max_length"] = settings.FORMS_FIELD_MAX_LENGTH
if "choices" in arg_names:
choices = list(field.get_choices())
if (field.field_type == fields.SELECT and
field.default not in [c[0] for c in choices]):
choices.insert(0, ("", field.placeholder_text))
field_args["choices"] = choices
if field_widget is not None:
field_args["widget"] = field_widget
#
# Initial value for field, in order of preference:
#
# - If a form model instance is given (eg we're editing a
# form response), then use the instance's value for the
# field.
# - If the developer has provided an explicit "initial"
# dict, use it.
# - The default value for the field instance as given in
# the admin.
#
initial_val = None
try:
initial_val = field_entries[field.id]
except KeyError:
try:
initial_val = initial[field_key]
except KeyError:
initial_val = Template(field.default).render(context)
if initial_val:
if field.is_a(*fields.MULTIPLE):
initial_val = split_choices(initial_val)
elif field.field_type == fields.CHECKBOX:
initial_val = initial_val != "False"
self.initial[field_key] = initial_val
self.fields[field_key] = field_class(**field_args)
if field.field_type == fields.DOB:
_now = datetime.now()
years = list(range(_now.year, _now.year - 120, -1))
self.fields[field_key].widget.years = years
# Add identifying type attr to the field for styling.
setattr(self.fields[field_key], "type",
field_class.__name__.lower())
if (field.required and settings.FORMS_USE_HTML5 and
field.field_type != fields.CHECKBOX_MULTIPLE):
self.fields[field_key].widget.attrs["required"] = ""
if field.placeholder_text and not field.default:
text = field.placeholder_text
self.fields[field_key].widget.attrs["placeholder"] = text
def save(self, **kwargs):
"""
Create a ``FormEntry`` instance and related ``FieldEntry``
instances for each form field.
"""
entry = super(FormForForm, self).save(commit=False)
entry.form = self.form
entry.entry_time = now()
entry.save()
entry_fields = entry.fields.values_list("field_id", flat=True)
new_entry_fields = []
for field in self.form_fields:
field_key = "field_%s" % field.id
value = self.cleaned_data[field_key]
if value and self.fields[field_key].widget.needs_multipart_form:
value = fs.save(join("forms", str(uuid4()), value.name), value)
if isinstance(value, list):
value = ", ".join([v.strip() for v in value])
if field.id in entry_fields:
field_entry = entry.fields.get(field_id=field.id)
field_entry.value = value
field_entry.save()
else:
new = {"entry": entry, "field_id": field.id, "value": value}
new_entry_fields.append(FieldEntry(**new))
if new_entry_fields:
FieldEntry.objects.bulk_create(new_entry_fields)
return entry
def email_to(self):
"""
Return the value entered for the first field of type
``forms.fields.EMAIL``.
"""
for field in self.form_fields:
if field.is_a(fields.EMAIL):
return self.cleaned_data["field_%s" % field.id]
return None
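# Minimal usage sketch (assumption-based, not part of the original module):
# building and saving a FormForForm for a forms.models.Form instance inside a
# view. The view signature is an assumption.
def _example_handle_form(request, form_instance):
    from django.template import RequestContext

    context = RequestContext(request)
    form = FormForForm(form_instance, context,
                       request.POST or None, request.FILES or None)
    if form.is_valid():
        return form.save()
    return None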
class EntriesForm(forms.Form):
"""
Form with a set of fields dynamically assigned that can be used to
filter entries for the given ``forms.models.Form`` instance.
"""
def __init__(self, form, request, *args, **kwargs):
"""
Iterate through the fields of the ``forms.models.Form`` instance and
create the form fields required to control including the field in
the export (with a checkbox) or filtering the field which differs
across field types. Use a list of checkboxes when a fixed set of
choices can be chosen from, a pair of date fields for date ranges,
and for all other types provide a textbox for text search.
"""
self.form = form
self.request = request
self.form_fields = form.fields.all()
self.entry_time_name = str(FormEntry._meta.get_field(
"entry_time").verbose_name)
super(EntriesForm, self).__init__(*args, **kwargs)
for field in self.form_fields:
field_key = "field_%s" % field.id
# Checkbox for including in export.
self.fields["%s_export" % field_key] = forms.BooleanField(
label=field.label, initial=True, required=False)
if field.is_a(*fields.CHOICES):
# A fixed set of choices to filter by.
if field.is_a(fields.CHECKBOX):
choices = ((True, _("Checked")), (False, _("Not checked")))
else:
choices = field.get_choices()
contains_field = forms.MultipleChoiceField(label=" ",
choices=choices, widget=forms.CheckboxSelectMultiple(),
required=False)
self.fields["%s_filter" % field_key] = choice_filter_field
self.fields["%s_contains" % field_key] = contains_field
elif field.is_a(*fields.MULTIPLE):
# A fixed set of choices to filter by, with multiple
# possible values in the entry field.
contains_field = forms.MultipleChoiceField(label=" ",
choices=field.get_choices(),
widget=forms.CheckboxSelectMultiple(),
required=False)
self.fields["%s_filter" % field_key] = multiple_filter_field
self.fields["%s_contains" % field_key] = contains_field
elif field.is_a(*fields.DATES):
# A date range to filter by.
self.fields["%s_filter" % field_key] = date_filter_field
self.fields["%s_from" % field_key] = forms.DateField(
label=" ", widget=SelectDateWidget(), required=False)
self.fields["%s_to" % field_key] = forms.DateField(
label=_("and"), widget=SelectDateWidget(), required=False)
else:
# Text box for search term to filter by.
contains_field = forms.CharField(label=" ", required=False)
self.fields["%s_filter" % field_key] = text_filter_field
self.fields["%s_contains" % field_key] = contains_field
# Add ``FormEntry.entry_time`` as a field.
field_key = "field_0"
self.fields["%s_export" % field_key] = forms.BooleanField(initial=True,
label=FormEntry._meta.get_field("entry_time").verbose_name,
required=False)
self.fields["%s_filter" % field_key] = date_filter_field
self.fields["%s_from" % field_key] = forms.DateField(
label=" ", widget=SelectDateWidget(), required=False)
self.fields["%s_to" % field_key] = forms.DateField(
label=_("and"), widget=SelectDateWidget(), required=False)
def __iter__(self):
"""
Yield pairs of include checkbox / filters for each field.
"""
for field_id in [f.id for f in self.form_fields] + [0]:
prefix = "field_%s_" % field_id
fields = [f for f in super(EntriesForm, self).__iter__()
if f.name.startswith(prefix)]
yield fields[0], fields[1], fields[2:]
def columns(self):
"""
Returns the list of selected column names.
"""
fields = [f.label for f in self.form_fields
if self.cleaned_data["field_%s_export" % f.id]]
if self.cleaned_data["field_0_export"]:
fields.append(self.entry_time_name)
return fields
def rows(self, csv=False):
"""
Returns each row based on the selected criteria.
"""
# Store the index of each field against its ID for building each
# entry row with columns in the correct order. Also store the IDs of
# fields with a type of FileField or Date-like for special handling of
# their values.
field_indexes = {}
file_field_ids = []
date_field_ids = []
for field in self.form_fields:
if self.cleaned_data["field_%s_export" % field.id]:
field_indexes[field.id] = len(field_indexes)
if field.is_a(fields.FILE):
file_field_ids.append(field.id)
elif field.is_a(*fields.DATES):
date_field_ids.append(field.id)
num_columns = len(field_indexes)
include_entry_time = self.cleaned_data["field_0_export"]
if include_entry_time:
num_columns += 1
# Get the field entries for the given form and filter by entry_time
# if specified.
field_entries = FieldEntry.objects.filter(
entry__form=self.form).order_by(
"-entry__id").select_related("entry")
if self.cleaned_data["field_0_filter"] == FILTER_CHOICE_BETWEEN:
time_from = self.cleaned_data["field_0_from"]
time_to = self.cleaned_data["field_0_to"]
if time_from and time_to:
field_entries = field_entries.filter(
entry__entry_time__range=(time_from, time_to))
# Loop through each field value ordered by entry, building up each
# entry as a row. Use the ``valid_row`` flag for marking a row as
# invalid if it fails one of the filtering criteria specified.
current_entry = None
current_row = None
valid_row = True
for field_entry in field_entries:
if field_entry.entry_id != current_entry:
# New entry, write out the current row and start a new one.
if valid_row and current_row is not None:
if not csv:
current_row.insert(0, current_entry)
yield current_row
current_entry = field_entry.entry_id
current_row = [""] * num_columns
valid_row = True
if include_entry_time:
current_row[-1] = field_entry.entry.entry_time
field_value = field_entry.value or ""
# Check for filter.
field_id = field_entry.field_id
filter_type = self.cleaned_data.get("field_%s_filter" % field_id)
filter_args = None
if filter_type:
if filter_type == FILTER_CHOICE_BETWEEN:
f, t = "field_%s_from" % field_id, "field_%s_to" % field_id
filter_args = [self.cleaned_data[f], self.cleaned_data[t]]
else:
field_name = "field_%s_contains" % field_id
filter_args = self.cleaned_data[field_name]
if filter_args:
filter_args = [filter_args]
if filter_args:
# Convert dates before checking filter.
if field_id in date_field_ids:
y, m, d = field_value.split(" ")[0].split("-")
dte = date(int(y), int(m), int(d))
filter_args.append(dte)
else:
filter_args.append(field_value)
filter_func = FILTER_FUNCS[filter_type]
if not filter_func(*filter_args):
valid_row = False
# Create download URL for file fields.
if field_entry.value and field_id in file_field_ids:
url = reverse("admin:form_file", args=(field_entry.id,))
field_value = self.request.build_absolute_uri(url)
if not csv:
parts = (field_value, split(field_entry.value)[1])
field_value = mark_safe("<a href=\"%s\">%s</a>" % parts)
# Only use values for fields that were selected.
try:
current_row[field_indexes[field_id]] = field_value
except KeyError:
pass
# Output the final row.
if valid_row and current_row is not None:
if not csv:
current_row.insert(0, current_entry)
yield current_row
| 42.662005 | 79 | 0.593651 | [
"BSD-3-Clause"
] | gladgod/zhiliao | zhiliao/forms/forms.py | 18,302 | Python |
# -*- coding: utf-8 -*-
import json
import datetime
class JobHeader:
"""Represents a row from the callout."""
def __init__(self, raw_data):
self.attributes = {
"contractor": json.dumps(raw_data[0]),
"job_name": json.dumps(raw_data[1]),
"is_dayshift": json.dumps(int(raw_data[3] == "Days")),
"job_id": json.dumps(int(raw_data[4]))
}
self.contractor = raw_data[0]
self.jobName = raw_data[1]
self.startDate = raw_data[2]
self.shift = raw_data[3]
self.id = raw_data[4]
def add_data(self, pop_data):
print("popData for id {0}".format(self.id))
if self.id != pop_data.id:
print("ID mismatch...")
return
class JobData:
TIME_TAG = "start_time"
DATE_TAG = "start_date"
DATE_TIME_TAG = "date_time"
def __init__(self, raw_data):
self.lineMatches = {"Work Type:": "work_type",
"Hours:": "hours",
"Start Date:": "start_date",
"Start Time:": "start_time",
"Duration:": "duration",
"Accommodation:": "accommodation",
"Open To:": "open_to",
"Comments:": "comments",
"Drug Testing Info:": "drug_testing"}
self.nameHireTag = "Name Hired:"
# 2 spaces typo
self.manpowerTag = "Manpower Requirements:"
self.attribute_dictionary = {}
self.manpower = {}
self.name_hires = {}
skip_line = False
for i, row in enumerate(raw_data):
if row.has_attr("bgcolor"):
continue
if skip_line:
skip_line = False
continue
stripped = row.text.strip()
if not row.find("b"):
# print("Element {0} is not bold enough for my needs.".format(row))
continue
if self.check_line_match(i, stripped, raw_data):
skip_line = True
continue
if "Job#" in stripped:
self.id = stripped.split(u'\xa0')[-1]
print("Set Job# to {0}".format(self.id))
continue
if self.manpowerTag in stripped:
self.manpower = self.get_multi_line(self.manpowerTag, i, raw_data)
continue
if self.nameHireTag in stripped:
self.name_hires = self.get_multi_line(self.nameHireTag, i, raw_data)
continue
# # parse checkboxes
# inputs = row.find_all("input")
# if inputs:
# self.attrDic["Shift:"] = "Days" if self.parse_checkbox(row.find_all("b")) else "Nights"
# print("Set Shift: to {0}".format(self.attrDic["Shift:"]))
# continue
print(repr(stripped))
self.attribute_dictionary["manpower"] = json.dumps(self.manpower)
self.attribute_dictionary["name_hires"] = json.dumps(self.name_hires)
date_split = self.attribute_dictionary[self.DATE_TAG].replace('\"', '').split('/')
time_string = self.attribute_dictionary[self.TIME_TAG].replace('\"', '') + ":00"
self.attribute_dictionary[self.DATE_TIME_TAG] = "{0}-{1}-{2} {3}".format(
date_split[2], date_split[0], date_split[1], time_string)
del self.attribute_dictionary[self.DATE_TAG]
del self.attribute_dictionary[self.TIME_TAG]
print("dateTime set to: {0}".format(self.attribute_dictionary[self.DATE_TIME_TAG]))
def check_line_match(self, index, stripped, data_rows):
"""Find lines matching stripped from lineMatchKeys and set value to immediately following row"""
for key, value in self.lineMatches.items():
if stripped == key:
next_row = data_rows[index + 1]
if next_row.find_all("b"):
print("Next row was bold element: {0}. Skipping...".format(next_row))
return False
next_row_stripped = next_row.text.strip()
if next_row_stripped in self.lineMatches:
print("Next row was {0} and is in lineMatchKeys, skipping...".format(next_row_stripped))
return False
self.attribute_dictionary[value] = json.dumps(next_row_stripped)
print("Set {0} to {1}".format(value, self.attribute_dictionary[value]))
del self.lineMatches[key]
return True
return False
@classmethod
def get_multi_line(cls, match, index, data_rows):
attr_list = []
while True:
index += 1
if index >= len(data_rows):
break
next_row = data_rows[index]
if next_row.find("b"):
break
if next_row.find("tr"):
print("Skipping td containing trs")
continue
attr_list.append(next_row.text.strip().replace(u'\xa0', ' '))
attr_dic = {}
i = 0
while i + 1 < len(attr_list):
attr_dic[attr_list[i]] = attr_list[i + 1]
i += 2
print("Set '{0}' to dic:".format(match))
print(repr(attr_dic))
return attr_dic
@classmethod
def parse_checkbox(cls, bold_elements):
for bold in bold_elements:
if "Days" in bold.text.strip():
input_el = bold.find("input")
if input_el:
return input_el.has_attr("checked")
| 34.295858 | 109 | 0.514665 | [
"MIT"
] | ataboo/CalloutScrape | DataObjects.py | 5,796 | Python |
from django.shortcuts import render, redirect, get_object_or_404
from django.views.decorators.http import require_POST
from shop.models import Product
from .cart import Cart
from .forms import CartAddProductForm
from coupons.forms import CouponApplyForm
from shop.recommender import Recommender
@require_POST
def cart_add(request, product_id):
cart = Cart(request)
product = get_object_or_404(Product, id=product_id)
form = CartAddProductForm(request.POST)
if form.is_valid():
cd = form.cleaned_data
cart.add(product=product,
quantity=cd['quantity'],
update_quantity=cd['update'])
return redirect('cart:cart_detail')
def cart_remove(request, product_id):
cart = Cart(request)
product = get_object_or_404(Product, id=product_id)
cart.remove(product)
return redirect('cart:cart_detail')
def cart_detail(request):
cart = Cart(request)
for item in cart:
item['update_quantity_form'] = CartAddProductForm(
initial={'quantity': item['quantity'],
'update': True})
coupon_apply_form = CouponApplyForm()
r = Recommender()
cart_products = [item['product'] for item in cart]
recommended_products = r.suggest_products_for(cart_products,
max_results=4)
return render(request,
'cart/detail.html',
{'cart': cart,
'coupon_apply_form': coupon_apply_form,
'recommended_products': recommended_products})
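# Illustrative URL wiring sketch (not part of this module). The exact patterns
# are assumptions; they only need to expose the view names used above, e.g. in
# cart/urls.py:
#
#     from django.urls import path
#     from . import views
#
#     app_name = 'cart'
#     urlpatterns = [
#         path('', views.cart_detail, name='cart_detail'),
#         path('add/<int:product_id>/', views.cart_add, name='cart_add'),
#         path('remove/<int:product_id>/', views.cart_remove, name='cart_remove'),
#     ]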
| 32.734694 | 68 | 0.644015 | [
"MIT"
] | AngelLiang/Django-2-by-Example | Chapter09/myshop/cart/views.py | 1,604 | Python |
import argparse
from graph4nlp.pytorch.modules.config import get_basic_args
from graph4nlp.pytorch.modules.utils.config_utils import get_yaml_config, update_values
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"--dataset_yaml",
type=str,
default="examples/pytorch/semantic_parsing/graph2tree/geo/config/new_dynamic_graphsage_undirected.yaml", # noqa
)
parser.add_argument("--learning-rate", type=float, default=1e-3)
parser.add_argument("--gpuid", type=int, default=1, help="which gpu to use. -1 = use CPU")
parser.add_argument(
"--seed", type=int, default=123, help="torch manual random number generator seed"
)
parser.add_argument("--init-weight", type=float, default=0.08, help="initailization weight")
parser.add_argument("--weight-decay", type=float, default=0)
parser.add_argument(
"--max-epochs",
type=int,
default=200,
help="number of full passes through the training data",
)
parser.add_argument("--min-freq", type=int, default=1, help="minimum frequency for vocabulary")
parser.add_argument("--grad-clip", type=int, default=5, help="clip gradients at this value")
# dataset config
parser.add_argument("--batch-size", type=int, default=20, help="the size of one mini-batch")
parser.add_argument("--share-vocab", type=bool, default=True, help="whether to share vocab")
parser.add_argument("--pretrained_word_emb_name", type=str, default="6B", help="")
parser.add_argument("--pretrained_word_emb_url", type=str, default=None, help="")
parser.add_argument(
"--pretrained_word_emb_cache_dir", type=str, default=".vector_cache", help=""
)
parser.add_argument("--beam-size", type=int, default=4, help="the beam size of beam search")
cfg = parser.parse_args()
our_args = get_yaml_config(cfg.dataset_yaml)
template = get_basic_args(
graph_construction_name=our_args["graph_construction_name"],
graph_embedding_name=our_args["graph_embedding_name"],
decoder_name=our_args["decoder_name"],
)
update_values(to_args=template, from_args_list=[our_args, vars(cfg)])
return template
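# Minimal usage sketch (assumption-based, not part of the original module):
# inspect the merged configuration produced from the dataset YAML plus the
# command-line overrides.
if __name__ == "__main__":
    import pprint
    pprint.pprint(get_args())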
| 41.830189 | 120 | 0.703654 | [
"Apache-2.0"
] | RyanWangZf/graph4nlp | examples/pytorch/semantic_parsing/graph2tree/geo/src/config.py | 2,217 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
----------------------
Asynchronous SqlHelper
----------------------
TODO:
#. Transaction
Examples:
Simple Usage:
::
from kipp.aio import SqlHelper, run_until_complete, coroutine2
@coroutine2
def main():
db = SqlHelper('movoto')
r = yield db.getOneBySql('show DATABASES;')
if __name__ == '__main__':
run_until_complete(main())
"""
from __future__ import unicode_literals
from kipp.libs import PY2, PY3
from kipp.utils import get_logger
from .base import run_on_executor, thread_executor
class Py3SqlHelper(object):
pass
class Py2SqlHelper:
executor = thread_executor
def __init__(self, *args, **kw):
from Utilities.movoto.SqlHelper import SqlHelper as MovotoSqlHelper
self.sqlhelper = MovotoSqlHelper(*args, **kw)
def __getattr__(self, name):
return getattr(self.sqlhelper, name)
@run_on_executor()
def getAllBySql(self, sql, *args, **kw):
return self.sqlhelper.getAllBySql(sql, *args, **kw)
@run_on_executor()
def getOneBySql(self, sql, *args, **kw):
return self.sqlhelper.getOneBySql(sql, *args, **kw)
@run_on_executor()
def executeBySql(self, sql, *args, **kw):
return self.sqlhelper.executeBySql(sql, *args, **kw)
@run_on_executor()
def executeManyBySql(self, sql, *args, **kw):
return self.sqlhelper.executeManyBySql(sql, *args, **kw)
get_all_by_sql = getAllBySql
get_one_by_sql = getOneBySql
execute_by_sql = executeBySql
execute_many_by_sql = executeManyBySql
class SqlHelper:
def __init__(self, *args, **kw):
if PY2:
get_logger().info("set SqlHelper for py2")
from Utilities.movoto import settings
self.sqlhelper = Py2SqlHelper(*args, **kw)
elif PY3:
get_logger().info("set SqlHelper for py3")
self.sqlhelper = Py3SqlHelper(*args, **kw)
def __getattr__(self, name):
return getattr(self.sqlhelper, name)
| 23.908046 | 75 | 0.630769 | [
"MIT"
] | Laisky/kipp | kipp/aio/sqlhelper.py | 2,080 | Python |
#!/usr/bin/env python3
import argparse
import collections
import hashlib
import itertools
import os
import re
import sys
import statistics
import subprocess
import tempfile
import time
import pprint
import json
from collections import namedtuple
# Argument parser
parser = argparse.ArgumentParser(description="""
Compare running times and memory usage of a set of compressors.
""")
parser.add_argument('--suite', '-s', type=str, default='',
help='the comparison suite to execute')
parser.add_argument('--iterations', '-n', type=int, default=1,
help='the amount of iterations for each input file')
parser.add_argument('files', metavar='FILE', type=str, nargs='+',
help='the input files to use for comparison')
parser.add_argument('--format', type=str, default='stdout',
help='Format to output')
parser.add_argument('--nomem', action="store_true",
help='Don\'t measure memory')
args = parser.parse_args()
class StdOutTable:
def __init__(self):
pass
def print(self, *s):
print(*s)
def file(self, srcfname, srcsize, srchash):
print()
print("File: %s (%s, sha256=%s)" % (srcfname, memsize(srcsize), srchash))
def header(self, tup):
print()
print(("%"+ str(maxnicknamelength) + "s | %10s | %10s | %10s | %10s | %10s | %4s |") % tup)
print('-'*(maxnicknamelength+5*10+6*3+4+2))
def cell(self, content, format, sep, f):
print((format + " " + sep) % f(content), end='',flush=True)
def end_row(self):
print()
def flush(self):
pass
class JsonTable:
def __init__(self):
self.messages = []
self.files = {}
def print(self, *s):
self.messages.append(" ".join(map(str, iter(s))))
def file(self, srcfname, srcsize, srchash):
self.files[srcfname] = {}
self.currentfile = self.files[srcfname]
self.currentfile["cols"] = {}
self.currentfile["size"] = srcsize
self.currentfile["hash"] = srchash
def header(self, tup):
self.headings = tup
self.current_heading = 0
for heading in tup:
self.currentfile["cols"][heading] = []
def cell(self, content, format, sep, f):
self.currentfile["cols"][self.headings[self.current_heading]].append(content)
self.current_heading += 1
def end_row(self):
self.current_heading = 0
def flush(self):
print(json.dumps(self.__dict__, sort_keys=True, indent=4))
sot = StdOutTable()
if args.format == "json":
sot = JsonTable()
# Ensure that the input files are readable
for srcfname in args.files:
if not os.access(srcfname, os.R_OK):
sot.print("ERROR: Input file not found or not readable:", srcfname)
quit()
# Check that valgrind is available for memory measurement
mem_available = False
if not args.nomem:
try:
subprocess.check_call(["valgrind", "--version"], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
mem_available = True
except:
mem_available = False
sot.print("WARNING: valgrind not found - memory measurement unavailable.")
sot.print()
# Program execution definition
StdOut = 0
StdIn = 0
Exec = collections.namedtuple('Exec', ['args', 'outp', 'inp'])
Exec.__new__.__defaults__ = (None, None) # args is required
# Compressor Pair definition
CompressorPair = collections.namedtuple('CompressorPair', ['name', 'compress', 'decompress'])
def Tudocomp(name, algorithm, tdc_binary='./tdc', cflags=[], dflags=[]):
return CompressorPair(name,
compress = Exec(args=[tdc_binary, '-a', algorithm] + cflags, outp='--output'),
decompress = Exec(args=[tdc_binary, '-d'] + dflags, outp='--output'))
def StdCompressor(name, binary, cflags=[], dflags=[]):
return CompressorPair(name,
compress = Exec(args=[binary] + cflags, inp=StdIn, outp=StdOut),
decompress = Exec(args=[binary] + dflags, inp=StdIn, outp=StdOut))
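# Illustrative suite file sketch (not part of this script): the file passed via
# --suite is eval()'d below and must produce a list of CompressorPair objects,
# so it can simply contain a Python list literal. Binary names and flags here
# are assumptions.
#
#     [
#         Tudocomp(name='lzss(bit)', algorithm='lzss(bit)', tdc_binary='./tdc'),
#         StdCompressor(name='xz -6', binary='xz', cflags=['-6'], dflags=['-d']),
#     ]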
# Define suite
if args.suite:
# Evaluate suite as Python
try:
with open(args.suite, "r") as f:
suite = eval(f.read())
# sanity checks
if not type(suite) is list:
raise(Exception(
"Suite evaluated to " + str(type(suite)) +
", but should be a list of CompressorPair objects"))
if len(suite) == 0:
raise(Exception("Suite is empty"))
for c in suite:
if not isinstance(c, CompressorPair):
raise(Exception("Suite must only contain CompressorPair objects" +
", found " + str(type(c))))
except:
sot.print("ERROR: Failed to load suite '" + args.suite + "'")
sot.print(sys.exc_info()[1])
quit()
sot.print("Using suite '" + args.suite + "'")
else:
# default
suite = [
# tudocomp examples
Tudocomp(name='lfs_simst', algorithm='lfs_comp(sim_st)'),
Tudocomp(name='lfs_esa', algorithm='lfs_comp(esa)'),
# Tudocomp(name='lfs_st', algorithm='lfs_comp(st)'),
Tudocomp(name='lfs2', algorithm='lfs2'),
Tudocomp(name='lz78(ternary)', algorithm='lz78(coder=bit,lz78trie=ternary)'),
Tudocomp(name='lz78', algorithm='lz78'),
Tudocomp(name='lzw', algorithm='lzw'),
Tudocomp(name='repair(min=50)', algorithm='repair(bit,50)'),
Tudocomp(name='lzw', algorithm='lzw'),
Tudocomp(name='lzss', algorithm='lzss(bit)'),
Tudocomp(name='bwtzip', algorithm='bwt:rle:mtf:encode(huff)'),
Tudocomp(name='lcpcomp(t=5,arrays,scans(a=25))', algorithm='lcpcomp(coder=sle,threshold=5,comp=arrays,dec=scan(25))'),
Tudocomp(name='lzss_lcp(t=5,bit)', algorithm='lzss_lcp(coder=bit,threshold=5)'),
Tudocomp(name='lz78u(t=5,huff)', algorithm='lz78u(coder=bit,threshold=5,comp=buffering(huff))'),
Tudocomp(name='lcpcomp(t=5,heap,compact)', algorithm='lcpcomp(coder=sle,threshold="5",comp=heap,dec=compact)'),
Tudocomp(name='sle', algorithm='encode(sle)'),
Tudocomp(name='huff', algorithm='encode(huff)'),
Tudocomp(name='lzw(ternary)', algorithm='lzw(coder=bit,lz78trie=ternary)'),
# Some standard Linux compressors
StdCompressor(name='gzip -1', binary='gzip', cflags=['-1'], dflags=['-d']),
StdCompressor(name='gzip -9', binary='gzip', cflags=['-9'], dflags=['-d']),
StdCompressor(name='bzip2 -1', binary='bzip2', cflags=['-1'], dflags=['-d']),
StdCompressor(name='bzip2 -9', binary='bzip2', cflags=['-9'], dflags=['-d']),
StdCompressor(name='lzma -1', binary='lzma', cflags=['-1'], dflags=['-d']),
StdCompressor(name='lzma -9', binary='lzma', cflags=['-9'], dflags=['-d']),
#StdCompressor(name='lcpcompress', binary='lcpcompress', cflags=[''], dflags=['-d']),
]
sot.print("Using built-in default suite")
def memsize(num, suffix='B'):
for unit in ['','Ki','Mi','Gi','Ti','Pi','Ei','Zi']:
if abs(num) < 1024.0:
return "%3.1f%s%s" % (num, unit, suffix)
num /= 1024.0
return "%.1f%s%s" % (num, 'Yi', suffix)
def timesize(num, suffix='s'):
if(num < 1.0):
for unit in ['','m','mu','n']:
if num > 1.0:
return "%3.1f%s%s" % (num, unit, suffix)
num *= 1000
return "%.1f%s%s" % (num, '?', suffix)
else:
if(num < 600):
return "%3.1f%s" % (num, 's')
elif(num < 3600):
num /= 60
return "%3.1f%s" % (num, 'min')
        else:
num /= 3600
return "%3.1f%s" % (num, 'h')
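# Illustrative values for the two helpers above:
#   memsize(2048)    -> '2.0KiB'
#   timesize(0.0042) -> '4.2ms'
#   timesize(90)     -> '90.0s'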
def run_exec(x, infilename, outfilename):
args = list(x.args)
# Delete existing output file
if os.path.exists(outfilename):
os.remove(outfilename)
# Determine Output
if(x.outp == StdOut):
outfile = open(outfilename, "wb")
pipe_out = outfile
else:
outfile = None
pipe_out = logfile
args += ([x.outp, outfilename] if x.outp != None else [outfilename])
# Determine input
if(x.inp == StdIn):
infile = open(infilename, "rb")
pipe_in = infile
else:
infile = None
pipe_in = None
args += ([x.inp, infilename] if x.inp != None else [infilename])
# Call
t0 = time.time()
subprocess.check_call(args, stdin=pipe_in, stdout=pipe_out, stderr=logfile)
# Close files
outfile.close() if outfile else None
infile.close() if infile else None
# Yield time delta
return(time.time() - t0)
def measure_time(x, infilename, outfilename):
t=[]
for _ in range(0, args.iterations):
t = t + [run_exec(x, infilename, outfilename)]
return(statistics.median(t))
def measure_mem(x, infilename, outfilename):
massiffilename=tempfile.mktemp()
run_exec(
Exec(args=['valgrind', '-q', '--tool=massif', '--pages-as-heap=yes', '--massif-out-file=' + massiffilename] + x.args, inp=x.inp, outp=x.outp),
infilename, outfilename)
with open(massiffilename) as f:
maxmem=0
for line in f.readlines():
match = re.match('^mem_heap_B=([0-9]+)', line)
if match:
maxmem = max(maxmem,int(match.group(1)))
os.remove(massiffilename)
return(maxmem)
maxnicknamelength = len(max(suite, key=lambda p: len(p.name))[0] ) + 3
sot.print("Number of iterations per file: ", args.iterations)
for srcfname in args.files:
srchash = hashlib.sha256(open(srcfname, 'rb').read()).hexdigest()
srcsize = os.path.getsize(srcfname)
sot.file(srcfname, srcsize, srchash)
sot.header(("Compressor", "C Time", "C Memory", "C Rate", "D Time", "D Memory", "chk"));
logfilename = tempfile.mktemp()
decompressedfilename = tempfile.mktemp()
outfilename = tempfile.mktemp()
def print_column(content, format="%11s", sep="|", f=lambda x:x):
sot.cell(content, format, sep, f)
def end_row():
sot.end_row()
try:
with open(logfilename,"wb") as logfile:
for c in suite:
# nickname
print_column(c.name, "%"+ str(maxnicknamelength) +"s")
# compress time
try:
comp_time=measure_time(c.compress, srcfname, outfilename)
print_column(comp_time*1000, f=lambda x: timesize(x/1000))
except FileNotFoundError as e:
print_column("(ERR)", sep=">")
sot.print(" " + e.strerror)
continue
# compress memory
if mem_available:
comp_mem=measure_mem(c.compress, srcfname, outfilename)
print_column(comp_mem,f=memsize)
else:
print_column("(N/A)")
# compress rate
outputsize=os.path.getsize(outfilename)
print_column(float(outputsize) / float(srcsize), format="%10.4f%%", f=lambda x: 100*x)
# decompress time
dec_time = measure_time(c.decompress, outfilename, decompressedfilename)
print_column(dec_time*1000,f=lambda x: timesize(x/1000))
# decompress memory
if mem_available:
dec_mem = measure_mem(c.decompress, outfilename, decompressedfilename)
print_column(dec_mem,f=memsize)
else:
print_column("(N/A)")
# decompress check
decompressedhash = hashlib.sha256(
open(decompressedfilename, 'rb').read()).hexdigest()
if decompressedhash != srchash:
print_column("FAIL", format="%5s")
else:
print_column("OK", format="%5s")
# EOL
end_row()
except:
sot.print()
sot.print("ERROR:", sys.exc_info()[0])
sot.print(sys.exc_info()[1])
with open(logfilename, 'r') as fin: sot.print(fin.read())
os.remove(logfilename)
if os.path.exists(decompressedfilename):
os.remove(decompressedfilename)
if os.path.exists(outfilename):
os.remove(outfilename)
sot.flush()
| 35.535411 | 151 | 0.571827 | [
"ECL-2.0",
"Apache-2.0"
] | Sascha6790/tudocomp | etc/compare.py | 12,544 | Python |
# coding: utf-8
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class StopAppResponse(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'app_id': 'str',
'state': 'AppState',
'x_request_id': 'str'
}
attribute_map = {
'app_id': 'app_id',
'state': 'state',
'x_request_id': 'X-request-Id'
}
def __init__(self, app_id=None, state=None, x_request_id=None):
"""StopAppResponse - a model defined in huaweicloud sdk"""
super(StopAppResponse, self).__init__()
self._app_id = None
self._state = None
self._x_request_id = None
self.discriminator = None
if app_id is not None:
self.app_id = app_id
if state is not None:
self.state = state
if x_request_id is not None:
self.x_request_id = x_request_id
@property
def app_id(self):
"""Gets the app_id of this StopAppResponse.
        Application ID
:return: The app_id of this StopAppResponse.
:rtype: str
"""
return self._app_id
@app_id.setter
def app_id(self, app_id):
"""Sets the app_id of this StopAppResponse.
        Application ID
:param app_id: The app_id of this StopAppResponse.
:type: str
"""
self._app_id = app_id
@property
def state(self):
"""Gets the state of this StopAppResponse.
:return: The state of this StopAppResponse.
:rtype: AppState
"""
return self._state
@state.setter
def state(self, state):
"""Sets the state of this StopAppResponse.
:param state: The state of this StopAppResponse.
:type: AppState
"""
self._state = state
@property
def x_request_id(self):
"""Gets the x_request_id of this StopAppResponse.
:return: The x_request_id of this StopAppResponse.
:rtype: str
"""
return self._x_request_id
@x_request_id.setter
def x_request_id(self, x_request_id):
"""Sets the x_request_id of this StopAppResponse.
:param x_request_id: The x_request_id of this StopAppResponse.
:type: str
"""
self._x_request_id = x_request_id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, StopAppResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
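# Illustrative usage sketch (not part of the generated model); the field
# values here are made up:
#
#   resp = StopAppResponse(app_id="app-001", x_request_id="req-123")
#   print(resp.to_str())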
| 26.084848 | 79 | 0.55948 | [
"Apache-2.0"
] | huaweicloud/huaweicloud-sdk-python-v3 | huaweicloud-sdk-cloudrtc/huaweicloudsdkcloudrtc/v2/model/stop_app_response.py | 4,312 | Python |
'''
Description:
Play music on Spotify with python.
Author: AlejandroV
Version: 1.0
Video: https://youtu.be/Vj64pkXtz28
'''
from spotipy.oauth2 import SpotifyClientCredentials
import spotipy
import webbrowser as web
import pyautogui
from time import sleep
# your credentials
client_id = 'YOUR_CLIENT_ID_HERE'
client_secret = 'YOUR_CLIENT_SECRET_HERE'
flag = 0
# artist and name of the song
author = ''
song = 'bad guy'.upper()
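# Illustrative configuration: to search within a specific artist's tracks,
# set both values, e.g.
#   author = 'Billie Eilish'
#   song = 'bad guy'.upper()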
if len(author) > 0:
# authenticate
sp = spotipy.Spotify(client_credentials_manager=SpotifyClientCredentials(client_id, client_secret))
result = sp.search(author)
for i in range(0, len(result["tracks"]["items"])):
# songs by artist
name_song = result["tracks"]["items"][i]["name"].upper()
if song in name_song:
flag = 1
web.open(result["tracks"]["items"][i]["uri"])
sleep(5)
pyautogui.press("enter")
break
# if song by artist not found
if flag == 0:
song = song.replace(" ", "%20")
web.open(f'spotify:search:{song}')
sleep(5)
for i in range(18):
pyautogui.press("tab")
for i in range(2):
pyautogui.press("enter")
        sleep(1)
 | 24.38 | 103 | 0.632486 | [
"MIT"
] | avmmodules/playmusic_spoty | playmusic_spoty.py | 1,219 | Python |
# Copyright 2018 The Texar Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Miscellaneous Utility functions.
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
# pylint: disable=invalid-name, no-member, no-name-in-module, protected-access
# pylint: disable=redefined-outer-name, too-many-arguments
from typing import List, Union
import inspect
import funcsigs
from pydoc import locate
import copy
import collections
import numpy as np
import tensorflow as tf
from texar.tf.hyperparams import HParams
from texar.tf.utils.dtypes import is_str, is_callable, compat_as_text, \
_maybe_list_to_array
# pylint: disable=anomalous-backslash-in-string
MAX_SEQ_LENGTH = np.iinfo(np.int32).max
# Some modules cannot be imported directly,
# e.g., `import tensorflow.train` fails.
# Such modules are treated in a special way in utils like `get_class` as below.
# _unimportable_modules = {
# 'tensorflow.train', 'tensorflow.keras.regularizers'
# }
__all__ = [
"_inspect_getargspec",
"get_args",
"get_default_arg_values",
"check_or_get_class",
"get_class",
"check_or_get_instance",
"get_instance",
"check_or_get_instance_with_redundant_kwargs",
"get_instance_with_redundant_kwargs",
"get_function",
"call_function_with_redundant_kwargs",
"get_instance_kwargs",
"dict_patch",
"dict_lookup",
"dict_fetch",
"dict_pop",
"flatten_dict",
"strip_token",
"strip_eos",
"strip_bos",
"strip_special_tokens",
"str_join",
"map_ids_to_strs",
"default_str",
"uniquify_str",
"ceildiv",
"straight_through",
"truncate_seq_pair",
]
# TODO(zhiting): complete this
def _expand_name(name):
"""Replaces common shorthands with respective full names.
"tf.xxx" --> "tensorflow.xxx"
"tx.xxx" --> "texar.tf.xxx"
"""
return name
def _inspect_getargspec(fn):
"""Returns `inspect.getargspec(fn)` for Py2 and `inspect.getfullargspec(fn)`
for Py3
"""
try:
return inspect.getfullargspec(fn)
except AttributeError:
try:
return inspect.getargspec(fn)
except TypeError:
return inspect.getargspec(fn.__call__)
def get_args(fn):
"""Gets the arguments of a function.
Args:
fn (callable): The function to inspect.
Returns:
list: A list of argument names (str) of the function.
"""
argspec = _inspect_getargspec(fn)
args = argspec.args
# Empty args can be because `fn` is decorated. Use `funcsigs.signature`
# to re-do the inspect
if len(args) == 0:
args = funcsigs.signature(fn).parameters.keys()
args = list(args)
return args
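# Example (illustrative):
#   get_args(lambda x, y=0: x + y)  # -> ['x', 'y']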
def get_default_arg_values(fn):
"""Gets the arguments and respective default values of a function.
Only arguments with default values are included in the output dictionary.
Args:
fn (callable): The function to inspect.
Returns:
dict: A dictionary that maps argument names (str) to their default
values. The dictionary is empty if no arguments have default values.
"""
argspec = _inspect_getargspec(fn)
if argspec.defaults is None:
return {}
num_defaults = len(argspec.defaults)
return dict(zip(argspec.args[-num_defaults:], argspec.defaults))
def check_or_get_class(class_or_name, module_path=None, superclass=None):
"""Returns the class and checks if the class inherits :attr:`superclass`.
Args:
class_or_name: Name or full path to the class, or the class itself.
module_paths (list, optional): Paths to candidate modules to search
for the class. This is used if :attr:`class_or_name` is a string and
the class cannot be located solely based on :attr:`class_or_name`.
The first module in the list that contains the class
is used.
superclass (optional): A (list of) classes that the target class
must inherit.
Returns:
The target class.
Raises:
ValueError: If class is not found based on :attr:`class_or_name` and
:attr:`module_paths`.
        TypeError: If the class does not inherit :attr:`superclass`.
"""
class_ = class_or_name
if is_str(class_):
class_ = get_class(class_, module_path)
if superclass is not None:
if not issubclass(class_, superclass):
raise TypeError(
"A subclass of {} is expected. Got: {}".format(
superclass, class_))
return class_
def get_class(class_name, module_paths=None):
"""Returns the class based on class name.
Args:
class_name (str): Name or full path to the class.
module_paths (list): Paths to candidate modules to search for the
class. This is used if the class cannot be located solely based on
`class_name`. The first module in the list that contains the class
is used.
Returns:
The target class.
Raises:
ValueError: If class is not found based on :attr:`class_name` and
:attr:`module_paths`.
"""
class_ = locate(class_name)
if (class_ is None) and (module_paths is not None):
for module_path in module_paths:
# if module_path in _unimportable_modules:
# Special treatment for unimportable modules by directly
# accessing the class
class_ = locate('.'.join([module_path, class_name]))
if class_ is not None:
break
# else:
# module = importlib.import_module(module_path)
# if class_name in dir(module):
# class_ = getattr(module, class_name)
# break
if class_ is None:
raise ValueError(
"Class not found in {}: {}".format(module_paths, class_name))
return class_
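# Example (illustrative): resolve a class by name with a candidate module path.
#   get_class('OrderedDict', module_paths=['collections'])
#   # -> <class 'collections.OrderedDict'>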
def check_or_get_instance(ins_or_class_or_name, kwargs, module_paths=None,
classtype=None):
"""Returns a class instance and checks types.
Args:
ins_or_class_or_name: Can be of 3 types:
- A class to instantiate.
- A string of the name or full path to a class to \
instantiate.
- The class instance to check types.
kwargs (dict): Keyword arguments for the class constructor. Ignored
if `ins_or_class_or_name` is a class instance.
module_paths (list, optional): Paths to candidate modules to
search for the class. This is used if the class cannot be
located solely based on :attr:`class_name`. The first module
in the list that contains the class is used.
classtype (optional): A (list of) class of which the instance must
be an instantiation.
Raises:
ValueError: If class is not found based on :attr:`class_name` and
:attr:`module_paths`.
ValueError: If :attr:`kwargs` contains arguments that are invalid
for the class construction.
TypeError: If the instance is not an instantiation of
:attr:`classtype`.
"""
ret = ins_or_class_or_name
if is_str(ret) or isinstance(ret, type):
ret = get_instance(ret, kwargs, module_paths)
if classtype is not None:
if not isinstance(ret, classtype):
raise TypeError(
"An instance of {} is expected. Got: {}".format(classtype, ret))
return ret
def get_instance(class_or_name, kwargs, module_paths=None):
"""Creates a class instance.
Args:
class_or_name: A class, or its name or full path to a class to
instantiate.
kwargs (dict): Keyword arguments for the class constructor.
module_paths (list, optional): Paths to candidate modules to
search for the class. This is used if the class cannot be
located solely based on :attr:`class_name`. The first module
in the list that contains the class is used.
Returns:
A class instance.
Raises:
ValueError: If class is not found based on :attr:`class_or_name` and
:attr:`module_paths`.
ValueError: If :attr:`kwargs` contains arguments that are invalid
for the class construction.
"""
# Locate the class
class_ = class_or_name
if is_str(class_):
class_ = get_class(class_, module_paths)
# Check validity of arguments
class_args = set(get_args(class_.__init__))
if kwargs is None:
kwargs = {}
for key in kwargs.keys():
if key not in class_args:
raise ValueError(
"Invalid argument for class %s.%s: %s, valid args: %s" %
(class_.__module__, class_.__name__, key, list(class_args)))
return class_(**kwargs)
def check_or_get_instance_with_redundant_kwargs(
ins_or_class_or_name, kwargs, module_paths=None, classtype=None):
"""Returns a class instance and checks types.
Only those keyword arguments in :attr:`kwargs` that are included in the
class construction method are used.
Args:
ins_or_class_or_name: Can be of 3 types:
- A class to instantiate.
- A string of the name or module path to a class to \
instantiate.
- The class instance to check types.
kwargs (dict): Keyword arguments for the class constructor.
module_paths (list, optional): Paths to candidate modules to
search for the class. This is used if the class cannot be
located solely based on :attr:`class_name`. The first module
in the list that contains the class is used.
classtype (optional): A (list of) classes of which the instance must
be an instantiation.
Raises:
ValueError: If class is not found based on :attr:`class_name` and
:attr:`module_paths`.
ValueError: If :attr:`kwargs` contains arguments that are invalid
for the class construction.
TypeError: If the instance is not an instantiation of
:attr:`classtype`.
"""
ret = ins_or_class_or_name
if is_str(ret) or isinstance(ret, type):
ret = get_instance_with_redundant_kwargs(ret, kwargs, module_paths)
if classtype is not None:
if not isinstance(ret, classtype):
raise TypeError(
"An instance of {} is expected. Got: {}".format(classtype, ret))
return ret
def get_instance_with_redundant_kwargs(
class_name, kwargs, module_paths=None):
"""Creates a class instance.
Only those keyword arguments in :attr:`kwargs` that are included in the
class construction method are used.
Args:
class_name (str): A class or its name or module path.
kwargs (dict): A dictionary of arguments for the class constructor. It
may include invalid arguments which will be ignored.
module_paths (list of str): A list of paths to candidate modules to
search for the class. This is used if the class cannot be located
solely based on :attr:`class_name`. The first module in the list
that contains the class is used.
Returns:
A class instance.
Raises:
ValueError: If class is not found based on :attr:`class_name` and
:attr:`module_paths`.
"""
# Locate the class
class_ = get_class(class_name, module_paths)
# Select valid arguments
selected_kwargs = {}
class_args = set(get_args(class_.__init__))
if kwargs is None:
kwargs = {}
for key, value in kwargs.items():
if key in class_args:
selected_kwargs[key] = value
return class_(**selected_kwargs)
def get_function(fn_or_name, module_paths=None):
"""Returns the function of specified name and module.
Args:
fn_or_name (str or callable): Name or full path to a function, or the
function itself.
module_paths (list, optional): A list of paths to candidate modules to
search for the function. This is used only when the function
cannot be located solely based on :attr:`fn_or_name`. The first
module in the list that contains the function is used.
Returns:
A function.
"""
if is_callable(fn_or_name):
return fn_or_name
fn = locate(fn_or_name)
if (fn is None) and (module_paths is not None):
for module_path in module_paths:
# if module_path in _unimportable_modules:
fn = locate('.'.join([module_path, fn_or_name]))
if fn is not None:
break
# module = importlib.import_module(module_path)
# if fn_name in dir(module):
# fn = getattr(module, fn_name)
# break
if fn is None:
raise ValueError(
"Method not found in {}: {}".format(module_paths, fn_or_name))
return fn
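# Example (illustrative): resolve a function from its dotted path.
#   mean_fn = get_function('numpy.mean')
#   mean_fn([1, 2, 3])  # -> 2.0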
def call_function_with_redundant_kwargs(fn, kwargs):
"""Calls a function and returns the results.
Only those keyword arguments in :attr:`kwargs` that are included in the
function's argument list are used to call the function.
Args:
fn (function): A callable. If :attr:`fn` is not a python function,
:attr:`fn.__call__` is called.
kwargs (dict): A `dict` of arguments for the callable. It
may include invalid arguments which will be ignored.
Returns:
The returned results by calling :attr:`fn`.
"""
try:
fn_args = set(get_args(fn))
except TypeError:
        fn_args = set(get_args(fn.__call__))
if kwargs is None:
kwargs = {}
# Select valid arguments
selected_kwargs = {}
for key, value in kwargs.items():
if key in fn_args:
selected_kwargs[key] = value
return fn(**selected_kwargs)
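# Example (illustrative): keys not accepted by the callable are dropped.
#   call_function_with_redundant_kwargs(
#       lambda a, b=1: a + b, {'a': 2, 'unused': 9})  # -> 3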
def get_instance_kwargs(kwargs, hparams):
"""Makes a dict of keyword arguments with the following structure:
`kwargs_ = {'hparams': dict(hparams), **kwargs}`.
This is typically used for constructing a module which takes a set of
arguments as well as a argument named `hparams`.
Args:
kwargs (dict): A dict of keyword arguments. Can be `None`.
        hparams: A dict or an instance of :class:`~texar.tf.HParams`. Can be `None`.
Returns:
A `dict` that contains the keyword arguments in :attr:`kwargs`, and
an additional keyword argument named `hparams`.
"""
if hparams is None or isinstance(hparams, dict):
kwargs_ = {'hparams': hparams}
elif isinstance(hparams, HParams):
kwargs_ = {'hparams': hparams.todict()}
else:
raise ValueError(
'`hparams` must be a dict, an instance of HParams, or a `None`.')
kwargs_.update(kwargs or {})
return kwargs_
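# Example (illustrative):
#   get_instance_kwargs({'mode': 'train'}, {'dropout': 0.1})
#   # -> {'hparams': {'dropout': 0.1}, 'mode': 'train'}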
def dict_patch(tgt_dict, src_dict):
"""Recursively patch :attr:`tgt_dict` by adding items from :attr:`src_dict`
that do not exist in :attr:`tgt_dict`.
If respective items in :attr:`src_dict` and :attr:`tgt_dict` are both
`dict`, the :attr:`tgt_dict` item is patched recursively.
Args:
tgt_dict (dict): Target dictionary to patch.
src_dict (dict): Source dictionary.
Return:
dict: The new :attr:`tgt_dict` that is patched.
"""
if src_dict is None:
return tgt_dict
for key, value in src_dict.items():
if key not in tgt_dict:
tgt_dict[key] = copy.deepcopy(value)
elif isinstance(value, dict) and isinstance(tgt_dict[key], dict):
tgt_dict[key] = dict_patch(tgt_dict[key], value)
return tgt_dict
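# Example (illustrative): existing keys win, missing keys are copied over.
#   dict_patch({'a': 1}, {'a': 2, 'b': {'c': 3}})
#   # -> {'a': 1, 'b': {'c': 3}}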
def dict_lookup(dict_, keys, default=None):
"""Looks up :attr:`keys` in the dict, returns the corresponding values.
The :attr:`default` is used for keys not present in the dict.
Args:
dict_ (dict): A dictionary for lookup.
keys: A numpy array or a (possibly nested) list of keys.
default (optional): Value to be returned when a key is not in
:attr:`dict_`. Error is raised if :attr:`default` is not given and
key is not in the dict.
Returns:
A numpy array of values with the same structure as :attr:`keys`.
Raises:
TypeError: If key is not in :attr:`dict_` and :attr:`default` is `None`.
"""
return np.vectorize(lambda x: dict_.get(x, default))(keys)
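# Example (illustrative):
#   dict_lookup({'a': 1, 'b': 2}, [['a', 'b'], ['b', 'a']], default=0)
#   # -> array([[1, 2], [2, 1]])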
def dict_fetch(src_dict, tgt_dict_or_keys):
"""Fetches a sub dict of :attr:`src_dict` with the keys in
:attr:`tgt_dict_or_keys`.
Args:
src_dict: A dict or instance of :class:`~texar.tf.HParams`.
The source dict to fetch values from.
tgt_dict_or_keys: A dict, instance of :class:`~texar.tf.HParams`,
or a list (or a dict_keys) of keys to be included in the output
dict.
Returns:
A new dict that is a subdict of :attr:`src_dict`.
"""
if src_dict is None:
return src_dict
if isinstance(tgt_dict_or_keys, HParams):
tgt_dict_or_keys = tgt_dict_or_keys.todict()
if isinstance(tgt_dict_or_keys, dict):
tgt_dict_or_keys = tgt_dict_or_keys.keys()
keys = list(tgt_dict_or_keys)
if isinstance(src_dict, HParams):
src_dict = src_dict.todict()
return {k: src_dict[k] for k in keys if k in src_dict}
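# Example (illustrative):
#   dict_fetch({'a': 1, 'b': 2, 'c': 3}, ['a', 'c'])  # -> {'a': 1, 'c': 3}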
def dict_pop(dict_, pop_keys, default=None):
"""Removes keys from a dict and returns their values.
Args:
dict_ (dict): A dictionary from which items are removed.
pop_keys: A key or a list of keys to remove and return respective
values or :attr:`default`.
default (optional): Value to be returned when a key is not in
:attr:`dict_`. The default value is `None`.
Returns:
A `dict` of the items removed from :attr:`dict_`.
"""
if not isinstance(pop_keys, (list, tuple)):
pop_keys = [pop_keys]
ret_dict = {key: dict_.pop(key, default) for key in pop_keys}
return ret_dict
def flatten_dict(dict_, parent_key="", sep="."):
"""Flattens a nested dictionary. Namedtuples within the dictionary are
converted to dicts.
Adapted from:
https://github.com/google/seq2seq/blob/master/seq2seq/models/model_base.py
Args:
dict_ (dict): The dictionary to flatten.
parent_key (str): A prefix to prepend to each key.
sep (str): Separator that intervenes between parent and child keys.
E.g., if `sep` == '.', then `{ "a": { "b": 3 } }` is converted
into `{ "a.b": 3 }`.
Returns:
A new flattened `dict`.
"""
items = []
for key, value in dict_.items():
key_ = parent_key + sep + key if parent_key else key
if isinstance(value, collections.MutableMapping):
items.extend(flatten_dict(value, key_, sep=sep).items())
elif isinstance(value, tuple) and hasattr(value, "_asdict"):
dict_items = collections.OrderedDict(zip(value._fields, value))
items.extend(flatten_dict(dict_items, key_, sep=sep).items())
else:
items.append((key_, value))
return dict(items)
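# Example (illustrative):
#   flatten_dict({'a': {'b': 3}, 'c': 4})  # -> {'a.b': 3, 'c': 4}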
def default_str(str_, default_str):
"""Returns :attr:`str_` if it is not `None` or empty, otherwise returns
:attr:`default_str`.
Args:
str_: A string.
default_str: A string.
Returns:
Either :attr:`str_` or :attr:`default_str`.
"""
if str_ is not None and str_ != "":
return str_
else:
return default_str
def uniquify_str(str_, str_set):
"""Uniquifies :attr:`str_` if :attr:`str_` is included in :attr:`str_set`.
This is done by appending a number to :attr:`str_`. Returns
:attr:`str_` directly if it is not included in :attr:`str_set`.
Args:
str_ (string): A string to uniquify.
str_set (set, dict, or list): A collection of strings. The returned
string is guaranteed to be different from the elements in the
collection.
Returns:
The uniquified string. Returns :attr:`str_` directly if it is
already unique.
Example:
.. code-block:: python
print(uniquify_str('name', ['name', 'name_1']))
# 'name_2'
"""
if str_ not in str_set:
return str_
else:
for i in range(1, len(str_set) + 1):
unique_str = str_ + "_%d" % i
if unique_str not in str_set:
return unique_str
raise ValueError("Fails to uniquify string: " + str_)
def _recur_split(s, dtype_as):
"""Splits (possibly nested list of) strings recursively.
"""
if is_str(s):
return _maybe_list_to_array(s.split(), dtype_as)
else:
s_ = [_recur_split(si, dtype_as) for si in s]
return _maybe_list_to_array(s_, s)
def strip_token(str_, token, is_token_list=False, compat=True):
"""Returns a copy of strings with leading and trailing tokens removed.
Note that besides :attr:`token`, all leading and trailing whitespace
characters are also removed.
If :attr:`is_token_list` is False, then the function assumes tokens in
:attr:`str_` are separated with whitespace character.
Args:
str_: A `str`, or an `n`-D numpy array or (possibly nested)
list of `str`.
token (str): The token to strip, e.g., the '<PAD>' token defined in
:class:`~texar.tf.data.SpecialTokens`.PAD
is_token_list (bool): Whether each sentence in :attr:`str_` is a list
of tokens. If False, each sentence in :attr:`str_` is assumed to
contain tokens separated with space character.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
The stripped strings of the same structure/shape as :attr:`str_`.
Example:
.. code-block:: python
str_ = '<PAD> a sentence <PAD> <PAD> '
str_stripped = strip_token(str_, '<PAD>')
# str_stripped == 'a sentence'
str_ = ['<PAD>', 'a', 'sentence', '<PAD>', '<PAD>', '', '']
str_stripped = strip_token(str_, '<PAD>', is_token_list=True)
# str_stripped == 'a sentence'
"""
def _recur_strip(s):
if is_str(s):
if token == "":
return ' '.join(s.strip().split())
else:
return ' '.join(s.strip().split()).\
replace(' ' + token, '').replace(token + ' ', '')
else:
s_ = [_recur_strip(si) for si in s]
return _maybe_list_to_array(s_, s)
s = str_
if compat:
s = compat_as_text(s)
if is_token_list:
s = str_join(s, compat=False)
strp_str = _recur_strip(s)
if is_token_list:
strp_str = _recur_split(strp_str, str_)
return strp_str
def strip_eos(str_, eos_token='<EOS>', is_token_list=False, compat=True):
"""Remove the EOS token and all subsequent tokens.
If :attr:`is_token_list` is False, then the function assumes tokens in
:attr:`str_` are separated with whitespace character.
Args:
str_: A `str`, or an `n`-D numpy array or (possibly nested)
list of `str`.
eos_token (str): The EOS token. Default is '<EOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.EOS
is_token_list (bool): Whether each sentence in :attr:`str_` is a list
of tokens. If False, each sentence in :attr:`str_` is assumed to
contain tokens separated with space character.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
Strings of the same structure/shape as :attr:`str_`.
"""
def _recur_strip(s):
if is_str(s):
s_tokens = s.split()
if eos_token in s_tokens:
return ' '.join(s_tokens[:s_tokens.index(eos_token)])
else:
return s
else:
s_ = [_recur_strip(si) for si in s]
return _maybe_list_to_array(s_, s)
s = str_
if compat:
s = compat_as_text(s)
if is_token_list:
s = str_join(s, compat=False)
strp_str = _recur_strip(s)
if is_token_list:
strp_str = _recur_split(strp_str, str_)
return strp_str
_strip_eos_ = strip_eos
def strip_bos(str_, bos_token='<BOS>', is_token_list=False, compat=True):
"""Remove all leading BOS tokens.
Note that besides :attr:`bos_token`, all leading and trailing whitespace
characters are also removed.
If :attr:`is_token_list` is False, then the function assumes tokens in
:attr:`str_` are separated with whitespace character.
Args:
str_: A `str`, or an `n`-D numpy array or (possibly nested)
list of `str`.
bos_token (str): The BOS token. Default is '<BOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.BOS
is_token_list (bool): Whether each sentence in :attr:`str_` is a list
of tokens. If False, each sentence in :attr:`str_` is assumed to
contain tokens separated with space character.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
Strings of the same structure/shape as :attr:`str_`.
"""
def _recur_strip(s):
if is_str(s):
if bos_token == '':
return ' '.join(s.strip().split())
else:
return ' '.join(s.strip().split()).replace(bos_token + ' ', '')
else:
s_ = [_recur_strip(si) for si in s]
return _maybe_list_to_array(s_, s)
s = str_
if compat:
s = compat_as_text(s)
if is_token_list:
s = str_join(s, compat=False)
strp_str = _recur_strip(s)
if is_token_list:
strp_str = _recur_split(strp_str, str_)
return strp_str
_strip_bos_ = strip_bos
def strip_special_tokens(str_, strip_pad='<PAD>', strip_bos='<BOS>',
strip_eos='<EOS>', is_token_list=False, compat=True):
"""Removes special tokens in strings, including:
- Removes EOS and all subsequent tokens
    - Removes leading and trailing PAD tokens
- Removes leading BOS tokens
Note that besides the special tokens, all leading and trailing whitespace
characters are also removed.
    This is a joint function of :func:`strip_eos`, :func:`strip_token`, and
    :func:`strip_bos`
Args:
str_: A `str`, or an `n`-D numpy array or (possibly nested)
list of `str`.
strip_pad (str): The PAD token to strip from the strings (i.e., remove
the leading and trailing PAD tokens of the strings). Default
is '<PAD>' as defined in
:class:`~texar.tf.data.SpecialTokens`.PAD.
Set to `None` or `False` to disable the stripping.
strip_bos (str): The BOS token to strip from the strings (i.e., remove
the leading BOS tokens of the strings).
Default is '<BOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.BOS.
Set to `None` or `False` to disable the stripping.
strip_eos (str): The EOS token to strip from the strings (i.e., remove
the EOS tokens and all subsequent tokens of the strings).
Default is '<EOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.EOS.
Set to `None` or `False` to disable the stripping.
is_token_list (bool): Whether each sentence in :attr:`str_` is a list
of tokens. If False, each sentence in :attr:`str_` is assumed to
contain tokens separated with space character.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
Strings of the same shape of :attr:`str_` with special tokens stripped.
"""
s = str_
if compat:
s = compat_as_text(s)
if is_token_list:
s = str_join(s, compat=False)
if strip_eos is not None and strip_eos is not False:
s = _strip_eos_(s, strip_eos, is_token_list=False, compat=False)
if strip_pad is not None and strip_pad is not False:
s = strip_token(s, strip_pad, is_token_list=False, compat=False)
if strip_bos is not None and strip_bos is not False:
s = _strip_bos_(s, strip_bos, is_token_list=False, compat=False)
if is_token_list:
s = _recur_split(s, str_)
return s
def str_join(tokens, sep=' ', compat=True):
"""Concats :attr:`tokens` along the last dimension with intervening
occurrences of :attr:`sep`.
Args:
tokens: An `n`-D numpy array or (possibly nested) list of `str`.
sep (str): The string intervening between the tokens.
compat (bool): Whether to convert tokens into `unicode` (Python 2)
or `str` (Python 3).
Returns:
An `(n-1)`-D numpy array (or list) of `str`.
"""
def _recur_join(s):
if len(s) == 0:
return ''
elif is_str(s[0]):
return sep.join(s)
else:
s_ = [_recur_join(si) for si in s]
return _maybe_list_to_array(s_, s)
if compat:
tokens = compat_as_text(tokens)
str_ = _recur_join(tokens)
return str_
def map_ids_to_strs(ids, vocab, join=True, strip_pad='<PAD>',
strip_bos='<BOS>', strip_eos='<EOS>', compat=True):
"""Transforms `int` indexes to strings by mapping ids to tokens,
concatenating tokens into sentences, and stripping special tokens, etc.
Args:
ids: An n-D numpy array or (possibly nested) list of `int` indexes.
vocab: An instance of :class:`~texar.tf.data.Vocab`.
join (bool): Whether to concat along the last dimension of the
the tokens into a string separated with a space character.
strip_pad (str): The PAD token to strip from the strings (i.e., remove
the leading and trailing PAD tokens of the strings). Default
is '<PAD>' as defined in
:class:`~texar.tf.data.SpecialTokens`.PAD.
Set to `None` or `False` to disable the stripping.
strip_bos (str): The BOS token to strip from the strings (i.e., remove
the leading BOS tokens of the strings).
Default is '<BOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.BOS.
Set to `None` or `False` to disable the stripping.
strip_eos (str): The EOS token to strip from the strings (i.e., remove
the EOS tokens and all subsequent tokens of the strings).
Default is '<EOS>' as defined in
:class:`~texar.tf.data.SpecialTokens`.EOS.
Set to `None` or `False` to disable the stripping.
Returns:
If :attr:`join` is True, returns a `(n-1)`-D numpy array (or list) of
concatenated strings. If :attr:`join` is False, returns an `n`-D numpy
array (or list) of str tokens.
Example:
.. code-block:: python
text_ids = [[1, 9, 6, 2, 0, 0], [1, 28, 7, 8, 2, 0]]
text = map_ids_to_strs(text_ids, data.vocab)
# text == ['a sentence', 'parsed from ids']
text = map_ids_to_strs(
text_ids, data.vocab, join=False,
strip_pad=None, strip_bos=None, strip_eos=None)
# text == [['<BOS>', 'a', 'sentence', '<EOS>', '<PAD>', '<PAD>'],
# ['<BOS>', 'parsed', 'from', 'ids', '<EOS>', '<PAD>']]
"""
tokens = vocab.map_ids_to_tokens_py(ids)
if isinstance(ids, (list, tuple)):
tokens = tokens.tolist()
if compat:
tokens = compat_as_text(tokens)
str_ = str_join(tokens, compat=False)
str_ = strip_special_tokens(
str_, strip_pad=strip_pad, strip_bos=strip_bos, strip_eos=strip_eos,
compat=False)
if join:
return str_
else:
return _recur_split(str_, ids)
def ceildiv(a, b):
"""Divides with ceil.
E.g., `5 / 2 = 2.5`, `ceildiv(5, 2) = 3`.
Args:
a (int): Dividend integer.
b (int): Divisor integer.
Returns:
int: Ceil quotient.
"""
return -(-a // b)
def straight_through(fw_tensor, bw_tensor):
"""Use a tensor in forward pass while backpropagating gradient to another.
Args:
fw_tensor: A tensor to be used in the forward pass.
bw_tensor: A tensor to which gradient is backpropagated. Must have the
same shape and type with :attr:`fw_tensor`.
Returns:
A tensor of the same shape and value with :attr:`fw_tensor` but will
direct gradient to bw_tensor.
"""
return tf.stop_gradient(fw_tensor) + bw_tensor - tf.stop_gradient(bw_tensor)
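# Usage sketch (names are placeholders): use the hard (e.g. one-hot) sample in
# the forward pass while gradients flow through the soft distribution.
#   y = straight_through(hard_sample, soft_sample)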
def truncate_seq_pair(tokens_a: Union[List[int], List[str]],
tokens_b: Union[List[int], List[str]],
max_length: int):
r"""Truncates a sequence pair in place to the maximum length.
This is a simple heuristic which will always truncate the longer sequence
one token at a time. This makes more sense than truncating an equal
percent of tokens from each, since if one sequence is very short then
each token that's truncated likely contains more information than a
longer sequence.
Example:
.. code-block:: python
tokens_a = [1, 2, 3, 4, 5]
tokens_b = [6, 7]
truncate_seq_pair(tokens_a, tokens_b, 5)
tokens_a # [1, 2, 3]
tokens_b # [6, 7]
Args:
tokens_a: A list of tokens or token ids.
tokens_b: A list of tokens or token ids.
max_length: maximum sequence length.
"""
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
| 33.182342 | 83 | 0.624711 | [
"Apache-2.0"
] | awesomemachinelearning/texar | texar/tf/utils/utils.py | 34,576 | Python |
#!/usr/bin/env python3
from helper import lemniscate, saveImage, getImageName
from math import pi
from random import uniform
IMAGE_WIDTH_IN = 5
IMAGE_HEIGHT_IN = 2
DPI = 400
def makeImage(height, width, imageName):
k = 1.9
numSamples = 100000000
imageWidth = width
imageHeight = height
data = [0] * imageHeight * imageWidth
cx = imageWidth/2
cy = imageHeight/2
radius = imageWidth/2 * .95
for i in range(numSamples):
# need to increase range for complete curve
theta = uniform(0, 10 * pi)
pA_x, pA_y = lemniscate(theta, radius, cx, cy)
pB_x, pB_y = lemniscate(k * theta, radius, cx, cy)
# pick a random point on the line segment [pA, pB]
r = uniform(0, 1)
pC_x = (1 - r) * pA_x + r * pB_x
pC_y = (1 - r) * pA_y + r * pB_y
        xi = int(pC_x + .5)
        yi = int(pC_y + .5)
        data[yi * imageWidth + xi] += 1
saveImage(data, imageName, imageWidth, imageHeight,
bg=[255, 255, 255], fg=[221, 100, 0], alphaMultiplier=18)
if __name__ == "__main__":
imageName = getImageName(__file__)
width = IMAGE_WIDTH_IN * DPI
height = IMAGE_HEIGHT_IN * DPI
makeImage(height, width, imageName)
| 25.137255 | 71 | 0.585023 | [
"MIT"
] | nicholsonja/Bridges-2019 | presentation/presentation_images/genImage_07.py | 1,282 | Python |
import magma as m
from magma import *
def test_pair():
# types
A2 = Tuple[Bit, Bit]
print(A2)
assert isinstance(A2, TupleMeta)
print(str(A2))
assert A2 == A2
B2 = Tuple[In(Bit), In(Bit)]
assert isinstance(B2, TupleMeta)
assert B2 == B2
C2 = Tuple[Out(Bit), Out(Bit)]
assert isinstance(C2, TupleMeta)
#assert str(C2) == 'Tuple(x=Out(Bit),y=Out(Bit))'
assert C2 == C2
assert issubclass(m.In(m.Tuple[Bit, Bit]), m.In(m.Tuple[Bit, Bit]))
assert isinstance(m.In(m.Tuple[Bit, Bit])(), m.In(m.Tuple[Bit, Bit]))
assert issubclass(m.In(m.Tuple[Bit, Bit]), m.Tuple[Bit, Bit])
assert isinstance(m.In(m.Tuple[Bit, Bit])(), m.Tuple[Bit, Bit])
assert not issubclass(m.In(m.Tuple[Bit, Bit]), m.Out(m.Tuple[Bit, Bit]))
assert not isinstance(m.In(m.Tuple[Bit, Bit])(), m.Out(m.Tuple[Bit, Bit]))
assert issubclass(m.Out(m.Tuple[Bit, Bit]), m.Out(m.Tuple[Bit, Bit]))
assert isinstance(m.Out(m.Tuple[Bit, Bit])(), m.Out(m.Tuple[Bit, Bit]))
assert issubclass(m.Out(m.Tuple[Bit, Bit]), m.Tuple[Bit, Bit])
assert isinstance(m.Out(m.Tuple[Bit, Bit])(), m.Tuple[Bit, Bit])
assert not issubclass(m.Out(m.Tuple[Bit, Bit]), m.In(m.Tuple[Bit, Bit]))
assert not isinstance(m.Out(m.Tuple[Bit, Bit])(), m.In(m.Tuple[Bit, Bit]))
def test_dict():
# types
class A2(Product, cache=True):
x = Bit
y = Bit
print(A2)
assert isinstance(A2, ProductMeta)
print(str(A2))
assert issubclass(In(A2), A2)
assert issubclass(Out(A2), A2)
assert issubclass(Flip(A2), A2)
assert not issubclass(In(A2), Out(A2))
assert not issubclass(Out(A2), In(A2))
assert issubclass(Flip(In(A2)), Out(A2))
assert issubclass(Flip(Out(A2)), In(A2))
assert issubclass(Out(In(A2)), Out(A2))
assert issubclass(In(Out(A2)), In(A2))
assert not issubclass(Out(In(A2)), In(Out(A2)))
assert not issubclass(In(Out(A2)), Out(In(A2)))
assert not issubclass(Flip(In(A2)), Flip(Out(A2)))
assert not issubclass(Flip(Out(A2)), Flip(In(A2)))
assert isinstance(In(A2)(), A2)
assert isinstance(Out(A2)(), A2)
assert isinstance(Flip(A2)(), A2)
assert not isinstance(In(A2)(), Out(A2))
assert not isinstance(Out(A2)(), In(A2))
assert isinstance(Flip(In(A2))(), Out(A2))
assert isinstance(Flip(Out(A2))(), In(A2))
assert isinstance(Out(In(A2))(), Out(A2))
assert isinstance(In(Out(A2))(), In(A2))
assert not isinstance(Out(In(A2))(), In(Out(A2)))
assert not isinstance(In(Out(A2))(), Out(In(A2)))
assert not isinstance(Flip(In(A2))(), Flip(Out(A2)))
assert not isinstance(Flip(Out(A2))(), Flip(In(A2)))
#assert str(A2) == 'Tuple(x=Bit,y=Bit)'
assert A2 == A2
class B2(Product, cache=True):
x = In(Bit)
y = In(Bit)
assert isinstance(B2, ProductMeta)
#assert str(B2) == 'Tuple(x=In(Bit),y=In(Bit))'
assert B2 == B2
class C2(Product, cache=True):
x = Out(Bit)
y = Out(Bit)
assert isinstance(C2, ProductMeta)
#assert str(C2) == 'Tuple(x=Out(Bit),y=Out(Bit))'
assert C2 == C2
assert A2 == B2
assert A2 == C2
assert B2 == C2
assert A2 is not B2
assert A2 is not C2
assert B2 is not C2
def test_flip():
class Product2(Product):
x = In(Bit)
y = Out(Bit)
print(Product2)
print(Flip(Product2))
Tin = In(Product2)
Tout = Out(Product2)
print(Tin)
print(Tout)
assert Tin == Product2
assert Tout == Product2
assert Tin == Tout
assert Tin is not Product2
assert Tout is not Product2
assert Tin is not Tout
T = In(Tout)
assert T == Tin
#T = Flip(Tout)
#assert T == Tin
# print(T)
T = Out(Tin)
assert T == Tout
#T = Flip(Tin)
#assert T == Tout
# print(T)
def test_wire():
class Product2(Product):
x = Bit
y = Bit
t0 = Product2(name='t0')
t1 = Product2(name='t1')
wire(t0, t1)
assert t0.wired()
assert t1.wired()
assert t1.value() is t0
assert t0.value() is t1
assert t0.driving() == dict(x=[t1.x], y=[t1.y])
b0 = t0.x
b1 = t1.x
assert b0 is b1._wire.driver.bit
assert b1 is b0._wire.driving()[0]
assert b1.value() is b0
def test_val():
class A2(Product):
x = Bit
y = Bit
# constructor
a = A2(name='a')
print('created A2')
assert isinstance(a, Product)
assert str(a) == 'a'
# selectors
print('a["x"]')
b = a['x']
assert isinstance(b, Bit)
assert str(b) == 'a.x'
print('a.x')
b = a.x
assert isinstance(b, Bit)
assert str(b) == 'a.x'
def test_nested():
# Test for https://github.com/phanrahan/magma/issues/445
def hierIO():
class dictIO(Product):
baseIO = make_baseIO()
ctr = m.In(m.Bit)
return dictIO
def DefineCtrModule():
class ctrModule(m.Circuit):
name = "ctr_module"
io = m.IO(ctr=m.In(m.Bit))
return ctrModule
def make_baseIO():
class dictIO(Product):
            in0 = m.In(m.Bit)
out0 = m.Out(m.Bit)
return dictIO
def DefineBaseModule():
class baseModule(m.Circuit):
name = "base_module"
io = m.IO(baseIO=make_baseIO())
return baseModule
def DefineHier():
class HierModule(m.Circuit):
name = "hier_module"
io = m.IO(hier=hierIO())
baseM = DefineBaseModule()()
ctrM = DefineCtrModule()()
m.wire(baseM.baseIO, io.hier.baseIO)
m.wire(ctrM.ctr, io.hier.ctr)
return HierModule
baseMH = DefineHier()
m.compile("build/baseMH", baseMH, output="coreir-verilog")
def test_tuple_nested_tuple_value():
def IFC0(params):
return m.Product.from_fields("IFC0", {
"port0": m.In(m.Bits[params['param0']]),
"port1": m.In(m.Bits[params['param0']]),
"port2": m.In(m.Array[params['param0'], m.Bits[2]]),
"port3": m.In(m.Bits[params['param0']]),
"port4": m.In(m.Bit),
"port5": m.In(m.Bit),
"port7": m.In(m.Bit),
"port8": m.In(m.Bit),
"port9": m.In(m.Bit),
"port10": m.In(m.Bits[m.bitutils.clog2(params['param0'])]),
})
def IFC1(params):
dictOut = {"port4": m.Out(m.Bit)}
return m.Product.from_fields("IFC1", dictOut)
def DefineMyCircuit(params):
class MyCircuit(m.Circuit):
io = m.IO(IFC0=IFC0(params).flip())
return MyCircuit
def DefineTop(params):
class Top(m.Circuit):
io = m.IO(IFC1=IFC1(params))
m.wire(io.IFC1.port4, DefineMyCircuit(params)().IFC0.port4)
return Top
m.compile("top", DefineTop({'param0': 5}))
def test_flat_length():
a = m.Product.from_fields("anon", dict(x=m.Bits[5], y=m.Bits[3], z=m.Bit))
assert a.flat_length() == 9
def test_anon_product():
product = m.Product.from_fields("anon", dict(x=m.Bits[5], y=m.Bits[3], z=m.Bit))
assert isinstance(product, AnonymousProductMeta)
assert isinstance(product, ProductMeta)
anon_product = m.AnonProduct[dict(x=m.Bits[5], y=m.Bits[3], z=m.Bit)]
assert isinstance(anon_product, AnonymousProductMeta)
assert not isinstance(anon_product, ProductMeta)
assert anon_product.flat_length() == product.flat_length()
assert anon_product.x == product.x
assert anon_product.y == product.y
assert anon_product.z == product.z
assert anon_product == product
    assert anon_product is not product
| 26.013605 | 84 | 0.582767 | [
"MIT"
] | leonardt/magma | tests/test_type/test_tuple.py | 7,648 | Python |
from django.test import TestCase
from rest_framework.test import APIClient
###############################################
## test resources
###############################################
class TestPlayerSerializers(TestCase):
fixtures = ['auth.json', 'team.json']
def test_registration(self):
client = APIClient()
user = {'username': 'martin',
'first_name': 'Martin',
'last_name': 'Bright',
'email':'martin@abc.com',
'password':'pwd123',
'confirm_password':'pwd123',
'birth_year':1983}
response = client.post('/' + API_PATH + 'register', user, format='json')
        assert response.status_code == 201
 | 33.142857 | 80 | 0.474138 | [
"MIT"
] | vollov/aron | team/tests.py | 696 | Python |
# -*- coding: utf-8 -*-
{
'name': 'Payment - Account',
'category': 'Accounting/Accounting',
'summary': 'Account and Payment Link and Portal',
'version': '1.0',
'description': """Link Account and Payment and add Portal Payment
Provide tools for account-related payment as well as portal options to
enable payment.
* UPDATE ME
""",
'depends': ['payment'],
'data': [
'views/account_portal_templates.xml',
],
'installable': True,
'auto_install': False,
}
| 22.863636 | 70 | 0.626243 | [
"MIT"
] | VaibhavBhujade/Blockchain-ERP-interoperability | odoo-13.0 - Copy/addons/account_payment/__manifest__.py | 503 | Python |
import tulip
def reader(s):
res = yield from s.read(1)
while res:
print ('got data:', res)
res = yield from s.read(1)
def main(stream):
stream2 = tulip.StreamReader()
# start separate task
t = tulip.async(reader(stream2))
while 1:
data = yield from stream.read(1)
print ('received data:', data)
if data == b'0':
break
stream2.feed_data(data)
stream2.feed_eof()
#yield from t
if __name__ == '__main__':
loop = tulip.get_event_loop()
stream = tulip.StreamReader()
stream.feed_data(b'1234567890')
try:
loop.run_until_complete(main(stream))
except KeyboardInterrupt:
pass
| 19.153846 | 46 | 0.562249 | [
"Apache-2.0"
] | bslatkin/pycon2014 | lib/asyncio-0.4.1/sched_test.py | 747 | Python |
import cocotb
from cocotb.clock import Clock
from cocotb.triggers import ClockCycles, ReadWrite, NextTimeStep, RisingEdge, FallingEdge
from cocotb.binary import BinaryValue
import numpy as np
from matplotlib import pyplot as plt
from scipy.signal import butter, filtfilt
from fixedpoint import FixedPoint
@cocotb.test()
async def test_peak_detect(dut):
num_samples_per_epoch = 15*50
num_epochs = 10
num_samples = num_samples_per_epoch * num_epochs
data = np.loadtxt('46343_acceleration.txt', delimiter=' ')
#count_feature = np.loadtxt('46343_cleaned_counts.out', delimiter=' ')
fs = 50
time = np.arange(np.amin(data[:, 0]), np.amax(data[:, 0]), 1.0 / fs)
z_accel = np.interp(time, data[:, 0], data[:, 3])
# cf_low = 3
# cf_hi = 11
# order = 5
# w1 = cf_low / (fs / 2)
# w2 = cf_hi / (fs / 2)
# pass_band = [w1, w2]
# b, a = butter(order, pass_band, 'bandpass')
# z_filt = filtfilt(b, a, z_accel)
start_offset_sec = 120
offset = fs * start_offset_sec
z_accel = z_accel[offset:offset+num_samples]
print(f"Number of samples to input {z_accel.shape[0]}")
#count_feature = count_feature[::num_epochs]
clk = dut.clk
dut.i_z_accel <= 0
dut.i_valid <= 0
cocotb.fork(Clock(clk, 25, units="ns").start())
# Reset logic
await NextTimeStep()
dut.reset <= 1
await ClockCycles(clk, 1)
await ReadWrite()
dut.reset <= 0
await ClockCycles(clk, 1)
await ReadWrite()
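    # Each sample below is scaled down by a factor of 5 and converted to a
    # signed fixed-point value (via FixedPoint) before being driven onto
    # i_z_accel as a binary string.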
for i, z in enumerate(z_accel):
dut.i_z_accel <= BinaryValue(str(FixedPoint(float(z/5), 1, 7)))
dut.i_valid <= 1
await ClockCycles(clk, 1)
dut.i_valid <= 0
await ClockCycles(clk, 10)
dut.i_valid <= 0
await ClockCycles(clk, 100)
| 30.517241 | 89 | 0.649153 | [
"MIT"
] | edge-analytics/fpga-sleep-tracker | fpga/test/featurize/actigraphy_counts/actigraphy_counts_tb.py | 1,770 | Python |
import getpass
import logging
import os
from urlparse import urlparse
from django.conf import settings
from django.core.mail import EmailMultiAlternatives
from django.template.loader import get_template
from readthedocs.builds.constants import LATEST
from readthedocs.builds.constants import LATEST_VERBOSE_NAME
from readthedocs.builds.models import Build
log = logging.getLogger(__name__)
SYNC_USER = getattr(settings, 'SYNC_USER', getpass.getuser())
def run_on_app_servers(command):
"""
A helper to copy a single file across app servers
"""
log.info("Running %s on app servers" % command)
ret_val = 0
if getattr(settings, "MULTIPLE_APP_SERVERS", None):
for server in settings.MULTIPLE_APP_SERVERS:
ret = os.system("ssh %s@%s %s" % (SYNC_USER, server, command))
if ret != 0:
ret_val = ret
return ret_val
else:
ret = os.system(command)
return ret
def clean_url(url):
parsed = urlparse(url)
if parsed.scheme:
scheme, netloc = parsed.scheme, parsed.netloc
elif parsed.netloc:
scheme, netloc = "http", parsed.netloc
else:
scheme, netloc = "http", parsed.path
return netloc
def cname_to_slug(host):
from dns import resolver
answer = [ans for ans in resolver.query(host, 'CNAME')][0]
domain = answer.target.to_unicode()
slug = domain.split('.')[0]
return slug
def trigger_build(project, version=None, record=True, force=False, basic=False):
"""
An API to wrap the triggering of a build.
"""
# Avoid circular import
from readthedocs.projects.tasks import update_docs
if project.skip:
return None
if not version:
version = project.versions.get(slug=LATEST)
if record:
build = Build.objects.create(
project=project,
version=version,
type='html',
state='triggered',
success=True,
)
update_docs.delay(pk=project.pk, version_pk=version.pk, record=record,
force=force, basic=basic, build_pk=build.pk)
else:
build = None
update_docs.delay(pk=project.pk, version_pk=version.pk, record=record,
force=force, basic=basic)
return build
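# Usage sketch (assumes `project` is a Project instance obtained elsewhere):
#   build = trigger_build(project, record=True)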
def send_email(recipient, subject, template, template_html, context=None,
request=None):
'''
Send multipart email
recipient
Email recipient address
subject
Email subject header
template
Plain text template to send
template_html
HTML template to send as new message part
context
A dictionary to pass into the template calls
request
Request object for determining absolute URL
'''
if request:
scheme = 'https' if request.is_secure() else 'http'
context['uri'] = '{scheme}://{host}'.format(scheme=scheme,
host=request.get_host())
ctx = {}
    ctx.update(context or {})
msg = EmailMultiAlternatives(
subject,
get_template(template).render(ctx),
settings.DEFAULT_FROM_EMAIL,
[recipient]
)
msg.attach_alternative(get_template(template_html).render(ctx), 'text/html')
msg.send()
| 26.645161 | 80 | 0.633777 | [
"MIT"
] | ank-forked/readthedocs.org | readthedocs/core/utils/__init__.py | 3,304 | Python |
from django.contrib import admin
from .models import *
class foodAdmin(admin.ModelAdmin):
class Meta:
model=Fooditem
list_display=['name']
list_filter=['name']
admin.site.register(Profile)
admin.site.register(UserFooditem)
admin.site.register(Category)
admin.site.register(Fooditem,foodAdmin)
| 22.785714 | 39 | 0.746082 | [
"MIT"
] | Akumucollins/Calorie-Counter | calories/admin.py | 319 | Python |
# -*- coding: utf-8 -*-
from music21.test.dedent import dedent
__all__ = [
'dedent',
'testDocumentation',
'testExternal',
'testPerformance',
'timeGraphs',
'testStream',
'helpers',
]
import sys
if sys.version > '3':
from music21.test import testStream
from music21.test import testDocumentation
else:
import testDocumentation # @Reimport
import testStream # @Reimport
_DOC_IGNORE_MODULE_OR_PACKAGE = True
#------------------------------------------------------------------------------
# eof
| 18.931034 | 79 | 0.570128 | [
"MIT"
] | lasconic/randomsheetmusic | lib/music21/test/__init__.py | 549 | Python |
from typing import Union, List, Optional
from pyspark.sql.types import StructType, StructField, StringType, ArrayType, DataType
# This file is auto-generated by generate_schema so do not edit it manually
# noinspection PyPep8Naming
class MedicationKnowledge_AdministrationGuidelinesSchema:
"""
Information about a medication that is used to support knowledge.
"""
# noinspection PyDefaultArgument
@staticmethod
def get_schema(
max_nesting_depth: Optional[int] = 6,
nesting_depth: int = 0,
nesting_list: List[str] = [],
max_recursion_limit: Optional[int] = 2,
include_extension: Optional[bool] = False,
extension_fields: Optional[List[str]] = None,
extension_depth: int = 0,
max_extension_depth: Optional[int] = 2,
include_modifierExtension: Optional[bool] = False,
use_date_for: Optional[List[str]] = None,
parent_path: Optional[str] = "",
) -> Union[StructType, DataType]:
"""
Information about a medication that is used to support knowledge.
id: Unique id for the element within a resource (for internal references). This
may be any string value that does not contain spaces.
extension: May be used to represent additional information that is not part of the basic
definition of the element. To make the use of extensions safe and manageable,
there is a strict set of governance applied to the definition and use of
extensions. Though any implementer can define an extension, there is a set of
requirements that SHALL be met as part of the definition of the extension.
modifierExtension: May be used to represent additional information that is not part of the basic
definition of the element and that modifies the understanding of the element
in which it is contained and/or the understanding of the containing element's
descendants. Usually modifier elements provide negation or qualification. To
make the use of extensions safe and manageable, there is a strict set of
governance applied to the definition and use of extensions. Though any
implementer can define an extension, there is a set of requirements that SHALL
be met as part of the definition of the extension. Applications processing a
resource are required to check for modifier extensions.
Modifier extensions SHALL NOT change the meaning of any elements on Resource
or DomainResource (including cannot change the meaning of modifierExtension
itself).
dosage: Dosage for the medication for the specific guidelines.
indicationCodeableConcept: Indication for use that apply to the specific administration guidelines.
indicationReference: Indication for use that apply to the specific administration guidelines.
patientCharacteristics: Characteristics of the patient that are relevant to the administration
guidelines (for example, height, weight, gender, etc.).
"""
if extension_fields is None:
extension_fields = [
"valueBoolean",
"valueCode",
"valueDate",
"valueDateTime",
"valueDecimal",
"valueId",
"valueInteger",
"valuePositiveInt",
"valueString",
"valueTime",
"valueUnsignedInt",
"valueUri",
"valueUrl",
"valueReference",
"valueCodeableConcept",
"valueAddress",
]
from spark_fhir_schemas.r4.complex_types.extension import ExtensionSchema
from spark_fhir_schemas.r4.complex_types.medicationknowledge_dosage import (
MedicationKnowledge_DosageSchema,
)
from spark_fhir_schemas.r4.complex_types.codeableconcept import (
CodeableConceptSchema,
)
from spark_fhir_schemas.r4.complex_types.reference import ReferenceSchema
from spark_fhir_schemas.r4.complex_types.medicationknowledge_patientcharacteristics import (
MedicationKnowledge_PatientCharacteristicsSchema,
)
if (
max_recursion_limit
and nesting_list.count("MedicationKnowledge_AdministrationGuidelines")
>= max_recursion_limit
) or (max_nesting_depth and nesting_depth >= max_nesting_depth):
return StructType([StructField("id", StringType(), True)])
# add my name to recursion list for later
my_nesting_list: List[str] = nesting_list + [
"MedicationKnowledge_AdministrationGuidelines"
]
my_parent_path = (
parent_path + ".medicationknowledge_administrationguidelines"
if parent_path
else "medicationknowledge_administrationguidelines"
)
schema = StructType(
[
# Unique id for the element within a resource (for internal references). This
# may be any string value that does not contain spaces.
StructField("id", StringType(), True),
# May be used to represent additional information that is not part of the basic
# definition of the element. To make the use of extensions safe and manageable,
# there is a strict set of governance applied to the definition and use of
# extensions. Though any implementer can define an extension, there is a set of
# requirements that SHALL be met as part of the definition of the extension.
StructField(
"extension",
ArrayType(
ExtensionSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path,
)
),
True,
),
# May be used to represent additional information that is not part of the basic
# definition of the element and that modifies the understanding of the element
# in which it is contained and/or the understanding of the containing element's
# descendants. Usually modifier elements provide negation or qualification. To
# make the use of extensions safe and manageable, there is a strict set of
# governance applied to the definition and use of extensions. Though any
# implementer can define an extension, there is a set of requirements that SHALL
# be met as part of the definition of the extension. Applications processing a
# resource are required to check for modifier extensions.
#
# Modifier extensions SHALL NOT change the meaning of any elements on Resource
# or DomainResource (including cannot change the meaning of modifierExtension
# itself).
StructField(
"modifierExtension",
ArrayType(
ExtensionSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path,
)
),
True,
),
# Dosage for the medication for the specific guidelines.
StructField(
"dosage",
ArrayType(
MedicationKnowledge_DosageSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path,
)
),
True,
),
            # Indication for use that applies to the specific administration guidelines.
StructField(
"indicationCodeableConcept",
CodeableConceptSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path,
),
True,
),
            # Indication for use that applies to the specific administration guidelines.
StructField(
"indicationReference",
ReferenceSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path,
),
True,
),
# Characteristics of the patient that are relevant to the administration
# guidelines (for example, height, weight, gender, etc.).
StructField(
"patientCharacteristics",
ArrayType(
MedicationKnowledge_PatientCharacteristicsSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path,
)
),
True,
),
]
)
if not include_extension:
schema.fields = [
c
if c.name != "extension"
else StructField("extension", StringType(), True)
for c in schema.fields
]
if not include_modifierExtension:
schema.fields = [
c
if c.name != "modifierExtension"
else StructField("modifierExtension", StringType(), True)
for c in schema.fields
]
return schema
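# --- Hedged usage sketch (added; not part of the generated schema module) ---
# Assuming this module follows the sibling naming pattern and exposes a
# MedicationKnowledge_AdministrationGuidelinesSchema class whose get_schema()
# parameters all have defaults, the schema might be applied roughly like this:
#
#   schema = MedicationKnowledge_AdministrationGuidelinesSchema.get_schema(
#       max_recursion_limit=1,
#       include_extension=False,          # collapses "extension" to StringType
#       include_modifierExtension=False,  # collapses "modifierExtension" too
#   )
#   df = spark.read.schema(schema).json("administration_guidelines.json")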
| 49.776119 | 107 | 0.568516 | [
"Apache-2.0"
] | icanbwell/SparkFhirSchemas | spark_fhir_schemas/r4/complex_types/medicationknowledge_administrationguidelines.py | 13,340 | Python |
"""
LoSetup - command ``/usr/sbin/losetup -l``
==========================================
This parser reads the output of ``/usr/sbin/losetup -l`` into a list of entries.
Each entry is a dictionary of headers:
* ``NAME`` - the path name of the loop back device (strings)
* ``SIZELIMIT`` - the data end position of backing file in bytes (integer)
* ``OFFSET`` - the data start position of backing file in bytes (integer)
* ``AUTOCLEAR`` - the autoclear flag (boolean)
* ``RO`` - the read only flag (boolean)
* ``BACK-FILE`` - the path of the backing file (strings)
* ``DIO`` - the direct I/O flag (boolean)
* ``LOG-SEC`` - the logical sector size of the loop device in bytes (integer)
Sample output of ``losetup -l`` command is::
NAME SIZELIMIT OFFSET AUTOCLEAR RO BACK-FILE DIO LOG-SEC
/dev/loop0 0 0 0 0 /root/disk.img 1 512
Examples:
>>> type(losetup)
<class 'insights.parsers.losetup.LoSetup'>
>>> len(losetup)
1
>>> losetup[0]['NAME']
'/dev/loop0'
>>> losetup[0]['SIZELIMIT']
0
>>> losetup[0]['OFFSET']
0
>>> losetup[0]['AUTOCLEAR']
False
>>> losetup[0]['RO']
False
>>> losetup[0]['BACK-FILE']
'/root/disk.img'
    >>> losetup[0]['DIO']
True
>>> losetup[0]['LOG-SEC']
512
"""
from insights import CommandParser, parser
from insights.specs import Specs
from insights.parsers import parse_delimited_table, SkipException
@parser(Specs.losetup)
class LoSetup(CommandParser, list):
"""
Parses the output of the ``/usr/sbin/losetup -l`` command.
"""
def parse_content(self, content):
if not content:
raise SkipException("Empty output.")
self.extend(parse_delimited_table(content))
for entry in self:
for key in ['SIZELIMIT', 'OFFSET', 'LOG-SEC']:
if key in entry:
entry[key] = int(entry[key])
for key in ['AUTOCLEAR', 'RO', 'DIO']:
if key in entry:
                    entry[key] = entry[key] == '1'
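# --- Hedged usage sketch (added; not part of the parser module) ---
# The doctest fixture `losetup` above could be built in a test roughly like
# this, assuming the insights test helper `context_wrap` is available:
#
#   from insights.parsers.losetup import LoSetup
#   from insights.tests import context_wrap
#
#   OUTPUT = """
#   NAME       SIZELIMIT OFFSET AUTOCLEAR RO BACK-FILE      DIO LOG-SEC
#   /dev/loop0         0      0         0  0 /root/disk.img   1     512
#   """.strip()
#   losetup = LoSetup(context_wrap(OUTPUT))
#   assert losetup[0]['BACK-FILE'] == '/root/disk.img'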
| 29.635135 | 80 | 0.578203 | [
"Apache-2.0"
] | LightOfHeaven1994/insights-core | insights/parsers/losetup.py | 2,193 | Python |
import asyncio
import concurrent
from concurrent.futures import ThreadPoolExecutor
from unittest import TestCase
import os
import cv2
from apscheduler.schedulers.background import BackgroundScheduler
import bot
from bot.providers import trainer_matches as tm
from bot.duel_links_runtime import DuelLinkRunTime
from bot.providers import Steam
from bot.common import crop_image
from bot.shared import alphabet
from bot.utils.common import default_config
class TestSteam(TestCase):
provider = None
__debug_pictures__ = False
images_needed_debug = [
"street_replay.png",
"home_page_steam.png",
os.path.join("steam", "steam_pre_battle.png"),
os.path.join("steam", "steam_back.png")
]
def setUp(self):
os.environ['LOG_CFG'] = r'D:\Sync\OneDrive\Yu-gi-oh_bot\config.ini'
scheduler = BackgroundScheduler()
dlRuntime = DuelLinkRunTime(default_config(r'D:\Sync\OneDrive\Yu-gi-oh_bot'), scheduler, False)
self.provider = Steam(scheduler, default_config(r'D:\Sync\OneDrive\Yu-gi-oh_bot'), dlRuntime, False)
self.provider.sleep_factor = 0.0
self.loop = asyncio.get_event_loop()
self.loop.set_default_executor(ThreadPoolExecutor(2))
dlRuntime._loop = self.loop
self.provider.is_process_running()
def test_battle(self):
self.fail()
def test_check_if_battle(self):
location = os.path.join(self.provider.assets, "steam", "steam_pre_battle.png")
img = cv2.imread(location)
self.assertTrue(self.provider.check_if_battle(img), "Is Battle")
def test_check_battle_is_running(self):
self.fail()
def test_click_auto_duel(self):
self.provider.click_auto_duel()
def test_compare_with_back_button(self):
img = os.path.join(self.provider.assets, "steam", "steam_back.png")
t = tm.BoundingTrainer(img, bounding_area=self.provider.predefined.main_area)
location = os.path.join(self.provider.assets, "back__.png")
# t.show_area_bounded(self.provider.predefined.main_area, img)
# t._debug = True
self.assertTrue(t.get_matches(location, 3) is True, "Expecting a back button")
# t.compare()
def test_determine_autoduel_status(self):
self.fail()
def test_ensure_resolutions_matches(self):
location = os.path.join(self.provider.assets, "steam", "download_update.png")
img = cv2.imread(location)
self.provider.ensure_resolutions_matches(img)
img = img[0:100, 0:100]
with self.assertRaises(bot.providers.BotSetupError) as context:
self.provider.ensure_resolutions_matches(img)
def test_is_process_running(self):
self.fail()
def test_key_escape(self):
self.fail()
def test_kill_process(self):
self.fail()
def test_method_name(self):
self.fail()
def test_pass_through_initial_screen(self):
self.provider.is_process_running()
test_function = lambda x: x is False
with self.assertRaises(Exception) as context:
self.provider.__generic_wait_for__('DuelLinks Landing Page', test_function,
None)
self.assertTrue('Maximum exception count' in str(context.exception))
self.provider.sleep_factor = 0.5
self.assertTrue(callable(self.provider.__is_initial_screen__))
with self.assertRaises(concurrent.futures._base.TimeoutError) as context:
self.provider.__generic_wait_for__('DuelLinks Landing Page', test_function,
self.provider.__is_initial_screen__, timeout=5)
def test_start_process(self):
self.fail()
def test_scan(self):
self.fail()
def test_scan_for_ok(self):
img = os.path.join(self.provider.assets, "steam", "steam_back.png")
t = tm.BoundingTrainer(img, bounding_area=self.provider.predefined.main_area)
location = os.path.join(self.provider.assets, "back__.png")
# t.show_area_bounded(self.provider.predefined.main_area, img)
# t._debug = True
self.assertTrue(t.get_matches(location, 3) is True, "Expecting a back button")
# t.compare()
def test_scan_for_close(self):
img = os.path.join(self.provider.assets, "steam", "steam_close.png")
area = self.provider.predefined.main_area
area['width'] = 400
t = tm.BoundingTrainer(img, bounding_area=area)
location = os.path.join(self.provider.assets, "close.png")
# t.show_area_bounded(self.provider.predefined.main_area, img)
# t._debug = True
self.assertTrue(t.get_matches(location, 3) is True, "Expecting a back button")
img = os.path.join(self.provider.assets, "steam", "steam_ok.png")
t = tm.BoundingTrainer(img, bounding_area=area)
location = os.path.join(self.provider.assets, "close.png")
t.get_matches(location, 3)
# t.show_area_bounded(self.provider.predefined.main_area, img)
# t._debug = True
self.assertTrue(t.get_matches(location, 3) is False, "Is Ok button not close")
def test_scan_for_download(self):
img = os.path.join(self.provider.assets, "steam", "download_update.png")
t = tm.BoundingTrainer(img, 500, 300, 600, 300)
location = os.path.join(self.provider.assets, "download_button.png")
# t.show_area(500, 300, 600, 300, img)
self.assertTrue(t.get_matches(location, 3) is True, "Expecting a download button")
def test_swipe_right(self):
self.fail()
# self.provider.swipe_right(0)
def test_swipe_left(self):
self.fail()
# self.provider.swipe_left(0)
def test_swipe_time(self):
self.fail()
def test_swipe(self):
self.fail()
def test_tap(self):
x, y = self.provider.predefined.yugioh_initiate_link
self.provider.tap(x, y)
def test_verify_battle(self):
location = os.path.join(self.provider.assets, "steam", "duel_variant_autoduel.png")
img = cv2.imread(location)
points, version = self.provider.verify_battle(img)
self.assertTrue(version == 2)
def test_wait_for(self):
location = os.path.join(self.provider.assets, "steam", "ok_button_duel.png")
img = cv2.imread(location)
img = crop_image(img, **self.provider.predefined.ok_button_duel)
word = self.provider.img_to_string(img, alphabet).lower()
self.assertTrue(word == 'ok')
def test_wait_for_notifications(self):
self.fail()
def test_battle_icons(self):
self.provider.is_process_running()
img = self.provider.get_img_from_screen_shot()
area = self.provider.predefined.main_area
area['height'] = 700
t = tm.BoundingTrainer(img, bounding_area=area)
t.capture_white_circles()
| 37.912088 | 108 | 0.670725 | [
"MIT"
] | david252620/Yugioh-bot | tests/providers/test_steam_.py | 6,900 | Python |
from ..errors import MalformedResponseError
from ..properties import FailedMailbox, SearchableMailbox
from ..util import MNS, add_xml_child, create_element
from ..version import EXCHANGE_2013
from .common import EWSService
class GetSearchableMailboxes(EWSService):
"""MSDN:
https://docs.microsoft.com/en-us/exchange/client-developer/web-service-reference/getsearchablemailboxes-operation
"""
SERVICE_NAME = "GetSearchableMailboxes"
element_container_name = f"{{{MNS}}}SearchableMailboxes"
failed_mailboxes_container_name = f"{{{MNS}}}FailedMailboxes"
supported_from = EXCHANGE_2013
cls_map = {cls.response_tag(): cls for cls in (SearchableMailbox, FailedMailbox)}
def call(self, search_filter, expand_group_membership):
return self._elems_to_objs(
self._get_elements(
payload=self.get_payload(
search_filter=search_filter,
expand_group_membership=expand_group_membership,
)
)
)
def _elem_to_obj(self, elem):
return self.cls_map[elem.tag].from_xml(elem=elem, account=None)
def get_payload(self, search_filter, expand_group_membership):
payload = create_element(f"m:{self.SERVICE_NAME}")
if search_filter:
add_xml_child(payload, "m:SearchFilter", search_filter)
if expand_group_membership is not None:
add_xml_child(payload, "m:ExpandGroupMembership", "true" if expand_group_membership else "false")
return payload
def _get_elements_in_response(self, response):
for msg in response:
for container_name in (self.element_container_name, self.failed_mailboxes_container_name):
try:
container = self._get_element_container(message=msg, name=container_name)
except MalformedResponseError:
# Responses may contain no mailboxes of either kind. _get_element_container() does not accept this.
continue
yield from self._get_elements_in_container(container=container)
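# --- Hedged usage sketch (added; not part of this module) ---
# Driving the service from a Protocol instance; the constructor argument is
# assumed from the EWSService base class:
#
#   svc = GetSearchableMailboxes(protocol=account.protocol)
#   for item in svc.call(search_filter=None, expand_group_membership=True):
#       # yields SearchableMailbox entries and FailedMailbox entries
#       print(item)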
| 43.142857 | 119 | 0.690161 | [
"BSD-2-Clause"
] | cygnus9/exchangelib | exchangelib/services/get_searchable_mailboxes.py | 2,114 | Python |
#!/usr/bin/env python2
# coding=utf-8
# ^^^^^^^^^^^^ TODO remove when supporting only Python3
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
class WalletTest (BitcoinTestFramework):
def check_fee_amount(self, curr_balance, balance_with_fee, fee_per_byte, tx_size):
"""Return curr_balance after asserting the fee was in range"""
fee = balance_with_fee - curr_balance
target_fee = fee_per_byte * tx_size
if fee < target_fee:
raise AssertionError("Fee of %s CHAVEZCOIN too low! (Should be %s CHAVEZCOIN)"%(str(fee), str(target_fee)))
# allow the node's estimation to be at most 2 bytes off
if fee > fee_per_byte * (tx_size + 2):
raise AssertionError("Fee of %s CHAVEZCOIN too high! (Should be %s CHAVEZCOIN)"%(str(fee), str(target_fee)))
return curr_balance
def setup_chain(self):
print("Initializing test directory "+self.options.tmpdir)
initialize_chain_clean(self.options.tmpdir, 4)
def setup_network(self, split=False):
self.nodes = start_nodes(3, self.options.tmpdir)
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
self.is_network_split=False
self.sync_all()
def run_test (self):
# Check that there's no UTXO on none of the nodes
assert_equal(len(self.nodes[0].listunspent()), 0)
assert_equal(len(self.nodes[1].listunspent()), 0)
assert_equal(len(self.nodes[2].listunspent()), 0)
print "Mining blocks..."
self.nodes[0].generate(1)
walletinfo = self.nodes[0].getwalletinfo()
assert_equal(walletinfo['immature_balance'], 500)
assert_equal(walletinfo['balance'], 0)
self.sync_all()
self.nodes[1].generate(101)
self.sync_all()
assert_equal(self.nodes[0].getbalance(), 500)
assert_equal(self.nodes[1].getbalance(), 500)
assert_equal(self.nodes[2].getbalance(), 0)
# Check that only first and second nodes have UTXOs
assert_equal(len(self.nodes[0].listunspent()), 1)
assert_equal(len(self.nodes[1].listunspent()), 1)
assert_equal(len(self.nodes[2].listunspent()), 0)
# Send 210 CHAVEZCOIN from 0 to 2 using sendtoaddress call.
# Second transaction will be child of first, and will require a fee
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 110)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 100)
walletinfo = self.nodes[0].getwalletinfo()
assert_equal(walletinfo['immature_balance'], 0)
# Have node0 mine a block, thus it will collect its own fee.
self.nodes[0].generate(1)
self.sync_all()
# Exercise locking of unspent outputs
unspent_0 = self.nodes[2].listunspent()[0]
unspent_0 = {"txid": unspent_0["txid"], "vout": unspent_0["vout"]}
self.nodes[2].lockunspent(False, [unspent_0])
assert_raises(JSONRPCException, self.nodes[2].sendtoaddress, self.nodes[2].getnewaddress(), 200)
assert_equal([unspent_0], self.nodes[2].listlockunspent())
self.nodes[2].lockunspent(True, [unspent_0])
assert_equal(len(self.nodes[2].listlockunspent()), 0)
# Have node1 generate 100 blocks (so node0 can recover the fee)
self.nodes[1].generate(100)
self.sync_all()
# node0 should end up with 1000 CHAVEZCOIN in block rewards plus fees, but
# minus the 210 plus fees sent to node2
assert_equal(self.nodes[0].getbalance(), 1000-210)
assert_equal(self.nodes[2].getbalance(), 210)
# Node0 should have two unspent outputs.
# Create a couple of transactions to send them to node2, submit them through
# node1, and make sure both node0 and node2 pick them up properly:
node0utxos = self.nodes[0].listunspent(1)
assert_equal(len(node0utxos), 2)
# create both transactions
txns_to_send = []
for utxo in node0utxos:
inputs = []
outputs = {}
inputs.append({ "txid" : utxo["txid"], "vout" : utxo["vout"]})
outputs[self.nodes[2].getnewaddress("from1")] = utxo["amount"]
raw_tx = self.nodes[0].createrawtransaction(inputs, outputs)
txns_to_send.append(self.nodes[0].signrawtransaction(raw_tx))
# Have node 1 (miner) send the transactions
self.nodes[1].sendrawtransaction(txns_to_send[0]["hex"], True)
self.nodes[1].sendrawtransaction(txns_to_send[1]["hex"], True)
# Have node1 mine a block to confirm transactions:
self.nodes[1].generate(1)
self.sync_all()
assert_equal(self.nodes[0].getbalance(), 0)
assert_equal(self.nodes[2].getbalance(), 1000)
assert_equal(self.nodes[2].getbalance("from1"), 1000-210)
# Send 100 CHAVEZCOIN normal
address = self.nodes[0].getnewaddress("test")
fee_per_byte = Decimal('0.001') / 1000
self.nodes[2].settxfee(fee_per_byte * 1000)
txid = self.nodes[2].sendtoaddress(address, 100, "", "", False)
self.nodes[2].generate(1)
self.sync_all()
node_2_bal = self.check_fee_amount(self.nodes[2].getbalance(), Decimal('900'), fee_per_byte, count_bytes(self.nodes[2].getrawtransaction(txid)))
assert_equal(self.nodes[0].getbalance(), Decimal('100'))
# Send 100 CHAVEZCOIN with subtract fee from amount
txid = self.nodes[2].sendtoaddress(address, 100, "", "", True)
self.nodes[2].generate(1)
self.sync_all()
node_2_bal -= Decimal('100')
assert_equal(self.nodes[2].getbalance(), node_2_bal)
node_0_bal = self.check_fee_amount(self.nodes[0].getbalance(), Decimal('200'), fee_per_byte, count_bytes(self.nodes[2].getrawtransaction(txid)))
# Sendmany 100 CHAVEZCOIN
txid = self.nodes[2].sendmany('from1', {address: 100}, 0, "", [])
self.nodes[2].generate(1)
self.sync_all()
node_0_bal += Decimal('100')
node_2_bal = self.check_fee_amount(self.nodes[2].getbalance(), node_2_bal - Decimal('100'), fee_per_byte, count_bytes(self.nodes[2].getrawtransaction(txid)))
assert_equal(self.nodes[0].getbalance(), node_0_bal)
# Sendmany 100 CHAVEZCOIN with subtract fee from amount
txid = self.nodes[2].sendmany('from1', {address: 100}, 0, "", [address])
self.nodes[2].generate(1)
self.sync_all()
node_2_bal -= Decimal('100')
assert_equal(self.nodes[2].getbalance(), node_2_bal)
node_0_bal = self.check_fee_amount(self.nodes[0].getbalance(), node_0_bal + Decimal('100'), fee_per_byte, count_bytes(self.nodes[2].getrawtransaction(txid)))
# Test ResendWalletTransactions:
# Create a couple of transactions, then start up a fourth
# node (nodes[3]) and ask nodes[0] to rebroadcast.
# EXPECT: nodes[3] should have those transactions in its mempool.
txid1 = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1)
txid2 = self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 1)
sync_mempools(self.nodes)
self.nodes.append(start_node(3, self.options.tmpdir))
connect_nodes_bi(self.nodes, 0, 3)
sync_blocks(self.nodes)
relayed = self.nodes[0].resendwallettransactions()
assert_equal(set(relayed), {txid1, txid2})
sync_mempools(self.nodes)
assert(txid1 in self.nodes[3].getrawmempool())
# Exercise balance rpcs
assert_equal(self.nodes[0].getwalletinfo()["unconfirmed_balance"], 1)
assert_equal(self.nodes[0].getunconfirmedbalance(), 1)
#check if we can list zero value tx as available coins
#1. create rawtx
#2. hex-changed one output to 0.0
#3. sign and send
#4. check if recipient (node0) can list the zero value tx
usp = self.nodes[1].listunspent()
inputs = [{"txid":usp[0]['txid'], "vout":usp[0]['vout']}]
outputs = {self.nodes[1].getnewaddress(): 499.998, self.nodes[0].getnewaddress(): 11.11}
rawTx = self.nodes[1].createrawtransaction(inputs, outputs).replace("c0833842", "00000000") #replace 11.11 with 0.0 (int32)
decRawTx = self.nodes[1].decoderawtransaction(rawTx)
signedRawTx = self.nodes[1].signrawtransaction(rawTx)
decRawTx = self.nodes[1].decoderawtransaction(signedRawTx['hex'])
zeroValueTxid= decRawTx['txid']
sendResp = self.nodes[1].sendrawtransaction(signedRawTx['hex'])
self.sync_all()
self.nodes[1].generate(1) #mine a block
self.sync_all()
unspentTxs = self.nodes[0].listunspent() #zero value tx must be in listunspents output
found = False
for uTx in unspentTxs:
if uTx['txid'] == zeroValueTxid:
found = True
assert_equal(uTx['amount'], Decimal('0'))
assert(found)
#do some -walletbroadcast tests
stop_nodes(self.nodes)
wait_bitcoinds()
self.nodes = start_nodes(3, self.options.tmpdir, [["-walletbroadcast=0"],["-walletbroadcast=0"],["-walletbroadcast=0"]])
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
self.sync_all()
txIdNotBroadcasted = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 2)
txObjNotBroadcasted = self.nodes[0].gettransaction(txIdNotBroadcasted)
self.nodes[1].generate(1) #mine a block, tx should not be in there
self.sync_all()
assert_equal(self.nodes[2].getbalance(), node_2_bal) #should not be changed because tx was not broadcasted
#now broadcast from another node, mine a block, sync, and check the balance
self.nodes[1].sendrawtransaction(txObjNotBroadcasted['hex'])
self.nodes[1].generate(1)
self.sync_all()
node_2_bal += 2
txObjNotBroadcasted = self.nodes[0].gettransaction(txIdNotBroadcasted)
assert_equal(self.nodes[2].getbalance(), node_2_bal)
#create another tx
txIdNotBroadcasted = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 2)
#restart the nodes with -walletbroadcast=1
stop_nodes(self.nodes)
wait_bitcoinds()
self.nodes = start_nodes(3, self.options.tmpdir)
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
sync_blocks(self.nodes)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
node_2_bal += 2
        #tx should be added to balance because after restarting the nodes the tx should be broadcast
assert_equal(self.nodes[2].getbalance(), node_2_bal)
#send a tx with value in a string (PR#6380 +)
txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "2")
txObj = self.nodes[0].gettransaction(txId)
assert_equal(txObj['amount'], Decimal('-2'))
txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "0.0001")
txObj = self.nodes[0].gettransaction(txId)
assert_equal(txObj['amount'], Decimal('-0.0001'))
#check if JSON parser can handle scientific notation in strings
txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "1e-4")
txObj = self.nodes[0].gettransaction(txId)
assert_equal(txObj['amount'], Decimal('-0.0001'))
try:
txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "1f-4")
except JSONRPCException as e:
assert("Invalid amount" in e.error['message'])
else:
raise AssertionError("Must not parse invalid amounts")
try:
self.nodes[0].generate("2")
raise AssertionError("Must not accept strings as numeric")
except JSONRPCException as e:
assert("not an integer" in e.error['message'])
# Import address and private key to check correct behavior of spendable unspents
# 1. Send some coins to generate new UTXO
address_to_import = self.nodes[2].getnewaddress()
txid = self.nodes[0].sendtoaddress(address_to_import, 1)
self.nodes[0].generate(1)
self.sync_all()
# 2. Import address from node2 to node1
self.nodes[1].importaddress(address_to_import)
# 3. Validate that the imported address is watch-only on node1
assert(self.nodes[1].validateaddress(address_to_import)["iswatchonly"])
# 4. Check that the unspents after import are not spendable
assert_array_result(self.nodes[1].listunspent(),
{"address": address_to_import},
{"spendable": False})
# 5. Import private key of the previously imported address on node1
priv_key = self.nodes[2].dumpprivkey(address_to_import)
self.nodes[1].importprivkey(priv_key)
# 6. Check that the unspents are now spendable on node1
assert_array_result(self.nodes[1].listunspent(),
{"address": address_to_import},
{"spendable": True})
        #check if wallet or blockchain maintenance changes the balance
self.sync_all()
blocks = self.nodes[0].generate(2)
self.sync_all()
balance_nodes = [self.nodes[i].getbalance() for i in range(3)]
block_count = self.nodes[0].getblockcount()
# Check modes:
# - True: unicode escaped as \u....
# - False: unicode directly as UTF-8
for mode in [True, False]:
self.nodes[0].ensure_ascii = mode
# unicode check: Basic Multilingual Plane, Supplementary Plane respectively
for s in [u'рыба', u'𝅘𝅥𝅯']:
addr = self.nodes[0].getaccountaddress(s)
label = self.nodes[0].getaccount(addr)
assert_equal(label.encode('utf-8'), s.encode('utf-8')) # TODO remove encode(...) when supporting only Python3
assert(s in self.nodes[0].listaccounts().keys())
self.nodes[0].ensure_ascii = True # restore to default
# maintenance tests
maintenance = [
'-rescan',
'-reindex',
'-zapwallettxes=1',
'-zapwallettxes=2',
'-salvagewallet',
]
for m in maintenance:
print "check " + m
stop_nodes(self.nodes)
wait_bitcoinds()
self.nodes = start_nodes(3, self.options.tmpdir, [[m]] * 3)
while m == '-reindex' and [block_count] * 3 != [self.nodes[i].getblockcount() for i in range(3)]:
# reindex will leave rpc warm up "early"; Wait for it to finish
time.sleep(0.1)
assert_equal(balance_nodes, [self.nodes[i].getbalance() for i in range(3)])
# Exercise listsinceblock with the last two blocks
coinbase_tx_1 = self.nodes[0].listsinceblock(blocks[0])
assert_equal(coinbase_tx_1["lastblock"], blocks[1])
assert_equal(len(coinbase_tx_1["transactions"]), 1)
assert_equal(coinbase_tx_1["transactions"][0]["blockhash"], blocks[1])
assert_equal(len(self.nodes[0].listsinceblock(blocks[1])["transactions"]), 0)
if __name__ == '__main__':
WalletTest ().main ()
| 44.642857 | 165 | 0.6368 | [
"MIT"
] | chavezcoin-project/chavezcoin-D | qa/rpc-tests/wallet.py | 15,632 | Python |
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Transform library."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import string
import numpy as np
import pandas as pd
import pandas.util.testing as pandas_testing
from six.moves import range
import tensorflow.compat.v1 as tf
from correct_batch_effects_wdn import metadata
from correct_batch_effects_wdn import transform
_ACTIVITY = "ACTIVE"
_PLATE = "plate1"
_SITE = 0
_TIMEPOINT = "0"
_SEQUENCE = "AGCT"
_CELL_DENSITY = "0"
_PASSAGE = "0"
_CELL_LINE_ID = ""
class TransformTest(tf.test.TestCase):
def setUp(self):
super(TransformTest, self).setUp()
wells_384, rows_384, cols_384 = [], [], []
for row in string.ascii_uppercase[:16]:
for col in range(24):
wells_384.append("%s%02d" % (row, col))
rows_384.append("%s" % row)
cols_384.append("%02d" % col)
n_per_batch = 100
n_each_control = 3 * n_per_batch
n_other = 3 * n_per_batch
np.random.seed(123)
self.columns = [0, 1]
neg_control_batches = []
for i in range(0, n_each_control, n_per_batch):
batch = "week%d" % (i % n_per_batch)
control_tuples = []
for j in range(n_per_batch):
control_tuples.append(
("NEGATIVE_CONTROL", "DMSO", "DMSO", 1.0, _ACTIVITY, batch, _PLATE,
wells_384[j], rows_384[j], cols_384[j], _SITE, _TIMEPOINT,
_SEQUENCE, _CELL_DENSITY, _PASSAGE, _CELL_LINE_ID))
neg_control_batches.append(
pd.DataFrame(
np.random.multivariate_normal(
mean=np.array([2.0 + i, 4.0 + i]),
cov=np.array([[3.0 + i, 1.0 + i], [1.0 + i, 2.0 + i]]),
size=n_per_batch),
columns=self.columns,
index=pd.MultiIndex.from_tuples(
control_tuples, names=metadata.METADATA_ORDER)))
self.neg_controls = pd.concat(neg_control_batches)
pos_control_batches = []
for i in range(0, n_each_control, n_per_batch):
batch = "week%d" % (i % n_per_batch)
control_tuples = []
for j in range(n_per_batch):
control_tuples.append(
("POSITIVE_CONTROL", "Taxol", "Taxol", 1.0, _ACTIVITY, batch,
_PLATE, wells_384[j], rows_384[j], cols_384[j], _SITE, _TIMEPOINT,
_SEQUENCE, _CELL_DENSITY, _PASSAGE, _CELL_LINE_ID))
pos_control_batches.append(
pd.DataFrame(
np.random.multivariate_normal(
mean=np.array([5.0 + i, 7.0 + i]),
cov=np.array([[6.0 + i, 4.0 + i], [4.0 + i, 5.0 + i]]),
size=n_per_batch),
columns=self.columns,
index=pd.MultiIndex.from_tuples(
control_tuples, names=metadata.METADATA_ORDER)))
self.pos_controls = pd.concat(pos_control_batches)
self.controls = pd.concat([self.neg_controls, self.pos_controls])
experimental_batches = []
for i in range(0, n_other, n_per_batch):
batch = "week%d" % (i % n_per_batch)
experimental_tuples = []
for j in range(n_per_batch):
experimental_tuples.append(
("EXPERIMENTAL", "other", "2", 1.0, _ACTIVITY, batch, _PLATE,
wells_384[j], rows_384[j], cols_384[j], _SITE, _TIMEPOINT,
_SEQUENCE, _CELL_DENSITY, _PASSAGE, _CELL_LINE_ID))
experimental_batches.append(
pd.DataFrame(
np.random.multivariate_normal(
mean=np.array([1.0 + i, 2.0 + i]),
cov=np.array([[3.0 + i, 1.0 + i], [1.0 + i, 2.0 + i]]),
size=n_per_batch),
columns=self.columns,
index=pd.MultiIndex.from_tuples(
experimental_tuples, names=metadata.METADATA_ORDER)))
self.experimental = pd.concat(experimental_batches)
self.data = pd.concat([self.controls, self.experimental])
def testGetNegativeControls(self):
pandas_testing.assert_frame_equal(self.neg_controls,
transform.get_negative_controls(
self.data))
def testEigSymmetric(self):
q_expected = np.array([[1.0 / np.sqrt(2), -1.0 / np.sqrt(2)],
[1.0 / np.sqrt(2), 1.0 / np.sqrt(2)]])
# q should be orthonormal - make sure it really is
pandas_testing.assert_almost_equal(
q_expected.T.dot(q_expected), np.identity(2))
lambda_expected = np.diag([3.0, 2.0])
a = q_expected.dot(lambda_expected).dot(q_expected.T)
lambda_computed, q_computed = transform.eig_symmetric(a)
pandas_testing.assert_almost_equal(
np.diag(lambda_expected), lambda_computed)
    # make sure q_computed is orthonormal
    pandas_testing.assert_almost_equal(
        np.identity(2), q_computed.T.dot(q_computed))
for i in range(q_expected.shape[0]):
ev_expected = q_expected[:, i]
ev_computed = q_computed[:, i]
# In this example, the eigenvalues are discrete, so the eigenvectors are
# unique up to sign. Since the sign will depend on the particulars of
# the algorithm used to generate the eigenvectors, just make sure that
# the dot product with the expected eigenvectors is +/- 1
pandas_testing.assert_almost_equal(1.0,
np.abs(ev_expected.dot(ev_computed)))
def testFactorAnalysisRun(self):
transform.factor_analysis(self.data, 0.1, -1)
def testGetBootstrapSampleRun(self):
bootstrap_data = transform.get_bootstrap_sample(self.data)
self.assertTupleEqual(self.data.shape, bootstrap_data.shape)
def testTransformDf(self):
df_small = pd.DataFrame(np.array([[1.0, 2.0], [3.0, 4.0]]))
rotate_mat_np = np.array([[3.0, 4.0], [5.0, 6.0]])
shift_vec_np = np.array([[-1.0], [-2.0]])
expected = pd.DataFrame(np.array([[10.0, 15.0], [24.0, 37.0]]))
df_trans = transform.transform_df(
df_small, rotate_mat_np, shift_vec_np)
pandas_testing.assert_frame_equal(df_trans, expected)
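  # Note (added): transform_df appears to apply the affine map
  #   y = rotate_mat_np @ x + shift_vec_np
  # to each row x of df_small. For the first row x = [1, 2]:
  #   [[3, 4], [5, 6]] @ [1, 2] + [-1, -2] = [11 - 1, 17 - 2] = [10, 15],
  # which matches the first row of `expected` above.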
def testSumOfSquare(self):
a = tf.constant(np.array([1.0, 2.0]))
expected = 5.0
with self.session() as sess:
a_sum_of_square = sess.run(transform.sum_of_square(a))
self.assertEqual(a_sum_of_square, expected)
def testDropUnevaluatedComp(self):
pandas_testing.assert_frame_equal(
pd.concat([self.pos_controls, self.experimental]),
transform.drop_unevaluated_comp(self.data))
if __name__ == "__main__":
tf.test.main()
| 38.868132 | 79 | 0.641787 | [
"Apache-2.0"
] | MeghnaNatraj/google-research | correct_batch_effects_wdn/transform_test.py | 7,074 | Python |
import _plotly_utils.basevalidators
class XValidator(_plotly_utils.basevalidators.InfoArrayValidator):
def __init__(
self, plotly_name='x', parent_name='layout.ternary.domain', **kwargs
):
super(XValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop('edit_type', 'plot'),
items=kwargs.pop(
'items', [
{
'valType': 'number',
'min': 0,
'max': 1,
'editType': 'plot'
}, {
'valType': 'number',
'min': 0,
'max': 1,
'editType': 'plot'
}
]
),
role=kwargs.pop('role', 'info'),
**kwargs
)
| 29.870968 | 76 | 0.399568 | [
"MIT"
] | Abd-Elrazek/plotly.py | plotly/validators/layout/ternary/domain/_x.py | 926 | Python |
# -*- coding=utf-8 -*-
"""
# library: jionlp
# author: dongrixinyu
# license: Apache License 2.0
# Email: dongrixinyu.89@163.com
# github: https://github.com/dongrixinyu/JioNLP
# description: Preprocessing tool for Chinese NLP
"""
__version__ = '1.3.49'
import os
from jionlp.util.logger import set_logger
from jionlp.util.zip_file import unzip_file, UNZIP_FILE_LIST
logging = set_logger(level='INFO', log_dir_name='.jionlp_logs')
# unzip dictionary files
DIR_PATH = os.path.dirname(os.path.abspath(__file__))
for file_name in UNZIP_FILE_LIST:
if not os.path.exists(os.path.join(DIR_PATH, 'dictionary', file_name)):
unzip_file()
history = """
╭──────────────────────────────────────────────────────────────────────────╮
│ • • • ░░░░░░░░░░░░░░░░░░░░░ History Messages ░░░░░░░░░░░░░░░░░░░░░░░░░ │
├──────────────────────────────────────────────────────────────────────────┤
│ │
│ JioNLP, a python tool for Chinese NLP preprocessing & parsing. │
│ URL: https://github.com/dongrixinyu/JioNLP │
│ │
│ | date | updated funcs and info | │
│ | ---------- | --------------------------------------------------- | │
│ | 2020-03-13 | first push | │
│ | 2020-03-18 | update rules | │
│ | 2020-03-24 | add traditional and simplified conversion | │
│ | 2020-03-26 | add location parser 2019 | │
│ | 2020-03-31 | add sentences splitter | │
│ | 2020-04-02 | add id chard parser | │
│ | 2020-04-03 | add stopwords remover | │
│ | 2020-04-26 | add pinyin and location recognizer | │
│ | 2020-05-26 | add chinese word, char, xiehouyu dict | │
│ | 2020-06-01 | add ner tools | │
│ | 2020-06-10 | add location recognizer | │
│ | 2020-06-30 | add char radical parser | │
│ | 2020-07-07 | add ner acceleration tools and lexicon ner tool | │
│ | 2020-07-13 | add sim hash tool | │
│ | 2020-07-14 | add sentiment analysis | │
│ | 2020-07-27 | add key phrase extraction - ckpe | │
│ | 2020-08-24 | update pinyin | │
│ | 2020-09-14 | add back translation for data augmentation | │
│ | 2020-10-16 | update 2020 china location dictionary | │
│ | 2020-10-19 | add zip_file for compressing the size of dict files | │
│ | 2020-11-10 | add extractive summary func | │
│ | 2020-11-24 | add phone location recognition | │
│ | 2020-12-18 | add idiom solitaire | │
│ | 2020-12-28 | add help searching tool | │
│ | 2021-01-19 | add money number to character tool | │
│ | 2021-01-22 | update outdated china location conversion | │
│ | 2021-02-01 | acquire 400 stars and 58 forks on Github | │
│ | 2021-02-02 | add swap char position text augmentation | │
│ | 2021-02-09 | add homophone and add & delete text augmentation | │
│ | 2021-02-10 | update dictionaries | │
│ | 2021-03-15 | update chinese char dictionaries | │
│ | 2021-03-18 | add replace entity text augmentation | │
│ | 2021-03-24 | update extract money and standardization | │
│ | 2021-04-21 | add solar lunar date conversion | │
│ | 2021-06-23 | add time parser | │
│ | 2021-07-04 | update time parser | │
│ | 2021-07-18 | update time parser | │
│ | 2021-09-01 | add jionlp online version | │
│ | 2021-10-25 | update extract money and parse money | │
│ | 2021-11-10 | add logger tuner | │
│ | 2021-12-04 | add chinese word segmentor tools | │
│ │
╰──────────────────────────────────────────────────────────────────────────╯
"""
from jionlp.util import *
from jionlp.dictionary import *
from jionlp.rule import *
from jionlp.gadget import *
from jionlp.textaug import *
from jionlp.algorithm import *
# from jionlp.util.fast_loader import FastLoader
# rule = FastLoader('rule', globals(), 'jionlp.rule')
| 52.659574 | 76 | 0.43697 | [
"Apache-2.0"
] | FYWinds/JioNLP | jionlp/__init__.py | 5,700 | Python |
"""
This module contains the lambda function code for put-storage-tags API.
This file uses environment variables in place of config; thus
sddcapi_boot_dir is not required.
"""
# pylint: disable=import-error,logging-format-interpolation,broad-except,too-many-statements,C0413,W1203,R1703,R0914
import boto3
import botocore.exceptions
import os
import sys
import json
import traceback
from cloudx_sls_authorization import lambda_auth
THISDIR = os.path.dirname(__file__) # boto3-proxy
APPDIR = os.path.dirname(THISDIR) # boto3-proxy
if APPDIR not in sys.path:
sys.path.append(APPDIR)
if THISDIR not in sys.path:
sys.path.append(THISDIR)
from utils import api_request
from utils import helpers, secrets
from utils.exceptions import InvalidRegionException, InvalidInputException
from utils.log_helper import Logger
logger = Logger()
# Define LDAP lookup configs
LDAP_SERVER = os.environ['LDAP_SERVER']
LDAP_USERNAME = os.environ['LDAP_USERNAME']
LDAP_PASSWORD_SECRET_NAME = os.environ['LDAP_PASSWORD_SECRET_NAME']
LDAP_SEARCH_BASE = os.environ['LDAP_SEARCH_BASE']
LDAP_OBJECT_CLASS = os.environ['LDAP_OBJECT_CLASS']
LDAP_GROUP_NAME = os.environ['LDAP_GROUP_NAME'].split(',')
LDAP_LOOKUP_ATTRIBUTE = os.environ['LDAP_LOOKUP_ATTRIBUTE']
MSFT_IDP_TENANT_ID = os.environ['MSFT_IDP_TENANT_ID']
MSFT_IDP_APP_ID = os.environ['MSFT_IDP_APP_ID'].split(',')
MSFT_IDP_CLIENT_ROLES = os.environ['MSFT_IDP_CLIENT_ROLES'].split(',')
# Status codes
SUCCESS_STATUS = 200 # success
BAD_REQUEST_STATUS = 400 # service not supported, action not supported
NOT_FOUND_STATUS = 404 # invalid account, invalid region
UNAUTHORIZED_STATUS = 401 # invalid auth token
INTERNAL_SERVICE_ERROR_STATUS = 500 # internal service error
def handler(event, context):
"""
Boto3 Proxy API Handler
"""
headers = event.get('Headers', event.get('headers'))
if 'request-context-id' in headers:
logger.set_uuid(headers['request-context-id'])
logger.info({"Incoming event": event})
logger.info('Incoming context: %s', context)
    request_body = json.loads(event.get('body') or '{}')
try:
# Define service client
secrets_client = boto3.client('secretsmanager')
lambda_auth.authorize_lambda_request(event, MSFT_IDP_TENANT_ID, MSFT_IDP_APP_ID,
MSFT_IDP_CLIENT_ROLES, LDAP_SERVER, LDAP_USERNAME,
secrets.retrieve_ldap_password(secrets_client,
logger,
LDAP_PASSWORD_SECRET_NAME
),
LDAP_SEARCH_BASE,
LDAP_OBJECT_CLASS, LDAP_GROUP_NAME, LDAP_LOOKUP_ATTRIBUTE)
# Get the SSM client
ssm_client = boto3.client('ssm')
except Exception as e:
traceback.print_exc()
return {
'statusCode': UNAUTHORIZED_STATUS,
'body': json.dumps({'error': f"Unauthorized. {str(e)}"})
}
# Get environment variables
resp_headers = {
'Content-Type': 'application/json',
"request-context-id": logger.get_uuid()
}
if hasattr(context, 'local_test'):
logger.info('Running at local')
path_params = event.get('pathParameters', {})
request_headers = event.get('headers', {})
vpcxiam_endpoint = os.environ.get('vpcxiam_endpoint')
vpcxiam_scope = os.environ.get('vpcxiam_scope')
vpcxiam_host = os.environ.get('vpcxiam_host')
# Set the default success response and status code
resp = {
'message': 'API action has been successfully completed'
}
status_code = SUCCESS_STATUS
try:
account = path_params.get('account-id')
region = path_params.get('region-name')
service = path_params.get('boto3-service')
action = path_params.get('boto3-action')
logger.info(f"Account: {account}")
logger.info(f"Region: {region}")
logger.info(f"Boto3 Service: {service}")
logger.info(f"Boto3 Action: {action}")
# is authorized?
logger.info(f'is_authorized({request_headers}, {MSFT_IDP_APP_ID}, '
f'{MSFT_IDP_TENANT_ID}, {MSFT_IDP_CLIENT_ROLES}')
# Get the credentials for the account resources will be created in.
url = (vpcxiam_endpoint +
f"/v1/accounts/{account}/roles/admin/credentials")
scope = vpcxiam_scope
additional_headers = {
'Host': vpcxiam_host
}
api_requests = api_request.ApiRequests()
credentials = json.loads(
(api_requests.request(url=url, method='get', scope=scope, additional_headers=additional_headers)).text
)
error = credentials.get('error', {})
if error:
logger.error(error)
raise ValueError(error)
credentials = credentials.get('credentials', {})
try:
# Validate service and if valid, get the allowed actions
ssm_parameter_name = '/vpcx/aws/boto3-proxy/allowed-actions/'+service
logger.info("Looking up parameter "+ssm_parameter_name)
allowed_actions = ssm_client.get_parameter(Name=ssm_parameter_name)
except botocore.exceptions.ClientError as err:
logger.error(err)
if err.response['Error']['Code'] == 'ParameterNotFound':
raise InvalidInputException("Service " + service + " is not an allowed service for the API")
else:
                raise err
# Validate action
if action not in allowed_actions['Parameter']['Value']:
raise InvalidInputException("Action "+action+" is not an allowed action for the API")
# Validate region
ec2_client = boto3.client(
service_name='ec2',
aws_access_key_id=credentials.get('AccessKeyId', ''),
aws_secret_access_key=credentials.get('SecretAccessKey', ''),
aws_session_token=credentials.get('SessionToken', ''))
helpers.is_region_valid(ec2_client, region)
logger.info(f"{region} is a valid region")
# Create clients for the given region and given account
boto3_client = boto3.client(
service_name=service,
region_name=region,
aws_access_key_id=credentials.get('AccessKeyId', ''),
aws_secret_access_key=credentials.get('SecretAccessKey', ''),
aws_session_token=credentials.get('SessionToken', ''))
# Call the action (function) for the boto3 client's service with the request params
kwargs = request_body
getattr(boto3_client, action)(**kwargs)
# boto3 error
except botocore.exceptions.ClientError as err:
status_code = INTERNAL_SERVICE_ERROR_STATUS
resp = {
'error': f'{type(err).__name__}: {err}'
}
except InvalidRegionException:
status_code = NOT_FOUND_STATUS
resp = {
'error': 'Please enter a valid region in the url path'
}
except InvalidInputException as err:
status_code = BAD_REQUEST_STATUS
resp = {
'error': str(err)
}
except ValueError as err:
status_code = NOT_FOUND_STATUS
resp = {
'error': str(err)
}
except Exception as err:
status_code = INTERNAL_SERVICE_ERROR_STATUS
resp = {
'error': f'{type(err).__name__}: {err}'
}
resp = helpers.lambda_returns(status_code, resp_headers, json.dumps(resp))
logger.info(f'response: {resp}')
return resp
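# --- Hedged example request (added; names and values are illustrative only) ---
# The path parameters select the boto3 client and method, and the JSON body is
# passed straight through as **kwargs to that method:
#
#   event = {
#       "pathParameters": {
#           "account-id": "123456789012",
#           "region-name": "us-east-1",
#           "boto3-service": "ec2",
#           "boto3-action": "create_tags",
#       },
#       "headers": {"request-context-id": "abc-123"},
#       "body": json.dumps({
#           "Resources": ["i-0123456789abcdef0"],
#           "Tags": [{"Key": "owner", "Value": "vpcx"}],
#       }),
#   }
#   response = handler(event, context)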
| 38.567164 | 116 | 0.634675 | [
"MIT-0"
] | aws-samples/boto3-proxy-api-sls | boto3_proxy/index.py | 7,752 | Python |
from unittest.mock import patch
from django.core.management import call_command
from django.db.utils import OperationalError
from django.test import TestCase
class CommandTests(TestCase):
def test_wait_for_db_ready(self):
"""Test waiting for db when db is available"""
with patch('django.db.utils.ConnectionHandler.__getitem__') as gi:
gi.return_value = True
call_command('wait_for_db')
self.assertEqual(gi.call_count, 1)
            # Check that the mock object gi was called only once
@patch('time.sleep', return_value=True)
# The patch as a decorator will pass the argument to the test below it
def test_wait_for_db(self, ts):
"""Test waiting for db"""
        # When the ConnectionHandler raises OperationalError, the command waits
        # for 1 second and tries again. That delay is removed from this unit
        # test by patching time.sleep with the decorator above.
with patch('django.db.utils.ConnectionHandler.__getitem__') as gi:
gi.side_effect = [OperationalError] * 5 + [True]
# For 1st 5 tries, it raises OperationalError, then 6th time True
call_command('wait_for_db')
self.assertEqual(gi.call_count, 6)
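# --- Hedged sketch (added): the management command these tests exercise ---
# Not part of this file; an implementation consistent with the patched targets
# would live at core/management/commands/wait_for_db.py and look roughly like:
#
#   import time
#   from django.db import connections
#   from django.db.utils import OperationalError
#   from django.core.management.base import BaseCommand
#
#   class Command(BaseCommand):
#       """Pause execution until the database becomes available."""
#
#       def handle(self, *args, **options):
#           self.stdout.write('Waiting for database...')
#           db_conn = None
#           while not db_conn:
#               try:
#                   db_conn = connections['default']
#               except OperationalError:
#                   self.stdout.write('Database unavailable, waiting 1 second...')
#                   time.sleep(1)
#           self.stdout.write(self.style.SUCCESS('Database available!'))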
| 40.933333 | 79 | 0.679153 | [
"MIT"
] | anirudhs1998/recipe-app-api | app/core/tests/test_commands.py | 1,228 | Python |
#coding=utf-8
#
# Copyright (C) 2015 Feigr TECH Co., Ltd. All rights reserved.
# Created on 2013-8-13, by Junn
#
#
#import settings
from django.middleware.csrf import get_token
from django.http.response import Http404
from django.core.exceptions import PermissionDenied
from rest_framework.generics import GenericAPIView
from rest_framework import exceptions, status
from rest_framework.response import Response
from core.authentication import CsrfError
from utils.http import JResponse
from core import codes
def csrf_failure(request, reason=''):
"""
    Customize the response returned when the csrf_token is invalid.
"""
# if request.is_ajax():
# return JResponse(codes.get('csrf_invalid'))
# return
get_token(request)
return JResponse(codes.get('csrf_invalid'), status=403)
class CustomAPIView(GenericAPIView):
"""
    Customized APIView that returns project-specific error responses for exceptions.
"""
def handle_exception(self, exc):
"""
Handle any exception that occurs, by returning an appropriate response,
or re-raising the error.
"""
if isinstance(exc, exceptions.Throttled):
# Throttle wait header
self.headers['X-Throttle-Wait-Seconds'] = '%d' % exc.wait
if isinstance(exc, (exceptions.NotAuthenticated,
exceptions.AuthenticationFailed)):
# WWW-Authenticate header for 401 responses, else coerce to 403
auth_header = self.get_authenticate_header(self.request)
if auth_header:
self.headers['WWW-Authenticate'] = auth_header
else:
exc.status_code = status.HTTP_403_FORBIDDEN
if isinstance(exc, exceptions.MethodNotAllowed):
return Response(codes.get('invalid_request_method'),
status=exc.status_code,
exception=True)
elif isinstance(exc, CsrfError):
return Response(codes.get('csrf_invalid'),
status=exc.status_code,
exception=True)
elif isinstance(exc, exceptions.ParseError):
return Response(codes.get('parse_error'),
status=exc.status_code,
exception=True)
elif isinstance(exc, exceptions.AuthenticationFailed):
return Response(codes.get('authentication_failed'),
status=exc.status_code,
exception=True)
elif isinstance(exc, exceptions.NotAuthenticated):
return Response(codes.get('not_authenticated'),
status=exc.status_code,
exception=True)
elif isinstance(exc, exceptions.PermissionDenied):
return Response(codes.get('permission_denied'),
status=exc.status_code,
exception=True)
elif isinstance(exc, exceptions.NotAcceptable):
return Response(codes.get('not_acceptable'),
status=exc.status_code,
exception=True)
elif isinstance(exc, exceptions.UnsupportedMediaType):
return Response(codes.get('unsupported_media_type'),
status=exc.status_code,
exception=True)
elif isinstance(exc, exceptions.Throttled):
return Response(codes.get('throttled'),
status=exc.status_code,
exception=True)
elif isinstance(exc, Http404):
return Response(codes.get('not_found'),
status=status.HTTP_404_NOT_FOUND,
exception=True)
elif isinstance(exc, PermissionDenied):
return Response(codes.get('permission_denied'),
status=status.HTTP_403_FORBIDDEN,
exception=True)
raise
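# --- Hedged usage sketch (added; not part of this module) ---
# Project views would subclass CustomAPIView so DRF exceptions are translated
# into the unified error codes; the 'ok' code below is assumed to exist:
#
#   class PingView(CustomAPIView):
#       def get(self, request):
#           return Response(codes.get('ok'))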
| 35.857143 | 79 | 0.588396 | [
"MIT"
] | dlooto/driver-vision | apps/core/views.py | 4,016 | Python |
import os
from pathlib import Path
from dataclasses import field
from typing import Dict, Tuple, Sequence
from pydantic.dataclasses import dataclass
from pydantic import StrictStr
@dataclass
class StreetViewConfig:
SIZE: str = "600x300"
HEADING: str = "151.78"
PITCH: str = "-0.76"
KEY = os.environ.get("GOOGLE_DEV_API_KEY")
LOCAL_IMAGE_FOLDER: str = f"{Path(__file__).resolve().parent.parent.parent.parent.parent.parent.parent.parent}/local_data/streetview_images"
LOCAL_LINKS_FOLDER: str = f"{Path(__file__).resolve().parent.parent.parent.parent.parent.parent.parent.parent}/local_data/streetview_links"
LOCAL_METADATA_FOLDER: str = f"{Path(__file__).resolve().parent.parent.parent.parent.parent.parent.parent.parent}/local_data/streetview_metadata"
PLACE = "Iraqi_Kurdistan"
META_BASE = "https://maps.googleapis.com/maps/api/streetview/metadata?"
@dataclass
class OSMConfig:
TAGS = {"building": "residential"}
PLACE = "Iraq"
# NAME = "Iraqi Kurdistan"
# ADMIN_LEVEL = 3
@dataclass
class DataConfig:
COUNTRY_CODES = ["IQ"]
YEAR: int = 2020
MON_START: int = 1
DATE_START: int = 1
YEAR_END: int = 2021
MON_END: int = 8
DATE_END: int = 22
PLACE = "Iraqi Kurdistan, Iraq"
BASE_FOLDER = "/ee_data"
LANDSAT_IMAGE_COLLECTION: str = "LANDSAT/LC08/C01/T1"
MODEL_NAME = "CH4"
LANDSAT_IMAGE_BAND: Sequence[str] = field(
default_factory=lambda: ["B4", "B3", "B2"]
)
NIGHTTIME_LIGHT_IMAGE_COLLECTION: str = "NOAA/VIIRS/DNB/MONTHLY_V1/VCMCFG"
NIGHTTIME_LIGHT_IMAGE_BAND: str = "avg_rad"
METEROLOGICAL_IMAGE_COLLECTION: str = "NOAA/GFS0P25"
METEROLOGICAL_IMAGE_BAND: Sequence[str] = field(
default_factory=lambda: [
"temperature_2m_above_ground",
"relative_humidity_2m_above_ground",
"total_precipitation_surface",
"total_cloud_cover_entire_atmosphere",
"u_component_of_wind_10m_above_ground",
"v_component_of_wind_10m_above_ground",
]
)
POPULATION_IMAGE_COLLECTION: str = (
"CIESIN/GPWv411/GPW_Basic_Demographic_Characteristics"
)
POPULATION_IMAGE_BAND = "basic_demographic_characteristics"
LAND_COVER_IMAGE_COLLECTION: str = "COPERNICUS/Landcover/100m/Proba-V-C3/Global"
LAND_COVER_IMAGE_BAND: str = "discrete_classification"
IMAGE_FOLDER = "local_data/image_folder"
COUNTRY_BOUNDING_BOXES: Dict[
StrictStr, Tuple[StrictStr, Tuple[float, float, float, float]]
] = field(
default_factory=lambda: {
"AF": (
"Afghanistan",
(60.5284298033, 29.318572496, 75.1580277851, 38.4862816432),
),
"AO": (
"Angola",
(11.6400960629, -17.9306364885, 24.0799052263, -4.43802336998),
),
"AL": (
"Albania",
(19.3044861183, 39.624997667, 21.0200403175, 42.6882473822),
),
"AE": (
"United Arab Emirates",
(51.5795186705, 22.4969475367, 56.3968473651, 26.055464179),
),
"AR": (
"Argentina",
(-73.4154357571, -55.25, -53.628348965, -21.8323104794),
),
"AM": (
"Armenia",
(43.5827458026, 38.7412014837, 46.5057198423, 41.2481285671),
),
"AQ": ("Antarctica", (-180.0, -90.0, 180.0, -63.2706604895)),
"TF": ("Fr. S. and Antarctic Lands", (68.72, -49.775, 70.56, -48.625)),
"AU": (
"Australia",
(113.338953078, -43.6345972634, 153.569469029, -10.6681857235),
),
"AT": (
"Austria",
(9.47996951665, 46.4318173285, 16.9796667823, 49.0390742051),
),
"AZ": (
"Azerbaijan",
(44.7939896991, 38.2703775091, 50.3928210793, 41.8606751572),
),
"BI": (
"Burundi",
(29.0249263852, -4.49998341229, 30.752262811, -2.34848683025),
),
"BE": (
"Belgium",
(2.51357303225, 49.5294835476, 6.15665815596, 51.4750237087),
),
"BJ": (
"Benin",
(0.772335646171, 6.14215770103, 3.79711225751, 12.2356358912),
),
"BF": (
"Burkina Faso",
(-5.47056494793, 9.61083486576, 2.17710778159, 15.1161577418),
),
"BD": (
"Bangladesh",
(88.0844222351, 20.670883287, 92.6727209818, 26.4465255803),
),
"BG": (
"Bulgaria",
(22.3805257504, 41.2344859889, 28.5580814959, 44.2349230007),
),
"BS": ("Bahamas", (-78.98, 23.71, -77.0, 27.04)),
"BA": ("Bosnia and Herz.", (15.7500260759, 42.65, 19.59976, 45.2337767604)),
"BY": (
"Belarus",
(23.1994938494, 51.3195034857, 32.6936430193, 56.1691299506),
),
"BZ": (
"Belize",
(-89.2291216703, 15.8869375676, -88.1068129138, 18.4999822047),
),
"BO": (
"Bolivia",
(-69.5904237535, -22.8729187965, -57.4983711412, -9.76198780685),
),
"BR": (
"Brazil",
(-73.9872354804, -33.7683777809, -34.7299934555, 5.24448639569),
),
"BN": (
"Brunei",
(114.204016555, 4.007636827, 115.450710484, 5.44772980389),
),
"BT": (
"Bhutan",
(88.8142484883, 26.7194029811, 92.1037117859, 28.2964385035),
),
"BW": (
"Botswana",
(19.8954577979, -26.8285429827, 29.4321883481, -17.6618156877),
),
"CF": (
"Central African Rep.",
(14.4594071794, 2.2676396753, 27.3742261085, 11.1423951278),
),
"CA": ("Canada", (-140.99778, 41.6751050889, -52.6480987209, 83.23324)),
"CH": (
"Switzerland",
(6.02260949059, 45.7769477403, 10.4427014502, 47.8308275417),
),
"CL": ("Chile", (-75.6443953112, -55.61183, -66.95992, -17.5800118954)),
"CN": (
"China",
(73.6753792663, 18.197700914, 135.026311477, 53.4588044297),
),
"CI": (
"Ivory Coast",
(-8.60288021487, 4.33828847902, -2.56218950033, 10.5240607772),
),
"CM": (
"Cameroon",
(8.48881554529, 1.72767263428, 16.0128524106, 12.8593962671),
),
"CD": (
"Congo (Kinshasa)",
(12.1823368669, -13.2572266578, 31.1741492042, 5.25608775474),
),
"CG": (
"Congo (Brazzaville)",
(11.0937728207, -5.03798674888, 18.4530652198, 3.72819651938),
),
"CO": (
"Colombia",
(-78.9909352282, -4.29818694419, -66.8763258531, 12.4373031682),
),
"CR": (
"Costa Rica",
(-85.94172543, 8.22502798099, -82.5461962552, 11.2171192489),
),
"CU": (
"Cuba",
(-84.9749110583, 19.8554808619, -74.1780248685, 23.1886107447),
),
"CY": (
"Cyprus",
(32.2566671079, 34.5718694118, 34.0048808123, 35.1731247015),
),
"CZ": (
"Czech Rep.",
(12.2401111182, 48.5553052842, 18.8531441586, 51.1172677679),
),
"DE": (
"Germany",
(5.98865807458, 47.3024876979, 15.0169958839, 54.983104153),
),
"DJ": ("Djibouti", (41.66176, 10.9268785669, 43.3178524107, 12.6996385767)),
"DK": (
"Denmark",
(8.08997684086, 54.8000145534, 12.6900061378, 57.730016588),
),
"DO": (
"Dominican Rep.",
(-71.9451120673, 17.598564358, -68.3179432848, 19.8849105901),
),
"DZ": (
"Algeria",
(-8.68439978681, 19.0573642034, 11.9995056495, 37.1183806422),
),
"EC": (
"Ecuador",
(-80.9677654691, -4.95912851321, -75.2337227037, 1.3809237736),
),
"EG": ("Egypt", (24.70007, 22.0, 36.86623, 31.58568)),
"ER": (
"Eritrea",
(36.3231889178, 12.4554157577, 43.0812260272, 17.9983074),
),
"ES": (
"Spain",
(-9.39288367353, 35.946850084, 3.03948408368, 43.7483377142),
),
"EE": (
"Estonia",
(23.3397953631, 57.4745283067, 28.1316992531, 59.6110903998),
),
"ET": ("Ethiopia", (32.95418, 3.42206, 47.78942, 14.95943)),
"FI": (
"Finland",
(20.6455928891, 59.846373196, 31.5160921567, 70.1641930203),
),
"FJ": ("Fiji", (-180.0, -18.28799, 180.0, -16.0208822567)),
"FK": ("Falkland Is.", (-61.2, -52.3, -57.75, -51.1)),
"FR": (
"France",
(-54.5247541978, 2.05338918702, 9.56001631027, 51.1485061713),
),
"GA": (
"Gabon",
(8.79799563969, -3.97882659263, 14.4254557634, 2.32675751384),
),
"GB": (
"United Kingdom",
(-7.57216793459, 49.959999905, 1.68153079591, 58.6350001085),
),
"GE": (
"Georgia",
(39.9550085793, 41.0644446885, 46.6379081561, 43.553104153),
),
"GH": (
"Ghana",
(-3.24437008301, 4.71046214438, 1.0601216976, 11.0983409693),
),
"GN": (
"Guinea",
(-15.1303112452, 7.3090373804, -7.83210038902, 12.5861829696),
),
"GM": (
"Gambia",
(-16.8415246241, 13.1302841252, -13.8449633448, 13.8764918075),
),
"GW": (
"Guinea Bissau",
(-16.6774519516, 11.0404116887, -13.7004760401, 12.6281700708),
),
"GQ": (
"Eq. Guinea",
(9.3056132341, 1.01011953369, 11.285078973, 2.28386607504),
),
"GR": (
"Greece",
(20.1500159034, 34.9199876979, 26.6041955909, 41.8269046087),
),
"GL": ("Greenland", (-73.297, 60.03676, -12.20855, 83.64513)),
"GT": (
"Guatemala",
(-92.2292486234, 13.7353376327, -88.2250227526, 17.8193260767),
),
"GY": (
"Guyana",
(-61.4103029039, 1.26808828369, -56.5393857489, 8.36703481692),
),
"HN": (
"Honduras",
(-89.3533259753, 12.9846857772, -83.147219001, 16.0054057886),
),
"HR": (
"Croatia",
(13.6569755388, 42.47999136, 19.3904757016, 46.5037509222),
),
"HT": (
"Haiti",
(-74.4580336168, 18.0309927434, -71.6248732164, 19.9156839055),
),
"HU": (
"Hungary",
(16.2022982113, 45.7594811061, 22.710531447, 48.6238540716),
),
"ID": (
"Indonesia",
(95.2930261576, -10.3599874813, 141.03385176, 5.47982086834),
),
"IN": (
"India",
(68.1766451354, 7.96553477623, 97.4025614766, 35.4940095078),
),
"IE": (
"Ireland",
(-9.97708574059, 51.6693012559, -6.03298539878, 55.1316222195),
),
"IR": (
"Iran",
(44.1092252948, 25.0782370061, 63.3166317076, 39.7130026312),
),
"IQ": (
"Iraq",
(38.7923405291, 29.0990251735, 48.5679712258, 37.3852635768),
),
"IS": (
"Iceland",
(-24.3261840479, 63.4963829617, -13.609732225, 66.5267923041),
),
"IL": (
"Israel",
(34.2654333839, 29.5013261988, 35.8363969256, 33.2774264593),
),
"IT": ("Italy", (6.7499552751, 36.619987291, 18.4802470232, 47.1153931748)),
"JM": (
"Jamaica",
(-78.3377192858, 17.7011162379, -76.1996585761, 18.5242184514),
),
"JO": (
"Jordan",
(34.9226025734, 29.1974946152, 39.1954683774, 33.3786864284),
),
"JP": (
"Japan",
(129.408463169, 31.0295791692, 145.543137242, 45.5514834662),
),
"KZ": (
"Kazakhstan",
(46.4664457538, 40.6623245306, 87.3599703308, 55.3852501491),
),
"KE": ("Kenya", (33.8935689697, -4.67677, 41.8550830926, 5.506)),
"KG": (
"Kyrgyzstan",
(69.464886916, 39.2794632025, 80.2599902689, 43.2983393418),
),
"KH": (
"Cambodia",
(102.3480994, 10.4865436874, 107.614547968, 14.5705838078),
),
"KR": (
"S. Korea",
(126.117397903, 34.3900458847, 129.468304478, 38.6122429469),
),
"KW": (
"Kuwait",
(46.5687134133, 28.5260627304, 48.4160941913, 30.0590699326),
),
"LA": ("Laos", (100.115987583, 13.88109101, 107.564525181, 22.4647531194)),
"LB": (
"Lebanon",
(35.1260526873, 33.0890400254, 36.6117501157, 34.6449140488),
),
"LR": (
"Liberia",
(-11.4387794662, 4.35575511313, -7.53971513511, 8.54105520267),
),
"LY": ("Libya", (9.31941084152, 19.58047, 25.16482, 33.1369957545)),
"LK": (
"Sri Lanka",
(79.6951668639, 5.96836985923, 81.7879590189, 9.82407766361),
),
"LS": (
"Lesotho",
(26.9992619158, -30.6451058896, 29.3251664568, -28.6475017229),
),
"LT": (
"Lithuania",
(21.0558004086, 53.9057022162, 26.5882792498, 56.3725283881),
),
"LU": (
"Luxembourg",
(5.67405195478, 49.4426671413, 6.24275109216, 50.1280516628),
),
"LV": (
"Latvia",
(21.0558004086, 55.61510692, 28.1767094256, 57.9701569688),
),
"MA": (
"Morocco",
(-17.0204284327, 21.4207341578, -1.12455115397, 35.7599881048),
),
"MD": (
"Moldova",
(26.6193367856, 45.4882831895, 30.0246586443, 48.4671194525),
),
"MG": (
"Madagascar",
(43.2541870461, -25.6014344215, 50.4765368996, -12.0405567359),
),
"MX": ("Mexico", (-117.12776, 14.5388286402, -86.811982388, 32.72083)),
"MK": (
"Macedonia",
(20.46315, 40.8427269557, 22.9523771502, 42.3202595078),
),
"ML": (
"Mali",
(-12.1707502914, 10.0963607854, 4.27020999514, 24.9745740829),
),
"MM": (
"Myanmar",
(92.3032344909, 9.93295990645, 101.180005324, 28.335945136),
),
"ME": ("Montenegro", (18.45, 41.87755, 20.3398, 43.52384)),
"MN": (
"Mongolia",
(87.7512642761, 41.5974095729, 119.772823928, 52.0473660345),
),
"MZ": (
"Mozambique",
(30.1794812355, -26.7421916643, 40.7754752948, -10.3170960425),
),
"MR": (
"Mauritania",
(-17.0634232243, 14.6168342147, -4.92333736817, 27.3957441269),
),
"MW": (
"Malawi",
(32.6881653175, -16.8012997372, 35.7719047381, -9.23059905359),
),
"MY": (
"Malaysia",
(100.085756871, 0.773131415201, 119.181903925, 6.92805288332),
),
"NA": (
"Namibia",
(11.7341988461, -29.045461928, 25.0844433937, -16.9413428687),
),
"NC": (
"New Caledonia",
(164.029605748, -22.3999760881, 167.120011428, -20.1056458473),
),
"NE": (
"Niger",
(0.295646396495, 11.6601671412, 15.9032466977, 23.4716684026),
),
"NG": (
"Nigeria",
(2.69170169436, 4.24059418377, 14.5771777686, 13.8659239771),
),
"NI": (
"Nicaragua",
(-87.6684934151, 10.7268390975, -83.147219001, 15.0162671981),
),
"NL": (
"Netherlands",
(3.31497114423, 50.803721015, 7.09205325687, 53.5104033474),
),
"NO": (
"Norway",
(4.99207807783, 58.0788841824, 31.29341841, 80.6571442736),
),
"NP": (
"Nepal",
(80.0884245137, 26.3978980576, 88.1748043151, 30.4227169866),
),
"NZ": (
"New Zealand",
(166.509144322, -46.641235447, 178.517093541, -34.4506617165),
),
"OM": ("Oman", (52.0000098, 16.6510511337, 59.8080603372, 26.3959343531)),
"PK": (
"Pakistan",
(60.8742484882, 23.6919650335, 77.8374507995, 37.1330309108),
),
"PA": (
"Panama",
(-82.9657830472, 7.2205414901, -77.2425664944, 9.61161001224),
),
"PE": (
"Peru",
(-81.4109425524, -18.3479753557, -68.6650797187, -0.0572054988649),
),
"PH": (
"Philippines",
(117.17427453, 5.58100332277, 126.537423944, 18.5052273625),
),
"PG": (
"Papua New Guinea",
(141.000210403, -10.6524760881, 156.019965448, -2.50000212973),
),
"PL": (
"Poland",
(14.0745211117, 49.0273953314, 24.0299857927, 54.8515359564),
),
"PR": (
"Puerto Rico",
(-67.2424275377, 17.946553453, -65.5910037909, 18.5206011011),
),
"KP": (
"N. Korea",
(124.265624628, 37.669070543, 130.780007359, 42.9853868678),
),
"PT": (
"Portugal",
(-9.52657060387, 36.838268541, -6.3890876937, 42.280468655),
),
"PY": (
"Paraguay",
(-62.6850571357, -27.5484990374, -54.2929595608, -19.3427466773),
),
"QA": (
"Qatar",
(50.7439107603, 24.5563308782, 51.6067004738, 26.1145820175),
),
"RO": (
"Romania",
(20.2201924985, 43.6884447292, 29.62654341, 48.2208812526),
),
"RU": ("Russia", (-180.0, 41.151416124, 180.0, 81.2504)),
"RW": (
"Rwanda",
(29.0249263852, -2.91785776125, 30.8161348813, -1.13465911215),
),
"SA": (
"Saudi Arabia",
(34.6323360532, 16.3478913436, 55.6666593769, 32.161008816),
),
"SD": ("Sudan", (21.93681, 8.61972971293, 38.4100899595, 22.0)),
"SS": ("S. Sudan", (23.8869795809, 3.50917, 35.2980071182, 12.2480077571)),
"SN": (
"Senegal",
(-17.6250426905, 12.332089952, -11.4678991358, 16.5982636581),
),
"SB": (
"Solomon Is.",
(156.491357864, -10.8263672828, 162.398645868, -6.59933847415),
),
"SL": (
"Sierra Leone",
(-13.2465502588, 6.78591685631, -10.2300935531, 10.0469839543),
),
"SV": (
"El Salvador",
(-90.0955545723, 13.1490168319, -87.7235029772, 14.4241327987),
),
"SO": ("Somalia", (40.98105, -1.68325, 51.13387, 12.02464)),
"RS": ("Serbia", (18.82982, 42.2452243971, 22.9860185076, 46.1717298447)),
"SR": (
"Suriname",
(-58.0446943834, 1.81766714112, -53.9580446031, 6.0252914494),
),
"SK": (
"Slovakia",
(16.8799829444, 47.7584288601, 22.5581376482, 49.5715740017),
),
"SI": (
"Slovenia",
(13.6981099789, 45.4523163926, 16.5648083839, 46.8523859727),
),
"SE": (
"Sweden",
(11.0273686052, 55.3617373725, 23.9033785336, 69.1062472602),
),
"SZ": (
"Swaziland",
(30.6766085141, -27.2858794085, 32.0716654803, -25.660190525),
),
"SY": (
"Syria",
(35.7007979673, 32.312937527, 42.3495910988, 37.2298725449),
),
"TD": ("Chad", (13.5403935076, 7.42192454674, 23.88689, 23.40972)),
"TG": (
"Togo",
(-0.0497847151599, 5.92883738853, 1.86524051271, 11.0186817489),
),
"TH": (
"Thailand",
(97.3758964376, 5.69138418215, 105.589038527, 20.4178496363),
),
"TJ": (
"Tajikistan",
(67.4422196796, 36.7381712916, 74.9800024759, 40.9602133245),
),
"TM": (
"Turkmenistan",
(52.5024597512, 35.2706639674, 66.5461503437, 42.7515510117),
),
"TL": (
"East Timor",
(124.968682489, -9.39317310958, 127.335928176, -8.27334482181),
),
"TT": ("Trinidad and Tobago", (-61.95, 10.0, -60.895, 10.89)),
"TN": (
"Tunisia",
(7.52448164229, 30.3075560572, 11.4887874691, 37.3499944118),
),
"TR": (
"Turkey",
(26.0433512713, 35.8215347357, 44.7939896991, 42.1414848903),
),
"TW": (
"Taiwan",
(120.106188593, 21.9705713974, 121.951243931, 25.2954588893),
),
"TZ": ("Tanzania", (29.3399975929, -11.7209380022, 40.31659, -0.95)),
"UG": ("Uganda", (29.5794661801, -1.44332244223, 35.03599, 4.24988494736)),
"UA": (
"Ukraine",
(22.0856083513, 44.3614785833, 40.0807890155, 52.3350745713),
),
"UY": (
"Uruguay",
(-58.4270741441, -34.9526465797, -53.209588996, -30.1096863746),
),
"US": (
"United States",
(-171.791110603, 18.91619, -66.96466, 71.3577635769),
),
"UZ": (
"Uzbekistan",
(55.9289172707, 37.1449940049, 73.055417108, 45.5868043076),
),
"VE": (
"Venezuela",
(-73.3049515449, 0.724452215982, -59.7582848782, 12.1623070337),
),
"VN": (
"Vietnam",
(102.170435826, 8.59975962975, 109.33526981, 23.3520633001),
),
"VU": (
"Vanuatu",
(166.629136998, -16.5978496233, 167.844876744, -14.6264970842),
),
"PS": (
"West Bank",
(34.9274084816, 31.3534353704, 35.5456653175, 32.5325106878),
),
"WO": (
"World",
(180, 90, -180, -90),
),
"YE": (
"Yemen",
(42.6048726743, 12.5859504257, 53.1085726255, 19.0000033635),
),
"ZA": (
"South Africa",
(16.3449768409, -34.8191663551, 32.830120477, -22.0913127581),
),
"ZM": (
"Zambia",
(21.887842645, -17.9612289364, 33.4856876971, -8.23825652429),
),
"ZW": (
"Zimbabwe",
(25.2642257016, -22.2716118303, 32.8498608742, -15.5077869605),
),
}
)
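# Each entry above maps an ISO code to (country name, (min_lon, min_lat,
# max_lon, max_lat)) in WGS84 degrees. A minimal point-in-bbox sketch
# (hypothetical helper, not part of the original settings; it takes the
# mapping as an argument rather than assuming its variable name):
#
#   def point_in_country_bbox(bboxes, code, lon, lat):
#       _, (min_lon, min_lat, max_lon, max_lat) = bboxes[code]
#       return min_lon <= lon <= max_lon and min_lat <= lat <= max_lat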
| 36.855072 | 149 | 0.444908 | [
"BSD-3-Clause"
] | AQ-AI/open-geo-engine | open_geo_engine/config/model_settings.py | 25,430 | Python |
#!/usr/bin/python
import argparse
import os
import subprocess
import sys
def main():
parser = argparse.ArgumentParser(
description="Update scripting grammar")
parser.add_argument("--java", type=str, help="Java executable", default="java")
parser.add_argument("--antlr", type=str, help="Antlr3 location", default="antlr3.jar")
parser.add_argument("script_file", type=str)
args = parser.parse_args(sys.argv[1:])
    source_location = os.path.abspath(os.path.join(args.script_file, '..', '..', 'src'))
script_name = os.path.splitext(os.path.split(args.script_file)[-1])[0]
subprocess.check_call(
[args.java, '-jar', args.antlr, os.path.abspath(args.script_file), '-o',
source_location])
files = [ os.path.join(source_location, script_name + x) for x in
['.tokens', 'Lexer.hpp', 'Lexer.cpp', 'Parser.hpp', 'Parser.cpp']]
last_licence_line = "// THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
source_line = "From the grammar source file :"
source_date = " On :"
file_name = "static const char fileName[] = \""
for file in files:
write_lines = []
with open(file) as my_f:
has_read_licence = False
insert_clang_format_off = True
in_comment = False
for line in my_f.readlines():
if insert_clang_format_off:
write_lines.append("// clang-format off\n")
insert_clang_format_off = False
if last_licence_line in line:
has_read_licence = True
if source_line in line:
continue
if source_date in line:
continue
if has_read_licence:
                if file_name in line:
line = file_name + script_name + ".g\";"
if line.lstrip().startswith('//'):
continue
line_left = line
line = ""
                while line_left:
if not in_comment and '/*' in line_left:
vals = line_left.split('/*', 1)
line += vals[0]
line_left = vals[1]
in_comment = True
continue
if in_comment and '*/' in line_left:
line_left = line_left.split('*/', 1)[1]
in_comment = False
continue
if not in_comment:
line += line_left
line_left = ""
line = line.replace("ImplTraits::CommonTokenType*", "ImplTraits::CommonTokenType const*")
write_lines.append(line.rstrip() + '\n')
with open(file, "w") as my_f:
my_f.writelines(write_lines)
if __name__ == "__main__":
main()
| 35.847222 | 97 | 0.598605 | [
"BSD-2-Clause"
] | WNProject/WNFramework | Libraries/WNScripting/generate_grammar.py | 2,581 | Python |
"""
Django settings for profiles project.
Generated by 'django-admin startproject' using Django 3.0.7.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '@k^mj7^tf1a_e1(8r54))u!7#m=bsztd)^j(++le6$xa*%%!q+'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'profiles_api'
]
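# 'rest_framework.authtoken' provides the DRF Token model used for token
# authentication; run `python manage.py migrate` after adding it so its table
# is created.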
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'profiles.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'profiles.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
AUTH_USER_MODEL = 'profiles_api.UserProfile' | 25.5 | 91 | 0.6953 | [
"MIT"
] | Iceman8423/profiles-rest-api | profiles/settings.py | 3,213 | Python |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# Though the following import is not directly being used, it is required
# for 3D projection to work
from mpl_toolkits.mplot3d import Axes3D
from sklearn.cluster import KMeans
from sklearn import datasets
np.random.seed(5)
# The plotting below indexes X as a 2-D feature array and compares y against
# the three iris class labels, so use the iris feature matrix and targets.
iris = datasets.load_iris()
X = iris.data
y = iris.target
estimators = [('k_means_iris_8', KMeans(n_clusters=8)),
('k_means_iris_3', KMeans(n_clusters=3)),
('k_means_iris_bad_init', KMeans(n_clusters=3, n_init=1,
init='random'))]
fignum = 1
titles = ['8 clusters', '3 clusters', '3 clusters, bad initialization']
for name, est in estimators:
fig = plt.figure(fignum, figsize=(4, 3))
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
est.fit(X)
labels = est.labels_
ax.scatter(X[:, 3], X[:, 0], X[:, 2],
               c=labels.astype(float), edgecolor='k')
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
ax.set_xlabel('Petal width')
ax.set_ylabel('Sepal length')
ax.set_zlabel('Petal length')
ax.set_title(titles[fignum - 1])
ax.dist = 12
fignum = fignum + 1
# Plot the ground truth
fig = plt.figure(fignum, figsize=(4, 3))
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
for name, label in [('Setosa', 0),
('Versicolour', 1),
('Virginica', 2)]:
ax.text3D(X[y == label, 3].mean(),
X[y == label, 0].mean(),
X[y == label, 2].mean() + 2, name,
horizontalalignment='center',
bbox=dict(alpha=.2, edgecolor='w', facecolor='w'))
# Reorder the labels to have colors matching the cluster results
y = np.choose(y, [1, 2, 0]).astype(float)
ax.scatter(X[:, 3], X[:, 0], X[:, 2], c=y, edgecolor='k')
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
ax.set_xlabel('Petal width')
ax.set_ylabel('Sepal length')
ax.set_zlabel('Petal length')
ax.set_title('Ground Truth')
ax.dist = 12
fig.show() | 32.408451 | 80 | 0.610169 | [
"MIT"
] | anikaanzum/NetworkDataAnalysis | k-means.py | 2,301 | Python |
from flask import Blueprint, render_template, request, session, url_for
from CTFd.models import Tags, Users
from CTFd.utils.decorators import authed_only
from CTFd.utils.decorators.visibility import check_account_visibility
from CTFd.utils.helpers import get_errors, get_infos
from CTFd.utils.user import get_current_user, get_user_badges
users = Blueprint("users", __name__)
@users.route("/users")
@check_account_visibility
def listing():
q = request.args.get("q")
field = request.args.get("field", "name")
if field not in ("name", "website"):
field = "name"
filters = []
if q:
filters.append(getattr(Users, field).like("%{}%".format(q)))
users = (
Users.query.filter_by(banned=False, hidden=False)
.filter(*filters)
.order_by(Users.id.asc())
.paginate(per_page=50)
)
args = dict(request.args)
args.pop("page", 1)
return render_template(
"users/users.html",
users=users,
prev_page=url_for(request.endpoint, page=users.prev_num, **args),
next_page=url_for(request.endpoint, page=users.next_num, **args),
q=q,
field=field,
)
@users.route("/profile")
@users.route("/user")
@authed_only
def private():
infos = get_infos()
errors = get_errors()
user = get_current_user()
badges = get_user_badges(session["id"])
return render_template(
"users/private.html",
user=user,
account=user.account,
badges=badges,
Tags=Tags,
infos=infos,
errors=errors,
)
@users.route("/users/<int:user_id>")
@check_account_visibility
def public(user_id):
infos = get_infos()
errors = get_errors()
user = Users.query.filter_by(id=user_id, banned=False, hidden=False).first_or_404()
badges = get_user_badges(user_id)
return render_template(
"users/public.html",
user=user,
account=user.account,
badges=badges,
Tags=Tags,
infos=infos,
errors=errors
)
| 24.518072 | 87 | 0.644226 | [
"Apache-2.0"
] | ldecoster/Plateforme-CTF | CTFd/users.py | 2,035 | Python |
# -*- coding: utf-8 -*-
import time
import mock
import pytest
import requests
try:
from urllib.parse import urljoin
except ImportError:
from urlparse import urljoin
from anacode import codes
from anacode.api import client
from anacode.api import writers
def empty_response(*args, **kwargs):
resp = requests.Response()
resp._content = b'{}'
resp.status_code = 200
return resp
def empty_json(*args, **kwargs):
return {}
@pytest.fixture
def auth():
return '1234567890'
@pytest.fixture
def auth_header(auth):
return {'Authorization': 'Token %s' % auth,
'Accept': 'application/json'}
@pytest.fixture
def api(auth):
return client.AnacodeClient(auth)
@mock.patch('requests.post', empty_response)
def test_scrape_call(api, auth_header, mocker):
mocker.spy(requests, 'post')
api.scrape('http://chinese.portal.com.ch')
assert requests.post.call_count == 1
requests.post.assert_called_once_with(
urljoin(client.ANACODE_API_URL, 'scrape/'),
headers=auth_header, json={'url': 'http://chinese.portal.com.ch'})
@mock.patch('requests.post', empty_response)
def test_categories_call(api, auth_header, mocker):
mocker.spy(requests, 'post')
api.analyze(['安全性能很好,很帅气。'], ['categories'])
assert requests.post.call_count == 1
json_data = {'texts': ['安全性能很好,很帅气。'], 'analyses': ['categories']}
requests.post.assert_called_once_with(
urljoin(client.ANACODE_API_URL, 'analyze/'),
headers=auth_header, json=json_data)
@mock.patch('requests.post', empty_response)
def test_sentiment_call(api, auth_header, mocker):
mocker.spy(requests, 'post')
api.analyze(['安全性能很好,很帅气。'], ['sentiment'])
assert requests.post.call_count == 1
requests.post.assert_called_once_with(
urljoin(client.ANACODE_API_URL, 'analyze/'),
headers=auth_header, json={'texts': ['安全性能很好,很帅气。'],
'analyses': ['sentiment']})
@mock.patch('requests.post', empty_response)
def test_concepts_call(api, auth_header, mocker):
mocker.spy(requests, 'post')
api.analyze(['安全性能很好,很帅气。'], ['concepts'])
assert requests.post.call_count == 1
requests.post.assert_called_once_with(
urljoin(client.ANACODE_API_URL, 'analyze/'),
headers=auth_header, json={'texts': ['安全性能很好,很帅气。'],
'analyses': ['concepts']})
@mock.patch('requests.post', empty_response)
def test_absa_call(api, auth_header, mocker):
mocker.spy(requests, 'post')
api.analyze(['安全性能很好,很帅气。'], ['absa'])
assert requests.post.call_count == 1
requests.post.assert_called_once_with(
urljoin(client.ANACODE_API_URL, 'analyze/'),
headers=auth_header, json={'texts': ['安全性能很好,很帅气。'],
'analyses': ['absa']})
@pytest.mark.parametrize('code,call,args', [
(codes.SCRAPE, 'scrape', ['http://www.google.com/']),
(codes.ANALYZE, 'analyze', ['安全性能很好,很帅气。', ['categories']]),
])
def test_proper_method_call(api, code, call, args, mocker):
    mocker.patch('anacode.api.client.AnacodeClient.' + call, empty_json)
mocker.spy(api, call)
api.call((code, *args))
getattr(api, call).assert_called_once_with(*args)
@pytest.mark.parametrize('call,args', [
('scrape', ['http://www.google.com/']),
('analyze', [['安全性能很好,很帅气。'], ['categories', 'concepts']]),
])
@pytest.mark.parametrize('count,call_count', [
(0, 0), (5, 0), (9, 0), (10, 1), (11, 1), (19, 1), (20, 2),
])
def test_should_start_analysis(api, mocker, call, args, count, call_count):
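    # With bulk_size=10 the Analyzer should flush (i.e. call
    # execute_tasks_and_store_output) once per 10 queued requests, which is
    # exactly what the (count, call_count) parameters above encode.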
writer = writers.DataFrameWriter()
writer.init()
to_mock = 'anacode.api.client.AnacodeClient.' + call
    mocker.patch(to_mock, empty_json)
analyzer = client.Analyzer(api, writer, bulk_size=10)
mocker.spy(analyzer, 'execute_tasks_and_store_output')
for _ in range(count):
getattr(analyzer, call)(*args)
assert analyzer.execute_tasks_and_store_output.call_count == call_count
@pytest.mark.parametrize('call, args', [
('scrape', ([], )),
('analyze', ([], ['categories'])),
('analyze', ([], ['concepts'])),
('analyze', ([], ['sentiment'])),
('analyze', ([], ['absa'])),
])
def test_analysis_execution(api, mocker, call, args):
text = ['安全性能很好,很帅气。']
writer = writers.DataFrameWriter()
writer.init()
to_mock = 'anacode.api.client.AnacodeClient.' + call
    mocker.patch(to_mock, empty_json)
mocker.spy(api, call)
analyzer = client.Analyzer(api, writer, bulk_size=10)
for _ in range(4):
getattr(analyzer, call)(*args)
analyzer.execute_tasks_and_store_output()
assert getattr(api, call).call_count == 4
def time_consuming(*args, **kwargs):
time.sleep(0.1)
return {}
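# time_consuming stands in for a slow (~0.1 s) API call; with 4 worker threads
# the 4 analyze() calls below should run concurrently, so the whole batch is
# expected to take roughly 0.1 s rather than 0.4 s.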
@mock.patch('anacode.api.client.AnacodeClient.analyze', time_consuming)
def test_parallel_queries(api, mocker):
text = ['安全性能很好,很帅气。']
writer = writers.DataFrameWriter()
writer.init()
mocker.spy(api, 'analyze')
analyzer = client.Analyzer(api, writer, threads=4, bulk_size=4)
start = time.time()
with analyzer:
for _ in range(4):
analyzer.analyze(text, ['categories'])
stop = time.time()
duration = stop - start
assert abs(duration - 0.1) < 0.1
| 29.762712 | 75 | 0.65186 | [
"BSD-3-Clause"
] | anacode/anacode-toolkit | tests/test_api_client.py | 5,532 | Python |
# coding: utf-8
# AUTOGENERATED BY gen_script.sh from kp1.py
# Copyright (C) Nyimbi Odero, Thu Aug 3 20:34:20 EAT 2017
import calendar
from flask import redirect, flash, url_for, Markup
from flask import render_template
from flask_appbuilder.models.sqla.interface import SQLAInterface
from flask_appbuilder.views import ModelView, BaseView, MasterDetailView, MultipleView, RestCRUDView, CompactCRUDMixin
from flask_appbuilder import ModelView, CompactCRUDMixin, aggregate_count, action, expose, BaseView, has_access
from flask_appbuilder.charts.views import ChartView, TimeChartView, GroupByChartView
from flask_appbuilder.models.group import aggregate_count
from flask_appbuilder.widgets import ListThumbnail, ListWidget
from flask_appbuilder.widgets import FormVerticalWidget, FormInlineWidget, FormHorizontalWidget, ShowBlockWidget
from flask_appbuilder.models.sqla.filters import FilterStartsWith, FilterEqualFunction as FA
from app import appbuilder, db
from .models import *
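# The column/fieldset lists referenced by the views below (person_exclude_columns,
# biometric_columns, audit_exclude_columns, ...) are presumably pulled in via the
# star import from .models above.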
# Basic Lists
hide_list = ['created_by', 'changed_by', 'created_on', 'changed_on']
#To pretty Print from PersonMixin
def pretty_month_year(value):
return calendar.month_name[value.month] + ' ' + str(value.year)
def pretty_year(value):
return str(value.year)
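# Seed helper: inserts the two Gender rows and commits; the except/rollback
# makes it safe to call again once the rows already exist.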
def fill_gender():
try:
db.session.add(Gender(name='Male'))
db.session.add(Gender(name='Female'))
db.session.commit()
except:
db.session.rollback()
class LawyerView(ModelView):#MasterDetailView, MultipleView
datamodel=SQLAInterface(Lawyer, db.session)
#add_title =
#list_title =
#edit_title =
#show_title =
#add_widget = (FormVerticalWidget|FormInlineWidget)
#show_widget = ShowBlockWidget
#list_widget = (ListThumbnail|ListWidget)
#base_order = ("name", "asc")
search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
add_exclude_columns = edit_exclude_columns = audit_exclude_columns
#label_columns = {"contact_group":"Contacts Group"}
#add_columns = person_list_columns + ref_columns + contact_columns
#edit_columns = person_list_columns + ref_columns + contact_columns
#list_columns = person_list_columns + ref_columns + contact_columns
#list_widget = ListBlock|ListItem|ListThumbnail|ListWidget (default)
#related_views =[]
#show_fieldsets = person_show_fieldset + contact_fieldset
#edit_fieldsets = add_fieldsets = \
# ref_fieldset + person_fieldset + contact_fieldset #+ activity_fieldset + place_fieldset + biometric_fieldset + employment_fieldset
#description_columns = {"name":"your models name column","address":"the address column"}
#show_template = "appbuilder/general/model/show_cascade.html"
#edit_template = "appbuilder/general/model/edit_cascade.html"
@action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
def muldelete(self, items):
self.datamodel.delete_all(items)
self.update_redirect()
return redirect(self.get_redirect())
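# A minimal registration sketch (assumption: views are normally registered
# with appbuilder elsewhere in this module; the menu label, icon and category
# below are illustrative only):
#
#   appbuilder.add_view(LawyerView, "Lawyers", icon="fa-folder-open-o",
#                       category="Court Records")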
class PolicemanView(ModelView):#MasterDetailView, MultipleView
datamodel=SQLAInterface(Policeman, db.session)
#add_title =
#list_title =
#edit_title =
#show_title =
#add_widget = (FormVerticalWidget|FormInlineWidget)
#show_widget = ShowBlockWidget
#list_widget = (ListThumbnail|ListWidget)
#base_order = ("name", "asc")
search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
add_exclude_columns = edit_exclude_columns = audit_exclude_columns
#label_columns = {"contact_group":"Contacts Group"}
#add_columns = person_list_columns + ref_columns + contact_columns
#edit_columns = person_list_columns + ref_columns + contact_columns
#list_columns = person_list_columns + ref_columns + contact_columns
#list_widget = ListBlock|ListItem|ListThumbnail|ListWidget (default)
#related_views =[]
#show_fieldsets = person_show_fieldset + contact_fieldset
#edit_fieldsets = add_fieldsets = \
# ref_fieldset + person_fieldset + contact_fieldset #+ activity_fieldset + place_fieldset + biometric_fieldset + employment_fieldset
#description_columns = {"name":"your models name column","address":"the address column"}
#show_template = "appbuilder/general/model/show_cascade.html"
#edit_template = "appbuilder/general/model/edit_cascade.html"
@action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
def muldelete(self, items):
self.datamodel.delete_all(items)
self.update_redirect()
return redirect(self.get_redirect())
class BailView(ModelView):#MasterDetailView, MultipleView
datamodel=SQLAInterface(Bail, db.session)
#add_title =
#list_title =
#edit_title =
#show_title =
#add_widget = (FormVerticalWidget|FormInlineWidget)
#show_widget = ShowBlockWidget
#list_widget = (ListThumbnail|ListWidget)
#base_order = ("name", "asc")
search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
add_exclude_columns = edit_exclude_columns = audit_exclude_columns
#label_columns = {"contact_group":"Contacts Group"}
#add_columns = person_list_columns + ref_columns + contact_columns
#edit_columns = person_list_columns + ref_columns + contact_columns
#list_columns = person_list_columns + ref_columns + contact_columns
#list_widget = ListBlock|ListItem|ListThumbnail|ListWidget (default)
#related_views =[]
#show_fieldsets = person_show_fieldset + contact_fieldset
#edit_fieldsets = add_fieldsets = \
# ref_fieldset + person_fieldset + contact_fieldset #+ activity_fieldset + place_fieldset + biometric_fieldset + employment_fieldset
#description_columns = {"name":"your models name column","address":"the address column"}
#show_template = "appbuilder/general/model/show_cascade.html"
#edit_template = "appbuilder/general/model/edit_cascade.html"
@action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
def muldelete(self, items):
self.datamodel.delete_all(items)
self.update_redirect()
return redirect(self.get_redirect())
class CaseView(ModelView):#MasterDetailView, MultipleView
datamodel=SQLAInterface(Case, db.session)
#add_title =
#list_title =
#edit_title =
#show_title =
#add_widget = (FormVerticalWidget|FormInlineWidget)
#show_widget = ShowBlockWidget
#list_widget = (ListThumbnail|ListWidget)
#base_order = ("name", "asc")
search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
add_exclude_columns = edit_exclude_columns = audit_exclude_columns
#label_columns = {"contact_group":"Contacts Group"}
#add_columns = person_list_columns + ref_columns + contact_columns
#edit_columns = person_list_columns + ref_columns + contact_columns
#list_columns = person_list_columns + ref_columns + contact_columns
#list_widget = ListBlock|ListItem|ListThumbnail|ListWidget (default)
#related_views =[]
#show_fieldsets = person_show_fieldset + contact_fieldset
#edit_fieldsets = add_fieldsets = \
# ref_fieldset + person_fieldset + contact_fieldset #+ activity_fieldset + place_fieldset + biometric_fieldset + employment_fieldset
#description_columns = {"name":"your models name column","address":"the address column"}
#show_template = "appbuilder/general/model/show_cascade.html"
#edit_template = "appbuilder/general/model/edit_cascade.html"
@action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
def muldelete(self, items):
self.datamodel.delete_all(items)
self.update_redirect()
return redirect(self.get_redirect())
class CasecategoryView(ModelView):#MasterDetailView, MultipleView
datamodel=SQLAInterface(Casecategory, db.session)
#add_title =
#list_title =
#edit_title =
#show_title =
#add_widget = (FormVerticalWidget|FormInlineWidget)
#show_widget = ShowBlockWidget
#list_widget = (ListThumbnail|ListWidget)
#base_order = ("name", "asc")
search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
add_exclude_columns = edit_exclude_columns = audit_exclude_columns
#label_columns = {"contact_group":"Contacts Group"}
#add_columns = person_list_columns + ref_columns + contact_columns
#edit_columns = person_list_columns + ref_columns + contact_columns
#list_columns = person_list_columns + ref_columns + contact_columns
#list_widget = ListBlock|ListItem|ListThumbnail|ListWidget (default)
#related_views =[]
#show_fieldsets = person_show_fieldset + contact_fieldset
#edit_fieldsets = add_fieldsets = \
# ref_fieldset + person_fieldset + contact_fieldset #+ activity_fieldset + place_fieldset + biometric_fieldset + employment_fieldset
#description_columns = {"name":"your models name column","address":"the address column"}
#show_template = "appbuilder/general/model/show_cascade.html"
#edit_template = "appbuilder/general/model/edit_cascade.html"
@action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
def muldelete(self, items):
self.datamodel.delete_all(items)
self.update_redirect()
return redirect(self.get_redirect())
class CauseofactionView(CompactCRUDMixin, ModelView):#MasterDetailView, MultipleView
datamodel=SQLAInterface(Causeofaction, db.session)
#add_title =
#list_title =
#edit_title =
#show_title =
#add_widget = (FormVerticalWidget|FormInlineWidget)
#show_widget = ShowBlockWidget
#list_widget = (ListThumbnail|ListWidget)
#base_order = ("name", "asc")
search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
add_exclude_columns = edit_exclude_columns = audit_exclude_columns
#label_columns = {"contact_group":"Contacts Group"}
#add_columns = person_list_columns + ref_columns + contact_columns
#edit_columns = person_list_columns + ref_columns + contact_columns
#list_columns = person_list_columns + ref_columns + contact_columns
#list_widget = ListBlock|ListItem|ListThumbnail|ListWidget (default)
#related_views =[]
#show_fieldsets = person_show_fieldset + contact_fieldset
#edit_fieldsets = add_fieldsets = \
# ref_fieldset + person_fieldset + contact_fieldset #+ activity_fieldset + place_fieldset + biometric_fieldset + employment_fieldset
#description_columns = {"name":"your models name column","address":"the address column"}
#show_template = "appbuilder/general/model/show_cascade.html"
#edit_template = "appbuilder/general/model/edit_cascade.html"
@action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
def muldelete(self, items):
self.datamodel.delete_all(items)
self.update_redirect()
return redirect(self.get_redirect())
class CommitaltypeView(ModelView):#MasterDetailView, MultipleView
datamodel=SQLAInterface(Commitaltype, db.session)
#add_title =
#list_title =
#edit_title =
#show_title =
#add_widget = (FormVerticalWidget|FormInlineWidget)
#show_widget = ShowBlockWidget
#list_widget = (ListThumbnail|ListWidget)
#base_order = ("name", "asc")
search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
add_exclude_columns = edit_exclude_columns = audit_exclude_columns
#label_columns = {"contact_group":"Contacts Group"}
#add_columns = person_list_columns + ref_columns + contact_columns
#edit_columns = person_list_columns + ref_columns + contact_columns
#list_columns = person_list_columns + ref_columns + contact_columns
#list_widget = ListBlock|ListItem|ListThumbnail|ListWidget (default)
#related_views =[]
#show_fieldsets = person_show_fieldset + contact_fieldset
#edit_fieldsets = add_fieldsets = \
# ref_fieldset + person_fieldset + contact_fieldset #+ activity_fieldset + place_fieldset + biometric_fieldset + employment_fieldset
#description_columns = {"name":"your models name column","address":"the address column"}
#show_template = "appbuilder/general/model/show_cascade.html"
#edit_template = "appbuilder/general/model/edit_cascade.html"
@action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
def muldelete(self, items):
self.datamodel.delete_all(items)
self.update_redirect()
return redirect(self.get_redirect())
class ConstituencyView(ModelView):#MasterDetailView, MultipleView
datamodel=SQLAInterface(Constituency, db.session)
#add_title =
#list_title =
#edit_title =
#show_title =
#add_widget = (FormVerticalWidget|FormInlineWidget)
#show_widget = ShowBlockWidget
#list_widget = (ListThumbnail|ListWidget)
#base_order = ("name", "asc")
search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
add_exclude_columns = edit_exclude_columns = audit_exclude_columns
#label_columns = {"contact_group":"Contacts Group"}
#add_columns = person_list_columns + ref_columns + contact_columns
#edit_columns = person_list_columns + ref_columns + contact_columns
#list_columns = person_list_columns + ref_columns + contact_columns
#list_widget = ListBlock|ListItem|ListThumbnail|ListWidget (default)
#related_views =[]
#show_fieldsets = person_show_fieldset + contact_fieldset
#edit_fieldsets = add_fieldsets = \
# ref_fieldset + person_fieldset + contact_fieldset #+ activity_fieldset + place_fieldset + biometric_fieldset + employment_fieldset
#description_columns = {"name":"your models name column","address":"the address column"}
#show_template = "appbuilder/general/model/show_cascade.html"
#edit_template = "appbuilder/general/model/edit_cascade.html"
@action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
def muldelete(self, items):
self.datamodel.delete_all(items)
self.update_redirect()
return redirect(self.get_redirect())
class CountyView(ModelView):#MasterDetailView, MultipleView
datamodel=SQLAInterface(County, db.session)
#add_title =
#list_title =
#edit_title =
#show_title =
#add_widget = (FormVerticalWidget|FormInlineWidget)
#show_widget = ShowBlockWidget
#list_widget = (ListThumbnail|ListWidget)
#base_order = ("name", "asc")
search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
add_exclude_columns = edit_exclude_columns = audit_exclude_columns
#label_columns = {"contact_group":"Contacts Group"}
#add_columns = person_list_columns + ref_columns + contact_columns
#edit_columns = person_list_columns + ref_columns + contact_columns
#list_columns = person_list_columns + ref_columns + contact_columns
#list_widget = ListBlock|ListItem|ListThumbnail|ListWidget (default)
#related_views =[]
#show_fieldsets = person_show_fieldset + contact_fieldset
#edit_fieldsets = add_fieldsets = \
# ref_fieldset + person_fieldset + contact_fieldset #+ activity_fieldset + place_fieldset + biometric_fieldset + employment_fieldset
#description_columns = {"name":"your models name column","address":"the address column"}
#show_template = "appbuilder/general/model/show_cascade.html"
#edit_template = "appbuilder/general/model/edit_cascade.html"
@action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
def muldelete(self, items):
self.datamodel.delete_all(items)
self.update_redirect()
return redirect(self.get_redirect())
class CourtView(ModelView):#MasterDetailView, MultipleView
datamodel=SQLAInterface(Court, db.session)
#add_title =
#list_title =
#edit_title =
#show_title =
#add_widget = (FormVerticalWidget|FormInlineWidget)
#show_widget = ShowBlockWidget
#list_widget = (ListThumbnail|ListWidget)
#base_order = ("name", "asc")
search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
add_exclude_columns = edit_exclude_columns = audit_exclude_columns
#label_columns = {"contact_group":"Contacts Group"}
#add_columns = person_list_columns + ref_columns + contact_columns
#edit_columns = person_list_columns + ref_columns + contact_columns
#list_columns = person_list_columns + ref_columns + contact_columns
#list_widget = ListBlock|ListItem|ListThumbnail|ListWidget (default)
#related_views =[]
#show_fieldsets = person_show_fieldset + contact_fieldset
#edit_fieldsets = add_fieldsets = \
# ref_fieldset + person_fieldset + contact_fieldset #+ activity_fieldset + place_fieldset + biometric_fieldset + employment_fieldset
#description_columns = {"name":"your models name column","address":"the address column"}
#show_template = "appbuilder/general/model/show_cascade.html"
#edit_template = "appbuilder/general/model/edit_cascade.html"
@action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
def muldelete(self, items):
self.datamodel.delete_all(items)
self.update_redirect()
return redirect(self.get_redirect())
class CourtlevelView(ModelView):#MasterDetailView, MultipleView
datamodel=SQLAInterface(Courtlevel, db.session)
#add_title =
#list_title =
#edit_title =
#show_title =
#add_widget = (FormVerticalWidget|FormInlineWidget)
#show_widget = ShowBlockWidget
#list_widget = (ListThumbnail|ListWidget)
#base_order = ("name", "asc")
search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
add_exclude_columns = edit_exclude_columns = audit_exclude_columns
#label_columns = {"contact_group":"Contacts Group"}
#add_columns = person_list_columns + ref_columns + contact_columns
#edit_columns = person_list_columns + ref_columns + contact_columns
#list_columns = person_list_columns + ref_columns + contact_columns
#list_widget = ListBlock|ListItem|ListThumbnail|ListWidget (default)
#related_views =[]
#show_fieldsets = person_show_fieldset + contact_fieldset
#edit_fieldsets = add_fieldsets = \
# ref_fieldset + person_fieldset + contact_fieldset #+ activity_fieldset + place_fieldset + biometric_fieldset + employment_fieldset
#description_columns = {"name":"your models name column","address":"the address column"}
#show_template = "appbuilder/general/model/show_cascade.html"
#edit_template = "appbuilder/general/model/edit_cascade.html"
@action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
def muldelete(self, items):
self.datamodel.delete_all(items)
self.update_redirect()
return redirect(self.get_redirect())
class CourtstationView(ModelView):#MasterDetailView, MultipleView
datamodel=SQLAInterface(Courtstation, db.session)
#add_title =
#list_title =
#edit_title =
#show_title =
#add_widget = (FormVerticalWidget|FormInlineWidget)
#show_widget = ShowBlockWidget
#list_widget = (ListThumbnail|ListWidget)
#base_order = ("name", "asc")
search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
add_exclude_columns = edit_exclude_columns = audit_exclude_columns
#label_columns = {"contact_group":"Contacts Group"}
#add_columns = person_list_columns + ref_columns + contact_columns
#edit_columns = person_list_columns + ref_columns + contact_columns
#list_columns = person_list_columns + ref_columns + contact_columns
#list_widget = ListBlock|ListItem|ListThumbnail|ListWidget (default)
#related_views =[]
#show_fieldsets = person_show_fieldset + contact_fieldset
#edit_fieldsets = add_fieldsets = \
# ref_fieldset + person_fieldset + contact_fieldset #+ activity_fieldset + place_fieldset + biometric_fieldset + employment_fieldset
#description_columns = {"name":"your models name column","address":"the address column"}
#show_template = "appbuilder/general/model/show_cascade.html"
#edit_template = "appbuilder/general/model/edit_cascade.html"
@action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
def muldelete(self, items):
self.datamodel.delete_all(items)
self.update_redirect()
return redirect(self.get_redirect())
class DefendantView(ModelView):#MasterDetailView, MultipleView
datamodel=SQLAInterface(Defendant, db.session)
#add_title =
#list_title =
#edit_title =
#show_title =
#add_widget = (FormVerticalWidget|FormInlineWidget)
#show_widget = ShowBlockWidget
#list_widget = (ListThumbnail|ListWidget)
#base_order = ("name", "asc")
search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
add_exclude_columns = edit_exclude_columns = audit_exclude_columns
#label_columns = {"contact_group":"Contacts Group"}
#add_columns = person_list_columns + ref_columns + contact_columns
#edit_columns = person_list_columns + ref_columns + contact_columns
#list_columns = person_list_columns + ref_columns + contact_columns
#list_widget = ListBlock|ListItem|ListThumbnail|ListWidget (default)
#related_views =[]
#show_fieldsets = person_show_fieldset + contact_fieldset
#edit_fieldsets = add_fieldsets = \
# ref_fieldset + person_fieldset + contact_fieldset #+ activity_fieldset + place_fieldset + biometric_fieldset + employment_fieldset
#description_columns = {"name":"your models name column","address":"the address column"}
#show_template = "appbuilder/general/model/show_cascade.html"
#edit_template = "appbuilder/general/model/edit_cascade.html"
@action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
def muldelete(self, items):
self.datamodel.delete_all(items)
self.update_redirect()
return redirect(self.get_redirect())
class DoctemplateView(ModelView):#MasterDetailView, MultipleView
datamodel=SQLAInterface(Doctemplate, db.session)
#add_title =
#list_title =
#edit_title =
#show_title =
#add_widget = (FormVerticalWidget|FormInlineWidget)
#show_widget = ShowBlockWidget
#list_widget = (ListThumbnail|ListWidget)
#base_order = ("name", "asc")
search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
add_exclude_columns = edit_exclude_columns = audit_exclude_columns
#label_columns = {"contact_group":"Contacts Group"}
#add_columns = person_list_columns + ref_columns + contact_columns
#edit_columns = person_list_columns + ref_columns + contact_columns
#list_columns = person_list_columns + ref_columns + contact_columns
#list_widget = ListBlock|ListItem|ListThumbnail|ListWidget (default)
#related_views =[]
#show_fieldsets = person_show_fieldset + contact_fieldset
#edit_fieldsets = add_fieldsets = \
# ref_fieldset + person_fieldset + contact_fieldset #+ activity_fieldset + place_fieldset + biometric_fieldset + employment_fieldset
#description_columns = {"name":"your models name column","address":"the address column"}
#show_template = "appbuilder/general/model/show_cascade.html"
#edit_template = "appbuilder/general/model/edit_cascade.html"
@action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
def muldelete(self, items):
self.datamodel.delete_all(items)
self.update_redirect()
return redirect(self.get_redirect())
class DocumentView(ModelView):#MasterDetailView, MultipleView
datamodel=SQLAInterface(Document, db.session)
#add_title =
#list_title =
#edit_title =
#show_title =
#add_widget = (FormVerticalWidget|FormInlineWidget)
#show_widget = ShowBlockWidget
#list_widget = (ListThumbnail|ListWidget)
#base_order = ("name", "asc")
search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
add_exclude_columns = edit_exclude_columns = audit_exclude_columns
#label_columns = {"contact_group":"Contacts Group"}
#add_columns = person_list_columns + ref_columns + contact_columns
#edit_columns = person_list_columns + ref_columns + contact_columns
#list_columns = person_list_columns + ref_columns + contact_columns
#list_widget = ListBlock|ListItem|ListThumbnail|ListWidget (default)
#related_views =[]
#show_fieldsets = person_show_fieldset + contact_fieldset
#edit_fieldsets = add_fieldsets = \
# ref_fieldset + person_fieldset + contact_fieldset #+ activity_fieldset + place_fieldset + biometric_fieldset + employment_fieldset
#description_columns = {"name":"your models name column","address":"the address column"}
#show_template = "appbuilder/general/model/show_cascade.html"
#edit_template = "appbuilder/general/model/edit_cascade.html"
@action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
def muldelete(self, items):
self.datamodel.delete_all(items)
self.update_redirect()
return redirect(self.get_redirect())
class FilingView(ModelView):#MasterDetailView, MultipleView
datamodel=SQLAInterface(Filing, db.session)
#add_title =
#list_title =
#edit_title =
#show_title =
#add_widget = (FormVerticalWidget|FormInlineWidget)
#show_widget = ShowBlockWidget
#list_widget = (ListThumbnail|ListWidget)
#base_order = ("name", "asc")
search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
add_exclude_columns = edit_exclude_columns = audit_exclude_columns
#label_columns = {"contact_group":"Contacts Group"}
#add_columns = person_list_columns + ref_columns + contact_columns
#edit_columns = person_list_columns + ref_columns + contact_columns
#list_columns = person_list_columns + ref_columns + contact_columns
#list_widget = ListBlock|ListItem|ListThumbnail|ListWidget (default)
#related_views =[]
#show_fieldsets = person_show_fieldset + contact_fieldset
#edit_fieldsets = add_fieldsets = \
# ref_fieldset + person_fieldset + contact_fieldset #+ activity_fieldset + place_fieldset + biometric_fieldset + employment_fieldset
#description_columns = {"name":"your models name column","address":"the address column"}
#show_template = "appbuilder/general/model/show_cascade.html"
#edit_template = "appbuilder/general/model/edit_cascade.html"
@action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
def muldelete(self, items):
self.datamodel.delete_all(items)
self.update_redirect()
return redirect(self.get_redirect())
class FilingtypeView(ModelView):#MasterDetailView, MultipleView
datamodel=SQLAInterface(Filingtype, db.session)
#add_title =
#list_title =
#edit_title =
#show_title =
#add_widget = (FormVerticalWidget|FormInlineWidget)
#show_widget = ShowBlockWidget
#list_widget = (ListThumbnail|ListWidget)
#base_order = ("name", "asc")
search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
add_exclude_columns = edit_exclude_columns = audit_exclude_columns
#label_columns = {"contact_group":"Contacts Group"}
#add_columns = person_list_columns + ref_columns + contact_columns
#edit_columns = person_list_columns + ref_columns + contact_columns
#list_columns = person_list_columns + ref_columns + contact_columns
#list_widget = ListBlock|ListItem|ListThumbnail|ListWidget (default)
#related_views =[]
#show_fieldsets = person_show_fieldset + contact_fieldset
#edit_fieldsets = add_fieldsets = \
# ref_fieldset + person_fieldset + contact_fieldset #+ activity_fieldset + place_fieldset + biometric_fieldset + employment_fieldset
#description_columns = {"name":"your models name column","address":"the address column"}
#show_template = "appbuilder/general/model/show_cascade.html"
#edit_template = "appbuilder/general/model/edit_cascade.html"
@action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
def muldelete(self, items):
self.datamodel.delete_all(items)
self.update_redirect()
return redirect(self.get_redirect())
class GenderView(ModelView):#MasterDetailView, MultipleView
datamodel=SQLAInterface(Gender, db.session)
#add_title =
#list_title =
#edit_title =
#show_title =
#add_widget = (FormVerticalWidget|FormInlineWidget)
#show_widget = ShowBlockWidget
#list_widget = (ListThumbnail|ListWidget)
#base_order = ("name", "asc")
search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
add_exclude_columns = edit_exclude_columns = audit_exclude_columns
#label_columns = {"contact_group":"Contacts Group"}
#add_columns = person_list_columns + ref_columns + contact_columns
#edit_columns = person_list_columns + ref_columns + contact_columns
#list_columns = person_list_columns + ref_columns + contact_columns
#list_widget = ListBlock|ListItem|ListThumbnail|ListWidget (default)
#related_views =[]
#show_fieldsets = person_show_fieldset + contact_fieldset
#edit_fieldsets = add_fieldsets = \
# ref_fieldset + person_fieldset + contact_fieldset #+ activity_fieldset + place_fieldset + biometric_fieldset + employment_fieldset
#description_columns = {"name":"your models name column","address":"the address column"}
#show_template = "appbuilder/general/model/show_cascade.html"
#edit_template = "appbuilder/general/model/edit_cascade.html"
@action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
def muldelete(self, items):
self.datamodel.delete_all(items)
self.update_redirect()
return redirect(self.get_redirect())
class HearingView(ModelView):#MasterDetailView, MultipleView
datamodel=SQLAInterface(Hearing, db.session)
#add_title =
#list_title =
#edit_title =
#show_title =
#add_widget = (FormVerticalWidget|FormInlineWidget)
#show_widget = ShowBlockWidget
#list_widget = (ListThumbnail|ListWidget)
#base_order = ("name", "asc")
search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
add_exclude_columns = edit_exclude_columns = audit_exclude_columns
#label_columns = {"contact_group":"Contacts Group"}
#add_columns = person_list_columns + ref_columns + contact_columns
#edit_columns = person_list_columns + ref_columns + contact_columns
#list_columns = person_list_columns + ref_columns + contact_columns
#list_widget = ListBlock|ListItem|ListThumbnail|ListWidget (default)
#related_views =[]
#show_fieldsets = person_show_fieldset + contact_fieldset
#edit_fieldsets = add_fieldsets = \
# ref_fieldset + person_fieldset + contact_fieldset #+ activity_fieldset + place_fieldset + biometric_fieldset + employment_fieldset
#description_columns = {"name":"your models name column","address":"the address column"}
#show_template = "appbuilder/general/model/show_cascade.html"
#edit_template = "appbuilder/general/model/edit_cascade.html"
@action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
def muldelete(self, items):
self.datamodel.delete_all(items)
self.update_redirect()
return redirect(self.get_redirect())
class HearingtypeView(ModelView):#MasterDetailView, MultipleView
datamodel=SQLAInterface(Hearingtype, db.session)
#add_title =
#list_title =
#edit_title =
#show_title =
#add_widget = (FormVerticalWidget|FormInlineWidget)
#show_widget = ShowBlockWidget
#list_widget = (ListThumbnail|ListWidget)
#base_order = ("name", "asc")
search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
add_exclude_columns = edit_exclude_columns = audit_exclude_columns
#label_columns = {"contact_group":"Contacts Group"}
#add_columns = person_list_columns + ref_columns + contact_columns
#edit_columns = person_list_columns + ref_columns + contact_columns
#list_columns = person_list_columns + ref_columns + contact_columns
#list_widget = ListBlock|ListItem|ListThumbnail|ListWidget (default)
#related_views =[]
#show_fieldsets = person_show_fieldset + contact_fieldset
#edit_fieldsets = add_fieldsets = \
# ref_fieldset + person_fieldset + contact_fieldset #+ activity_fieldset + place_fieldset + biometric_fieldset + employment_fieldset
#description_columns = {"name":"your models name column","address":"the address column"}
#show_template = "appbuilder/general/model/show_cascade.html"
#edit_template = "appbuilder/general/model/edit_cascade.html"
@action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
def muldelete(self, items):
self.datamodel.delete_all(items)
self.update_redirect()
return redirect(self.get_redirect())
class InvestigationView(ModelView):#MasterDetailView, MultipleView
datamodel=SQLAInterface(Investigation, db.session)
#add_title =
#list_title =
#edit_title =
#show_title =
#add_widget = (FormVerticalWidget|FormInlineWidget)
#show_widget = ShowBlockWidget
#list_widget = (ListThumbnail|ListWidget)
#base_order = ("name", "asc")
search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
add_exclude_columns = edit_exclude_columns = audit_exclude_columns
#label_columns = {"contact_group":"Contacts Group"}
#add_columns = person_list_columns + ref_columns + contact_columns
#edit_columns = person_list_columns + ref_columns + contact_columns
#list_columns = person_list_columns + ref_columns + contact_columns
#list_widget = ListBlock|ListItem|ListThumbnail|ListWidget (default)
#related_views =[]
#show_fieldsets = person_show_fieldset + contact_fieldset
#edit_fieldsets = add_fieldsets = \
# ref_fieldset + person_fieldset + contact_fieldset #+ activity_fieldset + place_fieldset + biometric_fieldset + employment_fieldset
#description_columns = {"name":"your models name column","address":"the address column"}
#show_template = "appbuilder/general/model/show_cascade.html"
#edit_template = "appbuilder/general/model/edit_cascade.html"
@action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
def muldelete(self, items):
self.datamodel.delete_all(items)
self.update_redirect()
return redirect(self.get_redirect())
class JoRankView(ModelView):#MasterDetailView, MultipleView
datamodel=SQLAInterface(JoRank, db.session)
#add_title =
#list_title =
#edit_title =
#show_title =
#add_widget = (FormVerticalWidget|FormInlineWidget)
#show_widget = ShowBlockWidget
#list_widget = (ListThumbnail|ListWidget)
#base_order = ("name", "asc")
search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
add_exclude_columns = edit_exclude_columns = audit_exclude_columns
#label_columns = {"contact_group":"Contacts Group"}
#add_columns = person_list_columns + ref_columns + contact_columns
#edit_columns = person_list_columns + ref_columns + contact_columns
#list_columns = person_list_columns + ref_columns + contact_columns
#list_widget = ListBlock|ListItem|ListThumbnail|ListWidget (default)
#related_views =[]
#show_fieldsets = person_show_fieldset + contact_fieldset
#edit_fieldsets = add_fieldsets = \
# ref_fieldset + person_fieldset + contact_fieldset #+ activity_fieldset + place_fieldset + biometric_fieldset + employment_fieldset
#description_columns = {"name":"your models name column","address":"the address column"}
#show_template = "appbuilder/general/model/show_cascade.html"
#edit_template = "appbuilder/general/model/edit_cascade.html"
@action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
def muldelete(self, items):
self.datamodel.delete_all(items)
self.update_redirect()
return redirect(self.get_redirect())
class JudicialofficerView(ModelView):#MasterDetailView, MultipleView
datamodel=SQLAInterface(Judicialofficer, db.session)
#add_title =
#list_title =
#edit_title =
#show_title =
#add_widget = (FormVerticalWidget|FormInlineWidget)
#show_widget = ShowBlockWidget
#list_widget = (ListThumbnail|ListWidget)
#base_order = ("name", "asc")
search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
add_exclude_columns = edit_exclude_columns = audit_exclude_columns
#label_columns = {"contact_group":"Contacts Group"}
#add_columns = person_list_columns + ref_columns + contact_columns
#edit_columns = person_list_columns + ref_columns + contact_columns
#list_columns = person_list_columns + ref_columns + contact_columns
#list_widget = ListBlock|ListItem|ListThumbnail|ListWidget (default)
#related_views =[]
#show_fieldsets = person_show_fieldset + contact_fieldset
#edit_fieldsets = add_fieldsets = \
# ref_fieldset + person_fieldset + contact_fieldset #+ activity_fieldset + place_fieldset + biometric_fieldset + employment_fieldset
#description_columns = {"name":"your models name column","address":"the address column"}
#show_template = "appbuilder/general/model/show_cascade.html"
#edit_template = "appbuilder/general/model/edit_cascade.html"
@action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
def muldelete(self, items):
self.datamodel.delete_all(items)
self.update_redirect()
return redirect(self.get_redirect())
class LawfirmView(ModelView):#MasterDetailView, MultipleView
datamodel=SQLAInterface(Lawfirm, db.session)
#add_title =
#list_title =
#edit_title =
#show_title =
#add_widget = (FormVerticalWidget|FormInlineWidget)
#show_widget = ShowBlockWidget
#list_widget = (ListThumbnail|ListWidget)
#base_order = ("name", "asc")
search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
add_exclude_columns = edit_exclude_columns = audit_exclude_columns
#label_columns = {"contact_group":"Contacts Group"}
#add_columns = person_list_columns + ref_columns + contact_columns
#edit_columns = person_list_columns + ref_columns + contact_columns
#list_columns = person_list_columns + ref_columns + contact_columns
#list_widget = ListBlock|ListItem|ListThumbnail|ListWidget (default)
#related_views =[]
#show_fieldsets = person_show_fieldset + contact_fieldset
#edit_fieldsets = add_fieldsets = \
# ref_fieldset + person_fieldset + contact_fieldset #+ activity_fieldset + place_fieldset + biometric_fieldset + employment_fieldset
#description_columns = {"name":"your models name column","address":"the address column"}
#show_template = "appbuilder/general/model/show_cascade.html"
#edit_template = "appbuilder/general/model/edit_cascade.html"
@action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
def muldelete(self, items):
self.datamodel.delete_all(items)
self.update_redirect()
return redirect(self.get_redirect())
class NatureofsuitView(ModelView):#MasterDetailView, MultipleView
datamodel=SQLAInterface(Natureofsuit, db.session)
#add_title =
#list_title =
#edit_title =
#show_title =
#add_widget = (FormVerticalWidget|FormInlineWidget)
#show_widget = ShowBlockWidget
#list_widget = (ListThumbnail|ListWidget)
#base_order = ("name", "asc")
search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
add_exclude_columns = edit_exclude_columns = audit_exclude_columns
#label_columns = {"contact_group":"Contacts Group"}
#add_columns = person_list_columns + ref_columns + contact_columns
#edit_columns = person_list_columns + ref_columns + contact_columns
#list_columns = person_list_columns + ref_columns + contact_columns
#list_widget = ListBlock|ListItem|ListThumbnail|ListWidget (default)
#related_views =[]
#show_fieldsets = person_show_fieldset + contact_fieldset
#edit_fieldsets = add_fieldsets = \
# ref_fieldset + person_fieldset + contact_fieldset #+ activity_fieldset + place_fieldset + biometric_fieldset + employment_fieldset
#description_columns = {"name":"your models name column","address":"the address column"}
#show_template = "appbuilder/general/model/show_cascade.html"
#edit_template = "appbuilder/general/model/edit_cascade.html"
@action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
def muldelete(self, items):
self.datamodel.delete_all(items)
self.update_redirect()
return redirect(self.get_redirect())
class PaymentView(ModelView):#MasterDetailView, MultipleView
datamodel=SQLAInterface(Payment, db.session)
#add_title =
#list_title =
#edit_title =
#show_title =
#add_widget = (FormVerticalWidget|FormInlineWidget)
#show_widget = ShowBlockWidget
#list_widget = (ListThumbnail|ListWidget)
#base_order = ("name", "asc")
search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
add_exclude_columns = edit_exclude_columns = audit_exclude_columns
#label_columns = {"contact_group":"Contacts Group"}
#add_columns = person_list_columns + ref_columns + contact_columns
#edit_columns = person_list_columns + ref_columns + contact_columns
#list_columns = person_list_columns + ref_columns + contact_columns
#list_widget = ListBlock|ListItem|ListThumbnail|ListWidget (default)
#related_views =[]
#show_fieldsets = person_show_fieldset + contact_fieldset
#edit_fieldsets = add_fieldsets = \
# ref_fieldset + person_fieldset + contact_fieldset #+ activity_fieldset + place_fieldset + biometric_fieldset + employment_fieldset
#description_columns = {"name":"your models name column","address":"the address column"}
#show_template = "appbuilder/general/model/show_cascade.html"
#edit_template = "appbuilder/general/model/edit_cascade.html"
@action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
def muldelete(self, items):
self.datamodel.delete_all(items)
self.update_redirect()
return redirect(self.get_redirect())
class PaymentmethodView(ModelView):#MasterDetailView, MultipleView
datamodel=SQLAInterface(Paymentmethod, db.session)
#add_title =
#list_title =
#edit_title =
#show_title =
#add_widget = (FormVerticalWidget|FormInlineWidget)
#show_widget = ShowBlockWidget
#list_widget = (ListThumbnail|ListWidget)
#base_order = ("name", "asc")
search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
add_exclude_columns = edit_exclude_columns = audit_exclude_columns
#label_columns = {"contact_group":"Contacts Group"}
#add_columns = person_list_columns + ref_columns + contact_columns
#edit_columns = person_list_columns + ref_columns + contact_columns
#list_columns = person_list_columns + ref_columns + contact_columns
#list_widget = ListBlock|ListItem|ListThumbnail|ListWidget (default)
#related_views =[]
#show_fieldsets = person_show_fieldset + contact_fieldset
#edit_fieldsets = add_fieldsets = \
# ref_fieldset + person_fieldset + contact_fieldset #+ activity_fieldset + place_fieldset + biometric_fieldset + employment_fieldset
#description_columns = {"name":"your models name column","address":"the address column"}
#show_template = "appbuilder/general/model/show_cascade.html"
#edit_template = "appbuilder/general/model/edit_cascade.html"
@action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
def muldelete(self, items):
self.datamodel.delete_all(items)
self.update_redirect()
return redirect(self.get_redirect())
class PlaintiffView(ModelView):#MasterDetailView, MultipleView
datamodel=SQLAInterface(Plaintiff, db.session)
#add_title =
#list_title =
#edit_title =
#show_title =
#add_widget = (FormVerticalWidget|FormInlineWidget)
#show_widget = ShowBlockWidget
#list_widget = (ListThumbnail|ListWidget)
#base_order = ("name", "asc")
search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
add_exclude_columns = edit_exclude_columns = audit_exclude_columns
#label_columns = {"contact_group":"Contacts Group"}
#add_columns = person_list_columns + ref_columns + contact_columns
#edit_columns = person_list_columns + ref_columns + contact_columns
#list_columns = person_list_columns + ref_columns + contact_columns
#list_widget = ListBlock|ListItem|ListThumbnail|ListWidget (default)
#related_views =[]
#show_fieldsets = person_show_fieldset + contact_fieldset
#edit_fieldsets = add_fieldsets = \
# ref_fieldset + person_fieldset + contact_fieldset #+ activity_fieldset + place_fieldset + biometric_fieldset + employment_fieldset
#description_columns = {"name":"your models name column","address":"the address column"}
#show_template = "appbuilder/general/model/show_cascade.html"
#edit_template = "appbuilder/general/model/edit_cascade.html"
@action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
def muldelete(self, items):
self.datamodel.delete_all(items)
self.update_redirect()
return redirect(self.get_redirect())
class PolicerankView(ModelView):#MasterDetailView, MultipleView
datamodel=SQLAInterface(Policerank, db.session)
#add_title =
#list_title =
#edit_title =
#show_title =
#add_widget = (FormVerticalWidget|FormInlineWidget)
#show_widget = ShowBlockWidget
#list_widget = (ListThumbnail|ListWidget)
#base_order = ("name", "asc")
search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
add_exclude_columns = edit_exclude_columns = audit_exclude_columns
#label_columns = {"contact_group":"Contacts Group"}
#add_columns = person_list_columns + ref_columns + contact_columns
#edit_columns = person_list_columns + ref_columns + contact_columns
#list_columns = person_list_columns + ref_columns + contact_columns
#list_widget = ListBlock|ListItem|ListThumbnail|ListWidget (default)
#related_views =[]
#show_fieldsets = person_show_fieldset + contact_fieldset
#edit_fieldsets = add_fieldsets = \
# ref_fieldset + person_fieldset + contact_fieldset #+ activity_fieldset + place_fieldset + biometric_fieldset + employment_fieldset
#description_columns = {"name":"your models name column","address":"the address column"}
#show_template = "appbuilder/general/model/show_cascade.html"
#edit_template = "appbuilder/general/model/edit_cascade.html"
@action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
def muldelete(self, items):
self.datamodel.delete_all(items)
self.update_redirect()
return redirect(self.get_redirect())
class PoliceroleView(ModelView):#MasterDetailView, MultipleView
datamodel=SQLAInterface(Policerole, db.session)
#add_title =
#list_title =
#edit_title =
#show_title =
#add_widget = (FormVerticalWidget|FormInlineWidget)
#show_widget = ShowBlockWidget
#list_widget = (ListThumbnail|ListWidget)
#base_order = ("name", "asc")
search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
add_exclude_columns = edit_exclude_columns = audit_exclude_columns
#label_columns = {"contact_group":"Contacts Group"}
#add_columns = person_list_columns + ref_columns + contact_columns
#edit_columns = person_list_columns + ref_columns + contact_columns
#list_columns = person_list_columns + ref_columns + contact_columns
#list_widget = ListBlock|ListItem|ListThumbnail|ListWidget (default)
#related_views =[]
#show_fieldsets = person_show_fieldset + contact_fieldset
#edit_fieldsets = add_fieldsets = \
# ref_fieldset + person_fieldset + contact_fieldset #+ activity_fieldset + place_fieldset + biometric_fieldset + employment_fieldset
#description_columns = {"name":"your models name column","address":"the address column"}
#show_template = "appbuilder/general/model/show_cascade.html"
#edit_template = "appbuilder/general/model/edit_cascade.html"
@action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
def muldelete(self, items):
self.datamodel.delete_all(items)
self.update_redirect()
return redirect(self.get_redirect())
class PolicestationView(ModelView):#MasterDetailView, MultipleView
datamodel=SQLAInterface(Policestation, db.session)
#add_title =
#list_title =
#edit_title =
#show_title =
#add_widget = (FormVerticalWidget|FormInlineWidget)
#show_widget = ShowBlockWidget
#list_widget = (ListThumbnail|ListWidget)
#base_order = ("name", "asc")
search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
add_exclude_columns = edit_exclude_columns = audit_exclude_columns
#label_columns = {"contact_group":"Contacts Group"}
#add_columns = person_list_columns + ref_columns + contact_columns
#edit_columns = person_list_columns + ref_columns + contact_columns
#list_columns = person_list_columns + ref_columns + contact_columns
#list_widget = ListBlock|ListItem|ListThumbnail|ListWidget (default)
#related_views =[]
#show_fieldsets = person_show_fieldset + contact_fieldset
#edit_fieldsets = add_fieldsets = \
# ref_fieldset + person_fieldset + contact_fieldset #+ activity_fieldset + place_fieldset + biometric_fieldset + employment_fieldset
#description_columns = {"name":"your models name column","address":"the address column"}
#show_template = "appbuilder/general/model/show_cascade.html"
#edit_template = "appbuilder/general/model/edit_cascade.html"
@action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
def muldelete(self, items):
self.datamodel.delete_all(items)
self.update_redirect()
return redirect(self.get_redirect())
class PolicestationtypeView(ModelView):#MasterDetailView, MultipleView
datamodel=SQLAInterface(Policestationtype, db.session)
#add_title =
#list_title =
#edit_title =
#show_title =
#add_widget = (FormVerticalWidget|FormInlineWidget)
#show_widget = ShowBlockWidget
#list_widget = (ListThumbnail|ListWidget)
#base_order = ("name", "asc")
search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
add_exclude_columns = edit_exclude_columns = audit_exclude_columns
#label_columns = {"contact_group":"Contacts Group"}
#add_columns = person_list_columns + ref_columns + contact_columns
#edit_columns = person_list_columns + ref_columns + contact_columns
#list_columns = person_list_columns + ref_columns + contact_columns
#list_widget = ListBlock|ListItem|ListThumbnail|ListWidget (default)
#related_views =[]
#show_fieldsets = person_show_fieldset + contact_fieldset
#edit_fieldsets = add_fieldsets = \
# ref_fieldset + person_fieldset + contact_fieldset #+ activity_fieldset + place_fieldset + biometric_fieldset + employment_fieldset
#description_columns = {"name":"your models name column","address":"the address column"}
#show_template = "appbuilder/general/model/show_cascade.html"
#edit_template = "appbuilder/general/model/edit_cascade.html"
@action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
def muldelete(self, items):
self.datamodel.delete_all(items)
self.update_redirect()
return redirect(self.get_redirect())
class PrisonView(ModelView):#MasterDetailView, MultipleView
datamodel=SQLAInterface(Prison, db.session)
#add_title =
#list_title =
#edit_title =
#show_title =
#add_widget = (FormVerticalWidget|FormInlineWidget)
#show_widget = ShowBlockWidget
#list_widget = (ListThumbnail|ListWidget)
#base_order = ("name", "asc")
search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
add_exclude_columns = edit_exclude_columns = audit_exclude_columns
#label_columns = {"contact_group":"Contacts Group"}
#add_columns = person_list_columns + ref_columns + contact_columns
#edit_columns = person_list_columns + ref_columns + contact_columns
#list_columns = person_list_columns + ref_columns + contact_columns
#list_widget = ListBlock|ListItem|ListThumbnail|ListWidget (default)
#related_views =[]
#show_fieldsets = person_show_fieldset + contact_fieldset
#edit_fieldsets = add_fieldsets = \
# ref_fieldset + person_fieldset + contact_fieldset #+ activity_fieldset + place_fieldset + biometric_fieldset + employment_fieldset
#description_columns = {"name":"your models name column","address":"the address column"}
#show_template = "appbuilder/general/model/show_cascade.html"
#edit_template = "appbuilder/general/model/edit_cascade.html"
@action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
def muldelete(self, items):
self.datamodel.delete_all(items)
self.update_redirect()
return redirect(self.get_redirect())
class PrisoncommitalView(ModelView):#MasterDetailView, MultipleView
datamodel=SQLAInterface(Prisoncommital, db.session)
#add_title =
#list_title =
#edit_title =
#show_title =
#add_widget = (FormVerticalWidget|FormInlineWidget)
#show_widget = ShowBlockWidget
#list_widget = (ListThumbnail|ListWidget)
#base_order = ("name", "asc")
search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
add_exclude_columns = edit_exclude_columns = audit_exclude_columns
#label_columns = {"contact_group":"Contacts Group"}
#add_columns = person_list_columns + ref_columns + contact_columns
#edit_columns = person_list_columns + ref_columns + contact_columns
#list_columns = person_list_columns + ref_columns + contact_columns
#list_widget = ListBlock|ListItem|ListThumbnail|ListWidget (default)
#related_views =[]
#show_fieldsets = person_show_fieldset + contact_fieldset
#edit_fieldsets = add_fieldsets = \
# ref_fieldset + person_fieldset + contact_fieldset #+ activity_fieldset + place_fieldset + biometric_fieldset + employment_fieldset
#description_columns = {"name":"your models name column","address":"the address column"}
#show_template = "appbuilder/general/model/show_cascade.html"
#edit_template = "appbuilder/general/model/edit_cascade.html"
@action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
def muldelete(self, items):
self.datamodel.delete_all(items)
self.update_redirect()
return redirect(self.get_redirect())
class ProsecutorView(ModelView):#MasterDetailView, MultipleView
datamodel=SQLAInterface(Prosecutor, db.session)
#add_title =
#list_title =
#edit_title =
#show_title =
#add_widget = (FormVerticalWidget|FormInlineWidget)
#show_widget = ShowBlockWidget
#list_widget = (ListThumbnail|ListWidget)
#base_order = ("name", "asc")
search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
add_exclude_columns = edit_exclude_columns = audit_exclude_columns
#label_columns = {"contact_group":"Contacts Group"}
#add_columns = person_list_columns + ref_columns + contact_columns
#edit_columns = person_list_columns + ref_columns + contact_columns
#list_columns = person_list_columns + ref_columns + contact_columns
#list_widget = ListBlock|ListItem|ListThumbnail|ListWidget (default)
#related_views =[]
#show_fieldsets = person_show_fieldset + contact_fieldset
#edit_fieldsets = add_fieldsets = \
# ref_fieldset + person_fieldset + contact_fieldset #+ activity_fieldset + place_fieldset + biometric_fieldset + employment_fieldset
#description_columns = {"name":"your models name column","address":"the address column"}
#show_template = "appbuilder/general/model/show_cascade.html"
#edit_template = "appbuilder/general/model/edit_cascade.html"
@action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
def muldelete(self, items):
self.datamodel.delete_all(items)
self.update_redirect()
return redirect(self.get_redirect())
class ProsecutorteamView(ModelView):#MasterDetailView, MultipleView
datamodel=SQLAInterface(Prosecutorteam, db.session)
#add_title =
#list_title =
#edit_title =
#show_title =
#add_widget = (FormVerticalWidget|FormInlineWidget)
#show_widget = ShowBlockWidget
#list_widget = (ListThumbnail|ListWidget)
#base_order = ("name", "asc")
search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
add_exclude_columns = edit_exclude_columns = audit_exclude_columns
#label_columns = {"contact_group":"Contacts Group"}
#add_columns = person_list_columns + ref_columns + contact_columns
#edit_columns = person_list_columns + ref_columns + contact_columns
#list_columns = person_list_columns + ref_columns + contact_columns
#list_widget = ListBlock|ListItem|ListThumbnail|ListWidget (default)
#related_views =[]
#show_fieldsets = person_show_fieldset + contact_fieldset
#edit_fieldsets = add_fieldsets = \
# ref_fieldset + person_fieldset + contact_fieldset #+ activity_fieldset + place_fieldset + biometric_fieldset + employment_fieldset
#description_columns = {"name":"your models name column","address":"the address column"}
#show_template = "appbuilder/general/model/show_cascade.html"
#edit_template = "appbuilder/general/model/edit_cascade.html"
@action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
def muldelete(self, items):
self.datamodel.delete_all(items)
self.update_redirect()
return redirect(self.get_redirect())
class SubcountyView(ModelView):#MasterDetailView, MultipleView
datamodel=SQLAInterface(Subcounty, db.session)
#add_title =
#list_title =
#edit_title =
#show_title =
#add_widget = (FormVerticalWidget|FormInlineWidget)
#show_widget = ShowBlockWidget
#list_widget = (ListThumbnail|ListWidget)
#base_order = ("name", "asc")
search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
add_exclude_columns = edit_exclude_columns = audit_exclude_columns
#label_columns = {"contact_group":"Contacts Group"}
#add_columns = person_list_columns + ref_columns + contact_columns
#edit_columns = person_list_columns + ref_columns + contact_columns
#list_columns = person_list_columns + ref_columns + contact_columns
#list_widget = ListBlock|ListItem|ListThumbnail|ListWidget (default)
#related_views =[]
#show_fieldsets = person_show_fieldset + contact_fieldset
#edit_fieldsets = add_fieldsets = \
# ref_fieldset + person_fieldset + contact_fieldset #+ activity_fieldset + place_fieldset + biometric_fieldset + employment_fieldset
#description_columns = {"name":"your models name column","address":"the address column"}
#show_template = "appbuilder/general/model/show_cascade.html"
#edit_template = "appbuilder/general/model/edit_cascade.html"
@action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
def muldelete(self, items):
self.datamodel.delete_all(items)
self.update_redirect()
return redirect(self.get_redirect())
class SuretyView(ModelView):#MasterDetailView, MultipleView
datamodel=SQLAInterface(Surety, db.session)
#add_title =
#list_title =
#edit_title =
#show_title =
#add_widget = (FormVerticalWidget|FormInlineWidget)
#show_widget = ShowBlockWidget
#list_widget = (ListThumbnail|ListWidget)
#base_order = ("name", "asc")
search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
add_exclude_columns = edit_exclude_columns = audit_exclude_columns
#label_columns = {"contact_group":"Contacts Group"}
#add_columns = person_list_columns + ref_columns + contact_columns
#edit_columns = person_list_columns + ref_columns + contact_columns
#list_columns = person_list_columns + ref_columns + contact_columns
#list_widget = ListBlock|ListItem|ListThumbnail|ListWidget (default)
#related_views =[]
#show_fieldsets = person_show_fieldset + contact_fieldset
#edit_fieldsets = add_fieldsets = \
# ref_fieldset + person_fieldset + contact_fieldset #+ activity_fieldset + place_fieldset + biometric_fieldset + employment_fieldset
#description_columns = {"name":"your models name column","address":"the address column"}
#show_template = "appbuilder/general/model/show_cascade.html"
#edit_template = "appbuilder/general/model/edit_cascade.html"
@action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
def muldelete(self, items):
self.datamodel.delete_all(items)
self.update_redirect()
return redirect(self.get_redirect())
class TownView(ModelView):#MasterDetailView, MultipleView
datamodel=SQLAInterface(Town, db.session)
#add_title =
#list_title =
#edit_title =
#show_title =
#add_widget = (FormVerticalWidget|FormInlineWidget)
#show_widget = ShowBlockWidget
#list_widget = (ListThumbnail|ListWidget)
#base_order = ("name", "asc")
search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
add_exclude_columns = edit_exclude_columns = audit_exclude_columns
#label_columns = {"contact_group":"Contacts Group"}
#add_columns = person_list_columns + ref_columns + contact_columns
#edit_columns = person_list_columns + ref_columns + contact_columns
#list_columns = person_list_columns + ref_columns + contact_columns
#list_widget = ListBlock|ListItem|ListThumbnail|ListWidget (default)
#related_views =[]
#show_fieldsets = person_show_fieldset + contact_fieldset
#edit_fieldsets = add_fieldsets = \
# ref_fieldset + person_fieldset + contact_fieldset #+ activity_fieldset + place_fieldset + biometric_fieldset + employment_fieldset
#description_columns = {"name":"your models name column","address":"the address column"}
#show_template = "appbuilder/general/model/show_cascade.html"
#edit_template = "appbuilder/general/model/edit_cascade.html"
@action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
def muldelete(self, items):
self.datamodel.delete_all(items)
self.update_redirect()
return redirect(self.get_redirect())
class WitnesView(ModelView):#MasterDetailView, MultipleView
datamodel=SQLAInterface(Witnes, db.session)
#add_title =
#list_title =
#edit_title =
#show_title =
#add_widget = (FormVerticalWidget|FormInlineWidget)
#show_widget = ShowBlockWidget
#list_widget = (ListThumbnail|ListWidget)
#base_order = ("name", "asc")
search_exclude_columns = person_exclude_columns + biometric_columns + person_search_exclude_columns
add_exclude_columns = edit_exclude_columns = audit_exclude_columns
#label_columns = {"contact_group":"Contacts Group"}
#add_columns = person_list_columns + ref_columns + contact_columns
#edit_columns = person_list_columns + ref_columns + contact_columns
#list_columns = person_list_columns + ref_columns + contact_columns
#list_widget = ListBlock|ListItem|ListThumbnail|ListWidget (default)
#related_views =[]
#show_fieldsets = person_show_fieldset + contact_fieldset
#edit_fieldsets = add_fieldsets = \
# ref_fieldset + person_fieldset + contact_fieldset #+ activity_fieldset + place_fieldset + biometric_fieldset + employment_fieldset
#description_columns = {"name":"your models name column","address":"the address column"}
#show_template = "appbuilder/general/model/show_cascade.html"
#edit_template = "appbuilder/general/model/edit_cascade.html"
@action("muldelete", "Delete", Markup("<p>Delete all Really?</p><p>Ok then...</p>"), "fa-rocket")
def muldelete(self, items):
self.datamodel.delete_all(items)
self.update_redirect()
return redirect(self.get_redirect())
class AttorneyChartView(GroupByChartView):
datamodel = SQLAInterface(Attorney , db.session)
chart_title = 'Grouped Attorney by Birth'
label_columns = AttorneyView.label_columns
chart_type = 'PieChart'
definitions = [
{
'group' : 'age_today',
"series" : [(aggregate_count,"age_today")]
},
{
'group' : 'gender',
"series" : [(aggregate_count,"age_today")]
}
]
class AttorneyTimeChartView(GroupByChartView):
datamodel = SQLAInterface(Attorney , db.session)
chart_title = 'Grouped Birth Attorney'
chart_type = 'AreaChart'
label_columns = AttorneyView.label_columns
definitions = [
{
'group' : 'age_today',
'formatter': pretty_month_year,
"series" : [(aggregate_count,"age_today")]
},
{
'group': 'age_today',
'formatter': pretty_year,
"series" : [(aggregate_count,"age_today")]
}
]
class PlaintiffChartView(GroupByChartView):
datamodel = SQLAInterface(Plaintiff , db.session)
chart_title = 'Grouped Plaintiff by Birth'
label_columns = PlaintiffView.label_columns
chart_type = 'PieChart'
definitions = [
{
'group' : 'age_today',
"series" : [(aggregate_count,"age_today")]
},
{
'group' : 'gender',
"series" : [(aggregate_count,"age_today")]
}
]
class PlaintiffTimeChartView(GroupByChartView):
datamodel = SQLAInterface(Plaintiff , db.session)
chart_title = 'Grouped Birth Plaintiff'
chart_type = 'AreaChart'
label_columns = PlaintiffView.label_columns
definitions = [
{
'group' : 'age_today',
'formatter': pretty_month_year,
"series" : [(aggregate_count,"age_today")]
},
{
'group': 'age_today',
'formatter': pretty_year,
"series" : [(aggregate_count,"age_today")]
}
]
class WitnessChartView(GroupByChartView):
datamodel = SQLAInterface(Witness , db.session)
chart_title = 'Grouped Witness by Birth'
label_columns = WitnessView.label_columns
chart_type = 'PieChart'
definitions = [
{
'group' : 'age_today',
"series" : [(aggregate_count,"age_today")]
},
{
'group' : 'gender',
"series" : [(aggregate_count,"age_today")]
}
]
class WitnessTimeChartView(GroupByChartView):
datamodel = SQLAInterface(Witness , db.session)
chart_title = 'Grouped Birth Witness'
chart_type = 'AreaChart'
label_columns = WitnessView.label_columns
definitions = [
{
'group' : 'age_today',
'formatter': pretty_month_year,
"series" : [(aggregate_count,"age_today")]
},
{
'group': 'age_today',
'formatter': pretty_year,
"series" : [(aggregate_count,"age_today")]
}
]
class SuretyChartView(GroupByChartView):
datamodel = SQLAInterface(Surety , db.session)
chart_title = 'Grouped Surety by Birth'
label_columns = SuretyView.label_columns
chart_type = 'PieChart'
definitions = [
{
'group' : 'age_today',
"series" : [(aggregate_count,"age_today")]
},
{
'group' : 'gender',
"series" : [(aggregate_count,"age_today")]
}
]
class SuretyTimeChartView(GroupByChartView):
datamodel = SQLAInterface(Surety , db.session)
chart_title = 'Grouped Birth Surety'
chart_type = 'AreaChart'
label_columns = SuretyView.label_columns
definitions = [
{
'group' : 'age_today',
'formatter': pretty_month_year,
"series" : [(aggregate_count,"age_today")]
},
{
'group': 'age_today',
'formatter': pretty_year,
"series" : [(aggregate_count,"age_today")]
}
]
class ProsecutorChartView(GroupByChartView):
datamodel = SQLAInterface(Prosecutor , db.session)
chart_title = 'Grouped Prosecutor by Birth'
label_columns = ProsecutorView.label_columns
chart_type = 'PieChart'
definitions = [
{
'group' : 'age_today',
"series" : [(aggregate_count,"age_today")]
},
{
'group' : 'gender',
"series" : [(aggregate_count,"age_today")]
}
]
class ProsecutorTimeChartView(GroupByChartView):
datamodel = SQLAInterface(Prosecutor , db.session)
chart_title = 'Grouped Birth Prosecutor'
chart_type = 'AreaChart'
label_columns = ProsecutorView.label_columns
definitions = [
{
'group' : 'age_today',
'formatter': pretty_month_year,
"series" : [(aggregate_count,"age_today")]
},
{
'group': 'age_today',
'formatter': pretty_year,
"series" : [(aggregate_count,"age_today")]
}
]
class PoliceofficerChartView(GroupByChartView):
datamodel = SQLAInterface(Policeofficer , db.session)
chart_title = 'Grouped Policeofficer by Birth'
label_columns = PoliceofficerView.label_columns
chart_type = 'PieChart'
definitions = [
{
'group' : 'age_today',
"series" : [(aggregate_count,"age_today")]
},
{
'group' : 'gender',
"series" : [(aggregate_count,"age_today")]
}
]
class PoliceofficerTimeChartView(GroupByChartView):
datamodel = SQLAInterface(Policeofficer , db.session)
chart_title = 'Grouped Birth Policeofficer'
chart_type = 'AreaChart'
label_columns = PoliceofficerView.label_columns
definitions = [
{
'group' : 'age_today',
'formatter': pretty_month_year,
"series" : [(aggregate_count,"age_today")]
},
{
'group': 'age_today',
'formatter': pretty_year,
"series" : [(aggregate_count,"age_today")]
}
]
class JudicialofficerChartView(GroupByChartView):
datamodel = SQLAInterface(Judicialofficer , db.session)
chart_title = 'Grouped Judicialofficer by Birth'
label_columns = JudicialofficerView.label_columns
chart_type = 'PieChart'
definitions = [
{
'group' : 'age_today',
"series" : [(aggregate_count,"age_today")]
},
{
'group' : 'gender',
"series" : [(aggregate_count,"age_today")]
}
]
class JudicialofficerTimeChartView(GroupByChartView):
datamodel = SQLAInterface(Judicialofficer , db.session)
chart_title = 'Grouped Birth Judicialofficer'
chart_type = 'AreaChart'
label_columns = JudicialofficerView.label_columns
definitions = [
{
'group' : 'age_today',
'formatter': pretty_month_year,
"series" : [(aggregate_count,"age_today")]
},
{
'group': 'age_today',
'formatter': pretty_year,
"series" : [(aggregate_count,"age_today")]
}
]
class DefendantChartView(GroupByChartView):
datamodel = SQLAInterface(Defendant , db.session)
chart_title = 'Grouped Defendant by Birth'
label_columns = DefendantView.label_columns
chart_type = 'PieChart'
definitions = [
{
'group' : 'age_today',
"series" : [(aggregate_count,"age_today")]
},
{
'group' : 'gender',
"series" : [(aggregate_count,"age_today")]
}
]
class DefendantTimeChartView(GroupByChartView):
datamodel = SQLAInterface(Defendant , db.session)
chart_title = 'Grouped Birth Defendant'
chart_type = 'AreaChart'
label_columns = DefendantView.label_columns
definitions = [
{
'group' : 'age_today',
'formatter': pretty_month_year,
"series" : [(aggregate_count,"age_today")]
},
{
'group': 'age_today',
'formatter': pretty_year,
"series" : [(aggregate_count,"age_today")]
}
]
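# Each dict in the `definitions` lists above renders one chart: `group` is the column
# to group rows by, the optional `formatter` reshapes the group key (here by month/year),
# and `series` lists (aggregate_function, column) pairs to plot.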
# How to create a MasterDetailView
#class DetailView(ModelView):
# datamodel = SQLAInterface(DetailTable, db.session)
#class MasterView(MasterDetailView):
# datamodel = SQLAInterface(MasterTable, db.session)
# related_views = [DetailView]
# How to create a MultipleView
#class MultipleViewsExp(MultipleView):
# views = [GroupModelView, ContactModelView]
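# A hypothetical, commented-out sketch of those two patterns using models defined in
# this module (it assumes Prisoncommital rows reference Prison -- adjust to the schema):
#class PrisoncommitalDetailView(ModelView):
#    datamodel = SQLAInterface(Prisoncommital, db.session)
#class PrisonMasterView(MasterDetailView):
#    datamodel = SQLAInterface(Prison, db.session)
#    related_views = [PrisoncommitalDetailView]
#class CourtAndCaseView(MultipleView):
#    views = [CourtView, CaseView]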
#View Registration
db.create_all()
fill_gender()
appbuilder.add_view(LawyerView(), "Lawyers", icon="fa-folder-open-o", category="Setup")
appbuilder.add_view(PolicemanView(), "Policemen", icon="fa-folder-open-o", category="Setup")
appbuilder.add_view(BailView(), "Bails", icon="fa-folder-open-o", category="Setup")
appbuilder.add_view(CaseView(), "Cases", icon="fa-folder-open-o", category="Setup")
appbuilder.add_view(CasecategoryView(), "Casecategories", icon="fa-folder-open-o", category="Setup")
appbuilder.add_view(CauseofactionView(), "Causeofactions", icon="fa-folder-open-o", category="Setup")
appbuilder.add_view(CommitaltypeView(), "Commitaltypes", icon="fa-folder-open-o", category="Setup")
appbuilder.add_view(ConstituencyView(), "Constituencies", icon="fa-folder-open-o", category="Setup")
appbuilder.add_view(CountyView(), "Counties", icon="fa-folder-open-o", category="Setup")
appbuilder.add_view(CourtView(), "Courts", icon="fa-folder-open-o", category="Setup")
appbuilder.add_view(CourtlevelView(), "Courtlevels", icon="fa-folder-open-o", category="Setup")
appbuilder.add_view(CourtstationView(), "Courtstations", icon="fa-folder-open-o", category="Setup")
appbuilder.add_view(DefendantView(), "Defendants", icon="fa-folder-open-o", category="Setup")
appbuilder.add_view(DoctemplateView(), "Doctemplates", icon="fa-folder-open-o", category="Setup")
appbuilder.add_view(DocumentView(), "Documents", icon="fa-folder-open-o", category="Setup")
appbuilder.add_view(FilingView(), "Filings", icon="fa-folder-open-o", category="Setup")
appbuilder.add_view(FilingtypeView(), "Filingtypes", icon="fa-folder-open-o", category="Setup")
appbuilder.add_view(GenderView(), "Genders", icon="fa-folder-open-o", category="Setup")
appbuilder.add_view(HearingView(), "Hearings", icon="fa-folder-open-o", category="Setup")
appbuilder.add_view(HearingtypeView(), "Hearingtypes", icon="fa-folder-open-o", category="Setup")
appbuilder.add_view(InvestigationView(), "Investigations", icon="fa-folder-open-o", category="Setup")
appbuilder.add_view(JoRankView(), "JoRanks", icon="fa-folder-open-o", category="Setup")
appbuilder.add_view(JudicialofficerView(), "Judicialofficers", icon="fa-folder-open-o", category="Setup")
appbuilder.add_view(LawfirmView(), "Lawfirms", icon="fa-folder-open-o", category="Setup")
appbuilder.add_view(NatureofsuitView(), "Natureofsuits", icon="fa-folder-open-o", category="Setup")
appbuilder.add_view(PaymentView(), "Payments", icon="fa-folder-open-o", category="Setup")
appbuilder.add_view(PaymentmethodView(), "Paymentmethods", icon="fa-folder-open-o", category="Setup")
appbuilder.add_view(PlaintiffView(), "Plaintiffs", icon="fa-folder-open-o", category="Setup")
appbuilder.add_view(PolicerankView(), "Policeranks", icon="fa-folder-open-o", category="Setup")
appbuilder.add_view(PoliceroleView(), "Policeroles", icon="fa-folder-open-o", category="Setup")
appbuilder.add_view(PolicestationView(), "Policestations", icon="fa-folder-open-o", category="Setup")
appbuilder.add_view(PolicestationtypeView(), "Policestationtypes", icon="fa-folder-open-o", category="Setup")
appbuilder.add_view(PrisonView(), "Prisons", icon="fa-folder-open-o", category="Setup")
appbuilder.add_view(PrisoncommitalView(), "Prisoncommitals", icon="fa-folder-open-o", category="Setup")
appbuilder.add_view(ProsecutorView(), "Prosecutors", icon="fa-folder-open-o", category="Setup")
appbuilder.add_view(ProsecutorteamView(), "Prosecutorteams", icon="fa-folder-open-o", category="Setup")
appbuilder.add_view(SubcountyView(), "Subcounties", icon="fa-folder-open-o", category="Setup")
appbuilder.add_view(SuretyView(), "Sureties", icon="fa-folder-open-o", category="Setup")
appbuilder.add_view(TownView(), "Towns", icon="fa-folder-open-o", category="Setup")
appbuilder.add_view(WitnesView(), "Witnesses", icon="fa-folder-open-o", category="Setup")
appbuilder.add_view(AttorneyChartView(), 'Attorney Age Chart', icon='fa-dashboard', category='Reports')
appbuilder.add_view(AttorneyTimeChartView(), 'Attorney Time Chart', icon='fa-dashboard', category='Reports')
appbuilder.add_view(PlaintiffChartView(), 'Plaintiff Age Chart', icon='fa-dashboard', category='Reports')
appbuilder.add_view(PlaintiffTimeChartView(), 'Plaintiff Time Chart', icon='fa-dashboard', category='Reports')
appbuilder.add_view(WitnessChartView(), 'Witness Age Chart', icon='fa-dashboard', category='Reports')
appbuilder.add_view(WitnessTimeChartView(), 'Witness Time Chart', icon='fa-dashboard', category='Reports')
appbuilder.add_view(SuretyChartView(), 'Surety Age Chart', icon='fa-dashboard', category='Reports')
appbuilder.add_view(SuretyTimeChartView(), 'Surety Time Chart', icon='fa-dashboard', category='Reports')
appbuilder.add_view(ProsecutorChartView(), 'Prosecutor Age Chart', icon='fa-dashboard', category='Reports')
appbuilder.add_view(ProsecutorTimeChartView(), 'Prosecutor Time Chart', icon='fa-dashboard', category='Reports')
appbuilder.add_view(PoliceofficerChartView(), 'Policeofficer Age Chart', icon='fa-dashboard', category='Reports')
appbuilder.add_view(PoliceofficerTimeChartView(), 'Policeofficer Time Chart', icon='fa-dashboard', category='Reports')
appbuilder.add_view(JudicialofficerChartView(), 'Judicialofficer Age Chart', icon='fa-dashboard', category='Reports')
appbuilder.add_view(JudicialofficerTimeChartView(), 'Judicialofficer Time Chart', icon='fa-dashboard', category='Reports')
appbuilder.add_view(DefendantChartView(), 'Defendant Age Chart', icon='fa-dashboard', category='Reports')
appbuilder.add_view(DefendantTimeChartView(), 'Defendant Time Chart', icon='fa-dashboard', category='Reports')
#appbuilder.add_separator("Setup")
#appbuilder.add_separator("My Views")
#appbuilder.add_link(name, href, icon='', label='', category='', category_icon='', category_label='', baseview=None)
| 43.02974 | 136 | 0.722925 | [
"MIT"
] | nyimbi/caseke | zarc/views_2017-08-03-21:28:54.py | 81,025 | Python |
import importlib.util
import logging
import re
import time
from collections import defaultdict
from inspect import getsource
from pathlib import Path
from types import ModuleType
from typing import Dict, List, Set, Type
import click
from flask_appbuilder import Model
from flask_migrate import downgrade, upgrade
from graphlib import TopologicalSorter # pylint: disable=wrong-import-order
from sqlalchemy import inspect
from rabbitai import db
from rabbitai.utils.mock_data import add_sample_rows
logger = logging.getLogger(__name__)
def import_migration_script(filepath: Path) -> ModuleType:
"""
    Import a migration script as if it were a regular module.
    :param filepath: path of the migration script file.
    :return: the loaded module object.
"""
spec = importlib.util.spec_from_file_location(filepath.stem, filepath)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
return module
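# Hypothetical usage of the helper above (the file path is made up):
#
#   module = import_migration_script(Path("migrations/versions/ab12cd34ef56_add_uuid.py"))
#   print(module.revision, module.down_revision)  # the script's revision identifiers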
def extract_modified_tables(module: ModuleType) -> Set[str]:
"""
    Extract the tables modified by a migration script.
    This function takes a simple approach: it scans the migration script's source code
    for known patterns. It could be improved by actually walking the AST.
"""
tables: Set[str] = set()
for function in {"upgrade", "downgrade"}:
source = getsource(getattr(module, function))
tables.update(re.findall(r'alter_table\(\s*"(\w+?)"\s*\)', source, re.DOTALL))
tables.update(re.findall(r'add_column\(\s*"(\w+?)"\s*,', source, re.DOTALL))
tables.update(re.findall(r'drop_column\(\s*"(\w+?)"\s*,', source, re.DOTALL))
return tables
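# Illustration of what the patterns above pick up from typical Alembic statements:
#
#   op.add_column("dashboards", sa.Column("uuid", UUIDType()))  -> "dashboards"
#   op.drop_column("slices", "uuid")                            -> "slices"
#   with op.batch_alter_table("tables") as batch_op: ...        -> "tables"
#
# Table names built dynamically (held in variables or helper wrappers) are missed,
# which is why walking the AST would be more robust.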
def find_models(module: ModuleType) -> List[Type[Model]]:
"""
    Find all models referenced by a migration script.
    :param module: the imported migration script module.
    :return: the model classes involved, topologically sorted by their foreign-key dependencies.
"""
models: List[Type[Model]] = []
tables = extract_modified_tables(module)
    # Add models explicitly defined in the migration script
queue = list(module.__dict__.values())
while queue:
obj = queue.pop()
if hasattr(obj, "__tablename__"):
tables.add(obj.__tablename__)
elif isinstance(obj, list):
queue.extend(obj)
elif isinstance(obj, dict):
queue.extend(obj.values())
    # Add models implicitly involved through their tables
for obj in Model._decl_class_registry.values():
if hasattr(obj, "__table__") and obj.__table__.fullname in tables:
models.append(obj)
    # Sort topologically so entities can be created in order while preserving relationships (e.g., create a database before creating the slices that reference it)
sorter = TopologicalSorter()
for model in models:
inspector = inspect(model)
dependent_tables: List[str] = []
for column in inspector.columns.values():
for foreign_key in column.foreign_keys:
dependent_tables.append(foreign_key.target_fullname.split(".")[0])
sorter.add(model.__tablename__, *dependent_tables)
order = list(sorter.static_order())
models.sort(key=lambda model: order.index(model.__tablename__))
return models
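# A quick, self-contained illustration of the ordering step above (pure graphlib, no DB):
#
#   ts = TopologicalSorter()
#   ts.add("slices", "dbs")          # slices depend on dbs
#   ts.add("dashboards", "slices")   # dashboards depend on slices
#   list(ts.static_order())          # -> ["dbs", "slices", "dashboards"]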
@click.command()
@click.argument("filepath")
@click.option("--limit", default=1000, help="Maximum number of entities.")
@click.option("--force", is_flag=True, help="Do not prompt for confirmation.")
@click.option("--no-auto-cleanup", is_flag=True, help="Do not remove created models.")
def main(
filepath: str, limit: int = 1000, force: bool = False, no_auto_cleanup: bool = False
) -> None:
auto_cleanup = not no_auto_cleanup
session = db.session()
print(f"Importing migration script: {filepath}")
module = import_migration_script(Path(filepath))
revision: str = getattr(module, "revision", "")
down_revision: str = getattr(module, "down_revision", "")
if not revision or not down_revision:
raise Exception(
"Not a valid migration script, couldn't find down_revision/revision"
)
print(f"Migration goes from {down_revision} to {revision}")
current_revision = db.engine.execute(
"SELECT version_num FROM alembic_version"
).scalar()
print(f"Current version of the DB is {current_revision}")
print("\nIdentifying models used in the migration:")
models = find_models(module)
model_rows: Dict[Type[Model], int] = {}
for model in models:
rows = session.query(model).count()
print(f"- {model.__name__} ({rows} rows in table {model.__tablename__})")
model_rows[model] = rows
session.close()
if current_revision != down_revision:
if not force:
click.confirm(
"\nRunning benchmark will downgrade the Rabbitai DB to "
f"{down_revision} and upgrade to {revision} again. There may "
"be data loss in downgrades. Continue?",
abort=True,
)
downgrade(revision=down_revision)
print("Benchmarking migration")
results: Dict[str, float] = {}
start = time.time()
upgrade(revision=revision)
duration = time.time() - start
results["Current"] = duration
print(f"Migration on current DB took: {duration:.2f} seconds")
min_entities = 10
new_models: Dict[Type[Model], List[Model]] = defaultdict(list)
while min_entities <= limit:
downgrade(revision=down_revision)
print(f"Running with at least {min_entities} entities of each model")
for model in models:
missing = min_entities - model_rows[model]
if missing > 0:
print(f"- Adding {missing} entities to the {model.__name__} model")
try:
added_models = add_sample_rows(session, model, missing)
except Exception:
session.rollback()
raise
model_rows[model] = min_entities
session.commit()
if auto_cleanup:
new_models[model].extend(added_models)
start = time.time()
upgrade(revision=revision)
duration = time.time() - start
print(f"Migration for {min_entities}+ entities took: {duration:.2f} seconds")
results[f"{min_entities}+"] = duration
min_entities *= 10
if auto_cleanup:
print("Cleaning up DB")
# delete in reverse order of creation to handle relationships
for model, entities in list(new_models.items())[::-1]:
session.query(model).filter(
model.id.in_(entity.id for entity in entities)
).delete(synchronize_session=False)
session.commit()
if current_revision != revision and not force:
click.confirm(f"\nRevert DB to {revision}?", abort=True)
upgrade(revision=revision)
print("Reverted")
print("\nResults:\n")
for label, duration in results.items():
print(f"{label}: {duration:.2f} s")
if __name__ == "__main__":
from rabbitai.app import create_app
app = create_app()
with app.app_context():
main()
| 32.727723 | 88 | 0.644683 | [
"Apache-2.0"
] | psbsgic/rabbitai | scripts/benchmark_migration.py | 6,911 | Python |
# coding: utf-8
"""
Flat API
    The Flat API allows you to easily extend the abilities of the [Flat Platform](https://flat.io), with a wide range of use cases including the following: * Creating and importing new music scores using MusicXML, MIDI, Guitar Pro (GP3, GP4, GP5, GPX, GP), PowerTab, TuxGuitar and MuseScore files * Browsing, updating, copying, exporting the user's scores (for example in MP3, WAV or MIDI) * Managing educational resources with Flat for Education: creating & updating the organization accounts, the classes, rosters and assignments. The Flat API is built on HTTP. Our API is RESTful. It has predictable resource URLs. It returns HTTP response codes to indicate errors. It also accepts and returns JSON in the HTTP body. The [schema](/swagger.yaml) of this API follows the [OpenAPI Initiative (OAI) specification](https://www.openapis.org/), you can use and work with [compatible Swagger tools](http://swagger.io/open-source-integrations/). This API features Cross-Origin Resource Sharing (CORS) implemented in compliance with [W3C spec](https://www.w3.org/TR/cors/). You can use your favorite HTTP/REST library for your programming language to use Flat's API. This specification and reference is [available on Github](https://github.com/FlatIO/api-reference). Getting Started and learn more: * [API Overview and introduction](https://flat.io/developers/docs/api/) * [Authentication (Personal Access Tokens or OAuth2)](https://flat.io/developers/docs/api/authentication.html) * [SDKs](https://flat.io/developers/docs/api/sdks.html) * [Rate Limits](https://flat.io/developers/docs/api/rate-limits.html) * [Changelog](https://flat.io/developers/docs/api/changelog.html) # noqa: E501
OpenAPI spec version: 2.7.0
Contact: developers@flat.io
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import flat_api
from flat_api.models.collection import Collection # noqa: E501
from flat_api.rest import ApiException
class TestCollection(unittest.TestCase):
"""Collection unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testCollection(self):
"""Test Collection"""
# FIXME: construct object with mandatory attributes with example values
# model = flat_api.models.collection.Collection() # noqa: E501
pass
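        # A hedged sketch of what a filled-in test might look like (the attribute names
        # are hypothetical -- check the generated Collection model for its real fields):
        #   collection = flat_api.models.collection.Collection(title="My scores")
        #   self.assertEqual(collection.title, "My scores")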
if __name__ == '__main__':
unittest.main()
| 59.658537 | 1,686 | 0.742845 | [
"Apache-2.0"
] | FlatIO/api-client-python | test/test_collection.py | 2,446 | Python |
from tkinter import *
from tkinter.ttk import * # styling library
from random import randint
root = Tk()
root.title("GUESS ME")
root.geometry("350x100")
root.configure(background='#AEB6BF')
#Style
style = Style()
style.theme_use('classic')
for elem in ['TLabel', 'TButton']:
style.configure(elem, background='#AEB6BF')
class ValueSmallError(Exception):
pass
class ValueLargeError(Exception):
pass
ans = randint(1,100)
def guess():
    try:
        num = int(num1.get())
        if num > ans:
            raise ValueLargeError
        elif num < ans:
            raise ValueSmallError
        else:
            Label(root, text="Congratulations, You won !!").grid(column=0, row=3)
    except ValueError:
        Label(root, text="Please enter a whole number (1-100)").grid(column=0, row=3)
    except ValueLargeError:
        Label(root, text="Your no is large, guess again").grid(column=0, row=3)
    except ValueSmallError:
        Label(root, text="Your no is small, guess again").grid(column=0, row=3)
return
Label(root,text = "\t\t*** GUESS ME ***").grid(column=0,row=0)
Label(root,text = "\nGuess the number(1-100)").grid(column=0,row=1)
num1 = Entry(root)
num1.grid(column=1,row=1)
btn1 = Button(root,text = "Submit",command = guess).grid(column=1,row=3)
root.mainloop()
| 25.489362 | 81 | 0.651085 | [
"BSD-3-Clause"
] | kmranrg/GuessMe | guess.py | 1,198 | Python |
from regpfa import * | 20 | 20 | 0.8 | [
"BSD-2-Clause"
] | damianangelo1712/pred_analytics_context_dbn | regpfa/__init__.py | 20 | Python |
import copy
import json
import logging
import time
from elasticsearch import Elasticsearch
import es_search_functions
from common.mongo_client import getMongoClient
from apps.apis.search.keyword_list import keyword_list
class SuggestionCacheBuilder:
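    # Builds the per-site search-term suggestion cache: starting from an empty term
    # tuple it facets the Elasticsearch item index on keywords and categories, then
    # recursively expands whitelisted keywords up to MAX_LEVEL terms deep, writing
    # each cache entry back to MongoDB via updateSearchTermsCache().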
MAX_LEVEL = 2
def __init__(self, site_id, mongo_client):
self.site_id = site_id
self.tasks = []
self.mongo_client = mongo_client
def output_entry(self, cache_entry):
self.mongo_client.updateSearchTermsCache(self.site_id, cache_entry)
def process_task(self, terms, terms_count, level):
cache_entry = {"terms": terms, "count": terms_count,
"categories": [],
"more_terms": []}
if level > self.MAX_LEVEL:
return
current_time = time.time()
if current_time - self.last_logging_time > 2:
self.last_logging_time = current_time
time_spent = time.time() - self.start_time
current_tasks = len(self.tasks)
if current_tasks != 0:
logging.debug("Time Spent: %s | TASKS: %s/%s | %s"
% (time_spent, self.finished_tasks, current_tasks,
self.finished_tasks/float(current_tasks + self.finished_tasks)))
if terms:
size_limit = 1000
else:
size_limit = 1000000
filter = {"term": {"available": True}}
keywords_facets = {'terms': {'field': 'keywords'},
"facet_filter": filter}
if size_limit:
keywords_facets["terms"]["size"] = size_limit
facets = {'keywords': keywords_facets,
'categories': {'terms': {'field': 'categories', 'size': 5},
"facet_filter": filter}
}
body={"facets": facets,
"filter": filter}
if terms:
body["query"] = es_search_functions.construct_query(" ".join(terms))
es = Elasticsearch()
res = es.search(index=es_search_functions.getESItemIndexName(self.site_id),
search_type="count",
body=body
)
suggested_categories = []
for facet in res["facets"]["categories"]["terms"]:
if facet["count"] < terms_count:
suggested_category = {"category_id": facet["term"], "count": facet["count"]}
cache_entry["categories"].append(suggested_category)
terms_to_check = []
for kw in res["facets"]["keywords"]["terms"]:
keyword_status = keyword_list.getKeywordStatus(self.site_id, kw["term"])
if keyword_status == keyword_list.WHITE_LIST and kw["count"] < terms_count:
terms_to_check.append(kw)
for term_to_check in terms_to_check:
cache_entry["more_terms"].append(copy.copy(term_to_check))
new_terms = list(terms) + [term_to_check["term"]]
new_terms.sort()
self.tasks.append((tuple(new_terms), term_to_check["count"], level + 1))
self.finished_tasks += 1
self.output_entry(cache_entry)
def rebuild(self):
self.start_time = time.time()
self.last_logging_time = time.time()
self.finished_tasks = 0
# FIXME
logging.debug("Start to rebuild Suggestion Cache for site: %s" % self.site_id)
self.tasks.append((tuple(), 1000000, 0))
while len(self.tasks) > 0:
task = self.tasks.pop(0)
self.process_task(*task)
finish_time = time.time()
logging.debug("Finished to rebuild Suggestion Cache for site: %s, total time spent: %s seconds." % (self.site_id, finish_time - self.start_time))
def rebuild_suggestion_cache(site_id):
mongo_client = getMongoClient()
builder = SuggestionCacheBuilder(site_id, mongo_client)
builder.rebuild()
| 37.214953 | 153 | 0.583124 | [
"MIT"
] | sunliwen/poco | poco/apps/apis/search/suggestion_cache_builder.py | 3,982 | Python |
# Copyright (C) 2020 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import uuid
from tooz import coordination
coordinator = coordination.get_coordinator('zake://', b'host-1')
coordinator.start()
# Create a group
group = bytes(str(uuid.uuid4()).encode('ascii'))
request = coordinator.create_group(group)
request.get()
def group_joined(event):
# Event is an instance of tooz.coordination.MemberJoinedGroup
print(event.group_id, event.member_id)
coordinator.watch_join_group(group, group_joined)
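# Note: the callback above only fires when the coordinator processes its watches --
# in a real setup another member would join this group and this process would call,
# for example:
#   coordinator.run_watchers()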
coordinator.stop()
| 28.944444 | 69 | 0.760077 | [
"Apache-2.0"
] | blockvigil/tooz | examples/group_membership_watch.py | 1,042 | Python |
# -*- coding: utf-8 -*-
# @Time: 2020/4/17 12:40
# @Author: GraceKoo
# @File: 241_different-ways-to-add-parentheses.py
# @Desc: https://leetcode-cn.com/problems/different-ways-to-add-parentheses/
from typing import List
class Solution:
def diffWaysToCompute(self, input: str) -> List[int]:
if input.isdigit():
return [int(input)]
res = []
for index, value in enumerate(input):
if value in ["+", "-", "*"]:
left = self.diffWaysToCompute(input[:index])
right = self.diffWaysToCompute(input[index + 1 :])
# 合并结果
for l in left:
for r in right:
if value == "+":
res.append(l + r)
elif value == "-":
res.append(l - r)
elif value == "*":
res.append(l * r)
return res
so = Solution()
print(so.diffWaysToCompute("2*3-4*5"))
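# For "2*3-4*5" the five parenthesizations evaluate to -34, -14, -10, -10 and 10
# (e.g. 2*(3-(4*5)) = -34, (2*3)-(4*5) = -14, ((2*3)-4)*5 = 10); the order of the
# returned list depends on the recursion.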
| 31.84375 | 76 | 0.47105 | [
"Apache-2.0"
] | Buddy119/algorithm | Codes/gracekoo/241_different-ways-to-add-parentheses.py | 1,027 | Python |
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'projeto_curso_2.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| 29.173913 | 79 | 0.682563 | [
"MIT"
] | Jhonattan-rocha/Meus-Projetos | Python/Django/projeto_curso_2/manage.py | 671 | Python |
# Generated by Django 3.1.7 on 2021-03-07 11:27
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('app', '0005_auto_20210303_1338'),
]
operations = [
migrations.CreateModel(
name='House',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=20)),
('uniqueCode', models.CharField(max_length=10)),
('inhabitants', models.ManyToManyField(related_name='House', to=settings.AUTH_USER_MODEL)),
],
),
]
| 27.875 | 102 | 0.723468 | [
"Apache-2.0"
] | connorkeevill/CM20257-housemates | app/migrations/0006_house.py | 669 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import select
import socket
import queue
import time
import os
class emsc_select_server:
def __init__(self):
self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.server.setblocking(False)
self.server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.server_address = ('0.0.0.0', 5000)
self.server.bind(self.server_address)
self.server.listen(1000)
self.inputs = [self.server]
self.outputs = []
self.message_queues = {}
self.timeout = 20
def run(self):
response = "接收成功,返回数据: connecting status: 200 \n"
response += "haody,client ! | "
while self.inputs:
print("waiting for next event")
            # select() timeout: if no socket becomes ready within this many seconds,
            # the server stops waiting and shuts down
readable, writable, exceptional = select.select(self.inputs, self.outputs, self.inputs, self.timeout)
if not (readable or writable or exceptional):
print("Time out ! ")
break
for ser in readable:
if ser is self.server:
                    # the listening socket is readable: a new client connection is waiting
connection, client_address = ser.accept()
print("connection from ", client_address)
connection.setblocking(0)
self.inputs.append(connection)
self.message_queues[connection] = queue.Queue()
else:
data = ser.recv(1024)
if data:
print("收到数据 ", data.decode(), "\n来自:", ser.getpeername())
self.message_queues[ser].put(data)
                        # register the socket so we also watch it for writability
if ser not in self.outputs:
self.outputs.append(ser)
else:
print("closing", client_address)
if ser in self.outputs:
self.outputs.remove(ser)
self.inputs.remove(ser)
ser.close()
                        # drop the per-connection message queue
del self.message_queues[ser]
for ser in writable:
try:
next_msg = self.message_queues[ser].get_nowait()
except queue.Empty:
print(ser.getpeername(), 'queue empty')
self.outputs.remove(ser)
else:
print("发送数据 ", str(response + next_msg.decode()), " to ", ser.getpeername(),"\n")
ser.send(response.encode()+next_msg)
for ser in exceptional:
print(" exception condition on ", ser.getpeername())
# stop listening for input on the connection
self.inputs.remove(ser)
if ser in self.outputs:
self.outputs.remove(ser)
ser.close()
                # discard the queued messages for this connection
del self.message_queues[ser]
if __name__=="__main__":
select_server = emsc_select_server()
select_server.run() | 37.578313 | 113 | 0.506252 | [
"Apache-2.0"
] | roancsu/shadowX | run/server_select.py | 3,253 | Python |
# File: __init__.py
#
# Copyright (c) 2018-2019 Splunk Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific language governing permissions
# and limitations under the License.
| 40.266667 | 95 | 0.756623 | [
"Apache-2.0"
] | splunk-soar-connectors/hipchat | __init__.py | 604 | Python |
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import os
import pytest
from azure.communication.sms.aio import SmsClient
from azure.communication.sms import (
PhoneNumber, SendSmsOptions
)
from _shared.asynctestcase import AsyncCommunicationTestCase
from _shared.testcase import (
BodyReplacerProcessor, ResponseReplacerProcessor
)
class SMSClientTestAsync(AsyncCommunicationTestCase):
def __init__(self, method_name):
super(SMSClientTestAsync, self).__init__(method_name)
def setUp(self):
super(SMSClientTestAsync, self).setUp()
if self.is_playback():
self.phone_number = "+18000005555"
else:
self.phone_number = os.getenv("PHONE_NUMBER")
self.recording_processors.extend([
BodyReplacerProcessor(keys=["to", "from", "messageId"]),
ResponseReplacerProcessor(keys=[self._resource_name])])
@AsyncCommunicationTestCase.await_prepared_test
@pytest.mark.live_test_only
async def test_send_sms_async(self):
sms_client = SmsClient.from_connection_string(self.connection_str)
async with sms_client:
# calling send() with sms values
sms_response = await sms_client.send(
from_phone_number=PhoneNumber(self.phone_number),
to_phone_numbers=[PhoneNumber(self.phone_number)],
message="Hello World via SMS",
send_sms_options=SendSmsOptions(enable_delivery_report=True)) # optional property
assert sms_response.message_id is not None
| 37.22449 | 98 | 0.650768 | [
"MIT"
] | Co0olboi/azure-sdk-for-python | sdk/communication/azure-communication-sms/tests/test_sms_client_e2e_async.py | 1,824 | Python |
"""
Ex 012 - make an algorithm that reads the price of a product and shows it with a 5% discount
"""
print('Find out how much a product costs with a 5% discount')
print('-' * 50)
pp = float(input('Enter the product price: '))
pd = pp - (pp / 100) * 5
print('-' * 50)
print(f"The product price was {pp:.2f}, on promotion of 5% will cost {pd:.2f}")
input('Enter to exit')
| 24.733333 | 90 | 0.652291 | [
"MIT"
] | Kevinwmiguel/PythonExercises | Ex12.py | 371 | Python |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['FeatureGroupArgs', 'FeatureGroup']
@pulumi.input_type
class FeatureGroupArgs:
def __init__(__self__, *,
event_time_feature_name: pulumi.Input[str],
feature_definitions: pulumi.Input[Sequence[pulumi.Input['FeatureGroupFeatureDefinitionArgs']]],
record_identifier_feature_name: pulumi.Input[str],
description: Optional[pulumi.Input[str]] = None,
feature_group_name: Optional[pulumi.Input[str]] = None,
offline_store_config: Optional[pulumi.Input['OfflineStoreConfigPropertiesArgs']] = None,
online_store_config: Optional[pulumi.Input['OnlineStoreConfigPropertiesArgs']] = None,
role_arn: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Sequence[pulumi.Input['FeatureGroupTagArgs']]]] = None):
"""
The set of arguments for constructing a FeatureGroup resource.
:param pulumi.Input[str] event_time_feature_name: The Event Time Feature Name.
:param pulumi.Input[Sequence[pulumi.Input['FeatureGroupFeatureDefinitionArgs']]] feature_definitions: An Array of Feature Definition
:param pulumi.Input[str] record_identifier_feature_name: The Record Identifier Feature Name.
:param pulumi.Input[str] description: Description about the FeatureGroup.
:param pulumi.Input[str] feature_group_name: The Name of the FeatureGroup.
:param pulumi.Input[str] role_arn: Role Arn
:param pulumi.Input[Sequence[pulumi.Input['FeatureGroupTagArgs']]] tags: An array of key-value pair to apply to this resource.
"""
pulumi.set(__self__, "event_time_feature_name", event_time_feature_name)
pulumi.set(__self__, "feature_definitions", feature_definitions)
pulumi.set(__self__, "record_identifier_feature_name", record_identifier_feature_name)
if description is not None:
pulumi.set(__self__, "description", description)
if feature_group_name is not None:
pulumi.set(__self__, "feature_group_name", feature_group_name)
if offline_store_config is not None:
pulumi.set(__self__, "offline_store_config", offline_store_config)
if online_store_config is not None:
pulumi.set(__self__, "online_store_config", online_store_config)
if role_arn is not None:
pulumi.set(__self__, "role_arn", role_arn)
if tags is not None:
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter(name="eventTimeFeatureName")
def event_time_feature_name(self) -> pulumi.Input[str]:
"""
The Event Time Feature Name.
"""
return pulumi.get(self, "event_time_feature_name")
@event_time_feature_name.setter
def event_time_feature_name(self, value: pulumi.Input[str]):
pulumi.set(self, "event_time_feature_name", value)
@property
@pulumi.getter(name="featureDefinitions")
def feature_definitions(self) -> pulumi.Input[Sequence[pulumi.Input['FeatureGroupFeatureDefinitionArgs']]]:
"""
An Array of Feature Definition
"""
return pulumi.get(self, "feature_definitions")
@feature_definitions.setter
def feature_definitions(self, value: pulumi.Input[Sequence[pulumi.Input['FeatureGroupFeatureDefinitionArgs']]]):
pulumi.set(self, "feature_definitions", value)
@property
@pulumi.getter(name="recordIdentifierFeatureName")
def record_identifier_feature_name(self) -> pulumi.Input[str]:
"""
The Record Identifier Feature Name.
"""
return pulumi.get(self, "record_identifier_feature_name")
@record_identifier_feature_name.setter
def record_identifier_feature_name(self, value: pulumi.Input[str]):
pulumi.set(self, "record_identifier_feature_name", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
Description about the FeatureGroup.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="featureGroupName")
def feature_group_name(self) -> Optional[pulumi.Input[str]]:
"""
The Name of the FeatureGroup.
"""
return pulumi.get(self, "feature_group_name")
@feature_group_name.setter
def feature_group_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "feature_group_name", value)
@property
@pulumi.getter(name="offlineStoreConfig")
def offline_store_config(self) -> Optional[pulumi.Input['OfflineStoreConfigPropertiesArgs']]:
return pulumi.get(self, "offline_store_config")
@offline_store_config.setter
def offline_store_config(self, value: Optional[pulumi.Input['OfflineStoreConfigPropertiesArgs']]):
pulumi.set(self, "offline_store_config", value)
@property
@pulumi.getter(name="onlineStoreConfig")
def online_store_config(self) -> Optional[pulumi.Input['OnlineStoreConfigPropertiesArgs']]:
return pulumi.get(self, "online_store_config")
@online_store_config.setter
def online_store_config(self, value: Optional[pulumi.Input['OnlineStoreConfigPropertiesArgs']]):
pulumi.set(self, "online_store_config", value)
@property
@pulumi.getter(name="roleArn")
def role_arn(self) -> Optional[pulumi.Input[str]]:
"""
Role Arn
"""
return pulumi.get(self, "role_arn")
@role_arn.setter
def role_arn(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "role_arn", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['FeatureGroupTagArgs']]]]:
"""
An array of key-value pair to apply to this resource.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['FeatureGroupTagArgs']]]]):
pulumi.set(self, "tags", value)
class FeatureGroup(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
description: Optional[pulumi.Input[str]] = None,
event_time_feature_name: Optional[pulumi.Input[str]] = None,
feature_definitions: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['FeatureGroupFeatureDefinitionArgs']]]]] = None,
feature_group_name: Optional[pulumi.Input[str]] = None,
offline_store_config: Optional[pulumi.Input[pulumi.InputType['OfflineStoreConfigPropertiesArgs']]] = None,
online_store_config: Optional[pulumi.Input[pulumi.InputType['OnlineStoreConfigPropertiesArgs']]] = None,
record_identifier_feature_name: Optional[pulumi.Input[str]] = None,
role_arn: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['FeatureGroupTagArgs']]]]] = None,
__props__=None):
"""
Resource Type definition for AWS::SageMaker::FeatureGroup
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] description: Description about the FeatureGroup.
:param pulumi.Input[str] event_time_feature_name: The Event Time Feature Name.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['FeatureGroupFeatureDefinitionArgs']]]] feature_definitions: An Array of Feature Definition
:param pulumi.Input[str] feature_group_name: The Name of the FeatureGroup.
:param pulumi.Input[str] record_identifier_feature_name: The Record Identifier Feature Name.
:param pulumi.Input[str] role_arn: Role Arn
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['FeatureGroupTagArgs']]]] tags: An array of key-value pair to apply to this resource.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: FeatureGroupArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Resource Type definition for AWS::SageMaker::FeatureGroup
:param str resource_name: The name of the resource.
:param FeatureGroupArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(FeatureGroupArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
description: Optional[pulumi.Input[str]] = None,
event_time_feature_name: Optional[pulumi.Input[str]] = None,
feature_definitions: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['FeatureGroupFeatureDefinitionArgs']]]]] = None,
feature_group_name: Optional[pulumi.Input[str]] = None,
offline_store_config: Optional[pulumi.Input[pulumi.InputType['OfflineStoreConfigPropertiesArgs']]] = None,
online_store_config: Optional[pulumi.Input[pulumi.InputType['OnlineStoreConfigPropertiesArgs']]] = None,
record_identifier_feature_name: Optional[pulumi.Input[str]] = None,
role_arn: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['FeatureGroupTagArgs']]]]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = FeatureGroupArgs.__new__(FeatureGroupArgs)
__props__.__dict__["description"] = description
if event_time_feature_name is None and not opts.urn:
raise TypeError("Missing required property 'event_time_feature_name'")
__props__.__dict__["event_time_feature_name"] = event_time_feature_name
if feature_definitions is None and not opts.urn:
raise TypeError("Missing required property 'feature_definitions'")
__props__.__dict__["feature_definitions"] = feature_definitions
__props__.__dict__["feature_group_name"] = feature_group_name
__props__.__dict__["offline_store_config"] = offline_store_config
__props__.__dict__["online_store_config"] = online_store_config
if record_identifier_feature_name is None and not opts.urn:
raise TypeError("Missing required property 'record_identifier_feature_name'")
__props__.__dict__["record_identifier_feature_name"] = record_identifier_feature_name
__props__.__dict__["role_arn"] = role_arn
__props__.__dict__["tags"] = tags
super(FeatureGroup, __self__).__init__(
'aws-native:sagemaker:FeatureGroup',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'FeatureGroup':
"""
Get an existing FeatureGroup resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = FeatureGroupArgs.__new__(FeatureGroupArgs)
__props__.__dict__["description"] = None
__props__.__dict__["event_time_feature_name"] = None
__props__.__dict__["feature_definitions"] = None
__props__.__dict__["feature_group_name"] = None
__props__.__dict__["offline_store_config"] = None
__props__.__dict__["online_store_config"] = None
__props__.__dict__["record_identifier_feature_name"] = None
__props__.__dict__["role_arn"] = None
__props__.__dict__["tags"] = None
return FeatureGroup(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
Description about the FeatureGroup.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="eventTimeFeatureName")
def event_time_feature_name(self) -> pulumi.Output[str]:
"""
The Event Time Feature Name.
"""
return pulumi.get(self, "event_time_feature_name")
@property
@pulumi.getter(name="featureDefinitions")
def feature_definitions(self) -> pulumi.Output[Sequence['outputs.FeatureGroupFeatureDefinition']]:
"""
An Array of Feature Definition
"""
return pulumi.get(self, "feature_definitions")
@property
@pulumi.getter(name="featureGroupName")
def feature_group_name(self) -> pulumi.Output[str]:
"""
The Name of the FeatureGroup.
"""
return pulumi.get(self, "feature_group_name")
@property
@pulumi.getter(name="offlineStoreConfig")
def offline_store_config(self) -> pulumi.Output[Optional['outputs.OfflineStoreConfigProperties']]:
return pulumi.get(self, "offline_store_config")
@property
@pulumi.getter(name="onlineStoreConfig")
def online_store_config(self) -> pulumi.Output[Optional['outputs.OnlineStoreConfigProperties']]:
return pulumi.get(self, "online_store_config")
@property
@pulumi.getter(name="recordIdentifierFeatureName")
def record_identifier_feature_name(self) -> pulumi.Output[str]:
"""
The Record Identifier Feature Name.
"""
return pulumi.get(self, "record_identifier_feature_name")
@property
@pulumi.getter(name="roleArn")
def role_arn(self) -> pulumi.Output[Optional[str]]:
"""
Role Arn
"""
return pulumi.get(self, "role_arn")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Sequence['outputs.FeatureGroupTag']]]:
"""
An array of key-value pair to apply to this resource.
"""
return pulumi.get(self, "tags")
| 45.848837 | 158 | 0.674423 | [
"Apache-2.0"
] | AaronFriel/pulumi-aws-native | sdk/python/pulumi_aws_native/sagemaker/feature_group.py | 15,772 | Python |
import unittest
from os.path import abspath, join
from robot import api, parsing, reporting, result, running
from robot.utils.asserts import assert_equals
class TestExposedApi(unittest.TestCase):
def test_test_case_file(self):
assert_equals(api.TestCaseFile, parsing.TestCaseFile)
def test_test_data_directory(self):
assert_equals(api.TestDataDirectory, parsing.TestDataDirectory)
def test_resource_file(self):
assert_equals(api.ResourceFile, parsing.ResourceFile)
def test_test_data(self):
assert_equals(api.TestData, parsing.TestData)
def test_execution_result(self):
assert_equals(api.ExecutionResult, result.ExecutionResult)
def test_test_suite(self):
assert_equals(api.TestSuite, running.TestSuite)
def test_result_writer(self):
assert_equals(api.ResultWriter, reporting.ResultWriter)
class TestTestSuiteBuilder(unittest.TestCase):
misc = join(abspath(__file__), '..', '..', '..', 'atest', 'testdata', 'misc')
    sources = [join(misc, n) for n in ('pass_and_fail.robot', 'normal.robot')]
def test_create_with_datasources_as_list(self):
suite = api.TestSuiteBuilder().build(*self.sources)
assert_equals(suite.name, 'Pass And Fail & Normal')
def test_create_with_datasource_as_string(self):
suite = api.TestSuiteBuilder().build(self.sources[0])
assert_equals(suite.name, 'Pass And Fail')
if __name__ == '__main__':
unittest.main()
| 30.265306 | 81 | 0.724882 | [
"ECL-2.0",
"Apache-2.0"
] | Mogztter/robotframework | utest/api/test_exposed_api.py | 1,483 | Python |
from api.views import BucketlistViewSet, ItemViewSet, UserViewSet
from django.conf.urls import url, include
from rest_framework.authtoken import views as authviews
from rest_framework_nested import routers
router = routers.SimpleRouter()
router.register(r'bucketlists', BucketlistViewSet)
router.register(r'users', UserViewSet)
bucketlists_router = routers.NestedSimpleRouter(router, r'bucketlists',
lookup='bucketlist')
bucketlists_router.register(r'items', ItemViewSet)
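# The nested router exposes item routes beneath their parent bucketlist, e.g.
# /bucketlists/{bucketlist_pk}/items/ (the URL kwarg name follows the 'bucketlist' lookup).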
urlpatterns = [
url(r'^', include(router.urls)),
url(r'^', include(bucketlists_router.urls)),
url(r'^auth/login/', authviews.obtain_auth_token, name='api-login'),
]
| 36.842105 | 72 | 0.732857 | [
"Unlicense"
] | andela-mnzomo/life-list | lifelist/api/urls.py | 700 | Python |
from django.contrib import admin
from django.urls import path
from django.contrib.auth.views import LoginView
from . import views
app_name = 'users'
urlpatterns = [
# ex /users/
path('', views.index, name='index'),
# ex /users/login/
path('login/', LoginView.as_view(template_name='users/login.html'),
name='login'),
# ex /users/logout/
path('logout/', views.logout_view, name='logout'),
# ex /users/register/
path('register/', views.register, name='register'),
]
| 21.541667 | 72 | 0.649903 | [
"MIT"
] | Kowies/ToDo-web-app | users/urls.py | 517 | Python |
import mido
import json
import time
from math import floor
import board
import busio
import digitalio
import adafruit_tlc5947
def playMidi(song_name):
mid = mido.MidiFile('midifiles/' + song_name)
notesDict = {'songName': 'testname', 'bpm': 999, 'notes': []}
tempo = 0
length = 0
notesArray = [[]]
tickLength = 0
SCK = board.SCK
MOSI = board.MOSI
LATCH = digitalio.DigitalInOut(board.D5)
# Initialize SPI bus.
spi = busio.SPI(clock=SCK, MOSI=MOSI)
# Initialize TLC5947
tlc5947 = adafruit_tlc5947.TLC5947(spi, LATCH, auto_write=False,
num_drivers=4)
for x in range(88):
tlc5947[x] = 0
tlc5947.write()
for msg in mid:
if msg.is_meta and msg.type == 'set_tempo':
tempo = int(msg.tempo)
length = int(floor(mido.second2tick(mid.length,
mid.ticks_per_beat,
tempo)))
tickLength = mido.tick2second(1, mid.ticks_per_beat, tempo)
break
print('Tick length: ' + str(tickLength))
currentTick = 0
notesArray[0] = [0 for x in range(89)]
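    # Each row of notesArray holds 88 key velocities (indexed by MIDI note - 12)
    # plus, at index 88, the delay in ticks to wait after playing the row.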
lineIncrement = 0
for msg in mid:
#print(msg)
        if msg.type == 'note_on' or msg.type == 'note_off':
delayAfter = int(floor(mido.second2tick(msg.time, mid.ticks_per_beat, tempo)))
if delayAfter == 0:
if msg.note < 89:
notesArray[lineIncrement][msg.note - 12] = msg.velocity
else:
notesArray[lineIncrement][88] = delayAfter
notesArray.append([0 for x in range(89)])
lineIncrement += 1
""" Old code:
for x in range (newNote['delayAfter']):
if x != 0:
notesArray[x+currentTick] = notesArray[x+currentTick-1]
currentTick += newNote['delayAfter']
notesArray[currentTick][newNote['note'] - 1] = newNote['velocity']
# tlc5947.write()
notesDict['notes'].append(newNote)
"""
"""
with open('notes.json', 'w') as outfile:
json.dump(notesDict, outfile)
"""
startTime = time.time()
tlc5947.write()
time.sleep(3)
for line in notesArray:
"""
tlc5947[27] = 900
tlc5947[68] = 4000
tlc5947.write()
time.sleep(2)
tlc5947[27] = 0
tlc5947[68] = 0
tlc5947.write()
time.sleep(2)
"""
print(line)
# send array to PWM IC
for x in range(len(line) - 1):
if line[x] != 0:
tlc5947[x] = line[x] * 32
else:
tlc5947[x] = 0
tlc5947.write()
# time.sleep(tickLength)
time.sleep(mido.tick2second(line[88], mid.ticks_per_beat, tempo) * 0.4)
for x in range(88):
tlc5947[x] = 0
tlc5947.write()
time.sleep(mido.tick2second(line[88], mid.ticks_per_beat, tempo) * 0.6)
for x in range(88):
tlc5947[x] = 0
tlc5947.write()
#playMidi('twinkle_twinkle.mid')
#playMidi('for_elise_by_beethoven.mid')
# playMidi('debussy_clair_de_lune.mid')
# playMidi('chopin_minute.mid')
# playMidi('jules_mad_world.mid')
| 28.303279 | 90 | 0.51839 | [
"Apache-2.0"
] | curtissimo41/Player-Piano-19363 | play_midi_curtis.py | 3,453 | Python |
# -*- coding: utf-8 -*-
# vim: ts=2 sw=2 et ai
###############################################################################
# Copyright (c) 2012,2013-2021 Andreas Vogel andreas@wellenvogel.net
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# parts from this software (AIS decoding) are taken from the gpsd project
# so refer to this BSD licencse also (see ais.py) or omit ais.py
# parts contributed by free-x https://github.com/free-x
# parts contributed by Matt Hawkins http://www.raspberrypi-spy.co.uk/
#
###############################################################################
import hashlib
import avnav_handlerList
from avnav_nmea import *
from avnav_worker import *
class AVNUserAppHandler(AVNWorker):
'''
handle the files in the user directory
'''
CHILDNAME="UserTool"
TYPE="addon"
@classmethod
def getStartupGroup(cls):
return 3
@classmethod
def getPrefix(cls):
return None
@classmethod
def getConfigParam(cls, child=None):
#we add this to the ones configured at HTTPServer
if child == cls.CHILDNAME:
return {
'url':None, #we replace $HOST...
'title':'',
'icon':None, #an icon below $datadir/user
'keepUrl':'' #auto detect
}
if not child is None:
return None
rt = {
'interval': '5',
}
return rt
@classmethod
def preventMultiInstance(cls):
return True
@classmethod
def autoInstantiate(cls):
return True
def __init__(self,param):
self.userHandler=None # AVNUserHandler
self.imagesHandler=None # AVNImagesHandler
self.httpServer=None # AVNHTTPServer
self.addonList=[]
self.additionalAddOns=[]
AVNWorker.__init__(self,param)
def startInstance(self, navdata):
self.userHandler=self.findHandlerByName('AVNUserHandler')
if self.userHandler is None:
raise Exception("unable to find a user handler")
self.imagesHandler=self.findHandlerByName('AVNImagesHandler')
if self.imagesHandler is None:
raise Exception("unable to find an images handler")
self.httpServer = self.findHandlerByName('AVNHttpServer')
if self.httpServer is None:
raise Exception("unable to find AVNHttpServer")
super().startInstance(navdata)
# thread run method - just try forever
def run(self):
sleepTime=self.getFloatParam('interval')
self.setInfo('main', "starting", WorkerStatus.STARTED)
self.fillList()
while not self.shouldStop():
self.wait(sleepTime)
def computeKey(self,entry):
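    """Build a stable identifier for an addon entry: an md5 over its url, icon and title."""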
md5=hashlib.md5()
for k in ('url','icon','title'):
v=entry.get(k)
if v is not None:
try:
md5.update(v.encode('utf-8'))
except Exception as e:
AVNLog.error("unable to compute md5 for %s: %s",v,e)
return md5.hexdigest()
def fillList(self):
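    # Rebuild self.addonList: merge the user-configured UserTool children with any
    # legacy addons configured on the HTTP server, validating each url and icon.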
data = []
alreadyFound=set()
childlist = self.param.get(self.CHILDNAME)
if childlist is not None:
for child in childlist:
url=child.get('url')
key=self.computeKey(child)
if url is None:
child['invalid']=True
if key in alreadyFound:
AVNLog.error("duplicate user app found, ignoring %s",url)
while key in alreadyFound:
key = key + "x"
child['name']=key
child['invalid']=True
else:
child['name']=key
alreadyFound.add(key)
item=child.copy()
item['canDelete']=True
item['source']='user'
data.append(item)
serverAddons = self.httpServer.getParamValue(self.CHILDNAME)
nr=0
if serverAddons is not None:
for addon in serverAddons:
newAddon = addon.copy()
newAddon['canDelete']=False
newAddon['name']="server:%d"%nr
newAddon['source']='legacy'
nr+=1
data.append(newAddon)
for addon in data:
url = addon.get('url')
if url is None:
addon['invalid']=True
if not url.startswith("http"):
userFile = self.findFileForUrl(url)
if userFile is None:
AVNLog.error("error: user url %s not found", url)
addon['invalid']=True
if addon.get('title') == '':
del addon['title']
keepUrl = False
if addon.get('keepUrl') is None or addon.get('keepUrl') == '':
if addon.get('url').startswith("http"):
keepUrl = True
else:
if str(addon.get('keepUrl')).lower() == "true":
keepUrl = True
addon['keepUrl'] = keepUrl
icon = addon['icon']
if not icon.startswith("http"):
if not icon.startswith("/user"):
icon="/user/"+icon
addon['icon']=icon
iconpath = self.findFileForUrl(icon)
if iconpath is None:
AVNLog.error("icon path %s for %s not found, ignoring entry", icon, addon['url'])
addon['invalid'] = True
self.addonList=data
self.setInfo('main', "active, %d addons"%len(data), WorkerStatus.NMEA)
return
def findFileForUrl(self,url):
if url is None:
return None
if url.startswith("http"):
return None
(path,query)=self.httpServer.pathQueryFromUrl(url)
filePath=self.httpServer.tryExternalMappings(path,query)
if filePath is None or not os.path.exists(filePath):
return None
return filePath
def findChild(self,name,ignoreInvalid=False):
children=self.param.get(self.CHILDNAME)
if children is None:
return -1
if not isinstance(children,list):
return -1
for i in range(0,len(children)):
child =children[i]
if child.get('name') == name:
if ignoreInvalid:
inList=[e for e in self.addonList if e.get('name') == name and not ( e.get('invalid') == True)]
          if len(inList) < 1:
return -1
return i
return -1
def getChildConfig(self,name):
idx=self.findChild(name)
if idx < 0:
return {}
else:
return self.param[self.CHILDNAME][idx]
def handleDelete(self,name):
if name is None:
raise Exception("missing name")
name = AVNUtil.clean_filename(name)
idx=self.findChild(name)
if idx < 0:
raise Exception("unable to find %s"%name)
self.removeChildConfig(self.CHILDNAME,idx)
self.fillList()
def handleList(self,httpHandler,includeInvalid):
host = httpHandler.headers.get('host')
hostparts = host.split(':')
outdata=[]
src=self.additionalAddOns+self.addonList
for addon in src:
if addon.get('invalid') == True and not includeInvalid:
continue
item=addon.copy()
if hostparts is not None:
item['originalUrl']=addon['url']
item['url'] = addon['url'].replace('$HOST', hostparts[0])
outdata.append(item)
rt = AVNUtil.getReturnData(items=outdata)
return rt
def getHandledCommands(self):
rt={"api": self.TYPE, "list": self.TYPE, "delete": self.TYPE}
prefix=self.getPrefix()
if prefix is not None:
rt["path"]=prefix
return rt
def checkName(self,name,doRaise=True):
cleanName=AVNUtil.clean_filename(name)
if name != cleanName:
if doRaise:
raise Exception("name %s is invalid"%name)
return False
return True
def registerAddOn(self,name,url,iconPath,title=None):
newAddon = {
'name': name,
'url': url,
'icon': iconPath,
'title': title,
'canDelete': False,
'source':'plugin'
}
self.additionalAddOns.append(newAddon)
def unregisterAddOn(self,name):
if name is None:
raise Exception("name cannot be None")
for ao in self.additionalAddOns:
if ao.get('name') == name:
self.additionalAddOns.remove(ao)
return True
def deleteByUrl(self,url):
"""
called by the user handler when a user file is deleted
@param url:
@return:
"""
if url is None:
return
for addon in self.addonList:
if addon.get('canDelete') == True and addon.get('url') == url:
self.handleDelete(addon.get('name'))
def handleApiRequest(self, type, subtype, requestparam, **kwargs):
if type == 'api':
command=AVNUtil.getHttpRequestParam(requestparam,'command',True)
name=AVNUtil.getHttpRequestParam(requestparam,'name',False)
if command == 'delete':
self.handleDelete(name)
return AVNUtil.getReturnData()
elif command == 'list':
includeInvalid = AVNUtil.getHttpRequestParam(requestparam, "invalid")
return self.handleList(kwargs.get('handler'),includeInvalid is not None and includeInvalid.lower() == 'true')
elif command == 'update':
url=AVNUtil.getHttpRequestParam(requestparam,'url',True)
icon=AVNUtil.getHttpRequestParam(requestparam,'icon',True)
title=AVNUtil.getHttpRequestParam(requestparam,'title')
param = {}
param['icon'] = icon
param['title'] = title
param['url'] = url
param['keepUrl'] = url.startswith("http")
doAdd=False
if name is None:
doAdd=True
name=self.computeKey(param)
#add
for entry in self.addonList:
if entry['name'] == name:
raise Exception("trying to add an already existing url %s"%url)
param['name']=name
if not url.startswith("http"):
userFile=self.findFileForUrl(url)
if userFile is None:
raise Exception("unable to find a local file for %s"%url)
if not icon.startswith("http"):
iconFile=self.findFileForUrl(icon)
if iconFile is None:
raise Exception("unable to find an icon file for %s"%icon)
idx=self.findChild(name)
if idx < 0 and not doAdd:
raise Exception("did not find a user app with this name")
for k in list(param.keys()):
idx=self.changeChildConfig(self.CHILDNAME,idx,k,param[k],True)
self.writeConfigChanges()
self.fillList()
return AVNUtil.getReturnData()
raise Exception("unknown command for %s api request: %s"%(self.type,command))
if type == "list":
includeInvalid=AVNUtil.getHttpRequestParam(requestparam,"invalid")
return self.handleList(kwargs.get('handler'),includeInvalid is not None and includeInvalid.lower() == 'true')
if type == 'delete':
name = AVNUtil.getHttpRequestParam(requestparam, "name",True)
self.handleDelete(name)
return AVNUtil.getReturnData()
raise Exception("unable to handle user request %s"%(type))
avnav_handlerList.registerHandler(AVNUserAppHandler)
| 32.579096 | 117 | 0.63366 | [
"MIT"
] | Littlechay/avnav | server/handler/avnuserapps.py | 11,533 | Python |
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "styx"
PROJECT_SPACE_DIR = "/home/zchen61/carnd-capstone-test/ros/devel"
PROJECT_VERSION = "0.0.0"
| 42.333333 | 68 | 0.703412 | [
"MIT"
] | wolf-zchen/CarND-capstone-project | ros/build/styx/catkin_generated/pkg.develspace.context.pc.py | 381 | Python |
"""empty message
Revision ID: 4a83f309f411
Revises:
Create Date: 2019-06-20 23:47:24.513383
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '4a83f309f411'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('users',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('created_at', sa.DateTime(timezone=True), nullable=True),
sa.Column('updated_at', sa.DateTime(timezone=True), nullable=True),
sa.Column('username', sa.String(length=50), nullable=True),
sa.Column('handle', sa.String(length=25), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('handle'),
sa.UniqueConstraint('id')
)
op.create_table('post',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('created_at', sa.DateTime(timezone=True), nullable=True),
sa.Column('updated_at', sa.DateTime(timezone=True), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('img_name', sa.String(length=100), nullable=True),
sa.Column('caption', sa.String(length=250), nullable=True),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('id'),
sa.UniqueConstraint('img_name')
)
op.create_table('comment',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('created_at', sa.DateTime(timezone=True), nullable=True),
sa.Column('updated_at', sa.DateTime(timezone=True), nullable=True),
sa.Column('post_id', sa.Integer(), nullable=True),
sa.Column('user', sa.String(length=50), nullable=True),
sa.Column('text', sa.String(length=100), nullable=True),
sa.ForeignKeyConstraint(['post_id'], ['post.id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('comment')
op.drop_table('post')
op.drop_table('users')
# ### end Alembic commands ###
| 34.190476 | 71 | 0.67363 | [
"MIT"
] | stark3998/Instagram | migrations/versions/4a83f309f411_.py | 2,154 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""":Mod: views.py
:Synopsis:
:Author:
costa
servilla
ide
:Created:
7/23/18
"""
import daiquiri
from datetime import date, datetime
import html
import json
import math
import os.path
import pandas as pd
from pathlib import Path
import pickle
import requests
from shutil import copyfile
from urllib.parse import urlparse, quote
from zipfile import ZipFile
from flask import (
Blueprint, flash, render_template, redirect, request, url_for, session, Markup
)
from flask_login import (
current_user, login_required
)
from flask import Flask, current_app
from webapp import mailout
from webapp.config import Config
import csv
from webapp.home.exceptions import DataTableError, MissingFileError
from webapp.home.forms import (
CreateEMLForm, DownloadEMLForm, ImportPackageForm,
OpenEMLDocumentForm, DeleteEMLForm, SaveAsForm,
LoadDataForm, LoadMetadataForm, LoadOtherEntityForm,
ImportEMLForm, ImportEMLItemsForm, ImportItemsForm,
SubmitToEDIForm, SendToColleagueForm
)
from webapp.home.load_data_table import (
load_data_table, load_other_entity, delete_data_files, get_md5_hash
)
from webapp.home.import_package import (
copy_ezeml_package, upload_ezeml_package, import_ezeml_package
)
from webapp.home.metapype_client import (
load_eml, save_both_formats, new_child_node, remove_child, create_eml,
move_up, move_down, UP_ARROW, DOWN_ARROW, RELEASE_NUMBER,
save_old_to_new, read_xml, new_child_node, truncate_middle,
compose_rp_label, compose_full_gc_label, compose_taxonomic_label,
compose_funding_award_label, compose_project_label, list_data_packages,
import_responsible_parties, import_coverage_nodes, import_funding_award_nodes,
import_project_nodes, get_check_metadata_status
)
from webapp.home.check_metadata import check_eml
from webapp.buttons import *
from webapp.pages import *
from metapype.eml import names
from metapype.model import mp_io
from metapype.model.node import Node
from werkzeug.utils import secure_filename
import webapp.views.data_tables.dt as dt
import webapp.auth.user_data as user_data
logger = daiquiri.getLogger('views: ' + __name__)
home = Blueprint('home', __name__, template_folder='templates')
help_dict = {}
keywords = {}
def log_info(msg):
app = Flask(__name__)
with app.app_context():
current_app.logger.info(msg)
def non_breaking(_str):
    return _str.replace(' ', html.unescape('&nbsp;'))
def debug_msg(msg):
if Config.LOG_DEBUG:
app = Flask(__name__)
with app.app_context():
current_app.logger.info(msg)
def debug_None(x, msg):
if x is None:
if Config.LOG_DEBUG:
app = Flask(__name__)
with app.app_context():
current_app.logger.info(msg)
def reload_metadata():
current_document = current_user.get_filename()
if not current_document:
# if we've just deleted the current document, it won't exist
return redirect(url_for(PAGE_INDEX))
# Call load_eml here to get the check_metadata status set correctly
eml_node = load_eml(filename=current_document)
return current_document, eml_node
@home.before_app_first_request
def init_session_vars():
session["check_metadata_status"] = "green"
@home.before_app_first_request
def fixup_upload_management():
return
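    # NOTE: the bare return above disables the one-time upload-folder migration below;
    # the remaining body is kept for reference only.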
USER_DATA_DIR = 'user-data'
to_delete = set()
# loop on the various users' data directories
for user_folder_name in os.listdir(USER_DATA_DIR):
if user_folder_name == 'uploads' or user_folder_name == 'zip_temp':
continue
if os.path.isdir(os.path.join(USER_DATA_DIR, user_folder_name)):
user_data.clear_data_table_upload_filenames(user_folder_name)
full_path = os.path.join(USER_DATA_DIR, user_folder_name)
uploads_path = os.path.join(full_path, 'uploads')
# look at the EML model json files
for file in os.listdir(full_path):
full_file = os.path.join(full_path, file)
if os.path.isfile(full_file) and full_file.lower().endswith('.json') and file != '__user_properties__.json':
# some directories may have obsolete 'user_properties.json' files
if file == 'user_properties.json':
to_delete.add(os.path.join(full_path, 'user_properties.json'))
continue
# create a subdir of the user's uploads directory for this document's uploads
document_name = file[:-5]
subdir_name = os.path.join(uploads_path, document_name)
try:
os.mkdir(subdir_name)
except OSError:
pass
# open the model file
with open(full_file, "r") as json_file:
json_obj = json.load(json_file)
eml_node = mp_io.from_json(json_obj)
# look at data tables
data_table_nodes = []
eml_node.find_all_descendants(names.DATATABLE, data_table_nodes)
for data_table_node in data_table_nodes:
object_name_node = data_table_node.find_descendant(names.OBJECTNAME)
if object_name_node:
object_name = object_name_node.content
object_path = os.path.join(uploads_path, object_name)
target_path = os.path.join(subdir_name, object_name)
if os.path.isfile(object_path):
to_delete.add(object_path)
copyfile(object_path, target_path)
# look at other entities
other_entity_nodes = []
eml_node.find_all_descendants(names.OTHERENTITY, other_entity_nodes)
for other_entity_node in other_entity_nodes:
object_name_node = other_entity_node.find_descendant(names.OBJECTNAME)
if object_name_node:
object_name = object_name_node.content
object_path = os.path.join(uploads_path, object_name)
if os.path.isfile(object_path):
to_delete.add(object_path)
copyfile(object_path, os.path.join(subdir_name, object_name))
# clean up temp files
for path in os.listdir(subdir_name):
path = os.path.join(subdir_name, path)
if os.path.isfile(path) and path.endswith('ezeml_tmp'):
to_delete.add(path)
# now capture all uploaded file names in the user data
for file in os.listdir(uploads_path):
uploads_folder = os.path.join(uploads_path, file)
if os.path.isdir(uploads_folder):
# add the uploaded files to the user data
for uploaded_file in os.listdir(uploads_folder):
user_data.add_data_table_upload_filename(uploaded_file, user_folder_name, file)
# clean up temp files
for path in os.listdir(full_path):
path = os.path.join(full_path, path)
if os.path.isfile(path) and path.endswith('ezeml_tmp'):
to_delete.add(path)
# now we can delete the files we've copied
for file in to_delete:
os.remove(file)
@home.before_app_request
@home.before_app_first_request
def load_eval_entries():
rows = []
with open('webapp/static/evaluate.csv') as csv_file:
csv_reader = csv.reader(csv_file)
for row in csv_reader:
rows.append(row)
for row_num in range(1, len(rows)):
id, *vals = rows[row_num]
session[f'__eval__{id}'] = vals
@home.before_app_request
@home.before_app_first_request
def init_keywords():
lter_keywords = pickle.load(open('webapp/static/lter_keywords.pkl', 'rb'))
keywords['LTER'] = lter_keywords
def get_keywords(which):
return keywords.get(which, [])
@home.before_app_request
@home.before_app_first_request
def init_help():
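    # help.txt is a sequence of blocks: an id line, a title line, then body lines
    # terminated by a row of dashes; each block is parsed into help_dict[id] = (title, content).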
lines = []
with open('webapp/static/help.txt') as help:
lines = help.readlines()
index = 0
def get_help_item(lines, index):
id = lines[index].rstrip()
title = lines[index+1].rstrip()
content = '<p>'
index = index + 2
while index < len(lines):
line = lines[index].rstrip('\n')
index = index + 1
if line.startswith('--------------------'):
break
if len(line) == 0:
line = '</p><p>'
content = content + line
if index >= len(lines):
break
content = content + '</p>'
return (id, title, content), index
while index < len(lines):
(id, title, content), index = get_help_item(lines, index)
help_dict[id] = (title, content)
if id == 'contents':
# special case for supporting base.html template
session[f'__help__{id}'] = (title, content)
def get_help(id):
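    # Return (key, title, content); the key follows the same '__help__<id>'
    # convention that init_help() uses when storing help entries in the session.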
title, content = help_dict.get(id)
return f'__help__{id}', title, content
def get_helps(ids):
helps = []
for id in ids:
if id in help_dict:
title, content = help_dict.get(id)
helps.append((f'__help__{id}', title, content))
return helps
@home.route('/')
def index():
if current_user.is_authenticated:
current_document = user_data.get_active_document()
if current_document:
eml_node = load_eml(filename=current_document)
if eml_node:
new_page = PAGE_TITLE
else:
user_data.remove_active_file()
new_page = PAGE_FILE_ERROR
return redirect(url_for(new_page, filename=current_document))
return render_template('index.html')
@home.route('/edit/<page>')
def edit(page:str=None):
'''
The edit page allows for direct editing of a top-level element such as
title, abstract, creators, etc. This function simply redirects to the
    specified page, passing the current document's filename as the only parameter.
'''
if current_user.is_authenticated and page:
current_filename = user_data.get_active_document()
if current_filename:
eml_node = load_eml(filename=current_filename)
new_page = page if eml_node else PAGE_FILE_ERROR
return redirect(url_for(new_page, filename=current_filename))
return render_template('index.html')
def get_back_url():
url = url_for(PAGE_INDEX)
if current_user.is_authenticated:
new_page = get_redirect_target_page()
filename = user_data.get_active_document()
if new_page and filename:
url = url_for(new_page, filename=filename)
return url
@home.route('/about')
def about():
return render_template('about.html', back_url=get_back_url(), title='About')
@home.route('/user_guide')
def user_guide():
return render_template('user_guide.html', back_url=get_back_url(), title='User Guide')
@home.route('/news')
def news():
return render_template('news.html', back_url=get_back_url(), title="What's New")
@home.route('/encoding_error/<filename>')
def encoding_error(filename=None, errors=None):
return render_template('encoding_error.html', filename=filename, errors=errors, title='Encoding Errors')
@home.route('/file_error/<filename>')
def file_error(filename=None):
return render_template('file_error.html', filename=filename, title='File Error')
@home.route('/delete', methods=['GET', 'POST'])
@login_required
def delete():
form = DeleteEMLForm()
form.filename.choices = list_data_packages(True, True)
# Process POST
if request.method == 'POST':
if 'Cancel' in request.form:
return redirect(get_back_url())
if form.validate_on_submit():
filename = form.filename.data
user_data.discard_data_table_upload_filenames_for_package(filename)
return_value = user_data.delete_eml(filename=filename)
if filename == user_data.get_active_document():
current_user.set_filename(None)
if isinstance(return_value, str):
flash(return_value)
else:
flash(f'Deleted {filename}')
return redirect(url_for(PAGE_INDEX))
# Process GET
return render_template('delete_eml.html', title='Delete EML',
form=form)
@home.route('/save', methods=['GET', 'POST'])
@login_required
def save():
current_document = current_user.get_filename()
if not current_document:
flash('No document currently open')
return render_template('index.html')
eml_node = load_eml(filename=current_document)
if not eml_node:
flash(f'Unable to open {current_document}')
return render_template('index.html')
save_both_formats(filename=current_document, eml_node=eml_node)
flash(f'Saved {current_document}')
return redirect(url_for(PAGE_TITLE, filename=current_document))
def copy_uploads(from_package, to_package):
from_folder = user_data.get_document_uploads_folder_name(from_package)
to_folder = user_data.get_document_uploads_folder_name(to_package)
for filename in os.listdir(from_folder):
from_path = os.path.join(from_folder, filename)
to_path = os.path.join(to_folder, filename)
copyfile(from_path, to_path)
user_data.add_data_table_upload_filename(filename, document_name=to_package)
@home.route('/save_as', methods=['GET', 'POST'])
@login_required
def save_as():
# Determine POST type
if request.method == 'POST':
if BTN_SAVE in request.form:
submit_type = 'Save'
elif BTN_CANCEL in request.form:
submit_type = 'Cancel'
else:
submit_type = None
form = SaveAsForm()
current_document = current_user.get_filename()
# Process POST
if request.method == 'POST':
if BTN_CANCEL in request.form:
if current_document:
# Revert back to the old filename
return redirect(get_back_url())
else:
return render_template('index.html')
if form.validate_on_submit():
if not current_document:
flash('No document currently open')
return render_template('index.html')
eml_node = load_eml(filename=current_document)
if not eml_node:
flash(f'Unable to open {current_document}')
return render_template('index.html')
new_document = form.filename.data
return_value = save_old_to_new(
old_filename=current_document,
new_filename=new_document,
eml_node=eml_node)
if isinstance(return_value, str):
flash(return_value)
new_filename = current_document # Revert back to the old filename
else:
copy_uploads(current_document, new_document)
current_user.set_filename(filename=new_document)
flash(f'Saved as {new_document}')
new_page = PAGE_TITLE # Return the Response object
return redirect(url_for(new_page, filename=new_document))
# else:
# return redirect(url_for(PAGE_SAVE_AS, filename=current_filename))
# Process GET
if current_document:
# form.filename.data = current_filename
help = get_helps(['save_as_document'])
return render_template('save_as.html',
filename=current_document,
title='Save As',
form=form,
help=help)
else:
flash("No document currently open")
return render_template('index.html')
@home.route('/download', methods=['GET', 'POST'])
@login_required
def download():
form = DownloadEMLForm()
form.filename.choices = list_data_packages(True, True)
# Process POST
if form.validate_on_submit():
filename = form.filename.data
return_value = user_data.download_eml(filename=filename)
if isinstance(return_value, str):
flash(return_value)
else:
return return_value
# Process GET
return render_template('download_eml.html', title='Download EML',
form=form)
@home.route('/check_metadata/<filename>', methods=['GET', 'POST'])
@login_required
def check_metadata(filename:str):
current_document = user_data.get_active_document()
if not current_document:
raise FileNotFoundError
eml_node = load_eml(filename=current_document)
content = check_eml(eml_node, filename)
# Process POST
if request.method == 'POST':
# return render_template(PAGE_CHECK, filename=filename)
return redirect(url_for(PAGE_CHECK, filename=current_document))
else:
set_current_page('check_metadata')
return render_template('check_metadata.html', content=content, title='Check Metadata')
@home.route('/download_current', methods=['GET', 'POST'])
@login_required
def download_current():
current_document = user_data.get_active_document()
if current_document:
# Force the document to be saved, so it gets cleaned
eml_node = load_eml(filename=current_document)
save_both_formats(filename=current_document, eml_node=eml_node)
# Do the download
return_value = user_data.download_eml(filename=current_document)
if isinstance(return_value, str):
flash(return_value)
else:
return return_value
def allowed_data_file(filename):
ALLOWED_EXTENSIONS = set(['csv', 'tsv', 'txt', 'xml', 'ezeml_tmp'])
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
def allowed_metadata_file(filename):
ALLOWED_EXTENSIONS = set(['xml'])
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
'''
This function is deprecated. It was originally used as a first
step in a two-step process for data table upload, but that process
has been consolidated into a single step (see the load_data()
function).
@home.route('/upload_data_file', methods=['GET', 'POST'])
@login_required
def upload_data_file():
uploads_folder = get_user_uploads_folder_name()
form = UploadDataFileForm()
# Process POST
if request.method == 'POST' and form.validate_on_submit():
# Check if the post request has the file part
if 'file' not in request.files:
flash('No file part')
return redirect(request.url)
file = request.files['file']
if file:
filename = secure_filename(file.filename)
if filename is None or filename == '':
flash('No selected file')
elif allowed_data_file(filename):
file.save(os.path.join(uploads_folder, filename))
flash(f'{filename} uploaded')
else:
flash(f'{filename} is not a supported data file type')
return redirect(request.url)
# Process GET
return render_template('upload_data_file.html', title='Upload Data File',
form=form)
'''
@home.route('/create', methods=['GET', 'POST'])
@login_required
def create():
form = CreateEMLForm()
# Process POST
help = get_helps(['new_eml_document'])
if request.method == 'POST':
if BTN_CANCEL in request.form:
return redirect(get_back_url())
if form.validate_on_submit():
filename = form.filename.data
user_filenames = user_data.get_user_document_list()
if user_filenames and filename and filename in user_filenames:
flash(f'{filename} already exists')
return render_template('create_eml.html', help=help,
form=form)
create_eml(filename=filename)
current_user.set_filename(filename)
current_user.set_packageid(None)
return redirect(url_for(PAGE_TITLE, filename=filename))
# Process GET
return render_template('create_eml.html', help=help, form=form)
@home.route('/open_eml_document', methods=['GET', 'POST'])
@login_required
def open_eml_document():
form = OpenEMLDocumentForm()
form.filename.choices = list_data_packages(False, False)
# Process POST
if request.method == 'POST':
if BTN_CANCEL in request.form:
return redirect(get_back_url())
if form.validate_on_submit():
filename = form.filename.data
eml_node = load_eml(filename)
if eml_node:
current_user.set_filename(filename)
packageid = eml_node.attributes.get('packageId', None)
if packageid:
current_user.set_packageid(packageid)
create_eml(filename=filename)
new_page = PAGE_TITLE
else:
new_page = PAGE_FILE_ERROR
return redirect(url_for(new_page, filename=filename))
# Process GET
return render_template('open_eml_document.html', title='Open EML Document',
form=form)
@home.route('/import_parties', methods=['GET', 'POST'])
@login_required
def import_parties():
form = ImportEMLForm()
form.filename.choices = list_data_packages(True, True)
# Process POST
if request.method == 'POST':
if BTN_CANCEL in request.form:
return redirect(get_back_url())
# new_page = get_redirect_target_page()
# url = url_for(new_page, filename=current_user.get_filename())
# return redirect(url)
if form.validate_on_submit():
filename = form.filename.data
return redirect(url_for('home.import_parties_2', filename=filename))
# Process GET
help = get_helps(['import_responsible_parties'])
return render_template('import_parties.html', help=help, form=form)
def get_redirect_target_page():
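    # Map the page the user was last on (as recorded via set_current_page) back to
    # its Flask endpoint so that Back/Cancel actions return to the right place.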
current_page = get_current_page()
if current_page == 'title':
return PAGE_TITLE
elif current_page == 'creator':
return PAGE_CREATOR_SELECT
elif current_page == 'metadata_provider':
return PAGE_METADATA_PROVIDER_SELECT
elif current_page == 'associated_party':
return PAGE_ASSOCIATED_PARTY_SELECT
elif current_page == 'abstract':
return PAGE_ABSTRACT
elif current_page == 'keyword':
return PAGE_KEYWORD_SELECT
elif current_page == 'intellectual_rights':
return PAGE_INTELLECTUAL_RIGHTS
elif current_page == 'geographic_coverage':
return PAGE_GEOGRAPHIC_COVERAGE_SELECT
elif current_page == 'temporal_coverage':
return PAGE_TEMPORAL_COVERAGE_SELECT
elif current_page == 'taxonomic_coverage':
return PAGE_TAXONOMIC_COVERAGE_SELECT
elif current_page == 'maintenance':
return PAGE_MAINTENANCE
elif current_page == 'contact':
return PAGE_CONTACT_SELECT
elif current_page == 'publisher':
return PAGE_PUBLISHER
elif current_page == 'publication_info':
return PAGE_PUBLICATION_INFO
elif current_page == 'method_step':
return PAGE_METHOD_STEP_SELECT
elif current_page == 'project':
return PAGE_PROJECT
elif current_page == 'data_table':
return PAGE_DATA_TABLE_SELECT
elif current_page == 'other_entity':
return PAGE_OTHER_ENTITY_SELECT
elif current_page == 'check_metadata':
return PAGE_CHECK
elif current_page == 'export_package':
return PAGE_EXPORT_DATA_PACKAGE
elif current_page == 'data_package_id':
return PAGE_DATA_PACKAGE_ID
elif current_page == 'submit_package':
return PAGE_SUBMIT_TO_EDI
elif current_page == 'send_to_other':
return PAGE_SEND_TO_OTHER
else:
return PAGE_TITLE
@home.route('/import_parties_2/<filename>/', methods=['GET', 'POST'])
@login_required
def import_parties_2(filename):
form = ImportEMLItemsForm()
eml_node = load_eml(filename)
parties = get_responsible_parties_for_import(eml_node)
choices = [[party[2], party[1]] for party in parties]
form.to_import.choices = choices
targets = [
("Creators", "Creators"),
("Metadata Providers", "Metadata Providers"),
("Associated Parties", "Associated Parties"),
("Contacts", "Contacts"),
("Publisher", "Publisher"),
("Project Personnel", "Project Personnel")]
form.target.choices = targets
if request.method == 'POST' and BTN_CANCEL in request.form:
return redirect(get_back_url())
if form.validate_on_submit():
node_ids_to_import = form.data['to_import']
target_class = form.data['target']
target_filename = current_user.get_filename()
import_responsible_parties(target_filename, node_ids_to_import, target_class)
if target_class == 'Creators':
new_page = PAGE_CREATOR_SELECT
elif target_class == 'Metadata Providers':
new_page = PAGE_METADATA_PROVIDER_SELECT
elif target_class == 'Associated Parties':
new_page = PAGE_ASSOCIATED_PARTY_SELECT
elif target_class == 'Contacts':
new_page = PAGE_CONTACT_SELECT
elif target_class == 'Publisher':
new_page = PAGE_PUBLISHER
elif target_class == 'Project Personnel':
new_page = PAGE_PROJECT_PERSONNEL_SELECT
return redirect(url_for(new_page, filename=target_filename))
# Process GET
help = get_helps(['import_responsible_parties_2'])
return render_template('import_parties_2.html', target_filename=filename, help=help, form=form)
def get_responsible_parties_for_import(eml_node):
parties = []
for node in eml_node.find_all_nodes_by_path([names.DATASET, names.CREATOR]):
label = compose_rp_label(node)
parties.append(('Creator', f'{label} (Creator)', node.id))
for node in eml_node.find_all_nodes_by_path([names.DATASET, names.METADATAPROVIDER]):
label = compose_rp_label(node)
parties.append(('Metadata Provider', f'{label} (Metadata Provider)', node.id))
for node in eml_node.find_all_nodes_by_path([names.DATASET, names.ASSOCIATEDPARTY]):
label = compose_rp_label(node)
parties.append(('Associated Party', f'{label} (Associated Party)', node.id))
for node in eml_node.find_all_nodes_by_path([names.DATASET, names.CONTACT]):
label = compose_rp_label(node)
parties.append(('Contact', f'{label} (Contact)', node.id))
for node in eml_node.find_all_nodes_by_path([names.DATASET, names.PUBLISHER]):
label = compose_rp_label(node)
parties.append(('Publisher', f'{label} (Publisher)', node.id))
for node in eml_node.find_all_nodes_by_path([names.DATASET, names.PROJECT, names.PERSONNEL]):
label = compose_rp_label(node)
parties.append(('Project Personnel', f'{label} (Project Personnel)', node.id))
return parties
@home.route('/import_geo_coverage', methods=['GET', 'POST'])
@login_required
def import_geo_coverage():
form = ImportEMLForm()
form.filename.choices = list_data_packages(False, False)
# Process POST
if request.method == 'POST':
if BTN_CANCEL in request.form:
return redirect(get_back_url())
if form.validate_on_submit():
filename = form.filename.data
return redirect(url_for('home.import_geo_coverage_2', filename=filename))
# Process GET
help = get_helps(['import_geographic_coverage'])
return render_template('import_geo_coverage.html', help=help, form=form)
@home.route('/import_geo_coverage_2/<filename>/', methods=['GET', 'POST'])
@login_required
def import_geo_coverage_2(filename):
form = ImportItemsForm()
eml_node = load_eml(filename)
coverages = get_geo_coverages_for_import(eml_node)
choices = [[coverage[1], coverage[0]] for coverage in coverages]
form.to_import.choices = choices
if request.method == 'POST' and BTN_CANCEL in request.form:
return redirect(get_back_url())
if form.validate_on_submit():
node_ids_to_import = form.data['to_import']
target_package = current_user.get_filename()
import_coverage_nodes(target_package, node_ids_to_import)
return redirect(url_for(PAGE_GEOGRAPHIC_COVERAGE_SELECT, filename=target_package))
# Process GET
help = get_helps(['import_geographic_coverage_2'])
return render_template('import_geo_coverage_2.html', help=help, target_filename=filename, form=form)
def get_geo_coverages_for_import(eml_node):
coverages = []
for node in eml_node.find_all_nodes_by_path([names.DATASET, names.COVERAGE, names.GEOGRAPHICCOVERAGE]):
label = compose_full_gc_label(node)
coverages.append((f'{label}', node.id))
return coverages
@home.route('/import_temporal_coverage', methods=['GET', 'POST'])
@login_required
def import_temporal_coverage():
form = ImportEMLForm()
form.filename.choices = list_data_packages(False, False)
# Process POST
if request.method == 'POST':
if BTN_CANCEL in request.form:
return redirect(get_back_url())
# new_page = get_redirect_target_page()
# url = url_for(new_page, filename=current_user.get_filename())
# return redirect(url)
if form.validate_on_submit():
filename = form.filename.data
return redirect(url_for('home.import_temporal_coverage_2', filename=filename))
# Process GET
return render_template('import_temporal_coverage.html', form=form)
@home.route('/import_temporal_coverage_2/<filename>/', methods=['GET', 'POST'])
@login_required
def import_temporal_coverage_2(filename):
form = ImportItemsForm()
eml_node = load_eml(filename)
coverages = get_temporal_coverages_for_import(eml_node)
choices = [[coverage[1], coverage[0]] for coverage in coverages]
form.to_import.choices = choices
if request.method == 'POST' and BTN_CANCEL in request.form:
return redirect(get_back_url())
if form.validate_on_submit():
node_ids_to_import = form.data['to_import']
target_package = current_user.get_filename()
import_coverage_nodes(target_package, node_ids_to_import)
return redirect(url_for(PAGE_TEMPORAL_COVERAGE_SELECT, filename=target_package))
# Process GET
return render_template('import_temporal_coverage_2.html', target_filename=filename, title='Import Metadata',
form=form)
def get_temporal_coverages_for_import(eml_node):
coverages = []
for node in eml_node.find_all_nodes_by_path([names.DATASET, names.COVERAGE, names.TEMPORALCOVERAGE]):
label = compose_full_gc_label(node) # FIXME
coverages.append((f'{label}', node.id))
return coverages
@home.route('/import_taxonomic_coverage', methods=['GET', 'POST'])
@login_required
def import_taxonomic_coverage():
form = ImportEMLForm()
form.filename.choices = list_data_packages(False, False)
# Process POST
if request.method == 'POST':
if BTN_CANCEL in request.form:
return redirect(get_back_url())
if form.validate_on_submit():
filename = form.filename.data
return redirect(url_for('home.import_taxonomic_coverage_2', filename=filename))
# Process GET
help = get_helps(['import_taxonomic_coverage'])
return render_template('import_taxonomic_coverage.html', help=help, form=form)
@home.route('/import_taxonomic_coverage_2/<filename>/', methods=['GET', 'POST'])
@login_required
def import_taxonomic_coverage_2(filename):
form = ImportItemsForm()
eml_node = load_eml(filename)
coverages = get_taxonomic_coverages_for_import(eml_node)
choices = [[coverage[1], coverage[0]] for coverage in coverages]
form.to_import.choices = choices
if request.method == 'POST' and BTN_CANCEL in request.form:
return redirect(get_back_url())
if form.validate_on_submit():
node_ids_to_import = form.data['to_import']
target_package = current_user.get_filename()
import_coverage_nodes(target_package, node_ids_to_import)
return redirect(url_for(PAGE_TAXONOMIC_COVERAGE_SELECT, filename=target_package))
# Process GET
help = get_helps(['import_taxonomic_coverage_2'])
return render_template('import_taxonomic_coverage_2.html', help=help, target_filename=filename, form=form)
def get_taxonomic_coverages_for_import(eml_node):
coverages = []
for node in eml_node.find_all_nodes_by_path([names.DATASET, names.COVERAGE, names.TAXONOMICCOVERAGE]):
label = truncate_middle(compose_taxonomic_label(node), 100, ' ... ')
coverages.append((f'{label}', node.id))
return coverages
@home.route('/import_funding_awards', methods=['GET', 'POST'])
@login_required
def import_funding_awards():
form = ImportEMLForm()
form.filename.choices = list_data_packages(False, False)
# Process POST
if request.method == 'POST':
if BTN_CANCEL in request.form:
return redirect(get_back_url())
if form.validate_on_submit():
filename = form.filename.data
return redirect(url_for('home.import_funding_awards_2', filename=filename))
# Process GET
help = get_helps(['import_funding_awards'])
return render_template('import_funding_awards.html', help=help, form=form)
@home.route('/import_funding_awards_2/<filename>/', methods=['GET', 'POST'])
@login_required
def import_funding_awards_2(filename):
form = ImportItemsForm()
eml_node = load_eml(filename)
coverages = get_funding_awards_for_import(eml_node)
choices = [[coverage[1], coverage[0]] for coverage in coverages]
form.to_import.choices = choices
if request.method == 'POST' and BTN_CANCEL in request.form:
return redirect(get_back_url())
if form.validate_on_submit():
node_ids_to_import = form.data['to_import']
target_package = current_user.get_filename()
import_funding_award_nodes(target_package, node_ids_to_import)
return redirect(url_for(PAGE_FUNDING_AWARD_SELECT, filename=target_package))
# Process GET
help = get_helps(['import_funding_awards_2'])
return render_template('import_funding_awards_2.html', help=help, target_filename=filename, form=form)
def get_funding_awards_for_import(eml_node):
awards = []
award_nodes = eml_node.find_all_nodes_by_path([names.DATASET, names.PROJECT, names.AWARD])
for award_node in award_nodes:
label = truncate_middle(compose_funding_award_label(award_node), 80, ' ... ')
awards.append((f'{label}', award_node.id))
return awards
@home.route('/import_related_projects', methods=['GET', 'POST'])
@login_required
def import_related_projects():
form = ImportEMLForm()
form.filename.choices = list_data_packages(False, False)
# Process POST
if request.method == 'POST':
if BTN_CANCEL in request.form:
return redirect(get_back_url())
if form.validate_on_submit():
filename = form.filename.data
return redirect(url_for('home.import_related_projects_2', filename=filename))
# Process GET
help = get_helps(['import_related_projects'])
return render_template('import_related_projects.html', help=help, form=form)
@home.route('/import_related_projects_2/<filename>/', methods=['GET', 'POST'])
@login_required
def import_related_projects_2(filename):
form = ImportItemsForm()
eml_node = load_eml(filename)
coverages = get_projects_for_import(eml_node)
choices = [[coverage[1], coverage[0]] for coverage in coverages]
form.to_import.choices = choices
if request.method == 'POST' and BTN_CANCEL in request.form:
return redirect(get_back_url())
if form.validate_on_submit():
node_ids_to_import = form.data['to_import']
target_package = current_user.get_filename()
import_project_nodes(target_package, node_ids_to_import)
return redirect(url_for(PAGE_RELATED_PROJECT_SELECT, filename=target_package))
# Process GET
help = get_helps(['import_related_projects_2'])
return render_template('import_related_projects_2.html', help=help, target_filename=filename, form=form)
def get_projects_for_import(eml_node):
projects = []
project = eml_node.find_single_node_by_path([names.DATASET, names.PROJECT])
project_nodes = eml_node.find_all_nodes_by_path([names.DATASET, names.PROJECT, names.RELATED_PROJECT])
project_nodes.append(project)
for project_node in project_nodes:
label = truncate_middle(compose_project_label(project_node), 80, ' ... ')
projects.append((f'{label}', project_node.id))
return projects
def display_decode_error_lines(filename):
errors = []
with open(filename, 'r', errors='replace') as f:
lines = f.readlines()
for index, line in enumerate(lines, start=1):
if "�" in line:
errors.append((index, line))
return errors
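# Illustrative sketch (not called anywhere in the app): opening a file with
# errors='replace' substitutes the Unicode replacement character (U+FFFD, '�')
# for any byte sequence that cannot be decoded, which is exactly what
# display_decode_error_lines() scans for. Assumes a UTF-8 locale; the temporary
# file and its contents are hypothetical.
def _example_decode_error_detection():
    import tempfile
    with tempfile.NamedTemporaryFile(mode='wb', suffix='.csv', delete=False) as f:
        f.write(b'ok line\n')
        f.write(b'bad \xff byte\n')  # 0xff is not valid UTF-8
        path = f.name
    # Expected result: [(2, 'bad \ufffd byte\n')] -- only line 2 contains the replacement char.
    return display_decode_error_lines(path)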
def create_ezeml_package_manifest(user_folder, manifest_files):
with open(f'{user_folder}/ezEML_manifest.txt', 'w') as manifest_file:
manifest_file.write(f'ezEML Data Archive Manifest\n')
manifest_file.write(f'ezEML Release {RELEASE_NUMBER}\n')
manifest_file.write(f'--------------------\n')
for filetype, filename, filepath in manifest_files:
manifest_file.write(f'{filetype}\n')
manifest_file.write(f'{filename}\n')
manifest_file.write(f'{get_md5_hash(filepath)}\n')
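# Illustrative sketch: get_md5_hash() used in the manifest above is imported from
# elsewhere in ezEML. A typical implementation reads the file in chunks and returns
# the hex digest, roughly as below; this local variant is hypothetical and is not
# used by create_ezeml_package_manifest().
def _example_md5_hash(filepath):
    import hashlib
    md5 = hashlib.md5()
    with open(filepath, 'rb') as f:
        # 64 KB chunks keep memory use flat for large data files
        for chunk in iter(lambda: f.read(65536), b''):
            md5.update(chunk)
    return md5.hexdigest()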
def zip_package(current_document=None, eml_node=None):
if not current_document:
current_document = current_user.get_filename()
if not current_document:
raise FileNotFoundError
if not eml_node:
eml_node = load_eml(filename=current_document)
user_folder = user_data.get_user_folder_name()
zipfile_name = f'{current_document}.zip'
zipfile_path = os.path.join(user_folder, zipfile_name)
zip_object = ZipFile(zipfile_path, 'w')
manifest_files = []
pathname = f'{user_folder}/{current_document}.json'
arcname = f'{current_document}.json'
zip_object.write(pathname, arcname)
manifest_files.append(('JSON', f'{current_document}.json', pathname))
package_id = user_data.get_active_packageid()
if package_id:
# copy the EML file using the package_id as name
arcname = f'{package_id}.xml'
copyfile(f'{user_folder}/{current_document}.xml', f'{user_folder}/{arcname}')
else:
arcname = f'{current_document}.xml'
# pathname = f'{user_folder}/{current_document}.xml'
pathname = f'{user_folder}/{arcname}'
manifest_files.append(('XML', arcname, pathname))
zip_object.write(pathname, arcname)
create_ezeml_package_manifest(user_folder, manifest_files)
pathname = f'{user_folder}/ezEML_manifest.txt'
arcname = 'ezEML_manifest.txt'
zip_object.write(pathname, arcname)
# get data files
uploads_folder = user_data.get_document_uploads_folder_name()
data_table_nodes = []
eml_node.find_all_descendants(names.DATATABLE, data_table_nodes)
entity_nodes = []
eml_node.find_all_descendants(names.OTHERENTITY, entity_nodes)
data_nodes = data_table_nodes + entity_nodes
for data_node in data_nodes:
object_name_node = data_node.find_single_node_by_path([names.PHYSICAL, names.OBJECTNAME])
if object_name_node:
object_name = object_name_node.content
pathname = f'{uploads_folder}/{object_name}'
arcname = f'data/{object_name}'
try:
zip_object.write(pathname, arcname)
except FileNotFoundError as err:
filename = os.path.basename(err.filename)
msg = f"Unable to archive the package. Missing file: {filename}."
flash(msg, category='error')
return None
zip_object.close()
return zipfile_path
def save_as_ezeml_package_export(archive_file):
current_document = current_user.get_filename()
if not current_document:
raise FileNotFoundError
user_folder = user_data.get_user_folder_name()
# Create the exports folder
timestamp = datetime.now().date().strftime('%Y_%m_%d') + '_' + datetime.now().time().strftime('%H_%M_%S')
export_folder = os.path.join(user_folder, 'exports', current_document, timestamp)
os.makedirs(export_folder, exist_ok=True)
_, archive_basename = os.path.split(archive_file)
src = archive_file
dest = f'{export_folder}/{archive_basename}'
copyfile(src, dest)
parsed_url = urlparse(request.base_url)
download_url = f"{parsed_url.scheme}://{parsed_url.netloc}/{dest}"
return archive_basename, download_url
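# Illustrative sketch: the export folder above (and the backups folder in
# backup_metadata() below) is named with a 'YYYY_MM_DD_HH_MM_SS' timestamp built
# from two separate strftime calls. The single format string below yields the
# same value; it is shown only to make the naming convention explicit.
def _example_export_timestamp():
    from datetime import datetime
    ts = datetime.now().strftime('%Y_%m_%d_%H_%M_%S')
    # e.g. '2021_07_04_09_15_30' -> exports/<document>/<timestamp>/<archive>.zip
    return ts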
@home.route('/export_package', methods=['GET', 'POST'])
@login_required
def export_package():
if request.method == 'POST' and BTN_CANCEL in request.form:
return redirect(get_back_url())
current_document, eml_node = reload_metadata() # So check_metadata status is correct
if request.method == 'POST':
save_both_formats(current_document, eml_node)
zipfile_path = zip_package(current_document, eml_node)
if zipfile_path:
archive_basename, download_url = save_as_ezeml_package_export(zipfile_path)
if download_url:
return redirect(url_for('home.export_package_2', package_name=archive_basename,
download_url=get_shortened_url(download_url), safe=''))
# Process GET
help = get_helps(['export_package'])
return render_template('export_package.html', back_url=get_back_url(), title='Export Data Package', help=help)
@home.route('/export_package_2/<package_name>/<path:download_url>', methods=['GET', 'POST'])
@login_required
def export_package_2(package_name, download_url):
if request.method == 'POST' and BTN_CANCEL in request.form:
return redirect(get_back_url())
reload_metadata() # So check_metadata status is correct
return render_template('export_package_2.html', back_url=get_back_url(), title='Export Data Package',
package_name=package_name, download_url=download_url)
def submit_package_mail_body(name=None, email_address=None, archive_name=None, download_url=None, notes=None):
msg = 'Dear EDI Data Curator:' + '\n\n' + \
'This email was auto-generated by ezEML.\n\n\n' + \
'Please submit the following data package to the EDI data repository.\n\n' + \
' Sender\'s name: ' + name + '\n\n' + \
' Sender\'s email: ' + email_address + '\n\n' + \
' Package name: ' + archive_name + '\n\n' + \
' Download URL: ' + get_shortened_url(download_url) + '\n\n' # Note: get_shortened_url handles blanks
if notes:
msg += ' Sender\'s Notes: ' + notes
return msg
def insert_urls(uploads_url_prefix, eml_node, node_type):
upload_nodes = []
eml_node.find_all_descendants(node_type, upload_nodes)
for upload_node in upload_nodes:
try:
physical_node = upload_node.find_descendant(names.PHYSICAL)
object_name_node = physical_node.find_child(names.OBJECTNAME)
object_name = object_name_node.content
distribution_node = physical_node.find_child(names.DISTRIBUTION)
if distribution_node:
physical_node.remove_child(distribution_node)
distribution_node = new_child_node(names.DISTRIBUTION, physical_node)
online_node = new_child_node(names.ONLINE, distribution_node)
url_node = new_child_node(names.URL, online_node)
url_node.add_attribute('function', 'download')
url_node.content = f"{uploads_url_prefix}/{object_name}".replace(' ', '%20')
except Exception as err:
flash(err)
continue
def insert_upload_urls(current_document, eml_node):
user_folder = user_data.get_user_folder_name()
uploads_folder = f'{user_folder}/uploads/{current_document}'
parsed_url = urlparse(request.base_url)
uploads_url_prefix = f"{parsed_url.scheme}://{parsed_url.netloc}/{uploads_folder}"
insert_urls(uploads_url_prefix, eml_node, names.DATATABLE)
insert_urls(uploads_url_prefix, eml_node, names.OTHERENTITY)
@home.route('/submit_package', methods=['GET', 'POST'])
@login_required
def submit_package():
form = SubmitToEDIForm()
if request.method == 'POST' and BTN_CANCEL in request.form:
return redirect(get_back_url())
current_document, eml_node = reload_metadata() # So check_metadata status is correct
if form.validate_on_submit():
# If the user has clicked Save in the EML Documents menu, for example, we want to ignore the
# programmatically generated Submit
if request.form.get(BTN_SUBMIT) == BTN_SUBMIT_TO_EDI:
name = form.data['name']
email_address = form.data['email_address']
notes = form.data['notes']
# update the EML to include URLs to data table files and other entity files
insert_upload_urls(current_document, eml_node)
save_both_formats(filename=current_document, eml_node=eml_node)
zipfile_path = zip_package(current_document, eml_node)
if zipfile_path:
_, download_url = save_as_ezeml_package_export(zipfile_path)
msg = submit_package_mail_body(name, email_address, current_document, download_url, notes)
subject = 'ezEML-Generated Data Submission Request'
to_address = ['support@environmentaldatainitiative.org']
sent = mailout.send_mail(subject=subject, msg=msg, to=to_address)
if sent:
flash(f'Package {current_document} has been sent to EDI. We will notify you when it has been added to the repository.')
else:
flash(f'Email failed to send', 'error')
return redirect(get_back_url())
set_current_page('submit_package')
help = get_helps(['submit_package'])
return render_template('submit_package.html',
title='Send to EDI',
check_metadata_status=get_check_metadata_status(eml_node, current_document),
form=form, help=help)
def get_shortened_url(long_url):
    # Note: full URL encoding via urllib.parse.quote causes hideuri to reject the URL as invalid.
    # So, we just encode blanks.
    try:
        r = requests.post('https://hideuri.com/api/v1/shorten', data={'url': long_url.replace(' ', '%20')})
        r.raise_for_status()
        return r.json()['result_url']
    except requests.exceptions.RequestException:
        return long_url
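# Illustrative sketch of how callers can rely on get_shortened_url(): on any
# failure of the hideuri service it falls back to the original URL, so the return
# value is always usable. The URL below is hypothetical.
def _example_shorten_url():
    long_url = 'https://example.org/user data/My Package.zip'
    # Either a hideuri short link or, on failure, the original URL unchanged.
    return get_shortened_url(long_url)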
def send_to_other_email(name, email_address, title, url):
name_quoted = quote(name)
email_address_quoted = quote(email_address)
title_quoted = quote(title)
    url = get_shortened_url(url)  # Note: get_shortened_url handles blank chars
msg_quoted = f'mailto:{email_address}?subject=ezEML-Generated%20Data%20Package&body=Dear%20{name}%3A%0D%0A%0D%0A' \
f'I%20have%20created%20a%20data%20package%20containing%20EML%20metadata%20and%20associated%20data%20files%20' \
f'for%20your%20inspection.%0D%0A%0D%0ATitle%3A%20%22{title}%22%0D%0A%0D%0AThe%20data%20package%20is%20' \
f'available%20for%20download%20here%3A%20{url}%0D%0A%0D%0AThe%20package%20was%20created%20using%20ezEML.%20' \
f'After%20you%20download%20the%20package%2C%20you%20can%20import%20it%20into%20ezEML%2C%20or%20you%20can%20' \
f'unzip%20it%20to%20extract%20the%20EML%20file%20and%20associated%20data%20files%20to%20work%20with%20them%20' \
f'directly.%0D%0A%0D%0ATo%20learn%20more%20about%20ezEML%2C%20go%20to%20https%3A%2F%2Fezeml.edirepository.org.' \
f'%0D%0A%0D%0AThanks!'
msg_html = Markup(f'Dear {name}:<p><br>'
f'I have created a data package containing EML metadata and associated data files '
f'for your inspection.<p>Title: "{title}"<p>The data package is '
f'available for download here: {url}.<p>The package was created using ezEML. '
f'After you download the package, you can import it into ezEML, or you can '
f'unzip it to extract the EML file and associated data files to work with them '
f'directly.<p>To learn more about ezEML, go to https://ezeml.edirepository.org.'
f'<p>Thanks!')
msg_raw = f'Dear {name}:\n\n' \
f'I have created a data package containing EML metadata and associated data files ' \
f'for your inspection.\n\nTitle: "{title}"\n\nThe data package is ' \
f'available for download here: {url}.\n\nThe package was created using ezEML. ' \
f'After you download the package, you can import it into ezEML, or you can ' \
f'unzip it to extract the EML file and associated data files to work with them ' \
f'directly.\n\nTo learn more about ezEML, go to https://ezeml.edirepository.org.' \
f'\n\nThanks!'
return msg_quoted, msg_html, msg_raw
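# Illustrative sketch: the mailto body above is percent-encoded by hand. The same
# encoding can be produced with urllib.parse.quote, as below. This helper is
# hypothetical and is not used by send_to_other_email().
def _example_mailto_link(email_address, subject, body):
    from urllib.parse import quote
    # quote() percent-encodes spaces, newlines, and other reserved characters.
    return f"mailto:{email_address}?subject={quote(subject, safe='')}&body={quote(body, safe='')}"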
@home.route('/send_to_other/<filename>/', methods=['GET', 'POST'])
@home.route('/send_to_other/<filename>/<mailto>/', methods=['GET', 'POST'])
@login_required
def send_to_other(filename=None, mailto=None):
form = SendToColleagueForm()
if request.method == 'POST' and BTN_CANCEL in request.form:
return redirect(get_back_url())
current_document, eml_node = reload_metadata() # So check_metadata status is correct
if form.validate_on_submit():
colleague_name = form.data['colleague_name']
email_address = form.data['email_address']
eml_node = load_eml(filename=filename)
dataset_node = eml_node.find_child(child_name=names.DATASET)
title_node = dataset_node.find_child(names.TITLE)
title = ''
if title_node:
title = title_node.content
if not title:
flash('The data package requires a Title', 'error')
return redirect(get_back_url())
zipfile_path = zip_package(current_document, eml_node)
_, download_url = save_as_ezeml_package_export(zipfile_path)
if not mailto:
mailto, mailto_html, mailto_raw = send_to_other_email(colleague_name, email_address, title, download_url)
else:
mailto = None # so we don't pop up the email client when the page is returned to after sending the 1st time
mailto_html = None
            mailto_raw = None
eml_node = load_eml(filename=filename)
title_node = eml_node.find_single_node_by_path([names.DATASET, names.TITLE])
if not title_node or not title_node.content:
flash('The data package must have a Title before it can be sent.', 'error')
set_current_page('send_to_other')
if mailto:
form.colleague_name.data = ''
form.email_address.data = ''
help = get_helps(['send_to_colleague_2'])
return render_template('send_to_other_2.html',
title='Send to Other',
mailto=mailto,
mailto_html=mailto_html,
mailto_raw=mailto_raw,
check_metadata_status=get_check_metadata_status(eml_node, current_document),
form=form, help=help)
else:
help = get_helps(['send_to_colleague'])
return render_template('send_to_other.html',
title='Send to Other',
check_metadata_status=get_check_metadata_status(eml_node, current_document),
form=form, help=help)
def get_column_properties(dt_node, object_name):
data_file = object_name
column_vartypes, _, _ = user_data.get_uploaded_table_column_properties(data_file)
if column_vartypes:
return column_vartypes
uploads_folder = user_data.get_document_uploads_folder_name()
num_header_rows = 1
field_delimiter_node = dt_node.find_descendant(names.FIELDDELIMITER)
if field_delimiter_node:
delimiter = field_delimiter_node.content
else:
delimiter = ','
quote_char_node = dt_node.find_descendant(names.QUOTECHARACTER)
if quote_char_node:
quote_char = quote_char_node.content
else:
quote_char = '"'
try:
new_dt_node, new_column_vartypes, new_column_names, new_column_categorical_codes, *_ = load_data_table(
uploads_folder, data_file, num_header_rows, delimiter, quote_char)
user_data.add_uploaded_table_properties(data_file,
new_column_vartypes,
new_column_names,
new_column_categorical_codes)
return new_column_vartypes
except FileNotFoundError:
raise FileNotFoundError('The older version of the data table is missing from our server. Please use "Load Data Table from CSV File" instead of "Re-upload".')
except Exception as err:
raise Exception('Internal error 103')
def check_data_table_similarity(old_dt_node, new_dt_node, new_column_vartypes, new_column_names, new_column_codes):
if not old_dt_node or not new_dt_node:
raise Exception('Internal error 100')
old_attribute_list = old_dt_node.find_child(names.ATTRIBUTELIST)
new_attribute_list = new_dt_node.find_child(names.ATTRIBUTELIST)
if len(old_attribute_list.children) != len(new_attribute_list.children):
raise IndexError('The new table has a different number of columns from the original table.')
document = current_user.get_filename()
old_object_name_node = old_dt_node.find_descendant(names.OBJECTNAME)
if not old_object_name_node:
raise Exception('Internal error 101')
old_object_name = old_object_name_node.content
if not old_object_name:
raise Exception('Internal error 102')
old_column_vartypes, _, _ = user_data.get_uploaded_table_column_properties(old_object_name)
if not old_column_vartypes:
# column properties weren't saved. compute them anew.
old_column_vartypes = get_column_properties(old_dt_node, old_object_name)
if old_column_vartypes != new_column_vartypes:
diffs = []
for col_name, old_type, new_type, attr_node in zip(new_column_names, old_column_vartypes, new_column_vartypes, old_attribute_list.children):
if old_type != new_type:
diffs.append((col_name, old_type, new_type, attr_node))
raise ValueError(diffs)
def substitute_nans(codes):
substituted = []
if codes:
for code in codes:
if isinstance(code, list):
substituted.append(substitute_nans(code))
elif not isinstance(code, float) or not math.isnan(code):
substituted.append(code)
else:
substituted.append('NAN')
else:
substituted.append(None)
return substituted
def compare_codes(old_codes, new_codes):
old_substituted = substitute_nans(old_codes)
new_substituted = substitute_nans(new_codes)
return old_substituted == new_substituted
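# Illustrative sketch (not called by the app) of why the NaN substitution above is
# needed: NaN never compares equal to itself, so two otherwise identical code lists
# containing NaN would look different without it.
def _example_nan_comparison():
    nan = float('nan')
    assert nan != nan                             # direct comparison always fails
    assert compare_codes(['a', nan], ['a', nan])  # substitute_nans maps both to 'NAN'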
def add_node_if_missing(parent_node, child_name):
child = parent_node.find_descendant(child_name)
if not child:
child = new_child_node(child_name, parent=parent_node)
return child
def update_data_table(old_dt_node, new_dt_node, new_column_names, new_column_categorical_codes):
debug_msg(f'Entering update_data_table')
if not old_dt_node or not new_dt_node:
return
old_object_name_node = old_dt_node.find_descendant(names.OBJECTNAME)
old_size_node = old_dt_node.find_descendant(names.SIZE)
old_records_node = old_dt_node.find_descendant(names.NUMBEROFRECORDS)
old_md5_node = old_dt_node.find_descendant(names.AUTHENTICATION)
old_field_delimiter_node = old_dt_node.find_descendant(names.FIELDDELIMITER)
old_record_delimiter_node = old_dt_node.find_descendant(names.RECORDDELIMITER)
old_quote_char_node = old_dt_node.find_descendant(names.QUOTECHARACTER)
new_object_name_node = new_dt_node.find_descendant(names.OBJECTNAME)
new_size_node = new_dt_node.find_descendant(names.SIZE)
new_records_node = new_dt_node.find_descendant(names.NUMBEROFRECORDS)
new_md5_node = new_dt_node.find_descendant(names.AUTHENTICATION)
new_field_delimiter_node = new_dt_node.find_descendant(names.FIELDDELIMITER)
new_record_delimiter_node = new_dt_node.find_descendant(names.RECORDDELIMITER)
new_quote_char_node = new_dt_node.find_descendant(names.QUOTECHARACTER)
old_object_name = old_object_name_node.content
old_object_name_node.content = new_object_name_node.content.replace('.ezeml_tmp', '')
old_size_node.content = new_size_node.content
old_records_node.content = new_records_node.content
old_md5_node.content = new_md5_node.content
old_field_delimiter_node.content = new_field_delimiter_node.content
# record delimiter node is not required, so may be missing
if old_record_delimiter_node:
if new_record_delimiter_node:
old_record_delimiter_node.content = new_record_delimiter_node.content
else:
old_record_delimiter_node.parent.remove_child(old_record_delimiter_node)
else:
if new_record_delimiter_node:
# make sure needed ancestor nodes exist
physical_node = add_node_if_missing(old_dt_node, names.PHYSICAL)
data_format_node = add_node_if_missing(physical_node, names.DATAFORMAT)
text_format_node = add_node_if_missing(data_format_node, names.TEXTFORMAT)
new_child_node(names.RECORDDELIMITER, text_format_node).content = new_record_delimiter_node.content
# quote char node is not required, so may be missing
if old_quote_char_node:
if new_quote_char_node:
old_quote_char_node.content = new_quote_char_node.content
else:
old_quote_char_node.parent.remove_child(old_quote_char_node)
else:
if new_quote_char_node:
new_child_node(names.QUOTECHARACTER, old_field_delimiter_node.parent).content = new_quote_char_node.content
_, old_column_names, old_column_categorical_codes = user_data.get_uploaded_table_column_properties(old_object_name)
if old_column_names != new_column_names:
# substitute the new column names
old_attribute_list_node = old_dt_node.find_child(names.ATTRIBUTELIST)
old_attribute_names_nodes = []
old_attribute_list_node.find_all_descendants(names.ATTRIBUTENAME, old_attribute_names_nodes)
for old_attribute_names_node, old_name, new_name in zip(old_attribute_names_nodes, old_column_names, new_column_names):
if old_name != new_name:
debug_None(old_attribute_names_node, 'old_attribute_names_node is None')
old_attribute_names_node.content = new_name
if not compare_codes(old_column_categorical_codes, new_column_categorical_codes):
# need to fix up the categorical codes
old_attribute_list_node = old_dt_node.find_child(names.ATTRIBUTELIST)
        old_attribute_nodes = old_attribute_list_node.find_all_children(names.ATTRIBUTE)
new_attribute_list_node = new_dt_node.find_child(names.ATTRIBUTELIST)
new_attribute_nodes = new_attribute_list_node.find_all_children(names.ATTRIBUTE)
        for old_attribute_node, old_codes, new_attribute_node, new_codes in zip(old_attribute_nodes,
old_column_categorical_codes,
new_attribute_nodes,
new_column_categorical_codes):
if not compare_codes(old_codes, new_codes):
# use the new_codes, preserving any relevant code definitions
# first, get the old codes and their definitions
old_code_definition_nodes = []
old_attribute_node.find_all_descendants(names.CODEDEFINITION, old_code_definition_nodes)
code_definitions = {}
parent_node = None
for old_code_definition_node in old_code_definition_nodes:
code_node = old_code_definition_node.find_child(names.CODE)
code = None
if code_node:
code = str(code_node.content)
definition_node = old_code_definition_node.find_child(names.DEFINITION)
definition = None
if definition_node:
definition = definition_node.content
if code and definition:
code_definitions[code] = definition
# remove the old code definition node
parent_node = old_code_definition_node.parent
parent_node.remove_child(old_code_definition_node)
# add clones of new definition nodes and set their definitions, if known
if not parent_node:
continue
new_code_definition_nodes = []
new_attribute_node.find_all_descendants(names.CODEDEFINITION, new_code_definition_nodes)
for new_code_definition_node in new_code_definition_nodes:
clone = new_code_definition_node.copy()
parent_node.add_child(clone)
clone.parent = parent_node
code_node = clone.find_child(names.CODE)
if code_node:
code = str(code_node.content)
else:
code = None
definition_node = clone.find_child(names.DEFINITION)
definition = code_definitions.get(code)
if definition:
definition_node.content = definition
debug_msg(f'Leaving update_data_table')
def backup_metadata(filename):
user_folder = user_data.get_user_folder_name()
if not user_folder:
flash('User folder not found', 'error')
return
# make sure backups directory exists
backup_path = os.path.join(user_folder, 'backups')
try:
os.mkdir(backup_path)
except FileExistsError:
pass
timestamp = datetime.now().date().strftime('%Y_%m_%d') + '_' + datetime.now().time().strftime('%H_%M_%S')
backup_filename = f'{user_folder}/backups/{filename}.json.{timestamp}'
filename = f'{user_folder}/{filename}.json'
    try:
        copyfile(filename, backup_filename)
    except Exception:
        flash(f'Error backing up file {filename}', 'error')
@home.route('/import_package', methods=['GET', 'POST'])
@login_required
def import_package():
form = ImportPackageForm()
package_list = user_data.get_user_document_list()
# Process POST
if request.method == 'POST' and BTN_CANCEL in request.form:
return redirect(get_back_url())
reload_metadata() # So check_metadata status is correct
if request.method == 'POST' and form.validate_on_submit():
# Check if the post request has the file part
if 'file' not in request.files:
flash('No file part', 'error')
return redirect(request.url)
file = request.files['file']
if file:
# TODO: Possibly reconsider whether to use secure_filename in the future. It would require
# separately keeping track of the original filename and the possibly modified filename.
# filename = secure_filename(file.filename)
filename = file.filename
if not os.path.splitext(filename)[1] == '.xml':
flash('Please select a file with file extension ".xml".', 'error')
return redirect(request.url)
package_base_filename = os.path.basename(filename)
package_name = os.path.splitext(package_base_filename)[0]
# See if package with that name already exists
try:
unversioned_package_name = upload_ezeml_package(file, package_name)
except FileNotFoundError as err:
# Manifest file is missing
flash(f'The selected file does not appear to be a valid ezEML data package file. '
'Please select a different file or check with the package provider for a corrected file.',
'error')
return redirect(request.url)
except ValueError as err:
# A bad checksum
filename = err.args[0]
flash(f'The selected package appears to have been modified manually outside of ezEML. '
'Please ask the package provider to provide a package file exported directly '
'from ezEML.', 'error')
return redirect(request.url)
if unversioned_package_name in user_data.get_user_document_list():
return redirect(url_for('home.import_package_2', package_name=unversioned_package_name))
else:
import_ezeml_package(unversioned_package_name)
fixup_upload_management()
current_user.set_filename(filename=unversioned_package_name)
return redirect(url_for(PAGE_TITLE, filename=unversioned_package_name))
# Process GET
help = get_helps(['import_package'])
return render_template('import_package.html', title='Import an ezEML Data Package',
packages=package_list, form=form, help=help)
@home.route('/import_package_2/<package_name>', methods=['GET', 'POST'])
@login_required
def import_package_2(package_name):
form = ImportPackageForm()
# Process POST
if request.method == 'POST' and BTN_CANCEL in request.form:
return redirect(get_back_url())
reload_metadata() # So check_metadata status is correct
if request.method == 'POST' and form.validate_on_submit():
form = request.form
if form['replace_copy'] == 'copy':
package_name = copy_ezeml_package(package_name)
import_ezeml_package(package_name)
fixup_upload_management()
current_user.set_filename(filename=package_name)
return redirect(url_for(PAGE_TITLE, filename=package_name))
# Process GET
help = get_helps(['import_package_2'])
return render_template('import_package_2.html', title='Import an ezEML Data Package',
package_name=package_name, form=form, help=help)
def column_names_changed(filepath, delimiter, quote_char, dt_node):
# Assumes CSV file has been saved to the file system
# This function is called only in the reupload case.
data_frame = pd.read_csv(filepath, encoding='utf8', sep=delimiter, quotechar=quote_char, nrows=1)
columns = data_frame.columns
new_column_names = []
for col in columns:
new_column_names.append(col)
old_column_names = []
if dt_node:
attribute_list_node = dt_node.find_child(names.ATTRIBUTELIST)
if attribute_list_node:
for attribute_node in attribute_list_node.children:
attribute_name_node = attribute_node.find_child(names.ATTRIBUTENAME)
if attribute_name_node:
old_column_names.append(attribute_name_node.content)
return old_column_names != new_column_names
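# Illustrative sketch: column_names_changed() loads the CSV with pandas just to
# compare header names. A plain csv.reader can fetch the header row alone, as
# below; this helper is hypothetical and is not used by the re-upload flow.
def _example_csv_header_names(filepath, delimiter=',', quote_char='"'):
    import csv
    with open(filepath, 'r', encoding='utf8', newline='') as f:
        reader = csv.reader(f, delimiter=delimiter, quotechar=quote_char)
        return next(reader, [])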
@home.route('/reupload_data_with_col_names_changed/<saved_filename>/<dt_node_id>', methods=['GET', 'POST'])
@login_required
def reupload_data_with_col_names_changed(saved_filename, dt_node_id):
form = LoadDataForm()
document = current_user.get_filename()
if request.method == 'POST':
if BTN_CANCEL in request.form:
return redirect(get_back_url())
if BTN_CONTINUE in request.form:
return redirect(url_for(PAGE_REUPLOAD, filename=document, dt_node_id=dt_node_id, saved_filename=saved_filename, name_chg_ok=True), code=307) # 307 keeps it a POST
help = get_helps(['data_table_reupload_full'])
return render_template('reupload_data_with_col_names_changed.html', title='Re-upload Data Table',
form=form, saved_filename=saved_filename, dt_node_id=dt_node_id, help=help)
@home.route('/load_data/<filename>', methods=['GET', 'POST'])
@login_required
def load_data(filename=None):
log_info(f'Entering load_data: request.method={request.method}')
# filename that's passed in is actually the document name, for historical reasons.
# We'll clear it to avoid misunderstandings...
filename = None
form = LoadDataForm()
document = current_user.get_filename()
uploads_folder = user_data.get_document_uploads_folder_name()
# Process POST
if request.method == 'POST' and BTN_CANCEL in request.form:
return redirect(get_back_url())
if request.method == 'POST' and form.validate_on_submit():
# Check if the post request has the file part
if 'file' not in request.files:
flash('No file part', 'error')
return redirect(request.url)
eml_node = load_eml(filename=document)
dataset_node = eml_node.find_child(names.DATASET)
if not dataset_node:
dataset_node = new_child_node(names.DATASET, eml_node)
file = request.files['file']
if file:
filename = file.filename
if filename:
if filename is None or filename == '':
flash('No selected file', 'error')
elif allowed_data_file(filename):
# Make sure the user's uploads directory exists
Path(uploads_folder).mkdir(parents=True, exist_ok=True)
filepath = os.path.join(uploads_folder, filename)
if file:
# Upload the file to the uploads directory
file.save(filepath)
num_header_rows = 1
delimiter = form.delimiter.data
quote_char = form.quote.data
try:
dt_node, new_column_vartypes, new_column_names, new_column_categorical_codes, *_ = \
load_data_table(uploads_folder, filename, num_header_rows, delimiter, quote_char)
except UnicodeDecodeError as err:
errors = display_decode_error_lines(filepath)
return render_template('encoding_error.html', filename=filename, errors=errors)
except DataTableError as err:
flash(f'Data table has an error: {err.message}', 'error')
return redirect(request.url)
flash(f"Loaded {filename}")
dt_node.parent = dataset_node
dataset_node.add_child(dt_node)
user_data.add_data_table_upload_filename(filename)
if new_column_vartypes:
user_data.add_uploaded_table_properties(filename,
new_column_vartypes,
new_column_names,
new_column_categorical_codes)
delete_data_files(uploads_folder)
save_both_formats(filename=document, eml_node=eml_node)
return redirect(url_for(PAGE_DATA_TABLE, filename=document, dt_node_id=dt_node.id, delimiter=delimiter, quote_char=quote_char))
else:
flash(f'{filename} is not a supported data file type')
return redirect(request.url)
# Process GET
return render_template('load_data.html', title='Load Data',
form=form)
def handle_reupload(dt_node_id=None, saved_filename=None, document=None,
eml_node=None, uploads_folder=None, name_chg_ok=False,
delimiter=None, quote_char=None):
dataset_node = eml_node.find_child(names.DATASET)
if not dataset_node:
dataset_node = new_child_node(names.DATASET, eml_node)
if not saved_filename:
raise MissingFileError('Unexpected error: file not found')
dt_node = Node.get_node_instance(dt_node_id)
num_header_rows = 1
filepath = os.path.join(uploads_folder, saved_filename)
if not name_chg_ok:
if column_names_changed(filepath, delimiter, quote_char, dt_node):
# Go get confirmation
return redirect(url_for(PAGE_REUPLOAD_WITH_COL_NAMES_CHANGED,
saved_filename=saved_filename,
dt_node_id=dt_node_id),
code=307)
try:
new_dt_node, new_column_vartypes, new_column_names, new_column_categorical_codes, *_ = load_data_table(
uploads_folder, saved_filename, num_header_rows, delimiter, quote_char)
types_changed = None
try:
check_data_table_similarity(dt_node,
new_dt_node,
new_column_vartypes,
new_column_names,
new_column_categorical_codes)
except ValueError as err:
types_changed = err.args[0]
except FileNotFoundError as err:
error = err.args[0]
flash(error, 'error')
return redirect(url_for(PAGE_DATA_TABLE_SELECT, filename=document))
except IndexError as err:
error = err.args[0]
flash(f'Re-upload not done. {error}', 'error')
return redirect(url_for(PAGE_DATA_TABLE_SELECT, filename=document))
try:
# use the existing dt_node, but update objectName, size, rows, MD5, etc.
# also, update column names and categorical codes, as needed
update_data_table(dt_node, new_dt_node, new_column_names, new_column_categorical_codes)
# rename the temp file
os.rename(filepath, filepath.replace('.ezeml_tmp', ''))
if types_changed:
err_string = 'Please note: One or more columns in the new table have a different data type than they had in the old table.<ul>'
for col_name, old_type, new_type, attr_node in types_changed:
dt.change_measurement_scale(attr_node, old_type.name, new_type.name)
err_string += f'<li><b>{col_name}</b> changed from {old_type.name} to {new_type.name}'
err_string += '</ul>'
flash(Markup(err_string))
except Exception as err:
# display error
error = err.args[0]
flash(f"Data table could not be re-uploaded. {error}", 'error')
return redirect(url_for(PAGE_DATA_TABLE_SELECT, filename=document))
except UnicodeDecodeError as err:
errors = display_decode_error_lines(filepath)
return render_template('encoding_error.html', filename=document, errors=errors)
except DataTableError as err:
flash(f'Data table has an error: {err.message}', 'error')
return redirect(request.url)
data_file = saved_filename.replace('.ezeml_tmp', '')
flash(f"Loaded {data_file}")
dt_node.parent = dataset_node
object_name_node = dt_node.find_descendant(names.OBJECTNAME)
if object_name_node:
object_name_node.content = data_file
user_data.add_data_table_upload_filename(data_file)
if new_column_vartypes:
user_data.add_uploaded_table_properties(data_file,
new_column_vartypes,
new_column_names,
new_column_categorical_codes)
delete_data_files(uploads_folder)
backup_metadata(filename=document)
save_both_formats(filename=document, eml_node=eml_node)
return redirect(url_for(PAGE_DATA_TABLE, filename=document, dt_node_id=dt_node.id, delimiter=delimiter,
quote_char=quote_char))
@home.route('/reupload_data/<filename>/<dt_node_id>', methods=['GET', 'POST'])
@home.route('/reupload_data/<filename>/<dt_node_id>/<saved_filename>/<name_chg_ok>', methods=['GET', 'POST'])
@login_required
def reupload_data(dt_node_id=None, filename=None, saved_filename=None, name_chg_ok=False):
# filename that's passed in is actually the document name, for historical reasons.
# We'll clear it to avoid misunderstandings...
filename = None
form = LoadDataForm()
document = current_user.get_filename()
uploads_folder = user_data.get_document_uploads_folder_name()
eml_node = load_eml(filename=document)
data_table_name = ''
dt_node = Node.get_node_instance(dt_node_id)
if dt_node:
entity_name_node = dt_node.find_child(names.ENTITYNAME)
if entity_name_node:
data_table_name = entity_name_node.content
if not data_table_name:
flash(f'Data table name not found in the metadata.', 'error')
return redirect(request.url)
if request.method == 'POST' and BTN_CANCEL in request.form:
url = url_for(PAGE_DATA_TABLE_SELECT, filename=document)
return redirect(url)
if request.method == 'POST':
if dt_node:
if saved_filename:
filename = saved_filename
else:
file = request.files['file']
if file:
filename = f"{file.filename}"
if allowed_data_file(filename):
# We upload the new version of the CSV file under a temp name so we have both files to inspect.
filename = f"{filename}.ezeml_tmp"
filepath = os.path.join(uploads_folder, filename)
file.save(filepath)
else:
flash(f'{filename} is not a supported data file type', 'error')
return redirect(request.url)
delimiter = form.delimiter.data
quote_char = form.quote.data
try:
return handle_reupload(dt_node_id=dt_node_id, saved_filename=filename, document=document,
eml_node=eml_node, uploads_folder=uploads_folder, name_chg_ok=name_chg_ok,
delimiter=delimiter, quote_char=quote_char)
except MissingFileError as err:
flash(err.message, 'error')
return redirect(request.url)
except Exception as err:
return redirect(request.url)
# Process GET
help = get_helps(['data_table_reupload_full'])
return render_template('reupload_data.html', title='Re-upload Data Table',
form=form, name=data_table_name, help=help)
@home.route('/reupload_other_entity/<filename>/<node_id>', methods=['GET', 'POST'])
@login_required
def reupload_other_entity(filename, node_id):
form = LoadOtherEntityForm()
document = current_user.get_filename()
uploads_folder = user_data.get_document_uploads_folder_name()
eml_node = load_eml(filename=document)
other_entity_name = ''
oe_node = Node.get_node_instance(node_id)
if oe_node:
entity_name_node = oe_node.find_child(names.ENTITYNAME)
if entity_name_node:
other_entity_name = entity_name_node.content
if not other_entity_name:
raise ValueError("Other entity's name not found")
if request.method == 'POST' and BTN_CANCEL in request.form:
url = url_for(PAGE_OTHER_ENTITY_SELECT, filename=filename)
return redirect(url)
if request.method == 'POST':
return redirect(url_for(PAGE_LOAD_OTHER_ENTITY, node_id=node_id), code=307) # 307 keeps it a POST
help = get_helps(['data_table_reupload_full']) # FIXME
return render_template('reupload_other_entity.html', title='Re-upload Other Entity',
form=form, name=other_entity_name, help=help)
@home.route('/load_other_entity/<node_id>', methods=['GET', 'POST'])
@login_required
def load_entity(node_id=None):
form = LoadOtherEntityForm()
document = current_user.get_filename()
uploads_folder = user_data.get_document_uploads_folder_name()
# Process POST
if request.method == 'POST' and BTN_CANCEL in request.form:
return redirect(get_back_url())
if request.method == 'POST' and form.validate_on_submit():
# Check if the post request has the file part
if 'file' not in request.files:
flash('No file part', 'error')
return redirect(request.url)
file = request.files['file']
if file:
# TODO: Possibly reconsider whether to use secure_filename in the future. It would require
# separately keeping track of the original filename and the possibly modified filename.
# filename = secure_filename(file.filename)
filename = file.filename
if filename is None or filename == '':
flash('No selected file', 'error')
else:
file.save(os.path.join(uploads_folder, filename))
data_file = filename
data_file_path = f'{uploads_folder}/{data_file}'
flash(f'Loaded {data_file}')
eml_node = load_eml(filename=document)
dataset_node = eml_node.find_child(names.DATASET)
other_entity_node = load_other_entity(dataset_node, uploads_folder, data_file, node_id=node_id)
save_both_formats(filename=document, eml_node=eml_node)
return redirect(url_for(PAGE_OTHER_ENTITY, filename=document, node_id=other_entity_node.id))
# Process GET
return render_template('load_other_entity.html', title='Load Other Entity',
form=form)
@home.route('/load_metadata', methods=['GET', 'POST'])
@login_required
def load_metadata():
form = LoadMetadataForm()
document = current_user.get_filename()
uploads_folder = user_data.get_document_uploads_folder_name()
# Process POST
if request.method == 'POST' and form.validate_on_submit():
# Check if the post request has the file part
if 'file' not in request.files:
flash('No file part', 'error')
return redirect(request.url)
file = request.files['file']
if file:
# TODO: Possibly reconsider whether to use secure_filename in the future. It would require
# separately keeping track of the original filename and the possibly modified filename.
# filename = secure_filename(file.filename)
# filename = file.filename
filename = secure_filename(file.filename)
if filename is None or filename == '':
flash('No selected file', 'error')
elif allowed_metadata_file(filename):
Path(uploads_folder).mkdir(parents=True, exist_ok=True)
file.save(os.path.join(uploads_folder, filename))
metadata_file = filename
metadata_file_path = f'{uploads_folder}/{metadata_file}'
with open(metadata_file_path, 'r') as file:
metadata_str = file.read()
                eml_node = None
                try:
eml_node = read_xml(metadata_str)
except Exception as e:
flash(e, 'error')
if eml_node:
packageid = eml_node.attribute_value('packageId')
if packageid:
current_user.set_packageid(packageid)
save_both_formats(filename=filename, eml_node=eml_node)
return redirect(url_for(PAGE_TITLE, filename=filename))
else:
flash(f'Unable to determine packageid from file {filename}', 'error')
else:
flash(f'Unable to load metadata from file {filename}', 'error')
else:
flash(f'{filename} is not a supported data file type', 'error')
return redirect(request.url)
# Process GET
return render_template('load_metadata.html', title='Load Metadata',
form=form)
@home.route('/close', methods=['GET', 'POST'])
@login_required
def close():
current_document = current_user.get_filename()
if current_document:
current_user.set_filename(None)
flash(f'Closed {current_document}')
else:
flash("There was no package open")
set_current_page('')
return render_template('index.html')
def select_post(filename=None, form=None, form_dict=None,
method=None, this_page=None, back_page=None,
next_page=None, edit_page=None, project_node_id=None, reupload_page=None):
node_id = None
new_page = None
if form_dict:
for key in form_dict:
val = form_dict[key][0] # value is the first list element
if val in (BTN_BACK, BTN_DONE):
new_page = back_page
elif val[0:4] == BTN_BACK:
node_id = project_node_id
new_page = back_page
elif val in [BTN_NEXT, BTN_SAVE_AND_CONTINUE]:
node_id = project_node_id
new_page = next_page
elif val == BTN_EDIT:
new_page = edit_page
node_id = key
elif val == BTN_REMOVE:
new_page = this_page
node_id = key
eml_node = load_eml(filename=filename)
# Get the data table filename, if any, so we can remove it from the uploaded list
# dt_node = Node.get_node_instance(node_id)
# if dt_node and dt_node.name == names.DATATABLE:
# object_name_node = dt_node.find_single_node_by_path([names.PHYSICAL, names.OBJECTNAME])
# if object_name_node:
# object_name = object_name_node.content
# if object_name:
# user_data.discard_data_table_upload_filename(object_name)
remove_child(node_id=node_id)
node_id = project_node_id # for relatedProject case
save_both_formats(filename=filename, eml_node=eml_node)
elif val == BTN_REUPLOAD:
node_id = key
if reupload_page:
new_page = reupload_page
else:
node_id = key
new_page = PAGE_REUPLOAD
elif val == BTN_HIDDEN_CHECK:
new_page = PAGE_CHECK
elif val == BTN_HIDDEN_SAVE:
new_page = this_page
elif val == BTN_HIDDEN_DOWNLOAD:
new_page = PAGE_DOWNLOAD
elif val == BTN_HIDDEN_NEW:
new_page = PAGE_CREATE
elif val == BTN_HIDDEN_OPEN:
new_page = PAGE_OPEN
elif val == BTN_HIDDEN_CLOSE:
new_page = PAGE_CLOSE
elif val == UP_ARROW:
new_page = this_page
node_id = key
process_up_button(filename, node_id)
elif val == DOWN_ARROW:
new_page = this_page
node_id = key
process_down_button(filename, node_id)
elif val[0:3] == BTN_ADD:
new_page = edit_page
node_id = '1'
elif val == BTN_LOAD_DATA_TABLE:
new_page = PAGE_LOAD_DATA
node_id = '1'
elif val == BTN_LOAD_GEO_COVERAGE:
new_page = PAGE_LOAD_GEO_COVERAGE
node_id = '1'
elif val == BTN_LOAD_OTHER_ENTITY:
new_page = PAGE_LOAD_OTHER_ENTITY
node_id = '1'
elif val == BTN_REUSE:
new_page = PAGE_IMPORT_PARTY
node_id = '1'
if form.validate_on_submit():
if new_page in [PAGE_DATA_TABLE, PAGE_LOAD_DATA, PAGE_REUPLOAD, PAGE_REUPLOAD_WITH_COL_NAMES_CHANGED ]:
return url_for(new_page, filename=filename, dt_node_id=node_id, project_node_id=project_node_id)
else:
return url_for(new_page, filename=filename, node_id=node_id, project_node_id=project_node_id)
def process_up_button(filename:str=None, node_id:str=None):
process_updown_button(filename, node_id, move_up)
def process_down_button(filename:str=None, node_id:str=None):
process_updown_button(filename, node_id, move_down)
def process_updown_button(filename:str=None, node_id:str=None, move_function=None):
if filename and node_id and move_function:
eml_node = load_eml(filename=filename)
child_node = Node.get_node_instance(node_id)
if child_node:
parent_node = child_node.parent
if parent_node:
move_function(parent_node, child_node)
save_both_formats(filename=filename, eml_node=eml_node)
def compare_begin_end_dates(begin_date_str:str=None, end_date_str:str=None):
    begin_date = None
    end_date = None
    flash_msg = None
    if begin_date_str and len(begin_date_str) == 4:
        begin_date_str += '-01-01'
    if end_date_str and len(end_date_str) == 4:
        end_date_str += '-01-01'
    # date.fromisoformat() requires Python 3.7 or later
    if begin_date_str and end_date_str:
        try:
            begin_date = date.fromisoformat(begin_date_str)
            end_date = date.fromisoformat(end_date_str)
        except ValueError:
            pass
    if begin_date and end_date and begin_date > end_date:
flash_msg = 'Begin date should be less than or equal to end date'
if end_date:
today_date = date.today()
if end_date > today_date:
msg = "End date should not be greater than today's date"
if flash_msg:
flash_msg += "; " + msg
else:
flash_msg = msg
return flash_msg
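# Illustrative sketch of how compare_begin_end_dates() is meant to behave: it
# returns None when the dates are acceptable and a message string otherwise, and
# bare years are padded to January 1 before comparison. Not called by the app.
def _example_begin_end_check():
    assert compare_begin_end_dates('2019', '2020-06-30') is None
    # Expected: 'Begin date should be less than or equal to end date'
    return compare_begin_end_dates('2021-05-01', '2020-01-01')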
def set_current_page(page):
session['current_page'] = page
def get_current_page():
return session.get('current_page')
| 40.38112 | 174 | 0.652648 | [
"Apache-2.0"
] | mother-db/ezEMLmotherDB | webapp/home/views.py | 91,546 | Python |
#!/usr/bin/python3
import argparse
import os
import shutil
import sys
import traceback
from multiprocessing import Pool, cpu_count
from os.path import expanduser
import time
from typing import Tuple
from colorama import Fore
from atcodertools.client.atcoder import AtCoderClient, Contest, LoginError, PageNotFoundError
from atcodertools.client.models.problem import Problem
from atcodertools.client.models.problem_content import InputFormatDetectionError, SampleDetectionError
from atcodertools.codegen.code_style_config import DEFAULT_WORKSPACE_DIR_PATH
from atcodertools.codegen.models.code_gen_args import CodeGenArgs
from atcodertools.common.language import ALL_LANGUAGES, CPP
from atcodertools.common.logging import logger
from atcodertools.config.config import Config
from atcodertools.constprediction.constants_prediction import predict_constants
from atcodertools.fileutils.create_contest_file import create_examples, \
create_code
from atcodertools.fmtprediction.models.format_prediction_result import FormatPredictionResult
from atcodertools.fmtprediction.predict_format import NoPredictionResultError, \
MultiplePredictionResultsError, predict_format
from atcodertools.tools import get_default_config_path
from atcodertools.tools.models.metadata import Metadata
from atcodertools.tools.utils import with_color
from atcodertools.config.config import ConfigType
class BannedFileDetectedError(Exception):
pass
class EnvironmentInitializationError(Exception):
pass
def output_splitter():
# for readability
print("=================================================", file=sys.stderr)
def _message_on_execution(cwd: str, cmd: str):
return "Executing the following command in `{}`: {}".format(cwd, cmd)
def prepare_procedure(atcoder_client: AtCoderClient,
problem: Problem,
config: Config):
workspace_root_path = config.code_style_config.workspace_dir
template_code_path = config.code_style_config.template_file
lang = config.code_style_config.lang
pid = problem.get_alphabet()
problem_dir_path = os.path.join(
workspace_root_path,
problem.get_contest().get_id(),
pid)
def emit_error(text):
logger.error(with_color("Problem {}: {}".format(pid, text), Fore.RED))
def emit_warning(text):
logger.warning("Problem {}: {}".format(pid, text))
def emit_info(text):
logger.info("Problem {}: {}".format(pid, text))
# Return if a directory for the problem already exists
if config.etc_config.skip_existing_problems:
if os.path.exists(problem_dir_path):
emit_info(
f"Skipped preparation because the directory already exists: {problem_dir_path}")
return
emit_info('{} is used for template'.format(template_code_path))
# Fetch problem data from the statement
try:
content = atcoder_client.download_problem_content(problem)
except InputFormatDetectionError as e:
emit_error("Failed to download input format.")
raise e
except SampleDetectionError as e:
emit_error("Failed to download samples.")
raise e
# Store examples to the directory path
if len(content.get_samples()) == 0:
emit_info("No samples.")
else:
os.makedirs(problem_dir_path, exist_ok=True)
create_examples(content.get_samples(), problem_dir_path,
config.etc_config.in_example_format, config.etc_config.out_example_format)
emit_info("Created examples.")
code_file_path = os.path.join(
problem_dir_path,
"main.{}".format(lang.extension))
# If there is an existing code, just create backup
if os.path.exists(code_file_path):
backup_id = 1
while True:
backup_name = "{}.{}".format(code_file_path, backup_id)
if not os.path.exists(backup_name):
new_path = backup_name
shutil.copy(code_file_path, backup_name)
break
backup_id += 1
emit_info(
"Backup for existing code '{}' -> '{}'".format(
code_file_path,
new_path))
try:
prediction_result = predict_format(content)
emit_info(
with_color("Format prediction succeeded", Fore.LIGHTGREEN_EX))
except (NoPredictionResultError, MultiplePredictionResultsError) as e:
prediction_result = FormatPredictionResult.empty_result()
if isinstance(e, NoPredictionResultError):
msg = "No prediction -- Failed to understand the input format"
else:
msg = "Too many prediction -- Failed to understand the input format"
emit_warning(with_color(msg, Fore.LIGHTRED_EX))
constants = predict_constants(content.original_html)
code_generator = config.code_style_config.code_generator
with open(template_code_path, "r") as f:
template = f.read()
create_code(code_generator(
CodeGenArgs(
template,
prediction_result.format,
constants,
config.code_style_config
)),
code_file_path)
emit_info("Saved code to {}".format(code_file_path))
# Save metadata
metadata_path = os.path.join(problem_dir_path, "metadata.json")
Metadata(problem,
os.path.basename(code_file_path),
config.etc_config.in_example_format.replace("{}", "*"),
config.etc_config.out_example_format.replace("{}", "*"),
lang,
constants.judge_method,
constants.timeout
).save_to(metadata_path)
emit_info("Saved metadata to {}".format(metadata_path))
if config.postprocess_config.exec_cmd_on_problem_dir is not None:
emit_info(_message_on_execution(problem_dir_path,
config.postprocess_config.exec_cmd_on_problem_dir))
config.postprocess_config.execute_on_problem_dir(
problem_dir_path)
output_splitter()
def func(argv: Tuple[AtCoderClient, Problem, Config]):
atcoder_client, problem, config = argv
prepare_procedure(atcoder_client, problem, config)
def prepare_contest(atcoder_client: AtCoderClient,
contest_id: str,
config: Config,
retry_delay_secs: float = 1.5,
retry_max_delay_secs: float = 60,
retry_max_tries: int = 10):
attempt_count = 1
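    # Retry the problem-list download with exponential backoff, doubling the delay up to retry_max_delay_secs.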
while True:
try:
problem_list = atcoder_client.download_problem_list(
Contest(contest_id=contest_id))
break
except PageNotFoundError:
if 0 < retry_max_tries < attempt_count:
raise EnvironmentInitializationError
logger.warning(
"Failed to fetch. Will retry in {} seconds. (Attempt {})".format(retry_delay_secs, attempt_count))
time.sleep(retry_delay_secs)
retry_delay_secs = min(retry_delay_secs * 2, retry_max_delay_secs)
attempt_count += 1
tasks = [(atcoder_client,
problem,
config) for
problem in problem_list]
output_splitter()
if config.etc_config.parallel_download:
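        # Fan the per-problem work out over a pool sized to the CPU count.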
thread_pool = Pool(processes=cpu_count())
thread_pool.map(func, tasks)
else:
for argv in tasks:
try:
func(argv)
except Exception:
# Prevent the script from stopping
print(traceback.format_exc(), file=sys.stderr)
pass
if config.postprocess_config.exec_cmd_on_contest_dir is not None:
contest_dir_path = os.path.join(
config.code_style_config.workspace_dir, contest_id)
logger.info(_message_on_execution(contest_dir_path,
config.postprocess_config.exec_cmd_on_contest_dir))
config.postprocess_config.execute_on_contest_dir(
contest_dir_path)
USER_CONFIG_PATH = os.path.join(
expanduser("~"), ".atcodertools.toml")
def get_config(args: argparse.Namespace) -> Config:
def _load(path: str) -> Config:
logger.info("Going to load {} as config".format(path))
with open(path, 'r') as f:
return Config.load(f, {ConfigType.CODESTYLE, ConfigType.POSTPROCESS, ConfigType.ETC}, args)
if args.config:
return _load(args.config)
if os.path.exists(USER_CONFIG_PATH):
return _load(USER_CONFIG_PATH)
return _load(get_default_config_path())
class DeletedFunctionalityError(Exception):
pass
def main(prog, args):
parser = argparse.ArgumentParser(
prog=prog,
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument("contest_id",
help="Contest ID (e.g. arc001)")
parser.add_argument("--without-login",
action="store_true",
help="Download data without login")
parser.add_argument("--workspace",
help="Path to workspace's root directory. This script will create files"
" in {{WORKSPACE}}/{{contest_name}}/{{alphabet}}/ e.g. ./your-workspace/arc001/A/\n"
"[Default] {}".format(DEFAULT_WORKSPACE_DIR_PATH))
parser.add_argument("--lang",
help="Programming language of your template code, {}.\n"
.format(" or ".join([lang.name for lang in ALL_LANGUAGES])) + "[Default] {}".format(CPP.name))
parser.add_argument("--template",
help="File path to your template code\n{}".format(
"\n".join(
["[Default ({dname})] {path}".format(
dname=lang.display_name,
path=lang.default_template_path
) for lang in ALL_LANGUAGES]
))
)
# Deleted functionality
parser.add_argument('--replacement', help=argparse.SUPPRESS)
parser.add_argument("--parallel",
action="store_true",
help="Prepare problem directories asynchronously using multi processors.",
default=None)
parser.add_argument("--save-no-session-cache",
action="store_true",
help="Save no session cache to avoid security risk",
default=None)
parser.add_argument("--skip-existing-problems",
action="store_true",
help="Skip processing every problem for which a directory already exists",
default=None)
parser.add_argument("--config",
help="File path to your config file\n{0}{1}".format("[Default (Primary)] {}\n".format(
USER_CONFIG_PATH),
"[Default (Secondary)] {}\n".format(
get_default_config_path()))
)
args = parser.parse_args(args)
if args.replacement is not None:
logger.error(with_color("Sorry! --replacement argument no longer exists"
" and you can only use --template."
" See the official document for details.", Fore.LIGHTRED_EX))
raise DeletedFunctionalityError
config = get_config(args)
try:
import AccountInformation # noqa
raise BannedFileDetectedError(
"We abolished the logic with AccountInformation.py. Please delete the file.")
except ImportError:
pass
client = AtCoderClient()
if not config.etc_config.download_without_login:
try:
client.login(
save_session_cache=not config.etc_config.save_no_session_cache)
logger.info("Login successful.")
except LoginError:
logger.error(
"Failed to login (maybe due to wrong username/password combination?)")
sys.exit(-1)
else:
logger.info("Downloading data without login.")
prepare_contest(client,
args.contest_id,
config)
if __name__ == "__main__":
main(sys.argv[0], sys.argv[1:])
| 36.707101 | 118 | 0.626662 | [ "MIT" ] | anosatsuk124/atcoder-tools | atcodertools/tools/envgen.py | 12,407 | Python |
import os
import torch
import torch.nn.functional as F
import torch.nn as nn
import math
from modeling.sync_batchnorm.batchnorm import SynchronizedBatchNorm2d
import torch.utils.model_zoo as model_zoo
def conv_bn(inp, oup, stride, BatchNorm):
return nn.Sequential(
nn.Conv2d(inp, oup, 3, stride, 1, bias=False),
BatchNorm(oup),
nn.ReLU6(inplace=True)
)
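# Compute explicit "same"-style padding for a given kernel size and dilation so the
# convolutions below can be declared with padding=0.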
def fixed_padding(inputs, kernel_size, dilation):
kernel_size_effective = kernel_size + (kernel_size - 1) * (dilation - 1)
pad_total = kernel_size_effective - 1
pad_beg = pad_total // 2
pad_end = pad_total - pad_beg
padded_inputs = F.pad(inputs, (pad_beg, pad_end, pad_beg, pad_end))
return padded_inputs
class InvertedResidual(nn.Module):
def __init__(self, inp, oup, stride, dilation, expand_ratio, BatchNorm):
super(InvertedResidual, self).__init__()
self.stride = stride
assert stride in [1, 2]
hidden_dim = round(inp * expand_ratio)
self.use_res_connect = self.stride == 1 and inp == oup
self.kernel_size = 3
self.dilation = dilation
if expand_ratio == 1:
self.conv = nn.Sequential(
# dw
nn.Conv2d(hidden_dim, hidden_dim, 3, stride, 0, dilation, groups=hidden_dim, bias=False),
BatchNorm(hidden_dim),
nn.ReLU6(inplace=True),
# pw-linear
nn.Conv2d(hidden_dim, oup, 1, 1, 0, 1, 1, bias=False),
BatchNorm(oup),
)
else:
self.conv = nn.Sequential(
# pw
nn.Conv2d(inp, hidden_dim, 1, 1, 0, 1, bias=False),
BatchNorm(hidden_dim),
nn.ReLU6(inplace=True),
# dw
nn.Conv2d(hidden_dim, hidden_dim, 3, stride, 0, dilation, groups=hidden_dim, bias=False),
BatchNorm(hidden_dim),
nn.ReLU6(inplace=True),
# pw-linear
nn.Conv2d(hidden_dim, oup, 1, 1, 0, 1, bias=False),
BatchNorm(oup),
)
def forward(self, x):
x_pad = fixed_padding(x, self.kernel_size, dilation=self.dilation)
if self.use_res_connect:
x = x + self.conv(x_pad)
else:
x = self.conv(x_pad)
return x
class MobileNetV2(nn.Module):
def __init__(self, output_stride=8, BatchNorm=None, width_mult=1., pretrained=True):
super(MobileNetV2, self).__init__()
block = InvertedResidual
input_channel = 32
current_stride = 1
rate = 1
interverted_residual_setting = [
# t, c, n, s
[1, 16, 1, 1],
[6, 24, 2, 2],
[6, 32, 3, 2],
[6, 64, 4, 2],
[6, 96, 3, 1],
[6, 160, 3, 2],
[6, 320, 1, 1],
]
# building first layer
input_channel = int(input_channel * width_mult)
self.features = [conv_bn(3, input_channel, 2, BatchNorm)]
current_stride *= 2
# building inverted residual blocks
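        # Once the cumulative stride reaches output_stride, later blocks keep stride 1 and
        # grow the dilation rate instead, preserving spatial resolution.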
for t, c, n, s in interverted_residual_setting:
if current_stride == output_stride:
stride = 1
dilation = rate
rate *= s
else:
stride = s
dilation = 1
current_stride *= s
output_channel = int(c * width_mult)
for i in range(n):
if i == 0:
self.features.append(block(input_channel, output_channel, stride, dilation, t, BatchNorm))
else:
self.features.append(block(input_channel, output_channel, 1, dilation, t, BatchNorm))
input_channel = output_channel
self.features = nn.Sequential(*self.features)
self._initialize_weights()
if pretrained:
self._load_pretrained_model()
self.low_level_features = self.features[0:4]
self.high_level_features = self.features[4:]
def forward(self, x):
low_level_feat = self.low_level_features(x)
x = self.high_level_features(low_level_feat)
return x, low_level_feat
def _load_pretrained_model(self):
pretrain_dict = torch.load(os.path.join(os.path.dirname(os.path.abspath(__file__)),'./mobilenet_VOC.pth'))
model_dict = {}
state_dict = self.state_dict()
for k, v in pretrain_dict.items():
if k in state_dict:
model_dict[k] = v
state_dict.update(model_dict)
self.load_state_dict(state_dict)
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
# n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
# m.weight.data.normal_(0, math.sqrt(2. / n))
torch.nn.init.kaiming_normal_(m.weight)
elif isinstance(m, SynchronizedBatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
if __name__ == "__main__":
input = torch.rand(1, 3, 512, 512)
model = MobileNetV2(output_stride=16, BatchNorm=nn.BatchNorm2d)
output, low_level_feat = model(input)
print(output.size())
    print(low_level_feat.size())
| 35.644737 | 114 | 0.568106 | [ "MIT" ] | haofengsiji/synthetic-to-real-semantic-segmentation | modeling/backbone/mobilenet.py | 5,418 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=4
# total number=43
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
#thatsNoCode
from cirq.contrib.svg import SVGCircuit
# Symbols for the rotation angles in the QAOA circuit.
def make_circuit(n: int, input_qubit):
c = cirq.Circuit() # circuit begin
c.append(cirq.H.on(input_qubit[0])) # number=9
c.append(cirq.H.on(input_qubit[1])) # number=2
c.append(cirq.H.on(input_qubit[2])) # number=3
c.append(cirq.H.on(input_qubit[3])) # number=4
c.append(cirq.H.on(input_qubit[0])) # number=5
c.append(cirq.Y.on(input_qubit[2])) # number=18
c.append(cirq.Z.on(input_qubit[3])) # number=28
c.append(cirq.H.on(input_qubit[1])) # number=6
c.append(cirq.H.on(input_qubit[2])) # number=7
c.append(cirq.H.on(input_qubit[3])) # number=8
c.append(cirq.H.on(input_qubit[3])) # number=20
c.append(cirq.CZ.on(input_qubit[0],input_qubit[3])) # number=21
c.append(cirq.H.on(input_qubit[3])) # number=22
c.append(cirq.X.on(input_qubit[3])) # number=13
c.append(cirq.H.on(input_qubit[3])) # number=23
c.append(cirq.CZ.on(input_qubit[0],input_qubit[3])) # number=24
c.append(cirq.H.on(input_qubit[3])) # number=25
c.append(cirq.H.on(input_qubit[0])) # number=33
c.append(cirq.CZ.on(input_qubit[2],input_qubit[0])) # number=34
c.append(cirq.H.on(input_qubit[0])) # number=35
c.append(cirq.H.on(input_qubit[1])) # number=19
c.append(cirq.H.on(input_qubit[0])) # number=15
c.append(cirq.CZ.on(input_qubit[2],input_qubit[0])) # number=16
c.append(cirq.H.on(input_qubit[0])) # number=17
c.append(cirq.rx(1.6838936623241292).on(input_qubit[2])) # number=36
c.append(cirq.Y.on(input_qubit[1])) # number=26
c.append(cirq.Y.on(input_qubit[1])) # number=27
c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=29
c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=30
c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=40
c.append(cirq.X.on(input_qubit[0])) # number=41
c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=42
c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=37
c.append(cirq.X.on(input_qubit[0])) # number=38
c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=39
# circuit end
c.append(cirq.measure(*input_qubit, key='result'))
return c
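# Join the measured bits into a string key for the result histogram.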
def bitstring(bits):
return ''.join(str(int(b)) for b in bits)
if __name__ == '__main__':
qubit_count = 4
input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
circuit = make_circuit(qubit_count,input_qubits)
circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
circuit_sample_count =2000
simulator = cirq.Simulator()
result = simulator.run(circuit, repetitions=circuit_sample_count)
frequencies = result.histogram(key='result', fold_func=bitstring)
writefile = open("../data/startCirq3134.csv","w+")
print(format(frequencies),file=writefile)
print("results end", file=writefile)
print(circuit.__len__(), file=writefile)
print(circuit,file=writefile)
    writefile.close()
| 37.303371 | 77 | 0.680723 | [ "BSD-3-Clause" ] | UCLA-SEAL/QDiff | benchmark/startCirq3134.py | 3,320 | Python |
# -*- coding: utf-8 -*-
"""DBO MSSQL driver
.. module:: lib.database.dbo.drivers.mssql.driver
:platform: Unix
:synopsis: DBO MSSQL driver
.. moduleauthor:: Petr Rašek <bowman@hydratk.org>
"""
try:
import pymssql
except ImportError:
raise NotImplementedError('MSSQL client is not supported for PyPy')
from hydratk.lib.database.dbo import dbodriver
class DBODriver(dbodriver.DBODriver):
"""Class DBODriver
"""
_host = None
_port = 1433
_dbname = None
_driver_options = {
'timeout': 5.0,
'detect_types': 0,
# available “DEFERRED”, “IMMEDIATE” or “EXCLUSIVE”
'isolation_level': None,
'check_same_thread': None,
'factory': 'Connection',
'cached_statements': 100
}
def _parse_dsn(self, dsn):
"""Method parses dsn
Args:
dsn (str): dsn
Returns:
bool: True
Raises:
exception: Exception
"""
dsn_opt = dsn.split(':')[1]
dsn_opt_tokens = dsn_opt.split(';')
for dsn_opt_token in dsn_opt_tokens:
# print(dsn_opt_token)
opt = dsn_opt_token.split('=')
if opt[0] == 'host':
self._host = opt[1]
if opt[0] == 'port':
self._port = int(opt[1])
if opt[0] == 'database':
self._dbname = opt[1]
if opt[0] == 'user':
self._username = opt[1]
if opt[0] == 'password':
self._password = opt[1]
return True
def _apply_driver_options(self, driver_options):
"""Method sets driver options
Args:
driver_option (dict): driver options
Returns:
void
"""
for optname, optval in driver_options.items():
if optname in self._driver_options:
self._driver_options[optname] = optval
def connect(self):
"""Method connects to database
Args:
none
Returns:
void
"""
self._dbcon = pymssql.connect(
server=self._host, port=self._port, database=self._dbname, user=self._username, password=self._password)
self.result_as_dict(self._result_as_dict)
def close(self):
"""Method disconnects from database
Args:
none
Returns:
void
Raises:
exception: DBODriverException
"""
if type(self._dbcon).__name__.lower() == 'connection':
self._dbcon.close()
else:
raise dbodriver.DBODriverException('Not connected')
def commit(self):
"""Method commits transaction
Args:
none
Returns:
void
Raises:
exception: DBODriverException
"""
if type(self._dbcon).__name__.lower() == 'connection':
self._dbcon.commit()
else:
raise dbodriver.DBODriverException('Not connected')
def error_code(self):
pass
def error_info(self):
pass
def qexec(self):
pass
def get_attribute(self):
pass
def in_transaction(self):
pass
def last_insert_id(self):
pass
def prepare(self):
pass
def query(self):
pass
def execute(self, sql, *parameters):
"""Method executes query
Args:
sql (str): SQL query
parameters (args): query parameters
Returns:
obj: cursor
"""
self._cursor.execute(sql, tuple(parameters))
return self._cursor
def quote(self):
pass
def rollback(self):
"""Method rollbacks transaction
Args:
none
Returns:
void
Raises:
exception: DBODriverException
"""
if type(self._dbcon).__name__.lower() == 'connection':
self._dbcon.rollback()
else:
raise dbodriver.DBODriverException('Not connected')
def set_attribute(self):
pass
def __getitem__(self, name):
"""Method gets item
Args:
name (str): item name
Returns:
obj: item value
"""
if hasattr(pymssql, name):
return getattr(pymssql, name)
def __getattr__(self, name):
"""Method gets attribute
Args:
name (str): attribute name
Returns:
obj: attribute value
"""
if type(self._dbcon).__name__.lower() == 'connection':
if hasattr(self._dbcon, name):
return getattr(self._dbcon, name)
if hasattr(pymssql, name):
return getattr(pymssql, name)
def table_exists(self, table_name):
"""Method checks if table exists
Args:
table_name (str): table
Returns:
bool: result
"""
if table_name is not None and table_name != '':
query = "SELECT count(*) found FROM information_schema.tables WHERE table_catalog=%s AND table_type='BASE TABLE' and table_name=%s"
self._cursor.execute(query, (self._dbname, table_name))
recs = self._cursor.fetchall()
result = True if (recs[0]['found'] == 1) else False
return result
def database_exists(self):
pass
def remove_database(self):
pass
def erase_database(self):
pass
def result_as_dict(self, state):
"""Method enables query result in dictionary form
Args:
state (bool): enable dictionary
Returns:
void
Raises:
error: TypeError
"""
if state in (True, False):
self._result_as_dict = state
if state == True:
self._cursor = self._dbcon.cursor(as_dict=True)
else:
self._cursor = self._dbcon.cursor()
else:
raise TypeError('Boolean value expected')
| 21.735714 | 143 | 0.533027 | [ "BSD-3-Clause" ] | hydratk/hydratk-lib-network | src/hydratk/lib/database/dbo/drivers/mssql/driver.py | 6,099 | Python |
import json
import pickle
import numpy as np
import pytest
import fsspec
from fsspec.implementations.ftp import FTPFileSystem
from fsspec.spec import AbstractFileSystem, AbstractBufferedFile
class DummyTestFS(AbstractFileSystem):
protocol = "mock"
_fs_contents = (
{"name": "top_level", "type": "directory"},
{"name": "top_level/second_level", "type": "directory"},
{"name": "top_level/second_level/date=2019-10-01", "type": "directory"},
{
"name": "top_level/second_level/date=2019-10-01/a.parquet",
"type": "file",
"size": 100,
},
{
"name": "top_level/second_level/date=2019-10-01/b.parquet",
"type": "file",
"size": 100,
},
{"name": "top_level/second_level/date=2019-10-02", "type": "directory"},
{
"name": "top_level/second_level/date=2019-10-02/a.parquet",
"type": "file",
"size": 100,
},
{"name": "top_level/second_level/date=2019-10-04", "type": "directory"},
{
"name": "top_level/second_level/date=2019-10-04/a.parquet",
"type": "file",
"size": 100,
},
{"name": "misc", "type": "directory"},
{"name": "misc/foo.txt", "type": "file", "size": 100},
{"name": "glob_test/hat/^foo.txt", "type": "file", "size": 100},
{"name": "glob_test/dollar/$foo.txt", "type": "file", "size": 100},
{"name": "glob_test/lbrace/{foo.txt", "type": "file", "size": 100},
{"name": "glob_test/rbrace/}foo.txt", "type": "file", "size": 100},
)
def __getitem__(self, name):
for item in self._fs_contents:
if item["name"] == name:
return item
raise IndexError("{name} not found!".format(name=name))
def ls(self, path, detail=True, **kwargs):
path = self._strip_protocol(path)
files = {
file["name"]: file
for file in self._fs_contents
if path == self._parent(file["name"])
}
if detail:
return [files[name] for name in sorted(files)]
return list(sorted(files))
@pytest.mark.parametrize(
"test_path, expected",
[
(
"mock://top_level/second_level/date=2019-10-01/a.parquet",
["top_level/second_level/date=2019-10-01/a.parquet"],
),
(
"mock://top_level/second_level/date=2019-10-01/*",
[
"top_level/second_level/date=2019-10-01/a.parquet",
"top_level/second_level/date=2019-10-01/b.parquet",
],
),
("mock://top_level/second_level/date=2019-10", []),
(
"mock://top_level/second_level/date=2019-10-0[1-4]",
[
"top_level/second_level/date=2019-10-01",
"top_level/second_level/date=2019-10-02",
"top_level/second_level/date=2019-10-04",
],
),
(
"mock://top_level/second_level/date=2019-10-0[1-4]/*",
[
"top_level/second_level/date=2019-10-01/a.parquet",
"top_level/second_level/date=2019-10-01/b.parquet",
"top_level/second_level/date=2019-10-02/a.parquet",
"top_level/second_level/date=2019-10-04/a.parquet",
],
),
(
"mock://top_level/second_level/date=2019-10-0[1-4]/[a].*",
[
"top_level/second_level/date=2019-10-01/a.parquet",
"top_level/second_level/date=2019-10-02/a.parquet",
"top_level/second_level/date=2019-10-04/a.parquet",
],
),
("mock://glob_test/hat/^foo.*", ["glob_test/hat/^foo.txt"]),
("mock://glob_test/dollar/$foo.*", ["glob_test/dollar/$foo.txt"]),
("mock://glob_test/lbrace/{foo.*", ["glob_test/lbrace/{foo.txt"]),
("mock://glob_test/rbrace/}foo.*", ["glob_test/rbrace/}foo.txt"]),
],
)
def test_glob(test_path, expected):
test_fs = DummyTestFS()
res = test_fs.glob(test_path)
res = sorted(res) # FIXME: py35 back-compat
assert res == expected
res = test_fs.glob(test_path, detail=True)
assert isinstance(res, dict)
assert sorted(res) == expected # FIXME: py35 back-compat
for name, info in res.items():
assert info == test_fs[name]
def test_find_details():
test_fs = DummyTestFS()
filenames = test_fs.find("/")
details = test_fs.find("/", detail=True)
for filename in filenames:
assert details[filename] == test_fs.info(filename)
def test_cache():
fs = DummyTestFS()
fs2 = DummyTestFS()
assert fs is fs2
assert len(fs._cache) == 1
del fs2
assert len(fs._cache) == 1
del fs
assert len(DummyTestFS._cache) == 1
DummyTestFS.clear_instance_cache()
assert len(DummyTestFS._cache) == 0
def test_alias():
with pytest.warns(FutureWarning, match="add_aliases"):
DummyTestFS(add_aliases=True)
def test_add_docs_warns():
with pytest.warns(FutureWarning, match="add_docs"):
AbstractFileSystem(add_docs=True)
def test_cache_options():
fs = DummyTestFS()
f = AbstractBufferedFile(fs, "misc/foo.txt", cache_type="bytes")
assert f.cache.trim
# TODO: dummy buffered file
f = AbstractBufferedFile(
fs, "misc/foo.txt", cache_type="bytes", cache_options=dict(trim=False)
)
assert f.cache.trim is False
f = fs.open("misc/foo.txt", cache_type="bytes", cache_options=dict(trim=False))
assert f.cache.trim is False
def test_trim_kwarg_warns():
fs = DummyTestFS()
with pytest.warns(FutureWarning, match="cache_options"):
AbstractBufferedFile(fs, "misc/foo.txt", cache_type="bytes", trim=False)
def test_eq():
fs = DummyTestFS()
result = fs == 1
assert result is False
def test_pickle_multiple():
a = DummyTestFS(1)
b = DummyTestFS(2, bar=1)
x = pickle.dumps(a)
y = pickle.dumps(b)
del a, b
DummyTestFS.clear_instance_cache()
result = pickle.loads(x)
assert result.storage_args == (1,)
assert result.storage_options == {}
result = pickle.loads(y)
assert result.storage_args == (2,)
assert result.storage_options == dict(bar=1)
def test_json():
a = DummyTestFS(1)
b = DummyTestFS(2, bar=1)
outa = a.to_json()
outb = b.to_json()
assert json.loads(outb) # is valid JSON
assert a != b
assert "bar" in outb
assert DummyTestFS.from_json(outa) is a
assert DummyTestFS.from_json(outb) is b
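# The readinto tests below check that a pre-allocated numpy buffer is filled in place for each supported dtype.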
@pytest.mark.parametrize(
"dt",
[
np.int8,
np.int16,
np.int32,
np.int64,
np.uint8,
np.uint16,
np.uint32,
np.uint64,
np.float32,
np.float64,
],
)
def test_readinto_with_numpy(tmpdir, dt):
store_path = str(tmpdir / "test_arr.npy")
arr = np.arange(10, dtype=dt)
arr.tofile(store_path)
arr2 = np.empty_like(arr)
with fsspec.open(store_path, "rb") as f:
f.readinto(arr2)
assert np.array_equal(arr, arr2)
@pytest.mark.parametrize(
"dt",
[
np.int8,
np.int16,
np.int32,
np.int64,
np.uint8,
np.uint16,
np.uint32,
np.uint64,
np.float32,
np.float64,
],
)
def test_readinto_with_multibyte(ftp_writable, tmpdir, dt):
host, port, user, pw = ftp_writable
ftp = FTPFileSystem(host=host, port=port, username=user, password=pw)
with ftp.open("/out", "wb") as fp:
arr = np.arange(10, dtype=dt)
fp.write(arr.tobytes())
with ftp.open("/out", "rb") as fp:
arr2 = np.empty_like(arr)
fp.readinto(arr2)
assert np.array_equal(arr, arr2)
| 28.278986 | 83 | 0.574888 | [ "BSD-3-Clause" ] | DavidKatz-il/filesystem_spec | fsspec/tests/test_spec.py | 7,805 | Python |
from .base import *
ALLOWED_HOSTS = ["localhost", "0.0.0.0", "127.0.0.1"]
DATABASES = {
'default': {
'ENGINE': os.getenv('SQL_ENGINE', 'django.db.backends.sqlite3'),
'NAME': os.getenv('SQL_DATABASE', os.path.join(BASE_DIR, 'db.sqlite3')),
'USER': os.getenv('SQL_USER', 'user'),
'PASSWORD': os.getenv('SQL_PASSWORD', 'password'),
'HOST': os.getenv('SQL_HOST', 'localhost'),
'PORT': os.getenv('SQL_PORT', '5432'),
}
}
ASSETS_ASSET_FILE_PART_SIZE = 60
| 29.882353 | 80 | 0.594488 | [ "MIT" ] | andremargarin/digital-asset-management-api | digital_asset_management_api/settings/local.py | 508 | Python |
#!/usr/bin/python3
from datetime import datetime
import calendar
import sqlite3
import os
months = ['January', 'February', 'March', 'April', 'May',
'June', 'July', 'August', 'September', 'October',
'November', 'December']
calendar_db = 'PythonicTeamsCalendarTest/calendar.db'
def initialise_calendar():
calendar_db = 'PythonicTeamsCalendarTest/calendar.db'
if not os.path.exists(calendar_db):
sqlite3.connect(calendar_db).close()
try:
conn = sqlite3.connect(calendar_db)
c = conn.cursor()
now = datetime.utcnow()
c.execute(f'''CREATE TABLE calendar(title VARCHAR(255), event TEXT, date_created DATETIME, day INTEGER, month VARCHAR(25), year INTEGER)''')
conn.commit()
conn.close()
return
except sqlite3.OperationalError as e:
print(e)
return
return
def retrieve_current_month():
now = datetime.utcnow()
current_month = months[now.month - 1]
return current_month
def retrieve_current_year():
now = datetime.utcnow()
current_year = now.year
return current_year
def retrieve_previous_month(current_month):
current_month_index = months.index(current_month)
prev_month = months[current_month_index -1]
if current_month == months[0]:
year = datetime.utcnow().year - 1
else:
year = datetime.utcnow().year
return prev_month, year
def retrieve_next_month(current_month, year):
current_month_index = months.index(current_month)
try:
next_month = months[current_month_index + 1]
year = year
return next_month, year
except IndexError:
current_month_index = 0
next_month = months[current_month_index]
year = year + 1
return next_month, year
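# calendar.monthcalendar returns one list per week; days that fall outside the month appear as 0.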
def retrieve_month_dates(year, month):
month_calendar = calendar.monthcalendar(year=year, month=month)
# month_calendar[0].pop(0)
return month_calendar
def retrieve_current_month_index(month):
current_month_index = int(months.index(month))
return current_month_index
def add_new_event(Title, Event, Day, Month, Year):
try:
conn = sqlite3.connect(calendar_db)
c = conn.cursor()
now = datetime.utcnow()
c.execute('''INSERT INTO calendar(title, event, date_created, day, month, year) VALUES(?,?,?,?,?,?)''', (Title, Event, now, Day, Month, Year))
conn.commit()
conn.close()
except sqlite3.OperationalError as e:
print(e)
#function to retrieve all events in a given month.
#if not month return empty dict
#else return a dict with key "date": value "Event"
def retrieve_all_events_in_month(Month):
conn = None
try:
calendar_events = {} #dict
conn = sqlite3.connect(calendar_db)
c = conn.cursor()
        # Bind named placeholders with a dict (positional binding of named parameters is deprecated).
        c.execute('''SELECT * FROM calendar WHERE month = :month''', {'month': Month})
calendar_events_list = list(c.fetchall())
conn.close()
if calendar_events_list:
for calendar_db_entry in calendar_events_list:
calendar_events[calendar_db_entry[3]] = calendar_db_entry
return calendar_events
except sqlite3.OperationalError as e:
print(e)
#function to return events on a specific date
def retrieve_events_on_date(Day,Month,Year):
conn = None
try:
conn = sqlite3.connect(calendar_db)
c = conn.cursor()
        c.execute('''SELECT * FROM calendar WHERE day = :day AND month = :month AND year = :year''',
                  {'day': Day, 'month': Month, 'year': Year})
events_list = list(c.fetchall())
conn.close()
print('events: ', events_list)
return events_list
except sqlite3.OperationalError as e:
print(e)
| 32.6 | 152 | 0.650573 | [ "MIT" ] | druzgeorge/python-teams | PythonicTeamsCalendarTest/helpers.py | 3,749 | Python |
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import os
import sys
import unittest2
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
PACK_ACTIONS_DIR = os.path.join(BASE_DIR, '../../../contrib/packs/actions')
PACK_ACTIONS_DIR = os.path.abspath(PACK_ACTIONS_DIR)
sys.path.insert(0, PACK_ACTIONS_DIR)
from st2common.util.monkey_patch import use_select_poll_workaround
use_select_poll_workaround()
from st2common.util.pack_management import eval_repo_url
__all__ = [
'InstallPackTestCase'
]
class InstallPackTestCase(unittest2.TestCase):
def test_eval_repo(self):
result = eval_repo_url('coditation/st2contrib')
self.assertEqual(result, 'https://github.com/coditation/st2contrib')
result = eval_repo_url('git@github.com:coditation/st2contrib.git')
self.assertEqual(result, 'git@github.com:coditation/st2contrib.git')
repo_url = 'https://github.com/coditation/st2contrib.git'
result = eval_repo_url(repo_url)
self.assertEqual(result, repo_url)
repo_url = 'https://git-wip-us.apache.org/repos/asf/libcloud.git'
result = eval_repo_url(repo_url)
self.assertEqual(result, repo_url)
| 32.592593 | 76 | 0.75 | [ "Apache-2.0" ] | avezraj/st2 | st2common/tests/unit/test_pack_management.py | 1,760 | Python |
import sys
import cv2
import os
from ast import literal_eval
from pathlib import Path
import shutil
import logging
import random
import pickle
import yaml
import subprocess
from PIL import Image
from glob import glob
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import animation, rc
plt.rcParams['figure.figsize'] = 30, 30
np.set_printoptions(precision=3, suppress=True)
rc('animation', html='jshtml')
import torch
from augmentations import get_albu_transforms
IMAGE_DIR = '~/Kaggle/data/tensorflow-great-barrier-reef/train_images'
def load_image(video_id, video_frame, image_dir):
img_path = f'{image_dir}/video_{video_id}/{video_frame}.jpg'
assert os.path.exists(img_path), f'{img_path} does not exist.'
img = cv2.imread(img_path)
return img
def decode_annotations(annotaitons_str):
"""decode annotations in string to list of dict"""
return literal_eval(annotaitons_str)
def load_image_with_annotations(video_id, video_frame, image_dir, annotaitons_str):
img = load_image(video_id, video_frame, image_dir)
annotations = decode_annotations(annotaitons_str)
if len(annotations) > 0:
for ann in annotations:
cv2.rectangle(img, (ann['x'], ann['y']),
(ann['x'] + ann['width'], ann['y'] + ann['height']),
(255, 0, 0), thickness=2,)
return img
def draw_predictions(img, pred_bboxes):
img = img.copy()
if len(pred_bboxes) > 0:
for bbox in pred_bboxes:
conf = bbox[0]
x, y, w, h = bbox[1:].round().astype(int)
cv2.rectangle(img, (x, y),(x+w, y+h),(0, 255, 255), thickness=2,)
cv2.putText(img, f"{conf:.2}",(x, max(0, y-5)),
cv2.FONT_HERSHEY_SIMPLEX,0.5,(0, 0, 255),
thickness=1,
)
return img
def plot_img(df, idx, image_dir, pred_bboxes=None):
row = df.iloc[idx]
video_id = row.video_id
video_frame = row.video_frame
annotations_str = row.annotations
img = load_image_with_annotations(video_id, video_frame, image_dir, annotations_str)
if pred_bboxes and len(pred_bboxes) > 0:
pred_bboxes = pred_bboxes[pred_bboxes[:,0].argsort()[::-1]] # sort by conf
img = draw_predictions(img, pred_bboxes)
plt.imshow(img[:, :, ::-1])
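# Pairwise IoU between two sets of boxes via broadcasting; returns an (N1, N2) matrix.
# Boxes in 'xywh' mode are converted to corner format first.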
def calc_iou(bboxes1, bboxes2, bbox_mode='xywh'):
assert len(bboxes1.shape) == 2 and bboxes1.shape[1] == 4
assert len(bboxes2.shape) == 2 and bboxes2.shape[1] == 4
bboxes1 = bboxes1.copy()
bboxes2 = bboxes2.copy()
if bbox_mode == 'xywh':
bboxes1[:, 2:] += bboxes1[:, :2]
bboxes2[:, 2:] += bboxes2[:, :2]
x11, y11, x12, y12 = np.split(bboxes1, 4, axis=1)
x21, y21, x22, y22 = np.split(bboxes2, 4, axis=1)
xA = np.maximum(x11, np.transpose(x21))
yA = np.maximum(y11, np.transpose(y21))
xB = np.minimum(x12, np.transpose(x22))
yB = np.minimum(y12, np.transpose(y22))
interArea = np.maximum((xB - xA + 1e-9), 0) * np.maximum((yB - yA + 1e-9), 0)
boxAArea = (x12 - x11 + 1e-9) * (y12 - y11 + 1e-9)
boxBArea = (x22 - x21 + 1e-9) * (y22 - y21 + 1e-9)
iou = interArea / (boxAArea + np.transpose(boxBArea) - interArea)
return iou
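# F-beta from TP/FP/FN counts; beta=2 favors recall. The code below reuses this with beta=0
# for precision and a very large beta as a recall approximation.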
def f_beta(tp, fp, fn, beta=2):
if tp == 0:
return 0
return (1+beta**2)*tp / ((1+beta**2)*tp + beta**2*fn+fp)
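# Greedy matching at a single IoU threshold: predictions (assumed sorted by confidence)
# each claim the ground-truth box with the highest IoU, which is then removed from the pool.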
def calc_is_correct_at_iou_th(gt_bboxes, pred_bboxes, iou_th, verbose=False):
gt_bboxes = gt_bboxes.copy()
pred_bboxes = pred_bboxes.copy()
tp = 0
fp = 0
for k, pred_bbox in enumerate(pred_bboxes): # fixed in ver.7
if len(gt_bboxes) == 0:
fp += len(pred_bboxes) - k # fix in ver.7
break
ious = calc_iou(gt_bboxes, pred_bbox[None, 1:])
max_iou = ious.max()
if max_iou >= iou_th:
tp += 1
gt_bboxes = np.delete(gt_bboxes, ious.argmax(), axis=0)
else:
fp += 1
fn = len(gt_bboxes)
return tp, fp, fn
def calc_is_correct(gt_bboxes, pred_bboxes, iou_th=0.5):
"""
gt_bboxes: (N, 4) np.array in xywh format
pred_bboxes: (N, 5) np.array in conf+xywh format
"""
if len(gt_bboxes) == 0 and len(pred_bboxes) == 0:
tps, fps, fns = 0, 0, 0
return tps, fps, fns
elif len(gt_bboxes) == 0:
tps, fps, fns = 0, len(pred_bboxes), 0
return tps, fps, fns
elif len(pred_bboxes) == 0:
tps, fps, fns = 0, 0, len(gt_bboxes)
return tps, fps, fns
pred_bboxes = pred_bboxes[pred_bboxes[:,0].argsort()[::-1]] # sort by conf
tps, fps, fns = 0, 0, 0
tp, fp, fn = calc_is_correct_at_iou_th(gt_bboxes, pred_bboxes, iou_th)
tps += tp
fps += fp
fns += fn
return tps, fps, fns
def calc_f2_score(gt_bboxes_list, pred_bboxes_list, verbose=False):
"""
gt_bboxes_list: list of (N, 4) np.array in xywh format
pred_bboxes_list: list of (N, 5) np.array in conf+xywh format
"""
#f2s = []
f2_dict = {'f2':0, "P":0, "R": 0}
all_tps = [list([0] * 11) for _ in range(len(gt_bboxes_list))]
all_fps = [list([0] * 11) for _ in range(len(gt_bboxes_list))]
all_fns = [list([0] * 11) for _ in range(len(gt_bboxes_list))]
for k, iou_th in enumerate(np.arange(0.3, 0.85, 0.05)):
tps, fps, fns = 0, 0, 0
for i, (gt_bboxes, pred_bboxes) in enumerate(zip(gt_bboxes_list, pred_bboxes_list)):
tp, fp, fn = calc_is_correct(gt_bboxes, pred_bboxes, iou_th)
tps += tp
fps += fp
fns += fn
all_tps[i][k] = tp
all_fps[i][k] = fp
all_fns[i][k] = fn
if verbose:
num_gt = len(gt_bboxes)
num_pred = len(pred_bboxes)
print(f'num_gt:{num_gt:<3} num_pred:{num_pred:<3} tp:{tp:<3} fp:{fp:<3} fn:{fn:<3}')
f2 = f_beta(tps, fps, fns, beta=2)
precision = f_beta(tps, fps, fns, beta=0)
recall = f_beta(tps, fps, fns, beta=100)
f2_dict["f2_" + str(round(iou_th,3))] = f2
f2_dict["P_" + str(round(iou_th,3))] = precision
f2_dict["R_" + str(round(iou_th,3))] = recall
f2_dict['f2'] += f2 / 11
f2_dict['P'] += precision / 11
f2_dict['R'] += recall / 11
f2_dict["tps"] = all_tps
f2_dict["fps"] = all_fps
f2_dict["fns"] = all_fns
return f2_dict
def print_f2_dict(d):
print("Overall f2: {:.3f}, precision {:.3f}, recall {:.3f}".format(d['f2'], d['precision'], d['recall']))
for k, iou_th in enumerate(np.arange(0.3, 0.85, 0.05)):
print(f"IOU {iou_th:.2f}:", end=" ")
print("f2: {:.3f}, precision {:.3f}, recall {:.3f}".format(d["f2_" + str(round(iou_th,3))],
d["precision_" + str(round(iou_th,3))],
d["recall_" + str(round(iou_th,3))]))
def get_path(row, params, infer=False):
row['old_image_path'] = params['root_dir'] / f'train_images/video_{row.video_id}/{row.video_frame}.jpg'
if infer:
row['image_path'] = row["old_image_path"]
else:
row['image_path'] = params['image_dir'] / f'video_{row.video_id}_{row.video_frame}.jpg'
row['label_path'] = params['label_dir'] / f'video_{row.video_id}_{row.video_frame}.txt'
return row
def make_copy(path, params):
# TODO: fix split issue
data = str(path).split('/')
filename = data[-1]
video_id = data[-2]
new_path = params["image_dir"] / f'{video_id}_{filename}'
shutil.copy(path, new_path)
return
# https://www.kaggle.com/awsaf49/great-barrier-reef-yolov5-train
def voc2yolo(image_height, image_width, bboxes):
"""
    voc  => [x1, y1, x2, y2]
yolo => [xmid, ymid, w, h] (normalized)
"""
bboxes = bboxes.copy().astype(float) # otherwise all value will be 0 as voc_pascal dtype is np.int
bboxes[..., [0, 2]] = bboxes[..., [0, 2]]/ image_width
bboxes[..., [1, 3]] = bboxes[..., [1, 3]]/ image_height
w = bboxes[..., 2] - bboxes[..., 0]
h = bboxes[..., 3] - bboxes[..., 1]
bboxes[..., 0] = bboxes[..., 0] + w/2
bboxes[..., 1] = bboxes[..., 1] + h/2
bboxes[..., 2] = w
bboxes[..., 3] = h
return bboxes
def yolo2voc(image_height, image_width, bboxes):
"""
yolo => [xmid, ymid, w, h] (normalized)
    voc  => [x1, y1, x2, y2]
"""
bboxes = bboxes.copy().astype(float) # otherwise all value will be 0 as voc_pascal dtype is np.int
bboxes[..., [0, 2]] = bboxes[..., [0, 2]]* image_width
bboxes[..., [1, 3]] = bboxes[..., [1, 3]]* image_height
bboxes[..., [0, 1]] = bboxes[..., [0, 1]] - bboxes[..., [2, 3]]/2
bboxes[..., [2, 3]] = bboxes[..., [0, 1]] + bboxes[..., [2, 3]]
return bboxes
def coco2yolo(image_height, image_width, bboxes):
"""
coco => [xmin, ymin, w, h]
yolo => [xmid, ymid, w, h] (normalized)
"""
bboxes = bboxes.copy().astype(float) # otherwise all value will be 0 as voc_pascal dtype is np.int
    # normalizing
bboxes[..., [0, 2]]= bboxes[..., [0, 2]]/ image_width
bboxes[..., [1, 3]]= bboxes[..., [1, 3]]/ image_height
    # conversion (xmin, ymin) => (xmid, ymid)
bboxes[..., [0, 1]] = bboxes[..., [0, 1]] + bboxes[..., [2, 3]]/2
return bboxes
def yolo2coco(image_height, image_width, bboxes):
"""
yolo => [xmid, ymid, w, h] (normalized)
coco => [xmin, ymin, w, h]
"""
bboxes = bboxes.copy().astype(float) # otherwise all value will be 0 as voc_pascal dtype is np.int
# denormalizing
bboxes[..., [0, 2]]= bboxes[..., [0, 2]]* image_width
bboxes[..., [1, 3]]= bboxes[..., [1, 3]]* image_height
    # conversion (xmid, ymid) => (xmin, ymin)
bboxes[..., [0, 1]] = bboxes[..., [0, 1]] - bboxes[..., [2, 3]]/2
return bboxes
def voc2coco(bboxes, image_height=720, image_width=1280):
bboxes = voc2yolo(image_height, image_width, bboxes)
bboxes = yolo2coco(image_height, image_width, bboxes)
return bboxes
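# Note: the next definition shadows the load_image defined earlier in this file;
# this variant takes a path and returns an RGB array.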
def load_image(image_path):
return cv2.cvtColor(cv2.imread(image_path), cv2.COLOR_BGR2RGB)
def plot_one_box(x, img, color=None, label=None, line_thickness=None):
# Plots one bounding box on image img
tl = line_thickness or round(0.002 * (img.shape[0] + img.shape[1]) / 2) + 1 # line/font thickness
color = color or [random.randint(0, 255) for _ in range(3)]
c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3]))
cv2.rectangle(img, c1, c2, color, thickness=tl, lineType=cv2.LINE_AA)
if label:
tf = max(tl - 1, 1) # font thickness
t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3
cv2.rectangle(img, c1, c2, color, -1, cv2.LINE_AA) # filled
cv2.putText(img, label, (c1[0], c1[1] - 2), 0, tl / 3, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA)
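# Note: the 'yolo' branch below calls get_label() and the 'voc_pascal' branch reads class_ids;
# neither is defined in this module, so those branches rely on names from the calling scope.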
def draw_bboxes(img, bboxes, classes, colors = None, show_classes = None, bbox_format = 'yolo', class_name = False, line_thickness = 1):
image = img.copy()
show_classes = classes if show_classes is None else show_classes
colors = (0, 255 ,0) if colors is None else colors
if bbox_format == 'yolo':
for idx in range(len(bboxes)):
bbox = bboxes[idx]
cls = classes[idx]
color = colors[idx]
if cls in show_classes:
x1 = round(float(bbox[0])*image.shape[1])
y1 = round(float(bbox[1])*image.shape[0])
w = round(float(bbox[2])*image.shape[1]/2) #w/2
h = round(float(bbox[3])*image.shape[0]/2)
voc_bbox = (x1-w, y1-h, x1+w, y1+h)
plot_one_box(voc_bbox,
image,
color = color,
label = cls if class_name else str(get_label(cls)),
line_thickness = line_thickness)
elif bbox_format == 'coco':
for idx in range(len(bboxes)):
bbox = bboxes[idx]
cls = classes[idx]
color = colors[idx]
if cls in show_classes:
x1 = int(round(bbox[0]))
y1 = int(round(bbox[1]))
w = int(round(bbox[2]))
h = int(round(bbox[3]))
voc_bbox = (x1, y1, x1+w, y1+h)
plot_one_box(voc_bbox,
image,
color = color,
label = cls,
line_thickness = line_thickness)
elif bbox_format == 'voc_pascal':
for idx in range(len(bboxes)):
bbox = bboxes[idx]
cls = classes[idx]
cls_id = class_ids[idx]
color = colors[cls_id] if type(colors) is list else colors
if cls in show_classes:
x1 = int(round(bbox[0]))
y1 = int(round(bbox[1]))
x2 = int(round(bbox[2]))
y2 = int(round(bbox[3]))
voc_bbox = (x1, y1, x2, y2)
plot_one_box(voc_bbox,
image,
color = color,
label = cls if class_name else str(cls_id),
line_thickness = line_thickness)
else:
raise ValueError('wrong bbox format')
return image
def get_bbox(annots):
bboxes = [list(annot.values()) for annot in annots]
return bboxes
def get_imgsize(row):
row['width'], row['height'] = imagesize.get(row['image_path'])
return row
# https://www.kaggle.com/diegoalejogm/great-barrier-reefs-eda-with-animations
def create_animation(ims):
fig = plt.figure(figsize=(16, 12))
plt.axis('off')
im = plt.imshow(ims[0])
def animate_func(i):
im.set_array(ims[i])
return [im]
return animation.FuncAnimation(fig, animate_func, frames = len(ims), interval = 1000//12)
# https://github.com/rbgirshick/fast-rcnn/blob/master/lib/utils/nms.py
def nms(dets, thresh):
x1 = dets[:, 0]
y1 = dets[:, 1]
x2 = dets[:, 2]
y2 = dets[:, 3]
scores = dets[:, 4]
areas = (x2 - x1 + 1) * (y2 - y1 + 1)
order = scores.argsort()[::-1]
keep = []
while order.size > 0:
i = order[0]
keep.append(i)
xx1 = np.maximum(x1[i], x1[order[1:]])
yy1 = np.maximum(y1[i], y1[order[1:]])
xx2 = np.minimum(x2[i], x2[order[1:]])
yy2 = np.minimum(y2[i], y2[order[1:]])
w = np.maximum(0.0, xx2 - xx1 + 1)
h = np.maximum(0.0, yy2 - yy1 + 1)
inter = w * h
ovr = inter / (areas[i] + areas[order[1:]] - inter)
inds = np.where(ovr <= thresh)[0]
order = order[inds + 1]
return keep
# https://github.com/DocF/Soft-NMS/blob/master/soft_nms.py
def py_cpu_softnms(dets, sc, Nt=0.3, sigma=0.5, thresh=0.001, method=2):
"""
py_cpu_softnms
    :param dets:   box coordinate matrix in [y1, x1, y2, x2] format
    :param sc:     score of each box
    :param Nt:     IoU overlap threshold
    :param sigma:  variance used by the Gaussian weighting function
    :param thresh: final score threshold
    :param method: suppression method (1: linear, 2: gaussian, otherwise: original NMS)
    :return:       indexes of the boxes that are kept
"""
# indexes concatenate boxes with the last column
N = dets.shape[0]
indexes = np.array([np.arange(N)])
dets = np.concatenate((dets, indexes.T), axis=1)
# the order of boxes coordinate is [y1,x1,y2,x2]
y1 = dets[:, 0]
x1 = dets[:, 1]
y2 = dets[:, 2]
x2 = dets[:, 3]
scores = sc
areas = (x2 - x1 + 1) * (y2 - y1 + 1)
for i in range(N):
# intermediate parameters for later parameters exchange
tBD = dets[i, :].copy()
tscore = scores[i].copy()
tarea = areas[i].copy()
pos = i + 1
#
if i != N-1:
maxscore = np.max(scores[pos:], axis=0)
maxpos = np.argmax(scores[pos:], axis=0)
else:
maxscore = scores[-1]
maxpos = 0
if tscore < maxscore:
dets[i, :] = dets[maxpos + i + 1, :]
dets[maxpos + i + 1, :] = tBD
tBD = dets[i, :]
scores[i] = scores[maxpos + i + 1]
scores[maxpos + i + 1] = tscore
tscore = scores[i]
areas[i] = areas[maxpos + i + 1]
areas[maxpos + i + 1] = tarea
tarea = areas[i]
# IoU calculate
xx1 = np.maximum(dets[i, 1], dets[pos:, 1])
yy1 = np.maximum(dets[i, 0], dets[pos:, 0])
xx2 = np.minimum(dets[i, 3], dets[pos:, 3])
yy2 = np.minimum(dets[i, 2], dets[pos:, 2])
w = np.maximum(0.0, xx2 - xx1 + 1)
h = np.maximum(0.0, yy2 - yy1 + 1)
inter = w * h
ovr = inter / (areas[i] + areas[pos:] - inter)
# Three methods: 1.linear 2.gaussian 3.original NMS
if method == 1: # linear
weight = np.ones(ovr.shape)
weight[ovr > Nt] = weight[ovr > Nt] - ovr[ovr > Nt]
elif method == 2: # gaussian
weight = np.exp(-(ovr * ovr) / sigma)
else: # original NMS
weight = np.ones(ovr.shape)
weight[ovr > Nt] = 0
scores[pos:] = weight * scores[pos:]
# select the boxes and keep the corresponding indexes
inds = dets[:, 4][scores > thresh]
keep = inds.astype(int)
return keep
def seed_torch(seed=42):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.deterministic = True
def create_logger(filename, filemode='a'):
# better logging file - output the in terminal as well
file_handler = logging.FileHandler(filename=filename, mode=filemode)
stdout_handler = logging.StreamHandler(sys.stdout)
handlers = [file_handler, stdout_handler]
formatter = "%(asctime)s %(levelname)s: %(message)s"
datefmt = "%m/%d/%Y %I:%M:%S %p"
logging.basicConfig(format=formatter, datefmt=datefmt,
level=logging.DEBUG, handlers=handlers)
return
def save_pickle(obj, folder_path):
pickle.dump(obj, open(folder_path, 'wb'), pickle.HIGHEST_PROTOCOL)
def load_pickle(folder_path):
return pickle.load(open(folder_path, 'rb'))
def save_yaml(obj, folder_path):
obj2 = obj.copy()
for key, value in obj2.items():
if isinstance(value, Path):
obj2[key] = str(value.resolve())
else:
obj2[key] = value
with open(folder_path, 'w') as file:
yaml.dump(obj2, file)
def load_yaml(folder_path):
with open(folder_path) as file:
data = yaml.load(file, Loader=yaml.FullLoader)
return data
def load_model(params):
try:
model = torch.hub.load(params['repo'],
'custom',
path=params['ckpt_path'],
source='local',
force_reload=True) # local repo
    except Exception:
print("torch.hub.load failed, try torch.load")
model = torch.load(params['ckpt_path'])
model.conf = params['conf'] # NMS confidence threshold
model.iou = params['iou'] # NMS IoU threshold
model.classes = None # (optional list) filter by class, i.e. = [0, 15, 16] for persons, cats and dogs
model.multi_label = False # NMS multiple labels per box
model.max_det = 50 # maximum number of detections per image
return model
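# Run the model either directly or through SAHI sliced prediction; returns boxes in
# COCO (x, y, w, h) format together with their confidences.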
def predict(model, img, size=768, augment=False, use_sahi=False):
if use_sahi:
from sahi.predict import get_sliced_prediction
results = get_sliced_prediction(
img,
model,
slice_height = 512,
slice_width = 512,
overlap_height_ratio = 0.2,
overlap_width_ratio = 0.2
)
preds = results.object_prediction_list
bboxes = np.array([pred.bbox.to_voc_bbox() for pred in preds])
else:
results = model(img, size=size, augment=augment) # custom inference size
preds = results.pandas().xyxy[0]
bboxes = preds[['xmin','ymin','xmax','ymax']].values
if len(bboxes):
height, width = img.shape[:2]
bboxes = voc2coco(bboxes,height,width).astype(int)
if use_sahi:
confs = np.array([pred.score.value for pred in preds])
else:
confs = preds.confidence.values
return bboxes, confs
else:
return np.array([]),[]
def format_prediction(bboxes, confs):
annot = ''
if len(bboxes)>0:
for idx in range(len(bboxes)):
xmin, ymin, w, h = bboxes[idx]
conf = confs[idx]
annot += f'{conf} {xmin} {ymin} {w} {h}'
annot +=' '
annot = annot.strip(' ')
return annot
def show_img(img, bboxes, confs, colors, bbox_format='yolo'):
labels = [str(round(conf,2)) for conf in confs]
img = draw_bboxes(img = img,
bboxes = bboxes,
classes = labels,
class_name = True,
colors = colors,
bbox_format = bbox_format,
line_thickness = 2)
return Image.fromarray(img)
def write_hyp(params):
with open(params["hyp_file"], mode="w") as f:
for key, val in params["hyp_param"].items():
f.write(f"{key}: {val}\n")
def class2dict(f):
return dict((name, getattr(f, name)) for name in dir(f) if not name.startswith('__'))
def upload(params):
data_version = "-".join(params["exp_name"].split("_"))
if os.path.exists(params["output_dir"] / "wandb"):
shutil.move(str(params["output_dir"] / "wandb"),
str(params["output_dir"].parent / f"{params['exp_name']}_wandb/")
)
with open(params["output_dir"] / "dataset-metadata.json", "w") as f:
f.write("{\n")
f.write(f""" "title": "{data_version}",\n""")
f.write(f""" "id": "vincentwang25/{data_version}",\n""")
f.write(""" "licenses": [\n""")
f.write(""" {\n""")
f.write(""" "name": "CC0-1.0"\n""")
f.write(""" }\n""")
f.write(""" ]\n""")
f.write("""}""")
subprocess.call(["kaggle", "datasets", "create", "-p", str(params["output_dir"]), "-r", "zip"])
def coco(df):
annotion_id = 0
images = []
annotations = []
categories = [{'id': 0, 'name': 'cots'}]
for i, row in df.iterrows():
images.append({
"id": i,
"file_name": f"video_{row['video_id']}_{row['video_frame']}.jpg",
"height": 720,
"width": 1280,
})
for bbox in row['annotations']:
annotations.append({
"id": annotion_id,
"image_id": i,
"category_id": 0,
"bbox": list(bbox.values()),
"area": bbox['width'] * bbox['height'],
"segmentation": [],
"iscrowd": 0
})
annotion_id += 1
json_file = {'categories':categories, 'images':images, 'annotations':annotations}
return json_file
def mmcfg_from_param(params):
from mmcv import Config
# model
cfg = Config.fromfile(params['hyp_param']['base_file'])
cfg.work_dir = str(params['output_dir'])
cfg.seed = 2022
cfg.gpu_ids = range(2)
cfg.load_from = params['hyp_param']['load_from']
if params['hyp_param']['model_type'] == 'faster_rcnn':
cfg.model.roi_head.bbox_head.num_classes = 1
cfg.model.roi_head.bbox_head.loss_bbox.type = params['hyp_param']['loss_fnc']
cfg.model.rpn_head.loss_bbox.type = params['hyp_param']['loss_fnc']
if params['hyp_param']['loss_fnc'] == "GIoULoss":
cfg.model.roi_head.bbox_head.reg_decoded_bbox = True
cfg.model.rpn_head.reg_decoded_bbox = True
cfg.model.train_cfg.rpn_proposal.nms.type = params['hyp_param']['nms']
cfg.model.test_cfg.rpn.nms.type = params['hyp_param']['nms']
cfg.model.test_cfg.rcnn.nms.type = params['hyp_param']['nms']
cfg.model.train_cfg.rcnn.sampler.type = params['hyp_param']['sampler']
elif params['hyp_param']['model_type'] == 'swin':
pass # already changed
elif params['hyp_param']['model_type'] == 'vfnet':
cfg.model.bbox_head.num_classes = 1
if params['hyp_param'].get("optimizer", cfg.optimizer.type) == "AdamW":
cfg.optimizer = dict(
type="AdamW",
lr=params['hyp_param'].get("lr", cfg.optimizer.lr),
weight_decay=params['hyp_param'].get(
"weight_decay", cfg.optimizer.weight_decay
),
)
else:
cfg.optimizer.lr = params['hyp_param'].get("lr", cfg.optimizer.lr)
cfg.optimizer.weight_decay = params['hyp_param'].get(
"weight_decay", cfg.optimizer.weight_decay)
cfg.lr_config = dict(
policy='CosineAnnealing',
by_epoch=False,
warmup='linear',
warmup_iters= 1000,
warmup_ratio= 1/10,
min_lr=1e-07)
# data
cfg = add_data_pipeline(cfg, params)
cfg.runner.max_epochs = params['epochs']
cfg.evaluation.start = 1
cfg.evaluation.interval = 1
cfg.evaluation.save_best='auto'
cfg.evaluation.metric ='bbox'
cfg.checkpoint_config.interval = -1
cfg.log_config.interval = 500
cfg.log_config.with_step = True
cfg.log_config.by_epoch = True
cfg.log_config.hooks =[dict(type='TextLoggerHook'),
dict(type='TensorboardLoggerHook')]
cfg.workflow = [('train',1)]
logging.info(str(cfg))
return cfg
def add_data_pipeline(cfg, params):
cfg.dataset_type = 'COCODataset'
cfg.classes = ('cots',)
cfg.data_root = str(params['data_path'].resolve())
params['aug_param']['img_scale'] = (params['img_size'], params['img_size'])
cfg.img_scale = params['aug_param']['img_scale']
cfg.dataset_type = 'CocoDataset'
cfg.filter_empty_gt = False
cfg.data.filter_empty_gt = False
cfg.data.train.type = cfg.dataset_type
cfg.data.train.classes = cfg.classes
cfg.data.train.ann_file = str(params["cfg_dir"] / 'annotations_train.json')
cfg.data.train.img_prefix = cfg.data_root + '/images/'
cfg.data.train.filter_empty_gt = False
cfg.data.test.type = cfg.dataset_type
cfg.data.test.classes = cfg.classes
cfg.data.test.ann_file = str(params["cfg_dir"] / 'annotations_valid.json')
cfg.data.test.img_prefix = cfg.data_root + '/images/'
cfg.data.test.filter_empty_gt = False
cfg.data.val.type = cfg.dataset_type
cfg.data.val.classes = cfg.classes
cfg.data.val.ann_file = str(params["cfg_dir"] / 'annotations_valid.json')
cfg.data.val.img_prefix = cfg.data_root + '/images/'
cfg.data.val.filter_empty_gt = False
cfg.data.samples_per_gpu = params['batch'] // len(cfg.gpu_ids)
cfg.data.workers_per_gpu = params['workers'] // len(cfg.gpu_ids)
# train pipeline
albu_train_transforms = get_albu_transforms(params['aug_param'], is_train=True)
if params['aug_param']['use_mixup'] or params['aug_param']['use_mosaic']:
train_pipeline = []
else:
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True)]
if params['aug_param']['use_mosaic']:
train_pipeline.append(dict(type='Mosaic', img_scale=cfg.img_scale, pad_val=114.0))
else:
train_pipeline.append(dict(type='Resize', img_scale=cfg.img_scale, keep_ratio=False))
train_pipeline = train_pipeline +[
dict(type='Pad', size_divisor=32),
dict(
type='Albu',
transforms=albu_train_transforms,
bbox_params=dict(
type='BboxParams',
format='pascal_voc',
label_fields=['gt_labels'],
min_visibility=0.0,
filter_lost_elements=True),
keymap={
'img': 'image',
'gt_bboxes': 'bboxes'
},
update_pad_shape=False,
skip_img_without_anno=False
)]
if params['aug_param']['use_mixup']:
train_pipeline.append(dict(type='MixUp', img_scale=cfg.img_scale, ratio_range=(0.8, 1.6), pad_val=114.0))
train_pipeline = train_pipeline +\
[
dict(type='Normalize', **cfg.img_norm_cfg),
dict(type='DefaultFormatBundle'),
dict(type='Collect',
keys=['img', 'gt_bboxes', 'gt_labels'],
meta_keys=('filename', 'ori_filename', 'ori_shape', 'img_shape', 'pad_shape',
'scale_factor', 'img_norm_cfg')),
]
val_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=cfg.img_scale,
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **cfg.img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img'])
])
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=[cfg.img_scale],
flip=[False],
transforms=[
dict(type='Resize', keep_ratio=False),
dict(type='Pad', size_divisor=32),
dict(type='RandomFlip', direction='horizontal'),
dict(type='Normalize', **cfg.img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
cfg.train_pipeline = train_pipeline
cfg.val_pipeline = val_pipeline
cfg.test_pipeline = test_pipeline
if params['aug_param']['use_mixup'] or params['aug_param']['use_mosaic']:
cfg.train_dataset = dict(
type='MultiImageMixDataset',
dataset=dict(
type=cfg.dataset_type,
classes=cfg.classes,
ann_file=str(params["cfg_dir"] / 'annotations_train.json'),
img_prefix=cfg.data_root + '/images/',
pipeline=[
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True)
],
filter_empty_gt=False,
),
pipeline=cfg.train_pipeline
)
cfg.data.train = cfg.train_dataset
else:
cfg.data.train.pipeline = cfg.train_pipeline
cfg.data.val.pipeline = cfg.val_pipeline
cfg.data.test.pipeline = cfg.test_pipeline
return cfg
def find_ckp(output_dir):
    return glob(output_dir / "best*.pth")[0]
| 34.920177 | 138 | 0.554353 | [ "Apache-2.0" ] | VincentWang25/Kaggle_TGBR | src/util.py | 31,574 | Python |
#!/usr/bin/env python3
#
# # Copyright (c) 2021 Facebook, inc. and its affiliates. All Rights Reserved
#
#
from uimnet import utils
from uimnet import algorithms
from uimnet import workers
from omegaconf import OmegaConf
from pathlib import Path
import torch
import torch.distributed as dist
import torch.multiprocessing as tmp
import numpy as np
import os
import argparse
import pickle
import filelock
TRAIN_CFG = """
sweep_dir: null
output_dir: null # subfolder. Mutable at dispatch
dataset:
name: ImageNat
root: /checkpoint/ishmaelb/data/datasets/ILSVRC2012
equalize_partitions: True
seed: 0
batch_size: 256
splits_props:
train: 0.9
eval: 0.1
algorithm:
name: null
arch: null
use_mixed_precision: True
seed: 0 # Mutable at dispatch
sn: False
sn_coef: 1.0
sn_bn: True
experiment:
distributed: False
platform: local
evaluate_every: 10
checkpoint_every: 10
num_epochs: 100
## ----- Mutable on the worker during distributed/device setup.
output_dir: null
seed: 42 # Workers seed
device: 'cuda:0'
rank: null
local_rank: null
world_size: null
dist_protocol: null
dist_url: null
num_workers: 5
# ------
"""
def parse_arguments():
parser = argparse.ArgumentParser(description='Trains model')
parser.add_argument('-a', '--algorithm', type=str, required=True)
parser.add_argument('--arch', type=str, default='resnet18')
parser.add_argument('-m', '--model_dir', type=str, required=True)
parser.add_argument('-c', '--clustering_file', type=str, required=True)
parser.add_argument('--local_rank', type=int, default=None)
parser.add_argument('-d', '--distributed', action='store_true')
parser.add_argument('--dist_protocol', type=str, default='env')
return parser.parse_args()
def partition_datasets(train_cfg, partitions):
all_datasets = {}
for split_name in ['train', 'val']:
all_datasets[split_name] = utils.partition_dataset(name=train_cfg.dataset.name,
root=train_cfg.dataset.root,
split=split_name,
partitions=partitions,
equalize_partitions=train_cfg.dataset.equalize_partitions)
return all_datasets
def train_algorithm(train_cfg, Algorithm, dataset):
if utils.is_distributed():
    os.environ['OMP_NUM_THREADS'] = str(train_cfg.experiment.num_workers)  # environment values must be strings
trainer = workers.Trainer()
output = trainer(train_cfg, Algorithm, dataset=dataset)
return output
@utils.timeit
def run_trainer(model_dir, algorithm_name, arch, clustering_file, distributed, dist_protocol):
model_path = Path(model_dir)
model_path.mkdir(parents=True, exist_ok=True)
train_cfg = OmegaConf.create(TRAIN_CFG)
OmegaConf.set_struct(train_cfg, True)
  train_cfg.output_dir = model_dir
train_cfg.algorithm.name = algorithm_name
train_cfg.algorithm.arch = arch
train_cfg.experiment.distributed = distributed
train_cfg.experiment.dist_protocol = dist_protocol
with open(model_path / 'train_cfg.yaml', 'w') as fp:
OmegaConf.save(train_cfg, fp.name)
with filelock.FileLock(clustering_file + '.lock'):
with open(clustering_file, 'rb') as fp:
clustering = pickle.load(fp)
datasets = partition_datasets(train_cfg, partitions=clustering['partitions'])
Algorithm = utils.load_model_cls(train_cfg)
trainer_args = (train_cfg, Algorithm, datasets['train']['in'])
output = train_algorithm(*trainer_args)
return utils.pack(output)
if __name__ == '__main__':
args = parse_arguments()
trainer_output = run_trainer(args.model_dir, args.algorithm, arch=args.arch, clustering_file=args.clustering_file,
distributed=args.distributed, dist_protocol=args.dist_protocol)
| 28.466667 | 116 | 0.700494 | [
"MIT"
] | facebookresearch/uimnet | scripts/run_trainer.py | 3,843 | Python |
"""Configuration for SSDP tests."""
from typing import Optional, Sequence
from unittest.mock import AsyncMock, MagicMock, patch
from urllib.parse import urlparse
from async_upnp_client.client import UpnpDevice
from async_upnp_client.event_handler import UpnpEventHandler
from async_upnp_client.profiles.igd import StatusInfo
import pytest
from homeassistant.components import ssdp
from homeassistant.components.upnp.const import (
BYTES_RECEIVED,
BYTES_SENT,
CONFIG_ENTRY_ST,
CONFIG_ENTRY_UDN,
DOMAIN,
PACKETS_RECEIVED,
PACKETS_SENT,
ROUTER_IP,
ROUTER_UPTIME,
WAN_STATUS,
)
from homeassistant.core import HomeAssistant
from homeassistant.util import dt
from tests.common import MockConfigEntry
TEST_UDN = "uuid:device"
TEST_ST = "urn:schemas-upnp-org:device:InternetGatewayDevice:1"
TEST_USN = f"{TEST_UDN}::{TEST_ST}"
TEST_LOCATION = "http://192.168.1.1/desc.xml"
TEST_HOSTNAME = urlparse(TEST_LOCATION).hostname
TEST_FRIENDLY_NAME = "mock-name"
TEST_DISCOVERY = ssdp.SsdpServiceInfo(
ssdp_usn=TEST_USN,
ssdp_st=TEST_ST,
ssdp_location=TEST_LOCATION,
upnp={
"_udn": TEST_UDN,
"location": TEST_LOCATION,
"usn": TEST_USN,
ssdp.ATTR_UPNP_DEVICE_TYPE: TEST_ST,
ssdp.ATTR_UPNP_FRIENDLY_NAME: TEST_FRIENDLY_NAME,
ssdp.ATTR_UPNP_MANUFACTURER: "mock-manufacturer",
ssdp.ATTR_UPNP_MODEL_NAME: "mock-model-name",
ssdp.ATTR_UPNP_UDN: TEST_UDN,
},
ssdp_headers={
"_host": TEST_HOSTNAME,
},
)
class MockUpnpDevice:
"""Mock async_upnp_client UpnpDevice."""
def __init__(self, location: str) -> None:
"""Initialize."""
self.device_url = location
@property
def manufacturer(self) -> str:
"""Get manufacturer."""
return TEST_DISCOVERY.upnp[ssdp.ATTR_UPNP_MANUFACTURER]
@property
def name(self) -> str:
"""Get name."""
return TEST_DISCOVERY.upnp[ssdp.ATTR_UPNP_FRIENDLY_NAME]
@property
def model_name(self) -> str:
"""Get the model name."""
return TEST_DISCOVERY.upnp[ssdp.ATTR_UPNP_MODEL_NAME]
@property
def device_type(self) -> str:
"""Get the device type."""
return TEST_DISCOVERY.upnp[ssdp.ATTR_UPNP_DEVICE_TYPE]
@property
def udn(self) -> str:
"""Get the UDN."""
return TEST_DISCOVERY.upnp[ssdp.ATTR_UPNP_UDN]
@property
def usn(self) -> str:
"""Get the USN."""
return f"{self.udn}::{self.device_type}"
@property
def unique_id(self) -> str:
"""Get the unique id."""
return self.usn
def reinit(self, new_upnp_device: UpnpDevice) -> None:
"""Reinitialize."""
self.device_url = new_upnp_device.device_url
class MockIgdDevice:
"""Mock async_upnp_client IgdDevice."""
def __init__(self, device: MockUpnpDevice, event_handler: UpnpEventHandler) -> None:
"""Initialize mock device."""
self.device = device
self.profile_device = device
self._timestamp = dt.utcnow()
self.traffic_times_polled = 0
self.status_times_polled = 0
self.traffic_data = {
BYTES_RECEIVED: 0,
BYTES_SENT: 0,
PACKETS_RECEIVED: 0,
PACKETS_SENT: 0,
}
self.status_data = {
WAN_STATUS: "Connected",
ROUTER_UPTIME: 10,
ROUTER_IP: "8.9.10.11",
}
@property
def name(self) -> str:
"""Get the name of the device."""
return self.profile_device.name
@property
def manufacturer(self) -> str:
"""Get the manufacturer of this device."""
return self.profile_device.manufacturer
@property
def model_name(self) -> str:
"""Get the model name of this device."""
return self.profile_device.model_name
@property
def udn(self) -> str:
"""Get the UDN of the device."""
return self.profile_device.udn
@property
def device_type(self) -> str:
"""Get the device type of this device."""
return self.profile_device.device_type
async def async_get_total_bytes_received(self) -> Optional[int]:
"""Get total bytes received."""
self.traffic_times_polled += 1
return self.traffic_data[BYTES_RECEIVED]
async def async_get_total_bytes_sent(self) -> Optional[int]:
"""Get total bytes sent."""
return self.traffic_data[BYTES_SENT]
async def async_get_total_packets_received(self) -> Optional[int]:
"""Get total packets received."""
return self.traffic_data[PACKETS_RECEIVED]
async def async_get_total_packets_sent(self) -> Optional[int]:
"""Get total packets sent."""
return self.traffic_data[PACKETS_SENT]
async def async_get_external_ip_address(
self, services: Optional[Sequence[str]] = None
) -> Optional[str]:
"""
Get the external IP address.
        :param services: List of service names to try to get action from, defaults to [WANIPC,WANPPP]
"""
return self.status_data[ROUTER_IP]
async def async_get_status_info(
self, services: Optional[Sequence[str]] = None
) -> Optional[StatusInfo]:
"""
Get status info.
        :param services: List of service names to try to get action from, defaults to [WANIPC,WANPPP]
"""
self.status_times_polled += 1
return StatusInfo(
self.status_data[WAN_STATUS], "", self.status_data[ROUTER_UPTIME]
)
@pytest.fixture(autouse=True)
def mock_upnp_device():
"""Mock homeassistant.components.upnp.Device."""
async def mock_async_create_upnp_device(
hass: HomeAssistant, location: str
) -> UpnpDevice:
"""Create UPnP device."""
return MockUpnpDevice(location)
with patch(
"homeassistant.components.upnp.device.async_create_upnp_device",
side_effect=mock_async_create_upnp_device,
) as mock_async_create_upnp_device, patch(
"homeassistant.components.upnp.device.IgdDevice", new=MockIgdDevice
) as mock_igd_device:
yield mock_async_create_upnp_device, mock_igd_device
@pytest.fixture
def mock_setup_entry():
"""Mock async_setup_entry."""
with patch(
"homeassistant.components.upnp.async_setup_entry",
return_value=AsyncMock(True),
) as mock_setup:
yield mock_setup
@pytest.fixture(autouse=True)
async def silent_ssdp_scanner(hass):
"""Start SSDP component and get Scanner, prevent actual SSDP traffic."""
with patch(
"homeassistant.components.ssdp.Scanner._async_start_ssdp_listeners"
), patch("homeassistant.components.ssdp.Scanner._async_stop_ssdp_listeners"), patch(
"homeassistant.components.ssdp.Scanner.async_scan"
):
yield
@pytest.fixture
async def ssdp_instant_discovery():
"""Instance discovery."""
# Set up device discovery callback.
async def register_callback(hass, callback, match_dict):
"""Immediately do callback."""
await callback(TEST_DISCOVERY, ssdp.SsdpChange.ALIVE)
return MagicMock()
with patch(
"homeassistant.components.ssdp.async_register_callback",
side_effect=register_callback,
) as mock_register, patch(
"homeassistant.components.ssdp.async_get_discovery_info_by_st",
return_value=[TEST_DISCOVERY],
) as mock_get_info:
yield (mock_register, mock_get_info)
@pytest.fixture
async def ssdp_no_discovery():
"""No discovery."""
# Set up device discovery callback.
async def register_callback(hass, callback, match_dict):
"""Don't do callback."""
return MagicMock()
with patch(
"homeassistant.components.ssdp.async_register_callback",
side_effect=register_callback,
) as mock_register, patch(
"homeassistant.components.ssdp.async_get_discovery_info_by_st",
return_value=[],
) as mock_get_info, patch(
"homeassistant.components.upnp.config_flow.SSDP_SEARCH_TIMEOUT",
0.1,
):
yield (mock_register, mock_get_info)
@pytest.fixture
async def setup_integration(
hass: HomeAssistant, mock_get_source_ip, ssdp_instant_discovery, mock_upnp_device
):
"""Create an initialized integration."""
entry = MockConfigEntry(
domain=DOMAIN,
data={
CONFIG_ENTRY_UDN: TEST_UDN,
CONFIG_ENTRY_ST: TEST_ST,
},
)
# Load config_entry.
entry.add_to_hass(hass)
await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
yield entry
| 29.666667 | 100 | 0.668134 | [
"Apache-2.0"
] | Aeroid/home-assistant-core | tests/components/upnp/conftest.py | 8,633 | Python |
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any
from transformers import AutoTokenizer
from flash.data.process import Postprocess
from flash.text.seq2seq.core.data import Seq2SeqData, Seq2SeqPreprocess
class SummarizationPostprocess(Postprocess):
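    """Decodes generated token ids back into summary strings."""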
def __init__(
self,
backbone: str = "sshleifer/tiny-mbart",
):
super().__init__()
# TODO: Should share the backbone or tokenizer over state
self.tokenizer = AutoTokenizer.from_pretrained(backbone, use_fast=True)
def uncollate(self, generated_tokens: Any) -> Any:
pred_str = self.tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
pred_str = [str.strip(s) for s in pred_str]
return pred_str
class SummarizationData(Seq2SeqData):
preprocess_cls = Seq2SeqPreprocess
postprocess_cls = SummarizationPostprocess
| 33.116279 | 90 | 0.746489 | [
"Apache-2.0"
] | Pandinosaurus/lightning-flash | flash/text/seq2seq/summarization/data.py | 1,424 | Python |
"""Testing utilities.
"""
import os.path
import pytest
def test():
"""Initiate poliastro testing
"""
pytest.main([os.path.dirname(os.path.abspath(__file__))])
| 12.571429 | 61 | 0.653409 | [
"MIT"
] | AunSiro/poliastro | src/poliastro/testing.py | 176 | Python |
from xml.etree import ElementTree
from django.test import TestCase
from peacecorps.util import svg as svg_util
XML_HEADER = b'<?xml version="1.0" encoding="UTF-8"?>\n'
class ValidateTests(TestCase):
def test_must_be_xml(self):
svg_bytes = b'some text n stuff'
self.assertIsNone(svg_util.validate_svg(svg_bytes))
def test_must_be_svg(self):
svg_bytes = XML_HEADER + b'<SOMEOTHERTAG></SOMEOTHERTAG>'
self.assertIsNone(svg_util.validate_svg(svg_bytes))
class MakeSquareTests(TestCase):
def test_missing_or_bad_attrs(self):
svg = ElementTree.fromstring(XML_HEADER + b'<svg></svg>')
self.assertIsNone(svg_util.make_square(svg))
svg = ElementTree.fromstring(
XML_HEADER + b'<svg widTH="abc" height="2222px"></svg>')
self.assertIsNone(svg_util.make_square(svg))
def test_resized_no_viewbox(self):
svg = ElementTree.fromstring(
XML_HEADER
+ b'<svg width="30" hEIght="15"></svg>')
result = ElementTree.tostring(svg_util.make_square(svg))
self.assertTrue(b'width="80"' in result)
self.assertTrue(b'hEIght="80"' in result)
self.assertTrue(b'viewBox="0 -7 30 30"' in result)
def test_resized_float_width_height(self):
svg = ElementTree.fromstring(
XML_HEADER
+ b'<svg width="21.12" height="21.22"></svg>')
result = ElementTree.tostring(svg_util.make_square(svg))
self.assertTrue(b'width="80"' in result)
self.assertTrue(b'height="80"' in result)
self.assertTrue(b'viewBox="0 0 21 21"' in result)
def test_resized_with_viewbox(self):
svg = ElementTree.fromstring(
XML_HEADER
+ b'<svg width="30" height="15" vIewBox="-15 10 60 30"></svg>')
result = ElementTree.tostring(svg_util.make_square(svg))
self.assertTrue(b'width="80"' in result)
self.assertTrue(b'height="80"' in result)
self.assertTrue(b'vIewBox="-15 -5 60 60"' in result)
def test_resized_viewbox_no_width_height(self):
"""Truncate decimals"""
svg = ElementTree.fromstring(
XML_HEADER
+ b'<svg viewBox="-10.23 32.18 75.876 75.956"></svg>')
result = ElementTree.tostring(svg_util.make_square(svg))
self.assertTrue(b'width="80"' in result)
self.assertTrue(b'height="80"' in result)
self.assertTrue(b'viewBox="-10 32 75 75"' in result)
class ColorIconTests(TestCase):
def test_color_fill(self):
svg = ElementTree.fromstring(
XML_HEADER + b'<svg width="10" height="10">'
+ b'<g strOKe="none" fill="#123"></g></svg>')
with self.settings(SVG_COLORS={'white': '#fff', 'green': '#0f5'}):
result = svg_util.color_icon(svg)
self.assertEqual(2, len(result))
self.assertTrue('white' in result)
self.assertTrue('green' in result)
self.assertTrue(b'strOKe="none"' in
ElementTree.tostring(result['white']))
self.assertFalse(b'fill="#0f5"' in
ElementTree.tostring(result['white']))
self.assertTrue(b'fill="#0f5"' in
ElementTree.tostring(result['green']))
def test_color_stroke(self):
svg = ElementTree.fromstring(
XML_HEADER + b'<svg width="10" height="10">'
+ b'<g stroke="#000" fill="noNE"></g></svg>')
with self.settings(SVG_COLORS={'white': '#fff', 'green': '#0f5'}):
result = svg_util.color_icon(svg)
self.assertEqual(2, len(result))
self.assertTrue('white' in result)
self.assertTrue('green' in result)
self.assertTrue(b'fill="noNE"' in
ElementTree.tostring(result['white']))
self.assertFalse(b'stroke="#0f5"' in
ElementTree.tostring(result['white']))
self.assertTrue(b'stroke="#0f5"' in
ElementTree.tostring(result['green']))
def test_color_style(self):
svg = ElementTree.fromstring(
XML_HEADER + b'<svg width="10" height="10">'
+ b'<g sTYle="strOKe: #123; fILL: none;"></g></svg>')
with self.settings(SVG_COLORS={'white': '#fff', 'green': '#0f5'}):
result = svg_util.color_icon(svg)
self.assertEqual(2, len(result))
self.assertTrue('white' in result)
self.assertTrue('green' in result)
self.assertTrue(b'sTYle="stroke: #fff; fILL: none;"' in
ElementTree.tostring(result['white']))
def test_color_embedded_stylesheet(self):
svg = ElementTree.fromstring(
XML_HEADER + b'<svg width="10" height="10">'
+ b'<stYLe>\n.some_class{\nfill:#123; STroke: grey;}\n</stYLe>\n'
+ b'<g class="some_class"></g></svg>')
with self.settings(SVG_COLORS={'white': '#fff', 'green': '#0f5'}):
result = svg_util.color_icon(svg)
self.assertEqual(2, len(result))
self.assertTrue('white' in result)
self.assertTrue('green' in result)
self.assertTrue(b'fill: #fff;'
in ElementTree.tostring(result['white']))
self.assertTrue(b'stroke: #0f5;'
in ElementTree.tostring(result['green']))
| 42.700787 | 77 | 0.585654 | [
"CC0-1.0"
] | 18F/peacecorps-site | peacecorps/peacecorps/tests/test_util_svg.py | 5,423 | Python |
# -*- coding: utf-8 -*-
"""Plot to demonstrate the qualitative1 colormap.
"""
import numpy as np
import matplotlib.pyplot as plt
from typhon.plots import (figsize, cmap2rgba)
x = np.linspace(0, 10, 100)
fig, ax = plt.subplots(figsize=figsize(10))
ax.set_prop_cycle(color=cmap2rgba('qualitative1', 7))
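# Draw seven lines, one for each colour taken from the qualitative1 colormap.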
for c in np.arange(1, 8):
ax.plot(x, (15 + x) * c, linewidth=3)
ax.set_xlim(x.min(), x.max())
fig.tight_layout()
plt.show()
| 19.818182 | 53 | 0.683486 | [
"MIT"
] | ChanJeunlam/typhon | doc/pyplots/plot_qualitative1.py | 436 | Python |
import html
import json
import random
import time
import pyowm
from datetime import datetime
from typing import Optional, List
import requests
from telegram import Message, Chat, Update, Bot, MessageEntity
from telegram import ParseMode
from telegram.ext import CommandHandler, run_async, Filters
from cinderella.modules.disable import DisableAbleCommandHandler
from cinderella import dispatcher, StartTime
from requests import get
def get_readable_time(seconds: int) -> str:
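    """Convert a duration in seconds into a compact human-readable string."""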
count = 0
ping_time = ""
time_list = []
time_suffix_list = ["s", "m", "h", "days"]
while count < 4:
count += 1
if count < 3:
remainder, result = divmod(seconds, 60)
else:
remainder, result = divmod(seconds, 24)
if seconds == 0 and remainder == 0:
break
time_list.append(int(result))
seconds = int(remainder)
for x in range(len(time_list)):
time_list[x] = str(time_list[x]) + time_suffix_list[x]
if len(time_list) == 4:
ping_time += time_list.pop() + ", "
time_list.reverse()
ping_time += ":".join(time_list)
return ping_time
@run_async
def ping(bot: Bot, update: Update):
start_time = time.time()
requests.get('https://api.telegram.org')
end_time = time.time()
ping_time = str(round((end_time - start_time), 2) % 60)
uptime = get_readable_time((time.time() - StartTime))
update.effective_message.reply_text(f"🏓 Pong!\n⏱️<b>Reply took:</b> {ping_time}s\n🔮<b>Service Uptime:</b> {uptime}", parse_mode=ParseMode.HTML)
@run_async
def uptime(bot: Bot, update: Update):
uptime = get_readable_time((time.time() - StartTime))
update.effective_message.reply_text(f"🔮Service Uptime: {uptime}")
__help__ = """
 - /ping: get ping time of bot to telegram server
 - /uptime: show how long the bot has been running
"""
__mod_name__ = "PING"
PING_HANDLER = DisableAbleCommandHandler("ping", ping)
UPTIME_HANDLER = DisableAbleCommandHandler("uptime", uptime)
dispatcher.add_handler(UPTIME_HANDLER)
dispatcher.add_handler(PING_HANDLER)
| 28.959459 | 147 | 0.692021 | [
"MIT"
] | SLdevilX/Lexter | cinderella/modules/ping.py | 2,156 | Python |
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import time
from common import network_metrics
from telemetry.page import page_test
from telemetry.value import scalar
CHROME_PROXY_VIA_HEADER = 'Chrome-Compression-Proxy'
class ChromeProxyMetricException(page_test.MeasurementFailure):
pass
class ChromeProxyResponse(network_metrics.HTTPResponse):
""" Represents an HTTP response from a timeline event."""
def __init__(self, event):
super(ChromeProxyResponse, self).__init__(event)
def ShouldHaveChromeProxyViaHeader(self):
resp = self.response
# Ignore https and data url
if resp.url.startswith('https') or resp.url.startswith('data:'):
return False
# Ignore 304 Not Modified and cache hit.
if resp.status == 304 or resp.served_from_cache:
return False
# Ignore invalid responses that don't have any header. Log a warning.
if not resp.headers:
      logging.warning('response for %s does not have any header '
'(refer=%s, status=%s)',
resp.url, resp.GetHeader('Referer'), resp.status)
return False
return True
def HasResponseHeader(self, key, value):
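    """Returns whether any comma-separated value of the response header |key| equals |value|."""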
response_header = self.response.GetHeader(key)
if not response_header:
return False
values = [v.strip() for v in response_header.split(',')]
return any(v == value for v in values)
def HasRequestHeader(self, key, value):
if key not in self.response.request_headers:
return False
request_header = self.response.request_headers[key]
values = [v.strip() for v in request_header.split(',')]
return any(v == value for v in values)
def HasChromeProxyViaHeader(self):
via_header = self.response.GetHeader('Via')
if not via_header:
return False
vias = [v.strip(' ') for v in via_header.split(',')]
# The Via header is valid if it has a 4-character version prefix followed by
# the proxy name, for example, "1.1 Chrome-Compression-Proxy".
return any(v[4:] == CHROME_PROXY_VIA_HEADER for v in vias)
def HasExtraViaHeader(self, extra_header):
return self.HasResponseHeader('Via', extra_header)
def IsValidByViaHeader(self):
return (not self.ShouldHaveChromeProxyViaHeader() or
self.HasChromeProxyViaHeader())
def GetChromeProxyRequestHeaderValue(self, key):
"""Get a specific Chrome-Proxy request header value.
Returns:
The value for a specific Chrome-Proxy request header value for a
given key. Returns None if no such key is present.
"""
if 'Chrome-Proxy' not in self.response.request_headers:
return None
chrome_proxy_request_header = self.response.request_headers['Chrome-Proxy']
values = [v.strip() for v in chrome_proxy_request_header.split(',')]
for value in values:
kvp = value.split('=', 1)
if len(kvp) == 2 and kvp[0].strip() == key:
return kvp[1].strip()
return None
def GetChromeProxyClientType(self):
"""Get the client type directive from the Chrome-Proxy request header.
Returns:
The client type directive from the Chrome-Proxy request header for the
request that lead to this response. For example, if the request header
"Chrome-Proxy: c=android" is present, then this method would return
"android". Returns None if no client type directive is present.
"""
return self.GetChromeProxyRequestHeaderValue('c')
def HasChromeProxyLoFiRequest(self):
return self.HasRequestHeader('Chrome-Proxy', "q=low")
def HasChromeProxyLoFiResponse(self):
return self.HasResponseHeader('Chrome-Proxy', "q=low")
def HasChromeProxyPassThroughRequest(self):
return self.HasRequestHeader('Chrome-Proxy', "pass-through")
| 35.685185 | 80 | 0.706539 | [
"MIT"
] | Teamxrtc/webrtc-streaming-node | third_party/webrtc/src/chromium/src/tools/chrome_proxy/common/chrome_proxy_metrics.py | 3,854 | Python |
import logging.config
from netdisco.discovery import NetworkDiscovery
LOG = logging.getLogger(__name__)
def discover():
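    """Scan the local network and return the host of the Hue bridge, or None if none or several are found."""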
hue_bridges = []
LOG.info('Searching for Hue devices...')
netdis = NetworkDiscovery()
netdis.scan()
for dev in netdis.discover():
for info in netdis.get_info(dev):
if 'name' in info and 'Philips hue' in info['name']:
hue_bridges.append(info)
LOG.info('Hue bridge found: %s', info['host'])
netdis.stop()
if len(hue_bridges) == 1:
return hue_bridges[0]['host']
    if len(hue_bridges) > 1:
LOG.warning('More than one Hue bridge found.')
elif not hue_bridges:
LOG.warning('No Hue bridges found.')
return None
| 22.727273 | 64 | 0.616 | [
"MIT"
] | ChadiEM/philips-hue-hooks | philips_hue_hooks/discovery/hue_discovery.py | 750 | Python |
import importlib
import logging
import os
import sys
from pathlib import Path
logger = logging.getLogger('second.utils.loader')
CUSTOM_LOADED_MODULES = {}
def _get_possible_module_path(paths):
ret = []
for p in paths:
p = Path(p)
for path in p.glob("*"):
if path.suffix in ["py", ".so"] or (path.is_dir()):
if path.stem.isidentifier():
ret.append(path)
return ret
def _get_regular_import_name(path, module_paths):
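  """Map a file path to a dotted module name relative to one of the candidate module paths, or None."""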
path = Path(path)
for mp in module_paths:
mp = Path(mp)
if mp == path:
return path.stem
try:
relative_path = path.relative_to(Path(mp))
parts = list((relative_path.parent / relative_path.stem).parts)
module_name = '.'.join([mp.stem] + parts)
return module_name
    except ValueError:  # path is not relative to this module path
pass
return None
def import_file(path, name: str = None, add_to_sys=True,
disable_warning=False):
global CUSTOM_LOADED_MODULES
path = Path(path)
module_name = path.stem
try:
user_paths = os.environ['PYTHONPATH'].split(os.pathsep)
except KeyError:
user_paths = []
possible_paths = _get_possible_module_path(user_paths)
model_import_name = _get_regular_import_name(path, possible_paths)
if model_import_name is not None:
return import_name(model_import_name)
if name is not None:
module_name = name
spec = importlib.util.spec_from_file_location(module_name, path)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
if not disable_warning:
logger.warning((
f"Failed to perform regular import for file {path}. "
"this means this file isn't in any folder in PYTHONPATH "
"or don't have __init__.py in that project. "
"directly file import may fail and some reflecting features are "
"disabled even if import succeed. please add your project to PYTHONPATH "
"or add __init__.py to ensure this file can be regularly imported. "
))
  if add_to_sys:  # this will enable finding objects defined in a file.
# avoid replace system modules.
if module_name in sys.modules and module_name not in CUSTOM_LOADED_MODULES:
raise ValueError(f"{module_name} exists in system.")
CUSTOM_LOADED_MODULES[module_name] = module
sys.modules[module_name] = module
return module
def import_name(name, package=None):
module = importlib.import_module(name, package)
return module
| 32.974684 | 85 | 0.647601 | [
"MIT"
] | yukke42/SECOND | second/utils/loader.py | 2,605 | Python |
"""
This module defines the policies that will be used in order to sample the information flow
patterns to compare with.
The general approach is a function that takes in any eventual parameters and outputs a list of
pairs of DB_Ids for which the flow will be calculated.
"""
import random
import hashlib
import json
import numpy as np
from typing import Union, List, Tuple
import collections.abc
from bioflow.utils.log_behavior import get_logger
from bioflow.utils.general_utils import _is_int
log = get_logger(__name__)
def matched_sample_distribution(floats_arr: np.array, samples_no: int,
granularity: int = 100, logmode: bool = False) -> np.array:
"""
Tries to guess a distribution of floats and sample from it.
uses np.histogram with the number of bins equal to the granularity parameter. For each
sample, selects which bin to sample and then picks from the bin a float according to a
uniform distribution. if logmode is enabled, histogram will be in the log-space, as well as
the sampling.
:param floats_arr: array of floats for which to match the distribution
:param samples_no: number of random samples to retrieve
:param granularity: granularity at which to operate
:param logmode: if sample in log-space
:return: samples drawn from the empirically matched distribution
"""
if logmode:
floats_arr = np.log(floats_arr) # will crash if any are 0
hist, bin_edges = np.histogram(floats_arr, bins=granularity, density=True)
pad = np.arange(granularity)
    # np.histogram(density=True) returns densities rather than probabilities,
    # so normalize before drawing bin indices.
    locations = np.random.choice(pad, samples_no, p=hist / hist.sum())
samples = []
for i in locations:
samples.append(np.random.uniform(bin_edges[i], bin_edges[i+1]))
if logmode:
return np.exp(samples)
else:
return samples
def _reduce_distribution(floats_arr: np.array):
"""
    Normalizes the floats to [0, 1], bins them into a 100-bin histogram and rounds it to a 0.01
    resolution. Used for hashing and distribution matching.
:param floats_arr: floats for which to calculate the rounded distribution
:return: rounded distribution
"""
normalized_arr = floats_arr / np.max(floats_arr)
bins = np.linspace(0, 1.001, 101) # because floats round funny
hist, bin_edges = np.histogram(normalized_arr, bins=bins, density=True)
rounded_hist = np.array(hist * 100).astype(np.int)
return rounded_hist
def _characterize_set(sample: Union[List[int], List[Tuple[int, float]]]):
"""
None-robust helper function to characterize a sample set by its length, nature of items in
    the sample and the eventual distribution of weights within the sample.
:param sample: sample to characterize
:return: set length (0 if None), 1 if items are ids, 2 if ids and weights (0 if
None), rounded distribution ([] if None or items are ids)
"""
if sample is None:
return 0, 0, []
if len(sample) == 1:
if _is_int(sample[0]):
return 1, 1, []
else:
return 1, 2, []
if _is_int(sample[0]):
rounded_hist = [1] * 100
rounded_hist = np.array(rounded_hist).astype(np.int)
return len(sample), 1, rounded_hist.tolist()
else:
rounded_hist = _reduce_distribution(np.array(sample).astype(np.float)[:, 1])
return len(sample), 2, rounded_hist.tolist()
def characterize_flow_parameters(sample: Union[List[int], List[Tuple[int, float]]],
secondary_sample: Union[List[int], List[Tuple[int, float]], None],
sparse_rounds: int):
"""
    Characterizes the primary and secondary sets and computes their hash, which can be used to
match similar samples for random sampling.
:param sample: primary set
:param secondary_sample: secondary set
:param sparse_rounds: if sparse rounds are to be performed
:return: first set length, shape, hist, second set length, shape, hist, sparse rounds, hash
"""
prim_len, prim_shape, prim_hist = _characterize_set(sample)
sec_len, sec_shape, sec_hist = _characterize_set(secondary_sample)
_hash = hashlib.md5(json.dumps([prim_len, prim_shape, prim_hist,
sec_len, sec_shape, sec_hist,
sparse_rounds]).encode('utf-8')).hexdigest()
log.debug('hashed a flow parameters from:\n'
'%d/%d/%s; \n'
'%d/%d/%s; \n'
'%d \n'
'to %s' % (prim_len, prim_shape, prim_hist,
sec_len, sec_shape, sec_hist,
sparse_rounds, _hash))
return prim_len, prim_shape, prim_hist, sec_len, sec_shape, sec_hist, sparse_rounds, _hash
def _sample_floats(floats, float_sampling_method='exact', matched_distro_precision: int = 100):
"""
A wrapper methods to sample a float distribution according to a method
:param floats:
:param float_sampling_method: exact (permutation of weights) | distro (trying to match the
empirical distribution) | logdistro (trying to match the empirical distribution in the log
space)
:param matched_distro_precision: how closely to try to match the distribution (granularity
parameter pass-through to the matched_sample_distribution)
:return: sample of floats
"""
if float_sampling_method == 'exact':
ret_floats = floats.copy()
np.random.shuffle(ret_floats)
return ret_floats
if float_sampling_method == 'distro':
return matched_sample_distribution(floats, len(floats), granularity=matched_distro_precision)
if float_sampling_method == 'logdistro':
return matched_sample_distribution(floats, len(floats),
granularity=matched_distro_precision, logmode=True)
def matched_sampling(sample, secondary_sample,
background, samples, float_sampling_method='exact'):
"""
    The general random sampling strategy: samples sets of the same size and shape as the primary
    and secondary sample sets and, if they are weighted, tries to match the random sample weights
    according to the float_sampling_method.
:param sample: primary sample set
    :param secondary_sample: secondary sample set
:param background: background of ids (and potentially weights) from which to sample
:param samples: random samples wanted
    :param float_sampling_method: exact/distro/logdistro. In the general case the sampling
    parametrization method ingests all parameters in a single string argument; here it is a
    pass-through parameter for the _sample_floats function, used when samples are weighted and the
    distribution of weights is being matched.
:return:
"""
# What if we have an overlap between the items in the primary and the secondary
    # samples? => sampling will always try to separate the two; it will crash if there
    # is not enough background to separate them.
if _is_int(background[0]):
background_ids = np.array(background)
background_whg = np.ones_like(background_ids).astype(np.float)
else:
background_ids = np.array(background)[:, 0]
background_whg = np.array(background)[:, 1]
log.debug('debug sum %s, type: %s, all:%s' % (np.sum(background_whg),
type(background_whg),
background_whg))
background_whg /= np.sum(background_whg)
if secondary_sample is None:
if _is_int(sample[0]): # it should never be an int, but for safety ...
for i in range(0, samples):
selected = np.random.choice(background_ids, len(sample), p=background_whg,
replace=False)
yield i, selected, None
else:
for i in range(0, samples):
id_loads = np.random.choice(background_ids, len(sample), p=background_whg,
replace=False)
float_part = _sample_floats(np.array(sample)[:, 1], float_sampling_method)
ids_and_floats = [(_id, _float) for _id, _float in zip(id_loads, float_part)]
yield i, ids_and_floats, None
else:
if _is_int(sample[0]):
for i in range(0, samples):
selected = np.random.choice(background_ids,
len(sample)+len(secondary_sample),
p=background_whg, replace=False)
np.random.shuffle(selected)
yield i, selected[:len(sample)], selected[-len(secondary_sample):]
else:
for i in range(0, samples):
selected = np.random.choice(background_ids,
len(sample)+len(secondary_sample),
p=background_whg, replace=False)
np.random.shuffle(selected)
id_loads = selected[:len(sample)]
float_part = _sample_floats(np.array(sample)[:, 1], float_sampling_method)
ids_and_floats = [(_id, _float) for _id, _float in zip(id_loads, float_part)]
sec_id_loads = selected[-len(secondary_sample):]
sec_float_part = _sample_floats(np.array(secondary_sample)[:, 1], float_sampling_method)
sec_ids_and_floats = [(_id, _float) for _id, _float
in zip(sec_id_loads, sec_float_part)]
yield i, ids_and_floats, sec_ids_and_floats | 40.075314 | 104 | 0.642723 | [
"BSD-3-Clause"
] | chiffa/BioFlow | bioflow/algorithms_bank/sampling_policies.py | 9,578 | Python |
from django.conf.urls import patterns, url
from . import views
urlpatterns = patterns('',
url(r'(?P<year>\d{4})-(?P<month>0[1-9]|1[0-2])-(?P<day>0[1-9]|[1-2][0-9]|3[0-1])/$',
views.DayLogViewer.as_view(), name="log_day"),
url(r'(?P<year>\d{4})-(?P<month>0[1-9]|1[0-2])-(?P<day>0[1-9]|[1-2][0-9]|3[0-1]).log$',
views.DayLogViewer.as_view(format='text'), name="log_day_text"),
url(r'^missed/(?P<nick>[\w\-\|]*)/$', views.MissedLogViewer.as_view(),
name="log_missed"),
url(r'^msg/(?P<msg_pk>\d+)/$', views.SingleLogViewer.as_view(),
name="log_message_permalink"),
url(r'^search/$', views.SearchLogViewer.as_view(), name='log_search'),
url(r'^kudos.json$', views.Kudos.as_view(), name='kudos_json'),
url(r'^kudos/$', views.ChannelKudos.as_view(), name='kudos'),
url(r'^help/$', views.Help.as_view(), name='help_bot'),
url(r'^stream/$', views.LogStream.as_view(), name='log_stream'),
url(r'^$', views.DayLogViewer.as_view(),
name="log_current"),
)
| 46.590909 | 91 | 0.600976 | [
"MIT"
] | Reception123/IRCLogBot | botbot/apps/logs/urls.py | 1,025 | Python |
# Databricks notebook source
# MAGIC %md-sandbox
# MAGIC
# MAGIC <div style="text-align: center; line-height: 0; padding-top: 9px;">
# MAGIC <img src="https://databricks.com/wp-content/uploads/2018/03/db-academy-rgb-1200px.png" alt="Databricks Learning" style="width: 600px">
# MAGIC </div>
# COMMAND ----------
# MAGIC %md
# MAGIC # Parsing Errors
# MAGIC
# MAGIC This code is going to throw several errors. Click on **`Run All`** above.
# COMMAND ----------
# MAGIC %run ../Includes/module-4/setup-lesson-4.02
# COMMAND ----------
# ANSWER
x = 1
x * 7
# COMMAND ----------
# MAGIC %md
# MAGIC Note that **`Run All`** execution mimics scheduled job execution; the **`Command skipped`** output we see below is the same we'll see in a job result.
# COMMAND ----------
# ANSWER
y = 99.52
y // 1
# COMMAND ----------
# MAGIC %md
# MAGIC The above is what we see when we have Python errors.
# COMMAND ----------
# ANSWER
import pyspark.sql.functions as F
# COMMAND ----------
# MAGIC %md
# MAGIC Let's look at a Spark error.
# MAGIC
# MAGIC While running multiple commands in a single cell, it can sometimes be difficult to parse where an error is coming from.
# COMMAND ----------
# ANSWER
df = (spark.read
.format("csv")
.option("header", True)
.schema("date DATE, temp INTEGER")
.load("/databricks-datasets/weather/low_temps"))
df.createOrReplaceTempView("low_temps")
df.join(df, "date").groupBy("date").count()
# COMMAND ----------
# MAGIC %md
# MAGIC Sometimes an error isn't an error, but doesn't achieve what was intended.
# COMMAND ----------
# MAGIC %sql
# MAGIC -- ANSWER
# MAGIC SELECT dayofmonth(date) FROM low_temps
# COMMAND ----------
# MAGIC %md
# MAGIC Use the below cell to figure out how to fix the code above.
# COMMAND ----------
display(df)
# COMMAND ----------
# MAGIC %md
# MAGIC Column names cause common errors when trying to save tables.
# COMMAND ----------
# MAGIC %sql
# MAGIC -- ANSWER
# MAGIC CREATE TABLE test_table
# MAGIC AS (
# MAGIC SELECT dayofmonth(date) % 3 three_day_cycle FROM low_temps
# MAGIC )
# COMMAND ----------
# MAGIC %md
# MAGIC Run the following cell to delete the tables and files associated with this lesson.
# COMMAND ----------
DA.cleanup()
# COMMAND ----------
# MAGIC %md-sandbox
# MAGIC © 2022 Databricks, Inc. All rights reserved.<br/>
# MAGIC Apache, Apache Spark, Spark and the Spark logo are trademarks of the <a href="https://www.apache.org/">Apache Software Foundation</a>.<br/>
# MAGIC <br/>
# MAGIC <a href="https://databricks.com/privacy-policy">Privacy Policy</a> | <a href="https://databricks.com/terms-of-use">Terms of Use</a> | <a href="https://help.databricks.com/">Support</a>
| 23.405172 | 192 | 0.650829 | [
"CC0-1.0"
] | Code360In/advanced-data-engineering-with-databricks | Advanced-Data-Engineering-with-Databricks/Solutions/04 - Databricks in Production/ADE 4.02 - Error Prone.py | 2,715 | Python |
from electrum_blk.i18n import _
fullname = 'Ledger Wallet'
description = 'Provides support for Ledger hardware wallet'
requires = [('btchip', 'github.com/ledgerhq/btchip-python')]
registers_keystore = ('hardware', 'ledger', _("Ledger wallet"))
available_for = ['qt', 'cmdline']
| 34.875 | 63 | 0.738351 | [
"MIT"
] | CoinBlack/electrum | electrum_blk/plugins/ledger/__init__.py | 279 | Python |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['PrivateEndpointArgs', 'PrivateEndpoint']
@pulumi.input_type
class PrivateEndpointArgs:
def __init__(__self__, *,
resource_group_name: pulumi.Input[str],
id: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
manual_private_link_service_connections: Optional[pulumi.Input[Sequence[pulumi.Input['PrivateLinkServiceConnectionArgs']]]] = None,
private_endpoint_name: Optional[pulumi.Input[str]] = None,
private_link_service_connections: Optional[pulumi.Input[Sequence[pulumi.Input['PrivateLinkServiceConnectionArgs']]]] = None,
subnet: Optional[pulumi.Input['SubnetArgs']] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
The set of arguments for constructing a PrivateEndpoint resource.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[str] id: Resource ID.
:param pulumi.Input[str] location: Resource location.
:param pulumi.Input[Sequence[pulumi.Input['PrivateLinkServiceConnectionArgs']]] manual_private_link_service_connections: A grouping of information about the connection to the remote resource. Used when the network admin does not have access to approve connections to the remote resource.
:param pulumi.Input[str] private_endpoint_name: The name of the private endpoint.
:param pulumi.Input[Sequence[pulumi.Input['PrivateLinkServiceConnectionArgs']]] private_link_service_connections: A grouping of information about the connection to the remote resource.
:param pulumi.Input['SubnetArgs'] subnet: The ID of the subnet from which the private IP will be allocated.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
"""
pulumi.set(__self__, "resource_group_name", resource_group_name)
if id is not None:
pulumi.set(__self__, "id", id)
if location is not None:
pulumi.set(__self__, "location", location)
if manual_private_link_service_connections is not None:
pulumi.set(__self__, "manual_private_link_service_connections", manual_private_link_service_connections)
if private_endpoint_name is not None:
pulumi.set(__self__, "private_endpoint_name", private_endpoint_name)
if private_link_service_connections is not None:
pulumi.set(__self__, "private_link_service_connections", private_link_service_connections)
if subnet is not None:
pulumi.set(__self__, "subnet", subnet)
if tags is not None:
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter
def id(self) -> Optional[pulumi.Input[str]]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@id.setter
def id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "id", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter(name="manualPrivateLinkServiceConnections")
def manual_private_link_service_connections(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['PrivateLinkServiceConnectionArgs']]]]:
"""
A grouping of information about the connection to the remote resource. Used when the network admin does not have access to approve connections to the remote resource.
"""
return pulumi.get(self, "manual_private_link_service_connections")
@manual_private_link_service_connections.setter
def manual_private_link_service_connections(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['PrivateLinkServiceConnectionArgs']]]]):
pulumi.set(self, "manual_private_link_service_connections", value)
@property
@pulumi.getter(name="privateEndpointName")
def private_endpoint_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the private endpoint.
"""
return pulumi.get(self, "private_endpoint_name")
@private_endpoint_name.setter
def private_endpoint_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "private_endpoint_name", value)
@property
@pulumi.getter(name="privateLinkServiceConnections")
def private_link_service_connections(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['PrivateLinkServiceConnectionArgs']]]]:
"""
A grouping of information about the connection to the remote resource.
"""
return pulumi.get(self, "private_link_service_connections")
@private_link_service_connections.setter
def private_link_service_connections(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['PrivateLinkServiceConnectionArgs']]]]):
pulumi.set(self, "private_link_service_connections", value)
@property
@pulumi.getter
def subnet(self) -> Optional[pulumi.Input['SubnetArgs']]:
"""
The ID of the subnet from which the private IP will be allocated.
"""
return pulumi.get(self, "subnet")
@subnet.setter
def subnet(self, value: Optional[pulumi.Input['SubnetArgs']]):
pulumi.set(self, "subnet", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
class PrivateEndpoint(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
id: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
manual_private_link_service_connections: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['PrivateLinkServiceConnectionArgs']]]]] = None,
private_endpoint_name: Optional[pulumi.Input[str]] = None,
private_link_service_connections: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['PrivateLinkServiceConnectionArgs']]]]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
subnet: Optional[pulumi.Input[pulumi.InputType['SubnetArgs']]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
"""
Private endpoint resource.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] id: Resource ID.
:param pulumi.Input[str] location: Resource location.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['PrivateLinkServiceConnectionArgs']]]] manual_private_link_service_connections: A grouping of information about the connection to the remote resource. Used when the network admin does not have access to approve connections to the remote resource.
:param pulumi.Input[str] private_endpoint_name: The name of the private endpoint.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['PrivateLinkServiceConnectionArgs']]]] private_link_service_connections: A grouping of information about the connection to the remote resource.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[pulumi.InputType['SubnetArgs']] subnet: The ID of the subnet from which the private IP will be allocated.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: PrivateEndpointArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Private endpoint resource.
:param str resource_name: The name of the resource.
:param PrivateEndpointArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(PrivateEndpointArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
id: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
manual_private_link_service_connections: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['PrivateLinkServiceConnectionArgs']]]]] = None,
private_endpoint_name: Optional[pulumi.Input[str]] = None,
private_link_service_connections: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['PrivateLinkServiceConnectionArgs']]]]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
subnet: Optional[pulumi.Input[pulumi.InputType['SubnetArgs']]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = PrivateEndpointArgs.__new__(PrivateEndpointArgs)
__props__.__dict__["id"] = id
__props__.__dict__["location"] = location
__props__.__dict__["manual_private_link_service_connections"] = manual_private_link_service_connections
__props__.__dict__["private_endpoint_name"] = private_endpoint_name
__props__.__dict__["private_link_service_connections"] = private_link_service_connections
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["subnet"] = subnet
__props__.__dict__["tags"] = tags
__props__.__dict__["etag"] = None
__props__.__dict__["name"] = None
__props__.__dict__["network_interfaces"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:network/v20190901:PrivateEndpoint"), pulumi.Alias(type_="azure-native:network:PrivateEndpoint"), pulumi.Alias(type_="azure-nextgen:network:PrivateEndpoint"), pulumi.Alias(type_="azure-native:network/v20180801:PrivateEndpoint"), pulumi.Alias(type_="azure-nextgen:network/v20180801:PrivateEndpoint"), pulumi.Alias(type_="azure-native:network/v20181001:PrivateEndpoint"), pulumi.Alias(type_="azure-nextgen:network/v20181001:PrivateEndpoint"), pulumi.Alias(type_="azure-native:network/v20181101:PrivateEndpoint"), pulumi.Alias(type_="azure-nextgen:network/v20181101:PrivateEndpoint"), pulumi.Alias(type_="azure-native:network/v20181201:PrivateEndpoint"), pulumi.Alias(type_="azure-nextgen:network/v20181201:PrivateEndpoint"), pulumi.Alias(type_="azure-native:network/v20190201:PrivateEndpoint"), pulumi.Alias(type_="azure-nextgen:network/v20190201:PrivateEndpoint"), pulumi.Alias(type_="azure-native:network/v20190401:PrivateEndpoint"), pulumi.Alias(type_="azure-nextgen:network/v20190401:PrivateEndpoint"), pulumi.Alias(type_="azure-native:network/v20190601:PrivateEndpoint"), pulumi.Alias(type_="azure-nextgen:network/v20190601:PrivateEndpoint"), pulumi.Alias(type_="azure-native:network/v20190701:PrivateEndpoint"), pulumi.Alias(type_="azure-nextgen:network/v20190701:PrivateEndpoint"), pulumi.Alias(type_="azure-native:network/v20190801:PrivateEndpoint"), pulumi.Alias(type_="azure-nextgen:network/v20190801:PrivateEndpoint"), pulumi.Alias(type_="azure-native:network/v20191101:PrivateEndpoint"), pulumi.Alias(type_="azure-nextgen:network/v20191101:PrivateEndpoint"), pulumi.Alias(type_="azure-native:network/v20191201:PrivateEndpoint"), pulumi.Alias(type_="azure-nextgen:network/v20191201:PrivateEndpoint"), pulumi.Alias(type_="azure-native:network/v20200301:PrivateEndpoint"), pulumi.Alias(type_="azure-nextgen:network/v20200301:PrivateEndpoint"), pulumi.Alias(type_="azure-native:network/v20200401:PrivateEndpoint"), pulumi.Alias(type_="azure-nextgen:network/v20200401:PrivateEndpoint"), pulumi.Alias(type_="azure-native:network/v20200501:PrivateEndpoint"), pulumi.Alias(type_="azure-nextgen:network/v20200501:PrivateEndpoint"), pulumi.Alias(type_="azure-native:network/v20200601:PrivateEndpoint"), pulumi.Alias(type_="azure-nextgen:network/v20200601:PrivateEndpoint"), pulumi.Alias(type_="azure-native:network/v20200701:PrivateEndpoint"), pulumi.Alias(type_="azure-nextgen:network/v20200701:PrivateEndpoint"), pulumi.Alias(type_="azure-native:network/v20200801:PrivateEndpoint"), pulumi.Alias(type_="azure-nextgen:network/v20200801:PrivateEndpoint"), pulumi.Alias(type_="azure-native:network/v20201101:PrivateEndpoint"), pulumi.Alias(type_="azure-nextgen:network/v20201101:PrivateEndpoint"), pulumi.Alias(type_="azure-native:network/v20210201:PrivateEndpoint"), pulumi.Alias(type_="azure-nextgen:network/v20210201:PrivateEndpoint"), pulumi.Alias(type_="azure-native:network/v20210301:PrivateEndpoint"), pulumi.Alias(type_="azure-nextgen:network/v20210301:PrivateEndpoint")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(PrivateEndpoint, __self__).__init__(
'azure-native:network/v20190901:PrivateEndpoint',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'PrivateEndpoint':
"""
Get an existing PrivateEndpoint resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = PrivateEndpointArgs.__new__(PrivateEndpointArgs)
__props__.__dict__["etag"] = None
__props__.__dict__["location"] = None
__props__.__dict__["manual_private_link_service_connections"] = None
__props__.__dict__["name"] = None
__props__.__dict__["network_interfaces"] = None
__props__.__dict__["private_link_service_connections"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["subnet"] = None
__props__.__dict__["tags"] = None
__props__.__dict__["type"] = None
return PrivateEndpoint(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def etag(self) -> pulumi.Output[str]:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def location(self) -> pulumi.Output[Optional[str]]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter(name="manualPrivateLinkServiceConnections")
def manual_private_link_service_connections(self) -> pulumi.Output[Optional[Sequence['outputs.PrivateLinkServiceConnectionResponse']]]:
"""
A grouping of information about the connection to the remote resource. Used when the network admin does not have access to approve connections to the remote resource.
"""
return pulumi.get(self, "manual_private_link_service_connections")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="networkInterfaces")
def network_interfaces(self) -> pulumi.Output[Sequence['outputs.NetworkInterfaceResponse']]:
"""
An array of references to the network interfaces created for this private endpoint.
"""
return pulumi.get(self, "network_interfaces")
@property
@pulumi.getter(name="privateLinkServiceConnections")
def private_link_service_connections(self) -> pulumi.Output[Optional[Sequence['outputs.PrivateLinkServiceConnectionResponse']]]:
"""
A grouping of information about the connection to the remote resource.
"""
return pulumi.get(self, "private_link_service_connections")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
The provisioning state of the private endpoint resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def subnet(self) -> pulumi.Output[Optional['outputs.SubnetResponse']]:
"""
The ID of the subnet from which the private IP will be allocated.
"""
return pulumi.get(self, "subnet")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type.
"""
return pulumi.get(self, "type")
| 55.048023 | 3,065 | 0.696721 | [
"Apache-2.0"
] | polivbr/pulumi-azure-native | sdk/python/pulumi_azure_native/network/v20190901/private_endpoint.py | 19,487 | Python |
"""
Time complexity: O(n^2)
This sorting algorithm puts the smallest element in the first
place after the first iteration. Similarly, after the second
iteration, the second smallest value becomes the second value
of the list. The process continues and eventually the list
becomes sorted.
"""
# Example input assumed for demonstration; the snippet only needs a list `num` and its length `n`.
num = [64, 25, 12, 22, 11]
n = len(num)
for i in range(n):
for j in range(i+1, n):
if num[i] > num[j]:
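            # swap so the smaller of the two values moves to position i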
num[i], num[j] = num[j], num[i]
| 26.8 | 61 | 0.706468 | [
"MIT"
] | safiulanik/useful-code-snippets | sorting-algorithms/selection_sort.py | 402 | Python |
from .sorter import Sorter
from .comparator import compare_date, compare_num, compare_str
from .parser import parser_list, parser_delimiter, parser_regex
| 38.5 | 63 | 0.850649 | [
"MIT"
] | aarteagamel/python-utils | devoutils/sorter/__init__.py | 154 | Python |
# -*- coding: utf-8 -*-
from django.conf import settings
VAPID_PUBLIC_KEY = getattr(settings, 'DJANGO_INFOPUSH_VAPID_PUBLIC_KEY', '')
VAPID_PRIVATE_KEY = getattr(settings, 'DJANGO_INFOPUSH_VAPID_PRIVATE_KEY', '')
VAPID_ADMIN_EMAIL = getattr(settings, 'DJANGO_INFOPUSH_VAPID_ADMIN_EMAIL', '')
FCM_SERVER_KEY = getattr(settings, 'DJANGO_INFOPUSH_FCM_SERVER_KEY', '')
FCM_SENDER_ID = getattr(settings, 'DJANGO_INFOPUSH_FCM_SENDER_ID', '')
# how many processes to use in a pushsend management command for parallel push
# 1 disables multiprocessing
PUSHSEND_WORKERS = int(getattr(settings, 'DJANGO_INFOPUSH_PUSHSEND_WORKERS', 3))
# default push icon
DEFAULT_ICON_URL = getattr(settings, 'DJANGO_INFOPUSH_DEFAULT_ICON_URL', "/static/push/img/icon.png")
MIN_ICON_W = int(getattr(settings, 'DJANGO_INFOPUSH_MIN_ICON_W', 192))
MIN_ICON_H = int(getattr(settings, 'DJANGO_INFOPUSH_MIN_ICON_H', 192))
# kb, max filesize of push icon
ICON_CUSTOM_MAX_FILESIZE = int(getattr(settings, 'DJANGO_INFOPUSH_ICON_CUSTOM_MAX_FILESIZE', 25))
# big push image for push in Chrome
# best aspect ration is 3:2 for desktop Chrome
# mobile Chrome will crop it vertically a little
# https://web-push-book.gauntface.com/chapter-05/02-display-a-notification/#image
MIN_BIG_IMAGE_W = int(getattr(settings, 'DJANGO_INFOPUSH_MIN_BIG_IMAGE_W', 1023))
MIN_BIG_IMAGE_H = int(getattr(settings, 'DJANGO_INFOPUSH_MIN_BIG_IMAGE_H', 682))
# kb, max filesize for big image
BIG_IMAGE_MAX_FILESIZE = int(getattr(settings, 'DJANGO_INFOPUSH_BIG_IMAGE_MAX_FILESIZE', 100))
# web-site as an "app" in manifest.json
# https://developers.google.com/web/updates/2014/11/Support-for-installable-web-apps-with-webapp-manifest-in-chrome-38-for-Android
APP_ICON_URLS = getattr(settings, 'DJANGO_INFOPUSH_APP_ICON_URLS', ["push/img/app_icon.png",])
# optional, https://developers.google.com/web/updates/2015/08/using-manifest-to-set-sitewide-theme-color
APP_THEME_COLOR = getattr(settings, 'DJANGO_INFOPUSH_APP_THEME_COLOR', None)
# optional, https://developers.google.com/web/tools/lighthouse/audits/custom-splash-screen
APP_BACKGROUND_COLOR = getattr(settings, 'DJANGO_INFOPUSH_APP_BACKGROUND_COLOR', None)
# error threshold after which we disable push subscription
ERROR_THRESHOLD = int(getattr(settings, 'DJANGO_INFOPUSH_ERROR_THRESHOLD', 30))
# do not change, it is here for easy import of this constant
GCM_URL = 'https://android.googleapis.com/gcm/send'
FCM_URL = 'https://fcm.googleapis.com/fcm/send'
# this setting allows to disable CSRF for django_infopush views only, if needed
USE_CSRF = bool(getattr(settings, 'DJANGO_INFOPUSH_USE_CSRF', True)) | 55.468085 | 130 | 0.801304 | [
"MIT"
] | kilgoretrout1985/django_infopush | push/settings.py | 2,607 | Python |
from django.contrib.auth import get_user_model
from django.contrib.auth.password_validation import validate_password
from rest_framework import serializers
class SignupSerializer(serializers.ModelSerializer):
class Meta:
model = get_user_model()
fields = ['username', 'first_name', 'last_name', 'email', 'password', ]
extra_kwargs = {
'password': {'write_only': True}
}
def validate_password(self, value):
validate_password(value)
return value
def create(self, validated_data):
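        # set_password stores a salted hash rather than the raw password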
user = get_user_model()(**validated_data)
user.set_password(validated_data['password'])
user.save()
return user | 29.125 | 79 | 0.67382 | [
"MIT"
] | Sarwar242/crud_templete_django | authentication/serializers.py | 699 | Python |