code (stringlengths 22-1.05M) | apis (sequencelengths 1-3.31k) | extract_api (stringlengths 75-3.25M)
---|---|---|
"""Lighting channels module for Zigbee Home Automation."""
from __future__ import annotations
from contextlib import suppress
from zigpy.zcl.clusters import lighting
from .. import registries
from ..const import REPORT_CONFIG_DEFAULT
from .base import ClientChannel, ZigbeeChannel
@registries.ZIGBEE_CHANNEL_REGISTRY.register(lighting.Ballast.cluster_id)
class Ballast(ZigbeeChannel):
"""Ballast channel."""
@registries.CLIENT_CHANNELS_REGISTRY.register(lighting.Color.cluster_id)
class ColorClientChannel(ClientChannel):
"""Color client channel."""
@registries.BINDABLE_CLUSTERS.register(lighting.Color.cluster_id)
@registries.ZIGBEE_CHANNEL_REGISTRY.register(lighting.Color.cluster_id)
class ColorChannel(ZigbeeChannel):
"""Color channel."""
CAPABILITIES_COLOR_XY = 0x08
CAPABILITIES_COLOR_TEMP = 0x10
UNSUPPORTED_ATTRIBUTE = 0x86
REPORT_CONFIG = (
{"attr": "current_x", "config": REPORT_CONFIG_DEFAULT},
{"attr": "current_y", "config": REPORT_CONFIG_DEFAULT},
{"attr": "color_temperature", "config": REPORT_CONFIG_DEFAULT},
)
MAX_MIREDS: int = 500
MIN_MIREDS: int = 153
ZCL_INIT_ATTRS = {
"color_mode": False,
"color_temp_physical_min": True,
"color_temp_physical_max": True,
"color_capabilities": True,
"color_loop_active": False,
}
@property
def color_capabilities(self) -> int:
"""Return color capabilities of the light."""
with suppress(KeyError):
return self.cluster["color_capabilities"]
if self.cluster.get("color_temperature") is not None:
return self.CAPABILITIES_COLOR_XY | self.CAPABILITIES_COLOR_TEMP
return self.CAPABILITIES_COLOR_XY
@property
def color_mode(self) -> int | None:
"""Return cached value of the color_mode attribute."""
return self.cluster.get("color_mode")
@property
def color_loop_active(self) -> int | None:
"""Return cached value of the color_loop_active attribute."""
return self.cluster.get("color_loop_active")
@property
def color_temperature(self) -> int | None:
"""Return cached value of color temperature."""
return self.cluster.get("color_temperature")
@property
def current_x(self) -> int | None:
"""Return cached value of the current_x attribute."""
return self.cluster.get("current_x")
@property
def current_y(self) -> int | None:
"""Return cached value of the current_y attribute."""
return self.cluster.get("current_y")
@property
def min_mireds(self) -> int:
"""Return the coldest color_temp that this channel supports."""
return self.cluster.get("color_temp_physical_min", self.MIN_MIREDS)
@property
def max_mireds(self) -> int:
"""Return the warmest color_temp that this channel supports."""
return self.cluster.get("color_temp_physical_max", self.MAX_MIREDS)
| [
"contextlib.suppress"
] | [((1482, 1500), 'contextlib.suppress', 'suppress', (['KeyError'], {}), '(KeyError)\n', (1490, 1500), False, 'from contextlib import suppress\n')] |
"""Classes to represent Packet Filter's queueing schedulers and statistics."""
import pf._struct
from pf._base import PFObject
from pf.constants import *
from pf._utils import rate2str
__all__ = ["ServiceCurve",
"FlowQueue",
"PFQueue",
"PFQueueStats"]
class ServiceCurve(PFObject):
""" """
_struct_type = pf._struct.pf_queue_scspec
def __init__(self, bandwidth, burst=0, time=0):
""" """
if isinstance(bandwidth, pf._struct.pf_queue_scspec):
self._from_struct(bandwidth)
else:
self.bandwidth = bandwidth
self.burst = burst
self.time = time
def _from_struct(self, sc):
""" """
self.bandwidth = self._get_bandwidth(sc.m2)
self.burst = self._get_bandwidth(sc.m1)
self.time = sc.d
def _to_struct(self):
""" """
sc = pf._struct.pf_queue_scspec()
if (isinstance(self.bandwidth, basestring) and
self.bandwidth.endswith("%")):
sc.m2.percent = int(self.bandwidth[:-1])
else:
sc.m2.absolute = self.bandwidth
if (isinstance(self.burst, basestring) and
self.burst.endswith("%")):
sc.m1.percent = int(self.burst[:-1])
else:
sc.m1.absolute = self.burst
sc.d = self.time
return sc
def _get_bandwidth(self, bw):
""" """
return "{}%".format(bw.percent) if bw.percent else bw.absolute
def _str_bandwidth(self, bw):
""" """
return bw if isinstance(bw, basestring) else rate2str(bw)
def _to_string(self):
""" """
s = self._str_bandwidth(self.bandwidth)
if self.time:
s += " burst {}".format(self._str_bandwidth(self.burst))
s += " for {.time}ms".format(self)
return s
class FlowQueue(PFObject):
""" """
_struct_type = pf._struct.pf_queue_fqspec
def __init__(self, flows, quantum=0, target=0, interval=0):
""" """
if isinstance(flows, pf._struct.pf_queue_fqspec):
self._from_struct(flows)
else:
self.flows = flows
self.quantum = quantum
self.target = target * 1000000
self.interval = interval * 1000000
def _from_struct(self, fq):
""" """
self.flows = fq.flows
self.quantum = fq.quantum
self.target = fq.target
self.interval = fq.interval
def _to_struct(self):
""" """
fq = pf._struct.pf_queue_fqspec()
fq.flows = self.flows
fq.quantum = self.quantum
fq.target = self.target
fq.interval = self.interval
return fq
def _to_string(self):
""" """
s = "flows {.flows}".format(self)
if self.quantum:
s += " quantum {.quantum}".format(self)
if self.interval:
s += " interval {}ms".format(self.interval / 1000000)
if self.target:
s += " target {}ms".format(self.target / 1000000)
return s
class PFQueue(PFObject):
""" """
_struct_type = pf._struct.pf_queuespec
def __init__(self, queue=None, **kw):
""" """
if isinstance(queue, basestring):
queue = pf._struct.pf_queuespec(qname=queue, qlimit=DEFAULT_QLIMIT)
elif queue is None:
queue = pf._struct.pf_queuespec()
super(PFQueue, self).__init__(queue, **kw)
self.stats = PFQueueStats()
def _from_struct(self, q):
""" """
self.qname = q.qname
self.parent = q.parent
self.ifname = q.ifname
self.flags = q.flags
self.qlimit = q.qlimit
self.qid = q.qid
self.parent_qid = q.parent_qid
self.realtime = ServiceCurve(q.realtime)
self.linkshare = ServiceCurve(q.linkshare)
self.upperlimit = ServiceCurve(q.upperlimit)
self.flowqueue = FlowQueue(q.flowqueue)
def _to_struct(self):
""" """
q = pf._struct.pf_queuespec()
q.qname = self.qname
q.parent = self.parent
q.ifname = self.ifname
q.flags = self.flags
q.qlimit = self.qlimit
q.qid = self.qid
q.parent_qid = self.parent_qid
q.realtime = self.realtime._to_struct()
q.linkshare = self.linkshare._to_struct()
q.upperlimit = self.upperlimit._to_struct()
q.flowqueue = self.flowqueue._to_struct()
return q
def _to_string(self):
""" """
s = "queue {.qname}".format(self)
if self.parent and not self.parent.startswith("_"):
s += " parent {.parent}".format(self)
elif self.ifname:
s += " on {.ifname}".format(self)
if self.flags & PFQS_FLOWQUEUE:
s += " {.flowqueue}".format(self)
if self.linkshare.bandwidth or self.linkshare.burst:
s += " bandwidth {}".format(self.linkshare)
if self.realtime.bandwidth:
s += ", min {}".format(self.realtime)
if self.upperlimit.bandwidth:
s += ", max {}".format(self.upperlimit)
if self.flags & PFQS_DEFAULT:
s += " default"
if self.qlimit:
s += " qlimit {.qlimit}".format(self)
return s
class PFQueueStats(PFObject):
""" """
_struct_type = pf._struct.hfsc_class_stats
def __init__(self, stats=None):
""" """
if stats is None:
stats = pf._struct.hfsc_class_stats()
super(PFQueueStats, self).__init__(stats)
def _from_struct(self, s):
""" """
self.qlength = s.qlength
self.qlimit = s.qlimit
self.packets = (s.xmit_cnt.packets, s.drop_cnt.packets)
self.bytes = (s.xmit_cnt.bytes, s.drop_cnt.bytes)
def _to_string(self):
""" """
s = " [ pkts: {0.packets[0]:10} bytes: {0.bytes[0]:10} " + \
"dropped pkts: {0.packets[1]:6} bytes: {0.bytes[1]:6} ]\n" + \
" [ qlength: {0.qlength:3}/{0.qlimit:3} ]"
return s.format(self)
| [
"pf._utils.rate2str"
] | [((1594, 1606), 'pf._utils.rate2str', 'rate2str', (['bw'], {}), '(bw)\n', (1602, 1606), False, 'from pf._utils import rate2str\n')] |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
# Export this package's modules as members:
from ._enums import *
from .environment import *
from .environment_setting import *
from .gallery_image import *
from .get_environment import *
from .get_environment_setting import *
from .get_gallery_image import *
from .get_global_user_environment import *
from .get_global_user_operation_batch_status import *
from .get_global_user_operation_status import *
from .get_global_user_personal_preferences import *
from .get_lab import *
from .get_lab_account import *
from .get_lab_account_regional_availability import *
from .get_user import *
from .lab import *
from .lab_account import *
from .list_global_user_environments import *
from .list_global_user_labs import *
from .user import *
from ._inputs import *
from . import outputs
def _register_module():
import pulumi
from ... import _utilities
class Module(pulumi.runtime.ResourceModule):
_version = _utilities.get_semver_version()
def version(self):
return Module._version
def construct(self, name: str, typ: str, urn: str) -> pulumi.Resource:
if typ == "azure-native:labservices/v20181015:Environment":
return Environment(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:labservices/v20181015:EnvironmentSetting":
return EnvironmentSetting(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:labservices/v20181015:GalleryImage":
return GalleryImage(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:labservices/v20181015:Lab":
return Lab(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:labservices/v20181015:LabAccount":
return LabAccount(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:labservices/v20181015:User":
return User(name, pulumi.ResourceOptions(urn=urn))
else:
raise Exception(f"unknown resource type {typ}")
_module_instance = Module()
pulumi.runtime.register_resource_module("azure-native", "labservices/v20181015", _module_instance)
_register_module()
| [
"pulumi.ResourceOptions",
"pulumi.runtime.register_resource_module"
] | [((2259, 2361), 'pulumi.runtime.register_resource_module', 'pulumi.runtime.register_resource_module', (['"""azure-native"""', '"""labservices/v20181015"""', '_module_instance'], {}), "('azure-native',\n 'labservices/v20181015', _module_instance)\n", (2298, 2361), False, 'import pulumi\n'), ((1382, 1413), 'pulumi.ResourceOptions', 'pulumi.ResourceOptions', ([], {'urn': 'urn'}), '(urn=urn)\n', (1404, 1413), False, 'import pulumi\n'), ((1544, 1575), 'pulumi.ResourceOptions', 'pulumi.ResourceOptions', ([], {'urn': 'urn'}), '(urn=urn)\n', (1566, 1575), False, 'import pulumi\n'), ((1694, 1725), 'pulumi.ResourceOptions', 'pulumi.ResourceOptions', ([], {'urn': 'urn'}), '(urn=urn)\n', (1716, 1725), False, 'import pulumi\n'), ((1826, 1857), 'pulumi.ResourceOptions', 'pulumi.ResourceOptions', ([], {'urn': 'urn'}), '(urn=urn)\n', (1848, 1857), False, 'import pulumi\n'), ((1972, 2003), 'pulumi.ResourceOptions', 'pulumi.ResourceOptions', ([], {'urn': 'urn'}), '(urn=urn)\n', (1994, 2003), False, 'import pulumi\n'), ((2106, 2137), 'pulumi.ResourceOptions', 'pulumi.ResourceOptions', ([], {'urn': 'urn'}), '(urn=urn)\n', (2128, 2137), False, 'import pulumi\n')] |
'''
(c) Copyright 2013 Telefonica, I+D. Printed in Spain (Europe). All Rights
Reserved.
The copyright to the software program(s) is property of Telefonica I+D.
The program(s) may be used and or copied only with the express written
consent of Telefonica I+D or in accordance with the terms and conditions
stipulated in the agreement/contract under which the program(s) have
been supplied.
'''
from unittest import TestCase
from mock import MagicMock, patch
from commons.json_schema_validator.schema_reader import SchemaField
from commons.json_schema_validator.schema_reader import SchemaReader
from users.serializers import UserCollectionSerializer
class UserSerializerTests(TestCase):
def setUp(self):
super(UserSerializerTests, self).setUp()
mock_schema_instance = MagicMock(name='mock_schema_instance')
mock_schema_instance.return_value = [
SchemaField(name='username', field_type='string', required=True),
SchemaField(name='password', field_type='string', required=True),
SchemaField(name='is_admin', field_type='boolean', required=True, default=False)
]
mock_get_schema_fields = MagicMock(name='mock_get_schema')
mock_get_schema_fields.return_value = mock_schema_instance
# mock schema instance
schema_reader = SchemaReader()
self.patcher_validate = patch.object(schema_reader, 'validate_object') # @UndefinedVariable
self.patcher_schema = patch.object(schema_reader, # @UndefinedVariable
'get_schema_fields', mock_schema_instance)
self.patcher_schema.start()
self.patcher_validate.start()
def tearDown(self):
self.patcher_schema.stop()
self.patcher_validate.stop()
def test_deserialize_user_should_work(self):
        # We need to do the import here in order for the generic patches to work
serializer = UserCollectionSerializer(data={'username': 'user', 'password': '<PASSWORD>'})
self.assertEquals(True, serializer.is_valid(), "Serialization invalid")
def test_deserialize_user_invalid_is_admin_should_work(self):
        # We need to do the import here in order for the generic patches to work
serializer = UserCollectionSerializer(data={'username': 'user', 'password': '<PASSWORD>', 'is_admin': 'si'})
self.assertEquals(False, serializer.is_valid(), "Serialization invalid")
def test_deserialize_user_empty_user_should_give_error_invalid(self):
        # We need to do the import here in order for the generic patches to work
serializer = UserCollectionSerializer(data={'username': '', 'password': '<PASSWORD>'})
self.assertEquals(False, serializer.is_valid(), "Serialization invalid")
self.assertEquals(u"invalid",
serializer.errors['username'][0],
'Invalid error message')
def test_deserialize_user_null_user_should_give_required_error(self):
        # We need to do the import here in order for the generic patches to work
serializer = UserCollectionSerializer(data={'password': '<PASSWORD>'})
self.assertEquals(False, serializer.is_valid(), "Serialization invalid")
self.assertEquals(u"required",
serializer.errors['username'][0],
'Invalid error message')
def test_deserialize_user_large_user_ne_should_give_invalid_error(self):
        # We need to do the import here in order for the generic patches to work
serializer = UserCollectionSerializer(data={'username': 'a' * 600, 'password': '<PASSWORD>'})
self.assertEquals(False, serializer.is_valid(), "Serialization invalid")
self.assertEquals(u"invalid",
serializer.errors['username'][0],
'Invalid error message')
def test_deserialize_user_with_invalid_origins_should_give_error(self):
serializer = UserCollectionSerializer(data={'username': 'user', 'password': '<PASSWORD>', 'origins': ["????"]})
self.assertEquals(False, serializer.is_valid())
self.assertEquals(u"invalid",
serializer.errors['origins'][0],
'Invalid error message')
serializer = UserCollectionSerializer(data={'username': 'user', 'password': '<PASSWORD>', 'origins': [" tugo"]})
self.assertEquals(False, serializer.is_valid())
self.assertEquals(u"invalid",
serializer.errors['origins'][0],
'Invalid error message')
def test_deserialize_user_with_invalid_classes_should_give_error(self):
serializer = UserCollectionSerializer(data={'username': 'user', 'password': '<PASSWORD>', 'classes': ["????"]})
self.assertEquals(False, serializer.is_valid())
self.assertEquals(u"invalid",
serializer.errors['classes'][0],
'Invalid error message')
serializer = UserCollectionSerializer(data={'username': 'user', 'password': '<PASSWORD>', 'classes': [" sms"]})
self.assertEquals(False, serializer.is_valid())
self.assertEquals(u"invalid",
serializer.errors['classes'][0],
'Invalid error message')
def test_deserialize_user_invalid_username_should_give_error(self):
        # We need to do the import here in order for the generic patches to work
serializer = UserCollectionSerializer(data={'username': 'User.user', 'password': '<PASSWORD>'})
self.assertEquals(False, serializer.is_valid(), "Serialization invalid")
self.assertEquals(u"invalid",
serializer.errors['username'][0],
'Invalid error message')
def test_deserialize_user_invalid_is_admin_should_give_error(self):
        # We need to do the import here in order for the generic patches to work
serializer = UserCollectionSerializer(data={'username': 'usera', 'password': '<PASSWORD>', 'is_admin': 0})
self.assertEquals(False, serializer.is_valid(), "Serialization invalid")
self.assertEquals(u"invalid",
serializer.errors['is_admin'][0],
'Invalid error message')
| [
"commons.json_schema_validator.schema_reader.SchemaField",
"users.serializers.UserCollectionSerializer",
"mock.patch.object",
"commons.json_schema_validator.schema_reader.SchemaReader",
"mock.MagicMock"
] | [((790, 828), 'mock.MagicMock', 'MagicMock', ([], {'name': '"""mock_schema_instance"""'}), "(name='mock_schema_instance')\n", (799, 828), False, 'from mock import MagicMock, patch\n'), ((1156, 1189), 'mock.MagicMock', 'MagicMock', ([], {'name': '"""mock_get_schema"""'}), "(name='mock_get_schema')\n", (1165, 1189), False, 'from mock import MagicMock, patch\n'), ((1312, 1326), 'commons.json_schema_validator.schema_reader.SchemaReader', 'SchemaReader', ([], {}), '()\n', (1324, 1326), False, 'from commons.json_schema_validator.schema_reader import SchemaReader\n'), ((1359, 1405), 'mock.patch.object', 'patch.object', (['schema_reader', '"""validate_object"""'], {}), "(schema_reader, 'validate_object')\n", (1371, 1405), False, 'from mock import MagicMock, patch\n'), ((1458, 1528), 'mock.patch.object', 'patch.object', (['schema_reader', '"""get_schema_fields"""', 'mock_schema_instance'], {}), "(schema_reader, 'get_schema_fields', mock_schema_instance)\n", (1470, 1528), False, 'from mock import MagicMock, patch\n'), ((1899, 1976), 'users.serializers.UserCollectionSerializer', 'UserCollectionSerializer', ([], {'data': "{'username': 'user', 'password': '<PASSWORD>'}"}), "(data={'username': 'user', 'password': '<PASSWORD>'})\n", (1923, 1976), False, 'from users.serializers import UserCollectionSerializer\n'), ((2211, 2310), 'users.serializers.UserCollectionSerializer', 'UserCollectionSerializer', ([], {'data': "{'username': 'user', 'password': '<PASSWORD>', 'is_admin': 'si'}"}), "(data={'username': 'user', 'password': '<PASSWORD>',\n 'is_admin': 'si'})\n", (2235, 2310), False, 'from users.serializers import UserCollectionSerializer\n'), ((2550, 2623), 'users.serializers.UserCollectionSerializer', 'UserCollectionSerializer', ([], {'data': "{'username': '', 'password': '<PASSWORD>'}"}), "(data={'username': '', 'password': '<PASSWORD>'})\n", (2574, 2623), False, 'from users.serializers import UserCollectionSerializer\n'), ((3016, 3073), 'users.serializers.UserCollectionSerializer', 'UserCollectionSerializer', ([], {'data': "{'password': '<PASSWORD>'}"}), "(data={'password': '<PASSWORD>'})\n", (3040, 3073), False, 'from users.serializers import UserCollectionSerializer\n'), ((3470, 3555), 'users.serializers.UserCollectionSerializer', 'UserCollectionSerializer', ([], {'data': "{'username': 'a' * 600, 'password': '<PASSWORD>'}"}), "(data={'username': 'a' * 600, 'password': '<PASSWORD>'}\n )\n", (3494, 3555), False, 'from users.serializers import UserCollectionSerializer\n'), ((3879, 3981), 'users.serializers.UserCollectionSerializer', 'UserCollectionSerializer', ([], {'data': "{'username': 'user', 'password': '<PASSWORD>', 'origins': ['????']}"}), "(data={'username': 'user', 'password': '<PASSWORD>',\n 'origins': ['????']})\n", (3903, 3981), False, 'from users.serializers import UserCollectionSerializer\n'), ((4203, 4306), 'users.serializers.UserCollectionSerializer', 'UserCollectionSerializer', ([], {'data': "{'username': 'user', 'password': '<PASSWORD>', 'origins': [' tugo']}"}), "(data={'username': 'user', 'password': '<PASSWORD>',\n 'origins': [' tugo']})\n", (4227, 4306), False, 'from users.serializers import UserCollectionSerializer\n'), ((4605, 4707), 'users.serializers.UserCollectionSerializer', 'UserCollectionSerializer', ([], {'data': "{'username': 'user', 'password': '<PASSWORD>', 'classes': ['????']}"}), "(data={'username': 'user', 'password': '<PASSWORD>',\n 'classes': ['????']})\n", (4629, 4707), False, 'from users.serializers import UserCollectionSerializer\n'), ((4929, 5031), 
'users.serializers.UserCollectionSerializer', 'UserCollectionSerializer', ([], {'data': "{'username': 'user', 'password': '<PASSWORD>', 'classes': [' sms']}"}), "(data={'username': 'user', 'password': '<PASSWORD>',\n 'classes': [' sms']})\n", (4953, 5031), False, 'from users.serializers import UserCollectionSerializer\n'), ((5392, 5478), 'users.serializers.UserCollectionSerializer', 'UserCollectionSerializer', ([], {'data': "{'username': 'User.user', 'password': '<PASSWORD>'}"}), "(data={'username': 'User.user', 'password':\n '<PASSWORD>'})\n", (5416, 5478), False, 'from users.serializers import UserCollectionSerializer\n'), ((5865, 5962), 'users.serializers.UserCollectionSerializer', 'UserCollectionSerializer', ([], {'data': "{'username': 'usera', 'password': '<PASSWORD>', 'is_admin': 0}"}), "(data={'username': 'usera', 'password':\n '<PASSWORD>', 'is_admin': 0})\n", (5889, 5962), False, 'from users.serializers import UserCollectionSerializer\n'), ((883, 947), 'commons.json_schema_validator.schema_reader.SchemaField', 'SchemaField', ([], {'name': '"""username"""', 'field_type': '"""string"""', 'required': '(True)'}), "(name='username', field_type='string', required=True)\n", (894, 947), False, 'from commons.json_schema_validator.schema_reader import SchemaField\n'), ((957, 1021), 'commons.json_schema_validator.schema_reader.SchemaField', 'SchemaField', ([], {'name': '"""password"""', 'field_type': '"""string"""', 'required': '(True)'}), "(name='password', field_type='string', required=True)\n", (968, 1021), False, 'from commons.json_schema_validator.schema_reader import SchemaField\n'), ((1031, 1116), 'commons.json_schema_validator.schema_reader.SchemaField', 'SchemaField', ([], {'name': '"""is_admin"""', 'field_type': '"""boolean"""', 'required': '(True)', 'default': '(False)'}), "(name='is_admin', field_type='boolean', required=True, default=False\n )\n", (1042, 1116), False, 'from commons.json_schema_validator.schema_reader import SchemaField\n')] |
''' User views '''
from datetime import timedelta
from flask import request, jsonify, make_response, redirect, json, render_template
from flask_jwt_extended import (create_access_token, jwt_required)
from flask_restful import Resource
from flask_login import login_user, current_user
from sqlalchemy.exc import IntegrityError, InvalidRequestError
from src import db, api
from .models import User
from .schemas import UserSchema
class UserLoginResource(Resource):
model = User
schema = UserSchema
def get(self):
return make_response(render_template('login.html'))
def post(self):
if request.json:
data = request.json
user = self.model.query.filter(self.model.email == data['email']).first()
if user and self.model.check_password(user, data['password']):
expires = timedelta(days=365)
user = UserSchema(only=('id', 'email', 'is_admin')).dump(user).data
return make_response(
jsonify({'id': user,
'authentication_token': create_access_token(identity=user['id'], expires_delta=expires)}), 200)
else:
return make_response(jsonify({"error": {"code": 400, "msg": "No such user/wrong password."}}), 400)
else:
data = request.form
user = self.model.query.filter(self.model.email == data['email']).first()
if user and self.model.check_password(user, data['password']) and login_user(user):
return make_response(redirect('/admin/', 302))
else:
return make_response(redirect('/api/v1/login', 403))
class UserRegisterResource(Resource):
model = User
schema = UserSchema
def post(self):
data = request.json
if not data:
return make_response(jsonify({'error': 'No data'}), 400)
user = User.query.filter(User.email == data['email']).first()
if user:
return make_response(jsonify({'error': 'User already exists'}), 403)
user, errors = self.schema().load(data)
if errors:
return make_response(jsonify(errors), 400)
try:
user.set_password(data['password'])
db.session.add(user)
db.session.commit()
except (IntegrityError, InvalidRequestError) as e:
print(e)
db.session.rollback()
return make_response(jsonify(error={'code': 400 }), 400)
expires = timedelta(days=365)
return make_response(
jsonify(created_user={'id': user.id,
'user': self.schema(only=('id', 'email', 'is_admin')).dump(user).data,
'authentication_token': create_access_token(identity=user.id,
expires_delta=expires)}), 200)
api.add_resource(UserLoginResource, '/login/', endpoint='login')
api.add_resource(UserRegisterResource, '/register/', endpoint='register') | [
"flask.render_template",
"src.db.session.commit",
"flask_login.login_user",
"flask_jwt_extended.create_access_token",
"flask.redirect",
"src.db.session.add",
"src.db.session.rollback",
"datetime.timedelta",
"src.api.add_resource",
"flask.jsonify"
] | [((2986, 3050), 'src.api.add_resource', 'api.add_resource', (['UserLoginResource', '"""/login/"""'], {'endpoint': '"""login"""'}), "(UserLoginResource, '/login/', endpoint='login')\n", (3002, 3050), False, 'from src import db, api\n'), ((3051, 3124), 'src.api.add_resource', 'api.add_resource', (['UserRegisterResource', '"""/register/"""'], {'endpoint': '"""register"""'}), "(UserRegisterResource, '/register/', endpoint='register')\n", (3067, 3124), False, 'from src import db, api\n'), ((2513, 2532), 'datetime.timedelta', 'timedelta', ([], {'days': '(365)'}), '(days=365)\n', (2522, 2532), False, 'from datetime import timedelta\n'), ((557, 586), 'flask.render_template', 'render_template', (['"""login.html"""'], {}), "('login.html')\n", (572, 586), False, 'from flask import request, jsonify, make_response, redirect, json, render_template\n'), ((2258, 2278), 'src.db.session.add', 'db.session.add', (['user'], {}), '(user)\n', (2272, 2278), False, 'from src import db, api\n'), ((2291, 2310), 'src.db.session.commit', 'db.session.commit', ([], {}), '()\n', (2308, 2310), False, 'from src import db, api\n'), ((854, 873), 'datetime.timedelta', 'timedelta', ([], {'days': '(365)'}), '(days=365)\n', (863, 873), False, 'from datetime import timedelta\n'), ((1507, 1523), 'flask_login.login_user', 'login_user', (['user'], {}), '(user)\n', (1517, 1523), False, 'from flask_login import login_user, current_user\n'), ((1859, 1888), 'flask.jsonify', 'jsonify', (["{'error': 'No data'}"], {}), "({'error': 'No data'})\n", (1866, 1888), False, 'from flask import request, jsonify, make_response, redirect, json, render_template\n'), ((2015, 2056), 'flask.jsonify', 'jsonify', (["{'error': 'User already exists'}"], {}), "({'error': 'User already exists'})\n", (2022, 2056), False, 'from flask import request, jsonify, make_response, redirect, json, render_template\n'), ((2163, 2178), 'flask.jsonify', 'jsonify', (['errors'], {}), '(errors)\n', (2170, 2178), False, 'from flask import request, jsonify, make_response, redirect, json, render_template\n'), ((2403, 2424), 'src.db.session.rollback', 'db.session.rollback', ([], {}), '()\n', (2422, 2424), False, 'from src import db, api\n'), ((1217, 1289), 'flask.jsonify', 'jsonify', (["{'error': {'code': 400, 'msg': 'No such user/wrong password.'}}"], {}), "({'error': {'code': 400, 'msg': 'No such user/wrong password.'}})\n", (1224, 1289), False, 'from flask import request, jsonify, make_response, redirect, json, render_template\n'), ((1562, 1586), 'flask.redirect', 'redirect', (['"""/admin/"""', '(302)'], {}), "('/admin/', 302)\n", (1570, 1586), False, 'from flask import request, jsonify, make_response, redirect, json, render_template\n'), ((1643, 1673), 'flask.redirect', 'redirect', (['"""/api/v1/login"""', '(403)'], {}), "('/api/v1/login', 403)\n", (1651, 1673), False, 'from flask import request, jsonify, make_response, redirect, json, render_template\n'), ((2458, 2486), 'flask.jsonify', 'jsonify', ([], {'error': "{'code': 400}"}), "(error={'code': 400})\n", (2465, 2486), False, 'from flask import request, jsonify, make_response, redirect, json, render_template\n'), ((2755, 2815), 'flask_jwt_extended.create_access_token', 'create_access_token', ([], {'identity': 'user.id', 'expires_delta': 'expires'}), '(identity=user.id, expires_delta=expires)\n', (2774, 2815), False, 'from flask_jwt_extended import create_access_token, jwt_required\n'), ((1090, 1153), 'flask_jwt_extended.create_access_token', 'create_access_token', ([], {'identity': "user['id']", 'expires_delta': 
'expires'}), "(identity=user['id'], expires_delta=expires)\n", (1109, 1153), False, 'from flask_jwt_extended import create_access_token, jwt_required\n')] |
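A hypothetical smoke test for the two endpoints registered above, assuming the API is mounted under /api/v1/ (as the redirect target in UserLoginResource suggests) and that a development server is listening on localhost:5000; host, port, and credentials are invented.
import requests

BASE = "http://localhost:5000/api/v1"  # assumed mount point and port

creds = {"email": "user@example.com", "password": "s3cret"}

# Register, then log in with the same credentials.
r = requests.post(BASE + "/register/", json=creds)
print(r.status_code, r.json())

r = requests.post(BASE + "/login/", json=creds)
print(r.status_code, r.json())  # expects an id and an authentication_token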
import numpy as np
from sklearn.model_selection import RandomizedSearchCV, GridSearchCV
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import KFold
import scipy.stats as sts
import xgboost as xgb
from xiter import *
import pandas as pd
import argparse
from datetime import datetime
def timer(start_time=None):
if not start_time:
start_time = datetime.now()
return start_time
elif start_time:
thour, temp_sec = divmod((datetime.now() - start_time).total_seconds(), 3600)
tmin, tsec = divmod(temp_sec, 60)
print('\n Time taken: %i hours %i minutes and %s seconds.' % (thour, tmin, round(tsec, 2)))
parser=argparse.ArgumentParser()
parser.add_argument("--end",type=float,default=100000.,help='end ratio')
parser.add_argument("--save",type=str,default="test_",help='save name')
parser.add_argument("--network",type=str,default="rnn",help='network name on symbols/')
parser.add_argument("--right",type=str,default="/scratch/yjdata/gluon100_img",help='which train sample (qq,gg,zq,zg)')
parser.add_argument("--pt",type=int,default=200,help='pt range pt~pt*1.1')
parser.add_argument("--ptmin",type=float,default=0.,help='pt range pt~pt*1.1')
parser.add_argument("--ptmax",type=float,default=2.,help='pt range pt~pt*1.1')
parser.add_argument("--epochs",type=int,default=10,help='num epochs')
parser.add_argument("--batch_size",type=int,default=100000,help='batch_size')
parser.add_argument("--loss",type=str,default="categorical_crossentropy",help='network name on symbols/')
parser.add_argument("--gpu",type=int,default=0,help='gpu number')
parser.add_argument("--isz",type=int,default=0,help='0 or z or not')
parser.add_argument("--eta",type=float,default=0.,help='end ratio')
parser.add_argument("--etabin",type=float,default=1,help='end ratio')
parser.add_argument("--unscale",type=int,default=0,help='end ratio')
args=parser.parse_args()
import os
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]=str(args.gpu)
batch_size=args.batch_size
params = {
'max_depth': sts.randint(1,6),
'learning_rate': sts.uniform(0.0010,0.500),
'n_estimators': sts.randint(10,101)
}
model=xgb.XGBClassifier(objective='binary:logistic',tree_method="gpu_hist")
if(args.isz==1):
if(args.etabin==1):
loaded=np.load("zqmixed{}pteta.npz".format(args.pt))
print("zqmixed{}pteta.npz".format(args.pt))
else:
loaded=np.load("zqmixed{}pt.npz".format(args.pt))
print("zqmixed{}pt.npz".format(args.pt))
elif(args.isz==-1):
if(args.etabin==1):
loaded=np.load("qqmixed{}pteta.npz".format(args.pt))
print("qqmixed{}pteta.npz".format(args.pt))
else:
loaded=np.load("qqmixed{}pt.npz".format(args.pt))
print("qqmixed{}pt.npz".format(args.pt))
elif(args.isz==0):
if(args.etabin==1):
if(args.unscale==1):
loaded=np.load("unscalemixed{}pteta.npz".format(args.pt))
else:
loaded=np.load("mixed{}pteta.npz".format(args.pt))
print("etabin 1")
else:
if(args.unscale==1):
loaded=np.load("unscalemixed{}pt.npz".format(args.pt))
else:
loaded=np.load("mixed{}pt.npz".format(args.pt))
print("etabin 2.4")
data=loaded["bdtset"][:,:5]
label=loaded["label"]
line=int(30000)
endline=int(40000)
if(len(label)<40000):
line=int(len(label)*3./4.)
endline=len(label)
X=data[0:line]
vx=data[line:endline]
Y=label[0:line]
vy=label[line:endline]
Y=np.array(Y)[:,0]
folds = 3
param_comb = 100
skf = KFold(n_splits=folds, shuffle = True, random_state = 173)
#skf = StratifiedKFold(n_splits=folds, shuffle = True, random_state = 1001)
random_search = RandomizedSearchCV(model, param_distributions=params, n_iter=param_comb, scoring='log_loss', n_jobs=6, cv=skf.split(X,Y), verbose=3, random_state=173 )
# Here we go
start_time = timer(None) # timing starts from this point for "start_time" variable
random_search.fit(X, Y)
timer(start_time)
#print(random_search.predict(X[:10]))
#print('\n All results:')
#print(random_search.cv_results_)
#print('\n Best estimator:')
#print(random_search.best_estimator_)
print('\n Best normalized gini score for %d-fold search with %d parameter combinations:' % (folds, param_comb))
print(random_search.best_score_ * 2 - 1)
#print('\n Best hyperparameters:')
#print(random_search.best_params_)
results = pd.DataFrame(random_search.cv_results_)
results.to_csv('xgb/{}-{}.csv'.format(args.save,args.pt), index=False)
#random_search.best_estimator_.save_model("bdt-{}.dat".format(args.pt))
| [
"scipy.stats.randint",
"argparse.ArgumentParser",
"scipy.stats.uniform",
"numpy.array",
"datetime.datetime.now",
"pandas.DataFrame",
"sklearn.model_selection.KFold",
"xgboost.XGBClassifier"
] | [((727, 752), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (750, 752), False, 'import argparse\n'), ((2254, 2324), 'xgboost.XGBClassifier', 'xgb.XGBClassifier', ([], {'objective': '"""binary:logistic"""', 'tree_method': '"""gpu_hist"""'}), "(objective='binary:logistic', tree_method='gpu_hist')\n", (2271, 2324), True, 'import xgboost as xgb\n'), ((3518, 3571), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': 'folds', 'shuffle': '(True)', 'random_state': '(173)'}), '(n_splits=folds, shuffle=True, random_state=173)\n', (3523, 3571), False, 'from sklearn.model_selection import KFold\n'), ((4361, 4400), 'pandas.DataFrame', 'pd.DataFrame', (['random_search.cv_results_'], {}), '(random_search.cv_results_)\n', (4373, 4400), True, 'import pandas as pd\n'), ((2124, 2141), 'scipy.stats.randint', 'sts.randint', (['(1)', '(6)'], {}), '(1, 6)\n', (2135, 2141), True, 'import scipy.stats as sts\n'), ((2167, 2190), 'scipy.stats.uniform', 'sts.uniform', (['(0.001)', '(0.5)'], {}), '(0.001, 0.5)\n', (2178, 2190), True, 'import scipy.stats as sts\n'), ((2218, 2238), 'scipy.stats.randint', 'sts.randint', (['(10)', '(101)'], {}), '(10, 101)\n', (2229, 2238), True, 'import scipy.stats as sts\n'), ((3467, 3478), 'numpy.array', 'np.array', (['Y'], {}), '(Y)\n', (3475, 3478), True, 'import numpy as np\n'), ((430, 444), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (442, 444), False, 'from datetime import datetime\n'), ((526, 540), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (538, 540), False, 'from datetime import datetime\n')] |
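A possible follow-up step, not part of the script above: score the estimator selected by the search on the held-out slice (vx, vy), reusing the roc_auc_score import from the top of the file. The label slicing mirrors the Y = np.array(Y)[:,0] convention earlier in the script.
best = random_search.best_estimator_
val_pred = best.predict_proba(vx)[:, 1]      # probability of the positive class
val_true = vy[:, 0] if vy.ndim > 1 else vy   # same label convention as Y above
print("validation AUC:", roc_auc_score(val_true, val_pred))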
import random
import json
import gym
from gym import spaces
import pandas as pd
import numpy as np
MAX_ACCOUNT_BALANCE = 2147483647
MAX_NUM_SHARES = 2147483647
MAX_SHARE_PRICE = 5000
MAX_VOLUME = 1000e8
MAX_AMOUNT = 3e10
MAX_OPEN_POSITIONS = 5
MAX_STEPS = 20000
MAX_DAY_CHANGE = 1
INITIAL_ACCOUNT_BALANCE = 10000
DATA_HIS_PERIOD = 5
# position constant
FLAT = 0 # no position
LONG = 1 # buy position
SHORT = 2 # sell position
# action constant
HOLD = 0
BUY = 1
SELL = 2
class StockTradingEnv(gym.Env):
"""A stock trading environment for OpenAI gym"""
metadata = {'render.modes': ['human']}
def __init__(self, df,show_trade=True):
super(StockTradingEnv, self).__init__()
# show the trade info
self.show_trade = show_trade
self.actions=["FLAT","LONG","SHORT"]
self.fee = 0.0005 # brokage commission
self.df = df
self.closeprices = self.df['close'].values
self.reward_range = (0, MAX_ACCOUNT_BALANCE)
# Actions of the format Buy x%, Sell x%, Hold, etc.
self.action_space = spaces.Discrete(len(self.actions))
# self.action_space = spaces.Box(
# low=np.array([0, 0]), high=np.array([3, 1]), dtype=np.float16)
# Prices contains the OHCL values for the last five prices
self.observation_space = spaces.Box(
low=0, high=1, shape=(DATA_HIS_PERIOD+1,6), dtype=np.float16)
self.history = []
def _next_observation(self):
obs = np.array([
self.df.loc[self.current_step-DATA_HIS_PERIOD:self.current_step, 'open'].values / MAX_SHARE_PRICE,
self.df.loc[self.current_step-DATA_HIS_PERIOD:self.current_step, 'high'].values / MAX_SHARE_PRICE,
self.df.loc[self.current_step-DATA_HIS_PERIOD:self.current_step, 'low'].values / MAX_SHARE_PRICE,
self.df.loc[self.current_step-DATA_HIS_PERIOD:self.current_step, 'close'].values / MAX_SHARE_PRICE,
self.df.loc[self.current_step-DATA_HIS_PERIOD:self.current_step, 'volume'].values / MAX_NUM_SHARES,
])
# Append additional data and scale each value to between 0-1
obs = np.append(obs,[[self.balance / MAX_ACCOUNT_BALANCE,
self.max_net_worth / MAX_ACCOUNT_BALANCE,
self.shares_held / MAX_NUM_SHARES,
self.cost_basis / MAX_SHARE_PRICE,
self.total_shares_sold / MAX_NUM_SHARES,
self.total_sales_value / (MAX_NUM_SHARES * MAX_SHARE_PRICE)]],axis=0)
return obs
def _take_action(self, action):
# Set the current price to a random price within the time step
# current_price = random.uniform(
# self.df.loc[self.current_step, "open"], self.df.loc[self.current_step, "close"])
# Set the current price to the last close price
self.close_price = self.df.loc[self.current_step,"close"]
amount = 0.5 #the old version has this variable, so reserve
# action comes from the agent
# 1 buy, 2 sell, 0 hold
# single position can be opened per trade
# valid action sequence would be
# LONG : buy - hold - hold - sell
# SHORT : sell - hold - hold - buy
# invalid action sequence is just considered hold
# (e.g.) "buy - buy" would be considred "buy - hold"
self.action = HOLD #hold
if action == BUY: #buy
if self.position == FLAT: # if previous position was flat
self.position = LONG #update position to long
self.action = BUY # record action as buy
self.entry_price = self.close_price
# Buy amount % of balance in shares
total_possible = int(self.balance / self.close_price)
shares_bought = int(total_possible * amount)//100 *100
self.krw_balance = shares_bought * self.entry_price # buy balance
commission = round(self.fee * self.krw_balance,2) # commission fee
self.shares_held = shares_bought
self.balance -= self.krw_balance-commission
#self.cost_basis = (prev_cost + additional_cost) / (self.shares_held + shares_bought)
elif self.position == SHORT: # if previous position was short
self.position = FLAT # update position to flat
self.action = BUY # record action as buy
self.exit_price = self.close_price
self.reward += ((self.entry_price - self.exit_price) / self.exit_price + 1) * (
1 - self.fee) ** 2 - 1 # calculate reward
#self.krw_balance = self.krw_balance * (1.0 + self.reward) # evaluate cumulative return in krw-won
                self.balance += round(self.krw_balance * (1.0 + self.reward),2) # calculate the total balance
self.n_short += 1 # record number of short
self.total_shares_sold += self.shares_held
self.total_sales_value += self.shares_held * self.close_price
self.entry_price = 0 # clear entry price
                self.shares_held = 0 # clear the shares held
elif action == SELL:
if self.position == FLAT:
self.position = SHORT
self.action = SELL
self.entry_price = self.close_price
# Sell amount % of shares held
total_possible = int(self.balance / self.close_price)
self.shares_held = int(total_possible * amount)//100 *100
self.krw_balance = self.shares_held * self.entry_price # buy balance
commission = round(self.fee * self.krw_balance,2) # commission fee
self.balance -= self.krw_balance-commission
elif self.position == LONG:
self.position = FLAT
self.action = SELL
self.exit_price = self.close_price
self.reward += ((self.exit_price - self.entry_price) / self.entry_price + 1) * (1 - self.fee) ** 2 - 1
#self.krw_balance = self.krw_balance * (1.0 + self.reward)
self.balance += round(self.krw_balance*(1.0+self.reward),2)
self.n_long += 1
self.total_shares_buy += self.shares_held
self.total_buys_value += self.shares_held * self.close_price
self.shares_held = 0
self.entry_price = 0
# [coin + krw_won] total value evaluated in krw won
if (self.position == LONG):
temp_reward = ((self.close_price - self.entry_price) / self.entry_price + 1) * (
1 - self.fee) ** 2 - 1
new_portfolio = self.krw_balance * (1.0 + temp_reward)
elif (self.position == SHORT):
temp_reward = ((self.entry_price - self.close_price) / self.close_price + 1) * (
1 - self.fee) ** 2 - 1
new_portfolio = self.krw_balance * (1.0 + temp_reward)
else:
temp_reward = 0
new_portfolio = 0
self.net_worth = self.balance + new_portfolio
if self.net_worth > self.max_net_worth:
self.max_net_worth = self.net_worth
if self.shares_held == 0:
self.cost_basis = 0
self.portfolio = round(new_portfolio,2)
def step(self, action):
# Execute one time step within the environment
self._take_action(action)
done = False
self.current_step += 1
delay_modifier = (self.current_step / MAX_STEPS)
# profits
#reward = self.net_worth - INITIAL_ACCOUNT_BALANCE
#reward = 1 if reward > 0 else -100
if self.net_worth <= 0:
done = True
if self.current_step > len(self.df.loc[:, 'open'].values) - 1:
self.current_step = DATA_HIS_PERIOD # loop training
# when loop training, then clear the history
self.action = HOLD
self.position = FLAT
self.balance = INITIAL_ACCOUNT_BALANCE
self.net_worth = INITIAL_ACCOUNT_BALANCE
self.max_net_worth = INITIAL_ACCOUNT_BALANCE
self.krw_balance = 0
self.reward = 0
self.portfolio = 0
self.shares_held = 0
self.cost_basis = 0
self.total_shares_buy = 0
self.total_buys_value = 0
self.total_shares_sold = 0
self.total_sales_value = 0
self.n_long = 0
self.n_short = 0
self.history=[]
# done = True
if (self.show_trade and self.current_step % 1 == 0):
print("Tick: {0}/ Portfolio (krw-won): {1}, balance: {2}".format(self.current_step, self.portfolio,self.net_worth))
print("Long: {0}/ Short: {1}".format(self.n_long, self.n_short))
# save the history data
self.history.append([
self.action,
self.position,
self.current_step,
self.close_price,
self.krw_balance,
self.balance,
self.max_net_worth,
self.shares_held,
self.portfolio,
self.total_shares_buy,
self.total_buys_value,
self.total_shares_sold,
self.total_sales_value])
#self.history.append((self.action, self.current_step, self.closingPrice, self.portfolio, self.reward))
obs = self._next_observation()
if (self.current_step > (self.df.shape[0]) - 1):
self.done = True
self.reward = self.get_profit() # return reward at end of the game
return obs, self.net_worth, done, {'portfolio': np.array([self.portfolio]),
"history": self.history,
"n_trades": {'long': self.n_long, 'short': self.n_short}}
#return obs, reward, done, {}
    def get_profit(self):
        if(self.position == LONG):
            profit = ((self.close_price - self.entry_price)/self.entry_price + 1)*(1-self.fee)**2 - 1
        elif(self.position == SHORT):
            profit = ((self.entry_price - self.close_price)/self.close_price + 1)*(1-self.fee)**2 - 1
        else:
            profit = 0
        return profit
def reset(self, new_df=None):
# Reset the state of the environment to an initial state
self.action = HOLD
self.position = FLAT
self.balance = INITIAL_ACCOUNT_BALANCE
self.net_worth = INITIAL_ACCOUNT_BALANCE
self.max_net_worth = INITIAL_ACCOUNT_BALANCE
self.krw_balance = 0
self.reward =0
self.portfolio =0
self.shares_held = 0
self.cost_basis = 0
self.total_shares_buy =0
self.total_buys_value=0
self.total_shares_sold = 0
self.total_sales_value = 0
self.n_long=0
self.n_short=0
self.history=[]
# pass test dataset to environment
if new_df:
self.df = new_df
# Set the current step to a random point within the data frame
# self.current_step = random.randint(
# 0, len(self.df.loc[:, 'open'].values) - 6)
# the observation include the given period history data
self.current_step = DATA_HIS_PERIOD #random.randint(DATA_HIS_PERIOD,len(self.df.loc[:,'open'].values)-1)
# for i in range(DATA_HIS_PERIOD):
# self.history.append([0.0,0.0,0.0,0.0,0.0,0.0])
return self._next_observation()
def render(self, mode='human', close=False):
# Render the environment to the screen
profit = self.net_worth - INITIAL_ACCOUNT_BALANCE
print('-'*30)
print(f'Step: {self.current_step}')
print(f'Balance: {self.balance}')
print(f'Shares held: {self.shares_held} (Total sold: {self.total_shares_sold})')
print(f'Avg cost for held shares: {self.cost_basis} (Total sales value: {self.total_sales_value})')
print(f'Net worth: {self.net_worth} (Max net worth: {self.max_net_worth})')
print(f'Profit: {profit}')
return profit
| [
"numpy.append",
"numpy.array",
"gym.spaces.Box"
] | [((1340, 1415), 'gym.spaces.Box', 'spaces.Box', ([], {'low': '(0)', 'high': '(1)', 'shape': '(DATA_HIS_PERIOD + 1, 6)', 'dtype': 'np.float16'}), '(low=0, high=1, shape=(DATA_HIS_PERIOD + 1, 6), dtype=np.float16)\n', (1350, 1415), False, 'from gym import spaces\n'), ((1501, 2047), 'numpy.array', 'np.array', (["[self.df.loc[self.current_step - DATA_HIS_PERIOD:self.current_step, 'open']\n .values / MAX_SHARE_PRICE, self.df.loc[self.current_step -\n DATA_HIS_PERIOD:self.current_step, 'high'].values / MAX_SHARE_PRICE, \n self.df.loc[self.current_step - DATA_HIS_PERIOD:self.current_step,\n 'low'].values / MAX_SHARE_PRICE, self.df.loc[self.current_step -\n DATA_HIS_PERIOD:self.current_step, 'close'].values / MAX_SHARE_PRICE, \n self.df.loc[self.current_step - DATA_HIS_PERIOD:self.current_step,\n 'volume'].values / MAX_NUM_SHARES]"], {}), "([self.df.loc[self.current_step - DATA_HIS_PERIOD:self.current_step,\n 'open'].values / MAX_SHARE_PRICE, self.df.loc[self.current_step -\n DATA_HIS_PERIOD:self.current_step, 'high'].values / MAX_SHARE_PRICE, \n self.df.loc[self.current_step - DATA_HIS_PERIOD:self.current_step,\n 'low'].values / MAX_SHARE_PRICE, self.df.loc[self.current_step -\n DATA_HIS_PERIOD:self.current_step, 'close'].values / MAX_SHARE_PRICE, \n self.df.loc[self.current_step - DATA_HIS_PERIOD:self.current_step,\n 'volume'].values / MAX_NUM_SHARES])\n", (1509, 2047), True, 'import numpy as np\n'), ((2166, 2455), 'numpy.append', 'np.append', (['obs', '[[self.balance / MAX_ACCOUNT_BALANCE, self.max_net_worth /\n MAX_ACCOUNT_BALANCE, self.shares_held / MAX_NUM_SHARES, self.cost_basis /\n MAX_SHARE_PRICE, self.total_shares_sold / MAX_NUM_SHARES, self.\n total_sales_value / (MAX_NUM_SHARES * MAX_SHARE_PRICE)]]'], {'axis': '(0)'}), '(obs, [[self.balance / MAX_ACCOUNT_BALANCE, self.max_net_worth /\n MAX_ACCOUNT_BALANCE, self.shares_held / MAX_NUM_SHARES, self.cost_basis /\n MAX_SHARE_PRICE, self.total_shares_sold / MAX_NUM_SHARES, self.\n total_sales_value / (MAX_NUM_SHARES * MAX_SHARE_PRICE)]], axis=0)\n', (2175, 2455), True, 'import numpy as np\n'), ((9930, 9956), 'numpy.array', 'np.array', (['[self.portfolio]'], {}), '([self.portfolio])\n', (9938, 9956), True, 'import numpy as np\n')] |
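A minimal driver for the environment above, sketched under the assumption that df is a pandas DataFrame with a default integer index, open/high/low/close/volume columns, and more than DATA_HIS_PERIOD rows; the CSV path and step count are made up.
import pandas as pd

df = pd.read_csv("ohlcv.csv")                 # hypothetical OHLCV file
env = StockTradingEnv(df, show_trade=False)

obs = env.reset()
for _ in range(200):                          # fixed horizon; done only triggers on bankruptcy here
    action = env.action_space.sample()       # random policy, just to exercise the API
    obs, net_worth, done, info = env.step(action)
    if done:
        break
print("final net worth:", net_worth)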
import json
import os
import pathlib
import time
from tqdm import tqdm
from aggregator import aggregate
from download import DOWNLOAD_PATH, download_files, unzip_files
from tqdm.contrib.concurrent import process_map
def main():
start = time.time()
# print("Downloading files...")
# download_files()
# print("Unzipping shapefiles...")
# unzip_files()
state_ids = []
for file in os.listdir(DOWNLOAD_PATH):
file_path = os.path.join(DOWNLOAD_PATH, file)
if os.path.isfile(file_path) and pathlib.Path(file_path).suffix == ".txt":
state_ids.append(file[file.index("BG") + 2 : file.index(".")])
# print("Computing population JSON heatmaps...")
# compute_json_heatmaps(state_ids)
print("Aggregating JSON files into one...")
    aggregate_json_files(state_ids)
end = time.time()
print(f"Done in {(end - start):0.2f}s")
def compute_json_heatmaps(state_ids):
data_files = []
for state_id in state_ids:
data_files.append(
(
state_id,
os.path.join(DOWNLOAD_PATH, f"CenPop2020_Mean_BG{state_id}.txt"),
os.path.join(DOWNLOAD_PATH, f"tl_2020_{state_id}_bg", f"tl_2020_{state_id}_bg.shp"),
)
)
process_map(create_json_for_state, data_files, max_workers=4)
def aggregate_json_files(state_ids):
with open("public/data/pop.json", "w") as f:
f.write("""{"type": "FeatureCollection", "features": [""")
# state_ids = state_ids[:2]
features = []
for state_id in tqdm(state_ids):
geojson = None
with open(os.path.join(DOWNLOAD_PATH, f"{state_id}.json")) as f:
geojson = json.load(f)
with open("public/data/pop.json", "a") as f:
f.write(json.dumps(geojson["features"])[1:-1] + ("," if state_id != state_ids[-1] else ""))
with open("public/data/pop.json", "a") as f:
f.write("]}")
def create_json_for_state(args):
return aggregate(*args, hide_output=True)
if __name__ == "__main__":
main()
| [
"tqdm.contrib.concurrent.process_map",
"os.listdir",
"pathlib.Path",
"tqdm.tqdm",
"os.path.join",
"json.dumps",
"aggregator.aggregate",
"os.path.isfile",
"json.load",
"time.time"
] | [((243, 254), 'time.time', 'time.time', ([], {}), '()\n', (252, 254), False, 'import time\n'), ((410, 435), 'os.listdir', 'os.listdir', (['DOWNLOAD_PATH'], {}), '(DOWNLOAD_PATH)\n', (420, 435), False, 'import os\n'), ((839, 850), 'time.time', 'time.time', ([], {}), '()\n', (848, 850), False, 'import time\n'), ((1265, 1326), 'tqdm.contrib.concurrent.process_map', 'process_map', (['create_json_for_state', 'data_files'], {'max_workers': '(4)'}), '(create_json_for_state, data_files, max_workers=4)\n', (1276, 1326), False, 'from tqdm.contrib.concurrent import process_map\n'), ((1554, 1569), 'tqdm.tqdm', 'tqdm', (['state_ids'], {}), '(state_ids)\n', (1558, 1569), False, 'from tqdm import tqdm\n'), ((1978, 2012), 'aggregator.aggregate', 'aggregate', (['*args'], {'hide_output': '(True)'}), '(*args, hide_output=True)\n', (1987, 2012), False, 'from aggregator import aggregate\n'), ((457, 490), 'os.path.join', 'os.path.join', (['DOWNLOAD_PATH', 'file'], {}), '(DOWNLOAD_PATH, file)\n', (469, 490), False, 'import os\n'), ((503, 528), 'os.path.isfile', 'os.path.isfile', (['file_path'], {}), '(file_path)\n', (517, 528), False, 'import os\n'), ((1689, 1701), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1698, 1701), False, 'import json\n'), ((1069, 1133), 'os.path.join', 'os.path.join', (['DOWNLOAD_PATH', 'f"""CenPop2020_Mean_BG{state_id}.txt"""'], {}), "(DOWNLOAD_PATH, f'CenPop2020_Mean_BG{state_id}.txt')\n", (1081, 1133), False, 'import os\n'), ((1151, 1238), 'os.path.join', 'os.path.join', (['DOWNLOAD_PATH', 'f"""tl_2020_{state_id}_bg"""', 'f"""tl_2020_{state_id}_bg.shp"""'], {}), "(DOWNLOAD_PATH, f'tl_2020_{state_id}_bg',\n f'tl_2020_{state_id}_bg.shp')\n", (1163, 1238), False, 'import os\n'), ((1612, 1659), 'os.path.join', 'os.path.join', (['DOWNLOAD_PATH', 'f"""{state_id}.json"""'], {}), "(DOWNLOAD_PATH, f'{state_id}.json')\n", (1624, 1659), False, 'import os\n'), ((533, 556), 'pathlib.Path', 'pathlib.Path', (['file_path'], {}), '(file_path)\n', (545, 556), False, 'import pathlib\n'), ((1776, 1807), 'json.dumps', 'json.dumps', (["geojson['features']"], {}), "(geojson['features'])\n", (1786, 1807), False, 'import json\n')] |
from PIL import Image
import os, glob
import numpy as np
from sklearn import model_selection
classes = ["car", "bycycle", "motorcycle", "pedestrian"]
num_class = len(classes)
image_size = 50
# Load the images
X = []
Y = []
for index, classlabel in enumerate(classes):
photos_dir = "./" + classlabel
files = glob.glob(photos_dir + "/*.jpg")
for i, file in enumerate(files):
if i >=237: break
image = Image.open(file)
image = image.convert("RGB")
image = image.resize((image_size, image_size))
data = np.asarray(image) / 255
X.append(data)
Y.append(index)
X = np.array(X)
Y = np.array(Y)
X_train, X_test, y_train, y_test = model_selection.train_test_split(X, Y)
xy = (X_train, X_test, y_train, y_test)
np.save("./vehicle.npy", xy) | [
"PIL.Image.open",
"sklearn.model_selection.train_test_split",
"numpy.asarray",
"numpy.array",
"numpy.save",
"glob.glob"
] | [((623, 634), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (631, 634), True, 'import numpy as np\n'), ((639, 650), 'numpy.array', 'np.array', (['Y'], {}), '(Y)\n', (647, 650), True, 'import numpy as np\n'), ((687, 725), 'sklearn.model_selection.train_test_split', 'model_selection.train_test_split', (['X', 'Y'], {}), '(X, Y)\n', (719, 725), False, 'from sklearn import model_selection\n'), ((766, 794), 'numpy.save', 'np.save', (['"""./vehicle.npy"""', 'xy'], {}), "('./vehicle.npy', xy)\n", (773, 794), True, 'import numpy as np\n'), ((311, 343), 'glob.glob', 'glob.glob', (["(photos_dir + '/*.jpg')"], {}), "(photos_dir + '/*.jpg')\n", (320, 343), False, 'import os, glob\n'), ((423, 439), 'PIL.Image.open', 'Image.open', (['file'], {}), '(file)\n', (433, 439), False, 'from PIL import Image\n'), ((547, 564), 'numpy.asarray', 'np.asarray', (['image'], {}), '(image)\n', (557, 564), True, 'import numpy as np\n')] |
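Reading the archive back is a short sketch, assuming the np.save above succeeded; the four arrays have different shapes, so NumPy stores them as an object array, which is why allow_pickle is needed on load.
import numpy as np

X_train, X_test, y_train, y_test = np.load("./vehicle.npy", allow_pickle=True)
print(X_train.shape, X_test.shape)  # e.g. (N, 50, 50, 3) image tensors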
#!/usr/bin/python
# -*- coding: iso-8859-1 -*-
# Copyright (c) 2016, <NAME> <<EMAIL>>
# All rights reserved.
# See LICENSE.txt
# Copyright (c) 2004 <NAME> (http://www.owlfish.com/)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# If you make any bug fixes or feature enhancements please let me know!
"""
Unit test cases.
"""
from __future__ import unicode_literals
import unittest
import os
import io
import logging
import logging.config
from simpletal import simpleTAL, simpleTALES
if (os.path.exists("logging.ini")):
logging.config.fileConfig("logging.ini")
else:
logging.basicConfig()
class TALAttributesTestCases(unittest.TestCase):
def setUp(self):
self.context = simpleTALES.Context()
self.context.addGlobal('test', 'testing')
self.context.addGlobal('link', 'www.owlfish.com')
self.context.addGlobal('needsQuoting', """Does "this" work?""")
self.context.addGlobal('number', 5)
self.context.addGlobal('uniQuote', 'Does "this" work?')
self.context.addGlobal('anotherdefault', {
'inhere': simpleTALES.DEFAULTVALUE
})
def _runTest_(self, txt, result, errMsg="Error"):
template = simpleTAL.compileHTMLTemplate(txt)
file = io.StringIO()
template.expand(self.context, file)
realResult = file.getvalue()
self.assertEqual(
realResult, result,
"%s - \npassed in: %s \ngot back %s \nexpected %s\n\nTemplate: %s"
% (errMsg, txt, realResult, result, template))
def testAddingAnAttribute(self):
self._runTest_(
'<html tal:attributes="link link" href="owlfish.com">Hello</html>',
'<html link="www.owlfish.com" href="owlfish.com">Hello</html>',
"Addition of attribute 'link' failed.")
def testRemovingAnAttribute(self):
self._runTest_(
'<html class="test" tal:attributes="href nothing" href="owlfish.com">Hello</html>',
'<html class="test">Hello</html>',
"Removal of attribute 'href' failed.")
def testDefaultAttribute(self):
self._runTest_(
'<html class="test" tal:attributes="href default" href="owlfish.com">Hello</html>',
'<html class="test" href="owlfish.com">Hello</html>',
"Defaulting of attribute 'href' failed.")
def testAnotherDefaultAttribute(self):
self._runTest_(
'<html class="test" tal:attributes="href anotherdefault/inhere" href="owlfish.com">Hello</html>',
'<html class="test" href="owlfish.com">Hello</html>',
"Defaulting of attribute 'href' failed.")
def testMultipleAttributes(self):
self._runTest_(
'<html old="still here" class="test" tal:attributes="href default;class nothing;new test" href="owlfish.com">Hello</html>',
'<html new="testing" old="still here" href="owlfish.com">Hello</html>',
"Setting multiple attributes at once failed.")
def testMultipleAttributesSpace(self):
self._runTest_(
'<html old="still here" class="test" tal:attributes="href default ; class string:Hello there; new test" href="owlfish.com">Hello</html>',
'<html class="Hello there" new="testing" old="still here" href="owlfish.com">Hello</html>',
"Setting multiple attributes at once, with spaces between semi-colons, failed."
)
def testMultipleAttributesEscaped(self):
self._runTest_(
'<html old="still " here" class="test" tal:attributes="href default ; class string: Semi-colon;;test;new test " href="owlfish.com">Hello</html>',
'''<html class="Semi-colon;test" new="testing" old='still " here' href="owlfish.com">Hello</html>''',
"Setting multiple attributes at once, with spaces between semi-colons, failed."
)
def testAttributeEscaping(self):
self._runTest_(
'<html existingAtt=""Testing"" tal:attributes="href needsQuoting">Hello</html>',
"""<html href='Does "this" work?' existingatt='"Testing"'>Hello</html>""",
"Escaping of new attributes failed.")
def testNumberAttributeEscaping(self):
self._runTest_(
'<html existingAtt=""Testing"" tal:attributes="href number">Hello</html>',
"""<html href="5" existingatt='"Testing"'>Hello</html>""",
"Escaping of new attributes failed.")
	def testUnicodeAttributeEscaping(self):
self._runTest_(
'<html existingAtt=""Testing"" tal:attributes="href uniQuote">Hello</html>',
"""<html href='Does "this" work?' existingatt='"Testing"'>Hello</html>""",
"Escaping of new attributes failed.")
def testOriginalAttributes(self):
self._runTest_(
'<html existingAtt=""Testing"" tal:attributes="newAtt attrs/existingatt" tal:content="attrs/existingatt">Hello</html>',
"""<html newAtt='"Testing"' existingatt='"Testing"'>"Testing"</html>""",
"Accessing existing attributes failed.")
def testMultipleOriginalAttributes(self):
self._runTest_(
'<html one="Value One" two="Value two" three="Value three" tal:attributes="four attrs/three" tal:content="attrs/one">Hello</html>',
"""<html four="Value three" one="Value One" two="Value two" three="Value three">Value One</html>""",
"Accessing multiple existing attributes failed.")
def testAmpersandEscapeInAttributes(self):
self._runTest_(
'<html existingAtt="&Testing&" tal:attributes="newAtt attrs/existingatt" tal:content="attrs/existingatt">Hello</html>',
"""<html newAtt="&Testing&" existingatt="&Testing&">&Testing&</html>""",
"Accessing existing attributes failed.")
#~ def testAttributeCase (self):
#~ self._runTest_ ('<html HREF="Testing" tal:attributes="HREF test">Hello</html>'
#~ ,"""<html href="testing">Hello</html>"""
#~ ,"HTML Attributes not treated as case insensitive.")
if __name__ == '__main__':
unittest.main()
| [
"logging.basicConfig",
"os.path.exists",
"simpletal.simpleTALES.Context",
"logging.config.fileConfig",
"simpletal.simpleTAL.compileHTMLTemplate",
"unittest.main",
"io.StringIO"
] | [((1938, 1967), 'os.path.exists', 'os.path.exists', (['"""logging.ini"""'], {}), "('logging.ini')\n", (1952, 1967), False, 'import os\n'), ((1974, 2014), 'logging.config.fileConfig', 'logging.config.fileConfig', (['"""logging.ini"""'], {}), "('logging.ini')\n", (1999, 2014), False, 'import logging\n'), ((2025, 2046), 'logging.basicConfig', 'logging.basicConfig', ([], {}), '()\n', (2044, 2046), False, 'import logging\n'), ((7558, 7573), 'unittest.main', 'unittest.main', ([], {}), '()\n', (7571, 7573), False, 'import unittest\n'), ((2142, 2163), 'simpletal.simpleTALES.Context', 'simpleTALES.Context', ([], {}), '()\n', (2161, 2163), False, 'from simpletal import simpleTAL, simpleTALES\n'), ((2635, 2669), 'simpletal.simpleTAL.compileHTMLTemplate', 'simpleTAL.compileHTMLTemplate', (['txt'], {}), '(txt)\n', (2664, 2669), False, 'from simpletal import simpleTAL, simpleTALES\n'), ((2685, 2698), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (2696, 2698), False, 'import io\n')] |
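The tests above all go through the same compile-and-expand cycle; the stand-alone sketch below shows that cycle directly, using only the simpleTAL/simpleTALES calls already exercised by the test harness (the template string and the expected output comment are illustrative).

from simpletal import simpleTAL, simpleTALES
import io

context = simpleTALES.Context()
context.addGlobal('link', 'www.owlfish.com')
template = simpleTAL.compileHTMLTemplate('<a tal:attributes="href link">site</a>')
out = io.StringIO()
template.expand(context, out)
print(out.getvalue())  # expected: <a href="www.owlfish.com">site</a>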
from pathlib import Path
from typing import List
from fasta_reader import FASTAItem, FASTAWriter, read_fasta
__all__ = ["downsample"]
def downsample(infile: Path, outfile: Path, size: int, random):
targets: List[FASTAItem] = list(read_fasta(infile))
if size > len(targets):
raise ValueError("Size is greater than the number of targets.")
targets = random.choice(targets, size, replace=False).tolist()
with FASTAWriter(outfile) as writer:
for target in targets:
writer.write_item(target.defline, target.sequence)
| [
"fasta_reader.FASTAWriter",
"fasta_reader.read_fasta"
] | [((238, 256), 'fasta_reader.read_fasta', 'read_fasta', (['infile'], {}), '(infile)\n', (248, 256), False, 'from fasta_reader import FASTAItem, FASTAWriter, read_fasta\n'), ((436, 456), 'fasta_reader.FASTAWriter', 'FASTAWriter', (['outfile'], {}), '(outfile)\n', (447, 456), False, 'from fasta_reader import FASTAItem, FASTAWriter, read_fasta\n')] |
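A minimal usage sketch for the `downsample` helper above. The `random` argument only needs a `choice(..., replace=False)` method, so a NumPy generator works; the file names are placeholders and `downsample` itself is assumed to be in scope.

from pathlib import Path
import numpy as np

rng = np.random.default_rng(42)
# keep 100 randomly chosen records, written to a new FASTA file
downsample(Path("reads.fa"), Path("reads_subset.fa"), size=100, random=rng)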
import os
from pymongo import MongoClient
from dotenv import load_dotenv
def database_entry(data):
try:
load_dotenv()
mongo_string = os.getenv('MONGODB_AUTH_URI')
client = MongoClient(mongo_string)
database = client[os.getenv('MONGODB_DB')]
col = database['users']
col.insert_one(data)
return True
except Exception as e:
print(e)
return False
if __name__ == "__main__":
pass
| [
"pymongo.MongoClient",
"os.getenv",
"dotenv.load_dotenv"
] | [((118, 131), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (129, 131), False, 'from dotenv import load_dotenv\n'), ((155, 184), 'os.getenv', 'os.getenv', (['"""MONGODB_AUTH_URI"""'], {}), "('MONGODB_AUTH_URI')\n", (164, 184), False, 'import os\n'), ((202, 227), 'pymongo.MongoClient', 'MongoClient', (['mongo_string'], {}), '(mongo_string)\n', (213, 227), False, 'from pymongo import MongoClient\n'), ((254, 277), 'os.getenv', 'os.getenv', (['"""MONGODB_DB"""'], {}), "('MONGODB_DB')\n", (263, 277), False, 'import os\n')] |
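A sketch of how `database_entry` above would be driven. The `.env` keys are the ones the function reads; the values and the document fields are placeholders.

# .env (placeholder values):
#   MONGODB_AUTH_URI=mongodb://user:password@localhost:27017
#   MONGODB_DB=appdb

# assumes database_entry from the module above is in scope
ok = database_entry({"username": "alice", "email": "alice@example.com"})
print("inserted" if ok else "insert failed")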
# -*- coding: utf-8 -*-
import unittest
from src.graph import Graph
from src.maximum_cut import maximum_cut, maximum_cut_for_bipartite_graph
class MaximumCut(unittest.TestCase):
def test_maximum_cut_for_bipartite_graphs(self):
""" Given the following bipartite graph.
(a)-----(b)
\
\----(c)
(d)-----(e)
/
(f)----/
\
\----(g)
"""
g = Graph.build(edges=[('a', 'b'), ('a', 'c'),
('d', 'e'), ('f', 'e'), ('f', 'g')],
directed=False)
(left, right) = maximum_cut_for_bipartite_graph(g)
self.assertIn(len(left), [3,4], 'either 3 or 4')
        self.assertIn(len(right), [3,4], 'either 3 or 4')
self.assertEqual(7, len(left)+len(right), 'no vertex counted twice')
def test_maximum_cut_for_larger_bipartite_graphs(self):
""" A sligthly larger graph:
(a) (c)
| \ /|
| x |
| / \ |
(b) (d)
| \ /|
| x |
| / \ |
(e) (f)
"""
g = Graph.build(edges=[('a', 'b'), ('a', 'd'), ('c', 'b'), ('c', 'd'),
('b', 'e'), ('b', 'f'), ('d', 'e'), ('d', 'f')],
directed=False)
(left, right) = maximum_cut_for_bipartite_graph(g)
self.assertIn(set(left), [set(['a', 'c', 'e', 'f']), set(['b', 'd'])])
self.assertIn(set(right), [set(['a', 'c', 'e', 'f']), set(['b', 'd'])])
self.assertNotEqual(left, right, 'not the same subsets')
def test_maximum_cut(self):
""" Given a graph:
(u)----(v)
| \ / |
| \/ |
| /\ |
| / \ |
(w)---(x)
"""
g = Graph.build(edges=[
('u', 'v'), ('u', 'w'), ('u', 'x'), ('v', 'x'),('w', 'x')],
directed=False)
(left, right) = maximum_cut(g)
expected = [{'u', 'v'}, {'w', 'x'}, {'x', 'u'}, {'w', 'v'}]
self.assertNotEqual(left, right, 'no common vertices between cuts')
self.assertIn(set(left), expected, 'should correctly split the graph')
self.assertIn(set(right), expected, 'should correctly split the graph')
def test_weighted_maximum_cut(self):
""" Given the following weighted graph.
(u)-3-(v)
| \ / |
| 5\/1 4
2 /\ |
| / \ |
(w)-6-(x)
"""
g = Graph.build(edges=[
('u', 'v', 3), ('u', 'w', 2), ('u', 'x', 5),
('v', 'x', 4),('w', 'x', 6)],
directed=False)
(left, right) = maximum_cut(g)
self.assertEqual(2, len(left), 'left should contain 2 vertices')
self.assertEqual(2, len(right), 'right should contain 2 vertices')
| [
"src.graph.Graph.build",
"src.maximum_cut.maximum_cut_for_bipartite_graph",
"src.maximum_cut.maximum_cut"
] | [((480, 579), 'src.graph.Graph.build', 'Graph.build', ([], {'edges': "[('a', 'b'), ('a', 'c'), ('d', 'e'), ('f', 'e'), ('f', 'g')]", 'directed': '(False)'}), "(edges=[('a', 'b'), ('a', 'c'), ('d', 'e'), ('f', 'e'), ('f',\n 'g')], directed=False)\n", (491, 579), False, 'from src.graph import Graph\n'), ((655, 689), 'src.maximum_cut.maximum_cut_for_bipartite_graph', 'maximum_cut_for_bipartite_graph', (['g'], {}), '(g)\n', (686, 689), False, 'from src.maximum_cut import maximum_cut, maximum_cut_for_bipartite_graph\n'), ((1161, 1296), 'src.graph.Graph.build', 'Graph.build', ([], {'edges': "[('a', 'b'), ('a', 'd'), ('c', 'b'), ('c', 'd'), ('b', 'e'), ('b', 'f'), (\n 'd', 'e'), ('d', 'f')]", 'directed': '(False)'}), "(edges=[('a', 'b'), ('a', 'd'), ('c', 'b'), ('c', 'd'), ('b',\n 'e'), ('b', 'f'), ('d', 'e'), ('d', 'f')], directed=False)\n", (1172, 1296), False, 'from src.graph import Graph\n'), ((1372, 1406), 'src.maximum_cut.maximum_cut_for_bipartite_graph', 'maximum_cut_for_bipartite_graph', (['g'], {}), '(g)\n', (1403, 1406), False, 'from src.maximum_cut import maximum_cut, maximum_cut_for_bipartite_graph\n'), ((1825, 1924), 'src.graph.Graph.build', 'Graph.build', ([], {'edges': "[('u', 'v'), ('u', 'w'), ('u', 'x'), ('v', 'x'), ('w', 'x')]", 'directed': '(False)'}), "(edges=[('u', 'v'), ('u', 'w'), ('u', 'x'), ('v', 'x'), ('w',\n 'x')], directed=False)\n", (1836, 1924), False, 'from src.graph import Graph\n'), ((1969, 1983), 'src.maximum_cut.maximum_cut', 'maximum_cut', (['g'], {}), '(g)\n', (1980, 1983), False, 'from src.maximum_cut import maximum_cut, maximum_cut_for_bipartite_graph\n'), ((2533, 2648), 'src.graph.Graph.build', 'Graph.build', ([], {'edges': "[('u', 'v', 3), ('u', 'w', 2), ('u', 'x', 5), ('v', 'x', 4), ('w', 'x', 6)]", 'directed': '(False)'}), "(edges=[('u', 'v', 3), ('u', 'w', 2), ('u', 'x', 5), ('v', 'x', \n 4), ('w', 'x', 6)], directed=False)\n", (2544, 2648), False, 'from src.graph import Graph\n'), ((2712, 2726), 'src.maximum_cut.maximum_cut', 'maximum_cut', (['g'], {}), '(g)\n', (2723, 2726), False, 'from src.maximum_cut import maximum_cut, maximum_cut_for_bipartite_graph\n')] |
from pymongo import MongoClient
def displayGroup(results):
for result in results:
print (result)
def firstIsALastIsVowel(collection):
key = {'first' : True, "last" : True}
cond = {'first' : 'a', 'last' :
{'$in' : ["a","e","i","o","u"]}}
initial = {'count' : 0}
reduce = "function (obj, prev) { prev.count++; }"
results = collection.group(key, cond, initial, reduce)
print ("\n\n'A' words grouped by first and last" + \
" letter that end with a vowel:")
displayGroup(results)
def firstLetterTotals(collection):
key = {'first' : True}
cond = {}
initial = {'vowels' : 0, 'cons' : 0}
reduce = "function (obj, prev) { " + \
"prev.vowels += obj.stats.vowels; " + \
"prev.cons += obj.stats.consonants; " + \
"}"
finalize = "function (obj) { " + \
"obj.total = obj.vowels + obj.cons; " + \
"}"
results = collection.group(key, cond, initial, reduce, finalize)
print ("\n\nWords grouped by first letter " + \
"with totals:")
displayGroup(results)
if __name__=="__main__":
mongo = MongoClient('mongodb://localhost:27017/')
db = mongo['words']
collection = db['word_stats']
firstIsALastIsVowel(collection)
firstLetterTotals(collection) | [
"pymongo.MongoClient"
] | [((1199, 1240), 'pymongo.MongoClient', 'MongoClient', (['"""mongodb://localhost:27017/"""'], {}), "('mongodb://localhost:27017/')\n", (1210, 1240), False, 'from pymongo import MongoClient\n')] |
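`Collection.group()` as used above was removed in MongoDB 4.2 / PyMongo 4; a rough aggregation-pipeline equivalent of `firstLetterTotals` is sketched below, keeping the same database, collection and field layout the code above assumes.

from pymongo import MongoClient

client = MongoClient('mongodb://localhost:27017/')
collection = client['words']['word_stats']
pipeline = [
    {'$group': {'_id': '$first',
                'vowels': {'$sum': '$stats.vowels'},
                'cons': {'$sum': '$stats.consonants'}}},
    {'$addFields': {'total': {'$add': ['$vowels', '$cons']}}},
]
for doc in collection.aggregate(pipeline):
    print(doc)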
# Copyright 2020 Toyota Research Institute. All rights reserved.
from packnet_sfm.utils.image import flip_lr, interpolate_scales
from packnet_sfm.utils.misc import filter_dict
from packnet_sfm.utils.types import is_tensor, is_list, is_numpy
def flip(tensor, flip_fn):
"""
Flip tensors or list of tensors based on a function
Parameters
----------
tensor : torch.Tensor or list[torch.Tensor] or list[list[torch.Tensor]]
Tensor to be flipped
flip_fn : Function
Flip function
Returns
-------
tensor : torch.Tensor or list[torch.Tensor] or list[list[torch.Tensor]]
Flipped tensor or list of tensors
"""
if not is_list(tensor):
return flip_fn(tensor)
else:
if not is_list(tensor[0]):
return [flip_fn(val) for val in tensor]
else:
return [[flip_fn(v) for v in val] for val in tensor]
def merge_outputs(*outputs):
"""
Merges model outputs for logging
Parameters
----------
outputs : tuple of dict
Outputs to be merged
Returns
-------
output : dict
Dictionary with a "metrics" key containing a dictionary with various metrics and
all other keys that are not "loss" (it is handled differently).
"""
ignore = ['loss'] # Keys to ignore
combine = ['metrics'] # Keys to combine
merge = {key: {} for key in combine}
for output in outputs:
# Iterate over all keys
for key, val in output.items():
# Combine these keys
if key in combine:
for sub_key, sub_val in output[key].items():
assert sub_key not in merge[key].keys(), \
'Combining duplicated key {} to {}'.format(sub_key, key)
merge[key][sub_key] = sub_val
# Ignore these keys
elif key not in ignore:
assert key not in merge.keys(), \
'Adding duplicated key {}'.format(key)
merge[key] = val
return merge
def stack_batch(batch):
"""
Stack multi-camera batches (B,N,C,H,W becomes BN,C,H,W)
Parameters
----------
batch : dict
Batch
Returns
-------
batch : dict
Stacked batch
"""
# If there is multi-camera information
if len(batch['rgb'].shape) == 5:
assert batch['rgb'].shape[0] == 1, 'Only batch size 1 is supported for multi-cameras'
# Loop over all keys
for key in batch.keys():
# If list, stack every item
if is_list(batch[key]):
if is_tensor(batch[key][0]) or is_numpy(batch[key][0]):
batch[key] = [sample[0] for sample in batch[key]]
# Else, stack single item
else:
batch[key] = batch[key][0]
return batch
def flip_batch_input(batch):
"""
Flip batch input information (copies data first)
Parameters
----------
batch : dict
Batch information
Returns
-------
batch : dict
Flipped batch
"""
# Flip tensors
for key in filter_dict(batch, [
'rgb', 'rgb_context',
'input_depth', 'input_depth_context',
]):
batch[key] = flip(batch[key], flip_lr)
# Flip intrinsics
for key in filter_dict(batch, [
'intrinsics'
]):
batch[key] = batch[key].clone()
batch[key][:, 0, 2] = batch['rgb'].shape[3] - batch[key][:, 0, 2]
# Return flipped batch
return batch
def flip_output(output):
"""
Flip output information
Parameters
----------
output : dict
Dictionary of model outputs (e.g. with keys like 'inv_depths' and 'uncertainty')
Returns
-------
output : dict
Flipped output
"""
# Flip tensors
for key in filter_dict(output, [
'uncertainty', 'logits_semantic', 'ord_probability',
'inv_depths', 'inv_depths_context', 'inv_depths1', 'inv_depths2',
'pred_depth', 'pred_depth_context', 'pred_depth1', 'pred_depth2',
'pred_inv_depth', 'pred_inv_depth_context', 'pred_inv_depth1', 'pred_inv_depth2',
]):
output[key] = flip(output[key], flip_lr)
return output
def upsample_output(output, mode='nearest', align_corners=None):
"""
Upsample multi-scale outputs to full resolution.
Parameters
----------
output : dict
Dictionary of model outputs (e.g. with keys like 'inv_depths' and 'uncertainty')
mode : str
Which interpolation mode is used
align_corners: bool or None
Whether corners will be aligned during interpolation
Returns
-------
output : dict
Upsampled output
"""
for key in filter_dict(output, [
'inv_depths', 'uncertainty'
]):
output[key] = interpolate_scales(
output[key], mode=mode, align_corners=align_corners)
for key in filter_dict(output, [
'inv_depths_context'
]):
output[key] = [interpolate_scales(
val, mode=mode, align_corners=align_corners) for val in output[key]]
return output
| [
"packnet_sfm.utils.types.is_list",
"packnet_sfm.utils.image.interpolate_scales",
"packnet_sfm.utils.misc.filter_dict",
"packnet_sfm.utils.types.is_numpy",
"packnet_sfm.utils.types.is_tensor"
] | [((3118, 3203), 'packnet_sfm.utils.misc.filter_dict', 'filter_dict', (['batch', "['rgb', 'rgb_context', 'input_depth', 'input_depth_context']"], {}), "(batch, ['rgb', 'rgb_context', 'input_depth', 'input_depth_context']\n )\n", (3129, 3203), False, 'from packnet_sfm.utils.misc import filter_dict\n'), ((3307, 3341), 'packnet_sfm.utils.misc.filter_dict', 'filter_dict', (['batch', "['intrinsics']"], {}), "(batch, ['intrinsics'])\n", (3318, 3341), False, 'from packnet_sfm.utils.misc import filter_dict\n'), ((3824, 4128), 'packnet_sfm.utils.misc.filter_dict', 'filter_dict', (['output', "['uncertainty', 'logits_semantic', 'ord_probability', 'inv_depths',\n 'inv_depths_context', 'inv_depths1', 'inv_depths2', 'pred_depth',\n 'pred_depth_context', 'pred_depth1', 'pred_depth2', 'pred_inv_depth',\n 'pred_inv_depth_context', 'pred_inv_depth1', 'pred_inv_depth2']"], {}), "(output, ['uncertainty', 'logits_semantic', 'ord_probability',\n 'inv_depths', 'inv_depths_context', 'inv_depths1', 'inv_depths2',\n 'pred_depth', 'pred_depth_context', 'pred_depth1', 'pred_depth2',\n 'pred_inv_depth', 'pred_inv_depth_context', 'pred_inv_depth1',\n 'pred_inv_depth2'])\n", (3835, 4128), False, 'from packnet_sfm.utils.misc import filter_dict\n'), ((4726, 4776), 'packnet_sfm.utils.misc.filter_dict', 'filter_dict', (['output', "['inv_depths', 'uncertainty']"], {}), "(output, ['inv_depths', 'uncertainty'])\n", (4737, 4776), False, 'from packnet_sfm.utils.misc import filter_dict\n'), ((4914, 4957), 'packnet_sfm.utils.misc.filter_dict', 'filter_dict', (['output', "['inv_depths_context']"], {}), "(output, ['inv_depths_context'])\n", (4925, 4957), False, 'from packnet_sfm.utils.misc import filter_dict\n'), ((679, 694), 'packnet_sfm.utils.types.is_list', 'is_list', (['tensor'], {}), '(tensor)\n', (686, 694), False, 'from packnet_sfm.utils.types import is_tensor, is_list, is_numpy\n'), ((4814, 4885), 'packnet_sfm.utils.image.interpolate_scales', 'interpolate_scales', (['output[key]'], {'mode': 'mode', 'align_corners': 'align_corners'}), '(output[key], mode=mode, align_corners=align_corners)\n', (4832, 4885), False, 'from packnet_sfm.utils.image import flip_lr, interpolate_scales\n'), ((752, 770), 'packnet_sfm.utils.types.is_list', 'is_list', (['tensor[0]'], {}), '(tensor[0])\n', (759, 770), False, 'from packnet_sfm.utils.types import is_tensor, is_list, is_numpy\n'), ((2567, 2586), 'packnet_sfm.utils.types.is_list', 'is_list', (['batch[key]'], {}), '(batch[key])\n', (2574, 2586), False, 'from packnet_sfm.utils.types import is_tensor, is_list, is_numpy\n'), ((4996, 5059), 'packnet_sfm.utils.image.interpolate_scales', 'interpolate_scales', (['val'], {'mode': 'mode', 'align_corners': 'align_corners'}), '(val, mode=mode, align_corners=align_corners)\n', (5014, 5059), False, 'from packnet_sfm.utils.image import flip_lr, interpolate_scales\n'), ((2607, 2631), 'packnet_sfm.utils.types.is_tensor', 'is_tensor', (['batch[key][0]'], {}), '(batch[key][0])\n', (2616, 2631), False, 'from packnet_sfm.utils.types import is_tensor, is_list, is_numpy\n'), ((2635, 2658), 'packnet_sfm.utils.types.is_numpy', 'is_numpy', (['batch[key][0]'], {}), '(batch[key][0])\n', (2643, 2658), False, 'from packnet_sfm.utils.types import is_tensor, is_list, is_numpy\n')] |
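A hedged sketch of how the flip helpers above are typically composed for flip-based test-time inference; `model` and `batch` are placeholders, and only functions defined or imported above are used.

def predict_with_flip(model, batch):
    batch = stack_batch(batch)                    # collapse multi-camera dimension if present
    output = model(batch)                         # straight forward pass
    flipped = flip_batch_input(dict(batch))       # shallow copy, then flip rgb/depth inputs
    output_flipped = flip_output(model(flipped))  # flip predictions back to the original frame
    return output, output_flipped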
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_orthoexon
----------------------------------
Tests for `orthoexon` module.
"""
import os
import pytest
@pytest.fixture
def exon_id_with_quotes():
return "'ENSE00001229068.1'"
@pytest.fixture
def exon_id():
return "ENSE00001229068.1"
def test_separate_with_quotes(exon_id_with_quotes):
from orthoexon.util import separate
test = separate(exon_id_with_quotes)
true = "ENSE00001229068"
assert test == true
def test_separate(exon_id):
from orthoexon.util import separate
test = separate(exon_id)
true = "ENSE00001229068"
assert test == true
@pytest.fixture
def location():
return "chr20:10256140-10256211:+:0"
def test_splitstart(location):
from orthoexon.util import splitstart
test = splitstart(location)
true = '10256140'
assert test == true
def test_splitend(location):
from orthoexon.util import splitend
test = splitend(location)
true = '10256211'
assert test == true
@pytest.fixture
def human_gtf_filename(table_folder):
return os.path.join(table_folder, 'humanrbfox2andfmr1andsnap25.gtf')
@pytest.fixture
def human_gtf_database(table_folder):
return os.path.join(table_folder, 'humanrbfox2andfmr1andsnap25.gtf.db')
@pytest.fixture
def human_fasta(table_folder):
return os.path.join(table_folder, 'GRCm38.p3.genome.fa')
def test_translate(exon_id, human_fasta, human_gtf_database):
from orthoexon.util import translate
from orthoexon.util import separate
for index, species1gene in enumerate(human_gtf_database.features_of_type('gene')):
species1gffutilsgeneid = str(species1gene['gene_id'])
species1geneid = separate(species1gffutilsgeneid)
for exon in human_gtf_database.children(species1geneid,
featuretype='CDS',
order_by='start'):
if exon_id == exon:
test = translate(exon, human_fasta)
break
break
true = 'MAEDADMRNELEEMQRRADQLADE'
assert test == true
# def test_getsequence(exon, human_gtf_database):
# from orthoexon.util import getsequence
#
# test = getsequence(exon, human_gtf_database)
# true = 'ATGGCCGAAGACGCAGACATGCGCAATGAGCTGGAGGAGATGCAGCGAAGGGCTGACCAGTT' \
# 'GGCTGATGAG'
#
# assert test == true
# def test_make_sequence_array(finalsequencedf):
# from orthoexon.util import make_sequence_array
#
# test = make_sequence_array(finalsequencedf)
# true = ......
#
# assert test == true | [
"orthoexon.util.translate",
"os.path.join",
"orthoexon.util.separate",
"orthoexon.util.splitend",
"orthoexon.util.splitstart"
] | [((406, 435), 'orthoexon.util.separate', 'separate', (['exon_id_with_quotes'], {}), '(exon_id_with_quotes)\n', (414, 435), False, 'from orthoexon.util import separate\n'), ((573, 590), 'orthoexon.util.separate', 'separate', (['exon_id'], {}), '(exon_id)\n', (581, 590), False, 'from orthoexon.util import separate\n'), ((806, 826), 'orthoexon.util.splitstart', 'splitstart', (['location'], {}), '(location)\n', (816, 826), False, 'from orthoexon.util import splitstart\n'), ((957, 975), 'orthoexon.util.splitend', 'splitend', (['location'], {}), '(location)\n', (965, 975), False, 'from orthoexon.util import splitend\n'), ((1090, 1151), 'os.path.join', 'os.path.join', (['table_folder', '"""humanrbfox2andfmr1andsnap25.gtf"""'], {}), "(table_folder, 'humanrbfox2andfmr1andsnap25.gtf')\n", (1102, 1151), False, 'import os\n'), ((1218, 1282), 'os.path.join', 'os.path.join', (['table_folder', '"""humanrbfox2andfmr1andsnap25.gtf.db"""'], {}), "(table_folder, 'humanrbfox2andfmr1andsnap25.gtf.db')\n", (1230, 1282), False, 'import os\n'), ((1342, 1391), 'os.path.join', 'os.path.join', (['table_folder', '"""GRCm38.p3.genome.fa"""'], {}), "(table_folder, 'GRCm38.p3.genome.fa')\n", (1354, 1391), False, 'import os\n'), ((1710, 1742), 'orthoexon.util.separate', 'separate', (['species1gffutilsgeneid'], {}), '(species1gffutilsgeneid)\n', (1718, 1742), False, 'from orthoexon.util import separate\n'), ((1997, 2025), 'orthoexon.util.translate', 'translate', (['exon', 'human_fasta'], {}), '(exon, human_fasta)\n', (2006, 2025), False, 'from orthoexon.util import translate\n')] |
#
# This file is part of LiteX-Boards.
#
# Copyright (c) 2021 <NAME> <<EMAIL>>
# SPDX-License-Identifier: BSD-2-Clause
# Board diagram/pinout:
# https://user-images.githubusercontent.com/1450143/133655492-532d5e9a-0635-4889-85c9-68683d06cae0.png
# http://dl.sipeed.com/TANG/Nano/HDK/Tang-NANO-2704(Schematic).pdf
from migen import *
from litex.build.generic_platform import *
from litex.build.gowin.platform import GowinPlatform
from litex.build.openfpgaloader import OpenFPGALoader
# IOs ----------------------------------------------------------------------------------------------
_io = [
# Clk / Rst
("clk24", 0, Pins("35"), IOStandard("LVCMOS33")),
# Leds
("user_led", 0, Pins("16"), IOStandard("LVCMOS33")),
("user_led", 1, Pins("17"), IOStandard("LVCMOS33")),
("user_led", 2, Pins("18"), IOStandard("LVCMOS33")),
# Buttons.
("user_btn", 0, Pins("15"), IOStandard("LVCMOS33")),
    ("user_btn", 1, Pins("14"), IOStandard("LVCMOS33")),
# Serial
("serial", 0,
Subsignal("tx", Pins("8")),
Subsignal("rx", Pins("9")),
IOStandard("LVCMOS33")
),
]
# Connectors ---------------------------------------------------------------------------------------
_connectors = []
# Platform -----------------------------------------------------------------------------------------
class Platform(GowinPlatform):
default_clk_name = "clk24"
default_clk_period = 1e9/24e6
def __init__(self):
GowinPlatform.__init__(self, "GW1N-LV1QN48C6/I5", _io, _connectors, toolchain="gowin", devicename="GW1N-1")
self.toolchain.options["use_done_as_gpio"] = 1
def create_programmer(self):
return OpenFPGALoader("tangnano")
def do_finalize(self, fragment):
GowinPlatform.do_finalize(self, fragment)
self.add_period_constraint(self.lookup_request("clk24", loose=True), 1e9/24e6)
| [
"litex.build.gowin.platform.GowinPlatform.__init__",
"litex.build.openfpgaloader.OpenFPGALoader",
"litex.build.gowin.platform.GowinPlatform.do_finalize"
] | [((1481, 1592), 'litex.build.gowin.platform.GowinPlatform.__init__', 'GowinPlatform.__init__', (['self', '"""GW1N-LV1QN48C6/I5"""', '_io', '_connectors'], {'toolchain': '"""gowin"""', 'devicename': '"""GW1N-1"""'}), "(self, 'GW1N-LV1QN48C6/I5', _io, _connectors,\n toolchain='gowin', devicename='GW1N-1')\n", (1503, 1592), False, 'from litex.build.gowin.platform import GowinPlatform\n'), ((1693, 1719), 'litex.build.openfpgaloader.OpenFPGALoader', 'OpenFPGALoader', (['"""tangnano"""'], {}), "('tangnano')\n", (1707, 1719), False, 'from litex.build.openfpgaloader import OpenFPGALoader\n'), ((1766, 1807), 'litex.build.gowin.platform.GowinPlatform.do_finalize', 'GowinPlatform.do_finalize', (['self', 'fragment'], {}), '(self, fragment)\n', (1791, 1807), False, 'from litex.build.gowin.platform import GowinPlatform\n')] |
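A rough smoke-test for the board file above: a Migen counter that blinks the first LED. It assumes the Gowin toolchain is installed and that the build flow clocks the default `sys` domain from `clk24`; it is a sketch, not part of the platform definition.

from migen import Module, Signal

class Blinky(Module):
    def __init__(self, platform):
        led = platform.request("user_led", 0)
        counter = Signal(24)
        self.sync += counter.eq(counter + 1)  # free-running counter in the sys domain
        self.comb += led.eq(counter[23])      # MSB gives a roughly 1-2 Hz blink at 24 MHz

platform = Platform()
platform.build(Blinky(platform))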
import torch
from torch.distributions.kl import kl_divergence
from torch.nn.utils.convert_parameters import (vector_to_parameters,
parameters_to_vector)
from rl_utils.optimization import conjugate_gradient
from rl_utils.torch_utils import (weighted_mean, detach_distribution, weighted_normalize)
class MetaLearner(object):
"""Meta-learner
The meta-learner is responsible for sampling the trajectories/episodes
    (before and after the one-step adaptation), computing the inner loss, computing
    the updated parameters based on the inner loss, and performing the meta-update.
[1] <NAME>, <NAME>, <NAME>, "Model-Agnostic
Meta-Learning for Fast Adaptation of Deep Networks", 2017
(https://arxiv.org/abs/1703.03400)
[2] <NAME>, <NAME>, "Reinforcement learning: An introduction",
2018 (http://incompleteideas.net/book/the-book-2nd.html)
[3] <NAME>, <NAME>, <NAME>, <NAME>,
<NAME>, "High-Dimensional Continuous Control Using Generalized
Advantage Estimation", 2016 (https://arxiv.org/abs/1506.02438)
[4] <NAME>, <NAME>, <NAME>, <NAME>,
<NAME>, "Trust Region Policy Optimization", 2015
(https://arxiv.org/abs/1502.05477)
"""
def __init__(self, sampler, policy, baseline, gamma=0.95,
fast_lr=0.5, tau=1.0, device='cpu'):
self.sampler = sampler
self.policy = policy
self.baseline = baseline
self.gamma = gamma
self.fast_lr = fast_lr
self.tau = tau
self.to(device)
def inner_loss(self, episodes, params=None):
"""Compute the inner loss for the one-step gradient update. The inner
loss is REINFORCE with baseline [2], computed on advantages estimated
with Generalized Advantage Estimation (GAE, [3]).
"""
values = self.baseline(episodes)
advantages = episodes.gae(values, tau=self.tau)
advantages = weighted_normalize(advantages, weights=episodes.mask)
pi = self.policy(episodes.observations, params=params)
log_probs = pi.log_prob(episodes.actions)
if log_probs.dim() > 2:
log_probs = torch.sum(log_probs, dim=2)
loss = -weighted_mean(log_probs * advantages, dim=0, weights=episodes.mask)
return loss
def adapt(self, episodes, first_order=False, params=None, lr=None):
"""Adapt the parameters of the policy network to a new task, from
sampled trajectories `episodes`, with a one-step gradient update [1].
"""
if lr is None:
lr = self.fast_lr
# Fit the baseline to the training episodes
self.baseline.fit(episodes)
# Get the loss on the training episodes
loss = self.inner_loss(episodes, params=params)
# Get the new parameters after a one-step gradient update
params = self.policy.update_params(loss, step_size=lr, first_order=first_order, params=params)
return params, loss
def sample(self, tasks, first_order=False):
"""Sample trajectories (before and after the update of the parameters)
for all the tasks `tasks`.
"""
episodes = []
losses = []
for task in tasks:
self.sampler.reset_task(task)
self.policy.reset_context()
train_episodes = self.sampler.sample(self.policy, gamma=self.gamma)
# inner loop (for CAVIA, this only updates the context parameters)
params, loss = self.adapt(train_episodes, first_order=first_order)
# rollouts after inner loop update
valid_episodes = self.sampler.sample(self.policy, params=params, gamma=self.gamma)
episodes.append((train_episodes, valid_episodes))
losses.append(loss.item())
return episodes, losses
def test(self, tasks, num_steps, batch_size, halve_lr):
"""Sample trajectories (before and after the update of the parameters)
        for all the tasks `tasks`.
"""
episodes_per_task = []
for task in tasks:
# reset context params (for cavia) and task
self.policy.reset_context()
self.sampler.reset_task(task)
# start with blank params
params = None
# gather some initial experience and log performance
test_episodes = self.sampler.sample(self.policy, gamma=self.gamma, params=params, batch_size=batch_size)
# initialise list which will log all rollouts for the current task
curr_episodes = [test_episodes]
for i in range(1, num_steps + 1):
# lower learning rate after first update (for MAML, as described in their paper)
if i == 1 and halve_lr:
lr = self.fast_lr / 2
else:
lr = self.fast_lr
# inner-loop update
params, loss = self.adapt(test_episodes, first_order=True, params=params, lr=lr)
# get new rollouts
test_episodes = self.sampler.sample(self.policy, gamma=self.gamma, params=params, batch_size=batch_size)
curr_episodes.append(test_episodes)
episodes_per_task.append(curr_episodes)
self.policy.reset_context()
return episodes_per_task
def kl_divergence(self, episodes, old_pis=None):
kls = []
if old_pis is None:
old_pis = [None] * len(episodes)
for (train_episodes, valid_episodes), old_pi in zip(episodes, old_pis):
# this is the inner-loop update
self.policy.reset_context()
params, _ = self.adapt(train_episodes)
pi = self.policy(valid_episodes.observations, params=params)
if old_pi is None:
old_pi = detach_distribution(pi)
mask = valid_episodes.mask
if valid_episodes.actions.dim() > 2:
mask = mask.unsqueeze(2)
kl = weighted_mean(kl_divergence(pi, old_pi), dim=0, weights=mask)
kls.append(kl)
return torch.mean(torch.stack(kls, dim=0))
def hessian_vector_product(self, episodes, damping=1e-2):
"""Hessian-vector product, based on the Perlmutter method."""
def _product(vector):
kl = self.kl_divergence(episodes)
grads = torch.autograd.grad(kl, self.policy.parameters(), create_graph=True)
flat_grad_kl = parameters_to_vector(grads)
grad_kl_v = torch.dot(flat_grad_kl, vector)
grad2s = torch.autograd.grad(grad_kl_v, self.policy.parameters())
flat_grad2_kl = parameters_to_vector(grad2s)
return flat_grad2_kl + damping * vector
return _product
def surrogate_loss(self, episodes, old_pis=None):
losses, kls, pis = [], [], []
if old_pis is None:
old_pis = [None] * len(episodes)
for (train_episodes, valid_episodes), old_pi in zip(episodes, old_pis):
# do inner-loop update
self.policy.reset_context()
params, _ = self.adapt(train_episodes)
with torch.set_grad_enabled(old_pi is None):
# get action values after inner-loop update
pi = self.policy(valid_episodes.observations, params=params)
pis.append(detach_distribution(pi))
if old_pi is None:
old_pi = detach_distribution(pi)
values = self.baseline(valid_episodes)
advantages = valid_episodes.gae(values, tau=self.tau)
advantages = weighted_normalize(advantages, weights=valid_episodes.mask)
log_ratio = (pi.log_prob(valid_episodes.actions)
- old_pi.log_prob(valid_episodes.actions))
if log_ratio.dim() > 2:
log_ratio = torch.sum(log_ratio, dim=2)
ratio = torch.exp(log_ratio)
loss = -weighted_mean(ratio * advantages, dim=0, weights=valid_episodes.mask)
losses.append(loss)
mask = valid_episodes.mask
if valid_episodes.actions.dim() > 2:
mask = mask.unsqueeze(2)
kl = weighted_mean(kl_divergence(pi, old_pi), dim=0, weights=mask)
kls.append(kl)
return torch.mean(torch.stack(losses, dim=0)), torch.mean(torch.stack(kls, dim=0)), pis
def step(self, episodes, max_kl=1e-3, cg_iters=10, cg_damping=1e-2,
ls_max_steps=10, ls_backtrack_ratio=0.5):
"""Meta-optimization step (ie. update of the initial parameters), based
on Trust Region Policy Optimization (TRPO, [4]).
"""
old_loss, _, old_pis = self.surrogate_loss(episodes)
# this part will take higher order gradients through the inner loop:
grads = torch.autograd.grad(old_loss, self.policy.parameters())
grads = parameters_to_vector(grads)
# Compute the step direction with Conjugate Gradient
hessian_vector_product = self.hessian_vector_product(episodes, damping=cg_damping)
stepdir = conjugate_gradient(hessian_vector_product, grads, cg_iters=cg_iters)
# Compute the Lagrange multiplier
shs = 0.5 * torch.dot(stepdir, hessian_vector_product(stepdir))
lagrange_multiplier = torch.sqrt(shs / max_kl)
step = stepdir / lagrange_multiplier
# Save the old parameters
old_params = parameters_to_vector(self.policy.parameters())
print()
# Line search
step_size = 1.0
for _ in range(ls_max_steps):
vector_to_parameters(old_params - step_size * step, self.policy.parameters())
loss, kl, _ = self.surrogate_loss(episodes, old_pis=old_pis)
improve = loss - old_loss
if (improve.item() < 0.0) and (kl.item() < max_kl):
break
step_size *= ls_backtrack_ratio
else:
print('no update?')
vector_to_parameters(old_params, self.policy.parameters())
print('improve:', improve.item())
print('kl:', kl.item())
print('step_size:', step_size)
return loss
def to(self, device, **kwargs):
self.policy.to(device, **kwargs)
self.baseline.to(device, **kwargs)
self.device = device
| [
"rl_utils.torch_utils.weighted_mean",
"rl_utils.optimization.conjugate_gradient",
"torch.nn.utils.convert_parameters.parameters_to_vector",
"rl_utils.torch_utils.weighted_normalize",
"torch.stack",
"torch.sqrt",
"torch.exp",
"torch.distributions.kl.kl_divergence",
"torch.sum",
"rl_utils.torch_utils.detach_distribution",
"torch.set_grad_enabled",
"torch.dot"
] | [((1968, 2021), 'rl_utils.torch_utils.weighted_normalize', 'weighted_normalize', (['advantages'], {'weights': 'episodes.mask'}), '(advantages, weights=episodes.mask)\n', (1986, 2021), False, 'from rl_utils.torch_utils import weighted_mean, detach_distribution, weighted_normalize\n'), ((8994, 9021), 'torch.nn.utils.convert_parameters.parameters_to_vector', 'parameters_to_vector', (['grads'], {}), '(grads)\n', (9014, 9021), False, 'from torch.nn.utils.convert_parameters import vector_to_parameters, parameters_to_vector\n'), ((9193, 9261), 'rl_utils.optimization.conjugate_gradient', 'conjugate_gradient', (['hessian_vector_product', 'grads'], {'cg_iters': 'cg_iters'}), '(hessian_vector_product, grads, cg_iters=cg_iters)\n', (9211, 9261), False, 'from rl_utils.optimization import conjugate_gradient\n'), ((9407, 9431), 'torch.sqrt', 'torch.sqrt', (['(shs / max_kl)'], {}), '(shs / max_kl)\n', (9417, 9431), False, 'import torch\n'), ((2192, 2219), 'torch.sum', 'torch.sum', (['log_probs'], {'dim': '(2)'}), '(log_probs, dim=2)\n', (2201, 2219), False, 'import torch\n'), ((2237, 2304), 'rl_utils.torch_utils.weighted_mean', 'weighted_mean', (['(log_probs * advantages)'], {'dim': '(0)', 'weights': 'episodes.mask'}), '(log_probs * advantages, dim=0, weights=episodes.mask)\n', (2250, 2304), False, 'from rl_utils.torch_utils import weighted_mean, detach_distribution, weighted_normalize\n'), ((6148, 6171), 'torch.stack', 'torch.stack', (['kls'], {'dim': '(0)'}), '(kls, dim=0)\n', (6159, 6171), False, 'import torch\n'), ((6499, 6526), 'torch.nn.utils.convert_parameters.parameters_to_vector', 'parameters_to_vector', (['grads'], {}), '(grads)\n', (6519, 6526), False, 'from torch.nn.utils.convert_parameters import vector_to_parameters, parameters_to_vector\n'), ((6552, 6583), 'torch.dot', 'torch.dot', (['flat_grad_kl', 'vector'], {}), '(flat_grad_kl, vector)\n', (6561, 6583), False, 'import torch\n'), ((6690, 6718), 'torch.nn.utils.convert_parameters.parameters_to_vector', 'parameters_to_vector', (['grad2s'], {}), '(grad2s)\n', (6710, 6718), False, 'from torch.nn.utils.convert_parameters import vector_to_parameters, parameters_to_vector\n'), ((5861, 5884), 'rl_utils.torch_utils.detach_distribution', 'detach_distribution', (['pi'], {}), '(pi)\n', (5880, 5884), False, 'from rl_utils.torch_utils import weighted_mean, detach_distribution, weighted_normalize\n'), ((6046, 6071), 'torch.distributions.kl.kl_divergence', 'kl_divergence', (['pi', 'old_pi'], {}), '(pi, old_pi)\n', (6059, 6071), False, 'from torch.distributions.kl import kl_divergence\n'), ((7189, 7227), 'torch.set_grad_enabled', 'torch.set_grad_enabled', (['(old_pi is None)'], {}), '(old_pi is None)\n', (7211, 7227), False, 'import torch\n'), ((7663, 7722), 'rl_utils.torch_utils.weighted_normalize', 'weighted_normalize', (['advantages'], {'weights': 'valid_episodes.mask'}), '(advantages, weights=valid_episodes.mask)\n', (7681, 7722), False, 'from rl_utils.torch_utils import weighted_mean, detach_distribution, weighted_normalize\n'), ((7985, 8005), 'torch.exp', 'torch.exp', (['log_ratio'], {}), '(log_ratio)\n', (7994, 8005), False, 'import torch\n'), ((8420, 8446), 'torch.stack', 'torch.stack', (['losses'], {'dim': '(0)'}), '(losses, dim=0)\n', (8431, 8446), False, 'import torch\n'), ((8460, 8483), 'torch.stack', 'torch.stack', (['kls'], {'dim': '(0)'}), '(kls, dim=0)\n', (8471, 8483), False, 'import torch\n'), ((7394, 7417), 'rl_utils.torch_utils.detach_distribution', 'detach_distribution', (['pi'], {}), '(pi)\n', (7413, 7417), False, 'from 
rl_utils.torch_utils import weighted_mean, detach_distribution, weighted_normalize\n'), ((7484, 7507), 'rl_utils.torch_utils.detach_distribution', 'detach_distribution', (['pi'], {}), '(pi)\n', (7503, 7507), False, 'from rl_utils.torch_utils import weighted_mean, detach_distribution, weighted_normalize\n'), ((7933, 7960), 'torch.sum', 'torch.sum', (['log_ratio'], {'dim': '(2)'}), '(log_ratio, dim=2)\n', (7942, 7960), False, 'import torch\n'), ((8031, 8100), 'rl_utils.torch_utils.weighted_mean', 'weighted_mean', (['(ratio * advantages)'], {'dim': '(0)', 'weights': 'valid_episodes.mask'}), '(ratio * advantages, dim=0, weights=valid_episodes.mask)\n', (8044, 8100), False, 'from rl_utils.torch_utils import weighted_mean, detach_distribution, weighted_normalize\n'), ((8314, 8339), 'torch.distributions.kl.kl_divergence', 'kl_divergence', (['pi', 'old_pi'], {}), '(pi, old_pi)\n', (8327, 8339), False, 'from torch.distributions.kl import kl_divergence\n')] |
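A condensed sketch of the outer loop the class above is written for. The sampler, policy and baseline constructors and `sampler.sample_tasks()` are assumptions about the surrounding code base; only `MetaLearner` methods defined above are called.

# metalearner = MetaLearner(sampler, policy, baseline, gamma=0.95, fast_lr=0.5, tau=1.0)
for it in range(500):                                # number of meta-iterations (placeholder)
    tasks = sampler.sample_tasks(num_tasks=20)       # assumed sampler API
    episodes, inner_losses = metalearner.sample(tasks, first_order=False)
    outer_loss = metalearner.step(episodes, max_kl=1e-2, cg_iters=10)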
from datetime import timedelta
from dateutil.relativedelta import relativedelta
from django.core.management.base import BaseCommand, CommandError
from django.utils import timezone
from ...models import Request
DURATION_OPTIONS = {
'hours': lambda amount: timezone.now() - timedelta(hours=amount),
'days': lambda amount: timezone.now() - timedelta(days=amount),
'weeks': lambda amount: timezone.now() - timedelta(weeks=amount),
'months': lambda amount: timezone.now() + relativedelta(months=-amount),
'years': lambda amount: timezone.now() + relativedelta(years=-amount),
}
try:
# to keep backward Python 2 compatibility
input = raw_input
except NameError:
pass
class Command(BaseCommand):
help = 'Purge old requests.'
def add_arguments(self, parser):
parser.add_argument(
'amount',
type=int,
)
parser.add_argument('duration')
parser.add_argument(
'--noinput',
action='store_false',
dest='interactive',
default=True,
help='Tells Django to NOT prompt the user for input of any kind.'
)
def handle(self, *args, **options):
amount = options['amount']
duration = options['duration']
# Check we have the correct values
        if duration[-1] != 's':  # If it's not plural, make it plural
duration_plural = '{0}s'.format(duration)
else:
duration_plural = duration
if duration_plural not in DURATION_OPTIONS:
raise CommandError('Amount must be {0}'.format(', '.join(DURATION_OPTIONS)))
qs = Request.objects.filter(time__lte=DURATION_OPTIONS[duration_plural](amount))
count = qs.count()
if count == 0:
print('There are no requests to delete.')
return
if options.get('interactive'):
confirm = input('''
You have requested a database reset.
This will IRREVERSIBLY DESTROY any
requests created before {0} {1} ago.
That is a total of {2} requests.
Are you sure you want to do this?
Type 'yes' to continue, or 'no' to cancel:'''.format(amount, duration, count))
else:
confirm = 'yes'
if confirm == 'yes':
qs.delete()
else:
print('Purge cancelled')
| [
"django.utils.timezone.now",
"datetime.timedelta",
"dateutil.relativedelta.relativedelta"
] | [((262, 276), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (274, 276), False, 'from django.utils import timezone\n'), ((279, 302), 'datetime.timedelta', 'timedelta', ([], {'hours': 'amount'}), '(hours=amount)\n', (288, 302), False, 'from datetime import timedelta\n'), ((331, 345), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (343, 345), False, 'from django.utils import timezone\n'), ((348, 370), 'datetime.timedelta', 'timedelta', ([], {'days': 'amount'}), '(days=amount)\n', (357, 370), False, 'from datetime import timedelta\n'), ((400, 414), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (412, 414), False, 'from django.utils import timezone\n'), ((417, 440), 'datetime.timedelta', 'timedelta', ([], {'weeks': 'amount'}), '(weeks=amount)\n', (426, 440), False, 'from datetime import timedelta\n'), ((471, 485), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (483, 485), False, 'from django.utils import timezone\n'), ((488, 517), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'months': '(-amount)'}), '(months=-amount)\n', (501, 517), False, 'from dateutil.relativedelta import relativedelta\n'), ((547, 561), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (559, 561), False, 'from django.utils import timezone\n'), ((564, 592), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'years': '(-amount)'}), '(years=-amount)\n', (577, 592), False, 'from dateutil.relativedelta import relativedelta\n')] |
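The command above is invoked by the name of its file under `management/commands/`; that file name is not shown here, so `purgerequests` below is purely illustrative. A sketch of calling it programmatically:

from django.core.management import call_command

# delete requests older than 2 months without prompting
call_command("purgerequests", "2", "months", interactive=False)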
from __future__ import absolute_import, division, print_function
from builtins import (bytes, str, open, super, range,
                      zip, round, input, int, pow, object, map)
__author__ = "<NAME>"
import numpy as np
from astropy import wcs
from bokeh.layouts import row, widgetbox,gridplot
from bokeh.models import CustomJS, Slider,HoverTool,ColorBar,LinearColorMapper,LabelSet,ColumnDataSource
from bokeh.embed import components
from bokeh.plotting import figure
from bokeh.palettes import Plasma256
class Image(object):
def __init__(self,data,header):
self.data=data
self.header=header
def change_image_contrast(self, attr, old, new):
# print attr,old,new
self.fig_im.glyph.color_mapper.update(low=self.graph_min_slider.value, high=self.graph_max_slider.value)
def get_html_draw(self,w=None,h=None, catalog=None, plot=False, vmin=None, vmax=None):
#import plotly
#import plotly.graph_objs as go
#from plotly.graph_objs import Layout
# print('vmin,vmax',vmin,vmax)
msk = ~np.isnan(self.data)
if vmin is None:
vmin = self.data[msk].min()
if vmax is None:
vmax = self.data[msk].max()
min_s = self.data.min()
max_s = self.data.max()
r = self.data.shape[0] * 2
c = self.data.shape[1] * 2
fig = figure(plot_width=w, plot_height=h, x_range=(0, c * 0.5), y_range=(0, r * 0.5),
tools=['pan,box_zoom,box_select,wheel_zoom,reset,save,crosshair'])
w = wcs.WCS(self.header)
color_mapper = LinearColorMapper(low=min_s, high=max_s, palette=Plasma256)
fig_im = fig.image(image=[self.data], x=[0], y=[0], dw=[c * 0.5], dh=[r * 0.5],
color_mapper=color_mapper)
hover = HoverTool(tooltips=[("x", "$x"), ("y", "$y"), ("value", "@image")],
renderers=[fig_im])
fig.add_tools(hover)
#fig, (ax) = plt.subplots(1, 1, figsize=(4, 3), subplot_kw={'projection': WCS(self.header)})
#im = ax.imshow(self.data,
# origin='lower',
# zorder=1,
# interpolation='none',
# aspect='equal',
# cmap=plt.get_cmap('jet'),
# vmin=vmin,
# vmax=vmax)
if catalog is not None:
lon = catalog.ra
lat = catalog.dec
if len(lat) > 0.:
pixcrd = w.wcs_world2pix(np.column_stack((lon, lat)), 0)
msk = ~np.isnan(pixcrd[:, 0])
#ax.plot(pixcrd[:, 0][msk], pixcrd[:, 1][msk], 'o', mfc='none')
source = ColumnDataSource(data=dict(lon=pixcrd[:, 0][msk]+0.5,
lat=pixcrd[:, 1][msk]+0.5,
names=catalog.name[msk]))
#for ID, (x, y) in enumerate(pixcrd):
# if msk[ID]:
# # print ('xy',(pixcrd[:, 0][ID], pixcrd[:, 1][ID]))
# ax.annotate('%s' % catalog.name[ID], xy=(x, y), color='white')
#print(pixcrd[:][msk])
fig.scatter(x='lon', y='lat', marker='circle', size=15,
line_color="white", fill_color=None, alpha=1.0, source=source)
labels = LabelSet(x='lon', y='lat', text='names', level='glyph',
x_offset=5, y_offset=5, render_mode='canvas', source=source, text_color='white')
fig.add_layout(labels)
#print'cat', catalog[msk]
color_bar = ColorBar(color_mapper=color_mapper,
label_standoff=12, border_line_color=None, location=(0, 0))
JS_code_slider = """
var vmin = low_slider.value;
var vmax = high_slider.value;
fig_im.glyph.color_mapper.high = vmax;
fig_im.glyph.color_mapper.low = vmin;
"""
callback = CustomJS(args=dict(fig_im=fig_im), code=JS_code_slider)
self.graph_min_slider = Slider(title="Sig. Min", start=min_s, end=max_s, step=1, value=min_s, callback=callback)
self.graph_max_slider = Slider(title="Sig. Max", start=min_s, end=max_s, step=1, value=max_s * 0.8,
callback=callback)
self.graph_min_slider.on_change('value', self.change_image_contrast)
self.graph_max_slider.on_change('value', self.change_image_contrast)
callback.args["low_slider"] = self.graph_min_slider
callback.args["high_slider"] = self.graph_max_slider
#ax.set_xlabel('RA')
#ax.set_ylabel('DEC')
#ax.grid(True, color='white')
#fig.colorbar(im, ax=ax)
#plugins.connect(fig, plugins.MousePosition(fontsize=14))
#if plot == True:
# print('plot', plot)
# mpld3.show()
fig.add_layout(color_bar, 'right')
layout = row(
fig, widgetbox(self.graph_min_slider, self.graph_max_slider),
)
#curdoc().add_root(layout)
#output_file("slider.html", title="slider.py example")
#from bokeh.io import show
#show(layout)
script, div = components(layout)
html_dict = {}
html_dict['script'] = script
html_dict['div'] = div
return html_dict
class ScatterPlot(object):
def __init__(self,w,h,x_label=None,y_label=None,x_range=None,y_range=None,title=None,y_axis_type='linear',x_axis_type='linear'):
hover = HoverTool(tooltips=[("x", "$x"), ("y", "$y")])
self.fig = figure(title=title, width=w, height=h,x_range=x_range,y_range=y_range,
y_axis_type=y_axis_type,
x_axis_type=x_axis_type,
tools=[hover, 'pan,box_zoom,box_select,wheel_zoom,reset,save,crosshair']
)
if x_label is not None:
self.fig.xaxis.axis_label = x_label
if y_label is not None:
self.fig.yaxis.axis_label = y_label
def add_errorbar(self, x, y, xerr=None, yerr=None, color='red',
point_kwargs={}, error_kwargs={}):
self.fig.circle(x, y, color=color, **point_kwargs)
if xerr is not None:
x_err_x = []
x_err_y = []
for px, py, err in zip(x, y, xerr):
x_err_x.append((px - err, px + err))
x_err_y.append((py, py))
self.fig.multi_line(x_err_x, x_err_y, color=color, **error_kwargs)
if yerr is not None:
y_err_x = []
y_err_y = []
for px, py, err in zip(x, y, yerr):
y_err_x.append((px, px))
y_err_y.append((py - err, py + err))
self.fig.multi_line(y_err_x, y_err_y, color=color, **error_kwargs)
def add_step_line(self,x,y,legend=None):
#print('a')
self.fig.step(x,y,name=legend, mode="center")
#print('b')
def add_line(self,x,y,legend=None,color=None):
self.fig.line(x,y,legend=legend,line_color=color)
def get_html_draw(self):
layout = row(
self.fig
)
#curdoc().add_root(layout)
#show(layout)
script, div = components(layout)
#print ('script',script)
#print ('div',div)
html_dict = {}
html_dict['script'] = script
html_dict['div'] = div
return html_dict
class GridPlot(object):
def __init__(self,f1,f2,w=None,h=None):
self.f1=f1
self.f2=f2
def get_html_draw(self,w=None,h=None):
#l = layout([self.f1.fig],[self.f2.fig])
grid = gridplot([self.f1.fig,self.f2.fig],ncols=1,plot_width=w, plot_height=h)
#curdoc().add_root(grid)
#show(grid)
#output_file("test.html")
script, div = components(grid)
html_dict={}
html_dict['script']=script
html_dict['div'] = div
return html_dict
| [
"bokeh.models.ColorBar",
"bokeh.layouts.row",
"bokeh.plotting.figure",
"bokeh.layouts.widgetbox",
"bokeh.embed.components",
"bokeh.models.LinearColorMapper",
"numpy.column_stack",
"bokeh.layouts.gridplot",
"builtins.zip",
"numpy.isnan",
"bokeh.models.Slider",
"bokeh.models.LabelSet",
"astropy.wcs.WCS",
"bokeh.models.HoverTool"
] | [((1396, 1547), 'bokeh.plotting.figure', 'figure', ([], {'plot_width': 'w', 'plot_height': 'h', 'x_range': '(0, c * 0.5)', 'y_range': '(0, r * 0.5)', 'tools': "['pan,box_zoom,box_select,wheel_zoom,reset,save,crosshair']"}), "(plot_width=w, plot_height=h, x_range=(0, c * 0.5), y_range=(0, r * \n 0.5), tools=['pan,box_zoom,box_select,wheel_zoom,reset,save,crosshair'])\n", (1402, 1547), False, 'from bokeh.plotting import figure\n'), ((1577, 1597), 'astropy.wcs.WCS', 'wcs.WCS', (['self.header'], {}), '(self.header)\n', (1584, 1597), False, 'from astropy import wcs\n'), ((1621, 1680), 'bokeh.models.LinearColorMapper', 'LinearColorMapper', ([], {'low': 'min_s', 'high': 'max_s', 'palette': 'Plasma256'}), '(low=min_s, high=max_s, palette=Plasma256)\n', (1638, 1680), False, 'from bokeh.models import CustomJS, Slider, HoverTool, ColorBar, LinearColorMapper, LabelSet, ColumnDataSource\n'), ((1841, 1932), 'bokeh.models.HoverTool', 'HoverTool', ([], {'tooltips': "[('x', '$x'), ('y', '$y'), ('value', '@image')]", 'renderers': '[fig_im]'}), "(tooltips=[('x', '$x'), ('y', '$y'), ('value', '@image')],\n renderers=[fig_im])\n", (1850, 1932), False, 'from bokeh.models import CustomJS, Slider, HoverTool, ColorBar, LinearColorMapper, LabelSet, ColumnDataSource\n'), ((3718, 3818), 'bokeh.models.ColorBar', 'ColorBar', ([], {'color_mapper': 'color_mapper', 'label_standoff': '(12)', 'border_line_color': 'None', 'location': '(0, 0)'}), '(color_mapper=color_mapper, label_standoff=12, border_line_color=\n None, location=(0, 0))\n', (3726, 3818), False, 'from bokeh.models import CustomJS, Slider, HoverTool, ColorBar, LinearColorMapper, LabelSet, ColumnDataSource\n'), ((4214, 4306), 'bokeh.models.Slider', 'Slider', ([], {'title': '"""Sig. Min"""', 'start': 'min_s', 'end': 'max_s', 'step': '(1)', 'value': 'min_s', 'callback': 'callback'}), "(title='Sig. Min', start=min_s, end=max_s, step=1, value=min_s,\n callback=callback)\n", (4220, 4306), False, 'from bokeh.models import CustomJS, Slider, HoverTool, ColorBar, LinearColorMapper, LabelSet, ColumnDataSource\n'), ((4335, 4433), 'bokeh.models.Slider', 'Slider', ([], {'title': '"""Sig. Max"""', 'start': 'min_s', 'end': 'max_s', 'step': '(1)', 'value': '(max_s * 0.8)', 'callback': 'callback'}), "(title='Sig. 
Max', start=min_s, end=max_s, step=1, value=max_s * 0.8,\n callback=callback)\n", (4341, 4433), False, 'from bokeh.models import CustomJS, Slider, HoverTool, ColorBar, LinearColorMapper, LabelSet, ColumnDataSource\n'), ((5358, 5376), 'bokeh.embed.components', 'components', (['layout'], {}), '(layout)\n', (5368, 5376), False, 'from bokeh.embed import components\n'), ((5675, 5721), 'bokeh.models.HoverTool', 'HoverTool', ([], {'tooltips': "[('x', '$x'), ('y', '$y')]"}), "(tooltips=[('x', '$x'), ('y', '$y')])\n", (5684, 5721), False, 'from bokeh.models import CustomJS, Slider, HoverTool, ColorBar, LinearColorMapper, LabelSet, ColumnDataSource\n'), ((5742, 5946), 'bokeh.plotting.figure', 'figure', ([], {'title': 'title', 'width': 'w', 'height': 'h', 'x_range': 'x_range', 'y_range': 'y_range', 'y_axis_type': 'y_axis_type', 'x_axis_type': 'x_axis_type', 'tools': "[hover, 'pan,box_zoom,box_select,wheel_zoom,reset,save,crosshair']"}), "(title=title, width=w, height=h, x_range=x_range, y_range=y_range,\n y_axis_type=y_axis_type, x_axis_type=x_axis_type, tools=[hover,\n 'pan,box_zoom,box_select,wheel_zoom,reset,save,crosshair'])\n", (5748, 5946), False, 'from bokeh.plotting import figure\n'), ((7279, 7292), 'bokeh.layouts.row', 'row', (['self.fig'], {}), '(self.fig)\n', (7282, 7292), False, 'from bokeh.layouts import row, widgetbox, gridplot\n'), ((7397, 7415), 'bokeh.embed.components', 'components', (['layout'], {}), '(layout)\n', (7407, 7415), False, 'from bokeh.embed import components\n'), ((7814, 7888), 'bokeh.layouts.gridplot', 'gridplot', (['[self.f1.fig, self.f2.fig]'], {'ncols': '(1)', 'plot_width': 'w', 'plot_height': 'h'}), '([self.f1.fig, self.f2.fig], ncols=1, plot_width=w, plot_height=h)\n', (7822, 7888), False, 'from bokeh.layouts import row, widgetbox, gridplot\n'), ((7995, 8011), 'bokeh.embed.components', 'components', (['grid'], {}), '(grid)\n', (8005, 8011), False, 'from bokeh.embed import components\n'), ((1093, 1112), 'numpy.isnan', 'np.isnan', (['self.data'], {}), '(self.data)\n', (1101, 1112), True, 'import numpy as np\n'), ((5110, 5165), 'bokeh.layouts.widgetbox', 'widgetbox', (['self.graph_min_slider', 'self.graph_max_slider'], {}), '(self.graph_min_slider, self.graph_max_slider)\n', (5119, 5165), False, 'from bokeh.layouts import row, widgetbox, gridplot\n'), ((6486, 6501), 'builtins.zip', 'zip', (['x', 'y', 'xerr'], {}), '(x, y, xerr)\n', (6489, 6501), False, 'from builtins import bytes, str, open, super, range, zip, round, input, int, pow, object, map, zip\n'), ((6787, 6802), 'builtins.zip', 'zip', (['x', 'y', 'yerr'], {}), '(x, y, yerr)\n', (6790, 6802), False, 'from builtins import bytes, str, open, super, range, zip, round, input, int, pow, object, map, zip\n'), ((3443, 3583), 'bokeh.models.LabelSet', 'LabelSet', ([], {'x': '"""lon"""', 'y': '"""lat"""', 'text': '"""names"""', 'level': '"""glyph"""', 'x_offset': '(5)', 'y_offset': '(5)', 'render_mode': '"""canvas"""', 'source': 'source', 'text_color': '"""white"""'}), "(x='lon', y='lat', text='names', level='glyph', x_offset=5,\n y_offset=5, render_mode='canvas', source=source, text_color='white')\n", (3451, 3583), False, 'from bokeh.models import CustomJS, Slider, HoverTool, ColorBar, LinearColorMapper, LabelSet, ColumnDataSource\n'), ((2568, 2595), 'numpy.column_stack', 'np.column_stack', (['(lon, lat)'], {}), '((lon, lat))\n', (2583, 2595), True, 'import numpy as np\n'), ((2624, 2646), 'numpy.isnan', 'np.isnan', (['pixcrd[:, 0]'], {}), '(pixcrd[:, 0])\n', (2632, 2646), True, 'import numpy as np\n')] |
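The dictionaries returned by `get_html_draw()` above only carry the `components()` script/div pair; a page still needs the BokehJS resources. A rough sketch of writing a standalone page with the `ScatterPlot` wrapper (the data is made up, and the Bokeh version is assumed to match the `legend=`/`callback=` style API used above):

import numpy as np
from bokeh.resources import CDN

# assumes ScatterPlot from the module above is importable/in scope
sp = ScatterPlot(w=600, h=400, x_label='x', y_label='y',
            x_range=(0.0, 10.0), y_range=(0.0, 100.0))
x = np.linspace(0, 10, 50)
sp.add_line(x, x ** 2, legend='x^2', color='blue')
parts = sp.get_html_draw()

page = "<html><head>%s</head><body>%s%s</body></html>" % (
    CDN.render(), parts['div'], parts['script'])
with open("plot.html", "w") as fh:
    fh.write(page)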
from pathlib import Path
from testaid.pathlist import PathList
def test_testaid_unit_pathlist_roles_blacklist(testvars_roles_blacklist):
assert testvars_roles_blacklist is not None
def test_testaid_unit_pathlist_roles_whitelist(testvars_roles_whitelist):
assert testvars_roles_whitelist is not None
def test_testaid_unit_pathlist_get(tmp_path):
msd = tmp_path / 'molecule_scenario_directory'
dir1 = msd / 'dir1'
dir1.mkdir(parents=True)
dir2 = tmp_path / 'dir2'
dir2.mkdir()
file1 = dir1 / 'file1.yml'
file1.touch()
file2 = dir1 / 'file2.yml'
file2.touch()
file3 = dir2 / 'file3.yml'
file3.touch()
my_pathlist = [Path(file3), Path(file1), Path(file2)]
my_pathstring = 'dir1:../dir2/file3.yml'
pathlist = PathList(my_pathstring, msd)
assert pathlist.get() == my_pathlist
| [
"testaid.pathlist.PathList",
"pathlib.Path"
] | [((774, 802), 'testaid.pathlist.PathList', 'PathList', (['my_pathstring', 'msd'], {}), '(my_pathstring, msd)\n', (782, 802), False, 'from testaid.pathlist import PathList\n'), ((675, 686), 'pathlib.Path', 'Path', (['file3'], {}), '(file3)\n', (679, 686), False, 'from pathlib import Path\n'), ((688, 699), 'pathlib.Path', 'Path', (['file1'], {}), '(file1)\n', (692, 699), False, 'from pathlib import Path\n'), ((701, 712), 'pathlib.Path', 'Path', (['file2'], {}), '(file2)\n', (705, 712), False, 'from pathlib import Path\n')] |
"""
AJAX for SQLite Viewer plugin
"""
from yapsy.IPlugin import IPlugin
from flask import Response, jsonify
import json
import logging
import sqlite3
class FaSqliteAjax(IPlugin):
def __init__(self):
self.display_name = 'SQLite Ajax'
self.popularity = 0
self.cache = True
self.fast = False
self.action = False
IPlugin.__init__(self)
def activate(self):
IPlugin.activate(self)
return
def deactivate(self):
IPlugin.deactivate(self)
return
def check(self, evidence, path_on_disk):
"""Checks if the file is compatible with this plugin"""
return True
def mimetype(self, mimetype):
"""Returns the mimetype of this plugins get command"""
return "application/json"
def get(self, evidence, helper, path_on_disk, request):
"""Returns the result of this plugin to be displayed in a browser"""
method = helper.get_request_value(request, 'method', raise_key_error=True)
if method == "base":
return self.base_tree(path_on_disk)
elif method == "children":
return self.get_children(request, helper, path_on_disk)
elif method == "values":
return self.values(request, helper, path_on_disk)
logging.error('Unknown method "' + method + '" provided')
raise ValueError('Method "' + method + '" is not valid')
def base_tree(self, path_on_disk):
connection = sqlite3.connect(path_on_disk)
cursor = connection.cursor()
base_tree = []
cursor.execute("SELECT * FROM sqlite_master WHERE type='table';")
cursor.fetchone()
# Master Table
base_tree.append({'title': u'Master Table (1)',
'key': u'master',
'folder': True,
'lazy': True
})
# Tables
cursor.execute("SELECT name FROM sqlite_master WHERE type='table';")
tables = cursor.fetchall()
base_tree.append({'title': u'Tables (' + unicode(len(tables)) + u')',
'key': u'table',
'folder': True,
'lazy': True
})
# Views
cursor.execute("SELECT name FROM sqlite_master WHERE type='view';")
views = cursor.fetchall()
base_tree.append({'title': u'Views (' + unicode(len(views)) + u')',
'key': u'view',
'folder': True,
'lazy': True
})
# Indexes
cursor.execute("SELECT name FROM sqlite_master WHERE type='index';")
indexes = cursor.fetchall()
base_tree.append({'title': u'Indexes (' + unicode(len(indexes)) + u')',
'key': u'index',
'folder': True,
'lazy': True
})
# Triggers
cursor.execute("SELECT name FROM sqlite_master WHERE type='trigger';")
triggers = cursor.fetchall()
base_tree.append({'title': u'Triggers (' + unicode(len(triggers)) + u')',
'key': u'trigger',
'folder': True,
'lazy': True
})
connection.close()
# TODO REPLACE WITH DICTIONARY AND JSONIFY, SEE: http://stackoverflow.com/questions/12435297/how-do-i-jsonify-a-list-in-flask
return Response(json.dumps(base_tree), mimetype='application/json')
def get_children(self, request, helper, path_on_disk):
key = unicode(helper.get_request_value(request, 'key'))
children = []
if key == u'master':
children.append({'title': u'Master Table (1)',
'key': u'sqlite_master',
'folder': False,
'lazy': False
})
else:
for child in self.get_tables(key, path_on_disk):
children.append({'title': child,
'key': child,
'folder': False,
'lazy': False
})
# TODO REPLACE WITH DICTIONARY AND JSONIFY, SEE: http://stackoverflow.com/questions/12435297/how-do-i-jsonify-a-list-in-flask
return Response(json.dumps(children), mimetype='application/json')
def get_tables(self, key, path_on_disk):
connection = sqlite3.connect(path_on_disk)
cursor = connection.cursor()
tables = []
table_list = cursor.execute("SELECT name FROM sqlite_master WHERE type='" + key + "';")
for table in table_list:
tables.append(unicode(table[0]))
connection.close()
return tables
def values(self, request, helper, path_on_disk):
key = unicode(helper.get_request_value(request, 'key'))
connection = sqlite3.connect(path_on_disk)
cursor = connection.cursor()
cursor.execute("pragma table_info('" + key + "')")
rows = cursor.fetchall()
table = [ u'<table id="sqlitet01" class="display">', u' <thead><tr>' ]
for row in rows:
table.append(u' <th>' + unicode(row[1]) + u'</th>')
table.append(u' </tr> </thead>')
cursor.execute('SELECT * FROM ' + key)
rows = cursor.fetchall()
for row in rows:
table.append(u' <tr>')
for item in row:
try:
table.append(u' <td>' + unicode(item) + u'</td>')
except:
table.append(u' <td>' + unicode(type(item)) + u'</td>')
table.append(u' </tr>')
table.append(u'</table>')
connection.close()
return jsonify({'table': '\n'.join(table)}) | [
"yapsy.IPlugin.IPlugin.activate",
"sqlite3.connect",
"json.dumps",
"yapsy.IPlugin.IPlugin.deactivate",
"yapsy.IPlugin.IPlugin.__init__",
"logging.error"
] | [((364, 386), 'yapsy.IPlugin.IPlugin.__init__', 'IPlugin.__init__', (['self'], {}), '(self)\n', (380, 386), False, 'from yapsy.IPlugin import IPlugin\n'), ((420, 442), 'yapsy.IPlugin.IPlugin.activate', 'IPlugin.activate', (['self'], {}), '(self)\n', (436, 442), False, 'from yapsy.IPlugin import IPlugin\n'), ((493, 517), 'yapsy.IPlugin.IPlugin.deactivate', 'IPlugin.deactivate', (['self'], {}), '(self)\n', (511, 517), False, 'from yapsy.IPlugin import IPlugin\n'), ((1301, 1358), 'logging.error', 'logging.error', (['(\'Unknown method "\' + method + \'" provided\')'], {}), '(\'Unknown method "\' + method + \'" provided\')\n', (1314, 1358), False, 'import logging\n'), ((1485, 1514), 'sqlite3.connect', 'sqlite3.connect', (['path_on_disk'], {}), '(path_on_disk)\n', (1500, 1514), False, 'import sqlite3\n'), ((4591, 4620), 'sqlite3.connect', 'sqlite3.connect', (['path_on_disk'], {}), '(path_on_disk)\n', (4606, 4620), False, 'import sqlite3\n'), ((5043, 5072), 'sqlite3.connect', 'sqlite3.connect', (['path_on_disk'], {}), '(path_on_disk)\n', (5058, 5072), False, 'import sqlite3\n'), ((3550, 3571), 'json.dumps', 'json.dumps', (['base_tree'], {}), '(base_tree)\n', (3560, 3571), False, 'import json\n'), ((4473, 4493), 'json.dumps', 'json.dumps', (['children'], {}), '(children)\n', (4483, 4493), False, 'import json\n')] |
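# Self-contained sketch of the sqlite_master queries that FaSqliteAjax.base_tree
# builds its tree from, run against a throwaway in-memory database instead of an
# evidence file; the table name and output shape are illustrative only.
import json
import sqlite3

connection = sqlite3.connect(':memory:')
connection.execute('CREATE TABLE example (id INTEGER PRIMARY KEY, name TEXT)')
cursor = connection.cursor()
tree = []
for node_type in ('table', 'view', 'index', 'trigger'):
    cursor.execute("SELECT name FROM sqlite_master WHERE type=?", (node_type,))
    names = [row[0] for row in cursor.fetchall()]
    tree.append({'title': '%s (%d)' % (node_type, len(names)),
                 'key': node_type, 'folder': True, 'lazy': True})
connection.close()
print(json.dumps(tree, indent=2))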
import math
from constants import Constants
from utils import vector2d
from wpilib import SmartDashboard as Dash
from autonomous import pursuitpoint
class PurePursuit():
"""An implementation of the Pure Pursuit path tracking algorithm."""
def __init__(self, path):
self.path = path
self.pursuit_points = [pursuitpoint.PursuitPoint(p, c) for p, c in zip(
self.path.getPoints(), self.path.getCurvatures())]
self.last_lookahead_index = 0
self.cur_curvature = 0
self.target_velocities = vector2d.Vector2D()
self.closest_point_index = 0
def computeVelocities(self):
"""Compute the velocities along the path."""
# Compute the velocities along the path using the curvature and Constants.CURVE_VELOCITY
for ppoint in self.pursuit_points:
if abs(ppoint.curvature) <= Constants.CURVATURE_THRESHOLD:
velocity = Constants.MAX_VELOCITY
else:
velocity = min(Constants.MAX_VELOCITY,
Constants.CURVE_VELOCITY/ppoint.curvature)
ppoint.velocity = velocity
# Limit the acceleration of the velocities
for i in reversed(range(0, len(self.pursuit_points)-1)):
distance = self.pursuit_points[i].point.getDistance(
self.pursuit_points[i+1].point)
new_velocity = math.sqrt(
self.pursuit_points[i+1].velocity**2 + (2 * Constants.MAX_ACCELERATION * distance))
new_velocity = min(self.pursuit_points[i].velocity, new_velocity)
self.pursuit_points[i].velocity = new_velocity
def updateLookaheadPointIndex2(self, state):
"""Update the lookahead point given the current robot state.
        Uses the minimum-distance point if the state is more than
        Constants.LOOKAHEAD_DIST from all points, otherwise uses the point
        whose distance is closest to Constants.LOOKAHEAD_DIST."""
# Compute point distances to state and differences from those distances to Constants.LOOKAHEAD_DIST
distances = [math.hypot(state.x - ppoint.point.x,
state.y - ppoint.point.y) for ppoint in self.pursuit_points]
differences = [abs(d-Constants.LOOKAHEAD_DIST) for d in distances]
min_distance = min(distances)
# Get new lookahead index
if min_distance <= Constants.LOOKAHEAD_DIST:
self.last_lookahead_index = differences.index(min(differences))
else:
self.last_lookahead_index = distances.index(min_distance)
def updateLookaheadPointIndex(self, state):
"""Loop over the points in the path to get the lookahead point given the current robot state."""
for i in range(self.last_lookahead_index, len(self.pursuit_points)-1):
lookahead = self.computeLookaheadPoint(
self.pursuit_points[i].point, self.pursuit_points[i+1].point, state)
if lookahead != None:
self.last_lookahead_index = i
def computeLookaheadPoint(self, start, end, state):
"""Compute the lookahead point given the current robot state.
Returns a point if the current state is Constants.LOOKAHEAD_DIST
from between start and end, otherwise returns None."""
# Algorithm for circle line segment intersection found here: https://stackoverflow.com/questions/1073336/circle-line-segment-collision-detection-algorithm/1084899#1084899
segment_direction = end - start
center_to_start = start - state
a = segment_direction * segment_direction
b = 2 * (center_to_start * segment_direction)
c = (center_to_start * center_to_start) - Constants.LOOKAHEAD_DIST ** 2
discriminant = b**2 - (4 * a * c)
if discriminant < 0:
return None
else:
discriminant = math.sqrt(discriminant)
t0 = (-b - discriminant) / (2 * a)
t1 = (-b + discriminant) / (2 * a)
if t0 >= 0 and t0 <= 1:
return start + t0 * segment_direction
if t1 >= 0 and t1 <= 1:
return start + t1 * segment_direction
return None
def updateCurvature(self, state):
"""Update the curvature from the current lookahead point to the current robot position."""
lookahead = self.pursuit_points[self.last_lookahead_index].point
# Transform the lookahead and state.pos to get an aligned vector
transform = lookahead - state.pos
transform = transform.getRotated(-state.angle)
# Use the transformed vector to calculate the curvature (derived from https://www.ri.cmu.edu/pub_files/pub3/coulter_r_craig_1992_1/coulter_r_craig_1992_1.pdf#page=12)
self.cur_curvature = (2 * transform.x) / Constants.LOOKAHEAD_DIST**2
def updateClosestPointIndex(self, state):
"""Update the index of the closest point to the current robot position."""
index = self.closest_point_index
smallest_distance = self.pursuit_points[index].point.getDistance(state)
for i in range(0, len(self.pursuit_points)):
distance = self.pursuit_points[i].point.getDistance(state)
if smallest_distance > distance:
smallest_distance = distance
index = i
self.closest_point_index = index
def updateTargetVelocities(self, state):
"""Update the target velocities of the left and right wheels."""
robot_velocity = self.pursuit_points[self.closest_point_index].velocity
# Use kinematics (http://robotsforroboticists.com/drive-kinematics/) and algebra to find wheel target velocties
l_velocity = robot_velocity * \
(2 + self.cur_curvature * Constants.TRACK_WIDTH) / \
2 / Constants.PURE_PURSUIT_KV
r_velocity = robot_velocity * \
(2 - self.cur_curvature * Constants.TRACK_WIDTH) / \
2 / Constants.PURE_PURSUIT_KV
scale = max(abs(l_velocity), abs(r_velocity))
if scale > 1:
l_velocity /= scale
r_velocity /= scale
self.target_velocities = vector2d.Vector2D(l_velocity, r_velocity)
def update(self, state):
"""Update the pure pursuit follower(runs all update functions)."""
# TODO which lookahead function to use
self.updateLookaheadPointIndex(state.pos)
# self.updateLookaheadPointIndex2(state.pos)
self.updateCurvature(state)
self.updateClosestPointIndex(state.pos)
self.updateTargetVelocities(state.pos)
def outputToSmartDashboard(self):
"""Output values to the smart dashboard."""
lookahead = self.pursuit_points[self.last_lookahead_index].point
closest = self.pursuit_points[self.closest_point_index].point
Dash.putNumberArray("Lookahead Point", [lookahead.x, lookahead.y])
Dash.putNumber("Curvature", self.cur_curvature)
Dash.putNumberArray("Closes Point", [closest.x, closest.y])
Dash.putNumberArray("Target Velocities", [
self.target_velocities.x, self.target_velocities.y])
#print("Lookahead Point - {}".format(lookahead))
#print("Curvature - {}".format(self.cur_curvature))
#print("Closes Point - {}".format(closest))
#print("Target Velocities - {}".format(self.target_velocities))
# print("------------------------------")
def isDone(self):
"""Check if the path is done being followed."""
return (len(self.pursuit_points) - self.closest_point_index) <= 1
| [
"autonomous.pursuitpoint.PursuitPoint",
"wpilib.SmartDashboard.putNumber",
"utils.vector2d.Vector2D",
"math.sqrt",
"math.hypot",
"wpilib.SmartDashboard.putNumberArray"
] | [((546, 565), 'utils.vector2d.Vector2D', 'vector2d.Vector2D', ([], {}), '()\n', (563, 565), False, 'from utils import vector2d\n'), ((6145, 6186), 'utils.vector2d.Vector2D', 'vector2d.Vector2D', (['l_velocity', 'r_velocity'], {}), '(l_velocity, r_velocity)\n', (6162, 6186), False, 'from utils import vector2d\n'), ((6815, 6881), 'wpilib.SmartDashboard.putNumberArray', 'Dash.putNumberArray', (['"""Lookahead Point"""', '[lookahead.x, lookahead.y]'], {}), "('Lookahead Point', [lookahead.x, lookahead.y])\n", (6834, 6881), True, 'from wpilib import SmartDashboard as Dash\n'), ((6890, 6937), 'wpilib.SmartDashboard.putNumber', 'Dash.putNumber', (['"""Curvature"""', 'self.cur_curvature'], {}), "('Curvature', self.cur_curvature)\n", (6904, 6937), True, 'from wpilib import SmartDashboard as Dash\n'), ((6946, 7005), 'wpilib.SmartDashboard.putNumberArray', 'Dash.putNumberArray', (['"""Closes Point"""', '[closest.x, closest.y]'], {}), "('Closes Point', [closest.x, closest.y])\n", (6965, 7005), True, 'from wpilib import SmartDashboard as Dash\n'), ((7014, 7113), 'wpilib.SmartDashboard.putNumberArray', 'Dash.putNumberArray', (['"""Target Velocities"""', '[self.target_velocities.x, self.target_velocities.y]'], {}), "('Target Velocities', [self.target_velocities.x, self.\n target_velocities.y])\n", (7033, 7113), True, 'from wpilib import SmartDashboard as Dash\n'), ((332, 363), 'autonomous.pursuitpoint.PursuitPoint', 'pursuitpoint.PursuitPoint', (['p', 'c'], {}), '(p, c)\n', (357, 363), False, 'from autonomous import pursuitpoint\n'), ((1393, 1493), 'math.sqrt', 'math.sqrt', (['(self.pursuit_points[i + 1].velocity ** 2 + 2 * Constants.MAX_ACCELERATION *\n distance)'], {}), '(self.pursuit_points[i + 1].velocity ** 2 + 2 * Constants.\n MAX_ACCELERATION * distance)\n', (1402, 1493), False, 'import math\n'), ((2082, 2144), 'math.hypot', 'math.hypot', (['(state.x - ppoint.point.x)', '(state.y - ppoint.point.y)'], {}), '(state.x - ppoint.point.x, state.y - ppoint.point.y)\n', (2092, 2144), False, 'import math\n'), ((3872, 3895), 'math.sqrt', 'math.sqrt', (['discriminant'], {}), '(discriminant)\n', (3881, 3895), False, 'import math\n')] |
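# Standalone illustration of the circle/line-segment intersection that
# computeLookaheadPoint above relies on, using plain (x, y) tuples so it runs
# without the robot framework; lookahead_dist stands in for Constants.LOOKAHEAD_DIST
# and lookahead_on_segment is a hypothetical helper name.
import math


def lookahead_on_segment(start, end, center, lookahead_dist):
    d = (end[0] - start[0], end[1] - start[1])        # segment direction
    f = (start[0] - center[0], start[1] - center[1])  # center to segment start
    a = d[0] * d[0] + d[1] * d[1]
    b = 2 * (f[0] * d[0] + f[1] * d[1])
    c = f[0] * f[0] + f[1] * f[1] - lookahead_dist ** 2
    disc = b * b - 4 * a * c
    if disc < 0:
        return None
    disc = math.sqrt(disc)
    for t in ((-b - disc) / (2 * a), (-b + disc) / (2 * a)):
        if 0 <= t <= 1:
            return (start[0] + t * d[0], start[1] + t * d[1])
    return None


print(lookahead_on_segment((0.0, 0.0), (4.0, 0.0), (1.0, 0.0), 2.0))  # -> (3.0, 0.0)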
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# @Filename: DensityPeaks.py
# @Author: <NAME>
# @Time: 5/3/22 09:55
# @Version: 4.0
import math
from collections import defaultdict
import numpy as np
import pandas as pd
from sklearn.neighbors import KNeighborsClassifier, NearestNeighbors
from sklearn.preprocessing import LabelEncoder
from sklearn.semi_supervised import SelfTrainingClassifier
from sklearn.svm import SVC
from instance_selection import ENN
from .utils import split
class STDPNF:
"""
<NAME>., <NAME>., & <NAME>. (2019). A self-training method based on density
peaks and an extended parameter-free local noise filter for k nearest
neighbor. Knowledge-Based Systems, 184, 104895.
<NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., & <NAME>. (2018).
Self-training semi-supervised classification based on density peaks of
data. Neurocomputing, 275, 180-191.
"""
def __init__(
self,
dc=None,
distance_metric="euclidean",
k=3,
gauss_cutoff=True,
percent=2.0,
density_threshold=None,
distance_threshold=None,
anormal=True,
filtering=False,
classifier=None,
classifier_params=None,
filter_method=None,
):
"""Semi Supervised Algorithm based on Density Peaks."""
self.dc = dc
self.distance_metric = distance_metric
self.k = k
self.gauss_cutoff = gauss_cutoff
self.percent = percent
self.density_threshold = density_threshold
self.distance_threshold = distance_threshold
self.anormal = anormal
self.filtering = filtering
if classifier is not None:
if isinstance(classifier_params, dict):
self.classifier = classifier(**classifier_params)
else:
self.classifier = classifier()
else:
self.classifier = None
if filter_method is not None and filter_method != "ENANE":
self.filter = filter_method()
elif isinstance(filter_method, str) and filter_method == "ENANE":
self.filter = filter_method
else:
self.filter = None
self.y = None
self.low = None
self.u = None
self.classifier_stdpnf = None
self.order = None
self.structure = None
self.structure_stdnpf = None
self.n_id = None
self.distances = None
self.max_dis = None
self.min_dis = None
self.rho = None
self.delta = None
self.nneigh = None
self.data = None
def __build_distance(self):
"""
Calculate distance dict.
:return: distance dict, max distance, min distance
"""
from scipy.spatial.distance import pdist, squareform
distance_matrix = pdist(self.data, metric=self.distance_metric)
distance_matrix = squareform(distance_matrix)
triangle_upper = np.triu_indices(self.data.shape[0], 1)
triangle_upper = distance_matrix[triangle_upper]
distance = {}
for i in range(self.n_id):
for j in range(i + 1, self.n_id):
distance[(i, j)] = distance_matrix[i, j]
distance[(j, i)] = distance_matrix[i, j]
max_dis, min_dis = np.max(triangle_upper), np.min(triangle_upper)
return distance, max_dis, min_dis
def __auto_select_dc(self):
"""
        Automatically select the local density threshold so that the average
        number of neighbors is 1-2 percent of all nodes.
:return: dc that local density threshold
"""
max_dis, min_dis = self.max_dis, self.min_dis
dc = (max_dis + min_dis) / 2
while True:
nneighs = (
sum([1 for v in self.distances.values() if v < dc]) / self.n_id**2
)
if 0.01 <= nneighs <= 0.02:
break
# binary search
if nneighs < 0.01:
min_dis = dc
else:
max_dis = dc
dc = (max_dis + min_dis) / 2
if max_dis - min_dis < 0.0001:
break
return dc
def __select_dc(self):
"""
        Select the local density threshold; the default is the percentile method
        used in the paper, while 'auto' runs the automatic search above.
:return: dc that local density threshold
"""
if self.dc == "auto":
dc = self.__auto_select_dc()
else:
position = int(self.n_id * (self.n_id + 1) /
2 * self.percent / 100)
dc = np.sort(list(self.distances.values()))[
position * 2 + self.n_id]
return dc
def __local_density(self):
"""
Compute all points' local density.
:return: local density vector that index is the point index
"""
def gauss_func(dij, dc):
"""
> The function takes in a distance value and a cutoff value, and
returns the value of the Gaussian function at that point
:param dij: distance between two nodes
:param dc: The cutoff distance
:return: the value of the gaussian function.
"""
return math.exp(-((dij / dc) ** 2))
def cutoff_func(dij, dc):
"""
If the distance between two atoms is less than the cutoff distance,
return 1, otherwise return 0
:param dij: distance between atoms i and j
:param dc: cutoff distance
:return: 1 if dij < dc, else 0
"""
return 1 if dij < dc else 0
func = gauss_func if self.gauss_cutoff else cutoff_func
rho = [0] * self.n_id
for i in range(self.n_id):
for j in range(i + 1, self.n_id):
temp = func(self.distances[(i, j)], self.dc)
rho[i] += temp
rho[j] += temp
return np.array(rho, np.float32)
def __min_neighbor_and_distance(self):
"""
        Compute each point's minimum distance to a point of higher local
        density (its nearest higher-density neighbor).
:return: distance vector, nearest neighbor vector
"""
if self.rho is None:
raise ValueError("Encountered rho as None.")
sort_rho_idx = np.argsort(-self.rho)
delta, nneigh = [float(self.max_dis)] * self.n_id, [0] * self.n_id
delta[sort_rho_idx[0]] = -1.0
for i in range(self.n_id):
for j in range(0, i):
old_i, old_j = sort_rho_idx[i], sort_rho_idx[j]
if self.distances[(old_i, old_j)] < delta[old_i]:
delta[old_i] = self.distances[(old_i, old_j)]
nneigh[old_i] = old_j
delta[sort_rho_idx[0]] = max(delta)
return np.array(delta, np.float32), np.array(nneigh, np.float32)
def __structure(self):
"""
The function takes the data and the nearest neighbor indices and creates
a dataframe with the following columns:
- sample: the data point
- next: the index of the nearest neighbor
- previous: the index of the nearest neighbor of the nearest neighbor
- label: the label of the data point
The function also creates a copy of the dataframe called
structure_stdnpf
"""
self.structure = dict.fromkeys(range(self.n_id))
for index, sample in enumerate(self.data):
self.structure[index] = [
sample,
int(self.nneigh[index]),
None,
self.y[index] if index < len(self.y) else -1,
]
for index in range(self.n_id):
if self.structure[self.structure[index][1]][2] is None:
self.structure[self.structure[index][1]][2] = index
self.structure = pd.DataFrame(
self.structure, index=["sample", "next", "previous", "label"]
).transpose()
self.structure_stdnpf = self.structure.copy(deep=True)
def __step_a(self):
"""
> The function takes the labeled samples and trains the classifier on
them
:return: The samples that have been labeled.
"""
samples_labeled = self.structure.loc[self.structure["label"] != -1]
sam_lab = samples_labeled["sample"].to_list()
y_without = samples_labeled["label"].to_list()
self.classifier.fit(sam_lab, y_without)
return samples_labeled
def __discover_structure(self):
"""Discovers the under laying structure."""
self._fit_without()
def __nan_search(self):
"""
        For each point, grow the neighborhood radius r and collect its
        r-nearest neighbors (knn) and reverse nearest neighbors (rnn).
        The search stops once the number of points without any reverse
        neighbor no longer changes, and each point's natural neighbors (nan)
        are the intersection of its knn and rnn sets.
:return: nan, r
"""
r = 1
nan = defaultdict(set)
nb = dict.fromkeys(range(self.n_id), 0)
knn = defaultdict(set)
rnn = defaultdict(set)
cnt = defaultdict(int)
while True:
search = NearestNeighbors(n_neighbors=r + 1, algorithm="kd_tree")
search.fit(self.data)
for index, sample in enumerate(self.data):
r_neighs = search.kneighbors(
[sample], return_distance=False)[0][1:]
knn[index].update(list(r_neighs))
for neigh in r_neighs:
nb[neigh] += 1
rnn[neigh].add(index)
cnt[r] = np.count_nonzero((np.array(list(nb.values())) == 0))
if r > 2 and cnt[r] == cnt[r - 1]:
r -= 1
break
r += 1
for index in range(self.n_id):
nan[index] = knn[index].intersection(rnn[index])
return nan, r
def __enane(self, fx, nan, r):
"""
> The function takes in the dataframe, the list of indices of the
unlabeled data, the list of indices of the neighbors of the unlabeled
data, and the number of neighbors to use in the KNN classifier. It
then creates a new dataframe with the labeled data and the unlabeled
data, and uses the KNN classifier to predict the labels of the
unlabeled data. It then checks if the predicted label is the same as
the label of the majority of the neighbors of the unlabeled data. If
it is, then it adds the index of the unlabeled data to the list of
indices of the data to be labeled
:param fx: the indexes of the unlabeled data
:param nan: a list of lists, where each list contains the indices of the
neighbors of a sample
:param r: the number of neighbors to consider
:return: The indexes of the samples that are going to be labeled and the
labels that are going to be assigned to them.
"""
es = []
es_pred = []
local_structure = self.structure_stdnpf.copy(deep=True)
base_estimator = KNeighborsClassifier(
n_neighbors=r, metric=self.distance_metric
)
labeled_data = local_structure.loc[local_structure["label"] != -1]
nan_unlabeled = local_structure.loc[fx]
data = pd.concat([labeled_data, nan_unlabeled], join="inner")
enane_model = SelfTrainingClassifier(base_estimator)
enane_model.fit(data["sample"].tolist(), data["label"].tolist())
enane_pred = enane_model.predict(nan_unlabeled["sample"].tolist())
for (row_index, _), pred in zip(nan_unlabeled.iterrows(), enane_pred):
usefulness = 0
harmfulness = 0
for neigh in nan[row_index]:
if local_structure.loc[neigh, "label"] == pred:
usefulness += 1
else:
harmfulness += 1
if usefulness >= harmfulness:
es.append(row_index)
es_pred.append(pred)
return es, es_pred
def __init_values(self, low, u, y):
"""
        It takes the labeled samples, the unlabeled samples and the labels,
        then computes the pairwise distances, the maximum and minimum
        distance, the dc value, the rho and delta values, the nearest
        neighbors and the structure of the data
        :param low: the labeled samples
        :param u: the unlabeled samples
        :param y: the labels of the labeled samples
"""
self.y = y
self.low = low
self.u = u
self.data = np.concatenate((low, u), axis=0)
self.n_id = self.data.shape[0]
self.distances, self.max_dis, self.min_dis = self.__build_distance()
self.dc = self.__select_dc()
self.rho = self.__local_density()
self.delta, self.nneigh = self.__min_neighbor_and_distance()
self.__structure()
def _fit_without(self):
"""
The function takes in a classifier, and then labels the next point,
and then labels the previous points, without filtering.
"""
if self.classifier is None:
self.classifier = SVC()
count = 1
self.order = dict.fromkeys(range(self.n_id), 0)
count = self._label_next_point(count)
self._label_previous_points(count)
def _label_previous_points(self, count):
"""
> The function takes the samples labeled in the previous step and finds
the previous samples of those samples. It then labels those samples
and repeats the process until there are no more samples to label
:param count: the number of the current iteration
"""
while True:
samples_labeled = self.__step_a()
prev_rows = samples_labeled["previous"].to_numpy()
prev_unlabeled = []
samples_labeled_index = samples_labeled.index.to_list()
for prev_row in prev_rows:
if prev_row not in samples_labeled_index and prev_row is not None:
prev_unlabeled.append(prev_row)
self.order[prev_row] = count
if len(prev_unlabeled) == 0:
break
unlabeled_prev_of_labeled = self.structure.loc[prev_unlabeled]
lu = unlabeled_prev_of_labeled["sample"].to_list()
y_pred = self.classifier.predict(lu)
for new_label, pos in zip(y_pred, prev_unlabeled):
self.structure.at[pos, "label"] = new_label
count += 1
def _label_next_point(self, count):
"""
> The function takes the samples labeled in the previous step and finds
the next samples in the structure. If the next samples are not
labeled, it labels them and updates the order of the samples
:param count: the number of the next point to be labeled
:return: The number of labeled samples.
"""
while True:
samples_labeled = self.__step_a()
next_rows = samples_labeled["next"].to_numpy()
next_unlabeled = []
samples_labeled_index = samples_labeled.index.to_list()
for next_row in next_rows:
if next_row not in samples_labeled_index:
next_unlabeled.append(next_row)
self.order[next_row] = count
if len(next_unlabeled) == 0:
break
unlabeled_next_of_labeled = self.structure.loc[next_unlabeled]
lu = unlabeled_next_of_labeled["sample"].to_list()
y_pred = self.classifier.predict(lu)
for new_label, pos in zip(y_pred, next_unlabeled):
self.structure.at[pos, "label"] = new_label
count += 1
return count
def _fit_stdpnf(self):
"""
Self Training based on Density Peaks and a parameter-free noise
filter.
"""
self.__discover_structure()
nan, lambda_param = self.__nan_search()
self.classifier_stdpnf = KNeighborsClassifier(
n_neighbors=self.k, metric=self.distance_metric
)
self.classifier_stdpnf.fit(self.low, self.y)
count = 1
while count <= max(self.order.values()):
unlabeled_rows = self.structure_stdnpf.loc[
self.structure_stdnpf["label"] == -1
].index.to_list()
unlabeled_indexes = []
for row in unlabeled_rows:
if self.order[row] == count:
unlabeled_indexes.append(row)
if isinstance(self.filter, str) and self.filter == "ENANE":
filtered_indexes, filtered_labels = self.__enane(
unlabeled_indexes, nan, lambda_param
)
self.structure_stdnpf.at[filtered_indexes,
"label"] = filtered_labels
else:
labeled_data = self.structure_stdnpf.loc[
self.structure_stdnpf["label"] != -1
]
complete = labeled_data["sample"]
complete_y = labeled_data["label"]
result = self._if_filter(complete, complete_y)
self._results_to_structure(complete, result)
labeled_data = self.structure_stdnpf.loc[
self.structure_stdnpf["label"] != -1
]
self.classifier_stdpnf.fit(
labeled_data["sample"].tolist(), labeled_data["label"].tolist()
)
count += 1
labeled_data = self.structure_stdnpf.loc[self.structure_stdnpf["label"] != -1]
self.classifier_stdpnf.fit(
labeled_data["sample"].tolist(), labeled_data["label"].tolist()
)
def _results_to_structure(self, complete, result):
"""
> This function takes the results of the model and compares them to the
complete data set. If the result is not in the complete data set, it is
added to the structure data set.
:param complete: the complete dataset
:param result: the result of the clustering
"""
results_to_unlabeled = []
for r in result.to_numpy():
is_in = False
for c in complete:
if np.array_equal(r, c):
is_in = True
if not is_in:
results_to_unlabeled.append(r)
for r in results_to_unlabeled:
self.structure_stdnpf.at[np.array(self.structure_stdnpf["sample"], r)][
"label"
] = -1
def _if_filter(self, complete, complete_y):
"""
If the filter is an ENN, then filter the original data, otherwise
filter the complete data
:param complete: the complete dataframe
:param complete_y: the complete y values
:return: The result is a dataframe with the filtered data.
"""
if isinstance(self.filter, ENN):
original = pd.DataFrame(self.low)
original_y = pd.DataFrame(self.y)
result, _ = self.filter.filter_original_complete(
original, original_y, complete, complete_y
)
else:
result, _ = self.filter.filter(complete, complete_y)
return result
def fit(self, samples, y):
"""Fit method."""
try:
l, u, y = split(samples, y)
except IndexError:
raise ValueError("Dimensions do not match.")
le = LabelEncoder()
le.fit(y)
y = le.transform(y)
self.__init_values(l, u, y)
if self.filtering:
self._fit_stdpnf()
else:
self._fit_without()
def predict(self, src):
"""
Predict based on a trained classifier.
:param src: The source image
:return: The classifier is being returned.
"""
if self.classifier is None:
raise AssertionError("The model needs to be fitted first.")
return self.classifier.predict(src)
| [
"sklearn.semi_supervised.SelfTrainingClassifier",
"scipy.spatial.distance.squareform",
"sklearn.preprocessing.LabelEncoder",
"numpy.triu_indices",
"pandas.DataFrame",
"scipy.spatial.distance.pdist",
"sklearn.neighbors.KNeighborsClassifier",
"numpy.max",
"numpy.argsort",
"numpy.array",
"collections.defaultdict",
"numpy.array_equal",
"numpy.concatenate",
"numpy.min",
"sklearn.neighbors.NearestNeighbors",
"math.exp",
"pandas.concat",
"sklearn.svm.SVC"
] | [((2864, 2909), 'scipy.spatial.distance.pdist', 'pdist', (['self.data'], {'metric': 'self.distance_metric'}), '(self.data, metric=self.distance_metric)\n', (2869, 2909), False, 'from scipy.spatial.distance import pdist, squareform\n'), ((2936, 2963), 'scipy.spatial.distance.squareform', 'squareform', (['distance_matrix'], {}), '(distance_matrix)\n', (2946, 2963), False, 'from scipy.spatial.distance import pdist, squareform\n'), ((2990, 3028), 'numpy.triu_indices', 'np.triu_indices', (['self.data.shape[0]', '(1)'], {}), '(self.data.shape[0], 1)\n', (3005, 3028), True, 'import numpy as np\n'), ((5980, 6005), 'numpy.array', 'np.array', (['rho', 'np.float32'], {}), '(rho, np.float32)\n', (5988, 6005), True, 'import numpy as np\n'), ((6355, 6376), 'numpy.argsort', 'np.argsort', (['(-self.rho)'], {}), '(-self.rho)\n', (6365, 6376), True, 'import numpy as np\n'), ((9557, 9573), 'collections.defaultdict', 'defaultdict', (['set'], {}), '(set)\n', (9568, 9573), False, 'from collections import defaultdict\n'), ((9636, 9652), 'collections.defaultdict', 'defaultdict', (['set'], {}), '(set)\n', (9647, 9652), False, 'from collections import defaultdict\n'), ((9667, 9683), 'collections.defaultdict', 'defaultdict', (['set'], {}), '(set)\n', (9678, 9683), False, 'from collections import defaultdict\n'), ((9698, 9714), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (9709, 9714), False, 'from collections import defaultdict\n'), ((11671, 11735), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {'n_neighbors': 'r', 'metric': 'self.distance_metric'}), '(n_neighbors=r, metric=self.distance_metric)\n', (11691, 11735), False, 'from sklearn.neighbors import KNeighborsClassifier, NearestNeighbors\n'), ((11897, 11951), 'pandas.concat', 'pd.concat', (['[labeled_data, nan_unlabeled]'], {'join': '"""inner"""'}), "([labeled_data, nan_unlabeled], join='inner')\n", (11906, 11951), True, 'import pandas as pd\n'), ((11975, 12013), 'sklearn.semi_supervised.SelfTrainingClassifier', 'SelfTrainingClassifier', (['base_estimator'], {}), '(base_estimator)\n', (11997, 12013), False, 'from sklearn.semi_supervised import SelfTrainingClassifier\n'), ((13238, 13270), 'numpy.concatenate', 'np.concatenate', (['(low, u)'], {'axis': '(0)'}), '((low, u), axis=0)\n', (13252, 13270), True, 'import numpy as np\n'), ((16707, 16776), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {'n_neighbors': 'self.k', 'metric': 'self.distance_metric'}), '(n_neighbors=self.k, metric=self.distance_metric)\n', (16727, 16776), False, 'from sklearn.neighbors import KNeighborsClassifier, NearestNeighbors\n'), ((20181, 20195), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (20193, 20195), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((3332, 3354), 'numpy.max', 'np.max', (['triangle_upper'], {}), '(triangle_upper)\n', (3338, 3354), True, 'import numpy as np\n'), ((3356, 3378), 'numpy.min', 'np.min', (['triangle_upper'], {}), '(triangle_upper)\n', (3362, 3378), True, 'import numpy as np\n'), ((5271, 5297), 'math.exp', 'math.exp', (['(-(dij / dc) ** 2)'], {}), '(-(dij / dc) ** 2)\n', (5279, 5297), False, 'import math\n'), ((6858, 6885), 'numpy.array', 'np.array', (['delta', 'np.float32'], {}), '(delta, np.float32)\n', (6866, 6885), True, 'import numpy as np\n'), ((6887, 6915), 'numpy.array', 'np.array', (['nneigh', 'np.float32'], {}), '(nneigh, np.float32)\n', (6895, 6915), True, 'import numpy as np\n'), ((9757, 9813), 'sklearn.neighbors.NearestNeighbors', 
'NearestNeighbors', ([], {'n_neighbors': '(r + 1)', 'algorithm': '"""kd_tree"""'}), "(n_neighbors=r + 1, algorithm='kd_tree')\n", (9773, 9813), False, 'from sklearn.neighbors import KNeighborsClassifier, NearestNeighbors\n'), ((13821, 13826), 'sklearn.svm.SVC', 'SVC', ([], {}), '()\n', (13824, 13826), False, 'from sklearn.svm import SVC\n'), ((19667, 19689), 'pandas.DataFrame', 'pd.DataFrame', (['self.low'], {}), '(self.low)\n', (19679, 19689), True, 'import pandas as pd\n'), ((19715, 19735), 'pandas.DataFrame', 'pd.DataFrame', (['self.y'], {}), '(self.y)\n', (19727, 19735), True, 'import pandas as pd\n'), ((7906, 7981), 'pandas.DataFrame', 'pd.DataFrame', (['self.structure'], {'index': "['sample', 'next', 'previous', 'label']"}), "(self.structure, index=['sample', 'next', 'previous', 'label'])\n", (7918, 7981), True, 'import pandas as pd\n'), ((18964, 18984), 'numpy.array_equal', 'np.array_equal', (['r', 'c'], {}), '(r, c)\n', (18978, 18984), True, 'import numpy as np\n'), ((19168, 19212), 'numpy.array', 'np.array', (["self.structure_stdnpf['sample']", 'r'], {}), "(self.structure_stdnpf['sample'], r)\n", (19176, 19212), True, 'import numpy as np\n')] |
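# Compact, self-contained illustration of the two density-peaks quantities that
# STDPNF.__local_density and __min_neighbor_and_distance compute above (rho and
# delta), using a tiny hard-coded 2-D dataset and a fixed cutoff dc chosen for
# the example.
import numpy as np
from scipy.spatial.distance import pdist, squareform

points = np.array([[0.0, 0.0], [0.1, 0.0], [0.0, 0.1], [3.0, 3.0], [3.1, 3.0]])
dist = squareform(pdist(points))
dc = 0.5

# Gaussian local density: closer neighbours contribute more; subtract the
# self-contribution exp(0) = 1.
rho = np.exp(-(dist / dc) ** 2).sum(axis=1) - 1.0

# delta: distance to the nearest point of higher density (the global density
# peak gets its largest distance instead).
delta = np.empty_like(rho)
for i in range(len(points)):
    higher = dist[i, rho > rho[i]]
    delta[i] = higher.min() if higher.size else dist[i].max()

print(np.round(rho, 3))
print(np.round(delta, 3))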
"""
# Definition for a Node.
"""
class TreeNode(object):
def __init__(self, val, children):
self.val = val
self.children = children
class Solution(object):
def levelOrder(self, root):
"""
:type root: Node
:rtype: List[List[int]]
"""
if root is None:
return []
from Queue import Queue
que = Queue()
que.put(root)
ans, tmp, k = [], [], 1
while que.qsize() != 0:
node = que.get()
tmp.append(node.val)
k -= 1
for child in node.children:
que.put(child)
if k == 0:
k = que.qsize()
ans.append(list(tmp))
tmp = []
return ans
node2 = TreeNode(2, [])
node3 = TreeNode(3, [])
children = [node2, node3]
node1 = TreeNode(1, children)
solution = Solution()
print(solution.levelOrder(node1))
| [
"Queue.Queue"
] | [((388, 395), 'Queue.Queue', 'Queue', ([], {}), '()\n', (393, 395), False, 'from Queue import Queue\n')] |
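# The same breadth-first level-order traversal as Solution.levelOrder above,
# rewritten as a standalone Python 3 sketch with collections.deque instead of
# the Queue module; Node and level_order are illustrative names.
from collections import deque, namedtuple

Node = namedtuple('Node', ['val', 'children'])


def level_order(root):
    if root is None:
        return []
    levels, queue = [], deque([root])
    while queue:
        levels.append([node.val for node in queue])
        queue = deque(child for node in queue for child in node.children)
    return levels


tree = Node(1, [Node(2, []), Node(3, [])])
print(level_order(tree))  # -> [[1], [2, 3]]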
from django.db import models
from vaccine_card.vaccination.models import Vaccine
class State(models.Model):
name = models.CharField(max_length=20, verbose_name='Nome')
class Meta:
verbose_name = 'Unidade Federativa'
def __str__(self):
return self.name
class City(models.Model):
name = models.CharField(max_length=50, verbose_name='Nome')
state = models.ForeignKey(State, on_delete=models.CASCADE, verbose_name=State._meta.verbose_name)
class Meta:
verbose_name = 'Município'
def __str__(self):
return self.name
class Address(models.Model):
logradouro = models.CharField(max_length=150, verbose_name='Logradouro')
numero = models.CharField(max_length=4, verbose_name='Número')
complemento = models.CharField(max_length=50, null=True, blank=True, verbose_name='Complemento')
bairro = models.CharField(max_length=150, verbose_name='Bairro')
cep = models.CharField(max_length=8, verbose_name='CEP')
# state = models.ForeignKey(State, on_delete=models.CASCADE, verbose_name=State._meta.verbose_name)
city = models.ForeignKey(City, on_delete=models.CASCADE, verbose_name=City._meta.verbose_name)
class Meta:
verbose_name = 'Endereço'
class HealthCenter(models.Model):
cnes = models.CharField(max_length=7, verbose_name='CNES')
cnpj = models.CharField(max_length=14, verbose_name='CNPJ')
name = models.CharField(max_length=255, verbose_name='Razão Social')
created_at = models.DateTimeField(auto_now_add=True, auto_now=False, verbose_name='Criado em:')
updated_at = models.DateTimeField(auto_now_add=False, auto_now=True, verbose_name='Atualizado em:')
address = models.ManyToManyField(Address, verbose_name=Address._meta.verbose_name)
class Meta:
verbose_name = 'Estabelecimento de Saúde'
verbose_name_plural = 'Estabelecimentos de Saúde'
def __str__(self):
return self.name
class Stock(models.Model):
lot = models.PositiveSmallIntegerField(verbose_name='Lote')
created_at = models.DateTimeField(auto_now_add=True, auto_now=False, verbose_name='Criado em:')
updated_at = models.DateTimeField(auto_now_add=False, auto_now=True, verbose_name='Atualizado em:')
health_center = models.ForeignKey(HealthCenter, on_delete=models.CASCADE,
verbose_name=HealthCenter._meta.verbose_name)
vaccines = models.ManyToManyField(Vaccine, through='VaccineStock', verbose_name=Vaccine._meta.verbose_name)
class Meta:
verbose_name = 'Estoque'
class VaccineStock(models.Model):
amount = models.PositiveSmallIntegerField(verbose_name='Quantidade recebida')
remaining = models.PositiveSmallIntegerField(verbose_name='Quantidade restante')
vaccine = models.ForeignKey(Vaccine, on_delete=models.DO_NOTHING, verbose_name=Vaccine._meta.verbose_name)
stock = models.ForeignKey(Stock, on_delete=models.DO_NOTHING, verbose_name=Stock._meta.verbose_name)
class Meta:
verbose_name = 'Estoque de Vacina'
def __str__(self):
return self.vaccine.name
| [
"django.db.models.ForeignKey",
"django.db.models.ManyToManyField",
"django.db.models.DateTimeField",
"django.db.models.PositiveSmallIntegerField",
"django.db.models.CharField"
] | [((122, 174), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)', 'verbose_name': '"""Nome"""'}), "(max_length=20, verbose_name='Nome')\n", (138, 174), False, 'from django.db import models\n'), ((324, 376), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)', 'verbose_name': '"""Nome"""'}), "(max_length=50, verbose_name='Nome')\n", (340, 376), False, 'from django.db import models\n'), ((389, 483), 'django.db.models.ForeignKey', 'models.ForeignKey', (['State'], {'on_delete': 'models.CASCADE', 'verbose_name': 'State._meta.verbose_name'}), '(State, on_delete=models.CASCADE, verbose_name=State._meta\n .verbose_name)\n', (406, 483), False, 'from django.db import models\n'), ((628, 687), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(150)', 'verbose_name': '"""Logradouro"""'}), "(max_length=150, verbose_name='Logradouro')\n", (644, 687), False, 'from django.db import models\n'), ((701, 754), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(4)', 'verbose_name': '"""Número"""'}), "(max_length=4, verbose_name='Número')\n", (717, 754), False, 'from django.db import models\n'), ((773, 860), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)', 'null': '(True)', 'blank': '(True)', 'verbose_name': '"""Complemento"""'}), "(max_length=50, null=True, blank=True, verbose_name=\n 'Complemento')\n", (789, 860), False, 'from django.db import models\n'), ((869, 924), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(150)', 'verbose_name': '"""Bairro"""'}), "(max_length=150, verbose_name='Bairro')\n", (885, 924), False, 'from django.db import models\n'), ((935, 985), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(8)', 'verbose_name': '"""CEP"""'}), "(max_length=8, verbose_name='CEP')\n", (951, 985), False, 'from django.db import models\n'), ((1102, 1194), 'django.db.models.ForeignKey', 'models.ForeignKey', (['City'], {'on_delete': 'models.CASCADE', 'verbose_name': 'City._meta.verbose_name'}), '(City, on_delete=models.CASCADE, verbose_name=City._meta.\n verbose_name)\n', (1119, 1194), False, 'from django.db import models\n'), ((1288, 1339), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(7)', 'verbose_name': '"""CNES"""'}), "(max_length=7, verbose_name='CNES')\n", (1304, 1339), False, 'from django.db import models\n'), ((1351, 1403), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(14)', 'verbose_name': '"""CNPJ"""'}), "(max_length=14, verbose_name='CNPJ')\n", (1367, 1403), False, 'from django.db import models\n'), ((1415, 1476), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'verbose_name': '"""Razão Social"""'}), "(max_length=255, verbose_name='Razão Social')\n", (1431, 1476), False, 'from django.db import models\n'), ((1495, 1582), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)', 'auto_now': '(False)', 'verbose_name': '"""Criado em:"""'}), "(auto_now_add=True, auto_now=False, verbose_name=\n 'Criado em:')\n", (1515, 1582), False, 'from django.db import models\n'), ((1595, 1686), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(False)', 'auto_now': '(True)', 'verbose_name': '"""Atualizado em:"""'}), "(auto_now_add=False, auto_now=True, verbose_name=\n 'Atualizado em:')\n", (1615, 1686), False, 'from django.db import models\n'), ((1697, 1769), 'django.db.models.ManyToManyField', 
'models.ManyToManyField', (['Address'], {'verbose_name': 'Address._meta.verbose_name'}), '(Address, verbose_name=Address._meta.verbose_name)\n', (1719, 1769), False, 'from django.db import models\n'), ((1983, 2036), 'django.db.models.PositiveSmallIntegerField', 'models.PositiveSmallIntegerField', ([], {'verbose_name': '"""Lote"""'}), "(verbose_name='Lote')\n", (2015, 2036), False, 'from django.db import models\n'), ((2055, 2142), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)', 'auto_now': '(False)', 'verbose_name': '"""Criado em:"""'}), "(auto_now_add=True, auto_now=False, verbose_name=\n 'Criado em:')\n", (2075, 2142), False, 'from django.db import models\n'), ((2155, 2246), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(False)', 'auto_now': '(True)', 'verbose_name': '"""Atualizado em:"""'}), "(auto_now_add=False, auto_now=True, verbose_name=\n 'Atualizado em:')\n", (2175, 2246), False, 'from django.db import models\n'), ((2263, 2371), 'django.db.models.ForeignKey', 'models.ForeignKey', (['HealthCenter'], {'on_delete': 'models.CASCADE', 'verbose_name': 'HealthCenter._meta.verbose_name'}), '(HealthCenter, on_delete=models.CASCADE, verbose_name=\n HealthCenter._meta.verbose_name)\n', (2280, 2371), False, 'from django.db import models\n'), ((2421, 2522), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (['Vaccine'], {'through': '"""VaccineStock"""', 'verbose_name': 'Vaccine._meta.verbose_name'}), "(Vaccine, through='VaccineStock', verbose_name=\n Vaccine._meta.verbose_name)\n", (2443, 2522), False, 'from django.db import models\n'), ((2617, 2685), 'django.db.models.PositiveSmallIntegerField', 'models.PositiveSmallIntegerField', ([], {'verbose_name': '"""Quantidade recebida"""'}), "(verbose_name='Quantidade recebida')\n", (2649, 2685), False, 'from django.db import models\n'), ((2702, 2770), 'django.db.models.PositiveSmallIntegerField', 'models.PositiveSmallIntegerField', ([], {'verbose_name': '"""Quantidade restante"""'}), "(verbose_name='Quantidade restante')\n", (2734, 2770), False, 'from django.db import models\n'), ((2786, 2887), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Vaccine'], {'on_delete': 'models.DO_NOTHING', 'verbose_name': 'Vaccine._meta.verbose_name'}), '(Vaccine, on_delete=models.DO_NOTHING, verbose_name=\n Vaccine._meta.verbose_name)\n', (2803, 2887), False, 'from django.db import models\n'), ((2895, 2992), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Stock'], {'on_delete': 'models.DO_NOTHING', 'verbose_name': 'Stock._meta.verbose_name'}), '(Stock, on_delete=models.DO_NOTHING, verbose_name=Stock.\n _meta.verbose_name)\n', (2912, 2992), False, 'from django.db import models\n')] |
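# Hedged usage sketch for the through model above: because Stock.vaccines uses
# through='VaccineStock', the intermediate row is created explicitly rather than
# via stock.vaccines.add().  It assumes a configured Django project with these
# models installed; register_vaccine_delivery and all field values are
# illustrative only.
def register_vaccine_delivery(health_center, vaccine, lot, amount):
    stock = Stock.objects.create(lot=lot, health_center=health_center)
    return VaccineStock.objects.create(
        stock=stock, vaccine=vaccine, amount=amount, remaining=amount
    )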
import os
import numpy as np
import pytest
import easyidp
from easyidp.core.objects import ReconsProject, Points
from easyidp.io import metashape
module_path = os.path.join(easyidp.__path__[0], "io/tests")
def test_init_reconsproject():
attempt1 = ReconsProject("agisoft")
assert attempt1.software == "metashape"
attempt2 = ReconsProject("Metashape")
assert attempt2.software == "metashape"
with pytest.raises(LookupError):
attempt3 = ReconsProject("not_supported_sfm")
def test_local2world2local():
attempt1 = ReconsProject("agisoft")
attempt1.transform.matrix = np.asarray([[-0.86573098, -0.01489186, 0.08977677, 7.65034123],
[0.06972335, 0.44334391, 0.74589315, 1.85910928],
[-0.05848325, 0.74899678, -0.43972184, -0.1835615],
[0., 0., 0., 1.]], dtype=np.float)
w_pos = Points([0.5, 1, 1.5])
l_pos = Points([7.960064093299587, 1.3019528769064523, -2.6697181763370965])
w_pos_ans = Points([0.4999999999999978, 0.9999999999999993, 1.5])
world_pos = attempt1.local2world(l_pos)
np.testing.assert_array_almost_equal(w_pos_ans.values, world_pos.values, decimal=6)
local_pos = attempt1.world2local(w_pos)
np.testing.assert_array_almost_equal(l_pos.values, local_pos.values, decimal=6)
def test_metashape_project_local_points_on_raw():
test_project_folder = easyidp.test_full_path("data/metashape/goya_test.psx")
chunks = metashape.open_project(test_project_folder)
chunk = chunks[0]
# test for single point
l_pos = Points([7.960064093299587, 1.3019528769064523, -2.6697181763370965])
p_dis_out = chunk.project_local_points_on_raw(l_pos, 0, distortion_correct=False)
p_undis_out = chunk.project_local_points_on_raw(l_pos, 0, distortion_correct=True)
# pro_api_out = np.asarray([2218.883386793118, 1991.4709388015149])
my_undistort_out = Points([2220.854889556147, 1992.6933680261686])
my_distort_out = Points([2218.47960556, 1992.46356322])
np.testing.assert_array_almost_equal(p_dis_out.values, my_distort_out.values)
np.testing.assert_array_almost_equal(p_undis_out.values, my_undistort_out.values)
# test for multiple points
l_pos_points = Points([[7.960064093299587, 1.3019528769064523, -2.6697181763370965],
[7.960064093299587, 1.3019528769064523, -2.6697181763370965]])
p_dis_outs = chunk.project_local_points_on_raw(l_pos_points, 0, distortion_correct=False)
p_undis_outs = chunk.project_local_points_on_raw(l_pos_points, 0, distortion_correct=True)
my_undistort_outs = Points([[2220.854889556147, 1992.6933680261686],
[2220.854889556147, 1992.6933680261686]])
my_distort_outs = Points([[2218.47960556, 1992.46356322],
[2218.47960556, 1992.46356322]])
np.testing.assert_array_almost_equal(p_dis_outs.values, my_distort_outs.values)
np.testing.assert_array_almost_equal(p_undis_outs.values, my_undistort_outs.values)
def test_world2crs_and_on_raw_images():
test_project_folder = easyidp.test_full_path("data/metashape/wheat_tanashi.psx")
chunks = metashape.open_project(test_project_folder)
chunk = chunks[0]
local = Points([11.870130675203006, 0.858098777517136, -12.987136541275])
geocentric = Points([-3943658.7087006606, 3363404.124223561, 3704651.3067566575])
geodetic = Points([139.54033578028609, 35.73756358928734, 96.87827569602781], columns=['lon', 'lat', 'alt'])
idp_world = chunk.local2world(local)
np.testing.assert_array_almost_equal(idp_world.values, geocentric.values, decimal=1)
idp_crs = chunk.world2crs(idp_world)
np.testing.assert_array_almost_equal(idp_crs.values, geodetic.values)
camera_id = 56 # camera_label = 'DJI_0057'
camera_pix_ans = Points([2391.7104647010146, 1481.8987733175165])
idp_cam_pix = chunk.project_local_points_on_raw(local, camera_id, distortion_correct=True)
np.testing.assert_array_almost_equal(camera_pix_ans.values, idp_cam_pix.values)
| [
"easyidp.test_full_path",
"numpy.testing.assert_array_almost_equal",
"easyidp.core.objects.Points",
"easyidp.core.objects.ReconsProject",
"numpy.asarray",
"os.path.join",
"easyidp.io.metashape.open_project",
"pytest.raises"
] | [((162, 207), 'os.path.join', 'os.path.join', (['easyidp.__path__[0]', '"""io/tests"""'], {}), "(easyidp.__path__[0], 'io/tests')\n", (174, 207), False, 'import os\n'), ((256, 280), 'easyidp.core.objects.ReconsProject', 'ReconsProject', (['"""agisoft"""'], {}), "('agisoft')\n", (269, 280), False, 'from easyidp.core.objects import ReconsProject, Points\n'), ((341, 367), 'easyidp.core.objects.ReconsProject', 'ReconsProject', (['"""Metashape"""'], {}), "('Metashape')\n", (354, 367), False, 'from easyidp.core.objects import ReconsProject, Points\n'), ((551, 575), 'easyidp.core.objects.ReconsProject', 'ReconsProject', (['"""agisoft"""'], {}), "('agisoft')\n", (564, 575), False, 'from easyidp.core.objects import ReconsProject, Points\n'), ((608, 821), 'numpy.asarray', 'np.asarray', (['[[-0.86573098, -0.01489186, 0.08977677, 7.65034123], [0.06972335, \n 0.44334391, 0.74589315, 1.85910928], [-0.05848325, 0.74899678, -\n 0.43972184, -0.1835615], [0.0, 0.0, 0.0, 1.0]]'], {'dtype': 'np.float'}), '([[-0.86573098, -0.01489186, 0.08977677, 7.65034123], [0.06972335,\n 0.44334391, 0.74589315, 1.85910928], [-0.05848325, 0.74899678, -\n 0.43972184, -0.1835615], [0.0, 0.0, 0.0, 1.0]], dtype=np.float)\n', (618, 821), True, 'import numpy as np\n'), ((953, 974), 'easyidp.core.objects.Points', 'Points', (['[0.5, 1, 1.5]'], {}), '([0.5, 1, 1.5])\n', (959, 974), False, 'from easyidp.core.objects import ReconsProject, Points\n'), ((987, 1055), 'easyidp.core.objects.Points', 'Points', (['[7.960064093299587, 1.3019528769064523, -2.6697181763370965]'], {}), '([7.960064093299587, 1.3019528769064523, -2.6697181763370965])\n', (993, 1055), False, 'from easyidp.core.objects import ReconsProject, Points\n'), ((1072, 1125), 'easyidp.core.objects.Points', 'Points', (['[0.4999999999999978, 0.9999999999999993, 1.5]'], {}), '([0.4999999999999978, 0.9999999999999993, 1.5])\n', (1078, 1125), False, 'from easyidp.core.objects import ReconsProject, Points\n'), ((1175, 1262), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['w_pos_ans.values', 'world_pos.values'], {'decimal': '(6)'}), '(w_pos_ans.values, world_pos.values,\n decimal=6)\n', (1211, 1262), True, 'import numpy as np\n'), ((1308, 1387), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['l_pos.values', 'local_pos.values'], {'decimal': '(6)'}), '(l_pos.values, local_pos.values, decimal=6)\n', (1344, 1387), True, 'import numpy as np\n'), ((1466, 1520), 'easyidp.test_full_path', 'easyidp.test_full_path', (['"""data/metashape/goya_test.psx"""'], {}), "('data/metashape/goya_test.psx')\n", (1488, 1520), False, 'import easyidp\n'), ((1534, 1577), 'easyidp.io.metashape.open_project', 'metashape.open_project', (['test_project_folder'], {}), '(test_project_folder)\n', (1556, 1577), False, 'from easyidp.io import metashape\n'), ((1642, 1710), 'easyidp.core.objects.Points', 'Points', (['[7.960064093299587, 1.3019528769064523, -2.6697181763370965]'], {}), '([7.960064093299587, 1.3019528769064523, -2.6697181763370965])\n', (1648, 1710), False, 'from easyidp.core.objects import ReconsProject, Points\n'), ((1981, 2028), 'easyidp.core.objects.Points', 'Points', (['[2220.854889556147, 1992.6933680261686]'], {}), '([2220.854889556147, 1992.6933680261686])\n', (1987, 2028), False, 'from easyidp.core.objects import ReconsProject, Points\n'), ((2050, 2088), 'easyidp.core.objects.Points', 'Points', (['[2218.47960556, 1992.46356322]'], {}), '([2218.47960556, 1992.46356322])\n', (2056, 2088), False, 'from 
easyidp.core.objects import ReconsProject, Points\n'), ((2094, 2171), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['p_dis_out.values', 'my_distort_out.values'], {}), '(p_dis_out.values, my_distort_out.values)\n', (2130, 2171), True, 'import numpy as np\n'), ((2176, 2262), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['p_undis_out.values', 'my_undistort_out.values'], {}), '(p_undis_out.values, my_undistort_out.\n values)\n', (2212, 2262), True, 'import numpy as np\n'), ((2309, 2446), 'easyidp.core.objects.Points', 'Points', (['[[7.960064093299587, 1.3019528769064523, -2.6697181763370965], [\n 7.960064093299587, 1.3019528769064523, -2.6697181763370965]]'], {}), '([[7.960064093299587, 1.3019528769064523, -2.6697181763370965], [\n 7.960064093299587, 1.3019528769064523, -2.6697181763370965]])\n', (2315, 2446), False, 'from easyidp.core.objects import ReconsProject, Points\n'), ((2684, 2779), 'easyidp.core.objects.Points', 'Points', (['[[2220.854889556147, 1992.6933680261686], [2220.854889556147, \n 1992.6933680261686]]'], {}), '([[2220.854889556147, 1992.6933680261686], [2220.854889556147, \n 1992.6933680261686]])\n', (2690, 2779), False, 'from easyidp.core.objects import ReconsProject, Points\n'), ((2829, 2901), 'easyidp.core.objects.Points', 'Points', (['[[2218.47960556, 1992.46356322], [2218.47960556, 1992.46356322]]'], {}), '([[2218.47960556, 1992.46356322], [2218.47960556, 1992.46356322]])\n', (2835, 2901), False, 'from easyidp.core.objects import ReconsProject, Points\n'), ((2937, 3016), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['p_dis_outs.values', 'my_distort_outs.values'], {}), '(p_dis_outs.values, my_distort_outs.values)\n', (2973, 3016), True, 'import numpy as np\n'), ((3021, 3109), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['p_undis_outs.values', 'my_undistort_outs.values'], {}), '(p_undis_outs.values, my_undistort_outs\n .values)\n', (3057, 3109), True, 'import numpy as np\n'), ((3173, 3231), 'easyidp.test_full_path', 'easyidp.test_full_path', (['"""data/metashape/wheat_tanashi.psx"""'], {}), "('data/metashape/wheat_tanashi.psx')\n", (3195, 3231), False, 'import easyidp\n'), ((3245, 3288), 'easyidp.io.metashape.open_project', 'metashape.open_project', (['test_project_folder'], {}), '(test_project_folder)\n', (3267, 3288), False, 'from easyidp.io import metashape\n'), ((3325, 3390), 'easyidp.core.objects.Points', 'Points', (['[11.870130675203006, 0.858098777517136, -12.987136541275]'], {}), '([11.870130675203006, 0.858098777517136, -12.987136541275])\n', (3331, 3390), False, 'from easyidp.core.objects import ReconsProject, Points\n'), ((3408, 3476), 'easyidp.core.objects.Points', 'Points', (['[-3943658.7087006606, 3363404.124223561, 3704651.3067566575]'], {}), '([-3943658.7087006606, 3363404.124223561, 3704651.3067566575])\n', (3414, 3476), False, 'from easyidp.core.objects import ReconsProject, Points\n'), ((3492, 3594), 'easyidp.core.objects.Points', 'Points', (['[139.54033578028609, 35.73756358928734, 96.87827569602781]'], {'columns': "['lon', 'lat', 'alt']"}), "([139.54033578028609, 35.73756358928734, 96.87827569602781], columns=\n ['lon', 'lat', 'alt'])\n", (3498, 3594), False, 'from easyidp.core.objects import ReconsProject, Points\n'), ((3636, 3724), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['idp_world.values', 'geocentric.values'], {'decimal': '(1)'}), '(idp_world.values, 
geocentric.values,\n decimal=1)\n', (3672, 3724), True, 'import numpy as np\n'), ((3767, 3836), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['idp_crs.values', 'geodetic.values'], {}), '(idp_crs.values, geodetic.values)\n', (3803, 3836), True, 'import numpy as np\n'), ((3908, 3956), 'easyidp.core.objects.Points', 'Points', (['[2391.7104647010146, 1481.8987733175165]'], {}), '([2391.7104647010146, 1481.8987733175165])\n', (3914, 3956), False, 'from easyidp.core.objects import ReconsProject, Points\n'), ((4057, 4136), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['camera_pix_ans.values', 'idp_cam_pix.values'], {}), '(camera_pix_ans.values, idp_cam_pix.values)\n', (4093, 4136), True, 'import numpy as np\n'), ((422, 448), 'pytest.raises', 'pytest.raises', (['LookupError'], {}), '(LookupError)\n', (435, 448), False, 'import pytest\n'), ((469, 503), 'easyidp.core.objects.ReconsProject', 'ReconsProject', (['"""not_supported_sfm"""'], {}), "('not_supported_sfm')\n", (482, 503), False, 'from easyidp.core.objects import ReconsProject, Points\n')] |
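# Numerically self-contained sketch of the affine transform behind local2world in
# test_local2world2local above: applying the chunk's 4x4 matrix to the local point
# in homogeneous coordinates reproduces the asserted world coordinates.
import numpy as np

matrix = np.array([[-0.86573098, -0.01489186, 0.08977677, 7.65034123],
                  [0.06972335, 0.44334391, 0.74589315, 1.85910928],
                  [-0.05848325, 0.74899678, -0.43972184, -0.1835615],
                  [0.0, 0.0, 0.0, 1.0]])
local = np.array([7.960064093299587, 1.3019528769064523, -2.6697181763370965])

world = (matrix @ np.append(local, 1.0))[:3]
print(np.round(world, 6))  # -> [0.5 1.  1.5], matching w_pos_ans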
#!/usr/bin/env python
from kivy.app import App
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.slider import Slider
from kivy.graphics import Color, Bezier, Line
class BezierTest(FloatLayout):
def __init__(self, points=[], loop=False, *args, **kwargs):
super(BezierTest, self).__init__(*args, **kwargs)
self.d = 10
self.points = points
self.loop = loop
self.current_point = None
with self.canvas:
Color(1.0, 0.0, 0.0)
self.bezier = Bezier(
points=self.points,
segments=150,
loop=self.loop,
dash_length=100,
dash_offset=10)
Color(1.0, 0.0, 1.0)
self.line = Line(
points=self.points+self.points[:2],
dash_offset=10,
dash_length=100)
s = Slider(y=0, pos_hint={'x': .3}, size_hint=(.7, None), height=50)
s.bind(value=self._set_bezier_dash_offset)
self.add_widget(s)
s = Slider(y=50, pos_hint={'x': .3}, size_hint=(.7, None), height=50)
s.bind(value=self._set_line_dash_offset)
self.add_widget(s)
def _set_bezier_dash_offset(self, instance, value):
        # Effect: reduce the dash length while increasing the offset
self.bezier.dash_length = 100 - value
self.bezier.dash_offset = value
def _set_line_dash_offset(self, instance, value):
        # Effect: reduce the dash length while increasing the offset
self.line.dash_length = 100 - value
self.line.dash_offset = value
def on_touch_down(self, touch):
if self.collide_point(touch.pos[0], touch.pos[1]):
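            # Hit-test each control point: a touch within self.d pixels of a
            # point (in widget-local coordinates) grabs it for dragging.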
for i, p in enumerate(list(zip(self.points[::2], self.points[1::2]))):
if (
abs(touch.pos[0] - self.pos[0] - p[0]) < self.d and
abs(touch.pos[1] - self.pos[1] - p[1]) < self.d):
self.current_point = i + 1
return True
return super(BezierTest, self).on_touch_down(touch)
def on_touch_up(self, touch):
if self.collide_point(touch.pos[0], touch.pos[1]):
if self.current_point:
self.current_point = None
return True
return super(BezierTest, self).on_touch_up(touch)
def on_touch_move(self, touch):
if self.collide_point(touch.pos[0], touch.pos[1]):
c = self.current_point
if c:
self.points[(c - 1) * 2] = touch.pos[0] - self.pos[0]
self.points[(c - 1) * 2 + 1] = touch.pos[1] - self.pos[1]
self.bezier.points = self.points
self.line.points = self.points + self.points[:2]
return True
return super(BezierTest, self).on_touch_move(touch)
class Main(App):
def build(self):
from math import cos, sin, radians
x = y = 150
l = 100
# Pacman !
points = [x, y]
for i in range(45, 360, 45):
i = radians(i)
points.extend([x + cos(i) * l, y + sin(i) * l])
return BezierTest(points=points, loop=True)
if __name__ == '__main__':
Main().run()
| [
"kivy.uix.slider.Slider",
"kivy.graphics.Line",
"kivy.graphics.Bezier",
"math.radians",
"math.cos",
"kivy.graphics.Color",
"math.sin"
] | [((921, 987), 'kivy.uix.slider.Slider', 'Slider', ([], {'y': '(0)', 'pos_hint': "{'x': 0.3}", 'size_hint': '(0.7, None)', 'height': '(50)'}), "(y=0, pos_hint={'x': 0.3}, size_hint=(0.7, None), height=50)\n", (927, 987), False, 'from kivy.uix.slider import Slider\n'), ((1077, 1144), 'kivy.uix.slider.Slider', 'Slider', ([], {'y': '(50)', 'pos_hint': "{'x': 0.3}", 'size_hint': '(0.7, None)', 'height': '(50)'}), "(y=50, pos_hint={'x': 0.3}, size_hint=(0.7, None), height=50)\n", (1083, 1144), False, 'from kivy.uix.slider import Slider\n'), ((476, 496), 'kivy.graphics.Color', 'Color', (['(1.0)', '(0.0)', '(0.0)'], {}), '(1.0, 0.0, 0.0)\n', (481, 496), False, 'from kivy.graphics import Color, Bezier, Line\n'), ((524, 617), 'kivy.graphics.Bezier', 'Bezier', ([], {'points': 'self.points', 'segments': '(150)', 'loop': 'self.loop', 'dash_length': '(100)', 'dash_offset': '(10)'}), '(points=self.points, segments=150, loop=self.loop, dash_length=100,\n dash_offset=10)\n', (530, 617), False, 'from kivy.graphics import Color, Bezier, Line\n'), ((728, 748), 'kivy.graphics.Color', 'Color', (['(1.0)', '(0.0)', '(1.0)'], {}), '(1.0, 0.0, 1.0)\n', (733, 748), False, 'from kivy.graphics import Color, Bezier, Line\n'), ((773, 848), 'kivy.graphics.Line', 'Line', ([], {'points': '(self.points + self.points[:2])', 'dash_offset': '(10)', 'dash_length': '(100)'}), '(points=self.points + self.points[:2], dash_offset=10, dash_length=100)\n', (777, 848), False, 'from kivy.graphics import Color, Bezier, Line\n'), ((3080, 3090), 'math.radians', 'radians', (['i'], {}), '(i)\n', (3087, 3090), False, 'from math import cos, sin, radians\n'), ((3122, 3128), 'math.cos', 'cos', (['i'], {}), '(i)\n', (3125, 3128), False, 'from math import cos, sin, radians\n'), ((3138, 3144), 'math.sin', 'sin', (['i'], {}), '(i)\n', (3141, 3144), False, 'from math import cos, sin, radians\n')] |
#!/usr/bin/env python
# encoding: utf-8
import sys
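# Python 2 only: sys.setdefaultencoding is removed from the sys module during
# interpreter start-up, and reload(sys) makes it available again.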
reload(sys)
sys.setdefaultencoding("utf-8") | [
"sys.setdefaultencoding"
] | [((65, 96), 'sys.setdefaultencoding', 'sys.setdefaultencoding', (['"""utf-8"""'], {}), "('utf-8')\n", (87, 96), False, 'import sys\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from config import CONFIG
import json
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt # pylint: disable=g-import-not-at-top
import io
import math
import os
import time
from absl import flags
from absl import logging
from easydict import EasyDict
import matplotlib
matplotlib.use('Agg')
FLAGS = flags.FLAGS
def visualize_batch(data, global_step, batch_size, num_steps):
"""Visualizes a batch."""
frames = data['frames']
frames_list = tf.unstack(frames, num=num_steps, axis=1)
frames_summaries = tf.concat(frames_list, axis=2)
batch_list = tf.split(frames_summaries, batch_size, axis=0)
batch_summaries = tf.concat(batch_list, axis=1)
tf.summary.image('train_batch', batch_summaries, step=global_step)
def visualize_nearest_neighbours(model, data, global_step, batch_size,
num_steps, num_frames_per_step, split):
"""Visualize nearest neighbours in embedding space."""
# Set learning_phase to False to use models in inference mode.
tf.keras.backend.set_learning_phase(0)
cnn = model['cnn']
emb = model['emb']
if 'tcn' in CONFIG.TRAINING_ALGO:
cnn_feats = get_cnn_feats(
cnn, data, training=False, num_steps=2 * num_steps)
emb_feats = emb(cnn_feats, 2 * num_steps)
emb_feats = tf.stack(
tf.split(emb_feats, 2 * num_steps, axis=0)[::2], axis=1)
else:
cnn_feats = get_cnn_feats(cnn, data, training=False)
emb_feats = emb(cnn_feats, num_steps)
emb_feats = tf.stack(tf.split(emb_feats, num_steps, axis=0), axis=1)
query_feats = emb_feats[0]
if CONFIG.OPTICALFLOW:
frames = data['video_frames']
else:
frames = data['frames']
image_list = tf.unstack(frames, num=batch_size, axis=0)
if 'tcn' in CONFIG.TRAINING_ALGO:
im_list = [image_list[0]
[num_frames_per_step - 1::num_frames_per_step][::2]]
else:
im_list = [image_list[0][num_frames_per_step - 1::num_frames_per_step]]
sim_matrix = np.zeros(
(batch_size-1, num_steps, num_steps), dtype=np.float32)
for i in range(1, batch_size):
candidate_feats = emb_feats[i]
if 'tcn' in CONFIG.TRAINING_ALGO:
img_list = tf.unstack(image_list[i], num=2 * num_steps * num_frames_per_step,
axis=0)[num_frames_per_step - 1::num_frames_per_step][::2]
else:
img_list = tf.unstack(image_list[i], num=num_steps * num_frames_per_step,
axis=0)[num_frames_per_step - 1::num_frames_per_step]
nn_img_list = []
for j in range(num_steps):
curr_query_feats = tf.tile(query_feats[j:j+1], [num_steps, 1])
mean_squared_distance = tf.reduce_mean(
tf.math.squared_difference(curr_query_feats, candidate_feats), axis=1)
sim_matrix[i-1, j] = softmax(-1.0 * mean_squared_distance)
nn_img_list.append(img_list[tf.argmin(mean_squared_distance)])
nn_img = tf.stack(nn_img_list, axis=0)
im_list.append(nn_img)
def vstack(im):
return tf.concat(tf.unstack(im, num=num_steps), axis=1)
summary_im = tf.expand_dims(tf.concat([vstack(im) for im in im_list],
axis=0), axis=0)
tf.summary.image('%s/nn' % split, summary_im, step=global_step)
# Convert sim_matrix to float32 as summary_image doesn't take float64
sim_matrix = sim_matrix.astype(np.float32)
tf.summary.image('%s/similarity_matrix' % split,
np.expand_dims(sim_matrix, axis=3), step=global_step)
def softmax(w, t=1.0):
e = np.exp(np.array(w) / t)
dist = e / np.sum(e)
return dist
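def _softmax_example():
    # Hedged usage sketch (added for illustration, not part of the original
    # file): equal inputs produce a uniform distribution, and a lower
    # temperature t sharpens the result.
    return softmax(np.array([0.0, 0.0]))  # -> array([0.5, 0.5])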
def random_choice_noreplace(m, n, axis=-1):
    # Generate m random permutations of range(0, n)
# NumPy version: np.random.rand(m,n).argsort(axis=axis)
return tf.cast(tf.argsort(tf.random.uniform((m, n)), axis=axis), tf.int64)
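def _random_choice_noreplace_example():
    # Illustrative sketch (added, not in the original file): every row of the
    # result is a random permutation of range(0, n), so indices never repeat
    # within a row.
    return random_choice_noreplace(4, 10)  # int64 tensor of shape (4, 10)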
def gen_cycles(num_cycles, batch_size, cycle_len):
"""Generate cycles for alignment."""
random_cycles = random_choice_noreplace(
num_cycles, batch_size)[:, :cycle_len]
return random_cycles
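def _gen_cycles_example():
    # Hypothetical usage (added for illustration): 20 cycles over a batch of
    # 8 sequences, each cycle visiting 4 distinct sequence indices.
    return gen_cycles(num_cycles=20, batch_size=8, cycle_len=4)  # shape (20, 4)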
def get_warmup_lr(lr, global_step, lr_params):
"""Returns learning rate during warm up phase."""
if lr_params.NUM_WARMUP_STEPS > 0:
global_steps_int = tf.cast(global_step, tf.int32)
warmup_steps_int = tf.constant(
lr_params.NUM_WARMUP_STEPS, dtype=tf.int32)
global_steps_float = tf.cast(global_steps_int, tf.float32)
warmup_steps_float = tf.cast(warmup_steps_int, tf.float32)
warmup_percent_done = global_steps_float / warmup_steps_float
warmup_lr = lr_params.INITIAL_LR * warmup_percent_done
is_warmup = tf.cast(global_steps_int < warmup_steps_int, tf.float32)
lr = (1.0 - is_warmup) * lr + is_warmup * warmup_lr
return lr
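def _get_warmup_lr_example():
    # Worked example (added for illustration; the parameter values are made
    # up): with 1000 warm-up steps and an initial LR of 0.1, step 500 is 50%
    # through warm-up, so the returned LR is 0.5 * 0.1 = 0.05. Once
    # global_step >= 1000, the incoming lr is returned unchanged.
    params = EasyDict(NUM_WARMUP_STEPS=1000, INITIAL_LR=0.1)
    return get_warmup_lr(tf.constant(0.1), tf.constant(500), params)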
# Minimally adapted from Tensorflow object_detection code.
def manual_stepping(global_step, boundaries, rates):
boundaries = [0] + boundaries
num_boundaries = len(boundaries)
rate_index = tf.reduce_max(
tf.where(
tf.greater_equal(global_step, boundaries),
list(range(num_boundaries)), [0] * num_boundaries))
return tf.reduce_sum(rates * tf.one_hot(rate_index, depth=num_boundaries))
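def _manual_stepping_example():
    # Worked example (added for illustration): with boundaries [100, 200] and
    # rates [1e-2, 1e-3, 1e-4], global step 150 has passed only the first
    # boundary, so the selected rate is 1e-3.
    return manual_stepping(tf.constant(150), [100, 200], [1e-2, 1e-3, 1e-4])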
def get_lr_fn(optimizer_config):
"""Returns function that provides current learning rate based on config.
NOTE: This returns a function as in Eager we need to call assign to update
the learning rate.
Args:
optimizer_config: EasyDict, contains params required to initialize the
learning rate and the learning rate decay function.
Returns:
lr_fn: function, this can be called to return the current learning rate
based on the provided config.
Raises:
ValueError: in case invalid params have been passed in the config.
"""
lr_params = optimizer_config.LR
# pylint: disable=g-long-lambda
if lr_params.DECAY_TYPE == 'exp_decay':
def lr_fn(lr, global_step): return tf.train.exponential_decay(
lr,
global_step,
lr_params.EXP_DECAY_STEPS,
lr_params.EXP_DECAY_RATE,
staircase=True)()
elif lr_params.DECAY_TYPE == 'manual':
lr_step_boundaries = [int(x)
for x in lr_params.MANUAL_LR_STEP_BOUNDARIES]
f = lr_params.MANUAL_LR_DECAY_RATE
learning_rate_sequence = [(lr_params.INITIAL_LR) * f**p
for p in range(len(lr_step_boundaries) + 1)]
def lr_fn(lr, global_step): return manual_stepping(
global_step, lr_step_boundaries, learning_rate_sequence)
elif lr_params.DECAY_TYPE == 'fixed':
def lr_fn(lr, global_step): return lr_params.INITIAL_LR
elif lr_params.DECAY_TYPE == 'poly':
def lr_fn(lr, global_step): return tf.train.polynomial_decay(
lr,
global_step,
CONFIG.TRAIN.MAX_ITERS,
end_learning_rate=0.0,
power=1.0,
cycle=False)
else:
        raise ValueError('Learning rate decay type %s not supported. Only the '
                         'following decay types are supported: fixed, '
                         'exp_decay, manual, and poly.' % lr_params.DECAY_TYPE)
return (lambda lr, global_step: get_warmup_lr(lr_fn(lr, global_step),
global_step, lr_params))
def get_optimizer(optimizer_config, learning_rate):
"""Returns optimizer based on config and learning rate."""
if optimizer_config.TYPE == 'AdamOptimizer':
opt = tf.keras.optimizers.Adam(learning_rate=learning_rate)
elif optimizer_config.TYPE == 'MomentumOptimizer':
opt = tf.keras.optimizers.SGD(
learning_rate=learning_rate, momentum=0.9)
else:
        raise ValueError('Optimizer %s not supported. Only the following '
                         'optimizers are supported: AdamOptimizer, '
                         'MomentumOptimizer.' % optimizer_config.TYPE)
return opt
def get_lr_opt_global_step():
"""Intializes learning rate, optimizer and global step."""
optimizer = get_optimizer(CONFIG.OPTIMIZER, CONFIG.OPTIMIZER.LR.INITIAL_LR)
global_step = optimizer.iterations
learning_rate = optimizer.learning_rate
return learning_rate, optimizer, global_step
def create_ckpt(logdir, restore=False, **ckpt_objects):
# Since model is a dict we can insert multiple modular networks in this dict.
checkpoint = tf.train.Checkpoint(**ckpt_objects)
ckpt_manager = tf.train.CheckpointManager(
checkpoint,
directory=logdir,
max_to_keep=10,
keep_checkpoint_every_n_hours=1)
status = checkpoint.restore(
ckpt_manager.latest_checkpoint) if restore else -1
return ckpt_manager, status, checkpoint
def restore_ckpt(logdir, **ckpt_objects):
"""Create and restore checkpoint (if one exists on the path)."""
# Instantiate checkpoint and restore from any pre-existing checkpoint.
# Since model is a dict we can insert multiple modular networks in this dict.
checkpoint = tf.train.Checkpoint(**ckpt_objects)
ckpt_manager = tf.train.CheckpointManager(
checkpoint,
directory=logdir,
max_to_keep=10,
keep_checkpoint_every_n_hours=1)
status = checkpoint.restore(ckpt_manager.latest_checkpoint)
return ckpt_manager, status, checkpoint
def to_dict(config):
if isinstance(config, list):
return [to_dict(c) for c in config]
elif isinstance(config, EasyDict):
return dict([(k, to_dict(v)) for k, v in config.items()])
else:
return config
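def _to_dict_example():
    # Illustrative sketch (added, not in the original file): nested EasyDicts
    # are converted back to plain dicts so the config can be JSON-serialized
    # in setup_train_dir.
    nested = EasyDict({'TRAIN': EasyDict({'BATCH_SIZE': 4})})
    return to_dict(nested)  # -> {'TRAIN': {'BATCH_SIZE': 4}}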
def setup_train_dir(logdir, overwrite=False, force_train=True):
"""Setups directory for training."""
tf.io.gfile.makedirs(logdir)
config_path = os.path.join(logdir, 'config.json')
if not os.path.exists(config_path) or overwrite:
logging.info(
'Using the existing passed in config as no config.json file exists in '
'%s', logdir)
with tf.io.gfile.GFile(config_path, 'w') as config_file:
config = dict([(k, to_dict(v)) for k, v in CONFIG.items()])
json.dump(config, config_file, sort_keys=True, indent=4)
else:
logging.info(
'Using config from config.json that exists in %s.', logdir)
with tf.io.gfile.GFile(config_path, 'r') as config_file:
config_dict = json.load(config_file)
CONFIG.update(config_dict)
train_logs_dir = os.path.join(logdir, 'train.logs')
if os.path.exists(train_logs_dir) and not force_train:
raise ValueError('You might be overwriting a directory that already '
'has train_logs. Please provide a new logdir name in '
'config or pass --force_train while launching script.')
tf.io.gfile.makedirs(train_logs_dir)
def setup_eval_dir(logdir, config_timeout_seconds=1):
"""Setups directory for evaluation."""
tf.io.gfile.makedirs(logdir)
tf.io.gfile.makedirs(os.path.join(logdir, 'eval_logs'))
config_path = os.path.join(logdir, 'config.json')
while not tf.io.gfile.exists(config_path):
        logging.info('Waiting for config to exist. Going to sleep '
                     'for %s secs.', config_timeout_seconds)
time.sleep(config_timeout_seconds)
while True:
with tf.io.gfile.GFile(config_path, 'r') as config_file:
config_dict = json.load(config_file)
if config_dict is None:
time.sleep(config_timeout_seconds)
else:
break
CONFIG.update(config_dict)
def get_data(iterator):
"""Return a data dict which contains all the requested sequences."""
data = iterator.get_next()
return data, data['chosen_steps'], data['seq_lens']
@tf.function
def get_cnn_feats(cnn, data, training, num_steps=None):
"""Passes data through base CNN."""
if num_steps is None:
if training:
num_steps = CONFIG.TRAIN.NUM_FRAMES * CONFIG.DATA.NUM_STEPS
else:
num_steps = CONFIG.EVAL.NUM_FRAMES * CONFIG.DATA.NUM_STEPS
cnn.num_steps = num_steps
cnn_feats = cnn(data['frames'])
return cnn_feats
def get_context_steps(step):
num_steps = CONFIG.DATA.NUM_STEPS
stride = CONFIG.DATA.FRAME_STRIDE
# We don't want to see the future.
steps = np.arange(step - (num_steps - 1) * stride, step + stride, stride)
return steps
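def _get_context_steps_example():
    # Worked example (added for illustration; assumes CONFIG.DATA.NUM_STEPS is
    # 4 and CONFIG.DATA.FRAME_STRIDE is 2): for step 10 this returns
    # np.arange(10 - 3 * 2, 10 + 2, 2) = [4, 6, 8, 10], i.e. only frames at or
    # before the current step are included.
    return get_context_steps(10)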
def get_indices(curr_idx, num_steps, seq_len):
steps = range(curr_idx, curr_idx + num_steps)
single_steps = np.concatenate([get_context_steps(step) for step in steps])
single_steps = np.concatenate(np.array(list(map(get_context_steps,
np.arange(curr_idx, curr_idx + num_steps)))))
single_steps = np.maximum(0, single_steps)
single_steps = np.minimum(seq_len, single_steps)
return single_steps
def get_embeddings_dataset(model, iterator, frames_per_batch,
keep_data=False, optical_flow=False, keep_labels=True,
max_embs=None, callbacks=[]):
"""Get embeddings from a one epoch iterator."""
keep_labels = keep_labels and CONFIG.DATA.FRAME_LABELS
num_frames_per_step = CONFIG.DATA.NUM_STEPS
cnn = model['cnn']
emb = model['emb']
embs_list = []
labels_list = []
steps_list = []
seq_lens_list = []
names_list = []
seq_labels_list = []
if keep_data:
frames_list = []
if optical_flow:
frame_original_list = []
n = 0
def cond(n):
if max_embs is None:
return True
else:
return n < max_embs
# Make Recurrent Layers stateful, set batch size.
# We do this as we are embedding the whole sequence and that can take
# more than one batch to be passed and we don't want to automatically
# reset hidden states after each batch.
if CONFIG.MODEL.EMBEDDER_TYPE == 'convgru':
for gru_layer in emb.gru_layers:
gru_layer.stateful = True
gru_layer.input_spec[0].shape = [1, ]
while cond(n):
try:
print(n)
embs = []
labels = []
steps = []
seq_lens = []
names = []
seq_labels = []
if keep_data:
frames = []
if optical_flow:
frame_original = []
# Reset GRU states for each video.
if CONFIG.MODEL.EMBEDDER_TYPE == 'convgru':
for gru_layer in emb.gru_layers:
gru_layer.reset_states()
data, chosen_steps, seq_len = get_data(iterator)
seq_len = seq_len.numpy()[0]
num_batches = int(math.ceil(float(seq_len)/frames_per_batch))
for i in range(num_batches):
if (i + 1) * frames_per_batch > seq_len:
num_steps = seq_len - i * frames_per_batch
else:
num_steps = frames_per_batch
curr_idx = i * frames_per_batch
curr_data = {}
for k, v in data.items():
# Need to do this as some modalities might not exist.
if len(v.shape) > 1 and v.shape[1] != 0:
idxes = get_indices(curr_idx, num_steps, seq_len)
curr_data[k] = tf.gather(v, idxes, axis=1)
else:
curr_data[k] = v
cnn_feats = get_cnn_feats(cnn, curr_data,
num_steps=num_frames_per_step * num_steps,
training=False)
emb_feats = emb(cnn_feats, num_steps)
logging.debug('On sequence number %d, frames embedded %d', n,
curr_idx + num_steps)
# np.save(tf.io.gfile.GFile('/air/team/saman/test_weights_old.npy', 'w'), cnn.weights[0].numpy())
# np.save(tf.io.gfile.GFile('/air/team/saman/test_batch_old.npy', 'w'), curr_data["frames"])
# np.save(tf.io.gfile.GFile('/air/team/saman/test_cnn_old.npy', 'w'), cnn_feats.numpy())
# np.save(tf.io.gfile.GFile('/air/team/saman/test_emb_old.npy', 'w'), emb_feats.numpy())
embs.append(emb_feats.numpy())
for f in callbacks:
f(np.concatenate(embs), data, chosen_steps, seq_len)
steps.append(chosen_steps.numpy()[0])
seq_lens.append(seq_len * [seq_len])
all_labels = data['frame_labels'].numpy()[0]
name = data['name'].numpy()[0]
names.append(seq_len * [name])
seq_label = data['seq_labels'].numpy()[0]
seq_labels.append(seq_len * [seq_label])
labels.append(all_labels)
embs = np.concatenate(embs, axis=0)
labels = np.concatenate(labels, axis=0)
steps = np.concatenate(steps, axis=0)
seq_lens = np.concatenate(seq_lens, axis=0)
names = np.concatenate(names, axis=0)
seq_labels = np.concatenate(seq_labels, axis=0)
if keep_data:
frames.append(data['frames'].numpy()[0])
frames = np.concatenate(frames, axis=0)
if optical_flow:
frame_original.append(data['video_frames'].numpy()[0])
frame_original = np.concatenate(frame_original, axis=0)
if keep_labels:
labels = labels[~np.isnan(embs).any(axis=1)]
assert len(embs) == len(labels)
seq_labels = seq_labels[~np.isnan(embs).any(axis=1)]
names = names[~np.isnan(embs).any(axis=1)]
seq_lens = seq_lens[~np.isnan(embs).any(axis=1)]
steps = steps[~np.isnan(embs).any(axis=1)]
if keep_data:
frames = frames[~np.isnan(embs).any(axis=1)]
if optical_flow:
frame_original = frame_original[~np.isnan(embs).any(axis=1)]
embs = embs[~np.isnan(embs).any(axis=1)]
assert len(embs) == len(seq_lens)
assert len(embs) == len(steps)
assert len(names) == len(steps)
embs_list.append(embs)
if keep_labels:
labels_list.append(labels)
seq_labels_list.append(seq_labels)
steps_list.append(steps)
seq_lens_list.append(seq_lens)
names_list.append(names)
if keep_data:
frames_list.append(frames)
if optical_flow:
frame_original_list.append(frame_original)
n += 1
except tf.errors.OutOfRangeError:
logging.info('Finished embedding the dataset.')
break
dataset = {'embs': embs_list,
'seq_lens': seq_lens_list,
'steps': steps_list,
'names': names_list,
'seq_labels': seq_labels_list}
if keep_data:
dataset['frames'] = frames_list
if optical_flow:
dataset['frames_original'] = frame_original_list
if keep_labels:
dataset['labels'] = labels_list
# Reset statefulness to recurrent layers for other evaluation tasks.
if CONFIG.MODEL.EMBEDDER_TYPE == 'convgru':
for gru_layer in emb.gru_layers:
gru_layer.stateful = False
return dataset
def gen_plot(x, y):
"""Create a pyplot, save to buffer and return TB compatible image."""
plt.figure()
plt.plot(x, y)
plt.title('Val Accuracy')
plt.ylim(0, 1)
plt.tight_layout()
buf = io.BytesIO()
plt.savefig(buf, format='png')
buf.seek(0)
# Convert PNG buffer to TF image
image = tf.image.decode_png(buf.getvalue(), channels=4)
# Add the batch dimension
image = tf.expand_dims(image, 0)
return image
class Stopwatch(object):
"""Simple timer for measuring elapsed time."""
def __init__(self):
self.reset()
def elapsed(self):
return time.time() - self.time
def done(self, target_interval):
return self.elapsed() >= target_interval
def reset(self):
self.time = time.time()
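def _stopwatch_example():
    # Usage sketch (added for illustration): poll the timer to decide when a
    # periodic task is due, then reset it for the next interval.
    timer = Stopwatch()
    if timer.done(target_interval=600):
        timer.reset()
    return timer.elapsed()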
def set_learning_phase(f):
"""Sets the correct learning phase before calling function f."""
def wrapper(*args, **kwargs):
"""Calls the function f after setting proper learning phase."""
if 'training' not in kwargs:
raise ValueError('Function called with set_learning_phase decorator which'
' does not have training argument.')
training = kwargs['training']
if training:
# Set learning_phase to True to use models in training mode.
tf.keras.backend.set_learning_phase(1)
else:
# Set learning_phase to False to use models in inference mode.
tf.keras.backend.set_learning_phase(0)
return f(*args, **kwargs)
return wrapper
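@set_learning_phase
def _embedder_inference_example(model, data, **kwargs):
    # Usage sketch (added for illustration): the decorator requires an
    # explicit training= keyword, e.g.
    # _embedder_inference_example(model, data, training=False), and flips the
    # Keras learning phase before this body runs.
    return get_cnn_feats(model['cnn'], data, training=kwargs['training'])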
def load_config(config_path):
config = None
if os.path.exists(config_path):
with open(config_path) as f:
config = json.load(f)
assert config is not None, "config file is not provided or is corrupted"
return config
def prepare_gpu(ind=-1):
ind = int(ind)
GPUS = tf.config.experimental.list_physical_devices('GPU')
if GPUS:
if ind > -1:
tf.config.experimental.set_visible_devices(GPUS[ind], 'GPU')
try:
# Currently, memory growth needs to be the same across GPUs
for gpu in GPUS:
tf.config.experimental.set_memory_growth(gpu, True)
logical_gpus = tf.config.experimental.list_logical_devices('GPU')
logging.info([len(GPUS), "Physical GPUs,", len(logical_gpus),
"Logical GPUs"])
except RuntimeError as e:
# Memory growth must be set before GPUs have been initialized
logging.info(e)
os.environ["CUDA_VISIBLE_DEVICES"] = str(ind)
| [
"tensorflow.unstack",
"tensorflow.train.Checkpoint",
"tensorflow.tile",
"tensorflow.split",
"io.BytesIO",
"absl.logging.info",
"time.sleep",
"tensorflow.config.experimental.list_logical_devices",
"numpy.array",
"tensorflow.config.experimental.set_visible_devices",
"config.CONFIG.items",
"tensorflow.cast",
"numpy.arange",
"tensorflow.summary.image",
"os.path.exists",
"tensorflow.io.gfile.GFile",
"tensorflow.math.squared_difference",
"json.dump",
"matplotlib.pyplot.plot",
"tensorflow.keras.optimizers.SGD",
"tensorflow.concat",
"numpy.concatenate",
"config.CONFIG.update",
"tensorflow.train.exponential_decay",
"matplotlib.pyplot.ylim",
"numpy.maximum",
"tensorflow.train.CheckpointManager",
"tensorflow.io.gfile.exists",
"tensorflow.stack",
"tensorflow.random.uniform",
"tensorflow.one_hot",
"matplotlib.pyplot.savefig",
"matplotlib.use",
"tensorflow.gather",
"numpy.isnan",
"matplotlib.pyplot.title",
"tensorflow.expand_dims",
"time.time",
"tensorflow.train.polynomial_decay",
"tensorflow.argmin",
"numpy.minimum",
"tensorflow.config.experimental.set_memory_growth",
"os.path.join",
"tensorflow.io.gfile.makedirs",
"numpy.sum",
"numpy.zeros",
"matplotlib.pyplot.figure",
"tensorflow.constant",
"tensorflow.keras.optimizers.Adam",
"matplotlib.pyplot.tight_layout",
"numpy.expand_dims",
"json.load",
"absl.logging.debug",
"tensorflow.keras.backend.set_learning_phase",
"tensorflow.greater_equal",
"tensorflow.config.experimental.list_physical_devices"
] | [((406, 427), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (420, 427), False, 'import matplotlib\n'), ((591, 632), 'tensorflow.unstack', 'tf.unstack', (['frames'], {'num': 'num_steps', 'axis': '(1)'}), '(frames, num=num_steps, axis=1)\n', (601, 632), True, 'import tensorflow as tf\n'), ((656, 686), 'tensorflow.concat', 'tf.concat', (['frames_list'], {'axis': '(2)'}), '(frames_list, axis=2)\n', (665, 686), True, 'import tensorflow as tf\n'), ((704, 750), 'tensorflow.split', 'tf.split', (['frames_summaries', 'batch_size'], {'axis': '(0)'}), '(frames_summaries, batch_size, axis=0)\n', (712, 750), True, 'import tensorflow as tf\n'), ((773, 802), 'tensorflow.concat', 'tf.concat', (['batch_list'], {'axis': '(1)'}), '(batch_list, axis=1)\n', (782, 802), True, 'import tensorflow as tf\n'), ((807, 873), 'tensorflow.summary.image', 'tf.summary.image', (['"""train_batch"""', 'batch_summaries'], {'step': 'global_step'}), "('train_batch', batch_summaries, step=global_step)\n", (823, 873), True, 'import tensorflow as tf\n'), ((1150, 1188), 'tensorflow.keras.backend.set_learning_phase', 'tf.keras.backend.set_learning_phase', (['(0)'], {}), '(0)\n', (1185, 1188), True, 'import tensorflow as tf\n'), ((1874, 1916), 'tensorflow.unstack', 'tf.unstack', (['frames'], {'num': 'batch_size', 'axis': '(0)'}), '(frames, num=batch_size, axis=0)\n', (1884, 1916), True, 'import tensorflow as tf\n'), ((2169, 2235), 'numpy.zeros', 'np.zeros', (['(batch_size - 1, num_steps, num_steps)'], {'dtype': 'np.float32'}), '((batch_size - 1, num_steps, num_steps), dtype=np.float32)\n', (2177, 2235), True, 'import numpy as np\n'), ((3456, 3519), 'tensorflow.summary.image', 'tf.summary.image', (["('%s/nn' % split)", 'summary_im'], {'step': 'global_step'}), "('%s/nn' % split, summary_im, step=global_step)\n", (3472, 3519), True, 'import tensorflow as tf\n'), ((8624, 8659), 'tensorflow.train.Checkpoint', 'tf.train.Checkpoint', ([], {}), '(**ckpt_objects)\n', (8643, 8659), True, 'import tensorflow as tf\n'), ((8679, 8788), 'tensorflow.train.CheckpointManager', 'tf.train.CheckpointManager', (['checkpoint'], {'directory': 'logdir', 'max_to_keep': '(10)', 'keep_checkpoint_every_n_hours': '(1)'}), '(checkpoint, directory=logdir, max_to_keep=10,\n keep_checkpoint_every_n_hours=1)\n', (8705, 8788), True, 'import tensorflow as tf\n'), ((9241, 9276), 'tensorflow.train.Checkpoint', 'tf.train.Checkpoint', ([], {}), '(**ckpt_objects)\n', (9260, 9276), True, 'import tensorflow as tf\n'), ((9296, 9405), 'tensorflow.train.CheckpointManager', 'tf.train.CheckpointManager', (['checkpoint'], {'directory': 'logdir', 'max_to_keep': '(10)', 'keep_checkpoint_every_n_hours': '(1)'}), '(checkpoint, directory=logdir, max_to_keep=10,\n keep_checkpoint_every_n_hours=1)\n', (9322, 9405), True, 'import tensorflow as tf\n'), ((9891, 9919), 'tensorflow.io.gfile.makedirs', 'tf.io.gfile.makedirs', (['logdir'], {}), '(logdir)\n', (9911, 9919), True, 'import tensorflow as tf\n'), ((9938, 9973), 'os.path.join', 'os.path.join', (['logdir', '"""config.json"""'], {}), "(logdir, 'config.json')\n", (9950, 9973), False, 'import os\n'), ((10640, 10674), 'os.path.join', 'os.path.join', (['logdir', '"""train.logs"""'], {}), "(logdir, 'train.logs')\n", (10652, 10674), False, 'import os\n'), ((10977, 11013), 'tensorflow.io.gfile.makedirs', 'tf.io.gfile.makedirs', (['train_logs_dir'], {}), '(train_logs_dir)\n', (10997, 11013), True, 'import tensorflow as tf\n'), ((11117, 11145), 'tensorflow.io.gfile.makedirs', 'tf.io.gfile.makedirs', (['logdir'], {}), 
'(logdir)\n', (11137, 11145), True, 'import tensorflow as tf\n'), ((11224, 11259), 'os.path.join', 'os.path.join', (['logdir', '"""config.json"""'], {}), "(logdir, 'config.json')\n", (11236, 11259), False, 'import os\n'), ((11727, 11753), 'config.CONFIG.update', 'CONFIG.update', (['config_dict'], {}), '(config_dict)\n', (11740, 11753), False, 'from config import CONFIG\n'), ((12501, 12566), 'numpy.arange', 'np.arange', (['(step - (num_steps - 1) * stride)', '(step + stride)', 'stride'], {}), '(step - (num_steps - 1) * stride, step + stride, stride)\n', (12510, 12566), True, 'import numpy as np\n'), ((12952, 12979), 'numpy.maximum', 'np.maximum', (['(0)', 'single_steps'], {}), '(0, single_steps)\n', (12962, 12979), True, 'import numpy as np\n'), ((12999, 13032), 'numpy.minimum', 'np.minimum', (['seq_len', 'single_steps'], {}), '(seq_len, single_steps)\n', (13009, 13032), True, 'import numpy as np\n'), ((19706, 19718), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (19716, 19718), True, 'import matplotlib.pyplot as plt\n'), ((19723, 19737), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {}), '(x, y)\n', (19731, 19737), True, 'import matplotlib.pyplot as plt\n'), ((19742, 19767), 'matplotlib.pyplot.title', 'plt.title', (['"""Val Accuracy"""'], {}), "('Val Accuracy')\n", (19751, 19767), True, 'import matplotlib.pyplot as plt\n'), ((19772, 19786), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(1)'], {}), '(0, 1)\n', (19780, 19786), True, 'import matplotlib.pyplot as plt\n'), ((19791, 19809), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (19807, 19809), True, 'import matplotlib.pyplot as plt\n'), ((19820, 19832), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (19830, 19832), False, 'import io\n'), ((19837, 19867), 'matplotlib.pyplot.savefig', 'plt.savefig', (['buf'], {'format': '"""png"""'}), "(buf, format='png')\n", (19848, 19867), True, 'import matplotlib.pyplot as plt\n'), ((20023, 20047), 'tensorflow.expand_dims', 'tf.expand_dims', (['image', '(0)'], {}), '(image, 0)\n', (20037, 20047), True, 'import tensorflow as tf\n'), ((21221, 21248), 'os.path.exists', 'os.path.exists', (['config_path'], {}), '(config_path)\n', (21235, 21248), False, 'import os\n'), ((21475, 21526), 'tensorflow.config.experimental.list_physical_devices', 'tf.config.experimental.list_physical_devices', (['"""GPU"""'], {}), "('GPU')\n", (21519, 21526), True, 'import tensorflow as tf\n'), ((3172, 3201), 'tensorflow.stack', 'tf.stack', (['nn_img_list'], {'axis': '(0)'}), '(nn_img_list, axis=0)\n', (3180, 3201), True, 'import tensorflow as tf\n'), ((3715, 3749), 'numpy.expand_dims', 'np.expand_dims', (['sim_matrix'], {'axis': '(3)'}), '(sim_matrix, axis=3)\n', (3729, 3749), True, 'import numpy as np\n'), ((3841, 3850), 'numpy.sum', 'np.sum', (['e'], {}), '(e)\n', (3847, 3850), True, 'import numpy as np\n'), ((4484, 4514), 'tensorflow.cast', 'tf.cast', (['global_step', 'tf.int32'], {}), '(global_step, tf.int32)\n', (4491, 4514), True, 'import tensorflow as tf\n'), ((4542, 4597), 'tensorflow.constant', 'tf.constant', (['lr_params.NUM_WARMUP_STEPS'], {'dtype': 'tf.int32'}), '(lr_params.NUM_WARMUP_STEPS, dtype=tf.int32)\n', (4553, 4597), True, 'import tensorflow as tf\n'), ((4641, 4678), 'tensorflow.cast', 'tf.cast', (['global_steps_int', 'tf.float32'], {}), '(global_steps_int, tf.float32)\n', (4648, 4678), True, 'import tensorflow as tf\n'), ((4708, 4745), 'tensorflow.cast', 'tf.cast', (['warmup_steps_int', 'tf.float32'], {}), '(warmup_steps_int, tf.float32)\n', (4715, 4745), True, 
'import tensorflow as tf\n'), ((4901, 4957), 'tensorflow.cast', 'tf.cast', (['(global_steps_int < warmup_steps_int)', 'tf.float32'], {}), '(global_steps_int < warmup_steps_int, tf.float32)\n', (4908, 4957), True, 'import tensorflow as tf\n'), ((7775, 7828), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {'learning_rate': 'learning_rate'}), '(learning_rate=learning_rate)\n', (7799, 7828), True, 'import tensorflow as tf\n'), ((10035, 10139), 'absl.logging.info', 'logging.info', (['"""Using the existing passed in config as no config.json file exists in %s"""', 'logdir'], {}), "(\n 'Using the existing passed in config as no config.json file exists in %s',\n logdir)\n", (10047, 10139), False, 'from absl import logging\n'), ((10383, 10455), 'absl.logging.info', 'logging.info', (['"""Using config from config.json that exists in %s."""', 'logdir'], {}), "('Using config from config.json that exists in %s.', logdir)\n", (10395, 10455), False, 'from absl import logging\n'), ((10591, 10617), 'config.CONFIG.update', 'CONFIG.update', (['config_dict'], {}), '(config_dict)\n', (10604, 10617), False, 'from config import CONFIG\n'), ((10682, 10712), 'os.path.exists', 'os.path.exists', (['train_logs_dir'], {}), '(train_logs_dir)\n', (10696, 10712), False, 'import os\n'), ((11171, 11204), 'os.path.join', 'os.path.join', (['logdir', '"""eval_logs"""'], {}), "(logdir, 'eval_logs')\n", (11183, 11204), False, 'import os\n'), ((11274, 11305), 'tensorflow.io.gfile.exists', 'tf.io.gfile.exists', (['config_path'], {}), '(config_path)\n', (11292, 11305), True, 'import tensorflow as tf\n'), ((11315, 11417), 'absl.logging.info', 'logging.info', (['"""Waiting for config to exist. Going to sleep %s for secs."""', 'config_timeout_seconds'], {}), "('Waiting for config to exist. 
Going to sleep %s for secs.',\n config_timeout_seconds)\n", (11327, 11417), False, 'from absl import logging\n'), ((11446, 11480), 'time.sleep', 'time.sleep', (['config_timeout_seconds'], {}), '(config_timeout_seconds)\n', (11456, 11480), False, 'import time\n'), ((20381, 20392), 'time.time', 'time.time', ([], {}), '()\n', (20390, 20392), False, 'import time\n'), ((1669, 1707), 'tensorflow.split', 'tf.split', (['emb_feats', 'num_steps'], {'axis': '(0)'}), '(emb_feats, num_steps, axis=0)\n', (1677, 1707), True, 'import tensorflow as tf\n'), ((2825, 2870), 'tensorflow.tile', 'tf.tile', (['query_feats[j:j + 1]', '[num_steps, 1]'], {}), '(query_feats[j:j + 1], [num_steps, 1])\n', (2832, 2870), True, 'import tensorflow as tf\n'), ((3279, 3308), 'tensorflow.unstack', 'tf.unstack', (['im'], {'num': 'num_steps'}), '(im, num=num_steps)\n', (3289, 3308), True, 'import tensorflow as tf\n'), ((3809, 3820), 'numpy.array', 'np.array', (['w'], {}), '(w)\n', (3817, 3820), True, 'import numpy as np\n'), ((4055, 4080), 'tensorflow.random.uniform', 'tf.random.uniform', (['(m, n)'], {}), '((m, n))\n', (4072, 4080), True, 'import tensorflow as tf\n'), ((5279, 5320), 'tensorflow.greater_equal', 'tf.greater_equal', (['global_step', 'boundaries'], {}), '(global_step, boundaries)\n', (5295, 5320), True, 'import tensorflow as tf\n'), ((5419, 5463), 'tensorflow.one_hot', 'tf.one_hot', (['rate_index'], {'depth': 'num_boundaries'}), '(rate_index, depth=num_boundaries)\n', (5429, 5463), True, 'import tensorflow as tf\n'), ((7898, 7964), 'tensorflow.keras.optimizers.SGD', 'tf.keras.optimizers.SGD', ([], {'learning_rate': 'learning_rate', 'momentum': '(0.9)'}), '(learning_rate=learning_rate, momentum=0.9)\n', (7921, 7964), True, 'import tensorflow as tf\n'), ((9985, 10012), 'os.path.exists', 'os.path.exists', (['config_path'], {}), '(config_path)\n', (9999, 10012), False, 'import os\n'), ((10172, 10207), 'tensorflow.io.gfile.GFile', 'tf.io.gfile.GFile', (['config_path', '"""w"""'], {}), "(config_path, 'w')\n", (10189, 10207), True, 'import tensorflow as tf\n'), ((10308, 10364), 'json.dump', 'json.dump', (['config', 'config_file'], {'sort_keys': '(True)', 'indent': '(4)'}), '(config, config_file, sort_keys=True, indent=4)\n', (10317, 10364), False, 'import json\n'), ((10482, 10517), 'tensorflow.io.gfile.GFile', 'tf.io.gfile.GFile', (['config_path', '"""r"""'], {}), "(config_path, 'r')\n", (10499, 10517), True, 'import tensorflow as tf\n'), ((10560, 10582), 'json.load', 'json.load', (['config_file'], {}), '(config_file)\n', (10569, 10582), False, 'import json\n'), ((11511, 11546), 'tensorflow.io.gfile.GFile', 'tf.io.gfile.GFile', (['config_path', '"""r"""'], {}), "(config_path, 'r')\n", (11528, 11546), True, 'import tensorflow as tf\n'), ((11589, 11611), 'json.load', 'json.load', (['config_file'], {}), '(config_file)\n', (11598, 11611), False, 'import json\n'), ((11656, 11690), 'time.sleep', 'time.sleep', (['config_timeout_seconds'], {}), '(config_timeout_seconds)\n', (11666, 11690), False, 'import time\n'), ((17059, 17087), 'numpy.concatenate', 'np.concatenate', (['embs'], {'axis': '(0)'}), '(embs, axis=0)\n', (17073, 17087), True, 'import numpy as np\n'), ((17109, 17139), 'numpy.concatenate', 'np.concatenate', (['labels'], {'axis': '(0)'}), '(labels, axis=0)\n', (17123, 17139), True, 'import numpy as np\n'), ((17161, 17190), 'numpy.concatenate', 'np.concatenate', (['steps'], {'axis': '(0)'}), '(steps, axis=0)\n', (17175, 17190), True, 'import numpy as np\n'), ((17214, 17246), 'numpy.concatenate', 'np.concatenate', 
(['seq_lens'], {'axis': '(0)'}), '(seq_lens, axis=0)\n', (17228, 17246), True, 'import numpy as np\n'), ((17267, 17296), 'numpy.concatenate', 'np.concatenate', (['names'], {'axis': '(0)'}), '(names, axis=0)\n', (17281, 17296), True, 'import numpy as np\n'), ((17322, 17356), 'numpy.concatenate', 'np.concatenate', (['seq_labels'], {'axis': '(0)'}), '(seq_labels, axis=0)\n', (17336, 17356), True, 'import numpy as np\n'), ((20228, 20239), 'time.time', 'time.time', ([], {}), '()\n', (20237, 20239), False, 'import time\n'), ((20931, 20969), 'tensorflow.keras.backend.set_learning_phase', 'tf.keras.backend.set_learning_phase', (['(1)'], {}), '(1)\n', (20966, 20969), True, 'import tensorflow as tf\n'), ((21071, 21109), 'tensorflow.keras.backend.set_learning_phase', 'tf.keras.backend.set_learning_phase', (['(0)'], {}), '(0)\n', (21106, 21109), True, 'import tensorflow as tf\n'), ((21308, 21320), 'json.load', 'json.load', (['f'], {}), '(f)\n', (21317, 21320), False, 'import json\n'), ((21573, 21633), 'tensorflow.config.experimental.set_visible_devices', 'tf.config.experimental.set_visible_devices', (['GPUS[ind]', '"""GPU"""'], {}), "(GPUS[ind], 'GPU')\n", (21615, 21633), True, 'import tensorflow as tf\n'), ((21843, 21893), 'tensorflow.config.experimental.list_logical_devices', 'tf.config.experimental.list_logical_devices', (['"""GPU"""'], {}), "('GPU')\n", (21886, 21893), True, 'import tensorflow as tf\n'), ((1466, 1508), 'tensorflow.split', 'tf.split', (['emb_feats', '(2 * num_steps)'], {'axis': '(0)'}), '(emb_feats, 2 * num_steps, axis=0)\n', (1474, 1508), True, 'import tensorflow as tf\n'), ((2581, 2651), 'tensorflow.unstack', 'tf.unstack', (['image_list[i]'], {'num': '(num_steps * num_frames_per_step)', 'axis': '(0)'}), '(image_list[i], num=num_steps * num_frames_per_step, axis=0)\n', (2591, 2651), True, 'import tensorflow as tf\n'), ((2937, 2998), 'tensorflow.math.squared_difference', 'tf.math.squared_difference', (['curr_query_feats', 'candidate_feats'], {}), '(curr_query_feats, candidate_feats)\n', (2963, 2998), True, 'import tensorflow as tf\n'), ((6209, 6325), 'tensorflow.train.exponential_decay', 'tf.train.exponential_decay', (['lr', 'global_step', 'lr_params.EXP_DECAY_STEPS', 'lr_params.EXP_DECAY_RATE'], {'staircase': '(True)'}), '(lr, global_step, lr_params.EXP_DECAY_STEPS,\n lr_params.EXP_DECAY_RATE, staircase=True)\n', (6235, 6325), True, 'import tensorflow as tf\n'), ((15907, 15994), 'absl.logging.debug', 'logging.debug', (['"""On sequence number %d, frames embedded %d"""', 'n', '(curr_idx + num_steps)'], {}), "('On sequence number %d, frames embedded %d', n, curr_idx +\n num_steps)\n", (15920, 15994), False, 'from absl import logging\n'), ((17465, 17495), 'numpy.concatenate', 'np.concatenate', (['frames'], {'axis': '(0)'}), '(frames, axis=0)\n', (17479, 17495), True, 'import numpy as np\n'), ((17629, 17667), 'numpy.concatenate', 'np.concatenate', (['frame_original'], {'axis': '(0)'}), '(frame_original, axis=0)\n', (17643, 17667), True, 'import numpy as np\n'), ((18925, 18972), 'absl.logging.info', 'logging.info', (['"""Finished embedding the dataset."""'], {}), "('Finished embedding the dataset.')\n", (18937, 18972), False, 'from absl import logging\n'), ((21764, 21815), 'tensorflow.config.experimental.set_memory_growth', 'tf.config.experimental.set_memory_growth', (['gpu', '(True)'], {}), '(gpu, True)\n', (21804, 21815), True, 'import tensorflow as tf\n'), ((22130, 22145), 'absl.logging.info', 'logging.info', (['e'], {}), '(e)\n', (22142, 22145), False, 'from absl import logging\n'), 
((2384, 2458), 'tensorflow.unstack', 'tf.unstack', (['image_list[i]'], {'num': '(2 * num_steps * num_frames_per_step)', 'axis': '(0)'}), '(image_list[i], num=2 * num_steps * num_frames_per_step, axis=0)\n', (2394, 2458), True, 'import tensorflow as tf\n'), ((3119, 3151), 'tensorflow.argmin', 'tf.argmin', (['mean_squared_distance'], {}), '(mean_squared_distance)\n', (3128, 3151), True, 'import tensorflow as tf\n'), ((12887, 12928), 'numpy.arange', 'np.arange', (['curr_idx', '(curr_idx + num_steps)'], {}), '(curr_idx, curr_idx + num_steps)\n', (12896, 12928), True, 'import numpy as np\n'), ((16585, 16605), 'numpy.concatenate', 'np.concatenate', (['embs'], {}), '(embs)\n', (16599, 16605), True, 'import numpy as np\n'), ((7047, 7164), 'tensorflow.train.polynomial_decay', 'tf.train.polynomial_decay', (['lr', 'global_step', 'CONFIG.TRAIN.MAX_ITERS'], {'end_learning_rate': '(0.0)', 'power': '(1.0)', 'cycle': '(False)'}), '(lr, global_step, CONFIG.TRAIN.MAX_ITERS,\n end_learning_rate=0.0, power=1.0, cycle=False)\n', (7072, 7164), True, 'import tensorflow as tf\n'), ((10279, 10293), 'config.CONFIG.items', 'CONFIG.items', ([], {}), '()\n', (10291, 10293), False, 'from config import CONFIG\n'), ((15539, 15566), 'tensorflow.gather', 'tf.gather', (['v', 'idxes'], {'axis': '(1)'}), '(v, idxes, axis=1)\n', (15548, 15566), True, 'import tensorflow as tf\n'), ((17843, 17857), 'numpy.isnan', 'np.isnan', (['embs'], {}), '(embs)\n', (17851, 17857), True, 'import numpy as np\n'), ((17899, 17913), 'numpy.isnan', 'np.isnan', (['embs'], {}), '(embs)\n', (17907, 17913), True, 'import numpy as np\n'), ((17960, 17974), 'numpy.isnan', 'np.isnan', (['embs'], {}), '(embs)\n', (17968, 17974), True, 'import numpy as np\n'), ((18015, 18029), 'numpy.isnan', 'np.isnan', (['embs'], {}), '(embs)\n', (18023, 18029), True, 'import numpy as np\n'), ((18262, 18276), 'numpy.isnan', 'np.isnan', (['embs'], {}), '(embs)\n', (18270, 18276), True, 'import numpy as np\n'), ((17730, 17744), 'numpy.isnan', 'np.isnan', (['embs'], {}), '(embs)\n', (17738, 17744), True, 'import numpy as np\n'), ((18102, 18116), 'numpy.isnan', 'np.isnan', (['embs'], {}), '(embs)\n', (18110, 18116), True, 'import numpy as np\n'), ((18208, 18222), 'numpy.isnan', 'np.isnan', (['embs'], {}), '(embs)\n', (18216, 18222), True, 'import numpy as np\n')] |
import copy
import time
from collections import defaultdict
import cloudpickle
import numpy as np
import pandas as pd
import woodwork as ww
from sklearn.model_selection import BaseCrossValidator
from .pipeline_search_plots import PipelineSearchPlots
from evalml.automl.automl_algorithm import IterativeAlgorithm
from evalml.automl.callbacks import log_error_callback
from evalml.automl.engine import SequentialEngine
from evalml.automl.utils import (
check_all_pipeline_names_unique,
get_default_primary_search_objective,
make_data_splitter
)
from evalml.exceptions import AutoMLSearchException, PipelineNotFoundError
from evalml.model_family import ModelFamily
from evalml.objectives import (
get_core_objectives,
get_non_core_objectives,
get_objective
)
from evalml.pipelines import (
MeanBaselineRegressionPipeline,
ModeBaselineBinaryPipeline,
ModeBaselineMulticlassPipeline,
TimeSeriesBaselineBinaryPipeline,
TimeSeriesBaselineMulticlassPipeline,
TimeSeriesBaselineRegressionPipeline
)
from evalml.pipelines.components.utils import get_estimators
from evalml.pipelines.utils import make_pipeline
from evalml.preprocessing import split_data
from evalml.problem_types import ProblemTypes, handle_problem_types
from evalml.tuners import SKOptTuner
from evalml.utils import convert_to_seconds, infer_feature_types
from evalml.utils.logger import (
get_logger,
log_subtitle,
log_title,
time_elapsed,
update_pipeline
)
logger = get_logger(__file__)
class AutoMLSearch:
"""Automated Pipeline search."""
_MAX_NAME_LEN = 40
# Necessary for "Plotting" documentation, since Sphinx does not work well with instance attributes.
plot = PipelineSearchPlots
def __init__(self,
X_train=None,
y_train=None,
problem_type=None,
objective='auto',
max_iterations=None,
max_time=None,
patience=None,
tolerance=None,
data_splitter=None,
allowed_pipelines=None,
allowed_model_families=None,
start_iteration_callback=None,
add_result_callback=None,
error_callback=None,
additional_objectives=None,
random_seed=0,
n_jobs=-1,
tuner_class=None,
optimize_thresholds=True,
ensembling=False,
max_batches=None,
problem_configuration=None,
train_best_pipeline=True,
pipeline_parameters=None,
_ensembling_split_size=0.2,
_pipelines_per_batch=5):
"""Automated pipeline search
Arguments:
X_train (pd.DataFrame, ww.DataTable): The input training data of shape [n_samples, n_features]. Required.
y_train (pd.Series, ww.DataColumn): The target training data of length [n_samples]. Required for supervised learning tasks.
problem_type (str or ProblemTypes): type of supervised learning problem. See evalml.problem_types.ProblemType.all_problem_types for a full list.
objective (str, ObjectiveBase): The objective to optimize for. Used to propose and rank pipelines, but not for optimizing each pipeline during fit-time.
When set to 'auto', chooses:
- LogLossBinary for binary classification problems,
- LogLossMulticlass for multiclass classification problems, and
- R2 for regression problems.
            max_iterations (int): Maximum number of iterations to search. If neither max_iterations
                nor max_time is set, then max_iterations will default to 5.
max_time (int, str): Maximum time to search for pipelines.
This will not start a new pipeline search after the duration
has elapsed. If it is an integer, then the time will be in seconds.
For strings, time can be specified as seconds, minutes, or hours.
patience (int): Number of iterations without improvement to stop search early. Must be positive.
If None, early stopping is disabled. Defaults to None.
tolerance (float): Minimum percentage difference to qualify as score improvement for early stopping.
Only applicable if patience is not None. Defaults to None.
allowed_pipelines (list(class)): A list of PipelineBase subclasses indicating the pipelines allowed in the search.
The default of None indicates all pipelines for this problem type are allowed. Setting this field will cause
allowed_model_families to be ignored.
allowed_model_families (list(str, ModelFamily)): The model families to search. The default of None searches over all
model families. Run evalml.pipelines.components.utils.allowed_model_families("binary") to see options. Change `binary`
to `multiclass` or `regression` depending on the problem type. Note that if allowed_pipelines is provided,
this parameter will be ignored.
data_splitter (sklearn.model_selection.BaseCrossValidator): Data splitting method to use. Defaults to StratifiedKFold.
tuner_class: The tuner class to use. Defaults to SKOptTuner.
optimize_thresholds (bool): Whether or not to optimize the binary pipeline threshold. Defaults to True.
start_iteration_callback (callable): Function called before each pipeline training iteration.
Callback function takes three positional parameters: The pipeline class, the pipeline parameters, and the AutoMLSearch object.
add_result_callback (callable): Function called after each pipeline training iteration.
Callback function takes three positional parameters: A dictionary containing the training results for the new pipeline, an untrained_pipeline containing the parameters used during training, and the AutoMLSearch object.
error_callback (callable): Function called when `search()` errors and raises an Exception.
Callback function takes three positional parameters: the Exception raised, the traceback, and the AutoMLSearch object.
                Must also accept kwargs, so AutoMLSearch is able to pass along other appropriate parameters by default.
Defaults to None, which will call `log_error_callback`.
additional_objectives (list): Custom set of objectives to score on.
Will override default objectives for problem type if not empty.
random_seed (int): Seed for the random number generator. Defaults to 0.
n_jobs (int or None): Non-negative integer describing level of parallelism used for pipelines.
None and 1 are equivalent. If set to -1, all CPUs are used. For n_jobs below -1, (n_cpus + 1 + n_jobs) are used.
ensembling (boolean): If True, runs ensembling in a separate batch after every allowed pipeline class has been iterated over.
If the number of unique pipelines to search over per batch is one, ensembling will not run. Defaults to False.
max_batches (int): The maximum number of batches of pipelines to search. Parameters max_time, and
max_iterations have precedence over stopping the search.
problem_configuration (dict, None): Additional parameters needed to configure the search. For example,
in time series problems, values should be passed in for the gap and max_delay variables.
train_best_pipeline (boolean): Whether or not to train the best pipeline before returning it. Defaults to True.
            pipeline_parameters (dict): A dict of the parameters used to initialize a pipeline with.
_ensembling_split_size (float): The amount of the training data we'll set aside for training ensemble metalearners. Only used when ensembling is True.
Must be between 0 and 1, exclusive. Defaults to 0.2
_pipelines_per_batch (int): The number of pipelines to train for every batch after the first one.
                The first batch will train a baseline pipeline + one of each pipeline family allowed in the search.
"""
if X_train is None:
raise ValueError('Must specify training data as a 2d array using the X_train argument')
if y_train is None:
raise ValueError('Must specify training data target values as a 1d vector using the y_train argument')
try:
self.problem_type = handle_problem_types(problem_type)
except ValueError:
raise ValueError('choose one of (binary, multiclass, regression) as problem_type')
self.tuner_class = tuner_class or SKOptTuner
self.start_iteration_callback = start_iteration_callback
self.add_result_callback = add_result_callback
self.error_callback = error_callback or log_error_callback
self.data_splitter = data_splitter
self.optimize_thresholds = optimize_thresholds
self.ensembling = ensembling
if objective == 'auto':
objective = get_default_primary_search_objective(self.problem_type.value)
objective = get_objective(objective, return_instance=False)
self.objective = self._validate_objective(objective)
if self.data_splitter is not None and not issubclass(self.data_splitter.__class__, BaseCrossValidator):
raise ValueError("Not a valid data splitter")
if not objective.is_defined_for_problem_type(self.problem_type):
raise ValueError("Given objective {} is not compatible with a {} problem.".format(self.objective.name, self.problem_type.value))
if additional_objectives is None:
additional_objectives = get_core_objectives(self.problem_type)
# if our main objective is part of default set of objectives for problem_type, remove it
existing_main_objective = next((obj for obj in additional_objectives if obj.name == self.objective.name), None)
if existing_main_objective is not None:
additional_objectives.remove(existing_main_objective)
else:
additional_objectives = [get_objective(o) for o in additional_objectives]
additional_objectives = [self._validate_objective(obj) for obj in additional_objectives]
self.additional_objectives = additional_objectives
self.objective_name_to_class = {o.name: o for o in [self.objective] + self.additional_objectives}
if not isinstance(max_time, (int, float, str, type(None))):
raise TypeError(f"Parameter max_time must be a float, int, string or None. Received {type(max_time)} with value {str(max_time)}..")
if isinstance(max_time, (int, float)) and max_time < 0:
raise ValueError(f"Parameter max_time must be None or non-negative. Received {max_time}.")
if max_batches is not None and max_batches < 0:
raise ValueError(f"Parameter max_batches must be None or non-negative. Received {max_batches}.")
if max_iterations is not None and max_iterations < 0:
raise ValueError(f"Parameter max_iterations must be None or non-negative. Received {max_iterations}.")
self.max_time = convert_to_seconds(max_time) if isinstance(max_time, str) else max_time
self.max_iterations = max_iterations
self.max_batches = max_batches
self._pipelines_per_batch = _pipelines_per_batch
if not self.max_iterations and not self.max_time and not self.max_batches:
self.max_batches = 1
logger.info("Using default limit of max_batches=1.\n")
if patience and (not isinstance(patience, int) or patience < 0):
raise ValueError("patience value must be a positive integer. Received {} instead".format(patience))
if tolerance and (tolerance > 1.0 or tolerance < 0.0):
raise ValueError("tolerance value must be a float between 0.0 and 1.0 inclusive. Received {} instead".format(tolerance))
self.patience = patience
self.tolerance = tolerance or 0.0
self._results = {
'pipeline_results': {},
'search_order': [],
'errors': []
}
self.random_seed = random_seed
self.n_jobs = n_jobs
self.plot = None
try:
self.plot = PipelineSearchPlots(self)
except ImportError:
logger.warning("Unable to import plotly; skipping pipeline search plotting\n")
self.allowed_pipelines = allowed_pipelines
self.allowed_model_families = allowed_model_families
self._automl_algorithm = None
self._start = 0.0
self._baseline_cv_scores = {}
self.show_batch_output = False
self._validate_problem_type()
self.problem_configuration = self._validate_problem_configuration(problem_configuration)
self._train_best_pipeline = train_best_pipeline
self._best_pipeline = None
self._searched = False
self.X_train = infer_feature_types(X_train)
self.y_train = infer_feature_types(y_train)
self.ensembling_indices = None
default_data_splitter = make_data_splitter(self.X_train, self.y_train, self.problem_type, self.problem_configuration,
n_splits=3, shuffle=True, random_seed=self.random_seed)
self.data_splitter = self.data_splitter or default_data_splitter
self.pipeline_parameters = pipeline_parameters if pipeline_parameters is not None else {}
self.search_iteration_plot = None
self._interrupted = False
if self.allowed_pipelines is None:
logger.info("Generating pipelines to search over...")
allowed_estimators = get_estimators(self.problem_type, self.allowed_model_families)
logger.debug(f"allowed_estimators set to {[estimator.name for estimator in allowed_estimators]}")
self.allowed_pipelines = [make_pipeline(self.X_train, self.y_train, estimator, self.problem_type, custom_hyperparameters=self.pipeline_parameters) for estimator in allowed_estimators]
if self.allowed_pipelines == []:
raise ValueError("No allowed pipelines to search")
check_all_pipeline_names_unique(self.allowed_pipelines)
run_ensembling = self.ensembling
if run_ensembling and len(self.allowed_pipelines) == 1:
logger.warning("Ensembling is set to True, but the number of unique pipelines is one, so ensembling will not run.")
run_ensembling = False
if run_ensembling and self.max_iterations is not None:
# Baseline + first batch + each pipeline iteration + 1
first_ensembling_iteration = (1 + len(self.allowed_pipelines) + len(self.allowed_pipelines) * self._pipelines_per_batch + 1)
if self.max_iterations < first_ensembling_iteration:
run_ensembling = False
logger.warning(f"Ensembling is set to True, but max_iterations is too small, so ensembling will not run. Set max_iterations >= {first_ensembling_iteration} to run ensembling.")
else:
logger.info(f"Ensembling will run at the {first_ensembling_iteration} iteration and every {len(self.allowed_pipelines) * self._pipelines_per_batch} iterations after that.")
if self.max_batches and self.max_iterations is None:
self.show_batch_output = True
if run_ensembling:
ensemble_nth_batch = len(self.allowed_pipelines) + 1
num_ensemble_batches = (self.max_batches - 1) // ensemble_nth_batch
if num_ensemble_batches == 0:
run_ensembling = False
logger.warning(f"Ensembling is set to True, but max_batches is too small, so ensembling will not run. Set max_batches >= {ensemble_nth_batch + 1} to run ensembling.")
else:
logger.info(f"Ensembling will run every {ensemble_nth_batch} batches.")
self.max_iterations = (1 + len(self.allowed_pipelines) +
self._pipelines_per_batch * (self.max_batches - 1 - num_ensemble_batches) +
num_ensemble_batches)
else:
self.max_iterations = 1 + len(self.allowed_pipelines) + (self._pipelines_per_batch * (self.max_batches - 1))
if run_ensembling:
if not (0 < _ensembling_split_size < 1):
raise ValueError(f"Ensembling split size must be between 0 and 1 exclusive, received {_ensembling_split_size}")
X_shape = ww.DataTable(np.arange(self.X_train.shape[0]))
_, ensembling_indices, _, _ = split_data(X_shape, self.y_train, problem_type=self.problem_type, test_size=_ensembling_split_size, random_seed=self.random_seed)
self.ensembling_indices = ensembling_indices.to_dataframe()[0].tolist()
self._engine = SequentialEngine(self.X_train,
self.y_train,
self.ensembling_indices,
self,
should_continue_callback=self._should_continue,
pre_evaluation_callback=self._pre_evaluation_callback,
post_evaluation_callback=self._post_evaluation_callback)
self.allowed_model_families = list(set([p.model_family for p in (self.allowed_pipelines)]))
logger.debug(f"allowed_pipelines set to {[pipeline.name for pipeline in self.allowed_pipelines]}")
logger.debug(f"allowed_model_families set to {self.allowed_model_families}")
if len(self.problem_configuration):
pipeline_params = {**{'pipeline': self.problem_configuration}, **self.pipeline_parameters}
else:
pipeline_params = self.pipeline_parameters
self._automl_algorithm = IterativeAlgorithm(
max_iterations=self.max_iterations,
allowed_pipelines=self.allowed_pipelines,
tuner_class=self.tuner_class,
random_seed=self.random_seed,
n_jobs=self.n_jobs,
number_features=self.X_train.shape[1],
pipelines_per_batch=self._pipelines_per_batch,
ensembling=run_ensembling,
pipeline_params=pipeline_params
)
def _pre_evaluation_callback(self, pipeline):
if self.start_iteration_callback:
self.start_iteration_callback(pipeline.__class__, pipeline.parameters, self)
desc = f"{pipeline.name}"
if len(desc) > AutoMLSearch._MAX_NAME_LEN:
desc = desc[:AutoMLSearch._MAX_NAME_LEN - 3] + "..."
desc = desc.ljust(AutoMLSearch._MAX_NAME_LEN)
batch_number = 1
if self._automl_algorithm is not None and self._automl_algorithm.batch_number > 0:
batch_number = self._automl_algorithm.batch_number
update_pipeline(logger,
desc,
len(self._results['pipeline_results']) + 1,
self.max_iterations,
self._start,
batch_number,
self.show_batch_output)
def _validate_objective(self, objective):
non_core_objectives = get_non_core_objectives()
if isinstance(objective, type):
if objective in non_core_objectives:
raise ValueError(f"{objective.name.lower()} is not allowed in AutoML! "
"Use evalml.objectives.utils.get_core_objective_names() "
"to get all objective names allowed in automl.")
return objective()
return objective
def __str__(self):
def _print_list(obj_list):
lines = sorted(['\t{}'.format(o.name) for o in obj_list])
return '\n'.join(lines)
def _get_funct_name(function):
if callable(function):
return function.__name__
else:
return None
search_desc = (
f"{handle_problem_types(self.problem_type).name} Search\n\n"
f"Parameters: \n{'='*20}\n"
f"Objective: {get_objective(self.objective).name}\n"
f"Max Time: {self.max_time}\n"
f"Max Iterations: {self.max_iterations}\n"
f"Max Batches: {self.max_batches}\n"
f"Allowed Pipelines: \n{_print_list(self.allowed_pipelines or [])}\n"
f"Patience: {self.patience}\n"
f"Tolerance: {self.tolerance}\n"
f"Data Splitting: {self.data_splitter}\n"
f"Tuner: {self.tuner_class.__name__}\n"
f"Start Iteration Callback: {_get_funct_name(self.start_iteration_callback)}\n"
f"Add Result Callback: {_get_funct_name(self.add_result_callback)}\n"
f"Additional Objectives: {_print_list(self.additional_objectives or [])}\n"
f"Random Seed: {self.random_seed}\n"
f"n_jobs: {self.n_jobs}\n"
f"Optimize Thresholds: {self.optimize_thresholds}\n"
)
rankings_desc = ""
if not self.rankings.empty:
rankings_str = self.rankings.drop(['parameters'], axis='columns').to_string()
rankings_desc = f"\nSearch Results: \n{'='*20}\n{rankings_str}"
return search_desc + rankings_desc
def _validate_problem_configuration(self, problem_configuration=None):
if self.problem_type in [ProblemTypes.TIME_SERIES_REGRESSION]:
required_parameters = {'gap', 'max_delay'}
if not problem_configuration or not all(p in problem_configuration for p in required_parameters):
raise ValueError("user_parameters must be a dict containing values for at least the gap and max_delay "
f"parameters. Received {problem_configuration}.")
return problem_configuration or {}
def _handle_keyboard_interrupt(self):
"""Presents a prompt to the user asking if they want to stop the search.
Returns:
bool: If True, search should terminate early
"""
leading_char = "\n"
start_of_loop = time.time()
while True:
choice = input(leading_char + "Do you really want to exit search (y/n)? ").strip().lower()
if choice == "y":
logger.info("Exiting AutoMLSearch.")
return True
elif choice == "n":
# So that the time in this loop does not count towards the time budget (if set)
time_in_loop = time.time() - start_of_loop
self._start += time_in_loop
return False
else:
leading_char = ""
def search(self, show_iteration_plot=True):
"""Find the best pipeline for the data set.
Arguments:
show_iteration_plot (boolean, True): Shows an iteration vs. score plot in Jupyter notebook.
Disabled by default in non-Jupyter environments.
"""
if self._searched:
logger.info("AutoMLSearch.search() has already been run and will not run again on the same instance. Re-initialize AutoMLSearch to search again.")
return
# don't show iteration plot outside of a jupyter notebook
if show_iteration_plot:
try:
get_ipython
except NameError:
show_iteration_plot = False
log_title(logger, "Beginning pipeline search")
logger.info("Optimizing for %s. " % self.objective.name)
logger.info("{} score is better.\n".format('Greater' if self.objective.greater_is_better else 'Lower'))
logger.info(f"Using {self._engine.__class__.__name__} to train and score pipelines.")
if self.max_batches is not None:
logger.info(f"Searching up to {self.max_batches} batches for a total of {self.max_iterations} pipelines. ")
elif self.max_iterations is not None:
logger.info("Searching up to %s pipelines. " % self.max_iterations)
if self.max_time is not None:
logger.info("Will stop searching for new pipelines after %d seconds.\n" % self.max_time)
logger.info("Allowed model families: %s\n" % ", ".join([model.value for model in self.allowed_model_families]))
self.search_iteration_plot = None
if self.plot:
self.search_iteration_plot = self.plot.search_iteration_plot(interactive_plot=show_iteration_plot)
self._start = time.time()
try:
self._add_baseline_pipelines()
except KeyboardInterrupt:
if self._handle_keyboard_interrupt():
self._interrupted = True
current_batch_pipelines = []
current_batch_pipeline_scores = []
new_pipeline_ids = []
loop_interrupted = False
while self._should_continue():
try:
if not loop_interrupted:
current_batch_pipelines = self._automl_algorithm.next_batch()
except StopIteration:
logger.info('AutoML Algorithm out of recommendations, ending')
break
try:
new_pipeline_ids = self._engine.evaluate_batch(current_batch_pipelines)
loop_interrupted = False
except KeyboardInterrupt:
loop_interrupted = True
if self._handle_keyboard_interrupt():
break
full_rankings = self.full_rankings
current_batch_idx = full_rankings['id'].isin(new_pipeline_ids)
current_batch_pipeline_scores = full_rankings[current_batch_idx]['score']
if len(current_batch_pipeline_scores) and current_batch_pipeline_scores.isna().all():
raise AutoMLSearchException(f"All pipelines in the current AutoML batch produced a score of np.nan on the primary objective {self.objective}.")
self.search_duration = time.time() - self._start
elapsed_time = time_elapsed(self._start)
desc = f"\nSearch finished after {elapsed_time}"
desc = desc.ljust(self._MAX_NAME_LEN)
logger.info(desc)
self._find_best_pipeline()
if self._best_pipeline is not None:
best_pipeline = self.rankings.iloc[0]
best_pipeline_name = best_pipeline["pipeline_name"]
logger.info(f"Best pipeline: {best_pipeline_name}")
logger.info(f"Best pipeline {self.objective.name}: {best_pipeline['score']:3f}")
self._searched = True
def _find_best_pipeline(self):
"""Finds the best pipeline in the rankings
If self._best_pipeline already exists, check to make sure it is different from the current best pipeline before training and thresholding"""
if len(self.rankings) == 0:
return
best_pipeline = self.rankings.iloc[0]
if not (self._best_pipeline and self._best_pipeline == self.get_pipeline(best_pipeline['id'])):
best_pipeline = self.get_pipeline(best_pipeline['id'])
if self._train_best_pipeline:
if best_pipeline.model_family == ModelFamily.ENSEMBLE:
X_train, y_train = self.X_train.iloc[self.ensembling_indices], self.y_train.iloc[self.ensembling_indices]
else:
X_train = self.X_train
y_train = self.y_train
if hasattr(self.data_splitter, "transform_sample"):
train_indices = self.data_splitter.transform_sample(X_train, y_train)
X_train = X_train.iloc[train_indices]
y_train = y_train.iloc[train_indices]
best_pipeline = self._engine.train_pipeline(best_pipeline, X_train, y_train,
self.optimize_thresholds, self.objective)
self._best_pipeline = best_pipeline
def _num_pipelines(self):
"""Return the number of pipeline evaluations which have been made
Returns:
int: the number of pipeline evaluations made in the search
"""
return len(self._results['pipeline_results'])
def _should_continue(self):
"""Given the original stopping criterion and current state, should the search continue?
Returns:
bool: True if yes, False if no.
"""
if self._interrupted:
return False
# for add_to_rankings
if self._searched:
return True
# Run at least one pipeline for every search
num_pipelines = self._num_pipelines()
if num_pipelines == 0:
return True
# check max_time and max_iterations
elapsed = time.time() - self._start
if self.max_time and elapsed >= self.max_time:
return False
elif self.max_iterations and num_pipelines >= self.max_iterations:
return False
# check for early stopping
if self.patience is None or self.tolerance is None:
return True
first_id = self._results['search_order'][0]
best_score = self._results['pipeline_results'][first_id]['score']
num_without_improvement = 0
for id in self._results['search_order'][1:]:
curr_score = self._results['pipeline_results'][id]['score']
significant_change = abs((curr_score - best_score) / best_score) > self.tolerance
score_improved = curr_score > best_score if self.objective.greater_is_better else curr_score < best_score
if score_improved and significant_change:
best_score = curr_score
num_without_improvement = 0
else:
num_without_improvement += 1
if num_without_improvement >= self.patience:
logger.info("\n\n{} iterations without improvement. Stopping search early...".format(self.patience))
return False
return True
def _validate_problem_type(self):
for obj in self.additional_objectives:
if not obj.is_defined_for_problem_type(self.problem_type):
raise ValueError("Additional objective {} is not compatible with a {} problem.".format(obj.name, self.problem_type.value))
for pipeline in self.allowed_pipelines or []:
if pipeline.problem_type != self.problem_type:
raise ValueError("Given pipeline {} is not compatible with problem_type {}.".format(pipeline.name, self.problem_type.value))
def _add_baseline_pipelines(self):
"""Fits a baseline pipeline to the data.
This is the first pipeline fit during search.
"""
if self.problem_type == ProblemTypes.BINARY:
baseline = ModeBaselineBinaryPipeline(parameters={})
elif self.problem_type == ProblemTypes.MULTICLASS:
baseline = ModeBaselineMulticlassPipeline(parameters={})
elif self.problem_type == ProblemTypes.REGRESSION:
baseline = MeanBaselineRegressionPipeline(parameters={})
else:
pipeline_class = {ProblemTypes.TIME_SERIES_REGRESSION: TimeSeriesBaselineRegressionPipeline,
ProblemTypes.TIME_SERIES_MULTICLASS: TimeSeriesBaselineMulticlassPipeline,
ProblemTypes.TIME_SERIES_BINARY: TimeSeriesBaselineBinaryPipeline}[self.problem_type]
gap = self.problem_configuration['gap']
max_delay = self.problem_configuration['max_delay']
baseline = pipeline_class(parameters={"pipeline": {"gap": gap, "max_delay": max_delay},
"Time Series Baseline Estimator": {"gap": gap, "max_delay": max_delay}})
self._engine.evaluate_batch([baseline])
@staticmethod
def _get_mean_cv_scores_for_all_objectives(cv_data, objective_name_to_class):
scores = defaultdict(int)
n_folds = len(cv_data)
for fold_data in cv_data:
for field, value in fold_data['all_objective_scores'].items():
# The 'all_objective_scores' field contains scores for all objectives
# but also fields like "# Training" and "# Testing", so we want to exclude them since
# they are not scores
if field in objective_name_to_class:
scores[field] += value
return {objective: float(score) / n_folds for objective, score in scores.items()}
def _post_evaluation_callback(self, pipeline, evaluation_results):
training_time = evaluation_results['training_time']
cv_data = evaluation_results['cv_data']
cv_scores = evaluation_results['cv_scores']
is_baseline = pipeline.model_family == ModelFamily.BASELINE
cv_score = cv_scores.mean()
percent_better_than_baseline = {}
mean_cv_all_objectives = self._get_mean_cv_scores_for_all_objectives(cv_data, self.objective_name_to_class)
if is_baseline:
self._baseline_cv_scores = mean_cv_all_objectives
for obj_name in mean_cv_all_objectives:
objective_class = self.objective_name_to_class[obj_name]
# In the event add_to_rankings is called before search _baseline_cv_scores will be empty so we will return
# nan for the base score.
percent_better = objective_class.calculate_percent_difference(mean_cv_all_objectives[obj_name],
self._baseline_cv_scores.get(obj_name, np.nan))
percent_better_than_baseline[obj_name] = percent_better
high_variance_cv = self._check_for_high_variance(pipeline, cv_scores)
pipeline_id = len(self._results['pipeline_results'])
self._results['pipeline_results'][pipeline_id] = {
"id": pipeline_id,
"pipeline_name": pipeline.name,
"pipeline_class": type(pipeline),
"pipeline_summary": pipeline.summary,
"parameters": pipeline.parameters,
"score": cv_score,
"high_variance_cv": high_variance_cv,
"training_time": training_time,
"cv_data": cv_data,
"percent_better_than_baseline_all_objectives": percent_better_than_baseline,
"percent_better_than_baseline": percent_better_than_baseline[self.objective.name],
"validation_score": cv_scores[0]
}
if pipeline.model_family == ModelFamily.ENSEMBLE:
input_pipeline_ids = [self._automl_algorithm._best_pipeline_info[model_family]["id"] for model_family in self._automl_algorithm._best_pipeline_info]
self._results['pipeline_results'][pipeline_id]["input_pipeline_ids"] = input_pipeline_ids
self._results['search_order'].append(pipeline_id)
if not is_baseline:
score_to_minimize = -cv_score if self.objective.greater_is_better else cv_score
try:
self._automl_algorithm.add_result(score_to_minimize, pipeline, self._results['pipeline_results'][pipeline_id])
except PipelineNotFoundError:
pass
if self.search_iteration_plot:
self.search_iteration_plot.update()
if self.add_result_callback:
self.add_result_callback(self._results['pipeline_results'][pipeline_id], pipeline, self)
return pipeline_id
def _check_for_high_variance(self, pipeline, cv_scores, threshold=0.2):
"""Checks cross-validation scores and logs a warning if variance is higher than specified threshhold."""
pipeline_name = pipeline.name
high_variance_cv = bool(abs(cv_scores.std() / cv_scores.mean()) > threshold)
if high_variance_cv:
logger.warning(f"High coefficient of variation (cv >= {threshold}) within cross validation scores. {pipeline_name} may not perform as estimated on unseen data.")
return high_variance_cv
def get_pipeline(self, pipeline_id):
"""Given the ID of a pipeline training result, returns an untrained instance of the specified pipeline
initialized with the parameters used to train that pipeline during automl search.
Arguments:
pipeline_id (int): pipeline to retrieve
Returns:
PipelineBase: untrained pipeline instance associated with the provided ID
"""
pipeline_results = self.results['pipeline_results'].get(pipeline_id)
if pipeline_results is None:
raise PipelineNotFoundError("Pipeline not found in automl results")
pipeline_class = pipeline_results.get('pipeline_class')
parameters = pipeline_results.get('parameters')
if pipeline_class is None or parameters is None:
raise PipelineNotFoundError("Pipeline class or parameters not found in automl results")
return pipeline_class(parameters, random_seed=self.random_seed)
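# Example (illustrative sketch, not part of the original source): an untrained
# clone of a previously evaluated pipeline can be refit on the full training data:
#   pipeline = automl.get_pipeline(3)
#   pipeline.fit(X_train, y_train)
# where `automl` is a searched AutoMLSearch instance and 3 is an id taken from
# automl.rankings.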
def describe_pipeline(self, pipeline_id, return_dict=False):
"""Describe a pipeline
Arguments:
pipeline_id (int): pipeline to describe
return_dict (bool): If True, return dictionary of information
about pipeline. Defaults to False.
Returns:
Description of specified pipeline. Includes information such as
type of pipeline components, problem, training time, cross validation, etc.
"""
if pipeline_id not in self._results['pipeline_results']:
raise PipelineNotFoundError("Pipeline not found")
pipeline = self.get_pipeline(pipeline_id)
pipeline_results = self._results['pipeline_results'][pipeline_id]
pipeline.describe()
if pipeline.model_family == ModelFamily.ENSEMBLE:
logger.info("Input for ensembler are pipelines with IDs: " + str(pipeline_results['input_pipeline_ids']))
log_subtitle(logger, "Training")
logger.info("Training for {} problems.".format(pipeline.problem_type))
if self.optimize_thresholds and self.objective.is_defined_for_problem_type(ProblemTypes.BINARY) and self.objective.can_optimize_threshold:
logger.info("Objective to optimize binary classification pipeline thresholds for: {}".format(self.objective))
logger.info("Total training time (including CV): %.1f seconds" % pipeline_results["training_time"])
log_subtitle(logger, "Cross Validation", underline="-")
all_objective_scores = [fold["all_objective_scores"] for fold in pipeline_results["cv_data"]]
all_objective_scores = pd.DataFrame(all_objective_scores)
for c in all_objective_scores:
if c in ["# Training", "# Validation"]:
all_objective_scores[c] = all_objective_scores[c].astype("object")
continue
mean = all_objective_scores[c].mean(axis=0)
std = all_objective_scores[c].std(axis=0)
all_objective_scores.loc["mean", c] = mean
all_objective_scores.loc["std", c] = std
all_objective_scores.loc["coef of var", c] = std / mean if abs(mean) > 0 else np.inf
all_objective_scores = all_objective_scores.fillna("-")
with pd.option_context('display.float_format', '{:.3f}'.format, 'expand_frame_repr', False):
logger.info(all_objective_scores)
if return_dict:
return pipeline_results
def add_to_rankings(self, pipeline):
"""Fits and evaluates a given pipeline then adds the results to the automl rankings with the requirement that automl search has been run.
Arguments:
pipeline (PipelineBase): pipeline to train and evaluate.
"""
pipeline_rows = self.full_rankings[self.full_rankings['pipeline_name'] == pipeline.name]
for parameter in pipeline_rows['parameters']:
if pipeline.parameters == parameter:
return
self._engine.evaluate_batch([pipeline])
self._find_best_pipeline()
@property
def results(self):
"""Class that allows access to a copy of the results from `automl_search`.
Returns: dict containing `pipeline_results`: a dict with results from each pipeline,
and `search_order`: a list describing the order the pipelines were searched.
"""
return copy.deepcopy(self._results)
@property
def rankings(self):
"""Returns a pandas.DataFrame with scoring results from the highest-scoring set of parameters used with each pipeline."""
return self.full_rankings.drop_duplicates(subset="pipeline_name", keep="first")
@property
def full_rankings(self):
"""Returns a pandas.DataFrame with scoring results from all pipelines searched"""
ascending = True
if self.objective.greater_is_better:
ascending = False
full_rankings_cols = ["id", "pipeline_name", "score", "validation_score",
"percent_better_than_baseline", "high_variance_cv", "parameters"]
if not self._results['pipeline_results']:
return pd.DataFrame(columns=full_rankings_cols)
rankings_df = pd.DataFrame(self._results['pipeline_results'].values())
rankings_df = rankings_df[full_rankings_cols]
rankings_df.sort_values("score", ascending=ascending, inplace=True)
rankings_df.reset_index(drop=True, inplace=True)
return rankings_df
@property
def best_pipeline(self):
"""Returns a trained instance of the best pipeline and parameters found during automl search. If `train_best_pipeline` is set to False, returns an untrained pipeline instance.
Returns:
PipelineBase: A trained instance of the best pipeline and parameters found during automl search. If `train_best_pipeline` is set to False, returns an untrained pipeline instance.
"""
if not self._best_pipeline:
raise PipelineNotFoundError("automl search must be run before selecting `best_pipeline`.")
return self._best_pipeline
def save(self, file_path, pickle_protocol=cloudpickle.DEFAULT_PROTOCOL):
"""Saves AutoML object at file path
Arguments:
file_path (str): location to save file
pickle_protocol (int): the pickle data stream format.
Returns:
None
"""
with open(file_path, 'wb') as f:
cloudpickle.dump(self, f, protocol=pickle_protocol)
@staticmethod
def load(file_path):
"""Loads AutoML object at file path
Arguments:
file_path (str): location to find file to load
Returns:
AutoMLSearch object
"""
with open(file_path, 'rb') as f:
return cloudpickle.load(f)
def train_pipelines(self, pipelines):
"""Train a list of pipelines on the training data.
This can be helpful for training pipelines once the search is complete.
Arguments:
pipelines (list(PipelineBase)): List of pipelines to train.
Returns:
Dict[str, PipelineBase]: Dictionary keyed by pipeline name that maps to the fitted pipeline.
Note that any pipelines that error out during training will not be included in the dictionary,
but the exception and stacktrace will be displayed in the log.
"""
return self._engine.train_batch(pipelines)
def score_pipelines(self, pipelines, X_holdout, y_holdout, objectives):
"""Score a list of pipelines on the given holdout data.
Arguments:
pipelines (list(PipelineBase)): List of pipelines to train.
X_holdout (ww.DataTable, pd.DataFrame): Holdout features.
y_holdout (ww.DataTable, pd.DataFrame): Holdout targets for scoring.
objectives (list(str), list(ObjectiveBase)): Objectives used for scoring.
Returns:
Dict[str, Dict[str, float]]: Dictionary keyed by pipeline name that maps to a dictionary of scores.
Note that any pipelines that error out during scoring will not be included in the dictionary,
but the exception and stacktrace will be displayed in the log.
"""
return self._engine.score_batch(pipelines, X_holdout, y_holdout, objectives)
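if __name__ == "__main__":
    # Illustrative usage sketch, not part of the original evalml module: build a
    # tiny synthetic binary-classification problem and run a short search. The
    # column names and parameter values are arbitrary example assumptions.
    _X = pd.DataFrame({"feat_a": np.arange(100), "feat_b": np.arange(100) % 7})
    _y = pd.Series(np.arange(100) % 2)
    _automl = AutoMLSearch(X_train=_X, y_train=_y, problem_type="binary",
                           max_batches=1)
    _automl.search()
    print(_automl.rankings)
    print(_automl.best_pipeline.name)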
| [
"evalml.pipelines.ModeBaselineMulticlassPipeline",
"pandas.option_context",
"evalml.exceptions.PipelineNotFoundError",
"copy.deepcopy",
"evalml.pipelines.utils.make_pipeline",
"evalml.objectives.get_non_core_objectives",
"evalml.utils.logger.get_logger",
"numpy.arange",
"evalml.utils.logger.log_subtitle",
"cloudpickle.load",
"evalml.utils.infer_feature_types",
"evalml.utils.convert_to_seconds",
"evalml.objectives.get_objective",
"pandas.DataFrame",
"evalml.automl.utils.check_all_pipeline_names_unique",
"evalml.automl.automl_algorithm.IterativeAlgorithm",
"evalml.exceptions.AutoMLSearchException",
"evalml.automl.utils.make_data_splitter",
"evalml.automl.utils.get_default_primary_search_objective",
"evalml.utils.logger.time_elapsed",
"evalml.preprocessing.split_data",
"evalml.pipelines.MeanBaselineRegressionPipeline",
"cloudpickle.dump",
"time.time",
"evalml.problem_types.handle_problem_types",
"evalml.utils.logger.log_title",
"evalml.pipelines.ModeBaselineBinaryPipeline",
"evalml.automl.engine.SequentialEngine",
"collections.defaultdict",
"evalml.pipelines.components.utils.get_estimators",
"evalml.objectives.get_core_objectives"
] | [((1498, 1518), 'evalml.utils.logger.get_logger', 'get_logger', (['__file__'], {}), '(__file__)\n', (1508, 1518), False, 'from evalml.utils.logger import get_logger, log_subtitle, log_title, time_elapsed, update_pipeline\n'), ((9426, 9473), 'evalml.objectives.get_objective', 'get_objective', (['objective'], {'return_instance': '(False)'}), '(objective, return_instance=False)\n', (9439, 9473), False, 'from evalml.objectives import get_core_objectives, get_non_core_objectives, get_objective\n'), ((13288, 13316), 'evalml.utils.infer_feature_types', 'infer_feature_types', (['X_train'], {}), '(X_train)\n', (13307, 13316), False, 'from evalml.utils import convert_to_seconds, infer_feature_types\n'), ((13340, 13368), 'evalml.utils.infer_feature_types', 'infer_feature_types', (['y_train'], {}), '(y_train)\n', (13359, 13368), False, 'from evalml.utils import convert_to_seconds, infer_feature_types\n'), ((13441, 13600), 'evalml.automl.utils.make_data_splitter', 'make_data_splitter', (['self.X_train', 'self.y_train', 'self.problem_type', 'self.problem_configuration'], {'n_splits': '(3)', 'shuffle': '(True)', 'random_seed': 'self.random_seed'}), '(self.X_train, self.y_train, self.problem_type, self.\n problem_configuration, n_splits=3, shuffle=True, random_seed=self.\n random_seed)\n', (13459, 13600), False, 'from evalml.automl.utils import check_all_pipeline_names_unique, get_default_primary_search_objective, make_data_splitter\n'), ((14514, 14569), 'evalml.automl.utils.check_all_pipeline_names_unique', 'check_all_pipeline_names_unique', (['self.allowed_pipelines'], {}), '(self.allowed_pipelines)\n', (14545, 14569), False, 'from evalml.automl.utils import check_all_pipeline_names_unique, get_default_primary_search_objective, make_data_splitter\n'), ((17239, 17488), 'evalml.automl.engine.SequentialEngine', 'SequentialEngine', (['self.X_train', 'self.y_train', 'self.ensembling_indices', 'self'], {'should_continue_callback': 'self._should_continue', 'pre_evaluation_callback': 'self._pre_evaluation_callback', 'post_evaluation_callback': 'self._post_evaluation_callback'}), '(self.X_train, self.y_train, self.ensembling_indices, self,\n should_continue_callback=self._should_continue, pre_evaluation_callback\n =self._pre_evaluation_callback, post_evaluation_callback=self.\n _post_evaluation_callback)\n', (17255, 17488), False, 'from evalml.automl.engine import SequentialEngine\n'), ((18259, 18600), 'evalml.automl.automl_algorithm.IterativeAlgorithm', 'IterativeAlgorithm', ([], {'max_iterations': 'self.max_iterations', 'allowed_pipelines': 'self.allowed_pipelines', 'tuner_class': 'self.tuner_class', 'random_seed': 'self.random_seed', 'n_jobs': 'self.n_jobs', 'number_features': 'self.X_train.shape[1]', 'pipelines_per_batch': 'self._pipelines_per_batch', 'ensembling': 'run_ensembling', 'pipeline_params': 'pipeline_params'}), '(max_iterations=self.max_iterations, allowed_pipelines=\n self.allowed_pipelines, tuner_class=self.tuner_class, random_seed=self.\n random_seed, n_jobs=self.n_jobs, number_features=self.X_train.shape[1],\n pipelines_per_batch=self._pipelines_per_batch, ensembling=\n run_ensembling, pipeline_params=pipeline_params)\n', (18277, 18600), False, 'from evalml.automl.automl_algorithm import IterativeAlgorithm\n'), ((19640, 19665), 'evalml.objectives.get_non_core_objectives', 'get_non_core_objectives', ([], {}), '()\n', (19663, 19665), False, 'from evalml.objectives import get_core_objectives, get_non_core_objectives, get_objective\n'), ((22545, 22556), 'time.time', 'time.time', ([], {}), 
'()\n', (22554, 22556), False, 'import time\n'), ((24004, 24050), 'evalml.utils.logger.log_title', 'log_title', (['logger', '"""Beginning pipeline search"""'], {}), "(logger, 'Beginning pipeline search')\n", (24013, 24050), False, 'from evalml.utils.logger import get_logger, log_subtitle, log_title, time_elapsed, update_pipeline\n'), ((25067, 25078), 'time.time', 'time.time', ([], {}), '()\n', (25076, 25078), False, 'import time\n'), ((26570, 26595), 'evalml.utils.logger.time_elapsed', 'time_elapsed', (['self._start'], {}), '(self._start)\n', (26582, 26595), False, 'from evalml.utils.logger import get_logger, log_subtitle, log_title, time_elapsed, update_pipeline\n'), ((32472, 32488), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (32483, 32488), False, 'from collections import defaultdict\n'), ((38449, 38481), 'evalml.utils.logger.log_subtitle', 'log_subtitle', (['logger', '"""Training"""'], {}), "(logger, 'Training')\n", (38461, 38481), False, 'from evalml.utils.logger import get_logger, log_subtitle, log_title, time_elapsed, update_pipeline\n'), ((38948, 39003), 'evalml.utils.logger.log_subtitle', 'log_subtitle', (['logger', '"""Cross Validation"""'], {'underline': '"""-"""'}), "(logger, 'Cross Validation', underline='-')\n", (38960, 39003), False, 'from evalml.utils.logger import get_logger, log_subtitle, log_title, time_elapsed, update_pipeline\n'), ((39138, 39172), 'pandas.DataFrame', 'pd.DataFrame', (['all_objective_scores'], {}), '(all_objective_scores)\n', (39150, 39172), True, 'import pandas as pd\n'), ((40904, 40932), 'copy.deepcopy', 'copy.deepcopy', (['self._results'], {}), '(self._results)\n', (40917, 40932), False, 'import copy\n'), ((8755, 8789), 'evalml.problem_types.handle_problem_types', 'handle_problem_types', (['problem_type'], {}), '(problem_type)\n', (8775, 8789), False, 'from evalml.problem_types import ProblemTypes, handle_problem_types\n'), ((9344, 9405), 'evalml.automl.utils.get_default_primary_search_objective', 'get_default_primary_search_objective', (['self.problem_type.value'], {}), '(self.problem_type.value)\n', (9380, 9405), False, 'from evalml.automl.utils import check_all_pipeline_names_unique, get_default_primary_search_objective, make_data_splitter\n'), ((9997, 10035), 'evalml.objectives.get_core_objectives', 'get_core_objectives', (['self.problem_type'], {}), '(self.problem_type)\n', (10016, 10035), False, 'from evalml.objectives import get_core_objectives, get_non_core_objectives, get_objective\n'), ((11491, 11519), 'evalml.utils.convert_to_seconds', 'convert_to_seconds', (['max_time'], {}), '(max_time)\n', (11509, 11519), False, 'from evalml.utils import convert_to_seconds, infer_feature_types\n'), ((14032, 14094), 'evalml.pipelines.components.utils.get_estimators', 'get_estimators', (['self.problem_type', 'self.allowed_model_families'], {}), '(self.problem_type, self.allowed_model_families)\n', (14046, 14094), False, 'from evalml.pipelines.components.utils import get_estimators\n'), ((17001, 17135), 'evalml.preprocessing.split_data', 'split_data', (['X_shape', 'self.y_train'], {'problem_type': 'self.problem_type', 'test_size': '_ensembling_split_size', 'random_seed': 'self.random_seed'}), '(X_shape, self.y_train, problem_type=self.problem_type, test_size\n =_ensembling_split_size, random_seed=self.random_seed)\n', (17011, 17135), False, 'from evalml.preprocessing import split_data\n'), ((26521, 26532), 'time.time', 'time.time', ([], {}), '()\n', (26530, 26532), False, 'import time\n'), ((29296, 29307), 'time.time', 'time.time', 
([], {}), '()\n', (29305, 29307), False, 'import time\n'), ((31329, 31370), 'evalml.pipelines.ModeBaselineBinaryPipeline', 'ModeBaselineBinaryPipeline', ([], {'parameters': '{}'}), '(parameters={})\n', (31355, 31370), False, 'from evalml.pipelines import MeanBaselineRegressionPipeline, ModeBaselineBinaryPipeline, ModeBaselineMulticlassPipeline, TimeSeriesBaselineBinaryPipeline, TimeSeriesBaselineMulticlassPipeline, TimeSeriesBaselineRegressionPipeline\n'), ((37083, 37144), 'evalml.exceptions.PipelineNotFoundError', 'PipelineNotFoundError', (['"""Pipeline not found in automl results"""'], {}), "('Pipeline not found in automl results')\n", (37104, 37144), False, 'from evalml.exceptions import AutoMLSearchException, PipelineNotFoundError\n'), ((37340, 37426), 'evalml.exceptions.PipelineNotFoundError', 'PipelineNotFoundError', (['"""Pipeline class or parameters not found in automl results"""'], {}), "(\n 'Pipeline class or parameters not found in automl results')\n", (37361, 37426), False, 'from evalml.exceptions import AutoMLSearchException, PipelineNotFoundError\n'), ((38065, 38108), 'evalml.exceptions.PipelineNotFoundError', 'PipelineNotFoundError', (['"""Pipeline not found"""'], {}), "('Pipeline not found')\n", (38086, 38108), False, 'from evalml.exceptions import AutoMLSearchException, PipelineNotFoundError\n'), ((39768, 39858), 'pandas.option_context', 'pd.option_context', (['"""display.float_format"""', '"""{:.3f}""".format', '"""expand_frame_repr"""', '(False)'], {}), "('display.float_format', '{:.3f}'.format,\n 'expand_frame_repr', False)\n", (39785, 39858), True, 'import pandas as pd\n'), ((41672, 41712), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'full_rankings_cols'}), '(columns=full_rankings_cols)\n', (41684, 41712), True, 'import pandas as pd\n'), ((42510, 42599), 'evalml.exceptions.PipelineNotFoundError', 'PipelineNotFoundError', (['"""automl search must be run before selecting `best_pipeline`."""'], {}), "(\n 'automl search must be run before selecting `best_pipeline`.')\n", (42531, 42599), False, 'from evalml.exceptions import AutoMLSearchException, PipelineNotFoundError\n'), ((42990, 43041), 'cloudpickle.dump', 'cloudpickle.dump', (['self', 'f'], {'protocol': 'pickle_protocol'}), '(self, f, protocol=pickle_protocol)\n', (43006, 43041), False, 'import cloudpickle\n'), ((43333, 43352), 'cloudpickle.load', 'cloudpickle.load', (['f'], {}), '(f)\n', (43349, 43352), False, 'import cloudpickle\n'), ((10434, 10450), 'evalml.objectives.get_objective', 'get_objective', (['o'], {}), '(o)\n', (10447, 10450), False, 'from evalml.objectives import get_core_objectives, get_non_core_objectives, get_objective\n'), ((14243, 14367), 'evalml.pipelines.utils.make_pipeline', 'make_pipeline', (['self.X_train', 'self.y_train', 'estimator', 'self.problem_type'], {'custom_hyperparameters': 'self.pipeline_parameters'}), '(self.X_train, self.y_train, estimator, self.problem_type,\n custom_hyperparameters=self.pipeline_parameters)\n', (14256, 14367), False, 'from evalml.pipelines.utils import make_pipeline\n'), ((16925, 16957), 'numpy.arange', 'np.arange', (['self.X_train.shape[0]'], {}), '(self.X_train.shape[0])\n', (16934, 16957), True, 'import numpy as np\n'), ((26351, 26498), 'evalml.exceptions.AutoMLSearchException', 'AutoMLSearchException', (['f"""All pipelines in the current AutoML batch produced a score of np.nan on the primary objective {self.objective}."""'], {}), "(\n f'All pipelines in the current AutoML batch produced a score of np.nan on the primary objective {self.objective}.'\n 
)\n", (26372, 26498), False, 'from evalml.exceptions import AutoMLSearchException, PipelineNotFoundError\n'), ((31453, 31498), 'evalml.pipelines.ModeBaselineMulticlassPipeline', 'ModeBaselineMulticlassPipeline', ([], {'parameters': '{}'}), '(parameters={})\n', (31483, 31498), False, 'from evalml.pipelines import MeanBaselineRegressionPipeline, ModeBaselineBinaryPipeline, ModeBaselineMulticlassPipeline, TimeSeriesBaselineBinaryPipeline, TimeSeriesBaselineMulticlassPipeline, TimeSeriesBaselineRegressionPipeline\n'), ((20439, 20478), 'evalml.problem_types.handle_problem_types', 'handle_problem_types', (['self.problem_type'], {}), '(self.problem_type)\n', (20459, 20478), False, 'from evalml.problem_types import ProblemTypes, handle_problem_types\n'), ((20563, 20592), 'evalml.objectives.get_objective', 'get_objective', (['self.objective'], {}), '(self.objective)\n', (20576, 20592), False, 'from evalml.objectives import get_core_objectives, get_non_core_objectives, get_objective\n'), ((31581, 31626), 'evalml.pipelines.MeanBaselineRegressionPipeline', 'MeanBaselineRegressionPipeline', ([], {'parameters': '{}'}), '(parameters={})\n', (31611, 31626), False, 'from evalml.pipelines import MeanBaselineRegressionPipeline, ModeBaselineBinaryPipeline, ModeBaselineMulticlassPipeline, TimeSeriesBaselineBinaryPipeline, TimeSeriesBaselineMulticlassPipeline, TimeSeriesBaselineRegressionPipeline\n'), ((22950, 22961), 'time.time', 'time.time', ([], {}), '()\n', (22959, 22961), False, 'import time\n')] |
import graphene
from graphql_jwt.decorators import setup_jwt_cookie
from . import mixins, types
from .decorators import social_auth
class SocialAuthMutation(mixins.SocialAuthMixin, graphene.Mutation):
    social = graphene.Field(types.SocialType)

    class Meta:
        abstract = True

    class Arguments:
        provider = graphene.String(required=True)
        code = graphene.String(required=True)

    @classmethod
    @setup_jwt_cookie
    @social_auth
    def mutate(cls, root, info, social, **kwargs):
        return cls.resolve(root, info, social, **kwargs)


class SocialAuth(mixins.ResolveMixin, SocialAuthMutation):
    """Social Auth Mutation"""


class SocialAuthJWT(mixins.JSONWebTokenMixin, SocialAuthMutation):
    """Social Auth for JSON Web Token (JWT)"""
| [
"graphene.String",
"graphene.Field"
] | [((217, 249), 'graphene.Field', 'graphene.Field', (['types.SocialType'], {}), '(types.SocialType)\n', (231, 249), False, 'import graphene\n'), ((332, 362), 'graphene.String', 'graphene.String', ([], {'required': '(True)'}), '(required=True)\n', (347, 362), False, 'import graphene\n'), ((378, 408), 'graphene.String', 'graphene.String', ([], {'required': '(True)'}), '(required=True)\n', (393, 408), False, 'import graphene\n')] |
#!/usr/bin/env python3
import os
import argparse
import logging
import isce
import isceobj
from components.stdproc.stdproc import crossmul
from iscesys.ImageUtil.ImageUtil import ImageUtil as IU
def createParser():
    '''
    Command Line Parser.
    '''
    parser = argparse.ArgumentParser(description='Generate offset field between two Sentinel swaths')
    parser.add_argument('-m', '--master', type=str, dest='master', required=True,
                        help='Master image')
    parser.add_argument('-s', '--slave', type=str, dest='slave', required=True,
                        help='Slave image')
    parser.add_argument('-o', '--outdir', type=str, dest='prefix', default='crossmul',
                        help='Prefix of output int and amp files')
    parser.add_argument('-a', '--alks', type=int, dest='azlooks', default=1,
                        help='Azimuth looks')
    parser.add_argument('-r', '--rlks', type=int, dest='rglooks', default=1,
                        help='Range looks')
    return parser


def cmdLineParse(iargs=None):
    parser = createParser()
    return parser.parse_args(args=iargs)


def run(imageSlc1, imageSlc2, resampName, azLooks, rgLooks):
    objSlc1 = isceobj.createSlcImage()
    # right now imageSlc1 and 2 are just text files, need to open them as image
    IU.copyAttributes(imageSlc1, objSlc1)
    objSlc1.setAccessMode('read')
    objSlc1.createImage()

    objSlc2 = isceobj.createSlcImage()
    IU.copyAttributes(imageSlc2, objSlc2)
    objSlc2.setAccessMode('read')
    objSlc2.createImage()

    slcWidth = imageSlc1.getWidth()
    intWidth = int(slcWidth / rgLooks)
    lines = min(imageSlc1.getLength(), imageSlc2.getLength())

    resampAmp = resampName + '.amp'
    resampInt = resampName + '.int'

    objInt = isceobj.createIntImage()
    objInt.setFilename(resampInt)
    objInt.setWidth(intWidth)
    imageInt = isceobj.createIntImage()
    IU.copyAttributes(objInt, imageInt)
    objInt.setAccessMode('write')
    objInt.createImage()

    objAmp = isceobj.createAmpImage()
    objAmp.setFilename(resampAmp)
    objAmp.setWidth(intWidth)
    imageAmp = isceobj.createAmpImage()
    IU.copyAttributes(objAmp, imageAmp)
    objAmp.setAccessMode('write')
    objAmp.createImage()

    objCrossmul = crossmul.createcrossmul()
    objCrossmul.width = slcWidth
    objCrossmul.length = lines
    objCrossmul.LooksDown = azLooks
    objCrossmul.LooksAcross = rgLooks

    objCrossmul.crossmul(objSlc1, objSlc2, objInt, objAmp)

    for obj in [objInt, objAmp, objSlc1, objSlc2]:
        obj.finalizeImage()

    return imageInt, imageAmp


def main(iargs=None):
    inps = cmdLineParse(iargs)

    img1 = isceobj.createImage()
    img1.load(inps.master + '.xml')

    img2 = isceobj.createImage()
    img2.load(inps.slave + '.xml')

    os.makedirs(os.path.dirname(inps.prefix), exist_ok=True)

    run(img1, img2, inps.prefix, inps.azlooks, inps.rglooks)


if __name__ == '__main__':
    main()
    '''
    Main driver.
    '''
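# Example invocation (illustrative sketch, not part of the original script).
# The SLC paths and output prefix are placeholders; each SLC is expected to
# have an accompanying ISCE .xml metadata file next to it:
#
#   crossmul.py -m master.slc -s slave.slc -o interferogram/coarse -a 3 -r 12
#
# This multilooks by 3 in azimuth and 12 in range and writes
# interferogram/coarse.int and interferogram/coarse.amp.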
| [
"argparse.ArgumentParser",
"isceobj.createImage",
"isceobj.createAmpImage",
"os.path.dirname",
"isceobj.createSlcImage",
"components.stdproc.stdproc.crossmul.createcrossmul",
"isceobj.createIntImage",
"iscesys.ImageUtil.ImageUtil.ImageUtil.copyAttributes"
] | [((275, 368), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Generate offset field between two Sentinel swaths"""'}), "(description=\n 'Generate offset field between two Sentinel swaths')\n", (298, 368), False, 'import argparse\n'), ((1151, 1175), 'isceobj.createSlcImage', 'isceobj.createSlcImage', ([], {}), '()\n', (1173, 1175), False, 'import isceobj\n'), ((1260, 1297), 'iscesys.ImageUtil.ImageUtil.ImageUtil.copyAttributes', 'IU.copyAttributes', (['imageSlc1', 'objSlc1'], {}), '(imageSlc1, objSlc1)\n', (1277, 1297), True, 'from iscesys.ImageUtil.ImageUtil import ImageUtil as IU\n'), ((1373, 1397), 'isceobj.createSlcImage', 'isceobj.createSlcImage', ([], {}), '()\n', (1395, 1397), False, 'import isceobj\n'), ((1402, 1439), 'iscesys.ImageUtil.ImageUtil.ImageUtil.copyAttributes', 'IU.copyAttributes', (['imageSlc2', 'objSlc2'], {}), '(imageSlc2, objSlc2)\n', (1419, 1439), True, 'from iscesys.ImageUtil.ImageUtil import ImageUtil as IU\n'), ((1726, 1750), 'isceobj.createIntImage', 'isceobj.createIntImage', ([], {}), '()\n', (1748, 1750), False, 'import isceobj\n'), ((1830, 1854), 'isceobj.createIntImage', 'isceobj.createIntImage', ([], {}), '()\n', (1852, 1854), False, 'import isceobj\n'), ((1859, 1894), 'iscesys.ImageUtil.ImageUtil.ImageUtil.copyAttributes', 'IU.copyAttributes', (['objInt', 'imageInt'], {}), '(objInt, imageInt)\n', (1876, 1894), True, 'from iscesys.ImageUtil.ImageUtil import ImageUtil as IU\n'), ((1968, 1992), 'isceobj.createAmpImage', 'isceobj.createAmpImage', ([], {}), '()\n', (1990, 1992), False, 'import isceobj\n'), ((2072, 2096), 'isceobj.createAmpImage', 'isceobj.createAmpImage', ([], {}), '()\n', (2094, 2096), False, 'import isceobj\n'), ((2101, 2136), 'iscesys.ImageUtil.ImageUtil.ImageUtil.copyAttributes', 'IU.copyAttributes', (['objAmp', 'imageAmp'], {}), '(objAmp, imageAmp)\n', (2118, 2136), True, 'from iscesys.ImageUtil.ImageUtil import ImageUtil as IU\n'), ((2215, 2240), 'components.stdproc.stdproc.crossmul.createcrossmul', 'crossmul.createcrossmul', ([], {}), '()\n', (2238, 2240), False, 'from components.stdproc.stdproc import crossmul\n'), ((2617, 2638), 'isceobj.createImage', 'isceobj.createImage', ([], {}), '()\n', (2636, 2638), False, 'import isceobj\n'), ((2687, 2708), 'isceobj.createImage', 'isceobj.createImage', ([], {}), '()\n', (2706, 2708), False, 'import isceobj\n'), ((2761, 2789), 'os.path.dirname', 'os.path.dirname', (['inps.prefix'], {}), '(inps.prefix)\n', (2776, 2789), False, 'import os\n')] |
# Copyright (c) 2014-present PlatformIO <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from urllib.parse import urlparse
import click
import uvicorn
from starlette.applications import Starlette
from starlette.middleware import Middleware
from starlette.responses import PlainTextResponse
from starlette.routing import Mount, Route, WebSocketRoute
from starlette.staticfiles import StaticFiles
from starlette.status import HTTP_403_FORBIDDEN
from platformio.commands.home.rpc.handlers.account import AccountRPC
from platformio.commands.home.rpc.handlers.app import AppRPC
from platformio.commands.home.rpc.handlers.ide import IDERPC
from platformio.commands.home.rpc.handlers.misc import MiscRPC
from platformio.commands.home.rpc.handlers.os import OSRPC
from platformio.commands.home.rpc.handlers.piocore import PIOCoreRPC
from platformio.commands.home.rpc.handlers.project import ProjectRPC
from platformio.commands.home.rpc.server import WebSocketJSONRPCServerFactory
from platformio.compat import aio_get_running_loop
from platformio.exception import PlatformioException
from platformio.package.manager.core import get_core_package_dir
from platformio.proc import force_exit
class ShutdownMiddleware:
    def __init__(self, app):
        self.app = app

    async def __call__(self, scope, receive, send):
        if scope["type"] == "http" and b"__shutdown__" in scope.get("query_string", {}):
            await shutdown_server()
        await self.app(scope, receive, send)


async def shutdown_server(_=None):
    aio_get_running_loop().call_later(0.5, force_exit)
    return PlainTextResponse("Server has been shutdown!")


async def protected_page(_):
    return PlainTextResponse(
        "Protected PlatformIO Home session", status_code=HTTP_403_FORBIDDEN
    )


def run_server(host, port, no_open, shutdown_timeout, home_url):
    contrib_dir = get_core_package_dir("contrib-piohome")
    if not os.path.isdir(contrib_dir):
        raise PlatformioException("Invalid path to PIO Home Contrib")

    ws_rpc_factory = WebSocketJSONRPCServerFactory(shutdown_timeout)
    ws_rpc_factory.addObjectHandler(AccountRPC(), namespace="account")
    ws_rpc_factory.addObjectHandler(AppRPC(), namespace="app")
    ws_rpc_factory.addObjectHandler(IDERPC(), namespace="ide")
    ws_rpc_factory.addObjectHandler(MiscRPC(), namespace="misc")
    ws_rpc_factory.addObjectHandler(OSRPC(), namespace="os")
    ws_rpc_factory.addObjectHandler(PIOCoreRPC(), namespace="core")
    ws_rpc_factory.addObjectHandler(ProjectRPC(), namespace="project")

    path = urlparse(home_url).path
    routes = [
        WebSocketRoute(path + "wsrpc", ws_rpc_factory, name="wsrpc"),
        Route(path + "__shutdown__", shutdown_server, methods=["POST"]),
        Mount(path, StaticFiles(directory=contrib_dir, html=True), name="static"),
    ]
    if path != "/":
        routes.append(Route("/", protected_page))

    uvicorn.run(
        Starlette(
            middleware=[Middleware(ShutdownMiddleware)],
            routes=routes,
            on_startup=[
                lambda: click.echo(
                    "PIO Home has been started. Press Ctrl+C to shutdown."
                ),
                lambda: None if no_open else click.launch(home_url),
            ],
        ),
        host=host,
        port=port,
        log_level="warning",
    )
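# Illustrative call sketch (not part of the original module): roughly how the
# `pio home` command starts the server. The host, port and URL values below
# are example assumptions.
#
#   run_server(
#       host="127.0.0.1",
#       port=8008,
#       no_open=False,
#       shutdown_timeout=0,
#       home_url="http://127.0.0.1:8008",
#   )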
| [
"platformio.package.manager.core.get_core_package_dir",
"click.echo",
"click.launch",
"platformio.commands.home.rpc.handlers.piocore.PIOCoreRPC",
"platformio.commands.home.rpc.handlers.os.OSRPC",
"starlette.routing.Route",
"platformio.commands.home.rpc.handlers.account.AccountRPC",
"platformio.commands.home.rpc.handlers.project.ProjectRPC",
"os.path.isdir",
"platformio.commands.home.rpc.server.WebSocketJSONRPCServerFactory",
"starlette.middleware.Middleware",
"starlette.responses.PlainTextResponse",
"platformio.commands.home.rpc.handlers.misc.MiscRPC",
"platformio.commands.home.rpc.handlers.app.AppRPC",
"platformio.commands.home.rpc.handlers.ide.IDERPC",
"platformio.exception.PlatformioException",
"starlette.routing.WebSocketRoute",
"urllib.parse.urlparse",
"starlette.staticfiles.StaticFiles",
"platformio.compat.aio_get_running_loop"
] | [((2105, 2151), 'starlette.responses.PlainTextResponse', 'PlainTextResponse', (['"""Server has been shutdown!"""'], {}), "('Server has been shutdown!')\n", (2122, 2151), False, 'from starlette.responses import PlainTextResponse\n'), ((2194, 2285), 'starlette.responses.PlainTextResponse', 'PlainTextResponse', (['"""Protected PlatformIO Home session"""'], {'status_code': 'HTTP_403_FORBIDDEN'}), "('Protected PlatformIO Home session', status_code=\n HTTP_403_FORBIDDEN)\n", (2211, 2285), False, 'from starlette.responses import PlainTextResponse\n'), ((2380, 2419), 'platformio.package.manager.core.get_core_package_dir', 'get_core_package_dir', (['"""contrib-piohome"""'], {}), "('contrib-piohome')\n", (2400, 2419), False, 'from platformio.package.manager.core import get_core_package_dir\n'), ((2551, 2598), 'platformio.commands.home.rpc.server.WebSocketJSONRPCServerFactory', 'WebSocketJSONRPCServerFactory', (['shutdown_timeout'], {}), '(shutdown_timeout)\n', (2580, 2598), False, 'from platformio.commands.home.rpc.server import WebSocketJSONRPCServerFactory\n'), ((2431, 2457), 'os.path.isdir', 'os.path.isdir', (['contrib_dir'], {}), '(contrib_dir)\n', (2444, 2457), False, 'import os\n'), ((2473, 2528), 'platformio.exception.PlatformioException', 'PlatformioException', (['"""Invalid path to PIO Home Contrib"""'], {}), "('Invalid path to PIO Home Contrib')\n", (2492, 2528), False, 'from platformio.exception import PlatformioException\n'), ((2635, 2647), 'platformio.commands.home.rpc.handlers.account.AccountRPC', 'AccountRPC', ([], {}), '()\n', (2645, 2647), False, 'from platformio.commands.home.rpc.handlers.account import AccountRPC\n'), ((2706, 2714), 'platformio.commands.home.rpc.handlers.app.AppRPC', 'AppRPC', ([], {}), '()\n', (2712, 2714), False, 'from platformio.commands.home.rpc.handlers.app import AppRPC\n'), ((2769, 2777), 'platformio.commands.home.rpc.handlers.ide.IDERPC', 'IDERPC', ([], {}), '()\n', (2775, 2777), False, 'from platformio.commands.home.rpc.handlers.ide import IDERPC\n'), ((2832, 2841), 'platformio.commands.home.rpc.handlers.misc.MiscRPC', 'MiscRPC', ([], {}), '()\n', (2839, 2841), False, 'from platformio.commands.home.rpc.handlers.misc import MiscRPC\n'), ((2897, 2904), 'platformio.commands.home.rpc.handlers.os.OSRPC', 'OSRPC', ([], {}), '()\n', (2902, 2904), False, 'from platformio.commands.home.rpc.handlers.os import OSRPC\n'), ((2958, 2970), 'platformio.commands.home.rpc.handlers.piocore.PIOCoreRPC', 'PIOCoreRPC', ([], {}), '()\n', (2968, 2970), False, 'from platformio.commands.home.rpc.handlers.piocore import PIOCoreRPC\n'), ((3026, 3038), 'platformio.commands.home.rpc.handlers.project.ProjectRPC', 'ProjectRPC', ([], {}), '()\n', (3036, 3038), False, 'from platformio.commands.home.rpc.handlers.project import ProjectRPC\n'), ((3073, 3091), 'urllib.parse.urlparse', 'urlparse', (['home_url'], {}), '(home_url)\n', (3081, 3091), False, 'from urllib.parse import urlparse\n'), ((3120, 3180), 'starlette.routing.WebSocketRoute', 'WebSocketRoute', (["(path + 'wsrpc')", 'ws_rpc_factory'], {'name': '"""wsrpc"""'}), "(path + 'wsrpc', ws_rpc_factory, name='wsrpc')\n", (3134, 3180), False, 'from starlette.routing import Mount, Route, WebSocketRoute\n'), ((3190, 3253), 'starlette.routing.Route', 'Route', (["(path + '__shutdown__')", 'shutdown_server'], {'methods': "['POST']"}), "(path + '__shutdown__', shutdown_server, methods=['POST'])\n", (3195, 3253), False, 'from starlette.routing import Mount, Route, WebSocketRoute\n'), ((2043, 2065), 'platformio.compat.aio_get_running_loop', 
'aio_get_running_loop', ([], {}), '()\n', (2063, 2065), False, 'from platformio.compat import aio_get_running_loop\n'), ((3275, 3320), 'starlette.staticfiles.StaticFiles', 'StaticFiles', ([], {'directory': 'contrib_dir', 'html': '(True)'}), '(directory=contrib_dir, html=True)\n', (3286, 3320), False, 'from starlette.staticfiles import StaticFiles\n'), ((3386, 3412), 'starlette.routing.Route', 'Route', (['"""/"""', 'protected_page'], {}), "('/', protected_page)\n", (3391, 3412), False, 'from starlette.routing import Mount, Route, WebSocketRoute\n'), ((3475, 3505), 'starlette.middleware.Middleware', 'Middleware', (['ShutdownMiddleware'], {}), '(ShutdownMiddleware)\n', (3485, 3505), False, 'from starlette.middleware import Middleware\n'), ((3584, 3650), 'click.echo', 'click.echo', (['"""PIO Home has been started. Press Ctrl+C to shutdown."""'], {}), "('PIO Home has been started. Press Ctrl+C to shutdown.')\n", (3594, 3650), False, 'import click\n'), ((3735, 3757), 'click.launch', 'click.launch', (['home_url'], {}), '(home_url)\n', (3747, 3757), False, 'import click\n')] |
# Copyright 2015 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
"""Messages for the Machine Provider API."""
# pylint: disable=unused-wildcard-import, wildcard-import
from protorpc import messages
from components.machine_provider.dimensions import *
from components.machine_provider.instructions import *
from components.machine_provider.policies import *
class CatalogMachineRetrievalRequest(messages.Message):
"""Represents a request to retrieve a machine from the catalog."""
# Hostname of the machine to retrieve.
hostname = messages.StringField(1, required=True)
# Backend which added the machine.
backend = messages.EnumField(Backend, 2)
class CatalogMachineRetrievalResponse(messages.Message):
"""Represents a response to a catalog machine retrieval request."""
# Dimensions instance specifying what sort of machine this is.
dimensions = messages.MessageField(Dimensions, 1)
# Policies governing this machine.
policies = messages.MessageField(Policies, 2)
# State of the CatalogMachineEntry.
state = messages.StringField(3)
# Cloud Pub/Sub subscription the machine must listen to for instructions.
pubsub_subscription = messages.StringField(4)
# Project the Cloud Pub/Sub subscription exists in.
pubsub_subscription_project = messages.StringField(5)
# Cloud Pub/Sub topic the machine must be subscribed to.
pubsub_topic = messages.StringField(6)
# Project the Cloud Pub/Sub topic exists in.
pubsub_topic_project = messages.StringField(7)
# Timestamp indicating lease expiration seconds from epoch in UTC.
lease_expiration_ts = messages.IntegerField(8)
class CatalogMachineAdditionRequest(messages.Message):
"""Represents a request to add a machine to the catalog.
dimensions.backend must be specified.
dimensions.hostname must be unique per backend.
"""
# Dimensions instance specifying what sort of machine this is.
dimensions = messages.MessageField(Dimensions, 1, required=True)
# Policies instance specifying machine-specific configuration.
policies = messages.MessageField(Policies, 2, required=True)
class CatalogMachineBatchAdditionRequest(messages.Message):
"""Represents a batched set of CatalogMachineAdditionRequests.
dimensions.backend must be specified in each CatalogMachineAdditionRequest.
dimensions.hostname must be unique per backend.
"""
# CatalogMachineAdditionRequest instances to batch together.
requests = messages.MessageField(
CatalogMachineAdditionRequest, 1, repeated=True)
class CatalogMachineDeletionRequest(messages.Message):
"""Represents a request to delete a machine in the catalog."""
# Dimensions instance specifying what sort of machine this is.
dimensions = messages.MessageField(Dimensions, 1, required=True)
class CatalogManipulationRequestError(messages.Enum):
"""Represents an error in a catalog manipulation request."""
# Per backend, hostnames must be unique in the catalog.
HOSTNAME_REUSE = 1
# Tried to lookup an entry that didn't exist.
ENTRY_NOT_FOUND = 2
# Didn't specify a backend.
UNSPECIFIED_BACKEND = 3
# Specified backend didn't match the backend originating the request.
MISMATCHED_BACKEND = 4
# Didn't specify a hostname.
UNSPECIFIED_HOSTNAME = 5
# Proposed Cloud Pub/Sub topic was invalid.
INVALID_TOPIC = 6
# Proposed Cloud Pub/Sub project was invalid.
INVALID_PROJECT = 7
# Didn't specify a Cloud Pub/Sub topic.
UNSPECIFIED_TOPIC = 8
# Attempted to delete a leased machine.
LEASED = 9
class CatalogManipulationResponse(messages.Message):
"""Represents a response to a catalog manipulation request."""
# CatalogManipulationRequestError instance indicating an error with the
# request, or None if there is no error.
error = messages.EnumField(CatalogManipulationRequestError, 1)
# CatalogMachineAdditionRequest this response is in reference to.
machine_addition_request = messages.MessageField(
CatalogMachineAdditionRequest, 2)
# CatalogMachineDeletionRequest this response is in reference to.
machine_deletion_request = messages.MessageField(
CatalogMachineDeletionRequest, 3)
class CatalogBatchManipulationResponse(messages.Message):
"""Represents a response to a batched catalog manipulation request."""
responses = messages.MessageField(
CatalogManipulationResponse, 1, repeated=True)
class LeaseRequest(messages.Message):
"""Represents a request for a lease on a machine."""
# Per-user unique ID used to deduplicate requests.
request_id = messages.StringField(1, required=True)
# Dimensions instance specifying what sort of machine to lease.
dimensions = messages.MessageField(Dimensions, 2, required=True)
# Desired length of the lease in seconds.
duration = messages.IntegerField(3)
# Cloud Pub/Sub topic name to communicate on regarding this request.
pubsub_topic = messages.StringField(4)
# Cloud Pub/Sub project name to communicate on regarding this request.
pubsub_project = messages.StringField(5)
# Instructions to give the machine once it's been leased.
on_lease = messages.MessageField(Instruction, 6)
# UTC seconds from epoch when lease should expire.
lease_expiration_ts = messages.IntegerField(7)
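# Illustrative sketch (not part of the original module): a minimal lease
# request. Only one of `duration` or `lease_expiration_ts` should be set (see
# LeaseRequestError.MUTUAL_EXCLUSION_ERROR below); the dimensions value is
# assumed to be a Dimensions message built by the caller.
def _example_lease_request(dims):
  return LeaseRequest(
      request_id='example-lease-1',
      dimensions=dims,
      duration=3600,
      pubsub_topic='machine-provider-leases',
      pubsub_project='example-cloud-project',
  )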
class BatchedLeaseRequest(messages.Message):
"""Represents a batched set of LeaseRequests."""
# LeaseRequest instances to batch together.
requests = messages.MessageField(LeaseRequest, 1, repeated=True)
class LeaseRequestError(messages.Enum):
"""Represents an error in a LeaseRequest."""
# Request IDs are intended to be unique.
# Reusing a request ID in a different request is an error.
REQUEST_ID_REUSE = 1
# Proposed Cloud Pub/Sub topic was invalid.
INVALID_TOPIC = 2
# Proposed Cloud Pub/Sub project was invalid.
INVALID_PROJECT = 3
# Didn't specify a Cloud Pub/Sub topic.
UNSPECIFIED_TOPIC = 4
# Request couldn't be processed in time.
DEADLINE_EXCEEDED = 5
# Miscellaneous transient error.
TRANSIENT_ERROR = 6
# Mutually exclusive duration and lease_expiration_ts both specified.
MUTUAL_EXCLUSION_ERROR = 7
# Proposed duration was zero or negative.
NONPOSITIVE_DEADLINE = 8
# Proposed expiration time is not in the future.
LEASE_EXPIRATION_TS_ERROR = 9
# Neither duration nor lease_expiration_ts were specified.
LEASE_LENGTH_UNSPECIFIED = 10
# Requested lease duration is too long.
LEASE_TOO_LONG = 11
class LeaseRequestState(messages.Enum):
"""Represents the state of a LeaseRequest."""
# LeaseRequest has been received, but not processed yet.
UNTRIAGED = 0
# LeaseRequest is pending provisioning of additional capacity.
PENDING = 1
# LeaseRequest has been fulfilled.
FULFILLED = 2
# LeaseRequest has been denied.
DENIED = 3
class LeaseResponse(messages.Message):
"""Represents a response to a LeaseRequest."""
# SHA-1 identifying the LeaseRequest this response refers to.
request_hash = messages.StringField(1)
# LeaseRequestError instance indicating an error with the request, or None
# if there is no error.
error = messages.EnumField(LeaseRequestError, 2)
# Request ID used by the client to generate the LeaseRequest.
client_request_id = messages.StringField(3, required=True)
# State of the LeaseRequest.
state = messages.EnumField(LeaseRequestState, 4)
# Hostname of the machine available for this request.
hostname = messages.StringField(5)
# Timestamp indicating lease expiration seconds from epoch in UTC.
lease_expiration_ts = messages.IntegerField(6)
class BatchedLeaseResponse(messages.Message):
"""Represents a response to a batched lease request."""
responses = messages.MessageField(LeaseResponse, 1, repeated=True)
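# Illustrative sketch (not part of the original API): one way a client might
# consume a BatchedLeaseResponse, collecting hostnames of fulfilled leases.
# Everything except the message/enum names defined above is hypothetical.
def _example_collect_fulfilled_leases(batched_response):
  hostnames = {}
  for response in batched_response.responses:
    if response.error is not None:
      continue  # e.g. LeaseRequestError.DEADLINE_EXCEEDED; caller may retry
    if response.state == LeaseRequestState.FULFILLED:
      hostnames[response.client_request_id] = response.hostname
  return hostnames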
class LeaseReleaseRequest(messages.Message):
"""Represents a request to voluntarily cancel a LeaseRequest."""
# Per-user unique ID used to identify the LeaseRequest.
request_id = messages.StringField(1, required=True)
class BatchedLeaseReleaseRequest(messages.Message):
"""Represents a batched set of lease release requests."""
requests = messages.MessageField(LeaseReleaseRequest, 1, repeated=True)
class LeaseReleaseRequestError(messages.Enum):
"""Represents an error in a LeaseReleaseRequest."""
# Request ID referred to non-existent request for this user.
NOT_FOUND = 1
# Request ID referred to an unfulfilled request.
NOT_FULFILLED = 2
# Request ID referred to a fulfilled request whose machine was
# already reclaimed.
ALREADY_RECLAIMED = 3
# Request couldn't be processed in time.
DEADLINE_EXCEEDED = 4
# Miscellaneous transient error.
TRANSIENT_ERROR = 5
class LeaseReleaseResponse(messages.Message):
"""Represents a response to a LeaseReleaseRequest."""
# SHA-1 identifying the LeaseRequest this response refers to.
request_hash = messages.StringField(1)
# LeaseReleaseRequestError indicating an error with the request, or None
# if there is no error.
error = messages.EnumField(LeaseReleaseRequestError, 2)
# Request ID used by the client to generate the LeaseRequest
# referred to by the LeaseReleaseRequest.
client_request_id = messages.StringField(3, required=True)
class BatchedLeaseReleaseResponse(messages.Message):
"""Represents responses to a batched set of lease release requests."""
responses = messages.MessageField(LeaseReleaseResponse, 1, repeated=True)
class MachineInstructionRequest(messages.Message):
"""Represents a request to send an instruction to a leased machine."""
# Request ID for the fulfilled LeaseRequest whose machine should be
# instructed.
request_id = messages.StringField(1, required=True)
# Instruction to send the leased machine.
instruction = messages.MessageField(Instruction, 2)
class MachineInstructionError(messages.Enum):
"""Represents an error in a MachineInstructionRequest."""
# Request ID referred to an unfulfilled request.
NOT_FULFILLED = 1
# Request ID referred to a fulfilled request whose machine was
# already reclaimed.
ALREADY_RECLAIMED = 2
# Invalid instruction for the machine.
INVALID_INSTRUCTION = 3
class MachineInstructionResponse(messages.Message):
"""Represents a response to a MachineInstructionRequest."""
# Request ID used by the client to generate the LeaseRequest for the
# machine being instructed.
client_request_id = messages.StringField(1, required=True)
# MachineInstructionError indicating an error with the request, or None
# if there is no error.
error = messages.EnumField(MachineInstructionError, 2)
class PollRequest(messages.Message):
"""Represents a request to poll for instructions given to a machine."""
# Hostname of the machine whose instructions to retrieve.
hostname = messages.StringField(1, required=True)
# Backend the machine belongs to. Generally required.
backend = messages.EnumField(Backend, 2)
class PollResponse(messages.Message):
"""Represents a response to a request for instructions given to a machine."""
# Instruction given to the machine.
instruction = messages.MessageField(Instruction, 1)
# State of the instruction.
state = messages.StringField(2)
class AckRequest(messages.Message):
"""Represents a request to ack an instruction received by a machine."""
# Hostname of the machine whose instruction to ack.
hostname = messages.StringField(1, required=True)
# Backend the machine belongs to.
backend = messages.EnumField(Backend, 2)
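# Illustrative sketch (not part of the original API) of the poll/ack flow the
# three messages above imply: a leased machine polls for an Instruction and
# acks once it has been carried out. The `call_backend` transport is
# hypothetical.
def _example_poll_and_ack(call_backend, hostname):
  poll_response = call_backend('poll', PollRequest(hostname=hostname))
  if poll_response.instruction is None:
    return None
  # ... carry out poll_response.instruction here ...
  call_backend('ack', AckRequest(hostname=hostname))
  return poll_response.instruction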
| [
"protorpc.messages.StringField",
"protorpc.messages.MessageField",
"protorpc.messages.IntegerField",
"protorpc.messages.EnumField"
] | [((650, 688), 'protorpc.messages.StringField', 'messages.StringField', (['(1)'], {'required': '(True)'}), '(1, required=True)\n', (670, 688), False, 'from protorpc import messages\n'), ((738, 768), 'protorpc.messages.EnumField', 'messages.EnumField', (['Backend', '(2)'], {}), '(Backend, 2)\n', (756, 768), False, 'from protorpc import messages\n'), ((978, 1014), 'protorpc.messages.MessageField', 'messages.MessageField', (['Dimensions', '(1)'], {}), '(Dimensions, 1)\n', (999, 1014), False, 'from protorpc import messages\n'), ((1065, 1099), 'protorpc.messages.MessageField', 'messages.MessageField', (['Policies', '(2)'], {}), '(Policies, 2)\n', (1086, 1099), False, 'from protorpc import messages\n'), ((1148, 1171), 'protorpc.messages.StringField', 'messages.StringField', (['(3)'], {}), '(3)\n', (1168, 1171), False, 'from protorpc import messages\n'), ((1272, 1295), 'protorpc.messages.StringField', 'messages.StringField', (['(4)'], {}), '(4)\n', (1292, 1295), False, 'from protorpc import messages\n'), ((1382, 1405), 'protorpc.messages.StringField', 'messages.StringField', (['(5)'], {}), '(5)\n', (1402, 1405), False, 'from protorpc import messages\n'), ((1482, 1505), 'protorpc.messages.StringField', 'messages.StringField', (['(6)'], {}), '(6)\n', (1502, 1505), False, 'from protorpc import messages\n'), ((1578, 1601), 'protorpc.messages.StringField', 'messages.StringField', (['(7)'], {}), '(7)\n', (1598, 1601), False, 'from protorpc import messages\n'), ((1695, 1719), 'protorpc.messages.IntegerField', 'messages.IntegerField', (['(8)'], {}), '(8)\n', (1716, 1719), False, 'from protorpc import messages\n'), ((2013, 2064), 'protorpc.messages.MessageField', 'messages.MessageField', (['Dimensions', '(1)'], {'required': '(True)'}), '(Dimensions, 1, required=True)\n', (2034, 2064), False, 'from protorpc import messages\n'), ((2143, 2192), 'protorpc.messages.MessageField', 'messages.MessageField', (['Policies', '(2)'], {'required': '(True)'}), '(Policies, 2, required=True)\n', (2164, 2192), False, 'from protorpc import messages\n'), ((2531, 2601), 'protorpc.messages.MessageField', 'messages.MessageField', (['CatalogMachineAdditionRequest', '(1)'], {'repeated': '(True)'}), '(CatalogMachineAdditionRequest, 1, repeated=True)\n', (2552, 2601), False, 'from protorpc import messages\n'), ((2811, 2862), 'protorpc.messages.MessageField', 'messages.MessageField', (['Dimensions', '(1)'], {'required': '(True)'}), '(Dimensions, 1, required=True)\n', (2832, 2862), False, 'from protorpc import messages\n'), ((3846, 3900), 'protorpc.messages.EnumField', 'messages.EnumField', (['CatalogManipulationRequestError', '(1)'], {}), '(CatalogManipulationRequestError, 1)\n', (3864, 3900), False, 'from protorpc import messages\n'), ((3998, 4053), 'protorpc.messages.MessageField', 'messages.MessageField', (['CatalogMachineAdditionRequest', '(2)'], {}), '(CatalogMachineAdditionRequest, 2)\n', (4019, 4053), False, 'from protorpc import messages\n'), ((4158, 4213), 'protorpc.messages.MessageField', 'messages.MessageField', (['CatalogMachineDeletionRequest', '(3)'], {}), '(CatalogMachineDeletionRequest, 3)\n', (4179, 4213), False, 'from protorpc import messages\n'), ((4368, 4436), 'protorpc.messages.MessageField', 'messages.MessageField', (['CatalogManipulationResponse', '(1)'], {'repeated': '(True)'}), '(CatalogManipulationResponse, 1, repeated=True)\n', (4389, 4436), False, 'from protorpc import messages\n'), ((4607, 4645), 'protorpc.messages.StringField', 'messages.StringField', (['(1)'], {'required': '(True)'}), '(1, 
required=True)\n', (4627, 4645), False, 'from protorpc import messages\n'), ((4727, 4778), 'protorpc.messages.MessageField', 'messages.MessageField', (['Dimensions', '(2)'], {'required': '(True)'}), '(Dimensions, 2, required=True)\n', (4748, 4778), False, 'from protorpc import messages\n'), ((4836, 4860), 'protorpc.messages.IntegerField', 'messages.IntegerField', (['(3)'], {}), '(3)\n', (4857, 4860), False, 'from protorpc import messages\n'), ((4949, 4972), 'protorpc.messages.StringField', 'messages.StringField', (['(4)'], {}), '(4)\n', (4969, 4972), False, 'from protorpc import messages\n'), ((5065, 5088), 'protorpc.messages.StringField', 'messages.StringField', (['(5)'], {}), '(5)\n', (5085, 5088), False, 'from protorpc import messages\n'), ((5162, 5199), 'protorpc.messages.MessageField', 'messages.MessageField', (['Instruction', '(6)'], {}), '(Instruction, 6)\n', (5183, 5199), False, 'from protorpc import messages\n'), ((5277, 5301), 'protorpc.messages.IntegerField', 'messages.IntegerField', (['(7)'], {}), '(7)\n', (5298, 5301), False, 'from protorpc import messages\n'), ((5459, 5512), 'protorpc.messages.MessageField', 'messages.MessageField', (['LeaseRequest', '(1)'], {'repeated': '(True)'}), '(LeaseRequest, 1, repeated=True)\n', (5480, 5512), False, 'from protorpc import messages\n'), ((6982, 7005), 'protorpc.messages.StringField', 'messages.StringField', (['(1)'], {}), '(1)\n', (7002, 7005), False, 'from protorpc import messages\n'), ((7119, 7159), 'protorpc.messages.EnumField', 'messages.EnumField', (['LeaseRequestError', '(2)'], {}), '(LeaseRequestError, 2)\n', (7137, 7159), False, 'from protorpc import messages\n'), ((7246, 7284), 'protorpc.messages.StringField', 'messages.StringField', (['(3)'], {'required': '(True)'}), '(3, required=True)\n', (7266, 7284), False, 'from protorpc import messages\n'), ((7326, 7366), 'protorpc.messages.EnumField', 'messages.EnumField', (['LeaseRequestState', '(4)'], {}), '(LeaseRequestState, 4)\n', (7344, 7366), False, 'from protorpc import messages\n'), ((7436, 7459), 'protorpc.messages.StringField', 'messages.StringField', (['(5)'], {}), '(5)\n', (7456, 7459), False, 'from protorpc import messages\n'), ((7553, 7577), 'protorpc.messages.IntegerField', 'messages.IntegerField', (['(6)'], {}), '(6)\n', (7574, 7577), False, 'from protorpc import messages\n'), ((7698, 7752), 'protorpc.messages.MessageField', 'messages.MessageField', (['LeaseResponse', '(1)'], {'repeated': '(True)'}), '(LeaseResponse, 1, repeated=True)\n', (7719, 7752), False, 'from protorpc import messages\n'), ((7940, 7978), 'protorpc.messages.StringField', 'messages.StringField', (['(1)'], {'required': '(True)'}), '(1, required=True)\n', (7960, 7978), False, 'from protorpc import messages\n'), ((8106, 8166), 'protorpc.messages.MessageField', 'messages.MessageField', (['LeaseReleaseRequest', '(1)'], {'repeated': '(True)'}), '(LeaseReleaseRequest, 1, repeated=True)\n', (8127, 8166), False, 'from protorpc import messages\n'), ((8841, 8864), 'protorpc.messages.StringField', 'messages.StringField', (['(1)'], {}), '(1)\n', (8861, 8864), False, 'from protorpc import messages\n'), ((8976, 9023), 'protorpc.messages.EnumField', 'messages.EnumField', (['LeaseReleaseRequestError', '(2)'], {}), '(LeaseReleaseRequestError, 2)\n', (8994, 9023), False, 'from protorpc import messages\n'), ((9153, 9191), 'protorpc.messages.StringField', 'messages.StringField', (['(3)'], {'required': '(True)'}), '(3, required=True)\n', (9173, 9191), False, 'from protorpc import messages\n'), ((9334, 9395), 
'protorpc.messages.MessageField', 'messages.MessageField', (['LeaseReleaseResponse', '(1)'], {'repeated': '(True)'}), '(LeaseReleaseResponse, 1, repeated=True)\n', (9355, 9395), False, 'from protorpc import messages\n'), ((9623, 9661), 'protorpc.messages.StringField', 'messages.StringField', (['(1)'], {'required': '(True)'}), '(1, required=True)\n', (9643, 9661), False, 'from protorpc import messages\n'), ((9722, 9759), 'protorpc.messages.MessageField', 'messages.MessageField', (['Instruction', '(2)'], {}), '(Instruction, 2)\n', (9743, 9759), False, 'from protorpc import messages\n'), ((10357, 10395), 'protorpc.messages.StringField', 'messages.StringField', (['(1)'], {'required': '(True)'}), '(1, required=True)\n', (10377, 10395), False, 'from protorpc import messages\n'), ((10506, 10552), 'protorpc.messages.EnumField', 'messages.EnumField', (['MachineInstructionError', '(2)'], {}), '(MachineInstructionError, 2)\n', (10524, 10552), False, 'from protorpc import messages\n'), ((10739, 10777), 'protorpc.messages.StringField', 'messages.StringField', (['(1)'], {'required': '(True)'}), '(1, required=True)\n', (10759, 10777), False, 'from protorpc import messages\n'), ((10846, 10876), 'protorpc.messages.EnumField', 'messages.EnumField', (['Backend', '(2)'], {}), '(Backend, 2)\n', (10864, 10876), False, 'from protorpc import messages\n'), ((11051, 11088), 'protorpc.messages.MessageField', 'messages.MessageField', (['Instruction', '(1)'], {}), '(Instruction, 1)\n', (11072, 11088), False, 'from protorpc import messages\n'), ((11129, 11152), 'protorpc.messages.StringField', 'messages.StringField', (['(2)'], {}), '(2)\n', (11149, 11152), False, 'from protorpc import messages\n'), ((11332, 11370), 'protorpc.messages.StringField', 'messages.StringField', (['(1)'], {'required': '(True)'}), '(1, required=True)\n', (11352, 11370), False, 'from protorpc import messages\n'), ((11419, 11449), 'protorpc.messages.EnumField', 'messages.EnumField', (['Backend', '(2)'], {}), '(Backend, 2)\n', (11437, 11449), False, 'from protorpc import messages\n')] |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unit tests for `tensorboard.backend.event_processing.data_provider`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import six
from six.moves import xrange # pylint: disable=redefined-builtin
import numpy as np
from tensorboard import context
from tensorboard.backend.event_processing import data_provider
from tensorboard.backend.event_processing import (
plugin_event_multiplexer as event_multiplexer,
)
from tensorboard.compat.proto import summary_pb2
from tensorboard.data import provider as base_provider
from tensorboard.plugins.graph import metadata as graph_metadata
from tensorboard.plugins.histogram import metadata as histogram_metadata
from tensorboard.plugins.histogram import summary_v2 as histogram_summary
from tensorboard.plugins.scalar import metadata as scalar_metadata
from tensorboard.plugins.scalar import summary_v2 as scalar_summary
from tensorboard.plugins.image import metadata as image_metadata
from tensorboard.plugins.image import summary_v2 as image_summary
from tensorboard.util import tensor_util
import tensorflow.compat.v1 as tf1
import tensorflow.compat.v2 as tf
tf1.enable_eager_execution()
class MultiplexerDataProviderTest(tf.test.TestCase):
def setUp(self):
super(MultiplexerDataProviderTest, self).setUp()
self.logdir = self.get_temp_dir()
self.ctx = context.RequestContext()
logdir = os.path.join(self.logdir, "polynomials")
with tf.summary.create_file_writer(logdir).as_default():
for i in xrange(10):
scalar_summary.scalar(
"square", i ** 2, step=2 * i, description="boxen"
)
scalar_summary.scalar("cube", i ** 3, step=3 * i)
logdir = os.path.join(self.logdir, "waves")
with tf.summary.create_file_writer(logdir).as_default():
for i in xrange(10):
scalar_summary.scalar("sine", tf.sin(float(i)), step=i)
scalar_summary.scalar(
"square", tf.sign(tf.sin(float(i))), step=i
)
# Summary with rank-0 data but not owned by the scalars plugin.
metadata = summary_pb2.SummaryMetadata()
metadata.plugin_data.plugin_name = "marigraphs"
metadata.data_class = summary_pb2.DATA_CLASS_SCALAR
tf.summary.write(
"high_tide", tensor=i, step=i, metadata=metadata
)
# Summary with rank-1 data of scalar data class (bad!).
metadata = summary_pb2.SummaryMetadata()
metadata.plugin_data.plugin_name = "greetings"
metadata.data_class = summary_pb2.DATA_CLASS_SCALAR
tf.summary.write(
"bad", tensor=[i, i], step=i, metadata=metadata
)
logdir = os.path.join(self.logdir, "lebesgue")
with tf.summary.create_file_writer(logdir).as_default():
data = [
("very smooth", (0.0, 0.25, 0.5, 0.75, 1.0), "uniform"),
("very smoothn't", (0.0, 0.01, 0.99, 1.0), "bimodal"),
]
for (description, distribution, name) in data:
tensor = tf.constant([distribution], dtype=tf.float64)
for i in xrange(1, 11):
histogram_summary.histogram(
name, tensor * i, step=i, description=description
)
logdir = os.path.join(self.logdir, "mondrian")
with tf.summary.create_file_writer(logdir).as_default():
data = [
("red", (221, 28, 38), "top-right"),
("blue", (1, 91, 158), "bottom-left"),
("yellow", (239, 220, 111), "bottom-right"),
]
for (name, color, description) in data:
image_1x1 = tf.constant([[[color]]], dtype=tf.uint8)
for i in xrange(1, 11):
# Use a non-monotonic sequence of sample sizes to
# test `max_length` calculation.
k = 6 - abs(6 - i) # 1, .., 6, .., 2
# a `k`-sample image summary of `i`-by-`i` images
image = tf.tile(image_1x1, [k, i, i, 1])
image_summary.image(
name,
image,
step=i,
description=description,
max_outputs=99,
)
def create_multiplexer(self):
multiplexer = event_multiplexer.EventMultiplexer()
multiplexer.AddRunsFromDirectory(self.logdir)
multiplexer.Reload()
return multiplexer
def create_provider(self):
multiplexer = self.create_multiplexer()
return data_provider.MultiplexerDataProvider(multiplexer, self.logdir)
def test_data_location(self):
provider = self.create_provider()
result = provider.data_location(self.ctx, experiment_id="unused")
self.assertEqual(result, self.logdir)
def test_list_plugins_with_no_graph(self):
provider = self.create_provider()
result = provider.list_plugins(self.ctx, experiment_id="unused")
self.assertItemsEqual(
result,
[
"greetings",
"marigraphs",
histogram_metadata.PLUGIN_NAME,
image_metadata.PLUGIN_NAME,
scalar_metadata.PLUGIN_NAME,
],
)
def test_list_plugins_with_graph(self):
with tf.compat.v1.Graph().as_default() as graph:
writer = tf.compat.v1.summary.FileWriter(self.logdir)
writer.add_graph(graph)
writer.flush()
provider = self.create_provider()
result = provider.list_plugins(self.ctx, experiment_id="unused")
self.assertItemsEqual(
result,
[
"greetings",
"marigraphs",
graph_metadata.PLUGIN_NAME,
histogram_metadata.PLUGIN_NAME,
image_metadata.PLUGIN_NAME,
scalar_metadata.PLUGIN_NAME,
],
)
def test_list_runs(self):
# We can't control the timestamps of events written to disk (without
# manually reading the tfrecords, modifying the data, and writing
# them back out), so we provide a fake multiplexer instead.
start_times = {
"second_2": 2.0,
"first": 1.5,
"no_time": None,
"second_1": 2.0,
}
class FakeMultiplexer(object):
def Runs(multiplexer):
result = ["second_2", "first", "no_time", "second_1"]
self.assertItemsEqual(result, start_times)
return result
def FirstEventTimestamp(multiplexer, run):
self.assertIn(run, start_times)
result = start_times[run]
if result is None:
                    raise ValueError("No event timestamp could be found")
else:
return result
multiplexer = FakeMultiplexer()
provider = data_provider.MultiplexerDataProvider(
multiplexer, "fake_logdir"
)
result = provider.list_runs(self.ctx, experiment_id="unused")
self.assertItemsEqual(
result,
[
base_provider.Run(
run_id=run, run_name=run, start_time=start_time
)
for (run, start_time) in six.iteritems(start_times)
],
)
def test_list_scalars_all(self):
provider = self.create_provider()
result = provider.list_scalars(
self.ctx,
experiment_id="unused",
plugin_name=scalar_metadata.PLUGIN_NAME,
run_tag_filter=None,
)
self.assertItemsEqual(result.keys(), ["polynomials", "waves"])
self.assertItemsEqual(result["polynomials"].keys(), ["square", "cube"])
self.assertItemsEqual(result["waves"].keys(), ["square", "sine"])
sample = result["polynomials"]["square"]
self.assertIsInstance(sample, base_provider.ScalarTimeSeries)
self.assertEqual(sample.max_step, 18)
# nothing to test for wall time, as it can't be mocked out
self.assertEqual(sample.plugin_content, b"")
self.assertEqual(
sample.display_name, ""
) # not written by V2 summary ops
self.assertEqual(sample.description, "boxen")
def test_list_scalars_filters(self):
provider = self.create_provider()
result = provider.list_scalars(
self.ctx,
experiment_id="unused",
plugin_name=scalar_metadata.PLUGIN_NAME,
run_tag_filter=base_provider.RunTagFilter(["waves"], ["square"]),
)
self.assertItemsEqual(result.keys(), ["waves"])
self.assertItemsEqual(result["waves"].keys(), ["square"])
result = provider.list_scalars(
self.ctx,
experiment_id="unused",
plugin_name=scalar_metadata.PLUGIN_NAME,
run_tag_filter=base_provider.RunTagFilter(
tags=["square", "quartic"]
),
)
self.assertItemsEqual(result.keys(), ["polynomials", "waves"])
self.assertItemsEqual(result["polynomials"].keys(), ["square"])
self.assertItemsEqual(result["waves"].keys(), ["square"])
result = provider.list_scalars(
self.ctx,
experiment_id="unused",
plugin_name=scalar_metadata.PLUGIN_NAME,
run_tag_filter=base_provider.RunTagFilter(runs=["waves", "hugs"]),
)
self.assertItemsEqual(result.keys(), ["waves"])
self.assertItemsEqual(result["waves"].keys(), ["sine", "square"])
result = provider.list_scalars(
self.ctx,
experiment_id="unused",
plugin_name=scalar_metadata.PLUGIN_NAME,
run_tag_filter=base_provider.RunTagFilter(["un"], ["likely"]),
)
self.assertEqual(result, {})
def test_read_scalars(self):
multiplexer = self.create_multiplexer()
provider = data_provider.MultiplexerDataProvider(
multiplexer, self.logdir
)
run_tag_filter = base_provider.RunTagFilter(
runs=["waves", "polynomials", "unicorns"],
tags=["sine", "square", "cube", "iridescence"],
)
result = provider.read_scalars(
self.ctx,
experiment_id="unused",
plugin_name=scalar_metadata.PLUGIN_NAME,
run_tag_filter=run_tag_filter,
downsample=100,
)
self.assertItemsEqual(result.keys(), ["polynomials", "waves"])
self.assertItemsEqual(result["polynomials"].keys(), ["square", "cube"])
self.assertItemsEqual(result["waves"].keys(), ["square", "sine"])
for run in result:
for tag in result[run]:
tensor_events = multiplexer.Tensors(run, tag)
self.assertLen(result[run][tag], len(tensor_events))
for (datum, event) in zip(result[run][tag], tensor_events):
self.assertEqual(datum.step, event.step)
self.assertEqual(datum.wall_time, event.wall_time)
self.assertEqual(
datum.value,
tensor_util.make_ndarray(event.tensor_proto).item(),
)
def test_read_scalars_downsamples(self):
# TODO(@wchargin): Verify that this always includes the most
# recent datum, as specified by the interface.
multiplexer = self.create_multiplexer()
provider = data_provider.MultiplexerDataProvider(
multiplexer, self.logdir
)
result = provider.read_scalars(
self.ctx,
experiment_id="unused",
plugin_name=scalar_metadata.PLUGIN_NAME,
downsample=3,
)
self.assertLen(result["waves"]["sine"], 3)
def test_read_scalars_but_not_rank_0(self):
provider = self.create_provider()
run_tag_filter = base_provider.RunTagFilter(["waves"], ["bad"])
# No explicit checks yet.
with six.assertRaisesRegex(
self,
ValueError,
"can only convert an array of size 1 to a Python scalar",
):
provider.read_scalars(
self.ctx,
experiment_id="unused",
plugin_name="greetings",
run_tag_filter=run_tag_filter,
downsample=100,
)
def test_list_tensors_all(self):
provider = self.create_provider()
result = provider.list_tensors(
self.ctx,
experiment_id="unused",
plugin_name=histogram_metadata.PLUGIN_NAME,
run_tag_filter=None,
)
self.assertItemsEqual(result.keys(), ["lebesgue"])
self.assertItemsEqual(result["lebesgue"].keys(), ["uniform", "bimodal"])
sample = result["lebesgue"]["uniform"]
self.assertIsInstance(sample, base_provider.TensorTimeSeries)
self.assertEqual(sample.max_step, 10)
# nothing to test for wall time, as it can't be mocked out
self.assertEqual(sample.plugin_content, b"")
self.assertEqual(
sample.display_name, ""
) # not written by V2 summary ops
self.assertEqual(sample.description, "very smooth")
def test_list_tensors_filters(self):
provider = self.create_provider()
# Quick check only, as scalars and tensors use the same underlying
# filtering implementation.
result = provider.list_tensors(
self.ctx,
experiment_id="unused",
plugin_name=histogram_metadata.PLUGIN_NAME,
run_tag_filter=base_provider.RunTagFilter(
["lebesgue"], ["uniform"]
),
)
self.assertItemsEqual(result.keys(), ["lebesgue"])
self.assertItemsEqual(result["lebesgue"].keys(), ["uniform"])
def test_read_tensors(self):
multiplexer = self.create_multiplexer()
provider = data_provider.MultiplexerDataProvider(
multiplexer, self.logdir
)
run_tag_filter = base_provider.RunTagFilter(
runs=["lebesgue"],
tags=["uniform", "bimodal"],
)
result = provider.read_tensors(
self.ctx,
experiment_id="unused",
plugin_name=histogram_metadata.PLUGIN_NAME,
run_tag_filter=run_tag_filter,
downsample=100,
)
self.assertItemsEqual(result.keys(), ["lebesgue"])
self.assertItemsEqual(result["lebesgue"].keys(), ["uniform", "bimodal"])
for run in result:
for tag in result[run]:
tensor_events = multiplexer.Tensors(run, tag)
self.assertLen(result[run][tag], len(tensor_events))
for (datum, event) in zip(result[run][tag], tensor_events):
self.assertEqual(datum.step, event.step)
self.assertEqual(datum.wall_time, event.wall_time)
np.testing.assert_equal(
datum.numpy,
tensor_util.make_ndarray(event.tensor_proto),
)
def test_read_tensors_downsamples(self):
multiplexer = self.create_multiplexer()
provider = data_provider.MultiplexerDataProvider(
multiplexer, self.logdir
)
result = provider.read_tensors(
self.ctx,
experiment_id="unused",
plugin_name=histogram_metadata.PLUGIN_NAME,
downsample=3,
)
self.assertLen(result["lebesgue"]["uniform"], 3)
def test_list_blob_sequences(self):
provider = self.create_provider()
with self.subTest("finds all time series for a plugin"):
result = provider.list_blob_sequences(
self.ctx,
experiment_id="unused",
plugin_name=image_metadata.PLUGIN_NAME,
)
self.assertItemsEqual(result.keys(), ["mondrian"])
self.assertItemsEqual(
result["mondrian"].keys(), ["red", "blue", "yellow"]
)
sample = result["mondrian"]["blue"]
self.assertIsInstance(sample, base_provider.BlobSequenceTimeSeries)
self.assertEqual(sample.max_step, 10)
# nothing to test for wall time, as it can't be mocked out
self.assertEqual(sample.plugin_content, b"")
self.assertEqual(sample.max_length, 6 + 2)
self.assertEqual(sample.description, "bottom-left")
self.assertEqual(sample.display_name, "")
with self.subTest("filters by run/tag"):
result = provider.list_blob_sequences(
self.ctx,
experiment_id="unused",
plugin_name=image_metadata.PLUGIN_NAME,
run_tag_filter=base_provider.RunTagFilter(
runs=["mondrian", "picasso"], tags=["yellow", "green't"]
),
)
self.assertItemsEqual(result.keys(), ["mondrian"])
self.assertItemsEqual(result["mondrian"].keys(), ["yellow"])
self.assertIsInstance(
result["mondrian"]["yellow"],
base_provider.BlobSequenceTimeSeries,
)
def test_read_blob_sequences_and_read_blob(self):
provider = self.create_provider()
with self.subTest("reads all time series for a plugin"):
result = provider.read_blob_sequences(
self.ctx,
experiment_id="unused",
plugin_name=image_metadata.PLUGIN_NAME,
downsample=4,
)
self.assertItemsEqual(result.keys(), ["mondrian"])
self.assertItemsEqual(
result["mondrian"].keys(), ["red", "blue", "yellow"]
)
sample = result["mondrian"]["blue"]
self.assertLen(sample, 4) # downsampled from 10
last = sample[-1]
self.assertIsInstance(last, base_provider.BlobSequenceDatum)
self.assertEqual(last.step, 10)
self.assertLen(last.values, 2 + 2)
blobs = [
provider.read_blob(self.ctx, blob_key=v.blob_key)
for v in last.values
]
self.assertEqual(blobs[0], b"10")
self.assertEqual(blobs[1], b"10")
self.assertStartsWith(blobs[2], b"\x89PNG")
self.assertStartsWith(blobs[3], b"\x89PNG")
blue1 = blobs[2]
blue2 = blobs[3]
red1 = provider.read_blob(
self.ctx,
blob_key=result["mondrian"]["red"][-1].values[2].blob_key,
)
self.assertEqual(blue1, blue2)
self.assertNotEqual(blue1, red1)
with self.subTest("filters by run/tag"):
result = provider.read_blob_sequences(
self.ctx,
experiment_id="unused",
plugin_name=image_metadata.PLUGIN_NAME,
run_tag_filter=base_provider.RunTagFilter(
runs=["mondrian", "picasso"], tags=["yellow", "green't"]
),
downsample=1,
)
self.assertItemsEqual(result.keys(), ["mondrian"])
self.assertItemsEqual(result["mondrian"].keys(), ["yellow"])
self.assertIsInstance(
result["mondrian"]["yellow"][0],
base_provider.BlobSequenceDatum,
)
class DownsampleTest(tf.test.TestCase):
"""Tests for the `_downsample` private helper function."""
def test_deterministic(self):
xs = "abcdefg"
expected = data_provider._downsample(xs, k=4)
for _ in range(100):
actual = data_provider._downsample(xs, k=4)
self.assertEqual(actual, expected)
def test_underlong_ok(self):
xs = list("abcdefg")
actual = data_provider._downsample(xs, k=10)
expected = list("abcdefg")
self.assertIsNot(actual, xs)
self.assertEqual(actual, expected)
def test_inorder(self):
xs = list(range(10000))
actual = data_provider._downsample(xs, k=100)
self.assertEqual(actual, sorted(actual))
def test_zero(self):
xs = "abcdefg"
actual = data_provider._downsample(xs, k=0)
self.assertEqual(actual, [])
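# Illustrative sketch only: this is NOT the actual implementation of
# `data_provider._downsample`, just a minimal function with the properties
# the tests above exercise (deterministic, order-preserving, returns a new
# list, tolerates k >= len(xs) and k == 0).
def _example_downsample(xs, k):
    xs = list(xs)
    if k <= 0:
        return []
    if k >= len(xs):
        return xs  # already a fresh copy of the input
    # Evenly spaced indices keep the output in input order.
    indices = np.linspace(0, len(xs) - 1, num=k).astype(int)
    return [xs[i] for i in indices]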
if __name__ == "__main__":
tf.test.main()
| [
"tensorflow.compat.v2.compat.v1.summary.FileWriter",
"tensorflow.compat.v2.compat.v1.Graph",
"tensorboard.data.provider.RunTagFilter",
"six.moves.xrange",
"tensorflow.compat.v2.summary.write",
"tensorboard.compat.proto.summary_pb2.SummaryMetadata",
"tensorboard.plugins.image.summary_v2.image",
"tensorboard.backend.event_processing.data_provider.MultiplexerDataProvider",
"tensorboard.backend.event_processing.data_provider._downsample",
"six.assertRaisesRegex",
"tensorboard.backend.event_processing.plugin_event_multiplexer.EventMultiplexer",
"tensorboard.util.tensor_util.make_ndarray",
"tensorboard.context.RequestContext",
"tensorflow.compat.v2.constant",
"os.path.join",
"tensorflow.compat.v2.tile",
"tensorboard.plugins.scalar.summary_v2.scalar",
"tensorflow.compat.v2.summary.create_file_writer",
"tensorflow.compat.v2.test.main",
"tensorboard.data.provider.Run",
"tensorflow.compat.v1.enable_eager_execution",
"six.iteritems",
"tensorboard.plugins.histogram.summary_v2.histogram"
] | [((1874, 1902), 'tensorflow.compat.v1.enable_eager_execution', 'tf1.enable_eager_execution', ([], {}), '()\n', (1900, 1902), True, 'import tensorflow.compat.v1 as tf1\n'), ((21426, 21440), 'tensorflow.compat.v2.test.main', 'tf.test.main', ([], {}), '()\n', (21438, 21440), True, 'import tensorflow.compat.v2 as tf\n'), ((2097, 2121), 'tensorboard.context.RequestContext', 'context.RequestContext', ([], {}), '()\n', (2119, 2121), False, 'from tensorboard import context\n'), ((2140, 2180), 'os.path.join', 'os.path.join', (['self.logdir', '"""polynomials"""'], {}), "(self.logdir, 'polynomials')\n", (2152, 2180), False, 'import os\n'), ((2490, 2524), 'os.path.join', 'os.path.join', (['self.logdir', '"""waves"""'], {}), "(self.logdir, 'waves')\n", (2502, 2524), False, 'import os\n'), ((3604, 3641), 'os.path.join', 'os.path.join', (['self.logdir', '"""lebesgue"""'], {}), "(self.logdir, 'lebesgue')\n", (3616, 3641), False, 'import os\n'), ((4219, 4256), 'os.path.join', 'os.path.join', (['self.logdir', '"""mondrian"""'], {}), "(self.logdir, 'mondrian')\n", (4231, 4256), False, 'import os\n'), ((5301, 5337), 'tensorboard.backend.event_processing.plugin_event_multiplexer.EventMultiplexer', 'event_multiplexer.EventMultiplexer', ([], {}), '()\n', (5335, 5337), True, 'from tensorboard.backend.event_processing import plugin_event_multiplexer as event_multiplexer\n'), ((5543, 5606), 'tensorboard.backend.event_processing.data_provider.MultiplexerDataProvider', 'data_provider.MultiplexerDataProvider', (['multiplexer', 'self.logdir'], {}), '(multiplexer, self.logdir)\n', (5580, 5606), False, 'from tensorboard.backend.event_processing import data_provider\n'), ((7931, 7996), 'tensorboard.backend.event_processing.data_provider.MultiplexerDataProvider', 'data_provider.MultiplexerDataProvider', (['multiplexer', '"""fake_logdir"""'], {}), "(multiplexer, 'fake_logdir')\n", (7968, 7996), False, 'from tensorboard.backend.event_processing import data_provider\n'), ((10987, 11050), 'tensorboard.backend.event_processing.data_provider.MultiplexerDataProvider', 'data_provider.MultiplexerDataProvider', (['multiplexer', 'self.logdir'], {}), '(multiplexer, self.logdir)\n', (11024, 11050), False, 'from tensorboard.backend.event_processing import data_provider\n'), ((11099, 11221), 'tensorboard.data.provider.RunTagFilter', 'base_provider.RunTagFilter', ([], {'runs': "['waves', 'polynomials', 'unicorns']", 'tags': "['sine', 'square', 'cube', 'iridescence']"}), "(runs=['waves', 'polynomials', 'unicorns'], tags=\n ['sine', 'square', 'cube', 'iridescence'])\n", (11125, 11221), True, 'from tensorboard.data import provider as base_provider\n'), ((12523, 12586), 'tensorboard.backend.event_processing.data_provider.MultiplexerDataProvider', 'data_provider.MultiplexerDataProvider', (['multiplexer', 'self.logdir'], {}), '(multiplexer, self.logdir)\n', (12560, 12586), False, 'from tensorboard.backend.event_processing import data_provider\n'), ((12963, 13009), 'tensorboard.data.provider.RunTagFilter', 'base_provider.RunTagFilter', (["['waves']", "['bad']"], {}), "(['waves'], ['bad'])\n", (12989, 13009), True, 'from tensorboard.data import provider as base_provider\n'), ((15005, 15068), 'tensorboard.backend.event_processing.data_provider.MultiplexerDataProvider', 'data_provider.MultiplexerDataProvider', (['multiplexer', 'self.logdir'], {}), '(multiplexer, self.logdir)\n', (15042, 15068), False, 'from tensorboard.backend.event_processing import data_provider\n'), ((15117, 15191), 'tensorboard.data.provider.RunTagFilter', 
'base_provider.RunTagFilter', ([], {'runs': "['lebesgue']", 'tags': "['uniform', 'bimodal']"}), "(runs=['lebesgue'], tags=['uniform', 'bimodal'])\n", (15143, 15191), True, 'from tensorboard.data import provider as base_provider\n'), ((16292, 16355), 'tensorboard.backend.event_processing.data_provider.MultiplexerDataProvider', 'data_provider.MultiplexerDataProvider', (['multiplexer', 'self.logdir'], {}), '(multiplexer, self.logdir)\n', (16329, 16355), False, 'from tensorboard.backend.event_processing import data_provider\n'), ((20693, 20727), 'tensorboard.backend.event_processing.data_provider._downsample', 'data_provider._downsample', (['xs'], {'k': '(4)'}), '(xs, k=4)\n', (20718, 20727), False, 'from tensorboard.backend.event_processing import data_provider\n'), ((20940, 20975), 'tensorboard.backend.event_processing.data_provider._downsample', 'data_provider._downsample', (['xs'], {'k': '(10)'}), '(xs, k=10)\n', (20965, 20975), False, 'from tensorboard.backend.event_processing import data_provider\n'), ((21169, 21205), 'tensorboard.backend.event_processing.data_provider._downsample', 'data_provider._downsample', (['xs'], {'k': '(100)'}), '(xs, k=100)\n', (21194, 21205), False, 'from tensorboard.backend.event_processing import data_provider\n'), ((21321, 21355), 'tensorboard.backend.event_processing.data_provider._downsample', 'data_provider._downsample', (['xs'], {'k': '(0)'}), '(xs, k=0)\n', (21346, 21355), False, 'from tensorboard.backend.event_processing import data_provider\n'), ((2267, 2277), 'six.moves.xrange', 'xrange', (['(10)'], {}), '(10)\n', (2273, 2277), False, 'from six.moves import xrange\n'), ((2611, 2621), 'six.moves.xrange', 'xrange', (['(10)'], {}), '(10)\n', (2617, 2621), False, 'from six.moves import xrange\n'), ((6376, 6420), 'tensorflow.compat.v2.compat.v1.summary.FileWriter', 'tf.compat.v1.summary.FileWriter', (['self.logdir'], {}), '(self.logdir)\n', (6407, 6420), True, 'import tensorflow.compat.v2 as tf\n'), ((13057, 13158), 'six.assertRaisesRegex', 'six.assertRaisesRegex', (['self', 'ValueError', '"""can only convert an array of size 1 to a Python scalar"""'], {}), "(self, ValueError,\n 'can only convert an array of size 1 to a Python scalar')\n", (13078, 13158), False, 'import six\n'), ((20778, 20812), 'tensorboard.backend.event_processing.data_provider._downsample', 'data_provider._downsample', (['xs'], {'k': '(4)'}), '(xs, k=4)\n', (20803, 20812), False, 'from tensorboard.backend.event_processing import data_provider\n'), ((2295, 2367), 'tensorboard.plugins.scalar.summary_v2.scalar', 'scalar_summary.scalar', (['"""square"""', '(i ** 2)'], {'step': '(2 * i)', 'description': '"""boxen"""'}), "('square', i ** 2, step=2 * i, description='boxen')\n", (2316, 2367), True, 'from tensorboard.plugins.scalar import summary_v2 as scalar_summary\n'), ((2422, 2471), 'tensorboard.plugins.scalar.summary_v2.scalar', 'scalar_summary.scalar', (['"""cube"""', '(i ** 3)'], {'step': '(3 * i)'}), "('cube', i ** 3, step=3 * i)\n", (2443, 2471), True, 'from tensorboard.plugins.scalar import summary_v2 as scalar_summary\n'), ((2923, 2952), 'tensorboard.compat.proto.summary_pb2.SummaryMetadata', 'summary_pb2.SummaryMetadata', ([], {}), '()\n', (2950, 2952), False, 'from tensorboard.compat.proto import summary_pb2\n'), ((3101, 3167), 'tensorflow.compat.v2.summary.write', 'tf.summary.write', (['"""high_tide"""'], {'tensor': 'i', 'step': 'i', 'metadata': 'metadata'}), "('high_tide', tensor=i, step=i, metadata=metadata)\n", (3117, 3167), True, 'import tensorflow.compat.v2 as tf\n'), ((3305, 
3334), 'tensorboard.compat.proto.summary_pb2.SummaryMetadata', 'summary_pb2.SummaryMetadata', ([], {}), '()\n', (3332, 3334), False, 'from tensorboard.compat.proto import summary_pb2\n'), ((3482, 3547), 'tensorflow.compat.v2.summary.write', 'tf.summary.write', (['"""bad"""'], {'tensor': '[i, i]', 'step': 'i', 'metadata': 'metadata'}), "('bad', tensor=[i, i], step=i, metadata=metadata)\n", (3498, 3547), True, 'import tensorflow.compat.v2 as tf\n'), ((3970, 4015), 'tensorflow.compat.v2.constant', 'tf.constant', (['[distribution]'], {'dtype': 'tf.float64'}), '([distribution], dtype=tf.float64)\n', (3981, 4015), True, 'import tensorflow.compat.v2 as tf\n'), ((4041, 4054), 'six.moves.xrange', 'xrange', (['(1)', '(11)'], {}), '(1, 11)\n', (4047, 4054), False, 'from six.moves import xrange\n'), ((4606, 4646), 'tensorflow.compat.v2.constant', 'tf.constant', (['[[[color]]]'], {'dtype': 'tf.uint8'}), '([[[color]]], dtype=tf.uint8)\n', (4617, 4646), True, 'import tensorflow.compat.v2 as tf\n'), ((4672, 4685), 'six.moves.xrange', 'xrange', (['(1)', '(11)'], {}), '(1, 11)\n', (4678, 4685), False, 'from six.moves import xrange\n'), ((8170, 8236), 'tensorboard.data.provider.Run', 'base_provider.Run', ([], {'run_id': 'run', 'run_name': 'run', 'start_time': 'start_time'}), '(run_id=run, run_name=run, start_time=start_time)\n', (8187, 8236), True, 'from tensorboard.data import provider as base_provider\n'), ((9574, 9623), 'tensorboard.data.provider.RunTagFilter', 'base_provider.RunTagFilter', (["['waves']", "['square']"], {}), "(['waves'], ['square'])\n", (9600, 9623), True, 'from tensorboard.data import provider as base_provider\n'), ((9936, 9990), 'tensorboard.data.provider.RunTagFilter', 'base_provider.RunTagFilter', ([], {'tags': "['square', 'quartic']"}), "(tags=['square', 'quartic'])\n", (9962, 9990), True, 'from tensorboard.data import provider as base_provider\n'), ((10420, 10470), 'tensorboard.data.provider.RunTagFilter', 'base_provider.RunTagFilter', ([], {'runs': "['waves', 'hugs']"}), "(runs=['waves', 'hugs'])\n", (10446, 10470), True, 'from tensorboard.data import provider as base_provider\n'), ((10791, 10837), 'tensorboard.data.provider.RunTagFilter', 'base_provider.RunTagFilter', (["['un']", "['likely']"], {}), "(['un'], ['likely'])\n", (10817, 10837), True, 'from tensorboard.data import provider as base_provider\n'), ((14680, 14733), 'tensorboard.data.provider.RunTagFilter', 'base_provider.RunTagFilter', (["['lebesgue']", "['uniform']"], {}), "(['lebesgue'], ['uniform'])\n", (14706, 14733), True, 'from tensorboard.data import provider as base_provider\n'), ((2194, 2231), 'tensorflow.compat.v2.summary.create_file_writer', 'tf.summary.create_file_writer', (['logdir'], {}), '(logdir)\n', (2223, 2231), True, 'import tensorflow.compat.v2 as tf\n'), ((2538, 2575), 'tensorflow.compat.v2.summary.create_file_writer', 'tf.summary.create_file_writer', (['logdir'], {}), '(logdir)\n', (2567, 2575), True, 'import tensorflow.compat.v2 as tf\n'), ((3655, 3692), 'tensorflow.compat.v2.summary.create_file_writer', 'tf.summary.create_file_writer', (['logdir'], {}), '(logdir)\n', (3684, 3692), True, 'import tensorflow.compat.v2 as tf\n'), ((4076, 4154), 'tensorboard.plugins.histogram.summary_v2.histogram', 'histogram_summary.histogram', (['name', '(tensor * i)'], {'step': 'i', 'description': 'description'}), '(name, tensor * i, step=i, description=description)\n', (4103, 4154), True, 'from tensorboard.plugins.histogram import summary_v2 as histogram_summary\n'), ((4270, 4307), 
'tensorflow.compat.v2.summary.create_file_writer', 'tf.summary.create_file_writer', (['logdir'], {}), '(logdir)\n', (4299, 4307), True, 'import tensorflow.compat.v2 as tf\n'), ((4966, 4998), 'tensorflow.compat.v2.tile', 'tf.tile', (['image_1x1', '[k, i, i, 1]'], {}), '(image_1x1, [k, i, i, 1])\n', (4973, 4998), True, 'import tensorflow.compat.v2 as tf\n'), ((5019, 5104), 'tensorboard.plugins.image.summary_v2.image', 'image_summary.image', (['name', 'image'], {'step': 'i', 'description': 'description', 'max_outputs': '(99)'}), '(name, image, step=i, description=description,\n max_outputs=99)\n', (5038, 5104), True, 'from tensorboard.plugins.image import summary_v2 as image_summary\n'), ((6311, 6331), 'tensorflow.compat.v2.compat.v1.Graph', 'tf.compat.v1.Graph', ([], {}), '()\n', (6329, 6331), True, 'import tensorflow.compat.v2 as tf\n'), ((8316, 8342), 'six.iteritems', 'six.iteritems', (['start_times'], {}), '(start_times)\n', (8329, 8342), False, 'import six\n'), ((17875, 17963), 'tensorboard.data.provider.RunTagFilter', 'base_provider.RunTagFilter', ([], {'runs': "['mondrian', 'picasso']", 'tags': '[\'yellow\', "green\'t"]'}), '(runs=[\'mondrian\', \'picasso\'], tags=[\'yellow\',\n "green\'t"])\n', (17901, 17963), True, 'from tensorboard.data import provider as base_provider\n'), ((20060, 20148), 'tensorboard.data.provider.RunTagFilter', 'base_provider.RunTagFilter', ([], {'runs': "['mondrian', 'picasso']", 'tags': '[\'yellow\', "green\'t"]'}), '(runs=[\'mondrian\', \'picasso\'], tags=[\'yellow\',\n "green\'t"])\n', (20086, 20148), True, 'from tensorboard.data import provider as base_provider\n'), ((16111, 16155), 'tensorboard.util.tensor_util.make_ndarray', 'tensor_util.make_ndarray', (['event.tensor_proto'], {}), '(event.tensor_proto)\n', (16135, 16155), False, 'from tensorboard.util import tensor_util\n'), ((12211, 12255), 'tensorboard.util.tensor_util.make_ndarray', 'tensor_util.make_ndarray', (['event.tensor_proto'], {}), '(event.tensor_proto)\n', (12235, 12255), False, 'from tensorboard.util import tensor_util\n')] |
import pyredner
import numpy as np
import torch
cam = pyredner.Camera(position = torch.tensor([0.0, 0.0, -5.0]),
look_at = torch.tensor([0.0, 0.0, 0.0]),
up = torch.tensor([0.0, 1.0, 0.0]),
fov = torch.tensor([45.0]), # in degree
                      clip_near = 1e-2, # needs to be > 0
resolution = (256, 256),
fisheye = False)
mat_grey = pyredner.Material(\
diffuse_reflectance = \
torch.tensor([0.5, 0.5, 0.5], device = pyredner.get_device()))
materials = [mat_grey]
shape_triangle = pyredner.Shape(\
vertices = torch.tensor([[-1.7, 1.0, 0.0], [1.0, 1.0, 0.0], [-0.5, -1.0, 0.0]],
device = pyredner.get_device()),
indices = torch.tensor([[0, 1, 2]], dtype = torch.int32,
device = pyredner.get_device()),
uvs = None,
normals = None,
material_id = 0)
shape_light = pyredner.Shape(\
vertices = torch.tensor([[-1.0, -1.0, -7.0],
[ 1.0, -1.0, -7.0],
[-1.0, 1.0, -7.0],
[ 1.0, 1.0, -7.0]], device = pyredner.get_device()),
indices = torch.tensor([[0, 1, 2],[1, 3, 2]],
dtype = torch.int32, device = pyredner.get_device()),
uvs = None,
normals = None,
material_id = 0)
shapes = [shape_triangle, shape_light]
light = pyredner.AreaLight(shape_id = 1,
intensity = torch.tensor([20.0,20.0,20.0]))
area_lights = [light]
scene = pyredner.Scene(cam, shapes, materials, area_lights)
scene_state_dict = scene.state_dict()
scene = pyredner.Scene.load_state_dict(scene_state_dict)
scene_args = pyredner.RenderFunction.serialize_scene(\
scene = scene,
num_samples = 16,
max_bounces = 1)
render = pyredner.RenderFunction.apply
img = render(0, *scene_args)
pyredner.imwrite(img.cpu(), 'results/test_serialize/img.exr')
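# Illustrative sanity check (not part of the original test script): the
# rendered image should be finite everywhere and match the camera resolution.
# Only standard PyTorch calls are used here.
assert torch.isfinite(img).all(), 'rendered image contains NaN/Inf'
assert img.shape[:2] == (256, 256)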
| [
"pyredner.get_device",
"pyredner.Scene.load_state_dict",
"pyredner.Scene",
"torch.tensor",
"pyredner.RenderFunction.serialize_scene"
] | [((1526, 1577), 'pyredner.Scene', 'pyredner.Scene', (['cam', 'shapes', 'materials', 'area_lights'], {}), '(cam, shapes, materials, area_lights)\n', (1540, 1577), False, 'import pyredner\n'), ((1625, 1673), 'pyredner.Scene.load_state_dict', 'pyredner.Scene.load_state_dict', (['scene_state_dict'], {}), '(scene_state_dict)\n', (1655, 1673), False, 'import pyredner\n'), ((1688, 1775), 'pyredner.RenderFunction.serialize_scene', 'pyredner.RenderFunction.serialize_scene', ([], {'scene': 'scene', 'num_samples': '(16)', 'max_bounces': '(1)'}), '(scene=scene, num_samples=16,\n max_bounces=1)\n', (1727, 1775), False, 'import pyredner\n'), ((82, 112), 'torch.tensor', 'torch.tensor', (['[0.0, 0.0, -5.0]'], {}), '([0.0, 0.0, -5.0])\n', (94, 112), False, 'import torch\n'), ((146, 175), 'torch.tensor', 'torch.tensor', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (158, 175), False, 'import torch\n'), ((204, 233), 'torch.tensor', 'torch.tensor', (['[0.0, 1.0, 0.0]'], {}), '([0.0, 1.0, 0.0])\n', (216, 233), False, 'import torch\n'), ((263, 283), 'torch.tensor', 'torch.tensor', (['[45.0]'], {}), '([45.0])\n', (275, 283), False, 'import torch\n'), ((1463, 1495), 'torch.tensor', 'torch.tensor', (['[20.0, 20.0, 20.0]'], {}), '([20.0, 20.0, 20.0])\n', (1475, 1495), False, 'import torch\n'), ((545, 566), 'pyredner.get_device', 'pyredner.get_device', ([], {}), '()\n', (564, 566), False, 'import pyredner\n'), ((728, 749), 'pyredner.get_device', 'pyredner.get_device', ([], {}), '()\n', (747, 749), False, 'import pyredner\n'), ((830, 851), 'pyredner.get_device', 'pyredner.get_device', ([], {}), '()\n', (849, 851), False, 'import pyredner\n'), ((1149, 1170), 'pyredner.get_device', 'pyredner.get_device', ([], {}), '()\n', (1168, 1170), False, 'import pyredner\n'), ((1261, 1282), 'pyredner.get_device', 'pyredner.get_device', ([], {}), '()\n', (1280, 1282), False, 'import pyredner\n')] |
from typing import Callable, AnyStr, Optional
from zlib import compress as default_compress, decompress as default_decompress
from .cache import Cache
from ..constants import NOT_FOUND
class CacheCompressionDecorator(Cache):
def __init__(
self,
cache: Cache,
compress: Optional[Callable[[str], AnyStr]] = ...,
decompress: Optional[Callable[[AnyStr], str]] = ...,
) -> None:
super().__init__()
self._cache = cache
if compress is None:
self._compress = self._noop
elif compress is ...:
self._compress = self._default_compress
else:
self._compress = compress
if decompress is None:
self._decompress = self._noop
elif decompress is ...:
self._decompress = self._default_decompress
else:
self._decompress = decompress
def get(self, key):
value = self._cache.get(key)
return value if value is NOT_FOUND else self._decompress(value)
def set(self, key, value, expiration: int) -> None:
self._cache.set(key, self._compress(value), expiration)
@staticmethod
def _noop(x):
return x
@staticmethod
def _default_compress(obj: str) -> bytes:
return default_compress(obj.encode("UTF-8"))
@staticmethod
def _default_decompress(data: bytes) -> str:
return default_decompress(data).decode("UTF-8")
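# --- Illustrative usage sketch (not part of the original module) ---
# `_DictCache` is a hypothetical in-memory stand-in for a concrete `Cache`
# backend; it only assumes the `get(key)` / `set(key, value, expiration)`
# interface that this decorator itself relies on. If the real `Cache` base
# class declares further abstract methods, they would need stubs as well.
def _example_usage() -> None:
    class _DictCache(Cache):
        def __init__(self) -> None:
            super().__init__()
            self._data = {}
        def get(self, key):
            return self._data.get(key, NOT_FOUND)
        def set(self, key, value, expiration: int) -> None:
            self._data[key] = value
    cache = CacheCompressionDecorator(_DictCache())
    cache.set("greeting", "hello world", expiration=60)  # stored zlib-compressed
    assert cache.get("greeting") == "hello world"  # transparently decompressed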
| [
"zlib.decompress"
] | [((1403, 1427), 'zlib.decompress', 'default_decompress', (['data'], {}), '(data)\n', (1421, 1427), True, 'from zlib import compress as default_compress, decompress as default_decompress\n')] |
"""Mobjects representing vector fields."""
__all__ = [
"VectorField",
"ArrowVectorField",
"StreamLines",
]
import itertools as it
import random
from math import ceil, floor
from typing import Callable, Iterable, Optional, Sequence, Tuple, Type
import numpy as np
from colour import Color
from PIL import Image
from .. import config
from ..animation.composition import AnimationGroup, Succession
from ..animation.creation import Create
from ..animation.indication import ShowPassingFlash
from ..animation.update import UpdateFromAlphaFunc
from ..constants import OUT, RIGHT, UP
from ..mobject.geometry import Vector
from ..mobject.mobject import Mobject
from ..mobject.types.vectorized_mobject import VGroup, VMobject
from ..utils.bezier import interpolate, inverse_interpolate
from ..utils.color import BLUE_E, GREEN, RED, YELLOW, color_to_rgb, rgb_to_color
from ..utils.deprecation import deprecated_params
from ..utils.rate_functions import ease_out_sine, linear
from ..utils.simple_functions import sigmoid
from .types.opengl_vectorized_mobject import OpenGLVMobject
DEFAULT_SCALAR_FIELD_COLORS: list = [BLUE_E, GREEN, YELLOW, RED]
class VectorField(VGroup):
"""A vector field.
Vector fields are based on a function defining a vector at every position.
    By default, this class does not include any visible elements; it provides
    methods to move other :class:`~.Mobject` s along the vector field.
Parameters
----------
func
The function defining the rate of change at every position of the `VectorField`.
color
The color of the vector field. If set, position-specific coloring is disabled.
color_scheme
A function mapping a vector to a single value. This value gives the position in the color gradient defined using `min_color_scheme_value`, `max_color_scheme_value` and `colors`.
min_color_scheme_value
The value of the color_scheme function to be mapped to the first color in `colors`. Lower values also result in the first color of the gradient.
max_color_scheme_value
The value of the color_scheme function to be mapped to the last color in `colors`. Higher values also result in the last color of the gradient.
colors
The colors defining the color gradient of the vector field.
kwargs : Any
Additional arguments to be passed to the :class:`~.VGroup` constructor
"""
def __init__(
self,
func: Callable[[np.ndarray], np.ndarray],
color: Optional[Color] = None,
color_scheme: Optional[Callable[[np.ndarray], float]] = None,
min_color_scheme_value: float = 0,
max_color_scheme_value: float = 2,
colors: Sequence[Color] = DEFAULT_SCALAR_FIELD_COLORS,
**kwargs
):
super().__init__(**kwargs)
self.func = func
if color is None:
self.single_color = False
if color_scheme is None:
def color_scheme(p):
return np.linalg.norm(p)
self.color_scheme = color_scheme # TODO maybe other default for direction?
self.rgbs = np.array(list(map(color_to_rgb, colors)))
def pos_to_rgb(pos: np.ndarray) -> Tuple[float, float, float, float]:
vec = self.func(pos)
color_value = np.clip(
self.color_scheme(vec),
min_color_scheme_value,
max_color_scheme_value,
)
alpha = inverse_interpolate(
min_color_scheme_value,
max_color_scheme_value,
color_value,
)
alpha *= len(self.rgbs) - 1
c1 = self.rgbs[int(alpha)]
c2 = self.rgbs[min(int(alpha + 1), len(self.rgbs) - 1)]
alpha %= 1
return interpolate(c1, c2, alpha)
self.pos_to_rgb = pos_to_rgb
self.pos_to_color = lambda pos: rgb_to_color(self.pos_to_rgb(pos))
else:
self.single_color = True
self.color = color
self.submob_movement_updater = None
@staticmethod
def shift_func(
func: Callable[[np.ndarray], np.ndarray],
shift_vector: np.ndarray,
) -> Callable[[np.ndarray], np.ndarray]:
"""Shift a vector field function.
Parameters
----------
func
The function defining a vector field.
shift_vector
The shift to be applied to the vector field.
Returns
-------
`Callable[[np.ndarray], np.ndarray]`
The shifted vector field function.
"""
return lambda p: func(p - shift_vector)
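    # Illustrative use (mirroring the ``scale_func`` example below): with
    # ``func = lambda pos: np.sin(pos[1]) * RIGHT + np.cos(pos[0]) * UP``,
    # ``VectorField.shift_func(func, 2 * RIGHT)`` evaluates the original field
    # at ``p - 2 * RIGHT``, so the whole field appears translated by ``2 * RIGHT``.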
@staticmethod
def scale_func(
func: Callable[[np.ndarray], np.ndarray],
scalar: float,
) -> Callable[[np.ndarray], np.ndarray]:
"""Scale a vector field function.
Parameters
----------
func
The function defining a vector field.
        scalar
The scalar to be applied to the vector field.
Examples
--------
.. manim:: ScaleVectorFieldFunction
class ScaleVectorFieldFunction(Scene):
def construct(self):
func = lambda pos: np.sin(pos[1]) * RIGHT + np.cos(pos[0]) * UP
vector_field = ArrowVectorField(func)
self.add(vector_field)
self.wait()
func = VectorField.scale_func(func, 0.5)
self.play(vector_field.animate.become(ArrowVectorField(func)))
self.wait()
Returns
-------
`Callable[[np.ndarray], np.ndarray]`
The scaled vector field function.
"""
return lambda p: func(p * scalar)
def nudge(
self,
mob: Mobject,
dt: float = 1,
substeps: int = 1,
pointwise: bool = False,
) -> "VectorField":
"""Nudge a :class:`~.Mobject` along the vector field.
Parameters
----------
mob
The mobject to move along the vector field
dt
            A scalar scaling the distance the mobject is moved along the vector field.
The actual distance is based on the magnitude of the vector field.
substeps
The amount of steps the whole nudge is divided into. Higher values
give more accurate approximations.
pointwise
Whether to move the mobject along the vector field. If `False` the
vector field takes effect on the center of the given
:class:`~.Mobject`. If `True` the vector field takes effect on the
            individual points of the :class:`~.Mobject`,
potentially distorting it.
Returns
-------
VectorField
This vector field.
Examples
--------
.. manim:: Nudging
class Nudging(Scene):
def construct(self):
func = lambda pos: np.sin(pos[1] / 2) * RIGHT + np.cos(pos[0] / 2) * UP
vector_field = ArrowVectorField(
func, x_range=[-7, 7, 1], y_range=[-4, 4, 1], length_func=lambda x: x / 2
)
self.add(vector_field)
circle = Circle(radius=2).shift(LEFT)
self.add(circle.copy().set_color(GRAY))
dot = Dot().move_to(circle)
vector_field.nudge(circle, -2, 60, True)
vector_field.nudge(dot, -2, 60)
circle.add_updater(vector_field.get_nudge_updater(pointwise=True))
dot.add_updater(vector_field.get_nudge_updater())
self.add(circle, dot)
self.wait(6)
"""
def runge_kutta(self, p: Sequence[float], step_size: float) -> float:
"""Returns the change in position of a point along a vector field.
Parameters
----------
p
The position of each point being moved along the vector field.
step_size
A scalar that is used to determine how much a point is shifted in a single step.
Returns
-------
float
How much the point is shifted.
"""
k_1 = self.func(p)
k_2 = self.func(p + step_size * (k_1 * 0.5))
k_3 = self.func(p + step_size * (k_2 * 0.5))
k_4 = self.func(p + step_size * k_3)
return step_size / 6.0 * (k_1 + 2.0 * k_2 + 2.0 * k_3 + k_4)
step_size = dt / substeps
for _ in range(substeps):
if pointwise:
mob.apply_function(lambda p: p + runge_kutta(self, p, step_size))
else:
mob.shift(runge_kutta(self, mob.get_center(), step_size))
return self
def nudge_submobjects(
self,
dt: float = 1,
substeps: int = 1,
pointwise: bool = False,
) -> "VectorField":
"""Apply a nudge along the vector field to all submobjects.
Parameters
----------
dt
            A scalar scaling the distance the mobject is moved along the vector field.
The actual distance is based on the magnitude of the vector field.
substeps
The amount of steps the whole nudge is divided into. Higher values
give more accurate approximations.
pointwise
Whether to move the mobject along the vector field. See :meth:`nudge` for details.
Returns
-------
VectorField
This vector field.
"""
for mob in self.submobjects:
self.nudge(mob, dt, substeps, pointwise)
return self
def get_nudge_updater(
self,
speed: float = 1,
pointwise: bool = False,
) -> Callable[[Mobject, float], Mobject]:
"""Get an update function to move a :class:`~.Mobject` along the vector field.
When used with :meth:`~.Mobject.add_updater`, the mobject will move along the vector field, where its speed is determined by the magnitude of the vector field.
Parameters
----------
speed
At `speed=1` the distance a mobject moves per second is equal to the magnitude of the vector field along its path. The speed value scales the speed of such a mobject.
pointwise
Whether to move the mobject along the vector field. See :meth:`nudge` for details.
Returns
-------
Callable[[Mobject, float], Mobject]
The update function.
"""
return lambda mob, dt: self.nudge(mob, dt * speed, pointwise=pointwise)
def start_submobject_movement(
self,
speed: float = 1,
pointwise: bool = False,
) -> "VectorField":
"""Start continuously moving all submobjects along the vector field.
Calling this method multiple times will result in removing the previous updater created by this method.
Parameters
----------
speed
The speed at which to move the submobjects. See :meth:`get_nudge_updater` for details.
pointwise
Whether to move the mobject along the vector field. See :meth:`nudge` for details.
Returns
-------
VectorField
This vector field.
"""
self.stop_submobject_movement()
self.submob_movement_updater = lambda mob, dt: mob.nudge_submobjects(
dt * speed,
pointwise=pointwise,
)
self.add_updater(self.submob_movement_updater)
return self
def stop_submobject_movement(self) -> "VectorField":
"""Stops the continuous movement started using :meth:`start_submobject_movement`.
Returns
-------
VectorField
This vector field.
"""
self.remove_updater(self.submob_movement_updater)
self.submob_movement_updater = None
return self
def get_colored_background_image(self, sampling_rate: int = 5) -> Image.Image:
"""Generate an image that displays the vector field.
        The color at each position is calculated by passing the position through a
series of steps:
Calculate the vector field function at that position, map that vector to a
single value using `self.color_scheme` and finally generate a color from
that value using the color gradient.
Parameters
----------
sampling_rate
The stepsize at which pixels get included in the image. Lower values give
more accurate results, but may take a long time to compute.
Returns
-------
        Image.Image
The vector field image.
"""
if self.single_color:
raise ValueError(
"There is no point in generating an image if the vector field uses a single color.",
)
ph = int(config["pixel_height"] / sampling_rate)
pw = int(config["pixel_width"] / sampling_rate)
fw = config["frame_width"]
fh = config["frame_height"]
points_array = np.zeros((ph, pw, 3))
x_array = np.linspace(-fw / 2, fw / 2, pw)
y_array = np.linspace(fh / 2, -fh / 2, ph)
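        # Broadcast the two 1D coordinate axes into a (ph, pw) grid of scene
        # coordinates, one (x, y, 0) sample point per generated pixel.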
x_array = x_array.reshape((1, len(x_array)))
y_array = y_array.reshape((len(y_array), 1))
x_array = x_array.repeat(ph, axis=0)
y_array.repeat(pw, axis=1) # TODO why not y_array = y_array.repeat(...)?
points_array[:, :, 0] = x_array
points_array[:, :, 1] = y_array
rgbs = np.apply_along_axis(self.pos_to_rgb, 2, points_array)
return Image.fromarray((rgbs * 255).astype("uint8"))
def get_vectorized_rgba_gradient_function(
self,
start: float,
end: float,
colors: Iterable,
):
"""
Generates a gradient of rgbas as a numpy array
Parameters
----------
start
start value used for inverse interpolation at :func:`~.inverse_interpolate`
end
end value used for inverse interpolation at :func:`~.inverse_interpolate`
colors
list of colors to generate the gradient
Returns
-------
function to generate the gradients as numpy arrays representing rgba values
"""
rgbs = np.array([color_to_rgb(c) for c in colors])
def func(values, opacity=1):
alphas = inverse_interpolate(start, end, np.array(values))
alphas = np.clip(alphas, 0, 1)
scaled_alphas = alphas * (len(rgbs) - 1)
indices = scaled_alphas.astype(int)
next_indices = np.clip(indices + 1, 0, len(rgbs) - 1)
inter_alphas = scaled_alphas % 1
inter_alphas = inter_alphas.repeat(3).reshape((len(indices), 3))
result = interpolate(rgbs[indices], rgbs[next_indices], inter_alphas)
result = np.concatenate(
(result, np.full([len(result), 1], opacity)),
axis=1,
)
return result
return func
class ArrowVectorField(VectorField):
"""A :class:`VectorField` represented by a set of change vectors.
Vector fields are always based on a function defining the :class:`~.Vector` at every position.
    The values of this function are displayed as a grid of vectors.
    By default the color of each vector is determined by its magnitude.
Other color schemes can be used however.
Parameters
----------
func
The function defining the rate of change at every position of the vector field.
color
The color of the vector field. If set, position-specific coloring is disabled.
color_scheme
A function mapping a vector to a single value. This value gives the position in the color gradient defined using `min_color_scheme_value`, `max_color_scheme_value` and `colors`.
min_color_scheme_value
The value of the color_scheme function to be mapped to the first color in `colors`. Lower values also result in the first color of the gradient.
max_color_scheme_value
The value of the color_scheme function to be mapped to the last color in `colors`. Higher values also result in the last color of the gradient.
colors
The colors defining the color gradient of the vector field.
x_range
A sequence of x_min, x_max, delta_x
y_range
A sequence of y_min, y_max, delta_y
z_range
A sequence of z_min, z_max, delta_z
three_dimensions
Enables three_dimensions. Default set to False, automatically turns True if
z_range is not None.
length_func
The function determining the displayed size of the vectors. The actual size
of the vector is passed, the returned value will be used as display size for the
vector. By default this is used to cap the displayed size of vectors to reduce the clutter.
opacity
The opacity of the arrows.
vector_config
Additional arguments to be passed to the :class:`~.Vector` constructor
kwargs : Any
Additional arguments to be passed to the :class:`~.VGroup` constructor
Examples
--------
.. manim:: BasicUsage
:save_last_frame:
class BasicUsage(Scene):
def construct(self):
func = lambda pos: ((pos[0] * UR + pos[1] * LEFT) - pos) / 3
self.add(ArrowVectorField(func))
.. manim:: SizingAndSpacing
class SizingAndSpacing(Scene):
def construct(self):
func = lambda pos: np.sin(pos[0] / 2) * UR + np.cos(pos[1] / 2) * LEFT
vf = ArrowVectorField(func, x_range=[-7, 7, 1])
self.add(vf)
self.wait()
length_func = lambda x: x / 3
vf2 = ArrowVectorField(func, x_range=[-7, 7, 1], length_func=length_func)
self.play(vf.animate.become(vf2))
self.wait()
.. manim:: Coloring
:save_last_frame:
class Coloring(Scene):
def construct(self):
func = lambda pos: pos - LEFT * 5
colors = [RED, YELLOW, BLUE, DARK_GRAY]
min_radius = Circle(radius=2, color=colors[0]).shift(LEFT * 5)
max_radius = Circle(radius=10, color=colors[-1]).shift(LEFT * 5)
vf = ArrowVectorField(
func, min_color_scheme_value=2, max_color_scheme_value=10, colors=colors
)
self.add(vf, min_radius, max_radius)
"""
def __init__(
self,
func: Callable[[np.ndarray], np.ndarray],
color: Optional[Color] = None,
color_scheme: Optional[Callable[[np.ndarray], float]] = None,
min_color_scheme_value: float = 0,
max_color_scheme_value: float = 2,
colors: Sequence[Color] = DEFAULT_SCALAR_FIELD_COLORS,
# Determining Vector positions:
x_range: Sequence[float] = None,
y_range: Sequence[float] = None,
z_range: Sequence[float] = None,
three_dimensions: bool = False, # Automatically True if z_range is set
# Takes in actual norm, spits out displayed norm
length_func: Callable[[float], float] = lambda norm: 0.45 * sigmoid(norm),
opacity: float = 1.0,
vector_config: Optional[dict] = None,
**kwargs
):
self.x_range = x_range or [
floor(-config["frame_width"] / 2),
ceil(config["frame_width"] / 2),
]
self.y_range = y_range or [
floor(-config["frame_height"] / 2),
ceil(config["frame_height"] / 2),
]
self.ranges = [self.x_range, self.y_range]
if three_dimensions or z_range:
self.z_range = z_range or self.y_range.copy()
self.ranges += [self.z_range]
else:
self.ranges += [[0, 0]]
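        # Normalise every range to [min, max, step]: a missing step defaults to
        # 0.5, and max is pushed up by one step so the upper bound is included
        # when np.arange is applied below.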
for i in range(len(self.ranges)):
if len(self.ranges[i]) == 2:
self.ranges[i] += [0.5]
self.ranges[i][1] += self.ranges[i][2]
self.x_range, self.y_range, self.z_range = self.ranges
super().__init__(
func,
color,
color_scheme,
min_color_scheme_value,
max_color_scheme_value,
colors,
**kwargs,
)
self.length_func = length_func
self.opacity = opacity
if vector_config is None:
vector_config = {}
self.vector_config = vector_config
self.func = func
x_range = np.arange(*self.x_range)
y_range = np.arange(*self.y_range)
z_range = np.arange(*self.z_range)
for x, y, z in it.product(x_range, y_range, z_range):
self.add(self.get_vector(x * RIGHT + y * UP + z * OUT))
self.set_opacity(self.opacity)
def get_vector(self, point: np.ndarray):
"""Creates a vector in the vector field.
The created vector is based on the function of the vector field and is
rooted in the given point. Color and length fit the specifications of
this vector field.
Parameters
----------
point
The root point of the vector.
kwargs : Any
Additional arguments to be passed to the :class:`~.Vector` constructor
"""
output = np.array(self.func(point))
norm = np.linalg.norm(output)
if norm != 0:
output *= self.length_func(norm) / norm
vect = Vector(output, **self.vector_config)
vect.shift(point)
if self.single_color:
vect.set_color(self.color)
else:
vect.set_color(self.pos_to_color(point))
return vect
class StreamLines(VectorField):
"""StreamLines represent the flow of a :class:`VectorField` using the trace of moving agents.
Vector fields are always based on a function defining the vector at every position.
    The values of this function are displayed by moving many agents along the vector field
and showing their trace.
Parameters
----------
func
The function defining the rate of change at every position of the vector field.
color
The color of the vector field. If set, position-specific coloring is disabled.
color_scheme
A function mapping a vector to a single value. This value gives the position in the color gradient defined using `min_color_scheme_value`, `max_color_scheme_value` and `colors`.
min_color_scheme_value
The value of the color_scheme function to be mapped to the first color in `colors`. Lower values also result in the first color of the gradient.
max_color_scheme_value
The value of the color_scheme function to be mapped to the last color in `colors`. Higher values also result in the last color of the gradient.
colors
The colors defining the color gradient of the vector field.
x_range
A sequence of x_min, x_max, delta_x
y_range
A sequence of y_min, y_max, delta_y
z_range
A sequence of z_min, z_max, delta_z
three_dimensions
Enables three_dimensions. Default set to False, automatically turns True if
z_range is not None.
noise_factor
The amount by which the starting position of each agent is altered along each axis. Defaults to :code:`delta_y / 2` if not defined.
n_repeats
The number of agents generated at each starting point.
dt
The factor by which the distance an agent moves per step is stretched. Lower values result in a better approximation of the trajectories in the vector field.
virtual_time
The time the agents get to move in the vector field. Higher values therefore result in longer stream lines. However, this whole time gets simulated upon creation.
max_anchors_per_line
The maximum number of anchors per line. Lines with more anchors get reduced in complexity, not in length.
padding
The distance agents can move out of the generation area before being terminated.
stroke_width
        The stroke width of the stream lines.
opacity
The opacity of the stream lines.
Examples
--------
.. manim:: BasicUsage
:save_last_frame:
class BasicUsage(Scene):
def construct(self):
func = lambda pos: ((pos[0] * UR + pos[1] * LEFT) - pos) / 3
self.add(StreamLines(func))
.. manim:: SpawningAndFlowingArea
:save_last_frame:
class SpawningAndFlowingArea(Scene):
def construct(self):
func = lambda pos: np.sin(pos[0]) * UR + np.cos(pos[1]) * LEFT + pos / 5
stream_lines = StreamLines(
func, x_range=[-3, 3, 0.2], y_range=[-2, 2, 0.2], padding=1
)
spawning_area = Rectangle(width=6, height=4)
flowing_area = Rectangle(width=8, height=6)
labels = [Tex("Spawning Area"), Tex("Flowing Area").shift(DOWN * 2.5)]
for lbl in labels:
lbl.add_background_rectangle(opacity=0.6, buff=0.05)
self.add(stream_lines, spawning_area, flowing_area, *labels)
"""
def __init__(
self,
func: Callable[[np.ndarray], np.ndarray],
color: Optional[Color] = None,
color_scheme: Optional[Callable[[np.ndarray], float]] = None,
min_color_scheme_value: float = 0,
max_color_scheme_value: float = 2,
colors: Sequence[Color] = DEFAULT_SCALAR_FIELD_COLORS,
# Determining stream line starting positions:
x_range: Sequence[float] = None,
y_range: Sequence[float] = None,
z_range: Sequence[float] = None,
three_dimensions: bool = False,
noise_factor: Optional[float] = None,
n_repeats=1,
# Determining how lines are drawn
dt=0.05,
virtual_time=3,
max_anchors_per_line=100,
padding=3,
# Determining stream line appearance:
stroke_width=1,
opacity=1,
**kwargs
):
self.x_range = x_range or [
floor(-config["frame_width"] / 2),
ceil(config["frame_width"] / 2),
]
self.y_range = y_range or [
floor(-config["frame_height"] / 2),
ceil(config["frame_height"] / 2),
]
self.ranges = [self.x_range, self.y_range]
if three_dimensions or z_range:
self.z_range = z_range or self.y_range.copy()
self.ranges += [self.z_range]
else:
self.ranges += [[0, 0]]
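        # Same range normalisation as in ArrowVectorField: ensure [min, max, step]
        # and include the upper bound in the generated grid of start points.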
for i in range(len(self.ranges)):
if len(self.ranges[i]) == 2:
self.ranges[i] += [0.5]
self.ranges[i][1] += self.ranges[i][2]
self.x_range, self.y_range, self.z_range = self.ranges
super().__init__(
func,
color,
color_scheme,
min_color_scheme_value,
max_color_scheme_value,
colors,
**kwargs,
)
self.noise_factor = (
noise_factor if noise_factor is not None else self.y_range[2] / 2
)
self.n_repeats = n_repeats
self.virtual_time = virtual_time
self.max_anchors_per_line = max_anchors_per_line
self.padding = padding
self.stroke_width = stroke_width
half_noise = self.noise_factor / 2
np.random.seed(0)
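        # Agents start on a regular grid over the configured ranges; each start
        # point is offset by a random jitter of at most half the noise factor in
        # either direction along every axis, and every grid position is reused
        # `n_repeats` times.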
start_points = np.array(
[
(x - half_noise) * RIGHT
+ (y - half_noise) * UP
+ (z - half_noise) * OUT
+ self.noise_factor * np.random.random(3)
for n in range(self.n_repeats)
for x in np.arange(*self.x_range)
for y in np.arange(*self.y_range)
for z in np.arange(*self.z_range)
],
)
def outside_box(p):
return (
p[0] < self.x_range[0] - self.padding
or p[0] > self.x_range[1] + self.padding - self.x_range[2]
or p[1] < self.y_range[0] - self.padding
or p[1] > self.y_range[1] + self.padding - self.y_range[2]
or p[2] < self.z_range[0] - self.padding
or p[2] > self.z_range[1] + self.padding - self.z_range[2]
)
max_steps = ceil(virtual_time / dt) + 1
if not self.single_color:
self.background_img = self.get_colored_background_image()
if config["renderer"] == "opengl":
self.values_to_rgbas = self.get_vectorized_rgba_gradient_function(
min_color_scheme_value,
max_color_scheme_value,
colors,
)
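        # Trace each stream line with explicit Euler steps: follow the field from
        # the start point until the agent leaves the padded region or max_steps is
        # reached, then turn the visited points into a smoothed VMobject
        # (or OpenGLVMobject, depending on the renderer).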
for point in start_points:
points = [point]
for _ in range(max_steps):
last_point = points[-1]
new_point = last_point + dt * func(last_point)
if outside_box(new_point):
break
points.append(new_point)
step = max_steps
if not step:
continue
if config["renderer"] == "opengl":
line = OpenGLVMobject()
else:
line = VMobject()
line.duration = step * dt
step = max(1, int(len(points) / self.max_anchors_per_line))
line.set_points_smoothly(points[::step])
if self.single_color:
line.set_stroke(self.color)
else:
if config["renderer"] == "opengl":
# scaled for compatibility with cairo
line.set_stroke(width=self.stroke_width / 4.0)
norms = np.array(
[np.linalg.norm(self.func(point)) for point in line.points],
)
line.set_rgba_array_direct(
self.values_to_rgbas(norms, opacity),
name="stroke_rgba",
)
else:
if np.any(self.z_range != np.array([0, 0.5, 0.5])):
line.set_stroke(
[self.pos_to_color(p) for p in line.get_anchors()],
)
else:
line.color_using_background_image(self.background_img)
line.set_stroke(width=self.stroke_width, opacity=opacity)
self.add(line)
self.stream_lines = [*self.submobjects]
def create(
self,
lag_ratio: Optional[float] = None,
run_time: Optional[Callable[[float], float]] = None,
**kwargs
) -> AnimationGroup:
"""The creation animation of the stream lines.
The stream lines appear in random order.
Parameters
----------
lag_ratio
The lag ratio of the animation.
If undefined, it will be selected so that the total animation length is 1.5 times the run time of each stream line creation.
run_time
The run time of every single stream line creation. The runtime of the whole animation might be longer due to the `lag_ratio`.
If undefined, the virtual time of the stream lines is used as run time.
Returns
-------
:class:`~.AnimationGroup`
The creation animation of the stream lines.
Examples
--------
.. manim:: StreamLineCreation
class StreamLineCreation(Scene):
def construct(self):
func = lambda pos: (pos[0] * UR + pos[1] * LEFT) - pos
stream_lines = StreamLines(
func,
color=YELLOW,
x_range=[-7, 7, 1],
y_range=[-4, 4, 1],
stroke_width=3,
virtual_time=1, # use shorter lines
max_anchors_per_line=5, # better performance with fewer anchors
)
self.play(stream_lines.create()) # uses virtual_time as run_time
self.wait()
"""
if run_time is None:
run_time = self.virtual_time
if lag_ratio is None:
lag_ratio = run_time / 2 / len(self.submobjects)
animations = [
Create(line, run_time=run_time, **kwargs) for line in self.stream_lines
]
random.shuffle(animations)
return AnimationGroup(*animations, lag_ratio=lag_ratio)
def start_animation(
self,
warm_up=True,
flow_speed: float = 1,
time_width: float = 0.3,
rate_func: Callable[[float], float] = linear,
line_animation_class: Type[ShowPassingFlash] = ShowPassingFlash,
**kwargs
) -> None:
"""Animates the stream lines using an updater.
The stream lines will continuously flow
Parameters
----------
warm_up : bool, optional
If `True` the animation is initialized line by line. Otherwise it starts with all lines shown.
flow_speed
At `flow_speed=1` the distance the flow moves per second is equal to the magnitude of the vector field along its path. The speed value scales the speed of this flow.
time_width
The proportion of the stream line shown while being animated
rate_func
The rate function of each stream line flashing
line_animation_class
The animation class being used
Examples
--------
.. manim:: ContinuousMotion
class ContinuousMotion(Scene):
def construct(self):
func = lambda pos: np.sin(pos[0] / 2) * UR + np.cos(pos[1] / 2) * LEFT
stream_lines = StreamLines(func, stroke_width=3, max_anchors_per_line=30)
self.add(stream_lines)
stream_lines.start_animation(warm_up=False, flow_speed=1.5)
self.wait(stream_lines.virtual_time / stream_lines.flow_speed)
"""
for line in self.stream_lines:
run_time = line.duration / flow_speed
line.anim = line_animation_class(
line,
run_time=run_time,
rate_func=rate_func,
time_width=time_width,
**kwargs,
)
line.anim.begin()
line.time = random.random() * self.virtual_time
if warm_up:
line.time *= -1
self.add(line.anim.mobject)
def updater(mob, dt):
for line in mob.stream_lines:
line.time += dt * flow_speed
if line.time >= self.virtual_time:
line.time -= self.virtual_time
line.anim.interpolate(np.clip(line.time / line.anim.run_time, 0, 1))
self.add_updater(updater)
self.flow_animation = updater
self.flow_speed = flow_speed
self.time_width = time_width
def end_animation(self) -> AnimationGroup:
"""End the stream line animation smoothly.
Returns an animation resulting in fully displayed stream lines without a noticeable cut.
Returns
-------
:class:`~.AnimationGroup`
The animation fading out the running stream animation.
Raises
------
ValueError
if no stream line animation is running
Examples
--------
.. manim:: EndAnimation
class EndAnimation(Scene):
def construct(self):
func = lambda pos: np.sin(pos[0] / 2) * UR + np.cos(pos[1] / 2) * LEFT
stream_lines = StreamLines(
func, stroke_width=3, max_anchors_per_line=5, virtual_time=1, color=BLUE
)
self.add(stream_lines)
stream_lines.start_animation(warm_up=False, flow_speed=1.5, time_width=0.5)
self.wait(1)
self.play(stream_lines.end_animation())
"""
if self.flow_animation is None:
raise ValueError("You have to start the animation before fading it out.")
def hide_and_wait(mob, alpha):
if alpha == 0:
mob.set_stroke(opacity=0)
elif alpha == 1:
mob.set_stroke(opacity=1)
def finish_updater_cycle(line, alpha):
line.time += dt * self.flow_speed
line.anim.interpolate(min(line.time / line.anim.run_time, 1))
if alpha == 1:
self.remove(line.anim.mobject)
line.anim.finish()
max_run_time = self.virtual_time / self.flow_speed
creation_rate_func = ease_out_sine
creation_staring_speed = creation_rate_func(0.001) * 1000
creation_run_time = (
max_run_time / (1 + self.time_width) * creation_staring_speed
)
# creation_run_time is calculated so that the creation animation starts at the same speed
# as the regular line flash animation but eases out.
dt = 1 / config["frame_rate"]
animations = []
self.remove_updater(self.flow_animation)
self.flow_animation = None
for line in self.stream_lines:
create = Create(
line,
run_time=creation_run_time,
rate_func=creation_rate_func,
)
if line.time <= 0:
animations.append(
Succession(
UpdateFromAlphaFunc(
line,
hide_and_wait,
run_time=-line.time / self.flow_speed,
),
create,
),
)
self.remove(line.anim.mobject)
line.anim.finish()
else:
remaining_time = max_run_time - line.time / self.flow_speed
animations.append(
Succession(
UpdateFromAlphaFunc(
line,
finish_updater_cycle,
run_time=remaining_time,
),
create,
),
)
return AnimationGroup(*animations)
# TODO: Variant of StreamLines that is able to respond to changes in the vector field function
| [
"numpy.clip",
"math.ceil",
"random.shuffle",
"math.floor",
"numpy.random.random",
"itertools.product",
"numpy.array",
"numpy.zeros",
"numpy.apply_along_axis",
"numpy.linspace",
"numpy.random.seed",
"numpy.linalg.norm",
"random.random",
"numpy.arange"
] | [((13292, 13313), 'numpy.zeros', 'np.zeros', (['(ph, pw, 3)'], {}), '((ph, pw, 3))\n', (13300, 13313), True, 'import numpy as np\n'), ((13332, 13364), 'numpy.linspace', 'np.linspace', (['(-fw / 2)', '(fw / 2)', 'pw'], {}), '(-fw / 2, fw / 2, pw)\n', (13343, 13364), True, 'import numpy as np\n'), ((13383, 13415), 'numpy.linspace', 'np.linspace', (['(fh / 2)', '(-fh / 2)', 'ph'], {}), '(fh / 2, -fh / 2, ph)\n', (13394, 13415), True, 'import numpy as np\n'), ((13744, 13797), 'numpy.apply_along_axis', 'np.apply_along_axis', (['self.pos_to_rgb', '(2)', 'points_array'], {}), '(self.pos_to_rgb, 2, points_array)\n', (13763, 13797), True, 'import numpy as np\n'), ((20776, 20800), 'numpy.arange', 'np.arange', (['*self.x_range'], {}), '(*self.x_range)\n', (20785, 20800), True, 'import numpy as np\n'), ((20819, 20843), 'numpy.arange', 'np.arange', (['*self.y_range'], {}), '(*self.y_range)\n', (20828, 20843), True, 'import numpy as np\n'), ((20862, 20886), 'numpy.arange', 'np.arange', (['*self.z_range'], {}), '(*self.z_range)\n', (20871, 20886), True, 'import numpy as np\n'), ((20910, 20947), 'itertools.product', 'it.product', (['x_range', 'y_range', 'z_range'], {}), '(x_range, y_range, z_range)\n', (20920, 20947), True, 'import itertools as it\n'), ((21607, 21629), 'numpy.linalg.norm', 'np.linalg.norm', (['output'], {}), '(output)\n', (21621, 21629), True, 'import numpy as np\n'), ((27667, 27684), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (27681, 27684), True, 'import numpy as np\n'), ((32745, 32771), 'random.shuffle', 'random.shuffle', (['animations'], {}), '(animations)\n', (32759, 32771), False, 'import random\n'), ((14691, 14712), 'numpy.clip', 'np.clip', (['alphas', '(0)', '(1)'], {}), '(alphas, 0, 1)\n', (14698, 14712), True, 'import numpy as np\n'), ((28612, 28635), 'math.ceil', 'ceil', (['(virtual_time / dt)'], {}), '(virtual_time / dt)\n', (28616, 28635), False, 'from math import ceil, floor\n'), ((14652, 14668), 'numpy.array', 'np.array', (['values'], {}), '(values)\n', (14660, 14668), True, 'import numpy as np\n'), ((19628, 19661), 'math.floor', 'floor', (["(-config['frame_width'] / 2)"], {}), "(-config['frame_width'] / 2)\n", (19633, 19661), False, 'from math import ceil, floor\n'), ((19675, 19706), 'math.ceil', 'ceil', (["(config['frame_width'] / 2)"], {}), "(config['frame_width'] / 2)\n", (19679, 19706), False, 'from math import ceil, floor\n'), ((19766, 19800), 'math.floor', 'floor', (["(-config['frame_height'] / 2)"], {}), "(-config['frame_height'] / 2)\n", (19771, 19800), False, 'from math import ceil, floor\n'), ((19814, 19846), 'math.ceil', 'ceil', (["(config['frame_height'] / 2)"], {}), "(config['frame_height'] / 2)\n", (19818, 19846), False, 'from math import ceil, floor\n'), ((26366, 26399), 'math.floor', 'floor', (["(-config['frame_width'] / 2)"], {}), "(-config['frame_width'] / 2)\n", (26371, 26399), False, 'from math import ceil, floor\n'), ((26413, 26444), 'math.ceil', 'ceil', (["(config['frame_width'] / 2)"], {}), "(config['frame_width'] / 2)\n", (26417, 26444), False, 'from math import ceil, floor\n'), ((26504, 26538), 'math.floor', 'floor', (["(-config['frame_height'] / 2)"], {}), "(-config['frame_height'] / 2)\n", (26509, 26538), False, 'from math import ceil, floor\n'), ((26552, 26584), 'math.ceil', 'ceil', (["(config['frame_height'] / 2)"], {}), "(config['frame_height'] / 2)\n", (26556, 26584), False, 'from math import ceil, floor\n'), ((34762, 34777), 'random.random', 'random.random', ([], {}), '()\n', (34775, 34777), False, 'import random\n'), 
((2995, 3012), 'numpy.linalg.norm', 'np.linalg.norm', (['p'], {}), '(p)\n', (3009, 3012), True, 'import numpy as np\n'), ((27984, 28008), 'numpy.arange', 'np.arange', (['*self.x_range'], {}), '(*self.x_range)\n', (27993, 28008), True, 'import numpy as np\n'), ((28034, 28058), 'numpy.arange', 'np.arange', (['*self.y_range'], {}), '(*self.y_range)\n', (28043, 28058), True, 'import numpy as np\n'), ((28084, 28108), 'numpy.arange', 'np.arange', (['*self.z_range'], {}), '(*self.z_range)\n', (28093, 28108), True, 'import numpy as np\n'), ((35152, 35197), 'numpy.clip', 'np.clip', (['(line.time / line.anim.run_time)', '(0)', '(1)'], {}), '(line.time / line.anim.run_time, 0, 1)\n', (35159, 35197), True, 'import numpy as np\n'), ((27892, 27911), 'numpy.random.random', 'np.random.random', (['(3)'], {}), '(3)\n', (27908, 27911), True, 'import numpy as np\n'), ((30366, 30389), 'numpy.array', 'np.array', (['[0, 0.5, 0.5]'], {}), '([0, 0.5, 0.5])\n', (30374, 30389), True, 'import numpy as np\n')] |
"""
This library allows the conversion of python 3.7's :mod:`dataclasses`
to :mod:`marshmallow` schemas.
It takes a python class, and generates a marshmallow schema for it.
Simple example::
from marshmallow import Schema
from marshmallow_dataclass import dataclass
@dataclass
class Point:
x:float
y:float
point = Point(x=0, y=0)
point_json = Point.Schema().dumps(point)
Full example::
from marshmallow import Schema
from dataclasses import field
from marshmallow_dataclass import dataclass
import datetime
@dataclass
class User:
birth: datetime.date = field(metadata= {
"required": True # A parameter to pass to marshmallow's field
})
website:str = field(metadata = {
"marshmallow_field": marshmallow.fields.Url() # Custom marshmallow field
})
Schema: ClassVar[Type[Schema]] = Schema # For the type checker
"""
import inspect
from enum import EnumMeta
from functools import lru_cache
from typing import (
Any,
Callable,
Dict,
List,
Mapping,
Optional,
Set,
Tuple,
Type,
TypeVar,
Union,
cast,
overload,
)
import dataclasses
import marshmallow
import typing_inspect
__all__ = ["dataclass", "add_schema", "class_schema", "field_for_schema", "NewType"]
NoneType = type(None)
_U = TypeVar("_U")
# Whitelist of dataclass members that will be copied to generated schema.
MEMBERS_WHITELIST: Set[str] = {"Meta"}
# Max number of generated schemas that class_schema keeps of generated schemas. Removes duplicates.
MAX_CLASS_SCHEMA_CACHE_SIZE = 1024
# _cls should never be specified by keyword, so start it with an
# underscore. The presence of _cls is used to detect if this
# decorator is being called with parameters or not.
def dataclass(
_cls: Type[_U] = None,
*,
repr: bool = True,
eq: bool = True,
order: bool = False,
unsafe_hash: bool = False,
frozen: bool = False,
base_schema: Optional[Type[marshmallow.Schema]] = None,
):
"""
This decorator does the same as dataclasses.dataclass, but also applies :func:`add_schema`.
It adds a `.Schema` attribute to the class object
:param base_schema: marshmallow schema used as a base class when deriving dataclass schema
>>> @dataclass
... class Artist:
... name: str
>>> Artist.Schema
<class 'marshmallow.schema.Artist'>
>>> from typing import ClassVar
>>> from marshmallow import Schema
>>> @dataclass(order=True) # preserve field order
... class Point:
... x:float
... y:float
... Schema: ClassVar[Type[Schema]] = Schema # For the type checker
...
>>> Point.Schema().load({'x':0, 'y':0}) # This line can be statically type checked
Point(x=0.0, y=0.0)
"""
# dataclass's typing doesn't expect it to be called as a function, so ignore type check
dc = dataclasses.dataclass( # type: ignore
_cls, repr=repr, eq=eq, order=order, unsafe_hash=unsafe_hash, frozen=frozen
)
if _cls is None:
return lambda cls: add_schema(dc(cls), base_schema)
return add_schema(dc, base_schema)
@overload
def add_schema(_cls: Type[_U]) -> Type[_U]:
...
@overload
def add_schema(
base_schema: Type[marshmallow.Schema] = None,
) -> Callable[[Type[_U]], Type[_U]]:
...
@overload
def add_schema(
_cls: Type[_U], base_schema: Type[marshmallow.Schema] = None
) -> Type[_U]:
...
def add_schema(_cls=None, base_schema=None):
"""
This decorator adds a marshmallow schema as the 'Schema' attribute in a dataclass.
It uses :func:`class_schema` internally.
:param type cls: The dataclass to which a Schema should be added
:param base_schema: marshmallow schema used as a base class when deriving dataclass schema
>>> class BaseSchema(marshmallow.Schema):
... def on_bind_field(self, field_name, field_obj):
... field_obj.data_key = (field_obj.data_key or field_name).upper()
>>> @add_schema(base_schema=BaseSchema)
... @dataclasses.dataclass
... class Artist:
... names: Tuple[str, str]
>>> artist = Artist.Schema().loads('{"NAMES": ["Martin", "Ramirez"]}')
>>> artist
Artist(names=('Martin', 'Ramirez'))
"""
def decorator(clazz: Type[_U]) -> Type[_U]:
clazz.Schema = class_schema(clazz, base_schema) # type: ignore
return clazz
return decorator(_cls) if _cls else decorator
def class_schema(
clazz: type, base_schema: Optional[Type[marshmallow.Schema]] = None
) -> Type[marshmallow.Schema]:
"""
Convert a class to a marshmallow schema
:param clazz: A python class (may be a dataclass)
:param base_schema: marshmallow schema used as a base class when deriving dataclass schema
:return: A marshmallow Schema corresponding to the dataclass
.. note::
All the arguments supported by marshmallow field classes can
be passed in the `metadata` dictionary of a field.
If you want to use a custom marshmallow field
(one that has no equivalent python type), you can pass it as the
``marshmallow_field`` key in the metadata dictionary.
>>> import typing
>>> Meters = typing.NewType('Meters', float)
>>> @dataclasses.dataclass()
... class Building:
... height: Optional[Meters]
... name: str = dataclasses.field(default="anonymous")
... class Meta:
... ordered = True
...
>>> class_schema(Building) # Returns a marshmallow schema class (not an instance)
<class 'marshmallow.schema.Building'>
>>> @dataclasses.dataclass()
... class City:
... name: str = dataclasses.field(metadata={'required':True})
... best_building: Building # Reference to another dataclasses. A schema will be created for it too.
... other_buildings: List[Building] = dataclasses.field(default_factory=lambda: [])
...
>>> citySchema = class_schema(City)()
>>> city = citySchema.load({"name":"Paris", "best_building": {"name": "Eiffel Tower"}})
>>> city
City(name='Paris', best_building=Building(height=None, name='Eiffel Tower'), other_buildings=[])
>>> citySchema.load({"name":"Paris"})
Traceback (most recent call last):
...
marshmallow.exceptions.ValidationError: {'best_building': ['Missing data for required field.']}
>>> city_json = citySchema.dump(city)
>>> city_json['best_building'] # We get an OrderedDict because we specified order = True in the Meta class
OrderedDict([('height', None), ('name', 'Eiffel Tower')])
>>> @dataclasses.dataclass()
... class Person:
... name: str = dataclasses.field(default="Anonymous")
... friends: List['Person'] = dataclasses.field(default_factory=lambda:[]) # Recursive field
...
>>> person = class_schema(Person)().load({
... "friends": [{"name": "<NAME>"}]
... })
>>> person
Person(name='Anonymous', friends=[Person(name='<NAME>', friends=[])])
>>> @dataclasses.dataclass()
... class C:
... important: int = dataclasses.field(init=True, default=0)
... # Only fields that are in the __init__ method will be added:
... unimportant: int = dataclasses.field(init=False, default=0)
...
>>> c = class_schema(C)().load({
... "important": 9, # This field will be imported
... "unimportant": 9 # This field will NOT be imported
... }, unknown=marshmallow.EXCLUDE)
>>> c
C(important=9, unimportant=0)
>>> @dataclasses.dataclass
... class Website:
... url:str = dataclasses.field(metadata = {
... "marshmallow_field": marshmallow.fields.Url() # Custom marshmallow field
... })
...
>>> class_schema(Website)().load({"url": "I am not a good URL !"})
Traceback (most recent call last):
...
marshmallow.exceptions.ValidationError: {'url': ['Not a valid URL.']}
>>> @dataclasses.dataclass
... class NeverValid:
... @marshmallow.validates_schema
... def validate(self, data, **_):
... raise marshmallow.ValidationError('never valid')
...
>>> class_schema(NeverValid)().load({})
Traceback (most recent call last):
...
marshmallow.exceptions.ValidationError: {'_schema': ['never valid']}
>>> # noinspection PyTypeChecker
>>> class_schema(None) # unsupported type
Traceback (most recent call last):
...
TypeError: None is not a dataclass and cannot be turned into one.
>>> @dataclasses.dataclass
... class Anything:
... name: str
... @marshmallow.validates('name')
... def validates(self, value):
... if len(value) > 5: raise marshmallow.ValidationError("Name too long")
>>> class_schema(Anything)().load({"name": "aaaaaargh"})
Traceback (most recent call last):
...
marshmallow.exceptions.ValidationError: {'name': ['Name too long']}
"""
return _proxied_class_schema(clazz, base_schema)
@lru_cache(maxsize=MAX_CLASS_SCHEMA_CACHE_SIZE)
def _proxied_class_schema(
clazz: type, base_schema: Optional[Type[marshmallow.Schema]] = None
) -> Type[marshmallow.Schema]:
try:
# noinspection PyDataclass
fields: Tuple[dataclasses.Field, ...] = dataclasses.fields(clazz)
except TypeError: # Not a dataclass
try:
return class_schema(dataclasses.dataclass(clazz), base_schema)
except Exception:
raise TypeError(
f"{getattr(clazz, '__name__', repr(clazz))} is not a dataclass and cannot be turned into one."
)
# Copy all marshmallow hooks and whitelisted members of the dataclass to the schema.
attributes = {
k: v
for k, v in inspect.getmembers(clazz)
if hasattr(v, "__marshmallow_hook__") or k in MEMBERS_WHITELIST
}
# Update the schema members to contain marshmallow fields instead of dataclass fields
attributes.update(
(
field.name,
field_for_schema(
field.type, _get_field_default(field), field.metadata, base_schema
),
)
for field in fields
if field.init
)
schema_class = type(clazz.__name__, (_base_schema(clazz, base_schema),), attributes)
return cast(Type[marshmallow.Schema], schema_class)
def _field_by_type(
typ: Union[type, Any], base_schema: Optional[Type[marshmallow.Schema]]
) -> Optional[Type[marshmallow.fields.Field]]:
return (
base_schema and base_schema.TYPE_MAPPING.get(typ)
) or marshmallow.Schema.TYPE_MAPPING.get(typ)
def _field_by_supertype(
typ: Type,
default: marshmallow.missing,
newtype_supertype: Type,
metadata: dict,
base_schema: Optional[Type[marshmallow.Schema]],
) -> marshmallow.fields.Field:
"""
Return a new field for fields based on a super field. (Usually spawned from NewType)
"""
# Add the information coming our custom NewType implementation
typ_args = getattr(typ, "_marshmallow_args", {})
# Handle multiple validators from both `typ` and `metadata`.
# See https://github.com/lovasoa/marshmallow_dataclass/issues/91
new_validators: List[Callable] = []
for meta_dict in (typ_args, metadata):
if "validate" in meta_dict:
if marshmallow.utils.is_iterable_but_not_string(meta_dict["validate"]):
new_validators.extend(meta_dict["validate"])
elif callable(meta_dict["validate"]):
new_validators.append(meta_dict["validate"])
metadata["validate"] = new_validators if new_validators else None
metadata = {"description": typ.__name__, **typ_args, **metadata}
field = getattr(typ, "_marshmallow_field", None)
if field:
return field(**metadata)
else:
return field_for_schema(
newtype_supertype,
metadata=metadata,
default=default,
base_schema=base_schema,
)
def field_for_schema(
typ: type,
default=marshmallow.missing,
metadata: Mapping[str, Any] = None,
base_schema: Optional[Type[marshmallow.Schema]] = None,
) -> marshmallow.fields.Field:
"""
Get a marshmallow Field corresponding to the given python type.
The metadata of the dataclass field is used as arguments to the marshmallow Field.
:param typ: The type for which a field should be generated
:param default: value to use for (de)serialization when the field is missing
:param metadata: Additional parameters to pass to the marshmallow field constructor
:param base_schema: marshmallow schema used as a base class when deriving dataclass schema
>>> int_field = field_for_schema(int, default=9, metadata=dict(required=True))
>>> int_field.__class__
<class 'marshmallow.fields.Integer'>
>>> int_field.default
9
>>> field_for_schema(str, metadata={"marshmallow_field": marshmallow.fields.Url()}).__class__
<class 'marshmallow.fields.Url'>
"""
metadata = {} if metadata is None else dict(metadata)
if default is not marshmallow.missing:
metadata.setdefault("default", default)
# 'missing' must not be set for required fields.
if not metadata.get("required"):
metadata.setdefault("missing", default)
else:
metadata.setdefault("required", True)
# If the field was already defined by the user
predefined_field = metadata.get("marshmallow_field")
if predefined_field:
return predefined_field
# Generic types specified without type arguments
if typ is list:
typ = List[Any]
elif typ is dict:
typ = Dict[Any, Any]
# Base types
field = _field_by_type(typ, base_schema)
if field:
return field(**metadata)
if typ is Any:
metadata.setdefault("allow_none", True)
return marshmallow.fields.Raw(**metadata)
# Generic types
origin = typing_inspect.get_origin(typ)
if origin:
arguments = typing_inspect.get_args(typ, True)
# Override base_schema.TYPE_MAPPING to change the class used for generic types below
type_mapping = base_schema.TYPE_MAPPING if base_schema else {}
if origin in (list, List):
child_type = field_for_schema(arguments[0], base_schema=base_schema)
list_type = type_mapping.get(List, marshmallow.fields.List)
return list_type(child_type, **metadata)
if origin in (tuple, Tuple):
children = tuple(
field_for_schema(arg, base_schema=base_schema) for arg in arguments
)
tuple_type = type_mapping.get(Tuple, marshmallow.fields.Tuple)
return tuple_type(children, **metadata)
elif origin in (dict, Dict):
dict_type = type_mapping.get(Dict, marshmallow.fields.Dict)
return dict_type(
keys=field_for_schema(arguments[0], base_schema=base_schema),
values=field_for_schema(arguments[1], base_schema=base_schema),
**metadata,
)
elif typing_inspect.is_optional_type(typ):
subtyp = next(t for t in arguments if t is not NoneType) # type: ignore
# Treat optional types as types with a None default
metadata["default"] = metadata.get("default", None)
metadata["missing"] = metadata.get("missing", None)
metadata["required"] = False
return field_for_schema(subtyp, metadata=metadata, base_schema=base_schema)
elif typing_inspect.is_union_type(typ):
from . import union_field
return union_field.Union(
[
(
subtyp,
field_for_schema(
subtyp, metadata=metadata, base_schema=base_schema
),
)
for subtyp in arguments
],
**metadata,
)
# typing.NewType returns a function with a __supertype__ attribute
newtype_supertype = getattr(typ, "__supertype__", None)
if newtype_supertype and inspect.isfunction(typ):
return _field_by_supertype(
typ=typ,
default=default,
newtype_supertype=newtype_supertype,
metadata=metadata,
base_schema=base_schema,
)
# enumerations
if isinstance(typ, EnumMeta):
import marshmallow_enum
return marshmallow_enum.EnumField(typ, **metadata)
# Nested marshmallow dataclass
nested_schema = getattr(typ, "Schema", None)
# Nested dataclasses
forward_reference = getattr(typ, "__forward_arg__", None)
nested = (
nested_schema or forward_reference or class_schema(typ, base_schema=base_schema)
)
return marshmallow.fields.Nested(nested, **metadata)
def _base_schema(
clazz: type, base_schema: Optional[Type[marshmallow.Schema]] = None
) -> Type[marshmallow.Schema]:
"""
Base schema factory that creates a schema for `clazz` derived either from `base_schema`
or `BaseSchema`
"""
# Remove `type: ignore` when mypy handles dynamic base classes
# https://github.com/python/mypy/issues/2813
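    # The load() override below converts the validated data produced by
    # marshmallow back into instances of the wrapped dataclass.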
class BaseSchema(base_schema or marshmallow.Schema): # type: ignore
def load(self, data: Mapping, *, many: bool = None, **kwargs):
all_loaded = super().load(data, many=many, **kwargs)
many = self.many if many is None else bool(many)
if many:
return [clazz(**loaded) for loaded in all_loaded]
else:
return clazz(**all_loaded)
return BaseSchema
def _get_field_default(field: dataclasses.Field):
"""
Return a marshmallow default value given a dataclass default value
>>> _get_field_default(dataclasses.field())
<marshmallow.missing>
"""
# Remove `type: ignore` when https://github.com/python/mypy/issues/6910 is fixed
default_factory = field.default_factory # type: ignore
if default_factory is not dataclasses.MISSING:
return default_factory
elif field.default is dataclasses.MISSING:
return marshmallow.missing
return field.default
def NewType(
name: str,
typ: Type[_U],
field: Optional[Type[marshmallow.fields.Field]] = None,
**kwargs,
) -> Callable[[_U], _U]:
"""NewType creates simple unique types
to which you can attach custom marshmallow attributes.
All the keyword arguments passed to this function will be transmitted
to the marshmallow field constructor.
>>> import marshmallow.validate
>>> IPv4 = NewType('IPv4', str, validate=marshmallow.validate.Regexp(r'^([0-9]{1,3}\\.){3}[0-9]{1,3}$'))
>>> @dataclass
... class MyIps:
... ips: List[IPv4]
>>> MyIps.Schema().load({"ips": ["0.0.0.0", "grumble grumble"]})
Traceback (most recent call last):
...
marshmallow.exceptions.ValidationError: {'ips': {1: ['String does not match expected pattern.']}}
>>> MyIps.Schema().load({"ips": ["127.0.0.1"]})
MyIps(ips=['127.0.0.1'])
>>> Email = NewType('Email', str, field=marshmallow.fields.Email)
>>> @dataclass
... class ContactInfo:
... mail: Email = dataclasses.field(default="<EMAIL>")
>>> ContactInfo.Schema().load({})
ContactInfo(mail='<EMAIL>')
>>> ContactInfo.Schema().load({"mail": "grumble grumble"})
Traceback (most recent call last):
...
marshmallow.exceptions.ValidationError: {'mail': ['Not a valid email address.']}
"""
def new_type(x: _U):
return x
new_type.__name__ = name
new_type.__supertype__ = typ # type: ignore
new_type._marshmallow_field = field # type: ignore
new_type._marshmallow_args = kwargs # type: ignore
return new_type
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| [
"inspect.getmembers",
"dataclasses.fields",
"marshmallow.Schema.TYPE_MAPPING.get",
"marshmallow_enum.EnumField",
"marshmallow.utils.is_iterable_but_not_string",
"dataclasses.dataclass",
"marshmallow.fields.Nested",
"marshmallow.fields.Raw",
"doctest.testmod",
"typing_inspect.get_args",
"typing_inspect.is_optional_type",
"typing_inspect.is_union_type",
"functools.lru_cache",
"inspect.isfunction",
"typing_inspect.get_origin",
"typing.cast",
"typing.TypeVar"
] | [((1344, 1357), 'typing.TypeVar', 'TypeVar', (['"""_U"""'], {}), "('_U')\n", (1351, 1357), False, 'from typing import Any, Callable, Dict, List, Mapping, Optional, Set, Tuple, Type, TypeVar, Union, cast, overload\n'), ((8956, 9002), 'functools.lru_cache', 'lru_cache', ([], {'maxsize': 'MAX_CLASS_SCHEMA_CACHE_SIZE'}), '(maxsize=MAX_CLASS_SCHEMA_CACHE_SIZE)\n', (8965, 9002), False, 'from functools import lru_cache\n'), ((2894, 2997), 'dataclasses.dataclass', 'dataclasses.dataclass', (['_cls'], {'repr': 'repr', 'eq': 'eq', 'order': 'order', 'unsafe_hash': 'unsafe_hash', 'frozen': 'frozen'}), '(_cls, repr=repr, eq=eq, order=order, unsafe_hash=\n unsafe_hash, frozen=frozen)\n', (2915, 2997), False, 'import dataclasses\n'), ((10249, 10293), 'typing.cast', 'cast', (['Type[marshmallow.Schema]', 'schema_class'], {}), '(Type[marshmallow.Schema], schema_class)\n', (10253, 10293), False, 'from typing import Any, Callable, Dict, List, Mapping, Optional, Set, Tuple, Type, TypeVar, Union, cast, overload\n'), ((13883, 13913), 'typing_inspect.get_origin', 'typing_inspect.get_origin', (['typ'], {}), '(typ)\n', (13908, 13913), False, 'import typing_inspect\n'), ((16790, 16835), 'marshmallow.fields.Nested', 'marshmallow.fields.Nested', (['nested'], {}), '(nested, **metadata)\n', (16815, 16835), False, 'import marshmallow\n'), ((19828, 19857), 'doctest.testmod', 'doctest.testmod', ([], {'verbose': '(True)'}), '(verbose=True)\n', (19843, 19857), False, 'import doctest\n'), ((9226, 9251), 'dataclasses.fields', 'dataclasses.fields', (['clazz'], {}), '(clazz)\n', (9244, 9251), False, 'import dataclasses\n'), ((10518, 10558), 'marshmallow.Schema.TYPE_MAPPING.get', 'marshmallow.Schema.TYPE_MAPPING.get', (['typ'], {}), '(typ)\n', (10553, 10558), False, 'import marshmallow\n'), ((13814, 13848), 'marshmallow.fields.Raw', 'marshmallow.fields.Raw', ([], {}), '(**metadata)\n', (13836, 13848), False, 'import marshmallow\n'), ((13949, 13983), 'typing_inspect.get_args', 'typing_inspect.get_args', (['typ', '(True)'], {}), '(typ, True)\n', (13972, 13983), False, 'import typing_inspect\n'), ((16111, 16134), 'inspect.isfunction', 'inspect.isfunction', (['typ'], {}), '(typ)\n', (16129, 16134), False, 'import inspect\n'), ((16451, 16494), 'marshmallow_enum.EnumField', 'marshmallow_enum.EnumField', (['typ'], {}), '(typ, **metadata)\n', (16477, 16494), False, 'import marshmallow_enum\n'), ((9703, 9728), 'inspect.getmembers', 'inspect.getmembers', (['clazz'], {}), '(clazz)\n', (9721, 9728), False, 'import inspect\n'), ((11263, 11330), 'marshmallow.utils.is_iterable_but_not_string', 'marshmallow.utils.is_iterable_but_not_string', (["meta_dict['validate']"], {}), "(meta_dict['validate'])\n", (11307, 11330), False, 'import marshmallow\n'), ((15034, 15070), 'typing_inspect.is_optional_type', 'typing_inspect.is_optional_type', (['typ'], {}), '(typ)\n', (15065, 15070), False, 'import typing_inspect\n'), ((9338, 9366), 'dataclasses.dataclass', 'dataclasses.dataclass', (['clazz'], {}), '(clazz)\n', (9359, 9366), False, 'import dataclasses\n'), ((15491, 15524), 'typing_inspect.is_union_type', 'typing_inspect.is_union_type', (['typ'], {}), '(typ)\n', (15519, 15524), False, 'import typing_inspect\n')] |
import sys
import typing
import numpy as np
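# Longest path in a DAG (by edge count), computed with Kahn-style topological
# processing: dist[v] is the maximum number of edges on any path ending at v.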
def solve(
n: int,
g: np.array,
) -> typing.NoReturn:
indeg = np.zeros(
n,
dtype=np.int64,
)
for v in g[:, 1]:
indeg[v] += 1
g = g[g[:, 0].argsort()]
i = np.searchsorted(
g[:, 0],
np.arange(n + 1)
)
q = [
v for v in range(n)
if not indeg[v]
]
dist = np.zeros(
n,
dtype=np.int64,
)
for u in q:
for j in range(
i[u], i[u + 1],
):
v = g[j, 1]
indeg[v] -= 1
dist[v] = max(
dist[v],
dist[u] + 1,
)
if indeg[v]: continue
q.append(v)
print(dist.max())
def main() -> typing.NoReturn:
n, m = map(
int, input().split(),
)
g = np.array(
sys.stdin.read().split(),
dtype=np.int64,
).reshape(m, 2) - 1
solve(n, g)
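# Ahead-of-time compilation trick for online judges: when the script is run with
# 'ONLINE_JUDGE' as its last argument, `solve` is compiled into a `my_module`
# extension via numba.pycc and the process exits; on a normal run the
# precompiled `solve` is imported instead and main() is executed.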
OJ = 'ONLINE_JUDGE'
if sys.argv[-1] == OJ:
from numba import i8, njit
from numba.pycc import CC
cc = CC('my_module')
fn = solve
signature = (i8, i8[:, :])
cc.export(
fn.__name__,
signature,
)(fn)
cc.compile()
exit(0)
from my_module import solve
main()
| [
"numba.pycc.CC",
"numpy.zeros",
"my_module.solve",
"sys.stdin.read",
"numpy.arange"
] | [((115, 142), 'numpy.zeros', 'np.zeros', (['n'], {'dtype': 'np.int64'}), '(n, dtype=np.int64)\n', (123, 142), True, 'import numpy as np\n'), ((347, 374), 'numpy.zeros', 'np.zeros', (['n'], {'dtype': 'np.int64'}), '(n, dtype=np.int64)\n', (355, 374), True, 'import numpy as np\n'), ((791, 802), 'my_module.solve', 'solve', (['n', 'g'], {}), '(n, g)\n', (796, 802), False, 'from my_module import solve\n'), ((912, 927), 'numba.pycc.CC', 'CC', (['"""my_module"""'], {}), "('my_module')\n", (914, 927), False, 'from numba.pycc import CC\n'), ((261, 277), 'numpy.arange', 'np.arange', (['(n + 1)'], {}), '(n + 1)\n', (270, 277), True, 'import numpy as np\n'), ((721, 737), 'sys.stdin.read', 'sys.stdin.read', ([], {}), '()\n', (735, 737), False, 'import sys\n')] |
import os
import tempfile
def hasOnePointInside(bigRect, minRect):  # at least one point lies inside
minY, minX, maxY, maxX = bigRect
y1, x1, y2, x2 = minRect
a = (minY <= y1 <= maxY)
b = (minX <= x1 <= maxX)
c = (minY <= y2 <= maxY)
d = (minX <= x2 <= maxX)
return a or b or c or d
def isCompletelyInside(bigRect, minRect):  # the object is completely inside the rectangle
y1, x1, y2, x2 = bigRect
minX = x1
    minY = y1  # seems right
maxX = x2
maxY = y2
y1, x1, y2, x2 = minRect
a = (minY <= y1 <= maxY)
b = (minX <= x1 <= maxX)
c = (minY <= y2 <= maxY)
d = (minX <= x2 <= maxX)
    return a and b and c and d  # if True, the object is completely inside the big rectangle
def isPartiallyInside(bigRect, minRect, innerPercent=0.5):  # the object is partially inside the rectangle
bigLUy, bigLUx, bigRDy, bigRDx = bigRect
minLUy, minLUx, minRDy, minRDx = minRect
    fullSquare = (minLUy - minRDy) * (minRDx - minLUx)  # not sure this is correct
    # Not sure about the ifs
if bigLUy < minLUy:
minLUy = bigLUy
if bigRDy < minRDy:
minRDy = bigRDy
if bigLUx > minLUx:
minLUx = bigLUx
if bigRDx > minRDx:
minRDx = bigRDx
inObjSquare = (minLUy - minRDy) * (minRDx - minLUx)
return inObjSquare / fullSquare >= innerPercent
def createGraphic(imagePath: str, searchRect: list, objectsListRect: list):
import matplotlib.pyplot as plt
from PIL import Image
import numpy as np
import matplotlib.patches as patches
im = np.array(Image.open(imagePath), dtype=np.uint8)
fig, ax = plt.subplots(1)
ax.imshow(im)
bigRect = Rectangle(searchRect)
minRects = [Rectangle(i) for i in objectsListRect]
rect = patches.Rectangle(*bigRect.getMTparam(), linewidth=1, edgecolor='g', facecolor='None')
ax.add_patch(rect)
for i in minRects:
rect = patches.Rectangle(*i.getMTparam(), linewidth=1, edgecolor='r', facecolor='none')
ax.add_patch(rect)
temp = tempfile.NamedTemporaryFile()
path = os.path.join(os.getcwd(), temp.name)
plt.savefig(path)
return os.path.split(temp.name + ".png")
class Rectangle:
LDx = 0
LDy = 0
RUx = 0
RUy = 0
def __init__(self, coordinates: list):
if len(coordinates) != 4:
raise ValueError("Нужно подавать координаты(х,у) двух противоложных вершин")
if coordinates[0] >= coordinates[2] or coordinates[1] >= coordinates[3]:
raise ValueError(
"Неверно заданы вершины, сначала подаются 2 координаты нижнего левого угла, потом верхнего правого")
self.LDx, self.LDy, self.RUx, self.RUy = coordinates
def getWidth(self):
return self.RUx - self.LDx
def getHeight(self):
return self.RUy - self.LDy
def getLUx(self):
return self.LDx
def getLUy(self):
return self.RUy
def getMTparam(self):
        return ((self.getLUy(), self.getLUx()),  # why the minus? I don't know
                -self.getHeight(), self.getWidth())  # everything is in a completely different order than it should be
def getCenterOfDown(self):
return [(self.LDx + self.RUx) / 2, self.LDy]
| [
"PIL.Image.open",
"matplotlib.pyplot.savefig",
"os.path.split",
"os.getcwd",
"tempfile.NamedTemporaryFile",
"matplotlib.pyplot.subplots"
] | [((1618, 1633), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)'], {}), '(1)\n', (1630, 1633), True, 'import matplotlib.pyplot as plt\n'), ((2025, 2054), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {}), '()\n', (2052, 2054), False, 'import tempfile\n'), ((2107, 2124), 'matplotlib.pyplot.savefig', 'plt.savefig', (['path'], {}), '(path)\n', (2118, 2124), True, 'import matplotlib.pyplot as plt\n'), ((2137, 2170), 'os.path.split', 'os.path.split', (["(temp.name + '.png')"], {}), "(temp.name + '.png')\n", (2150, 2170), False, 'import os\n'), ((1565, 1586), 'PIL.Image.open', 'Image.open', (['imagePath'], {}), '(imagePath)\n', (1575, 1586), False, 'from PIL import Image\n'), ((2079, 2090), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2088, 2090), False, 'import os\n')] |
from goopylib.objects.GraphicsObject import GraphicsObject
from goopylib.styles import *
class BBox(GraphicsObject):
# Internal base class for objects represented by bounding box
# (opposite corners) Line segment is a degenerate case.
resizing_objects = []
def __init__(self, p1, p2, bounds=None, fill=None, outline=None, outline_width=None, cursor="arrow", layer=0,
tag=None):
self.p1 = p1
self.p2 = p2
# These make sure that the p2 is 'after' p1, ie the x & y value of p2 is greater than that of p1
if self.p1[0] > self.p2[0]: # Checking if p1's x value is greater than p2's. If so, then swap the values
self.p1[0], self.p2[0] = self.p2[0], self.p1[0]
if self.p1[1] > self.p2[1]: # Checking if p1's y value is greater than p2's. If so, then swap the values
self.p1[1], self.p2[1] = self.p2[1], self.p1[1]
self.anchor = [(self.p1[0] + self.p2[0]) // 2, (self.p1[1] + self.p2[1]) // 2]
GraphicsObject.__init__(self, options=(), cursor=cursor, layer=layer, bounds=bounds, tag=tag)
# abs(p2[0] - p1[0]) is not required because the p2 value is always greater than or equal to the p1 value
self.width = self.p2[0] - self.p1[0]
self.height = self.p2[1] - self.p1[1]
self.min_width = None
self.min_height = None
self.max_width = None
self.max_height = None
self.resizing_bounds = {}
self.is_resizing = {}
self.bounds_thickness = 0
if fill is None:
self.fill = STYLES["default"]["fill"]
elif isinstance(fill, Colour): # Checking if the option is a colour
self.fill = fill
else: # If not, raise an error
raise GraphicsError(f"\n\nGraphicsError: The Rectangle fill must be a Colour object , not {fill}")
if outline is None:
self.outline = STYLES["default"]["outline"]
elif isinstance(outline, Colour): # Checking if the option is a colour
self.outline = outline
else: # If not, raise an error
raise GraphicsError(f"\n\nGraphicsError: The rectangle outline must be a Colour object , not {outline}")
if outline_width is None:
self.outline_width = STYLES["default"]["width"]
elif isinstance(outline_width, int): # Checking if the option is an integer
self.outline_width = outline_width
else: # If not, raise an error
raise GraphicsError(f"\n\nGraphicsError: The rectangle outline width must be an integer, not {outline_width}")
def __repr__(self):
return "_BBox"
def _set_resizable(self, resizables, top_bounds=None, bottom_bounds=None, left_bounds=None, right_bounds=None,
thickness=10):
"""Override in subclasses"""
pass
def _move(self, dx, dy):
self.p1[0] += dx
self.p1[1] += dy
self.p2[0] += dx
self.p2[1] += dy
self.anchor[0] += dx
self.anchor[1] += dy
def is_clicked(self, mouse_pos):
if self.bounds is None:
if mouse_pos is None:
return False
else:
if (self.p1[0] < mouse_pos[0] < self.p2[0] or self.p1[0] > mouse_pos[0] > self.p2[0]) and \
(self.p1[1] < mouse_pos[1] < self.p2[1] or self.p1[1] > mouse_pos[1] > self.p2[1]):
return True
else:
return False
else:
return self.bounds.is_clicked(mouse_pos)
def get_p1(self):
return self.p1.copy()
def get_p2(self):
return self.p2.copy()
def get_top_right(self):
return self.p1.copy()
def get_top_left(self):
return [self.p2[0], self.p1[1]]
def get_bottom_left(self):
return [self.p1[0], self.p2[1]]
def get_bottom_right(self):
return self.p2.copy()
def get_top(self):
return [(self.p2[0] + self.p1[0]) / 2, self.p1[1]]
def get_bottom(self):
return [(self.p2[0] + self.p1[0]) / 2, self.p2[1]]
def get_left(self):
return [self.p1[0], (self.p1[1] + self.p2[1]) / 2]
def get_right(self):
return [self.p2[0], (self.p1[1] + self.p2[1]) / 2]
def get_width(self):
return self.width
def get_height(self):
return self.height
def get_fill(self):
return self.fill
def get_outline(self):
return self.outline
def get_outline_width(self):
return self.outline_width
def get_anchor(self):
return self.anchor
def set_dimensions(self, width, height, horizontal_align="center", vertical_align="center"):
self.set_width(width, horizontal_align)
self.set_height(height, vertical_align)
return self
def set_resizable(self, top=False, left=False, bottom=False, right=False, min_width=40, min_height=40,
bounds_width=10, top_bounds=None, bottom_bounds=None, left_bounds=None, right_bounds=None):
if min_width < 1 or min_height < 1:
raise GraphicsError(f"\n\nGraphicsError: Minimum height and width of resizable object must be greater than "
f"or equal to 1. Right now, min_width={min_width} & min_height={min_height}")
self.min_width = min_width
self.min_height = min_height
self.is_resizing = {"top": top, "left": left, "bottom": bottom, "right": right}
self._set_resizable([top, bottom, left, right], top_bounds=top_bounds, bottom_bounds=bottom_bounds,
left_bounds=left_bounds, right_bounds=right_bounds, thickness=bounds_width)
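        # Keep the global resizing registry in sync: unregister this object when
        # no side is resizable, otherwise make sure it is registered.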
if top is False and bottom is False and left is False and right is False:
if self in GraphicsObject.resizing_objects:
GraphicsObject.resizing_objects.remove(self)
elif self not in GraphicsObject.resizing_objects:
GraphicsObject.resizing_objects.add(self)
self.bounds_thickness = bounds_width
return self
def set_coords(self, p1, p2):
self.p1 = p1.copy()
self.p2 = p2.copy()
# These make sure that the p2 is 'after' p1, ie the x & y value of p2 is greater than that of p1
if self.p1[0] > self.p2[0]: # Checking if p1's x value is greater than p2's. If so, then swap the values
self.p1[0], self.p2[0] = self.p2[0], self.p1[0]
if self.p1[1] > self.p2[1]: # Checking if p1's y value is greater than p2's. If so, then swap the values
self.p1[1], self.p2[1] = self.p2[1], self.p1[1]
# abs(p2[0] - p1[0]) is not required because the p2 value is always greater than or equal to the p1 value
self.width = self.p2[0] - self.p1[0]
self.height = self.p2[1] - self.p1[1]
width_scale = (p2[0] - p1[0]) / self.width
height_scale = (p2[1] - p1[1]) / self.height
# abs(p2[0] - p1[0]) is not required because the p2 value is always greater than or equal to the p1 value
self.width = p2[0] - p1[0]
self.height = p2[1] - p1[1]
self.anchor = [(self.p1[0] + self.p2[0]) // 2, (self.p1[1] + self.p2[1]) // 2]
self._update_layer()
return self
def set_width(self, width, center="center"):
if center not in {"center", "right", "left"}:
raise GraphicsError(
"\n\nThe center argument for resizing the object (set_outline_width) needs to be one of "
f'{["center", "right", "left"]}')
if center == "left":
self.set_coords(self.p1, self.p2.add_x(width - self.width))
elif center == "right":
self.set_coords(self.p1.add_x(-(width - self.width)), self.p2)
else:
self.set_coords(self.p1.add_x(-(width / 2 - self.width)), self.p2.add_x(width / 2 - self.width))
return self
def set_height(self, height, center="center"):
if center not in {"center", "top", "bottom"}:
raise GraphicsError(
"\n\nThe center argument for resizing the object (set_height) needs to be one of "
f'{["center", "top", "bottom"]}')
if center == "top":
self.set_coords(self.p1, self.p2.add_y(height - self.height))
elif center == "bottom":
self.set_coords(self.p1.add_y(-(height - self.height)), self.p2)
else:
self.set_coords(self.p1.add_y(-(height / 2 - self.height)), self.p2.add_y(height / 2 - self.height))
return self
def set_fill(self, fill):
if fill is None:
self.fill = STYLES["default"]["fill"]
elif isinstance(fill, Colour): # Checking if the option is a colour
self.fill = fill
else: # If not, raise an error
            raise GraphicsError(f"\n\nGraphicsError: The Rectangle fill must be a Colour object, not {fill}")
self._update_layer()
return self
def set_outline(self, outline):
if outline is None:
self.outline = STYLES["default"]["outline"]
elif isinstance(outline, Colour): # Checking if the option is a colour
self.outline = outline
else: # If not, raise an error
            raise GraphicsError(f"\n\nGraphicsError: The rectangle outline must be a Colour object, not {outline}")
self._update_layer()
return self
def set_outline_width(self, outline_width):
if outline_width is None:
self.outline_width = STYLES["default"]["width"]
elif isinstance(outline_width, int): # Checking if the option is an integer
self.outline_width = outline_width
else: # If not, raise an error
raise GraphicsError(
f"\n\nGraphicsError: The rectangle outline width must be an integer, not {outline_width}")
self._update_layer()
return self
| [
"goopylib.objects.GraphicsObject.GraphicsObject.resizing_objects.add",
"goopylib.objects.GraphicsObject.GraphicsObject.resizing_objects.remove",
"goopylib.objects.GraphicsObject.GraphicsObject.__init__"
] | [((1010, 1107), 'goopylib.objects.GraphicsObject.GraphicsObject.__init__', 'GraphicsObject.__init__', (['self'], {'options': '()', 'cursor': 'cursor', 'layer': 'layer', 'bounds': 'bounds', 'tag': 'tag'}), '(self, options=(), cursor=cursor, layer=layer,\n bounds=bounds, tag=tag)\n', (1033, 1107), False, 'from goopylib.objects.GraphicsObject import GraphicsObject\n'), ((5878, 5922), 'goopylib.objects.GraphicsObject.GraphicsObject.resizing_objects.remove', 'GraphicsObject.resizing_objects.remove', (['self'], {}), '(self)\n', (5916, 5922), False, 'from goopylib.objects.GraphicsObject import GraphicsObject\n'), ((5994, 6035), 'goopylib.objects.GraphicsObject.GraphicsObject.resizing_objects.add', 'GraphicsObject.resizing_objects.add', (['self'], {}), '(self)\n', (6029, 6035), False, 'from goopylib.objects.GraphicsObject import GraphicsObject\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''Make sure that generic functions work exactly as we expect.'''
# IMPORT STANDARD LIBRARIES
import unittest
# IMPORT WAYS LIBRARIES
from ways import common
class ParseTestCase(unittest.TestCase):
'''Test generic parsing-related functions.'''
def test_working_0001(self):
'''Test that correct input for expand_string works as expected.'''
pattern = '/jobs/{JOB}/some_kind/{THING}/real_folders'
text = '/jobs/some_job_here/some_kind/of/real_folders'
expected_output = {'JOB': 'some_job_here', 'THING': 'of'}
self.assertEqual(expected_output, common.expand_string(pattern, text))
def test_working_0002(self):
'''Test that correct input for expand_string works as expected.'''
shot = 'NAME_010'
format_string = '{SHOT}_{ID}'
expected_output = {'SHOT': 'NAME', 'ID': '010'}
self.assertEqual(expected_output, common.expand_string(format_string, shot))
def test_expand_string_failure_0001(self):
'''Force expand_string fails to prevent a bad match from occurring.'''
text = '/jobs/some_job/some_kind/of/real_folders'
pattern = '/jobs/{JOB}/some_kind/of/real_folders/inner'
self.assertFalse(common.expand_string(pattern, text))
def test_expand_string_failure_0002(self):
'''Force expand_string fails to prevent a bad match from occurring.'''
text = '/jobs/some_job/some_kind/of/real_folders'
pattern = '/jobs/{JOB}/some_kind/{SHOTNAME}/real_folders/inner'
self.assertFalse(common.expand_string(pattern, text))
| [
"ways.common.expand_string"
] | [((644, 679), 'ways.common.expand_string', 'common.expand_string', (['pattern', 'text'], {}), '(pattern, text)\n', (664, 679), False, 'from ways import common\n'), ((953, 994), 'ways.common.expand_string', 'common.expand_string', (['format_string', 'shot'], {}), '(format_string, shot)\n', (973, 994), False, 'from ways import common\n'), ((1271, 1306), 'ways.common.expand_string', 'common.expand_string', (['pattern', 'text'], {}), '(pattern, text)\n', (1291, 1306), False, 'from ways import common\n'), ((1591, 1626), 'ways.common.expand_string', 'common.expand_string', (['pattern', 'text'], {}), '(pattern, text)\n', (1611, 1626), False, 'from ways import common\n')] |
import os, sys
import random
import string
try:
# Make Python2 work like Python3
input = raw_input
except NameError:
# On Python3; already using input
pass
letters = string.ascii_letters
numbers = string.digits
punctuation = string.punctuation
def generate(password_length, at_least_one_letter, at_least_one_number, at_least_one_punctuation):
"""Generate a password by include enough random
characters to meet the password length restriction.
In addition, the user can specify that at least one
of the each of the classes of character be used.
"""
#
# Any combination of characters is valid
#
valid_characters = ""
if at_least_one_letter:
valid_characters += letters
if at_least_one_number:
valid_characters += numbers
if at_least_one_punctuation:
valid_characters += punctuation
#
# Start with a blank password and then go round enough
# times to make a password of the required length.
#
password = ""
for i in range(password_length):
#
# Each time around, ensure that one of each of the selected
# groups is chosen, and then just choose randomly from all
# groups.
#
if at_least_one_letter:
character = random.choice(letters)
at_least_one_letter = False
elif at_least_one_number:
character = random.choice(numbers)
at_least_one_number = False
elif at_least_one_punctuation:
character = random.choice(punctuation)
at_least_one_punctuation = False
else:
character = random.choice(valid_characters)
password += character
#
# Finally, shuffle the password so we don't always get a
# letter at the beginning, with a number after and some
# punctuation.
#
characters = list(password)
#
# random.shuffle shuffles a list *in place*
#
random.shuffle(characters)
#
# X.join(...) means: return all the strings in (...) joined by X
# ", ".join(['Eggs', 'Bacon', 'Beans']) => "Eggs, Bacon, Beans"
# But if you want to generate *real* .csv files, use the csv module
# because there are lots of corner-cases.
#
password = "".join(characters)
return password
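# Illustrative, non-interactive usage (not part of the original script). The exact
# output differs on every run because characters are chosen and shuffled randomly:
#
#     >>> pw = generate(12, True, True, True)
#     >>> len(pw)
#     12
#     >>> any(c in string.ascii_letters for c in pw)
#     True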
if __name__ == '__main__':
password_length = int(input("How many letters? "))
at_least_one_letter = "Y" == (input("At least one letter [Y/n]? ").upper() or "Y")
at_least_one_number = "Y" == (input("At least one number [Y/n]? ").upper() or "Y")
at_least_one_punctuation = "Y" == (input("At least one punctuation [Y/n]? ").upper() or "Y")
password = generate(password_length, at_least_one_letter, at_least_one_number, at_least_one_punctuation)
print("Your password is: {}".format(password))
| [
"random.choice",
"random.shuffle"
] | [((1951, 1977), 'random.shuffle', 'random.shuffle', (['characters'], {}), '(characters)\n', (1965, 1977), False, 'import random\n'), ((1283, 1305), 'random.choice', 'random.choice', (['letters'], {}), '(letters)\n', (1296, 1305), False, 'import random\n'), ((1404, 1426), 'random.choice', 'random.choice', (['numbers'], {}), '(numbers)\n', (1417, 1426), False, 'import random\n'), ((1530, 1556), 'random.choice', 'random.choice', (['punctuation'], {}), '(punctuation)\n', (1543, 1556), False, 'import random\n'), ((1640, 1671), 'random.choice', 'random.choice', (['valid_characters'], {}), '(valid_characters)\n', (1653, 1671), False, 'import random\n')] |
# Forms are not just about display; they are mostly about validation.
# Flask-WTF forms protect our site against CSRF attacks.
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, TextAreaField
from wtforms.validators import (DataRequired, Regexp, ValidationError, Email,
Length, EqualTo)
from models import User
def name_exists(form, field):
if User.select().where(User.username == field.data).exists():
raise ValidationError('User with this name already exists.')
def email_exists(form, field):
if User.select().where(User.email == field.data).exists():
raise ValidationError('User with this email already exists.')
class RegisterForm(FlaskForm):
username = StringField(
'Username', # is the label
validators=[
DataRequired(),
Regexp(
r'^[a-zA-Z0-9_]+$',
message = ("Username should be one word, letters, numbers and underscores only.")
),
name_exists
])
email = StringField(
'Email',
validators=[
DataRequired(),
Email(),
email_exists
])
password = PasswordField(
'Password',
validators=[
DataRequired(),
Length(min=8),
EqualTo('<PASSWORD>', message = 'Passwords must match')
])
password2 = PasswordField(
'<PASSWORD>',
validators=[DataRequired()
])
class LoginForm(FlaskForm):
email = StringField('Email', validators=[DataRequired(), Email()])
password = PasswordField('Password', validators=[DataRequired()])
class PostForm(FlaskForm):
content = TextAreaField("What's Up?", validators = [DataRequired()])
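# Illustrative usage sketch (not part of the original module). It assumes a Flask
# application with Flask-WTF configured; the route, template name and user-creation
# step below are hypothetical placeholders:
#
#     @app.route('/register', methods=('GET', 'POST'))
#     def register():
#         form = RegisterForm()
#         if form.validate_on_submit():  # runs DataRequired/Regexp/name_exists/email_exists
#             ...  # create the user from form.username.data, form.email.data, form.password.data
#         return render_template('register.html', form=form)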
| [
"wtforms.validators.Email",
"wtforms.validators.ValidationError",
"wtforms.validators.EqualTo",
"models.User.select",
"wtforms.validators.Length",
"wtforms.validators.Regexp",
"wtforms.validators.DataRequired"
] | [((450, 504), 'wtforms.validators.ValidationError', 'ValidationError', (['"""User with this name already exists."""'], {}), "('User with this name already exists.')\n", (465, 504), False, 'from wtforms.validators import DataRequired, Regexp, ValidationError, Email, Length, EqualTo\n'), ((605, 660), 'wtforms.validators.ValidationError', 'ValidationError', (['"""User with this email already exists."""'], {}), "('User with this email already exists.')\n", (620, 660), False, 'from wtforms.validators import DataRequired, Regexp, ValidationError, Email, Length, EqualTo\n'), ((765, 779), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (777, 779), False, 'from wtforms.validators import DataRequired, Regexp, ValidationError, Email, Length, EqualTo\n'), ((784, 893), 'wtforms.validators.Regexp', 'Regexp', (['"""^[a-zA-Z0-9_]+$"""'], {'message': '"""Username should be one word, letters, numbers and underscores only."""'}), "('^[a-zA-Z0-9_]+$', message=\n 'Username should be one word, letters, numbers and underscores only.')\n", (790, 893), False, 'from wtforms.validators import DataRequired, Regexp, ValidationError, Email, Length, EqualTo\n'), ((981, 995), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (993, 995), False, 'from wtforms.validators import DataRequired, Regexp, ValidationError, Email, Length, EqualTo\n'), ((1000, 1007), 'wtforms.validators.Email', 'Email', ([], {}), '()\n', (1005, 1007), False, 'from wtforms.validators import DataRequired, Regexp, ValidationError, Email, Length, EqualTo\n'), ((1090, 1104), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (1102, 1104), False, 'from wtforms.validators import DataRequired, Regexp, ValidationError, Email, Length, EqualTo\n'), ((1109, 1122), 'wtforms.validators.Length', 'Length', ([], {'min': '(8)'}), '(min=8)\n', (1115, 1122), False, 'from wtforms.validators import DataRequired, Regexp, ValidationError, Email, Length, EqualTo\n'), ((1127, 1180), 'wtforms.validators.EqualTo', 'EqualTo', (['"""<PASSWORD>"""'], {'message': '"""Passwords must match"""'}), "('<PASSWORD>', message='Passwords must match')\n", (1134, 1180), False, 'from wtforms.validators import DataRequired, Regexp, ValidationError, Email, Length, EqualTo\n'), ((1247, 1261), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (1259, 1261), False, 'from wtforms.validators import DataRequired, Regexp, ValidationError, Email, Length, EqualTo\n'), ((1339, 1353), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (1351, 1353), False, 'from wtforms.validators import DataRequired, Regexp, ValidationError, Email, Length, EqualTo\n'), ((1355, 1362), 'wtforms.validators.Email', 'Email', ([], {}), '()\n', (1360, 1362), False, 'from wtforms.validators import DataRequired, Regexp, ValidationError, Email, Length, EqualTo\n'), ((1415, 1429), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (1427, 1429), False, 'from wtforms.validators import DataRequired, Regexp, ValidationError, Email, Length, EqualTo\n'), ((1513, 1527), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (1525, 1527), False, 'from wtforms.validators import DataRequired, Regexp, ValidationError, Email, Length, EqualTo\n'), ((383, 396), 'models.User.select', 'User.select', ([], {}), '()\n', (394, 396), False, 'from models import User\n'), ((541, 554), 'models.User.select', 'User.select', ([], {}), '()\n', (552, 554), False, 'from models import User\n')] |
"""
A Testcase to remove mon from
when I/O's are happening.
Polarion-ID- OCS-355
"""
import logging
import pytest
from ocs_ci.ocs import ocp, constants
from ocs_ci.framework.testlib import tier4, ManageTest
from ocs_ci.framework import config
from ocs_ci.ocs.resources import pod
from tests.helpers import run_io_with_rados_bench, delete_cephblockpool
from ocs_ci.ocs.cluster import CephCluster
from ocs_ci.utility.retry import retry
from ocs_ci.ocs.exceptions import CephHealthException
log = logging.getLogger(__name__)
@retry(CephHealthException, 8, 3, 1)
def verify_mon_pod_up(ceph_cluster, pods):
"""
Verify mon pods are in Running state.
Returns:
        bool: True if all mon pods reach the Running state within the timeout, False otherwise
"""
    log.info("Verifying all mon pods are up and Running")
ceph_cluster.cluster_health_check(timeout=3)
ret = pods.wait_for_resource(
condition=constants.STATUS_RUNNING, selector='app=rook-ceph-mon',
resource_count=3, timeout=700)
    log.info(f"Waited for all mon pods to come up and reach Running state: {ret}")
return ret
def run_io_on_pool():
"""
    Run rados bench I/O against the 'test-pool' pool using the Ceph tools pod
Returns: A thread of I/O
"""
tools_pod = pod.get_ceph_tools_pod()
tools_pod.add_role(role='client')
return run_io_with_rados_bench(
ceph_pods=[tools_pod],
config={'time': 45, 'cleanup': False,
'pool': 'test-pool'
}
)
@tier4
@pytest.mark.polarion_id("OCS-355")
class TestRemoveMonFromCluster(ManageTest):
def test_remove_mon_pod_from_cluster(self):
"""
        Remove a mon pod from the cluster after I/O has been performed
        on the pool, then wait for the operator to create a new mon pod
        on its own
"""
ceph_cluster = CephCluster()
pods = ocp.OCP(
kind=constants.POD, namespace=config.ENV_DATA['cluster_namespace']
)
list_mons = ceph_cluster.get_mons_from_cluster()
assert len(list_mons) > 1, pytest.skip(
"INVALID: Mon count should be more than one to delete."
)
assert run_io_on_pool(), 'Failed to run I/O on the pool'
assert delete_cephblockpool('test-pool'), 'Failed to delete pool'
ceph_cluster.cluster_health_check(timeout=0)
ceph_cluster.remove_mon_from_cluster()
        assert verify_mon_pod_up(ceph_cluster, pods), "Mon pods are not in Running state"
ceph_cluster.cluster_health_check(timeout=60)
| [
"logging.getLogger",
"ocs_ci.ocs.resources.pod.get_ceph_tools_pod",
"tests.helpers.run_io_with_rados_bench",
"tests.helpers.delete_cephblockpool",
"ocs_ci.ocs.cluster.CephCluster",
"ocs_ci.utility.retry.retry",
"pytest.mark.polarion_id",
"pytest.skip",
"ocs_ci.ocs.ocp.OCP"
] | [((499, 526), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (516, 526), False, 'import logging\n'), ((530, 565), 'ocs_ci.utility.retry.retry', 'retry', (['CephHealthException', '(8)', '(3)', '(1)'], {}), '(CephHealthException, 8, 3, 1)\n', (535, 565), False, 'from ocs_ci.utility.retry import retry\n'), ((1466, 1500), 'pytest.mark.polarion_id', 'pytest.mark.polarion_id', (['"""OCS-355"""'], {}), "('OCS-355')\n", (1489, 1500), False, 'import pytest\n'), ((1219, 1243), 'ocs_ci.ocs.resources.pod.get_ceph_tools_pod', 'pod.get_ceph_tools_pod', ([], {}), '()\n', (1241, 1243), False, 'from ocs_ci.ocs.resources import pod\n'), ((1294, 1404), 'tests.helpers.run_io_with_rados_bench', 'run_io_with_rados_bench', ([], {'ceph_pods': '[tools_pod]', 'config': "{'time': 45, 'cleanup': False, 'pool': 'test-pool'}"}), "(ceph_pods=[tools_pod], config={'time': 45,\n 'cleanup': False, 'pool': 'test-pool'})\n", (1317, 1404), False, 'from tests.helpers import run_io_with_rados_bench, delete_cephblockpool\n'), ((1812, 1825), 'ocs_ci.ocs.cluster.CephCluster', 'CephCluster', ([], {}), '()\n', (1823, 1825), False, 'from ocs_ci.ocs.cluster import CephCluster\n'), ((1841, 1916), 'ocs_ci.ocs.ocp.OCP', 'ocp.OCP', ([], {'kind': 'constants.POD', 'namespace': "config.ENV_DATA['cluster_namespace']"}), "(kind=constants.POD, namespace=config.ENV_DATA['cluster_namespace'])\n", (1848, 1916), False, 'from ocs_ci.ocs import ocp, constants\n'), ((2031, 2099), 'pytest.skip', 'pytest.skip', (['"""INVALID: Mon count should be more than one to delete."""'], {}), "('INVALID: Mon count should be more than one to delete.')\n", (2042, 2099), False, 'import pytest\n'), ((2202, 2235), 'tests.helpers.delete_cephblockpool', 'delete_cephblockpool', (['"""test-pool"""'], {}), "('test-pool')\n", (2222, 2235), False, 'from tests.helpers import run_io_with_rados_bench, delete_cephblockpool\n')] |
'''
Unit tests table.py.
:see: http://docs.python.org/lib/minimal-example.html for an intro to unittest
:see: http://agiletesting.blogspot.com/2005/01/python-unit-testing-part-1-unittest.html
:see: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/305292
'''
from __future__ import absolute_import
from statsmodels.compat.python import zip
import numpy as np
from numpy.testing import assert_equal
__docformat__ = "restructuredtext en"
from statsmodels.iolib.table import Cell, SimpleTable
from statsmodels.iolib.table import default_latex_fmt
from statsmodels.iolib.table import default_html_fmt
ltx_fmt1 = default_latex_fmt.copy()
html_fmt1 = default_html_fmt.copy()
txt_fmt1 = dict(
data_fmts = ['%0.2f', '%d'],
empty_cell = ' ',
colwidths = 1,
colsep=' * ',
row_pre = '* ',
row_post = ' *',
table_dec_above='*',
table_dec_below='*',
header_dec_below='*',
header_fmt = '%s',
stub_fmt = '%s',
title_align='r',
header_align = 'r',
data_aligns = "r",
stubs_align = "l",
fmt = 'txt'
)
cell0data = 0.0000
cell1data = 1
row0data = [cell0data, cell1data]
row1data = [2, 3.333]
table1data = [ row0data, row1data ]
test1stubs = ('stub1', 'stub2')
test1header = ('header1', 'header2')
#test1header = ('header1\nheader1a', 'header2\nheader2a')
tbl = SimpleTable(table1data, test1header, test1stubs,
txt_fmt=txt_fmt1, ltx_fmt=ltx_fmt1, html_fmt=html_fmt1)
def custom_labeller(cell):
if cell.data is np.nan:
return 'missing'
class TestCell(object):
def test_celldata(self):
celldata = cell0data, cell1data, row1data[0], row1data[1]
cells = [Cell(datum, datatype=i % 2)
for i, datum in enumerate(celldata)]
for cell, datum in zip(cells, celldata):
assert_equal(cell.data, datum)
class TestSimpleTable(object):
def test_txt_fmt1(self):
# Limited test of custom txt_fmt
desired = """
*****************************
* * header1 * header2 *
*****************************
* stub1 * 0.00 * 1 *
* stub2 * 2.00 * 3 *
*****************************
"""
actual = '\n%s\n' % tbl.as_text()
#print('actual')
#print(actual)
#print('desired')
#print(desired)
assert_equal(actual, desired)
def test_ltx_fmt1(self):
# Limited test of custom ltx_fmt
desired = r"""
\begin{center}
\begin{tabular}{lcc}
\toprule
& \textbf{header1} & \textbf{header2} \\
\midrule
\textbf{stub1} & 0.0 & 1 \\
\textbf{stub2} & 2 & 3.333 \\
\bottomrule
\end{tabular}
\end{center}
"""
actual = '\n%s\n' % tbl.as_latex_tabular()
#print(actual)
#print(desired)
assert_equal(actual, desired)
def test_html_fmt1(self):
# Limited test of custom html_fmt
desired = """
<table class="simpletable">
<tr>
<td></td> <th>header1</th> <th>header2</th>
</tr>
<tr>
<th>stub1</th> <td>0.0</td> <td>1</td>
</tr>
<tr>
<th>stub2</th> <td>2</td> <td>3.333</td>
</tr>
</table>
"""
#the previous has significant trailing whitespace that got removed
#desired = '''\n<table class="simpletable">\n<tr>\n <td></td> <th>header1</th> <th>header2</th>\n</tr>\n<tr>\n <th>stub1</th> <td>0.0</td> <td>1</td> \n</tr>\n<tr>\n <th>stub2</th> <td>2</td> <td>3.333</td> \n</tr>\n</table>\n'''
actual = '\n%s\n' % tbl.as_html()
actual = '\n'.join((line.rstrip() for line in actual.split('\n')))
#print(actual)
#print(desired)
#print len(actual), len(desired)
assert_equal(actual, desired)
def test_customlabel(self):
# Limited test of custom custom labeling
tbl = SimpleTable(table1data, test1header, test1stubs, txt_fmt=txt_fmt1)
tbl[1][1].data = np.nan
tbl.label_cells(custom_labeller)
#print([[c.datatype for c in row] for row in tbl])
desired = """
*****************************
* * header1 * header2 *
*****************************
* stub1 * -- * 1 *
* stub2 * 2.00 * 3 *
*****************************
"""
actual = '\n%s\n' % tbl.as_text(missing='--')
assert_equal(actual, desired)
| [
"numpy.testing.assert_equal",
"statsmodels.iolib.table.Cell",
"statsmodels.iolib.table.default_latex_fmt.copy",
"statsmodels.iolib.table.default_html_fmt.copy",
"statsmodels.compat.python.zip",
"statsmodels.iolib.table.SimpleTable"
] | [((619, 643), 'statsmodels.iolib.table.default_latex_fmt.copy', 'default_latex_fmt.copy', ([], {}), '()\n', (641, 643), False, 'from statsmodels.iolib.table import default_latex_fmt\n'), ((656, 679), 'statsmodels.iolib.table.default_html_fmt.copy', 'default_html_fmt.copy', ([], {}), '()\n', (677, 679), False, 'from statsmodels.iolib.table import default_html_fmt\n'), ((1318, 1427), 'statsmodels.iolib.table.SimpleTable', 'SimpleTable', (['table1data', 'test1header', 'test1stubs'], {'txt_fmt': 'txt_fmt1', 'ltx_fmt': 'ltx_fmt1', 'html_fmt': 'html_fmt1'}), '(table1data, test1header, test1stubs, txt_fmt=txt_fmt1, ltx_fmt=\n ltx_fmt1, html_fmt=html_fmt1)\n', (1329, 1427), False, 'from statsmodels.iolib.table import Cell, SimpleTable\n'), ((1756, 1776), 'statsmodels.compat.python.zip', 'zip', (['cells', 'celldata'], {}), '(cells, celldata)\n', (1759, 1776), False, 'from statsmodels.compat.python import zip\n'), ((2278, 2307), 'numpy.testing.assert_equal', 'assert_equal', (['actual', 'desired'], {}), '(actual, desired)\n', (2290, 2307), False, 'from numpy.testing import assert_equal\n'), ((2775, 2804), 'numpy.testing.assert_equal', 'assert_equal', (['actual', 'desired'], {}), '(actual, desired)\n', (2787, 2804), False, 'from numpy.testing import assert_equal\n'), ((3673, 3702), 'numpy.testing.assert_equal', 'assert_equal', (['actual', 'desired'], {}), '(actual, desired)\n', (3685, 3702), False, 'from numpy.testing import assert_equal\n'), ((3799, 3865), 'statsmodels.iolib.table.SimpleTable', 'SimpleTable', (['table1data', 'test1header', 'test1stubs'], {'txt_fmt': 'txt_fmt1'}), '(table1data, test1header, test1stubs, txt_fmt=txt_fmt1)\n', (3810, 3865), False, 'from statsmodels.iolib.table import Cell, SimpleTable\n'), ((4266, 4295), 'numpy.testing.assert_equal', 'assert_equal', (['actual', 'desired'], {}), '(actual, desired)\n', (4278, 4295), False, 'from numpy.testing import assert_equal\n'), ((1647, 1674), 'statsmodels.iolib.table.Cell', 'Cell', (['datum'], {'datatype': '(i % 2)'}), '(datum, datatype=i % 2)\n', (1651, 1674), False, 'from statsmodels.iolib.table import Cell, SimpleTable\n'), ((1790, 1820), 'numpy.testing.assert_equal', 'assert_equal', (['cell.data', 'datum'], {}), '(cell.data, datum)\n', (1802, 1820), False, 'from numpy.testing import assert_equal\n')] |
from abc import ABC, abstractmethod
from contextlib import contextmanager
from uuid import uuid4
import pytest
from sqlalchemy import (
delete,
select,
UniqueConstraint,
)
class AbstractBaseTest(ABC):
@pytest.fixture
def cls_(self):
"""
Return class under test.
Assumptions: if the class under test is Foo, then the class grouping
the tests should be a subclass of BaseTest, named TestFoo.
"""
prefix = len("Test")
class_name = self.__class__.__name__[prefix:]
return getattr(self.get_model(), class_name)
@abstractmethod
def get_model(self):
pass
def dbcleanup_wrapper(session, obj, where_clause=None):
with dbcleanup(session, obj, where_clause):
yield obj
@contextmanager
def dbcleanup(session, obj, where_clause=None):
"""
Use the session to store obj in database; delete from database on exit, bypassing the session.
If obj does not have an id field, a SQLAlchemy WHERE clause should be provided to construct
a custom select statement.
"""
return_id = where_clause is None
try:
obj_id = persist(session, obj, return_id)
yield obj_id
finally:
table = obj.__table__
if where_clause is None:
where_clause = _get_default_where_clause(type(obj), obj_id)
stmt = delete(table).where(where_clause)
session.execute(stmt)
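# Illustrative usage (not part of the original helpers). `Foo` and `session` are
# hypothetical stand-ins for a mapped model class and a pytest session fixture:
#
#     def test_create_and_load(session):
#         obj = Foo(name=get_unique_value())
#         with dbcleanup(session, obj) as obj_id:
#             stored = get_stored_obj(session, Foo, obj_id)
#             assert stored.name == obj.name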
def persist(session, obj, return_id=True):
"""
Use the session to store obj in database, then remove obj from session,
so that on a subsequent load from the database we get a clean instance.
"""
session.add(obj)
session.flush()
obj_id = obj.id if return_id else None # save this before obj is expunged
session.expunge(obj)
return obj_id
def delete_from_database(session, objects):
"""
Delete each object in objects from database.
May be called at the end of a test if use of a context manager is impractical.
(Assume all objects have the id field as their primary key.)
"""
# Ensure we have a list of objects (check for list explicitly: a model can be iterable)
if not isinstance(objects, list):
objects = [objects]
for obj in objects:
table = obj.__table__
stmt = delete(table).where(table.c.id == obj.id)
session.execute(stmt)
def get_stored_obj(session, cls, obj_id=None, where_clause=None, unique=False):
# Either obj_id or where_clause must be provided, but not both
assert bool(obj_id) ^ (where_clause is not None)
if where_clause is None:
where_clause = _get_default_where_clause(cls, obj_id)
stmt = select(cls).where(where_clause)
result = session.execute(stmt)
# unique() is required if result contains joint eager loads against collections
# https://gerrit.sqlalchemy.org/c/sqlalchemy/sqlalchemy/+/2253
if unique:
result = result.unique()
return result.scalar_one()
def has_unique_constraint(table, fields):
for constraint in table.constraints:
if isinstance(constraint, UniqueConstraint):
col_names = {c.name for c in constraint.columns}
if set(fields) == col_names:
return True
def has_index(table, fields):
for index in table.indexes:
col_names = {c.name for c in index.columns}
if set(fields) == col_names:
return True
def collection_consists_of_objects(collection, *objects):
"""
Returns True iff list(collection) == list(objects), where object equality is determined
by primary key equality: object1.id == object2.id.
"""
if len(collection) != len(objects): # False if lengths are different
return False
if not collection: # True if both are empty
return True
# Sort, then compare each member by its 'id' attribute, which must be its primary key.
collection.sort(key=lambda item: item.id)
objects_l = list(objects)
objects_l.sort(key=lambda item: item.id)
for item1, item2 in zip(collection, objects_l):
if item1.id is None or item2.id is None or item1.id != item2.id:
return False
return True
def get_unique_value():
"""Generate unique values to accommodate unique constraints."""
return uuid4().hex
def _get_default_where_clause(cls, obj_id):
where_clause = cls.__table__.c.id == obj_id
return where_clause
| [
"sqlalchemy.select",
"sqlalchemy.delete",
"uuid.uuid4"
] | [((4284, 4291), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (4289, 4291), False, 'from uuid import uuid4\n'), ((2669, 2680), 'sqlalchemy.select', 'select', (['cls'], {}), '(cls)\n', (2675, 2680), False, 'from sqlalchemy import delete, select, UniqueConstraint\n'), ((1366, 1379), 'sqlalchemy.delete', 'delete', (['table'], {}), '(table)\n', (1372, 1379), False, 'from sqlalchemy import delete, select, UniqueConstraint\n'), ((2293, 2306), 'sqlalchemy.delete', 'delete', (['table'], {}), '(table)\n', (2299, 2306), False, 'from sqlalchemy import delete, select, UniqueConstraint\n')] |
from calendar import setfirstweekday
stopped_in_user_file = True
setfirstweekday(15) | [
"calendar.setfirstweekday"
] | [((65, 84), 'calendar.setfirstweekday', 'setfirstweekday', (['(15)'], {}), '(15)\n', (80, 84), False, 'from calendar import setfirstweekday\n')] |
import os
import sys
import pandas as pd
from datetime import datetime
from settings import RAW_DATA_DIR, DataV3, DATA_V3_SUBVERSION
from src.features.helpers.processing import add_missing_timestamp_values
from src.features.helpers.processing_v3 import get_closest_players, get_players_and_ball_indices, calculate_distance, \
normalize_according_to_play_direction, check_group_event
from src.features.helpers.processing_v4 import home_has_possession, calculate_team_sitation
week_num = int(sys.argv[1])
data_v3 = DataV3(DATA_V3_SUBVERSION)
save_file_path = data_v3.get_step1_checkpoint_path(week_num)
try:
clean_df = pd.read_csv(save_file_path)
save_file_exists = True
except FileNotFoundError:
save_file_exists = False
if not save_file_exists:
print("Started loading data")
play_df = pd.read_csv(os.path.join(RAW_DATA_DIR, 'plays.csv'))
games_df = pd.read_csv(os.path.join(RAW_DATA_DIR, 'games.csv'))
week_and_games = games_df[games_df.week == week_num]
tracking_df = pd.read_csv(os.path.join(RAW_DATA_DIR, f'week{week_num}.csv'))
print("Data loaded. Start processing timestamps")
tracking_df = add_missing_timestamp_values(tracking_df)
games_n_plays_df = play_df.merge(week_and_games, how='inner', on='gameId')
m_grouped = games_n_plays_df.groupby(['gameId', 'playId'])
df_t = tracking_df.merge(games_n_plays_df, how='left', on=['gameId', 'playId'])
# Remove all events without 'pass_forward'
df_t_grouped = df_t.groupby(['gameId', 'playId'])
df_t_v3 = df_t.copy().sort_index()
for name, group in df_t_grouped:
game_id, play_id = name
# if group does not contain pass forward, drop it
if all(group.event != 'pass_forward'):
df_t_v3 = df_t_v3[(df_t_v3.gameId != game_id) | (df_t_v3.playId != play_id)]
df_t_v3_s = df_t_v3.sort_values(by=['gameId', 'playId', 'time', 'event'])
df_t_v3_s = df_t_v3_s.reset_index(drop=True)
df_t_grouped = df_t_v3_s.groupby(['gameId', 'playId'])
# remove all values before 'pass_forward'
print("Removing all values before pass forward event...")
for name, group in df_t_grouped:
game_id, play_id = name
pass_forward_frame_id = group[group.event == 'pass_forward'].index.min() - 1
remove_start = group.index.min()
df_t_v3_s = df_t_v3_s.drop(df_t_v3_s.loc[remove_start:pass_forward_frame_id].index)
pd.options.mode.chained_assignment = None
gb = df_t_v3_s.groupby(['gameId', 'playId'])
print('Getting closest players...')
keep_indices = []
for name, group in gb:
game_id, play_id = name
try:
event_3rd = group.event.unique()[2]
except IndexError:
print('Number of events is < 3, skipping...')
continue
situation_df = group[group.event == event_3rd]
# convert dataframe into series
ball_row = situation_df[situation_df.team == 'football'].head(1)
# remove ball
player_situation_df = situation_df[situation_df.team != 'football']
try:
p1, p2 = get_closest_players(player_situation_df, ball_row.x.item(), ball_row.y.item())
except ValueError:
print('Value Error raised. This group will be skipped.')
continue
p_n_b_indices = get_players_and_ball_indices(group, p1, p2)
if p_n_b_indices:
keep_indices.extend(p_n_b_indices)
clean_df = df_t_v3_s[df_t_v3_s.index.isin(keep_indices)]
clean_df.to_csv(
save_file_path,
index=False
)
print('Normalize...')
clean_df = normalize_according_to_play_direction(clean_df)
clean_df['homeHasPossession'] = clean_df.apply(
lambda row: home_has_possession(row), axis=1
)
clean_df['teamSituation'] = clean_df.apply(
lambda row: calculate_team_sitation(row), axis=1
)
print('Creating features...')
min_df = clean_df[[
'time', 'x', 'y', 's', 'o', 'dir', 'event', 'team',
'gameId', 'playId', 'frameId', 'isDefensivePI'
]]
gb_2 = clean_df.groupby(['gameId', 'playId', 'frameId'])
# ball direction and orientation are NaN
calc_df = pd.DataFrame(
columns=[
'time',
'att_def_d', 'att_ball_d', 'def_ball_d',
'att_s', 'def_s', 'ball_s',
'att_o', 'def_o',
'att_dir', 'def_dir',
'event', 'gameId', 'playId', 'frameId', 'isDefensivePI'
]
)
GROUP_SIZE_MINIMUM = 3
for name, group in gb_2:
game_id, play_id, frameId = name
if len(group) < GROUP_SIZE_MINIMUM:
continue
ball = group[group.teamSituation == 'football'].head(1).squeeze()
p_att = group[group.teamSituation == 'attacking'].head(1).squeeze()
p_def = group[group.teamSituation == 'defending'].head(1).squeeze()
group_row = group.head(1).squeeze()
group_events = group.event.unique().tolist()
dict_to_append = {
'time': group_row.time,
'att_def_d': calculate_distance(p_att.x, p_att.y, p_def.x, p_def.y),
'att_ball_d': calculate_distance(p_att.x, p_att.y, ball.x, ball.y),
'def_ball_d': calculate_distance(p_def.x, p_def.y, ball.x, ball.y),
'att_s': p_att.s, 'def_s': p_def.s, 'ball_s': ball.s,
'att_a': p_att.a, 'def_a': p_def.a, 'ball_a': ball.a,
'att_o': p_att.o, 'def_o': p_def.o,
'att_dir': p_att.dir, 'def_dir': p_def.dir,
'event': group_row.event,
'pass_arrived': check_group_event(group_events, 'pass_arrived'),
'pass_outcome_caught': check_group_event(group_events, 'pass_outcome_caught'),
'tackle': check_group_event(group_events, 'tackle'),
'first_contact': check_group_event(group_events, 'first_contact'),
'pass_outcome_incomplete': check_group_event(group_events, 'pass_outcome_incomplete'),
'out_of_bounds': check_group_event(group_events, 'out_of_bounds'),
'week': week_num,
'gameId': group_row.gameId,
'playId': group_row.playId,
'frameId': group_row.frameId,
'isDefensivePI': group_row.isDefensivePI
}
calc_df = calc_df.append(
dict_to_append,
ignore_index=True
)
print("Saving data...")
calc_df.to_csv(
data_v3.get_step1_end_path(week_num),
index=False
)
print(f'End time: {datetime.now().strftime("%H:%M:%S")}')
| [
"settings.DataV3",
"src.features.helpers.processing_v3.normalize_according_to_play_direction",
"pandas.read_csv",
"src.features.helpers.processing_v4.calculate_team_sitation",
"src.features.helpers.processing_v3.check_group_event",
"os.path.join",
"src.features.helpers.processing_v3.get_players_and_ball_indices",
"datetime.datetime.now",
"src.features.helpers.processing.add_missing_timestamp_values",
"src.features.helpers.processing_v4.home_has_possession",
"src.features.helpers.processing_v3.calculate_distance",
"pandas.DataFrame"
] | [((519, 545), 'settings.DataV3', 'DataV3', (['DATA_V3_SUBVERSION'], {}), '(DATA_V3_SUBVERSION)\n', (525, 545), False, 'from settings import RAW_DATA_DIR, DataV3, DATA_V3_SUBVERSION\n'), ((3598, 3645), 'src.features.helpers.processing_v3.normalize_according_to_play_direction', 'normalize_according_to_play_direction', (['clean_df'], {}), '(clean_df)\n', (3635, 3645), False, 'from src.features.helpers.processing_v3 import get_closest_players, get_players_and_ball_indices, calculate_distance, normalize_according_to_play_direction, check_group_event\n'), ((4116, 4320), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['time', 'att_def_d', 'att_ball_d', 'def_ball_d', 'att_s', 'def_s',\n 'ball_s', 'att_o', 'def_o', 'att_dir', 'def_dir', 'event', 'gameId',\n 'playId', 'frameId', 'isDefensivePI']"}), "(columns=['time', 'att_def_d', 'att_ball_d', 'def_ball_d',\n 'att_s', 'def_s', 'ball_s', 'att_o', 'def_o', 'att_dir', 'def_dir',\n 'event', 'gameId', 'playId', 'frameId', 'isDefensivePI'])\n", (4128, 4320), True, 'import pandas as pd\n'), ((627, 654), 'pandas.read_csv', 'pd.read_csv', (['save_file_path'], {}), '(save_file_path)\n', (638, 654), True, 'import pandas as pd\n'), ((1145, 1186), 'src.features.helpers.processing.add_missing_timestamp_values', 'add_missing_timestamp_values', (['tracking_df'], {}), '(tracking_df)\n', (1173, 1186), False, 'from src.features.helpers.processing import add_missing_timestamp_values\n'), ((824, 863), 'os.path.join', 'os.path.join', (['RAW_DATA_DIR', '"""plays.csv"""'], {}), "(RAW_DATA_DIR, 'plays.csv')\n", (836, 863), False, 'import os\n'), ((892, 931), 'os.path.join', 'os.path.join', (['RAW_DATA_DIR', '"""games.csv"""'], {}), "(RAW_DATA_DIR, 'games.csv')\n", (904, 931), False, 'import os\n'), ((1021, 1070), 'os.path.join', 'os.path.join', (['RAW_DATA_DIR', 'f"""week{week_num}.csv"""'], {}), "(RAW_DATA_DIR, f'week{week_num}.csv')\n", (1033, 1070), False, 'import os\n'), ((3313, 3356), 'src.features.helpers.processing_v3.get_players_and_ball_indices', 'get_players_and_ball_indices', (['group', 'p1', 'p2'], {}), '(group, p1, p2)\n', (3341, 3356), False, 'from src.features.helpers.processing_v3 import get_closest_players, get_players_and_ball_indices, calculate_distance, normalize_according_to_play_direction, check_group_event\n'), ((3711, 3735), 'src.features.helpers.processing_v4.home_has_possession', 'home_has_possession', (['row'], {}), '(row)\n', (3730, 3735), False, 'from src.features.helpers.processing_v4 import home_has_possession, calculate_team_sitation\n'), ((3807, 3835), 'src.features.helpers.processing_v4.calculate_team_sitation', 'calculate_team_sitation', (['row'], {}), '(row)\n', (3830, 3835), False, 'from src.features.helpers.processing_v4 import home_has_possession, calculate_team_sitation\n'), ((4899, 4953), 'src.features.helpers.processing_v3.calculate_distance', 'calculate_distance', (['p_att.x', 'p_att.y', 'p_def.x', 'p_def.y'], {}), '(p_att.x, p_att.y, p_def.x, p_def.y)\n', (4917, 4953), False, 'from src.features.helpers.processing_v3 import get_closest_players, get_players_and_ball_indices, calculate_distance, normalize_according_to_play_direction, check_group_event\n'), ((4977, 5029), 'src.features.helpers.processing_v3.calculate_distance', 'calculate_distance', (['p_att.x', 'p_att.y', 'ball.x', 'ball.y'], {}), '(p_att.x, p_att.y, ball.x, ball.y)\n', (4995, 5029), False, 'from src.features.helpers.processing_v3 import get_closest_players, get_players_and_ball_indices, calculate_distance, normalize_according_to_play_direction, 
check_group_event\n'), ((5053, 5105), 'src.features.helpers.processing_v3.calculate_distance', 'calculate_distance', (['p_def.x', 'p_def.y', 'ball.x', 'ball.y'], {}), '(p_def.x, p_def.y, ball.x, ball.y)\n', (5071, 5105), False, 'from src.features.helpers.processing_v3 import get_closest_players, get_players_and_ball_indices, calculate_distance, normalize_according_to_play_direction, check_group_event\n'), ((5385, 5432), 'src.features.helpers.processing_v3.check_group_event', 'check_group_event', (['group_events', '"""pass_arrived"""'], {}), "(group_events, 'pass_arrived')\n", (5402, 5432), False, 'from src.features.helpers.processing_v3 import get_closest_players, get_players_and_ball_indices, calculate_distance, normalize_according_to_play_direction, check_group_event\n'), ((5465, 5519), 'src.features.helpers.processing_v3.check_group_event', 'check_group_event', (['group_events', '"""pass_outcome_caught"""'], {}), "(group_events, 'pass_outcome_caught')\n", (5482, 5519), False, 'from src.features.helpers.processing_v3 import get_closest_players, get_players_and_ball_indices, calculate_distance, normalize_according_to_play_direction, check_group_event\n'), ((5539, 5580), 'src.features.helpers.processing_v3.check_group_event', 'check_group_event', (['group_events', '"""tackle"""'], {}), "(group_events, 'tackle')\n", (5556, 5580), False, 'from src.features.helpers.processing_v3 import get_closest_players, get_players_and_ball_indices, calculate_distance, normalize_according_to_play_direction, check_group_event\n'), ((5607, 5655), 'src.features.helpers.processing_v3.check_group_event', 'check_group_event', (['group_events', '"""first_contact"""'], {}), "(group_events, 'first_contact')\n", (5624, 5655), False, 'from src.features.helpers.processing_v3 import get_closest_players, get_players_and_ball_indices, calculate_distance, normalize_according_to_play_direction, check_group_event\n'), ((5692, 5750), 'src.features.helpers.processing_v3.check_group_event', 'check_group_event', (['group_events', '"""pass_outcome_incomplete"""'], {}), "(group_events, 'pass_outcome_incomplete')\n", (5709, 5750), False, 'from src.features.helpers.processing_v3 import get_closest_players, get_players_and_ball_indices, calculate_distance, normalize_according_to_play_direction, check_group_event\n'), ((5777, 5825), 'src.features.helpers.processing_v3.check_group_event', 'check_group_event', (['group_events', '"""out_of_bounds"""'], {}), "(group_events, 'out_of_bounds')\n", (5794, 5825), False, 'from src.features.helpers.processing_v3 import get_closest_players, get_players_and_ball_indices, calculate_distance, normalize_according_to_play_direction, check_group_event\n'), ((6226, 6240), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (6238, 6240), False, 'from datetime import datetime\n')] |
import streamlit as st
import tensorflow as tf
import numpy
from utils.get_owm_data import get_open_weather_map_data
from utils.get_date import get_date_list_for_gmt
import plotly.graph_objects as go
from plotly import tools
import plotly.offline as py
import plotly.express as px
def app():
st.title("LSTM Model")
st.subheader('What does LSTM model do?')
	st.markdown("""<p style='text-align: justify;'>LSTM networks are an extension of recurrent neural networks (RNNs) mainly introduced to handle situations where RNNs fail. It has been so designed that the vanishing gradient problem is almost completely removed, while the training model is left unaltered. Long-time lags in certain problems are bridged using LSTMs where they also handle noise, distributed representations, and continuous values.</p>""", unsafe_allow_html=True)
st.subheader('Why we chose LSTM?')
	st.markdown("""<p style='text-align: justify;'>LSTM is well-suited to classify, process and predict time series given time lags of unknown duration. Relative insensitivity to gap length gives an advantage to LSTM over alternative RNNs, hidden Markov models and other sequence learning methods. In addition, LSTM works great because LSTM cells have a memory that can store previous timestep information and this is how it learns.</p>""", unsafe_allow_html=True)
st.subheader('LSTM model input and output')
st.markdown("Model input is 7 days daily weather data from [OpenWeatherAPI](https://openweathermap.org/api). Model input features are *Rain*, *MaxTemp*, *MinTemp*, *AvgWind*, *AvgHumidity* and *AvgPressure*. Model predicts 7 days dam occupancy rate of İstanbul using these features.", unsafe_allow_html=True)
LSTM_model_name = 'models/LSTM_model.h5'
model_lstm = tf.keras.models.load_model(LSTM_model_name)
features = get_open_weather_map_data()
prediction_lstm = model_lstm.predict(features) * 100
prediction_lstm = prediction_lstm.ravel()
date_list = get_date_list_for_gmt()
data = []
layout = go.Layout(
title= "<b>LSTM Dam Occupancy Forecasting Plot</b>",paper_bgcolor = 'rgb(248, 248, 255)',plot_bgcolor = 'rgb(248, 248, 255)',barmode = "stack",
xaxis = dict(title="Time", linecolor="#BCCCDC",showspikes=True,spikethickness=2,spikedash="dot",spikecolor= "#ffffff",spikemode="across",),
yaxis= dict(title="Dam Occupancy Rate (%)",linecolor="#021C1E"))
line_chart= go.Scatter(x=date_list, y=prediction_lstm, marker_color='rgb(0, 200, 200)' )
data.append(line_chart)
fig= go.Figure(data=data, layout=layout)
st.plotly_chart(fig)
| [
"streamlit.markdown",
"utils.get_owm_data.get_open_weather_map_data",
"plotly.graph_objects.Scatter",
"plotly.graph_objects.Figure",
"tensorflow.keras.models.load_model",
"streamlit.subheader",
"streamlit.plotly_chart",
"utils.get_date.get_date_list_for_gmt",
"streamlit.title"
] | [((296, 318), 'streamlit.title', 'st.title', (['"""LSTM Model"""'], {}), "('LSTM Model')\n", (304, 318), True, 'import streamlit as st\n'), ((322, 362), 'streamlit.subheader', 'st.subheader', (['"""What does LSTM model do?"""'], {}), "('What does LSTM model do?')\n", (334, 362), True, 'import streamlit as st\n'), ((364, 845), 'streamlit.markdown', 'st.markdown', (['"""<p style=\'text-align: justify;\'>LSTM networks are an extension of recurrent neural networks (RNNs) mainly introduced to handle situations where RNNs fail. It has been so designed that thevanishing gradient problem is almost completely removed, while the training model is left unaltered. Long-time lags in certain problems are bridged using LSTMs where they also handle noise, distributed representations, and continuous values.</p>"""'], {'unsafe_allow_html': '(True)'}), '(\n "<p style=\'text-align: justify;\'>LSTM networks are an extension of recurrent neural networks (RNNs) mainly introduced to handle situations where RNNs fail. It has been so designed that thevanishing gradient problem is almost completely removed, while the training model is left unaltered. Long-time lags in certain problems are bridged using LSTMs where they also handle noise, distributed representations, and continuous values.</p>"\n , unsafe_allow_html=True)\n', (375, 845), True, 'import streamlit as st\n'), ((843, 877), 'streamlit.subheader', 'st.subheader', (['"""Why we chose LSTM?"""'], {}), "('Why we chose LSTM?')\n", (855, 877), True, 'import streamlit as st\n'), ((879, 1344), 'streamlit.markdown', 'st.markdown', (['"""<p style=\'text-align: justify;\'>LSTM is well-suited to classify, process and predict time series given time lags of unknown duration. Relative insensitivity to gap length gives an advantage to LSTM over alternative RNNs, hidden Markov models and other sequence learningmethods. In addition, LSTM works great because LSTM cells have a memory that can store previous timestep information and this is how it learns.</p>"""'], {'unsafe_allow_html': '(True)'}), '(\n "<p style=\'text-align: justify;\'>LSTM is well-suited to classify, process and predict time series given time lags of unknown duration. Relative insensitivity to gap length gives an advantage to LSTM over alternative RNNs, hidden Markov models and other sequence learningmethods. In addition, LSTM works great because LSTM cells have a memory that can store previous timestep information and this is how it learns.</p>"\n , unsafe_allow_html=True)\n', (890, 1344), True, 'import streamlit as st\n'), ((1343, 1386), 'streamlit.subheader', 'st.subheader', (['"""LSTM model input and output"""'], {}), "('LSTM model input and output')\n", (1355, 1386), True, 'import streamlit as st\n'), ((1388, 1706), 'streamlit.markdown', 'st.markdown', (['"""Model input is 7 days daily weather data from [OpenWeatherAPI](https://openweathermap.org/api). Model input features are *Rain*, *MaxTemp*, *MinTemp*, *AvgWind*, *AvgHumidity* and *AvgPressure*. Model predicts 7 days dam occupancy rate of İstanbul using these features."""'], {'unsafe_allow_html': '(True)'}), "(\n 'Model input is 7 days daily weather data from [OpenWeatherAPI](https://openweathermap.org/api). Model input features are *Rain*, *MaxTemp*, *MinTemp*, *AvgWind*, *AvgHumidity* and *AvgPressure*. 
Model predicts 7 days dam occupancy rate of İstanbul using these features.'\n , unsafe_allow_html=True)\n", (1399, 1706), True, 'import streamlit as st\n'), ((1757, 1800), 'tensorflow.keras.models.load_model', 'tf.keras.models.load_model', (['LSTM_model_name'], {}), '(LSTM_model_name)\n', (1783, 1800), True, 'import tensorflow as tf\n'), ((1814, 1841), 'utils.get_owm_data.get_open_weather_map_data', 'get_open_weather_map_data', ([], {}), '()\n', (1839, 1841), False, 'from utils.get_owm_data import get_open_weather_map_data\n'), ((1954, 1977), 'utils.get_date.get_date_list_for_gmt', 'get_date_list_for_gmt', ([], {}), '()\n', (1975, 1977), False, 'from utils.get_date import get_date_list_for_gmt\n'), ((2381, 2456), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': 'date_list', 'y': 'prediction_lstm', 'marker_color': '"""rgb(0, 200, 200)"""'}), "(x=date_list, y=prediction_lstm, marker_color='rgb(0, 200, 200)')\n", (2391, 2456), True, 'import plotly.graph_objects as go\n'), ((2489, 2524), 'plotly.graph_objects.Figure', 'go.Figure', ([], {'data': 'data', 'layout': 'layout'}), '(data=data, layout=layout)\n', (2498, 2524), True, 'import plotly.graph_objects as go\n'), ((2526, 2546), 'streamlit.plotly_chart', 'st.plotly_chart', (['fig'], {}), '(fig)\n', (2541, 2546), True, 'import streamlit as st\n')] |
# -*- coding: utf-8 -*-
import os
ROOT = os.path.abspath(os.path.dirname(__file__))
path = lambda *args: os.path.join(ROOT, *args)
""" Template for local settings of the FST webservice (fst_web)
Please edit this file and replace all generic values with values suitable to
your particular installation.
"""
# NOTE! Always set this to False before deploying
DEBUG = True
# NOTE! Before deploying on a public, uncomment ALLOWED_HOSTS
# and add IP address and/or domain of your site
ALLOWED_HOSTS = ['localhost', '127.0.0.1', 'fst.magokoro.nu']
# Look for instance-specific settings
try:
from .instance_settings import *
except ImportError:
from .default_instance_settings import *
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': path('database/fst_demo.db')
}
}
LOG_LEVEL = "DEBUG"
# Enable this to override global DB Debug setting
# DB_DEBUG_LEVEL = "DEBUG"
# Setup mail server for sending email notifications.
# You can use any mail server you want.
# But a very simple way to get started is to use a gmail account.
EMAIL_USE_TLS = True
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_PORT = 587
# EMAIL_HOST_USER = 'your email'
# EMAIL_HOST_PASSWORD = '<PASSWORD>'
# Admins specified here receive email notifications on critical errors.
ADMINS = ()
MANAGERS = ADMINS
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = os.path.join("/dokument/")
# Site and port for hosting FST service (do not add ending '/').
FST_SITE_URL = "http://127.0.0.1:8000"
# TODO - Check if FST_INSTANCE_PREFIX can be removed
# Site and port of specific FST instance (do not add ending '/').
FST_INSTANCE_URL = os.path.join(
"http://127.0.0.1:8000",
FST_INSTANCE_PREFIX)
| [
"os.path.dirname",
"os.path.join"
] | [((1505, 1531), 'os.path.join', 'os.path.join', (['"""/dokument/"""'], {}), "('/dokument/')\n", (1517, 1531), False, 'import os\n'), ((1776, 1834), 'os.path.join', 'os.path.join', (['"""http://127.0.0.1:8000"""', 'FST_INSTANCE_PREFIX'], {}), "('http://127.0.0.1:8000', FST_INSTANCE_PREFIX)\n", (1788, 1834), False, 'import os\n'), ((58, 83), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (73, 83), False, 'import os\n'), ((106, 131), 'os.path.join', 'os.path.join', (['ROOT', '*args'], {}), '(ROOT, *args)\n', (118, 131), False, 'import os\n')] |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from google.appengine.api import users
import webapp2
# For datastore
import cgi
import urllib
from google.appengine.ext import ndb
class UserId(ndb.Model):
content = ndb.StringProperty()
date = ndb.DateTimeProperty(auto_now_add=True)
@classmethod
def query_user(cls, ancestor_key):
return cls.query(ancestor=ancestor_key).order(-cls.date)
# ************** MainHandler ************* #
class MainHandler(webapp2.RequestHandler):
def get(self):
self.response.write('Hello world!')
# ************** GetUser ************* #
class GetUser(webapp2.RequestHandler):
def get(self):
self.response.out.write('<html><body>')
client_id = self.request.get('client_id')
ancestor_key = ndb.Key("ID", client_id or "*no_id*")
userids = UserId.query_user(ancestor_key).fetch(20)
        self.response.out.write('here is something')
for userid in userids:
self.response.out.write('<blockquote>%s</blockquote>' %
cgi.escape(userid.content))
# Checks for active Google account session
# user = users.get_current_user()
# if user:
# self.response.headers['Content-Type'] = 'text/plain'
# self.response.write('Hello, ' + user.nickname())
# else:
# self.redirect(users.create_login_url(self.request.uri))
self.response.out.write('</body></html>')
def post(self):
pass
# ************** HasData ************* #
class HasData(webapp2.RequestHandler):
def get(self):
pass
#TODO does user have data
class PostData(webapp2.RequestHandler):
def post(self):
client_id = self.request.get('client_id')
chrome_user = UserId(parent=ndb.Key("ID", client_id or "*no_id*"),
content = self.request.get('client_id'))
chrome_user.put()
#TODO recieve data from client
class GetSyncData(object):
"""docstring for GetSyncData"""
def __init__(self, arg):
super(GetSyncData, self).__init__()
self.arg = arg
#implement get data for user
# property user.email() or user.user_id()
app = webapp2.WSGIApplication([
('/', MainHandler),
('/GetUser/', GetUser),
('/HasData/', HasData),
('/chrome-sync/command/', PostData),
('/GetSyncData/', GetSyncData)
], debug=True)
| [
"google.appengine.ext.ndb.Key",
"webapp2.WSGIApplication",
"cgi.escape",
"google.appengine.ext.ndb.DateTimeProperty",
"google.appengine.ext.ndb.StringProperty"
] | [((2721, 2905), 'webapp2.WSGIApplication', 'webapp2.WSGIApplication', (["[('/', MainHandler), ('/GetUser/', GetUser), ('/HasData/', HasData), (\n '/chrome-sync/command/', PostData), ('/GetSyncData/', GetSyncData)]"], {'debug': '(True)'}), "([('/', MainHandler), ('/GetUser/', GetUser), (\n '/HasData/', HasData), ('/chrome-sync/command/', PostData), (\n '/GetSyncData/', GetSyncData)], debug=True)\n", (2744, 2905), False, 'import webapp2\n'), ((773, 793), 'google.appengine.ext.ndb.StringProperty', 'ndb.StringProperty', ([], {}), '()\n', (791, 793), False, 'from google.appengine.ext import ndb\n'), ((803, 842), 'google.appengine.ext.ndb.DateTimeProperty', 'ndb.DateTimeProperty', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (823, 842), False, 'from google.appengine.ext import ndb\n'), ((1332, 1369), 'google.appengine.ext.ndb.Key', 'ndb.Key', (['"""ID"""', "(client_id or '*no_id*')"], {}), "('ID', client_id or '*no_id*')\n", (1339, 1369), False, 'from google.appengine.ext import ndb\n'), ((2324, 2361), 'google.appengine.ext.ndb.Key', 'ndb.Key', (['"""ID"""', "(client_id or '*no_id*')"], {}), "('ID', client_id or '*no_id*')\n", (2331, 2361), False, 'from google.appengine.ext import ndb\n'), ((1610, 1636), 'cgi.escape', 'cgi.escape', (['userid.content'], {}), '(userid.content)\n', (1620, 1636), False, 'import cgi\n')] |
# Comet VOEvent Broker.
from twisted.application.internet import ClientService
from comet.protocol.subscriber import VOEventSubscriberFactory
__all__ = ["makeSubscriberService"]
def makeSubscriberService(endpoint, local_ivo, validators, handlers, filters):
"""Create a reconnecting VOEvent subscriber service.
Parameters
----------
endpoint : implements `twisted.internet.interfaces.IStreamClientEndpoint`
The endpoint to which the service will connect.
local_ivo : `str` or `None`
IVOA identifier for the subscriber.
validators : `list` of implementers of `~comet.icomet.IValidator`.
Validators which will be applied to incoming events. Events which fail
validation will be rejected.
handlers : `list` of implementers of `~comet.icomet.IHandler`.
Handlers to which events which pass validation will be passed.
filters : `list` of `str`
XPath filters. Will be passed to upstream as a request to filter the
alerts being sent.
Notes
-----
    Upstream brokers may not provide support for XPath filtering; in this case,
    the filters supplied will be ignored.
Reconnection is handled according to the default policies of
`twisted.application.internet.ClientService`.
"""
factory = VOEventSubscriberFactory(local_ivo, validators, handlers, filters)
service = ClientService(endpoint, factory)
return service
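
# Hedged usage sketch (added for illustration; not part of the original module).
# It assumes a VOEvent broker listening on localhost:8099, passes empty
# validator/handler/filter lists, and uses a made-up IVOA identifier; real
# deployments supply implementers of comet.icomet.IValidator / IHandler.
if __name__ == "__main__":
    from twisted.internet import reactor
    from twisted.internet.endpoints import TCP4ClientEndpoint

    endpoint = TCP4ClientEndpoint(reactor, "localhost", 8099)
    subscriber = makeSubscriberService(
        endpoint,
        local_ivo="ivo://example/subscriber",  # hypothetical identifier
        validators=[],
        handlers=[],
        filters=[],
    )
    subscriber.startService()
    reactor.run()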
| [
"twisted.application.internet.ClientService",
"comet.protocol.subscriber.VOEventSubscriberFactory"
] | [((1300, 1366), 'comet.protocol.subscriber.VOEventSubscriberFactory', 'VOEventSubscriberFactory', (['local_ivo', 'validators', 'handlers', 'filters'], {}), '(local_ivo, validators, handlers, filters)\n', (1324, 1366), False, 'from comet.protocol.subscriber import VOEventSubscriberFactory\n'), ((1381, 1413), 'twisted.application.internet.ClientService', 'ClientService', (['endpoint', 'factory'], {}), '(endpoint, factory)\n', (1394, 1413), False, 'from twisted.application.internet import ClientService\n')] |
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import base64
import json
import os
import re
import shutil
import zlib
from StringIO import StringIO
try:
# Create a block to work around evil sys.modules manipulation in
# email/__init__.py that triggers pylint false positives.
# pylint: disable=E0611,F0401
from email.Message import Message
from email.Utils import formatdate
except ImportError:
raise
from buildbot.process.properties import Properties
from buildbot.schedulers.trysched import TryBase
from twisted.internet import defer, reactor, utils
from twisted.mail.smtp import SMTPSenderFactory
from twisted.python import log
from common.twisted_util.response import StringResponse
from master import gitiles_poller
from master.try_job_base import BadJobfile
class CbuildbotConfigs(object):
# Valid 'etc' builder targets. Specifically, this ensures:
# - The build name doesn't begin with a flag ('--')
# - The build name doesn't contain spaces (to spill into extra args).
_ETC_TARGET_RE = re.compile(r'^[a-zA-Z][\w-]+\w$')
def __init__(self, configs, etc_builder=None):
"""Holds base state of the master's try job related configuration.
configs (dict): A dictionary of all known CrOS configs. This will be as
up-to-date as the Chromite pin.
etc_builder (str): If not None, the name of the etc builder.
"""
self.configs = configs
self.etc_builder = etc_builder
def AddBuildBucketHooks(self, c):
"""Build mutation hook called via BuildBucket when scheduling builds.
The cbuildbot config is specified in the `cbb_config` property. The
callback transforms that property to an actual waterfall builder name by
mapping it based on its config.
If an 'etc' builder is configured and the config name is unknown, it will be
mapped to the 'etc' builder if possible.
A tryserver BuildBucket build takes the form:
- Empty `builder_name` parameter. If one is supplied, it will be ignored.
- BuildBot changes can be added by including one or more BuildBucket
`changes` parameters: [{'author': {'email': '<EMAIL>'}}].
- `cbb_config` property must be set to the build's cbuildbot config target.
- `extra_args` property (optional) may be a JSON list of additional
parameters to pass to the tryjob.
- `slaves_request` property (optional) may be a JSON list of slaves on which
this build may run.
- Additional BuildBot properties may be added.
NOTE: Internally, all of these parameters are converted to BuildBot
properties and referenced as such in other areas of code. The Git poller
also constructs the same property set, so code paths converge.
"""
def params_hook(params, _build):
# Map `cbb_config` to a builder name.
properties = params.get('properties', {})
config_name = properties.get('cbb_config')
if not config_name:
raise ValueError('Missing required `cbb_config` property.')
params['builder_name'] = self.GetBuilderForConfig(config_name)
# Validate other fields.
if not isinstance(properties.get('extra_args', []), list):
raise ValueError('`extra_args` property is not a list.')
if not isinstance(properties.get('slaves_request', []), list):
raise ValueError('`slaves_request` is not a list.')
# Add mandatory properties to build.
params['properties'] = properties
c['buildbucket_params_hook'] = params_hook
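
  # Added illustration: a BuildBucket tryjob request that satisfies the contract
  # documented above carries properties shaped roughly like the following (the
  # config name and extra arguments here are hypothetical):
  #
  #   {
  #     'properties': {
  #       'cbb_config': 'amd64-generic-full',
  #       'extra_args': ['--hwtest'],
  #       'slaves_request': [],
  #     },
  #   }
  #
  # params_hook() then derives params['builder_name'] from 'cbb_config'.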
def GetBuilderForConfig(self, config_name):
config = self.configs.get(config_name)
if config:
return config['_template'] or config_name
self.ValidateEtcBuild(config_name)
return self.etc_builder
def ValidateEtcBuild(self, config_name):
"""Tests whether a specified build config_name is candidate for etc build.
Raises a ValueError if an etc build cannot be dispatched.
"""
if not self.etc_builder:
raise ValueError('etc builder is not configured.')
if not config_name:
raise ValueError('Empty config name')
if not self._ETC_TARGET_RE.match(config_name):
raise ValueError('invalid etc config name (%s).' % (config_name,))
def translate_v1_to_v2(parsed_job):
"""Translate tryjob desc from V1 to V2."""
parsed_job.setdefault('extra_args', []).append('--remote-trybot')
parsed_job['version'] = 2
def translate_v2_to_v3(parsed_job):
"""Translate tryjob desc from V2 to V3."""
# V3 --remote-patches format is not backwards compatible.
if any(a.startswith('--remote-patches')
for a in parsed_job.get('extra_args', ())):
raise BadJobfile('Cannot translate --remote-patches from tryjob v.2 to '
'v.3. Please run repo sync.')
parsed_job['version'] = 3
class CrOSTryJobGit(TryBase):
"""Poll a Git server to grab patches to try."""
# Name of property source for generated properties.
_PROPERTY_SOURCE = 'Try Job'
# The version of tryjob that the master is expecting.
_TRYJOB_FORMAT_VERSION = 3
# Functions that translate from one tryjob version to another.
_TRANSLATION_FUNCS = {
1 : translate_v1_to_v2,
2 : translate_v2_to_v3,
}
# Template path URL component to retrieve the Base64 contents of a file from
# Gitiles.
_GITILES_PATH_TMPL = '%(repo)s/+/%(revision)s/%(path)s?format=text'
@classmethod
def updateJobDesc(cls, parsed_job):
"""Ensure job description is in the format we expect."""
while parsed_job['version'] < cls._TRYJOB_FORMAT_VERSION:
prev_ver = parsed_job['version']
translation_func = cls._TRANSLATION_FUNCS[parsed_job['version']]
translation_func(parsed_job)
if parsed_job['version'] <= prev_ver:
raise AssertionError('translation function %s not incrementing version!'
% str(translation_func))
def __init__(self, name, pollers, smtp_host, from_addr, reply_to,
email_footer, cbuildbot_configs, properties=None):
"""Initialize the class.
Arguments:
name: See TryBase.__init__().
pollers: A list of job repo git pit pollers.
smtp_host: The smtp host for sending out error emails.
from_addr: The email address to display as being sent from.
reply_to: The email address to put in the 'Reply-To' email header field.
email_footer: The footer to append to any emails sent out.
cbuildbot_configs: (CbuildbotConfigs) A configuration set instance. Any
'bot' request outside of this list will go to an 'etc' builder, if
available.
properties: See TryBase.__init__()
"""
TryBase.__init__(self, name, [], properties or {})
self.pollers = pollers
self.smtp_host = smtp_host
self.from_addr = from_addr
self.reply_to = reply_to
self.email_footer = email_footer
self.cbb = cbuildbot_configs
def startService(self):
TryBase.startService(self)
self.startConsumingChanges()
@staticmethod
def load_job(data):
try:
return json.loads(data)
except ValueError as e:
raise BadJobfile("Failed to parse job JSON: %s" % (e.message,))
def validate_job(self, parsed_job):
# A list of field description tuples of the format:
# (name, type, required).
fields = [('name', basestring, True),
('user', basestring, True),
('email', list, True),
('bot', list, True),
('extra_args', list, False),
('version', int, True),
('slaves_request', list, False),
]
error_msgs = []
for name, f_type, required in fields:
val = parsed_job.get(name)
if val is None:
if required:
error_msgs.append('Option %s missing!' % name)
elif not isinstance(val, f_type):
error_msgs.append('Option %s of wrong type!' % name)
# If we're an 'etc' job, we must have bots defined to execute.
for bot in parsed_job['bot']:
if bot in self.cbb.configs:
continue
# Assert that this is a valid 'etc' build.
try:
self.cbb.ValidateEtcBuild(bot)
except ValueError as e:
error_msgs.append("Invalid 'etc' build (%s): %s" % (bot, e.message))
if error_msgs:
raise BadJobfile('\n'.join(error_msgs))
def get_props(self, config, options):
"""Overriding base class method."""
props = Properties()
props.setProperty('slaves_request', options.get('slaves_request', []),
self._PROPERTY_SOURCE)
props.setProperty('cbb_config', config, self._PROPERTY_SOURCE)
extra_args = options.get('extra_args')
if extra_args:
# This field can be quite large, and exceed BuildBot property limits.
# Compress it, Base64 encode it, and prefix it with "z:" so the consumer
# knows its size.
extra_args = 'z:' + base64.b64encode(zlib.compress(json.dumps(
extra_args)))
props.setProperty('cbb_extra_args', extra_args,
self._PROPERTY_SOURCE)
return props
def create_buildset(self, ssid, parsed_job):
"""Overriding base class method."""
dlist = []
buildset_name = '%s:%s' % (parsed_job['user'], parsed_job['name'])
for bot in parsed_job['bot']:
builder_name = self.cbb.GetBuilderForConfig(bot)
log.msg("Creating '%s' try job(s) %s for %s" % (builder_name, ssid, bot))
dlist.append(self.addBuildsetForSourceStamp(ssid=ssid,
reason=buildset_name,
external_idstring=buildset_name,
builderNames=[builder_name],
properties=self.get_props(bot, parsed_job)))
return defer.DeferredList(dlist)
def send_validation_fail_email(self, name, emails, error):
"""Notify the user via email about the tryjob error."""
html_content = []
html_content.append('<html><body>')
body = """
Your tryjob with name '%(name)s' failed the validation step. This is most
likely because <br>you are running an older version of cbuildbot. Please run
<br><code>repo sync chromiumos/chromite</code> and try again. If you still
see<br>this message please contact <EMAIL>.<br>
"""
html_content.append(body % {'name': name})
html_content.append("Extra error information:")
html_content.append(error.replace('\n', '<br>\n'))
html_content.append(self.email_footer)
m = Message()
m.set_payload('<br><br>'.join(html_content), 'utf8')
m.set_type("text/html")
m['Date'] = formatdate(localtime=True)
m['Subject'] = 'Tryjob failed validation'
m['From'] = self.from_addr
m['Reply-To'] = self.reply_to
result = defer.Deferred()
sender_factory = SMTPSenderFactory(self.from_addr, emails,
StringIO(m.as_string()), result)
reactor.connectTCP(self.smtp_host, 25, sender_factory)
@defer.inlineCallbacks
def gotChange(self, change, important):
try:
yield self._gotChangeImpl(change, important)
except Exception as e:
log.msg('Exception in try job scheduler: %s' % (e,))
import traceback
traceback.print_exc()
@defer.inlineCallbacks
def _gotChangeImpl(self, change, _important):
"""Process the received data and send the queue buildset."""
# Find poller that this change came from.
for poller in self.pollers:
if not isinstance(poller, gitiles_poller.GitilesPoller):
continue
if poller.repo_url == change.repository:
break
else:
raise BadJobfile(
'Received tryjob from unsupported repository %s' % change.repository)
# pylint: disable=W0631
file_contents = yield self.loadGitilesChangeFile(poller, change)
parsed = {}
try:
parsed = self.load_job(file_contents)
self.validate_job(parsed)
self.updateJobDesc(parsed)
except BadJobfile as e:
self.send_validation_fail_email(parsed.setdefault('name', ''),
parsed['email'], str(e))
raise
# The sourcestamp/buildsets created will be merge-able.
ssid = yield self.master.db.sourcestamps.addSourceStamp(
branch=change.branch,
revision=change.revision,
project=change.project,
repository=change.repository,
changeids=[change.number])
yield self.create_buildset(ssid, parsed)
@defer.inlineCallbacks
def loadGitilesChangeFile(self, poller, change):
if len(change.files) != 1:
# We only accept changes with 1 diff file.
raise BadJobfile(
'Try job with too many files %s' % (','.join(change.files)))
# Load the contents of the modified file.
path = self._GITILES_PATH_TMPL % {
'repo': poller.repo_path,
'revision': change.revision,
'path': change.files[0],
}
contents_b64 = yield poller.agent.request('GET', path, retry=5,
protocol=StringResponse.Get)
defer.returnValue(base64.b64decode(contents_b64))
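

# Added illustration, not used by the master itself: get_props() above encodes
# `cbb_extra_args` as 'z:' + base64(zlib(json)); a consumer of that property
# can reverse the encoding like this.
def _decode_cbb_extra_args_example(value):
  """Return the decoded extra args list for an encoded `cbb_extra_args` value."""
  if value.startswith('z:'):
    return json.loads(zlib.decompress(base64.b64decode(value[2:])))
  return value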
| [
"json.loads",
"email.Message.Message",
"twisted.internet.reactor.connectTCP",
"re.compile",
"twisted.python.log.msg",
"email.Utils.formatdate",
"json.dumps",
"base64.b64decode",
"buildbot.schedulers.trysched.TryBase.startService",
"master.try_job_base.BadJobfile",
"twisted.internet.defer.DeferredList",
"buildbot.process.properties.Properties",
"traceback.print_exc",
"buildbot.schedulers.trysched.TryBase.__init__",
"twisted.internet.defer.Deferred"
] | [((1143, 1177), 're.compile', 're.compile', (['"""^[a-zA-Z][\\\\w-]+\\\\w$"""'], {}), "('^[a-zA-Z][\\\\w-]+\\\\w$')\n", (1153, 1177), False, 'import re\n'), ((4697, 4801), 'master.try_job_base.BadJobfile', 'BadJobfile', (['"""Cannot translate --remote-patches from tryjob v.2 to v.3. Please run repo sync."""'], {}), "(\n 'Cannot translate --remote-patches from tryjob v.2 to v.3. Please run repo sync.'\n )\n", (4707, 4801), False, 'from master.try_job_base import BadJobfile\n'), ((6685, 6735), 'buildbot.schedulers.trysched.TryBase.__init__', 'TryBase.__init__', (['self', 'name', '[]', '(properties or {})'], {}), '(self, name, [], properties or {})\n', (6701, 6735), False, 'from buildbot.schedulers.trysched import TryBase\n'), ((6955, 6981), 'buildbot.schedulers.trysched.TryBase.startService', 'TryBase.startService', (['self'], {}), '(self)\n', (6975, 6981), False, 'from buildbot.schedulers.trysched import TryBase\n'), ((8421, 8433), 'buildbot.process.properties.Properties', 'Properties', ([], {}), '()\n', (8431, 8433), False, 'from buildbot.process.properties import Properties\n'), ((9673, 9698), 'twisted.internet.defer.DeferredList', 'defer.DeferredList', (['dlist'], {}), '(dlist)\n', (9691, 9698), False, 'from twisted.internet import defer, reactor, utils\n'), ((10384, 10393), 'email.Message.Message', 'Message', ([], {}), '()\n', (10391, 10393), False, 'from email.Message import Message\n'), ((10495, 10521), 'email.Utils.formatdate', 'formatdate', ([], {'localtime': '(True)'}), '(localtime=True)\n', (10505, 10521), False, 'from email.Utils import formatdate\n'), ((10646, 10662), 'twisted.internet.defer.Deferred', 'defer.Deferred', ([], {}), '()\n', (10660, 10662), False, 'from twisted.internet import defer, reactor, utils\n'), ((10802, 10856), 'twisted.internet.reactor.connectTCP', 'reactor.connectTCP', (['self.smtp_host', '(25)', 'sender_factory'], {}), '(self.smtp_host, 25, sender_factory)\n', (10820, 10856), False, 'from twisted.internet import defer, reactor, utils\n'), ((7076, 7092), 'json.loads', 'json.loads', (['data'], {}), '(data)\n', (7086, 7092), False, 'import json\n'), ((9342, 9415), 'twisted.python.log.msg', 'log.msg', (['("Creating \'%s\' try job(s) %s for %s" % (builder_name, ssid, bot))'], {}), '("Creating \'%s\' try job(s) %s for %s" % (builder_name, ssid, bot))\n', (9349, 9415), False, 'from twisted.python import log\n'), ((11502, 11587), 'master.try_job_base.BadJobfile', 'BadJobfile', (["('Received tryjob from unsupported repository %s' % change.repository)"], {}), "('Received tryjob from unsupported repository %s' % change.repository\n )\n", (11512, 11587), False, 'from master.try_job_base import BadJobfile\n'), ((12948, 12978), 'base64.b64decode', 'base64.b64decode', (['contents_b64'], {}), '(contents_b64)\n', (12964, 12978), False, 'import base64\n'), ((7133, 7190), 'master.try_job_base.BadJobfile', 'BadJobfile', (["('Failed to parse job JSON: %s' % (e.message,))"], {}), "('Failed to parse job JSON: %s' % (e.message,))\n", (7143, 7190), False, 'from master.try_job_base import BadJobfile\n'), ((11018, 11070), 'twisted.python.log.msg', 'log.msg', (["('Exception in try job scheduler: %s' % (e,))"], {}), "('Exception in try job scheduler: %s' % (e,))\n", (11025, 11070), False, 'from twisted.python import log\n'), ((11100, 11121), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (11119, 11121), False, 'import traceback\n'), ((8921, 8943), 'json.dumps', 'json.dumps', (['extra_args'], {}), '(extra_args)\n', (8931, 8943), False, 'import json\n')] |
# Copyright 2018 <NAME> LLC, <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
class Example(object):
def __init__(self):
# things we'll figure out as we scan an example
self.name = ""
self.see_files = []
self.description = []
self.code = []
class Record(object):
def __init__(self):
# things which we'll figure out as we scan the example
self.name = ""
self.purpose = ""
self.provider_names = []
self.related_modules = []
self.category = ""
self.description = []
self.examples = []
self.current_example = Example()
self.phase = 'module'
self.count = 0
def set_phase(self, phase):
self.phase = phase
print("---------------------------------------------------------")
print("%s phase | %s" % (self.count, self.phase))
print("---------------------------------------------------------")
@classmethod
def from_file(cls, filename):
r = cls()
r.name = os.path.basename(filename).replace(".py","")
print("=========================================================")
print("%s M | %s" % ('0', r.name))
data = open(filename).read().splitlines()
for line in data:
if not r.handle_line(line):
break
return r
def load_command(self, line):
if "DESCRIPTION" in line or '----' in line or '====' in line:
pass
elif not ":" in line:
# commands must contain a colon unless they are blocks or DESCRIPTION starters
return (False, None, None)
if not line.startswith("#"):
# commands must be in comments
return (False, None, None)
if ":" in line:
tokens = line.split(":")
if tokens[0].upper() != tokens[0]:
# commands must be in all caps. This is done
# so we don't get confused by colons in URLs and so on.
print("REJECT: %s" % tokens[0])
return (False, None, None)
# at this point we are sure it is a command
if '#------------' in line.replace(" ",""):
return (True, 'start_block', None)
if '#============' in line.replace(" ",""):
return (True, 'end_block', None)
# throw away the leading comment
line = line.replace("#","",1).strip()
if line.startswith("DESCRIPTION"):
return (True, 'description', None)
tokens = line.split(':', 1)
command = tokens[0].replace("#","").strip().lower()
rest = tokens[1].strip()
return (True, command, rest)
def handle_line(self, line):
self.count = self.count + 1
(is_command, command, rest) = self.load_command(line)
print("%s line | %s" % (self.count, line))
#if command == 'policy':
# return False
if is_command:
#if command not in [ 'start_block', 'end_block' ]:
# print("keyword: %s => %s" % (command, rest))
self.handle_command(command, rest)
return True
#print("PHASE=%s" % self.phase)
#print("LINE=%s" % line)
if self.phase == 'module':
if not line.startswith("#") or line.replace("#","").strip():
raise Exception("the module phase should be all commands")
elif self.phase == 'description':
# module description lines must be comments
self.handle_module_description(line)
elif self.phase == 'example':
if not line.startswith("#") or line.replace("#","").strip():
raise Exception("the example phase should be all commands")
elif self.phase == 'example_description':
self.handle_example_description(self.current_example, line)
elif self.phase == 'example_code':
self.handle_example_code(self.current_example, line)
elif self.phase == 'limbo':
#print("ignoring line while in limbo: %s" % line)
pass
elif self.phase == 'done':
#print("ignoring line while done: %s" % line)
pass
else:
raise Exception("unknown phase: %s" % self.phase)
return True # continue
def handle_command(self, command, rest):
#print("<PHASE: %s, COMMAND: %s, REST: %s>" % (self.phase, command, rest))
if self.phase == 'done':
return False
if self.phase == 'module':
# from module mode the only state transition is into module_description mode
# when we find the description command
if command not in ['start_block', 'end_block']:
print("%s set | %-20s | %s" % (self.count, command, rest))
if command == 'module':
pass
elif command == 'start_block':
pass
elif command == 'category':
self.category = rest
elif command == 'purpose':
self.purpose = rest
elif command == 'related':
self.related_modules = [ x.strip() for x in rest.split(",") ]
elif command == 'providers':
self.providers = [ x.strip() for x in rest.split(",") ]
elif command == 'fyi':
pass
elif command == 'description':
print("---------------------------------------------------------")
self.set_phase('description')
elif command == 'end_block':
raise Exception("unexpected end block without description")
else:
raise Exception("unknown command: %s" % command)
elif self.phase == 'description':
# in description phase end block moves us into limbo until we find
# another example start block
if command == 'end_block':
self.set_phase('limbo')
else:
raise Exception("invalid command: %s" % command)
elif self.phase == 'limbo':
# in limbo, seeing a start block moves us into example phase
if command == 'start_block':
self.set_phase('example')
else:
raise Exception("invalid command: %s" % command)
elif self.phase == 'example':
# in example phase we can only move into example description phase
# by hitting the description command
if command == 'example':
print("---------------------------------------------------------")
print("%s exmp | %s" % (self.count, rest))
print("---------------------------------------------------------")
self.current_example.name = rest
elif command == 'setup':
self.set_phase('done')
elif command == 'description':
print("MOV!")
self.set_phase('example_description')
elif command == 'see_files' or command == 'see_file':
self.current_example.see_files = [ x.strip() for x in rest.split(",")]
else:
raise Exception("unknown command: %s" % command)
elif self.phase == 'example_description':
# in example description phase we can only move into example code phase
# by hitting an end block
if command == 'end_block':
print("-------")
self.set_phase('example_code')
else:
raise Exception("unknown command: %s" % command)
elif self.phase == 'example_code':
# in example code phase we can only move back into example phase by
# hitting a start block
if command == 'start_block':
self.examples.append(self.current_example)
self.current_example = Example()
self.set_phase('example')
else:
raise Exception("unknown command: %s" % command)
elif self.phase == 'done':
return False
else:
raise Exception("unknown phase: %s" % self.phase)
def handle_example_description(self, example, line):
# could be a comment or the code example, we want to keep both
if line.startswith("#"):
line = line.replace("#","")
line = line.strip()
print("%s desc | %s" % (self.count, line))
example.description.append(line)
def handle_example_code(self, example, line):
line = line.rstrip()
example.code.append(line)
print("%s code | %s" % (self.count, line))
def handle_module_description(self, line):
if line.startswith("#"):
line = line.replace("#","")
line = line.strip()
if line:
print("%s mdesc | %s" % (self.count, line))
self.description.append(line)
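

# Added illustration: based on the command handling above, a module file this
# scanner accepts is annotated roughly as follows (all names are made up).
# Commands live in comments, are ALL CAPS before the colon, '#-----' opens a
# block and '#=====' closes it:
#
#   #---------------------------------------------------------------
#   # MODULE: service
#   # CATEGORY: services
#   # PURPOSE: manage system services
#   # DESCRIPTION:
#   # Free-form description lines for the module go here.
#   #===============================================================
#   #---------------------------------------------------------------
#   # EXAMPLE: restart a service
#   # DESCRIPTION:
#   # Free-form description lines for the example go here.
#   #===============================================================
#   (example code lines follow until the next '#-----' block)
#
# A parsed record is then built with Record.from_file(path_to_module).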
| [
"os.path.basename"
] | [((1572, 1598), 'os.path.basename', 'os.path.basename', (['filename'], {}), '(filename)\n', (1588, 1598), False, 'import os\n')] |
from matplotlib import colors
import numpy as np
class SaveOutput:
def __init__(self):
self.outputs = []
def __call__(self, module, module_in, module_out):
self.outputs.append(module_out)
def clear(self):
self.outputs = []
class MidpointNormalize(colors.Normalize):
def __init__(self, vmin=None, vmax=None, vcenter=None, clip=False):
self.vcenter = vcenter
colors.Normalize.__init__(self, vmin, vmax, clip)
def __call__(self, value, clip=None):
# I'm ignoring masked values and all kinds of edge cases to make a
# simple example...
x, y = [self.vmin, self.vcenter, self.vmax], [self.vmin, self.vcenter, self.vmax]
return np.ma.masked_array(np.interp(value, x, y))
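

if __name__ == "__main__":
    # Hedged usage sketch (added for illustration): center a diverging colormap
    # on a chosen midpoint. The sample data, colormap, and limits are arbitrary.
    import matplotlib.pyplot as plt

    data = np.linspace(-1.0, 3.0, 100).reshape(10, 10)
    norm = MidpointNormalize(vmin=-1.0, vmax=3.0, vcenter=0.0)
    plt.imshow(data, cmap="RdBu_r", norm=norm)
    plt.colorbar()
    plt.show()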
| [
"matplotlib.colors.Normalize.__init__",
"numpy.interp"
] | [((417, 466), 'matplotlib.colors.Normalize.__init__', 'colors.Normalize.__init__', (['self', 'vmin', 'vmax', 'clip'], {}), '(self, vmin, vmax, clip)\n', (442, 466), False, 'from matplotlib import colors\n'), ((737, 759), 'numpy.interp', 'np.interp', (['value', 'x', 'y'], {}), '(value, x, y)\n', (746, 759), True, 'import numpy as np\n')] |
import pytest
from copy import deepcopy
from gefest.core.structure.point import Point
from gefest.core.structure.polygon import Polygon
from gefest.core.structure.structure import Structure
from gefest.core.algs.postproc.resolve_errors import *
from gefest.core.algs.geom.validation import *
# marking length and width for testing polygon
poly_width = 10
poly_length = 20
# creating a testing polygons via corner points
rectangle_points = [(-1, 40), (-1, poly_length+40), (-poly_width-10, poly_length+40), (-poly_width-10, 40)]
out_bounds_rectangle_poly = Polygon('rectangle', points=[Point(*coords) for coords in rectangle_points])
triangle_points = [(1, 1), (poly_width, poly_length), (1, poly_length)]
unclosed_triangle_poly = Polygon('triangle', points=[Point(*coords) for coords in triangle_points])
incorrect_points = [(5, 5), (5, poly_length), (8, poly_length), (5, 5), (5, 30)]
incorrect_poly = Polygon('incorrect_poly', points=[Point(*coords) for coords in incorrect_points])
domain = Domain()
def test_unclosed_poly():
input_structure = Structure([unclosed_triangle_poly])
observed_structure = postprocess(input_structure, domain)
assert unclosed_poly(input_structure, domain)
assert not unclosed_poly(observed_structure, domain)
def test_self_intersection():
input_structure = Structure([incorrect_poly])
observed_structure = postprocess(input_structure, domain)
assert self_intersection(input_structure)
assert not self_intersection(observed_structure)
def test_out_of_bound():
input_structure = Structure([out_bounds_rectangle_poly])
observed_structure = postprocess(input_structure, domain)
assert out_of_bound(input_structure, domain)
assert not out_of_bound(observed_structure, domain)
def test_fixed_polys():
domain = Domain(fixed_points=[[[15, 30],
[40, 30],
[15, 40]]])
poly_like_fixed = Polygon('like_fixed', points=[Point(15, 30), Point(40, 30), Point(15, 40)])
input_structure = Structure([poly_like_fixed, unclosed_triangle_poly])
observed_structure = postprocess(input_structure, domain)
assert all([np.isclose(len(observed_structure.polygons), 2),
'like_fixed' not in [poly.id for poly in observed_structure.polygons],
'fixed' in [poly.id for poly in observed_structure.polygons]])
def test_too_close():
same_poly = deepcopy(unclosed_triangle_poly)
same_poly.id = 'same_triangle'
input_structure = Structure([unclosed_triangle_poly, same_poly])
observed_structure = postprocess(input_structure, domain)
print(observed_structure.polygons)
assert np.isclose(len(observed_structure.polygons), 1)
| [
"gefest.core.structure.point.Point",
"gefest.core.structure.structure.Structure",
"copy.deepcopy"
] | [((1059, 1094), 'gefest.core.structure.structure.Structure', 'Structure', (['[unclosed_triangle_poly]'], {}), '([unclosed_triangle_poly])\n', (1068, 1094), False, 'from gefest.core.structure.structure import Structure\n'), ((1319, 1346), 'gefest.core.structure.structure.Structure', 'Structure', (['[incorrect_poly]'], {}), '([incorrect_poly])\n', (1328, 1346), False, 'from gefest.core.structure.structure import Structure\n'), ((1558, 1596), 'gefest.core.structure.structure.Structure', 'Structure', (['[out_bounds_rectangle_poly]'], {}), '([out_bounds_rectangle_poly])\n', (1567, 1596), False, 'from gefest.core.structure.structure import Structure\n'), ((2046, 2098), 'gefest.core.structure.structure.Structure', 'Structure', (['[poly_like_fixed, unclosed_triangle_poly]'], {}), '([poly_like_fixed, unclosed_triangle_poly])\n', (2055, 2098), False, 'from gefest.core.structure.structure import Structure\n'), ((2433, 2465), 'copy.deepcopy', 'deepcopy', (['unclosed_triangle_poly'], {}), '(unclosed_triangle_poly)\n', (2441, 2465), False, 'from copy import deepcopy\n'), ((2523, 2569), 'gefest.core.structure.structure.Structure', 'Structure', (['[unclosed_triangle_poly, same_poly]'], {}), '([unclosed_triangle_poly, same_poly])\n', (2532, 2569), False, 'from gefest.core.structure.structure import Structure\n'), ((588, 602), 'gefest.core.structure.point.Point', 'Point', (['*coords'], {}), '(*coords)\n', (593, 602), False, 'from gefest.core.structure.point import Point\n'), ((762, 776), 'gefest.core.structure.point.Point', 'Point', (['*coords'], {}), '(*coords)\n', (767, 776), False, 'from gefest.core.structure.point import Point\n'), ((942, 956), 'gefest.core.structure.point.Point', 'Point', (['*coords'], {}), '(*coords)\n', (947, 956), False, 'from gefest.core.structure.point import Point\n'), ((1978, 1991), 'gefest.core.structure.point.Point', 'Point', (['(15)', '(30)'], {}), '(15, 30)\n', (1983, 1991), False, 'from gefest.core.structure.point import Point\n'), ((1993, 2006), 'gefest.core.structure.point.Point', 'Point', (['(40)', '(30)'], {}), '(40, 30)\n', (1998, 2006), False, 'from gefest.core.structure.point import Point\n'), ((2008, 2021), 'gefest.core.structure.point.Point', 'Point', (['(15)', '(40)'], {}), '(15, 40)\n', (2013, 2021), False, 'from gefest.core.structure.point import Point\n')] |
from unittest.mock import MagicMock, Mock
from i3ipc.aio import Con
import i3_live_tree.tree_serializer # noqa: F401
class MockConSerializer(Mock, Con):
"""Mock a generic i3ipc.aio.Con for serialization purposes
This Mock is meant to ease testing of i3ipc.aio.Con serialization methods,
    which are monkey patched in i3_live_tree.tree_serializer.
In order to achieve this, the mock inherits all the method implementations
of i3ipc.aio.Con, most importantly the serialization ones. However,
whatever is needed for serialization, both properties and methods, is
mocked and can be injected in the constructor, in order to ease the
creation of mock instances.
"""
def __init__(self, *args, name=None, layout=None, focused=False,
nodes=iter(()), **kwargs):
Mock.__init__(self, *args, **kwargs)
self.focused = focused
self.layout = layout
self.name = name
self.nodes = nodes
class MockConNavigation(MagicMock):
"""Mock an i3ipc.aio.Con for navigation purposes
This Mock is meant to be used when testing i3ipc event handlers. It mocks
all the necessary methods and properties, by returning `self` when an
i3ipc.aio.Con instance is needed for the sake of simplicity.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def find_focused(self):
"""Return the focused window"""
return self
def workspace(self):
"""Return the containing workspace"""
return self
class MockI3(Mock):
"""Mock an i3ipc.aio.Connection"""
def __init__(self, *args, tree, **kwargs):
super().__init__(*args, **kwargs)
self.tree = tree
async def get_tree(self):
"""Return the i3 tree asynchronously"""
return self.tree
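

if __name__ == "__main__":
    # Hedged sketch (added for illustration): compose the mocks into a tiny i3
    # tree for a serializer test. Names and layout values are arbitrary; the
    # serialization methods patched in by i3_live_tree.tree_serializer are
    # exercised by the test suite rather than here.
    leaf = MockConSerializer(name="editor", focused=True)
    root = MockConSerializer(name="workspace 1", layout="splith", nodes=iter([leaf]))
    i3 = MockI3(tree=root)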
| [
"unittest.mock.Mock.__init__"
] | [((821, 857), 'unittest.mock.Mock.__init__', 'Mock.__init__', (['self', '*args'], {}), '(self, *args, **kwargs)\n', (834, 857), False, 'from unittest.mock import MagicMock, Mock\n')] |
import token
from tokenize import tokenize
from brownie import Contract, chain
from brownie.exceptions import ContractNotFound
from cachetools.func import ttl_cache
from .utils.cache import memory
from .utils.multicall2 import fetch_multicall
from .interfaces.ERC20 import ERC20ABI
import ypricemagic.magic
import ypricemagic.utils.utils
from .constants import STABLECOINS, dai, usdc, usdt, wbtc, weth, sushi
# NOTE: If this is failing to pull a price for a token you need, it's likely because that token requires a special swap path.
# Please add a viable swap path below to fetch price data successfully.
#project.load()
if chain.id == 1:
FACTORIES = {
"uniswap": "0x5C69bEe701ef814a2B6a3EDD4B1652CB9cc5aA6f",
"sushiswap": "0xC0AEe478e3658e2610c5F7A4A2E1777cE9e4f2Ac",
}
ROUTERS = {
"uniswap": Contract("0x7a250d5630B4cF539739dF2C5dAcb4c659F2488D"),
"sushiswap": Contract("0xD9E1CE17F2641F24AE83637AB66A2CCA9C378B9F"),
}
SPECIAL_PATHS = {
"sushiswap": {
"0xEF69B5697f2Fb0345cC680210fD39b593a2f9684": ["<KEY>","0x6B3595068778DD592e39A122f4f5a5cF09C90fE2",weth,usdc]
,"0xbf2179859fc6D5BEE9Bf9158632Dc51678a4100e": ["0xbf2179859fc6D5BEE9Bf9158632Dc51678a4100e","0xC28E27870558cF22ADD83540d2126da2e4b464c2",weth,usdc]
,"0x3166C570935a7D8554c8f4eA792ff965D2EFe1f2": ["0x3166C570935a7D8554c8f4eA792ff965D2EFe1f2","<KEY>",usdc]
,"0xE6279E1c65DD41b30bA3760DCaC3CD8bbb4420D6": ["0xE6279E1c65DD41b30bA3760DCaC3CD8bbb4420D6","0x87F5F9eBE40786D49D35E1B5997b07cCAA8ADbFF",weth,usdc]
,"0x4954Db6391F4feB5468b6B943D4935353596aEC9": ["0x4954Db6391F4feB5468b6B943D4935353596aEC9",usdc]
,"0x1E18821E69B9FAA8e6e75DFFe54E7E25754beDa0": ["0x1E18821E69B9FAA8e6e75DFFe54E7E25754beDa0","<KEY>","0x6B3595068778DD592e39A122f4f5a5cF09C90fE2",weth,usdc]
,"0xfC1E690f61EFd961294b3e1Ce3313fBD8aa4f85d": ["0xfC1E690f61EFd961294b3e1Ce3313fBD8aa4f85d","0xba100000625a3754423978a60c9317c58a424e3D",weth,usdc]
,"0xBA50933C268F567BDC86E1aC131BE072C6B0b71a": ["0xBA50933C268F567BDC86E1aC131BE072C6B0b71a",weth,usdc]
,"0x6102407f07029892eB5Ff02164ADFaFb85f4d222": ["0x6102407f07029892eB5Ff02164ADFaFb85f4d222",usdt]
,"0x85034b3b2e292493D029443455Cc62ab669573B3": ["0x85034b3b2e292493D029443455Cc62ab669573B3","0x1f9840a85d5aF5bf1D1762F925BDADdC4201F984",weth,usdc]
,"0xb220D53F7D0f52897Bcf25E47c4c3DC0bac344F8": ["0xb220D53F7D0f52897Bcf25E47c4c3DC0bac344F8", usdc]
,"0x383518188C0C6d7730D91b2c03a03C837814a899": ["0x383518188C0C6d7730D91b2c03a03C837814a899",dai]
,"0xafcE9B78D409bF74980CACF610AFB851BF02F257": ["0xafcE9B78D409bF74980CACF610AFB851BF02F257",wbtc,weth,usdc]
},
"uniswap": {
}
}
elif chain.id == 56:
ROUTERS = {
"pancakeswapv2": Contract("0x10ED43C718714eb63d5aA57B78B54704E256024E"),
"pancakeswapv1": Contract("0x05fF2B0DB69458A0750badebc4f9e13aDd608C7F")
}
FACTORIES = {
"pancakeswapv2": "0xcA143Ce32Fe78f1f7019d7d551a6402fC5350c73",
"pancakeswapv1": "0xBCfCcbde45cE874adCB698cC183deBcF17952812"
}
SPECIAL_PATHS = {
"pancakeswapv2": {
},
"pancakeswapv1": {
}
}
elif chain.id == 137:
ROUTERS = {
"quickswap": Contract("0xa5E0829CaCEd8fFDD4De3c43696c57F7D7A678ff")
}
FACTORIES = {
"quickswap": "0x5757371414417b8C6CAad45bAeF941aBc7d3Ab32",
}
SPECIAL_PATHS = {
"quickswap": {
}
}
FACTORY_TO_ROUTER = {FACTORIES[name]: ROUTERS[name] for name in FACTORIES}
FACTORY_TO_PROTOCOL = {FACTORIES[name]: name for name in FACTORIES}
@ttl_cache(ttl=36000)
def get_price(token_in, token_out=usdc, router="uniswap", block=None, paired_against=weth):
"""
Calculate a price based on Uniswap Router quote for selling one `token_in`.
Always uses intermediate WETH pair if `[token_in,weth,token_out]` swap path available.
"""
if chain.id == 56 and token_out == usdc:
busd = Contract("0xe9e7CEA3DedcA5984780Bafc599bD69ADd087D56")
token_out = busd
tokens = [str(token) for token in [token_in, token_out]]
amount_in = 10 ** ypricemagic.utils.utils.get_decimals_with_override(tokens[0])
if str(token_in) in STABLECOINS:
return 1
elif str(paired_against) in STABLECOINS and str(token_out) in STABLECOINS:
path = [token_in, paired_against]
elif weth in (token_in, token_out):
path = [token_in, token_out]
elif paired_against == sushi and token_out != sushi:
path = [token_in,sushi,weth,token_out]
elif str(token_in) in SPECIAL_PATHS[router].keys() and str(token_out) in STABLECOINS:
path = SPECIAL_PATHS[router][str(token_in)]
elif chain.id == 56: #bsc
from .constants import cake, wbnb
if wbnb in (token_in, token_out):
path = [token_in, token_out]
elif cake in (token_in, token_out):
path = [token_in, token_out]
else:
path = [token_in,wbnb,token_out]
    elif chain.id == 137: #polygon
from .constants import wmatic
if wmatic in (token_in, token_out):
path = [token_in, token_out]
else:
path = [token_in,wmatic,token_out]
else:
path = [token_in, weth, token_out]
fees = 0.997 ** (len(path) - 1)
if router in ROUTERS:
router = ROUTERS[router]
try:
quote = router.getAmountsOut(amount_in, path, block_identifier=block)
amount_out = quote[-1] / 10 ** ypricemagic.utils.utils.get_decimals_with_override(str(path[-1]))
return amount_out / fees
except ValueError as e:
return
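
# Hedged usage sketch (added for illustration). With an active brownie mainnet
# connection, WETH can be quoted in USDC through the default uniswap router:
#
#   >>> get_price(weth)                      # latest block
#   >>> get_price(weth, router='sushiswap')  # same pair via sushiswap
#   >>> get_price(weth, block=12_000_000)    # historical quote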
@ttl_cache(ttl=600)
def get_price_v1(asset, block=None):
factory = Contract("0xc0a47dFe034B400B47bDaD5FecDa2621de6c4d95")
try:
exchange = Contract(factory.getExchange(asset))
eth_bought = exchange.getTokenToEthInputPrice(10 ** ypricemagic.utils.utils.get_decimals_with_override(asset), block_identifier=block)
exchange = Contract(factory.getExchange(usdc))
usdc_bought = exchange.getEthToTokenInputPrice(eth_bought, block_identifier=block) / 1e6
fees = 0.997 ** 2
return usdc_bought / fees
except (ContractNotFound, ValueError) as e:
pass
@memory.cache()
def is_uniswap_pool(address):
try:
return Contract(address).factory() in FACTORY_TO_ROUTER
except (ValueError, OverflowError, AttributeError):
pass
return False
@ttl_cache(ttl=600)
def lp_price(address, block=None):
""" Get Uniswap/Sushiswap LP token price. """
def extrapolate_balance_if_needed():
nonlocal balances
if balances[0] and not balances[1]:
balances[1] = balances[0]
if balances[1] and not balances[0]:
balances[0] = balances[1]
return balances
pair = Contract(address)
if chain.id not in [56, 137]: # No multicall2 on bsc or poly
factory, token0, token1, supply, reserves = fetch_multicall(
[pair, "factory"],
[pair, "token0"],
[pair, "token1"],
[pair, "totalSupply"],
[pair, "getReserves"],
block=block
)
else:
factory = pair.factory(block_identifier = block)
token0 = pair.token0(block_identifier = block)
token1 = pair.token1(block_identifier = block)
supply = pair.totalSupply(block_identifier = block)
reserves = pair.getReserves(block_identifier = block)
router = FACTORY_TO_PROTOCOL[factory]
tokens = [ypricemagic.utils.utils.Contract_with_erc20_fallback(token) for token in [token0, token1]]
price0 = get_price(tokens[0], paired_against=tokens[1], router=router, block=block)
price1 = get_price(tokens[1], paired_against=tokens[0], router=router, block=block)
prices = [price0,price1]
scales = [10 ** ypricemagic.utils.utils.get_decimals_with_override(str(token)) for token in tokens]
supply = supply / 1e18
try:
balances = [res / scale * price for res, scale, price in zip(reserves, scales, prices)]
except TypeError as e: # If can't get price via router, try to get from elsewhere
if not price0:
try:
price0 = ypricemagic.magic.get_price(tokens[0], block)
except ypricemagic.magic.PriceError:
price0 is None
if not price1:
try:
price1 = ypricemagic.magic.get_price(tokens[1], block)
except ypricemagic.magic.PriceError:
price1 is None
prices = [price0,price1]
balances = [None,None] # [res / scale * price for res, scale, price in zip(reserves, scales, prices)]
if price0:
balances[0] = reserves[0] / scales[0] * price0
if price1:
balances[1] = reserves[1] / scales[1] * price1
balances = extrapolate_balance_if_needed()
try:
return sum(balances) / supply
except TypeError:
        return
| [
"brownie.Contract",
"cachetools.func.ttl_cache"
] | [((3709, 3729), 'cachetools.func.ttl_cache', 'ttl_cache', ([], {'ttl': '(36000)'}), '(ttl=36000)\n', (3718, 3729), False, 'from cachetools.func import ttl_cache\n'), ((5725, 5743), 'cachetools.func.ttl_cache', 'ttl_cache', ([], {'ttl': '(600)'}), '(ttl=600)\n', (5734, 5743), False, 'from cachetools.func import ttl_cache\n'), ((6541, 6559), 'cachetools.func.ttl_cache', 'ttl_cache', ([], {'ttl': '(600)'}), '(ttl=600)\n', (6550, 6559), False, 'from cachetools.func import ttl_cache\n'), ((5795, 5849), 'brownie.Contract', 'Contract', (['"""0xc0a47dFe034B400B47bDaD5FecDa2621de6c4d95"""'], {}), "('0xc0a47dFe034B400B47bDaD5FecDa2621de6c4d95')\n", (5803, 5849), False, 'from brownie import Contract, chain\n'), ((6913, 6930), 'brownie.Contract', 'Contract', (['address'], {}), '(address)\n', (6921, 6930), False, 'from brownie import Contract, chain\n'), ((845, 899), 'brownie.Contract', 'Contract', (['"""0x7a250d5630B4cF539739dF2C5dAcb4c659F2488D"""'], {}), "('0x7a250d5630B4cF539739dF2C5dAcb4c659F2488D')\n", (853, 899), False, 'from brownie import Contract, chain\n'), ((922, 976), 'brownie.Contract', 'Contract', (['"""0xD9E1CE17F2641F24AE83637AB66A2CCA9C378B9F"""'], {}), "('0xD9E1CE17F2641F24AE83637AB66A2CCA9C378B9F')\n", (930, 976), False, 'from brownie import Contract, chain\n'), ((4069, 4123), 'brownie.Contract', 'Contract', (['"""0xe9e7CEA3DedcA5984780Bafc599bD69ADd087D56"""'], {}), "('0xe9e7CEA3DedcA5984780Bafc599bD69ADd087D56')\n", (4077, 4123), False, 'from brownie import Contract, chain\n'), ((2876, 2930), 'brownie.Contract', 'Contract', (['"""0x10ED43C718714eb63d5aA57B78B54704E256024E"""'], {}), "('0x10ED43C718714eb63d5aA57B78B54704E256024E')\n", (2884, 2930), False, 'from brownie import Contract, chain\n'), ((2957, 3011), 'brownie.Contract', 'Contract', (['"""0x05fF2B0DB69458A0750badebc4f9e13aDd608C7F"""'], {}), "('0x05fF2B0DB69458A0750badebc4f9e13aDd608C7F')\n", (2965, 3011), False, 'from brownie import Contract, chain\n'), ((3347, 3401), 'brownie.Contract', 'Contract', (['"""0xa5E0829CaCEd8fFDD4De3c43696c57F7D7A678ff"""'], {}), "('0xa5E0829CaCEd8fFDD4De3c43696c57F7D7A678ff')\n", (3355, 3401), False, 'from brownie import Contract, chain\n'), ((6403, 6420), 'brownie.Contract', 'Contract', (['address'], {}), '(address)\n', (6411, 6420), False, 'from brownie import Contract, chain\n')] |
from typing import Tuple
import torch
from torch.autograd import Function
import torch.nn as nn
from metrics.pointops import pointops_cuda
import numpy as np
class FurthestSampling(Function):
@staticmethod
def forward(ctx, xyz, m):
"""
input: xyz: (b, n, 3) and n > m, m: int32
output: idx: (b, m)
"""
assert xyz.is_contiguous()
b, n, _ = xyz.size()
idx = torch.cuda.IntTensor(b, m)
temp = torch.cuda.FloatTensor(b, n).fill_(1e10)
pointops_cuda.furthestsampling_cuda(b, n, m, xyz, temp, idx)
return idx
@staticmethod
def backward(xyz, a=None):
return None, None
furthestsampling = FurthestSampling.apply
class Gathering(Function):
@staticmethod
def forward(ctx, features, idx):
"""
input: features: (b, c, n), idx : (b, m) tensor
output: (b, c, m)
"""
assert features.is_contiguous()
assert idx.is_contiguous()
b, c, n = features.size()
m = idx.size(1)
output = torch.cuda.FloatTensor(b, c, m)
pointops_cuda.gathering_forward_cuda(b, c, n, m, features, idx, output)
ctx.for_backwards = (idx, c, n)
return output
@staticmethod
def backward(ctx, grad_out):
idx, c, n = ctx.for_backwards
b, m = idx.size()
grad_features = torch.cuda.FloatTensor(b, c, n).zero_()
grad_out_data = grad_out.data.contiguous()
pointops_cuda.gathering_backward_cuda(b, c, n, m, grad_out_data, idx, grad_features.data)
return grad_features, None
gathering = Gathering.apply
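
# Hedged usage sketch (added for illustration): downsample a point cloud to m
# centroids with furthest point sampling, then gather their features. This
# needs a CUDA device and the compiled pointops_cuda extension; shapes follow
# the docstrings above and the sizes are arbitrary.
#
#   xyz = torch.rand(2, 1024, 3).cuda()       # (b, n, 3)
#   feats = torch.rand(2, 32, 1024).cuda()    # (b, c, n)
#   idx = furthestsampling(xyz, 256)          # (b, m) sampled indices
#   sampled = gathering(feats, idx)           # (b, c, m) gathered features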
class NearestNeighbor(Function):
@staticmethod
def forward(ctx, unknown: torch.Tensor, known: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Find the three nearest neighbors of unknown in known
input: unknown: (b, n, 3), known: (b, m, 3)
output: dist2: (b, n, 3) l2 distance to the three nearest neighbors
idx: (b, n, 3) index of 3 nearest neighbors
"""
assert unknown.is_contiguous()
assert known.is_contiguous()
b, n, _ = unknown.size()
m = known.size(1)
dist2 = torch.cuda.FloatTensor(b, n, 3)
idx = torch.cuda.IntTensor(b, n, 3)
pointops_cuda.nearestneighbor_cuda(b, n, m, unknown, known, dist2, idx)
return torch.sqrt(dist2), idx
@staticmethod
def backward(ctx, a=None, b=None):
return None, None
nearestneighbor = NearestNeighbor.apply
class Interpolation(Function):
@staticmethod
def forward(ctx, features: torch.Tensor, idx: torch.Tensor, weight: torch.Tensor) -> torch.Tensor:
"""
Performs weight linear interpolation on 3 features
input: features: (b, c, m) features descriptors to be interpolated from
idx: (b, n, 3) three nearest neighbors of the target features in features
weight: (b, n, 3) weights
output: (b, c, n) tensor of the interpolated features
"""
assert features.is_contiguous()
assert idx.is_contiguous()
assert weight.is_contiguous()
b, c, m = features.size()
n = idx.size(1)
ctx.interpolation_for_backward = (idx, weight, m)
output = torch.cuda.FloatTensor(b, c, n)
pointops_cuda.interpolation_forward_cuda(b, c, m, n, features, idx, weight, output)
return output
@staticmethod
def backward(ctx, grad_out: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
input: grad_out: (b, c, n)
output: grad_features: (b, c, m), None, None
"""
idx, weight, m = ctx.interpolation_for_backward
b, c, n = grad_out.size()
grad_features = torch.cuda.FloatTensor(b, c, m).zero_()
grad_out_data = grad_out.data.contiguous()
pointops_cuda.interpolation_backward_cuda(b, c, n, m, grad_out_data, idx, weight, grad_features.data)
return grad_features, None, None
interpolation = Interpolation.apply
class Grouping(Function):
@staticmethod
def forward(ctx, features: torch.Tensor, idx: torch.Tensor) -> torch.Tensor:
"""
        input: features: (b, c, n), idx : (b, m, nsample) containing the indices of features to group with
output: (b, c, m, nsample)
"""
assert features.is_contiguous()
assert idx.is_contiguous()
b, c, n = features.size()
_, m, nsample = idx.size()
output = torch.cuda.FloatTensor(b, c, m, nsample)
pointops_cuda.grouping_forward_cuda(b, c, n, m, nsample, features, idx, output)
ctx.for_backwards = (idx, n)
return output
@staticmethod
def backward(ctx, grad_out: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""
input: grad_out: (b, c, m, nsample)
output: (b, c, n), None
"""
idx, n = ctx.for_backwards
b, c, m, nsample = grad_out.size()
grad_features = torch.cuda.FloatTensor(b, c, n).zero_()
grad_out_data = grad_out.data.contiguous()
pointops_cuda.grouping_backward_cuda(b, c, n, m, nsample, grad_out_data, idx, grad_features.data)
return grad_features, None
grouping = Grouping.apply
class GroupingInt(Function):
@staticmethod
def forward(ctx, features: torch.Tensor, idx: torch.Tensor) -> torch.Tensor:
"""
        input: features: (b, c, n), idx : (b, m, nsample) containing the indices of features to group with
output: (b, c, m, nsample)
"""
assert features.is_contiguous()
assert idx.is_contiguous()
b, c, n = features.size()
_, m, nsample = idx.size()
output = torch.cuda.LongTensor(b, c, m, nsample)
pointops_cuda.grouping_int_forward_cuda(b, c, n, m, nsample, features, idx, output)
return output
@staticmethod
def backward(ctx, a=None):
return None, None
grouping_int = GroupingInt.apply
class BallQuery(Function):
@staticmethod
def forward(ctx, radius: float, nsample: int, xyz: torch.Tensor, new_xyz: torch.Tensor) -> torch.Tensor:
"""
input: radius: float, radius of the balls
nsample: int, maximum number of features in the balls
xyz: torch.Tensor, (b, n, 3) xyz coordinates of the features
new_xyz: torch.Tensor, (b, m, 3) centers of the ball query
        output: (b, m, nsample) tensor with the indices of the features that form the query balls
"""
assert xyz.is_contiguous()
assert new_xyz.is_contiguous()
b, n, _ = xyz.size()
m = new_xyz.size(1)
idx = torch.cuda.IntTensor(b, m, nsample).zero_()
pointops_cuda.ballquery_cuda(b, n, m, radius, nsample, new_xyz, xyz, idx)
return idx
@staticmethod
def backward(ctx, a=None):
return None, None, None, None
ballquery = BallQuery.apply
class FeatureDistribute(Function):
@staticmethod
def forward(ctx, max_xyz: torch.Tensor, xyz: torch.Tensor) -> torch.Tensor:
"""
:param ctx:
:param max_xyz: (b, n, 3)
:param xyz: (b, m, 3)
:return: distribute_idx: (b, m)
"""
assert max_xyz.is_contiguous()
assert xyz.is_contiguous()
b, n, _ = max_xyz.size()
m = xyz.size(1)
distribute_idx = torch.cuda.IntTensor(b, m).zero_()
pointops_cuda.featuredistribute_cuda(b, n, m, max_xyz, xyz, distribute_idx)
return distribute_idx
@staticmethod
def backward(ctx, a=None):
return None, None
featuredistribute = FeatureDistribute.apply
class FeatureGather(Function):
@staticmethod
def forward(ctx, max_feature: torch.Tensor, distribute_idx: torch.Tensor) -> torch.Tensor:
'''
:param ctx:
:param max_feature: (b, c, n)
:param distribute_idx: (b, m)
:return: distribute_feature: (b, c, m)
'''
assert max_feature.is_contiguous()
assert distribute_idx.is_contiguous()
b, c, n = max_feature.size()
m = distribute_idx.size(1)
distribute_feature = torch.cuda.FloatTensor(b, c, m).zero_()
pointops_cuda.featuregather_forward_cuda(b, n, m, c, max_feature, distribute_idx, distribute_feature)
ctx.for_backwards = (distribute_idx, n)
return distribute_feature
@staticmethod
def backward(ctx, grad_distribute_feature: torch.Tensor):
'''
:param ctx:
:param grad_distribute_feature: (b, c, m)
:return: grad_max_feature: (b, c, n), None
'''
distribute_idx, n = ctx.for_backwards
b, c, m = grad_distribute_feature.size()
grad_max_feature = torch.cuda.FloatTensor(b, c, n).zero_()
grad_distribute_feature_data = grad_distribute_feature.data.contiguous()
pointops_cuda.featuregather_backward_cuda(b, n, m, c, grad_distribute_feature_data, distribute_idx, grad_max_feature.data)
return grad_max_feature, None
featuregather = FeatureGather.apply
class LabelStatBallRange(Function):
@staticmethod
def forward(ctx, radius: float, xyz: torch.Tensor, new_xyz: torch.Tensor, label_stat: torch.Tensor) -> torch.Tensor:
'''
:param ctx:
:param radius:
:param xyz: (b, n, 3)
:param new_xyz: (b, m, 3)
:param label_stat: (b, n, nclass)
:return: new_label_stat: (b, m, nclass)
'''
assert xyz.is_contiguous()
assert new_xyz.is_contiguous()
assert label_stat.is_contiguous()
b, n, nclass = label_stat.size()
m = new_xyz.size(1)
new_label_stat = torch.cuda.IntTensor(b, m, nclass).zero_()
pointops_cuda.labelstat_ballrange_cuda(b, n, m, radius, nclass, new_xyz, xyz, label_stat, new_label_stat)
return new_label_stat
@staticmethod
def backward(ctx, a=None):
return None, None, None, None
labelstat_ballrange = LabelStatBallRange.apply
class LabelStatIdx(Function):
@staticmethod
def forward(ctx, nsample: int, label_stat: torch.Tensor, idx: torch.Tensor) -> torch.Tensor:
'''
:param ctx:
:param nsample:
:param label_stat: (b, n, nclass)
:param idx: (b, m, nsample)
:return: new_label_stat: (b, m, nclass)
'''
assert label_stat.is_contiguous()
assert idx.is_contiguous()
b, n, nclass = label_stat.size()
m = idx.size(1)
new_label_stat = torch.cuda.IntTensor(b, m, nclass).zero_()
pointops_cuda.labelstat_idx_cuda(b, n, m, nsample, nclass, label_stat, idx, new_label_stat)
return new_label_stat
@staticmethod
def backward(ctx, a=None):
return None, None, None
labelstat_idx = LabelStatIdx.apply
class LabelStatAndBallQuery(Function):
@staticmethod
def forward(ctx, radius: float, nsample: int, xyz: torch.Tensor, new_xyz: torch.Tensor, label_stat: torch.Tensor):
'''
:param ctx:
:param radius:
:param nsample:
:param xyz: (b, n, 3)
:param new_xyz: (b, m, 3)
:param label_stat: (b, n, nclass)
:return: new_label_stat: (b, m, nclass) idx: (b, m, nsample)
'''
assert xyz.is_contiguous()
assert new_xyz.is_contiguous()
assert label_stat.is_contiguous()
b, n, nclass = label_stat.size()
m = new_xyz.size(1)
new_label_stat = torch.cuda.IntTensor(b, m, nclass).zero_()
idx = torch.cuda.IntTensor(b, m, nsample).zero_()
pointops_cuda.labelstat_and_ballquery_cuda(b, n, m, radius, nsample, nclass, new_xyz, xyz, label_stat, idx, new_label_stat)
return new_label_stat, idx
@staticmethod
def backward(ctx, a=None, b=None):
return None, None, None, None, None
labelstat_and_ballquery = LabelStatAndBallQuery.apply
def pairwise_distances(x, y=None):
'''
Input: x is a Nxd matrix
           y is an optional Mxd matrix
Output: dist is a NxM matrix where dist[i,j] is the square norm between x[i,:] and y[j,:]
if y is not given then use 'y=x'.
i.e. dist[i,j] = ||x[i,:]-y[j,:]||^2
'''
x_norm = (x ** 2).sum(1).view(-1, 1)
if y is not None:
y_t = torch.transpose(y, 0, 1)
y_norm = (y ** 2).sum(1).view(1, -1)
else:
y_t = torch.transpose(x, 0, 1)
y_norm = x_norm.view(1, -1)
dist = x_norm + y_norm - 2.0 * torch.mm(x, y_t)
return torch.clamp(dist, 0.0, np.inf)
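
# Added illustration: pairwise squared distances between two small CPU tensors;
# entry [i, j] is ||x[i] - y[j]||**2.
#
#   x = torch.tensor([[0.0, 0.0], [1.0, 0.0]])
#   y = torch.tensor([[0.0, 3.0]])
#   pairwise_distances(x, y)   # tensor([[ 9.], [10.]])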
class KNNQueryNaive(Function):
@staticmethod
def forward(ctx, nsample: int, xyz: torch.Tensor, new_xyz: torch.Tensor = None) -> Tuple[torch.Tensor]:
"""
KNN Indexing
input: nsample: int32, Number of neighbor
xyz: (b, n, 3) coordinates of the features
               new_xyz: (b, m, 3) centroids
output: idx: (b, m, nsample)
"""
if new_xyz is None:
new_xyz = xyz
b, m, _ = new_xyz.size()
n = xyz.size(1)
'''
idx = torch.zeros(b, m, nsample).int().cuda()
for i in range(b):
dist = pairwise_distances(new_xyz[i, :, :], xyz[i, :, :])
[_, idxs] = torch.sort(dist, dim=1)
idx[i, :, :] = idxs[:, 0:nsample]
'''
# '''
# new_xyz_repeat = new_xyz.repeat(1, 1, n).view(b, m * n, 3)
# xyz_repeat = xyz.repeat(1, m, 1).view(b, m * n, 3)
# dist = (new_xyz_repeat - xyz_repeat).pow(2).sum(dim=2).view(b, m, n)
dist = (new_xyz.repeat(1, 1, n).view(b, m * n, 3) - xyz.repeat(1, m, 1).view(b, m * n, 3)).pow(2).sum(dim=2).view(b, m, n)
[_, idxs] = torch.sort(dist, dim=2)
idx = idxs[:, :, 0:nsample].int()
# '''
return idx
@staticmethod
def backward(ctx):
return None, None, None
knnquery_naive = KNNQueryNaive.apply
class KNNQuery(Function):
@staticmethod
def forward(ctx, nsample: int, xyz: torch.Tensor, new_xyz: torch.Tensor = None) -> Tuple[torch.Tensor]:
"""
KNN Indexing
input: nsample: int32, Number of neighbor
xyz: (b, n, 3) coordinates of the features
               new_xyz: (b, m, 3) centroids
output: idx: (b, m, nsample)
( dist2: (b, m, nsample) )
"""
if new_xyz is None:
new_xyz = xyz
assert xyz.is_contiguous()
assert new_xyz.is_contiguous()
b, m, _ = new_xyz.size()
n = xyz.size(1)
idx = torch.cuda.IntTensor(b, m, nsample).zero_()
dist2 = torch.cuda.FloatTensor(b, m, nsample).zero_()
pointops_cuda.knnquery_cuda(b, n, m, nsample, xyz, new_xyz, idx, dist2)
return idx
@staticmethod
def backward(ctx, a=None):
return None, None, None
knnquery = KNNQuery.apply
class KNNQueryExclude(Function):
@staticmethod
def forward(ctx, nsample: int, xyz: torch.Tensor, new_xyz: torch.Tensor = None) -> Tuple[torch.Tensor]:
"""
KNN Indexing
input: nsample: int32, Number of neighbor
xyz: (b, n, 3) coordinates of the features
               new_xyz: (b, m, 3) centroids
        output: idx: (b, m, nsample)
"""
if new_xyz is None:
new_xyz = xyz
b, m, _ = new_xyz.size()
n = xyz.size(1)
'''
idx = torch.zeros(b, m, nsample).int().cuda()
for i in range(b):
dist = pairwise_distances(new_xyz[i, :, :], xyz[i, :, :])
[_, idxs] = torch.sort(dist, dim=1)
idx[i, :, :] = idxs[:, 0:nsample]
'''
# '''
# new_xyz_repeat = new_xyz.repeat(1, 1, n).view(b, m * n, 3)
# xyz_repeat = xyz.repeat(1, m, 1).view(b, m * n, 3)
# dist = (new_xyz_repeat - xyz_repeat).pow(2).sum(dim=2).view(b, m, n)
dist = (new_xyz.repeat(1, 1, n).view(b, m * n, 3) - xyz.repeat(1, m, 1).view(b, m * n, 3)).pow(2).sum(dim=2).view(b, m, n)
[_, idxs] = torch.sort(dist, dim=2)
idx = idxs[:, :, 1:nsample+1].int()
# '''
return idx
@staticmethod
def backward(ctx):
return None, None, None
knnquery_exclude = KNNQueryExclude.apply
class Le_QueryAndGroup_SameSize(nn.Module):
"""
Groups with a ball query of radius
parameters:
radius: float32, Radius of ball
nsample: int32, Maximum number of features to gather in the ball
"""
def __init__(self, radius=None, nsample=32, use_xyz=True):
super(Le_QueryAndGroup_SameSize, self).__init__()
self.radius, self.nsample, self.use_xyz = radius, nsample, use_xyz
def forward(self, xyz: torch.Tensor, new_xyz: torch.Tensor = None, features: torch.Tensor = None, idx: torch.Tensor = None) -> torch.Tensor:
"""
input: xyz: (b, n, 3) coordinates of the features
               new_xyz: (b, n, 3) centroids
features: (b, c, n)
idx: idx of neighbors
# idxs: (b, n)
output: new_features: (b, c+3, m, nsample)
# grouped_idxs: (b, m, nsample)
"""
assert xyz.size() == new_xyz.size()
if new_xyz is None:
new_xyz = xyz
if idx is None:
if self.radius is not None:
idx = ballquery(self.radius, self.nsample, xyz, new_xyz)
else:
# idx = knnquery_naive(self.nsample, xyz, new_xyz) # (b, m, nsample)
idx = knnquery(self.nsample, xyz, new_xyz) # (b, m, nsample)
xyz_trans = xyz.transpose(1, 2).contiguous()
grouped_xyz = grouping(xyz_trans, idx) # (b, 3, m, nsample)
# grouped_idxs = grouping(idxs.unsqueeze(1).float(), idx).squeeze(1).int() # (b, m, nsample)
grouped_xyz -= new_xyz.transpose(1, 2).unsqueeze(-1)
if features is not None:
grouped_features = grouping(features, idx) # (b, c, m, nsample)
if self.use_xyz:
#new_features = torch.cat([grouped_xyz, grouped_features], dim=1) # (b, c+3, m, nsample) # le
new_features = grouped_features # (b, c, m, nsample)
else:
new_features = grouped_features
else:
assert self.use_xyz, "Cannot have not features and not use xyz as a feature!"
new_features = grouped_xyz
return grouped_xyz, new_features
class QueryAndGroup(nn.Module):
"""
Groups with a ball query of radius
parameters:
radius: float32, Radius of ball
nsample: int32, Maximum number of features to gather in the ball
"""
def __init__(self, radius=None, nsample=32, use_xyz=True):
super(QueryAndGroup, self).__init__()
self.radius, self.nsample, self.use_xyz = radius, nsample, use_xyz
def forward(self, xyz: torch.Tensor, new_xyz: torch.Tensor = None, features: torch.Tensor = None, idx: torch.Tensor = None) -> torch.Tensor:
"""
input: xyz: (b, n, 3) coordinates of the features
               new_xyz: (b, m, 3) centroids
features: (b, c, n)
idx: idx of neighbors
# idxs: (b, n)
output: new_features: (b, c+3, m, nsample)
# grouped_idxs: (b, m, nsample)
"""
if new_xyz is None:
new_xyz = xyz
if idx is None:
if self.radius is not None:
idx = ballquery(self.radius, self.nsample, xyz, new_xyz)
else:
# idx = knnquery_naive(self.nsample, xyz, new_xyz) # (b, m, nsample)
idx = knnquery(self.nsample, xyz, new_xyz) # (b, m, nsample)
xyz_trans = xyz.transpose(1, 2).contiguous()
grouped_xyz = grouping(xyz_trans, idx) # (b, 3, m, nsample)
# grouped_idxs = grouping(idxs.unsqueeze(1).float(), idx).squeeze(1).int() # (b, m, nsample)
grouped_xyz -= new_xyz.transpose(1, 2).unsqueeze(-1)
if features is not None:
grouped_features = grouping(features, idx)
if self.use_xyz:
new_features = torch.cat([grouped_xyz, grouped_features], dim=1) # (b, c+3, m, nsample)
else:
new_features = grouped_features
else:
            assert self.use_xyz, "Cannot have no features and not use xyz as a feature!"
new_features = grouped_xyz
return new_features
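# --- Added usage sketch (illustrative; not part of the original module) ---
# Assumes CUDA + pointops_cuda; with radius=None the grouper falls back to kNN.
def _query_and_group_example():
    grouper = QueryAndGroup(radius=None, nsample=32, use_xyz=True)
    xyz = torch.rand(2, 1024, 3).cuda()            # (b, n, 3) coordinates
    new_xyz = xyz[:, :256, :].contiguous()          # (b, m, 3) query centroids
    features = torch.rand(2, 64, 1024).cuda()       # (b, c, n) per-point features
    out = grouper(xyz, new_xyz, features)           # (b, c + 3, m, nsample)
    return out.shape                                # torch.Size([2, 67, 256, 32])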
class QueryAndGroup_Dilate(nn.Module):
"""
Groups with a ball query of radius
parameters:
radius: float32, Radius of ball
nsample: int32, Maximum number of features to gather in the ball
"""
def __init__(self, radius=None, nsample=32, use_xyz=True):
super(QueryAndGroup_Dilate, self).__init__()
self.radius, self.nsample, self.use_xyz = radius, nsample, use_xyz
def forward(self, xyz: torch.Tensor, new_xyz: torch.Tensor = None, features: torch.Tensor = None, idx: torch.Tensor = None) -> torch.Tensor:
"""
input: xyz: (b, n, 3) coordinates of the features
               new_xyz: (b, m, 3) centroids
features: (b, c, n)
idx: idx of neighbors
# idxs: (b, n)
output: new_features: (b, c+3, m, nsample)
# grouped_idxs: (b, m, nsample)
"""
if new_xyz is None:
new_xyz = xyz
if idx is None:
if self.radius is not None:
idx = ballquery(self.radius, 2*self.nsample, xyz, new_xyz)
else:
# idx = knnquery_naive(self.nsample, xyz, new_xyz) # (b, m, nsample)
idx = knnquery(2*self.nsample, xyz, new_xyz) # (b, m, nsample)
idx2 = np.array([i for i in range(2*self.nsample)])
np.random.shuffle(idx2)
idx2 = idx2[:self.nsample]
idx = idx[:, :, idx2]
xyz_trans = xyz.transpose(1, 2).contiguous()
grouped_xyz = grouping(xyz_trans, idx) # (b, 3, m, nsample)
# grouped_idxs = grouping(idxs.unsqueeze(1).float(), idx).squeeze(1).int() # (b, m, nsample)
grouped_xyz -= new_xyz.transpose(1, 2).unsqueeze(-1)
if features is not None:
grouped_features = grouping(features, idx)
if self.use_xyz:
new_features = torch.cat([grouped_xyz, grouped_features], dim=1) # (b, c+3, m, nsample)
else:
new_features = grouped_features
else:
            assert self.use_xyz, "Cannot have no features and not use xyz as a feature!"
new_features = grouped_xyz
return new_features
class Le_QueryAndGroup(nn.Module):
"""
Groups with a ball query of radius
parameters:
radius: float32, Radius of ball
nsample: int32, Maximum number of features to gather in the ball
"""
def __init__(self, radius=None, nsample=32, use_xyz=True):
super(Le_QueryAndGroup, self).__init__()
self.radius, self.nsample, self.use_xyz = radius, nsample, use_xyz
def forward(self, xyz: torch.Tensor, new_xyz: torch.Tensor = None, features: torch.Tensor = None, idx: torch.Tensor = None) -> torch.Tensor:
"""
input: xyz: (b, n, 3) coordinates of the features
               new_xyz: (b, m, 3) centroids
features: (b, c, n)
idx: idx of neighbors
# idxs: (b, n)
output: new_features: (b, c+3, m, nsample)
# grouped_idxs: (b, m, nsample)
"""
if new_xyz is None:
new_xyz = xyz
if idx is None:
if self.radius is not None:
idx = ballquery(self.radius, self.nsample, xyz, new_xyz)
else:
# idx = knnquery_naive(self.nsample, xyz, new_xyz) # (b, m, nsample)
idx = knnquery(self.nsample, xyz, new_xyz) # (b, m, nsample)
xyz_trans = xyz.transpose(1, 2).contiguous()
grouped_xyz = grouping(xyz_trans, idx) # (b, 3, m, nsample)
# grouped_idxs = grouping(idxs.unsqueeze(1).float(), idx).squeeze(1).int() # (b, m, nsample)
grouped_xyz -= new_xyz.transpose(1, 2).unsqueeze(-1)
if features is not None:
grouped_features = grouping(features, idx) # (b, c, m, nsample)
if self.use_xyz:
#new_features = torch.cat([grouped_xyz, grouped_features], dim=1) # (b, c+3, m, nsample) # le
new_features = grouped_features # (b, c, m, nsample)
else:
new_features = grouped_features
else:
            assert self.use_xyz, "Cannot have no features and not use xyz as a feature!"
new_features = grouped_xyz
return grouped_xyz, new_features
class Gen_QueryAndGroupXYZ(nn.Module):
"""
Groups with a ball query of radius
parameters:
radius: float32, Radius of ball
nsample: int32, Maximum number of features to gather in the ball
"""
def __init__(self, radius=None, nsample=32, use_xyz=True):
super(Gen_QueryAndGroupXYZ, self).__init__()
self.radius, self.nsample, self.use_xyz = radius, nsample, use_xyz
#def forward(self, xyz: torch.Tensor, new_xyz: torch.Tensor = None, features: torch.Tensor = None, idx: torch.Tensor = None) -> torch.Tensor:
def forward(self, xyz: torch.Tensor, new_xyz: torch.Tensor = None) -> torch.Tensor:
"""
input: xyz: (b, n, 3) coordinates of the features
               new_xyz: (b, m, 3) centroids
features: (b, c, n)
idx: idx of neighbors
# idxs: (b, n)
output: new_features: (b, c+3, m, nsample)
# grouped_idxs: (b, m, nsample)
"""
if new_xyz is None:
new_xyz = xyz
#if idx is None:
if self.radius is not None:
idx = ballquery(self.radius, self.nsample, xyz, new_xyz)
else:
idx = knnquery(self.nsample, xyz, new_xyz) # (b, m, nsample)
xyz_trans = xyz.transpose(1, 2).contiguous() # BxNx3 -> Bx3xN
grouped_xyz = grouping(xyz_trans, idx) # (b, 3, m, nsample)
return grouped_xyz
class Le_QueryAndGroup_OnlyFeature(nn.Module):
"""
Groups with a ball query of radius
parameters:
radius: float32, Radius of ball
nsample: int32, Maximum number of features to gather in the ball
"""
def __init__(self, radius=None, nsample=32, use_xyz=True):
super(Le_QueryAndGroup_OnlyFeature, self).__init__()
self.radius, self.nsample, self.use_xyz = radius, nsample, use_xyz
def forward(self, xyz: torch.Tensor, new_xyz: torch.Tensor = None, features: torch.Tensor = None, idx: torch.Tensor = None) -> torch.Tensor:
"""
input: xyz: (b, n, 3) coordinates of the features
               new_xyz: (b, m, 3) centroids
features: (b, c, n)
idx: idx of neighbors
# idxs: (b, n)
output: new_features: (b, c+3, m, nsample)
# grouped_idxs: (b, m, nsample)
"""
if new_xyz is None:
new_xyz = xyz
if idx is None:
if self.radius is not None:
idx = ballquery(self.radius, self.nsample, xyz, new_xyz)
else:
# idx = knnquery_naive(self.nsample, xyz, new_xyz) # (b, m, nsample)
idx = knnquery(self.nsample, xyz, new_xyz) # (b, m, nsample)
#xyz_trans = xyz.transpose(1, 2).contiguous()
#grouped_xyz = grouping(xyz_trans, idx) # (b, 3, m, nsample)
# grouped_idxs = grouping(idxs.unsqueeze(1).float(), idx).squeeze(1).int() # (b, m, nsample)
#grouped_xyz -= new_xyz.transpose(1, 2).unsqueeze(-1)
if features is not None:
grouped_features = grouping(features, idx) # (b, c, m, nsample)
if self.use_xyz:
#new_features = torch.cat([grouped_xyz, grouped_features], dim=1) # (b, c+3, m, nsample) # le
new_features = grouped_features # (b, c, m, nsample)
else:
new_features = grouped_features
        else:
            # The xyz grouping above is commented out in this variant, so there is no
            # `grouped_xyz` to fall back on; fail explicitly instead of raising NameError.
            raise ValueError("Le_QueryAndGroup_OnlyFeature requires `features` to be provided")
return new_features
class GroupAll(nn.Module):
"""
Groups all features
"""
def __init__(self, use_xyz: bool = True):
super(GroupAll, self).__init__()
self.use_xyz = use_xyz
def forward(self, xyz: torch.Tensor, new_xyz: torch.Tensor, features: torch.Tensor = None) -> Tuple[torch.Tensor]:
"""
input: xyz: (b, n, 3) coordinates of the features
               new_xyz: ignored
features: (b, c, n) descriptors of the features
output: new_features: (b, c+3, 1, N) tensor
"""
grouped_xyz = xyz.transpose(1, 2).unsqueeze(2)
if features is not None:
grouped_features = features.unsqueeze(2)
if self.use_xyz:
new_features = torch.cat([grouped_xyz, grouped_features], dim=1) # (b, c+3, 1, n)
else:
new_features = grouped_features
else:
new_features = grouped_xyz
return new_features
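# --- Added usage sketch (illustrative; not part of the original module) ---
# GroupAll only reshapes and concatenates, so it also works on CPU tensors.
def _group_all_example():
    group_all = GroupAll(use_xyz=True)
    xyz = torch.rand(2, 1024, 3)                # (b, n, 3)
    features = torch.rand(2, 64, 1024)          # (b, c, n)
    out = group_all(xyz, None, features)        # (b, c + 3, 1, n)
    return out.shape                            # torch.Size([2, 67, 1, 1024])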
| [
"metrics.pointops.pointops_cuda.gathering_backward_cuda",
"torch.cuda.LongTensor",
"metrics.pointops.pointops_cuda.featuregather_forward_cuda",
"metrics.pointops.pointops_cuda.nearestneighbor_cuda",
"metrics.pointops.pointops_cuda.gathering_forward_cuda",
"torch.sqrt",
"metrics.pointops.pointops_cuda.labelstat_and_ballquery_cuda",
"metrics.pointops.pointops_cuda.grouping_int_forward_cuda",
"metrics.pointops.pointops_cuda.featuregather_backward_cuda",
"metrics.pointops.pointops_cuda.labelstat_idx_cuda",
"torch.cuda.IntTensor",
"metrics.pointops.pointops_cuda.grouping_forward_cuda",
"metrics.pointops.pointops_cuda.knnquery_cuda",
"metrics.pointops.pointops_cuda.interpolation_forward_cuda",
"torch.sort",
"metrics.pointops.pointops_cuda.interpolation_backward_cuda",
"metrics.pointops.pointops_cuda.grouping_backward_cuda",
"torch.transpose",
"torch.clamp",
"torch.cat",
"torch.cuda.FloatTensor",
"metrics.pointops.pointops_cuda.ballquery_cuda",
"metrics.pointops.pointops_cuda.furthestsampling_cuda",
"metrics.pointops.pointops_cuda.labelstat_ballrange_cuda",
"metrics.pointops.pointops_cuda.featuredistribute_cuda",
"torch.mm",
"numpy.random.shuffle"
] | [((12487, 12517), 'torch.clamp', 'torch.clamp', (['dist', '(0.0)', 'np.inf'], {}), '(dist, 0.0, np.inf)\n', (12498, 12517), False, 'import torch\n'), ((425, 451), 'torch.cuda.IntTensor', 'torch.cuda.IntTensor', (['b', 'm'], {}), '(b, m)\n', (445, 451), False, 'import torch\n'), ((516, 576), 'metrics.pointops.pointops_cuda.furthestsampling_cuda', 'pointops_cuda.furthestsampling_cuda', (['b', 'n', 'm', 'xyz', 'temp', 'idx'], {}), '(b, n, m, xyz, temp, idx)\n', (551, 576), False, 'from metrics.pointops import pointops_cuda\n'), ((1055, 1086), 'torch.cuda.FloatTensor', 'torch.cuda.FloatTensor', (['b', 'c', 'm'], {}), '(b, c, m)\n', (1077, 1086), False, 'import torch\n'), ((1095, 1166), 'metrics.pointops.pointops_cuda.gathering_forward_cuda', 'pointops_cuda.gathering_forward_cuda', (['b', 'c', 'n', 'm', 'features', 'idx', 'output'], {}), '(b, c, n, m, features, idx, output)\n', (1131, 1166), False, 'from metrics.pointops import pointops_cuda\n'), ((1468, 1561), 'metrics.pointops.pointops_cuda.gathering_backward_cuda', 'pointops_cuda.gathering_backward_cuda', (['b', 'c', 'n', 'm', 'grad_out_data', 'idx', 'grad_features.data'], {}), '(b, c, n, m, grad_out_data, idx,\n grad_features.data)\n', (1505, 1561), False, 'from metrics.pointops import pointops_cuda\n'), ((2202, 2233), 'torch.cuda.FloatTensor', 'torch.cuda.FloatTensor', (['b', 'n', '(3)'], {}), '(b, n, 3)\n', (2224, 2233), False, 'import torch\n'), ((2248, 2277), 'torch.cuda.IntTensor', 'torch.cuda.IntTensor', (['b', 'n', '(3)'], {}), '(b, n, 3)\n', (2268, 2277), False, 'import torch\n'), ((2286, 2357), 'metrics.pointops.pointops_cuda.nearestneighbor_cuda', 'pointops_cuda.nearestneighbor_cuda', (['b', 'n', 'm', 'unknown', 'known', 'dist2', 'idx'], {}), '(b, n, m, unknown, known, dist2, idx)\n', (2320, 2357), False, 'from metrics.pointops import pointops_cuda\n'), ((3276, 3307), 'torch.cuda.FloatTensor', 'torch.cuda.FloatTensor', (['b', 'c', 'n'], {}), '(b, c, n)\n', (3298, 3307), False, 'import torch\n'), ((3316, 3403), 'metrics.pointops.pointops_cuda.interpolation_forward_cuda', 'pointops_cuda.interpolation_forward_cuda', (['b', 'c', 'm', 'n', 'features', 'idx', 'weight', 'output'], {}), '(b, c, m, n, features, idx, weight,\n output)\n', (3356, 3403), False, 'from metrics.pointops import pointops_cuda\n'), ((3864, 3969), 'metrics.pointops.pointops_cuda.interpolation_backward_cuda', 'pointops_cuda.interpolation_backward_cuda', (['b', 'c', 'n', 'm', 'grad_out_data', 'idx', 'weight', 'grad_features.data'], {}), '(b, c, n, m, grad_out_data, idx,\n weight, grad_features.data)\n', (3905, 3969), False, 'from metrics.pointops import pointops_cuda\n'), ((4499, 4539), 'torch.cuda.FloatTensor', 'torch.cuda.FloatTensor', (['b', 'c', 'm', 'nsample'], {}), '(b, c, m, nsample)\n', (4521, 4539), False, 'import torch\n'), ((4548, 4627), 'metrics.pointops.pointops_cuda.grouping_forward_cuda', 'pointops_cuda.grouping_forward_cuda', (['b', 'c', 'n', 'm', 'nsample', 'features', 'idx', 'output'], {}), '(b, c, n, m, nsample, features, idx, output)\n', (4583, 4627), False, 'from metrics.pointops import pointops_cuda\n'), ((5091, 5192), 'metrics.pointops.pointops_cuda.grouping_backward_cuda', 'pointops_cuda.grouping_backward_cuda', (['b', 'c', 'n', 'm', 'nsample', 'grad_out_data', 'idx', 'grad_features.data'], {}), '(b, c, n, m, nsample, grad_out_data,\n idx, grad_features.data)\n', (5127, 5192), False, 'from metrics.pointops import pointops_cuda\n'), ((5709, 5748), 'torch.cuda.LongTensor', 'torch.cuda.LongTensor', (['b', 'c', 'm', 'nsample'], {}), '(b, c, m, 
nsample)\n', (5730, 5748), False, 'import torch\n'), ((5757, 5844), 'metrics.pointops.pointops_cuda.grouping_int_forward_cuda', 'pointops_cuda.grouping_int_forward_cuda', (['b', 'c', 'n', 'm', 'nsample', 'features', 'idx', 'output'], {}), '(b, c, n, m, nsample, features, idx,\n output)\n', (5796, 5844), False, 'from metrics.pointops import pointops_cuda\n'), ((6718, 6791), 'metrics.pointops.pointops_cuda.ballquery_cuda', 'pointops_cuda.ballquery_cuda', (['b', 'n', 'm', 'radius', 'nsample', 'new_xyz', 'xyz', 'idx'], {}), '(b, n, m, radius, nsample, new_xyz, xyz, idx)\n', (6746, 6791), False, 'from metrics.pointops import pointops_cuda\n'), ((7410, 7485), 'metrics.pointops.pointops_cuda.featuredistribute_cuda', 'pointops_cuda.featuredistribute_cuda', (['b', 'n', 'm', 'max_xyz', 'xyz', 'distribute_idx'], {}), '(b, n, m, max_xyz, xyz, distribute_idx)\n', (7446, 7485), False, 'from metrics.pointops import pointops_cuda\n'), ((8188, 8293), 'metrics.pointops.pointops_cuda.featuregather_forward_cuda', 'pointops_cuda.featuregather_forward_cuda', (['b', 'n', 'm', 'c', 'max_feature', 'distribute_idx', 'distribute_feature'], {}), '(b, n, m, c, max_feature,\n distribute_idx, distribute_feature)\n', (8228, 8293), False, 'from metrics.pointops import pointops_cuda\n'), ((8852, 8978), 'metrics.pointops.pointops_cuda.featuregather_backward_cuda', 'pointops_cuda.featuregather_backward_cuda', (['b', 'n', 'm', 'c', 'grad_distribute_feature_data', 'distribute_idx', 'grad_max_feature.data'], {}), '(b, n, m, c,\n grad_distribute_feature_data, distribute_idx, grad_max_feature.data)\n', (8893, 8978), False, 'from metrics.pointops import pointops_cuda\n'), ((9710, 9819), 'metrics.pointops.pointops_cuda.labelstat_ballrange_cuda', 'pointops_cuda.labelstat_ballrange_cuda', (['b', 'n', 'm', 'radius', 'nclass', 'new_xyz', 'xyz', 'label_stat', 'new_label_stat'], {}), '(b, n, m, radius, nclass, new_xyz,\n xyz, label_stat, new_label_stat)\n', (9748, 9819), False, 'from metrics.pointops import pointops_cuda\n'), ((10543, 10638), 'metrics.pointops.pointops_cuda.labelstat_idx_cuda', 'pointops_cuda.labelstat_idx_cuda', (['b', 'n', 'm', 'nsample', 'nclass', 'label_stat', 'idx', 'new_label_stat'], {}), '(b, n, m, nsample, nclass, label_stat, idx,\n new_label_stat)\n', (10575, 10638), False, 'from metrics.pointops import pointops_cuda\n'), ((11550, 11677), 'metrics.pointops.pointops_cuda.labelstat_and_ballquery_cuda', 'pointops_cuda.labelstat_and_ballquery_cuda', (['b', 'n', 'm', 'radius', 'nsample', 'nclass', 'new_xyz', 'xyz', 'label_stat', 'idx', 'new_label_stat'], {}), '(b, n, m, radius, nsample, nclass,\n new_xyz, xyz, label_stat, idx, new_label_stat)\n', (11592, 11677), False, 'from metrics.pointops import pointops_cuda\n'), ((12246, 12270), 'torch.transpose', 'torch.transpose', (['y', '(0)', '(1)'], {}), '(y, 0, 1)\n', (12261, 12270), False, 'import torch\n'), ((12340, 12364), 'torch.transpose', 'torch.transpose', (['x', '(0)', '(1)'], {}), '(x, 0, 1)\n', (12355, 12364), False, 'import torch\n'), ((13671, 13694), 'torch.sort', 'torch.sort', (['dist'], {'dim': '(2)'}), '(dist, dim=2)\n', (13681, 13694), False, 'import torch\n'), ((14633, 14704), 'metrics.pointops.pointops_cuda.knnquery_cuda', 'pointops_cuda.knnquery_cuda', (['b', 'n', 'm', 'nsample', 'xyz', 'new_xyz', 'idx', 'dist2'], {}), '(b, n, m, nsample, xyz, new_xyz, idx, dist2)\n', (14660, 14704), False, 'from metrics.pointops import pointops_cuda\n'), ((15997, 16020), 'torch.sort', 'torch.sort', (['dist'], {'dim': '(2)'}), '(dist, dim=2)\n', (16007, 16020), False, 
'import torch\n'), ((21732, 21755), 'numpy.random.shuffle', 'np.random.shuffle', (['idx2'], {}), '(idx2)\n', (21749, 21755), True, 'import numpy as np\n'), ((2373, 2390), 'torch.sqrt', 'torch.sqrt', (['dist2'], {}), '(dist2)\n', (2383, 2390), False, 'import torch\n'), ((12436, 12452), 'torch.mm', 'torch.mm', (['x', 'y_t'], {}), '(x, y_t)\n', (12444, 12452), False, 'import torch\n'), ((467, 495), 'torch.cuda.FloatTensor', 'torch.cuda.FloatTensor', (['b', 'n'], {}), '(b, n)\n', (489, 495), False, 'import torch\n'), ((1369, 1400), 'torch.cuda.FloatTensor', 'torch.cuda.FloatTensor', (['b', 'c', 'n'], {}), '(b, c, n)\n', (1391, 1400), False, 'import torch\n'), ((3765, 3796), 'torch.cuda.FloatTensor', 'torch.cuda.FloatTensor', (['b', 'c', 'm'], {}), '(b, c, m)\n', (3787, 3796), False, 'import torch\n'), ((4992, 5023), 'torch.cuda.FloatTensor', 'torch.cuda.FloatTensor', (['b', 'c', 'n'], {}), '(b, c, n)\n', (5014, 5023), False, 'import torch\n'), ((6666, 6701), 'torch.cuda.IntTensor', 'torch.cuda.IntTensor', (['b', 'm', 'nsample'], {}), '(b, m, nsample)\n', (6686, 6701), False, 'import torch\n'), ((7367, 7393), 'torch.cuda.IntTensor', 'torch.cuda.IntTensor', (['b', 'm'], {}), '(b, m)\n', (7387, 7393), False, 'import torch\n'), ((8140, 8171), 'torch.cuda.FloatTensor', 'torch.cuda.FloatTensor', (['b', 'c', 'm'], {}), '(b, c, m)\n', (8162, 8171), False, 'import torch\n'), ((8723, 8754), 'torch.cuda.FloatTensor', 'torch.cuda.FloatTensor', (['b', 'c', 'n'], {}), '(b, c, n)\n', (8745, 8754), False, 'import torch\n'), ((9659, 9693), 'torch.cuda.IntTensor', 'torch.cuda.IntTensor', (['b', 'm', 'nclass'], {}), '(b, m, nclass)\n', (9679, 9693), False, 'import torch\n'), ((10492, 10526), 'torch.cuda.IntTensor', 'torch.cuda.IntTensor', (['b', 'm', 'nclass'], {}), '(b, m, nclass)\n', (10512, 10526), False, 'import torch\n'), ((11440, 11474), 'torch.cuda.IntTensor', 'torch.cuda.IntTensor', (['b', 'm', 'nclass'], {}), '(b, m, nclass)\n', (11460, 11474), False, 'import torch\n'), ((11497, 11532), 'torch.cuda.IntTensor', 'torch.cuda.IntTensor', (['b', 'm', 'nsample'], {}), '(b, m, nsample)\n', (11517, 11532), False, 'import torch\n'), ((14519, 14554), 'torch.cuda.IntTensor', 'torch.cuda.IntTensor', (['b', 'm', 'nsample'], {}), '(b, m, nsample)\n', (14539, 14554), False, 'import torch\n'), ((14579, 14616), 'torch.cuda.FloatTensor', 'torch.cuda.FloatTensor', (['b', 'm', 'nsample'], {}), '(b, m, nsample)\n', (14601, 14616), False, 'import torch\n'), ((20079, 20128), 'torch.cat', 'torch.cat', (['[grouped_xyz, grouped_features]'], {'dim': '(1)'}), '([grouped_xyz, grouped_features], dim=1)\n', (20088, 20128), False, 'import torch\n'), ((22265, 22314), 'torch.cat', 'torch.cat', (['[grouped_xyz, grouped_features]'], {'dim': '(1)'}), '([grouped_xyz, grouped_features], dim=1)\n', (22274, 22314), False, 'import torch\n'), ((29001, 29050), 'torch.cat', 'torch.cat', (['[grouped_xyz, grouped_features]'], {'dim': '(1)'}), '([grouped_xyz, grouped_features], dim=1)\n', (29010, 29050), False, 'import torch\n')] |
from layout import Shape, Widget
from flash.text.engine import TextBlock, TextElement
@package('layout')
class Poly(Shape):
__slots__ = ('fillcolor', 'sequence')
def __init__(self, name, fillcolor, seq, states):
super().__init__(name, states)
self.fillcolor = fillcolor
self.sequence = seq
def draw(self, w, h):
g = self.graphics
g.clear()
for line in values(self.sequence):
g.beginFill(self.fillcolor)
g.moveTo(int(line[0][0]*w), int(line[0][1]*h))
for idx in range(1, line.length):
g.lineTo(int(line[idx][0]*w), int(line[idx][1]*h))
g.endFill()
@package('layout')
class RoundRect(Shape):
__slots__ = ('fillcolor', 'radius')
def __init__(self, name, fillcolor, radius, states):
super().__init__(name, states)
self.fillcolor = fillcolor
self.radius = radius
def draw(self, width, height):
g = self.graphics
g.clear()
g.beginFill(self.fillcolor)
g.drawRoundRect(0, 0, width, height, self.radius, self.radius)
g.endFill()
@package('layout')
class TextLine(Widget):
__slots__ = ('format', 'text', 'textline')
def __init__(self, format, text, name, states):
self.format = format
self.text = text
super().__init__(name, states)
def draw(self, width, height):
if self.textline:
self.removeChild(self.textline)
tb = TextBlock()
tb.content = TextElement(self.text, self.format)
self.textline = tb.createTextLine(None, width)
self.addChild(self.textline)
@package('layout')
class CenteredLine(TextLine):
def __init__(self, format, text, name, states):
super().__init__(format, text, name, states)
def draw(self, width, height):
super().draw(width, height)
self.textline.x = int((width - self.textline.width)/2)
self.textline.y = int((height - self.textline.height)/2)
| [
"flash.text.engine.TextBlock",
"flash.text.engine.TextElement"
] | [((1479, 1490), 'flash.text.engine.TextBlock', 'TextBlock', ([], {}), '()\n', (1488, 1490), False, 'from flash.text.engine import TextBlock, TextElement\n'), ((1512, 1547), 'flash.text.engine.TextElement', 'TextElement', (['self.text', 'self.format'], {}), '(self.text, self.format)\n', (1523, 1547), False, 'from flash.text.engine import TextBlock, TextElement\n')] |
import csv
import math
import numpy as np
import pandas
import scipy.optimize
import sys
import argparse
def ineq_constraint_1(v):
return np.array([vi for vi in v])
def ineq_constraint_2(v):
return np.array([-vi + 30 for vi in v])
class WeightAverage:
def __init__(self, mean, csv):
self.df = pandas.read_csv(csv)
self.course = self.df['name']
self.expected_mean = mean
self.credits = self.df[['credits', 'grade']].query('grade == 0')[['credits']].transpose().to_numpy()[0]
self.grade_initial_sol = np.array([mean for _ in range(0, len(self.credits))])
self.owned_credits = self.df[['credits', 'grade']].query('grade > 0')[['credits']].transpose().to_numpy()[0]
self.owned_grades = self.df[['grade']].query('grade > 0').transpose().to_numpy()[0]
self.tot_credits = sum(self.owned_credits) + sum(self.credits)
def weight_average(self, v):
term1 = 0
term2 = 0
for i in range(0, len(self.owned_grades)):
term1 = term1 + self.owned_grades[i] * self.owned_credits[i]
for i in range(0, len(v)):
term2 = term2 + v[i] * self.credits[i]
return (term1 + term2) / self.tot_credits
def eq_constraint(self, v):
return self.weight_average(v) - self.expected_mean
def solve(self):
cons = (
{'type': 'eq', 'fun': self.eq_constraint},
{'type': 'ineq', 'fun': ineq_constraint_1},
{'type': 'ineq', 'fun': ineq_constraint_2})
res = scipy.optimize.minimize(self.weight_average, self.grade_initial_sol, method='SLSQP', constraints=cons)
if not res.success:
return None
return res.x
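# --- Added usage sketch (illustrative; not part of the original script) ---
# courses.csv layout implied by the pandas column accesses above
# (a grade of 0 marks an exam not yet taken):
#
#   name,credits,grade
#   Algorithms,9,28
#   Physics,6,0
#   Databases,6,0
#
# solver = WeightAverage(27.5, "courses.csv")
# grades = solver.solve()      # None when the target mean is unreachable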
def error_no_solution():
print("Mean not possible with current vote :(")
exit(0)
def output_result(solver, sol):
avg = solver.weight_average(sol)
df = solver.df
print(f"Expected mean: {avg} -> {int(round(avg / 30 * 110, 0))} / 110")
if sol is None:
print("Not Possible with current grades :(")
exit()
for index, row in df.query('grade > 0').iterrows():
print(f"'{row['name']}', credits: {row['credits']}, grade {row['grade']}")
i = 0
for index, row in df.query('grade == 0').iterrows():
print(f"'{row['name']}', credits: {row['credits']}, grade {int(sol[i])}")
i += 1
return 0
def main():
name = "calcGrades"
    description = """CalcGrades is a utility whose purpose is to compute the minimum
grades required to get a certain weight average of the grades over the credits,
given the desired output and the grades already owned."""
parser = argparse.ArgumentParser(name, description=description)
parser.add_argument('mean', metavar='M', type=float, nargs='+', help='The expected mean')
parser.add_argument('--file',dest='file', default='courses.csv', type=str,
help='path to the csv file containing the courses (default: courses.csv)')
parser.add_argument('--floor', default=False, action='store_true',
help='apply floor operation instead of round to solution')
parser.add_argument('--ceil', default=False, action='store_true',
help='apply ceil operation instead of round to solution')
args = parser.parse_args()
    # argparse returns a list here because of nargs='+'; only the first value is used
    mean = args.mean[0]
courses = args.file
solver = WeightAverage(mean, courses)
sol = solver.solve()
if sol is None:
error_no_solution()
if args.ceil:
sol = [math.ceil(x) for x in sol]
elif args.floor:
sol = [math.floor(x) for x in sol]
else:
sol = [round(x) for x in sol]
output_result(solver, sol)
return 0
if __name__ == '__main__':
main()
| [
"math.ceil",
"argparse.ArgumentParser",
"math.floor",
"pandas.read_csv",
"numpy.array"
] | [((144, 170), 'numpy.array', 'np.array', (['[vi for vi in v]'], {}), '([vi for vi in v])\n', (152, 170), True, 'import numpy as np\n'), ((210, 244), 'numpy.array', 'np.array', (['[(-vi + 30) for vi in v]'], {}), '([(-vi + 30) for vi in v])\n', (218, 244), True, 'import numpy as np\n'), ((2690, 2744), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['name'], {'description': 'description'}), '(name, description=description)\n', (2713, 2744), False, 'import argparse\n'), ((320, 340), 'pandas.read_csv', 'pandas.read_csv', (['csv'], {}), '(csv)\n', (335, 340), False, 'import pandas\n'), ((3547, 3559), 'math.ceil', 'math.ceil', (['x'], {}), '(x)\n', (3556, 3559), False, 'import math\n'), ((3610, 3623), 'math.floor', 'math.floor', (['x'], {}), '(x)\n', (3620, 3623), False, 'import math\n')] |
import tkinter as tk
import tkinter.messagebox
from Control import Control
class View:
def __init__(self, control : Control.Control):
self.control = control
# Init Window
self.root = tk.Tk()
self.root.title(u"Header File Generator")
self.root.geometry("700x800")
self.config_frame = tk.Frame(self.root)
# Config Table
lb_symbol = tk.Label(self.config_frame, width = 20)
lb_symbol["text"] = "Symbol"
lb_symbol.grid(row = 0, column = 0)
lb_description = tk.Label(self.config_frame, width = 40)
lb_description["text"] = "Detail"
lb_description.grid(row = 0, column = 1)
lb_enable = tk.Label(self.config_frame, width = 10)
lb_enable["text"] = "Enable"
lb_enable.grid(row = 0, column = 2)
for i, config in enumerate(self.control.getConfigs()):
symbol_entry = tk.Entry(self.config_frame, width=20)
symbol_entry.insert(tk.END, config.symbol)
symbol_entry.config(state = tk.DISABLED)
symbol_entry.config(disabledforeground = "black", disabledbackground = "white")
symbol_entry.grid(row= i + 1, column = 0)
detail_entry = tk.Entry(self.config_frame, width=40)
detail_entry.insert(tk.END, config.detail)
detail_entry.config(state = tk.DISABLED)
detail_entry.config(disabledforeground = "black", disabledbackground = "white")
detail_entry.grid(row= i + 1, column = 1)
bt_enable = tk.Button(self.config_frame, text="ON", width= 5)
bt_enable["text"] = "ON" if config.enable else "OFF"
color = "green" if config.enable else "red"
bt_enable.config(bg=color, activebackground = color)
bt_enable["command"] = lambda id = i, button = bt_enable : self.toggle_config_enable(id, button)
bt_enable.grid(row = i + 1, column = 2)
self.config_frame.pack(side=tk.TOP, anchor=tk.NW)
self.value_config_frame = tk.Frame(self.root)
# Config Table
lb_symbol = tk.Label(self.value_config_frame, width = 20)
lb_symbol["text"] = "Symbol"
lb_symbol.grid(row = 0, column = 0)
lb_description = tk.Label(self.value_config_frame, width = 40)
lb_description["text"] = "Detail"
lb_description.grid(row = 0, column = 1)
lb_value = tk.Label(self.value_config_frame, width = 10)
lb_value["text"] = "Value"
lb_value.grid(row = 0, column = 2)
lb_enable = tk.Label(self.value_config_frame, width = 10)
lb_enable["text"] = "Enable"
lb_enable.grid(row = 0, column = 3)
for i, val_config in enumerate(self.control.getValConfigs()):
symbol_entry = tk.Entry(self.value_config_frame, width=20)
symbol_entry.insert(tk.END, val_config.symbol)
symbol_entry.config(state = tk.DISABLED)
symbol_entry.config(disabledforeground = "black", disabledbackground = "white")
symbol_entry.grid(row= i + 1, column = 0)
detail_entry = tk.Entry(self.value_config_frame, width=40)
detail_entry.insert(tk.END, val_config.detail)
detail_entry.config(state = tk.DISABLED)
detail_entry.config(disabledforeground = "black", disabledbackground = "white")
detail_entry.grid(row= i + 1, column = 1)
value_entry = tk.Entry(self.value_config_frame, width=10)
value_entry.insert(tk.END, val_config.value)
value_entry.config(state = tk.DISABLED)
value_entry.config(disabledforeground = "black", disabledbackground = "white")
value_entry.grid(row= i + 1, column = 2)
bt_enable = tk.Button(self.value_config_frame, text="ON", width= 5)
bt_enable["text"] = "ON" if val_config.enable else "OFF"
color = "green" if val_config.enable else "red"
bt_enable.config(bg=color, activebackground = color)
bt_enable["command"] = lambda id = i, button = bt_enable : self.toggle_val_config_enable(id, button)
bt_enable.grid(row = i + 1, column = 3)
self.value_config_frame.pack(side=tk.TOP, anchor=tk.W)
# Generator Button
self.bt_generate = tk.Button(self.root)
self.bt_generate["text"] = "Generate Header"
self.bt_generate["command"] = self.generateHeader
self.bt_generate.pack(side=tk.BOTTOM, anchor=tk.SE)
def start(self):
self.root.mainloop()
def generateHeader(self):
self.control.generateHeader()
tk.messagebox.showinfo("Header Generator Info", "Generated:{0}".format(self.control.header_config.path))
def update(self):
pass
def toggle_config_enable(self, id, button : tk.Button):
config = self.control.getConfigs()[id]
config.enable = not config.enable
button["text"] = "ON" if config.enable else "OFF"
color = "green" if config.enable else "red"
button.config(bg=color, activebackground = color)
def toggle_val_config_enable(self, id, button : tk.Button):
val_config = self.control.getValConfigs()[id]
val_config.enable = not val_config.enable
button["text"] = "ON" if val_config.enable else "OFF"
color = "green" if val_config.enable else "red"
button.config(bg=color, activebackground = color)
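# --- Added usage sketch (illustrative; not part of the original module) ---
# The Control constructor is defined elsewhere in this project, so its arguments
# are not shown here; this only illustrates how the view is wired and started.
#
#   control = Control.Control(...)
#   view = View(control)
#   view.start()          # enters the Tk main loop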
| [
"tkinter.Entry",
"tkinter.Button",
"tkinter.Tk",
"tkinter.Label",
"tkinter.Frame"
] | [((213, 220), 'tkinter.Tk', 'tk.Tk', ([], {}), '()\n', (218, 220), True, 'import tkinter as tk\n'), ((338, 357), 'tkinter.Frame', 'tk.Frame', (['self.root'], {}), '(self.root)\n', (346, 357), True, 'import tkinter as tk\n'), ((402, 439), 'tkinter.Label', 'tk.Label', (['self.config_frame'], {'width': '(20)'}), '(self.config_frame, width=20)\n', (410, 439), True, 'import tkinter as tk\n'), ((549, 586), 'tkinter.Label', 'tk.Label', (['self.config_frame'], {'width': '(40)'}), '(self.config_frame, width=40)\n', (557, 586), True, 'import tkinter as tk\n'), ((701, 738), 'tkinter.Label', 'tk.Label', (['self.config_frame'], {'width': '(10)'}), '(self.config_frame, width=10)\n', (709, 738), True, 'import tkinter as tk\n'), ((2041, 2060), 'tkinter.Frame', 'tk.Frame', (['self.root'], {}), '(self.root)\n', (2049, 2060), True, 'import tkinter as tk\n'), ((2105, 2148), 'tkinter.Label', 'tk.Label', (['self.value_config_frame'], {'width': '(20)'}), '(self.value_config_frame, width=20)\n', (2113, 2148), True, 'import tkinter as tk\n'), ((2258, 2301), 'tkinter.Label', 'tk.Label', (['self.value_config_frame'], {'width': '(40)'}), '(self.value_config_frame, width=40)\n', (2266, 2301), True, 'import tkinter as tk\n'), ((2415, 2458), 'tkinter.Label', 'tk.Label', (['self.value_config_frame'], {'width': '(10)'}), '(self.value_config_frame, width=10)\n', (2423, 2458), True, 'import tkinter as tk\n'), ((2560, 2603), 'tkinter.Label', 'tk.Label', (['self.value_config_frame'], {'width': '(10)'}), '(self.value_config_frame, width=10)\n', (2568, 2603), True, 'import tkinter as tk\n'), ((4299, 4319), 'tkinter.Button', 'tk.Button', (['self.root'], {}), '(self.root)\n', (4308, 4319), True, 'import tkinter as tk\n'), ((913, 950), 'tkinter.Entry', 'tk.Entry', (['self.config_frame'], {'width': '(20)'}), '(self.config_frame, width=20)\n', (921, 950), True, 'import tkinter as tk\n'), ((1233, 1270), 'tkinter.Entry', 'tk.Entry', (['self.config_frame'], {'width': '(40)'}), '(self.config_frame, width=40)\n', (1241, 1270), True, 'import tkinter as tk\n'), ((1550, 1598), 'tkinter.Button', 'tk.Button', (['self.config_frame'], {'text': '"""ON"""', 'width': '(5)'}), "(self.config_frame, text='ON', width=5)\n", (1559, 1598), True, 'import tkinter as tk\n'), ((2785, 2828), 'tkinter.Entry', 'tk.Entry', (['self.value_config_frame'], {'width': '(20)'}), '(self.value_config_frame, width=20)\n', (2793, 2828), True, 'import tkinter as tk\n'), ((3115, 3158), 'tkinter.Entry', 'tk.Entry', (['self.value_config_frame'], {'width': '(40)'}), '(self.value_config_frame, width=40)\n', (3123, 3158), True, 'import tkinter as tk\n'), ((3444, 3487), 'tkinter.Entry', 'tk.Entry', (['self.value_config_frame'], {'width': '(10)'}), '(self.value_config_frame, width=10)\n', (3452, 3487), True, 'import tkinter as tk\n'), ((3766, 3820), 'tkinter.Button', 'tk.Button', (['self.value_config_frame'], {'text': '"""ON"""', 'width': '(5)'}), "(self.value_config_frame, text='ON', width=5)\n", (3775, 3820), True, 'import tkinter as tk\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Module that contains the command line app.
Why does this file exist, and why not put this in __main__?
You might be tempted to import things from __main__ later, but that will cause
problems: the code will get executed twice:
- When you run `python -m nibetaseries` python will execute
``__main__.py`` as a script. That means there won't be any
``nibetaseries.__main__`` in ``sys.modules``.
- When you import __main__ it will get executed again (as a module) because
there's no ``nibetaseries.__main__`` in ``sys.modules``.
Also see (1) from http://click.pocoo.org/5/setuptools/#setuptools-integration
"""
from __future__ import absolute_import
import os
import argparse
from argparse import RawTextHelpFormatter
from glob import glob
from multiprocessing import cpu_count
from nipype import config as ncfg
def get_parser():
"""Build parser object"""
from ..__init__ import __version__
import sys
verstr = 'nibs v{}'.format(__version__)
parser = argparse.ArgumentParser(description='NiBetaSeries BIDS arguments',
formatter_class=RawTextHelpFormatter)
parser.add_argument('bids_dir', help='The directory with the input dataset '
'formatted according to the BIDS standard.')
parser.add_argument('derivatives_pipeline', help='The pipeline that contains '
'minimally preprocessed img, brainmask, and confounds.tsv')
parser.add_argument('output_dir', help='The directory where the output directory '
'and files should be stored. If you are running group level analysis '
'this folder should be prepopulated with the results of the'
'participant level analysis.')
parser.add_argument('analysis_level', choices=['participant', 'group'],
help='Level of the analysis that will be performed '
'Multiple participant level analyses can be run independently '
'(in parallel) using the same output_dir')
parser.add_argument('-v', '--version', action='version',
version=verstr)
# Atlas Arguments (Required Options)
atlas_args = parser.add_argument_group('Required Atlas Arguments')
atlas_args.add_argument('-a', '--atlas-img', action='store',
required=('-l' in sys.argv or '--atlas-lut' in sys.argv),
help='input atlas nifti where each voxel within a "region" '
'is labeled with the same integer and there is a unique '
'integer associated with each region of interest.')
atlas_args.add_argument('-l', '--atlas-lut', action='store',
required=('-a' in sys.argv or '--atlas-img' in sys.argv),
help='atlas look up table (tsv) formatted with the columns: '
'index, regions which correspond to the regions in the '
'nifti file specified by --atlas-img.')
# preprocessing options
proc_opts = parser.add_argument_group('Options for processing')
proc_opts.add_argument('--estimator', default='lss',
choices=['lss', 'lsa'],
help='beta series modeling method')
proc_opts.add_argument('-sm', '--smoothing-kernel', action='store', type=float, default=6.0,
help='select a smoothing kernel (mm)')
proc_opts.add_argument('-hp', '--high-pass', action='store', type=float,
default=0.0078125, help='high pass filter (Hz)')
proc_opts.add_argument('-c', '--confounds', help='The confound column names '
'that are to be included in nuisance regression. '
'write the confounds you wish to include separated by a space',
nargs="+")
proc_opts.add_argument('--hrf-model', default='glover',
choices=['glover', 'spm', 'fir',
'glover + derivative',
'glover + derivative + dispersion',
'spm + derivative',
'spm + derivative + dispersion'],
help='convolve your regressors '
'with one of the following hemodynamic response functions')
proc_opts.add_argument('--fir-delays', default=None,
nargs='+', type=int, help='FIR delays in volumes',
metavar='VOL')
proc_opts.add_argument('-w', '--work-dir', help='directory where temporary files '
'are stored (i.e. non-essential files). '
'This directory can be deleted once you are reasonably '
'certain nibs finished as expected.')
# Image Selection options
image_opts = parser.add_argument_group('Options for selecting images')
parser.add_argument('--participant-label', nargs="+",
help='The label(s) of the participant(s) '
'that should be analyzed. The label '
'corresponds to sub-<participant_label> from the BIDS spec '
'(so it does not include "sub-"). If this parameter is not '
'provided all subjects should be analyzed. Multiple '
'participants can be specified with a space separated list.')
image_opts.add_argument('--session-label', action='store',
default=None, help='select a session to analyze')
image_opts.add_argument('-t', '--task-label', action='store',
default=None, help='select a specific task to be processed')
image_opts.add_argument('--run-label', action='store',
default=None, help='select a run to analyze')
image_opts.add_argument('-sp', '--space-label', action='store', default='MNI152NLin2009cAsym',
choices=['MNI152NLin2009cAsym'],
help='select a bold derivative in a specific space to be used')
image_opts.add_argument('--description-label', action='store',
default=None, help='select a bold file with particular '
'`desc` label to process')
image_opts.add_argument('--exclude-description-label', action='store_true',
default=False, help='exclude this `desc` label from nibetaseries')
# performance options
g_perfm = parser.add_argument_group('Options to handle performance')
g_perfm.add_argument('--nthreads', '-n-cpus', action='store', type=int,
help='maximum number of threads across all processes')
g_perfm.add_argument('--use-plugin', action='store', default=None,
help='nipype plugin configuration file')
# misc options
misc = parser.add_argument_group('misc options')
misc.add_argument('--graph', action='store_true', default=False,
help='generates a graph png of the workflow')
return parser
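# Example command line (illustrative paths; `nibs` is the installed console entry
# point that ends up calling main() below):
#
#   nibs /data/bids fmriprep /data/bids/derivatives/nibetaseries participant \
#        -a atlas.nii.gz -l atlas_lut.tsv -c white_matter csf --estimator lss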
def main():
from ..workflows.base import init_nibetaseries_participant_wf
# get commandline options
opts = get_parser().parse_args()
# check inputs
if (opts.hrf_model == 'fir') and (opts.fir_delays is None):
raise ValueError('If the FIR HRF model is selected, '
'FIR delays must be provided.')
# Set up directories
# TODO: set up some sort of versioning system
bids_dir = os.path.abspath(opts.bids_dir)
derivatives_pipeline_dir = os.path.join(bids_dir, 'derivatives', opts.derivatives_pipeline)
output_dir = os.path.abspath(opts.output_dir)
os.makedirs(output_dir, exist_ok=True)
log_dir = os.path.join(output_dir, 'logs')
os.makedirs(log_dir, exist_ok=True)
if opts.work_dir:
work_dir = os.path.abspath(opts.work_dir)
else:
work_dir = os.path.join(os.getcwd(), 'nibetaseries_work')
os.makedirs(work_dir, exist_ok=True)
# only for a subset of subjects
if opts.participant_label:
subject_list = opts.participant_label
# for all subjects
else:
subject_dirs = glob(os.path.join(bids_dir, "sub-*"))
subject_list = [subject_dir.split("-")[-1] for subject_dir in subject_dirs]
# Nipype plugin configuration
# Load base plugin_settings from file if --use-plugin
if opts.use_plugin is not None:
from yaml import load as loadyml
with open(opts.use_plugin) as f:
plugin_settings = loadyml(f)
plugin_settings.setdefault('plugin_args', {})
else:
# Defaults
plugin_settings = {
'plugin': 'MultiProc',
'plugin_args': {
'raise_insufficient': False,
'maxtasksperchild': 1,
}
}
# Resource management options
# Note that we're making strong assumptions about valid plugin args
# This may need to be revisited if people try to use batch plugins
nthreads = plugin_settings['plugin_args'].get('n_procs')
# Permit overriding plugin config with specific CLI options
if nthreads is None or opts.nthreads is not None:
nthreads = opts.nthreads
if nthreads is None or nthreads < 1:
nthreads = cpu_count()
plugin_settings['plugin_args']['n_procs'] = nthreads
# Nipype config (logs and execution)
ncfg.update_config({
'logging': {'log_directory': log_dir,
'log_to_file': True},
'execution': {'crashdump_dir': log_dir,
'crashfile_format': 'txt',
'parameterize_dirs': False},
})
# running participant level
if opts.analysis_level == "participant":
nibetaseries_participant_wf = init_nibetaseries_participant_wf(
estimator=opts.estimator,
atlas_img=os.path.abspath(opts.atlas_img),
atlas_lut=os.path.abspath(opts.atlas_lut),
bids_dir=bids_dir,
derivatives_pipeline_dir=derivatives_pipeline_dir,
exclude_description_label=opts.exclude_description_label,
fir_delays=opts.fir_delays,
hrf_model=opts.hrf_model,
high_pass=opts.high_pass,
output_dir=output_dir,
run_label=opts.run_label,
selected_confounds=opts.confounds,
session_label=opts.session_label,
smoothing_kernel=opts.smoothing_kernel,
space_label=opts.space_label,
subject_list=subject_list,
task_label=opts.task_label,
description_label=opts.description_label,
work_dir=work_dir,
)
if opts.graph:
nibetaseries_participant_wf.write_graph(graph2use='colored',
format='svg',
simple_form=True)
try:
nibetaseries_participant_wf.run(**plugin_settings)
except RuntimeError as e:
if "Workflow did not execute cleanly" in str(e):
print("Workflow did not execute cleanly")
else:
raise e
elif opts.analysis_level == "group":
raise NotImplementedError('group analysis not currently implemented')
def init():
if __name__ == "__main__":
raise RuntimeError("NiBetaSeries/cli/run.py should not be run directly;\n"
"Please `pip install` NiBetaSeries and use the `nibs` command")
init()
| [
"argparse.ArgumentParser",
"os.makedirs",
"nipype.config.update_config",
"os.path.join",
"yaml.load",
"multiprocessing.cpu_count",
"os.getcwd",
"os.path.abspath"
] | [((1046, 1154), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""NiBetaSeries BIDS arguments"""', 'formatter_class': 'RawTextHelpFormatter'}), "(description='NiBetaSeries BIDS arguments',\n formatter_class=RawTextHelpFormatter)\n", (1069, 1154), False, 'import argparse\n'), ((7853, 7883), 'os.path.abspath', 'os.path.abspath', (['opts.bids_dir'], {}), '(opts.bids_dir)\n', (7868, 7883), False, 'import os\n'), ((7916, 7980), 'os.path.join', 'os.path.join', (['bids_dir', '"""derivatives"""', 'opts.derivatives_pipeline'], {}), "(bids_dir, 'derivatives', opts.derivatives_pipeline)\n", (7928, 7980), False, 'import os\n'), ((7999, 8031), 'os.path.abspath', 'os.path.abspath', (['opts.output_dir'], {}), '(opts.output_dir)\n', (8014, 8031), False, 'import os\n'), ((8036, 8074), 'os.makedirs', 'os.makedirs', (['output_dir'], {'exist_ok': '(True)'}), '(output_dir, exist_ok=True)\n', (8047, 8074), False, 'import os\n'), ((8090, 8122), 'os.path.join', 'os.path.join', (['output_dir', '"""logs"""'], {}), "(output_dir, 'logs')\n", (8102, 8122), False, 'import os\n'), ((8127, 8162), 'os.makedirs', 'os.makedirs', (['log_dir'], {'exist_ok': '(True)'}), '(log_dir, exist_ok=True)\n', (8138, 8162), False, 'import os\n'), ((8317, 8353), 'os.makedirs', 'os.makedirs', (['work_dir'], {'exist_ok': '(True)'}), '(work_dir, exist_ok=True)\n', (8328, 8353), False, 'import os\n'), ((9758, 9943), 'nipype.config.update_config', 'ncfg.update_config', (["{'logging': {'log_directory': log_dir, 'log_to_file': True}, 'execution': {\n 'crashdump_dir': log_dir, 'crashfile_format': 'txt',\n 'parameterize_dirs': False}}"], {}), "({'logging': {'log_directory': log_dir, 'log_to_file': \n True}, 'execution': {'crashdump_dir': log_dir, 'crashfile_format':\n 'txt', 'parameterize_dirs': False}})\n", (9776, 9943), True, 'from nipype import config as ncfg\n'), ((8205, 8235), 'os.path.abspath', 'os.path.abspath', (['opts.work_dir'], {}), '(opts.work_dir)\n', (8220, 8235), False, 'import os\n'), ((8278, 8289), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (8287, 8289), False, 'import os\n'), ((8529, 8560), 'os.path.join', 'os.path.join', (['bids_dir', '"""sub-*"""'], {}), "(bids_dir, 'sub-*')\n", (8541, 8560), False, 'import os\n'), ((8887, 8897), 'yaml.load', 'loadyml', (['f'], {}), '(f)\n', (8894, 8897), True, 'from yaml import load as loadyml\n'), ((9639, 9650), 'multiprocessing.cpu_count', 'cpu_count', ([], {}), '()\n', (9648, 9650), False, 'from multiprocessing import cpu_count\n'), ((10232, 10263), 'os.path.abspath', 'os.path.abspath', (['opts.atlas_img'], {}), '(opts.atlas_img)\n', (10247, 10263), False, 'import os\n'), ((10287, 10318), 'os.path.abspath', 'os.path.abspath', (['opts.atlas_lut'], {}), '(opts.atlas_lut)\n', (10302, 10318), False, 'import os\n')] |
from django.test import TestCase
from django.test import Client
class RegisterTestCase(TestCase):
def test_register(self):
c = Client()
# on success redirects to /
response = c.post('/accounts/register/', {
'username': 'asdas',
'password1': '<PASSWORD>',
'password2': '<PASSWORD>'
})
self.assertRedirects(response, '/')
# passwords don't match
response = c.post('/accounts/register/', {
'username': 'asdasdasd1',
'password1': '<PASSWORD>',
'password2': '<PASSWORD>'
})
self.assertEquals(response.status_code, 200)
# username is empty
response = c.post('/accounts/register/', {
'username': '',
'password1': '<PASSWORD>',
'password2': '<PASSWORD>'
})
self.assertEquals(response.status_code, 200)
# no password
response = c.post('/accounts/register/', {
'username': 'asdasdasd',
'password1': '',
'password2': ''
})
self.assertEquals(response.status_code, 200)
# username and password are similar
response = c.post('/accounts/register/', {
'username': 'asdasdasd0',
'password1': '<PASSWORD>',
'password2': '<PASSWORD>'
})
self.assertEquals(response.status_code, 200)
| [
"django.test.Client"
] | [((141, 149), 'django.test.Client', 'Client', ([], {}), '()\n', (147, 149), False, 'from django.test import Client\n')] |
#
# Modified by <NAME>
# Contact: <EMAIL>
#
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
OneNet Transformer class.
Copy-paste from torch.nn.Transformer with modifications:
* positional encodings are passed in MHattention
* extra LN at the end of encoder is removed
* decoder returns a stack of activations from all decoding layers
"""
import copy
import math
from typing import Optional, List
import torch
from torch import nn, Tensor
import torch.nn.functional as F
from detectron2.modeling.poolers import ROIPooler, cat
from detectron2.structures import Boxes
from .deconv import CenternetDeconv
class Head(nn.Module):
def __init__(self, cfg, backbone_shape=[2048, 1024, 512, 256]):
super().__init__()
# Build heads.
num_classes = cfg.MODEL.OneNet.NUM_CLASSES
d_model = cfg.MODEL.OneNet.DECONV_CHANNEL[-1]
activation = cfg.MODEL.OneNet.ACTIVATION
self.deconv = CenternetDeconv(cfg, backbone_shape)
        self.num_classes = num_classes
        self.d_model = d_model
self.activation = _get_activation_fn(activation)
self.feat1 = nn.Conv2d(self.d_model, self.d_model, kernel_size=3, stride=1, padding=1)
self.cls_score = nn.Conv2d(d_model, num_classes, kernel_size=3, stride=1, padding=1)
self.ltrb_pred = nn.Conv2d(d_model, 4, kernel_size=3, stride=1, padding=1)
# Init parameters.
prior_prob = cfg.MODEL.OneNet.PRIOR_PROB
self.bias_value = -math.log((1 - prior_prob) / prior_prob)
self._reset_parameters()
def _reset_parameters(self):
# init all parameters.
for p in self.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
# initialize the bias for focal loss.
if p.shape[-1] == self.num_classes:
nn.init.constant_(p, self.bias_value)
def forward(self, features_list):
features = self.deconv(features_list)
locations = self.locations(features)[None]
feat = self.activation(self.feat1(features))
class_logits = self.cls_score(feat)
pred_ltrb = F.relu(self.ltrb_pred(feat))
pred_bboxes = self.apply_ltrb(locations, pred_ltrb)
return class_logits, pred_bboxes
def apply_ltrb(self, locations, pred_ltrb):
"""
:param locations: (1, 2, H, W)
:param pred_ltrb: (N, 4, H, W)
"""
pred_boxes = torch.zeros_like(pred_ltrb)
pred_boxes[:,0,:,:] = locations[:,0,:,:] - pred_ltrb[:,0,:,:] # x1
pred_boxes[:,1,:,:] = locations[:,1,:,:] - pred_ltrb[:,1,:,:] # y1
pred_boxes[:,2,:,:] = locations[:,0,:,:] + pred_ltrb[:,2,:,:] # x2
pred_boxes[:,3,:,:] = locations[:,1,:,:] + pred_ltrb[:,3,:,:] # y2
return pred_boxes
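    # Worked example (added for illustration): for a pixel whose location is
    # (x, y) = (10, 6) and predicted offsets (l, t, r, b) = (3, 2, 5, 4), the box is
    # (x1, y1, x2, y2) = (10 - 3, 6 - 2, 10 + 5, 6 + 4) = (7, 4, 15, 10).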
@torch.no_grad()
def locations(self, features, stride=4):
"""
Arguments:
features: (N, C, H, W)
Return:
locations: (2, H, W)
"""
h, w = features.size()[-2:]
device = features.device
shifts_x = torch.arange(
0, w * stride, step=stride,
dtype=torch.float32, device=device
)
shifts_y = torch.arange(
0, h * stride, step=stride,
dtype=torch.float32, device=device
)
shift_y, shift_x = torch.meshgrid(shifts_y, shifts_x)
shift_x = shift_x.reshape(-1)
shift_y = shift_y.reshape(-1)
locations = torch.stack((shift_x, shift_y), dim=1) + stride // 2
locations = locations.reshape(h, w, 2).permute(2, 0, 1)
return locations
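    # Worked example (added for illustration): for a 2x3 feature map with stride=4,
    # the grid centers are x in (2, 6, 10) and y in (2, 6) after the +stride//2 offset,
    # so locations[:, 0, 0] == (2, 2) and locations[:, 1, 2] == (10, 6) in (x, y) order.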
def _get_activation_fn(activation):
"""Return an activation function given a string"""
if activation == "relu":
return F.relu
if activation == "gelu":
return F.gelu
if activation == "glu":
return F.glu
    raise RuntimeError(f"activation should be relu/gelu/glu, not {activation}.")
| [
"torch.nn.init.constant_",
"torch.nn.init.xavier_uniform_",
"torch.stack",
"torch.nn.Conv2d",
"math.log",
"torch.meshgrid",
"torch.no_grad",
"torch.zeros_like",
"torch.arange"
] | [((2938, 2953), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2951, 2953), False, 'import torch\n'), ((1206, 1279), 'torch.nn.Conv2d', 'nn.Conv2d', (['self.d_model', 'self.d_model'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(self.d_model, self.d_model, kernel_size=3, stride=1, padding=1)\n', (1215, 1279), False, 'from torch import nn, Tensor\n'), ((1305, 1372), 'torch.nn.Conv2d', 'nn.Conv2d', (['d_model', 'num_classes'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(d_model, num_classes, kernel_size=3, stride=1, padding=1)\n', (1314, 1372), False, 'from torch import nn, Tensor\n'), ((1398, 1455), 'torch.nn.Conv2d', 'nn.Conv2d', (['d_model', '(4)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(d_model, 4, kernel_size=3, stride=1, padding=1)\n', (1407, 1455), False, 'from torch import nn, Tensor\n'), ((2565, 2592), 'torch.zeros_like', 'torch.zeros_like', (['pred_ltrb'], {}), '(pred_ltrb)\n', (2581, 2592), False, 'import torch\n'), ((3226, 3302), 'torch.arange', 'torch.arange', (['(0)', '(w * stride)'], {'step': 'stride', 'dtype': 'torch.float32', 'device': 'device'}), '(0, w * stride, step=stride, dtype=torch.float32, device=device)\n', (3238, 3302), False, 'import torch\n'), ((3356, 3432), 'torch.arange', 'torch.arange', (['(0)', '(h * stride)'], {'step': 'stride', 'dtype': 'torch.float32', 'device': 'device'}), '(0, h * stride, step=stride, dtype=torch.float32, device=device)\n', (3368, 3432), False, 'import torch\n'), ((3494, 3528), 'torch.meshgrid', 'torch.meshgrid', (['shifts_y', 'shifts_x'], {}), '(shifts_y, shifts_x)\n', (3508, 3528), False, 'import torch\n'), ((1576, 1615), 'math.log', 'math.log', (['((1 - prior_prob) / prior_prob)'], {}), '((1 - prior_prob) / prior_prob)\n', (1584, 1615), False, 'import math\n'), ((3625, 3663), 'torch.stack', 'torch.stack', (['(shift_x, shift_y)'], {'dim': '(1)'}), '((shift_x, shift_y), dim=1)\n', (3636, 3663), False, 'import torch\n'), ((1794, 1820), 'torch.nn.init.xavier_uniform_', 'nn.init.xavier_uniform_', (['p'], {}), '(p)\n', (1817, 1820), False, 'from torch import nn, Tensor\n'), ((1936, 1973), 'torch.nn.init.constant_', 'nn.init.constant_', (['p', 'self.bias_value'], {}), '(p, self.bias_value)\n', (1953, 1973), False, 'from torch import nn, Tensor\n')] |
"""
Module holds all stuff regarding Grinder tool usage
Copyright 2015 BlazeMeter Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import re
import time
from bzt import TaurusConfigError, ToolError
from bzt.engine import ScenarioExecutor, FileLister, HavingInstallableTools, SelfDiagnosable
from bzt.modules.aggregator import ConsolidatingAggregator, ResultsReader
from bzt.modules.console import WidgetProvider, ExecutorWidget
from bzt.modules.java import TaurusJavaHelper
from bzt.requests_model import HTTPRequest
from bzt.six import iteritems
from bzt.utils import MirrorsManager, dehumanize_time, get_full_path, PythonGenerator, CALL_PROBLEMS
from bzt.utils import unzip, RequiredTool, JavaVM, shutdown_process, TclLibrary, FileReader, RESOURCES_DIR
class GrinderExecutor(ScenarioExecutor, WidgetProvider, FileLister, HavingInstallableTools, SelfDiagnosable):
"""
Grinder executor module
"""
def __init__(self):
super(GrinderExecutor, self).__init__()
self.script = None
self.exec_id = "grinder-bzt-%s" % id(self)
self.properties_file = None
self.kpi_file = None
self.cmd_line = None
self.process = None
self.end_time = None
self.retcode = None
self.java_helper = None
def __write_base_props(self, fds):
"""
write base properties and base properties file contents to fds
:param fds: fds
:return:
"""
base_props_file = self.settings.get("properties-file")
if base_props_file:
            fds.write("# Base Properties File Start: %s\n" % base_props_file)
            with open(base_props_file) as bpf:
                fds.write(bpf.read())
            fds.write("# Base Properties File End: %s\n\n" % base_props_file)
# base props
base_props = self.settings.get("properties")
if base_props:
            fds.write("# Base Properties Start\n")
            for key, val in iteritems(base_props):
                fds.write("%s=%s\n" % (key, val))
            fds.write("# Base Properties End\n\n")
def __write_scenario_props(self, fds, scenario):
"""
Write scenario props and scenario file props to fds
:param fds:
:param scenario: dict
:return:
"""
script_props_file = scenario.get("properties-file")
if script_props_file:
            fds.write("# Script Properties File Start: %s\n" % script_props_file)
            with open(script_props_file) as spf:
                fds.write(spf.read())
            fds.write("# Script Properties File End: %s\n\n" % script_props_file)
# scenario props
local_props = scenario.get("properties")
if local_props:
            fds.write("# Scenario Properties Start\n")
            for key, val in iteritems(local_props):
                fds.write("%s=%s\n" % (key, val))
            fds.write("# Scenario Properties End\n\n")
def __write_bzt_props(self, fds):
"""
Write bzt properties to fds
:param fds:
:return:
"""
        fds.write("# BZT Properties Start\n")
fds.write("grinder.hostID=%s\n" % self.exec_id)
fds.write("grinder.script=%s\n" % self.script.replace(os.path.sep, "/"))
fds.write("grinder.logDirectory=%s\n" % self.engine.artifacts_dir.replace(os.path.sep, "/"))
load = self.get_load()
if load.iterations or load.concurrency:
            fds.write("grinder.runs=%s\n" % (load.iterations or 0))
if load.concurrency:
fds.write("grinder.threads=%s\n" % load.concurrency)
if load.duration:
fds.write("grinder.duration=%s\n" % int(load.duration * 1000))
fds.write("# taurus load values in case you need them\n")
fds.write("taurus.concurrency=%s\n" % load.concurrency)
fds.write("taurus.throughput=%s\n" % load.throughput)
fds.write("taurus.ramp_up=%s\n" % load.ramp_up)
fds.write("taurus.steps=%s\n" % load.steps)
fds.write("taurus.hold_for=%s\n" % load.hold)
fds.write("taurus.iterations=%s\n" % load.iterations)
        fds.write("# BZT Properties End\n")
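    # Illustrative output of __write_bzt_props for a 10-user, 60-second run
    # (all values depend on the scenario; paths shortened):
    #
    #   # BZT Properties Start
    #   grinder.hostID=grinder-local
    #   grinder.script=/tmp/artifacts/grinder_requests.py
    #   grinder.logDirectory=/tmp/artifacts
    #   grinder.threads=10
    #   grinder.duration=60000
    #   taurus.concurrency=10
    #   ...
    #   # BZT Properties End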
def prepare(self):
self.stdout = open(self.engine.create_artifact("grinder", ".out"), "w")
self.stderr = open(self.engine.create_artifact("grinder", ".err"), "w")
self.install_required_tools()
scenario = self.get_scenario()
self.exec_id = self.label
self.script = self.get_script_path()
if not self.script:
if "requests" in scenario:
self.script = self.__scenario_from_requests()
else:
msg = "There must be a script file or requests for its generation "
msg += "to run Grinder tool (%s)" % self.execution.get('scenario')
raise TaurusConfigError(msg)
self.properties_file = self.engine.create_artifact("grinder", ".properties")
with open(self.properties_file, 'w') as fds:
self.__write_base_props(fds)
self.__write_scenario_props(fds, scenario)
self.__write_bzt_props(fds)
self.kpi_file = os.path.join(self.engine.artifacts_dir, self.exec_id + "-kpi.log")
self.reader = DataLogReader(self.kpi_file, self.log)
self.reader.report_by_url = self.settings.get("report-by-url", False)
if isinstance(self.engine.aggregator, ConsolidatingAggregator):
self.engine.aggregator.add_underling(self.reader)
# add logback configurations used by worker processes (logback-worker.xml)
self.env.add_path({"CLASSPATH": RESOURCES_DIR}, finish=True)
self.env.add_path({"CLASSPATH": self.java_helper.tool_path}, finish=True)
self.env.add_path({"CLASSPATH": self.settings.get("path", None)}, finish=True)
self.cmd_line = ["java", "net.grinder.Grinder", self.properties_file]
def startup(self):
"""
Should start the tool as fast as possible.
"""
self.env.set({"T_GRINDER_PREFIX": self.exec_id})
self.process = self.execute(self.cmd_line)
def check(self):
"""
        Checks if the tool is still running and raises an error if it exited
        with a non-zero return code.
        :return: bool
        :raise ToolError:
"""
self.retcode = self.process.poll()
if self.retcode is not None:
if self.retcode != 0:
raise ToolError("Gatling tool exited with non-zero code: %s" % self.retcode,
self.get_error_diagnostics())
return True
return False
def shutdown(self):
"""
If tool is still running - let's stop it.
"""
shutdown_process(self.process, self.log)
if self.start_time:
self.end_time = time.time()
self.log.debug("Grinder worked for %s seconds", self.end_time - self.start_time)
def post_process(self):
"""
Collect data file artifact
"""
if self.kpi_file:
self.engine.existing_artifact(self.kpi_file)
super(GrinderExecutor, self).post_process()
def __scenario_from_requests(self):
"""
Generate grinder scenario from requests
:return: script
"""
script = self.engine.create_artifact("grinder_requests", ".py")
builder = GrinderScriptBuilder(self.get_scenario(), self.log)
builder.label = self.label
builder.build_source_code()
builder.save(script)
return script
def install_required_tools(self):
grinder = self._get_tool(Grinder, config=self.settings)
self.settings["path"] = grinder.tool_path
self.java_helper = self._get_tool(TaurusJavaHelper)
required_tools = [self._get_tool(TclLibrary),
self._get_tool(JavaVM),
self.java_helper,
grinder]
for tool in required_tools:
if not tool.check_if_installed():
tool.install()
def get_widget(self):
if not self.widget:
if self.script is not None:
label = "Grinder: %s" % os.path.basename(self.script)
else:
label = None
self.widget = ExecutorWidget(self, label)
if self.get_load().ramp_up:
                self.widget.duration += self.get_load().ramp_up  # because we have ramp-down equal to ramp-up
return self.widget
def resource_files(self):
resource_files = []
script_file_path = self.get_script_path()
if script_file_path:
resource_files.append(script_file_path)
prop_file = self.get_scenario().get("properties-file")
if prop_file:
resource_files.append(prop_file)
return resource_files
def get_error_diagnostics(self):
diagnostics = []
if self.stdout is not None:
with open(self.stdout.name) as fds:
contents = fds.read().strip()
if contents.strip():
diagnostics.append("Grinder STDOUT:\n" + contents)
if self.stderr is not None:
with open(self.stderr.name) as fds:
contents = fds.read().strip()
if contents.strip():
diagnostics.append("Grinder STDOUT:\n" + contents)
return diagnostics
class DataLogReader(ResultsReader):
""" Class to read KPI from data log """
DELIMITER = ","
DETAILS_REGEX = re.compile(r"worker\.(\S+) (.+) -> (\S+) (.+), (\d+) bytes")
def __init__(self, filename, parent_logger):
super(DataLogReader, self).__init__()
self.report_by_url = False
self.log = parent_logger.getChild(self.__class__.__name__)
self.file = FileReader(filename=filename, parent_logger=self.log)
self.idx = {}
self.partial_buffer = ""
self.start_time = 0
self.end_time = 0
self.concurrency = 0
self.test_names = {}
self.known_threads = set()
def _read(self, last_pass=False):
"""
Generator method that returns next portion of data
:param last_pass:
"""
self.log.debug("Reading grinder results...")
self.lines = list(self.file.get_lines(size=1024 * 1024, last_pass=last_pass))
lnum = None
start = time.time()
for lnum, line in enumerate(self.lines):
if not self.idx:
if not line.startswith('data.'):
self.__split(line) # to capture early test name records
continue
line = line[line.find(' '):]
header_list = line.strip().split(self.DELIMITER)
for _ix, field in enumerate(header_list):
self.idx[field.strip()] = _ix
data_fields, worker_id = self.__split(line)
if not data_fields:
self.log.debug("Skipping line: %s", line.strip())
continue
yield self.parse_line(data_fields, worker_id, lnum)
if lnum is not None:
duration = time.time() - start
if duration < 0.001:
duration = 0.001
self.log.debug("Log reading speed: %s lines/s", (lnum + 1) / duration)
def parse_line(self, data_fields, worker_id, lnum):
worker_id = worker_id.split('.')[1]
t_stamp = int(int(data_fields[self.idx["Start time (ms since Epoch)"]]) / 1000.0)
r_time = int(data_fields[self.idx["Test time"]]) / 1000.0
latency = int(data_fields[self.idx["Time to first byte"]]) / 1000.0
r_code = data_fields[self.idx["HTTP response code"]].strip()
con_time = int(data_fields[self.idx["Time to resolve host"]]) / 1000.0
con_time += int(data_fields[self.idx["Time to establish connection"]]) / 1000.0
bytes_count = int(data_fields[self.idx["HTTP response length"]].strip())
test_id = data_fields[self.idx["Test"]].strip()
thread_id = worker_id + '/' + data_fields[self.idx["Thread"]].strip()
if thread_id not in self.known_threads:
self.known_threads.add(thread_id)
self.concurrency += 1
url, error_msg = self.__parse_prev_lines(worker_id, lnum, r_code, bytes_count)
if int(data_fields[self.idx["Errors"]]) or int(data_fields[self.idx['HTTP response errors']]):
if not error_msg:
if r_code != '0':
error_msg = "HTTP %s" % r_code
else:
error_msg = "Java exception calling TestRunner"
else:
error_msg = None # suppress errors
if self.report_by_url:
label = url
elif test_id in self.test_names:
label = self.test_names[test_id]
else:
label = "Test #%s" % test_id
source_id = '' # maybe use worker_id somehow?
return t_stamp, label, self.concurrency, r_time, con_time, latency, r_code, error_msg, source_id, bytes_count
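    # __split() buffers partially written lines, consumes worker start/finish
    # and "Test name for ID" bookkeeping records, and returns the data fields
    # plus the worker id for regular data log entries.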
def __split(self, line):
if not line.endswith("\n"):
self.partial_buffer += line
return None, None
line = "%s%s" % (self.partial_buffer, line)
self.partial_buffer = ""
line = line.strip()
if not line.startswith('data.'):
line_parts = line.split(' ')
if len(line_parts) > 1:
if line_parts[1] == 'starting,':
# self.concurrency += 1
pass
elif line_parts[1] == 'finished':
if self.concurrency > 0:
self.concurrency -= 1
elif set(line_parts[1:5]) == {'Test', 'name', 'for', 'ID'}:
test_id = line_parts[5][:-1]
test_name = ' '.join(line_parts[6:])
self.test_names[test_id] = test_name
self.log.debug("Recognized test id %s => %s", test_id, test_name)
return None, None
worker_id = line[:line.find(' ')]
line = line[line.find(' '):]
data_fields = line.split(self.DELIMITER)
if not data_fields[1].strip().isdigit():
return None, None
if len(data_fields) < max(self.idx.values()):
return None, None
return data_fields, worker_id
def __parse_prev_lines(self, worker_id, lnum, r_code, bytes_count):
url = ''
error_msg = None
for lineNo in reversed(range(max(lnum - 100, 0), lnum)): # looking max 100 lines back. TODO: parameterize?
line = self.lines[lineNo].strip()
matched = self.DETAILS_REGEX.match(line)
if not matched:
continue
if worker_id == matched.group(1) and r_code == matched.group(3) and str(bytes_count) == matched.group(5):
return matched.group(2), matched.group(4)
return url, error_msg
class Grinder(RequiredTool): # todo: take it from maven and convert to JarTool(?)
VERSION = "3.11"
LOCAL_PATH = "~/.bzt/grinder-taurus/lib/grinder.jar"
def __init__(self, config=None, **kwargs):
settings = config or {}
grinder_path = settings.get("path", self.LOCAL_PATH)
grinder_path = get_full_path(grinder_path)
download_link = settings.get("download-link", "")
super(Grinder, self).__init__(tool_path=grinder_path, download_link=download_link, **kwargs)
self.version = self.VERSION
self.mirror_manager = GrinderMirrorsManager(self.http_client, self.log, self.version)
def check_if_installed(self):
self.log.debug("Trying %s: %s", self.tool_name, self.tool_path)
try:
out, err = self.call(["java", "-classpath", self.tool_path, "net.grinder.Grinder"])
if err:
out += err
self.log.debug("%s stdout: %s", self.tool_name, out)
return True
except CALL_PROBLEMS as exc:
self.log.warning("%s check failed: %s", self.tool_name, exc)
return False
def install(self):
dest = get_full_path(self.tool_path, step_up=2)
self.log.info("Will install %s into %s", self.tool_name, dest)
grinder_dist = self._download(use_link=bool(self.download_link))
self.log.info("Unzipping %s", grinder_dist)
unzip(grinder_dist, dest, 'grinder-' + self.version)
os.remove(grinder_dist)
self.log.info("Installed grinder successfully")
if not self.check_if_installed():
raise ToolError("Unable to run %s after installation!" % self.tool_name)
class GrinderMirrorsManager(MirrorsManager):
MIRRORS_SOURCE = "https://sourceforge.net/settings/mirror_choices?projectname=grinder&filename=The%20Grinder" \
"%203/{version}/grinder-{version}-binary.zip&dialog=true"
DOWNLOAD_LINK = "https://downloads.sourceforge.net/project/grinder/The%20Grinder%203/{version}" \
"/grinder-{version}-binary.zip?r=&ts=" + str(int(time.time())) + "&use_mirror=autoselect"
def __init__(self, http_client, parent_logger, grinder_version):
self.grinder_version = grinder_version
base_link = self.MIRRORS_SOURCE.format(version=self.grinder_version)
super(GrinderMirrorsManager, self).__init__(http_client, base_link, parent_logger)
def _parse_mirrors(self):
links = []
if self.page_source is not None:
self.log.debug('Parsing mirrors...')
base_link = "http://sourceforge.net/projects/grinder/files/The%20Grinder%203/{version}/grinder-{version}" \
"-binary.zip/download?use_mirror={mirror}"
li_search_pattern = re.compile(r'<li id=".*?">')
li_elements = li_search_pattern.findall(self.page_source)
if li_elements:
links = [base_link.format(version=self.grinder_version, mirror=link.strip('<li id="').strip('">')) for
link in li_elements]
default_link = self.DOWNLOAD_LINK.format(version=self.grinder_version)
if default_link not in links:
links.append(default_link)
self.log.debug('Total mirrors: %d', len(links))
return links
class GrinderScriptBuilder(PythonGenerator):
IMPORTS = """
from net.grinder.script import Test
from net.grinder.script.Grinder import grinder
from net.grinder.plugin.http import HTTPRequest, HTTPPluginControl, HTTPUtilities
from HTTPClient import NVPair
"""
def __init__(self, scenario, parent_logger):
super(GrinderScriptBuilder, self).__init__(scenario, parent_logger)
self.label = "BZT Requests"
def build_source_code(self):
self.log.debug("Generating Python script for Grinder")
self.root.append(self.gen_comment("This script was generated by Taurus", indent=0))
self.root.append(self.add_imports())
self.root.append(self.gen_new_line())
default_address = self.scenario.get("default-address")
url_arg = "url=%r" % default_address if default_address else ""
self.root.append(self.gen_statement('request = HTTPRequest(%s)' % url_arg, indent=0))
self.root.append(self.gen_statement('test = Test(1, "%s")' % self.label, indent=0))
self.root.append(self.gen_statement('test.record(request)', indent=0))
self.root.append(self.gen_new_line())
self.root.append(self.gen_statement("defaults = HTTPPluginControl.getConnectionDefaults()", indent=0))
self.root.append(self.gen_statement("utilities = HTTPPluginControl.getHTTPUtilities()", indent=0))
headers = self.scenario.get_headers()
if not self.scenario.get("keepalive", True):
headers['Connection'] = 'close'
if headers:
self.root.append(self.gen_statement("defaults.setDefaultHeaders([", indent=0))
for header, value in iteritems(headers):
self.root.append(self.gen_statement("NVPair(%r, %r)," % (header, value), indent=4))
self.root.append(self.gen_statement("])", indent=0))
global_timeout = dehumanize_time(self.scenario.get("timeout", None))
if global_timeout:
self.root.append(self.gen_statement("defaults.setTimeout(%s)" % int(global_timeout * 1000), indent=0))
cookie_flag = int(self.scenario.get("store-cookie", True))
self.root.append(self.gen_statement("defaults.setUseCookies(%s)" % cookie_flag, indent=0))
self.root.append(self.gen_new_line())
self.root.append(self.gen_runner_class())
@staticmethod
def __list_to_nvpair_list(items):
return "[" + ",".join("NVPair(%r, %r)" % (header, value) for header, value in items) + "]"
def gen_runner_class(self):
runner_classdef = self.gen_class_definition("TestRunner", ["object"])
sleep_method = self.gen_method_definition("rampUpSleeper", ["self"])
sleep_method.append(self.gen_statement("if grinder.runNumber != 0: return"))
sleep_method.append(self.gen_statement("tprops = grinder.properties.getPropertySubset('taurus.')"))
sleep_method.append(self.gen_statement("inc = tprops.getDouble('ramp_up', 0)/tprops.getInt('concurrency', 1)"))
sleep_method.append(self.gen_statement("sleep_time = int(1000 * grinder.threadNumber * inc)"))
sleep_method.append(self.gen_statement("grinder.sleep(sleep_time, 0)"))
sleep_method.append(self.gen_statement("if sleep_time: grinder.logger.info('slept for %sms' % sleep_time)"))
sleep_method.append(self.gen_statement("else: grinder.logger.info('No sleep needed')"))
sleep_method.append(self.gen_new_line())
runner_classdef.append(sleep_method)
main_method = self.gen_method_definition("__call__", ["self"])
main_method.append(self.gen_statement("self.rampUpSleeper()"))
for req in self.scenario.get_requests():
if not isinstance(req, HTTPRequest):
msg = "Grinder script generator doesn't support '%s' blocks, skipping"
self.log.warning(msg, req.NAME)
continue
method = req.method.upper()
url = req.url
local_headers = req.headers
params = "[]"
headers = self.__list_to_nvpair_list(iteritems(local_headers))
main_method.append(self.gen_statement("request.%s(%r, %s, %s)" % (method, url, params, headers)))
think_time = dehumanize_time(req.priority_option('think-time'))
if think_time:
main_method.append(self.gen_statement("grinder.sleep(%s)" % int(think_time * 1000)))
runner_classdef.append(main_method)
return runner_classdef
| [
"bzt.utils.FileReader",
"re.compile",
"bzt.utils.unzip",
"os.path.join",
"bzt.utils.get_full_path",
"bzt.modules.console.ExecutorWidget",
"bzt.ToolError",
"os.path.basename",
"bzt.TaurusConfigError",
"bzt.utils.shutdown_process",
"time.time",
"bzt.six.iteritems",
"os.remove"
] | [((10071, 10134), 're.compile', 're.compile', (['"""worker\\\\.(\\\\S+) (.+) -> (\\\\S+) (.+), (\\\\d+) bytes"""'], {}), "('worker\\\\.(\\\\S+) (.+) -> (\\\\S+) (.+), (\\\\d+) bytes')\n", (10081, 10134), False, 'import re\n'), ((5650, 5716), 'os.path.join', 'os.path.join', (['self.engine.artifacts_dir', "(self.exec_id + '-kpi.log')"], {}), "(self.engine.artifacts_dir, self.exec_id + '-kpi.log')\n", (5662, 5716), False, 'import os\n'), ((7251, 7291), 'bzt.utils.shutdown_process', 'shutdown_process', (['self.process', 'self.log'], {}), '(self.process, self.log)\n', (7267, 7291), False, 'from bzt.utils import unzip, RequiredTool, JavaVM, shutdown_process, TclLibrary, FileReader, RESOURCES_DIR\n'), ((10350, 10403), 'bzt.utils.FileReader', 'FileReader', ([], {'filename': 'filename', 'parent_logger': 'self.log'}), '(filename=filename, parent_logger=self.log)\n', (10360, 10403), False, 'from bzt.utils import unzip, RequiredTool, JavaVM, shutdown_process, TclLibrary, FileReader, RESOURCES_DIR\n'), ((10932, 10943), 'time.time', 'time.time', ([], {}), '()\n', (10941, 10943), False, 'import time\n'), ((15846, 15873), 'bzt.utils.get_full_path', 'get_full_path', (['grinder_path'], {}), '(grinder_path)\n', (15859, 15873), False, 'from bzt.utils import MirrorsManager, dehumanize_time, get_full_path, PythonGenerator, CALL_PROBLEMS\n'), ((16691, 16731), 'bzt.utils.get_full_path', 'get_full_path', (['self.tool_path'], {'step_up': '(2)'}), '(self.tool_path, step_up=2)\n', (16704, 16731), False, 'from bzt.utils import MirrorsManager, dehumanize_time, get_full_path, PythonGenerator, CALL_PROBLEMS\n'), ((16936, 16988), 'bzt.utils.unzip', 'unzip', (['grinder_dist', 'dest', "('grinder-' + self.version)"], {}), "(grinder_dist, dest, 'grinder-' + self.version)\n", (16941, 16988), False, 'from bzt.utils import unzip, RequiredTool, JavaVM, shutdown_process, TclLibrary, FileReader, RESOURCES_DIR\n'), ((16997, 17020), 'os.remove', 'os.remove', (['grinder_dist'], {}), '(grinder_dist)\n', (17006, 17020), False, 'import os\n'), ((2450, 2471), 'bzt.six.iteritems', 'iteritems', (['base_props'], {}), '(base_props)\n', (2459, 2471), False, 'from bzt.six import iteritems\n'), ((3298, 3320), 'bzt.six.iteritems', 'iteritems', (['local_props'], {}), '(local_props)\n', (3307, 3320), False, 'from bzt.six import iteritems\n'), ((7348, 7359), 'time.time', 'time.time', ([], {}), '()\n', (7357, 7359), False, 'import time\n'), ((8827, 8854), 'bzt.modules.console.ExecutorWidget', 'ExecutorWidget', (['self', 'label'], {}), '(self, label)\n', (8841, 8854), False, 'from bzt.modules.console import WidgetProvider, ExecutorWidget\n'), ((17137, 17203), 'bzt.ToolError', 'ToolError', (["('Unable to run %s after installation!' % self.tool_name)"], {}), "('Unable to run %s after installation!' 
% self.tool_name)\n", (17146, 17203), False, 'from bzt import TaurusConfigError, ToolError\n'), ((18302, 18329), 're.compile', 're.compile', (['"""<li id=".*?">"""'], {}), '(\'<li id=".*?">\')\n', (18312, 18329), False, 'import re\n'), ((20490, 20508), 'bzt.six.iteritems', 'iteritems', (['headers'], {}), '(headers)\n', (20499, 20508), False, 'from bzt.six import iteritems\n'), ((5326, 5348), 'bzt.TaurusConfigError', 'TaurusConfigError', (['msg'], {}), '(msg)\n', (5343, 5348), False, 'from bzt import TaurusConfigError, ToolError\n'), ((11696, 11707), 'time.time', 'time.time', ([], {}), '()\n', (11705, 11707), False, 'import time\n'), ((22893, 22917), 'bzt.six.iteritems', 'iteritems', (['local_headers'], {}), '(local_headers)\n', (22902, 22917), False, 'from bzt.six import iteritems\n'), ((8724, 8753), 'os.path.basename', 'os.path.basename', (['self.script'], {}), '(self.script)\n', (8740, 8753), False, 'import os\n'), ((17617, 17628), 'time.time', 'time.time', ([], {}), '()\n', (17626, 17628), False, 'import time\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import pytest
import json
from kafkacli.formatter import Formatter
sampleJson = json.loads('{"a":"s", "b":1}')
def test_print_default(capsys):
Formatter().print(sampleJson)
captured = capsys.readouterr()
assert captured.out == '{"a": "s", "b": 1}\n'
def test_print_idents(capsys):
Formatter(indents=True).print(sampleJson)
captured = capsys.readouterr()
assert captured.out == '{\n "a": "s",\n "b": 1\n}\n'
def test_print_colors(capsys):
Formatter(colors=True).print(sampleJson)
captured = capsys.readouterr()
assert captured.out == \
'{"a": \x1b[34m"s"\x1b[39m, "b": \x1b[31m1\x1b[39m}\n'
| [
"kafkacli.formatter.Formatter",
"json.loads"
] | [((138, 168), 'json.loads', 'json.loads', (['"""{"a":"s", "b":1}"""'], {}), '(\'{"a":"s", "b":1}\')\n', (148, 168), False, 'import json\n'), ((207, 218), 'kafkacli.formatter.Formatter', 'Formatter', ([], {}), '()\n', (216, 218), False, 'from kafkacli.formatter import Formatter\n'), ((359, 382), 'kafkacli.formatter.Formatter', 'Formatter', ([], {'indents': '(True)'}), '(indents=True)\n', (368, 382), False, 'from kafkacli.formatter import Formatter\n'), ((536, 558), 'kafkacli.formatter.Formatter', 'Formatter', ([], {'colors': '(True)'}), '(colors=True)\n', (545, 558), False, 'from kafkacli.formatter import Formatter\n')] |
import libtcodpy as libtcod
from random import randint
nSquares = 30
nTiles = nSquares * 2 + 1
SCREEN_WIDTH = nTiles
SCREEN_HEIGHT = nTiles
libtcod.console_set_custom_font("cp437_12x12.png", libtcod.FONT_LAYOUT_ASCII_INROW)
libtcod.console_init_root(SCREEN_WIDTH, SCREEN_HEIGHT, 'pyMazeBacktrack', False, libtcod.RENDERER_OPENGL)
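# CheckDir returns 1 when the cell two tiles away in the given direction
# (1=up, 2=right, 3=down, 4=left) lies inside the maze and is still unvisited (black).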
def CheckDir(x,y,size,direction,table):
if direction == 1:
if y - 2 <= 0:
return 0
if table[x][y-2] == white:
return 0
elif direction == 2:
if x + 2 >= size:
return 0
if table[x+2][y] == white:
return 0
elif direction == 3:
if y + 2 >= size:
return 0
if table[x][y+2] == white:
return 0
elif direction == 4:
if x - 2 <= 0:
return 0
if table[x-2][y] == white:
return 0
return 1
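# Possible returns 1 if at least one cell two tiles away from (x, y) is still
# unvisited (black), i.e. the random walk can continue from here.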
def Possible(x,y,table,size):
if x+2 < size:
if table[x+2][y] == black:
return 1
if x-2 > 0:
if table[x-2][y] == black:
return 1
if y+2 < size:
if table[x][y+2] == black:
return 1
if y-2 > 0:
if table[x][y-2] == black:
return 1
return 0
black = libtcod.black
white = libtcod.white
Table = [[0 for i in range(nTiles)]for i in range(nTiles)]
for x in range(nTiles):
for y in range(nTiles):
Table[x][y] = black
libtcod.console_put_char_ex(None,x,y,219,Table[x][y],libtcod.white)
libtcod.console_flush()
Memory = []
CurrX = 1
CurrY = 1
Table[CurrX][CurrY] = white
end = 0
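# Recursive backtracker: carve a random walk of white cells, recording each move
# in Memory; when no unvisited neighbour remains, walk back along Memory until a
# cell with unvisited neighbours is found, and finish once back at (1, 1).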
while end == 0:
while Possible(CurrX,CurrY,Table,nTiles):
Dir = randint(1,4)
while CheckDir(CurrX,CurrY,nTiles,Dir,Table) == 0:
Dir = randint(1,4)
if Dir == 1:
Table[CurrX][CurrY - 1] = white
CurrY -= 2
Table[CurrX][CurrY] = white
elif Dir == 2:
Table[CurrX + 1][CurrY] = white
CurrX += 2
Table[CurrX][CurrY] = white
elif Dir == 3:
Table[CurrX][CurrY + 1] = white
CurrY += 2
Table[CurrX][CurrY] = white
elif Dir == 4:
Table[CurrX - 1][CurrY] = white
CurrX -= 2
Table[CurrX][CurrY] = white
Memory.append(Dir)
#print
for x in range(nTiles):
for y in range(nTiles):
libtcod.console_put_char_ex(None,x,y,219,Table[x][y],libtcod.white)
libtcod.console_flush()
while Possible(CurrX,CurrY,Table,nTiles) == 0:
MemorySize = len(Memory)
Dir = Memory[MemorySize-1]
if Dir == 1:
CurrY += 2
elif Dir == 2:
CurrX -= 2
elif Dir == 3:
CurrY -= 2
elif Dir == 4:
CurrX += 2
del Memory[MemorySize-1]
if CurrX == 1 and CurrY == 1:
end = 1
break
#print
for x in range(nTiles):
for y in range(nTiles):
libtcod.console_put_char_ex(None,x,y,219,Table[x][y],libtcod.white)
libtcod.console_flush()
libtcod.console_wait_for_keypress(True)
| [
"libtcodpy.console_put_char_ex",
"libtcodpy.console_init_root",
"libtcodpy.console_wait_for_keypress",
"libtcodpy.console_flush",
"libtcodpy.console_set_custom_font",
"random.randint"
] | [((152, 240), 'libtcodpy.console_set_custom_font', 'libtcod.console_set_custom_font', (['"""cp437_12x12.png"""', 'libtcod.FONT_LAYOUT_ASCII_INROW'], {}), "('cp437_12x12.png', libtcod.\n FONT_LAYOUT_ASCII_INROW)\n", (183, 240), True, 'import libtcodpy as libtcod\n'), ((237, 347), 'libtcodpy.console_init_root', 'libtcod.console_init_root', (['SCREEN_WIDTH', 'SCREEN_HEIGHT', '"""pyMazeBacktrack"""', '(False)', 'libtcod.RENDERER_OPENGL'], {}), "(SCREEN_WIDTH, SCREEN_HEIGHT, 'pyMazeBacktrack', \n False, libtcod.RENDERER_OPENGL)\n", (262, 347), True, 'import libtcodpy as libtcod\n'), ((1572, 1595), 'libtcodpy.console_flush', 'libtcod.console_flush', ([], {}), '()\n', (1593, 1595), True, 'import libtcodpy as libtcod\n'), ((3216, 3239), 'libtcodpy.console_flush', 'libtcod.console_flush', ([], {}), '()\n', (3237, 3239), True, 'import libtcodpy as libtcod\n'), ((3243, 3282), 'libtcodpy.console_wait_for_keypress', 'libtcod.console_wait_for_keypress', (['(True)'], {}), '(True)\n', (3276, 3282), True, 'import libtcodpy as libtcod\n'), ((1501, 1573), 'libtcodpy.console_put_char_ex', 'libtcod.console_put_char_ex', (['None', 'x', 'y', '(219)', 'Table[x][y]', 'libtcod.white'], {}), '(None, x, y, 219, Table[x][y], libtcod.white)\n', (1528, 1573), True, 'import libtcodpy as libtcod\n'), ((1760, 1773), 'random.randint', 'randint', (['(1)', '(4)'], {}), '(1, 4)\n', (1767, 1773), False, 'from random import randint\n'), ((2613, 2636), 'libtcodpy.console_flush', 'libtcod.console_flush', ([], {}), '()\n', (2634, 2636), True, 'import libtcodpy as libtcod\n'), ((3147, 3219), 'libtcodpy.console_put_char_ex', 'libtcod.console_put_char_ex', (['None', 'x', 'y', '(219)', 'Table[x][y]', 'libtcod.white'], {}), '(None, x, y, 219, Table[x][y], libtcod.white)\n', (3174, 3219), True, 'import libtcodpy as libtcod\n'), ((1852, 1865), 'random.randint', 'randint', (['(1)', '(4)'], {}), '(1, 4)\n', (1859, 1865), False, 'from random import randint\n'), ((2536, 2608), 'libtcodpy.console_put_char_ex', 'libtcod.console_put_char_ex', (['None', 'x', 'y', '(219)', 'Table[x][y]', 'libtcod.white'], {}), '(None, x, y, 219, Table[x][y], libtcod.white)\n', (2563, 2608), True, 'import libtcodpy as libtcod\n')] |
#####################################################################
##
## gradefiles-send.py
##
## Script to send grade files by email to enrolled students; the
## input grade file names should correspond to the user names of
## the students.
##
##
from email.mime.text import MIMEText # For creating a message string.
from subprocess import Popen, PIPE # For sending email on linux.
import sys # For command line arguments.
import os # For commands and file manipulation (walk, path, system).
#####################################################################
## Sending a simple email message.
##
def send(txt, courseNumber, task, sender, targets):
msg = MIMEText(txt)
msg["From"] = sender + "@bu.edu"
msg["To"] = ",".join([target + "@bu.edu" for target in targets])
msg["Cc"] = sender + "@bu.edu"
msg["Subject"] = "CS " + courseNumber + " " + task + " grade"
p = Popen(["/usr/sbin/sendmail", "-t"], stdin=PIPE)
p.communicate(bytes(msg.as_string(), 'UTF-8'))
#####################################################################
## Process the command line parameters.
##
if len(sys.argv) == 6\
and (int(sys.argv[1][0:3]) in range(100,1000))\
and sys.argv[2] in ['Fall', 'Spring']\
and int(sys.argv[3]) in range(2000,2100):
courseNumber = sys.argv[1] # Accepts course names like "591 X1."
season = sys.argv[2]
year = sys.argv[3]
task = sys.argv[4]
sender = sys.argv[5]
else:
print('\n Usage:\n\n % python gradefiles-send.py <###> <Fall|Spring> <YYYY> <task> <sender-username>\n')
exit()
#####################################################################
## Check for list of files.
##
if not os.path.exists('./data'):
print('No folder "data" containing grade files found. Exiting.')
exit()
#####################################################################
## Send the grade files.
##
for curdir, dirs, files in os.walk('./data/'):
for file in files:
txt = open('./data/'+file, 'r').read()
targets = file.split('.')[0].split("_")
send(txt, courseNumber, task, sender, targets)
print('Sent grade file to ' + str(targets) + '.')
#eof | [
"subprocess.Popen",
"os.path.exists",
"os.walk",
"email.mime.text.MIMEText"
] | [((1925, 1943), 'os.walk', 'os.walk', (['"""./data/"""'], {}), "('./data/')\n", (1932, 1943), False, 'import os\n'), ((679, 692), 'email.mime.text.MIMEText', 'MIMEText', (['txt'], {}), '(txt)\n', (687, 692), False, 'from email.mime.text import MIMEText\n'), ((908, 955), 'subprocess.Popen', 'Popen', (["['/usr/sbin/sendmail', '-t']"], {'stdin': 'PIPE'}), "(['/usr/sbin/sendmail', '-t'], stdin=PIPE)\n", (913, 955), False, 'from subprocess import Popen, PIPE\n'), ((1692, 1716), 'os.path.exists', 'os.path.exists', (['"""./data"""'], {}), "('./data')\n", (1706, 1716), False, 'import os\n')] |
#!/usr/bin/env python
import os
import os.path
import yaml
import time
import random
import multiprocessing
import RPi.GPIO as GPIO
from talk import say
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
from adafruit_servokit import ServoKit
Motor1 = {'EN': 27, 'input1': 19, 'input2': 16}
Motor2 = {'EN': 22, 'input1': 26, 'input2': 20}
for x in Motor1:
GPIO.setup(Motor1[x], GPIO.OUT)
GPIO.setup(Motor2[x], GPIO.OUT)
EN1 = GPIO.PWM(Motor1['EN'], 100)
EN2 = GPIO.PWM(Motor2['EN'], 100)
EN1.start(0)
EN2.start(0)
hand = ServoKit(channels=16)
ROOT_PATH = os.path.realpath(os.path.join(__file__, '..', '..'))
def readYaml():
with open('{}/src/configuration.yaml'.format(ROOT_PATH),'r+', encoding='utf8') as conf:
servo = yaml.load(conf, Loader=yaml.FullLoader)
return servo
def writeYaml(s=None):
with open('{}/src/configuration.yaml'.format(ROOT_PATH),'w', encoding='utf8') as conf:
if s==None:
yaml.dump(servo,conf)
else:
yaml.dump(s,conf)
servo = readYaml()
if servo == None:
with open('{}/src/configurationBackUp.yaml'.format(ROOT_PATH),'r+', encoding='utf8') as conf:
servoBackUp = yaml.load(conf, Loader=yaml.FullLoader)
writeYaml(servoBackUp)
servo = readYaml()
if servo == None:
print('close')
exit()
Initial = servo['Initial_Position']['I2C']
Current = servo['Current_Position']['I2C']
InitialGpio = servo['Initial_Position']['Gpio']
CurrentGpio = servo['Current_Position']['Gpio']
GpioPin = servo['Pin']['Gpio']
for i in range(0,6):
GPIO.setup(GpioPin[i], GPIO.OUT)
Servo = []
for i in range(0,6):
Servo.append(GPIO.PWM(GpioPin[i],50))
Servo[i].start(0)
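# changeDegree sweeps the listed I2C servo channels towards the requested angles
# in small steps, persisting the current pose to configuration.yaml so the robot
# can resume from a known position after a restart.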
def changeDegree(pin,newDegree,time1=0.05,update=5):
maxChange = 0
pinSize = len(pin)
for i in range(0,pinSize):
maxChange = max(abs(Current[pin[i]]-newDegree[i]),maxChange)
for deg in range(0,maxChange,update):
for i in range(0,pinSize):
if Current[pin[i]]<newDegree[i]:
Current[pin[i]] += update
elif Current[pin[i]]>newDegree[i]:
Current[pin[i]] -= update
for i in range(0,pinSize):
hand.servo[pin[i]].angle = Current[pin[i]]
servo['Current_Position']['I2C'][pin[i]] = Current[pin[i]]
writeYaml()
time.sleep(time1)
def takePosition():
changeDegree([7,8],[180,0])
changeDegree([0,1,2,3,4,5,6,7,8,9,10,11],[0,50,130,0,170,170,0,180,0,60,150,0])
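# changeDegreeGpio performs the same gradual sweep for the GPIO-driven servos,
# converting degrees to a 50 Hz PWM duty cycle (duty = deg / 18 + 2).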
def changeDegreeGpio(pin,degree,update,duration):
pinSize = len(pin)
for i in range(0,pinSize):
p = pin[i]
if CurrentGpio[p]>degree[i]:
update = -update
for deg in range(CurrentGpio[p],degree[i],update):
duty = deg/18
duty+=2
Servo[p].ChangeDutyCycle(duty)
time.sleep(duration)
CurrentGpio[p]=degree[i]
writeYaml()
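# Run drives the two DC motors through the H-bridge: a/b and c/d select the
# input pins of Motor1 and Motor2, while x sets the PWM duty cycle (speed).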
def Run(a, b, c, d, x):
GPIO.output(Motor1['input1'], GPIO.LOW)
GPIO.output(Motor1['input2'], GPIO.LOW)
GPIO.output(Motor2['input1'], GPIO.LOW)
GPIO.output(Motor2['input2'], GPIO.LOW)
if a==1:
GPIO.output(Motor1['input1'], GPIO.HIGH)
if b==1:
GPIO.output(Motor1['input2'], GPIO.HIGH)
if c==1:
GPIO.output(Motor2['input1'], GPIO.HIGH)
if d==1:
GPIO.output(Motor2['input2'], GPIO.HIGH)
EN2.ChangeDutyCycle(x)
EN1.ChangeDutyCycle(x)
def Stop():
Run(0,0,0,0,0)
def Start_Slow(a, b, c, d):
for i in range(0,100,20):
Run(a,b,c,d,i)
time.sleep(0.5)
def Stop_Slow(a,b,c,d):
for i in range(100,0,-20):
Run(a,b,c,d,i)
time.sleep(0.5)
def yes(times=3):
for i in range(0,times):
changeDegree([0],[30])
time.sleep(0.08)
changeDegree([0],[0])
time.sleep(0.08)
def no(times=3):
for i in range(0,times):
changeDegree([15],[70],5,0.05)
time.sleep(0.2)
changeDegree([15],[110],5,0.05)
time.sleep(0.2)
changeDegree([15],[90],5,0.05)
def move_head(times=3):
for i in range(0,times):
changeDegree([0],[20])
changeDegreeGpio([0],[80],5,0.05)
changeDegree([0],[0])
changeDegreeGpio([0],[100],5,0.05)
changeDegreeGpio([0],[90],10,0.01)
def random0():
r = random.randrange(1,10000000)%3
if(r==1):
changeDegree([0],[20])
changeDegree([0],[0])
elif(r==2):
changeDegreeGpio([0],[120],5,0.05)
changeDegreeGpio([0],[90],5,0.05)
else:
changeDegreeGpio([0],[60],5,0.05)
changeDegreeGpio([0],[90],5,0.05)
def random1():
r = random.randrange(1,3)
if(r==1):
changeDegree([0],[20])
changeDegree([0],[0])
changeDegree([3],[50])
changeDegree([9],[100])
changeDegree([9],[60])
changeDegree([3],[0])
elif(r==2):
changeDegree([0],[20])
changeDegree([0],[0])
changeDegree([4],[120])
changeDegree([10],[140])
changeDegree([10],[180])
changeDegree([4],[170])
else:
changeDegree([3,4],[50,120])
changeDegree([9,10],[100,140])
changeDegree([9,10],[60,180])
changeDegree([3,4],[0,180])
def random2():
changeDegree([3,4],[20,150])
pin = [7,8,9,10]
deg = [[160,0,60,100],[180,20,100,140]]
ok = [0,0,0,0]
select = [1,2,0,3,1,0,3,2,1,0,2,3,1,2,3,0,3,1,2,3,1,2,3,0,3,1]
for i in range(0,15):
r = select[i%len(select)]%4
print (' move ',r)
changeDegree([pin[r]],[deg[ok[r]][r]])
ok[r]^=1
takePosition()
def random3():
changeDegree([3,4],[20,150])
pin = [7,8,9,10]
deg = [[160,0,60,100],[180,20,100,140]]
ok = [0,0,0,0]
for i in range(0,15):
r = random.randrange(1,1000000)%4
print (' move ',r)
changeDegree([pin[r]],[deg[ok[r]][r]])
takePosition()
def randomCall(t):
changeDegree([3,4,5,6,7,8,9,10],[50,110,80,70,100,80,160,20])
pin = [5,6,7,8]
deg = [[80,50,100,70],[110,90,110,90]]
select = [89,93,472,347,2, 34, 134, 1937, 1983, 1739, 107, 894, 48, 28, 2048,589,689,123, 34,27,4,91,102,893,10283,53,1283,9485,1973,873,1973,0,10973]
ok = [0,0,0,0]
ln = len(select)
for i in range(0,t*3):
r = select[i%16]%4
changeDegree([pin[r]],[deg[ok[r]][r]])
ok[r]^=1
takePosition()
def expression(t):
print (' i got value of t is : ',t)
if(t==0):
random0()
elif(t==1):
random1()
elif(t==2):
random2()
elif(t==3):
random3()
else:
randomCall(t)
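# speakOnline only plays a gesture for the given length; speakOffline also runs
# the gesture animation in a separate process while the text is spoken aloud,
# scaling the gesture count with the length of the speech.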
def speakOnline(t):
expression(t)
def speakOffline(speech):
t = int(len(speech)/15)
print ('Offline t value is : ',t)
p1 = multiprocessing.Process(target=expression,args=[t])
p1.start()
say(speech)
| [
"random.randrange",
"yaml.dump",
"RPi.GPIO.setup",
"RPi.GPIO.output",
"multiprocessing.Process",
"RPi.GPIO.setwarnings",
"adafruit_servokit.ServoKit",
"os.path.join",
"talk.say",
"RPi.GPIO.PWM",
"yaml.load",
"time.sleep",
"RPi.GPIO.setmode"
] | [((154, 177), 'RPi.GPIO.setwarnings', 'GPIO.setwarnings', (['(False)'], {}), '(False)\n', (170, 177), True, 'import RPi.GPIO as GPIO\n'), ((178, 200), 'RPi.GPIO.setmode', 'GPIO.setmode', (['GPIO.BCM'], {}), '(GPIO.BCM)\n', (190, 200), True, 'import RPi.GPIO as GPIO\n'), ((434, 461), 'RPi.GPIO.PWM', 'GPIO.PWM', (["Motor1['EN']", '(100)'], {}), "(Motor1['EN'], 100)\n", (442, 461), True, 'import RPi.GPIO as GPIO\n'), ((472, 499), 'RPi.GPIO.PWM', 'GPIO.PWM', (["Motor2['EN']", '(100)'], {}), "(Motor2['EN'], 100)\n", (480, 499), True, 'import RPi.GPIO as GPIO\n'), ((562, 583), 'adafruit_servokit.ServoKit', 'ServoKit', ([], {'channels': '(16)'}), '(channels=16)\n', (570, 583), False, 'from adafruit_servokit import ServoKit\n'), ((359, 390), 'RPi.GPIO.setup', 'GPIO.setup', (['Motor1[x]', 'GPIO.OUT'], {}), '(Motor1[x], GPIO.OUT)\n', (369, 390), True, 'import RPi.GPIO as GPIO\n'), ((395, 426), 'RPi.GPIO.setup', 'GPIO.setup', (['Motor2[x]', 'GPIO.OUT'], {}), '(Motor2[x], GPIO.OUT)\n', (405, 426), True, 'import RPi.GPIO as GPIO\n'), ((614, 648), 'os.path.join', 'os.path.join', (['__file__', '""".."""', '""".."""'], {}), "(__file__, '..', '..')\n", (626, 648), False, 'import os\n'), ((1603, 1635), 'RPi.GPIO.setup', 'GPIO.setup', (['GpioPin[i]', 'GPIO.OUT'], {}), '(GpioPin[i], GPIO.OUT)\n', (1613, 1635), True, 'import RPi.GPIO as GPIO\n'), ((3000, 3039), 'RPi.GPIO.output', 'GPIO.output', (["Motor1['input1']", 'GPIO.LOW'], {}), "(Motor1['input1'], GPIO.LOW)\n", (3011, 3039), True, 'import RPi.GPIO as GPIO\n'), ((3044, 3083), 'RPi.GPIO.output', 'GPIO.output', (["Motor1['input2']", 'GPIO.LOW'], {}), "(Motor1['input2'], GPIO.LOW)\n", (3055, 3083), True, 'import RPi.GPIO as GPIO\n'), ((3088, 3127), 'RPi.GPIO.output', 'GPIO.output', (["Motor2['input1']", 'GPIO.LOW'], {}), "(Motor2['input1'], GPIO.LOW)\n", (3099, 3127), True, 'import RPi.GPIO as GPIO\n'), ((3132, 3171), 'RPi.GPIO.output', 'GPIO.output', (["Motor2['input2']", 'GPIO.LOW'], {}), "(Motor2['input2'], GPIO.LOW)\n", (3143, 3171), True, 'import RPi.GPIO as GPIO\n'), ((4698, 4720), 'random.randrange', 'random.randrange', (['(1)', '(3)'], {}), '(1, 3)\n', (4714, 4720), False, 'import random\n'), ((6805, 6857), 'multiprocessing.Process', 'multiprocessing.Process', ([], {'target': 'expression', 'args': '[t]'}), '(target=expression, args=[t])\n', (6828, 6857), False, 'import multiprocessing\n'), ((6876, 6887), 'talk.say', 'say', (['speech'], {}), '(speech)\n', (6879, 6887), False, 'from talk import say\n'), ((775, 814), 'yaml.load', 'yaml.load', (['conf'], {'Loader': 'yaml.FullLoader'}), '(conf, Loader=yaml.FullLoader)\n', (784, 814), False, 'import yaml\n'), ((1206, 1245), 'yaml.load', 'yaml.load', (['conf'], {'Loader': 'yaml.FullLoader'}), '(conf, Loader=yaml.FullLoader)\n', (1215, 1245), False, 'import yaml\n'), ((1685, 1709), 'RPi.GPIO.PWM', 'GPIO.PWM', (['GpioPin[i]', '(50)'], {}), '(GpioPin[i], 50)\n', (1693, 1709), True, 'import RPi.GPIO as GPIO\n'), ((2371, 2388), 'time.sleep', 'time.sleep', (['time1'], {}), '(time1)\n', (2381, 2388), False, 'import time\n'), ((3194, 3234), 'RPi.GPIO.output', 'GPIO.output', (["Motor1['input1']", 'GPIO.HIGH'], {}), "(Motor1['input1'], GPIO.HIGH)\n", (3205, 3234), True, 'import RPi.GPIO as GPIO\n'), ((3256, 3296), 'RPi.GPIO.output', 'GPIO.output', (["Motor1['input2']", 'GPIO.HIGH'], {}), "(Motor1['input2'], GPIO.HIGH)\n", (3267, 3296), True, 'import RPi.GPIO as GPIO\n'), ((3318, 3358), 'RPi.GPIO.output', 'GPIO.output', (["Motor2['input1']", 'GPIO.HIGH'], {}), "(Motor2['input1'], GPIO.HIGH)\n", (3329, 3358), True, 
'import RPi.GPIO as GPIO\n'), ((3380, 3420), 'RPi.GPIO.output', 'GPIO.output', (["Motor2['input2']", 'GPIO.HIGH'], {}), "(Motor2['input2'], GPIO.HIGH)\n", (3391, 3420), True, 'import RPi.GPIO as GPIO\n'), ((3600, 3615), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (3610, 3615), False, 'import time\n'), ((3708, 3723), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (3718, 3723), False, 'import time\n'), ((3812, 3828), 'time.sleep', 'time.sleep', (['(0.08)'], {}), '(0.08)\n', (3822, 3828), False, 'import time\n'), ((3867, 3883), 'time.sleep', 'time.sleep', (['(0.08)'], {}), '(0.08)\n', (3877, 3883), False, 'import time\n'), ((3978, 3993), 'time.sleep', 'time.sleep', (['(0.2)'], {}), '(0.2)\n', (3988, 3993), False, 'import time\n'), ((4042, 4057), 'time.sleep', 'time.sleep', (['(0.2)'], {}), '(0.2)\n', (4052, 4057), False, 'import time\n'), ((4357, 4386), 'random.randrange', 'random.randrange', (['(1)', '(10000000)'], {}), '(1, 10000000)\n', (4373, 4386), False, 'import random\n'), ((980, 1002), 'yaml.dump', 'yaml.dump', (['servo', 'conf'], {}), '(servo, conf)\n', (989, 1002), False, 'import yaml\n'), ((1028, 1046), 'yaml.dump', 'yaml.dump', (['s', 'conf'], {}), '(s, conf)\n', (1037, 1046), False, 'import yaml\n'), ((2896, 2916), 'time.sleep', 'time.sleep', (['duration'], {}), '(duration)\n', (2906, 2916), False, 'import time\n'), ((5832, 5860), 'random.randrange', 'random.randrange', (['(1)', '(1000000)'], {}), '(1, 1000000)\n', (5848, 5860), False, 'import random\n')] |
from uuid import uuid4
from sqlalchemy import Index, Column, Text, Table, ForeignKey
from sqlalchemy.orm import object_session
from onegov.core.orm import Base
from onegov.core.orm.types import UUID
spoken_association_table = Table(
'spoken_lang_association',
Base.metadata,
Column(
'translator_id',
UUID,
ForeignKey('translators.id'),
nullable=False),
Column('lang_id', UUID, ForeignKey('languages.id'), nullable=False)
)
written_association_table = Table(
'written_lang_association',
Base.metadata,
Column(
'translator_id',
UUID,
ForeignKey('translators.id'),
nullable=False),
Column('lang_id', UUID, ForeignKey('languages.id'), nullable=False)
)
mother_tongue_association_table = Table(
'mother_tongue_association',
Base.metadata,
Column(
'translator_id',
UUID,
ForeignKey('translators.id'),
nullable=False),
Column('lang_id', UUID, ForeignKey('languages.id'), nullable=False)
)
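# The three association tables above link translators to languages for the
# spoken, written and mother-tongue relationships respectively.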
class Language(Base):
__tablename__ = 'languages'
__table_args__ = (
Index('unique_name', 'name', unique=True),
)
id = Column(UUID, primary_key=True, default=uuid4)
name = Column(Text, nullable=False)
@property
def speakers_count(self):
session = object_session(self)
return session.query(
spoken_association_table).filter_by(lang_id=self.id).count()
@property
def writers_count(self):
session = object_session(self)
return session.query(
written_association_table).filter_by(lang_id=self.id).count()
@property
def native_speakers_count(self):
"""Having it as mother tongue..."""
session = object_session(self)
return session.query(
mother_tongue_association_table).filter_by(lang_id=self.id).count()
@property
def deletable(self):
return (
self.speakers_count
+ self.writers_count
+ self.native_speakers_count
) == 0
| [
"sqlalchemy.orm.object_session",
"sqlalchemy.ForeignKey",
"sqlalchemy.Column",
"sqlalchemy.Index"
] | [((1180, 1225), 'sqlalchemy.Column', 'Column', (['UUID'], {'primary_key': '(True)', 'default': 'uuid4'}), '(UUID, primary_key=True, default=uuid4)\n', (1186, 1225), False, 'from sqlalchemy import Index, Column, Text, Table, ForeignKey\n'), ((1237, 1265), 'sqlalchemy.Column', 'Column', (['Text'], {'nullable': '(False)'}), '(Text, nullable=False)\n', (1243, 1265), False, 'from sqlalchemy import Index, Column, Text, Table, ForeignKey\n'), ((346, 374), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""translators.id"""'], {}), "('translators.id')\n", (356, 374), False, 'from sqlalchemy import Index, Column, Text, Table, ForeignKey\n'), ((429, 455), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""languages.id"""'], {}), "('languages.id')\n", (439, 455), False, 'from sqlalchemy import Index, Column, Text, Table, ForeignKey\n'), ((621, 649), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""translators.id"""'], {}), "('translators.id')\n", (631, 649), False, 'from sqlalchemy import Index, Column, Text, Table, ForeignKey\n'), ((704, 730), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""languages.id"""'], {}), "('languages.id')\n", (714, 730), False, 'from sqlalchemy import Index, Column, Text, Table, ForeignKey\n'), ((903, 931), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""translators.id"""'], {}), "('translators.id')\n", (913, 931), False, 'from sqlalchemy import Index, Column, Text, Table, ForeignKey\n'), ((986, 1012), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""languages.id"""'], {}), "('languages.id')\n", (996, 1012), False, 'from sqlalchemy import Index, Column, Text, Table, ForeignKey\n'), ((1121, 1162), 'sqlalchemy.Index', 'Index', (['"""unique_name"""', '"""name"""'], {'unique': '(True)'}), "('unique_name', 'name', unique=True)\n", (1126, 1162), False, 'from sqlalchemy import Index, Column, Text, Table, ForeignKey\n'), ((1329, 1349), 'sqlalchemy.orm.object_session', 'object_session', (['self'], {}), '(self)\n', (1343, 1349), False, 'from sqlalchemy.orm import object_session\n'), ((1515, 1535), 'sqlalchemy.orm.object_session', 'object_session', (['self'], {}), '(self)\n', (1529, 1535), False, 'from sqlalchemy.orm import object_session\n'), ((1754, 1774), 'sqlalchemy.orm.object_session', 'object_session', (['self'], {}), '(self)\n', (1768, 1774), False, 'from sqlalchemy.orm import object_session\n')] |
"""
tt URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
# Uncomment next two lines to enable admin:
from django.contrib import admin
from django.urls import path, include
from users import views as user_views
from django.contrib.auth import views as auth_views
from upload import views as upload_views
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
# Uncomment the next line to enable the admin:
path('admin/', admin.site.urls),
path('', include('blog.urls')),
path('register/', user_views.register, name='register'),
path('login/',auth_views.LoginView.as_view(template_name='users/login.html'),name='login'),
path('logout/',auth_views.LogoutView.as_view(template_name='users/logout.html') ,name='logout'),
path('profile/', user_views.profile, name='profile'),
path('book/',upload_views.book_list,name='book_list'),
path('book/upload',upload_views.upload_book,name='upload_book'),
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"django.urls.include",
"django.contrib.auth.views.LogoutView.as_view",
"django.conf.urls.static.static",
"django.contrib.auth.views.LoginView.as_view",
"django.urls.path"
] | [((1026, 1057), 'django.urls.path', 'path', (['"""admin/"""', 'admin.site.urls'], {}), "('admin/', admin.site.urls)\n", (1030, 1057), False, 'from django.urls import path, include\n'), ((1099, 1154), 'django.urls.path', 'path', (['"""register/"""', 'user_views.register'], {'name': '"""register"""'}), "('register/', user_views.register, name='register')\n", (1103, 1154), False, 'from django.urls import path, include\n'), ((1357, 1409), 'django.urls.path', 'path', (['"""profile/"""', 'user_views.profile'], {'name': '"""profile"""'}), "('profile/', user_views.profile, name='profile')\n", (1361, 1409), False, 'from django.urls import path, include\n'), ((1415, 1470), 'django.urls.path', 'path', (['"""book/"""', 'upload_views.book_list'], {'name': '"""book_list"""'}), "('book/', upload_views.book_list, name='book_list')\n", (1419, 1470), False, 'from django.urls import path, include\n'), ((1474, 1539), 'django.urls.path', 'path', (['"""book/upload"""', 'upload_views.upload_book'], {'name': '"""upload_book"""'}), "('book/upload', upload_views.upload_book, name='upload_book')\n", (1478, 1539), False, 'from django.urls import path, include\n'), ((1581, 1642), 'django.conf.urls.static.static', 'static', (['settings.MEDIA_URL'], {'document_root': 'settings.MEDIA_ROOT'}), '(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n', (1587, 1642), False, 'from django.conf.urls.static import static\n'), ((1072, 1092), 'django.urls.include', 'include', (['"""blog.urls"""'], {}), "('blog.urls')\n", (1079, 1092), False, 'from django.urls import path, include\n'), ((1174, 1236), 'django.contrib.auth.views.LoginView.as_view', 'auth_views.LoginView.as_view', ([], {'template_name': '"""users/login.html"""'}), "(template_name='users/login.html')\n", (1202, 1236), True, 'from django.contrib.auth import views as auth_views\n'), ((1271, 1335), 'django.contrib.auth.views.LogoutView.as_view', 'auth_views.LogoutView.as_view', ([], {'template_name': '"""users/logout.html"""'}), "(template_name='users/logout.html')\n", (1300, 1335), True, 'from django.contrib.auth import views as auth_views\n')] |
import os
import click
os.environ["GIT_PYTHON_REFRESH"] = "quiet"
@click.group()
def git():
pass
| [
"click.group"
] | [((71, 84), 'click.group', 'click.group', ([], {}), '()\n', (82, 84), False, 'import click\n')] |
import glob
import os
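# Renames files downloaded with Twitter-style suffixes (*.jpg_large, *.png_large,
# *.jpg_orig) in F:/Downloads back to plain .jpg files.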
def main():
os.chdir("F:/Downloads")
extensions = ["*.jpg_large", "*.png_large", "*.jpg_orig"]
file_list = list()
for extension in extensions:
file_list = file_list + glob.glob(extension)
for file in file_list:
for extension in extensions:
new_extension = extension.replace('*', '')
if file.endswith(new_extension):
new_name = file.replace(new_extension, '') + ".jpg"
os.rename(file, new_name)
print("Done!")
if __name__ == "__main__":
main()
| [
"os.chdir",
"os.rename",
"glob.glob"
] | [((40, 64), 'os.chdir', 'os.chdir', (['"""F:/Downloads"""'], {}), "('F:/Downloads')\n", (48, 64), False, 'import os\n'), ((216, 236), 'glob.glob', 'glob.glob', (['extension'], {}), '(extension)\n', (225, 236), False, 'import glob\n'), ((486, 511), 'os.rename', 'os.rename', (['file', 'new_name'], {}), '(file, new_name)\n', (495, 511), False, 'import os\n')] |
# Copyright 2015-2018 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from azure_common import BaseTest, arm_template
class RouteTableTest(BaseTest):
route_table_name = 'cctestroutetable'
vnet_name = 'ccroutetablevnet'
allowed_subnet_name = 'cctestsubnet1'
disallowed_subnet_name = 'cctestsubnet2'
@staticmethod
def _subnet_id_suffix(subnet):
return '{}/subnets/{}'.format(RouteTableTest.vnet_name, subnet)
def test_route_table_schema_validate(self):
with self.sign_out_patch():
p = self.load_policy({
'name': 'test-azure-route-table',
'resource': 'azure.routetable'
}, validate=True)
self.assertTrue(p)
@arm_template('route-table-and-vnet.json')
def test_find_route_table_by_name(self):
p = self.load_policy({
'name': 'test-find-route-table-by-name',
'resource': 'azure.routetable',
'filters': [
{
'type': 'value',
'key': 'name',
'op': 'eq',
'value': RouteTableTest.route_table_name
}
]
})
resources = p.run()
self._assert_only_route_table_in_resources(resources)
@arm_template('route-table-and-vnet.json')
def test_detect_route_table_is_routing_to_correct_subnet(self):
p = self.load_policy({
'name': 'test-detect-route-table-is-routing-to-correct-subnet',
'resource': 'azure.routetable',
'filters': [
{
'type': 'value',
'key': 'name',
'op': 'eq',
'value': RouteTableTest.route_table_name
},
{
'type': 'value',
'key': 'properties.subnets[?ends_with(id, \'{}\')] | [0]'.format(
RouteTableTest._subnet_id_suffix(RouteTableTest.allowed_subnet_name)
),
'value': 'not-null'
}
]
})
resources = p.run()
self._assert_only_route_table_in_resources(resources)
@arm_template('route-table-and-vnet.json')
def test_detect_route_table_not_routing_to_incorrect_subnet(self):
p = self.load_policy({
'name': 'test-detect-route-table-not-routing-to-incorrect-subnet',
'resource': 'azure.routetable',
'filters': [
{
'type': 'value',
'key': 'name',
'op': 'eq',
'value': RouteTableTest.route_table_name
},
{
'type': 'value',
'key': 'properties.subnets[?ends_with(id, \'{}\')] | [0]'.format(
RouteTableTest._subnet_id_suffix(RouteTableTest.disallowed_subnet_name)
),
'value': 'not-null'
}
]
})
resources = p.run()
self.assertEqual(len(resources), 0, "A route table is routing to a disallowed subnet")
@arm_template('route-table-and-vnet.json')
def test_detect_route_only_routes_to_specific_subnets(self):
p = self.load_policy({
'name': 'test-detect-route-only-routes-to-specific-subnets',
'resource': 'azure.routetable',
'filters': [
{
'type': 'value',
'key': 'name',
'op': 'eq',
'value': RouteTableTest.route_table_name
},
{
'type': 'value',
'key': 'properties.subnets[?ends_with(id, \'{}\')] | [0]'.format(
RouteTableTest._subnet_id_suffix(RouteTableTest.allowed_subnet_name)
),
'value': 'not-null'
},
{
'type': 'value',
'key': 'length(properties.subnets)',
'op': 'eq',
'value': 1
}
]
})
resources = p.run()
self._assert_only_route_table_in_resources(resources)
def _assert_only_route_table_in_resources(self, resources):
self.assertEqual(len(resources), 1, "Only one route table should be found")
route_table = resources[0]
self.assertEqual(RouteTableTest.route_table_name, route_table.get('name'),
"The wrong route table was found")
properties = route_table.get('properties')
self.assertIsNotNone(properties, "Missing properties")
subnets = properties.get('subnets')
self.assertIsNotNone(subnets, "Missing subnets")
self.assertEqual(1, len(subnets), "There should only be one subnet")
subnet = subnets[0]
self.assertIn(RouteTableTest.allowed_subnet_name, subnet.get('id'), "Incorrect subnet")
| [
"azure_common.arm_template"
] | [((1248, 1289), 'azure_common.arm_template', 'arm_template', (['"""route-table-and-vnet.json"""'], {}), "('route-table-and-vnet.json')\n", (1260, 1289), False, 'from azure_common import BaseTest, arm_template\n'), ((1812, 1853), 'azure_common.arm_template', 'arm_template', (['"""route-table-and-vnet.json"""'], {}), "('route-table-and-vnet.json')\n", (1824, 1853), False, 'from azure_common import BaseTest, arm_template\n'), ((2738, 2779), 'azure_common.arm_template', 'arm_template', (['"""route-table-and-vnet.json"""'], {}), "('route-table-and-vnet.json')\n", (2750, 2779), False, 'from azure_common import BaseTest, arm_template\n'), ((3706, 3747), 'azure_common.arm_template', 'arm_template', (['"""route-table-and-vnet.json"""'], {}), "('route-table-and-vnet.json')\n", (3718, 3747), False, 'from azure_common import BaseTest, arm_template\n')] |
# Copyright 2016 Intel Corporation
# Copyright 2017 Wind River
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
################################################################################
# LIBRARIES & DEPENDENCIES #
################################################################################
import hashlib
import logging
import json
from collections import OrderedDict
from sawtooth_sdk.processor.exceptions import InvalidTransaction
from sawtooth_sdk.processor.exceptions import InternalError
from sawtooth_sdk.processor.handler import TransactionHandler
LOGGER = logging.getLogger(__name__)
################################################################################
# HANDLER OBJ #
################################################################################
class ArtifactTransactionHandler:
"""
Class for handling the Transaction Family : Artifact
Attributes:
namespace_prefix (str): The namespace prefix of the transaction family
"""
def __init__(self, namespace_prefix):
"""
Constructs the ArtifactTransactionHandler object.
Args:
namespace_prefix (str):
                The namespace prefix of the transaction family
"""
self._namespace_prefix = namespace_prefix
@property
def family_name(self):
"""
type: str
Returns the family name of the handler object.
"""
return "artifact"
@property
def family_versions(self):
"""
type: list of str
Returns the family version of the handler object.
"""
return ["1.0"]
@property
def encodings(self):
"""
type: list of str
Returns the encoding scheme used for the data for the handler object.
"""
return ["csv-utf8"]
@property
def namespaces(self):
"""
type: list of str
Returns the namespaces associating with the handler object.
"""
return [self._namespace_prefix]
################################################################################
# FUNCTIONS #
################################################################################
def apply(self, transaction, context):
"""
        Applies the payload from the transaction onto the state storage.
Args:
            transaction (Transaction): The transaction containing the payload
context (State): The current state of the ledger
Returns:
type: State
The new state of the ledger, which includes the data from the
transaction, is returned to be stored on the state storage.
Raises:
InvalidTransaction:
* If deserialization for payload from transaction failed
* If "create" was called on non-unique uuid
* If "amend" was called on non-existing uuid
* If "Add..." were called on non-existing uuid
* If invalid operation was called
InternalError:
* If deserialization of State.data failed
"""
# Parsing required fields from transaction payload
try:
payload = json.loads(transaction.payload.decode())
artifact_id = payload["uuid"]
artifact_alias = payload["alias"]
artifact_name = payload["name"]
artifact_type = payload["content_type"]
artifact_checksum = payload["checksum"]
artifact_label = payload["label"]
artifact_openchain = payload["openchain"]
action = payload["action"]
prev = payload["prev_block"]
cur = payload["cur_block"]
timestamp = payload["timestamp"]
artifact_list = payload["artifact_list"]
uri_list = payload["uri_list"]
except ValueError:
raise InvalidTransaction("Invalid payload serialization")
# Soft sanity check and loading required data
validate_transaction(artifact_id, action)
data_address = make_artifact_address(self._namespace_prefix,
artifact_id)
state_entries = context.get_state([data_address])
# Hard sanity check before creating final payload for the state storage
if len(state_entries) != 0:
try:
stored_artifact = json.loads(state_entries[0].data.decode())
stored_artifact_id = stored_artifact["uuid"]
except ValueError:
raise InternalError("Failed to deserialize data.")
else:
stored_artifact_id = stored_artifact = None
if action == "create" and stored_artifact_id is not None:
raise InvalidTransaction("Invalid Action-artifact already exists.")
elif action == "create":
artifact = create_artifact(artifact_id, artifact_alias,
artifact_name, artifact_type, artifact_checksum,
artifact_label, artifact_openchain,
prev, cur, timestamp)
elif action == "amend" and stored_artifact_id is not None:
artifact = create_artifact(artifact_id, artifact_alias,
artifact_name, artifact_type, artifact_checksum,
artifact_label, artifact_openchain,
prev, cur, timestamp, artifact_list, uri_list)
elif action == "AddArtifact" or action == "AddURI":
if stored_artifact_id is None:
raise InvalidTransaction(
"Invalid Action-requires an existing artifact."
)
artifact = create_artifact(artifact_id, artifact_alias,
artifact_name, artifact_type, artifact_checksum,
artifact_label, artifact_openchain,
prev, cur, timestamp,
artifact_list, uri_list)
# Adding the final payload to the state storage
data = json.dumps(artifact).encode()
addresses = context.set_state({data_address:data})
return addresses
################################################################################
# HELPER FUNCTIONS #
################################################################################
def create_artifact(artifact_id, artifact_alias, artifact_name, artifact_type,
artifact_checksum, artifact_label, artifact_openchain,
prev, cur, timestamp, artifact_list=[], uri_list=[]):
"""
Constructs the payload to be stored in the state storage.
Args:
        artifact_id (str): The uuid of the artifact
artifact_alias (str): The alias of the artifact
artifact_name (str): The name of the artifact
artifact_type (str): The type of the artifact
artifact_checksum (str): The checksum of the artifact
artifact_label (str): The label of the artifact
artifact_openchain (str): The openchain of the artifact
prev (str): The previous block id of the transaction (default "0")
cur (str): the current block id of the transaction
timestamp (str): The UTC time for when the transaction was submitted
artifact_list (list of dict):
The list of the artifact uuid associated with the artifact
(default [])
uri_list (list of dict):
The list of the uri associated with the artifact (default [])
Returns:
type: dict
        The dictionary containing all the parameters is created and returned to
be stored on the state storage.
"""
return {
"uuid" : artifact_id,
"alias" : artifact_alias,
"name" : artifact_name,
"content_type" : artifact_type,
"checksum" : artifact_checksum,
"label" : artifact_label,
"openchain" : artifact_openchain,
"prev_block" : prev,
"cur_block" : cur,
"timestamp" : timestamp,
"artifact_list" : artifact_list,
"uri_list" : uri_list
}
def validate_transaction(artifact_id, action):
"""
    Performs a soft sanity check up front so that obviously invalid transactions
    fail fast instead of raising avoidable exceptions later.
Args:
artifact_id (str): The uuid of the artifact
action (str): The command to be performed
Raises:
InvalidTransaction:
If the uuid or the action are not passed in or the
action is not a valid action.
"""
if not artifact_id:
raise InvalidTransaction("Artifact ID is required")
if not action:
raise InvalidTransaction("Action is required")
if action not in ("AddArtifact", "create", "AddURI", "amend"):
raise InvalidTransaction("Invalid action: {}".format(action))
def make_artifact_address(namespace_prefix, artifact_id):
"""
Creates an artifact address which will be used to recover the associated
UUID if the artifact already exists in the state storage; or, used as a key to
store the new data into the state storage.
Args:
namespace_prefix (str):
The prefix associating with the transaction family
artifact_id (str): The uuid of the artifact
Returns:
type: str
The address-to-be, which associates the uuid and the namespace prefix.
"""
return namespace_prefix + \
hashlib.sha512(artifact_id.encode("utf-8")).hexdigest()[:64]
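# Usage sketch (hypothetical prefix and uuid, shown only to illustrate the layout):
#
#   make_artifact_address("5c1c32", "abc-123")
#   # -> "5c1c32" + hashlib.sha512(b"abc-123").hexdigest()[:64]
#
# i.e. the family namespace prefix followed by 64 hex characters of the uuid hash,
# giving Sawtooth's 70-character address format when the prefix is 6 characters.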
def _display(msg):
"""
Logs the message to the debug logger.
Args:
msg (str): The message that is to be logged into the debug logger
"""
n = msg.count("\n")
if n > 0:
msg = msg.split("\n")
length = max(len(line) for line in msg)
else:
length = len(msg)
msg = [msg]
LOGGER.debug("+" + (length + 2) * "-" + "+")
for line in msg:
LOGGER.debug("+ " + line.center(length) + " +")
LOGGER.debug("+" + (length + 2) * "-" + "+")
################################################################################
# #
################################################################################
| [
"logging.getLogger",
"sawtooth_sdk.processor.exceptions.InvalidTransaction",
"json.dumps",
"sawtooth_sdk.processor.exceptions.InternalError"
] | [((1207, 1234), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1224, 1234), False, 'import logging\n'), ((10040, 10085), 'sawtooth_sdk.processor.exceptions.InvalidTransaction', 'InvalidTransaction', (['"""Artifact ID is required"""'], {}), "('Artifact ID is required')\n", (10058, 10085), False, 'from sawtooth_sdk.processor.exceptions import InvalidTransaction\n'), ((10119, 10159), 'sawtooth_sdk.processor.exceptions.InvalidTransaction', 'InvalidTransaction', (['"""Action is required"""'], {}), "('Action is required')\n", (10137, 10159), False, 'from sawtooth_sdk.processor.exceptions import InvalidTransaction\n'), ((5906, 5967), 'sawtooth_sdk.processor.exceptions.InvalidTransaction', 'InvalidTransaction', (['"""Invalid Action-artifact already exists."""'], {}), "('Invalid Action-artifact already exists.')\n", (5924, 5967), False, 'from sawtooth_sdk.processor.exceptions import InvalidTransaction\n'), ((4954, 5005), 'sawtooth_sdk.processor.exceptions.InvalidTransaction', 'InvalidTransaction', (['"""Invalid payload serialization"""'], {}), "('Invalid payload serialization')\n", (4972, 5005), False, 'from sawtooth_sdk.processor.exceptions import InvalidTransaction\n'), ((7256, 7276), 'json.dumps', 'json.dumps', (['artifact'], {}), '(artifact)\n', (7266, 7276), False, 'import json\n'), ((5692, 5736), 'sawtooth_sdk.processor.exceptions.InternalError', 'InternalError', (['"""Failed to deserialize data."""'], {}), "('Failed to deserialize data.')\n", (5705, 5736), False, 'from sawtooth_sdk.processor.exceptions import InternalError\n'), ((6750, 6817), 'sawtooth_sdk.processor.exceptions.InvalidTransaction', 'InvalidTransaction', (['"""Invalid Action-requires an existing artifact."""'], {}), "('Invalid Action-requires an existing artifact.')\n", (6768, 6817), False, 'from sawtooth_sdk.processor.exceptions import InvalidTransaction\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
STEP 2
Takes the list of urls in the json files and downloads the HTML files to the local drive
Start with: scrapy runspider ReviewsCollector.py
"""
import scrapy
import json
class ReviewsCollector(scrapy.Spider):
    # scrapy requires every spider to define a name; "reviews" is an assumed placeholder
    name = "reviews"
def start_requests(self):
with open("data/books.json") as f:
self.data = json.load(f)
for item in self.data:
            if item['url'] is not None:
yield scrapy.Request(url=item['url'], headers={'Referer':'http://www.google.com/'}, callback=self.parse)
def parse(self, response):
filename = response.url.split("/")[-1] + '.html'
with open('data/reviews/' + filename, 'wb+') as f:
f.write(response.body) | [
"json.load",
"scrapy.Request"
] | [((362, 374), 'json.load', 'json.load', (['f'], {}), '(f)\n', (371, 374), False, 'import json\n'), ((482, 585), 'scrapy.Request', 'scrapy.Request', ([], {'url': "item['url']", 'headers': "{'Referer': 'http://www.google.com/'}", 'callback': 'self.parse'}), "(url=item['url'], headers={'Referer':\n 'http://www.google.com/'}, callback=self.parse)\n", (496, 585), False, 'import scrapy\n')] |
"""
homeassistant.components.device_tracker.owntracks
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
OwnTracks platform for the device tracker.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/device_tracker.owntracks/
"""
import json
import logging
import homeassistant.components.mqtt as mqtt
DEPENDENCIES = ['mqtt']
LOCATION_TOPIC = 'owntracks/+/+'
def setup_scanner(hass, config, see):
""" Set up a OwnTracksks tracker. """
def owntracks_location_update(topic, payload, qos):
""" MQTT message received. """
# Docs on available data:
# http://owntracks.org/booklet/tech/json/#_typelocation
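        # An illustrative payload of that type (values are made up); only "_type",
        # "lat" and "lon" are required below, "acc" and "batt" are optional extras:
        #   {"_type": "location", "lat": 52.52, "lon": 13.40, "acc": 12, "batt": 87}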
try:
data = json.loads(payload)
except ValueError:
# If invalid JSON
logging.getLogger(__name__).error(
'Unable to parse payload as JSON: %s', payload)
return
if not isinstance(data, dict) or data.get('_type') != 'location':
return
parts = topic.split('/')
kwargs = {
'dev_id': '{}_{}'.format(parts[1], parts[2]),
'host_name': parts[1],
'gps': (data['lat'], data['lon']),
}
if 'acc' in data:
kwargs['gps_accuracy'] = data['acc']
if 'batt' in data:
kwargs['battery'] = data['batt']
see(**kwargs)
mqtt.subscribe(hass, LOCATION_TOPIC, owntracks_location_update, 1)
return True
| [
"logging.getLogger",
"json.loads",
"homeassistant.components.mqtt.subscribe"
] | [((1410, 1476), 'homeassistant.components.mqtt.subscribe', 'mqtt.subscribe', (['hass', 'LOCATION_TOPIC', 'owntracks_location_update', '(1)'], {}), '(hass, LOCATION_TOPIC, owntracks_location_update, 1)\n', (1424, 1476), True, 'import homeassistant.components.mqtt as mqtt\n'), ((731, 750), 'json.loads', 'json.loads', (['payload'], {}), '(payload)\n', (741, 750), False, 'import json\n'), ((820, 847), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (837, 847), False, 'import logging\n')] |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Created by Roger on 2019-09-10
# Mostly by AllenNLP
import logging
import math
from typing import Any, Dict, List, Optional, Tuple
import torch
import torch.nn.functional as F
from allennlp.data import Vocabulary
from allennlp.models.model import Model
from allennlp.modules import FeedForward, Pruner
from allennlp.modules import Seq2SeqEncoder, TimeDistributed, TextFieldEmbedder
from allennlp.modules.seq2seq_encoders import IntraSentenceAttentionEncoder
from allennlp.modules.similarity_functions import DotProductSimilarity
from allennlp.modules.span_extractors import SelfAttentiveSpanExtractor, EndpointSpanExtractor
from allennlp.modules.token_embedders import Embedding
from allennlp.nn import util, InitializerApplicator, RegularizerApplicator
from allennlp.training.metrics import Average
from overrides import overrides
from torch.nn import BCEWithLogitsLoss
from src.metrics.event_coref_scores import EventCorefScores
from src.metrics.mention_f1 import TopSpanMentionTypeF1
from src.utils.cluster_decoding_utils import node_decode
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
@Model.register("end-to-end-event-coreference")
class End2EndEventCoreferenceResolver(Model):
"""
    This ``Model`` implements the coreference resolution model described in "End-to-end Neural
Coreference Resolution"
<https://www.semanticscholar.org/paper/End-to-end-Neural-Coreference-Resolution-Lee-He/3f2114893dc44eacac951f148fbff142ca200e83>
by Lee et al., 2017.
The basic outline of this model is to get an embedded representation of each span in the
document. These span representations are scored and used to prune away spans that are unlikely
to occur in a coreference cluster. For the remaining spans, the model decides which antecedent
span (if any) they are coreferent with. The resulting coreference links, after applying
transitivity, imply a clustering of the spans in the document.
Parameters
----------
vocab : ``Vocabulary``
text_field_embedder : ``TextFieldEmbedder``
Used to embed the ``text`` ``TextField`` we get as input to the model.
context_layer : ``Seq2SeqEncoder``
This layer incorporates contextual information for each word in the document.
mention_feedforward : ``FeedForward``
This feedforward network is applied to the span representations which is then scored
by a linear layer.
antecedent_feedforward: ``FeedForward``
This feedforward network is applied to pairs of span representation, along with any
pairwise features, which is then scored by a linear layer.
feature_size: ``int``
The embedding size for all the embedded features, such as distances or span widths.
max_span_width: ``int``
The maximum width of candidate spans.
spans_per_word: float, required.
A multiplier between zero and one which controls what percentage of candidate mention
spans we retain with respect to the number of words in the document.
max_antecedents: int, required.
For each mention which survives the pruning stage, we consider this many antecedents.
lexical_dropout: ``int``
The probability of dropping out dimensions of the embedded text.
initializer : ``InitializerApplicator``, optional (default=``InitializerApplicator()``)
Used to initialize the model parameters.
regularizer : ``RegularizerApplicator``, optional (default=``None``)
If provided, will be used to calculate the regularization penalty during training.
"""
def __init__(self,
vocab: Vocabulary,
text_field_embedder: TextFieldEmbedder,
mention_feedforward: FeedForward,
antecedent_feedforward: FeedForward,
feature_size: int,
context_layer: Seq2SeqEncoder = None,
max_span_width: int = 1,
spans_per_word: float = 0.1,
max_antecedents: int = 50,
lexical_dropout: float = 0.2,
pretrain_ed: bool = False,
pretrain_coref: bool = False,
coref_loss_weight: float = 1.0,
bce_loss_weight: float = 1.0,
bce_pos_weight: float = None,
local_window_size: int = 10,
attention_type: str = 'dot',
decoding: str = 'type-guided',
type_threshold: float = -1.,
type_refine: bool = True,
type_match_in_eval: bool = True,
initializer: InitializerApplicator = InitializerApplicator(),
regularizer: Optional[RegularizerApplicator] = None) -> None:
super(End2EndEventCoreferenceResolver, self).__init__(vocab, regularizer)
logger.info(vocab)
self._text_field_embedder = text_field_embedder
self._context_layer = context_layer
self._antecedent_feedforward = TimeDistributed(antecedent_feedforward)
self._event_scorer = torch.nn.Sequential(
TimeDistributed(mention_feedforward),
TimeDistributed(torch.nn.Linear(mention_feedforward.get_output_dim(), 1))
)
self._pretrain_ed = pretrain_ed
self._pretrain_coref = pretrain_coref
self._mention_pruner = Pruner(self._event_scorer)
self._antecedent_scorer = TimeDistributed(torch.nn.Linear(antecedent_feedforward.get_output_dim(), 1))
self._local_window_size = local_window_size
self._attention_type = attention_type
self._decoding = decoding
self._type_threshold = type_threshold
logger.info(vocab.get_token_from_index(0, "labels"))
if context_layer is not None:
endpoint_span_extractor_dim = context_layer.get_output_dim()
attentive_span_extractor_dim = text_field_embedder.get_output_dim()
self._endpoint_span_extractor = EndpointSpanExtractor(endpoint_span_extractor_dim,
combination="x,y",
num_width_embeddings=max_span_width,
span_width_embedding_dim=feature_size)
self._attentive_span_extractor = SelfAttentiveSpanExtractor(input_dim=attentive_span_extractor_dim)
span_embedding_size = self._endpoint_span_extractor.get_output_dim() + self._attentive_span_extractor.get_output_dim()
if self._local_window_size <= 0:
self._attention_layer = None
else:
if self._attention_type == 'dot':
similarity_function = DotProductSimilarity(scale_output=True)
num_head = 1
else:
raise NotImplementedError('Attention Type: %s' % self._attention_type)
self._attention_layer = IntraSentenceAttentionEncoder(input_dim=attentive_span_extractor_dim,
similarity_function=similarity_function,
combination='2',
num_attention_heads=num_head
)
else:
attentive_span_extractor_dim = text_field_embedder.get_output_dim()
if max_span_width > 1:
endpoint_span_extractor_dim = text_field_embedder.get_output_dim()
self._endpoint_span_extractor = EndpointSpanExtractor(endpoint_span_extractor_dim,
combination="x,y",
num_width_embeddings=max_span_width,
span_width_embedding_dim=feature_size)
else:
self._endpoint_span_extractor = None
self._attentive_span_extractor = SelfAttentiveSpanExtractor(input_dim=attentive_span_extractor_dim)
if self._local_window_size <= 0:
self._attention_layer = None
else:
if self._attention_type == 'dot':
similarity_function = DotProductSimilarity(scale_output=True)
num_head = 1
else:
raise NotImplementedError('Attention Type: %s' % self._attention_type)
self._attention_layer = IntraSentenceAttentionEncoder(input_dim=attentive_span_extractor_dim,
similarity_function=similarity_function,
combination='2',
num_attention_heads=num_head
)
if self._endpoint_span_extractor is not None:
span_embedding_size = self._attentive_span_extractor.get_output_dim() + self._endpoint_span_extractor.get_output_dim()
else:
span_embedding_size = self._attentive_span_extractor.get_output_dim()
if type_refine:
self._type_refine_gate = torch.nn.Sequential(
TimeDistributed(torch.nn.Linear(span_embedding_size * 2, span_embedding_size)),
torch.nn.Sigmoid()
)
else:
self._type_refine_gate = None
# NIL for Unified Event
self._event_embedding = Embedding(num_embeddings=vocab.get_vocab_size('labels'),
embedding_dim=span_embedding_size)
self._event_embedding_map = torch.nn.Linear(self._event_embedding.get_output_dim() * 2,
self._event_embedding.get_output_dim())
self._positive_label_size = vocab.get_vocab_size('labels') - 1
# 10 possible distance buckets.
self._num_distance_buckets = 10
self._distance_embedding = Embedding(self._num_distance_buckets, feature_size)
self._coref_loss_weight = coref_loss_weight
self._bce_loss_weight = bce_loss_weight
self._bce_pos_weight = bce_pos_weight
self._max_span_width = max_span_width
self._spans_per_word = spans_per_word
self._max_antecedents = max_antecedents
self._mention_f1_score = TopSpanMentionTypeF1()
self._conll_coref_scores = EventCorefScores(mapping_type=type_match_in_eval)
self._type_loss_metric = Average()
self._realis_loss_metric = Average()
self._coref_loss_metric = Average()
self._coref_label_metric = Average()
self._type_label_metric = Average()
self._nil_label_metric = Average()
if self._bce_pos_weight:
self._bce_loss = BCEWithLogitsLoss(reduction='none', pos_weight=torch.tensor(self._bce_pos_weight))
else:
self._bce_loss = BCEWithLogitsLoss(reduction='none')
if lexical_dropout > 0:
self._lexical_dropout = torch.nn.Dropout(p=lexical_dropout)
else:
self._lexical_dropout = lambda x: x
initializer(self)
def _get_event_embedding(self, span_mask):
"""
:param span_mask:
(batch, top_span_size, 1)
:return:
            (batch, positive_label_size, embedding_size)
"""
event_indices = util.get_range_vector(self._positive_label_size, device=util.get_device_of(span_mask)) + 1
event_indices = torch.stack([torch.zeros_like(event_indices), event_indices]).transpose(0, 1)
event_indices = event_indices.expand([event_indices.size(0), event_indices.size(1)])
event_embeddings = self._event_embedding(event_indices)
event_embeddings = event_embeddings.reshape(event_embeddings.size(0),
event_embeddings.size(1) * event_embeddings.size(2))
event_embeddings = self._event_embedding_map.forward(event_embeddings)
event_embeddings = event_embeddings.unsqueeze(0).expand(span_mask.size(0),
event_embeddings.size(0),
event_embeddings.size(1),
)
return event_embeddings
def _get_type_antecedent_labels(self, top_event_type_labels):
"""
:param top_event_type_labels:
(batch, top_span_size, 1)
:return:
(batch, top_span_size, positive_label_size)
"""
event_indices = util.get_range_vector(self.vocab.get_vocab_size('labels'),
device=util.get_device_of(top_event_type_labels))
top_event_type_labels = top_event_type_labels.unsqueeze(-1).expand([top_event_type_labels.size(0),
top_event_type_labels.size(1),
event_indices.size(0)])
type_antecedent_labels = (top_event_type_labels == event_indices).float()
return type_antecedent_labels
def _type_refine_embedding(self, top_embeddings, event_embeddings):
# (batch, top_span_size, emb_size) bmm
event_prob = torch.bmm(top_embeddings, torch.transpose(event_embeddings, 1, 2))
shape = [event_prob.size(0), event_prob.size(1), 1]
dummy_scores = event_prob.new_zeros(*shape)
event_prob = torch.cat([dummy_scores, event_prob], -1)
event_prob = torch.softmax(event_prob, -1)
event_rep = torch.bmm(event_prob[:, :, 1:], event_embeddings) + event_prob[:, :, :1] * top_embeddings
refine_gate = self._type_refine_gate(torch.cat([event_rep, top_embeddings], -1))
top_embeddings = refine_gate * top_embeddings + (1 - refine_gate) * event_rep
return top_embeddings
def _local_attention(self, raw_contextualized_embeddings, text_mask):
device = util.get_device_of(raw_contextualized_embeddings)
if device < 0:
device = 'cpu'
attention_mask = torch.ones((text_mask.size(1), text_mask.size(1)), device=device)
# attention_mask = attention_mask - torch.eye(text_mask.size(1),
# device=util.get_device_of(contextualized_embeddings))
new_attention_mask = text_mask[:, :, None] * attention_mask
new_attention_mask = torch.triu(torch.tril(new_attention_mask, self._local_window_size),
-self._local_window_size)
new_contextualized_embeddings = self._attention_layer(raw_contextualized_embeddings,
new_attention_mask)
return new_contextualized_embeddings
@overrides
def forward(self, # type: ignore
text: Dict[str, torch.LongTensor],
spans: torch.IntTensor,
coref_labels: torch.IntTensor = None,
event_type_labels: torch.IntTensor = None,
realis_labels: torch.IntTensor = None,
metadata: List[Dict[str, Any]] = None) -> Dict[str, torch.Tensor]:
# pylint: disable=arguments-differ
"""
Parameters
----------
text : ``Dict[str, torch.LongTensor]``, required.
The output of a ``TextField`` representing the text of
the document.
spans : ``torch.IntTensor``, required.
A tensor of shape (batch_size, num_spans, 2), representing the inclusive start and end
indices of candidate spans for mentions. Comes from a ``ListField[SpanField]`` of
indices into the text of the document.
coref_labels : ``torch.IntTensor``, optional (default = None).
A tensor of shape (batch_size, num_spans), representing the cluster ids
of each span, or -1 for those which do not appear in any clusters.
event_type_labels : ``torch.IntTensor``, optional (default = None).
A tensor of shape (batch_size, num_spans), representing the event label of the specific span.
realis_labels : ``torch.IntTensor``, optional (default = None).
A tensor of shape (batch_size, num_spans), representing the realis label of the specific span.
metadata : ``List[Dict[str, Any]]``, optional (default = None).
A metadata dictionary for each instance in the batch. We use the "original_text" and "clusters" keys
from this dictionary, which respectively have the original text and the annotated gold coreference
clusters for that instance.
Returns
-------
An output dictionary consisting of:
top_spans : ``torch.IntTensor``
A tensor of shape ``(batch_size, num_spans_to_keep, 2)`` representing
the start and end word indices of the top spans that survived the pruning stage.
antecedent_indices : ``torch.IntTensor``
A tensor of shape ``(num_spans_to_keep, max_antecedents)`` representing for each top span
the index (with respect to top_spans) of the possible antecedents the model considered.
predicted_antecedents : ``torch.IntTensor``
A tensor of shape ``(batch_size, num_spans_to_keep)`` representing, for each top span, the
index (with respect to antecedent_indices) of the most likely antecedent. -1 means there
was no predicted link.
loss : ``torch.FloatTensor``, optional
A scalar loss to be optimised.
"""
# Shape: (batch_size, document_length, embedding_size)
text_embeddings = self._lexical_dropout(self._text_field_embedder(text))
document_length = text_embeddings.size(1)
num_spans = spans.size(1)
# Shape: (batch_size, document_length)
text_mask = util.get_text_field_mask(text).float()
# Shape: (batch_size, num_spans)
span_mask = (spans[:, :, 0] >= 0).squeeze(-1).float()
# SpanFields return -1 when they are used as padding. As we do
# some comparisons based on span widths when we attend over the
# span representations that we generate from these indices, we
# need them to be <= 0. This is only relevant in edge cases where
# the number of spans we consider after the pruning stage is >= the
# total number of spans, because in this case, it is possible we might
# consider a masked span.
# Shape: (batch_size, num_spans, 2)
spans = F.relu(spans.float()).long()
if self._context_layer:
# Shape: (batch_size, document_length, encoding_dim)
raw_contextualized_embeddings = self._context_layer(text_embeddings, text_mask)
if self._attention_layer is not None:
new_contextualized_embeddings = self._local_attention(
raw_contextualized_embeddings=raw_contextualized_embeddings,
text_mask=text_mask
)
else:
new_contextualized_embeddings = raw_contextualized_embeddings
# Shape: (batch_size, num_spans, 2 * encoding_dim + feature_size)
endpoint_span_embeddings = self._endpoint_span_extractor(new_contextualized_embeddings, spans)
# Shape: (batch_size, num_spans, embedding_size)
attended_span_embeddings = self._attentive_span_extractor(text_embeddings, spans)
# Shape: (batch_size, num_spans, embedding_size + 2 * encoding_dim + feature_size)
# span_embeddings = torch.cat([endpoint_span_embeddings, attended_span_embeddings], -1)
span_embeddings = torch.cat([endpoint_span_embeddings, attended_span_embeddings], -1)
else:
raw_contextualized_embeddings = text_embeddings
if self._attention_layer is not None:
new_contextualized_embeddings = self._local_attention(
raw_contextualized_embeddings=raw_contextualized_embeddings,
text_mask=text_mask
)
else:
new_contextualized_embeddings = raw_contextualized_embeddings
span_embeddings_list = list()
attended_span_embeddings = self._attentive_span_extractor(new_contextualized_embeddings, spans)
span_embeddings_list += [attended_span_embeddings]
if self._endpoint_span_extractor is not None:
# Shape: (batch_size, num_spans, embedding_size)
endpoint_span_embeddings = self._endpoint_span_extractor(text_embeddings, spans)
span_embeddings_list += [endpoint_span_embeddings]
span_embeddings = torch.cat(span_embeddings_list, -1)
# event_scores = self._event_classifier.forward(span_embeddings)
# Shape: (batch_size, num_spans, num_event_realis_label)
# Shape: (batch_size, num_spans, num_event_realis_label)
# event_realis_scores = self._event_realis_classifier.forward(span_embeddings)
# Prune based on mention scores.
num_spans_to_keep_according_doc_len = int(math.floor(self._spans_per_word * document_length))
(top_embeddings, top_mask, top_indices, top_scores) = self._mention_pruner(span_embeddings,
span_mask,
num_spans_to_keep_according_doc_len,
)
event_embeddings = self._get_event_embedding(span_mask)
top_mask = top_mask.unsqueeze(-1)
# Shape: (batch_size * num_spans_to_keep)
# torch.index_select only accepts 1D indices, but here
# we need to select spans for each element in the batch.
# This reformats the indices to take into account their
# index into the batch. We precompute this here to make
# the multiple calls to util.batched_index_select below more efficient.
flat_top_span_indices = util.flatten_and_batch_shift_indices(top_indices, num_spans)
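        # e.g. (hypothetical sizes) with batch_size=2 and num_spans=100, a kept index 3
        # in the second batch element becomes flat index 103, so a single flat
        # index_select can gather spans for the whole batch at once.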
# Compute final predictions for which spans to consider as mentions.
# Shape: (batch_size, num_spans_to_keep, 2)
top_spans = util.batched_index_select(spans,
top_indices,
flat_top_span_indices)
# Compute indices for antecedent spans to consider.
max_antecedents = min(self._max_antecedents, num_spans_to_keep_according_doc_len)
# top_span_embeddings = top_span_embeddings.detach()
# top_span_mention_scores = top_span_mention_scores.detach()
# Now that we have our variables in terms of num_spans_to_keep, we need to
# compare span pairs to decide each span's antecedent. Each span can only
# have prior spans as antecedents, and we only consider up to max_antecedents
# prior spans. So the first thing we do is construct a matrix mapping a span's
# index to the indices of its allowed antecedents. Note that this is independent
# of the batch dimension - it's just a function of the span's position in
# top_spans. The spans are in document order, so we can just use the relative
# index of the spans to know which other spans are allowed antecedents.
# Once we have this matrix, we reformat our variables again to get embeddings
# for all valid antecedents for each span. This gives us variables with shapes
# like (batch_size, num_spans_to_keep, max_antecedents, embedding_size), which
# we can use to make coreference decisions between valid span pairs.
# Shapes:
# (num_spans_to_keep, max_antecedents),
# (1, max_antecedents),
# (1, num_spans_to_keep, max_antecedents)
valid_antecedent_indices, valid_antecedent_offsets, valid_antecedent_log_mask = \
_generate_valid_antecedents(num_spans_to_keep_according_doc_len,
max_antecedents,
util.get_device_of(text_mask))
if self._type_refine_gate is not None:
top_embeddings = self._type_refine_embedding(top_embeddings, event_embeddings)
# Select tensors relating to the antecedent spans.
# Shape: (batch_size, num_spans_to_keep, max_antecedents, embedding_size)
candidate_antecedent_embeddings = util.flattened_index_select(top_embeddings,
valid_antecedent_indices)
# Shape: (batch_size, num_spans_to_keep, max_antecedents)
candidate_antecedent_mention_scores = util.flattened_index_select(top_scores,
valid_antecedent_indices).squeeze(-1)
# Shape: (batch_size, num_spans_to_keep, event_type_size + max_antecedents, embedding_size)
candidate_antecedent_embeddings = self._combine_event_embeddings_and_cluster_antecedent_embeddings(
event_embeddings,
candidate_antecedent_embeddings)
# Compute antecedent scores.
# Shape: (batch_size, num_spans_to_keep, event_type_size + max_antecedents, embedding_size)
span_pair_embeddings = self._compute_span_pair_embeddings(top_embeddings,
candidate_antecedent_embeddings,
valid_antecedent_offsets)
# (batch_size, event_type_size, 1)
event_type_prior_scores = self._event_scorer(event_embeddings)
# (batch_size, num_spans_to_keep, event_type_size)
event_type_prior_scores = event_type_prior_scores.transpose(1, 2).expand(
candidate_antecedent_mention_scores.size(0),
candidate_antecedent_mention_scores.size(1),
-1)
# (batch_size, num_spans_to_keep, event_type_size + max_antecedents)
candidate_antecedent_mention_scores = torch.cat([event_type_prior_scores,
candidate_antecedent_mention_scores],
-1)
# Shape: (batch_size, num_spans_to_keep, 1 + event_type_size + max_antecedents)
coreference_scores = self._compute_coreference_scores(span_pair_embeddings,
top_scores,
candidate_antecedent_mention_scores,
valid_antecedent_log_mask)
# We now have, for each span which survived the pruning stage,
# a predicted antecedent. This implies a clustering if we group
# mentions which refer to each other in a chain.
# Shape: (batch_size, num_spans_to_keep)
_, predicted_antecedents = coreference_scores.max(2)
# Subtract one here because index 0 is the "no antecedent" class,
# so this makes the indices line up with actual spans if the prediction
# is greater than -1.
predicted_antecedents -= 1
output_dict = {"top_spans": top_spans,
"antecedent_indices": valid_antecedent_indices,
"predicted_antecedents": predicted_antecedents,
"coreference_scores": coreference_scores,
}
if coref_labels is not None and event_type_labels is not None:
pruned_event_type_labels = torch.gather(event_type_labels, 1, top_indices)
type_antecedent_labels = self._get_type_antecedent_labels(pruned_event_type_labels)
# Find the gold labels for the spans which we kept.
pruned_gold_labels = util.batched_index_select(coref_labels.unsqueeze(-1),
top_indices,
flat_top_span_indices)
antecedent_labels = util.flattened_index_select(pruned_gold_labels,
valid_antecedent_indices).squeeze(-1)
antecedent_labels += valid_antecedent_log_mask.long()
# Compute labels.
# Shape: (batch_size, num_spans_to_keep, max_antecedents + 1)
gold_antecedent_labels = self._compute_antecedent_gold_labels(pruned_gold_labels,
type_antecedent_labels,
antecedent_labels)
bce_loss = self._bce_loss.forward(self._event_scorer.forward(span_embeddings).squeeze(-1),
(event_type_labels > 0).float()) * span_mask
bce_loss = bce_loss.sum() * self._bce_loss_weight
# Now, compute the loss using the negative marginal log-likelihood.
# This is equal to the log of the sum of the probabilities of all antecedent predictions
# that would be consistent with the data, in the sense that we are minimising, for a
# given span, the negative marginal log likelihood of all antecedents which are in the
# same gold cluster as the span we are currently considering. Each span i predicts a
# single antecedent j, but there might be several prior mentions k in the same
# coreference cluster that would be valid antecedents. Our loss is the sum of the
# probability assigned to all valid antecedents. This is a valid objective for
# clustering as we don't mind which antecedent is predicted, so long as they are in
# the same coreference cluster.
if self._pretrain_ed:
# All antecedent mask is 0
top_mask = top_mask.expand_as(coreference_scores).clone()
top_mask[:, :, self._positive_label_size + 2:] = 0
coreference_log_probs = util.masked_log_softmax(coreference_scores, top_mask)
correct_antecedent_log_probs = coreference_log_probs + gold_antecedent_labels.log()
negative_marginal_log_likelihood = -util.logsumexp(correct_antecedent_log_probs).sum()
coref_loss = negative_marginal_log_likelihood * self._coref_loss_weight
output_dict["loss"] = coref_loss + bce_loss
decoded_result = self.decode(output_dict)
pred_label_spans_list = decoded_result['pred_label_spans']
gold_label_spans_list = [m['gold_label_spans'] for m in metadata]
self._mention_f1_score(pred_label_spans_list,
gold_label_spans_list,
)
self._conll_coref_scores(decoded_result['clusters'],
metadata,
pred_label_spans_list,
gold_label_spans_list)
self._type_loss_metric(bce_loss.item())
self._coref_loss_metric(negative_marginal_log_likelihood.item())
else:
self._coref_loss_metric(0.)
if metadata is not None:
output_dict["document"] = [x["original_text"] for x in metadata]
output_dict["offset"] = [x["token_offset"] for x in metadata]
output_dict['doc_id'] = [x.get("doc_id", None) for x in metadata]
return output_dict
@overrides
def decode(self, output_dict: Dict[str, torch.Tensor]):
"""
Converts the list of spans and predicted antecedent indices into clusters
of spans for each element in the batch.
Parameters
----------
output_dict : ``Dict[str, torch.Tensor]``, required.
The result of calling :func:`forward` on an instance or batch of instances.
Returns
-------
The same output dictionary, but with an additional ``clusters`` key:
clusters : ``List[List[List[Tuple[int, int]]]]``
A nested list, representing, for each instance in the batch, the list of clusters,
which are in turn comprised of a list of (start, end) inclusive spans into the
original document.
"""
return node_decode(output_dict,
self.vocab, decoding_algorithm=self._decoding,
positive_label_size=self._positive_label_size,
type_threshold=self._type_threshold)
@overrides
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
mention_result = self._mention_f1_score.get_metric(reset)
coref_precision, coref_recall, coref_f1 = self._conll_coref_scores.get_metric(reset)
return {"c_p": coref_precision,
"c_r": coref_recall,
"c_f1": coref_f1,
"m_p": mention_result['precision'],
"m_r": mention_result['recall'],
"m_f1": mention_result['f1-score'],
"nil": self._nil_label_metric.get_metric(reset),
"type": self._type_label_metric.get_metric(reset),
"coref": self._coref_label_metric.get_metric(reset),
"t_l": self._type_loss_metric.get_metric(reset),
"c_l": self._coref_loss_metric.get_metric(reset),
"a_f1": (mention_result['f1-score'] + coref_f1) / 2.}
@staticmethod
def _combine_event_embeddings_and_cluster_antecedent_embeddings(event_embeddings: torch.FloatTensor,
antecedent_embeddings: torch.FloatTensor):
"""
event_embeddings: ``torch.FloatTensor``, required.
Embedding representations of the event types. Has shape
(batch_size, event_type_size, embedding_size).
antecedent_embeddings : ``torch.FloatTensor``, required.
Embedding representations of the antecedent spans we are considering
for each top span. Has shape
(batch_size, num_spans_to_keep, max_antecedents, embedding_size).
return:
(batch_size, num_spans_to_keep, max_antecedents + event_type_size, embedding_size)
"""
event_embeddings = event_embeddings.unsqueeze(1).expand((antecedent_embeddings.size(0),
antecedent_embeddings.size(1),
event_embeddings.size(1),
antecedent_embeddings.size(3),))
return torch.cat([event_embeddings, antecedent_embeddings], 2)
def _compute_span_pair_embeddings(self,
top_span_embeddings: torch.FloatTensor,
antecedent_embeddings: torch.FloatTensor,
antecedent_offsets: torch.FloatTensor):
"""
Computes an embedding representation of pairs of spans for the pairwise scoring function
to consider. This includes both the original span representations, the element-wise
similarity of the span representations, and an embedding representation of the distance
between the two spans.
Parameters
        ----------
top_span_embeddings : ``torch.FloatTensor``, required.
Embedding representations of the top spans. Has shape
(batch_size, num_spans_to_keep, embedding_size).
antecedent_embeddings : ``torch.FloatTensor``, required.
Embedding representations of the antecedent spans we are considering
for each top span. Has shape
(batch_size, num_spans_to_keep, event_type_size + max_antecedents, embedding_size).
antecedent_offsets : ``torch.IntTensor``, required.
The offsets between each top span and its antecedent spans in terms
of spans we are considering. Has shape (1, max_antecedents).
Returns
-------
span_pair_embeddings : ``torch.FloatTensor``
Embedding representation of the pair of spans to consider. Has shape
(batch_size, num_spans_to_keep, max_antecedents, embedding_size)
"""
# Shape: (batch_size, num_spans_to_keep, max_antecedents, embedding_size)
target_embeddings = top_span_embeddings.unsqueeze(2).expand_as(antecedent_embeddings)
# Shape: (1, max_antecedents)
bucket_values = util.bucket_values(antecedent_offsets, num_total_buckets=self._num_distance_buckets)
# (1, event_type)
label_bucket_values = bucket_values.new_zeros((1, self._positive_label_size))
# Shape: (1, max_antecedents + event_type_size, embedding_size)
antecedent_distance_embeddings = self._distance_embedding(
torch.cat([bucket_values, label_bucket_values], 1)
)
# Shape: (1, 1, max_antecedents + event_type_size, embedding_size)
antecedent_distance_embeddings = antecedent_distance_embeddings.unsqueeze(0)
expanded_distance_embeddings_shape = (antecedent_embeddings.size(0),
antecedent_embeddings.size(1),
antecedent_embeddings.size(2),
antecedent_distance_embeddings.size(-1))
# Shape: (batch_size, num_spans_to_keep, max_antecedents + event_type_size, embedding_size)
antecedent_distance_embeddings = antecedent_distance_embeddings.expand(*expanded_distance_embeddings_shape)
# Shape: (batch_size, num_spans_to_keep, max_antecedents + event_type_size, embedding_size)
span_pair_embeddings = torch.cat([target_embeddings,
antecedent_embeddings,
antecedent_embeddings * target_embeddings,
antecedent_distance_embeddings], -1)
return span_pair_embeddings
def _compute_antecedent_gold_labels(self,
top_span_labels: torch.IntTensor,
type_antecedent_labels: torch.IntTensor,
antecedent_labels: torch.IntTensor):
"""
Generates a binary indicator for every pair of spans. This label is one if and
only if the pair of spans belong to the same cluster. The labels are augmented
with a dummy antecedent at the zeroth position, which represents the prediction
that a span does not have any antecedent.
Parameters
----------
top_span_labels : ``torch.IntTensor``, required.
The cluster id label for every span. The id is arbitrary,
as we just care about the clustering. Has shape (batch_size, num_spans_to_keep).
antecedent_labels : ``torch.IntTensor``, required.
The cluster id label for every antecedent span. The id is arbitrary,
as we just care about the clustering. Has shape
(batch_size, num_spans_to_keep, max_antecedents).
Returns
-------
pairwise_labels_with_dummy_label : ``torch.FloatTensor``
A binary tensor representing whether a given pair of spans belong to
the same cluster in the gold clustering.
Has shape (batch_size, num_spans_to_keep, max_antecedents + 1).
"""
# Shape: (batch_size, num_spans_to_keep, max_antecedents)
# print(top_span_labels)
# print(antecedent_labels)
target_labels = top_span_labels.expand_as(antecedent_labels)
same_cluster_indicator = (target_labels == antecedent_labels).float()
non_dummy_indicator = (target_labels >= 0).float()
pairwise_labels = same_cluster_indicator * non_dummy_indicator
if self._pretrain_ed:
pairwise_labels = pairwise_labels * 0
else:
# for pairwise_labels without type_antecedent_labels
pairwise_labels_indicator = (pairwise_labels.sum(-1, keepdim=True) > 0).float()
type_antecedent_labels = type_antecedent_labels * (1 - pairwise_labels_indicator)
self._coref_label_metric(torch.sum(pairwise_labels).item())
self._nil_label_metric(torch.sum(type_antecedent_labels[:, :, 0]).item())
self._type_label_metric(torch.sum(type_antecedent_labels[:, :, 1: self._positive_label_size + 1]).item())
# print(pairwise_labels)
#
# # Shape: (batch_size, num_spans_to_keep, 1)
# dummy_labels = (1 - pairwise_labels).prod(-1, keepdim=True)
# Shape: (batch_size, num_spans_to_keep, event_type_size + max_antecedents + 1)
pairwise_labels_with_dummy_label = torch.cat([type_antecedent_labels, pairwise_labels], -1)
return pairwise_labels_with_dummy_label
def _compute_coreference_scores(self,
pairwise_embeddings: torch.FloatTensor,
top_span_mention_scores: torch.FloatTensor,
antecedent_mention_scores: torch.FloatTensor,
antecedent_log_mask: torch.FloatTensor) -> torch.FloatTensor:
"""
Computes scores for every pair of spans. Additionally, a dummy label is included,
representing the decision that the span is not coreferent with anything. For the dummy
label, the score is always zero. For the true antecedent spans, the score consists of
the pairwise antecedent score and the unary mention scores for the span and its
antecedent. The factoring allows the model to blame many of the absent links on bad
spans, enabling the pruning strategy used in the forward pass.
Parameters
----------
pairwise_embeddings: ``torch.FloatTensor``, required.
Embedding representations of pairs of spans. Has shape
(batch_size, num_spans_to_keep, max_antecedents, encoding_dim)
top_span_mention_scores: ``torch.FloatTensor``, required.
Mention scores for every span. Has shape
(batch_size, num_spans_to_keep, max_antecedents).
antecedent_mention_scores: ``torch.FloatTensor``, required.
Mention scores for every antecedent. Has shape
(batch_size, num_spans_to_keep, max_antecedents).
antecedent_log_mask: ``torch.FloatTensor``, required.
The log of the mask for valid antecedents.
Returns
-------
coreference_scores: ``torch.FloatTensor``
A tensor of shape (batch_size, num_spans_to_keep, max_antecedents + 1),
            representing the unnormalised score for each (span, antecedent) pair
we considered.
"""
antecedent_log_mask = torch.cat([antecedent_log_mask.new_zeros((antecedent_log_mask.size(0),
antecedent_log_mask.size(1),
self._positive_label_size)),
antecedent_log_mask],
-1)
# Shape: (batch_size, num_spans_to_keep, max_antecedents)
antecedent_scores = self._antecedent_scorer(
self._antecedent_feedforward(pairwise_embeddings)).squeeze(-1)
antecedent_scores += top_span_mention_scores + antecedent_mention_scores
antecedent_scores += antecedent_log_mask
# Shape: (batch_size, num_spans_to_keep, 1)
shape = [antecedent_scores.size(0), antecedent_scores.size(1), 1]
dummy_scores = antecedent_scores.new_zeros(*shape)
# Shape: (batch_size, num_spans_to_keep, max_antecedents + 1)
coreference_scores = torch.cat([dummy_scores, antecedent_scores], -1)
return coreference_scores
def _generate_valid_antecedents(num_spans_to_keep: int,
max_antecedents: int,
device: int) -> Tuple[torch.IntTensor,
torch.IntTensor,
torch.FloatTensor]:
"""
This method generates possible antecedents per span which survived the pruning
stage. This procedure is `generic across the batch`. The reason this is the case is
that each span in a batch can be coreferent with any previous span, but here we
are computing the possible `indices` of these spans. So, regardless of the batch,
the 1st span _cannot_ have any antecedents, because there are none to select from.
Similarly, each element can only predict previous spans, so this returns a matrix
of shape (num_spans_to_keep, max_antecedents), where the (i,j)-th index is equal to
(i - 1) - j if j <= i, or zero otherwise.
Parameters
----------
num_spans_to_keep : ``int``, required.
The number of spans that were kept while pruning.
max_antecedents : ``int``, required.
The maximum number of antecedent spans to consider for every span.
device: ``int``, required.
The CUDA device to use.
Returns
-------
valid_antecedent_indices : ``torch.IntTensor``
The indices of every antecedent to consider with respect to the top k spans.
Has shape ``(num_spans_to_keep, max_antecedents)``.
valid_antecedent_offsets : ``torch.IntTensor``
The distance between the span and each of its antecedents in terms of the number
of considered spans (i.e not the word distance between the spans).
Has shape ``(1, max_antecedents)``.
valid_antecedent_log_mask : ``torch.FloatTensor``
The logged mask representing whether each antecedent span is valid. Required since
different spans have different numbers of valid antecedents. For example, the first
span in the document should have no valid antecedents.
Has shape ``(1, num_spans_to_keep, max_antecedents)``.
"""
# Shape: (num_spans_to_keep, 1)
target_indices = util.get_range_vector(num_spans_to_keep, device).unsqueeze(1)
# Shape: (1, max_antecedents)
valid_antecedent_offsets = (util.get_range_vector(max_antecedents, device) + 1).unsqueeze(0)
# This is a broadcasted subtraction.
# Shape: (num_spans_to_keep, max_antecedents)
raw_antecedent_indices = target_indices - valid_antecedent_offsets
# In our matrix of indices, the upper triangular part will be negative
# because the offsets will be > the target indices. We want to mask these,
# because these are exactly the indices which we don't want to predict, per span.
# We're generating a logspace mask here because we will eventually create a
# distribution over these indices, so we need the 0 elements of the mask to be -inf
# in order to not mess up the normalisation of the distribution.
# Shape: (1, num_spans_to_keep, max_antecedents)
valid_antecedent_log_mask = (raw_antecedent_indices >= 0).float().unsqueeze(0).log()
# Shape: (num_spans_to_keep, max_antecedents)
valid_antecedent_indices = F.relu(raw_antecedent_indices.float()).long()
return valid_antecedent_indices, valid_antecedent_offsets, valid_antecedent_log_mask
| [
"logging.getLogger",
"src.metrics.mention_f1.TopSpanMentionTypeF1",
"allennlp.nn.util.bucket_values",
"torch.nn.Dropout",
"allennlp.nn.util.get_range_vector",
"math.floor",
"allennlp.training.metrics.Average",
"allennlp.modules.Pruner",
"allennlp.models.model.Model.register",
"allennlp.nn.util.get_device_of",
"torch.softmax",
"torch.sum",
"allennlp.nn.util.logsumexp",
"torch.bmm",
"allennlp.nn.util.flattened_index_select",
"torch.nn.Sigmoid",
"allennlp.modules.TimeDistributed",
"torch.tril",
"src.utils.cluster_decoding_utils.node_decode",
"torch.zeros_like",
"allennlp.modules.seq2seq_encoders.IntraSentenceAttentionEncoder",
"torch.gather",
"allennlp.nn.InitializerApplicator",
"allennlp.nn.util.flatten_and_batch_shift_indices",
"allennlp.modules.span_extractors.SelfAttentiveSpanExtractor",
"torch.transpose",
"allennlp.nn.util.masked_log_softmax",
"torch.nn.BCEWithLogitsLoss",
"torch.cat",
"allennlp.modules.token_embedders.Embedding",
"torch.tensor",
"allennlp.modules.span_extractors.EndpointSpanExtractor",
"torch.nn.Linear",
"allennlp.nn.util.batched_index_select",
"src.metrics.event_coref_scores.EventCorefScores",
"allennlp.nn.util.get_text_field_mask",
"allennlp.modules.similarity_functions.DotProductSimilarity"
] | [((1105, 1132), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1122, 1132), False, 'import logging\n'), ((1168, 1214), 'allennlp.models.model.Model.register', 'Model.register', (['"""end-to-end-event-coreference"""'], {}), "('end-to-end-event-coreference')\n", (1182, 1214), False, 'from allennlp.models.model import Model\n'), ((4676, 4699), 'allennlp.nn.InitializerApplicator', 'InitializerApplicator', ([], {}), '()\n', (4697, 4699), False, 'from allennlp.nn import util, InitializerApplicator, RegularizerApplicator\n'), ((5028, 5067), 'allennlp.modules.TimeDistributed', 'TimeDistributed', (['antecedent_feedforward'], {}), '(antecedent_feedforward)\n', (5043, 5067), False, 'from allennlp.modules import Seq2SeqEncoder, TimeDistributed, TextFieldEmbedder\n'), ((5382, 5408), 'allennlp.modules.Pruner', 'Pruner', (['self._event_scorer'], {}), '(self._event_scorer)\n', (5388, 5408), False, 'from allennlp.modules import FeedForward, Pruner\n'), ((10273, 10324), 'allennlp.modules.token_embedders.Embedding', 'Embedding', (['self._num_distance_buckets', 'feature_size'], {}), '(self._num_distance_buckets, feature_size)\n', (10282, 10324), False, 'from allennlp.modules.token_embedders import Embedding\n'), ((10646, 10668), 'src.metrics.mention_f1.TopSpanMentionTypeF1', 'TopSpanMentionTypeF1', ([], {}), '()\n', (10666, 10668), False, 'from src.metrics.mention_f1 import TopSpanMentionTypeF1\n'), ((10704, 10753), 'src.metrics.event_coref_scores.EventCorefScores', 'EventCorefScores', ([], {'mapping_type': 'type_match_in_eval'}), '(mapping_type=type_match_in_eval)\n', (10720, 10753), False, 'from src.metrics.event_coref_scores import EventCorefScores\n'), ((10787, 10796), 'allennlp.training.metrics.Average', 'Average', ([], {}), '()\n', (10794, 10796), False, 'from allennlp.training.metrics import Average\n'), ((10832, 10841), 'allennlp.training.metrics.Average', 'Average', ([], {}), '()\n', (10839, 10841), False, 'from allennlp.training.metrics import Average\n'), ((10876, 10885), 'allennlp.training.metrics.Average', 'Average', ([], {}), '()\n', (10883, 10885), False, 'from allennlp.training.metrics import Average\n'), ((10921, 10930), 'allennlp.training.metrics.Average', 'Average', ([], {}), '()\n', (10928, 10930), False, 'from allennlp.training.metrics import Average\n'), ((10965, 10974), 'allennlp.training.metrics.Average', 'Average', ([], {}), '()\n', (10972, 10974), False, 'from allennlp.training.metrics import Average\n'), ((11008, 11017), 'allennlp.training.metrics.Average', 'Average', ([], {}), '()\n', (11015, 11017), False, 'from allennlp.training.metrics import Average\n'), ((13842, 13883), 'torch.cat', 'torch.cat', (['[dummy_scores, event_prob]', '(-1)'], {}), '([dummy_scores, event_prob], -1)\n', (13851, 13883), False, 'import torch\n'), ((13905, 13934), 'torch.softmax', 'torch.softmax', (['event_prob', '(-1)'], {}), '(event_prob, -1)\n', (13918, 13934), False, 'import torch\n'), ((14345, 14394), 'allennlp.nn.util.get_device_of', 'util.get_device_of', (['raw_contextualized_embeddings'], {}), '(raw_contextualized_embeddings)\n', (14363, 14394), False, 'from allennlp.nn import util, InitializerApplicator, RegularizerApplicator\n'), ((22511, 22571), 'allennlp.nn.util.flatten_and_batch_shift_indices', 'util.flatten_and_batch_shift_indices', (['top_indices', 'num_spans'], {}), '(top_indices, num_spans)\n', (22547, 22571), False, 'from allennlp.nn import util, InitializerApplicator, RegularizerApplicator\n'), ((22721, 22789), 'allennlp.nn.util.batched_index_select', 
'util.batched_index_select', (['spans', 'top_indices', 'flat_top_span_indices'], {}), '(spans, top_indices, flat_top_span_indices)\n', (22746, 22789), False, 'from allennlp.nn import util, InitializerApplicator, RegularizerApplicator\n'), ((24949, 25018), 'allennlp.nn.util.flattened_index_select', 'util.flattened_index_select', (['top_embeddings', 'valid_antecedent_indices'], {}), '(top_embeddings, valid_antecedent_indices)\n', (24976, 25018), False, 'from allennlp.nn import util, InitializerApplicator, RegularizerApplicator\n'), ((26557, 26634), 'torch.cat', 'torch.cat', (['[event_type_prior_scores, candidate_antecedent_mention_scores]', '(-1)'], {}), '([event_type_prior_scores, candidate_antecedent_mention_scores], -1)\n', (26566, 26634), False, 'import torch\n'), ((32870, 33034), 'src.utils.cluster_decoding_utils.node_decode', 'node_decode', (['output_dict', 'self.vocab'], {'decoding_algorithm': 'self._decoding', 'positive_label_size': 'self._positive_label_size', 'type_threshold': 'self._type_threshold'}), '(output_dict, self.vocab, decoding_algorithm=self._decoding,\n positive_label_size=self._positive_label_size, type_threshold=self.\n _type_threshold)\n', (32881, 33034), False, 'from src.utils.cluster_decoding_utils import node_decode\n'), ((35234, 35289), 'torch.cat', 'torch.cat', (['[event_embeddings, antecedent_embeddings]', '(2)'], {}), '([event_embeddings, antecedent_embeddings], 2)\n', (35243, 35289), False, 'import torch\n'), ((37184, 37273), 'allennlp.nn.util.bucket_values', 'util.bucket_values', (['antecedent_offsets'], {'num_total_buckets': 'self._num_distance_buckets'}), '(antecedent_offsets, num_total_buckets=self.\n _num_distance_buckets)\n', (37202, 37273), False, 'from allennlp.nn import util, InitializerApplicator, RegularizerApplicator\n'), ((38421, 38557), 'torch.cat', 'torch.cat', (['[target_embeddings, antecedent_embeddings, antecedent_embeddings *\n target_embeddings, antecedent_distance_embeddings]', '(-1)'], {}), '([target_embeddings, antecedent_embeddings, antecedent_embeddings *\n target_embeddings, antecedent_distance_embeddings], -1)\n', (38430, 38557), False, 'import torch\n'), ((41484, 41540), 'torch.cat', 'torch.cat', (['[type_antecedent_labels, pairwise_labels]', '(-1)'], {}), '([type_antecedent_labels, pairwise_labels], -1)\n', (41493, 41540), False, 'import torch\n'), ((44547, 44595), 'torch.cat', 'torch.cat', (['[dummy_scores, antecedent_scores]', '(-1)'], {}), '([dummy_scores, antecedent_scores], -1)\n', (44556, 44595), False, 'import torch\n'), ((5130, 5166), 'allennlp.modules.TimeDistributed', 'TimeDistributed', (['mention_feedforward'], {}), '(mention_feedforward)\n', (5145, 5166), False, 'from allennlp.modules import Seq2SeqEncoder, TimeDistributed, TextFieldEmbedder\n'), ((5996, 6145), 'allennlp.modules.span_extractors.EndpointSpanExtractor', 'EndpointSpanExtractor', (['endpoint_span_extractor_dim'], {'combination': '"""x,y"""', 'num_width_embeddings': 'max_span_width', 'span_width_embedding_dim': 'feature_size'}), "(endpoint_span_extractor_dim, combination='x,y',\n num_width_embeddings=max_span_width, span_width_embedding_dim=feature_size)\n", (6017, 6145), False, 'from allennlp.modules.span_extractors import SelfAttentiveSpanExtractor, EndpointSpanExtractor\n'), ((6385, 6451), 'allennlp.modules.span_extractors.SelfAttentiveSpanExtractor', 'SelfAttentiveSpanExtractor', ([], {'input_dim': 'attentive_span_extractor_dim'}), '(input_dim=attentive_span_extractor_dim)\n', (6411, 6451), False, 'from allennlp.modules.span_extractors import 
SelfAttentiveSpanExtractor, EndpointSpanExtractor\n'), ((8183, 8249), 'allennlp.modules.span_extractors.SelfAttentiveSpanExtractor', 'SelfAttentiveSpanExtractor', ([], {'input_dim': 'attentive_span_extractor_dim'}), '(input_dim=attentive_span_extractor_dim)\n', (8209, 8249), False, 'from allennlp.modules.span_extractors import SelfAttentiveSpanExtractor, EndpointSpanExtractor\n'), ((11207, 11242), 'torch.nn.BCEWithLogitsLoss', 'BCEWithLogitsLoss', ([], {'reduction': '"""none"""'}), "(reduction='none')\n", (11224, 11242), False, 'from torch.nn import BCEWithLogitsLoss\n'), ((11312, 11347), 'torch.nn.Dropout', 'torch.nn.Dropout', ([], {'p': 'lexical_dropout'}), '(p=lexical_dropout)\n', (11328, 11347), False, 'import torch\n'), ((13667, 13706), 'torch.transpose', 'torch.transpose', (['event_embeddings', '(1)', '(2)'], {}), '(event_embeddings, 1, 2)\n', (13682, 13706), False, 'import torch\n'), ((13956, 14005), 'torch.bmm', 'torch.bmm', (['event_prob[:, :, 1:]', 'event_embeddings'], {}), '(event_prob[:, :, 1:], event_embeddings)\n', (13965, 14005), False, 'import torch\n'), ((14092, 14134), 'torch.cat', 'torch.cat', (['[event_rep, top_embeddings]', '(-1)'], {}), '([event_rep, top_embeddings], -1)\n', (14101, 14134), False, 'import torch\n'), ((14825, 14880), 'torch.tril', 'torch.tril', (['new_attention_mask', 'self._local_window_size'], {}), '(new_attention_mask, self._local_window_size)\n', (14835, 14880), False, 'import torch\n'), ((20085, 20152), 'torch.cat', 'torch.cat', (['[endpoint_span_embeddings, attended_span_embeddings]', '(-1)'], {}), '([endpoint_span_embeddings, attended_span_embeddings], -1)\n', (20094, 20152), False, 'import torch\n'), ((21115, 21150), 'torch.cat', 'torch.cat', (['span_embeddings_list', '(-1)'], {}), '(span_embeddings_list, -1)\n', (21124, 21150), False, 'import torch\n'), ((21534, 21584), 'math.floor', 'math.floor', (['(self._spans_per_word * document_length)'], {}), '(self._spans_per_word * document_length)\n', (21544, 21584), False, 'import math\n'), ((24595, 24624), 'allennlp.nn.util.get_device_of', 'util.get_device_of', (['text_mask'], {}), '(text_mask)\n', (24613, 24624), False, 'from allennlp.nn import util, InitializerApplicator, RegularizerApplicator\n'), ((28105, 28152), 'torch.gather', 'torch.gather', (['event_type_labels', '(1)', 'top_indices'], {}), '(event_type_labels, 1, top_indices)\n', (28117, 28152), False, 'import torch\n'), ((30596, 30649), 'allennlp.nn.util.masked_log_softmax', 'util.masked_log_softmax', (['coreference_scores', 'top_mask'], {}), '(coreference_scores, top_mask)\n', (30619, 30649), False, 'from allennlp.nn import util, InitializerApplicator, RegularizerApplicator\n'), ((37532, 37582), 'torch.cat', 'torch.cat', (['[bucket_values, label_bucket_values]', '(1)'], {}), '([bucket_values, label_bucket_values], 1)\n', (37541, 37582), False, 'import torch\n'), ((46833, 46881), 'allennlp.nn.util.get_range_vector', 'util.get_range_vector', (['num_spans_to_keep', 'device'], {}), '(num_spans_to_keep, device)\n', (46854, 46881), False, 'from allennlp.nn import util, InitializerApplicator, RegularizerApplicator\n'), ((7010, 7175), 'allennlp.modules.seq2seq_encoders.IntraSentenceAttentionEncoder', 'IntraSentenceAttentionEncoder', ([], {'input_dim': 'attentive_span_extractor_dim', 'similarity_function': 'similarity_function', 'combination': '"""2"""', 'num_attention_heads': 'num_head'}), "(input_dim=attentive_span_extractor_dim,\n similarity_function=similarity_function, combination='2',\n num_attention_heads=num_head)\n", (7039, 7175), False, 
'from allennlp.modules.seq2seq_encoders import IntraSentenceAttentionEncoder\n'), ((7710, 7859), 'allennlp.modules.span_extractors.EndpointSpanExtractor', 'EndpointSpanExtractor', (['endpoint_span_extractor_dim'], {'combination': '"""x,y"""', 'num_width_embeddings': 'max_span_width', 'span_width_embedding_dim': 'feature_size'}), "(endpoint_span_extractor_dim, combination='x,y',\n num_width_embeddings=max_span_width, span_width_embedding_dim=feature_size)\n", (7731, 7859), False, 'from allennlp.modules.span_extractors import SelfAttentiveSpanExtractor, EndpointSpanExtractor\n'), ((8677, 8842), 'allennlp.modules.seq2seq_encoders.IntraSentenceAttentionEncoder', 'IntraSentenceAttentionEncoder', ([], {'input_dim': 'attentive_span_extractor_dim', 'similarity_function': 'similarity_function', 'combination': '"""2"""', 'num_attention_heads': 'num_head'}), "(input_dim=attentive_span_extractor_dim,\n similarity_function=similarity_function, combination='2',\n num_attention_heads=num_head)\n", (8706, 8842), False, 'from allennlp.modules.seq2seq_encoders import IntraSentenceAttentionEncoder\n'), ((9609, 9627), 'torch.nn.Sigmoid', 'torch.nn.Sigmoid', ([], {}), '()\n', (9625, 9627), False, 'import torch\n'), ((13021, 13062), 'allennlp.nn.util.get_device_of', 'util.get_device_of', (['top_event_type_labels'], {}), '(top_event_type_labels)\n', (13039, 13062), False, 'from allennlp.nn import util, InitializerApplicator, RegularizerApplicator\n'), ((18263, 18293), 'allennlp.nn.util.get_text_field_mask', 'util.get_text_field_mask', (['text'], {}), '(text)\n', (18287, 18293), False, 'from allennlp.nn import util, InitializerApplicator, RegularizerApplicator\n'), ((25201, 25266), 'allennlp.nn.util.flattened_index_select', 'util.flattened_index_select', (['top_scores', 'valid_antecedent_indices'], {}), '(top_scores, valid_antecedent_indices)\n', (25228, 25266), False, 'from allennlp.nn import util, InitializerApplicator, RegularizerApplicator\n'), ((46962, 47008), 'allennlp.nn.util.get_range_vector', 'util.get_range_vector', (['max_antecedents', 'device'], {}), '(max_antecedents, device)\n', (46983, 47008), False, 'from allennlp.nn import util, InitializerApplicator, RegularizerApplicator\n'), ((6784, 6823), 'allennlp.modules.similarity_functions.DotProductSimilarity', 'DotProductSimilarity', ([], {'scale_output': '(True)'}), '(scale_output=True)\n', (6804, 6823), False, 'from allennlp.modules.similarity_functions import DotProductSimilarity\n'), ((8451, 8490), 'allennlp.modules.similarity_functions.DotProductSimilarity', 'DotProductSimilarity', ([], {'scale_output': '(True)'}), '(scale_output=True)\n', (8471, 8490), False, 'from allennlp.modules.similarity_functions import DotProductSimilarity\n'), ((9529, 9590), 'torch.nn.Linear', 'torch.nn.Linear', (['(span_embedding_size * 2)', 'span_embedding_size'], {}), '(span_embedding_size * 2, span_embedding_size)\n', (9544, 9590), False, 'import torch\n'), ((11128, 11162), 'torch.tensor', 'torch.tensor', (['self._bce_pos_weight'], {}), '(self._bce_pos_weight)\n', (11140, 11162), False, 'import torch\n'), ((11726, 11755), 'allennlp.nn.util.get_device_of', 'util.get_device_of', (['span_mask'], {}), '(span_mask)\n', (11744, 11755), False, 'from allennlp.nn import util, InitializerApplicator, RegularizerApplicator\n'), ((28588, 28661), 'allennlp.nn.util.flattened_index_select', 'util.flattened_index_select', (['pruned_gold_labels', 'valid_antecedent_indices'], {}), '(pruned_gold_labels, valid_antecedent_indices)\n', (28615, 28661), False, 'from allennlp.nn import util, 
InitializerApplicator, RegularizerApplicator\n'), ((40953, 40979), 'torch.sum', 'torch.sum', (['pairwise_labels'], {}), '(pairwise_labels)\n', (40962, 40979), False, 'import torch\n'), ((41019, 41061), 'torch.sum', 'torch.sum', (['type_antecedent_labels[:, :, 0]'], {}), '(type_antecedent_labels[:, :, 0])\n', (41028, 41061), False, 'import torch\n'), ((41102, 41174), 'torch.sum', 'torch.sum', (['type_antecedent_labels[:, :, 1:self._positive_label_size + 1]'], {}), '(type_antecedent_labels[:, :, 1:self._positive_label_size + 1])\n', (41111, 41174), False, 'import torch\n'), ((11798, 11829), 'torch.zeros_like', 'torch.zeros_like', (['event_indices'], {}), '(event_indices)\n', (11814, 11829), False, 'import torch\n'), ((30794, 30838), 'allennlp.nn.util.logsumexp', 'util.logsumexp', (['correct_antecedent_log_probs'], {}), '(correct_antecedent_log_probs)\n', (30808, 30838), False, 'from allennlp.nn import util, InitializerApplicator, RegularizerApplicator\n')] |
import math
from sys import exit
# so: n is the approximate number of elements in the array and P is the false-positive probability; then the size
# of the structure is m = -(n*log2(P)) / ln(2), and the number of hash functions is -log2(P)
# hash functions of the form (((i + 1)*x + p(i+1)) mod M) mod m are used, where x is the key, i is the hash function index,
# p(i) is the i-th prime number, and M is the 31st Mersenne number, M = 2^31 - 1 = 2147483647, which is prime.
# Evaluating the hash functions requires the first k primes, so we compute them once in the BloomFilter constructor
# and keep them inside the data structure.
# We also need a bit array of size m, but Python has no built-in bit array,
# so we use a bytearray instead. For convenience we implement it as a separate class whose methods can: set
# a given bit to 1, check whether a given bit is 1, and print (return) the array itself
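# A quick worked example of these formulas (illustrative numbers chosen here, not part of the task statement):
# for n = 1000 expected elements and P = 0.01, m = -(1000 * log2(0.01)) / ln(2) is about 9585 bits
# and k = -log2(0.01) is about 7 hash functions, which is what the constructor below computes with round().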
Mersen_31 = 2147483647
class BitArray:
def __init__(self, size):
self.__array = bytearray(int(math.ceil(size / 8)))
self.__size = size
def add_bit(self, i):
        # the i-th bit lives in byte i // 8, at position i % 8 within that byte
self.__array[i // 8] |= 2 ** (7 - (i % 8))
def check_bit(self, i):
if (self.__array[i // 8] & (2 ** (7 - (i % 8)))) == 0:
return False
else:
return True
def print(self):
array_str = ""
for byte in self.__array:
_line = str(bin(byte))[2:]
if len(_line) != 8:
_line = '0' * (8 - len(_line)) + _line
array_str += _line
return array_str[:self.__size]
class BloomFilter:
def __init__(self, n: int, p: float):
self.size = int(-round(n * math.log2(p) / math.log(2)))
self.hash_numbers = int(-round(math.log2(p)))
self.__prime_numbers = list()
self.__get_prime(self.hash_numbers + 1)
self.__bitarray = BitArray(self.size)
def __get_prime(self, prime_size):
        # a plain pass over all numbers with a primality test for each is too slow
        # simplify a bit: first, step by 2 starting from 3, and then check each new candidate for
        # divisibility by the primes already found (except 2, since we only consider odd numbers)
if prime_size == 1:
self.__prime_numbers.append(2)
return
self.__prime_numbers.append(2)
i = 3
while len(self.__prime_numbers) < prime_size:
j = 1
prime_flag = True
while j < len(self.__prime_numbers):
if (i % self.__prime_numbers[j]) == 0:
prime_flag = False
break
j += 1
if prime_flag:
self.__prime_numbers.append(i)
i += 2
def __get_hash(self, x, i):
return (((i + 1) * x + self.__prime_numbers[i]) % Mersen_31) % self.size
def add(self, key: int):
i = 0
while i < self.hash_numbers:
self.__bitarray.add_bit(self.__get_hash(key, i))
i += 1
def search(self, key: int):
i = 0
while i < self.hash_numbers:
if not self.__bitarray.check_bit(self.__get_hash(key, i)):
return False
i += 1
return True
def print(self):
return self.__bitarray.print()
bloom_filter = 0
while True:
try:
line = input().split()
if len(line) == 0:
continue
else:
if line[0] == "set":
try:
elements_number = int(line[1])
probability = float(line[2])
if (elements_number <= 0) | (probability <= 0) | (probability >= 1):
print("error")
continue
bloom_filter = BloomFilter(elements_number, probability)
if (bloom_filter.size == 0) | (bloom_filter.hash_numbers == 0):
print("error")
continue
break
except TypeError:
print("error")
continue
else:
print("error")
continue
except EOFError:
exit()
print(bloom_filter.size, bloom_filter.hash_numbers)
while True:
try:
line = input().split()
if len(line) == 0:
continue
elif line[0] == "print":
print(bloom_filter.print())
elif (line[0] == "add") & (line[1].isnumeric()):
bloom_filter.add(int(line[1]))
elif (line[0] == "search") & (line[1].isnumeric()):
print(int(bloom_filter.search(int(line[1]))))
else:
print("error")
except EOFError:
break
| [
"math.log",
"math.ceil",
"math.log2",
"sys.exit"
] | [((4309, 4315), 'sys.exit', 'exit', ([], {}), '()\n', (4313, 4315), False, 'from sys import exit\n'), ((1048, 1067), 'math.ceil', 'math.ceil', (['(size / 8)'], {}), '(size / 8)\n', (1057, 1067), False, 'import math\n'), ((1830, 1842), 'math.log2', 'math.log2', (['p'], {}), '(p)\n', (1839, 1842), False, 'import math\n'), ((1777, 1788), 'math.log', 'math.log', (['(2)'], {}), '(2)\n', (1785, 1788), False, 'import math\n'), ((1762, 1774), 'math.log2', 'math.log2', (['p'], {}), '(p)\n', (1771, 1774), False, 'import math\n')] |
import sqlite3
con = sqlite3.connect(":memory:")
# enable extension loading
con.enable_load_extension(True)
# Load the fulltext search extension
con.execute("select load_extension('./fts3.so')")
# alternatively you can load the extension using an API call:
# con.load_extension("./fts3.so")
# disable extension loading again
con.enable_load_extension(False)
# example from SQLite wiki
con.execute("create virtual table recipe using fts3(name, ingredients)")
con.executescript("""
insert into recipe (name, ingredients) values ('broccoli stew', 'broccoli peppers cheese tomatoes');
insert into recipe (name, ingredients) values ('pumpkin stew', 'pumpkin onions garlic celery');
insert into recipe (name, ingredients) values ('broccoli pie', 'broccoli cheese onions flour');
insert into recipe (name, ingredients) values ('pumpkin pie', 'pumpkin sugar flour butter');
""")
for row in con.execute("select rowid, name, ingredients from recipe where name match 'pie'"):
print(row)
| [
"sqlite3.connect"
] | [((22, 49), 'sqlite3.connect', 'sqlite3.connect', (['""":memory:"""'], {}), "(':memory:')\n", (37, 49), False, 'import sqlite3\n')] |
# Lint as: python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility for exporting an InferenceGraph proto from model params."""
import collections
import contextlib
import re
import lingvo.compat as tf
from lingvo.core import base_model
from lingvo.core import bfloat16_variables
from lingvo.core import inference_graph_pb2
from lingvo.core import py_utils
import six
from google.protobuf import text_format
FLAGS = tf.flags.FLAGS
# InferenceDeviceOptions contains options to configure inference on the device.
# device: Device to infer on.
# retain_device_placement: If true, the specified device in the generated
# inference graph nodes will be retained. Otherwise, the specified device
# will be cleared, so that the runtime can choose automatically.
# var_options: Options on handling variables. For TPUs, variables can be
# either placed on device through 'ON_DEVICE' option, or treated as
# constants with AS_CONSTANTS.
# gen_init_op: Whether to serialize initialization ops for the device. For TPUs,
# servers can be initialized globally once, in which case this should be
# turned off to avoid tripping initialization checks.
# dtype_override: Whether to override the dtype to use for activations and
# weights in the model. Options supported are None or tf.bfloat16.
InferenceDeviceOptions = collections.namedtuple('InferenceDeviceOptions', [
'device', 'retain_device_placement', 'var_options', 'gen_init_op',
'dtype_override', 'fprop_dtype_override'
])
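# Illustrative only (not from the original module): a typical TPU-serving configuration built from the
# fields documented above might look like
#   InferenceDeviceOptions(device='tpu', retain_device_placement=False, var_options='AS_CONSTANTS',
#                          gen_init_op=True, dtype_override=None, fprop_dtype_override=None)
# where 'AS_CONSTANTS' folds variables into the graph as constants (see IsTpu and the var_options check below).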
_CONST_GUARANTEE = None
@contextlib.contextmanager
def NoConstGuaranteeScope():
"""Disallow const gauranteeing variable with-in scope."""
global _CONST_GUARANTEE
var_scope = tf.get_variable_scope()
old_caching_device = var_scope.caching_device
old_val = _CONST_GUARANTEE
var_scope.set_caching_device(None)
_CONST_GUARANTEE = False
yield
_CONST_GUARANTEE = old_val
var_scope.set_caching_device(old_caching_device)
# Marks variables as constants for compilation.
def MaybeGuaranteeConstGetter(getter, name, *args, **kwargs):
global _CONST_GUARANTEE
if _CONST_GUARANTEE:
with tf.control_dependencies(None):
return tf.guarantee_const(
getter(name, *args, **kwargs), name=name + '/GuaranteeConst')
else:
return getter(name, *args, **kwargs)
@contextlib.contextmanager
def ConstGuaranteeScope():
"""Treats all variables under this scope as constants."""
global _CONST_GUARANTEE
var_scope = tf.get_variable_scope()
old_custom_getter = var_scope.custom_getter
old_caching_device = var_scope.caching_device
old_val = _CONST_GUARANTEE
var_scope.set_custom_getter(MaybeGuaranteeConstGetter)
var_scope.set_caching_device(lambda op: op.device)
_CONST_GUARANTEE = True
yield
_CONST_GUARANTEE = old_val
var_scope.set_custom_getter(old_custom_getter)
var_scope.set_caching_device(old_caching_device)
@contextlib.contextmanager
def _DummyScope():
yield None
def _GetVarName(v):
return v.name[:-len(':0')]
def _MakeVariableDictionary(variables):
"""Returns a dictionary with name -> tf.Variable() mapping."""
vars_dict = {}
for v in variables:
vars_dict[_GetVarName(v)] = v
return vars_dict
def IsTpu(device_options):
return device_options.device == 'tpu'
def ShouldForceBfloat16ForWeightsAndActivations(device_options):
return device_options.dtype_override == tf.bfloat16
def ShouldForceBfloat16ForActivations(device_options):
return device_options.fprop_dtype_override == tf.bfloat16
def ConvertSubgraphDictToProto(subgraphs_dict):
"""Converts dict of subgraphs/feeds/fetches to InferenceGraph.
Args:
subgraphs_dict: Dict of (fetches, feeds) where each fetches/feeds is a
NestedMap.
Returns:
Equivalent InferenceGraph.
"""
# Build the output inference graph.
inference_graph_proto = inference_graph_pb2.InferenceGraph()
for subgraph_name, tensors in subgraphs_dict.items():
fetches = tensors[0]
feeds = tensors[1]
# Rewrite fetches and feeds to map to their tensor name instead of
# Tensor instance.
named_fetches = {k: v.name for k, v in fetches.items() if v is not None}
named_feeds = {k: v.name for k, v in feeds.items() if v is not None}
# Export as subgraph.
inference_graph_proto.subgraphs[subgraph_name].fetches.update(named_fetches)
inference_graph_proto.subgraphs[subgraph_name].feeds.update(named_feeds)
return inference_graph_proto
def GetOutputOpNames(graph,
inference_graph_proto,
subgraphs=None,
preserve_colocation_nodes=True,
preserve_saver_restore_nodes=False,
preserve_extra_ops=None):
"""Gets output op names from an inference graph.
Args:
graph: The tf graph.
inference_graph_proto: an InferenceGraph proto.
subgraphs: an optional list of subgraph names. If provided, only output ops
from these subgraphs are preserved. Otherwise, all subgraphs are included.
preserve_colocation_nodes: a Python bool, default to True. Preserves nodes
colocating with the closure of output ops in the returned array.
preserve_saver_restore_nodes: a Python bool, default to False. Preserves
nodes for restoring according to inference_graph_proto.saver_def.
preserve_extra_ops: an optional list of extra op names to preserve as long
as they present in the graph.
Returns:
Array of tf op names that should be preserved in the graph.
"""
output_op_names = set()
def _GetOpName(tensor_or_op_name):
"""Returns the op name of the given node name."""
# Tensor names have format <op_name>:<output_index>. Some inference
# graphs put tensors and others put ops in the feeds/fetches (depends
# on how it is used). We differentiate here. We still do the lookup in
# the graph to sanity check (versus relying on the text manipulation).
# If this logic ever breaks, TensorFlow will raise a ValueError with
# a description of the syntax of each.
if re.search(r':[0-9]+$', tensor_or_op_name):
# Tensor-name.
t = graph.get_tensor_by_name(tensor_or_op_name)
return t.op.name
else:
op = graph.get_operation_by_name(tensor_or_op_name)
return op.name
for subgraph_name, subgraph in inference_graph_proto.subgraphs.items():
if subgraphs and subgraph_name not in subgraphs:
tf.logging.info('Skip subgraph %s.', subgraph_name)
continue
# Sometimes feeds aren't connected to any outputs but keep them in the graph
# anyways to avoid errors.
for tensor_or_op_name in (list(subgraph.feeds.values()) +
list(subgraph.fetches.values())):
output_op_names.add(_GetOpName(tensor_or_op_name))
if preserve_saver_restore_nodes:
    # Only the nodes needed for restoring are preserved. saver_def.save_tensor_name is
# skipped because it's only used for saving.
saver_def = inference_graph_proto.saver_def
for op_name in [saver_def.filename_tensor_name, saver_def.restore_op_name]:
try:
output_op_names.add(_GetOpName(op_name))
except KeyError:
tf.logging.info('Op/tensor %s not in the graph. Ignoring.' % op_name)
if not preserve_colocation_nodes and not preserve_extra_ops:
return sorted(list(output_op_names))
# We also need to preserve any nodes that are used for colocation.
# E.g., a node may have this attr:
# attr {
# key: "_class"
# value {
# list {
# s: "loc:@inference/embedding_lookup/Read/ReadVariableOp"
# }
# }
# }
#
# In this case, we need to make sure the node
# inference/embedding_lookup/Read/ReadVariableOp is not pruned.
#
# TODO(zhifengc): It's possible that it's better to fix in
# tf.graph_util.extract_sub_graph.
graph_def = tf.graph_util.extract_sub_graph(graph.as_graph_def(),
list(output_op_names))
reachable_vars = [node.name for node in graph_def.node]
for node in graph.get_operations():
if preserve_extra_ops and node.name in preserve_extra_ops:
output_op_names.add(node.name)
elif preserve_colocation_nodes and '_class' in node.node_def.attr:
for loc in node.node_def.attr['_class'].list.s:
loc = six.ensure_text(loc, 'utf-8')
if loc.startswith('loc:@'):
loc_name = loc[5:]
if loc_name not in reachable_vars:
# Skip nodes that cannot be reached from the pruned graph.
continue
output_op_names.add(node.name)
return sorted(list(output_op_names))
def _ParamExists(param_obj, param_name):
"""Tests whether param_name is contained in param_obj."""
if not param_obj:
return
for k, _ in param_obj.IterParams():
if k == param_name:
return True
return False
def _FreezeGraphFromCheckpoint(graph, saver, checkpoint, output_op_names):
"""Freezes a graph from a checkpoint.
Args:
graph: tf.Graph.
saver: The tf.Saver to use for restoration.
checkpoint: The checkpoint to restore.
output_op_names: Names of output ops.
Returns:
Resulting tf.GraphDef.
"""
sess = tf.Session(graph=graph, config=py_utils.SessionConfig())
saver.restore(sess, checkpoint)
return tf.graph_util.convert_variables_to_constants(
sess, graph.as_graph_def(), output_op_names)
def _FreezeDefaults(graph, output_op_names):
"""Default initializes a graph and freezes it.
Args:
graph: tf.Graph.
output_op_names: Names of output ops.
Returns:
Resulting tf.GraphDef.
"""
with tf.Session(graph=graph, config=py_utils.SessionConfig()) as sess:
sess.run(graph.get_operation_by_name('init_all_variables'))
return tf.graph_util.convert_variables_to_constants(sess,
graph.as_graph_def(),
output_op_names)
class InferenceGraphExporter:
"""Class for exporting inference graphs."""
@classmethod
def Export(cls,
model_cfg,
model_task_name=None,
device_options=InferenceDeviceOptions(
device='',
retain_device_placement=False,
var_options=None,
gen_init_op=True,
dtype_override=None,
fprop_dtype_override=None),
freeze_checkpoint=None,
freeze_defaults=False,
export_path=None,
subgraph_filter=None,
random_seed=None,
disable_packed_input=True):
"""Exports a InferenceGraph proto with piecewise subgraphs.
Sets FLAGS.enable_asserts to False unless user explicitly sets it to True.
Note: Enable FLAGS.pin_vars_to_cpu (default false) to make weight-sharing
and multi-core inference on TPUs work properly.
Args:
model_cfg: a Params instance as returned by
model_registry.GetParams(modelname, 'Test') or model_params.Model().
model_task_name: The task to generate an inference graph for. Should be
None for single-task models.
device_options: Device options for the accelerator used for serving.
freeze_checkpoint: The checkpoint to load. Loads and freezes the model if
given.
freeze_defaults: Default initializes the graph and freeze. Useful for
early testing of downstream tools without having a checkpoint.
export_path: If not None, write the inference graph in ASCII to this path.
subgraph_filter: A string or a list of subgraph names. If not None or
empty, export only this list of inference subgraphs.
random_seed: Fixes the random seed in the exported inference graph.
disable_packed_input: Disable packed input for inference writing purposes.
Returns:
InferenceGraph proto.
Raises:
ValueError: if the model does not support the listed subgraphs.
"""
assert issubclass(model_cfg.cls, base_model.BaseModel)
if device_options.dtype_override and device_options.fprop_dtype_override:
      raise ValueError(
          'device_options(dtype_override, fprop_dtype_override) cannot both be '
          'set.')
if subgraph_filter and not isinstance(subgraph_filter, (tuple, list)):
subgraph_filter = [subgraph_filter]
# Disable assertions unless user explicitly enables it.
if FLAGS['enable_asserts'].using_default_value:
FLAGS.enable_asserts = False
# TODO(laurenzo): Work out how much we need to specify here in terms of
# cluster configuration.
cls._SetClusterParams(model_cfg.cluster, device_options)
# Configure the model.
model_cfg.random_seed = random_seed
model_cfg.is_inference = True
if disable_packed_input:
def _DisablePackedInput(task):
if (_ParamExists(task, 'encoder') and
_ParamExists(task.encoder, 'packed_input')):
task.encoder.packed_input = False
if (_ParamExists(task, 'decoder') and
_ParamExists(task.decoder, 'packed_input')):
task.decoder.packed_input = False
if issubclass(model_cfg.cls, base_model.MultiTaskModel):
for _, task_param in model_cfg.task_params.IterParams():
_DisablePackedInput(task_param)
else:
_DisablePackedInput(model_cfg.task)
tf.logging.debug('Model %s params:', model_cfg.name)
for line in model_cfg.ToText().split('\n'):
tf.logging.debug('%s', line)
# Instantiate the graph.
graph = tf.Graph()
with graph.as_default():
tf.random.set_seed(random_seed)
cluster = model_cfg.cluster.Instantiate()
device = cluster.GetPlacer()
tpu_const_scope = _DummyScope()
if (IsTpu(device_options) and
device_options.var_options == 'AS_CONSTANTS'):
# Do not specify devices for variables if we are marking them as
# constants.
device = ''
tpu_const_scope = ConstGuaranteeScope()
with cluster, tf.device(device), tpu_const_scope:
bfloat16_override = ShouldForceBfloat16ForWeightsAndActivations(
device_options)
if bfloat16_override:
py_utils.UpdateDtype(model_cfg, tf.bfloat16)
py_utils.UpdateFpropDtype(model_cfg, tf.bfloat16)
act_bfloat16_override = ShouldForceBfloat16ForActivations(
device_options)
if act_bfloat16_override:
py_utils.UpdateFpropDtype(model_cfg, tf.bfloat16)
# Hard-code TPU-related flags prior to instantiating model.
old_enable_asserts = FLAGS.enable_asserts
old_xla_device = FLAGS.xla_device
if IsTpu(device_options):
FLAGS.enable_asserts = False
FLAGS.xla_device = 'tpu'
try:
mdl = model_cfg.Instantiate()
task = mdl.GetTask(model_task_name)
variables_to_restore = (
_MakeVariableDictionary(tf.global_variables()) if not mdl.ema else
mdl.ema.variables_to_restore(mdl.variables_for_ema))
if bfloat16_override:
saver_var_spec = (
bfloat16_variables
.get_saver_spec_for_variables_with_bf16_overrides(
variables_to_restore))
else:
saver_var_spec = variables_to_restore
saver = tf.train.Saver(saver_var_spec)
tf.variables_initializer(
tf.global_variables(), name='init_all_variables')
if IsTpu(device_options) and device_options.gen_init_op:
tf.group(tf.tpu.initialize_system(), name='tpu_init_op')
if freeze_checkpoint or freeze_defaults:
# Replace variables with tensors using tf.identity in theta before
# freezing to avoid the graph referencing types of DT_RESOURCE.
def AddIdentityToTheta(layer):
layer._private_theta = layer._private_theta.Transform(tf.identity) # pylint: disable=protected-access
layer.children.Transform(AddIdentityToTheta)
AddIdentityToTheta(task)
inference_graph_proto = inference_graph_pb2.InferenceGraph()
subgraphs_proto = task.Inference()
if isinstance(subgraphs_proto, dict):
subgraphs_proto = ConvertSubgraphDictToProto(subgraphs_proto)
for name, subgraph in subgraphs_proto.subgraphs.items():
if not subgraph_filter or name in subgraph_filter:
inference_graph_proto.subgraphs[name].CopyFrom(subgraph)
# Yes, graph collections are bad, however this seems to be the
          # easiest way to get these assets registered from
# TextFileInitializer.
assets_collection = tf.compat.v1.get_collection(
tf.compat.v1.GraphKeys.ASSET_FILEPATHS)
for asset in assets_collection:
if asset.op.type == 'Const' and asset.op.get_attr(
'dtype') == tf.dtypes.string:
constant_value = asset.op.get_attr('value')
if constant_value.string_val:
tf.logging.info('Found asset file_path: %s',
constant_value.string_val[0])
asset_file_def = inference_graph_proto.asset_file_def.add()
asset_file_def.tensor_info.name = asset.name
asset_file_def.filename = constant_value.string_val[0]
# Add a table init op and global variable init op to the graph.
# Tables can be declared anywhere in the graph, so this op has to be
# added last.
tf.tables_initializer(name='init_all_tables')
finally:
# Reset TPU-related flags after model instantiation.
FLAGS.enable_asserts = old_enable_asserts
FLAGS.xla_device = old_xla_device
tf.logging.info('Graph contains ops: %r',
[op.name for op in graph.get_operations()])
# Collection defs
if not tf.executing_eagerly():
meta_graph = tf.train.export_meta_graph(graph=graph)
for key in meta_graph.collection_def:
tf.logging.info('copying collection %s', key)
inference_graph_proto.collection_def[key].CopyFrom(
meta_graph.collection_def[key])
else:
tf.logging.warning('Not exporting collection defs '
'since operating in eager mode.')
# Freezing.
if freeze_defaults or freeze_checkpoint:
output_op_names = GetOutputOpNames(
graph,
inference_graph_proto,
preserve_colocation_nodes=False,
preserve_saver_restore_nodes=False)
if cls._DeviceSupportsFreezing(device_options):
raise ValueError('freeze_checkpoint cannot be used with device ' +
device_options.device)
if freeze_checkpoint:
tf.logging.info('Freezing graph from checkpoint: %s', freeze_checkpoint)
graph_def = _FreezeGraphFromCheckpoint(graph, saver, freeze_checkpoint,
output_op_names)
elif freeze_defaults:
tf.logging.info('Default initializing graph and freezing.')
graph_def = _FreezeDefaults(graph, output_op_names)
else:
inference_graph_proto.saver_def.CopyFrom(saver.as_saver_def())
output_op_names = GetOutputOpNames(graph, inference_graph_proto)
# Prune the graph to just the parts we need.
# To support restoring, we have to not prune out the restore node.
output_op_names.append('init_all_tables')
output_op_names.append('init_all_variables')
output_op_names.append('save/control_dependency')
output_op_names.append('save/restore_all')
if IsTpu(device_options) and device_options.gen_init_op:
output_op_names.append('tpu_init_op')
graph_def = graph.as_graph_def()
tf.logging.info('Pruning graph to output ops: %r', output_op_names)
graph_def = tf.graph_util.extract_sub_graph(graph_def, output_op_names)
if not device_options.retain_device_placement:
# Clear the device so that the runtime can choose.
tf.logging.info('Clearing device placement for: %s',
device_options.device)
for node in graph_def.node:
node.ClearField('device')
for function in graph_def.library.function:
for node_def in function.node_def:
node_def.ClearField('device')
inference_graph_proto.graph_def.CopyFrom(graph_def)
if export_path:
with tf.io.gfile.GFile(export_path, 'w') as f:
f.write(text_format.MessageToString(inference_graph_proto))
return inference_graph_proto
@classmethod
def _SetClusterParams(cls, cluster_params, device_options):
"""Sets cluster params.
Args:
cluster_params: Model().cluster config.
device_options: InferenceDeviceOptions.
"""
def Update(p):
"""Update cluster params `p`."""
p.name = '/job:localhost'
p.replicas = 1
p.tpus_per_replica = 1 if IsTpu(device_options) else 0
p.gpus_per_replica = 0
p.devices_per_split = 1
cluster_params.mode = 'sync'
cluster_params.job = 'decoder'
cluster_params.add_summary = False
cluster_params.do_eval = True
Update(cluster_params.controller)
Update(cluster_params.worker)
Update(cluster_params.ps)
Update(cluster_params.evaler)
Update(cluster_params.decoder)
Update(cluster_params.input)
@classmethod
def _DeviceSupportsFreezing(cls, device_options):
return IsTpu(device_options)
| [
"lingvo.compat.train.Saver",
"lingvo.compat.logging.info",
"lingvo.compat.compat.v1.get_collection",
"lingvo.compat.graph_util.extract_sub_graph",
"re.search",
"lingvo.core.py_utils.UpdateFpropDtype",
"lingvo.compat.io.gfile.GFile",
"lingvo.compat.get_variable_scope",
"lingvo.compat.executing_eagerly",
"six.ensure_text",
"lingvo.core.inference_graph_pb2.InferenceGraph",
"lingvo.compat.random.set_seed",
"lingvo.compat.logging.warning",
"collections.namedtuple",
"lingvo.compat.tables_initializer",
"lingvo.compat.global_variables",
"lingvo.core.py_utils.SessionConfig",
"lingvo.compat.control_dependencies",
"lingvo.core.bfloat16_variables.get_saver_spec_for_variables_with_bf16_overrides",
"lingvo.compat.tpu.initialize_system",
"lingvo.compat.device",
"lingvo.compat.train.export_meta_graph",
"lingvo.compat.logging.debug",
"google.protobuf.text_format.MessageToString",
"lingvo.compat.Graph",
"lingvo.core.py_utils.UpdateDtype"
] | [((1968, 2135), 'collections.namedtuple', 'collections.namedtuple', (['"""InferenceDeviceOptions"""', "['device', 'retain_device_placement', 'var_options', 'gen_init_op',\n 'dtype_override', 'fprop_dtype_override']"], {}), "('InferenceDeviceOptions', ['device',\n 'retain_device_placement', 'var_options', 'gen_init_op',\n 'dtype_override', 'fprop_dtype_override'])\n", (1990, 2135), False, 'import collections\n'), ((2321, 2344), 'lingvo.compat.get_variable_scope', 'tf.get_variable_scope', ([], {}), '()\n', (2342, 2344), True, 'import lingvo.compat as tf\n'), ((3083, 3106), 'lingvo.compat.get_variable_scope', 'tf.get_variable_scope', ([], {}), '()\n', (3104, 3106), True, 'import lingvo.compat as tf\n'), ((4450, 4486), 'lingvo.core.inference_graph_pb2.InferenceGraph', 'inference_graph_pb2.InferenceGraph', ([], {}), '()\n', (4484, 4486), False, 'from lingvo.core import inference_graph_pb2\n'), ((6651, 6691), 're.search', 're.search', (['""":[0-9]+$"""', 'tensor_or_op_name'], {}), "(':[0-9]+$', tensor_or_op_name)\n", (6660, 6691), False, 'import re\n'), ((13935, 13987), 'lingvo.compat.logging.debug', 'tf.logging.debug', (['"""Model %s params:"""', 'model_cfg.name'], {}), "('Model %s params:', model_cfg.name)\n", (13951, 13987), True, 'import lingvo.compat as tf\n'), ((14113, 14123), 'lingvo.compat.Graph', 'tf.Graph', ([], {}), '()\n', (14121, 14123), True, 'import lingvo.compat as tf\n'), ((2742, 2771), 'lingvo.compat.control_dependencies', 'tf.control_dependencies', (['None'], {}), '(None)\n', (2765, 2771), True, 'import lingvo.compat as tf\n'), ((7015, 7066), 'lingvo.compat.logging.info', 'tf.logging.info', (['"""Skip subgraph %s."""', 'subgraph_name'], {}), "('Skip subgraph %s.', subgraph_name)\n", (7030, 7066), True, 'import lingvo.compat as tf\n'), ((9805, 9829), 'lingvo.core.py_utils.SessionConfig', 'py_utils.SessionConfig', ([], {}), '()\n', (9827, 9829), False, 'from lingvo.core import py_utils\n'), ((14042, 14070), 'lingvo.compat.logging.debug', 'tf.logging.debug', (['"""%s"""', 'line'], {}), "('%s', line)\n", (14058, 14070), True, 'import lingvo.compat as tf\n'), ((14159, 14190), 'lingvo.compat.random.set_seed', 'tf.random.set_seed', (['random_seed'], {}), '(random_seed)\n', (14177, 14190), True, 'import lingvo.compat as tf\n'), ((18498, 18520), 'lingvo.compat.executing_eagerly', 'tf.executing_eagerly', ([], {}), '()\n', (18518, 18520), True, 'import lingvo.compat as tf\n'), ((18541, 18580), 'lingvo.compat.train.export_meta_graph', 'tf.train.export_meta_graph', ([], {'graph': 'graph'}), '(graph=graph)\n', (18567, 18580), True, 'import lingvo.compat as tf\n'), ((18799, 18886), 'lingvo.compat.logging.warning', 'tf.logging.warning', (['"""Not exporting collection defs since operating in eager mode."""'], {}), "(\n 'Not exporting collection defs since operating in eager mode.')\n", (18817, 18886), True, 'import lingvo.compat as tf\n'), ((20372, 20439), 'lingvo.compat.logging.info', 'tf.logging.info', (['"""Pruning graph to output ops: %r"""', 'output_op_names'], {}), "('Pruning graph to output ops: %r', output_op_names)\n", (20387, 20439), True, 'import lingvo.compat as tf\n'), ((20458, 20517), 'lingvo.compat.graph_util.extract_sub_graph', 'tf.graph_util.extract_sub_graph', (['graph_def', 'output_op_names'], {}), '(graph_def, output_op_names)\n', (20489, 20517), True, 'import lingvo.compat as tf\n'), ((20633, 20708), 'lingvo.compat.logging.info', 'tf.logging.info', (['"""Clearing device placement for: %s"""', 'device_options.device'], {}), "('Clearing device placement for: %s', 
device_options.device)\n", (20648, 20708), True, 'import lingvo.compat as tf\n'), ((10222, 10246), 'lingvo.core.py_utils.SessionConfig', 'py_utils.SessionConfig', ([], {}), '()\n', (10244, 10246), False, 'from lingvo.core import py_utils\n'), ((14588, 14605), 'lingvo.compat.device', 'tf.device', (['device'], {}), '(device)\n', (14597, 14605), True, 'import lingvo.compat as tf\n'), ((18633, 18678), 'lingvo.compat.logging.info', 'tf.logging.info', (['"""copying collection %s"""', 'key'], {}), "('copying collection %s', key)\n", (18648, 18678), True, 'import lingvo.compat as tf\n'), ((19366, 19438), 'lingvo.compat.logging.info', 'tf.logging.info', (['"""Freezing graph from checkpoint: %s"""', 'freeze_checkpoint'], {}), "('Freezing graph from checkpoint: %s', freeze_checkpoint)\n", (19381, 19438), True, 'import lingvo.compat as tf\n'), ((21021, 21056), 'lingvo.compat.io.gfile.GFile', 'tf.io.gfile.GFile', (['export_path', '"""w"""'], {}), "(export_path, 'w')\n", (21038, 21056), True, 'import lingvo.compat as tf\n'), ((7756, 7825), 'lingvo.compat.logging.info', 'tf.logging.info', (["('Op/tensor %s not in the graph. Ignoring.' % op_name)"], {}), "('Op/tensor %s not in the graph. Ignoring.' % op_name)\n", (7771, 7825), True, 'import lingvo.compat as tf\n'), ((8898, 8927), 'six.ensure_text', 'six.ensure_text', (['loc', '"""utf-8"""'], {}), "(loc, 'utf-8')\n", (8913, 8927), False, 'import six\n'), ((14767, 14811), 'lingvo.core.py_utils.UpdateDtype', 'py_utils.UpdateDtype', (['model_cfg', 'tf.bfloat16'], {}), '(model_cfg, tf.bfloat16)\n', (14787, 14811), False, 'from lingvo.core import py_utils\n'), ((14822, 14871), 'lingvo.core.py_utils.UpdateFpropDtype', 'py_utils.UpdateFpropDtype', (['model_cfg', 'tf.bfloat16'], {}), '(model_cfg, tf.bfloat16)\n', (14847, 14871), False, 'from lingvo.core import py_utils\n'), ((15012, 15061), 'lingvo.core.py_utils.UpdateFpropDtype', 'py_utils.UpdateFpropDtype', (['model_cfg', 'tf.bfloat16'], {}), '(model_cfg, tf.bfloat16)\n', (15037, 15061), False, 'from lingvo.core import py_utils\n'), ((15909, 15939), 'lingvo.compat.train.Saver', 'tf.train.Saver', (['saver_var_spec'], {}), '(saver_var_spec)\n', (15923, 15939), True, 'import lingvo.compat as tf\n'), ((16675, 16711), 'lingvo.core.inference_graph_pb2.InferenceGraph', 'inference_graph_pb2.InferenceGraph', ([], {}), '()\n', (16709, 16711), False, 'from lingvo.core import inference_graph_pb2\n'), ((17276, 17343), 'lingvo.compat.compat.v1.get_collection', 'tf.compat.v1.get_collection', (['tf.compat.v1.GraphKeys.ASSET_FILEPATHS'], {}), '(tf.compat.v1.GraphKeys.ASSET_FILEPATHS)\n', (17303, 17343), True, 'import lingvo.compat as tf\n'), ((18131, 18176), 'lingvo.compat.tables_initializer', 'tf.tables_initializer', ([], {'name': '"""init_all_tables"""'}), "(name='init_all_tables')\n", (18152, 18176), True, 'import lingvo.compat as tf\n'), ((19619, 19678), 'lingvo.compat.logging.info', 'tf.logging.info', (['"""Default initializing graph and freezing."""'], {}), "('Default initializing graph and freezing.')\n", (19634, 19678), True, 'import lingvo.compat as tf\n'), ((21079, 21129), 'google.protobuf.text_format.MessageToString', 'text_format.MessageToString', (['inference_graph_proto'], {}), '(inference_graph_proto)\n', (21106, 21129), False, 'from google.protobuf import text_format\n'), ((15695, 15789), 'lingvo.core.bfloat16_variables.get_saver_spec_for_variables_with_bf16_overrides', 'bfloat16_variables.get_saver_spec_for_variables_with_bf16_overrides', (['variables_to_restore'], {}), '(\n variables_to_restore)\n', (15762, 
15789), False, 'from lingvo.core import bfloat16_variables\n'), ((15990, 16011), 'lingvo.compat.global_variables', 'tf.global_variables', ([], {}), '()\n', (16009, 16011), True, 'import lingvo.compat as tf\n'), ((15505, 15526), 'lingvo.compat.global_variables', 'tf.global_variables', ([], {}), '()\n', (15524, 15526), True, 'import lingvo.compat as tf\n'), ((16128, 16154), 'lingvo.compat.tpu.initialize_system', 'tf.tpu.initialize_system', ([], {}), '()\n', (16152, 16154), True, 'import lingvo.compat as tf\n'), ((17628, 17702), 'lingvo.compat.logging.info', 'tf.logging.info', (['"""Found asset file_path: %s"""', 'constant_value.string_val[0]'], {}), "('Found asset file_path: %s', constant_value.string_val[0])\n", (17643, 17702), True, 'import lingvo.compat as tf\n')] |
# pylint: disable=no-self-use,invalid-name
import numpy as np
from numpy.testing import assert_almost_equal
import torch
from allennlp.common import Params
from allennlp.data import Vocabulary
from allennlp.modules.token_embedders import BagOfWordCountsTokenEmbedder
from allennlp.common.testing import AllenNlpTestCase
class TestBagOfWordCountsTokenEmbedder(AllenNlpTestCase):
def setUp(self):
super(TestBagOfWordCountsTokenEmbedder, self).setUp()
self.vocab = Vocabulary()
self.vocab.add_token_to_namespace("1")
self.vocab.add_token_to_namespace("2")
self.vocab.add_token_to_namespace("3")
self.vocab.add_token_to_namespace("4")
def test_forward_calculates_bow_properly(self):
params = Params({})
embedder = BagOfWordCountsTokenEmbedder.from_params(self.vocab, params=params)
numpy_tensor = np.array([[2, 0], [3, 0], [4, 4]])
inputs = torch.from_numpy(numpy_tensor).unsqueeze(1)
embedder_output = embedder(inputs)
numpy_tensor = np.array([[1, 0, 1, 0, 0, 0], [1, 0, 0, 1, 0, 0], [0, 0, 0, 0, 2, 0]])
manual_output = torch.from_numpy(numpy_tensor).float()
assert_almost_equal(embedder_output.data.numpy(), manual_output.data.numpy())
def test_projects_properly(self):
params = Params({"projection_dim": 50})
embedder = BagOfWordCountsTokenEmbedder.from_params(self.vocab, params=params)
numpy_tensor = np.array([self.vocab.get_token_index(x) for x in ["1", "2", "3"]])
inputs = torch.from_numpy(numpy_tensor).unsqueeze(1)
embedder_output = embedder(inputs)
assert embedder_output.shape[1] == 50
| [
"allennlp.common.Params",
"allennlp.data.Vocabulary",
"torch.from_numpy",
"numpy.array",
"allennlp.modules.token_embedders.BagOfWordCountsTokenEmbedder.from_params"
] | [((484, 496), 'allennlp.data.Vocabulary', 'Vocabulary', ([], {}), '()\n', (494, 496), False, 'from allennlp.data import Vocabulary\n'), ((755, 765), 'allennlp.common.Params', 'Params', (['{}'], {}), '({})\n', (761, 765), False, 'from allennlp.common import Params\n'), ((785, 852), 'allennlp.modules.token_embedders.BagOfWordCountsTokenEmbedder.from_params', 'BagOfWordCountsTokenEmbedder.from_params', (['self.vocab'], {'params': 'params'}), '(self.vocab, params=params)\n', (825, 852), False, 'from allennlp.modules.token_embedders import BagOfWordCountsTokenEmbedder\n'), ((876, 910), 'numpy.array', 'np.array', (['[[2, 0], [3, 0], [4, 4]]'], {}), '([[2, 0], [3, 0], [4, 4]])\n', (884, 910), True, 'import numpy as np\n'), ((1038, 1108), 'numpy.array', 'np.array', (['[[1, 0, 1, 0, 0, 0], [1, 0, 0, 1, 0, 0], [0, 0, 0, 0, 2, 0]]'], {}), '([[1, 0, 1, 0, 0, 0], [1, 0, 0, 1, 0, 0], [0, 0, 0, 0, 2, 0]])\n', (1046, 1108), True, 'import numpy as np\n'), ((1314, 1344), 'allennlp.common.Params', 'Params', (["{'projection_dim': 50}"], {}), "({'projection_dim': 50})\n", (1320, 1344), False, 'from allennlp.common import Params\n'), ((1364, 1431), 'allennlp.modules.token_embedders.BagOfWordCountsTokenEmbedder.from_params', 'BagOfWordCountsTokenEmbedder.from_params', (['self.vocab'], {'params': 'params'}), '(self.vocab, params=params)\n', (1404, 1431), False, 'from allennlp.modules.token_embedders import BagOfWordCountsTokenEmbedder\n'), ((928, 958), 'torch.from_numpy', 'torch.from_numpy', (['numpy_tensor'], {}), '(numpy_tensor)\n', (944, 958), False, 'import torch\n'), ((1133, 1163), 'torch.from_numpy', 'torch.from_numpy', (['numpy_tensor'], {}), '(numpy_tensor)\n', (1149, 1163), False, 'import torch\n'), ((1539, 1569), 'torch.from_numpy', 'torch.from_numpy', (['numpy_tensor'], {}), '(numpy_tensor)\n', (1555, 1569), False, 'import torch\n')] |
import argparse
import logging
import os
from detectron2.evaluation import inference_context
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
from detectron2.utils.collect_env import collect_env_info
from detectron2.utils.logger import setup_logger
from fvcore.common.file_io import PathManager
from pathlib import Path
from pytorch3d.io import save_obj
from shapenet.config.config import get_shapenet_cfg
from shapenet.data.utils import imagenet_preprocess
from shapenet.modeling.heads import voxel_head
from shapenet.modeling.mesh_arch import build_model
from shapenet.utils.checkpoint import clean_state_dict
import torchvision.transforms as T
import glob
from PIL import Image
import trimesh
import pyvista as pv
import pyacvd
import numpy as np
logger = logging.getLogger('demo')
def setup_cfgs(args):
cfg = get_shapenet_cfg()
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.freeze()
return cfg
def get_parser():
parser = argparse.ArgumentParser(description="MeshRCNN Demo")
parser.add_argument(
"--config-file",
default="configs/shapenet/voxmesh_R50.yaml",
metavar="FILE",
help="path to config file",
)
parser.add_argument("--input", help="A path to an input main folder")
# parser.add_argument("--output", help="A directory to save output visualizations")
parser.add_argument(
"--focal-length", type=float, default=20.0, help="Focal length for the image"
)
parser.add_argument(
"--onlyhighest", action="store_true", help="will return only the highest scoring detection"
)
parser.add_argument(
"opts",
help="Modify model config options using the command-line",
default=None,
nargs=argparse.REMAINDER,
)
return parser
def resample_mesh(mesh, count=2466):
pv_mesh = pv.wrap(mesh)
# logger.info('Original mesh:')
# print(pv_mesh)
clus = pyacvd.Clustering(pv_mesh)
clus.subdivide(3)
clus.cluster(count)
# remesh
remesh = clus.create_mesh()
# verts = remesh.points
# faces = remesh.faces.reshape((-1, 4))[:, 1:]
return remesh
if __name__ == "__main__":
mp.set_start_method("spawn", force=True)
args = get_parser().parse_args()
device = torch.device("cuda:%d" % 0)
logger = setup_logger(name="demo shapenet")
logger.info("Arguments: " + str(args))
cfg = setup_cfgs(args)
    # load checkpoint and build model
    if cfg.MODEL.CHECKPOINT == "":
        raise ValueError("Invalid checkpoint provided")
logger.info("Loading model from checkpoint: %s" % (cfg.MODEL.CHECKPOINT))
cp = torch.load(PathManager.get_local_path(cfg.MODEL.CHECKPOINT))
state_dict = clean_state_dict(cp["best_states"]["model"])
model = build_model(cfg)
model.load_state_dict(state_dict)
logger.info("Model loaded")
model.to(device)
sub_dir = sorted(os.listdir(args.input))
for sd in sub_dir:
curr_path = os.path.join(args.input, sd)
images = glob.glob(curr_path + "/*.png")
for img_dir in images:
# load image
transform = [T.ToTensor()]
transform.append(imagenet_preprocess())
transform = T.Compose(transform)
im_name = img_dir.split("/")[-1].split(".")[0]
with PathManager.open(img_dir, "rb") as f:
img = Image.open(f).convert("RGB")
img = transform(img)
img = img[None, :, :, :]
img = img.to(device)
with inference_context(model):
img_feats, voxel_scores, meshes_pred, P, cubified_meshes = model(img)
# Save voxel_score
voxel_odir = os.path.join(curr_path, "voxel_score")
if not Path(voxel_odir).is_dir():
os.mkdir(voxel_odir)
voxel_file = os.path.join(voxel_odir, "%s.pt" % (im_name))
torch.save(voxel_scores, voxel_file)
# Save image features
imgfeat_odir = os.path.join(curr_path, "img_feat")
if not Path(imgfeat_odir).is_dir():
os.mkdir(imgfeat_odir)
img_feat_file = os.path.join(imgfeat_odir, "%s.pt" % (im_name))
torch.save(img_feats, img_feat_file)
# Save P
p_odir = os.path.join(curr_path, "P")
if not Path(p_odir).is_dir():
os.mkdir(p_odir)
p_file = os.path.join(p_odir, "%s.pt" % (im_name))
torch.save(P, p_file)
# Save cubified mesh
cmesh_odir = os.path.join(curr_path, "cube_mesh")
if not Path(cmesh_odir).is_dir():
os.mkdir(cmesh_odir)
cube_mesh_file = os.path.join(cmesh_odir, "%s_cube.obj" % (im_name))
c_verts, c_faces = cubified_meshes[-1].get_mesh_verts_faces(0)
save_obj(cube_mesh_file, c_verts, c_faces)
# Save predicted mesh
mesh_odir = os.path.join(curr_path, "final_mesh")
if not Path(mesh_odir).is_dir():
os.mkdir(mesh_odir)
save_file = os.path.join(mesh_odir, "%s.obj" % (im_name))
verts, faces = meshes_pred[-1].get_mesh_verts_faces(0)
save_obj(save_file, verts, faces)
logger.info("Predictions saved for %s/%s" % (curr_path.split('/')[-1], im_name))
| [
"logging.getLogger",
"os.listdir",
"argparse.ArgumentParser",
"pathlib.Path",
"fvcore.common.file_io.PathManager.open",
"pyvista.wrap",
"os.mkdir",
"torch.multiprocessing.set_start_method",
"torchvision.transforms.ToTensor",
"glob.glob",
"shapenet.config.config.get_shapenet_cfg",
"shapenet.utils.checkpoint.clean_state_dict",
"pytorch3d.io.save_obj",
"shapenet.modeling.mesh_arch.build_model",
"fvcore.common.file_io.PathManager.get_local_path",
"shapenet.data.utils.imagenet_preprocess",
"pyacvd.Clustering",
"torch.save",
"torchvision.transforms.Compose",
"torch.device",
"PIL.Image.open",
"detectron2.evaluation.inference_context",
"os.path.join",
"detectron2.utils.logger.setup_logger"
] | [((840, 865), 'logging.getLogger', 'logging.getLogger', (['"""demo"""'], {}), "('demo')\n", (857, 865), False, 'import logging\n'), ((899, 917), 'shapenet.config.config.get_shapenet_cfg', 'get_shapenet_cfg', ([], {}), '()\n', (915, 917), False, 'from shapenet.config.config import get_shapenet_cfg\n'), ((1059, 1111), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""MeshRCNN Demo"""'}), "(description='MeshRCNN Demo')\n", (1082, 1111), False, 'import argparse\n'), ((1932, 1945), 'pyvista.wrap', 'pv.wrap', (['mesh'], {}), '(mesh)\n', (1939, 1945), True, 'import pyvista as pv\n'), ((2019, 2045), 'pyacvd.Clustering', 'pyacvd.Clustering', (['pv_mesh'], {}), '(pv_mesh)\n', (2036, 2045), False, 'import pyacvd\n'), ((2273, 2313), 'torch.multiprocessing.set_start_method', 'mp.set_start_method', (['"""spawn"""'], {'force': '(True)'}), "('spawn', force=True)\n", (2292, 2313), True, 'import torch.multiprocessing as mp\n'), ((2365, 2392), 'torch.device', 'torch.device', (["('cuda:%d' % 0)"], {}), "('cuda:%d' % 0)\n", (2377, 2392), False, 'import torch\n'), ((2407, 2441), 'detectron2.utils.logger.setup_logger', 'setup_logger', ([], {'name': '"""demo shapenet"""'}), "(name='demo shapenet')\n", (2419, 2441), False, 'from detectron2.utils.logger import setup_logger\n'), ((2808, 2852), 'shapenet.utils.checkpoint.clean_state_dict', 'clean_state_dict', (["cp['best_states']['model']"], {}), "(cp['best_states']['model'])\n", (2824, 2852), False, 'from shapenet.utils.checkpoint import clean_state_dict\n'), ((2865, 2881), 'shapenet.modeling.mesh_arch.build_model', 'build_model', (['cfg'], {}), '(cfg)\n', (2876, 2881), False, 'from shapenet.modeling.mesh_arch import build_model\n'), ((2741, 2789), 'fvcore.common.file_io.PathManager.get_local_path', 'PathManager.get_local_path', (['cfg.MODEL.CHECKPOINT'], {}), '(cfg.MODEL.CHECKPOINT)\n', (2767, 2789), False, 'from fvcore.common.file_io import PathManager\n'), ((2995, 3017), 'os.listdir', 'os.listdir', (['args.input'], {}), '(args.input)\n', (3005, 3017), False, 'import os\n'), ((3063, 3091), 'os.path.join', 'os.path.join', (['args.input', 'sd'], {}), '(args.input, sd)\n', (3075, 3091), False, 'import os\n'), ((3109, 3140), 'glob.glob', 'glob.glob', (["(curr_path + '/*.png')"], {}), "(curr_path + '/*.png')\n", (3118, 3140), False, 'import glob\n'), ((3321, 3341), 'torchvision.transforms.Compose', 'T.Compose', (['transform'], {}), '(transform)\n', (3330, 3341), True, 'import torchvision.transforms as T\n'), ((3812, 3850), 'os.path.join', 'os.path.join', (['curr_path', '"""voxel_score"""'], {}), "(curr_path, 'voxel_score')\n", (3824, 3850), False, 'import os\n'), ((3960, 4003), 'os.path.join', 'os.path.join', (['voxel_odir', "('%s.pt' % im_name)"], {}), "(voxel_odir, '%s.pt' % im_name)\n", (3972, 4003), False, 'import os\n'), ((4018, 4054), 'torch.save', 'torch.save', (['voxel_scores', 'voxel_file'], {}), '(voxel_scores, voxel_file)\n', (4028, 4054), False, 'import torch\n'), ((4117, 4152), 'os.path.join', 'os.path.join', (['curr_path', '"""img_feat"""'], {}), "(curr_path, 'img_feat')\n", (4129, 4152), False, 'import os\n'), ((4269, 4314), 'os.path.join', 'os.path.join', (['imgfeat_odir', "('%s.pt' % im_name)"], {}), "(imgfeat_odir, '%s.pt' % im_name)\n", (4281, 4314), False, 'import os\n'), ((4329, 4365), 'torch.save', 'torch.save', (['img_feats', 'img_feat_file'], {}), '(img_feats, img_feat_file)\n', (4339, 4365), False, 'import torch\n'), ((4409, 4437), 'os.path.join', 'os.path.join', (['curr_path', '"""P"""'], {}), "(curr_path, 
'P')\n", (4421, 4437), False, 'import os\n'), ((4535, 4574), 'os.path.join', 'os.path.join', (['p_odir', "('%s.pt' % im_name)"], {}), "(p_odir, '%s.pt' % im_name)\n", (4547, 4574), False, 'import os\n'), ((4589, 4610), 'torch.save', 'torch.save', (['P', 'p_file'], {}), '(P, p_file)\n', (4599, 4610), False, 'import torch\n'), ((4670, 4706), 'os.path.join', 'os.path.join', (['curr_path', '"""cube_mesh"""'], {}), "(curr_path, 'cube_mesh')\n", (4682, 4706), False, 'import os\n'), ((4820, 4869), 'os.path.join', 'os.path.join', (['cmesh_odir', "('%s_cube.obj' % im_name)"], {}), "(cmesh_odir, '%s_cube.obj' % im_name)\n", (4832, 4869), False, 'import os\n'), ((4959, 5001), 'pytorch3d.io.save_obj', 'save_obj', (['cube_mesh_file', 'c_verts', 'c_faces'], {}), '(cube_mesh_file, c_verts, c_faces)\n', (4967, 5001), False, 'from pytorch3d.io import save_obj\n'), ((5061, 5098), 'os.path.join', 'os.path.join', (['curr_path', '"""final_mesh"""'], {}), "(curr_path, 'final_mesh')\n", (5073, 5098), False, 'import os\n'), ((5205, 5248), 'os.path.join', 'os.path.join', (['mesh_odir', "('%s.obj' % im_name)"], {}), "(mesh_odir, '%s.obj' % im_name)\n", (5217, 5248), False, 'import os\n'), ((5330, 5363), 'pytorch3d.io.save_obj', 'save_obj', (['save_file', 'verts', 'faces'], {}), '(save_file, verts, faces)\n', (5338, 5363), False, 'from pytorch3d.io import save_obj\n'), ((3231, 3243), 'torchvision.transforms.ToTensor', 'T.ToTensor', ([], {}), '()\n', (3241, 3243), True, 'import torchvision.transforms as T\n'), ((3274, 3295), 'shapenet.data.utils.imagenet_preprocess', 'imagenet_preprocess', ([], {}), '()\n', (3293, 3295), False, 'from shapenet.data.utils import imagenet_preprocess\n'), ((3432, 3463), 'fvcore.common.file_io.PathManager.open', 'PathManager.open', (['img_dir', '"""rb"""'], {}), "(img_dir, 'rb')\n", (3448, 3463), False, 'from fvcore.common.file_io import PathManager\n'), ((3643, 3667), 'detectron2.evaluation.inference_context', 'inference_context', (['model'], {}), '(model)\n', (3660, 3667), False, 'from detectron2.evaluation import inference_context\n'), ((3913, 3933), 'os.mkdir', 'os.mkdir', (['voxel_odir'], {}), '(voxel_odir)\n', (3921, 3933), False, 'import os\n'), ((4217, 4239), 'os.mkdir', 'os.mkdir', (['imgfeat_odir'], {}), '(imgfeat_odir)\n', (4225, 4239), False, 'import os\n'), ((4496, 4512), 'os.mkdir', 'os.mkdir', (['p_odir'], {}), '(p_odir)\n', (4504, 4512), False, 'import os\n'), ((4769, 4789), 'os.mkdir', 'os.mkdir', (['cmesh_odir'], {}), '(cmesh_odir)\n', (4777, 4789), False, 'import os\n'), ((5160, 5179), 'os.mkdir', 'os.mkdir', (['mesh_odir'], {}), '(mesh_odir)\n', (5168, 5179), False, 'import os\n'), ((3492, 3505), 'PIL.Image.open', 'Image.open', (['f'], {}), '(f)\n', (3502, 3505), False, 'from PIL import Image\n'), ((3870, 3886), 'pathlib.Path', 'Path', (['voxel_odir'], {}), '(voxel_odir)\n', (3874, 3886), False, 'from pathlib import Path\n'), ((4172, 4190), 'pathlib.Path', 'Path', (['imgfeat_odir'], {}), '(imgfeat_odir)\n', (4176, 4190), False, 'from pathlib import Path\n'), ((4457, 4469), 'pathlib.Path', 'Path', (['p_odir'], {}), '(p_odir)\n', (4461, 4469), False, 'from pathlib import Path\n'), ((4726, 4742), 'pathlib.Path', 'Path', (['cmesh_odir'], {}), '(cmesh_odir)\n', (4730, 4742), False, 'from pathlib import Path\n'), ((5118, 5133), 'pathlib.Path', 'Path', (['mesh_odir'], {}), '(mesh_odir)\n', (5122, 5133), False, 'from pathlib import Path\n')] |
from django.contrib import admin
from django.contrib.auth.models import Group
from accounts.models import EmailUser
from shared.admin import ExportCsvMixin
# no need for groups - we only have regular users and superusers
admin.site.unregister(Group)
@admin.register(EmailUser)
class EmailUserAdmin(ExportCsvMixin, admin.ModelAdmin):
"""option d'affichage des activités dans la vue django admin"""
filename = "export_utilisateurs.csv"
list_display = ("email", "last_name", "first_name", "is_superuser", "is_active", "email_confirmed",)
list_filter = ("is_superuser","is_active", "email_confirmed",)
fields = ("email", "last_name", "first_name", "is_superuser", "is_staff", "is_active", "email_confirmed",
("date_joined", "last_login",),
)
ordering = ("last_name", "first_name")
readonly_fields = ("date_joined", "last_login",)
list_per_page = 200
csv_export_exclude = ["password"]
| [
"django.contrib.admin.site.unregister",
"django.contrib.admin.register"
] | [((223, 251), 'django.contrib.admin.site.unregister', 'admin.site.unregister', (['Group'], {}), '(Group)\n', (244, 251), False, 'from django.contrib import admin\n'), ((254, 279), 'django.contrib.admin.register', 'admin.register', (['EmailUser'], {}), '(EmailUser)\n', (268, 279), False, 'from django.contrib import admin\n')] |
import requests
import urllib.request
import os.path
import shutil
import csv
def main():
with open("data.csv") as i: #Open the data.csv file
instances = i.readlines() #Write them into memory
instances = [x.strip() for x in instances] #Strip any weird issues from writing
instances.sort() #Sort them alphabetically
setup(instances) #Run setup to create all the necessary files and subfolders
count = len(instances) #Get the count just for fun
i = 0
try:
for name in instances:
try:
i += 1
print("-----!"+name+"!-----")
print(str(i) +" of " + str(count) + " remaining!")
fetch(name) #Run the fetching code
except Exception as e:
print(e) #Print the error. We catch errors here for pleroma instances, weirdly encoded urls, etc
pass #Don't stop the beat
except Exception as e:
print("Instance Error")
print(e)
pass
clone(instances) #Clone all of them into one big folder for ease of access
def fetch(name):
r = requests.get('https://%s/api/v1/custom_emojis'% name, allow_redirects=True) #Throw the instance name into the standard url for fetching data
path = "emoji/%s/" % name #Because of the clone function we know all of these folders will exist
try:
for emoji in r.json(): #Emoji = the json code from the request
try:
if os.path.isfile(path+emoji['shortcode']+".png"): #Check to see if it exists.
pass
else:
if "ms_" not in emoji['shortcode']: #Cut out Mutant Standard Emojis (Or at least most of them). #Mutant standard is huge and common
#print(emoji['shortcode'] + " found!")
emojiimage = requests.get(emoji['static_url'],allow_redirects=True) #Get the image from the json
open(path + emoji['shortcode']+".png",'wb').write(emojiimage.content) #Now save it as an image in the filesystem
except Exception as e:
print("Did not get: " + emoji['url']) #If somethings fucky throw a nice error then keep going.
print(e)
pass
except Exception as e:
print(e)
def setup(instances):
if (os.path.isdir("emoji/")): #Check to see if emoji/ exists
pass
else:
os.mkdir("emoji/") #make it if it doesnt
for name in instances:
if (os.path.isdir("emoji/%s/"%name)):
pass
else: os.mkdir("emoji/%s/"%name)
if (os.path.isdir("emoji/all")):
pass
else:
os.mkdir("emoji/all")
def clone(instances):
for name in instances:
print("Copying emoji for: %s"% name)
path = "emoji/%s/" % name
files = os.listdir(path)
for name in files: #This gets alll files
try:
shutil.copyfile(path+name,"emoji/all/"+name) #Then copies them into the all folder
except Exception as e:
print(e)
pass
if __name__ == '__main__':
main()
| [
"shutil.copyfile",
"requests.get"
] | [((1115, 1191), 'requests.get', 'requests.get', (["('https://%s/api/v1/custom_emojis' % name)"], {'allow_redirects': '(True)'}), "('https://%s/api/v1/custom_emojis' % name, allow_redirects=True)\n", (1127, 1191), False, 'import requests\n'), ((2940, 2989), 'shutil.copyfile', 'shutil.copyfile', (['(path + name)', "('emoji/all/' + name)"], {}), "(path + name, 'emoji/all/' + name)\n", (2955, 2989), False, 'import shutil\n'), ((1849, 1904), 'requests.get', 'requests.get', (["emoji['static_url']"], {'allow_redirects': '(True)'}), "(emoji['static_url'], allow_redirects=True)\n", (1861, 1904), False, 'import requests\n')] |
import get_data_ours
import get_data_akiba
import get_data_NearLinear
import get_data_LinearTime
import os
import matplotlib.pyplot as plt
# graphs = ["uk-2002", "arabic-2005", "gsh-2015-tpd", "uk-2005", "it-2004", "sk-2005", "uk-2007-05", "webbase-2001", "asia.osm", "road_usa", "europe.osm", "rgg_n26_s0", "RHG-100000000-nodes-2000000000-edges", "delaunay_n24", "del26"]
graphs = ["uk-2002", "arabic-2005", "gsh-2015-tpd", "uk-2005", "it-2004", "sk-2005", "uk-2007-05", "webbase-2001", "asia.osm", "road_usa", "europe.osm", "rgg_n26_s0", "delaunay_n24", "del26"]
linearTimeDir = "../../../triangle_counting_paper/MIS_sigmod_pub/results/LinearTimeKernels/logs"
partitioningDir = "../../LinearTimeKernels/partitions"
ourTimeDir = "../../results/LinearTimeKernelsScalingAll"
nearLinearDir = "../../../triangle_counting_paper/MIS_sigmod_pub/results/NearLinear"
akibaDir = "../../akiba_vertex_cover/results"
def getOurTimeAndSizeSequential(graph):
res = get_data_ours.getOurTimeAndSizeUltrafast(graph, linearTimeDir, partitioningDir, ourTimeDir)
result = dict()
result["time"] = res["sequential_quasikernel_time"] + res["lineartime_time"]
result["size"] = res["sequential_quasikernel_size"]
return result
def getOurTimeAndSizeParallel(graph):
res = get_data_ours.getOurTimeAndSizeUltrafast(graph, linearTimeDir, partitioningDir, ourTimeDir)
result = dict()
result["time"] = res["parallel_quasikernel_time"] + res["lineartime_time"] + res["partitioning_time"]
result["size"] = res["parallel_quasikernel_size"]
return result
def getAkibaTimeAndSize(graph):
return get_data_akiba.getAkibaTimeAndSize(graph, akibaDir)
def getNearLinearTimeAndSize(graph):
return get_data_NearLinear.getNearLinearTimeAndSize(graph, nearLinearDir)
def getLinearTimeTimeAndSize(graph):
return get_data_LinearTime.getLinearTimeTimeAndSize(graph, linearTimeDir)
def minProperty(graph, prop):
oursequential = getOurTimeAndSizeSequential(graph)[prop]
ourparallel = getOurTimeAndSizeParallel(graph)[prop]
akiba = getAkibaTimeAndSize(graph)[prop]
nearLinear = getNearLinearTimeAndSize(graph)[prop]
linearTime = getLinearTimeTimeAndSize(graph)[prop]
data = [oursequential, ourparallel, akiba, nearLinear, linearTime]
# data = [oursequential, ourparallel, akiba, nearLinear]
data = filter(lambda x : x >= 0, data)
minimum = min(data)
if minimum == 0:
return 1
return minimum
oursizeSequential = []
ourtimeSequential = []
oursizeParallel = []
ourtimeParallel = []
akibasize = []
akibatime = []
nearlinearsize = []
nearlineartime = []
lineartimesize = []
lineartimetime = []
for graph in graphs:
minsize = getAkibaTimeAndSize(graph)["size"]
mintime = getAkibaTimeAndSize(graph)["time"]
oss = getOurTimeAndSizeSequential(graph)["size"] / minsize
# print(graph + "(sequential): " + str(getOurTimeAndSizeSequential(graph)["size"]))
ots = getOurTimeAndSizeSequential(graph)["time"] / mintime
if oss > 0 and ots > 0:
oursizeSequential.append(oss)
ourtimeSequential.append(ots)
osp = getOurTimeAndSizeParallel(graph)["size"] / minsize
# print(graph + "(parallel): " + str(getOurTimeAndSizeParallel(graph)["size"]))
otp = getOurTimeAndSizeParallel(graph)["time"] / mintime
if osp > 0 and otp > 0:
oursizeParallel.append(osp)
ourtimeParallel.append(otp)
aks = getAkibaTimeAndSize(graph)["size"] / minsize
akt = getAkibaTimeAndSize(graph)["time"] / mintime
if aks > 0 and akt > 0:
akibasize.append(aks)
akibatime.append(akt)
nls = getNearLinearTimeAndSize(graph)["size"] / minsize
nlt = getNearLinearTimeAndSize(graph)["time"] / mintime
if nls > 0 and nlt > 0:
nearlinearsize.append(nls)
nearlineartime.append(nlt)
lts = getLinearTimeTimeAndSize(graph)["size"] / minsize
ltt = getLinearTimeTimeAndSize(graph)["time"] / mintime
if nls > 0 and nlt > 0:
lineartimesize.append(lts)
lineartimetime.append(ltt)
# print("We")
# print(oursizeSequential)
# print(ourtimeSequential)
# print("We (parallel)")
# print(oursizeParallel)
# print(ourtimeParallel)
# print("Akiba")
# print(akibasize)
# print(akibatime)
# print("NearLinear")
# print(nearlinearsize)
# print(nearlineartime)
# print("LinearTime")
# print(lineartimesize)
# print(lineartimetime)
plt.rc('font', size=14)
fig = plt.figure(figsize=(3.2, 2.4))
ax = fig.add_subplot(1,1,1)
plt.title("Summary", fontsize=14)
ax.set_yscale("log")
ax.set_xscale("log")
ax.scatter(ourtimeSequential, oursizeSequential, label="FastKer", marker="x", color="green")
ax.scatter(ourtimeParallel, oursizeParallel, label="ParFastKer", marker="+", color="black")
# ax.scatter(akibatime, akibasize, label="VCSolver", marker="^", edgecolors="blue", facecolors="none")
ax.scatter(nearlineartime, nearlinearsize, label="NearLinear", marker="o", edgecolors="red", facecolors="none")
ax.scatter(lineartimetime, lineartimesize, label="LinearTime", marker="^", edgecolors="magenta", facecolors="none")
plt.xlabel("time / VCSolver time")
plt.ylabel("size / VCSolver size")
plt.xticks([0.0001, 0.01, 1])
ax.legend(bbox_to_anchor=(0.35,-0.7), ncol=2, loc='lower center', frameon=False, borderaxespad=0., mode="expand")
plt.savefig("summaryplot_vcsolver_baseline.pdf", bbox_inches="tight")
# plt.show()
| [
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"get_data_ours.getOurTimeAndSizeUltrafast",
"get_data_LinearTime.getLinearTimeTimeAndSize",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.title",
"get_data_akiba.getAkibaTimeAndSize",
"get_data_NearLinear.getNearLinearTimeAndSize",
"matplotlib.pyplot.rc"
] | [((4378, 4401), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'size': '(14)'}), "('font', size=14)\n", (4384, 4401), True, 'import matplotlib.pyplot as plt\n'), ((4408, 4438), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(3.2, 2.4)'}), '(figsize=(3.2, 2.4))\n', (4418, 4438), True, 'import matplotlib.pyplot as plt\n'), ((4467, 4500), 'matplotlib.pyplot.title', 'plt.title', (['"""Summary"""'], {'fontsize': '(14)'}), "('Summary', fontsize=14)\n", (4476, 4500), True, 'import matplotlib.pyplot as plt\n'), ((5059, 5093), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time / VCSolver time"""'], {}), "('time / VCSolver time')\n", (5069, 5093), True, 'import matplotlib.pyplot as plt\n'), ((5094, 5128), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""size / VCSolver size"""'], {}), "('size / VCSolver size')\n", (5104, 5128), True, 'import matplotlib.pyplot as plt\n'), ((5129, 5158), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[0.0001, 0.01, 1]'], {}), '([0.0001, 0.01, 1])\n', (5139, 5158), True, 'import matplotlib.pyplot as plt\n'), ((5273, 5342), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""summaryplot_vcsolver_baseline.pdf"""'], {'bbox_inches': '"""tight"""'}), "('summaryplot_vcsolver_baseline.pdf', bbox_inches='tight')\n", (5284, 5342), True, 'import matplotlib.pyplot as plt\n'), ((958, 1053), 'get_data_ours.getOurTimeAndSizeUltrafast', 'get_data_ours.getOurTimeAndSizeUltrafast', (['graph', 'linearTimeDir', 'partitioningDir', 'ourTimeDir'], {}), '(graph, linearTimeDir,\n partitioningDir, ourTimeDir)\n', (998, 1053), False, 'import get_data_ours\n'), ((1274, 1369), 'get_data_ours.getOurTimeAndSizeUltrafast', 'get_data_ours.getOurTimeAndSizeUltrafast', (['graph', 'linearTimeDir', 'partitioningDir', 'ourTimeDir'], {}), '(graph, linearTimeDir,\n partitioningDir, ourTimeDir)\n', (1314, 1369), False, 'import get_data_ours\n'), ((1608, 1659), 'get_data_akiba.getAkibaTimeAndSize', 'get_data_akiba.getAkibaTimeAndSize', (['graph', 'akibaDir'], {}), '(graph, akibaDir)\n', (1642, 1659), False, 'import get_data_akiba\n'), ((1709, 1775), 'get_data_NearLinear.getNearLinearTimeAndSize', 'get_data_NearLinear.getNearLinearTimeAndSize', (['graph', 'nearLinearDir'], {}), '(graph, nearLinearDir)\n', (1753, 1775), False, 'import get_data_NearLinear\n'), ((1825, 1891), 'get_data_LinearTime.getLinearTimeTimeAndSize', 'get_data_LinearTime.getLinearTimeTimeAndSize', (['graph', 'linearTimeDir'], {}), '(graph, linearTimeDir)\n', (1869, 1891), False, 'import get_data_LinearTime\n')] |
# Generated by Django 2.1.5 on 2019-03-26 11:35
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='U2FKey',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('last_used_at', models.DateTimeField(null=True)),
('public_key', models.TextField(unique=True)),
('key_handle', models.TextField()),
('app_id', models.TextField()),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='u2f_keys', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='UserOTP',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('otp_type', models.CharField(choices=[('HOTP', 'hotp'), ('TOTP', 'totp')], max_length=20)),
('secret_key', models.CharField(blank=True, max_length=100)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='UserRecoveryCodes',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('secret_code', models.CharField(max_length=10)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='django_mfa.UserOTP')),
],
),
]
| [
"django.db.models.OneToOneField",
"django.db.models.TextField",
"django.db.models.ForeignKey",
"django.db.models.AutoField",
"django.db.models.DateTimeField",
"django.db.migrations.swappable_dependency",
"django.db.models.CharField"
] | [((247, 304), 'django.db.migrations.swappable_dependency', 'migrations.swappable_dependency', (['settings.AUTH_USER_MODEL'], {}), '(settings.AUTH_USER_MODEL)\n', (278, 304), False, 'from django.db import migrations, models\n'), ((435, 528), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (451, 528), False, 'from django.db import migrations, models\n'), ((558, 597), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (578, 597), False, 'from django.db import migrations, models\n'), ((633, 664), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'null': '(True)'}), '(null=True)\n', (653, 664), False, 'from django.db import migrations, models\n'), ((698, 727), 'django.db.models.TextField', 'models.TextField', ([], {'unique': '(True)'}), '(unique=True)\n', (714, 727), False, 'from django.db import migrations, models\n'), ((761, 779), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (777, 779), False, 'from django.db import migrations, models\n'), ((809, 827), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (825, 827), False, 'from django.db import migrations, models\n'), ((855, 976), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""u2f_keys"""', 'to': 'settings.AUTH_USER_MODEL'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='u2f_keys', to=settings.AUTH_USER_MODEL)\n", (872, 976), False, 'from django.db import migrations, models\n'), ((1104, 1197), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (1120, 1197), False, 'from django.db import migrations, models\n'), ((1225, 1302), 'django.db.models.CharField', 'models.CharField', ([], {'choices': "[('HOTP', 'hotp'), ('TOTP', 'totp')]", 'max_length': '(20)'}), "(choices=[('HOTP', 'hotp'), ('TOTP', 'totp')], max_length=20)\n", (1241, 1302), False, 'from django.db import migrations, models\n'), ((1336, 1380), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(100)'}), '(blank=True, max_length=100)\n', (1352, 1380), False, 'from django.db import migrations, models\n'), ((1408, 1507), 'django.db.models.OneToOneField', 'models.OneToOneField', ([], {'on_delete': 'django.db.models.deletion.PROTECT', 'to': 'settings.AUTH_USER_MODEL'}), '(on_delete=django.db.models.deletion.PROTECT, to=\n settings.AUTH_USER_MODEL)\n', (1428, 1507), False, 'from django.db import migrations, models\n'), ((1645, 1738), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (1661, 1738), False, 'from django.db import migrations, models\n'), ((1769, 1800), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(10)'}), '(max_length=10)\n', (1785, 1800), False, 'from django.db import migrations, models\n'), ((1828, 1920), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': 
'"""django_mfa.UserOTP"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'django_mfa.UserOTP')\n", (1845, 1920), False, 'from django.db import migrations, models\n')] |
from my_collection import logger
if __name__ == "__main__":
logger.now().debug("debug1")
logger.now().debug("debug2")
logger.now().info("hello1")
logger.now().info("hello2")
logger.now().with_field("key", "val").error("with field1")
logger.now().with_field("key", "val").error("with field2")
| [
"my_collection.logger.now"
] | [((65, 77), 'my_collection.logger.now', 'logger.now', ([], {}), '()\n', (75, 77), False, 'from my_collection import logger\n'), ((98, 110), 'my_collection.logger.now', 'logger.now', ([], {}), '()\n', (108, 110), False, 'from my_collection import logger\n'), ((131, 143), 'my_collection.logger.now', 'logger.now', ([], {}), '()\n', (141, 143), False, 'from my_collection import logger\n'), ((163, 175), 'my_collection.logger.now', 'logger.now', ([], {}), '()\n', (173, 175), False, 'from my_collection import logger\n'), ((195, 207), 'my_collection.logger.now', 'logger.now', ([], {}), '()\n', (205, 207), False, 'from my_collection import logger\n'), ((258, 270), 'my_collection.logger.now', 'logger.now', ([], {}), '()\n', (268, 270), False, 'from my_collection import logger\n')] |
import unittest
from .helpers import StubBoard, StubPiece, C, WHITE, BLACK
class TestBishopGenerate(unittest.TestCase):
def get_bishop(self, board, team, position):
from chess.models import Bishop
return Bishop(board, team, position)
def compare_list(self, expected, results):
compared = []
for e in expected:
for r in results:
if e[0] == r[0] and e[1] == r[1]:
compared.append(True)
break
else:
compared.append(False)
return compared
def test_generate_topright(self):
board = StubBoard()
board[C('h7')] = StubPiece(board, BLACK, C('h7'))
bishop = self.get_bishop(board, WHITE, C('e4'))
results = bishop.generate()
expected = [C('f5'), C('g6'), C('h7')]
correct = self.compare_list(expected, results)
self.assertTrue(all(correct))
def test_generate_topleft(self):
board = StubBoard()
board[C('c6')] = StubPiece(board, WHITE, C('c6'))
bishop = self.get_bishop(board, WHITE, C('e4'))
results = bishop.generate()
expected = [C('d5')]
correct = self.compare_list(expected, results)
self.assertTrue(all(correct))
expected = [C('c6')]
correct = self.compare_list(expected, results)
self.assertFalse(any(correct))
def test_generate_bottomleft(self):
board = StubBoard()
board[C('c2')] = StubPiece(board, BLACK, C('c2'))
bishop = self.get_bishop(board, WHITE, C('e4'))
results = bishop.generate()
expected = [C('d3'), C('c2')]
correct = self.compare_list(expected, results)
self.assertTrue(all(correct))
expected = [C('b1')]
correct = self.compare_list(expected, results)
self.assertFalse(any(correct))
def test_generate_bottomright(self):
board = StubBoard()
bishop = self.get_bishop(board, WHITE, C('e4'))
results = bishop.generate()
expected = [C('f3'), C('g2'), C('h1')]
correct = self.compare_list(expected, results)
self.assertTrue(all(correct))
def test_generate_amount(self):
board = StubBoard()
bishop = self.get_bishop(board, WHITE, C('e4'))
results = bishop.generate()
self.assertEqual(len(results), 13)
board = StubBoard()
board[C('c6')] = StubPiece(board, WHITE, C('c6'))
bishop = self.get_bishop(board, WHITE, C('e4'))
results = bishop.generate()
self.assertEqual(len(results), 10)
if __name__ == '__main__':
    unittest.main()
| [
"unittest.main",
"chess.models.Bishop"
] | [((2640, 2655), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2653, 2655), False, 'import unittest\n'), ((225, 254), 'chess.models.Bishop', 'Bishop', (['board', 'team', 'position'], {}), '(board, team, position)\n', (231, 254), False, 'from chess.models import Bishop\n')] |
from lib.fmd.namedentity import NamedEntity
from lib.fmd.decorators import Action, ListStage, GetStage
from lib.exceptions.workflow import EntryException
@Action(ListStage.DATAGATHERING)
def list_records(context, output):
output = []
if hasattr(context, 'filter'):
context.log.debug('Using filter [%s]' % context.filter)
entries = context.ddb.list(context.filter)
else:
entries = context.ddb.list()
return NamedEntity('records', entries)
| [
"lib.fmd.decorators.Action",
"lib.fmd.namedentity.NamedEntity"
] | [((156, 187), 'lib.fmd.decorators.Action', 'Action', (['ListStage.DATAGATHERING'], {}), '(ListStage.DATAGATHERING)\n', (162, 187), False, 'from lib.fmd.decorators import Action, ListStage, GetStage\n'), ((449, 480), 'lib.fmd.namedentity.NamedEntity', 'NamedEntity', (['"""records"""', 'entries'], {}), "('records', entries)\n", (460, 480), False, 'from lib.fmd.namedentity import NamedEntity\n')] |
#!/usr/bin/env python3
# Author: <NAME> <<EMAIL>>
# This example steps you through using resty & restAssured to save pickled/serialized
# data as a blob and then later re-using it in after deserialization.
# Sample usage might be in collaborative computing ie publish results from an expensive
# computation on one machine so that other machines can load it as live data.
def testSerializer():
import Serializer
bs = Serializer.BinarySerializer()
js = Serializer.JSONSerializer()
data = dict((i, i) for i in range(10))
bserial = bs.serialize(data)
jserial = js.serialize(data)
bdserial = bs.deserialize(bserial)
jdserial = js.deserialize(jserial)
print('bdserial', bdserial)
ioS = bs.ioStream(bserial)
ioR = ioS.read()
print('ioS data from the stream', ioR)
def testCloudPassagePickledVersion():
from entrails.cloudPassage import CloudPassageHandler
cc = CloudPassageHandler()
data = dict((i, i*10) for i in range(9))
title = 'Dict of items 0-8999, keys i*10'
res = cc.push(data, title=title, asPickle=True)
pulledObj = cc.pull(metaData='pickle')
print('PulledObj', pulledObj, data)
assert(pulledObj == data)
rmTry = cc.removeTrace(data, asPickle=True)
print(rmTry)
def testCloudPassageJSONVersion():
from entrails.cloudPassage import CloudPassageHandler
cc = CloudPassageHandler()
data = dict((str(i), i*10) for i in range(9))
title = 'Dict of items 0-8999, keys i*10'
res = cc.push(data, title=title, asPickle=False)
pulledObj = cc.pull(metaData='json')
print('PulledObj', pulledObj, data)
assert(pulledObj == data)
rmTry = cc.removeTrace(data)
print(rmTry)
def main():
testSerializer()
testCloudPassageJSONVersion()
testCloudPassagePickledVersion()
if __name__ == '__main__':
main()
| [
"Serializer.BinarySerializer",
"Serializer.JSONSerializer",
"entrails.cloudPassage.CloudPassageHandler"
] | [((427, 456), 'Serializer.BinarySerializer', 'Serializer.BinarySerializer', ([], {}), '()\n', (454, 456), False, 'import Serializer\n'), ((466, 493), 'Serializer.JSONSerializer', 'Serializer.JSONSerializer', ([], {}), '()\n', (491, 493), False, 'import Serializer\n'), ((916, 937), 'entrails.cloudPassage.CloudPassageHandler', 'CloudPassageHandler', ([], {}), '()\n', (935, 937), False, 'from entrails.cloudPassage import CloudPassageHandler\n'), ((1364, 1385), 'entrails.cloudPassage.CloudPassageHandler', 'CloudPassageHandler', ([], {}), '()\n', (1383, 1385), False, 'from entrails.cloudPassage import CloudPassageHandler\n')] |
# dataframe: a data-frame implementation using method piping
#
# Copyright (C) 2016 <NAME>
#
# This file is part of dataframe.
#
# dataframe is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# dataframe is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with dataframe. If not, see <http://www.gnu.org/licenses/>.
#
#
# @author = '<NAME>'
# @email = '<EMAIL>'
from itertools import chain
import tabulate
from ._dataframe_column import DataFrameColumn
from ._dataframe_row import DataFrameRow
class DataFrameColumnSet:
def __init__(self, **kwargs):
self.__data_columns = []
self.__nrow = -1
self.cbind(**kwargs)
def __getitem__(self, item):
if isinstance(item, int):
return self.__data_columns[item]
raise ValueError("Item should be integer!")
def __iter__(self):
for col in self.__data_columns:
yield col
def __str__(self):
stri = "\nA dataframe"
ta = []
for col in self.__data_columns:
vals = col.values
if len(vals) > 10:
vals = list(chain(vals[:3], "...", vals[-3:]))
ta.append(vals)
ta = tabulate.tabulate(zip(*ta), headers=self.colnames)
return stri + "\n\n" + ta.__str__()
@property
def nrow(self):
return self.__nrow
@property
def ncol(self):
return len(self.colnames)
@property
def colnames(self):
return [x.colname for x in self.__data_columns]
def rows(self, idxs):
return [self.row(i) for i in idxs]
def row(self, idx):
"""
Returns DataFrameRow of the DataFrame given its index.
:param idx: the index of the row in the DataFrame.
:return: returns a DataFrameRow
"""
return DataFrameRow(idx, [x[idx] for x in self], self.colnames)
def which_colnames(self, *args):
idx = []
for i in range(len(self.__data_columns)):
if self.colnames[i] in args:
idx.append(i)
return idx
def cbind(self, **columns):
keys = sorted([x for x in columns.keys()])
for k in keys:
self.__cbind(DataFrameColumn(str(k), columns.get(k)))
def __cbind(self, column):
if column.colname in self.colnames:
ValueError("Appending duplicate col-name!")
self.__data_columns.append(column)
self.__nrow = self.__data_columns[-1].size()
for col in self.__data_columns:
if col.size() != self.__nrow:
raise ValueError("Columns do not have equal lengths!")
| [
"itertools.chain"
] | [((1534, 1567), 'itertools.chain', 'chain', (['vals[:3]', '"""..."""', 'vals[-3:]'], {}), "(vals[:3], '...', vals[-3:])\n", (1539, 1567), False, 'from itertools import chain\n')] |
import warnings
import cupy
from cupy_backends.cuda.api import runtime
from cupy.cuda import device
from cupyx.jit import _cuda_types
from cupyx.jit._internal_types import BuiltinFunc
from cupyx.jit._internal_types import Data
from cupyx.jit._internal_types import Constant
from cupyx.jit._internal_types import Range
from cupyx.jit import _compile
from functools import reduce
class RangeFunc(BuiltinFunc):
def __call__(self, *args, unroll=None):
"""Range with loop unrolling support.
Args:
start (int):
Same as that of built-in :obj:`range`.
stop (int):
Same as that of built-in :obj:`range`.
step (int):
Same as that of built-in :obj:`range`.
unroll (int or bool or None):
- If `True`, add ``#pragma unroll`` directive before the
loop.
- If `False`, add ``#pragma unroll(1)`` directive before
the loop to disable unrolling.
- If an `int`, add ``#pragma unroll(n)`` directive before
the loop, where the integer ``n`` means the number of
iterations to unroll.
- If `None` (default), leave the control of loop unrolling
to the compiler (no ``#pragma``).
.. seealso:: `#pragma unroll`_
.. _#pragma unroll:
https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#pragma-unroll
"""
super().__call__()
def call(self, env, *args, unroll=None):
if len(args) == 0:
raise TypeError('range expected at least 1 argument, got 0')
elif len(args) == 1:
start, stop, step = Constant(0), args[0], Constant(1)
elif len(args) == 2:
start, stop, step = args[0], args[1], Constant(1)
elif len(args) == 3:
start, stop, step = args
else:
raise TypeError(
f'range expected at most 3 argument, got {len(args)}')
if unroll is not None:
if not all(isinstance(x, Constant)
for x in (start, stop, step, unroll)):
raise TypeError(
'loop unrolling requires constant start, stop, step and '
'unroll value')
unroll = unroll.obj
if not (isinstance(unroll, int) or isinstance(unroll, bool)):
raise TypeError(
'unroll value expected to be of type int, '
f'got {type(unroll).__name__}')
if unroll is False:
unroll = 1
if not (unroll is True or 0 < unroll < 1 << 31):
warnings.warn(
'loop unrolling is ignored as the unroll value is '
'non-positive or greater than INT_MAX')
if isinstance(step, Constant):
step_is_positive = step.obj >= 0
elif step.ctype.dtype.kind == 'u':
step_is_positive = True
else:
step_is_positive = None
stop = Data.init(stop, env)
start = Data.init(start, env)
step = Data.init(step, env)
if start.ctype.dtype.kind not in 'iu':
raise TypeError('range supports only for integer type.')
if stop.ctype.dtype.kind not in 'iu':
raise TypeError('range supports only for integer type.')
if step.ctype.dtype.kind not in 'iu':
raise TypeError('range supports only for integer type.')
if env.mode == 'numpy':
ctype = _cuda_types.Scalar(int)
elif env.mode == 'cuda':
ctype = stop.ctype
else:
assert False
return Range(start, stop, step, ctype, step_is_positive, unroll=unroll)
class LenFunc(BuiltinFunc):
def call(self, env, *args, **kwds):
if len(args) != 1:
raise TypeError(f'len() expects only 1 argument, got {len(args)}')
if kwds:
raise TypeError('keyword arguments are not supported')
arg = args[0]
if not isinstance(arg.ctype, _cuda_types.CArray):
raise TypeError('len() supports only array type')
if not arg.ctype.ndim:
raise TypeError('len() of unsized array')
return Data(f'static_cast<long long>({arg.code}.shape()[0])',
_cuda_types.Scalar('q'))
class MinFunc(BuiltinFunc):
def call(self, env, *args, **kwds):
if len(args) < 2:
raise TypeError(
f'min() expects at least 2 arguments, got {len(args)}')
if kwds:
raise TypeError('keyword arguments are not supported')
return reduce(lambda a, b: _compile._call_ufunc(
cupy.minimum, (a, b), None, env), args)
class MaxFunc(BuiltinFunc):
def call(self, env, *args, **kwds):
if len(args) < 2:
raise TypeError(
f'max() expects at least 2 arguments, got {len(args)}')
if kwds:
raise TypeError('keyword arguments are not supported')
return reduce(lambda a, b: _compile._call_ufunc(
cupy.maximum, (a, b), None, env), args)
class SyncThreads(BuiltinFunc):
def __call__(self):
"""Calls ``__syncthreads()``.
.. seealso:: `Synchronization functions`_
.. _Synchronization functions:
https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#synchronization-functions
"""
super().__call__()
def call_const(self, env):
return Data('__syncthreads()', _cuda_types.void)
class SyncWarp(BuiltinFunc):
def __call__(self, *, mask=0xffffffff):
"""Calls ``__syncwarp()``.
Args:
mask (int): Active threads in a warp. Default is 0xffffffff.
.. seealso:: `Synchronization functions`_
.. _Synchronization functions:
https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#synchronization-functions
"""
super().__call__()
def call(self, env, *, mask=None):
if runtime.is_hip:
if mask is not None:
warnings.warn(f'mask {mask} is ignored on HIP', RuntimeWarning)
mask = None
if mask:
if isinstance(mask, Constant):
if not (0x0 <= mask.obj <= 0xffffffff):
raise ValueError('mask is out of range')
mask = _compile._astype_scalar(
mask, _cuda_types.int32, 'same_kind', env)
mask = Data.init(mask, env)
code = f'__syncwarp({mask.code})'
else:
code = '__syncwarp()'
return Data(code, _cuda_types.void)
class SharedMemory(BuiltinFunc):
def __call__(self, dtype, size, alignment=None):
"""Allocates shared memory and returns it as a 1-D array.
Args:
dtype (dtype):
The dtype of the returned array.
size (int or None):
If ``int`` type, the size of static shared memory.
If ``None``, declares the shared memory with extern specifier.
alignment (int or None): Enforce the alignment via __align__(N).
"""
super().__call__()
def call_const(self, env, dtype, size, alignment=None):
name = env.get_fresh_variable_name(prefix='_smem')
child_type = _cuda_types.Scalar(dtype)
while env[name] is not None:
name = env.get_fresh_variable_name(prefix='_smem') # retry
var = Data(name, _cuda_types.SharedMem(child_type, size, alignment))
env.decls[name] = var
env.locals[name] = var
return Data(name, _cuda_types.Ptr(child_type))
class AtomicOp(BuiltinFunc):
def __init__(self, op, dtypes):
self._op = op
self._name = 'atomic' + op
self._dtypes = dtypes
doc = f"""Calls the ``{self._name}`` function to operate atomically on
``array[index]``. Please refer to `Atomic Functions`_ for detailed
explanation.
Args:
array: A :class:`cupy.ndarray` to index over.
index: A valid index such that the address to the corresponding
array element ``array[index]`` can be computed.
value: Represent the value to use for the specified operation. For
the case of :obj:`atomic_cas`, this is the value for
``array[index]`` to compare with.
alt_value: Only used in :obj:`atomic_cas` to represent the value
to swap to.
.. seealso:: `Numba's corresponding atomic functions`_
.. _Atomic Functions:
https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#atomic-functions
.. _Numba's corresponding atomic functions:
https://numba.readthedocs.io/en/stable/cuda-reference/kernel.html#synchronization-and-atomic-operations
"""
self.__doc__ = doc
def __call__(self, array, index, value, alt_value=None):
super().__call__()
def call(self, env, array, index, value, value2=None):
name = self._name
op = self._op
array = Data.init(array, env)
if not isinstance(array.ctype, (_cuda_types.CArray, _cuda_types.Ptr)):
raise TypeError('The first argument must be of array type.')
target = _compile._indexing(array, index, env)
ctype = target.ctype
if ctype.dtype.name not in self._dtypes:
raise TypeError(f'`{name}` does not support {ctype.dtype} input.')
# On HIP, 'e' is not supported and we will never reach here
if (op == 'Add' and ctype.dtype.char == 'e'
and runtime.runtimeGetVersion() < 10000):
raise RuntimeError(
'float16 atomic operation is not supported before CUDA 10.0.')
value = _compile._astype_scalar(value, ctype, 'same_kind', env)
value = Data.init(value, env)
if op == 'CAS':
assert value2 is not None
# On HIP, 'H' is not supported and we will never reach here
if ctype.dtype.char == 'H':
if runtime.runtimeGetVersion() < 10010:
raise RuntimeError(
'uint16 atomic operation is not supported before '
'CUDA 10.1')
if int(device.get_compute_capability()) < 70:
raise RuntimeError(
'uint16 atomic operation is not supported before '
'sm_70')
value2 = _compile._astype_scalar(value2, ctype, 'same_kind', env)
value2 = Data.init(value2, env)
code = f'{name}(&{target.code}, {value.code}, {value2.code})'
else:
assert value2 is None
code = f'{name}(&{target.code}, {value.code})'
return Data(code, ctype)
class GridFunc(BuiltinFunc):
def __init__(self, mode):
if mode == 'grid':
self._desc = 'Compute the thread index in the grid.'
self._eq = 'jit.threadIdx.x + jit.blockIdx.x * jit.blockDim.x'
self._link = 'numba.cuda.grid'
self._code = 'threadIdx.{n} + blockIdx.{n} * blockDim.{n}'
elif mode == 'gridsize':
self._desc = 'Compute the grid size.'
self._eq = 'jit.blockDim.x * jit.gridDim.x'
self._link = 'numba.cuda.gridsize'
self._code = 'blockDim.{n} * gridDim.{n}'
else:
raise ValueError('unsupported function')
doc = f""" {self._desc}
Computation of the first integer is as follows::
{self._eq}
and for the other two integers the ``y`` and ``z`` attributes are used.
Args:
ndim (int): The dimension of the grid. Only 1, 2, or 3 is allowed.
Returns:
int or tuple:
If ``ndim`` is 1, an integer is returned, otherwise a tuple.
.. note::
This function follows the convention of Numba's
:func:`{self._link}`.
"""
self.__doc__ = doc
def __call__(self, ndim):
super().__call__()
def call_const(self, env, ndim):
if not isinstance(ndim, int):
raise TypeError('ndim must be an integer')
# Numba convention: for 1D we return a single variable,
# otherwise a tuple
if ndim == 1:
return Data(self._code.format(n='x'), _cuda_types.uint32)
elif ndim == 2:
dims = ('x', 'y')
elif ndim == 3:
dims = ('x', 'y', 'z')
else:
raise ValueError('Only ndim=1,2,3 are supported')
elts_code = ', '.join(self._code.format(n=n) for n in dims)
ctype = _cuda_types.Tuple([_cuda_types.uint32]*ndim)
return Data(f'thrust::make_tuple({elts_code})', ctype)
class WarpShuffleOp(BuiltinFunc):
def __init__(self, op, dtypes):
self._op = op
self._name = '__shfl_' + (op + '_' if op else '') + 'sync'
self._dtypes = dtypes
doc = f"""Calls the ``{self._name}`` function. Please refer to
`Warp Shuffle Functions`_ for detailed explanation.
.. _Warp Shuffle Functions:
https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#warp-shuffle-functions
"""
self.__doc__ = doc
def __call__(self, mask, var, val_id, *, width=32):
super().__call__()
def call(self, env, mask, var, val_id, *, width=None):
name = self._name
var = Data.init(var, env)
ctype = var.ctype
if ctype.dtype.name not in self._dtypes:
raise TypeError(f'`{name}` does not support {ctype.dtype} input.')
try:
mask = mask.obj
except Exception:
raise TypeError('mask must be an integer')
if runtime.is_hip:
warnings.warn(f'mask {mask} is ignored on HIP', RuntimeWarning)
elif not (0x0 <= mask <= 0xffffffff):
raise ValueError('mask is out of range')
# val_id refers to "delta" for shfl_{up, down}, "srcLane" for shfl, and
# "laneMask" for shfl_xor
if self._op in ('up', 'down'):
val_id_t = _cuda_types.uint32
else:
val_id_t = _cuda_types.int32
val_id = _compile._astype_scalar(val_id, val_id_t, 'same_kind', env)
val_id = Data.init(val_id, env)
if width:
if isinstance(width, Constant):
if width.obj not in (2, 4, 8, 16, 32):
raise ValueError('width needs to be power of 2')
else:
width = Constant(64) if runtime.is_hip else Constant(32)
width = _compile._astype_scalar(
width, _cuda_types.int32, 'same_kind', env)
width = Data.init(width, env)
code = f'{name}({hex(mask)}, {var.code}, {val_id.code}'
code += f', {width.code})'
return Data(code, ctype)
class LaneID(BuiltinFunc):
def __call__(self):
"""Returns the lane ID of the calling thread, ranging in
``[0, jit.warpsize)``.
.. note::
Unlike :obj:`numba.cuda.laneid`, this is a callable function
instead of a property.
"""
super().__call__()
def _get_preamble(self):
preamble = '__device__ __forceinline__ unsigned int LaneId() {'
if not runtime.is_hip:
# see https://github.com/NVIDIA/cub/blob/main/cub/util_ptx.cuh#L419
preamble += """
unsigned int ret;
asm ("mov.u32 %0, %%laneid;" : "=r"(ret) );
return ret; }
"""
else:
# defined in hip/hcc_detail/device_functions.h
preamble += """
return __lane_id(); }
"""
return preamble
def call_const(self, env):
env.generated.add_code(self._get_preamble())
return Data('LaneId()', _cuda_types.uint32)
builtin_functions_dict = {
range: RangeFunc(),
len: LenFunc(),
min: MinFunc(),
max: MaxFunc(),
}
range_ = RangeFunc()
syncthreads = SyncThreads()
syncwarp = SyncWarp()
shared_memory = SharedMemory()
grid = GridFunc('grid')
gridsize = GridFunc('gridsize')
laneid = LaneID()
# atomic functions
atomic_add = AtomicOp(
'Add',
('int32', 'uint32', 'uint64', 'float32', 'float64')
+ (() if runtime.is_hip else ('float16',)))
atomic_sub = AtomicOp(
'Sub', ('int32', 'uint32'))
atomic_exch = AtomicOp(
'Exch', ('int32', 'uint32', 'uint64', 'float32'))
atomic_min = AtomicOp(
'Min', ('int32', 'uint32', 'uint64'))
atomic_max = AtomicOp(
'Max', ('int32', 'uint32', 'uint64'))
atomic_inc = AtomicOp(
'Inc', ('uint32',))
atomic_dec = AtomicOp(
'Dec', ('uint32',))
atomic_cas = AtomicOp(
'CAS',
('int32', 'uint32', 'uint64')
+ (() if runtime.is_hip else ('uint16',)))
atomic_and = AtomicOp(
'And', ('int32', 'uint32', 'uint64'))
atomic_or = AtomicOp(
'Or', ('int32', 'uint32', 'uint64'))
atomic_xor = AtomicOp(
'Xor', ('int32', 'uint32', 'uint64'))
# warp-shuffle functions
_shfl_dtypes = (
('int32', 'uint32', 'int64', 'float32', 'float64')
+ (() if runtime.is_hip else ('uint64', 'float16')))
shfl_sync = WarpShuffleOp('', _shfl_dtypes)
shfl_up_sync = WarpShuffleOp('up', _shfl_dtypes)
shfl_down_sync = WarpShuffleOp('down', _shfl_dtypes)
shfl_xor_sync = WarpShuffleOp('xor', _shfl_dtypes)
| [
"cupyx.jit._internal_types.Data.init",
"cupyx.jit._compile._astype_scalar",
"cupy_backends.cuda.api.runtime.runtimeGetVersion",
"cupyx.jit._internal_types.Range",
"cupyx.jit._compile._call_ufunc",
"cupyx.jit._cuda_types.Tuple",
"cupyx.jit._cuda_types.SharedMem",
"cupyx.jit._compile._indexing",
"cupyx.jit._cuda_types.Ptr",
"cupyx.jit._internal_types.Data",
"cupyx.jit._cuda_types.Scalar",
"warnings.warn",
"cupyx.jit._internal_types.Constant",
"cupy.cuda.device.get_compute_capability"
] | [((3099, 3119), 'cupyx.jit._internal_types.Data.init', 'Data.init', (['stop', 'env'], {}), '(stop, env)\n', (3108, 3119), False, 'from cupyx.jit._internal_types import Data\n'), ((3136, 3157), 'cupyx.jit._internal_types.Data.init', 'Data.init', (['start', 'env'], {}), '(start, env)\n', (3145, 3157), False, 'from cupyx.jit._internal_types import Data\n'), ((3173, 3193), 'cupyx.jit._internal_types.Data.init', 'Data.init', (['step', 'env'], {}), '(step, env)\n', (3182, 3193), False, 'from cupyx.jit._internal_types import Data\n'), ((3737, 3801), 'cupyx.jit._internal_types.Range', 'Range', (['start', 'stop', 'step', 'ctype', 'step_is_positive'], {'unroll': 'unroll'}), '(start, stop, step, ctype, step_is_positive, unroll=unroll)\n', (3742, 3801), False, 'from cupyx.jit._internal_types import Range\n'), ((5564, 5605), 'cupyx.jit._internal_types.Data', 'Data', (['"""__syncthreads()"""', '_cuda_types.void'], {}), "('__syncthreads()', _cuda_types.void)\n", (5568, 5605), False, 'from cupyx.jit._internal_types import Data\n'), ((6676, 6704), 'cupyx.jit._internal_types.Data', 'Data', (['code', '_cuda_types.void'], {}), '(code, _cuda_types.void)\n', (6680, 6704), False, 'from cupyx.jit._internal_types import Data\n'), ((7386, 7411), 'cupyx.jit._cuda_types.Scalar', '_cuda_types.Scalar', (['dtype'], {}), '(dtype)\n', (7404, 7411), False, 'from cupyx.jit import _cuda_types\n'), ((9170, 9191), 'cupyx.jit._internal_types.Data.init', 'Data.init', (['array', 'env'], {}), '(array, env)\n', (9179, 9191), False, 'from cupyx.jit._internal_types import Data\n'), ((9361, 9398), 'cupyx.jit._compile._indexing', '_compile._indexing', (['array', 'index', 'env'], {}), '(array, index, env)\n', (9379, 9398), False, 'from cupyx.jit import _compile\n'), ((9861, 9916), 'cupyx.jit._compile._astype_scalar', '_compile._astype_scalar', (['value', 'ctype', '"""same_kind"""', 'env'], {}), "(value, ctype, 'same_kind', env)\n", (9884, 9916), False, 'from cupyx.jit import _compile\n'), ((9933, 9954), 'cupyx.jit._internal_types.Data.init', 'Data.init', (['value', 'env'], {}), '(value, env)\n', (9942, 9954), False, 'from cupyx.jit._internal_types import Data\n'), ((10865, 10882), 'cupyx.jit._internal_types.Data', 'Data', (['code', 'ctype'], {}), '(code, ctype)\n', (10869, 10882), False, 'from cupyx.jit._internal_types import Data\n'), ((12751, 12797), 'cupyx.jit._cuda_types.Tuple', '_cuda_types.Tuple', (['([_cuda_types.uint32] * ndim)'], {}), '([_cuda_types.uint32] * ndim)\n', (12768, 12797), False, 'from cupyx.jit import _cuda_types\n'), ((12811, 12858), 'cupyx.jit._internal_types.Data', 'Data', (['f"""thrust::make_tuple({elts_code})"""', 'ctype'], {}), "(f'thrust::make_tuple({elts_code})', ctype)\n", (12815, 12858), False, 'from cupyx.jit._internal_types import Data\n'), ((13543, 13562), 'cupyx.jit._internal_types.Data.init', 'Data.init', (['var', 'env'], {}), '(var, env)\n', (13552, 13562), False, 'from cupyx.jit._internal_types import Data\n'), ((14310, 14369), 'cupyx.jit._compile._astype_scalar', '_compile._astype_scalar', (['val_id', 'val_id_t', '"""same_kind"""', 'env'], {}), "(val_id, val_id_t, 'same_kind', env)\n", (14333, 14369), False, 'from cupyx.jit import _compile\n'), ((14387, 14409), 'cupyx.jit._internal_types.Data.init', 'Data.init', (['val_id', 'env'], {}), '(val_id, env)\n', (14396, 14409), False, 'from cupyx.jit._internal_types import Data\n'), ((14696, 14763), 'cupyx.jit._compile._astype_scalar', '_compile._astype_scalar', (['width', '_cuda_types.int32', '"""same_kind"""', 'env'], {}), "(width, 
_cuda_types.int32, 'same_kind', env)\n", (14719, 14763), False, 'from cupyx.jit import _compile\n'), ((14793, 14814), 'cupyx.jit._internal_types.Data.init', 'Data.init', (['width', 'env'], {}), '(width, env)\n', (14802, 14814), False, 'from cupyx.jit._internal_types import Data\n'), ((14930, 14947), 'cupyx.jit._internal_types.Data', 'Data', (['code', 'ctype'], {}), '(code, ctype)\n', (14934, 14947), False, 'from cupyx.jit._internal_types import Data\n'), ((15923, 15959), 'cupyx.jit._internal_types.Data', 'Data', (['"""LaneId()"""', '_cuda_types.uint32'], {}), "('LaneId()', _cuda_types.uint32)\n", (15927, 15959), False, 'from cupyx.jit._internal_types import Data\n'), ((3594, 3617), 'cupyx.jit._cuda_types.Scalar', '_cuda_types.Scalar', (['int'], {}), '(int)\n', (3612, 3617), False, 'from cupyx.jit import _cuda_types\n'), ((4380, 4403), 'cupyx.jit._cuda_types.Scalar', '_cuda_types.Scalar', (['"""q"""'], {}), "('q')\n", (4398, 4403), False, 'from cupyx.jit import _cuda_types\n'), ((6443, 6509), 'cupyx.jit._compile._astype_scalar', '_compile._astype_scalar', (['mask', '_cuda_types.int32', '"""same_kind"""', 'env'], {}), "(mask, _cuda_types.int32, 'same_kind', env)\n", (6466, 6509), False, 'from cupyx.jit import _compile\n'), ((6546, 6566), 'cupyx.jit._internal_types.Data.init', 'Data.init', (['mask', 'env'], {}), '(mask, env)\n', (6555, 6566), False, 'from cupyx.jit._internal_types import Data\n'), ((7546, 7596), 'cupyx.jit._cuda_types.SharedMem', '_cuda_types.SharedMem', (['child_type', 'size', 'alignment'], {}), '(child_type, size, alignment)\n', (7567, 7596), False, 'from cupyx.jit import _cuda_types\n'), ((7685, 7712), 'cupyx.jit._cuda_types.Ptr', '_cuda_types.Ptr', (['child_type'], {}), '(child_type)\n', (7700, 7712), False, 'from cupyx.jit import _cuda_types\n'), ((10568, 10624), 'cupyx.jit._compile._astype_scalar', '_compile._astype_scalar', (['value2', 'ctype', '"""same_kind"""', 'env'], {}), "(value2, ctype, 'same_kind', env)\n", (10591, 10624), False, 'from cupyx.jit import _compile\n'), ((10646, 10668), 'cupyx.jit._internal_types.Data.init', 'Data.init', (['value2', 'env'], {}), '(value2, env)\n', (10655, 10668), False, 'from cupyx.jit._internal_types import Data\n'), ((13879, 13942), 'warnings.warn', 'warnings.warn', (['f"""mask {mask} is ignored on HIP"""', 'RuntimeWarning'], {}), "(f'mask {mask} is ignored on HIP', RuntimeWarning)\n", (13892, 13942), False, 'import warnings\n'), ((2722, 2834), 'warnings.warn', 'warnings.warn', (['"""loop unrolling is ignored as the unroll value is non-positive or greater than INT_MAX"""'], {}), "(\n 'loop unrolling is ignored as the unroll value is non-positive or greater than INT_MAX'\n )\n", (2735, 2834), False, 'import warnings\n'), ((4722, 4775), 'cupyx.jit._compile._call_ufunc', '_compile._call_ufunc', (['cupy.minimum', '(a, b)', 'None', 'env'], {}), '(cupy.minimum, (a, b), None, env)\n', (4742, 4775), False, 'from cupyx.jit import _compile\n'), ((5113, 5166), 'cupyx.jit._compile._call_ufunc', '_compile._call_ufunc', (['cupy.maximum', '(a, b)', 'None', 'env'], {}), '(cupy.maximum, (a, b), None, env)\n', (5133, 5166), False, 'from cupyx.jit import _compile\n'), ((6154, 6217), 'warnings.warn', 'warnings.warn', (['f"""mask {mask} is ignored on HIP"""', 'RuntimeWarning'], {}), "(f'mask {mask} is ignored on HIP', RuntimeWarning)\n", (6167, 6217), False, 'import warnings\n'), ((9696, 9723), 'cupy_backends.cuda.api.runtime.runtimeGetVersion', 'runtime.runtimeGetVersion', ([], {}), '()\n', (9721, 9723), False, 'from cupy_backends.cuda.api import 
runtime\n'), ((14631, 14643), 'cupyx.jit._internal_types.Constant', 'Constant', (['(64)'], {}), '(64)\n', (14639, 14643), False, 'from cupyx.jit._internal_types import Constant\n'), ((14667, 14679), 'cupyx.jit._internal_types.Constant', 'Constant', (['(32)'], {}), '(32)\n', (14675, 14679), False, 'from cupyx.jit._internal_types import Constant\n'), ((1738, 1749), 'cupyx.jit._internal_types.Constant', 'Constant', (['(0)'], {}), '(0)\n', (1746, 1749), False, 'from cupyx.jit._internal_types import Constant\n'), ((1760, 1771), 'cupyx.jit._internal_types.Constant', 'Constant', (['(1)'], {}), '(1)\n', (1768, 1771), False, 'from cupyx.jit._internal_types import Constant\n'), ((10148, 10175), 'cupy_backends.cuda.api.runtime.runtimeGetVersion', 'runtime.runtimeGetVersion', ([], {}), '()\n', (10173, 10175), False, 'from cupy_backends.cuda.api import runtime\n'), ((1851, 1862), 'cupyx.jit._internal_types.Constant', 'Constant', (['(1)'], {}), '(1)\n', (1859, 1862), False, 'from cupyx.jit._internal_types import Constant\n'), ((10360, 10391), 'cupy.cuda.device.get_compute_capability', 'device.get_compute_capability', ([], {}), '()\n', (10389, 10391), False, 'from cupy.cuda import device\n')] |
#!/home/a.ghaderi/.conda/envs/envjm/bin/python
# Model 2
import pystan
import pandas as pd
import numpy as np
import sys
sys.path.append('../../')
import utils
parts = 1
data = utils.get_data() #loading dataset
data = data[data['participant']==parts]
mis = np.where((data['n200lat']<.101)|(data['n200lat']>.248))[0] # missing data for n200lat
obs = np.where((data['n200lat']>.101)&(data['n200lat']<.248))[0] # observation and missing data for n200lat
N_mis = mis.shape[0] # number of missing data
N_obs = obs.shape[0] # number of observed data
modelfile = '../../stans/res_nonhier.stan' #reading the model span
f = open(modelfile, 'r')
model_wiener = f.read()
sm = pystan.StanModel(model_code=model_wiener)# Compile the model stan
ncohers = 2 #Number of coherence conditions
nspats = 2 #Number of spatial conditions
nconds = 4 #Number of conditions
y = data['y'].to_numpy()
cond_coher = data['cond_coher'].to_numpy()
cond_spat = data['cond_spat'].to_numpy()
conds = data['conds'].to_numpy()
n200lat = data['n200lat'].to_numpy()
#set initial data for model span
data_winner = {'N_obs':N_obs, #Number of trial-level observations
'N_mis':N_mis, #Number of trial-level mising data
'ncohers':ncohers, #Number of coherence conditions
'nspats':nspats, #Number of spatial conditions
'nconds':nconds, #Number of conditions
'y':np.concatenate([y[obs],y[mis]]), #acc*rt in seconds for obervation and missing data
'cond_coher':np.concatenate([cond_coher[obs],cond_coher[mis]]), #Coherence index for each trial
'cond_spat':np.concatenate([cond_spat[obs],cond_spat[mis]]), #sptial index for each trial
'conds':np.concatenate([conds[obs],conds[mis]]), #sptial index for each trial
'n200lat_obs':n200lat[obs]}; #n200 latency for each trial observation
# setting MCMC arguments
niter = 10000
nwarmup = 4000
nchains = 1
thin = 1
initials = [] # initial sampling
for c in range(0, nchains):
chaininit = {
'delta': np.random.uniform(1, 3, size=ncohers),
'alpha': np.random.uniform(.5, 1.),
'eta': np.random.uniform(.01, .2),
'res': np.random.uniform(.01, .02, size=nspats),
'n200sub': np.random.uniform(.11, .2, size=nconds),
'lambda': np.random.uniform(.01, .02),
'n200lat_mis': np.random.uniform(.11, .2, size = N_mis)
}
initials.append(chaininit)
# Train the model and generate samples
fit = sm.sampling(data=data_winner, iter=niter, chains=nchains, warmup=nwarmup, thin=thin, init=initials)
utils.to_pickle(stan_model=sm, stan_fit=fit, save_path='../../save/nonhier/'+str(parts)+'_res_nonhier.pkl')
| [
"numpy.where",
"utils.get_data",
"numpy.concatenate",
"numpy.random.uniform",
"sys.path.append",
"pystan.StanModel"
] | [((122, 147), 'sys.path.append', 'sys.path.append', (['"""../../"""'], {}), "('../../')\n", (137, 147), False, 'import sys\n'), ((180, 196), 'utils.get_data', 'utils.get_data', ([], {}), '()\n', (194, 196), False, 'import utils\n'), ((683, 724), 'pystan.StanModel', 'pystan.StanModel', ([], {'model_code': 'model_wiener'}), '(model_code=model_wiener)\n', (699, 724), False, 'import pystan\n'), ((270, 333), 'numpy.where', 'np.where', (["((data['n200lat'] < 0.101) | (data['n200lat'] > 0.248))"], {}), "((data['n200lat'] < 0.101) | (data['n200lat'] > 0.248))\n", (278, 333), True, 'import numpy as np\n'), ((362, 425), 'numpy.where', 'np.where', (["((data['n200lat'] > 0.101) & (data['n200lat'] < 0.248))"], {}), "((data['n200lat'] > 0.101) & (data['n200lat'] < 0.248))\n", (370, 425), True, 'import numpy as np\n'), ((1442, 1474), 'numpy.concatenate', 'np.concatenate', (['[y[obs], y[mis]]'], {}), '([y[obs], y[mis]])\n', (1456, 1474), True, 'import numpy as np\n'), ((1559, 1609), 'numpy.concatenate', 'np.concatenate', (['[cond_coher[obs], cond_coher[mis]]'], {}), '([cond_coher[obs], cond_coher[mis]])\n', (1573, 1609), True, 'import numpy as np\n'), ((1675, 1723), 'numpy.concatenate', 'np.concatenate', (['[cond_spat[obs], cond_spat[mis]]'], {}), '([cond_spat[obs], cond_spat[mis]])\n', (1689, 1723), True, 'import numpy as np\n'), ((1785, 1825), 'numpy.concatenate', 'np.concatenate', (['[conds[obs], conds[mis]]'], {}), '([conds[obs], conds[mis]])\n', (1799, 1825), True, 'import numpy as np\n'), ((2124, 2161), 'numpy.random.uniform', 'np.random.uniform', (['(1)', '(3)'], {'size': 'ncohers'}), '(1, 3, size=ncohers)\n', (2141, 2161), True, 'import numpy as np\n'), ((2180, 2207), 'numpy.random.uniform', 'np.random.uniform', (['(0.5)', '(1.0)'], {}), '(0.5, 1.0)\n', (2197, 2207), True, 'import numpy as np\n'), ((2222, 2250), 'numpy.random.uniform', 'np.random.uniform', (['(0.01)', '(0.2)'], {}), '(0.01, 0.2)\n', (2239, 2250), True, 'import numpy as np\n'), ((2265, 2307), 'numpy.random.uniform', 'np.random.uniform', (['(0.01)', '(0.02)'], {'size': 'nspats'}), '(0.01, 0.02, size=nspats)\n', (2282, 2307), True, 'import numpy as np\n'), ((2331, 2372), 'numpy.random.uniform', 'np.random.uniform', (['(0.11)', '(0.2)'], {'size': 'nconds'}), '(0.11, 0.2, size=nconds)\n', (2348, 2372), True, 'import numpy as np\n'), ((2390, 2419), 'numpy.random.uniform', 'np.random.uniform', (['(0.01)', '(0.02)'], {}), '(0.01, 0.02)\n', (2407, 2419), True, 'import numpy as np\n'), ((2442, 2482), 'numpy.random.uniform', 'np.random.uniform', (['(0.11)', '(0.2)'], {'size': 'N_mis'}), '(0.11, 0.2, size=N_mis)\n', (2459, 2482), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# Copyright (c) 2017, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
from frappe.model.document import Document
from frappe.contacts.address_and_contact import load_address_and_contact
class Member(Document):
def onload(self):
"""Load address and contacts in `__onload`"""
load_address_and_contact(self)
def validate(self):
self.validate_email_type(self.email)
def validate_email_type(self, email):
from frappe.utils import validate_email_add
		validate_email_add(email.strip(), True)
| [
"frappe.contacts.address_and_contact.load_address_and_contact"
] | [((395, 425), 'frappe.contacts.address_and_contact.load_address_and_contact', 'load_address_and_contact', (['self'], {}), '(self)\n', (419, 425), False, 'from frappe.contacts.address_and_contact import load_address_and_contact\n')] |
import logging
from pydantic import BaseModel, Field
from typing import List
from .similar import important_words
from .server import app
_MAX_LENGTH = 2000
logger = logging.getLogger(__name__)
class ImportantWordsResponse(BaseModel):
important_words: List[str] = Field(..., description="List of lemmas")
class ImportantWordsRequest(BaseModel):
input_string: str = Field(
...,
description="Icelandic text for analysis.",
min_length=1,
max_length=_MAX_LENGTH,
)
# Strange things happen with error handling when using alias - splitting up into two input models
class ParseInputDeprecated(BaseModel):
input_string: str = Field(
...,
description="Icelandic text for analysis.",
min_length=1,
max_length=_MAX_LENGTH,
alias="in",
)
@app.post(
"/v1/important_words",
description="Find lemmas of important words",
response_model=ImportantWordsResponse,
)
def v1_important_words(*, data: ImportantWordsRequest):
return ImportantWordsResponse(important_words=important_words(data.input_string))
@app.post(
"/v1/parse",
description="Find lemmas of important words",
response_model=ImportantWordsResponse,
deprecated=True,
)
def v1_parse(*, data: ParseInputDeprecated):
logger.info(f"parse: {repr(data.input_string)}")
return ImportantWordsResponse(important_words=important_words(data.input_string))
| [
"logging.getLogger",
"pydantic.Field"
] | [((170, 197), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (187, 197), False, 'import logging\n'), ((274, 314), 'pydantic.Field', 'Field', (['...'], {'description': '"""List of lemmas"""'}), "(..., description='List of lemmas')\n", (279, 314), False, 'from pydantic import BaseModel, Field\n'), ((381, 477), 'pydantic.Field', 'Field', (['...'], {'description': '"""Icelandic text for analysis."""', 'min_length': '(1)', 'max_length': '_MAX_LENGTH'}), "(..., description='Icelandic text for analysis.', min_length=1,\n max_length=_MAX_LENGTH)\n", (386, 477), False, 'from pydantic import BaseModel, Field\n'), ((676, 784), 'pydantic.Field', 'Field', (['...'], {'description': '"""Icelandic text for analysis."""', 'min_length': '(1)', 'max_length': '_MAX_LENGTH', 'alias': '"""in"""'}), "(..., description='Icelandic text for analysis.', min_length=1,\n max_length=_MAX_LENGTH, alias='in')\n", (681, 784), False, 'from pydantic import BaseModel, Field\n')] |