content (string, 7-928k) | avg_line_length (float64, 3.5-33.8k) | max_line_length (int64, 6-139k) | alphanum_fraction (float64, 0.08-0.96) | licenses (sequence) | repository_name (string, 7-104) | path (string, 4-230) | size (int64, 7-928k) | lang (string, 1 class)
---|---|---|---|---|---|---|---|---
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import io
from queue import Queue
import time
import unittest
import unittest.mock as mock
from ossdbtoolsservice.hosting.json_rpc_server import (
JSONRPCServer,
IncomingMessageConfiguration,
NotificationContext, RequestContext
)
from ossdbtoolsservice.hosting.json_message import JSONRPCMessage, JSONRPCMessageType
from ossdbtoolsservice.hosting.json_reader import JSONRPCReader
from ossdbtoolsservice.hosting.json_writer import JSONRPCWriter
import tests.utils as utils
class JSONRPCServerTests(unittest.TestCase):
def test_handler_init(self):
# If: I create a Handler class
handler = JSONRPCServer.Handler('class', 'handler')
# Then: The values should be available
self.assertEqual(handler.class_, 'class')
self.assertEqual(handler.handler, 'handler')
def test_server_init(self):
# Setup: Create objects to init the server with
input_stream = io.BytesIO()
output_stream = io.BytesIO()
logger = utils.get_mock_logger()
# If: I create a server
server = JSONRPCServer(input_stream, output_stream, logger=logger)
# Then: The state should be initialized as defined
self.assertIsInstance(server.writer, JSONRPCWriter)
self.assertIsInstance(server.reader, JSONRPCReader)
self.assertIs(server._logger, logger)
self.assertEqual(server._version, '0')
self.assertFalse(server._stop_requested)
# ... The output queue should be empty
self.assertIsInstance(server._output_queue, Queue)
self.assertTrue(server._output_queue.all_tasks_done)
self.assertDictEqual(server._notification_handlers, {})
self.assertListEqual(server._shutdown_handlers, [])
# ... The threads shouldn't be assigned yet
self.assertIsNone(server._output_consumer)
self.assertIsNone(server._input_consumer)
# ... The built-in handlers should be assigned
self.assertTrue('echo' in server._request_handlers)
self.assertIsNotNone(server._request_handlers['echo'])
self.assertTrue('version' in server._request_handlers)
self.assertIsNotNone(server._request_handlers['version'].handler)
self.assertTrue('shutdown' in server._request_handlers)
self.assertIsNotNone(server._request_handlers['shutdown'].handler)
self.assertTrue('exit' in server._request_handlers)
self.assertIsNotNone(server._request_handlers['exit'].handler)
def test_add_shutdown_handler(self):
# If: I add a shutdown handler
handler = mock.MagicMock()
server = JSONRPCServer(None, None)
server.add_shutdown_handler(handler)
# Then: The shutdown handlers should contain the handler
self.assertTrue(handler in server._shutdown_handlers)
def test_set_request_handler(self):
# If: I add a request handler
params = IncomingMessageConfiguration('test/test', int)
handler = mock.MagicMock()
server = JSONRPCServer(None, None)
server.set_request_handler(params, handler)
# Then: The request handler should contain the handler
self.assertTrue(params.method in server._request_handlers)
self.assertIsNotNone(server._request_handlers[params.method])
self.assertIs(server._request_handlers[params.method].class_, int)
self.assertIs(server._request_handlers[params.method].handler, handler)
def test_set_notification_handler(self):
# If: I add a notification handler
params = IncomingMessageConfiguration('test/test', int)
handler = mock.MagicMock()
server = JSONRPCServer(None, None)
server.set_notification_handler(params, handler)
# Then: The notification handler should contain the handler
self.assertTrue(params.method in server._notification_handlers)
self.assertIsNotNone(server._notification_handlers[params.method])
self.assertIs(server._notification_handlers[params.method].class_, int)
self.assertIs(server._notification_handlers[params.method].handler, handler)
# BUILT-IN HANDLER TESTS ###############################################
@staticmethod
def test_echo_request():
# If: I send a request for an echo
rc = utils.MockRequestContext()
params = {}
JSONRPCServer._handle_echo_request(rc, params)
# Then: The params should have been echoed back
rc.send_response.assert_called_once_with(params)
rc.send_notification.assert_not_called()
rc.send_error.assert_not_called()
@staticmethod
def test_version_request():
# If: I send a request for the version
rc = utils.MockRequestContext()
server = JSONRPCServer(None, None)
server._handle_version_request(rc, None)
# Then: I should get a response
rc.send_response.assert_called_once_with(server._version)
rc.send_error.assert_not_called()
rc.send_notification.assert_not_called()
def test_shutdown_request(self):
# If: I send a request for the service to shutdown
rc = utils.MockRequestContext()
handler = mock.MagicMock()
server = JSONRPCServer(None, None, logger=utils.get_mock_logger())
server.add_shutdown_handler(handler)
server._handle_shutdown_request(rc, None)
# Then:
# ... The server should be shutting down
self.assertTrue(server._stop_requested)
# ... The shutdown handler should be called
handler.assert_called_once()
# RequestContext TESTS #################################################
def test_request_context_init_test(self):
# If: I create a request context
queue = Queue()
message = JSONRPCMessage.from_dictionary({'id': '123', 'method': 'test/text/', 'params': {}})
rc = RequestContext(message, queue)
# Then: The internal state should be set up correctly
self.assertIs(rc._message, message)
self.assertIs(rc._queue, queue)
def test_request_context_send_response(self):
# Setup: Create a request context
queue = Queue()
in_message = JSONRPCMessage.from_dictionary({'id': '123', 'method': 'test/text/', 'params': {}})
rc = RequestContext(in_message, queue)
# If: I send a response via the response handler
params = {}
rc.send_response(params)
# Then:
# ... There should be a message in the outbound queue
self.assertTrue(queue.not_empty)
out_message = queue.get_nowait()
self.assertIsInstance(out_message, JSONRPCMessage)
# ... The message must be a response with the proper id
self.assertEqual(out_message.message_type, JSONRPCMessageType.ResponseSuccess)
self.assertEqual(out_message.message_id, '123')
self.assertEqual(out_message.message_result, params)
def test_request_context_send_notification(self):
# Setup: Create a request context
queue = Queue()
in_message = JSONRPCMessage.from_dictionary({'id': '123', 'method': 'test/text/', 'params': {}})
rc = RequestContext(in_message, queue)
# If: I send a notification
params = {}
method = 'test/test'
rc.send_notification(method, params)
# Then:
# ... There should be a message in the outbound queue
self.assertTrue(queue.not_empty)
out_message = queue.get_nowait()
self.assertIsInstance(out_message, JSONRPCMessage)
# ... The message must be a notification with no id
self.assertEqual(out_message.message_type, JSONRPCMessageType.Notification)
self.assertIsNone(out_message.message_id)
self.assertEqual(out_message.message_params, params)
def test_request_context_send_error(self):
# Setup: Create a request context
queue = Queue()
in_message = JSONRPCMessage.from_dictionary({'id': '123', 'method': 'test/text/', 'params': {}})
rc = RequestContext(in_message, queue)
# If: I send an error
params = {}
rc.send_error(params)
# Then:
# ... There should be a message in the outbound queue
self.assertTrue(queue.not_empty)
out_message = queue.get_nowait()
self.assertIsInstance(out_message, JSONRPCMessage)
# ... The message must be an error response with the proper id
self.assertEqual(out_message.message_type, JSONRPCMessageType.ResponseError)
self.assertEqual(out_message.message_id, '123')
self.assertIsInstance(out_message.message_error, dict)
self.assertIs(out_message.message_error['message'], params)
# DISPATCHER TESTS #####################################################
@staticmethod
def test_dispatch_response_success():
# TODO: Replace with robust logic once response routing is implemented
# If: I dispatch a response message
message = JSONRPCMessage.create_response('123', {})
server = JSONRPCServer(None, None, logger=utils.get_mock_logger())
server._dispatch_message(message)
# Then: Nothing should have happened
@staticmethod
def test_dispatch_response_error():
# TODO: Replace with robust logic once error routing is implemented
# If: I dispatch an error message
message = JSONRPCMessage.create_error('123', 0, message='', data={})
server = JSONRPCServer(None, None, logger=utils.get_mock_logger())
server._dispatch_message(message)
# Then: Nothing should have happened
@staticmethod
def test_dispatch_invalid():
# If: I dispatch an invalid message
message = JSONRPCMessage('invalidType')
server = JSONRPCServer(None, None, logger=utils.get_mock_logger())
server._dispatch_message(message)
# Then: Nothing should have happened
@staticmethod
def test_dispatch_request_no_handler():
# If: I dispatch a message that has no handler
logger = utils.get_mock_logger()
message = JSONRPCMessage.create_request('123', 'non_existent', {})
server = JSONRPCServer(None, None, logger=logger)
server._dispatch_message(message)
# Then:
# ... Nothing should have happened
# TODO: Capture that an error was sent
# ... A warning should have been logged
logger.warn.assert_called_once()
def test_dispatch_request_none_class(self):
# Setup: Create a server with a single handler that has none for the deserialization class
config = IncomingMessageConfiguration('test/test', None)
handler = mock.MagicMock()
server = JSONRPCServer(None, None, logger=utils.get_mock_logger())
server.set_request_handler(config, handler)
# If: I dispatch a message that has none set for the deserialization class
params = {}
message = JSONRPCMessage.create_request('123', 'test/test', params)
server._dispatch_message(message)
# Then:
# ... The handler should have been called
handler.assert_called_once()
# ... The parameters to the handler should have been a request context and params
self.assertIsInstance(handler.mock_calls[0][1][0], RequestContext)
self.assertIs(handler.mock_calls[0][1][0]._queue, server._output_queue)
self.assertIs(handler.mock_calls[0][1][0]._message, message)
self.assertIs(handler.mock_calls[0][1][1], params)
def test_dispatch_request_normal(self):
# Setup: Create a server with a single handler that uses a class for deserialization
config = IncomingMessageConfiguration('test/test', _TestParams)
handler = mock.MagicMock()
server = JSONRPCServer(None, None, logger=utils.get_mock_logger())
server.set_request_handler(config, handler)
# If: I dispatch a message whose params deserialize to that class
params = {}
message = JSONRPCMessage.create_request('123', 'test/test', params)
server._dispatch_message(message)
# Then:
# ... The handler should have been called
handler.assert_called_once()
# ... The parameters to the handler should have been a request context and params
self.assertIsInstance(handler.mock_calls[0][1][0], RequestContext)
self.assertIs(handler.mock_calls[0][1][0]._queue, server._output_queue)
self.assertIs(handler.mock_calls[0][1][0]._message, message)
self.assertIsInstance(handler.mock_calls[0][1][1], _TestParams)
@staticmethod
def test_dispatch_notification_no_handler():
# If: I dispatch a message that has no handler
logger = utils.get_mock_logger()
message = JSONRPCMessage.create_notification('non_existent', {})
server = JSONRPCServer(None, None, logger=logger)
server._dispatch_message(message)
# Then:
# ... Nothing should have happened
# TODO: Capture that an error was sent
# ... A warning should have been logged
logger.warn.assert_called_once()
def test_dispatch_notification_none_class(self):
# Setup: Create a server with a single handler that has none for the deserialization class
config = IncomingMessageConfiguration('test/test', None)
handler = mock.MagicMock()
server = JSONRPCServer(None, None, logger=utils.get_mock_logger())
server.set_notification_handler(config, handler)
# If: I dispatch a message that has none set for the deserialization class
params = {}
message = JSONRPCMessage.create_notification('test/test', params)
server._dispatch_message(message)
# Then:
# ... The handler should have been called
handler.assert_called_once()
# ... The parameters to the handler should have been a request context and params
self.assertIsInstance(handler.mock_calls[0][1][0], NotificationContext)
self.assertIs(handler.mock_calls[0][1][0]._queue, server._output_queue)
self.assertIs(handler.mock_calls[0][1][1], params)
def test_dispatch_notification_normal(self):
# Setup: Create a server with a single handler that uses a class for deserialization
config = IncomingMessageConfiguration('test/test', _TestParams)
handler = mock.MagicMock()
server = JSONRPCServer(None, None, logger=utils.get_mock_logger())
server.set_notification_handler(config, handler)
# If: I dispatch a message whose params deserialize to that class
params = {}
message = JSONRPCMessage.create_notification('test/test', params)
server._dispatch_message(message)
# Then:
# ... The handler should have been called
handler.assert_called_once()
# ... The parameters to the handler should have been a request context and params
self.assertIsInstance(handler.mock_calls[0][1][0], NotificationContext)
self.assertIs(handler.mock_calls[0][1][0]._queue, server._output_queue)
self.assertIsInstance(handler.mock_calls[0][1][1], _TestParams)
# NotificationContext TESTS ###########################################
def test_notification_context_init_test(self):
# If: I create a notification context
queue = Queue()
nc = NotificationContext(queue)
# Then: The internal state should be set up correctly
self.assertIs(nc._queue, queue)
def test_notification_context_send(self):
# Setup: Create a request context
queue = Queue()
nc = NotificationContext(queue)
# If: I send a response via the response handler
method = 'test/test'
params = {}
nc.send_notification(method, params)
# Then:
# ... There should be a message in the outbound queue
self.assertTrue(queue.not_empty)
out_message = queue.get_nowait()
self.assertIsInstance(out_message, JSONRPCMessage)
# ... The message must be a notification with no id
self.assertEqual(out_message.message_type, JSONRPCMessageType.Notification)
self.assertIsNone(out_message.message_id)
self.assertEqual(out_message.message_params, params)
self.assertEqual(out_message.message_method, method)
# END-TO-END TESTS #####################################################
def test_request_enqueued(self):
# Setup: Create empty io streams
input_stream = io.BytesIO()
output_stream = io.BytesIO()
# If: I submit an outbound request
test_client = JSONRPCServer(input_stream, output_stream)
test_client.send_request('test/test', {'test': 'test'})
# Then:
# ... There should be one request in the outbound queue
request = test_client._output_queue.get()
# ... The queued message should match the request we sent
self.assertEqual(request.message_method, 'test/test')
self.assertDictEqual(request.message_params, {'test': 'test'})
def test_notification_enqueued(self):
# Setup: Create empty io streams
input_stream = io.BytesIO()
output_stream = io.BytesIO()
# If: I submit an outbound request
test_client = JSONRPCServer(input_stream, output_stream)
test_client.send_notification('test/test', {'test': 'test'})
# Then:
# ... There should be one request in the outbound queue
request = test_client._output_queue.get()
# ... The queued message should match the request we sent
self.assertEqual(request.message_method, 'test/test')
self.assertDictEqual(request.message_params, {'test': 'test'})
def test_reads_message(self):
# Setup:
# ... Create an input stream with a single message
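# ... (The framing is LSP-style: a Content-Length header, a blank line (\r\n\r\n),
# then exactly that many bytes of JSON payload -- here the payload is 30 bytes.)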
input_stream = io.BytesIO(b'Content-Length: 30\r\n\r\n{"method":"test", "params":{}}')
output_stream = io.BytesIO()
# ... Create a server that uses the input and output streams
server = JSONRPCServer(input_stream, output_stream, logger=utils.get_mock_logger())
# ... Patch the server to not dispatch a message
dispatch_mock = mock.MagicMock()
server._dispatch_message = dispatch_mock
# If: I start the server, run it for a bit, and stop it
# TODO: Remove explicit sleep and add spin-locks
server.start()
time.sleep(1)
server.stop()
server.wait_for_exit()
# Then: The dispatch method should have been called
expected_output = JSONRPCMessage.from_dictionary({"method": "test", "params": {}})
dispatch_mock.assert_called_once()
self.assertDictEqual(dispatch_mock.mock_calls[0][1][0].dictionary, expected_output.dictionary)
# Teardown: All background threads should be shut down.
self.assertFalse(server._input_consumer.is_alive())
self.assertFalse(server._output_consumer.is_alive())
def test_read_multiple_messages(self):
# Setup:
# ... Create an input stream with two messages
test_bytes = b'Content-Length: 30\r\n\r\n{"method":"test", "params":{}}'
input_stream = io.BytesIO(test_bytes + test_bytes)
output_stream = io.BytesIO()
# ... Create a server that uses the input and output streams
server = JSONRPCServer(input_stream, output_stream, logger=utils.get_mock_logger())
# ... Patch the server to not dispatch a message
dispatch_mock = mock.MagicMock()
server._dispatch_message = dispatch_mock
# If: I start the server, run it for a bit, and stop it
server.start()
time.sleep(1)
server.stop()
server.wait_for_exit()
# Then: The dispatch method should have been called twice
expected_output = JSONRPCMessage.from_dictionary({"method": "test", "params": {}})
self.assertEqual(len(dispatch_mock.mock_calls), 2)
self.assertDictEqual(dispatch_mock.mock_calls[0][1][0].dictionary, expected_output.dictionary)
self.assertDictEqual(dispatch_mock.mock_calls[1][1][0].dictionary, expected_output.dictionary)
# Teardown: All background threads should be shut down.
self.assertFalse(server._input_consumer.is_alive())
self.assertFalse(server._output_consumer.is_alive())
class _TestParams:
@classmethod
def from_dict(cls, dictionary):
return _TestParams()
def __init__(self):
pass
if __name__ == '__main__':
unittest.main()
| 41.403162 | 104 | 0.664916 | ["MIT"] | DaeunYim/pgtoolsservice | tests/hosting/test_server.py | 20,950 | Python |
from setuptools import setup, find_packages
VERSION = '0.0.1'
DESCRIPTION = 'edu-lib'
LONG_DESCRIPTION = 'Library for learning the basic structure.'
setup(
name="mylibrary",
version=VERSION,
author="Stephan Bökelmann",
author_email="sb@gruppe.ai",
scripts=[],
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
packages=find_packages(),
install_requires=[],
url="",
keywords=['python', 'debugging'],
classifiers= [
"Intended Audience :: Education",
"Programming Language :: Python :: 3",
"Operating System :: POSIX",
]
)
| 26.192308 | 59 | 0.581498 | ["MIT"] | bjoekeldude/edu_python_mini_lib | setup.py | 682 | Python |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._inputs import *
__all__ = ['Account']
class Account(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
account_name: Optional[pulumi.Input[str]] = None,
active_directories: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ActiveDirectoryArgs']]]]] = None,
location: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
NetApp account resource
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] account_name: The name of the NetApp account
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ActiveDirectoryArgs']]]] active_directories: Active Directories
:param pulumi.Input[str] location: Resource location
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['account_name'] = account_name
__props__['active_directories'] = active_directories
__props__['location'] = location
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['tags'] = tags
__props__['name'] = None
__props__['provisioning_state'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:netapp/v20200901:Account"), pulumi.Alias(type_="azure-native:netapp:Account"), pulumi.Alias(type_="azure-nextgen:netapp:Account"), pulumi.Alias(type_="azure-native:netapp/latest:Account"), pulumi.Alias(type_="azure-nextgen:netapp/latest:Account"), pulumi.Alias(type_="azure-native:netapp/v20170815:Account"), pulumi.Alias(type_="azure-nextgen:netapp/v20170815:Account"), pulumi.Alias(type_="azure-native:netapp/v20190501:Account"), pulumi.Alias(type_="azure-nextgen:netapp/v20190501:Account"), pulumi.Alias(type_="azure-native:netapp/v20190601:Account"), pulumi.Alias(type_="azure-nextgen:netapp/v20190601:Account"), pulumi.Alias(type_="azure-native:netapp/v20190701:Account"), pulumi.Alias(type_="azure-nextgen:netapp/v20190701:Account"), pulumi.Alias(type_="azure-native:netapp/v20190801:Account"), pulumi.Alias(type_="azure-nextgen:netapp/v20190801:Account"), pulumi.Alias(type_="azure-native:netapp/v20191001:Account"), pulumi.Alias(type_="azure-nextgen:netapp/v20191001:Account"), pulumi.Alias(type_="azure-native:netapp/v20191101:Account"), pulumi.Alias(type_="azure-nextgen:netapp/v20191101:Account"), pulumi.Alias(type_="azure-native:netapp/v20200201:Account"), pulumi.Alias(type_="azure-nextgen:netapp/v20200201:Account"), pulumi.Alias(type_="azure-native:netapp/v20200301:Account"), pulumi.Alias(type_="azure-nextgen:netapp/v20200301:Account"), pulumi.Alias(type_="azure-native:netapp/v20200501:Account"), pulumi.Alias(type_="azure-nextgen:netapp/v20200501:Account"), pulumi.Alias(type_="azure-native:netapp/v20200601:Account"), pulumi.Alias(type_="azure-nextgen:netapp/v20200601:Account"), pulumi.Alias(type_="azure-native:netapp/v20200701:Account"), pulumi.Alias(type_="azure-nextgen:netapp/v20200701:Account"), pulumi.Alias(type_="azure-native:netapp/v20200801:Account"), pulumi.Alias(type_="azure-nextgen:netapp/v20200801:Account"), pulumi.Alias(type_="azure-native:netapp/v20201101:Account"), pulumi.Alias(type_="azure-nextgen:netapp/v20201101:Account")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(Account, __self__).__init__(
'azure-native:netapp/v20200901:Account',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'Account':
"""
Get an existing Account resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["active_directories"] = None
__props__["location"] = None
__props__["name"] = None
__props__["provisioning_state"] = None
__props__["tags"] = None
__props__["type"] = None
return Account(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="activeDirectories")
def active_directories(self) -> pulumi.Output[Optional[Sequence['outputs.ActiveDirectoryResponse']]]:
"""
Active Directories
"""
return pulumi.get(self, "active_directories")
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
"""
Resource location
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource name
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
Azure lifecycle management
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Resource tags
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type
"""
return pulumi.get(self, "type")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
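# Example usage (a minimal sketch; the resource, account and group names below
# are illustrative, not taken from any real deployment):
#   account = Account("netappAccount",
#                     account_name="anf-account",
#                     location="eastus",
#                     resource_group_name="example-rg")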
| 51.065789 | 2,057 | 0.675213 | ["Apache-2.0"] | pulumi-bot/pulumi-azure-native | sdk/python/pulumi_azure_native/netapp/v20200901/account.py | 7,762 | Python |
from django.test import TestCase
from blog.models import Entry
class EntryModelTest(TestCase):
def test_string_representation(self):
entry = Entry(title="My entry title")
self.assertEqual(str(entry), entry.title)
def test_verbose_name_plural(self):
self.assertEqual(str(Entry._meta.verbose_name_plural), "entries")
| 27 | 73 | 0.735043 | ["BSD-2-Clause"] | elinguiuriel/djangoTDD | code/blog/tests/test_models.py | 351 | Python |
import os
import logging
from django.conf import settings
from django.utils import translation
from django.utils.translation import gettext_lazy as _
from django.db import transaction
from django.core.files.base import ContentFile
from celery.exceptions import SoftTimeLimitExceeded
from froide.celery import app as celery_app
from froide.publicbody.models import PublicBody
from froide.upload.models import Upload
from .models import FoiRequest, FoiMessage, FoiAttachment, FoiProject
from .foi_mail import _process_mail, _fetch_mail
from .notifications import send_classification_reminder
logger = logging.getLogger(__name__)
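# Celery tasks for the FOI request app: inbound mail processing, request
# lifecycle checks (overdue/asleep/classification), project request fan-out,
# and attachment conversion, OCR and redaction.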
@celery_app.task(
name="froide.foirequest.tasks.process_mail", acks_late=True, time_limit=60
)
def process_mail(*args, **kwargs):
translation.activate(settings.LANGUAGE_CODE)
with transaction.atomic():
_process_mail(*args, **kwargs)
@celery_app.task(name="froide.foirequest.tasks.fetch_mail", expires=60)
def fetch_mail():
for mail_uid, rfc_data in _fetch_mail():
process_mail.delay(rfc_data, mail_uid=mail_uid)
@celery_app.task
def detect_overdue():
translation.activate(settings.LANGUAGE_CODE)
for foirequest in FoiRequest.objects.get_to_be_overdue():
foirequest.set_overdue()
@celery_app.task
def detect_asleep():
translation.activate(settings.LANGUAGE_CODE)
for foirequest in FoiRequest.objects.get_to_be_asleep():
foirequest.set_asleep()
@celery_app.task
def classification_reminder():
translation.activate(settings.LANGUAGE_CODE)
for foirequest in FoiRequest.objects.get_unclassified():
send_classification_reminder(foirequest)
@celery_app.task
def check_delivery_status(message_id, count=None, extended=False):
try:
message = FoiMessage.objects.get(id=message_id)
except FoiMessage.DoesNotExist:
return
message.check_delivery_status(count=count, extended=extended)
@celery_app.task
def create_project_requests(project_id, publicbody_ids, **kwargs):
for seq, pb_id in enumerate(publicbody_ids):
create_project_request.delay(project_id, pb_id, sequence=seq, **kwargs)
@celery_app.task
def create_project_request(project_id, publicbody_id, sequence=0, **kwargs):
from .services import CreateRequestFromProjectService
try:
project = FoiProject.objects.get(id=project_id)
except FoiProject.DoesNotExist:
# project does not exist anymore?
return
try:
pb = PublicBody.objects.get(id=publicbody_id)
except PublicBody.DoesNotExist:
# pb was deleted?
return
kwargs.update(
{
"project": project,
"publicbody": pb,
"subject": project.title,
"user": project.user,
"body": project.description,
"public": project.public,
"reference": project.reference,
"tags": [t.name for t in project.tags.all()],
"project_order": sequence,
}
)
service = CreateRequestFromProjectService(kwargs)
foirequest = service.execute()
if project.request_count == project.foirequest_set.all().count():
project.status = FoiProject.STATUS_READY
project.save()
return foirequest.pk
@celery_app.task(name="froide.foirequest.tasks.convert_attachment_task", time_limit=60)
def convert_attachment_task(instance_id):
try:
att = FoiAttachment.objects.get(pk=instance_id)
except FoiAttachment.DoesNotExist:
return
if att.can_convert_to_pdf():
return convert_attachment(att)
def ocr_pdf_attachment(att):
if att.converted:
ocred_att = att.converted
else:
name, ext = os.path.splitext(att.name)
name = _("{name}_ocr{ext}").format(name=name, ext=".pdf")
ocred_att = FoiAttachment.objects.create(
name=name,
belongs_to=att.belongs_to,
approved=False,
filetype="application/pdf",
is_converted=True,
can_approve=att.can_approve,
)
att.converted = ocred_att
att.can_approve = False
att.approved = False
att.save()
ocr_pdf_task.delay(
att.pk,
ocred_att.pk,
)
def convert_attachment(att):
from filingcabinet.pdf_utils import convert_to_pdf
output_bytes = convert_to_pdf(
att.file.path,
binary_name=settings.FROIDE_CONFIG.get("doc_conversion_binary"),
construct_call=settings.FROIDE_CONFIG.get("doc_conversion_call_func"),
)
if output_bytes is None:
return
if att.converted:
new_att = att.converted
else:
name, ext = os.path.splitext(att.name)
name = _("{name}_converted{ext}").format(name=name, ext=".pdf")
new_att = FoiAttachment(
name=name,
belongs_to=att.belongs_to,
approved=False,
filetype="application/pdf",
is_converted=True,
can_approve=att.can_approve,
)
new_file = ContentFile(output_bytes)
new_att.size = new_file.size
new_att.file.save(new_att.name, new_file)
new_att.save()
att.converted = new_att
att.can_approve = False
att.approved = False
att.save()
@celery_app.task(
name="froide.foirequest.tasks.convert_images_to_pdf_task",
time_limit=60 * 5,
soft_time_limit=60 * 4,
)
def convert_images_to_pdf_task(att_ids, target_id, instructions, can_approve=True):
from filingcabinet.pdf_utils import convert_images_to_ocred_pdf
att_qs = FoiAttachment.objects.filter(id__in=att_ids)
att_map = {a.id: a for a in att_qs}
atts = [att_map[a_id] for a_id in att_ids]
try:
target = FoiAttachment.objects.get(id=target_id)
except FoiAttachment.DoesNotExist:
return
paths = [a.file.path for a in atts]
try:
pdf_bytes = convert_images_to_ocred_pdf(paths, instructions=instructions)
except SoftTimeLimitExceeded:
pdf_bytes = None
if pdf_bytes is None:
att_qs.update(can_approve=can_approve)
target.delete()
return
new_file = ContentFile(pdf_bytes)
target.size = new_file.size
target.file.save(target.name, new_file)
target.save()
@celery_app.task(
name="froide.foirequest.tasks.ocr_pdf_task",
time_limit=60 * 5,
soft_time_limit=60 * 4,
)
def ocr_pdf_task(att_id, target_id, can_approve=True):
from filingcabinet.pdf_utils import run_ocr
try:
attachment = FoiAttachment.objects.get(pk=att_id)
except FoiAttachment.DoesNotExist:
return
try:
target = FoiAttachment.objects.get(pk=target_id)
except FoiAttachment.DoesNotExist:
return
try:
pdf_bytes = run_ocr(
attachment.file.path,
language=settings.TESSERACT_LANGUAGE
if settings.TESSERACT_LANGUAGE
else settings.LANGUAGE_CODE,
timeout=180,
)
except SoftTimeLimitExceeded:
pdf_bytes = None
if pdf_bytes is None:
attachment.can_approve = can_approve
attachment.save()
target.delete()
return
new_file = ContentFile(pdf_bytes)
target.size = new_file.size
target.file.save(target.name, new_file)
target.save()
@celery_app.task(
name="froide.foirequest.tasks.redact_attachment_task",
time_limit=60 * 6,
soft_time_limit=60 * 5,
)
def redact_attachment_task(att_id, target_id, instructions):
from filingcabinet.pdf_utils import run_ocr
from froide.helper.redaction import redact_file
try:
attachment = FoiAttachment.objects.get(pk=att_id)
except FoiAttachment.DoesNotExist:
return
if att_id != target_id:
try:
target = FoiAttachment.objects.get(pk=target_id)
except FoiAttachment.DoesNotExist:
return
else:
target = attachment
logger.info("Trying redaction of %s", attachment.id)
try:
pdf_bytes = redact_file(attachment.file, instructions)
except Exception:
logger.error("PDF redaction error", exc_info=True)
pdf_bytes = None
if pdf_bytes is None:
logger.info("Redaction failed %s", attachment.id)
# Redaction has failed, remove empty attachment
if attachment.redacted:
attachment.redacted = None
if attachment.is_redacted:
attachment.approved = True
attachment.can_approve = True
attachment.pending = False
attachment.save()
if not target.file:
target.delete()
return
logger.info("Redaction successful %s", attachment.id)
pdf_file = ContentFile(pdf_bytes)
target.size = pdf_file.size
target.file.save(target.name, pdf_file, save=False)
logger.info("Trying OCR %s", target.id)
try:
pdf_bytes = run_ocr(
target.file.path,
language=settings.TESSERACT_LANGUAGE
if settings.TESSERACT_LANGUAGE
else settings.LANGUAGE_CODE,
timeout=60 * 4,
)
except SoftTimeLimitExceeded:
pdf_bytes = None
if pdf_bytes is not None:
logger.info("OCR successful %s", target.id)
pdf_file = ContentFile(pdf_bytes)
target.size = pdf_file.size
target.file.save(target.name, pdf_file, save=False)
else:
logger.info("OCR failed %s", target.id)
target.can_approve = True
target.pending = False
target.approve_and_save()
FoiAttachment.attachment_published.send(sender=target, user=None)
@celery_app.task(name="froide.foirequest.tasks.move_upload_to_attachment")
def move_upload_to_attachment(att_id, upload_id):
try:
att = FoiAttachment.objects.get(pk=att_id)
except FoiAttachment.DoesNotExist:
return
try:
upload = Upload.objects.get(pk=upload_id)
except Upload.DoesNotExist:
return
file = upload.get_file()
if file:
att.pending = False
att.file.save(att.name, file, save=True)
upload.finish()
upload.delete()
if att.can_convert_to_pdf():
convert_attachment_task.delay(att.id)
| 28.307263 | 87 | 0.677719 | ["MIT"] | MrKrisKrisu/froide | froide/foirequest/tasks.py | 10,134 | Python |
from __future__ import absolute_import
import pytest
from changes.expanders.commands import CommandsExpander
from changes.testutils import TestCase
class CommandsExpanderTest(TestCase):
def setUp(self):
super(CommandsExpanderTest, self).setUp()
self.project = self.create_project()
def get_expander(self, data):
return CommandsExpander(self.project, data)
def test_validate(self):
with pytest.raises(AssertionError):
self.get_expander({}).validate()
self.get_expander({'commands': []}).validate()
def test_expand(self):
project = self.create_project()
build = self.create_build(project)
job = self.create_job(build)
results = list(self.get_expander({'commands': [
{'script': 'echo 1'},
{'script': 'echo 2', 'label': 'foo'}
]}).expand(job=job, max_executors=10))
assert len(results) == 2
assert results[0].label == 'echo 1'
assert len(results[0].commands) == 1
assert results[0].commands[0].label == 'echo 1'
assert results[0].commands[0].script == 'echo 1'
assert results[1].label == 'foo'
assert len(results[1].commands) == 1
assert results[1].commands[0].label == 'foo'
assert results[1].commands[0].script == 'echo 2'
| 31.785714 | 56 | 0.626966 | ["Apache-2.0"] | dropbox/changes | tests/changes/expanders/test_commands.py | 1,335 | Python |
"""Parse Warren2020 fluxes.
Fluxes from https://zenodo.org/record/3952926 (DOI:10.5281/zenodo.3952926)
See https://arxiv.org/abs/1902.01340 and https://arxiv.org/abs/1912.03328
for description of the models.
"""
import h5py
from sntools.formats import gamma, get_starttime, get_endtime
flux = {}
def parse_input(input, inflv, starttime, endtime):
"""Read simulations data from input file.
Arguments:
input -- prefix of file containing neutrino fluxes
inflv -- neutrino flavor to consider
starttime -- start time set by user via command line option (or None)
endtime -- end time set by user via command line option (or None)
"""
f = h5py.File(input, 'r')
for (t, r) in f['sim_data']['shock_radius']:
if r > 1:
tbounce = t * 1000 # convert to ms
break
starttime = get_starttime(starttime, 1000 * f['sim_data']['shock_radius'][0][0] - tbounce)
endtime = get_endtime(endtime, 1000 * f['sim_data']['shock_radius'][-1][0] - tbounce)
# Save flux data to dictionary to look up in nu_emission() below
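# Each entry maps time post-bounce [ms] -> (mean energy [MeV], mean squared energy [MeV^2], luminosity [MeV/ms]).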
global flux
flux = {}
path = {'e': 'nue_data', 'eb': 'nuae_data', 'x': 'nux_data', 'xb': 'nux_data'}[inflv]
for i, (t, lum) in enumerate(f[path]['lum']):
t = 1000 * t - tbounce # convert to time post-bounce in ms
if (t < starttime - 30) or (t > endtime + 30):
# Ignore data outside of the requested time span.
continue
lum *= 1e51 * 624.151 # convert from 10^51 erg/s to MeV/ms
mean_e = f[path]['avg_energy'][i][1]
mean_e_sq = f[path]['rms_energy'][i][1]**2
flux[t] = (mean_e, mean_e_sq, lum)
f.close()
return (starttime, endtime, sorted(flux.keys()))
def prepare_evt_gen(binned_t):
global flux
gamma.flux = flux
gamma.prepare_evt_gen(binned_t)
flux = gamma.flux
def nu_emission(eNu, time):
gamma.flux = flux
return gamma.nu_emission(eNu, time)
| 30.578125 | 94 | 0.633112 | ["BSD-3-Clause"] | arfon/sntools | sntools/formats/warren2020.py | 1,957 | Python |
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Sample Google App Engine application that demonstrates using the Users API
For more information about App Engine, see README.md under /appengine.
"""
# [START all]
from google.appengine.api import users
import webapp2
class MainPage(webapp2.RequestHandler):
def get(self):
user = users.get_current_user()
if user:
nickname = user.nickname()
logout_url = users.create_logout_url('/')
greeting = 'Welcome, {}! (<a href="{}">sign out</a>)'.format(
nickname, logout_url)
else:
login_url = users.create_login_url('/')
greeting = '<a href="{}">Sign in</a>'.format(login_url)
self.response.write(
'<html><body>{}</body></html>'.format(greeting))
class AdminPage(webapp2.RequestHandler):
def get(self):
user = users.get_current_user()
if user:
if users.is_current_user_admin():
self.response.write('You are an administrator.')
else:
self.response.write('You are not an administrator.')
else:
self.response.write('You are not logged in.')
app = webapp2.WSGIApplication([
('/', MainPage),
('/admin', AdminPage)
], debug=True)
# [END all]
| 30.278689 | 74 | 0.646995 | ["Apache-2.0"] | Acidburn0zzz/python-docs-samples | appengine/standard/users/main.py | 1,847 | Python |
"""
ASGI config for avocadobites project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'avocadobites.settings')
application = get_asgi_application()
| 23.588235 | 78 | 0.790524 | ["MIT"] | sanjuop/PatrioticPictures | avocadobites/avocadobites/asgi.py | 401 | Python |
"""
eZmax API Definition
This API exposes all the functionality of the eZmax and eZsign applications. # noqa: E501
The version of the OpenAPI document: 1.1.3
Contact: support-api@ezmax.ca
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import eZmaxApi
from eZmaxApi.model.ezsignformfield_response import EzsignformfieldResponse
globals()['EzsignformfieldResponse'] = EzsignformfieldResponse
from eZmaxApi.model.ezsignformfield_response_compound import EzsignformfieldResponseCompound
class TestEzsignformfieldResponseCompound(unittest.TestCase):
"""EzsignformfieldResponseCompound unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testEzsignformfieldResponseCompound(self):
"""Test EzsignformfieldResponseCompound"""
# FIXME: construct object with mandatory attributes with example values
# model = EzsignformfieldResponseCompound() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 26.846154 | 97 | 0.747851 | ["MIT"] | eZmaxinc/eZmax-SDK-python | test/test_ezsignformfield_response_compound.py | 1,047 | Python |
import os
import shutil
import tempfile
from unittest import TestCase
from mock import patch
from regulations.apps import RegulationsConfig
class RegulationsConfigTests(TestCase):
def setUp(self):
self.tmpdir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.tmpdir)
@patch('regulations.apps.get_app_template_dirs')
def test_precompute_custom_templates(self, get_app_template_dirs):
"""Verify that custom templates are found"""
get_app_template_dirs.return_value = [self.tmpdir]
open(os.path.join(self.tmpdir, '123-45-a.html'), 'w').close()
open(os.path.join(self.tmpdir, 'other.html'), 'w').close()
RegulationsConfig.precompute_custom_templates()
self.assertEqual(RegulationsConfig.custom_tpls['123-45-a'],
'regulations/custom_nodes/123-45-a.html')
self.assertEqual(RegulationsConfig.custom_tpls['other'],
'regulations/custom_nodes/other.html')
self.assertFalse('another' in RegulationsConfig.custom_tpls)
| 34.516129 | 70 | 0.695327 | ["CC0-1.0"] | CMSgov/cmcs-eregulations | regulations/tests/apps_tests.py | 1,070 | Python |
import random
from player import Player
from hand import Hand
class CPU(Player):
def __init__(self, name: str):
super().__init__(name)
self.hand = Hand()
def discard(self):
if self.hand is None or len(self.hand) <= 0:
raise RuntimeError('No cards to discard')
return self.hand.pop(random.randrange(len(self.hand)))
def play(self, currentPlayPointLimit):
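# Strategy: gather the indexes of all cards playable under the current
# point limit, then pick one of them uniformly at random.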
print('{0}\'s Hand: {1}'.format(self.name, str(self.playHand)))
if self.playHand is None or len(self.playHand) <= 0:
raise RuntimeError('No play hand was created or it is empty')
playableCardIndexes = []
for i, card in enumerate(self.playHand):
if card.valuePoints <= currentPlayPointLimit:
playableCardIndexes.append(i)
cardToPlayIndex = playableCardIndexes[random.randrange(len(playableCardIndexes))]
return self.playHand.pop(cardToPlayIndex)
| 39.5 | 89 | 0.646624 | ["MIT"] | SudoSpartanDan/CribbagePythonGame | cpu.py | 948 | Python |
"""@package vc_updated
Functions to implement the updated Voce-Chaboche material model and measure its error.
"""
import numpy as np
import pandas as pd
from numdifftools import nd_algopy as nda
def uvc_return_mapping(x_sol, data, tol=1.0e-8, maximum_iterations=1000):
""" Implements the time integration of the updated Voce-Chaboche material model.
:param np.array x_sol: Updated Voce-Chaboche model parameters.
:param pd.DataFrame data: stress-strain data.
:param float tol: Local Newton tolerance.
:param int maximum_iterations: maximum iterations in local Newton procedure, raises RuntimeError if exceeded.
:return dict: History of: stress ('stress'), strain ('strain'), the total error ('error') calculated by the
updated Voce-Chaboche model, number of iterations for convergence at each increment ('num_its').
"""
if len(x_sol) < 8:
raise RuntimeError("No backstresses or using original V-C params.")
n_param_per_back = 2
n_basic_param = 6
# Get material properties
E = x_sol[0] * 1.0
sy_0 = x_sol[1] * 1.0
Q = x_sol[2] * 1.0
b = x_sol[3] * 1.0
D = x_sol[4] * 1.0
a = x_sol[5] * 1.0
# Set up backstresses
n_backstresses = int((len(x_sol) - n_basic_param) / n_param_per_back)
c_k = []
gamma_k = []
for i in range(0, n_backstresses):
c_k.append(x_sol[n_basic_param + n_param_per_back * i])
gamma_k.append(x_sol[n_basic_param + 1 + n_param_per_back * i])
# Initialize parameters
alpha_components = np.zeros(n_backstresses, dtype=object) # backstress components
strain = 0.
stress = 0.
ep_eq = 0. # equivalent plastic strain
error = 0. # error measure
sum_abs_de = 0. # total strain
stress_sim = 0.0
stress_test = 0.0
area_test = 0.0
stress_track = []
strain_track = []
strain_inc_track = []
iteration_track = []
loading = np.diff(data['e_true'])
for increment_number, strain_inc in enumerate(loading):
strain += strain_inc
alpha = np.sum(alpha_components)
yield_stress = sy_0 + Q * (1. - np.exp(-b * ep_eq)) - D * (1. - np.exp(-a * ep_eq))
trial_stress = stress + E * strain_inc
relative_stress = trial_stress - alpha
flow_dir = np.sign(relative_stress)
yield_condition = np.abs(relative_stress) - yield_stress
if yield_condition > tol:
is_converged = False
else:
is_converged = True
# For error
stress_sim_1 = stress_sim * 1.0
stress_test_1 = stress_test * 1.0
# Return mapping if plastic loading
ep_eq_init = ep_eq
alpha_init = alpha
consist_param = 0.
number_of_iterations = 0
while is_converged is False and number_of_iterations < maximum_iterations:
number_of_iterations += 1
# Isotropic hardening and isotropic modulus
yield_stress = sy_0 + Q * (1. - np.exp(-b * ep_eq)) - D * (1. - np.exp(-a * ep_eq))
iso_modulus = Q * b * np.exp(-b * ep_eq) - D * a * np.exp(-a * ep_eq)
# Kinematic hardening and kinematic modulus
alpha = 0.
kin_modulus = 0.
for i in range(0, n_backstresses):
e_k = np.exp(-gamma_k[i] * (ep_eq - ep_eq_init))
alpha += flow_dir * c_k[i] / gamma_k[i] + (alpha_components[i] - flow_dir * c_k[i] / gamma_k[i]) * e_k
kin_modulus += c_k[i] * e_k - flow_dir * gamma_k[i] * e_k * alpha_components[i]
delta_alpha = alpha - alpha_init
# Local Newton step
numerator = np.abs(relative_stress) - (consist_param * E + yield_stress + flow_dir * delta_alpha)
denominator = -(E + iso_modulus + kin_modulus)
consist_param = consist_param - numerator / denominator
ep_eq = ep_eq_init + consist_param
if np.abs(numerator) < tol:
is_converged = True
# Update the variables
stress = trial_stress - E * flow_dir * consist_param
for i in range(0, n_backstresses):
e_k = np.exp(-gamma_k[i] * (ep_eq - ep_eq_init))
alpha_components[i] = flow_dir * c_k[i] / gamma_k[i] \
+ (alpha_components[i] - flow_dir * c_k[i] / gamma_k[i]) * e_k
stress_track.append(stress)
strain_track.append(strain)
strain_inc_track.append(strain_inc)
iteration_track.append(number_of_iterations)
# Calculate the error
stress_sim = stress * 1.0
stress_test = data['Sigma_true'].iloc[increment_number + 1]
sum_abs_de += np.abs(strain_inc)
area_test += np.abs(strain_inc) * ((stress_test) ** 2 + (stress_test_1) ** 2) / 2.
error += np.abs(strain_inc) * ((stress_sim - stress_test) ** 2 + (stress_sim_1 - stress_test_1) ** 2) / 2.
if number_of_iterations >= maximum_iterations:
print ("Increment number = ", increment_number)
print ("Parameters = ", x_sol)
print ("Numerator = ", numerator)
raise RuntimeError('Return mapping did not converge in ' + str(maximum_iterations) + ' iterations.')
area = area_test / sum_abs_de
error = error / sum_abs_de
return {'stress': stress_track, 'strain': strain_track, 'error': error, 'num_its': iteration_track,
'area': area}
def sim_curve_uvc(x_sol, test_clean):
""" Returns the stress-strain approximation of the updated Voce-Chaboche material model to a given strain input.
:param np.array x_sol: Voce-Chaboche model parameters
:param DataFrame test_clean: stress-strain data
:return DataFrame: Voce-Chaboche approximation
The strain column in the DataFrame is labeled "e_true" and the stress column is labeled "Sigma_true".
"""
model_output = uvc_return_mapping(x_sol, test_clean)
strain = np.append([0.], model_output['strain'])
stress = np.append([0.], model_output['stress'])
sim_curve = pd.DataFrame(np.array([strain, stress]).transpose(), columns=['e_true', 'Sigma_true'])
return sim_curve
def error_single_test_uvc(x_sol, test_clean):
""" Returns the relative error between a test and its approximation using the updated Voce-Chaboche material model.
:param np.array x_sol: Voce-Chaboche model parameters
:param DataFrame test_clean: stress-strain data
:return float: relative error
The strain column in the DataFrame is labeled "e_true" and the stress column is labeled "Sigma_true".
"""
model_output = uvc_return_mapping(x_sol, test_clean)
return model_output['error']
def normalized_error_single_test_uvc(x_sol, test_clean):
""" Returns the error and the total area of a test and its approximation using the updated Voce-Chaboche
material model.
:param np.array x_sol: Voce-Chaboche model parameters
:param DataFrame test_clean: stress-strain data
:return list: (float) total error, (float) total area
The strain column in the DataFrame is labeled "e_true" and the stress column is labeled "Sigma_true".
"""
model_output = uvc_return_mapping(x_sol, test_clean)
return [model_output['error'], model_output['area']]
def calc_phi_total(x, data):
""" Returns the sum of the normalized relative error of the updated Voce-Chaboche material model given x.
:param np.array x: Updated Voce-Chaboche material model parameters.
:param list data: (pd.DataFrame) Stress-strain history for each test considered.
:return float: Normalized error value expressed as a percent (raw value * 100).
The normalized error is defined in de Sousa and Lignos (2017).
"""
error_total = 0.
area_total = 0.
for d in data:
error, area = normalized_error_single_test_uvc(x, d)
error_total += error
area_total += area
return np.sqrt(error_total / area_total) * 100.
def test_total_area(x, data):
""" Returns the total squared area underneath all the tests.
:param np.array x: Updated Voce-Chaboche material model parameters.
:param list data: (pd.DataFrame) Stress-strain history for each test considered.
:return float: Total squared area.
"""
area_total = 0.
for d in data:
_, area = normalized_error_single_test_uvc(x, d)
area_total += area
return area_total
def uvc_get_hessian(x, data):
""" Returns the Hessian of the material model error function for a given set of test data evaluated at x.
:param np.array x: Updated Voce-Chaboche material model parameters.
:param list data: (pd.DataFrame) Stress-strain history for each test considered.
:return np.array: Hessian matrix of the error function.
"""
def f(xi):
val = 0.
for d in data:
val += error_single_test_uvc(xi, d)
return val
hess_fun = nda.Hessian(f)
return hess_fun(x)
def uvc_consistency_metric(x_base, x_sample, data):
""" Returns the xi_2 consistency metric from de Sousa and Lignos 2019 using the updated Voce-Chaboche model.
:param np.array x_base: Updated Voce-Chaboche material model parameters from the base case.
:param np.array x_sample: Updated Voce-Chaboche material model parameters from the sample case.
:param list data: (pd.DataFrame) Stress-strain history for each test considered.
:return float: Increase in quadratic approximation from the base to the sample case.
"""
x_diff = x_sample - x_base
hess_base = uvc_get_hessian(x_base, data)
numerator = np.dot(x_diff, hess_base.dot(x_diff))
denominator = test_total_area(x_base, data)
return np.sqrt(numerator / denominator)
def uvc_tangent_modulus(x_sol, data, tol=1.0e-8, maximum_iterations=1000):
""" Returns the tangent modulus at each strain step.
:param np.array x_sol: Updated Voce-Chaboche model parameters.
:param pd.DataFrame data: stress-strain data.
:param float tol: Local Newton tolerance.
:param int maximum_iterations: maximum iterations in local Newton procedure, raises RuntimeError if exceeded.
:return np.ndarray: Tangent modulus array.
"""
if len(x_sol) < 8:
raise RuntimeError("No backstresses or using original V-C params.")
n_param_per_back = 2
n_basic_param = 6
# Get material properties
E = x_sol[0] * 1.0
sy_0 = x_sol[1] * 1.0
Q = x_sol[2] * 1.0
b = x_sol[3] * 1.0
D = x_sol[4] * 1.0
a = x_sol[5] * 1.0
# Set up backstresses
n_backstresses = int((len(x_sol) - n_basic_param) / n_param_per_back)
c_k = []
gamma_k = []
for i in range(0, n_backstresses):
c_k.append(x_sol[n_basic_param + n_param_per_back * i])
gamma_k.append(x_sol[n_basic_param + 1 + n_param_per_back * i])
# Initialize parameters
alpha_components = np.zeros(n_backstresses, dtype=object) # backstress components
strain = 0.
stress = 0.
ep_eq = 0. # equivalent plastic strain
stress_track = []
strain_track = []
strain_inc_track = []
iteration_track = []
tangent_track = []
loading = np.diff(data['e_true'])
for increment_number, strain_inc in enumerate(loading):
strain += strain_inc
alpha = np.sum(alpha_components)
yield_stress = sy_0 + Q * (1. - np.exp(-b * ep_eq)) - D * (1. - np.exp(-a * ep_eq))
trial_stress = stress + E * strain_inc
relative_stress = trial_stress - alpha
flow_dir = np.sign(relative_stress)
yield_condition = np.abs(relative_stress) - yield_stress
if yield_condition > tol:
is_converged = False
else:
is_converged = True
# Return mapping if plastic loading
ep_eq_init = ep_eq
alpha_init = alpha
consist_param = 0.
number_of_iterations = 0
while is_converged is False and number_of_iterations < maximum_iterations:
number_of_iterations += 1
# Isotropic hardening and isotropic modulus
yield_stress = sy_0 + Q * (1. - np.exp(-b * ep_eq)) - D * (1. - np.exp(-a * ep_eq))
iso_modulus = Q * b * np.exp(-b * ep_eq) - D * a * np.exp(-a * ep_eq)
# Kinematic hardening and kinematic modulus
alpha = 0.
kin_modulus = 0.
for i in range(0, n_backstresses):
e_k = np.exp(-gamma_k[i] * (ep_eq - ep_eq_init))
alpha += flow_dir * c_k[i] / gamma_k[i] + (alpha_components[i] - flow_dir * c_k[i] / gamma_k[i]) * e_k
kin_modulus += c_k[i] * e_k - flow_dir * gamma_k[i] * e_k * alpha_components[i]
delta_alpha = alpha - alpha_init
# Local Newton step
numerator = np.abs(relative_stress) - (consist_param * E + yield_stress + flow_dir * delta_alpha)
denominator = -(E + iso_modulus + kin_modulus)
consist_param = consist_param - numerator / denominator
ep_eq = ep_eq_init + consist_param
if np.abs(numerator) < tol:
is_converged = True
# Update the variables
stress = trial_stress - E * flow_dir * consist_param
for i in range(0, n_backstresses):
e_k = np.exp(-gamma_k[i] * (ep_eq - ep_eq_init))
alpha_components[i] = flow_dir * c_k[i] / gamma_k[i] \
+ (alpha_components[i] - flow_dir * c_k[i] / gamma_k[i]) * e_k
stress_track.append(stress)
strain_track.append(strain)
strain_inc_track.append(strain_inc)
iteration_track.append(number_of_iterations)
# Calculate the tangent modulus
if number_of_iterations > 0:
h_prime = 0.
for i in range(0, n_backstresses):
h_prime += c_k[i] - flow_dir * gamma_k[i] * alpha_components[i]
k_prime = Q * b * np.exp(-b * ep_eq) - D * a * np.exp(-a * ep_eq)
tangent_track.append(E * (k_prime + h_prime) / (E + k_prime + h_prime))
else:
# Elastic loading
tangent_track.append(E)
return np.append([0.], np.array(tangent_track))
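# A minimal usage sketch. Assumption: the parameter vector below
# ([E, sy_0, Q, b, D, a, C_1, gamma_1], i.e. one backstress) and the monotonic
# strain history are illustrative values, not calibrated test data.
if __name__ == '__main__':
    x_demo = np.array([200.0e3, 350.0, 120.0, 15.0, 60.0, 200.0, 20.0e3, 150.0])
    strain = np.linspace(0.0, 0.02, 201)
    # Sigma_true only feeds the error measure, so zeros suffice to generate a curve.
    demo_data = pd.DataFrame({'e_true': strain, 'Sigma_true': np.zeros_like(strain)})
    print(sim_curve_uvc(x_demo, demo_data).head())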
| 38.755495 | 119 | 0.641455 | ["MIT"] | AlbanoCastroSousa/RESSPyLab | RESSPyLab/uvc_model.py | 14,107 | Python |
import json
import logging
import re

import doi  # assumed: the python-doi package, providing find_doi_in_text
import refextract
import urlscan  # referenced by the original code for parse_text_urls
import wikipedia as wiki
import wikitextparser as wikiparse  # assumption: `wikiparse` is wikitextparser (provides .external_links)

from ..parsing import get_wiki_page_id, get_wiki_lines, get_wiki_sections

logger = logging.getLogger(__name__)

# Assumption: the original module never defined this pattern; this is a minimal
# regex for arXiv abs/pdf URLs whose first group captures the arXiv identifier.
arxiv_url_regex = re.compile(r'arxiv\.org/(?:abs|pdf)/(\d{4}\.\d{4,5}(?:v\d+)?)')
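# Minimal stand-in for a helper the original module references but does not
# define (assumption: `_serialize` dumped the summary to JSON when an outfile
# was given and returned the summary either way).
def _serialize(summary, outfile=None):
    if outfile is not None:
        with open(outfile, 'w') as f:
            json.dump(summary, f, indent=2, default=str)
    return summary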
def get_wiki_references(url, outfile=None):
"""get_wiki_references.
Extracts references from predefined sections of wiki page
Uses `urlscan`, `refextract`, `doi`, `wikipedia`, and `re` (for ArXiv URLs)
:param url: URL of wiki article to scrape
:param outfile: File to write extracted references to
"""
def _check(l):
return (not l['doi'] or l['doi'] == l['refs'][-1]['doi']) \
and (not l['arxiv'] or l['arxiv'] == l['refs'][-1]['arxiv'])
page = wiki.page(get_wiki_page_id(url))
sections = get_wiki_sections(page.content)
lines = sum([get_wiki_lines(s, predicate=any) for s in sections.values()], [])
links = sum([wikiparse.parse(s).external_links for s in sections.values()], [])
summary = sum([
[
{
'raw': l,
'links': urlscan.parse_text_urls(l),
'refs': refextract.extract_references_from_string(l),
'doi': doi.find_doi_in_text(l),
'arxiv': m.group(1) if (m := arxiv_url_regex.search(l)) is not None else None
} for l in get_wiki_lines(s, predicate=any)
] for s in sections.values()
])
failed = [ld for ld in summary if not _check(ld)]
if any(failed):
logger.warning('Consistency check failed for the following lines: {}'.format(failed))
return _serialize(summary, outfile)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 41 | 94 | 0.622593 | ["MIT"] | antimike/citation-scraper | scraper/apis/wikipedia.py | 1,558 | Python |
from armulator.armv6.opcodes.abstract_opcodes.mov_register_thumb import MovRegisterThumb
from armulator.armv6.opcodes.opcode import Opcode
class MovRegisterThumbT2(MovRegisterThumb, Opcode):
def __init__(self, instruction, m, d):
Opcode.__init__(self, instruction)
MovRegisterThumb.__init__(self, True, m, d)
def is_pc_changing_opcode(self):
return self.d == 15
@staticmethod
def from_bitarray(instr, processor):
rd = instr[13:16]
rm = instr[10:13]
if processor.in_it_block():
print "unpredictable"
else:
return MovRegisterThumbT2(instr, **{"m": rm.uint, "d": rd.uint})
| 31.904762 | 88 | 0.674627 | [
"MIT"
] | AhmedMounir/armulator | armulator/armv6/opcodes/thumb_instruction_set/thumb_instruction_set_encoding_16_bit/thumb_shift_immediate_add_subtract_move_and_compare/mov_register_thumb_t2.py | 670 | Python |
class DefaultConfig(object):
DEBUG = False
JSONIFY_PRETTYPRINT_REGULAR = False
SQLALCHEMY_TRACK_MODIFICATIONS = False
API_PREFIX = '/api'
| 25.666667 | 42 | 0.74026 | [
"MIT"
] | sz-piotr/fioletowe-pomarancze | server/src/config.py | 154 | Python |
from concurrent.futures.process import ProcessPoolExecutor
import api.Config
import api.middleware
from api.Config import app
from api.routers import (feedback, hiscore, label, legacy, legacy_debug,
player, prediction, report, scraper)
app.include_router(hiscore.router)
app.include_router(player.router)
app.include_router(prediction.router)
app.include_router(feedback.router)
app.include_router(report.router)
app.include_router(legacy.router)
app.include_router(scraper.router)
app.include_router(label.router)
app.include_router(legacy_debug.router)
@app.get("/")
async def root():
return {"message": "Hello World"}
# @app.on_event("startup")
# async def startup_event():
# app.state.executor = ProcessPoolExecutor()
# @app.on_event("shutdown")
# async def on_shutdown():
# app.state.executor.shutdown()
| 25.878788 | 72 | 0.757611 | [
"BSD-2-Clause"
] | ThorntonMatthewD/Bot-Detector-Core-Files | api/app.py | 854 | Python |
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
import os
import subprocess
OTBN_DIR = os.path.join(os.path.dirname(__file__), '../../..')
UTIL_DIR = os.path.join(OTBN_DIR, 'util')
SIM_DIR = os.path.join(os.path.dirname(__file__), '..')
def asm_and_link_one_file(asm_path: str, work_dir: str) -> str:
'''Assemble and link file at asm_path in work_dir.
Returns the path to the resulting ELF
'''
otbn_as = os.path.join(UTIL_DIR, 'otbn-as')
otbn_ld = os.path.join(UTIL_DIR, 'otbn-ld')
obj_path = os.path.join(work_dir, 'tst.o')
elf_path = os.path.join(work_dir, 'tst')
subprocess.run([otbn_as, '-o', obj_path, asm_path], check=True)
subprocess.run([otbn_ld, '-o', elf_path, obj_path], check=True)
return elf_path
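# Typical use from a test (hypothetical paths):
#   elf_path = asm_and_link_one_file('smoke/simple.s', tmp_path)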
| 30.607143 | 74 | 0.690782 | [
"Apache-2.0"
] | OneToughMonkey/opentitan | hw/ip/otbn/dv/otbnsim/test/testutil.py | 857 | Python |
import numpy as np
from .VariableUnitTest import VariableUnitTest
from gwlfe.MultiUse_Fxns.Discharge import UrbanQTotal
class TestUrbanQTotal(VariableUnitTest):
def test_UrbanQTotal(self):
z = self.z
np.testing.assert_array_almost_equal(
UrbanQTotal.UrbanQTotal_f(z.NYrs, z.DaysMonth, z.NRur, z.NUrb, z.Temp, z.InitSnow_0, z.Prec, z.Area,
z.CNI_0,
z.AntMoist_0, z.Grow_0, z.CNP_0, z.Imper, z.ISRR, z.ISRA),
UrbanQTotal.UrbanQTotal(z.NYrs, z.DaysMonth, z.NRur, z.NUrb, z.Temp, z.InitSnow_0, z.Prec, z.Area, z.CNI_0,
z.AntMoist_0, z.Grow_0, z.CNP_0, z.Imper, z.ISRR, z.ISRA), decimal=7)
| 43.823529 | 119 | 0.606711 | [
"Apache-2.0"
] | rajadain/gwlf-e | test/unittests/test_UrbanQTotal.py | 745 | Python |
# Find, Remove, Find
"""Return a tuple of the indices of the two smallest values in list L.
>>> items = [809, 834, 477, 478, 307, 122, 96, 102, 324, 476]
>>> find_two_smallest(items)
(6, 7)
>>> items == [809, 834, 477, 478, 307, 122, 96, 102, 324, 476]
True
"""
from typing import List, Tuple
def find_two_smallest(L:List[float]) -> Tuple[int, int]:
""" (see above) """
# Find the index of the minimum and remove that item
smallest = min(L)
min1 = L.index(smallest)
L.remove(smallest)
# Find the index of the new minimum item in the list
next_smallest = min(L)
min2 = L.index(next_smallest)
# Put smallest back into L
L.insert(min1, smallest)
# Fix min2 in case it was affected by the removal and reinsertion:
if min1 <= min2:
        min2 += 1
return (min1, min2)
if __name__ == '__main__':
import doctest
doctest.testmod()
print(find_two_smallest([0, 1, 3, 2, 5, 6, 1]))
| 24.3 | 70 | 0.614198 | [
"MIT"
] | YordanIH/Intro_to_CS_w_Python | chapter12/examples/example02.py | 972 | Python |
from __future__ import absolute_import, print_function
import logging
import bokeh.server.tornado as tornado
from bokeh.application import Application
from bokeh.client import pull_session
from bokeh.server.views.static_handler import StaticHandler
from .utils import ManagedServerLoop, url
logging.basicConfig(level=logging.DEBUG)
def test_check_whitelist_rejects_port_mismatch():
assert False == tornado.check_whitelist("foo:100", ["foo:101", "foo:102"])
def test_check_whitelist_rejects_name_mismatch():
assert False == tornado.check_whitelist("foo:100", ["bar:100", "baz:100"])
def test_check_whitelist_accepts_name_port_match():
assert True == tornado.check_whitelist("foo:100", ["foo:100", "baz:100"])
def test_check_whitelist_accepts_implicit_port_80():
assert True == tornado.check_whitelist("foo", ["foo:80"])
def test_check_whitelist_accepts_all_on_star():
assert True == tornado.check_whitelist("192.168.0.1", ['*'])
assert True == tornado.check_whitelist("192.168.0.1:80", ['*'])
assert True == tornado.check_whitelist("192.168.0.1:5006", ['*'])
assert True == tornado.check_whitelist("192.168.0.1:80", ['*:80'])
assert False == tornado.check_whitelist("192.168.0.1:80", ['*:81'])
assert True == tornado.check_whitelist("192.168.0.1:5006", ['*:*'])
assert True == tornado.check_whitelist("192.168.0.1", ['192.168.0.*'])
assert True == tornado.check_whitelist("192.168.0.1:5006", ['192.168.0.*'])
assert False == tornado.check_whitelist("192.168.1.1", ['192.168.0.*'])
assert True == tornado.check_whitelist("foobarbaz", ['*'])
assert True == tornado.check_whitelist("192.168.0.1", ['192.168.0.*'])
assert False == tornado.check_whitelist("192.168.1.1", ['192.168.0.*'])
assert False == tornado.check_whitelist("192.168.0.1", ['192.168.0.*:5006'])
assert True == tornado.check_whitelist("192.168.0.1", ['192.168.0.*:80'])
assert True == tornado.check_whitelist("foobarbaz", ['*'])
assert True == tornado.check_whitelist("foobarbaz", ['*:*'])
assert True == tornado.check_whitelist("foobarbaz", ['*:80'])
assert False == tornado.check_whitelist("foobarbaz", ['*:5006'])
assert True == tornado.check_whitelist("foobarbaz:5006", ['*'])
assert True == tornado.check_whitelist("foobarbaz:5006", ['*:*'])
assert True == tornado.check_whitelist("foobarbaz:5006", ['*:5006'])
def test_default_resources():
application = Application()
with ManagedServerLoop(application) as server:
r = server._tornado.resources()
assert r.mode == "server"
assert r.root_url == ""
assert r.path_versioner == StaticHandler.append_version
with ManagedServerLoop(application, prefix="/foo/") as server:
r = server._tornado.resources()
assert r.mode == "server"
assert r.root_url == "/foo/"
assert r.path_versioner == StaticHandler.append_version
with ManagedServerLoop(application, prefix="foo/") as server:
r = server._tornado.resources()
assert r.mode == "server"
assert r.root_url == "/foo/"
assert r.path_versioner == StaticHandler.append_version
with ManagedServerLoop(application, prefix="foo") as server:
r = server._tornado.resources()
assert r.mode == "server"
assert r.root_url == "/foo/"
assert r.path_versioner == StaticHandler.append_version
with ManagedServerLoop(application, prefix="/foo") as server:
r = server._tornado.resources()
assert r.mode == "server"
assert r.root_url == "/foo/"
assert r.path_versioner == StaticHandler.append_version
with ManagedServerLoop(application, prefix="/foo/bar") as server:
r = server._tornado.resources()
assert r.mode == "server"
assert r.root_url == "/foo/bar/"
assert r.path_versioner == StaticHandler.append_version
def test_default_app_paths():
app = Application()
t = tornado.BokehTornado({}, "", [])
assert t.app_paths == set()
t = tornado.BokehTornado({"/": app}, "", [])
assert t.app_paths == { "/" }
t = tornado.BokehTornado({"/": app, "/foo": app}, "", [])
assert t.app_paths == { "/", "/foo"}
# tried to use capsys to test what's actually logged and it wasn't
# working, in the meantime at least this tests that log_stats
# doesn't crash in various scenarios
def test_log_stats():
application = Application()
with ManagedServerLoop(application) as server:
server._tornado.log_stats()
session1 = pull_session(session_id='session1',
url=url(server),
io_loop=server.io_loop)
session2 = pull_session(session_id='session2',
url=url(server),
io_loop=server.io_loop)
server._tornado.log_stats()
session1.close()
session2.close()
server._tornado.log_stats()
| 42.732759 | 80 | 0.65463 | [
"BSD-3-Clause"
] | Maluuba/bokeh | bokeh/server/tests/test_tornado.py | 4,957 | Python |
#!/usr/bin/env python
# Copyright 2015 Luminal, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import csv
import json
import operator
import os
import os.path
import sys
import time
import re
import boto3
import botocore.exceptions
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
try:
import yaml
NO_YAML = False
except ImportError:
NO_YAML = True
from base64 import b64encode, b64decode
from boto3.dynamodb.conditions import Attr
from Crypto.Cipher import AES
from Crypto.Hash import SHA256
from Crypto.Hash.HMAC import HMAC
from Crypto.Util import Counter
DEFAULT_REGION = "us-east-1"
PAD_LEN = 19 # number of digits in sys.maxint
WILDCARD_CHAR = "*"
class KmsError(Exception):
def __init__(self, value=""):
self.value = "KMS ERROR: " + value if value is not "" else "KMS ERROR"
def __str__(self):
return self.value
class IntegrityError(Exception):
def __init__(self, value=""):
self.value = "INTEGRITY ERROR: " + value if value is not "" else \
"INTEGRITY ERROR"
def __str__(self):
return self.value
class ItemNotFound(Exception):
pass
class KeyValueToDictionary(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
setattr(namespace,
self.dest,
dict((x[0], x[1]) for x in values))
def printStdErr(s):
sys.stderr.write(str(s))
sys.stderr.write("\n")
def key_value_pair(string):
output = string.split('=')
if len(output) != 2:
msg = "%r is not the form of \"key=value\"" % string
raise argparse.ArgumentTypeError(msg)
return output
def expand_wildcard(string, secrets):
prog = re.compile('^' + string.replace(WILDCARD_CHAR, '.*') + '$')
output = []
for secret in secrets:
if prog.search(secret) is not None:
output.append(secret)
return output
def value_or_filename(string):
# argparse running on old version of python (<2.7) will pass an empty
# string to this function before it passes the actual value.
    # If an empty string is passed in, just return an empty string
if string == "":
return ""
if string[0] == "@":
filename = string[1:]
try:
with open(os.path.expanduser(filename)) as f:
output = f.read()
except IOError as e:
raise argparse.ArgumentTypeError("Unable to read file %s" %
filename)
else:
output = string
return output
def csv_dump(dictionary):
csvfile = StringIO()
csvwriter = csv.writer(csvfile)
for key in dictionary:
csvwriter.writerow([key, dictionary[key]])
return csvfile.getvalue()
def paddedInt(i):
'''
return a string that contains `i`, left-padded with 0's up to PAD_LEN digits
'''
i_str = str(i)
pad = PAD_LEN - len(i_str)
return (pad * "0") + i_str
def getHighestVersion(name, region="us-east-1", table="credential-store"):
'''
Return the highest version of `name` in the table
'''
dynamodb = boto3.resource('dynamodb', region_name=region)
secrets = dynamodb.Table(table)
response = secrets.query(Limit=1,
ScanIndexForward=False,
ConsistentRead=True,
KeyConditionExpression=boto3.dynamodb.conditions.Key("name").eq(name),
ProjectionExpression="version")
if response["Count"] == 0:
return 0
return response["Items"][0]["version"]
def listSecrets(region="us-east-1", table="credential-store"):
'''
do a full-table scan of the credential-store,
and return the names and versions of every credential
'''
dynamodb = boto3.resource('dynamodb', region_name=region)
secrets = dynamodb.Table(table)
response = secrets.scan(ProjectionExpression="#N, version",
ExpressionAttributeNames={"#N": "name"})
return response["Items"]
def putSecret(name, secret, version, kms_key="alias/credstash",
region="us-east-1", table="credential-store", context=None):
'''
put a secret called `name` into the secret-store,
protected by the key kms_key
'''
if not context:
context = {}
kms = boto3.client('kms', region_name=region)
    # generate a 64 byte key.
# Half will be for data encryption, the other half for HMAC
try:
kms_response = kms.generate_data_key(KeyId=kms_key, EncryptionContext=context, NumberOfBytes=64)
except:
raise KmsError("Could not generate key using KMS key %s" % kms_key)
data_key = kms_response['Plaintext'][:32]
hmac_key = kms_response['Plaintext'][32:]
wrapped_key = kms_response['CiphertextBlob']
enc_ctr = Counter.new(128)
encryptor = AES.new(data_key, AES.MODE_CTR, counter=enc_ctr)
c_text = encryptor.encrypt(secret)
# compute an HMAC using the hmac key and the ciphertext
hmac = HMAC(hmac_key, msg=c_text, digestmod=SHA256)
b64hmac = hmac.hexdigest()
dynamodb = boto3.resource('dynamodb', region_name=region)
secrets = dynamodb.Table(table)
data = {}
data['name'] = name
data['version'] = version if version != "" else paddedInt(1)
data['key'] = b64encode(wrapped_key).decode('utf-8')
data['contents'] = b64encode(c_text).decode('utf-8')
data['hmac'] = b64hmac
return secrets.put_item(Item=data, ConditionExpression=Attr('name').not_exists())
def getAllSecrets(version="", region="us-east-1",
table="credential-store", context=None):
'''
fetch and decrypt all secrets
'''
output = {}
secrets = listSecrets(region, table)
for credential in set([x["name"] for x in secrets]):
try:
output[credential] = getSecret(credential,
version,
region,
table,
context)
except:
pass
return output
def getSecret(name, version="", region="us-east-1",
table="credential-store", context=None):
'''
fetch and decrypt the secret called `name`
'''
if not context:
context = {}
dynamodb = boto3.resource('dynamodb', region_name=region)
secrets = dynamodb.Table(table)
if version == "":
# do a consistent fetch of the credential with the highest version
response = secrets.query(Limit=1,
ScanIndexForward=False,
ConsistentRead=True,
KeyConditionExpression=boto3.dynamodb.conditions.Key("name").eq(name))
if response["Count"] == 0:
raise ItemNotFound("Item {'name': '%s'} couldn't be found." % name)
material = response["Items"][0]
else:
response = secrets.get_item(Key={"name": name, "version": version})
if "Item" not in response:
raise ItemNotFound("Item {'name': '%s', 'version': '%s'} couldn't be found." % (name, version))
material = response["Item"]
kms = boto3.client('kms', region_name=region)
# Check the HMAC before we decrypt to verify ciphertext integrity
try:
kms_response = kms.decrypt(CiphertextBlob=b64decode(material['key']), EncryptionContext=context)
except botocore.exceptions.ClientError as e:
if e.response["Error"]["Code"] == "InvalidCiphertextException":
if context is None:
msg = ("Could not decrypt hmac key with KMS. The credential may "
"require that an encryption context be provided to decrypt "
"it.")
else:
msg = ("Could not decrypt hmac key with KMS. The encryption "
"context provided may not match the one used when the "
"credential was stored.")
else:
msg = "Decryption error %s" % e
raise KmsError(msg)
except Exception as e:
raise KmsError("Decryption error %s" % e)
key = kms_response['Plaintext'][:32]
hmac_key = kms_response['Plaintext'][32:]
hmac = HMAC(hmac_key, msg=b64decode(material['contents']),
digestmod=SHA256)
if hmac.hexdigest() != material['hmac']:
raise IntegrityError("Computed HMAC on %s does not match stored HMAC"
% name)
dec_ctr = Counter.new(128)
decryptor = AES.new(key, AES.MODE_CTR, counter=dec_ctr)
plaintext = decryptor.decrypt(b64decode(material['contents'])).decode("utf-8")
return plaintext
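# Illustrative round trip (hypothetical names; assumes AWS credentials, the
# DynamoDB table, and a KMS key aliased `alias/credstash` already exist):
#
#   putSecret("db.password", "hunter2", paddedInt(1))
#   getSecret("db.password")  # -> "hunter2"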
def deleteSecrets(name, region="us-east-1", table="credential-store"):
dynamodb = boto3.resource('dynamodb', region_name=region)
secrets = dynamodb.Table(table)
response = secrets.scan(FilterExpression=boto3.dynamodb.conditions.Attr("name").eq(name),
ProjectionExpression="#N, version",
ExpressionAttributeNames={"#N": "name"})
for secret in response["Items"]:
print("Deleting %s -- version %s" % (secret["name"], secret["version"]))
secrets.delete_item(Key=secret)
def createDdbTable(region="us-east-1", table="credential-store"):
'''
create the secret store table in DDB in the specified region
'''
dynamodb = boto3.resource("dynamodb", region_name=region)
if table in (t.name for t in dynamodb.tables.all()):
print("Credential Store table already exists")
return
print("Creating table...")
response = dynamodb.create_table(
TableName=table,
KeySchema=[
{
"AttributeName": "name",
"KeyType": "HASH",
},
{
"AttributeName": "version",
"KeyType": "RANGE",
}
],
AttributeDefinitions=[
{
"AttributeName": "name",
"AttributeType": "S",
},
{
"AttributeName": "version",
"AttributeType": "S",
},
],
ProvisionedThroughput={
"ReadCapacityUnits": 1,
"WriteCapacityUnits": 1,
}
)
print("Waiting for table to be created...")
client = boto3.client("dynamodb", region_name=region)
client.get_waiter("table_exists").wait(TableName=table)
print("Table has been created. "
"Go read the README about how to create your KMS key")
def main():
parsers = {}
parsers['super'] = argparse.ArgumentParser(
description="A credential/secret storage system")
parsers['super'].add_argument("-r", "--region",
help="the AWS region in which to operate."
"If a region is not specified, credstash "
"will use the value of the "
"AWS_DEFAULT_REGION env variable, "
"or if that is not set, us-east-1")
parsers['super'].add_argument("-t", "--table", default="credential-store",
help="DynamoDB table to use for "
"credential storage")
subparsers = parsers['super'].add_subparsers(help='Try commands like '
'"{name} get -h" or "{name}'
'put --help" to get each'
'sub command\'s options'
.format(name=os.path.basename(
__file__)))
action = 'delete'
parsers[action] = subparsers.add_parser(action,
                                            help="Delete a credential "
                                                 "from the store")
parsers[action].add_argument("credential", type=str,
help="the name of the credential to delete")
parsers[action].set_defaults(action=action)
action = 'get'
parsers[action] = subparsers.add_parser(action, help="Get a credential "
"from the store")
parsers[action].add_argument("credential", type=str,
help="the name of the credential to get."
"Using the wildcard character '%s' will "
"search for credentials that match the "
"pattern" % WILDCARD_CHAR)
parsers[action].add_argument("context", type=key_value_pair,
action=KeyValueToDictionary, nargs='*',
help="encryption context key/value pairs "
"associated with the credential in the form "
"of \"key=value\"")
parsers[action].add_argument("-n", "--noline", action="store_true",
help="Don't append newline to returned "
"value (useful in scripts or with "
"binary files)")
parsers[action].add_argument("-v", "--version", default="",
help="Get a specific version of the "
"credential (defaults to the latest version)")
parsers[action].set_defaults(action=action)
action = 'getall'
parsers[action] = subparsers.add_parser(action,
help="Get all credentials from "
"the store")
parsers[action].add_argument("context", type=key_value_pair,
action=KeyValueToDictionary, nargs='*',
help="encryption context key/value pairs "
"associated with the credential in the form "
"of \"key=value\"")
parsers[action].add_argument("-v", "--version", default="",
help="Get a specific version of the "
"credential (defaults to the latest version)")
parsers[action].add_argument("-f", "--format", default="json",
choices=["json", "csv"] +
([] if NO_YAML else ["yaml"]),
help="Output format. json(default) " +
("" if NO_YAML else "yaml ") + "or csv.")
parsers[action].set_defaults(action=action)
action = 'list'
parsers[action] = subparsers.add_parser(action,
help="list credentials and "
"their versions")
parsers[action].set_defaults(action=action)
action = 'put'
parsers[action] = subparsers.add_parser(action,
help="Put a credential into "
"the store")
parsers[action].add_argument("credential", type=str,
help="the name of the credential to store")
parsers[action].add_argument("value", type=value_or_filename,
help="the value of the credential to store "
"or, if beginning with the \"@\" character, "
"the filename of the file containing "
"the value", default="")
parsers[action].add_argument("context", type=key_value_pair,
action=KeyValueToDictionary, nargs='*',
help="encryption context key/value pairs "
"associated with the credential in the form "
"of \"key=value\"")
parsers[action].add_argument("-k", "--key", default="alias/credstash",
help="the KMS key-id of the master key "
"to use. See the README for more "
"information. Defaults to alias/credstash")
parsers[action].add_argument("-v", "--version", default="",
help="Put a specific version of the "
"credential (update the credential; "
"defaults to version `1`).")
parsers[action].add_argument("-a", "--autoversion", action="store_true",
help="Automatically increment the version of "
"the credential to be stored. This option "
"causes the `-v` flag to be ignored. "
"(This option will fail if the currently stored "
"version is not numeric.)")
parsers[action].set_defaults(action=action)
action = 'setup'
parsers[action] = subparsers.add_parser(action,
help='setup the credential store')
parsers[action].set_defaults(action=action)
args = parsers['super'].parse_args()
region = os.getenv(
"AWS_DEFAULT_REGION", DEFAULT_REGION) if not args.region \
else args.region
if "action" in vars(args):
if args.action == "delete":
deleteSecrets(args.credential, region=region, table=args.table)
return
if args.action == "list":
credential_list = listSecrets(region=region, table=args.table)
if credential_list:
# print list of credential names and versions,
# sorted by name and then by version
max_len = max([len(x["name"]) for x in credential_list])
for cred in sorted(credential_list,
key=operator.itemgetter("name", "version")):
print("{0:{1}} -- version {2:>}".format(
cred["name"], max_len, cred["version"]))
else:
return
if args.action == "put":
if args.autoversion:
latestVersion = getHighestVersion(args.credential, region,
args.table)
try:
version = paddedInt(int(latestVersion) + 1)
except ValueError:
printStdErr("Can not autoincrement version. The current "
"version: %s is not an int" % latestVersion)
return
else:
version = args.version
try:
if putSecret(args.credential, args.value, version,
kms_key=args.key, region=region, table=args.table,
context=args.context):
print("{0} has been stored".format(args.credential))
except KmsError as e:
printStdErr(e)
except botocore.exceptions.ClientError as e:
if e.response["Error"]["Code"] == "ConditionalCheckFailedException":
latestVersion = getHighestVersion(args.credential, region,
args.table)
printStdErr("%s version %s is already in the credential store. "
"Use the -v flag to specify a new version" %
(args.credential, latestVersion))
return
if args.action == "get":
try:
if WILDCARD_CHAR in args.credential:
names = expand_wildcard(args.credential,
[x["name"]
for x
in listSecrets(region=region,
table=args.table)])
print(json.dumps(dict((name,
getSecret(name,
args.version,
region=region,
table=args.table,
context=args.context))
for name in names)))
else:
sys.stdout.write(getSecret(args.credential, args.version,
region=region, table=args.table,
context=args.context))
if not args.noline:
sys.stdout.write("\n")
except ItemNotFound as e:
printStdErr(e)
except KmsError as e:
printStdErr(e)
except IntegrityError as e:
printStdErr(e)
return
if args.action == "getall":
secrets = getAllSecrets(args.version,
region=region,
table=args.table,
context=args.context)
if args.format == "json":
output_func = json.dumps
output_args = {"sort_keys": True,
"indent": 4,
"separators": (',', ': ')}
elif not NO_YAML and args.format == "yaml":
output_func = yaml.dump
output_args = {"default_flow_style": False}
elif args.format == 'csv':
output_func = csv_dump
output_args = {}
print(output_func(secrets, **output_args))
return
if args.action == "setup":
createDdbTable(region=region, table=args.table)
return
else:
parsers['super'].print_help()
if __name__ == '__main__':
main()
| 40.668459 | 107 | 0.524567 | [
"Apache-2.0"
] | traveloka/credstash | credstash.py | 22,693 | Python |
host = "localhost"
port = 1111
max_users = 100
buffer_size = 1024
| 11.166667 | 18 | 0.716418 | [
"MIT"
] | iskrich/simplechat | params.py | 67 | Python |
"""
Django settings for lab01 project.
Generated by 'django-admin startproject' using Django 3.2.6.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-7-8hv&pc-$$1)7eiiy2m#m^o6cx%oqqv9@z071ec0%218iwt0!'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'lab01.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'lab01.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| 26.674603 | 92 | 0.675097 | [
"MIT"
] | car1os1/TECSUP-DAE-2021-2-B | lab01/lab01/settings.py | 3,361 | Python |
# Copyright 2018, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Python tests originally created or extracted from other peoples work. The
# parts were too small to be protected.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
def tupleUnpacking():
return (*a, b, *c)
def listUnpacking():
return [*a, b, *c]
def setUnpacking():
return {*a, b, *c}
def dictUnpacking():
return {"a" : 1, **d}
a = range(3)
b = 5
c = range(8,10)
d = {"a" : 2}
print("Tuple unpacked", tupleUnpacking())
print("List unpacked", listUnpacking())
print("Set unpacked", setUnpacking())
print("Dict unpacked", dictUnpacking())
non_iterable = 2.0
def tupleUnpackingError():
try:
return (*a,*non_iterable,*c)
except Exception as e:
return e
def listUnpackingError():
try:
return [*a,*non_iterable,*c]
except Exception as e:
return e
def setUnpackingError():
try:
return {*a,*non_iterable,*c}
except Exception as e:
return e
def dictUnpackingError():
try:
return {"a" : 1, **non_iterable}
except Exception as e:
return e
print("Tuple unpacked error:", tupleUnpackingError())
print("List unpacked error:", listUnpackingError())
print("Set unpacked error:", setUnpackingError())
print("Dict unpacked error:", dictUnpackingError())
| 25.561644 | 79 | 0.665595 | [
"Apache-2.0"
] | 4O4/Nuitka | tests/basics/Unpacking35.py | 1,866 | Python |
# Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
import torch.utils.checkpoint as cp
from mmcv.cnn import ConvModule
from mmcv.runner import BaseModule
from torch.nn.modules.batchnorm import _BatchNorm
from mmcls.models.utils import make_divisible
from ..builder import BACKBONES
from .base_backbone import BaseBackbone
class InvertedResidual(BaseModule):
"""InvertedResidual block for MobileNetV2.
Args:
in_channels (int): The input channels of the InvertedResidual block.
out_channels (int): The output channels of the InvertedResidual block.
stride (int): Stride of the middle (first) 3x3 convolution.
expand_ratio (int): adjusts number of channels of the hidden layer
in InvertedResidual by this amount.
conv_cfg (dict, optional): Config dict for convolution layer.
Default: None, which means using conv2d.
norm_cfg (dict): Config dict for normalization layer.
Default: dict(type='BN').
act_cfg (dict): Config dict for activation layer.
            Default: dict(type='ReLU').
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed. Default: False.
Returns:
Tensor: The output tensor
"""
def __init__(self,
in_channels,
out_channels,
stride,
expand_ratio,
conv_cfg=None,
norm_cfg=dict(type='BN'),
act_cfg=dict(type='ReLU'),
with_cp=False,
init_cfg=None):
super(InvertedResidual, self).__init__(init_cfg)
self.stride = stride
assert stride in [1, 2], f'stride must in [1, 2]. ' \
f'But received {stride}.'
self.with_cp = with_cp
self.use_res_connect = self.stride == 1 and in_channels == out_channels
hidden_dim = int(round(in_channels * expand_ratio))
layers = []
if expand_ratio != 1:
layers.append(
ConvModule(
in_channels=in_channels,
out_channels=hidden_dim,
kernel_size=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg))
layers.extend([
ConvModule(
in_channels=hidden_dim,
out_channels=hidden_dim,
kernel_size=3,
stride=stride,
padding=1,
groups=hidden_dim,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg),
ConvModule(
in_channels=hidden_dim,
out_channels=out_channels,
kernel_size=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=None)
])
self.conv = nn.Sequential(*layers)
def forward(self, x):
def _inner_forward(x):
if self.use_res_connect:
return x + self.conv(x)
else:
return self.conv(x)
if self.with_cp and x.requires_grad:
out = cp.checkpoint(_inner_forward, x)
else:
out = _inner_forward(x)
return out
@BACKBONES.register_module()
class MobileNetV2(BaseBackbone):
"""MobileNetV2 backbone.
Args:
widen_factor (float): Width multiplier, multiply number of
channels in each layer by this amount. Default: 1.0.
out_indices (None or Sequence[int]): Output from which stages.
Default: (7, ).
frozen_stages (int): Stages to be frozen (all param fixed).
Default: -1, which means not freezing any parameters.
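        deep_stem (bool): Whether to prepend an extra 3x3, stride-1 stem
            convolution (3 -> 16 channels) before the first stride-2 conv.
            Default: False.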
conv_cfg (dict, optional): Config dict for convolution layer.
Default: None, which means using conv2d.
norm_cfg (dict): Config dict for normalization layer.
Default: dict(type='BN').
act_cfg (dict): Config dict for activation layer.
            Default: dict(type='ReLU').
norm_eval (bool): Whether to set norm layers to eval mode, namely,
freeze running stats (mean and var). Note: Effect on Batch Norm
and its variants only. Default: False.
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed. Default: False.
"""
# Parameters to build layers. 4 parameters are needed to construct a
# layer, from left to right: expand_ratio, channel, num_blocks, stride.
arch_settings = [[1, 16, 1, 1], [6, 24, 2, 2], [6, 32, 3, 2],
[6, 64, 4, 2], [6, 96, 3, 1], [6, 160, 3, 2],
[6, 320, 1, 1]]
def __init__(self,
widen_factor=1.,
out_indices=(7, ),
frozen_stages=-1,
deep_stem=False,
conv_cfg=None,
norm_cfg=dict(type='BN'),
act_cfg=dict(type='ReLU'),
norm_eval=False,
with_cp=False,
init_cfg=[
dict(type='Kaiming', layer=['Conv2d']),
dict(
type='Constant',
val=1,
layer=['_BatchNorm', 'GroupNorm'])
]):
super(MobileNetV2, self).__init__(init_cfg)
self.widen_factor = widen_factor
self.out_indices = out_indices
for index in out_indices:
if index not in range(0, 8):
raise ValueError('the item in out_indices must in '
f'range(0, 8). But received {index}')
if frozen_stages not in range(-1, 8):
raise ValueError('frozen_stages must be in range(-1, 8). '
f'But received {frozen_stages}')
self.frozen_stages = frozen_stages
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.act_cfg = act_cfg
self.norm_eval = norm_eval
self.with_cp = with_cp
self.in_channels = make_divisible(32 * widen_factor, 8)
if deep_stem:
            self.conv0 = ConvModule(
                in_channels=3,
                out_channels=16,
                kernel_size=3,
                stride=1,
                padding=1,
                conv_cfg=self.conv_cfg,
                norm_cfg=self.norm_cfg,
                act_cfg=self.act_cfg)
in_channels_ = 16
else:
in_channels_ = 3
self.conv0 = nn.Sequential()
self.conv1 = ConvModule(
in_channels=in_channels_,
out_channels=self.in_channels,
kernel_size=3,
stride=2,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg)
self.layers = []
for i, layer_cfg in enumerate(self.arch_settings):
expand_ratio, channel, num_blocks, stride = layer_cfg
out_channels = make_divisible(channel * widen_factor, 8)
inverted_res_layer = self.make_layer(
out_channels=out_channels,
num_blocks=num_blocks,
stride=stride,
expand_ratio=expand_ratio)
layer_name = f'layer{i + 1}'
self.add_module(layer_name, inverted_res_layer)
self.layers.append(layer_name)
if widen_factor > 1.0:
self.out_channel = int(1280 * widen_factor)
else:
self.out_channel = 1280
layer = ConvModule(
in_channels=self.in_channels,
out_channels=self.out_channel,
kernel_size=1,
stride=1,
padding=0,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg)
self.add_module('conv2', layer)
self.layers.append('conv2')
def make_layer(self, out_channels, num_blocks, stride, expand_ratio):
"""Stack InvertedResidual blocks to build a layer for MobileNetV2.
Args:
out_channels (int): out_channels of block.
num_blocks (int): number of blocks.
stride (int): stride of the first block. Default: 1
expand_ratio (int): Expand the number of channels of the
hidden layer in InvertedResidual by this ratio. Default: 6.
"""
layers = []
for i in range(num_blocks):
if i >= 1:
stride = 1
layers.append(
InvertedResidual(
self.in_channels,
out_channels,
stride,
expand_ratio=expand_ratio,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg,
with_cp=self.with_cp))
self.in_channels = out_channels
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv0(x)
x = self.conv1(x)
outs = []
for i, layer_name in enumerate(self.layers):
layer = getattr(self, layer_name)
x = layer(x)
if i in self.out_indices:
outs.append(x)
return tuple(outs)
def _freeze_stages(self):
if self.frozen_stages >= 0:
for param in self.conv1.parameters():
param.requires_grad = False
for i in range(1, self.frozen_stages + 1):
layer = getattr(self, f'layer{i}')
layer.eval()
for param in layer.parameters():
param.requires_grad = False
def train(self, mode=True):
super(MobileNetV2, self).train(mode)
self._freeze_stages()
if mode and self.norm_eval:
for m in self.modules():
if isinstance(m, _BatchNorm):
m.eval()
| 36.639706 | 173 | 0.553783 | [
"Apache-2.0"
] | ChaseMonsterAway/mmclassification | mmcls/models/backbones/mobilenet_v2.py | 9,966 | Python |
import glob
import time
from os import path
from flask import Blueprint, jsonify, current_app, request, Response, json
from flask_login import login_required
from .. import pz_server_state
from ..services.power_actions_service import is_valid_power_action, execute_action
from ..services.server_options_service import read_config, save_config, prepared_config_to_view, formatted_config_lines
from ..services.server_status_service import get_server_status
from ..utils.resources_functions import server_resources
server_blueprint = Blueprint('server', __name__, url_prefix='/server')
@server_blueprint.route('/status')
@login_required
def status():
rcon_host = current_app.config['RCON_HOST']
rcon_password = current_app.config['RCON_PASSWORD']
server_state, players = get_server_status(rcon_host, rcon_password)
return jsonify(
server_state=server_state,
online_players=players,
server_resources=server_resources()
)
@server_blueprint.route('/power-actions', methods=['POST'])
@login_required
def power_actions():
request_data = request.get_json()
pz_user_home = current_app.config["PZ_USER_HOME"]
power_action = request_data.get("power_action", None)
if not is_valid_power_action(power_action):
return jsonify(error="Unknown action"), 400
if not execute_action(power_action, pz_user_home):
return '', 500
return jsonify(server_state=pz_server_state.state)
def get_config(pz_server_config):
config = read_config(pz_server_config)
return {
"WorkshopItems": config["WorkshopItems"],
"Mods": config["Mods"]
}
@server_blueprint.route('/options')
@login_required
def list_workshop_items():
export_config = get_config(current_app.config['PZ_SERVER_CONFIG'])
return jsonify(
WorkshopItems=prepared_config_to_view(export_config["WorkshopItems"]),
Mods=prepared_config_to_view(export_config["Mods"])
)
@server_blueprint.route('/options/export')
@login_required
def export_server_config():
export_config = get_config(current_app.config['PZ_SERVER_CONFIG'])
return current_app.response_class(
formatted_config_lines(export_config),
mimetype='text/event-stream',
headers={"Content-Disposition": "attachment;filename=server_config.ini"}
)
@server_blueprint.route('/options', methods=['POST'])
@login_required
def save_items():
request_data = request.get_json()
config = save_config(current_app.config['PZ_SERVER_CONFIG'], request_data)
export_config = {
"WorkshopItems": prepared_config_to_view(config["WorkshopItems"]),
"Mods": prepared_config_to_view(config["Mods"])
}
return jsonify(export_config)
@server_blueprint.route('/log')
@login_required
def listen_log():
def followLog(serverLogsDir):
logFilePattern = "*_DebugLog-server.txt"
logFiles = glob.glob(path.join(serverLogsDir, logFilePattern))
if not logFiles:
yield 'data: {}\n\n'.format(
json.dumps({"error": True, "errorMessage": "No log file found"})
)
return
logFiles.sort(reverse=True)
with open(logFiles[0]) as serverLogFile:
            while True:
                line = serverLogFile.readline()
                if not line:
                    # no new data yet: back off briefly instead of busy-waiting
                    time.sleep(0.01)
                    continue
                yield 'data: {}\n\n'.format(
                    json.dumps({"log": line.strip()})
                )
serverLogsDir = current_app.config['PZ_SERVER_LOGS_DIR']
return Response(followLog(serverLogsDir), mimetype='text/event-stream')
| 29.8 | 119 | 0.68698 | [
"MIT"
] | emilio2hd/pz-panel | panel/routes/server.py | 3,725 | Python |
import pytest
from receptor.router import MeshRouter
test_networks = [
(
[
("a", "b", 1),
("a", "d", 1),
("a", "f", 1),
("b", "d", 1),
("b", "c", 1),
("c", "e", 1),
("c", "h", 1),
("c", "j", 1),
("e", "f", 1),
("e", "g", 1),
("e", "h", 1),
("f", "g", 1),
("g", "h", 1),
("h", "j", 1),
("h", "k", 1),
("j", "k", 1),
("j", "m", 1),
("l", "m", 1),
],
[("a", "f", "f"), ("a", "m", "b"), ("h", "d", "c")],
[("a", {"b", "d", "f"}), ("f", {"a", "e", "g"}), ("j", {"c", "h", "k", "m"})],
),
(
[("a", "b", 1), ("b", "c", 1), ("c", "d", 1), ("d", "e", 1), ("e", "f", 1)],
[("a", "f", "b"), ("c", "a", "b"), ("f", "c", "e")],
[("a", {"b"}), ("f", {"e"}), ("c", {"b", "d"})],
),
]
@pytest.mark.parametrize("edges, expected_next_hops, expected_neighbors", test_networks)
def test_next_hop(edges, expected_next_hops, expected_neighbors):
for node_id, remote, enh in expected_next_hops:
r = MeshRouter(node_id=node_id)
r.add_or_update_edges(edges)
assert r.next_hop(remote) == enh
@pytest.mark.parametrize("edges, expected_next_hops, expected_neighbors", test_networks)
def test_neighbors(edges, expected_next_hops, expected_neighbors):
r = MeshRouter(node_id=edges[0][0])
r.add_or_update_edges(edges)
for node_id, neighbors in expected_neighbors:
assert r.get_neighbors(node_id) == neighbors
| 31.745098 | 88 | 0.413218 | [
"Apache-2.0"
] | RedHatOfficial/receptor | test/unit/test_router.py | 1,619 | Python |
"""
Operating on shared global variables from multiple threads is unsafe; a local
variable, by contrast, is private to the thread that owns it and cannot be
accessed by other threads.
"""
import threading
def do_sth(arg1, arg2, arg3):
local_var1 = arg1
local_var2 = arg2
local_var3 = arg3
fun1(local_var1, local_var2, local_var3)
fun2(local_var1, local_var2, local_var3)
fun3(local_var1, local_var2, local_var3)
def fun1(local_var1, local_var2, local_var3):
print('%s: %s -- %s -- %s' % (threading.current_thread().name, local_var1,
local_var2, local_var3))
def fun2(local_var1, local_var2, local_var3):
print('%s: %s -- %s -- %s' % (threading.current_thread().name, local_var1,
local_var2, local_var3))
def fun3(local_var1, local_var2, local_var3):
print('%s: %s -- %s -- %s' % (threading.current_thread().name, local_var1,
local_var2, local_var3))
t1 = threading.Thread(target=do_sth, args=('a', 'b', 'c'))
t2 = threading.Thread(target=do_sth, args=('d', 'e', 'f'))
t1.start()
t2.start()
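# For contrast, the same idea with threading.local(): each thread gets its
# own private copy of the attributes, so nothing has to be passed through
# every call (a minimal sketch, not part of the original lesson code).
ctx = threading.local()
def do_sth_local(arg1, arg2, arg3):
    ctx.var1, ctx.var2, ctx.var3 = arg1, arg2, arg3
    fun_local()
def fun_local():
    # reads the calling thread's own copies
    print('%s: %s -- %s -- %s' % (threading.current_thread().name,
                                  ctx.var1, ctx.var2, ctx.var3))
t3 = threading.Thread(target=do_sth_local, args=('g', 'h', 'i'))
t4 = threading.Thread(target=do_sth_local, args=('j', 'k', 'l'))
t3.start()
t4.start()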
| 27.189189 | 78 | 0.61332 | [
"Apache-2.0"
] | hemuke/python | 17_process_thread/46_why_need_ThreadLocal.py | 1,094 | Python |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Copyright 2011-2019, Nigel Small
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import TestCase
from neotime import Date, Time, DateTime, Duration
from py2neo.data import Node
from py2neo.cypher import cypher_escape, cypher_repr
from py2neo.cypher.encoding import LabelSetView, PropertyDictView, PropertySelector
class LabelSetViewTestCase(TestCase):
def test_can_create_empty_view(self):
view = LabelSetView([])
self.assertEqual(repr(view), "")
def test_can_create_single_label_view(self):
view = LabelSetView(["A"])
self.assertEqual(repr(view), ":A")
def test_can_create_double_label_view(self):
view = LabelSetView(["A", "B"])
self.assertEqual(repr(view), ":A:B")
def test_can_select_existing_in_view(self):
view = LabelSetView(["A", "B"]).B
self.assertEqual(repr(view), ":B")
def test_can_select_non_existing_in_view(self):
view = LabelSetView(["A", "B"]).C
self.assertEqual(repr(view), "")
def test_can_chain_select(self):
view = LabelSetView(["A", "B", "C"]).B.C
self.assertEqual(repr(view), ":B:C")
def test_can_reselect_same(self):
view = LabelSetView(["A", "B", "C"]).B.B.C
self.assertEqual(repr(view), ":B:C")
def test_length(self):
view = LabelSetView(["A", "B", "C"])
self.assertEqual(len(view), 3)
def test_iterable(self):
view = LabelSetView(["A", "B", "C"])
self.assertSetEqual(set(view), {"A", "B", "C"})
def test_containment(self):
view = LabelSetView(["A", "B", "C"])
self.assertIn("A", view)
def test_non_containment(self):
view = LabelSetView(["A", "B", "C"])
self.assertNotIn("D", view)
class PropertyDictViewTestCase(TestCase):
def test_can_create_empty_view(self):
view = PropertyDictView({})
self.assertEqual(repr(view), "{}")
def test_can_create_single_property_view(self):
view = PropertyDictView({"A": 1})
self.assertEqual(repr(view), "{A: 1}")
def test_can_create_double_property_view(self):
view = PropertyDictView({"A": 1, "B": 2})
self.assertEqual(repr(view), "{A: 1, B: 2}")
def test_can_select_existing_in_view(self):
view = PropertyDictView({"A": 1, "B": 2}).B
self.assertEqual(repr(view), "{B: 2}")
def test_can_select_non_existing_in_view(self):
view = PropertyDictView({"A": 1, "B": 2}).C
self.assertEqual(repr(view), "{}")
def test_can_chain_select(self):
view = PropertyDictView({"A": 1, "B": 2, "C": 3}).B.C
self.assertEqual(repr(view), "{B: 2, C: 3}")
def test_can_reselect_same(self):
view = PropertyDictView({"A": 1, "B": 2, "C": 3}).B.B.C
self.assertEqual(repr(view), "{B: 2, C: 3}")
def test_length(self):
view = PropertyDictView({"A": 1, "B": 2, "C": 3})
self.assertEqual(len(view), 3)
def test_iterable(self):
view = PropertyDictView({"A": 1, "B": 2, "C": 3})
self.assertEqual(set(view), {"A", "B", "C"})
def test_containment(self):
view = PropertyDictView({"A": 1, "B": 2, "C": 3})
self.assertIn("A", view)
def test_non_containment(self):
view = PropertyDictView({"A": 1, "B": 2, "C": 3})
self.assertNotIn("D", view)
class PropertySelectorTestCase(TestCase):
def test_simple(self):
selector = PropertySelector({"A": 1, "B": 2, "C": 3})
self.assertEqual(selector.A, "1")
def test_non_existent(self):
selector = PropertySelector({"A": 1, "B": 2, "C": 3})
self.assertEqual(selector.D, "null")
class NodeReprTestCase(TestCase):
def test_empty(self):
a = Node()
r = cypher_repr(a)
self.assertEqual("({})", r)
def test_single_property(self):
a = Node(name="Alice")
r = cypher_repr(a)
self.assertEqual("({name: 'Alice'})", r)
def test_property_and_label(self):
a = Node("Person", name="Alice")
r = cypher_repr(a)
self.assertEqual("(:Person {name: 'Alice'})", r)
def test_date_property(self):
a = Node(d=Date(1970, 1, 1))
r = cypher_repr(a)
self.assertEqual("({d: date('1970-01-01')})", r)
def test_time_property(self):
a = Node(t=Time(12, 34, 56))
r = cypher_repr(a)
self.assertEqual("({t: time('12:34:56.000000000')})", r)
def test_datetime_property(self):
a = Node(dt=DateTime(1970, 1, 1, 12, 34, 56))
r = cypher_repr(a)
self.assertEqual("({dt: datetime('1970-01-01T12:34:56.000000000')})", r)
def test_duration_property(self):
a = Node(dur=Duration(days=3))
r = cypher_repr(a)
self.assertEqual("({dur: duration('P3D')})", r)
class CypherEscapeTestCase(TestCase):
def test_empty_string(self):
value = ""
with self.assertRaises(ValueError):
_ = cypher_escape(value)
def test_simple_string(self):
value = "foo"
escaped = "foo"
self.assertEqual(escaped, cypher_escape(value))
def test_string_with_space(self):
value = "foo bar"
escaped = "`foo bar`"
self.assertEqual(escaped, cypher_escape(value))
def test_string_with_backtick(self):
value = "foo `bar`"
escaped = "`foo ``bar```"
self.assertEqual(escaped, cypher_escape(value))
| 31.114583 | 83 | 0.61165 | [
"Apache-2.0"
] | CyberGRX/py2neo | test/unit/test_cypher_encoding.py | 5,974 | Python |
# Written by David Weber
# dsw7@sfu.ca
"""
In this short namespace I house a class that connects to PDB and downloads
files over the PDB file transfer protocol.
"""
# ------------------------------------------------------------------------------
import gzip
from os import remove, getcwd, path # built in
# my pymol API built on Python2 - try both imports
try:
from urllib.request import urlretrieve, urlcleanup
except ImportError:
from urllib import urlretrieve, urlcleanup
ROOT = 'ftp://ftp.wwpdb.org/pub/pdb/data/structures/divided/pdb/{}/{}'
class PDBFile:
def __init__(self, code):
"""Initialize a PDBFile object with a pdb file of interest
Parameters
----------
    code : the pdb code of interest
Any valid PDB code can be passed into PDBFile.
Examples
--------
>>> pdb_file = PDBFile('1rcy')
"""
self.code = code.lower()
def fetch_from_PDB(self):
"""
Connects to PDB FTP server, downloads a .gz file of interest,
decompresses the .gz file into .ent and then dumps a copy of
the pdb{code}.ent file into cwd.
Parameters
----------
None
Examples
--------
>>> inst = PDBFile('1rcy')
>>> path_to_file = inst.fetch_from_PDB()
>>> print(path_to_file)
"""
subdir = self.code[1:3]
infile = 'pdb{}.ent.gz'.format(self.code)
        # str.strip removes characters, not a suffix; slice off '.gz' instead
        decompressed = infile[:-len('.gz')]
fullpath = ROOT.format(subdir, infile)
try:
urlcleanup()
urlretrieve(fullpath, infile)
except Exception:
return 'URLError'
else:
with gzip.open(infile, 'rb') as gz:
with open(decompressed, 'wb') as out:
out.writelines(gz)
remove(infile)
return path.join(getcwd(), decompressed)
def clear(self):
"""
Deletes file from current working directory after the file has
been processed by some algorithm.
Parameters
----------
None
Examples
--------
>>> inst = PDBFile('1rcy')
>>> path_to_file = inst.fetch_from_PDB()
>>> print(path_to_file) # process the file using some algorithm
>>> inst.clear()
"""
filename = 'pdb{}.ent'.format(self.code)
try:
remove(path.join(getcwd(), filename))
except FileNotFoundError:
print('Cannot delete file. Does not exist.')
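if __name__ == '__main__':
    # Quick demonstration (requires network access to the wwPDB FTP server);
    # mirrors the usage shown in the docstrings above.
    pdb_file = PDBFile('1rcy')
    path_to_file = pdb_file.fetch_from_PDB()
    print(path_to_file)
    pdb_file.clear()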
| 28.222222 | 81 | 0.494273 | [
"MIT"
] | dsw7/BridgingInteractions | scalene-triangle/libs/PDB_filegetter.py | 2,794 | Python |
# -*- coding: utf-8 -*-
#Chucky_Bot
import LINETCR
from LINETCR.lib.curve.ttypes import *
from datetime import datetime
from bs4 import BeautifulSoup
from threading import Thread
from googletrans import Translator
from gtts import gTTS
import time,random,sys,json,codecs,threading,glob,urllib,urllib2,urllib3,re,ast,os,subprocess,requests,tempfile
cl = LINETCR.LINE()
#cl.login(qr=True)
cl.login(token='EsOvPPzeFykCVG8OoGf0.hE4TS1Hheb46PcdMzZKaaa.rzBOrFqSAApZownyv2qBJWU3PWWbf9/oE6G+sSVzUTo=')
cl.loginResult()
print "Azmi 1-Login Success\n"
ki = LINETCR.LINE()
#ki.login(qr=True)
ki.login(token='EsTdk3fyUSbT7LJVwoEd.rLylacrPH39WJb0UIwB8Nq.GYYzsgzj7aHd7mzCSluc3162Uqrry6Jjwf/bFuq9Etw=')
ki.loginResult()
print "Ki-Login Success\n"
kk = LINETCR.LINE()
#kk.login(qr=True)
kk.login(token='EsNKJDaP0J7Pt7syTOW9.GgPTp3/FisKkVX1rJHeroq.hUG0VDbWHz8R7o80xI0Pvme8dBb3dSsmCnat0PRX+JM=')
kk.loginResult()
print "Kk-Login Success\n"
#kc = LINETCR.LINE()
#kc.login(qr=True)
#kc.login(token='TOKEN_KAMU_DISINI_BEIB')
#kc.loginResult()
#print "Kc-Login Success\n"
#kr = LINETCR.LINE()
#kr.login(qr=True)
#kr.login(token='TOKEN_KAMU_DISINI_BEIB')
#kr.loginResult()
#print "Kr-Login Success\n"
#km = LINETCR.LINE()
#km.login(qr=True)
#km.login(token='TOKEN_KAMU_DISINI_BEIB')
#km.loginResult()
print "Km-Login Success\n\n=====[Sukses All Login]====="
reload(sys)
sys.setdefaultencoding('utf-8')
selfMessage ="""
╔═════════════════════════
║ ☆☞ S E L F ☜☆
╠═════════════════════════
╠➩〘Hi〙
╠➩〘Me〙
╠➩〘Mymid〙
╠➩〘Mid @〙
╠➩〘SearchID: (ID LINE)〙
╠➩〘Checkdate (DD/MM/YY)〙
╠➩〘Kalender〙
╠➩〘Steal contact〙
╠➩〘Pp @〙
╠➩〘Cover @〙
╠➩〘Auto like〙
╠➩〘Scbc Text〙
╠➩〘Cbc Text〙
╠➩〘Gbc Text〙
╠➩〘Getbio @〙
╠➩〘Getinfo @〙
╠➩〘Getname @〙
╠➩〘Getprofile @〙
╠➩〘Getcontact @〙
╠➩〘Getvid @〙
╠➩〘Friendlist〙
╠═════════════════════════
║ ☆☞ S E L F ☜☆
╚═════════════════════════
"""
botMessage ="""
╔═════════════════════════
║ ☆☞ B O T ☜☆
╠═════════════════════════
╠➩〘Absen〙
╠➩〘Respon〙
╠➩〘Runtime〙
╠➩〘Kapten copy @〙
╠➩〘TC1 copy @〙
╠➩〘TC2 copy @〙
╠➩〘TC3 copy @〙
╠➩〘TC4 copy @〙
╠➩〘Backup all〙
╠➩〘/bio Text〙
╠➩〘@bye (Usir Kapten)〙
╠➩〘Bye all (Usir Semua)〙
╠═════════════════════════
║ ☆☞ B O T ☜☆
╚═════════════════════════
"""
mediaMessage ="""
╔═════════════════════════
║ ☆☞ M E D I A ☜☆
╠═════════════════════════
╠➩〘Gift〙
╠➩〘Gift1 @ s/d Gift10 @〙
╠➩〘Giftbycontact〙
╠➩〘All gift〙
╠➩〘Gif gore〙
╠➩〘Google: (Text)〙
╠➩〘Playstore NamaApp〙
╠➩〘Fancytext: Text〙
╠➩〘/musik Judul-Penyanyi〙
╠➩〘/lirik Judul-Penyanyi〙
╠➩〘/musrik Judul-Penyanyi〙
╠➩〘/ig UrsnameInstagram〙
╠➩〘Checkig UrsnameInstagram〙
╠➩〘/apakah Text (Kerang Ajaib)〙
╠➩〘/kapan Text (Kerang Ajaib)〙
╠➩〘/hari Text (Kerang Ajaib)〙
╠➩〘/berapa Text (Kerang Ajaib)〙
╠➩〘/berapakah Text〙
╠➩〘Youtubelink: Judul Video〙
╠➩〘Youtubevideo: Judul Video〙
╠➩〘Youtubesearch: Judul Video〙
╠➩〘Image NamaGambar〙
╠➩〘Say-id Text〙
╠➩〘Say-en Text〙
╠➩〘Say-jp Text〙
╠➩〘Image NamaGambar〙
╠➩〘Tr-id Text (Translate En Ke ID〙
╠➩〘Tr-en Text (Translate ID Ke En〙
╠➩〘Tr-th Text (Translate ID Ke Th〙
╠➩〘Id@en Text (Translate ID Ke En〙
╠➩〘Id@th Text (Translate ID Ke TH〙
╠➩〘En@id Text (Translate En Ke ID〙
╠═════════════════════════
║ ☆☞ M E D I A ☜☆
╚═════════════════════════
"""
groupMessage ="""
╔═════════════════════════
║ ☆☞ G R O U P ☜☆
╠═════════════════════════
╠➩〘Welcome〙
╠➩〘Say welcome〙
╠➩〘Invite creator〙
╠➩〘Setview〙
╠➩〘Viewseen〙
╠➩〘Gn: (NamaGroup)〙
╠➩〘Tag all〙
╠➩〘Recover〙
╠➩〘Cancel〙
╠➩〘Cancelall〙
╠➩〘Gcreator〙
╠➩〘Ginfo〙
╠➩〘Gurl〙
╠➩〘List group〙
╠➩〘Pict group: (NamaGroup)〙
╠➩〘Spam: (Text)〙
╠➩〘Spam〙
╠➩〘Add all〙
╠➩〘Kick: (Mid)〙
╠➩〘Invite: (Mid)〙
╠➩〘Invite〙
╠➩〘Memlist〙
╠➩〘Getgroup image〙
╠➩〘Urlgroup Image〙
╠═════════════════════════
║ ☆☞ G R O U P ☜☆
╚═════════════════════════
"""
tjia="u71b6799e1c37868a871d442e67633182"
setMessage ="""
╔═════════════════════════
║ ☆☞ S E T ☜☆
╠═════════════════════════
╠➩〘Sambutan on/off〙
╠➩〘Url on/off〙
╠➩〘Alwaysread on/off〙
╠➩〘Sider on/off〙
╠➩〘Contact on/off〙
╠➩〘Simisimi on/off〙
╠═════════════════════════
║ ☆☞ S E T ☜☆
╚═════════════════════════
"""
creatorMessage ="""
╔═════════════════════════
║ ☆☞ C R E A T O R ☜☆
╠═════════════════════════
╠➩〘Admin add @〙
╠➩〘Admin remove @〙
╠➩〘/cnkapten〙
╠➩〘/cntc1〙
╠➩〘/cntc2〙
╠➩〘/cntc3〙
╠➩〘/cntc4〙
╠➩〘Crash〙
╠➩〘Kickall〙
╠➩〘Bc: (Text)〙
╠➩〘Nk: @〙
╠➩〘Ulti @〙
╠➩〘Join group: (NamaGroup〙
╠➩〘Leave group: (NamaGroup〙
╠➩〘Leave all group〙
╠➩〘Bot restart〙
╠➩〘Turn off〙
╠═════════════════════════
║ ☆☞ C R E A T O R ☜☆
╚═════════════════════════
"""
adminMessage ="""
╔═════════════════════════
║ ☆☞ A D M I N ☜☆
╠═════════════════════════
╠➩〘Admin list〙
╠➩〘Ban〙
╠➩〘Unban〙
╠➩〘Ban @〙
╠➩〘Unban @〙
╠➩〘Ban list〙
╠➩〘Clear ban〙
╠➩〘Kill〙
╠➩〘Kick @〙
╠➩〘Set member: (Jumblah)〙
╠➩〘Ban group: (NamaGroup〙
╠➩〘Del ban: (NamaGroup〙
╠➩〘List ban〙
╠➩〘Kill ban〙
╠➩〘Glist〙
╠➩〘Glistmid〙
╠➩〘Details group: (Gid)〙
╠➩〘Cancel invite: (Gid)〙
╠➩〘Invitemeto: (Gid)〙
╠➩〘Kapten acc invite〙
╠➩〘TC1 acc invite〙
╠➩〘TC2 acc invite〙
╠➩〘TC3 acc invite〙
╠➩〘TC4 acc invite〙
╠➩〘Removechat〙
╠➩〘Join on/off〙
╠➩〘Joincancel on/off〙
╠➩〘Respon on/off〙
╠➩〘Responkick on/off〙
╠➩〘Leave on/off〙
╠➩〘All join / (TC1/2/3/4 Join)〙
╠═════════════════════════
║ ☆☞ A D M I N ☜☆
╚═════════════════════════
"""
helpMessage ="""
╔═════════════════════════
║ ☆☞ H E L P ☜☆
╠═════════════════════════
╠➩〘Help protect〙
╠➩〘Help self〙
╠➩〘Help bot〙
╠➩〘Help group〙
╠➩〘Help set〙
╠➩〘Help media〙
╠➩〘Help admin〙
╠➩〘Help creator〙
╠➩〘Owner〙
╠➩〘Pap owner〙
╠➩〘Admin〙
╠➩〘Speed〙
╠➩〘Speed test〙
╠➩〘Status〙
╠═════════════════════════
║ ☆☞ H E L P ☜☆
╚═════════════════════════
"""
protectMessage ="""
╔═════════════════════════
║ ☆☞ P R O T E C T ☜☆
╠═════════════════════════
╠➩〘Allprotect on/off〙
╠➩〘Autocancel on/off〙
╠➩〘Qr on/off〙
╠➩〘Autokick on/off〙
╠➩〘Ghost on/off〙
╠➩〘Invitepro on/off〙
╠═════════════════════════
║ ☆☞ P R O T E C T ☜☆
╚═════════════════════════
"""
KAC=[cl,ki,kk]
mid = cl.getProfile().mid
Amid = ki.getProfile().mid
Bmid = kk.getProfile().mid
Bots=[mid,Amid,Bmid]
Creator=["u71b6799e1c37868a871d442e67633182"]
admin=["u71b6799e1c37868a871d442e67633182"]
contact = cl.getProfile()
backup1 = cl.getProfile()
backup1.displayName = contact.displayName
backup1.statusMessage = contact.statusMessage
backup1.pictureStatus = contact.pictureStatus
contact = ki.getProfile()
backup2 = ki.getProfile()
backup2.displayName = contact.displayName
backup2.statusMessage = contact.statusMessage
backup2.pictureStatus = contact.pictureStatus
contact = kk.getProfile()
backup3 = kk.getProfile()
backup3.displayName = contact.displayName
backup3.statusMessage = contact.statusMessage
backup3.pictureStatus = contact.pictureStatus
responsename = cl.getProfile().displayName
responsename2 = ki.getProfile().displayName
responsename3 = kk.getProfile().displayName
wait = {
    "autoAdd":True,   # referenced below but never initialized in the original; default True is an assumption
    "whitelist":{},   # referenced below but never initialized in the original
    "LeaveRoom":True,
"AutoJoin":False,
"AutoJoinCancel":True,
"memberscancel":0,
"Members":1,
"AutoCancel":{},
"AutoCancelon":False,
"joinkick":False,
"AutoKick":{},
"AutoKickon":False,
'pap':{},
'invite':{},
'steal':{},
'gift':{},
'likeOn':{},
'Leave':{},
'detectMention':True,
'kickMention':False,
'timeline':True,
"Timeline":True,
"comment1":"Kenapa Kak?",
"comment2":"Wkwkwk \(○^ω^○)/",
"comment3":"Lucu Banget!!! ヘ(^_^)ヘ",
"comment4":"Nice Kak (^_^)",
"comment5":"Bot Auto Like ©By : Azmi\nContact Me : 👉 line.me/ti/p/~a_ulul15",
"commentOn":True,
"commentBlack":{},
"message":"Thx For Add Me (^_^)\nInvite Me To Your Group ヘ(^_^)ヘ",
"blacklist":{},
"wblacklist":False,
"dblacklist":False,
"Qr":{},
"Qron":False,
"Contact":False,
"Sambutan":True,
"Ghost":False,
"inviteprotect":False,
"alwaysRead":False,
"Sider":{},
"Simi":{},
"lang":"JP",
"BlGroup":{}
}
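# Additional runtime state: per-chat simisimi toggles, "sider" (silent reader)
# tracking data, and read-checkpoint bookkeeping.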
settings = {
"simiSimi":{}
}
cctv = {
"cyduk":{},
"point":{},
"sidermem":{}
}
wait2 = {
"readPoint":{},
"readMember":{},
"setTime":{},
"ROM":{}
}
setTime = wait2['setTime']
mulai = time.time()
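# Fetch raw page HTML with a desktop User-Agent; uses urllib.request on
# Python 3 and urllib2 on Python 2.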
def download_page(url):
    version = (3, 0)
    cur_version = sys.version_info
    if cur_version >= version:
        import urllib.request  # fixed: the original wrote "import urllib,request"
        try:
            headers = {}
            headers['User-Agent'] = "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36"
            req = urllib.request.Request(url, headers=headers)
            resp = urllib.request.urlopen(req)
            respData = str(resp.read())
            return respData
        except Exception as e:
            print(str(e))
    else:
        import urllib2
        try:
            headers = {}
            headers['User-Agent'] = "Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.27 Safari/537.17"
            req = urllib2.Request(url, headers=headers)
            response = urllib2.urlopen(req)
            page = response.read()
            return page
        except:
            return "Page Not found"
def _images_get_next_item(s):
start_line = s.find('rg_di')
if start_line == -1:
end_quote = 0
link = "no_links"
return link, end_quote
else:
start_line = s.find('"class="rg_meta"')
start_content = s.find('"ou"',start_line+90)
end_content = s.find(',"ow"',start_content-90)
content_raw = str(s[start_content+6:end_content-1])
return content_raw, end_content
def _images_get_all_items(page):
items = []
while True:
item, end_content = _images_get_next_item(page)
if item == "no_links":
break
else:
items.append(item)
time.sleep(0.1)
page = page[end_content:]
return items
def waktu(secs):
mins, secs = divmod(secs,60)
hours, mins = divmod(mins,60)
return '%02d Jam %02d Menit %02d Detik' % (hours, mins, secs)
def cms(string, commands): # /XXX, >XXX, ;XXX, ^XXX, %XXX, $XXX...
    # Assumed intent (the original compared against the bare command and never
    # used the prefix): accept "<prefix><command>" for any accepted prefix.
    tex = ["+","@","/",">",";","^","%","$","サテラ:"]
    for texX in tex:
        for command in commands:
            if string == texX + command:
                return True
    return False
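# Imgur upload helper. Note: 'album' and 'image_path' are assumed to be
# defined elsewhere in the full script; they are not set in this section.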
def upload_tempimage(client):
'''
Upload a picture of a kitten. We don't ship one, so get creative!
'''
config = {
'album': album,
'name': 'bot auto upload',
'title': 'bot auto upload',
'description': 'bot auto upload'
}
print("Uploading image... ")
image = client.upload_from_path(image_path, config=config, anon=False)
print("Done")
print()
return image
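# Low-level send helpers. The functions taking 'self' appear to be written to
# be bound onto a LINE client instance; media is pushed through LINE's OBS
# upload endpoints.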
def sendMessage(to, text, contentMetadata={}, contentType=0):
    mes = Message()
    mes.to, mes.from_ = to, profile.mid
    mes.text = text
    mes.contentType, mes.contentMetadata = contentType, contentMetadata
    if to not in messageReq:
        messageReq[to] = -1
    messageReq[to] += 1
    # Assumed fix: the original built the message but never dispatched it; send
    # it with the per-chat sequence number the bookkeeping above maintains.
    cl._client.sendMessage(messageReq[to], mes)
def sendImage(self, to_, path):
M = Message(to=to_, text=None, contentType = 1)
M.contentMetadata = None
M.contentPreview = None
M2 = self._client.sendMessage(0,M)
M_id = M2.id
files = {
'file': open(path, 'rb'),
}
params = {
'name': 'media',
'oid': M_id,
'size': len(open(path, 'rb').read()),
'type': 'image',
'ver': '1.0',
}
data = {
'params': json.dumps(params)
}
r = self.post_content('https://obs-sg.line-apps.com/talk/m/upload.nhn', data=data, files=files)
if r.status_code != 201:
raise Exception('Upload image failure.')
return True
def sendImageWithURL(self, to_, url):
    path = '%s/pythonLine-%i.data' % (tempfile.gettempdir(), randint(0, 9))
    r = requests.get(url, stream=True)
    if r.status_code == 200:
        with open(path, 'wb') as f:  # fixed: binary mode, the payload is image bytes
            shutil.copyfileobj(r.raw, f)
    else:
        raise Exception('Download image failure.')
try:
self.sendImage(to_, path)
except:
try:
self.sendImage(to_, path)
except Exception as e:
raise e
def sendAudio(self, to_, path):
M = Message()
M.text = None
M.to = to_
M.contentMetadata = None
M.contentPreview = None
M.contentType = 3
M_id = self._client.sendMessage(0,M).id
files = {
'file': open(path, 'rb'),
}
params = {
'name': 'media',
'oid': M_id,
'size': len(open(path, 'rb').read()),
'type': 'audio',
'ver': '1.0',
}
data = {
'params': json.dumps(params)
}
r = self.post_content('https://os.line.naver.jp/talk/m/upload.nhn', data=data, files=files)
if r.status_code != 201:
raise Exception('Upload audio failure.')
return True
def sendAudioWithURL(self, to_, url):
path = self.downloadFileWithURL(url)
try:
self.sendAudio(to_, path)
except Exception as e:
raise Exception(e)
def sendAudioWithUrl(self, to_, url):
    path = '%s/pythonLine-%i.data' % (tempfile.gettempdir(), randint(0, 9))  # fixed the '%1.data' typo
    r = requests.get(url, stream=True, verify=False)
    if r.status_code == 200:
        with open(path, 'wb') as f:  # fixed: binary mode, the payload is audio bytes
            shutil.copyfileobj(r.raw, f)
else:
raise Exception('Download audio failure.')
try:
self.sendAudio(to_, path)
except Exception as e:
raise e
def downloadFileWithURL(self, fileUrl):
saveAs = '%s/pythonLine-%i.data' % (tempfile.gettempdir(), randint(0, 9))
r = self.get_content(fileUrl)
if r.status_code == 200:
with open(saveAs, 'wb') as f:
shutil.copyfileobj(r.raw, f)
return saveAs
else:
raise Exception('Download file failure.')
def restart_program():
python = sys.executable
os.execl(python, python, * sys.argv)
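# Main event dispatcher for polled operations. Types handled below:
#   5: friend added          11: group QR/settings updated   13: invited to group
#  15: member left           17: member joined               19: member kicked
#  21/22: room invite        26: incoming message            55: chat read (sider log)
# Note: Cmid and Dmid referenced below are assumed to be defined earlier in
# the full script alongside kc, kr and km.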
def bot(op):
try:
if op.type == 0:
return
if op.type == 5:
if wait["autoAdd"] == True:
cl.findAndAddContactsByMid(op.param1)
if(wait["message"]in[""," ","\n",None]):
pass
else:
cl.sendText(op.param1,str(wait["message"]))
if op.type == 55:
try:
group_id = op.param1
user_id=op.param2
subprocess.Popen('echo "'+ user_id+'|'+str(op.createdTime)+'" >> dataSeen/%s.txt' % group_id, shell=True, stdout=subprocess.PIPE, )
except Exception as e:
print e
if op.type == 55:
try:
if cctv['cyduk'][op.param1]==True:
if op.param1 in cctv['point']:
                        Name = cl.getContact(op.param2).displayName  # one lookup suffices; the original overwrote this five times
if Name in cctv['sidermem'][op.param1]:
pass
else:
cctv['sidermem'][op.param1] += "\n• " + Name
if " " in Name:
nick = Name.split(' ')
if len(nick) == 2:
random.choice(KAC).sendText(op.param1, "Haii " + "☞ " + nick[0] + " ☜" + "\nNgintip Aja Niih. . .\nChat Kek Idiih (-__-) ")
else:
random.choice(KAC).sendText(op.param1, "Haii " + "☞ " + nick[1] + " ☜" + "\nBetah Banget Jadi Penonton. . .\nChat Napa (-__-) ")
else:
random.choice(KAC).sendText(op.param1, "Haii " + "☞ " + Name + " ☜" + "\nNgapain Kak Ngintip Aja???\nSini Gabung Chat... ")
else:
pass
else:
pass
except:
pass
else:
pass
if op.type == 22:
cl.leaveRoom(op.param1)
if op.type == 21:
cl.leaveRoom(op.param1)
if op.type == 13:
print op.param3
if op.param3 in mid:
if op.param2 in Creator:
cl.acceptGroupInvitation(op.param1)
if op.param3 in Amid:
if op.param2 in Creator:
ki.acceptGroupInvitation(op.param1)
if op.param3 in Bmid:
if op.param2 in Creator:
kk.acceptGroupInvitation(op.param1)
if op.param3 in Cmid:
if op.param2 in Creator:
kc.acceptGroupInvitation(op.param1)
if op.param3 in Dmid:
if op.param2 in Creator:
kr.acceptGroupInvitation(op.param1)
if op.param3 in mid:
if op.param2 in Amid:
cl.acceptGroupInvitation(op.param1)
if op.param3 in mid:
if op.param2 in Bmid:
cl.acceptGroupInvitation(op.param1)
if op.param3 in mid:
if op.param2 in Cmid:
cl.acceptGroupInvitation(op.param1)
if op.param3 in Amid:
if op.param2 in mid:
ki.acceptGroupInvitation(op.param1)
if op.param3 in Amid:
if op.param2 in Bmid:
ki.acceptGroupInvitation(op.param1)
if op.param3 in Amid:
if op.param2 in Cmid:
ki.acceptGroupInvitation(op.param1)
if op.param3 in Bmid:
if op.param2 in mid:
kk.acceptGroupInvitation(op.param1)
if op.param3 in Bmid:
if op.param2 in Amid:
kk.acceptGroupInvitation(op.param1)
if op.param3 in Bmid:
if op.param2 in Cmid:
kk.acceptGroupInvitation(op.param1)
if op.param3 in Cmid:
if op.param2 in mid:
kc.acceptGroupInvitation(op.param1)
if op.param3 in Cmid:
if op.param2 in Amid:
kc.acceptGroupInvitation(op.param1)
            if op.param3 in Cmid:
                if op.param2 in Bmid:  # assumed: the original repeated Cmid here, breaking the mid/Amid/Bmid inviter pattern
                    kc.acceptGroupInvitation(op.param1)
if op.param3 in Dmid:
if op.param2 in mid:
kr.acceptGroupInvitation(op.param1)
if op.param3 in Dmid:
if op.param2 in Amid:
kr.acceptGroupInvitation(op.param1)
if op.param3 in Dmid:
if op.param2 in Bmid:
kr.acceptGroupInvitation(op.param1)
if mid in op.param3:
if wait["AutoJoinCancel"] == True:
G = cl.getGroup(op.param1)
if len(G.members) <= wait["memberscancel"]:
cl.acceptGroupInvitation(op.param1)
cl.sendText(op.param1,"Maaf " + cl.getContact(op.param2).displayName + "\nMember Kurang Dari 30 Orang\nUntuk Info, Silahkan Chat Owner Kami!")
c = Message(to=op.param1, from_=None, text=None, contentType=13)
c.contentMetadata={'mid':tjia}
cl.sendMessage(c)
cl.leaveGroup(op.param1)
else:
cl.acceptGroupInvitation(op.param1)
G = cl.getGroup(op.param1)
G.preventJoinByTicket = False
cl.updateGroup(G)
Ti = cl.reissueGroupTicket(op.param1)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
kr.acceptGroupInvitationByTicket(op.param1,Ti)
G.preventJoinByTicket = True
cl.updateGroup(G)
cl.sendText(op.param1,"☆Ketik ☞Help☜ Untuk Bantuan☆\n☆Harap Gunakan Dengan Bijak ^_^ ☆")
if mid in op.param3:
if wait["AutoJoin"] == True:
G = cl.getGroup(op.param1)
if len(G.members) <= wait["Members"]:
cl.rejectGroupInvitation(op.param1)
else:
cl.acceptGroupInvitation(op.param1)
G = cl.getGroup(op.param1)
G.preventJoinByTicket = False
cl.updateGroup(G)
Ti = cl.reissueGroupTicket(op.param1)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
kr.acceptGroupInvitationByTicket(op.param1,Ti)
G.preventJoinByTicket = True
cl.updateGroup(G)
cl.sendText(op.param1,"☆Ketik ☞Help☜ Untuk Bantuan☆\n☆Harap Gunakan Dengan Bijak ^_^ ☆")
else:
if wait["AutoCancel"][op.param1] == True:
if op.param3 in admin:
pass
else:
cl.cancelGroupInvitation(op.param1, [op.param3])
else:
if op.param3 in wait["blacklist"]:
cl.cancelGroupInvitation(op.param1, [op.param3])
cl.sendText(op.param1, "Blacklist Detected")
else:
pass
if op.type == 19:
if wait["AutoKick"][op.param1] == True:
                try:
                    # Protect staff and bots; kick back anyone else who kicks.
                    if op.param2 in Creator or op.param2 in admin or op.param2 in Bots:
                        pass
                    else:
                        random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
if op.param2 in wait["blacklist"]:
pass
else:
random.choice(KAC).inviteIntoGroup(op.param1,[op.param3])
except:
try:
if op.param2 not in Creator:
if op.param2 not in admin:
if op.param2 not in Bots:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
if op.param2 in wait["blacklist"]:
pass
else:
random.choice(KAC).inviteIntoGroup(op.param1,[op.param3])
except:
print ("client Kick regulation or Because it does not exist in the group\ngid=["+op.param1+"]\nmid=["+op.param2+"]")
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Creator:
if op.param2 in admin:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Creator:
if op.param2 in admin:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
else:
pass
if mid in op.param3:
                # The original nested guard here was a no-op: the kick-back
                # below runs regardless of who removed the bot.
                if op.param2 in Creator or op.param2 in Bots:
                    pass
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client Kick regulation or Because it does not exist in the group\ngid=["+op.param1+"]\nmid=["+op.param2+"]")
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
G = ki.getGroup(op.param1)
G.preventJoinByTicket = False
ki.updateGroup(G)
Ti = ki.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
kr.acceptGroupInvitationByTicket(op.param1,Ti)
X = cl.getGroup(op.param1)
X.preventJoinByTicket = True
cl.updateGroup(X)
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
if Amid in op.param3:
if op.param2 in Bots:
pass
try:
kk.kickoutFromGroup(op.param1,[op.param2])
kc.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client Kick regulation or Because it does not exist in the group\ngid=["+op.param1+"]\nmid=["+op.param2+"]")
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
X = kk.getGroup(op.param1)
X.preventJoinByTicket = False
cl.updateGroup(X)
Ti = kk.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kr.acceptGroupInvitationByTicket(op.param1,Ti)
G = ki.getGroup(op.param1)
G.preventJoinByTicket = True
ki.updateGroup(G)
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
if Bmid in op.param3:
if op.param2 in Bots:
pass
try:
kc.kickoutFromGroup(op.param1,[op.param2])
kk.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client Kick regulation or Because it does not exist in the group\ngid=["+op.param1+"]\nmid=["+op.param2+"]")
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
X = kc.getGroup(op.param1)
X.preventJoinByTicket = False
kc.updateGroup(X)
Ti = kc.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
kr.acceptGroupInvitationByTicket(op.param1,Ti)
G = kk.getGroup(op.param1)
G.preventJoinByTicket = True
kk.updateGroup(G)
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
if Cmid in op.param3:
if op.param2 in Bots:
pass
try:
cl.kickoutFromGroup(op.param1,[op.param2])
kk.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client Kick regulation or Because it does not exist in the group\ngid=["+op.param1+"]\nmid=["+op.param2+"]")
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
X = cl.getGroup(op.param1)
X.preventJoinByTicket = False
cl.updateGroup(X)
Ti = cl.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
kr.acceptGroupInvitationByTicket(op.param1,Ti)
G = kc.getGroup(op.param1)
G.preventJoinByTicket = True
kc.updateGroup(G)
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
if Dmid in op.param3:
if op.param2 in Bots:
pass
try:
cl.kickoutFromGroup(op.param1,[op.param2])
kk.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client Kick regulation or Because it does not exist in the group\ngid=["+op.param1+"]\nmid=["+op.param2+"]")
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
X = cl.getGroup(op.param1)
X.preventJoinByTicket = False
cl.updateGroup(X)
Ti = cl.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
kr.acceptGroupInvitationByTicket(op.param1,Ti)
G = kc.getGroup(op.param1)
G.preventJoinByTicket = True
kc.updateGroup(G)
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
            if op.param3 in Creator or op.param3 in admin:  # fixed: 'Creator in op.param3' compared a list against a string (TypeError)
                if op.param2 in Bots:
pass
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
try:
if op.param2 not in Bots:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
if op.param2 in wait["blacklist"]:
pass
else:
random.choice(KAC).inviteIntoGroup(op.param1,[op.param3])
except:
print ("client Kick regulation or Because it does not exist in the group\ngid=["+op.param1+"]\nmid=["+op.param2+"]")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
random.choice(KAC).inviteIntoGroup(op.param1,[op.param3])
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
if op.type == 11:
if wait["Qr"][op.param1] == True:
if op.param2 not in Bots:
if op.param2 not in admin:
G = random.choice(KAC).getGroup(op.param1)
G.preventJoinByTicket = True
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
random.choice(KAC).updateGroup(G)
if op.type == 17:
if wait["Sambutan"] == True:
if op.param2 in admin:
return
ginfo = cl.getGroup(op.param1)
contact = cl.getContact(op.param2)
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
c = Message(to=op.param1, from_=None, text=None, contentType=13)
c.contentMetadata={'mid':op.param2}
cl.sendMessage(c)
cl.sendText(op.param1,"Hallo " + cl.getContact(op.param2).displayName + "\nWelcome To ☞ " + str(ginfo.name) + " ☜" + "\nBudayakan Cek Note\nDan Semoga Betah Disini ^_^")
cl.sendImageWithURL(op.param1,image)
print "MEMBER JOIN TO GROUP"
if op.type == 17:
if wait["joinkick"] == True:
if op.param2 in admin:
if op.param2 in Bots:
return
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
print "MEMBER JOIN KICK TO GROUP"
if op.type == 15:
if wait["Sambutan"] == True:
if op.param2 in admin:
return
cl.sendText(op.param1,"Good Bye " + cl.getContact(op.param2).displayName + "\nSee You Next Time . . . (p′︵‵。) 🤗")
random.choice(KAC).inviteIntoGroup(op.param1,[op.param2])
print "MEMBER HAS LEFT THE GROUP"
if op.type == 13:
            # Invite protection: cancel the invite and punish any non-staff,
            # non-bot member who invites someone while inviteprotect is on.
            if op.param2 not in Creator and op.param2 not in admin and op.param2 not in Bots:
                if wait["inviteprotect"] == True:
                    wait["blacklist"][op.param2] = True
                    cl.cancelGroupInvitation(op.param1,[op.param3])
                    random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
if op.type == 19:
if wait["Ghost"] == True:
                # Skip the ghost-kick only when the kicker is staff or a bot
                # (the original nested guard inverted this).
                if op.param2 in admin or op.param2 in Bots:
                    pass
                else:
try:
G = cl.getGroup(op.param1)
G.preventJoinByTicket = False
cl.updateGroup(G)
Ticket = cl.reissueGroupTicket(op.param1)
km.acceptGroupInvitationByTicket(op.param1,Ticket)
time.sleep(0.01)
km.kickoutFromGroup(op.param1,[op.param2])
c = Message(to=op.param1, from_=None, text=None, contentType=13)
c.contentMetadata={'mid':op.param2}
km.sendMessage(c)
km.leaveGroup(op.param1)
G.preventJoinByTicket = True
cl.updateGroup(G)
wait["blacklist"][op.param2] = True
except:
G = cl.getGroup(op.param1)
G.preventJoinByTicket = False
cl.updateGroup(G)
Ticket = cl.reissueGroupTicket(op.param1)
km.acceptGroupInvitationByTicket(op.param1,Ticket)
time.sleep(0.01)
km.kickoutFromGroup(op.param1,[op.param2])
c = Message(to=op.param1, from_=None, text=None, contentType=13)
c.contentMetadata={'mid':op.param2}
km.sendMessage(c)
km.leaveGroup(op.param1)
G.preventJoinByTicket = True
cl.updateGroup(G)
wait["blacklist"][op.param2] = True
if op.type == 26:
msg = op.message
if wait["alwaysRead"] == True:
if msg.toType == 0:
cl.sendChatChecked(msg.from_,msg.id)
else:
cl.sendChatChecked(msg.to,msg.id)
if msg.contentType == 16:
if wait['likeOn'] == True:
url = msg.contentMetadata["postEndUrl"]
cl.like(url[25:58], url[66:], likeType=1005)
ki.like(url[25:58], url[66:], likeType=1002)
kk.like(url[25:58], url[66:], likeType=1004)
kc.like(url[25:58], url[66:], likeType=1003)
kr.like(url[25:58], url[66:], likeType=1001)
cl.comment(url[25:58], url[66:], wait["comment1"])
ki.comment(url[25:58], url[66:], wait["comment2"])
kk.comment(url[25:58], url[66:], wait["comment3"])
kc.comment(url[25:58], url[66:], wait["comment4"])
kr.comment(url[25:58], url[66:], wait["comment5"])
cl.sendText(msg.to,"Like Success")
wait['likeOn'] = False
if op.type == 26:
msg = op.message
if msg.to in settings["simiSimi"]:
if settings["simiSimi"][msg.to] == True:
if msg.text is not None:
text = msg.text
r = requests.get("http://api.ntcorp.us/chatbot/v1/?text=" + text.replace(" ","+") + "&key=beta1.nt")
data = r.text
data = json.loads(data)
if data['status'] == 200:
if data['result']['result'] == 100:
cl.sendText(msg.to,data['result']['response'].encode('utf-8'))
            if msg.contentMetadata is not None and 'MENTION' in msg.contentMetadata:
if wait["kickMention"] == True:
contact = cl.getContact(msg.from_)
cName = contact.displayName
balas = ["Aku Bilang Jangan Ngetag Lagi " + cName + "\nAku Kick Kamu! Sorry, Byee!!!"]
ret_ = random.choice(balas)
name = re.findall(r'@(\w+)', msg.text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
for mention in mentionees:
if mention['M'] in admin:
cl.sendText(msg.to,ret_)
random.choice(KAC).kickoutFromGroup(msg.to,[msg.from_])
break
if mention['M'] in Bots:
cl.sendText(msg.to,ret_)
random.choice(KAC).kickoutFromGroup(msg.to,[msg.from_])
break
            if msg.contentMetadata is not None and 'MENTION' in msg.contentMetadata:
if wait["detectMention"] == True:
contact = cl.getContact(msg.from_)
cName = contact.displayName
balas = ["Sekali lagi nge tag gw sumpahin jomblo seumur hidup!","Dont Tag!! Lagi Sibuk",cName + " Ngapain Ngetag?",cName + " Nggak Usah Tag-Tag! Kalo Penting Langsung Pc Aja","Tag Mulu Lo Anjirr!","Dia Lagi Off", cName + " Kenapa Tag? Kangen?","Dia Lagi Tidur\nJangan Di Tag " + cName, "Jangan Suka Tag Gua " + cName, "Kamu Siapa " + cName + "?", "Ada Perlu Apa " + cName + "?","Woii " + cName + " Jangan Ngetag, Riibut!"]
ret_ = random.choice(balas)
name = re.findall(r'@(\w+)', msg.text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
for mention in mentionees:
if mention['M'] in admin:
cl.sendText(msg.to,ret_)
break
if mention['M'] in Bots:
cl.sendText(msg.to,ret_)
break
if msg.contentType == 13:
if wait["wblacklist"] == True:
if msg.contentMetadata["mid"] not in admin:
if msg.contentMetadata["mid"] in wait["blacklist"]:
random.choice(KAC).sendText(msg.to,"Sudah")
wait["wblacklist"] = False
else:
wait["blacklist"][msg.contentMetadata["mid"]] = True
wait["wblacklist"] = False
random.choice(KAC).sendText(msg.to,"Ditambahkan")
else:
cl.sendText(msg.to,"Admin Detected~")
elif wait["dblacklist"] == True:
if msg.contentMetadata["mid"] in wait["blacklist"]:
del wait["blacklist"][msg.contentMetadata["mid"]]
random.choice(KAC).sendText(msg.to,"Terhapus")
wait["dblacklist"] = False
else:
wait["dblacklist"] = False
random.choice(KAC).sendText(msg.to,"Tidak Ada Black List")
elif wait["Contact"] == True:
msg.contentType = 0
cl.sendText(msg.to,msg.contentMetadata["mid"])
if 'displayName' in msg.contentMetadata:
contact = cl.getContact(msg.contentMetadata["mid"])
try:
cu = cl.channel.getCover(msg.contentMetadata["mid"])
except:
cu = ""
cl.sendText(msg.to,"Nama:\n" + msg.contentMetadata["displayName"] + "\n\nMid:\n" + msg.contentMetadata["mid"] + "\n\nStatus:\n" + contact.statusMessage + "\n\nPhoto Profile:\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n\nPhoto Cover:\n" + str(cu))
else:
contact = cl.getContact(msg.contentMetadata["mid"])
try:
cu = cl.channel.getCover(msg.contentMetadata["mid"])
except:
cu = ""
cl.sendText(msg.to,"Nama:\n" + msg.contentMetadata["displayName"] + "\n\nMid:\n" + msg.contentMetadata["mid"] + "\n\nStatus:\n" + contact.statusMessage + "\n\nPhoto Profile:\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n\nPhoto Cover:\n" + str(cu))
elif msg.text == "Ginfo":
if msg.toType == 2:
ginfo = cl.getGroup(msg.to)
try:
gCreator = ginfo.creator.displayName
except:
gCreator = "Error"
if wait["lang"] == "JP":
if ginfo.invitee is None:
sinvitee = "0"
else:
sinvitee = str(len(ginfo.invitee))
if ginfo.preventJoinByTicket == True:
u = "close"
else:
u = "open"
cl.sendText(msg.to,"[Group name]\n" + str(ginfo.name) + "\n\n[Gid]\n" + msg.to + "\n\n[Group creator]\n" + gCreator + "\n\n[Profile status]\nhttp://dl.profile.line.naver.jp/" + ginfo.pictureStatus + "\n\nMembers:" + str(len(ginfo.members)) + "members\nPending:" + sinvitee + "people\nURL:" + u + "it is inside")
else:
cl.sendText(msg.to,"[group name]\n" + str(ginfo.name) + "\n[gid]\n" + msg.to + "\n[group creator]\n" + gCreator + "\n[profile status]\nhttp://dl.profile.line.naver.jp/" + ginfo.pictureStatus)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can not be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text is None:
return
elif msg.text in ["Creator","Owner"]:
msg.contentType = 13
msg.contentMetadata = {'mid': tjia}
cl.sendMessage(msg)
cl.sendText(msg.to,"Itu tukang tikungnya(^_^)")
elif msg.text in ["Admin","admin"]:
msg.contentType = 13
admin1 = "u71b6799e1c37868a871d442e67633182"
admin2 = "u46560b002469877f708c1d2e8966fc9d"
admin3 = "u1dee2db35847101e3aa420e667390000"
msg.contentMetadata = {'mid': tjia}
random.choice(KAC).sendMessage(msg)
msg.contentMetadata = {'mid': admin1}
random.choice(KAC).sendMessage(msg)
msg.contentMetadata = {'mid': admin2}
random.choice(KAC).sendMessage(msg)
msg.contentMetadata = {'mid': admin3}
random.choice(KAC).sendMessage(msg)
random.choice(KAC).sendText(msg.to,"Itu Admin Kami (^_^)")
elif "Admin add @" in msg.text:
if msg.from_ in Creator:
print "[Command]Admin add executing"
_name = msg.text.replace("Admin add @","")
_nametarget = _name.rstrip(' ')
                    gs = cl.getGroup(msg.to)  # one lookup suffices; the original re-fetched this for every bot
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
random.choice(KAC).sendText(msg.to,"Contact Tidak Di Temukan")
else:
for target in targets:
try:
admin.append(target)
cl.sendText(msg.to,"Admin Chucky Ditambahkan")
except:
pass
print "[Command]Admin add executed"
else:
cl.sendText(msg.to,"Command Denied.")
cl.sendText(msg.to,"Creator Permission Required.")
elif "Admin remove @" in msg.text:
if msg.from_ in Creator:
print "[Command]Admin Remove Executing"
_name = msg.text.replace("Admin remove @","")
_nametarget = _name.rstrip(' ')
                    gs = cl.getGroup(msg.to)  # one lookup suffices; the original re-fetched this for every bot
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
random.choice(KAC).sendText(msg.to,"Contact Tidak Di Temukan")
else:
for target in targets:
try:
admin.remove(target)
cl.sendText(msg.to,"Admin Chucky Dihapus")
except:
pass
print "[Command]Admin remove executed"
else:
cl.sendText(msg.to,"Command Denied.")
cl.sendText(msg.to,"Creator Permission Required.")
elif msg.text in ["Admin list","admin list","List admin"]:
if admin == []:
cl.sendText(msg.to,"The Admin List Is Empty")
else:
cl.sendText(msg.to,"Tunggu...")
mc = "╔═════════════════════════\n║ ☆☞ ADMIN CHUCKY ☜☆\n╠═════════════════════════\n"
for mi_d in admin:
mc += "╠••> " +cl.getContact(mi_d).displayName + "\n"
cl.sendText(msg.to,mc + "╚═════════════════════════")
print "[Command]Admin List executed"
elif msg.text in ["Group creator","Gcreator","gcreator"]:
ginfo = cl.getGroup(msg.to)
gCreator = ginfo.creator.mid
msg.contentType = 13
msg.contentMetadata = {'mid': gCreator}
cl.sendMessage(msg)
cl.sendText(msg.to,"Itu Yang Buat Grup Ini")
elif msg.contentType == 16:
if wait["Timeline"] == True:
msg.contentType = 0
msg.text = "post URL\n" + msg.contentMetadata["postEndUrl"]
random.choice(KAC).sendText(msg.to,msg.text)
if msg.contentType == 13:
if wait["steal"] == True:
_name = msg.contentMetadata["displayName"]
copy = msg.contentMetadata["mid"]
groups = cl.getGroup(msg.to)
pending = groups.invitee
targets = []
for s in groups.members:
if _name in s.displayName:
print "[Target] Stealed"
break
else:
targets.append(copy)
if targets == []:
pass
else:
for target in targets:
try:
cl.findAndAddContactsByMid(target)
contact = cl.getContact(target)
cu = cl.channel.getCover(target)
path = str(cu)
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
cl.sendText(msg.to,"Nama :\n" + contact.displayName + "\n\nMid :\n" + msg.contentMetadata["mid"] + "\n\nBio :\n" + contact.statusMessage)
cl.sendText(msg.to,"Profile Picture " + contact.displayName)
cl.sendImageWithURL(msg.to,image)
cl.sendText(msg.to,"Cover " + contact.displayName)
cl.sendImageWithURL(msg.to,path)
wait["steal"] = False
break
except:
pass
if msg.contentType == 13:
if wait["gift"] == True:
_name = msg.contentMetadata["displayName"]
copy = msg.contentMetadata["mid"]
groups = cl.getGroup(msg.to)
pending = groups.invitee
targets = []
for s in groups.members:
if _name in s.displayName:
print "[Target] Gift"
break
else:
targets.append(copy)
if targets == []:
pass
else:
for target in targets:
try:
cl.sendText(msg.to,"Gift Sudah Terkirim!")
msg.contentType = 9
msg.contentMetadata= {'PRDTYPE': 'STICKER',
'STKVER': '1',
'MSGTPL': '1',
'STKPKGID': '1296261'}
msg.to = target
msg.text = None
cl.sendMessage(msg)
wait['gift'] = False
break
except:
msg.contentMetadata = {'mid': target}
wait["gift"] = False
break
if msg.contentType == 13:
if wait['invite'] == True:
_name = msg.contentMetadata["displayName"]
invite = msg.contentMetadata["mid"]
                groups = cl.getGroup(msg.to)  # one lookup suffices; the original re-fetched this for every bot
pending = groups.invitee
targets = []
for s in groups.members:
if _name in s.displayName:
random.choice(KAC).sendText(msg.to, _name + " Berada DiGrup Ini")
else:
targets.append(invite)
if targets == []:
pass
else:
for target in targets:
try:
cl.findAndAddContactsByMid(target)
ki.findAndAddContactsByMid(target)
kk.findAndAddContactsByMid(target)
kc.findAndAddContactsByMid(target)
kr.findAndAddContactsByMid(target)
random.choice(KAC).inviteIntoGroup(msg.to,[target])
random.choice(KAC).sendText(msg.to,"Invite " + _name)
wait['invite'] = False
break
except:
random.choice(KAC).sendText(msg.to,"Limit Invite")
wait['invite'] = False
break
elif msg.text in ["Key creator","help creator","Help creator"]:
cl.sendText(msg.to,creatorMessage)
elif msg.text in ["Key group","help group","Help group"]:
cl.sendText(msg.to,groupMessage)
elif msg.text in ["Key","help","Help"]:
cl.sendText(msg.to,helpMessage)
elif msg.text in ["Key self","help self","Help self"]:
cl.sendText(msg.to,selfMessage)
elif msg.text in ["Key bot","help bot","Help bot"]:
cl.sendText(msg.to,botMessage)
elif msg.text in ["Key set","help set","Help set"]:
cl.sendText(msg.to,setMessage)
elif msg.text in ["Key media","help media","Help media"]:
cl.sendText(msg.to,mediaMessage)
elif msg.text in ["Key admin","help admin","Help admin"]:
cl.sendText(msg.to,adminMessage)
elif msg.text in ["Key protect","help protect","Help protect"]:
cl.sendText(msg.to,protectMessage)
elif msg.text in ["List group"]:
gid = cl.getGroupIdsJoined()
h = ""
jml = 0
for i in gid:
gn = cl.getGroup(i).name
h += "♦【%s】\n" % (gn)
jml += 1
cl.sendText(msg.to,"=======[List Group]=======\n"+ h +"\nTotal Group: "+str(jml))
elif "Ban group: " in msg.text:
grp = msg.text.replace("Ban group: ","")
gid = cl.getGroupIdsJoined()
if msg.from_ in admin:
for i in gid:
h = cl.getGroup(i).name
if h == grp:
wait["BlGroup"][i]=True
cl.sendText(msg.to, "Success Ban Group : "+grp)
else:
pass
else:
cl.sendText(msg.to, "Only Admin")
elif msg.text in ["List ban","List ban group"]:
if msg.from_ in admin:
if wait["BlGroup"] == {}:
random.choice(KAC).sendText(msg.to,"Tidak Ada")
else:
mc = ""
for gid in wait["BlGroup"]:
mc += "-> " +cl.getGroup(gid).name + "\n"
random.choice(KAC).sendText(msg.to,"===[Ban Group]===\n"+mc)
else:
cl.sendText(msg.to, "Khusus Admin")
elif msg.text in ["Del ban: "]:
if msg.from_ in admin:
ng = msg.text.replace("Del ban: ","")
for gid in wait["BlGroup"]:
if cl.getGroup(gid).name == ng:
del wait["BlGroup"][gid]
cl.sendText(msg.to, "Success del ban "+ng)
else:
pass
else:
cl.sendText(msg.to, "Only Admin")
elif "Join group: " in msg.text:
ng = msg.text.replace("Join group: ","")
            gid = cl.getGroupIdsJoined()  # the original reassigned this per bot; only the last value was ever used
try:
if msg.from_ in Creator:
for i in gid:
                            h = cl.getGroup(i).name  # one lookup suffices; the original overwrote this five times
if h == ng:
                                random.choice(KAC).inviteIntoGroup(i, Creator)  # fixed: Creator is already a list of mids
cl.sendText(msg.to,"Success Join To ["+ h +"] Group")
else:
pass
else:
cl.sendText(msg.to,"Only Admin")
except Exception as e:
cl.sendText(msg.to, str(e))
elif "Leave group: " in msg.text:
ng = msg.text.replace("Leave group: ","")
gid = cl.getGroupIdsJoined()
if msg.from_ in Creator:
for i in gid:
h = cl.getGroup(i).name
if h == ng:
cl.sendText(i,"Bot Di Paksa Keluar Oleh Owner!")
cl.leaveGroup(i)
ki.leaveGroup(i)
kk.leaveGroup(i)
kc.leaveGroup(i)
kr.leaveGroup(i)
cl.sendText(msg.to,"Success Left ["+ h +"] group")
else:
pass
else:
cl.sendText(msg.to,"Only Admin")
elif "Leave all group" == msg.text:
gid = cl.getGroupIdsJoined()
if msg.from_ in Creator:
for i in gid:
cl.sendText(i,"Bot Di Paksa Keluar Oleh Owner!")
cl.leaveGroup(i)
ki.leaveGroup(i)
kk.leaveGroup(i)
kc.leaveGroup(i)
kr.leaveGroup(i)
cl.sendText(msg.to,"Success Leave All Group")
else:
cl.sendText(msg.to,"Only Admin")
elif "Pict group: " in msg.text:
saya = msg.text.replace('Pict group: ','')
gid = cl.getGroupIdsJoined()
for i in gid:
h = cl.getGroup(i).name
gna = cl.getGroup(i)
if h == saya:
cl.sendImageWithURL(msg.to,"http://dl.profile.line.naver.jp/"+ gna.pictureStatus)
elif msg.text in ["cancelall","Cancelall"]:
if msg.toType == 2:
X = cl.getGroup(msg.to)
if X.invitee is not None:
gInviMids = [contact.mid for contact in X.invitee]
cl.cancelGroupInvitation(msg.to, gInviMids)
else:
cl.sendText(msg.to,"Tidak Ada Yang Pending")
else:
cl.sendText(msg.to,"Tidak Bisa Digunakan Diluar Group")
elif msg.text in ["Ourl","Url on"]:
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.preventJoinByTicket = False
cl.updateGroup(X)
cl.sendText(msg.to,"Url Sudah Aktif")
else:
cl.sendText(msg.to,"Can not be used outside the group")
elif msg.text in ["Curl","Url off"]:
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.preventJoinByTicket = True
cl.updateGroup(X)
cl.sendText(msg.to,"Url Sudah Di Nonaktifkan")
else:
cl.sendText(msg.to,"Can not be used outside the group")
elif msg.text in ["Join on","Autojoin on"]:
if msg.from_ in admin:
wait["AutoJoin"] = True
wait["AutoJoinCancel"] = False
cl.sendText(msg.to,"Auto Join Sudah Aktif")
else:
cl.sendText(msg.to,"Only Admin")
elif msg.text in ["Join off","Autojoin off"]:
if msg.from_ in admin:
wait["AutoJoin"] = False
cl.sendText(msg.to,"Auto Join Sudah Di Nonaktifkan")
else:
cl.sendText(msg.to,"Only Admin")
elif msg.text in ["Joincancel on","Autojoincancel on"]:
if msg.from_ in admin:
wait["AutoJoinCancel"] = True
wait["AutoJoin"] = False
cl.sendText(msg.to,"Auto Join Cancel Sudah Aktif")
else:
cl.sendText(msg.to,"Only Admin")
elif msg.text in ["Joincancel off","Autojoincancel off"]:
if msg.from_ in admin:
wait["AutoJoinCancel"] = False
cl.sendText(msg.to,"Auto Join Cancel Sudah Di Nonaktifkan")
else:
cl.sendText(msg.to,"Only Admin")
elif msg.text in ["Respon on"]:
if msg.from_ in admin:
wait["detectMention"] = True
wait["kickMention"] = False
cl.sendText(msg.to,"Auto Respon Sudah Aktif")
else:
cl.sendText(msg.to,"Only Admin")
elif msg.text in ["Respon off"]:
if msg.from_ in admin:
wait["detectMention"] = False
cl.sendText(msg.to,"Auto Respon Sudah Off")
else:
cl.sendText(msg.to,"Only Admin")
elif msg.text in ["Responkick on"]:
if msg.from_ in admin:
wait["kickMention"] = True
wait["detectMention"] = False
cl.sendText(msg.to,"Auto Respon Kick Sudah Aktif")
else:
cl.sendText(msg.to,"Only Admin")
elif msg.text in ["Responkick off"]:
if msg.from_ in admin:
wait["kickMention"] = False
cl.sendText(msg.to,"Auto Respon Kick Sudah Off")
else:
cl.sendText(msg.to,"Only Admin")
elif msg.text in ["Leave on"]:
if msg.from_ in admin:
wait["Leave"] = True
cl.sendText(msg.to,"Leave Sudah Aktif")
else:
cl.sendText(msg.to,"Only Admin")
elif msg.text in ["Autocancel on"]:
if msg.from_ in admin:
wait["AutoCancel"][msg.to] = True
wait["AutoCancelon"] = True
cl.sendText(msg.to,"Auto Cancel Sudah Aktif")
print wait["AutoCancel"]
else:
cl.sendText(msg.to,"Only Admin")
elif msg.text in ["Autocancel off"]:
if msg.from_ in admin:
wait["AutoCancel"][msg.to] = False
wait["AutoCancelon"] = False
cl.sendText(msg.to,"Auto Cancel Sudah Di Nonaktifkan")
print wait["AutoCancel"]
else:
cl.sendText(msg.to,"Only Admin")
elif msg.text in ["Joinkick on"]:
if msg.from_ in admin:
wait["joinkick"] = True
wait["Sambutan"] = False
cl.sendText(msg.to,"Join Kick Sudah Aktif")
else:
cl.sendText(msg.to,"Only Admin")
elif msg.text in ["Joinkick off"]:
if msg.from_ in admin:
wait["joinkick"] = False
cl.sendText(msg.to,"Join Kick Sudah Di Nonaktifkan")
else:
cl.sendText(msg.to,"Only Admin")
elif msg.text in ["Invitepro on","Inviteprotect on"]:
if msg.from_ in admin:
wait["inviteprotect"] = True
cl.sendText(msg.to,"Invite Protect Sudah Aktif")
else:
cl.sendText(msg.to,"Only Admin")
elif msg.text in ["Invitepro off","Inviteprotect off"]:
if msg.from_ in admin:
wait["inviteprotect"] = False
cl.sendText(msg.to,"Invite Protect Sudah Di Nonaktifkan")
else:
cl.sendText(msg.to,"Only Admin")
elif "Qr on" in msg.text:
if msg.from_ in admin:
wait["Qr"][msg.to] = True
wait["Qron"] = True
cl.sendText(msg.to,"QR Protect Sudah Aktif")
print wait["Qr"]
else:
cl.sendText(msg.to,"Only Admin")
elif "Qr off" in msg.text:
if msg.from_ in admin:
wait["Qr"][msg.to] = False
wait["Qron"] = False
cl.sendText(msg.to,"Qr Protect Sudah Di Nonaktifkan")
print wait["Qr"]
else:
cl.sendText(msg.to,"Only Admin")
elif msg.text in ["Autokick on"]:
if msg.from_ in admin:
wait["AutoKick"][msg.to] = True
wait["AutoKickon"] = True
cl.sendText(msg.to,"Auto Kick Sudah Aktif")
print wait["AutoKick"]
else:
cl.sendText(msg.to,"Only Admin")
elif msg.text in ["Autokick off"]:
if msg.from_ in admin:
wait["AutoKick"][msg.to] = False
wait["AutoKickon"] = False
cl.sendText(msg.to,"Auto Kick Sudah Di Nonaktifkan")
print wait["AutoKick"]
else:
cl.sendText(msg.to,"Only Admin")
elif msg.text in ["Ghost on"]:
if msg.from_ in admin:
wait["Ghost"] = True
cl.sendText(msg.to,"Ghost Sudah Aktif")
else:
cl.sendText(msg.to,"Only Admin")
elif msg.text in ["Ghost off"]:
if msg.from_ in admin:
wait["Ghost"] = False
cl.sendText(msg.to,"Ghost Sudah Di Nonaktifkan")
else:
cl.sendText(msg.to,"Only Admin")
elif msg.text in ["Allprotect on"]:
if msg.from_ in admin:
wait["AutoCancel"][msg.to] = True
wait["AutoCancelon"] = True
wait["inviteprotect"] = True
wait["joinkick"] = True
wait["AutoKick"][msg.to] = True
wait["AutoKickon"] = True
wait["Qr"][msg.to] = True
wait["Qron"] = True
wait["Ghost"] = True
cl.sendText(msg.to,"All Protect Sudah Aktif Semua")
print wait["AutoCancel"]
print wait["AutoKick"]
print wait["Qr"]
else:
cl.sendText(msg.to,"Only Admin")
elif msg.text in ["Allprotect off"]:
if msg.from_ in admin:
wait["AutoCancel"][msg.to] = False
wait["AutoCancelon"] = False
wait["inviteprotect"] = False
wait["joinkick"] = False
wait["AutoKick"][msg.to] = False
wait["AutoKickon"] = False
wait["Qr"][msg.to] = False
wait["Qron"] = False
wait["Ghost"] = False
cl.sendText(msg.to,"All Protect Sudah Di Nonaktifkan Semua")
print wait["AutoCancel"]
print wait["AutoKick"]
print wait["Qr"]
else:
cl.sendText(msg.to,"Only Admin")
elif msg.text in ["K on","Contact on"]:
wait["Contact"] = True
cl.sendText(msg.to,"Contact Sudah Aktif")
elif msg.text in ["K off","Contact off"]:
wait["Contact"] = False
cl.sendText(msg.to,"Contact Sudah Di Nonaktifkan")
elif msg.text in ["Alwaysread on"]:
wait["alwaysRead"] = True
cl.sendText(msg.to,"Always Read Sudah Aktif")
elif msg.text in ["Alwaysread off"]:
wait["alwaysRead"] = False
cl.sendText(msg.to,"Always Read Sudah Di Nonaktifkan")
elif msg.text in ["Sambutan on"]:
if wait["Sambutan"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Sambutan Di Aktifkanヾ(*´∀`*)ノ")
else:
wait["Sambutan"] = True
wait["joinkick"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Sudah Onヽ(´▽`)/")
elif msg.text in ["Sambutan off"]:
if wait["Sambutan"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Sambutan Di Nonaktifkan( ^∇^)")
else:
wait["Sambutan"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Sudah Off(p′︵‵。)")
elif "Sider on" in msg.text:
try:
del cctv['point'][msg.to]
del cctv['sidermem'][msg.to]
del cctv['cyduk'][msg.to]
except:
pass
cctv['point'][msg.to] = msg.id
cctv['sidermem'][msg.to] = ""
cctv['cyduk'][msg.to]=True
wait["Sider"] = True
cl.sendText(msg.to,"Siap On Cek Sider")
elif "Sider off" in msg.text:
if msg.to in cctv['point']:
cctv['cyduk'][msg.to]=False
wait["Sider"] = False
cl.sendText(msg.to, "Cek Sider Off")
else:
cl.sendText(msg.to, "Heh Belom Di Set")
elif msg.text in ["Status"]:
md = ""
if wait["Sambutan"] == True: md+="╠➩✔️ Sambutan : On\n"
else:md+="╠➩❌ Sambutan : Off\n"
if wait["joinkick"] == True: md+="╠➩✔️ Join Kick : On\n"
else:md+="╠➩❌ Join Kick : Off\n"
if wait["AutoJoin"] == True: md+="╠➩✔️ Auto Join : On\n"
else: md +="╠➩❌ Auto Join : Off\n"
if wait["AutoJoinCancel"] == True: md+="╠➩✔️ Auto Join Cancel : On\n"
else: md +="╠➩❌ Auto Join Cancel : Off\n"
if wait["Leave"] == True: md+="╠➩✔️ Leave : On\n"
else: md +="╠➩❌ Leave : Off\n"
if wait["Contact"] == True: md+="╠➩✔️ Info Contact : On\n"
else: md+="╠➩❌ Info Contact : Off\n"
if wait["AutoCancelon"] == True:md+="╠➩✔️ Auto Cancel : On\n"
else: md+= "╠➩❌ Auto Cancel : Off\n"
if wait["inviteprotect"] == True:md+="╠➩✔️ Invite Protect : On\n"
else: md+= "╠➩❌ Invite Protect : Off\n"
if wait["Qron"] == True: md+="╠➩✔️ Qr Protect : On\n"
else:md+="╠➩❌ Qr Protect : Off\n"
if wait["AutoKickon"] == True: md+="╠➩✔️ Auto Kick : On\n"
else:md+="╠➩❌ Auto Kick : Off\n"
if wait["Ghost"] == True: md+="╠➩✔️ Ghost : On\n"
else:md+="╠➩❌ Ghost : Off\n"
if wait["alwaysRead"] == True: md+="╠➩✔️ Always Read : On\n"
else:md+="╠➩❌ Always Read: Off\n"
if wait["detectMention"] == True: md+="╠➩✔️ Auto Respon : On\n"
else:md+="╠➩❌ Auto Respon : Off\n"
if wait["kickMention"] == True: md+="╠➩✔️ Auto Respon Kick : On\n"
else:md+="╠➩❌ Auto Respon Kick : Off\n"
if wait["Sider"] == True: md+="╠➩✔️ Auto Sider : On\n"
else:md+="╠➩❌ Auto Sider: Off\n"
if wait["Simi"] == True: md+="╠➩✔️ Simisimi : On\n"
else:md+="╠➩❌ Simisimi: Off\n"
cl.sendText(msg.to,"╔═════════════════════════\n""║ ☆☞ S T A T U S ☜☆\n""╠═════════════════════════\n"+md+"╚═════════════════════════")
elif msg.text in ["Gift","gift"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58',
'PRDTYPE': 'THEME',
'MSGTPL': '5'}
msg.text = None
cl.sendMessage(msg)
elif msg.text in ["All gift"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58',
'PRDTYPE': 'THEME',
'MSGTPL': '5'}
msg.text = None
ki.sendMessage(msg)
kk.sendMessage(msg)
kc.sendMessage(msg)
elif msg.text in ["TC1 Gift","TC1 gift"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': '696d7046-843b-4ed0-8aac-3113ed6c0733',
'PRDTYPE': 'THEME',
'MSGTPL': '6'}
msg.text = None
ki.sendMessage(msg)
elif msg.text in ["TC2 Gift","TC2 gift"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': '8fe8cdab-96f3-4f84-95f1-6d731f0e273e',
'PRDTYPE': 'THEME',
'MSGTPL': '7'}
msg.text = None
kk.sendMessage(msg)
elif msg.text in ["TC3 Gift","TC3 gift"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': 'ae3d9165-fab2-4e70-859b-c14a9d4137c4',
'PRDTYPE': 'THEME',
'MSGTPL': '8'}
msg.text = None
kc.sendMessage(msg)
elif "Gift1 " in msg.text:
msg.contentType = 13
nk0 = msg.text.replace("Gift1 ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("@","")
nk3 = nk2.rstrip()
_name = nk3
gs = cl.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
cl.sendText(msg.to,_name + " Check Your Gift")
msg.contentType = 9
msg.contentMetadata= {'PRDTYPE': 'STICKER',
'STKVER': '1',
'MSGTPL': '1',
'STKPKGID': '1380280'}
msg.to = target
msg.text = None
cl.sendMessage(msg)
except:
msg.contentMetadata = {'mid': target}
elif "Gift2 " in msg.text:
msg.contentType = 13
nk0 = msg.text.replace("Gift2 ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("@","")
nk3 = nk2.rstrip()
_name = nk3
gs = cl.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
cl.sendText(msg.to,_name + " Check Your Gift")
msg.contentType = 9
msg.contentMetadata= {'PRDTYPE': 'STICKER',
'STKVER': '1',
'MSGTPL': '2',
'STKPKGID': '1360738'}
msg.to = target
msg.text = None
cl.sendMessage(msg)
except:
msg.contentMetadata = {'mid': target}
elif "Gift3 " in msg.text:
msg.contentType = 13
nk0 = msg.text.replace("Gift3 ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("@","")
nk3 = nk2.rstrip()
_name = nk3
gs = cl.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
cl.sendText(msg.to,_name + " Check Your Gift")
msg.contentType = 9
msg.contentMetadata= {'PRDTYPE': 'STICKER',
'STKVER': '1',
'MSGTPL': '3',
'STKPKGID': '1395389'}
msg.to = target
msg.text = None
cl.sendMessage(msg)
except:
msg.contentMetadata = {'mid': target}
elif "Gift4 " in msg.text:
msg.contentType = 13
nk0 = msg.text.replace("Gift4 ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("@","")
nk3 = nk2.rstrip()
_name = nk3
gs = cl.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
cl.sendText(msg.to,_name + " Check Your Gift")
msg.contentType = 9
msg.contentMetadata= {'PRDTYPE': 'STICKER',
'STKVER': '1',
'MSGTPL': '4',
'STKPKGID': '1329191'}
msg.to = target
msg.text = None
cl.sendMessage(msg)
except:
msg.contentMetadata = {'mid': target}
elif "Gift5 " in msg.text:
msg.contentType = 13
nk0 = msg.text.replace("Gift5 ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("@","")
nk3 = nk2.rstrip()
_name = nk3
gs = cl.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
cl.sendText(msg.to,_name + " Check Your Gift")
msg.contentType = 9
msg.contentMetadata= {'PRDTYPE': 'STICKER',
'STKVER': '1',
'MSGTPL': '1',
'STKPKGID': '9057'}
msg.to = target
msg.text = None
cl.sendMessage(msg)
except:
msg.contentMetadata = {'mid': target}
elif "Gift6 " in msg.text:
msg.contentType = 13
nk0 = msg.text.replace("Gift6 ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("@","")
nk3 = nk2.rstrip()
_name = nk3
gs = cl.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
cl.sendText(msg.to,_name + " Check Your Gift")
msg.contentType = 9
msg.contentMetadata= {'PRDTYPE': 'STICKER',
'STKVER': '1',
'MSGTPL': '2',
'STKPKGID': '9167'}
msg.to = target
msg.text = None
cl.sendMessage(msg)
except:
msg.contentMetadata = {'mid': target}
elif "Gift7 " in msg.text:
msg.contentType = 13
nk0 = msg.text.replace("Gift7 ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("@","")
nk3 = nk2.rstrip()
_name = nk3
gs = cl.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
cl.sendText(msg.to,_name + " Check Your Gift")
msg.contentType = 9
msg.contentMetadata= {'PRDTYPE': 'STICKER',
'STKVER': '1',
'MSGTPL': '3',
'STKPKGID': '7334'}
msg.to = target
msg.text = None
cl.sendMessage(msg)
except:
msg.contentMetadata = {'mid': target}
elif "Gift8 " in msg.text:
msg.contentType = 13
nk0 = msg.text.replace("Gift8 ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("@","")
nk3 = nk2.rstrip()
_name = nk3
gs = cl.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
cl.sendText(msg.to,_name + " Check Your Gift")
msg.contentType = 9
msg.contentMetadata= {'PRDTYPE': 'STICKER',
'STKVER': '1',
'MSGTPL': '1',
'STKPKGID': '1380280'}
msg.to = target
msg.text = None
cl.sendMessage(msg)
except:
msg.contentMetadata = {'mid': target}
elif "Gift9 " in msg.text:
msg.contentType = 13
nk0 = msg.text.replace("Gift9 ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("@","")
nk3 = nk2.rstrip()
_name = nk3
gs = cl.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
cl.sendText(msg.to,_name + " Check Your Gift")
msg.contentType = 9
msg.contentMetadata= {'PRDTYPE': 'STICKER',
'STKVER': '1',
'MSGTPL': '4',
'STKPKGID': '1405277'}
msg.to = target
msg.text = None
cl.sendMessage(msg)
except:
msg.contentMetadata = {'mid': target}
elif "Gift10 " in msg.text:
msg.contentType = 13
nk0 = msg.text.replace("Gift10 ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("@","")
nk3 = nk2.rstrip()
_name = nk3
gs = cl.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
cl.sendText(msg.to,_name + " Check Your Gift")
msg.contentType = 9
msg.contentMetadata= {'PRDTYPE': 'STICKER',
'STKVER': '1',
'MSGTPL': '1',
'STKPKGID': '1296261'}
msg.to = target
msg.text = None
cl.sendMessage(msg)
except:
msg.contentMetadata = {'mid': target}
elif msg.text.lower() in ["wkwkwk","wkwk","hahaha","haha"]:
msg.contentType = 7
msg.contentMetadata={'STKID': '100',
'STKPKGID': '1',
'STKVER': '100'}
msg.text = None
cl.sendMessage(msg)
elif msg.text.lower() in ["hehehe","hehe"]:
msg.contentType = 7
msg.contentMetadata={'STKID': '10',
'STKPKGID': '1',
'STKVER': '100'}
msg.text = None
cl.sendMessage(msg)
elif msg.text.lower() in ["galau"]:
msg.contentType = 7
msg.contentMetadata={'STKID': '9',
'STKPKGID': '1',
'STKVER': '100'}
msg.text = None
cl.sendMessage(msg)
elif msg.text.lower() in ["you","kau","kamu"]:
msg.contentType = 7
msg.contentMetadata={'STKID': '7',
'STKPKGID': '1',
'STKVER': '100'}
msg.text = None
cl.sendMessage(msg)
elif msg.text.lower() in ["marah","hadeuh","hadeh"]:
msg.contentType = 7
msg.contentMetadata={'STKID': '6',
'STKPKGID': '1',
'STKVER': '100'}
msg.text = None
cl.sendMessage(msg)
elif msg.text.lower() in ["please","pliss","mohon","tolong"]:
msg.contentType = 7
msg.contentMetadata={'STKID': '4',
'STKPKGID': '1',
'STKVER': '100'}
msg.text = None
cl.sendMessage(msg)
elif msg.text.lower() in ["haa","haaa","kaget"]:
msg.contentType = 7
msg.contentMetadata={'STKID': '3',
'STKPKGID': '1',
'STKVER': '100'}
msg.text = None
cl.sendMessage(msg)
elif msg.text.lower() in ["lucu","ngakak","lol"]:
msg.contentType = 7
msg.contentMetadata={'STKID': '110',
'STKPKGID': '1',
'STKVER': '100'}
msg.text = None
cl.sendMessage(msg)
elif msg.text.lower() in ["hmm","hmmm"]:
msg.contentType = 7
msg.contentMetadata={'STKID': '101',
'STKPKGID': '1',
'STKVER': '100'}
msg.text = None
cl.sendMessage(msg)
elif msg.text.lower() in ["tidur"]:
msg.contentType = 7
msg.contentMetadata={'STKID': '1',
'STKPKGID': '1',
'STKVER': '100'}
msg.text = None
cl.sendMessage(msg)
elif msg.text.lower() in ["gemes"]:
msg.contentType = 7
msg.contentMetadata={'STKID': '2',
'STKPKGID': '1',
'STKVER': '100'}
msg.text = None
cl.sendMessage(msg)
elif msg.text.lower() in ["cantik","imut"]:
msg.contentType = 7
msg.contentMetadata={'STKID': '5',
'STKPKGID': '1',
'STKVER': '100'}
msg.text = None
cl.sendMessage(msg)
elif msg.text.lower() in ["nyanyi","lalala"]:
msg.contentType = 7
msg.contentMetadata={'STKID': '11',
'STKPKGID': '1',
'STKVER': '100'}
msg.text = None
cl.sendMessage(msg)
elif msg.text.lower() in ["gugup"]:
msg.contentType = 7
msg.contentMetadata={'STKID': '8',
'STKPKGID': '1',
'STKVER': '100'}
msg.text = None
cl.sendMessage(msg)
elif msg.text.lower() in ["ok","oke","okay","oce","okee","sip","siph"]:
msg.contentType = 7
msg.contentMetadata={'STKID': '13',
'STKPKGID': '1',
'STKVER': '100'}
msg.text = None
cl.sendMessage(msg)
elif msg.text.lower() in ["mantab","mantap","nice","keren"]:
msg.contentType = 7
msg.contentMetadata={'STKID': '14',
'STKPKGID': '1',
'STKVER': '100'}
msg.text = None
cl.sendMessage(msg)
elif msg.text.lower() in ["ngejek"]:
msg.contentType = 7
msg.contentMetadata={'STKID': '15',
'STKPKGID': '1',
'STKVER': '100'}
msg.text = None
cl.sendMessage(msg)
elif msg.text.lower() in ["nangis","sedih"]:
msg.contentType = 7
msg.contentMetadata={'STKID': '16',
'STKPKGID': '1',
'STKVER': '100'}
msg.text = None
cl.sendMessage(msg)
elif msg.text.lower() in ["woi","kampret"]:
msg.contentType = 7
msg.contentMetadata={'STKID': '102',
'STKPKGID': '1',
'STKVER': '100'}
msg.text = None
cl.sendMessage(msg)
elif msg.text.lower() in ["huft"]:
msg.contentType = 7
msg.contentMetadata={'STKID': '104',
'STKPKGID': '1',
'STKVER': '100'}
msg.text = None
cl.sendMessage(msg)
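            # Tag all: LINE encodes mentions as MENTIONEES entries whose S/E
            # values are character offsets of each "@nrik " token in msg.text.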
elif msg.text in ["Tagall","Tag all"]:
group = cl.getGroup(msg.to)
nama = [contact.mid for contact in group.members]
cb = ""
cb2 = ""
strt = int(0)
akh = int(0)
for md in nama:
akh = akh + int(6)
cb += """{"S":"""+json.dumps(str(strt))+""","E":"""+json.dumps(str(akh))+""","M":"""+json.dumps(md)+"},"""
strt = strt + int(7)
akh = akh + 1
cb2 += "@nrik \n"
cb = (cb[:int(len(cb)-1)])
msg.contentType = 0
msg.text = cb2
msg.contentMetadata ={'MENTION':'{"MENTIONEES":['+cb+']}','EMTVER':'4'}
try:
cl.sendMessage(msg)
except Exception as error:
print error
elif msg.text in ["Setview","Setpoint","Cctv"]:
subprocess.Popen("echo '' > dataSeen/"+msg.to+".txt", shell=True, stdout=subprocess.PIPE)
cl.sendText(msg.to, "☆Checkpoint Checked☆")
print "Setview"
elif msg.text in ["Viewseen","Check","Ciduk","Cyduk"]:
lurkGroup = ""
dataResult, timeSeen, contacts, userList, timelist, recheckData = [], [], [], [], [], []
with open('dataSeen/'+msg.to+'.txt','r') as rr:
contactArr = rr.readlines()
for v in xrange(len(contactArr) -1,0,-1):
num = re.sub(r'\n', "", contactArr[v])
contacts.append(num)
pass
contacts = list(set(contacts))
for z in range(len(contacts)):
arg = contacts[z].split('|')
userList.append(arg[0])
timelist.append(arg[1])
uL = list(set(userList))
for ll in range(len(uL)):
try:
getIndexUser = userList.index(uL[ll])
timeSeen.append(time.strftime("%H:%M:%S", time.localtime(int(timelist[getIndexUser]) / 1000)))
recheckData.append(userList[getIndexUser])
                except IndexError:
                    pass  # fixed: the original appended to an undefined 'conName' here
contactId = cl.getContacts(recheckData)
for v in range(len(recheckData)):
dataResult.append(contactId[v].displayName + ' ('+timeSeen[v]+')')
pass
if len(dataResult) > 0:
tukang = "╔═════════════════════════\n║ ☆☞ LIST VIEWERS ☜☆\n╠═════════════════════════\n╠➩"
grp = '\n╠➩ '.join(str(f) for f in dataResult)
total = '\n╠═════════════════════════\n╠➩ Total %i Viewers (%s)' % (len(dataResult), datetime.now().strftime('%H:%M:%S')) + "\n╚═════════════════════════"
cl.sendText(msg.to, "%s %s %s" % (tukang, grp, total))
subprocess.Popen("echo '' > dataSeen/"+msg.to+".txt", shell=True, stdout=subprocess.PIPE)
cl.sendText(msg.to, "☆Auto Checkpoint☆")
else:
cl.sendText(msg.to, "☆Belum Ada Viewers☆")
print "Viewseen"
elif "Kick " in msg.text:
if msg.from_ in admin:
                    if 'MENTION' in msg.contentMetadata.keys():
names = re.findall(r'@(\w+)', msg.text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
print mentionees
for mention in mentionees:
ki.kickoutFromGroup(msg.to,[mention['M']])
elif "Set member: " in msg.text:
if msg.from_ in admin:
jml = msg.text.replace("Set member: ","")
wait["memberscancel"] = int(jml)
cl.sendText(msg.to, "Jumlah minimal member telah di set : "+jml)
elif "Add all" in msg.text:
thisgroup = cl.getGroups([msg.to])
Mids = [contact.mid for contact in thisgroup[0].members]
mi_d = Mids[:33]
cl.findAndAddContactsByMids(mi_d)
cl.sendText(msg.to,"Success Add all")
elif msg.text in ["Invite"]:
wait["invite"] = True
cl.sendText(msg.to,"Send Contact")
elif msg.text in ["Auto like"]:
wait["likeOn"] = True
cl.sendText(msg.to,"Shere Post Kamu Yang Mau Di Like!")
elif msg.text in ["Steal contact"]:
wait["steal"] = True
cl.sendText(msg.to,"Send Contact")
elif msg.text in ["Giftbycontact"]:
wait["gift"] = True
cl.sendText(msg.to,"Send Contact")
elif "Recover" in msg.text:
thisgroup = cl.getGroups([msg.to])
Mids = [contact.mid for contact in thisgroup[0].members]
mi_d = Mids[:33]
cl.createGroup("Recover", mi_d)
cl.sendText(msg.to,"Success recover")
elif ("Gn: " in msg.text):
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.name = msg.text.replace("Gn: ","")
cl.updateGroup(X)
else:
cl.sendText(msg.to,"It can't be used besides the group.")
elif "Kick: " in msg.text:
midd = msg.text.replace("Kick: ","")
kicker = [ki,kk,kc]
if midd not in admin:
random.choice(kicker).kickoutFromGroup(msg.to,[midd])
else:
cl.sendText(msg.to,"Admin Detected")
elif "Invite: " in msg.text:
midd = msg.text.replace("Invite: ","")
cl.findAndAddContactsByMid(midd)
ki.findAndAddContactsByMid(midd)
kk.findAndAddContactsByMid(midd)
kc.findAndAddContactsByMid(midd)
kr.findAndAddContactsByMid(midd)
random.choice(KAC).inviteIntoGroup(msg.to,[midd])
elif "Invite creator" in msg.text:
midd = "u71b6799e1c37868a871d442e67633182"
random.choice(KAC).inviteIntoGroup(msg.to,[midd])
elif msg.text in ["Welcome","welcome","Welkam","welkam","Wc","wc"]:
gs = cl.getGroup(msg.to)
cl.sendText(msg.to,"Selamat Datang Di "+ gs.name)
msg.contentType = 7
msg.contentMetadata={'STKID': '247',
'STKPKGID': '3',
'STKVER': '100'}
msg.text = None
cl.sendMessage(msg)
elif "Bc: " in msg.text:
bc = msg.text.replace("Bc: ","")
gid = cl.getGroupIdsJoined()
if msg.from_ in Creator:
for i in gid:
cl.sendText(i,"=======[BROADCAST]=======\n\n"+bc+"\n\nContact Me : line.me/ti/p/~a_ulul15")
cl.sendText(msg.to,"Success BC BosQ")
else:
cl.sendText(msg.to,"Khusus Admin")
elif msg.text in ["Cancel"]:
gid = cl.getGroupIdsInvited()
for i in gid:
cl.rejectGroupInvitation(i)
cl.sendText(msg.to,"All invitations have been refused")
elif msg.text in ["TC1 Cancel"]:
gid = ki.getGroupIdsInvited()
for i in gid:
ki.rejectGroupInvitation(i)
ki.sendText(msg.to,"All invitations have been refused")
elif msg.text in ["TC2 Cancel"]:
gid = kk.getGroupIdsInvited()
for i in gid:
kk.rejectGroupInvitation(i)
kk.sendText(msg.to,"All invitations have been refused")
elif msg.text in ["TC3 Cancel"]:
gid = kc.getGroupIdsInvited()
for i in gid:
kc.rejectGroupInvitation(i)
kc.sendText(msg.to,"All invitations have been refused")
elif msg.text in ["Gurl"]:
if msg.toType == 2:
x = cl.getGroup(msg.to)
if x.preventJoinByTicket == True:
x.preventJoinByTicket = False
cl.updateGroup(x)
gurl = cl.reissueGroupTicket(msg.to)
cl.sendText(msg.to,"line://ti/g/" + gurl)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can't be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text in ["All join","Join all"]:
if msg.from_ in admin:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.2)
kk.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.2)
kc.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.2)
kr.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.2)
G = cl.getGroup(msg.to)
G.preventJoinByTicket = True
ki.updateGroup(G)
else:
cl.sendText(msg.to,"Sape lu!")
elif msg.text in ["TC1 join"]:
if msg.from_ in admin:
X = cl.getGroup(msg.to)
X.preventJoinByTicket = False
cl.updateGroup(X)
invsend = 0
Ti = cl.reissueGroupTicket(msg.to)
ki.acceptGroupInvitationByTicket(msg.to,Ti)
G = kk.getGroup(msg.to)
G.preventJoinByTicket = True
ki.updateGroup(G)
else:
cl.sendText(msg.to,"Sape lu!")
elif msg.text in ["TC2 join"]:
if msg.from_ in admin:
X = cl.getGroup(msg.to)
X.preventJoinByTicket = False
cl.updateGroup(X)
invsend = 0
Ti = cl.reissueGroupTicket(msg.to)
kk.acceptGroupInvitationByTicket(msg.to,Ti)
G = ki.getGroup(msg.to)
G.preventJoinByTicket = True
kk.updateGroup(G)
else:
cl.sendText(msg.to,"Sape lu!")
elif msg.text in ["TC3 join"]:
if msg.from_ in admin:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
kc.acceptGroupInvitationByTicket(msg.to,Ticket)
G.preventJoinByTicket = True
kc.updateGroup(G)
else:
cl.sendText(msg.to,"Sape lu!")
elif msg.text in ["TC4 join"]:
if msg.from_ in admin:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
kr.acceptGroupInvitationByTicket(msg.to,Ticket)
G.preventJoinByTicket = True
kr.updateGroup(G)
else:
cl.sendText(msg.to,"Sape lu!")
elif msg.text in ["Ghost join"]:
if msg.from_ in admin:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
km.acceptGroupInvitationByTicket(msg.to,Ticket)
G.preventJoinByTicket = True
km.updateGroup(G)
else:
cl.sendText(msg.to,"Sape lu!")
elif msg.text in ["timeline"]:
try:
url = cl.activity(limit=5)
cl.sendText(msg.to,url['result']['posts'][0]['postInfo']['postId'])
except Exception as E:
print E
elif msg.text in ["Bye all"]:
if wait["Leave"] == True:
ki.leaveGroup(msg.to)
kk.leaveGroup(msg.to)
kc.leaveGroup(msg.to)
kr.leaveGroup(msg.to)
else:
cl.sendText(msg.to,"Leavenya Belum On")
elif msg.text in ["@bye","@Bye"]:
if wait["Leave"] == True:
cl.leaveGroup(msg.to)
wait["Leave"] = False
else:
cl.sendText(msg.to,"Bilang Dulu Sama Admin Ku")
elif msg.text in ["Absen"]:
cl.sendText(msg.to,"Pasukan Absen!!")
ki.sendText(msg.to,"TC1 Hadiir \(ˆ▿ˆ)/")
kk.sendText(msg.to,"TC2 Hadiir \(ˆ▿ˆ)/")
kc.sendText(msg.to,"TC3 Hadiir \(ˆ▿ˆ)/")
kr.sendText(msg.to,"Hadiir Semua Kapten \(ˆ▿ˆ)/")
elif msg.text.lower() in ["respon"]:
cl.sendText(msg.to,responsename)
ki.sendText(msg.to,responsename2)
kk.sendText(msg.to,responsename3)
kc.sendText(msg.to,responsename4)
kr.sendText(msg.to,responsename5)
elif msg.text in ["Sp","Speed","speed"]:
start = time.time()
print("Speed")
elapsed_time = time.time() - start
cl.sendText(msg.to, "Tunggu Bentaar BOS....")
cl.sendText(msg.to, "%sseconds" % (elapsed_time))
elif msg.text in ["Speed test"]:
start = time.time()
cl.sendText(msg.to, "Tunggu Bentaar BOS......")
elapsed_time = time.time() - start
cl.sendText(msg.to, "%sseconds" % (elapsed_time))
elif "Nk: " in msg.text:
if msg.from_ in Creator:
X = cl.getGroup(msg.to)
X.preventJoinByTicket = False
cl.updateGroup(X)
invsend = 0
Ti = cl.reissueGroupTicket(msg.to)
kr.acceptGroupInvitationByTicket(msg.to,Ti)
G = kk.getGroup(msg.to)
G.preventJoinByTicket = True
kk.updateGroup(G)
nk0 = msg.text.replace("Nk: ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("@","")
nk3 = nk2.rstrip()
_name = nk3
targets = []
for s in X.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
                        cl.sendText(msg.to,"user does not exist")
pass
else:
for target in targets:
if target not in admin:
kr.kickoutFromGroup(msg.to,[target])
kr.leaveGroup(msg.to)
ki.sendText(msg.to,"Succes BosQ")
kk.sendText(msg.to,"Pakyu~")
else:
cl.sendText(msg.to,"Admin Detected")
else:
cl.sendText(msg.to,"Lu sape!")
elif msg.text in ["Ban"]:
if msg.from_ in admin:
wait["wblacklist"] = True
ki.sendText(msg.to,"send contact")
elif msg.text in ["Unban"]:
if msg.from_ in admin:
wait["dblacklist"] = True
ki.sendText(msg.to,"send contact")
elif "Ban @" in msg.text:
if msg.from_ in admin:
if msg.toType == 2:
print "@Ban by mention"
_name = msg.text.replace("Ban @","")
_nametarget = _name.rstrip(' ')
gs = ki.getGroup(msg.to)
gs = kk.getGroup(msg.to)
gs = kc.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
kc.sendText(msg.to,"Not found")
else:
for target in targets:
if target not in admin:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
ki.sendText(msg.to,"Succes BosQ")
except:
ki.sendText(msg.to,"Error")
else:
cl.sendText(msg.to,"Admin Detected~")
elif msg.text in ["Banlist","Ban list"]:
if msg.from_ in admin:
if wait["blacklist"] == {}:
random.choice(KAC).sendText(msg.to,"Tidak Ada")
else:
mc = ""
for mi_d in wait["blacklist"]:
mc += "->" +cl.getContact(mi_d).displayName + "\n"
random.choice(KAC).sendText(msg.to,"===[Blacklist User]===\n"+mc)
elif "Unban @" in msg.text:
if msg.toType == 2:
print "@Unban by mention"
if msg.from_ in admin:
_name = msg.text.replace("Unban @","")
_nametarget = _name.rstrip(' ')
gs = ki.getGroup(msg.to)
gs = kk.getGroup(msg.to)
gs = kc.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
kk.sendText(msg.to,"Not found")
else:
for target in targets:
try:
del wait["blacklist"][target]
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
ki.sendText(msg.to,"Succes BosQ")
except:
ki.sendText(msg.to,"Succes BosQ")
elif msg.text.lower() == 'clear ban':
if msg.from_ in admin:
wait["blacklist"] = {}
cl.sendText(msg.to,"ヽ( ^ω^)ノ└ ❉Unbanned All Success❉ ┐")
elif msg.text.lower() in ["sayang","chucky"]:
ki.sendText(msg.to,"Apa Sayang :*")
elif msg.text in ["Kill ban"]:
if msg.from_ in admin:
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, gMembMids)
if matched_list == []:
ki.sendText(msg.to,"There was no blacklist user")
return
for jj in matched_list:
random.choice(KAC).kickoutFromGroup(msg.to,[jj])
ki.sendText(msg.to,"Blacklist emang pantas tuk di usir")
else:
cl.sendText(msg.to, "Khusus creator")
elif msg.text in ["Kill"]:
if msg.toType == 2:
if msg.from_ in admin:
group = ki.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, gMembMids)
if matched_list == []:
kk.sendText(msg.to,"Fuck You")
kc.sendText(msg.to,"Fuck You")
return
for jj in matched_list:
try:
klist=[ki,kk,kc]
kicker=random.choice(klist)
kicker.kickoutFromGroup(msg.to,[jj])
print (msg.to,[jj])
except:
pass
elif "Kickall" == msg.text:
if msg.from_ in Creator:
if msg.toType == 2:
print "Kick all member"
_name = msg.text.replace("Kickall","")
gs = ki.getGroup(msg.to)
gs = kk.getGroup(msg.to)
gs = kc.getGroup(msg.to)
ki.sendText(msg.to,"Sampai jumpaa~")
kc.sendText(msg.to,"Dadaaah~")
targets = []
for g in gs.members:
if _name in g.displayName:
targets.append(g.mid)
if targets == []:
ki.sendText(msg.to,"Not found.")
else:
for target in targets:
if target not in admin:
try:
klist=[ki,kk,kc]
kicker=random.choice(klist)
kicker.kickoutFromGroup(msg.to,[target])
                                        print (msg.to,[target])
except Exception as e:
cl.sendText(msg.to,str(e))
cl.inviteIntoGroup(msg.to, targets)
elif msg.text in ["Bot restart","Reboot"]:
if msg.from_ in Creator:
cl.sendText(msg.to, "Bot Has Been Restarted...")
restart_program()
print "@Restart"
else:
cl.sendText(msg.to, "No Access")
elif msg.text in ["Turn off"]:
if msg.from_ in Creator:
try:
import sys
sys.exit()
except:
pass
elif 'Crash' in msg.text:
if msg.from_ in Creator:
msg.contentType = 13
msg.contentMetadata = {'mid': "NADYA,'"}
cl.sendMessage(msg)
elif "Kapten copy @" in msg.text:
print "[COPY] Ok"
_name = msg.text.replace("Kapten copy @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to, "Not Found...")
else:
for target in targets:
try:
cl.CloneContactProfile(target)
cl.sendText(msg.to, "Copied (^_^)")
except Exception as e:
print e
elif "TC1 copy @" in msg.text:
print "[COPY] Ok"
_name = msg.text.replace("TC1 copy @","")
_nametarget = _name.rstrip(' ')
gs = ki.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
ki.sendText(msg.to, "Not Found...")
else:
for target in targets:
try:
ki.CloneContactProfile(target)
ki.sendText(msg.to, "Copied (^_^)")
except Exception as e:
print e
elif "TC2 copy @" in msg.text:
print "[COPY] Ok"
_name = msg.text.replace("TC2 copy @","")
_nametarget = _name.rstrip(' ')
gs = kk.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
kk.sendText(msg.to, "Not Found...")
else:
for target in targets:
try:
kk.CloneContactProfile(target)
kk.sendText(msg.to, "Copied (^_^)")
except Exception as e:
print e
elif "TC3 copy @" in msg.text:
print "[COPY] Ok"
_name = msg.text.replace("TC3 copy @","")
_nametarget = _name.rstrip(' ')
gs = kc.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
kc.sendText(msg.to, "Not Found...")
else:
for target in targets:
try:
kc.CloneContactProfile(target)
kc.sendText(msg.to, "Copied (^_^)")
except Exception as e:
print e
elif "TC4 copy @" in msg.text:
print "[COPY] Ok"
_name = msg.text.replace("TC4 copy @","")
_nametarget = _name.rstrip(' ')
gs = kr.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
kr.sendText(msg.to, "Not Found...")
else:
for target in targets:
try:
kr.CloneContactProfile(target)
kr.sendText(msg.to, "Copied (^_^)")
except Exception as e:
print e
elif msg.text in ["Backup all"]:
try:
ki.updateDisplayPicture(backup2.pictureStatus)
ki.updateProfile(backup2)
kk.updateDisplayPicture(backup3.pictureStatus)
kk.updateProfile(backup3)
kc.updateDisplayPicture(backup4.pictureStatus)
kc.updateProfile(backup4)
kr.updateDisplayPicture(backup5.pictureStatus)
kr.updateProfile(backup5)
cl.updateDisplayPicture(backup1.pictureStatus)
cl.updateProfile(backup1)
cl.sendText(msg.to, "All Done (^_^)")
except Exception as e:
cl.sendText(msg.to, str(e))
elif "/musik " in msg.text:
songname = msg.text.replace("/musik ","")
params = {"songname": songname}
r = requests.get('http://ide.fdlrcn.com/workspace/yumi-apis/joox?' + urllib.urlencode(params))
data = r.text
data = json.loads(data)
for song in data:
abc = song[3].replace('https://','http://')
cl.sendText(msg.to, "Title : " + song[0] + "\nLength : " + song[1] + "\nLink download : " + song[4])
cl.sendText(msg.to, "Lagu " + song[0] + "\nSedang Di Prosses... Tunggu Sebentar ^_^ ")
cl.sendAudioWithURL(msg.to,abc)
cl.sendText(msg.to, "Selamat Mendengarkan Lagu " + song[0])
elif '/lirik ' in msg.text.lower():
try:
songname = msg.text.lower().replace('/lirik ','')
params = {'songname': songname}
r = requests.get('http://ide.fdlrcn.com/workspace/yumi-apis/joox?' + urllib.urlencode(params))
data = r.text
data = json.loads(data)
for song in data:
hasil = 'Lyric Lagu ('
hasil += song[0]
hasil += ')\n\n'
hasil += song[5]
cl.sendText(msg.to, hasil)
except Exception as wak:
cl.sendText(msg.to, str(wak))
elif "/musrik " in msg.text:
songname = msg.text.replace("/musrik ","")
params = {"songname": songname}
r = requests.get('http://ide.fdlrcn.com/workspace/yumi-apis/joox?' + urllib.urlencode(params))
data = r.text
data = json.loads(data)
for song in data:
abc = song[3].replace('https://','http://')
hasil = 'Lyric Lagu ('
hasil += song[0]
hasil += ')\n\n'
hasil += song[5]
cl.sendText(msg.to, "Lagu " + song[0] + "\nSedang Di Prosses... Tunggu Sebentar ^_^ ")
cl.sendAudioWithURL(msg.to,abc)
cl.sendText(msg.to, "Title : " + song[0] + "\nLength : " + song[1] + "\nLink download : " + song[4] +"\n\n" + hasil)
cl.sendText(msg.to, "Selamat Mendengarkan Lagu " + song[0])
elif "Fancytext: " in msg.text:
txt = msg.text.replace("Fancytext: ", "")
cl.kedapkedip(msg.to,txt)
print "[Command] Kedapkedip"
elif "cover @" in msg.text:
if msg.toType == 2:
cover = msg.text.replace("cover @","")
_nametarget = cover.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Not found")
else:
for target in targets:
try:
h = cl.channel.getHome(target)
objId = h["result"]["homeInfo"]["objectId"]
cl.sendImageWithURL(msg.to,"http://dl.profile.line-cdn.net/myhome/c/download.nhn?userid=" + target + "&oid=" + objId)
except Exception as error:
print error
cl.sendText(msg.to,"Upload image failed.")
elif "Cover @" in msg.text:
if msg.toType == 2:
cover = msg.text.replace("Cover @","")
_nametarget = cover.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Not found")
else:
for target in targets:
try:
h = cl.channel.getHome(target)
objId = h["result"]["homeInfo"]["objectId"]
cl.sendImageWithURL(msg.to,"http://dl.profile.line-cdn.net/myhome/c/download.nhn?userid=" + target + "&oid=" + objId)
except Exception as error:
print error
cl.sendText(msg.to,"Upload image failed.")
elif "pp @" in msg.text:
if msg.toType == 2:
cover = msg.text.replace("pp @","")
_nametarget = cover.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Not found")
else:
for target in targets:
try:
h = cl.getContact(target)
cl.sendImageWithURL(msg.to,"http://dl.profile.line-cdn.net/" + h.pictureStatus)
except Exception as error:
print error
cl.sendText(msg.to,"Upload image failed.")
elif "Pp @" in msg.text:
if msg.toType == 2:
cover = msg.text.replace("Pp @","")
_nametarget = cover.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Not found")
else:
for target in targets:
try:
h = cl.getContact(target)
cl.sendImageWithURL(msg.to,"http://dl.profile.line-cdn.net/" + h.pictureStatus)
except Exception as error:
print error
cl.sendText(msg.to,"Upload image failed.")
elif msg.text.lower() in ["van","yog","wan","gong","tep","pap creator"]:
link = ["http://dl.profile.line-cdn.net/0hbPvoismJPX9LFhHY8ztCKHdTMxI8OCw3JXclGzwRYBpgci99fyV2GzwUY01icXl5J3EnHjxBakxj"]
pilih = random.choice(link)
ki.sendImageWithURL(msg.to,pilih)
elif msg.text.lower() in ["van","yog","wan","gong","tep","pap owner","pap creator"]:
link = ["http://dl.profile.line-cdn.net/0hbPvoismJPX9LFhHY8ztCKHdTMxI8OCw3JXclGzwRYBpgci99fyV2GzwUY01icXl5J3EnHjxBakxj"]
pilih = random.choice(link)
ki.sendImageWithURL(msg.to,pilih)
elif "Spam: " in msg.text:
bctxt = msg.text.replace("Spam: ", "")
t = 10
while(t):
random.choice(KAC).sendText(msg.to, (bctxt))
t-=1
elif "Scbc " in msg.text:
bctxt = msg.text.replace("Scbc ", "")
orang = cl.getAllContactIds()
                for manusia in orang:
                    t = 20  # reset per contact so every contact gets 20 messages
                    while(t):
                        cl.sendText(manusia, (bctxt))
                        t-=1
elif "Cbc " in msg.text:
broadcasttxt = msg.text.replace("Cbc ", "")
orang = cl.getAllContactIds()
for manusia in orang:
cl.sendText(manusia, (broadcasttxt))
elif '/ig ' in msg.text.lower():
try:
instagram = msg.text.lower().replace("/ig ","")
html = requests.get('https://www.instagram.com/' + instagram + '/?')
soup = BeautifulSoup(html.text, 'html.parser')
data = soup.find_all('meta', attrs={'property':'og:description'})
text = data[0].get('content').split()
data1 = soup.find_all('meta', attrs={'property':'og:image'})
text1 = data1[0].get('content').split()
nadya = text1[0].replace("s150x150/","")
user = "Name: " + text[-2] + "\n"
user1 = "Username: " + text[-1] + "\n"
followers = "Followers: " + text[0] + "\n"
following = "Following: " + text[2] + "\n"
post = "Post: " + text[4] + "\n"
link = "Link: " + "https://www.instagram.com/" + instagram
detail = "========INSTAGRAM INFO ========\n"
details = "\n========INSTAGRAM INFO ========"
cl.sendText(msg.to, detail + user + user1 + followers + following + post + link + details)
cl.sendImageWithURL(msg.to, nadya)
except Exception as njer:
cl.sendText(msg.to, str(njer))
elif "Checkig " in msg.text:
separate = msg.text.split(" ")
user = msg.text.replace(separate[0] + " ","")
if user.startswith("@"):
user = user.replace("@","")
profile = "https://www.instagram.com/" + user
with requests.session() as x:
x.headers['user-agent'] = 'Mozilla/5.0'
end_cursor = ''
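                    # Instagram embeds the profile feed as JSON in window._sharedData;
                    # scrape it and page through with the end_cursor token (fragile,
                    # markup-dependent).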
for count in range(1, 999):
print('PAGE: ', count)
r = x.get(profile, params={'max_id': end_cursor})
data = re.search(r'window._sharedData = (\{.+?});</script>', r.text).group(1)
j = json.loads(data)
for node in j['entry_data']['ProfilePage'][0]['user']['media']['nodes']:
if node['is_video']:
page = 'https://www.instagram.com/p/' + node['code']
r = x.get(page)
url = re.search(r'"video_url": "([^"]+)"', r.text).group(1)
print(url)
cl.sendVideoWithURL(msg.to,url)
else:
print (node['display_src'])
cl.sendImageWithURL(msg.to,node['display_src'])
end_cursor = re.search(r'"end_cursor": "([^"]+)"', r.text).group(1)
elif 'Youtubelink: ' in msg.text:
try:
                    textToSearch = (msg.text).replace('Youtubelink: ', "").strip()
query = urllib.quote(textToSearch)
url = "https://www.youtube.com/results?search_query=" + query
response = urllib2.urlopen(url)
html = response.read()
soup = BeautifulSoup(html, "html.parser")
results = soup.find(attrs={'class':'yt-uix-tile-link'})
cl.sendText(msg.to,'https://www.youtube.com' + results['href'])
except:
cl.sendText(msg.to,"Could not find it")
elif 'Youtubevideo: ' in msg.text:
try:
textToSearch = (msg.text).replace('Youtubevideo: ', "").strip()
query = urllib.quote(textToSearch)
url = "https://www.youtube.com/results?search_query=" + query
response = urllib2.urlopen(url)
html = response.read()
soup = BeautifulSoup(html, "html.parser")
results = soup.find(attrs={'class': 'yt-uix-tile-link'})
cl.sendVideoWithURL(msg.to,'https://www.youtube.com' + results['href'])
except:
cl.sendText(msg.to, "Could not find it")
elif "Say-id " in msg.text:
say = msg.text.replace("Say-id ","")
lang = 'id'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
cl.sendAudio(msg.to,"hasil.mp3")
elif "Say-en " in msg.text:
say = msg.text.replace("Say-en ","")
lang = 'en'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
cl.sendAudio(msg.to,"hasil.mp3")
elif "Say-jp " in msg.text:
say = msg.text.replace("Say-jp ","")
lang = 'ja'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
cl.sendAudio(msg.to,"hasil.mp3")
elif "Say welcome" in msg.text:
gs = cl.getGroup(msg.to)
say = msg.text.replace("Say welcome","Selamat Datang Di "+ gs.name)
lang = 'id'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
cl.sendAudio(msg.to,"hasil.mp3")
elif msg.text.lower() in ["hi","hai","halo","hallo"]:
beb = "Hi Sayang 😘 " +cl.getContact(msg.from_).displayName + " starry heart"
kr.sendText(msg.to,beb)
elif "playstore " in msg.text.lower():
tob = msg.text.lower().replace("playstore ","")
cl.sendText(msg.to,"Sedang Mencari...")
cl.sendText(msg.to,"Title : "+tob+"\nSource : Google Play\nLink : https://play.google.com/store/search?q=" + tob)
cl.sendText(msg.to,"Tuh Linknya Kak (^_^)")
elif "Mid @" in msg.text:
_name = msg.text.replace("Mid @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
for g in gs.members:
if _nametarget == g.displayName:
random.choice(KAC).sendText(msg.to, g.mid)
else:
pass
elif "/bio " in msg.text:
string = msg.text.replace("/bio ","")
if len(string.decode('utf-8')) <= 500:
profile = cl.getProfile()
profile.statusMessage = string
cl.updateProfile(profile)
ki.updateProfile(profile)
kk.updateProfile(profile)
kc.updateProfile(profile)
kr.updateProfile(profile)
cl.sendText(msg.to,"All Done")
elif "/cnkapten" in msg.text:
if msg.from_ in Creator:
string = msg.text.replace("/cnkapten","Mi Kapten")
if len(string.decode('utf-8')) <= 5000:
profile = cl.getProfile()
profile.displayName = string
cl.updateProfile(profile)
cl.sendText(msg.to,"Done")
elif "/cntc1" in msg.text:
if msg.from_ in Creator:
string = msg.text.replace("/cntc1","Mi TC1")
if len(string.decode('utf-8')) <= 5000:
profile = ki.getProfile()
profile.displayName = string
ki.updateProfile(profile)
ki.sendText(msg.to,"Done")
elif "/cntc2" in msg.text:
if msg.from_ in Creator:
string = msg.text.replace("/cntc2","Mi TC2")
if len(string.decode('utf-8')) <= 5000:
profile = kk.getProfile()
profile.displayName = string
kk.updateProfile(profile)
kk.sendText(msg.to,"Done")
elif "/cntc3" in msg.text:
if msg.from_ in Creator:
string = msg.text.replace("/cntc3","Mi TC3")
if len(string.decode('utf-8')) <= 5000:
profile = kc.getProfile()
profile.displayName = string
kc.updateProfile(profile)
kc.sendText(msg.to,"Done")
elif "/cntc4" in msg.text:
if msg.from_ in Creator:
string = msg.text.replace("/cntc4","Mi TC4")
if len(string.decode('utf-8')) <= 5000:
                        profile = kr.getProfile()
profile.displayName = string
kr.updateProfile(profile)
kr.sendText(msg.to,"Done")
elif "Ulti " in msg.text:
if msg.from_ in Creator:
ulti0 = msg.text.replace("Ulti ","")
ulti1 = ulti0.rstrip()
ulti2 = ulti1.replace("@","")
ulti3 = ulti2.rstrip()
_name = ulti3
gs = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
gs.preventJoinByTicket = False
cl.updateGroup(gs)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
km.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.2)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets ==[]:
                        cl.sendText(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
km.kickoutFromGroup(msg.to,[target])
km.leaveGroup(msg.to)
                                print (msg.to,[target])
except:
                                km.sendText(msg.to,"Ter ELIMINASI....")
km.sendText(msg.to,"WOLES brooo....!!!")
km.leaveGroup(msg.to)
gs = cl.getGroup(msg.to)
gs.preventJoinByTicket = True
cl.updateGroup(gs)
elif msg.text.lower() in ["mymid","myid"]:
middd = "Name : " +cl.getContact(msg.from_).displayName + "\nMid : " +msg.from_
kr.sendText(msg.to,middd)
elif msg.text.lower() in ["me"]:
msg.contentType = 13
msg.contentMetadata = {'mid': msg.from_}
cl.sendMessage(msg)
elif "/apakah " in msg.text:
apk = msg.text.replace("/apakah ","")
rnd = ["Ya","Tidak","Bisa Jadi","Mungkin"]
p = random.choice(rnd)
lang = 'id'
tts = gTTS(text=p, lang=lang)
tts.save("hasil.mp3")
cl.sendAudio(msg.to,"hasil.mp3")
elif "/hari " in msg.text:
apk = msg.text.replace("/hari ","")
rnd = ["Senin","Selasa","Rabu","Kamis","Jumat","Sabtu","Minggu"]
p = random.choice(rnd)
lang = 'id'
tts = gTTS(text=p, lang=lang)
tts.save("hasil.mp3")
cl.sendAudio(msg.to,"hasil.mp3")
elif "/berapa " in msg.text:
apk = msg.text.replace("/berapa ","")
rnd = ['10%','20%','30%','40%','50%','60%','70%','80%','90%','100%','0%']
p = random.choice(rnd)
lang = 'id'
tts = gTTS(text=p, lang=lang)
tts.save("hasil.mp3")
cl.sendAudio(msg.to,"hasil.mp3")
elif "/berapakah " in msg.text:
apk = msg.text.replace("/berapakah ","")
rnd = ['1','2','3','4','5','6','7','8','9','10','Tidak Ada']
p = random.choice(rnd)
lang = 'id'
tts = gTTS(text=p, lang=lang)
tts.save("hasil.mp3")
cl.sendAudio(msg.to,"hasil.mp3")
elif "/kapan " in msg.text:
apk = msg.text.replace("/kapan ","")
rnd = ["kapan kapan","besok","satu abad lagi","Hari ini","Tahun depan","Minggu depan","Bulan depan","Sebentar lagi","Tidak Akan Pernah"]
p = random.choice(rnd)
lang = 'id'
tts = gTTS(text=p, lang=lang)
tts.save("hasil.mp3")
cl.sendAudio(msg.to,"hasil.mp3")
elif msg.text in ["Simisimi on","Simisimi:on"]:
settings["simiSimi"][msg.to] = True
wait["Simi"] = True
cl.sendText(msg.to," Simisimi Di Aktifkan")
elif msg.text in ["Simisimi off","Simisimi:off"]:
settings["simiSimi"][msg.to] = False
wait["Simi"] = False
cl.sendText(msg.to,"Simisimi Di Nonaktifkan")
elif "Image " in msg.text:
search = msg.text.replace("Image ","")
url = 'https://www.google.com/search?espv=2&biw=1366&bih=667&tbm=isch&oq=kuc&aqs=mobile-gws-lite.0.0l5&q=' + search
raw_html = (download_page(url))
items = []
items = items + (_images_get_all_items(raw_html))
path = random.choice(items)
print path
try:
cl.sendImageWithURL(msg.to,path)
except:
pass
elif "Youtubesearch: " in msg.text:
query = msg.text.replace("Youtube ","")
with requests.session() as s:
s.headers['user-agent'] = 'Mozilla/5.0'
url = 'http://www.youtube.com/results'
params = {'search_query': query}
r = s.get(url, params=params)
soup = BeautifulSoup(r.content, 'html.parser')
hasil = ""
for a in soup.select('.yt-lockup-title > a[title]'):
if '&list=' not in a['href']:
hasil += ''.join((a['title'],'\nUrl : http://www.youtube.com' + a['href'],'\n\n'))
cl.sendText(msg.to,hasil)
print '[Command] Youtube Search'
elif "Tr-id " in msg.text:
isi = msg.text.replace("Tr-id ","")
translator = Translator()
hasil = translator.translate(isi, dest='id')
A = hasil.text
A = A.encode('utf-8')
cl.sendText(msg.to, A)
elif "Tr-en " in msg.text:
isi = msg.text.replace("Tr-en ","")
translator = Translator()
hasil = translator.translate(isi, dest='en')
A = hasil.text
A = A.encode('utf-8')
cl.sendText(msg.to, A)
elif "Tr-th " in msg.text:
isi = msg.text.replace("Tr-th ","")
translator = Translator()
hasil = translator.translate(isi, dest='th')
A = hasil.text
A = A.encode('utf-8')
cl.sendText(msg.to, A)
elif "Id@en" in msg.text:
bahasa_awal = 'id'
bahasa_tujuan = 'en'
kata = msg.text.replace("Id@en ","")
url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
agent = {'User-Agent':'Mozilla/5.0'}
cari_hasil = 'class="t0">'
request = urllib2.Request(url, headers=agent)
page = urllib2.urlopen(request).read()
result = page[page.find(cari_hasil)+len(cari_hasil):]
result = result.split("<")[0]
cl.sendText(msg.to,"----Dari Indonesia----\n" + "" + kata + "\n\n----Ke Inggris----\n" + "" + result)
elif "En@id" in msg.text:
bahasa_awal = 'en'
bahasa_tujuan = 'id'
kata = msg.text.replace("En@id ","")
url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
agent = {'User-Agent':'Mozilla/5.0'}
cari_hasil = 'class="t0">'
request = urllib2.Request(url, headers=agent)
page = urllib2.urlopen(request).read()
result = page[page.find(cari_hasil)+len(cari_hasil):]
result = result.split("<")[0]
cl.sendText(msg.to,"----Dari Inggris----\n" + "" + kata + "\n\n----Ke Indonesia----\n" + "" + result)
elif "Id@th" in msg.text:
bahasa_awal = 'id'
bahasa_tujuan = 'th'
kata = msg.text.replace("Id@en ","")
url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
agent = {'User-Agent':'Mozilla/5.0'}
cari_hasil = 'class="t0">'
request = urllib2.Request(url, headers=agent)
page = urllib2.urlopen(request).read()
result = page[page.find(cari_hasil)+len(cari_hasil):]
result = result.split("<")[0]
cl.sendText(msg.to,"----Dari Indonesia----\n" + "" + kata + "\n\n----Ke Thailand----\n" + "" + result)
elif "Th@id" in msg.text:
bahasa_awal = 'th'
bahasa_tujuan = 'id'
kata = msg.text.replace("Id@en ","")
url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
agent = {'User-Agent':'Mozilla/5.0'}
cari_hasil = 'class="t0">'
request = urllib2.Request(url, headers=agent)
page = urllib2.urlopen(request).read()
result = page[page.find(cari_hasil)+len(cari_hasil):]
result = result.split("<")[0]
cl.sendText(msg.to,"----Dari Thailand----\n" + "" + kata + "\n\n----Ke Indonesia----\n" + "" + result)
elif msg.text in ["Friendlist"]:
contactlist = cl.getAllContactIds()
kontak = cl.getContacts(contactlist)
num=1
msgs="═════════List Friend═════════"
for ids in kontak:
msgs+="\n[%i] %s" % (num, ids.displayName)
num=(num+1)
msgs+="\n═════════List Friend═════════\n\nTotal Friend : %i" % len(kontak)
cl.sendText(msg.to, msgs)
elif msg.text in ["Memlist"]:
kontak = cl.getGroup(msg.to)
group = kontak.members
num=1
msgs="═════════List Member═�����═══════-"
for ids in group:
msgs+="\n[%i] %s" % (num, ids.displayName)
num=(num+1)
msgs+="\n═════════List Member═════════\n\nTotal Members : %i" % len(group)
cl.sendText(msg.to, msgs)
elif msg.text in ["Spam"]:
if msg.from_ in admin:
cl.sendText(msg.to,"Aku belum mandi")
ki.sendText(msg.to,"Tak tun tuang")
kk.sendText(msg.to,"Tak tun tuang")
cl.sendText(msg.to,"Tapi masih cantik juga")
ki.sendText(msg.to,"Tak tun tuang")
kk.sendText(msg.to,"Tak tun tuang")
cl.sendText(msg.to,"apalagi kalau sudah mandi")
ki.sendText(msg.to,"Tak tun tuang")
kk.sendText(msg.to,"Pasti cantik sekali")
cl.sendText(msg.to,"yiha")
ki.sendText(msg.to,"Kalau orang lain melihatku")
kk.sendText(msg.to,"Tak tun tuang")
cl.sendText(msg.to,"Badak aku taba bana")
ki.sendText(msg.to,"Tak tun tuang")
kk.sendText(msg.to,"Tak tuntuang")
cl.sendText(msg.to,"Tapi kalau langsuang diidu")
ki.sendText(msg.to,"Tak tun tuang")
kk.sendText(msg.to,"Atagfirullah baunya")
cl.sendText(msg.to,"Males lanjutin ah")
ki.sendText(msg.to,"Sepi bat")
kk.sendText(msg.to,"Iya sepi udah udah")
cl.sendText(msg.to,"Gaada yang denger juga kita nyanyi")
ki.sendText(msg.to,"Nah")
kk.sendText(msg.to,"Mending gua makan dulu")
cl.sendText(msg.to,"Siyap")
ki.sendText(msg.to,"Okeh")
kk.sendText(msg.to,"Katanya owner kita Jomblo ya")
cl.sendText(msg.to,"Iya emang")
ki.sendText(msg.to,"Denger denger si lagi nyari pacar doi")
kk.sendText(msg.to,"Udah ah gosip mulu doain aja biar dapet")
elif "Getvid @" in msg.text:
print "[Command]dp executing"
_name = msg.text.replace("Getvid @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
contact = cl.getContact(target)
path = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
cl.sendVideoWithURL(msg.to, path)
except Exception as e:
raise e
print "[Command]dp executed"
elif "Getgroup image" in msg.text:
group = cl.getGroup(msg.to)
path = "http://dl.profile.line-cdn.net/" + group.pictureStatus
cl.sendImageWithURL(msg.to,path)
elif "Urlgroup image" in msg.text:
group = cl.getGroup(msg.to)
path = "http://dl.profile.line-cdn.net/" + group.pictureStatus
cl.sendText(msg.to,path)
elif "Getname" in msg.text:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
contact = cl.getContact(key1)
cu = cl.channel.getCover(key1)
try:
cl.sendText(msg.to, "===[DisplayName]===\n" + contact.displayName)
except:
cl.sendText(msg.to, "===[DisplayName]===\n" + contact.displayName)
elif "Getprofile" in msg.text:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
contact = cl.getContact(key1)
cu = cl.channel.getCover(key1)
path = str(cu)
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
try:
cl.sendText(msg.to,"Nama :\n" + contact.displayName + "\n\nBio :\n" + contact.statusMessage)
cl.sendText(msg.to,"Profile Picture " + contact.displayName)
cl.sendImageWithURL(msg.to,image)
cl.sendText(msg.to,"Cover " + contact.displayName)
cl.sendImageWithURL(msg.to,path)
except:
pass
elif "Getcontact" in msg.text:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
mmid = cl.getContact(key1)
msg.contentType = 13
msg.contentMetadata = {"mid": key1}
cl.sendMessage(msg)
elif "Getinfo" in msg.text:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
contact = cl.getContact(key1)
cu = cl.channel.getCover(key1)
try:
cl.sendText(msg.to,"Nama :\n" + contact.displayName + "\n\nMid :\n" + contact.mid + "\n\nBio :\n" + contact.statusMessage + "\n\nProfile Picture :\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n\nHeader :\n" + str(cu))
except:
cl.sendText(msg.to,"Nama :\n" + contact.displayName + "\n\nMid :\n" + contact.mid + "\n\nBio :\n" + contact.statusMessage + "\n\nProfile Picture :\n" + str(cu))
elif "Getbio" in msg.text:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
contact = cl.getContact(key1)
cu = cl.channel.getCover(key1)
try:
cl.sendText(msg.to, "===[StatusMessage]===\n" + contact.statusMessage)
except:
cl.sendText(msg.to, "===[StatusMessage]===\n" + contact.statusMessage)
elif msg.text.lower() == 'runtime':
eltime = time.time() - mulai
van = "Bot Sudah Berjalan Selama :\n"+waktu(eltime)
cl.sendText(msg.to,van)
elif "Checkdate " in msg.text:
tanggal = msg.text.replace("Checkdate ","")
r=requests.get('https://script.google.com/macros/exec?service=AKfycbw7gKzP-WYV2F5mc9RaR7yE3Ve1yN91Tjs91hp_jHSE02dSv9w&nama=ervan&tanggal='+tanggal)
data=r.text
data=json.loads(data)
lahir = data["data"]["lahir"]
usia = data["data"]["usia"]
ultah = data["data"]["ultah"]
zodiak = data["data"]["zodiak"]
cl.sendText(msg.to,"========== I N F O R M A S I ==========\n"+"Date Of Birth : "+lahir+"\nAge : "+usia+"\nUltah : "+ultah+"\nZodiak : "+zodiak+"\n========== I N F O R M A S I ==========")
elif msg.text in ["Kalender","Time","Waktu"]:
timeNow = datetime.now()
timeHours = datetime.strftime(timeNow,"(%H:%M)")
day = ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday","Friday", "Saturday"]
hari = ["Minggu", "Senin", "Selasa", "Rabu", "Kamis", "Jumat", "Sabtu"]
bulan = ["Januari", "Februari", "Maret", "April", "Mei", "Juni", "Juli", "Agustus", "September", "Oktober", "November", "Desember"]
inihari = datetime.today()
hr = inihari.strftime('%A')
bln = inihari.strftime('%m')
for i in range(len(day)):
if hr == day[i]: hasil = hari[i]
                bln = bulan[int(bln) - 1]  # %m is zero-padded ("01".."12"), so map it directly
rst = hasil + ", " + inihari.strftime('%d') + " - " + bln + " - " + inihari.strftime('%Y') + "\nJam : [ " + inihari.strftime('%H:%M:%S') + " ]"
cl.sendText(msg.to, rst)
elif "SearchID: " in msg.text:
userid = msg.text.replace("SearchID: ","")
contact = cl.findContactsByUserid(userid)
msg.contentType = 13
msg.contentMetadata = {'mid': contact.mid}
cl.sendMessage(msg)
elif "Searchid: " in msg.text:
userid = msg.text.replace("Searchid: ","")
contact = cl.findContactsByUserid(userid)
msg.contentType = 13
msg.contentMetadata = {'mid': contact.mid}
cl.sendMessage(msg)
elif "removechat" in msg.text.lower():
if msg.from_ in admin:
try:
cl.removeAllMessages(op.param2)
ki.removeAllMessages(op.param2)
kk.removeAllMessages(op.param2)
kc.removeAllMessages(op.param2)
kr.removeAllMessages(op.param2)
print "[Command] Remove Chat"
cl.sendText(msg.to,"Done")
except Exception as error:
print error
cl.sendText(msg.to,"Error")
elif "Invitemeto: " in msg.text:
if msg.from_ in admin:
gid = msg.text.replace("Invitemeto: ","")
if gid == "":
cl.sendText(msg.to,"Invalid group id")
else:
try:
cl.findAndAddContactsByMid(msg.from_)
ki.findAndAddContactsByMid(msg.from_)
kk.findAndAddContactsByMid(msg.from_)
kc.findAndAddContactsByMid(msg.from_)
kr.findAndAddContactsByMid(msg.from_)
random.choice(KAC).inviteIntoGroup(gid,[msg.from_])
except:
cl.sendText(msg.to,"Mungkin Saya Tidak Di Dalaam Grup Itu")
elif msg.text in ["Glist"]:
cl.sendText(msg.to, "Tunggu Sebentar. . .")
gid = cl.getGroupIdsJoined()
h = ""
for i in gid:
h += "╠➩" + "%s\n" % (cl.getGroup(i).name +" ~> ["+str(len(cl.getGroup(i).members))+"]")
cl.sendText(msg.to,"╔═════════════════════════\n║ ☆☞ LIST GROUPS☜☆\n╠═════════════════════════\n" + h + "╠═════════════════════════" + "\n║ Total Groups =" +" ["+str(len(gid))+"]\n╚═════════════════════════")
elif msg.text in ["Glistmid"]:
gruplist = kr.getGroupIdsJoined()
kontak = kr.getGroups(gruplist)
num=1
msgs="═════════List GrupMid═════════"
for ids in kontak:
msgs+="\n[%i] %s" % (num, ids.id)
num=(num+1)
msgs+="\n═════════List GrupMid═════════\n\nTotal Grup : %i" % len(kontak)
kr.sendText(msg.to, msgs)
elif "Google: " in msg.text:
a = msg.text.replace("Google: ","")
b = urllib.quote(a)
cl.sendText(msg.to,"Sedang Mencari...")
cl.sendText(msg.to, "https://www.google.com/" + b)
cl.sendText(msg.to,"Itu Dia Linknya. . .")
elif "Details group: " in msg.text:
if msg.from_ in admin:
gid = msg.text.replace("Details group: ","")
if gid in [""," "]:
cl.sendText(msg.to,"Grup id tidak valid")
else:
try:
groups = cl.getGroup(gid)
if groups.members is not None:
members = str(len(groups.members))
else:
members = "0"
if groups.invitee is not None:
pendings = str(len(groups.invitee))
else:
pendings = "0"
h = "[" + groups.name + "]\n -+GroupID : " + gid + "\n -+Members : " + members + "\n -+MembersPending : " + pendings + "\n -+Creator : " + groups.creator.displayName + "\n -+GroupPicture : http://dl.profile.line.naver.jp/" + groups.pictureStatus
cl.sendText(msg.to,h)
except Exception as error:
cl.sendText(msg.to,(error))
elif "Cancel invite: " in msg.text:
if msg.from_ in admin:
gids = msg.text.replace("Cancel invite: ","")
                gid = cl.getGroup(gids)
                if gid is not None:
                    try:
                        cl.rejectGroupInvitation(gids)
                        cl.sendText(msg.to,"Berhasil tolak undangan dari grup " + gid.name)
                    except:
                        cl.sendText(msg.to,"Error!")
                else:
                    cl.sendText(msg.to,"Grup tidak ditemukan")
elif msg.text in ["Kapten acc invite"]:
if msg.from_ in admin:
gid = cl.getGroupIdsInvited()
_list = ""
for i in gid:
if i is not None:
gids = cl.getGroup(i)
_list += gids.name
cl.acceptGroupInvitation(i)
else:
break
                if gid:
cl.sendText(msg.to,"Berhasil terima semua undangan dari grup :\n" + _list)
else:
cl.sendText(msg.to,"Tidak ada grup yang tertunda saat ini")
elif msg.text in ["TC1 acc invite"]:
if msg.from_ in admin:
gid = ki.getGroupIdsInvited()
_list = ""
for i in gid:
if i is not None:
gids = ki.getGroup(i)
_list += gids.name
ki.acceptGroupInvitation(i)
else:
break
                if gid:
ki.sendText(msg.to,"Berhasil terima semua undangan dari grup :\n" + _list)
else:
ki.sendText(msg.to,"Tidak ada grup yang tertunda saat ini")
elif msg.text in ["TC2 acc invite"]:
if msg.from_ in admin:
gid = kk.getGroupIdsInvited()
_list = ""
for i in gid:
if i is not None:
gids = kk.getGroup(i)
_list += gids.name
kk.acceptGroupInvitation(i)
else:
break
                if gid:
kk.sendText(msg.to,"Berhasil terima semua undangan dari grup :\n" + _list)
else:
kk.sendText(msg.to,"Tidak ada grup yang tertunda saat ini")
elif msg.text in ["TC3 acc invite"]:
if msg.from_ in admin:
gid = kc.getGroupIdsInvited()
_list = ""
for i in gid:
if i is not None:
gids = kc.getGroup(i)
_list += gids.name
kc.acceptGroupInvitation(i)
else:
break
                if gid:
kc.sendText(msg.to,"Berhasil terima semua undangan dari grup :\n" + _list)
else:
kc.sendText(msg.to,"Tidak ada grup yang tertunda saat ini")
elif msg.text in ["TC4 acc invite"]:
if msg.from_ in admin:
gid = kr.getGroupIdsInvited()
_list = ""
for i in gid:
if i is not None:
gids = kr.getGroup(i)
_list += gids.name
kr.acceptGroupInvitation(i)
else:
break
                if gid:
kr.sendText(msg.to,"Berhasil terima semua undangan dari grup :\n" + _list)
else:
kr.sendText(msg.to,"Tidak ada grup yang tertunda saat ini")
elif "Gif gore" in msg.text:
gif = ("https://media.giphy.com/media/l2JHVsQiOZrNMGzYs/giphy.gif","https://media.giphy.com/media/OgltQ2hbilzJS/200w.gif")
gore = random.choice(gif)
cl.sendGifWithURL(msg.to,gore)
if op.type == 59:
print op
except Exception as error:
print error
while True:
try:
Ops = cl.fetchOps(cl.Poll.rev, 5)
except EOFError:
raise Exception("It might be wrong revision\n" + str(cl.Poll.rev))
for Op in Ops:
if (Op.type != OpType.END_OF_OPERATION):
cl.Poll.rev = max(cl.Poll.rev, Op.revision)
bot(Op)
| 40.951759 | 443 | 0.429363 | ["MIT"] | azmi155/mu | ma.py | 177,124 | Python |
# -*- coding: utf-8 -*-
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import time
from PIL import Image
import random
import os
from sample import sample_conf
from tensorflow.python.framework.errors_impl import NotFoundError
# setting the environment variables below enables CPU-only recognition
# os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
# os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
class TrainError(Exception):
pass
class TrainModel(object):
def __init__(self, img_path, char_set, model_save_dir, verify=False):
        # model path
self.model_save_dir = model_save_dir
        # shuffle the file order + verify the image format
self.img_path = img_path
self.img_list = os.listdir(img_path)
        # verify format
if verify:
self.confirm_image_suffix()
        # shuffle file order
random.seed(time.time())
random.shuffle(self.img_list)
        # get basic info: image width/height and label length
label, captcha_array = self.gen_captcha_text_image(self.img_list[0])
captcha_shape = captcha_array.shape
captcha_shape_len = len(captcha_shape)
if captcha_shape_len == 3:
image_height, image_width, channel = captcha_shape
self.channel = channel
elif captcha_shape_len == 2:
image_height, image_width = captcha_shape
else:
raise TrainError("图片转换为矩阵时出错,请检查图片格式")
        # initialize variables
        # image size
self.image_height = image_height
self.image_width = image_width
        # captcha length (number of characters)
self.max_captcha = len(label)
        # captcha character classes
self.char_set = char_set
self.char_set_len = len(char_set)
        # print summary information
        print("--> image size: {} x {}".format(image_height, image_width))
        print("--> captcha length: {}".format(self.max_captcha))
        print("--> {} character classes: {}".format(self.char_set_len, char_set))
        print("--> sample set: {}".format(img_path))
        # tf placeholders
        self.X = tf.placeholder(tf.float32, [None, image_height * image_width])  # feature vector
        self.Y = tf.placeholder(tf.float32, [None, self.max_captcha * self.char_set_len])  # labels
        self.keep_prob = tf.placeholder(tf.float32)  # dropout keep probability
self.w_alpha = 0.01
self.b_alpha = 0.1
# test model input and output
print(">>> Start model test")
batch_x, batch_y = self.get_batch(0, size=100)
print(">>> input batch images shape: {}".format(batch_x.shape))
print(">>> input batch labels shape: {}".format(batch_y.shape))
def gen_captcha_text_image(self, img_name):
"""
返回一个验证码的array形式和对应的字符串标签
:return:tuple (str, numpy.array)
"""
        # label
        label = img_name.split("_")[0]
        # file
        img_file = os.path.join(self.img_path, img_name)
        captcha_image = Image.open(img_file)
        captcha_array = np.array(captcha_image)  # convert to a numpy array
return label, captcha_array
@staticmethod
def convert2gray(img):
"""
图片转为灰度图,如果是3通道图则计算,单通道图则直接返回
:param img:
:return:
"""
if len(img.shape) > 2:
r, g, b = img[:, :, 0], img[:, :, 1], img[:, :, 2]
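            # standard ITU-R BT.601 luma weights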
gray = 0.2989 * r + 0.5870 * g + 0.1140 * b
return gray
else:
return img
def text2vec(self, text):
"""
转标签为oneHot编码
:param text: str
:return: numpy.array
"""
text_len = len(text)
if text_len > self.max_captcha:
            raise ValueError('captcha labels are at most {} characters long'.format(self.max_captcha))
vector = np.zeros(self.max_captcha * self.char_set_len)
for i, ch in enumerate(text):
idx = i * self.char_set_len + self.char_set.index(ch)
vector[idx] = 1
return vector
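    # A worked sketch of the encoding (illustrative values): with
    # char_set = "0123456789" (char_set_len = 10) and max_captcha = 4,
    # the label "2019" becomes a length-40 vector with ones at indices
    # 0*10+2, 1*10+0, 2*10+1 and 3*10+9, i.e. 2, 10, 21 and 39.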
def get_batch(self, n, size=128):
        batch_x = np.zeros([size, self.image_height * self.image_width])  # initialize
        batch_y = np.zeros([size, self.max_captcha * self.char_set_len])  # initialize
max_batch = int(len(self.img_list) / size)
# print(max_batch)
if max_batch - 1 < 0:
raise TrainError("训练集图片数量需要大于每批次训练的图片数量")
if n > max_batch - 1:
n = n % max_batch
s = n * size
e = (n + 1) * size
this_batch = self.img_list[s:e]
# print("{}:{}".format(s, e))
for i, img_name in enumerate(this_batch):
label, image_array = self.gen_captcha_text_image(img_name)
            image_array = self.convert2gray(image_array)  # grayscale the image
            batch_x[i, :] = image_array.flatten() / 255  # flatten to a 1-D vector
            batch_y[i, :] = self.text2vec(label)  # build the one-hot label
return batch_x, batch_y
def confirm_image_suffix(self):
        # verify all file formats before training
        print("start verifying all image suffixes")
for index, img_name in enumerate(self.img_list):
print("{} image pass".format(index), end='\r')
if not img_name.endswith(sample_conf['image_suffix']):
raise TrainError('confirm images suffix:you request [.{}] file but get file [{}]'
.format(sample_conf['image_suffix'], img_name))
print("所有图片格式校验通过")
def model(self):
x = tf.reshape(self.X, shape=[-1, self.image_height, self.image_width, 1])
print(">>> input x: {}".format(x))
        # convolution layer 1
wc1 = tf.get_variable(name='wc1', shape=[3, 3, 1, 32], dtype=tf.float32,
initializer=tf.contrib.layers.xavier_initializer())
bc1 = tf.Variable(self.b_alpha * tf.random_normal([32]))
conv1 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(x, wc1, strides=[1, 1, 1, 1], padding='SAME'), bc1))
conv1 = tf.nn.max_pool(conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
conv1 = tf.nn.dropout(conv1, self.keep_prob)
        # convolution layer 2
wc2 = tf.get_variable(name='wc2', shape=[3, 3, 32, 64], dtype=tf.float32,
initializer=tf.contrib.layers.xavier_initializer())
bc2 = tf.Variable(self.b_alpha * tf.random_normal([64]))
conv2 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(conv1, wc2, strides=[1, 1, 1, 1], padding='SAME'), bc2))
conv2 = tf.nn.max_pool(conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
conv2 = tf.nn.dropout(conv2, self.keep_prob)
        # convolution layer 3
wc3 = tf.get_variable(name='wc3', shape=[3, 3, 64, 128], dtype=tf.float32,
initializer=tf.contrib.layers.xavier_initializer())
bc3 = tf.Variable(self.b_alpha * tf.random_normal([128]))
conv3 = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(conv2, wc3, strides=[1, 1, 1, 1], padding='SAME'), bc3))
conv3 = tf.nn.max_pool(conv3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
conv3 = tf.nn.dropout(conv3, self.keep_prob)
print(">>> convolution 3: ", conv3.shape)
next_shape = conv3.shape[1] * conv3.shape[2] * conv3.shape[3]
        # fully connected layer 1
wd1 = tf.get_variable(name='wd1', shape=[next_shape, 1024], dtype=tf.float32,
initializer=tf.contrib.layers.xavier_initializer())
bd1 = tf.Variable(self.b_alpha * tf.random_normal([1024]))
dense = tf.reshape(conv3, [-1, wd1.get_shape().as_list()[0]])
dense = tf.nn.relu(tf.add(tf.matmul(dense, wd1), bd1))
dense = tf.nn.dropout(dense, self.keep_prob)
        # fully connected layer 2 (output)
        wout = tf.get_variable('wout', shape=[1024, self.max_captcha * self.char_set_len], dtype=tf.float32,
initializer=tf.contrib.layers.xavier_initializer())
bout = tf.Variable(self.b_alpha * tf.random_normal([self.max_captcha * self.char_set_len]))
y_predict = tf.add(tf.matmul(dense, wout), bout)
return y_predict
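    # Shape sketch (assuming, for illustration, a 60x160 grayscale input):
    # each conv + 2x2 max-pool stage halves both spatial dimensions, giving
    # roughly 8x20x128 features, which the two dense layers map down to
    # max_captcha * char_set_len output logits.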
def train_cnn(self):
y_predict = self.model()
print(">>> input batch predict shape: {}".format(y_predict.shape))
print(">>> End model test")
        # loss: sigmoid cross-entropy
cost = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=y_predict, labels=self.Y))
        # gradient descent (Adam)
optimizer = tf.train.AdamOptimizer(learning_rate=0.0001).minimize(cost)
        # compute accuracy
        predict = tf.reshape(y_predict, [-1, self.max_captcha, self.char_set_len])  # predictions
        max_idx_p = tf.argmax(predict, 2)  # predicted character indices
        max_idx_l = tf.argmax(tf.reshape(self.Y, [-1, self.max_captcha, self.char_set_len]), 2)  # labels
        # a sample counts as correct only if every character matches
correct_pred = tf.equal(max_idx_p, max_idx_l)
accuracy = tf.reduce_mean(tf.reduce_min(tf.cast(correct_pred, tf.float32), axis=1))
        # model saver
saver = tf.train.Saver()
with tf.Session() as sess:
init = tf.global_variables_initializer()
sess.run(init)
            # restore a previously saved model, if any
if os.path.exists(self.model_save_dir):
try:
saver.restore(sess, self.model_save_dir)
                    # catch the case where the model directory contains no checkpoint files
except NotFoundError:
print("model文件夹为空,将创建新模型")
else:
pass
step = 1
for i in range(3000):
batch_x, batch_y = self.get_batch(i, size=128)
_, cost_ = sess.run([optimizer, cost], feed_dict={self.X: batch_x, self.Y: batch_y, self.keep_prob: 0.75})
if step % 10 == 0:
batch_x_test, batch_y_test = self.get_batch(i, size=100)
acc = sess.run(accuracy, feed_dict={self.X: batch_x_test, self.Y: batch_y_test, self.keep_prob: 1.})
print("第{}次训练 >>> 准确率为 {} >>> loss {}".format(step, acc, cost_))
                    # save and stop once accuracy reaches 99%
if acc > 0.99:
saver.save(sess, self.model_save_dir)
break
                # save a checkpoint every 500 iterations
if i % 500 == 0:
saver.save(sess, self.model_save_dir)
step += 1
saver.save(sess, self.model_save_dir)
def recognize_captcha(self):
label, captcha_array = self.gen_captcha_text_image(random.choice(self.img_list))
f = plt.figure()
ax = f.add_subplot(111)
ax.text(0.1, 0.9, "origin:" + label, ha='center', va='center', transform=ax.transAxes)
plt.imshow(captcha_array)
        # predict the image
image = self.convert2gray(captcha_array)
image = image.flatten() / 255
y_predict = self.model()
saver = tf.train.Saver()
with tf.Session() as sess:
saver.restore(sess, self.model_save_dir)
predict = tf.argmax(tf.reshape(y_predict, [-1, self.max_captcha, self.char_set_len]), 2)
text_list = sess.run(predict, feed_dict={self.X: [image], self.keep_prob: 1.})
predict_text = text_list[0].tolist()
print("正确: {} 预测: {}".format(label, predict_text))
            # show the image and the prediction
p_text = ""
for p in predict_text:
p_text += str(self.char_set[p])
print(p_text)
plt.text(20, 1, 'predict:{}'.format(p_text))
plt.show()
def main():
train_image_dir = sample_conf["train_image_dir"]
char_set = sample_conf["char_set"]
model_save_dir = sample_conf["model_save_dir"]
tm = TrainModel(train_image_dir, char_set, model_save_dir, verify=False)
    tm.train_cnn()  # start training the model
    # tm.recognize_captcha()  # example: recognize a single image
if __name__ == '__main__':
main()
| 39.534965 | 122 | 0.580437 | ["Apache-2.0"] | shineyjg/cnn_captcha | train_model.py | 12,141 | Python |
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# The idea for this module (but no code) was borrowed from the
# quantities (http://pythonhosted.org/quantities/) package.
"""Helper functions for Quantity.
In particular, this implements the logic that determines scaling and result
units for a given ufunc, given input units.
"""
from fractions import Fraction
import numpy as np
from . import UFUNC_HELPERS, UNSUPPORTED_UFUNCS
from ..core import (UnitsError, UnitConversionError, UnitTypeError,
dimensionless_unscaled, get_current_unit_registry)
def _d(unit):
if unit is None:
return dimensionless_unscaled
else:
return unit
def get_converter(from_unit, to_unit):
"""Like Unit._get_converter, except returns None if no scaling is needed,
i.e., if the inferred scale is unity."""
try:
scale = from_unit._to(to_unit)
except UnitsError:
return from_unit._apply_equivalencies(
from_unit, to_unit, get_current_unit_registry().equivalencies)
except AttributeError:
raise UnitTypeError("Unit '{0}' cannot be converted to '{1}'"
.format(from_unit, to_unit))
if scale == 1.:
return None
else:
return lambda val: scale * val
def get_converters_and_unit(f, unit1, unit2):
converters = [None, None]
# By default, we try adjusting unit2 to unit1, so that the result will
# be unit1 as well. But if there is no second unit, we have to try
# adjusting unit1 (to dimensionless, see below).
if unit2 is None:
if unit1 is None:
# No units for any input -- e.g., np.add(a1, a2, out=q)
return converters, dimensionless_unscaled
changeable = 0
# swap units.
unit2 = unit1
unit1 = None
elif unit2 is unit1:
# ensure identical units is fast ("==" is slow, so avoid that).
return converters, unit1
else:
changeable = 1
# Try to get a converter from unit2 to unit1.
if unit1 is None:
try:
converters[changeable] = get_converter(unit2,
dimensionless_unscaled)
except UnitsError:
# special case: would be OK if unitless number is zero, inf, nan
converters[1-changeable] = False
return converters, unit2
else:
return converters, dimensionless_unscaled
else:
try:
converters[changeable] = get_converter(unit2, unit1)
except UnitsError:
raise UnitConversionError(
"Can only apply '{0}' function to quantities "
"with compatible dimensions"
.format(f.__name__))
return converters, unit1
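# A quick illustration of the contract above (not part of the module API):
# for np.add on metres and centimetres, the second input gets a converter
# to metres and the result unit is the metre, i.e. roughly
#
#     converters, unit = get_converters_and_unit(np.add, u.m, u.cm)
#     # converters == [None, <scale by 0.01>], unit == u.m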
# SINGLE ARGUMENT UFUNC HELPERS
#
# The functions below take a single argument, which is the quantity upon which
# the ufunc is being used. The output of the helper function should be two
# values: a list with a single converter to be used to scale the input before
# it is being passed to the ufunc (or None if no conversion is needed), and
# the unit the output will be in.
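# For instance, for np.sqrt applied to a quantity in m**2, helper_sqrt below
# returns ([None], m): no input scaling is needed, and the result is in
# metres (an illustration of the contract, assuming astropy.units as u):
#
#     converters, result_unit = helper_sqrt(np.sqrt, u.m ** 2)
#     # converters == [None], result_unit == (u.m ** 2) ** 0.5 == u.m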
def helper_onearg_test(f, unit):
return ([None], None)
def helper_invariant(f, unit):
return ([None], _d(unit))
def helper_square(f, unit):
return ([None], unit ** 2 if unit is not None else dimensionless_unscaled)
def helper_reciprocal(f, unit):
return ([None], unit ** -1 if unit is not None else dimensionless_unscaled)
one_half = 0.5 # faster than Fraction(1, 2)
one_third = Fraction(1, 3)
def helper_sqrt(f, unit):
return ([None], unit ** one_half if unit is not None
else dimensionless_unscaled)
def helper_cbrt(f, unit):
return ([None], (unit ** one_third if unit is not None
else dimensionless_unscaled))
def helper_modf(f, unit):
if unit is None:
return [None], (dimensionless_unscaled, dimensionless_unscaled)
try:
return ([get_converter(unit, dimensionless_unscaled)],
(dimensionless_unscaled, dimensionless_unscaled))
except UnitsError:
raise UnitTypeError("Can only apply '{0}' function to "
"dimensionless quantities"
.format(f.__name__))
def helper__ones_like(f, unit):
return [None], dimensionless_unscaled
def helper_dimensionless_to_dimensionless(f, unit):
if unit is None:
return [None], dimensionless_unscaled
try:
return ([get_converter(unit, dimensionless_unscaled)],
dimensionless_unscaled)
except UnitsError:
raise UnitTypeError("Can only apply '{0}' function to "
"dimensionless quantities"
.format(f.__name__))
def helper_dimensionless_to_radian(f, unit):
from ..si import radian
if unit is None:
return [None], radian
try:
return [get_converter(unit, dimensionless_unscaled)], radian
except UnitsError:
raise UnitTypeError("Can only apply '{0}' function to "
"dimensionless quantities"
.format(f.__name__))
def helper_degree_to_radian(f, unit):
from ..si import degree, radian
try:
return [get_converter(unit, degree)], radian
except UnitsError:
raise UnitTypeError("Can only apply '{0}' function to "
"quantities with angle units"
.format(f.__name__))
def helper_radian_to_degree(f, unit):
from ..si import degree, radian
try:
return [get_converter(unit, radian)], degree
except UnitsError:
raise UnitTypeError("Can only apply '{0}' function to "
"quantities with angle units"
.format(f.__name__))
def helper_radian_to_dimensionless(f, unit):
from ..si import radian
try:
return [get_converter(unit, radian)], dimensionless_unscaled
except UnitsError:
raise UnitTypeError("Can only apply '{0}' function to "
"quantities with angle units"
.format(f.__name__))
def helper_frexp(f, unit):
if not unit.is_unity():
raise UnitTypeError("Can only apply '{0}' function to "
"unscaled dimensionless quantities"
.format(f.__name__))
return [None], (None, None)
# TWO ARGUMENT UFUNC HELPERS
#
# The functions below take two arguments. The output of the helper function
# should be two values: a tuple of two converters to be used to scale the
# inputs before being passed to the ufunc (None if no conversion is needed),
# and the unit the output will be in.
def helper_multiplication(f, unit1, unit2):
return [None, None], _d(unit1) * _d(unit2)
def helper_division(f, unit1, unit2):
return [None, None], _d(unit1) / _d(unit2)
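# Illustrative sketch (editor addition): the two-argument contract in action.
# With no input units, multiplication needs no converters and the result unit
# is dimensionless. This checker is for demonstration only and never called.
def _example_two_arg_contract():
    converters, result_unit = helper_multiplication(np.multiply, None, None)
    assert converters == [None, None]
    assert result_unit == dimensionless_unscaled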
def helper_power(f, unit1, unit2):
# TODO: find a better way to do this, currently need to signal that one
# still needs to raise power of unit1 in main code
if unit2 is None:
return [None, None], False
try:
return [None, get_converter(unit2, dimensionless_unscaled)], False
except UnitsError:
raise UnitTypeError("Can only raise something to a "
"dimensionless quantity")
def helper_ldexp(f, unit1, unit2):
if unit2 is not None:
raise TypeError("Cannot use ldexp with a quantity "
"as second argument.")
else:
return [None, None], _d(unit1)
def helper_copysign(f, unit1, unit2):
# if first arg is not a quantity, just return plain array
if unit1 is None:
return [None, None], None
else:
return [None, None], unit1
def helper_heaviside(f, unit1, unit2):
try:
converter2 = (get_converter(unit2, dimensionless_unscaled)
if unit2 is not None else None)
except UnitsError:
raise UnitTypeError("Can only apply 'heaviside' function with a "
"dimensionless second argument.")
return ([None, converter2], dimensionless_unscaled)
def helper_two_arg_dimensionless(f, unit1, unit2):
try:
converter1 = (get_converter(unit1, dimensionless_unscaled)
if unit1 is not None else None)
converter2 = (get_converter(unit2, dimensionless_unscaled)
if unit2 is not None else None)
except UnitsError:
raise UnitTypeError("Can only apply '{0}' function to "
"dimensionless quantities"
.format(f.__name__))
return ([converter1, converter2], dimensionless_unscaled)
# This used to be a separate function that just called get_converters_and_unit.
# Using it directly saves a few us; keeping the clearer name.
helper_twoarg_invariant = get_converters_and_unit
def helper_twoarg_comparison(f, unit1, unit2):
converters, _ = get_converters_and_unit(f, unit1, unit2)
return converters, None
def helper_twoarg_invtrig(f, unit1, unit2):
from ..si import radian
converters, _ = get_converters_and_unit(f, unit1, unit2)
return converters, radian
def helper_twoarg_floor_divide(f, unit1, unit2):
converters, _ = get_converters_and_unit(f, unit1, unit2)
return converters, dimensionless_unscaled
def helper_divmod(f, unit1, unit2):
converters, result_unit = get_converters_and_unit(f, unit1, unit2)
return converters, (dimensionless_unscaled, result_unit)
# list of ufuncs:
# http://docs.scipy.org/doc/numpy/reference/ufuncs.html#available-ufuncs
UNSUPPORTED_UFUNCS |= {
np.bitwise_and, np.bitwise_or, np.bitwise_xor, np.invert, np.left_shift,
np.right_shift, np.logical_and, np.logical_or, np.logical_xor,
np.logical_not}
for name in 'isnat', 'gcd', 'lcm':
# isnat was introduced in numpy 1.14, gcd+lcm in 1.15
ufunc = getattr(np, name, None)
if isinstance(ufunc, np.ufunc):
UNSUPPORTED_UFUNCS |= {ufunc}
# SINGLE ARGUMENT UFUNCS
# ufuncs that return a boolean and do not care about the unit
onearg_test_ufuncs = (np.isfinite, np.isinf, np.isnan, np.sign, np.signbit)
for ufunc in onearg_test_ufuncs:
UFUNC_HELPERS[ufunc] = helper_onearg_test
# ufuncs that return a value with the same unit as the input
invariant_ufuncs = (np.absolute, np.fabs, np.conj, np.conjugate, np.negative,
np.spacing, np.rint, np.floor, np.ceil, np.trunc,
np.positive)
for ufunc in invariant_ufuncs:
UFUNC_HELPERS[ufunc] = helper_invariant
# ufuncs that require dimensionless input and give dimensionless output
dimensionless_to_dimensionless_ufuncs = (np.exp, np.expm1, np.exp2, np.log,
np.log10, np.log2, np.log1p)
# As found out in gh-7058, some numpy 1.13 conda installations also provide
# np.erf, even though upstream doesn't have it. We include it if present.
if isinstance(getattr(np.core.umath, 'erf', None), np.ufunc):
dimensionless_to_dimensionless_ufuncs += (np.core.umath.erf,)
for ufunc in dimensionless_to_dimensionless_ufuncs:
UFUNC_HELPERS[ufunc] = helper_dimensionless_to_dimensionless
# ufuncs that require dimensionless input and give output in radians
dimensionless_to_radian_ufuncs = (np.arccos, np.arcsin, np.arctan, np.arccosh,
np.arcsinh, np.arctanh)
for ufunc in dimensionless_to_radian_ufuncs:
UFUNC_HELPERS[ufunc] = helper_dimensionless_to_radian
# ufuncs that require input in degrees and give output in radians
degree_to_radian_ufuncs = (np.radians, np.deg2rad)
for ufunc in degree_to_radian_ufuncs:
UFUNC_HELPERS[ufunc] = helper_degree_to_radian
# ufuncs that require input in radians and give output in degrees
radian_to_degree_ufuncs = (np.degrees, np.rad2deg)
for ufunc in radian_to_degree_ufuncs:
UFUNC_HELPERS[ufunc] = helper_radian_to_degree
# ufuncs that require input in radians and give dimensionless output
radian_to_dimensionless_ufuncs = (np.cos, np.sin, np.tan, np.cosh, np.sinh,
np.tanh)
for ufunc in radian_to_dimensionless_ufuncs:
UFUNC_HELPERS[ufunc] = helper_radian_to_dimensionless
# ufuncs handled as special cases
UFUNC_HELPERS[np.sqrt] = helper_sqrt
UFUNC_HELPERS[np.square] = helper_square
UFUNC_HELPERS[np.reciprocal] = helper_reciprocal
UFUNC_HELPERS[np.cbrt] = helper_cbrt
UFUNC_HELPERS[np.core.umath._ones_like] = helper__ones_like
UFUNC_HELPERS[np.modf] = helper_modf
UFUNC_HELPERS[np.frexp] = helper_frexp
# TWO ARGUMENT UFUNCS
# two argument ufuncs that require dimensionless input and give
# dimensionless output
two_arg_dimensionless_ufuncs = (np.logaddexp, np.logaddexp2)
for ufunc in two_arg_dimensionless_ufuncs:
UFUNC_HELPERS[ufunc] = helper_two_arg_dimensionless
# two argument ufuncs that return a value with the same unit as the input
twoarg_invariant_ufuncs = (np.add, np.subtract, np.hypot, np.maximum,
np.minimum, np.fmin, np.fmax, np.nextafter,
np.remainder, np.mod, np.fmod)
for ufunc in twoarg_invariant_ufuncs:
UFUNC_HELPERS[ufunc] = helper_twoarg_invariant
# two argument ufuncs that need compatible inputs and return a boolean
twoarg_comparison_ufuncs = (np.greater, np.greater_equal, np.less,
np.less_equal, np.not_equal, np.equal)
for ufunc in twoarg_comparison_ufuncs:
UFUNC_HELPERS[ufunc] = helper_twoarg_comparison
# two argument ufuncs that do inverse trigonometry
twoarg_invtrig_ufuncs = (np.arctan2,)
# another private function in numpy; use getattr in case it disappears
if isinstance(getattr(np.core.umath, '_arg', None), np.ufunc):
twoarg_invtrig_ufuncs += (np.core.umath._arg,)
for ufunc in twoarg_invtrig_ufuncs:
UFUNC_HELPERS[ufunc] = helper_twoarg_invtrig
# ufuncs handled as special cases
UFUNC_HELPERS[np.multiply] = helper_multiplication
UFUNC_HELPERS[np.divide] = helper_division
UFUNC_HELPERS[np.true_divide] = helper_division
UFUNC_HELPERS[np.power] = helper_power
UFUNC_HELPERS[np.ldexp] = helper_ldexp
UFUNC_HELPERS[np.copysign] = helper_copysign
UFUNC_HELPERS[np.floor_divide] = helper_twoarg_floor_divide
UFUNC_HELPERS[np.heaviside] = helper_heaviside
UFUNC_HELPERS[np.float_power] = helper_power
UFUNC_HELPERS[np.divmod] = helper_divmod
| 35.461916 | 79 | 0.677406 | ["BSD-3-Clause"] | PriyankaH21/astropy | astropy/units/quantity_helper/helpers.py | 14,433 | Python |
# Uses python3
def get_change(money, coins):
    # t[j] will hold the minimum number of coins needed to make amount j.
    # Initialise with j+1, which acts as "infinity": since the smallest coin
    # is 1, no amount j ever needs more than j coins.
    t = [j+1 for j in range(money+1)]
    # boundary condition: amount 0 needs no coins
    t[0] = 0
    for j in range(1, money+1):
        for c in coins:
            if c <= j:
                # either keep the current best, or spend coin c on top of
                # the optimal solution for the remaining amount j-c
                t[j] = min(t[j], 1+t[j-c])
    return t[money]
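# Quick self-check (editor addition; the numbers are a standard worked example,
# not taken from the original file): 34 = 7*4 + 2*3, so nine coins suffice.
assert get_change(34, [1, 3, 4]) == 9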
if __name__ == '__main__':
coins = [1, 3, 4]
money = int(input())
print(get_change(money, coins))
| 18.714286 | 42 | 0.516539 | ["MIT"] | vishweshwartyagi/Data-Structures-and-Algorithms-UCSD | 1. Algorithmic Toolbox/week5_dynamic_programming1/1_money_change_again.py | 393 | Python |
from sepal_ui import sepalwidgets as sw
from ipywidgets import dlink
from component import parameter as cp
class ParamTile(sw.Card):
    """Closable card exposing the target and weight parameters of the model."""
    def __init__(self, model):
# read the model
self.model = model
# add the base widgets
self.close = sw.Icon(children=["mdi-close"], small=True)
self.title = sw.CardTitle(
class_="pa-0 ma-0", children=[sw.Spacer(), self.close]
)
# create the widgets
self.w_target = sw.Select(
small=True,
items=[{"text": f"{i+1}0%", "value": i + 1} for i in range(cp.nb_target)],
v_model=model.target,
label="target",
dense=True,
)
self.w_weight = sw.Select(
small=True,
items=[i + 1 for i in range(cp.nb_weight)],
v_model=model.weight,
label="weight",
dense=True,
)
# link the widgets to the model
self.model.bind(self.w_target, "target").bind(self.w_weight, "weight")
# create the object
super().__init__(
max_width="500px",
class_="pa-1",
children=[self.title, self.w_target, self.w_weight],
viz=False,
disabled=False,
)
# add javascript events
self.close.on_event("click", lambda *args: self.hide())
dlink((self, "disabled"), (self, "loading"))
    def reset(self):
        """Clear both selects and hide the card."""
        self.w_target.v_model = None
self.w_weight.v_model = None
self.hide()
return
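# Usage sketch (editor addition). Any sepal_ui model exposing `target`,
# `weight` and the `.bind()` helper should work; the lines below are an
# assumption-based illustration, not part of the original file:
#
#     tile = ParamTile(model)
#     tile.viz = True    # display the card
#     tile.reset()       # clear both selects and hide it again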
| 26.355932 | 86 | 0.540836 | ["MIT"] | 12rambau/weplan | component/tile/param_tile.py | 1,555 | Python |
#! /usr/bin/env python
"""Functions for working with the DLRN API"""
import csv
import os.path
import requests
from toolchest import yaml
from atkinson.config.manager import ConfigManager
from atkinson.logging.logger import getLogger
def _raw_fetch(url, logger):
"""
Fetch remote data and return the text output.
:param url: The URL to fetch the data from
:param logger: A logger instance to use.
:return: Raw text data, None otherwise
"""
ret_data = None
try:
req = requests.get(url)
if req.status_code == requests.codes.ok:
ret_data = req.text
except requests.exceptions.ConnectionError as error:
logger.warning(error.request)
return ret_data
def _fetch_yaml(url, logger):
"""
Fetch remote data and process the text as yaml.
:param url: The URL to fetch the data from
:param logger: A logger instance to use.
:return: Parsed yaml data in the form of a dictionary
"""
ret_data = None
raw_data = _raw_fetch(url, logger)
if raw_data is not None:
ret_data = yaml.parse(raw_data)
return ret_data
def dlrn_http_factory(host, config_file=None, link_name=None,
logger=getLogger()):
"""
Create a DlrnData instance based on a host.
:param host: A host name string to build instances
    :param config_file: A dlrn config file, or a list of config files, to use
                        in addition to the default.
:param link_name: A dlrn symlink to use. This overrides the config files
link parameter.
:param logger: An atkinson logger to use. Default is the base logger.
:return: A DlrnData instance
"""
manager = None
files = ['dlrn.yml']
if config_file is not None:
if isinstance(config_file, list):
files.extend(config_file)
else:
files.append(config_file)
local_path = os.path.realpath(os.path.dirname(__file__))
manager = ConfigManager(filenames=files, paths=local_path)
if manager is None:
return None
config = manager.config
if host not in config:
return None
link = config[host]['link']
if link_name is not None:
link = link_name
return DlrnHttpData(config[host]['url'],
config[host]['release'],
link_name=link,
logger=logger)
class DlrnHttpData():
"""A class used to interact with the dlrn API"""
def __init__(self, url, release, link_name='current', logger=getLogger()):
"""
Class constructor
:param url: The URL to the host to obtain data.
        :param release: The release name to use for lookup.
:param link_name: The name of the dlrn symlink to fetch data from.
:param logger: An atkinson logger to use. Default is the base logger.
"""
self.url = os.path.join(url, release)
self.release = release
self._logger = logger
self._link_name = link_name
self._commit_data = {}
self._fetch_commit()
def _fetch_commit(self):
"""
Fetch the commit data from dlrn
"""
full_url = os.path.join(self.url,
self._link_name,
'commit.yaml')
data = _fetch_yaml(full_url, self._logger)
if data is not None and 'commits' in data:
pkg = data['commits'][0]
if pkg['status'] == 'SUCCESS':
self._commit_data = {'name': pkg['project_name'],
'dist_hash': pkg['distro_hash'],
'commit_hash': pkg['commit_hash'],
'extended_hash': pkg.get('extended_hash')}
else:
msg = '{0} has a status of error'.format(str(pkg))
self._logger.warning(msg)
def _build_url(self):
"""
Generate a url given a commit hash and distgit hash to match the format
base/AB/CD/ABCD123_XYZ987 where ABCD123 is the commit hash and XYZ987
is a portion of the distgit hash.
:return: A string with the full URL.
"""
first = self._commit_data['commit_hash'][0:2]
second = self._commit_data['commit_hash'][2:4]
third = self._commit_data['commit_hash']
for key in ['dist_hash', 'extended_hash']:
if self._commit_data.get(key, 'None') != 'None':
third += '_' + self._commit_data[key][0:8]
return os.path.join(self.url,
first,
second,
third)
@property
def commit(self):
"""
Get the dlrn commit information
:return: A dictionary of name, dist-git hash, commit hash and
extended hash.
An empty dictionary is returned otherwise.
"""
return self._commit_data
@property
def versions(self):
"""
Get the version data for the versions.csv file and return the
data in a dictionary
:return: A dictionary of packages with commit and dist-git hashes
"""
ret_dict = {}
full_url = os.path.join(self._build_url(), 'versions.csv')
data = _raw_fetch(full_url, self._logger)
if data is not None:
data = data.replace(' ', '_')
split_data = data.split()
reader = csv.DictReader(split_data)
for row in reader:
ret_dict[row['Project']] = {'source': row['Source_Sha'],
'state': row['Status'],
'distgit': row['Dist_Sha'],
'nvr': row['Pkg_NVR']}
else:
msg = 'Could not fetch {0}'.format(full_url)
self._logger.error(msg)
return ret_dict
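if __name__ == '__main__':
    # Demonstration sketch (editor addition): 'example-host' is a placeholder
    # and must match a host entry in dlrn.yml for any data to come back.
    dlrn = dlrn_http_factory('example-host')
    if dlrn is not None:
        print(dlrn.commit)
        print(dlrn.versions)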
| 32.692308 | 79 | 0.565546 | ["MIT"] | jpichon/atkinson | atkinson/dlrn/http_data.py | 5,950 | Python |
"""
Module for the selection of machine learning models.
There are several different functions which can perform the model selection: all of them have an intuitive interface, but
are also powerful and flexible.
In addition, almost all these functions can optionally make plots, which sum up the performed selection in a visual way.
These different functions perform the model selection in different contexts, i.e. each function is specifically meant for a
specific scenario. Certain contexts are more specific, and other are more general.
On the whole, there are six different model selection functions, divided into two main groups:
1. functions that perform the model selection with respect to a **single dataset**;
2. functions that perform the model selection with respect to **multiple datasets**.
The six functions, sorted from the most specific context to the most general one, are:
- *hyperparameter_validation*, *hyperparameters_validation*, *models_validation* (single dataset);
- *datasets_hyperparameter_validation*, *datasets_hyperparameters_validation*, *datasets_models_validation* (multiple
datasets).
This module is built on top of the **numpy** library: in fact, the datasets are represented as np.array.
Moreover, the plots are made using the **matplotlib** library. In addition, it is built on top of the **sklearn** module:
- the machine learning models are represented as sklearn models (i.e. sklearn estimators);
- under the hood, the selection is performed using the grid search cross validation provided by sklearn (i.e.
GridSearchCV);
- several other operations are done using the functionalities provided by sklearn.
This module, besides the model selection functions, contains also some utilities:
- the PolynomialRegression class;
- some utility functions.
"""
import matplotlib.pyplot as plt
import numpy as np
from sklearn.utils import resample
from sklearn.model_selection import train_test_split, cross_val_score, TimeSeriesSplit, GridSearchCV
from sklearn.metrics import mean_squared_error, accuracy_score
from sklearn.preprocessing import MinMaxScaler, PolynomialFeatures
from sklearn.base import BaseEstimator
from sklearn.linear_model import LinearRegression
#----------------------------------------------------------------------------------------------------------------------------
# POLYNOMIAL REGRESSOR MODEL
class PolynomialRegression(BaseEstimator):
"""
Polynomial regression model.
It's a sklearn model: it's compliant to the sklearn estimators interface.
`Example <https://scikit-learn.org/stable/developers/develop.html>`_
Parameters
----------
degree: int
Degree to apply for the polynomial transformation.
Notes
----------
The polynomial transformation is performed using the sklearn PolynomialFeatures.
"""
def __init__(self, degree=1):
        self.degree = degree
def fit(self, X, y):
self.poly_transformer = PolynomialFeatures(self.degree, include_bias=False)
self.poly_transformer.fit(X)
X = self.poly_transformer.transform(X)
self.model = LinearRegression(fit_intercept=True)
self.model.fit(X,y)
return self
def predict(self, X):
X = self.poly_transformer.transform(X)
return self.model.predict(X)
def get_params(self, deep=True):
return {"degree": self.degree}
def set_params(self, **parameters):
for parameter, value in parameters.items():
setattr(self, parameter, value)
return self
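# Editor-added sketch (not part of the original module): PolynomialRegression
# behaves like any sklearn estimator. The synthetic data is an assumption made
# purely for illustration, and the function is defined but never called.
def _polynomial_regression_example():
    X = np.linspace(0, 1, 50).reshape(-1, 1)
    y = 2 + 3 * X[:, 0] ** 2
    model = PolynomialRegression(degree=2)
    model.fit(X, y)
    return model.predict(X[:3])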
#----------------------------------------------------------------------------------------------------------------------------
# UTILITY FUNCTIONS
def compute_train_val_test(X, y, model, scale=False, test_size=0.2, time_series=False, random_state=123, n_folds=5,
regr=True):
"""
Compute the training-validation-test scores for the given model on the given dataset.
The training and test scores are simply computed by splitting the dataset into the training and test sets. The validation
score is performed applying the cross validation on the training set.
Parameters
----------
X: np.array
Two-dimensional np.array, containing the explanatory features of the dataset.
y: np.array
Mono dimensional np.array, containing the response feature of the dataset.
model: sklearn.base.BaseEstimator
Model to evaluate.
scale: bool
Indicates whether to scale or not the features in `X`.
(The scaling is performed using the sklearn MinMaxScaler).
test_size: float
Decimal number between 0 and 1, which indicates the proportion of the test set.
time_series: bool
        Indicates if the given dataset is a time series dataset (i.e. a dataset indexed by days).
(This affects the computing of the scores).
random_state: int
Used in the training-test splitting of the dataset.
n_folds: int
Indicates how many folds are made in order to compute the k-fold cross validation.
(It's used only if `time_series` is False).
regr: bool
Indicates if it's either a regression or a classification problem.
Returns
----------
train_score: float
val_score: float
test_score: float
Notes
----------
- If `regr` is True, the returned scores are errors, computed using the MSE formula (i.e. Mean Squared Error).
Otherwise, the returned scores are accuracy measures.
- If `time_series` is False, the training-test splitting of the dataset is made randomly. In addition, the cross
validation strategy performed is the classic k-fold cross validation: the number of folds is specified by `n_folds`.
Otherwise, if `time_series` is True, the training-test sets are obtained simply by splitting the dataset into two
contiguous parts. In addition, the cross validation strategy performed is the sklearn TimeSeriesSplit.
"""
if regr:
scoring="neg_mean_squared_error"
else:
scoring="accuracy"
# Split into training e test.
if not time_series : # Random splitting (not time series)
X_train_80, X_test, y_train_80, y_test = train_test_split(X, y, test_size=test_size, random_state=random_state)
else: # time series splitting
train_len = int(X.shape[0]*(1-test_size))
X_train_80 = X[:train_len]
y_train_80 = y[:train_len]
X_test = X[train_len:]
y_test = y[train_len:]
if(scale): # Scale the features in X
scaler = MinMaxScaler()
scaler.fit(X_train_80)
X_train_80 = scaler.transform(X_train_80)
X_test = scaler.transform(X_test)
# Cross validation
if not time_series: # k-fold cross validation
cv = n_folds
else: # cross validation for time series
cv = TimeSeriesSplit(n_splits = n_folds)
scores = cross_val_score(model, X_train_80, y_train_80, cv=cv, scoring=scoring)
val_score = scores.mean() # validation score
if regr:
val_score = -val_score
model.fit(X_train_80,y_train_80) # Fit the model using all the training
# Compute training and test scores
train_score=0
test_score=0
if regr:
train_score = mean_squared_error(y_true=y_train_80, y_pred=model.predict(X_train_80))
test_score = mean_squared_error(y_true=y_test, y_pred=model.predict(X_test))
else:
train_score = accuracy_score(y_true=y_train_80, y_pred=model.predict(X_train_80))
test_score = accuracy_score(y_true=y_test, y_pred=model.predict(X_test))
return train_score, val_score, test_score # Return a triple
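# Editor-added sketch: the three scores for a plain linear model on synthetic
# data. The dataset and its coefficients are assumptions for illustration only.
def _compute_train_val_test_example():
    rng = np.random.RandomState(0)
    X = rng.rand(100, 2)
    y = X @ np.array([1.5, -2.0]) + rng.randn(100) * 0.1
    return compute_train_val_test(X, y, LinearRegression())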
def compute_bias_variance_error(X, y, model, scale=False, N_TESTS = 20, sample_size=0.67):
"""
Compute the bias^2-variance-error scores for the given model on the given dataset.
These measures are computed in an approximate way, using `N_TESTS` random samples of size `sample_size` from the
dataset.
Parameters
----------
X: np.array
Two-dimensional np.array, containing the explanatory features of the dataset.
y: np.array
Mono dimensional np.array, containing the response feature of the dataset.
model: sklearn.base.BaseEstimator
Model to evaluate.
scale: bool
Indicates whether to scale or not the features in `X`.
(The scaling is performed using the sklearn MinMaxScaler).
N_TESTS: int
Number of samples that are made in order to compute the measures.
sample_size: float
Decimal number between 0 and 1, which indicates the proportion of the sample.
Returns
----------
bias: float
variance: float
error: float
"""
# Scale the features in `X`
if(scale):
scaler = MinMaxScaler()
scaler.fit(X)
X = scaler.transform(X)
# Vector 'vector_ypred': at the beginning is a list of lists (i.e. two dimensional list).
# In the end it will be a matrix which has as many rows as `N_TESTS` (each row corresponds to a sample) and as many
# columns as the number of instances in `X` (each column is a point of the dataset).
# Row 'i' --> there are the predictions made by the model on the sample 'i' using all the dataset points.
# Column 'j' --> there are the predictions made by the model on the point 'j' using all the `N_TESTS` samples.
vector_ypred = []
# Iterate through N_TESTS. At each iteration extract a new sample and fit the model on it.
for i in range(N_TESTS):
# Extract a new sample (sample 'i')
Xs, ys = resample(X,y, n_samples=int(sample_size*len(y)) )
# Fit the model on this sample 'i'
model.fit(Xs,ys)
# Add the predictions made by the model on all the dataset points
vector_ypred.append(list(model.predict(X)))
vector_ypred = np.array(vector_ypred) # Transform into numpy array
# Vector that has as many elements as the dataset points, and for each of them it has the associated bias^2 computed on
# the `N_TEST` samples.
vector_bias = (y - np.mean(vector_ypred, axis=0))**2
# Vector that has as many elements as the dataset points, and for each of them it has the associated variance computed on
# the `N_TEST` samples.
vector_variance = np.var(vector_ypred, axis=0)
# Vector that has as many elements as the dataset points, and for each of them it has the associated error computed on
# the `N_TEST` samples.
vector_error = np.sum((vector_ypred - y)**2, axis=0)/N_TESTS
bias = np.mean(vector_bias) # Total bias^2 of the model
variance = np.mean(vector_variance) # Total variance of the model
error = np.mean(vector_error) # Total error of the model
return bias,variance,error # Return a triple
def plot_predictions(X, y, model, scale=False, test_size=0.2, plot_type=0, xvalues=None, xlabel="Index",
title="Actual vs Predicted values", figsize=(6,6)):
"""
Plot the predictions made by the given model on the given dataset, versus its actual values.
The dataset is split into training-test sets: the former is used to train the `model`, on the latter the predictions are
made.
Parameters
----------
X: np.array
Two-dimensional np.array, containing the explanatory features of the dataset.
y: np.array
Mono dimensional np.array, containing the response feature of the dataset.
model: sklearn.base.BaseEstimator
Model used to make the predictions.
scale: bool
Indicates whether to scale or not the features in `X`.
(The scaling is performed using the sklearn MinMaxScaler).
test_size: float
Decimal number between 0 and 1, which indicates the proportion of the test set.
plot_type: int
Indicates the type of the plot.
- 0 -> In the same plot two different curves are drawn: the first has on the x axis `xvalues` and on the y axis
the actual values (i.e. `y`); the second has on the x axis `xvalues` and on the y axis the computed
predicted values.
- 1 -> On the x axis the actual values are put, on the y axis the predicted ones.
xvalues: list (in general, iterable)
Values that have to be put in the x axis of the plot.
(It's used only if `plot_type` is 0).
xlabel: str
Label of the x axis of the plot.
(It's used only if `plot_type` is 0).
title: str
Title of the plot.
figsize: tuple
Two dimensions of the plot.
Returns
----------
matplotlib.axes.Axes
The matplotlib Axes where the plot has been made.
Notes
----------
The splitting of the datasets into the training-test sets is simply made by dividing the dataset into two contiguous
sequences.
I.e. it is the same technique used usually when the dataset is a time series dataset. (This is done in order to simplify
the visualization).
For this reason, typically this function is applied on time series datasets.
"""
train_len = int(X.shape[0]*(1-test_size))
X_train_80 = X[:train_len]
y_train_80 = y[:train_len]
X_test = X[train_len:]
y_test = y[train_len:]
if(scale): # Scale the features in X
scaler = MinMaxScaler()
scaler.fit(X_train_80)
X_train_80 = scaler.transform(X_train_80)
X_test = scaler.transform(X_test)
model.fit(X_train_80,y_train_80) # Fit using all the training set
predictions = model.predict(X_test)
fig, ax = plt.subplots(figsize=figsize)
if plot_type==0:
if xvalues is None:
xvalues=range(len(X))
ax.plot(xvalues,y, 'o:', label='actual values')
ax.plot(xvalues[train_len:],predictions, 'o:', label='predicted values')
ax.legend()
elif plot_type==1:
ax.plot(y[train_len:],predictions,'o')
ax.plot([0, 1], [0, 1], 'r-',transform=ax.transAxes)
xlabel="Actual values"
ax.set_ylabel("Predicted values")
ax.set_xlabel(xlabel)
ax.set_title(title)
ax.grid()
return ax
def _plot_TrainVal_values(xvalues, train_val_scores, plot_train, xlabel, title, figsize=(6,6), bar=False):
"""
Plot the given list of training-validation scores.
This function is an auxiliary function for the model selection functions. It's meant to be private in the
module.
Parameters
----------
xvalues: list (in general iterable)
Values to put in the x axis of the plot.
train_val_scores: np.array
Two dimensional np.array, containing two columns: the first contains the trainining scores, the second the validation
scores.
Basically, it is a list of training-validation scores.
plot_train: bool
Indicates whether to plot also the training scores or to plot only the validation ones.
xlabel: str
Label of the x axis.
title: str
Title of the plot.
figsize: tuple
Two dimensions of the plot.
bar: bool
Indicates whether to plot the scores using bars or using points.
        If `bar` is True, `xvalues` must contain strings (i.e. labels).
Returns
----------
matplotlib.axes.Axes
The matplotlib Axes where the plot has been made.
"""
fig, ax = plt.subplots(figsize=figsize)
if not bar: # Points
if plot_train: # Plot also the training scores
ax.plot(xvalues,train_val_scores[:,0], 'o:', label='Train')
ax.plot(xvalues,train_val_scores[:,1], 'o:', label='Validation') # Validation scores
else: # Bars
if plot_train: # Plot also the training scores
x = np.arange(len(xvalues)) # The label locations
width = 0.35 # The width of the bars
ax.bar(x-width/2,train_val_scores[:,0], width=width, label='Train')
ax.bar(x+width/2,train_val_scores[:,1], width=width, label='Validation') # Validation scores
ax.set_xticks(x)
ax.set_xticklabels(xvalues)
else:
ax.bar(xvalues,train_val_scores[:,1],label='Validation')
ax.set_xlabel(xlabel)
ax.set_title(title)
ax.grid()
ax.legend()
return ax
#----------------------------------------------------------------------------------------------------------------------------
# FUNCTIONS THAT PERFORM THE MODEL SELECTION WITH RESPECT TO A SINGLE DATASET
def hyperparameter_validation(X, y, model, hyperparameter, hyperparameter_values, scale=False, test_size=0.2,
time_series=False, random_state=123, n_folds=5, regr=True, plot=False, plot_train=False,
xvalues=None, xlabel=None, title="Hyperparameter validation", figsize=(6,6)):
"""
Select the best value for the specified hyperparameter of the specified model on the given dataset.
In other words, perform the tuning of the `hyperparameter` among the values in `hyperparameter_values`.
This selection is made using the validation score (i.e. the best hyperparameter value is the one with the best validation
score).
The validation score is computed by splitting the dataset into the training-test sets and then by applying the cross
validation on the training set.
Additionally, the training and test scores are also computed.
Optionally, the validation scores of the `hyperparameter_values` can be plotted, making a graphical visualization of the
selection.
Parameters
----------
X: np.array
Two-dimensional np.array, containing the explanatory features of the dataset.
y: np.array
Mono dimensional np.array, containing the response feature of the dataset.
model: sklearn.base.BaseEstimator
Model which has the specified `hyperparameter`.
hyperparameter: str
The name of the hyperparameter that has to be validated.
hyperparameter_values: list
List of values for `hyperparameter` that have to be taken into account in the selection.
scale: bool
Indicates whether to scale or not the features in `X`.
(The scaling is performed using the sklearn MinMaxScaler).
test_size: float
Decimal number between 0 and 1, which indicates the proportion of the test set.
time_series: bool
Indicates if the given dataset is a time series dataset (i.e. dataset indexed by days).
(This affects the computing of the validation score).
random_state: int
Used in the training-test splitting of the dataset.
n_folds: int
Indicates how many folds are made in order to compute the k-fold cross validation.
(It's used only if `time_series` is False).
regr: bool
Indicates if it's either a regression or a classification problem.
plot: bool
Indicates whether to plot or not the validation score values.
plot_train: bool
Indicates whether to plot also the training scores.
(It's considered only if `plot` is True).
xvalues: list (in general, iterable)
Values that have to be put in the x axis of the plot.
xlabel: str
Label of the x axis of the plot.
title: str
Title of the plot.
figsize: tuple
Two dimensions of the plot.
Returns
----------
train_val_scores: np.array
Two dimensional np.array, containing two columns: the first contains the training scores, the second the validation
scores.
It has as many rows as the number of values in `hyperparameter_values` (i.e. number of values to be tested).
best_index: int
Index of `hyperparameter_values` that indicates which is the best hyperparameter value.
test_score: float
Test score associated with the best hyperparameter value.
ax: matplotlib.axes.Axes
The matplotlib Axes where the plot has been made.
If `plot` is False, then it is None.
Notes
----------
- If `regr` is True, the validation scores are errors (MSE, i.e. Mean Squared Errors): this means that the best
hyperparameter value is the one associated with the minimum validation score.
Otherwise, the validation scores are accuracies: this means that the best hyperparameter value is the one associated
with the maximum validation score.
- If `time_series` is False, the training-test splitting of the dataset is made randomly. In addition, the cross
validation strategy performed is the classic k-fold cross validation: the number of folds is specified by `n_folds`.
Otherwise, if `time_series` is True, the training-test sets are simply obtained by splitting the dataset into two
contiguous parts. In addition, the cross validation strategy performed is the sklearn TimeSeriesSplit.
"""
param_grid = {hyperparameter:hyperparameter_values} # Create the hyperparameter grid
# Call the function for the validation of an arbitrary number of hyperparameters
params, train_val_scores, best_index, test_score = hyperparameters_validation(X, y, model, param_grid, scale=scale,
test_size=test_size,
time_series=time_series,
random_state=random_state, n_folds=n_folds,
regr=regr)
ax = None
if(plot): # Make the plot
if not xvalues: # Default values on the x axis
xvalues = hyperparameter_values
if not xlabel: # Default label on the x axis
xlabel = hyperparameter
ax = _plot_TrainVal_values(xvalues, train_val_scores, plot_train, xlabel, title, figsize)
return train_val_scores, best_index, test_score, ax
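# Editor-added sketch: tuning the polynomial degree with the function above.
# The synthetic dataset and the degree grid are assumptions for illustration.
def _hyperparameter_validation_example():
    rng = np.random.RandomState(0)
    X = rng.rand(80, 1)
    y = 1 + 2 * X[:, 0] ** 2 + rng.randn(80) * 0.05
    return hyperparameter_validation(X, y, PolynomialRegression(), "degree",
                                     [1, 2, 3, 4])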
def hyperparameters_validation(X, y, model, param_grid, scale=False, test_size=0.2, time_series=False, random_state=123,
n_folds=5, regr=True):
"""
Select the best combination of values for the specified hyperparameters of the specified model on the given dataset.
In other words, perform the tuning of multiple hyperparameters.
The parameter `param_grid` is a dictionary that indicates which are the specified hyperparameters and what are the
associated values to test.
All the possible combinations of values are tested, in an exhaustive way (i.e. grid search).
This selection is made using the validation score (i.e. the best combination of hyperparameters values is the one with
the best validation score).
The validation score is computed by splitting the dataset into the training-test sets and then by applying the cross
validation on the training set.
Additionally, the training and test scores are also computed.
Parameters
----------
X: np.array
Two-dimensional np.array, containing the explanatory features of the dataset.
y: np.array
Mono dimensional np.array, containing the response feature of the dataset.
model: sklearn.base.BaseEstimator
Model which has the specified hyperparameters.
param_grid: dict
Dictionary which has as keys the names of the specified hyperparameters and as values the associated list of
values to test.
scale: bool
Indicates whether to scale or not the features in `X`.
(The scaling is performed using the sklearn MinMaxScaler).
test_size: float
Decimal number between 0 and 1, which indicates the proportion of the test set.
time_series: bool
        Indicates if the given dataset is a time series dataset (i.e. dataset indexed by days).
(This affects the computing of the validation score).
random_state: int
Used in the training-test splitting of the dataset.
n_folds: int
Indicates how many folds are made in order to compute the k-fold cross validation.
(It's used only if `time_series` is False).
regr: bool
Indicates if it's either a regression or a classification problem.
Returns
----------
params: list
List which enumerates all the possible combinations of hyperparameters values.
It's a list of dictionaries: each dictionary represents a specific combination of hyperparameters values. (It's a
dictionary which has as keys the hyperparameters names and as values the specific associated values of that combination).
train_val_scores: np.array
Two dimensional np.array, containing two columns: the first contains the training scores, the second the validation
scores.
It has as many rows as the number of possible combinations of the hyperparameters values.
(It has as many rows as the elements of `params`).
best_index: int
Index of `params` that indicates which is the best combination of hyperparameters values.
test_score: float
Test score associated with the best combination of hyperparameters values.
Notes
----------
- If `regr` is True, the validation scores are errors (MSE, i.e. Mean Squared Errors): this means that the best
combination of hyperparameters values is the one associated with the minimum validation score.
Otherwise, the validation scores are accuracies: this means that the best combination of hyperparameters values is the
one associated with the maximum validation score.
- If `time_series` is False, the training-test splitting of the dataset is made randomly. In addition, the cross
validation strategy performed is the classic k-fold cross validation: the number of folds is specified by `n_folds`.
Otherwise, if `time_series` is True, the training-test sets are simply obtained by splitting the dataset into two
contiguous parts. In addition, the cross validation strategy performed is the sklearn TimeSeriesSplit.
"""
if regr:
scoring="neg_mean_squared_error"
else:
scoring="accuracy"
# Split into training-test sets
if not time_series : # Random splitting
X_train_80, X_test, y_train_80, y_test = train_test_split(X, y, test_size=test_size, random_state=random_state)
else: # Time series splitting
train_len = int(X.shape[0]*(1-test_size))
X_train_80 = X[:train_len]
y_train_80 = y[:train_len]
X_test = X[train_len:]
y_test = y[train_len:]
if(scale): # Scale the features in `X`
scaler = MinMaxScaler()
scaler.fit(X_train_80)
X_train_80 = scaler.transform(X_train_80)
X_test = scaler.transform(X_test)
# Cross validation strategy
if not time_series: # The strategy is the classic k-fold cross validation
cv = n_folds
else: # Time series cross validation strategy
cv = TimeSeriesSplit(n_splits = n_folds)
# Grid search
grid_search = GridSearchCV(model,param_grid,scoring=scoring,cv=cv,return_train_score=True)
grid_search.fit(X_train_80,y_train_80)
params = grid_search.cv_results_["params"] # List of all the possible combinations of hyperparameters values
# List where for all the possible combinations of hyperparameters values there is the associated training score
train_scores = grid_search.cv_results_["mean_train_score"]
# List where for all the possible combinations of hyperparameters values there is the associated validation score
val_scores = grid_search.cv_results_["mean_test_score"]
# Index of `params`, corresponding to the best combination of hyperparameters values
best_index = grid_search.best_index_
# Model with the best combination of hyperparameters values
best_model = grid_search.best_estimator_
    if regr: # The scores are negative: multiply by -1
train_scores = train_scores*(-1)
val_scores = val_scores*(-1)
train_val_scores = np.concatenate((train_scores.reshape(-1,1), val_scores.reshape(-1,1)), axis=1)
# Fit the best model on all the training set
best_model.fit(X_train_80,y_train_80)
# Compute the test score of the best model
test_score=0
if regr:
test_score = mean_squared_error(y_true=y_test, y_pred=best_model.predict(X_test))
else:
test_score = accuracy_score(y_true=y_test, y_pred=best_model.predict(X_test))
return params, train_val_scores, best_index, test_score
def models_validation(X, y, model_paramGrid_list, scale_list=None, test_size=0.2, time_series=False, random_state=123,
n_folds=5, regr=True, plot=False, plot_train=False, xvalues=None, xlabel="Models",
title="Models validation", figsize=(6,6)):
"""
Select the best model on the given dataset.
The parameter `model_paramGrid_list` is the list of the models to test. It also contains, for each model, the grid of
hyperparameters that have to be tested on that model (i.e. the grid which contains the values to test for each
specified hyperparameter of the model).
(That grid has the same structure as the `param_grid` parameter of the function `hyperparameters_validation`. See
`hyperparameters_validation`).
For each specified model, the best combination of hyperparameters values is selected in an exhaustive way (i.e. grid
search).
Actually, the function `hyperparameters_validation` is used.
(See `hyperparameters_validation`).
The selection of the best model is made using the validation score (i.e. the best model is the one with the best
validation score).
The validation score is computed by splitting the dataset into the training-test sets and then by applying the cross
validation on the training set.
Additionally, the training and test scores are also computed.
Optionally, the validation scores of the different models can be plotted, making a graphical visualization of the
selection.
Parameters
----------
X: np.array
Two-dimensional np.array, containing the explanatory features of the dataset.
y: np.array
Mono dimensional np.array, containing the response feature of the dataset.
model_paramGrid_list: list
List that specifies the models and the relative grids of hyperparameters to be tested.
It's a list of triples (i.e. tuples), where each triple represents a model:
- the first element is a string, which is a mnemonic name of that model;
- the second element is the sklearn model;
- the third element is the grid of hyperparameters to test for that model. It's a dictionary, with the same
structure of the parameter `param_grid` of the function `hyperparameters_validation`.
scale_list: list or bool
List of booleans, which has as many elements as the models to test (i.e. as the elements of the
`model_paramGrid_list` list).
This list indicates, for each different model, if the features in `X` have to be scaled or not.
`scale_list` can be None or False: in this case the `X` features aren't scaled for any model. `scale_list` can be
True: in this case the `X` features are scaled for all the models.
test_size: float
Decimal number between 0 and 1, which indicates the proportion of the test set.
time_series: bool
Indicates if the given dataset is a time series dataset (i.e. dataset indexed by days).
(This affects the computing of the validation score).
random_state: int
Used in the training-test splitting of the dataset.
n_folds: int
Indicates how many folds are made in order to compute the k-fold cross validation.
(It's used only if `time_series` is False).
regr: bool
Indicates if it's either a regression or a classification problem.
plot: bool
Indicates whether to plot or not the validation score values.
plot_train: bool
Indicates whether to plot also the training scores.
(It's considered only if `plot` is True).
xvalues: list (in general, iterable)
Values that have to be put in the x axis of the plot.
xlabel: str
Label of the x axis of the plot.
title: str
Title of the plot.
figsize: tuple
Two dimensions of the plot.
Returns
----------
models_train_val_score: np.array
Two dimensional np.array, containing two columns: the first contains the training scores, the second the validation
scores.
It has as many rows as the number of models to test (i.e. number of elements in the `model_paramGrid_list` list).
models_best_params: list
List which indicates, for each model, the best combination of the hyperparameters values for that model.
It has as many elements as the models to test (i.e. as the elements of the `model_paramGrid_list` list), and it
contains dictionaries: each dictionary represents the best combination of the hyperparameters values for the
associated model.
best_index: int
Index of `model_paramGrid_list` that indicates which is the best model.
test_score: float
Test score associated with the best model.
ax: matplotlib.axes.Axes
The matplotlib Axes where the plot has been made.
If `plot` is False, then it is None.
See also
----------
hyperparameters_validation:
select the best combination of values for the specified hyperparameters of the specified model on the given dataset.
Notes
----------
- If `regr` is True, the validation scores are errors (MSE, i.e. Mean Squared Errors): this means that the best
model is the one associated with the minimum validation score.
Otherwise, the validation scores are accuracies: this means that the best model is the one associated with the
maximum validation score.
- If `time_series` is False, the training-test splitting of the dataset is made randomly. In addition, the cross
validation strategy performed is the classic k-fold cross validation: the number of folds is specified by `n_folds`.
Otherwise, if `time_series` is True, the training-test sets are simply obtained by splitting the dataset into two
contiguous parts. In addition, the cross validation strategy performed is the sklearn TimeSeriesSplit.
"""
if not scale_list: # `scale_list` is either None or False
scale_list = [False]*len(model_paramGrid_list)
elif scale_list is True: # `scale_list` is True
scale_list = [True]*len(model_paramGrid_list)
# Numpy matrix (np.array) which has as many rows as the models and which has two columns, one for the training scores and
# the other for the validation scores. At the beginning it is a list of tuples.
models_train_val_score = []
# List which has as many elements as the models: for each model there is the dictionary of the best combination of
# hyperparameters values.
models_best_params = []
# List which has as many elements as the models: for each model there is the test score (associated with the best
# combination of hyperparameters values).
models_test_score = []
    for i,triple in enumerate(model_paramGrid_list): # Iterate through all the triples (name, model, param_grid)
model,param_grid = triple[1:]
# Apply the grid search on model-param_grid
params, train_val_scores, best_index, test_score = hyperparameters_validation(X, y, model, param_grid,
scale=scale_list[i],
test_size=test_size,
time_series=time_series,
random_state=random_state,
n_folds=n_folds, regr=regr)
models_train_val_score.append(tuple(train_val_scores[best_index])) # Add the row for that model
models_best_params.append(params[best_index]) # Add the element for that model
models_test_score.append(test_score) # Add the element for that model
models_train_val_score = np.array(models_train_val_score) # Transform into numpy matrix (i.e. np.array)
# Find the best index (i.e. the best model)
if regr:
best_index = np.argmin(models_train_val_score,axis=0)[1]
else:
best_index = np.argmax(models_train_val_score,axis=0)[1]
# Test score of the best model
test_score = models_test_score[best_index]
ax = None
if(plot): # Make the plot
if not xvalues: # Default values for the x axis
xvalues = [model_paramGrid_list[i][0] for i in range(len(model_paramGrid_list))]
ax = _plot_TrainVal_values(xvalues, models_train_val_score, plot_train, xlabel, title, figsize, bar=True)
return models_train_val_score, models_best_params, best_index, test_score, ax
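# Editor-added sketch: comparing a plain linear model against a tuned
# polynomial one. Names, grids and data are assumptions for illustration.
def _models_validation_example():
    rng = np.random.RandomState(0)
    X = rng.rand(80, 1)
    y = 1 + 2 * X[:, 0] + rng.randn(80) * 0.05
    candidates = [
        ("linear", LinearRegression(), {}),
        ("poly", PolynomialRegression(), {"degree": [2, 3]}),
    ]
    return models_validation(X, y, candidates)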
#----------------------------------------------------------------------------------------------------------------------------
# FUNCTIONS THAT PERFORM THE MODEL SELECTION WITH RESPECT TO MULTIPLE DATASETS
def datasets_hyperparameter_validation(dataset_list, model, hyperparameter, hyperparameter_values, scale=False,
test_size=0.2, time_series=False, random_state=123, n_folds=5, regr=True, plot=False,
plot_train=False, xvalues=None, xlabel="Datasets", title="Datasets validation",
figsize=(6,6) ,verbose=False, figsize_verbose=(6,6)):
"""
Select the best dataset and the best value for the specified hyperparameter of the specified model (i.e. select the best
couple dataset-hyperparameter value).
For each dataset in `dataset_list`, all the specified values `hyperparameter_values` are tested for the specified
`hyperparameter` of `model`.
In other words, on each dataset the tuning of `hyperparameter` is performed: in fact, on each dataset, the function
`hyperparameter_validation` is applied. (See `hyperparameter_validation`).
In the end, the best couple dataset-hyperparameter value is selected.
Despite the fact that a couple dataset-hyperparameter value is selected, the main viewpoint is focused with respect to
the datasets. It's a validation focused on the datasets.
In fact, first of all, for each dataset the hyperparameter tuning is performed: in this way the best value is selected
and its relative score is associated with the dataset (i.e. it's the score of the dataset). (In other words, on each
dataset the function `hyperparameter_validation` is applied). Finally, after that, the best dataset is selected.
It's a two-levels selection.
This selection is made using the validation score (i.e. the best couple dataset-hyperparameter value is the one with the
best validation score).
The validation score is computed by splitting each dataset into the training-test sets and then by applying the cross
validation on the training set.
Additionally, the training and test scores are also computed.
Optionally, the validation scores of the datasets can be plotted, making a graphical visualization of the dataset
selection. This is the 'main' plot.
Moreover, still optionally, the 'secondary' plots can be done: for each dataset, the validation scores of the
`hyperparameter_values` are plotted, making a graphical visualization of the hyperparameter tuning on that dataset.
(As the plot made by the `hyperparameter_validation` function).
Parameters
----------
dataset_list: list
List of couples, where each couple is a dataset.
- The first element is X, the two-dimensional np.array containing the explanatory features of the dataset.
- The second element is y, the mono dimensional np.array containing the response feature of the dataset.
model: sklearn.base.BaseEstimator
Model which has the specified `hyperparameter`.
hyperparameter: str
The name of the hyperparameter that has to be validated.
hyperparameter_values: list
List of values for `hyperparameter` that have to be taken into account in the selection.
scale: bool
Indicates whether to scale or not the features in 'X' (for all the datasets).
(The scaling is performed using the sklearn MinMaxScaler).
test_size: float
Decimal number between 0 and 1, which indicates the proportion of the test set (for each dataset).
time_series: bool
Indicates if the given datasets are time series dataset (i.e. datasets indexed by days).
(This affects the computing of the validation scores).
random_state: int
Used in the training-test splitting of the datasets.
n_folds: int
Indicates how many folds are made in order to compute the k-fold cross validation.
(It's used only if `time_series` is False).
regr: bool
Indicates if it's either a regression or a classification problem.
plot: bool
Indicates whether to plot or not the validation score values of the datasets (i.e. this is the 'main' plot).
plot_train: bool
Indicates whether to plot also the training scores (both in the 'main' and 'secondary' plots).
xvalues: list (in general, iterable)
Values that have to be put in the x axis of the 'main' plot.
xlabel: str
Label of the x axis of the 'main' plot.
title: str
Title of the 'main' plot.
figsize: tuple
Two dimensions of the 'main' plot.
verbose: bool
If True, for each dataset are plotted the validation scores of the hyperparameter tuning (these are the 'secondary'
plots).
(See 'hyperparameter_validation').
figsize_verbose: tuple
Two dimensions of the 'secondary' plots.
Returns
----------
datasets_train_val_score: np.array
Two dimensional np.array, containing two columns: the first contains the training scores, the second the validation
scores.
It has as many rows as the number of datasets to test, i.e. as the number of elements in `dataset_list`.
datasets_best_hyperparameter_value: list
List which has as many elements as the number of the datasets (i.e. as the number of elements in `dataset_list`). For
each dataset, it contains the best `hyperparameter` value on that dataset.
best_index: int
Index of `dataset_list` that indicates which is the best dataset.
test_score: float
Test score associated with the best couple dataset-hyperparameter value.
axes: list
List of the matplotlib Axes where the plots have been made.
Firstly, the 'secondary' plots are put (if any). And, as last, the 'main' plot is put (if any).
If no plot has been made, `axes` is an empty list.
See also
----------
hyperparameter_validation:
select the best value for the specified hyperparameter of the specified model on the given dataset.
Notes
----------
- If `regr` is True, the validation scores are errors (MSE, i.e. Mean Squared Errors): this means that the best
couple dataset-hyperparameter value is the one associated with the minimum validation score.
Otherwise, the validation scores are accuracies: this means that the best couple is the one associated with the
maximum validation score.
- If `time_series` is False, the training-test splitting of each dataset is made randomly. In addition, the cross
validation strategy performed is the classic k-fold cross validation: the number of folds is specified by `n_folds`.
Otherwise, if `time_series` is True, the training-test sets are simply obtained by splitting each dataset into two
contiguous parts. In addition, the cross validation strategy performed is the sklearn TimeSeriesSplit.
"""
# numpy matrix (i.e. np.array) which has as many rows as the datasets, and it has the training and validation scores as
# columns. At the beginning it is a list.
datasets_train_val_score = []
# List which contains, for each dataset, the best hyperparameter value
datasets_best_hyperparameter_value = []
# List which contains, for each dataset, its test score (associated with the best hyperparameter value)
datasets_test_score = []
# List of axes
axes = []
for i,dataset in enumerate(dataset_list): # Iterate through all the datasets
X,y = dataset
# Perform the hyperparameter tuning on the current dataset
train_val_scores, best_index, test_score, ax = hyperparameter_validation(X, y, model, hyperparameter,
hyperparameter_values, scale=scale, test_size=test_size, time_series=time_series,
random_state=random_state, n_folds=n_folds, regr=regr, plot=verbose, plot_train=plot_train,
xvalues=hyperparameter_values, xlabel=hyperparameter,
title="Dataset "+str(i)+" : hyperparameter validation", figsize=figsize_verbose)
datasets_train_val_score.append(tuple(train_val_scores[best_index,:])) # Add the row related to that dataset
datasets_best_hyperparameter_value.append(hyperparameter_values[best_index]) # Add the element related to that dataset
        datasets_test_score.append(test_score) # Add the element related to that dataset
if ax:
axes.append(ax)
datasets_train_val_score = np.array(datasets_train_val_score) # Transform into numpy
# Find the best index, i.e. the best dataset (more precisely, the best couple dataset-hyperparameter value)
if regr:
best_index = np.argmin(datasets_train_val_score,axis=0)[1]
else:
best_index = np.argmax(datasets_train_val_score,axis=0)[1]
# Test score of the best couple dataset-hyperparameter value
test_score = datasets_test_score[best_index]
if(plot): # Make the plot
if not xvalues: # Default values on the x axis
xvalues = range(len(dataset_list))
ax = _plot_TrainVal_values(xvalues,datasets_train_val_score,plot_train,xlabel,title,figsize, bar=True)
axes.append(ax)
return datasets_train_val_score, datasets_best_hyperparameter_value, best_index, test_score, axes
def datasets_hyperparameters_validation(dataset_list, model, param_grid, scale=False, test_size=0.2, time_series=False,
random_state=123, n_folds=5, regr=True, plot=False, plot_train=False, xvalues=None,
xlabel="Datasets", title="Datasets validation",figsize=(6,6)):
"""
Select the best dataset and the best combination of values for the specified hyperparameters of the specified model (i.e.
select the best couple dataset-combination of hyperparameters values).
For each dataset in `dataset_list`, all the possible combinations of the hyperparameters values for `model` (specified
with `param_grid`) are tested.
In other words, on each dataset the tuning of the specified hyperparameters is performed in an exhaustive way: in fact,
on each dataset, the function `hyperparameters_validation` is applied. (See `hyperparameters_validation`).
In the end, the best couple dataset-combination of hyperparameters values is selected.
    Although a couple dataset-combination of hyperparameters values is selected, the main viewpoint is that of the
    datasets: it's a validation focused on the datasets.
In fact, first of all, for each dataset the hyperparameters tuning is performed: in this way the best combination of
values is selected and its relative score is associated with the dataset (i.e. it's the score of the dataset). (In other
    words, on each dataset the function `hyperparameters_validation` is applied.) Finally, the best dataset is selected:
    it's a two-level selection.
    This selection is made using the validation score (i.e. the best couple dataset-combination of hyperparameters values
    is the one with the best validation score).
The validation score is computed by splitting each dataset into the training-test sets and then by applying the cross
validation on the training set.
Additionally, the training and test scores are also computed.
Optionally, the validation scores of the datasets can be plotted, making a graphical visualization of the dataset
selection.
Parameters
----------
dataset_list: list
        List of couples, where each couple is a dataset.
- The first element is X, the two-dimensional np.array containing the explanatory features of the dataset.
- The second element is y, the mono dimensional np.array containing the response feature of the dataset.
model: sklearn.base.BaseEstimator
Model which has the specified hyperparameters.
param_grid: dict
Dictionary which has as keys the names of the specified hyperparameters and as values the associated list of
values to test.
scale: bool
        Indicates whether or not to scale the features in 'X' (for all the datasets).
(The scaling is performed using the sklearn MinMaxScaler).
test_size: float
Decimal number between 0 and 1, which indicates the proportion of the test set (for each dataset).
time_series: bool
Indicates if the given datasets are time series datasets (i.e. datasets indexed by days).
(This affects the computing of the validation score).
random_state: int
Used in the training-test splitting of the datasets.
n_folds: int
Indicates how many folds are made in order to compute the k-fold cross validation.
(It's used only if `time_series` is False).
regr: bool
        Indicates whether it's a regression problem (True) or a classification problem (False).
plot: bool
        Indicates whether or not to plot the validation scores of the datasets.
plot_train: bool
Indicates whether to plot also the training scores.
(It's considered only if `plot` is True).
xvalues: list (in general, iterable)
Values that have to be put in the x axis of the plot.
xlabel: str
Label of the x axis of the plot.
title: str
Title of the plot.
figsize: tuple
Two dimensions of the plot.
Returns
----------
datasets_train_val_score: np.array
Two dimensional np.array, containing two columns: the first contains the training scores, the second the validation
scores.
It has as many rows as the number of datasets to test, i.e. as the number of elements in `dataset_list`.
datasets_best_params: list
List which has as many elements as the number of the datasets (i.e. as the number of elements in `dataset_list`). For
each dataset, it contains the best combination of hyperparameters values on that dataset.
Each combination is represented as a dictionary, with keys the hyperparameters names and values the associated
values.
best_index: int
Index of `dataset_list` that indicates which is the best dataset.
test_score: float
Test score associated with the best couple dataset-combination of hyperparameters values.
ax: matplotlib.axes.Axes
The matplotlib Axes where the plot has been made.
See also
----------
hyperparameters_validation:
select the best combination of values for the specified hyperparameters of the specified model on the given dataset.
Notes
----------
- If `regr` is True, the validation scores are errors (MSE, i.e. Mean Squared Errors): this means that the best
couple dataset-combination of hyperparameters values is the one associated with the minimum validation score.
Otherwise, the validation scores are accuracies: this means that the best couple is the one associated with the
maximum validation score.
- If `time_series` is False, the training-test splitting of each dataset is made randomly. In addition, the cross
validation strategy performed is the classic k-fold cross validation: the number of folds is specified by `n_folds`.
Otherwise, if `time_series` is True, the training-test sets are simply obtained by splitting each dataset into two
contiguous parts. In addition, the cross validation strategy performed is the sklearn TimeSeriesSplit.
"""
# numpy matrix (i.e. np.array) which has as many rows as the datasets, and it has the training and validation scores as
    # columns. At the beginning it is a list.
datasets_train_val_score = []
# List which contains, for each dataset, the best combination of hyperparameters values (i.e. a dictionary)
datasets_best_params = []
    # List which contains, for each dataset, its test score (associated with the best combination of hyperparameters values)
datasets_test_score = []
for X,y in dataset_list: # Iterate through all the datasets
        # Perform the exhaustive hyperparameters tuning on the current dataset
params, train_val_scores, best_index, test_score = hyperparameters_validation(X, y, model, param_grid, scale=scale,
test_size=test_size,
time_series=time_series,
random_state=random_state,
n_folds=n_folds, regr=regr)
datasets_train_val_score.append(tuple(train_val_scores[best_index,:])) # Add the row related to that dataset
datasets_best_params.append(params[best_index]) # Add the element related to that dataset
        datasets_test_score.append(test_score) # Add the element related to that dataset
datasets_train_val_score = np.array(datasets_train_val_score) # Transform into numpy
# Find the best index, i.e. the best dataset (more precisely, the best couple dataset-combination of hyperparameters
# values)
if regr:
best_index = np.argmin(datasets_train_val_score,axis=0)[1]
else:
best_index = np.argmax(datasets_train_val_score,axis=0)[1]
# Test score of the best couple dataset-combination of hyperparameters values
test_score = datasets_test_score[best_index]
ax = None
    if plot:  # Make the plot
if not xvalues: # Default values on the x axis
xvalues = range(len(dataset_list))
ax = _plot_TrainVal_values(xvalues,datasets_train_val_score,plot_train,xlabel,title,figsize, bar=True)
return datasets_train_val_score, datasets_best_params, best_index, test_score, ax
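

# --- Hedged usage sketch (illustrative addition, not part of the original module). ---
# It shows a plausible call to `datasets_hyperparameters_validation` with sklearn's
# Ridge and a two-hyperparameter grid; X0, X1 and y are placeholder arrays.
def _example_datasets_hyperparameters_validation(X0, X1, y):  # pragma: no cover
    """Minimal sketch: exhaustive grid tuning across two candidate datasets."""
    from sklearn.linear_model import Ridge
    param_grid = {"alpha": [0.1, 1.0, 10.0], "fit_intercept": [True, False]}
    scores, best_params, best_i, test_score, ax = datasets_hyperparameters_validation(
        [(X0, y), (X1, y)], Ridge(), param_grid, regr=True)
    return best_i, best_params[best_i], test_score

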
def datasets_models_validation(dataset_list, model_paramGrid_list, scale_list=None, test_size=0.2, time_series=False,
random_state=123, n_folds=5, regr=True, plot=False, plot_train=False, xvalues=None,
xlabel="Datasets", title="Datasets validation", figsize=(6,6) ,verbose=False,
figsize_verbose=(6,6)):
"""
Select the best dataset and the best model (i.e. select the best couple dataset-model).
For each dataset in `dataset_list`, all the models in `model_paramGrid_list` are tested: each model is tested performing
an exhaustive tuning of the specified hyperparameters. In fact, `model_paramGrid_list` also contains, for each model, the
grid of the hyperparameters that have to be tested on that model (i.e. the grid which contains the values to test for
each specified hyperparameter of the model).
In other words, on each dataset the selection of the best model is performed: in fact, on each dataset, the function
`models_validation` is applied. (See `models_validation`).
In the end, the best couple dataset-model is selected.
    Although a couple dataset-model is selected, the main viewpoint is that of the datasets: it's a validation focused
    on the datasets.
In fact, first of all, for each dataset the model selection is performed: in this way the best model is selected
and its relative score is associated with the dataset (i.e. it's the score of the dataset). (In other words, on each
    dataset the function `models_validation` is applied.) Finally, the best dataset is selected: it's a two-level
    selection.
    This selection is made using the validation score (i.e. the best couple dataset-model is the one with the best
    validation score).
The validation score is computed by splitting each dataset into the training-test sets and then by applying the cross
validation on the training set.
Additionally, the training and test scores are also computed.
Optionally, the validation scores of the datasets can be plotted, making a graphical visualization of the dataset
selection. This is the 'main' plot.
Moreover, still optionally, the 'secondary' plots can be done: for each dataset, the validation scores of the models are
plotted, making a graphical visualization of the models selection on that dataset. (As the plot made by the
`models_validation` function).
Parameters
----------
dataset_list: list
List of couples, where each couple is a dataset.
- The first element is X, the two-dimensional np.array containing the explanatory features of the dataset.
- The second element is y, the mono dimensional np.array containing the response feature of the dataset.
model_paramGrid_list: list
List that specifies the models and the relative grid of hyperparameters to be tested.
It's a list of triples (i.e. tuples), where each triple represents a model:
- the first element is a string, which is a mnemonic name of that model;
- the second element is the sklearn model;
- the third element is the grid of hyperparameters to test for that model. It's a dictionary, with the same
structure of parameter `param_grid` of the function `hyperparameters_validation`.
scale_list: list or bool
List of booleans, which has as many elements as the number of models to test (i.e. number of elements in the
`model_paramGrid_list` list).
This list indicates, for each different model, if the features in 'X' have to be scaled or not (for all the datasets).
`scale_list` can be None or False: in this case the 'X' features aren't scaled for any model. `scale_list` can be
True: in this case the 'X' features are scaled for all the models.
test_size: float
Decimal number between 0 and 1, which indicates the proportion of the test set (for each dataset).
time_series: bool
        Indicates if the given datasets are time series datasets (i.e. datasets indexed by days).
(This affects the computing of the validation score).
random_state: int
Used in the training-test splitting of the datasets.
n_folds: int
Indicates how many folds are made in order to compute the k-fold cross validation.
(It's used only if `time_series` is False).
regr: bool
        Indicates whether it's a regression problem (True) or a classification problem (False).
plot: bool
        Indicates whether or not to plot the validation scores of the datasets (i.e. this is the 'main' plot).
plot_train: bool
Indicates whether to plot also the training scores (both in the 'main' and 'secondary' plots).
xvalues: list (in general, iterable)
Values that have to be put in the x axis of the 'main' plot.
xlabel: str
Label of the x axis of the 'main' plot.
title: str
Title of the 'main' plot.
figsize: tuple
Two dimensions of the 'main' plot.
verbose: bool
If True, for each dataset the validation scores of the models are plotted (i.e. these are the 'secondary' plots).
(See 'models_validation').
figsize_verbose: tuple
Two dimensions of the 'secondary' plots.
Returns
----------
datasets_train_val_score: np.array
Two dimensional np.array, containing two columns: the first contains the training scores, the second the validation
scores.
It has as many rows as the number of datasets to test, i.e. as the number of elements in `dataset_list`.
datasets_best_model: list
List which has as many elements as the number of the datasets (i.e. number of elements in `dataset_list`). For
each dataset, it contains the best model for that dataset.
        More precisely, it is a list of triples:
- the first element is the index of `model_paramGrid_list` which indicates the best model;
- the second element is the mnemonic name of the best model;
- the third element is the best combination of hyperparameters values on that best model (i.e. it's a dictionary
which has as keys the hyperparameters names and as values their associated values).
best_index: int
Index of `dataset_list` that indicates which is the best dataset.
test_score: float
Test score associated with the best couple dataset-model.
axes: list
List of the matplotlib Axes where the plots have been made.
        The 'secondary' plots (if any) come first, followed by the 'main' plot (if any).
If no plot has been made, `axes` is an empty list.
See also
----------
models_validation: select the best model on the given dataset.
Notes
----------
- If `regr` is True, the validation scores are errors (MSE, i.e. Mean Squared Errors): this means that the best
couple dataset-model is the one associated with the minimum validation score.
Otherwise, the validation scores are accuracies: this means that the best couple is the one associated with the
maximum validation score.
- If `time_series` is False, the training-test splitting of each dataset is made randomly. In addition, the cross
validation strategy performed is the classic k-fold cross validation: the number of folds is specified by `n_folds`.
Otherwise, if `time_series` is True, the training-test sets are simply obtained by splitting each dataset into two
contiguous parts. In addition, the cross validation strategy performed is the sklearn TimeSeriesSplit.
"""
# numpy matrix (i.e. np.array) which has as many rows as the datasets, and it has the training and validation scores as
# columns. At the beginning it is a list.
datasets_train_val_score = []
    # List which contains, for each dataset, the best model, i.e. the triple (index, model name, best combination of
    # hyperparameters values)
datasets_best_model = []
    # List which contains, for each dataset, its test score (associated with the best model)
datasets_test_score = []
# List of axes
axes = []
for i,dataset in enumerate(dataset_list): # Iterate through all the datasets
X,y = dataset
# Perform the models validation on the current dataset
models_train_val_score, models_best_params, best_index, test_score, ax = models_validation(X, y,
model_paramGrid_list,
scale_list=scale_list,
test_size=test_size,
time_series=time_series,
random_state=random_state,
n_folds=n_folds,
regr=regr, plot=verbose,
plot_train=plot_train,
xlabel="Models",
title=("Dataset "+str(i)+
" : models validation"),
figsize=figsize_verbose)
datasets_train_val_score.append(tuple(models_train_val_score[best_index,:])) # Add the row related to that dataset
# Add the element related to that dataset
datasets_best_model.append((best_index,model_paramGrid_list[best_index][0],models_best_params[best_index]))
datasets_test_score.append(test_score) # Add the element related to that dataset
if ax:
axes.append(ax)
datasets_train_val_score = np.array(datasets_train_val_score) # Transform into numpy
# Find the best index, i.e. the best dataset (more precisely, the best couple dataset-model)
if regr:
best_index = np.argmin(datasets_train_val_score,axis=0)[1]
else:
best_index = np.argmax(datasets_train_val_score,axis=0)[1]
# Test score of the best couple dataset-model
test_score = datasets_test_score[best_index]
    if plot:  # Make the plot
if not xvalues: # Default values on the x axis
xvalues = range(len(dataset_list))
ax = _plot_TrainVal_values(xvalues,datasets_train_val_score,plot_train,xlabel,title,figsize, bar=True)
axes.append(ax)
return datasets_train_val_score, datasets_best_model, best_index, test_score, axes
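

# --- Hedged usage sketch (illustrative addition, not part of the original module). ---
# It shows a plausible call to `datasets_models_validation`, comparing two model
# families across two candidate datasets; X0, X1 and y are placeholder arrays.
def _example_datasets_models_validation(X0, X1, y):  # pragma: no cover
    """Minimal sketch: two-level selection over datasets and models."""
    from sklearn.linear_model import Ridge
    from sklearn.tree import DecisionTreeRegressor
    model_paramGrid_list = [
        ("ridge", Ridge(), {"alpha": [0.1, 1.0, 10.0]}),
        ("tree", DecisionTreeRegressor(), {"max_depth": [2, 4, 8]}),
    ]
    scores, best_models, best_i, test_score, axes = datasets_models_validation(
        [(X0, y), (X1, y)], model_paramGrid_list, scale_list=[True, False], regr=True)
    return best_i, best_models[best_i], test_score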
| 52.110855 | 130 | 0.664643 | [
"MIT"
] | EnricoPittini/model-selection | model_selection.py | 67,692 | Python |
from asyncio import AbstractEventLoop
from asyncio import iscoroutinefunction
from collections import defaultdict
from typing import Any
from typing import Callable
from typing import DefaultDict
from typing import Dict
from typing import NamedTuple
from typing import Set
from typing import Tuple
from .api.master_api_client import MasterApiClient
CallbackFunc = Callable[[str, Any], None]
class Callback(NamedTuple):
callback: CallbackFunc
class ParamManager:
def __init__(
self,
master_api_client: MasterApiClient,
loop: AbstractEventLoop
) -> None:
self._master_api_client = master_api_client
self._loop = loop
self._callbacks: DefaultDict[str, Set[Callback]] = defaultdict(set)
self._cache: Dict[str, Any] = {}
async def subscribe_param(
self,
key: str,
callback: CallbackFunc
) -> Tuple[Any, Callback]:
if key not in self._callbacks:
param_value = await self._master_api_client.subscribe_param(key)
self._cache[key] = param_value
else:
param_value = self._cache[key]
cb = Callback(callback)
self._callbacks[key].add(cb)
return param_value, cb
async def unsubscribe_callback(
self,
callback: Callback
) -> bool:
for key, callbacks in self._callbacks.items():
if callback in callbacks:
callbacks.discard(callback)
break
else:
return False
if not callbacks:
await self._master_api_client.unsusbcribe_param(key)
self._cache.pop(key)
self._callbacks.pop(key)
return True
def update(self, key: str, value: Any) -> bool:
self._cache[key] = value
callbacks = set()
namespace = '/'
for ns in key.split('/'):
if not ns:
continue
namespace += ns
callbacks |= set(self._callbacks.get(namespace, set()))
namespace += '/'
if not callbacks:
return False
for callback in callbacks:
if iscoroutinefunction(callback.callback):
self._loop.create_task(callback.callback(key, value))
else:
self._loop.call_soon(callback.callback, key, value)
return True
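

# --- Hedged usage sketch (illustrative addition, not part of the original module). ---
# It stubs the master API client with AsyncMock, so the real MasterApiClient
# signatures are not exercised; run it with `asyncio.run(_example_param_manager())`.
async def _example_param_manager() -> None:  # pragma: no cover
    """Minimal sketch: subscribe to a param, push an update, unsubscribe."""
    import asyncio
    from unittest.mock import AsyncMock

    master = AsyncMock()
    master.subscribe_param.return_value = 42  # initial parameter value
    manager = ParamManager(master, asyncio.get_running_loop())
    value, cb = await manager.subscribe_param(
        '/robot/speed', lambda key, val: print(key, '->', val))
    manager.update('/robot/speed', 43)  # schedules the callback via call_soon
    await asyncio.sleep(0)              # give the scheduled callback a chance to run
    await manager.unsubscribe_callback(cb)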
| 27.55814 | 76 | 0.613924 | [
"Apache-2.0"
] | mgrrx/aioros | src/aioros/param_manager.py | 2,370 | Python |
#!/usr/bin/env python3
import importlib.machinery as imm
import logging
import pathlib
import re
import configargparse
class ModuleInfo:
def __init__(self, path):
self.path = pathlib.Path(path)
name = str(self.path.parent / self.path.stem)
name = name.replace("/", ".")
self.name = re.sub(r"^[\.]+", "", name)
self.module = imm.SourceFileLoader(self.name, path).load_module()
if not hasattr(self.module, "get_parser"):
raise ValueError(f"{path} does not have get_parser()")
def get_parser():
parser = configargparse.ArgumentParser(
description='generate RST from argparse options',
config_file_parser_class=configargparse.YAMLConfigFileParser,
formatter_class=configargparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('src', type=str, nargs='+',
help='source python files that contain get_parser() func')
return parser
# parser
args = get_parser().parse_args()
modinfo = []
for p in args.src:
if "__init__.py" in p:
continue
modinfo.append(ModuleInfo(p))
# print refs
for m in modinfo:
logging.info(f"processing: {m.path.name}")
d = m.module.get_parser().description
assert d is not None
print(f"- :ref:`{m.path.name}`: {d}")
print()
# print argparse
for m in modinfo:
cmd = m.path.name
sep = "~" * len(cmd)
print(f"""
.. _{cmd}:
{cmd}
{sep}
.. argparse::
:module: {m.name}
:func: get_parser
:prog: {cmd}
""")
| 21.927536 | 82 | 0.637145 | [
"Apache-2.0"
] | 18445864529/espnet | doc/argparse2rst.py | 1,513 | Python |
"""
util_list module. Contains the mflist class.
This classes encapsulates modflow-style list inputs away
from the individual packages. The end-user should not need to
instantiate this class directly.
some more info
"""
from __future__ import division, print_function
import os
import warnings
import numpy as np
from ..datbase import DataInterface, DataListInterface, DataType
from ..utils.recarray_utils import create_empty_recarray
try:
from numpy.lib import NumpyVersion
numpy114 = NumpyVersion(np.__version__) >= "1.14.0"
except ImportError:
numpy114 = False
class MfList(DataInterface, DataListInterface):
"""
a generic object for handling transient boundary condition lists
Parameters
----------
package : package object
The package object (of type :class:`flopy.pakbase.Package`) to which
this MfList will be added.
data : varies
the data of the transient list (optional). (the default is None)
Attributes
----------
mxact : int
the max number of active bc for any stress period
Methods
-------
add_record(kper,index,value) : None
add a record to stress period kper at index location
write_transient(f) : None
write the transient sequence to the model input file f
check_kij() : None
checks for boundaries outside of model domain - issues warnings only
See Also
--------
Notes
-----
Examples
--------
"""
def __init__(
self,
package,
data=None,
dtype=None,
model=None,
list_free_format=None,
binary=False,
):
if isinstance(data, MfList):
for attr in data.__dict__.items():
setattr(self, attr[0], attr[1])
if model is None:
self._model = package.parent
else:
self._model = model
self._package = package
return
self._package = package
if model is None:
self._model = package.parent
else:
self._model = model
if dtype is None:
assert isinstance(self.package.dtype, np.dtype)
self.__dtype = self.package.dtype
else:
self.__dtype = dtype
self.__binary = binary
self.__vtype = {}
self.__data = {}
if data is not None:
self.__cast_data(data)
self.__df = None
if list_free_format is None:
if package.parent.version == "mf2k":
list_free_format = False
self.list_free_format = list_free_format
return
@property
def name(self):
return self.package.name
@property
def mg(self):
return self._model.modelgrid
@property
def sr(self):
return self.mg.sr
@property
def model(self):
return self._model
@property
def package(self):
return self._package
@property
def data_type(self):
return DataType.transientlist
@property
def plotable(self):
return True
def get_empty(self, ncell=0):
d = create_empty_recarray(ncell, self.dtype, default_value=-1.0e10)
return d
def export(self, f, **kwargs):
from flopy import export
return export.utils.mflist_export(f, self, **kwargs)
def append(self, other):
""" append the recarrays from one MfList to another
Parameters
----------
other: variable: an item that can be cast in to an MfList
that corresponds with self
Returns
-------
dict of {kper:recarray}
"""
if not isinstance(other, MfList):
other = MfList(
self.package,
data=other,
dtype=self.dtype,
model=self._model,
list_free_format=self.list_free_format,
)
msg = (
"MfList.append(): other arg must be "
+ "MfList or dict, not {0}".format(type(other))
)
assert isinstance(other, MfList), msg
other_kpers = list(other.data.keys())
other_kpers.sort()
self_kpers = list(self.data.keys())
self_kpers.sort()
new_dict = {}
for kper in range(self._model.nper):
other_data = other[kper].copy()
self_data = self[kper].copy()
other_len = other_data.shape[0]
self_len = self_data.shape[0]
if (other_len == 0 and self_len == 0) or (
kper not in self_kpers and kper not in other_kpers
):
continue
elif self_len == 0:
new_dict[kper] = other_data
elif other_len == 0:
new_dict[kper] = self_data
else:
new_len = other_data.shape[0] + self_data.shape[0]
new_data = np.recarray(new_len, dtype=self.dtype)
new_data[:self_len] = self_data
new_data[self_len : self_len + other_len] = other_data
new_dict[kper] = new_data
return new_dict
def drop(self, fields):
"""drop fields from an MfList
Parameters
----------
fields : list or set of field names to drop
Returns
-------
dropped : MfList without the dropped fields
"""
if not isinstance(fields, list):
fields = [fields]
names = [n for n in self.dtype.names if n not in fields]
dtype = np.dtype(
[(k, d) for k, d in self.dtype.descr if k not in fields]
)
spd = {}
for k, v in self.data.items():
# because np 1.9 doesn't support indexing by list of columns
newarr = np.array([self.data[k][n] for n in names]).transpose()
newarr = np.array(list(map(tuple, newarr)), dtype=dtype).view(
np.recarray
)
for n in dtype.names:
newarr[n] = self.data[k][n]
spd[k] = newarr
return MfList(self.package, spd, dtype=dtype)
@property
def data(self):
return self.__data
@property
def df(self):
if self.__df is None:
self.__df = self.get_dataframe()
return self.__df
@property
def vtype(self):
return self.__vtype
@property
def dtype(self):
return self.__dtype
# Get the itmp for a given kper
def get_itmp(self, kper):
if kper not in list(self.__data.keys()):
return None
if self.__vtype[kper] is None:
return -1
# If an external file, have to load it
if self.__vtype[kper] == str:
return self.__fromfile(self.__data[kper]).shape[0]
if self.__vtype[kper] == np.recarray:
return self.__data[kper].shape[0]
# If not any of the above, it must be an int
return self.__data[kper]
@property
def mxact(self):
mxact = 0
for kper in list(self.__data.keys()):
mxact = max(mxact, self.get_itmp(kper))
return mxact
@property
def fmt_string(self):
"""Returns a C-style fmt string for numpy savetxt that corresponds to
the dtype"""
if self.list_free_format is not None:
use_free = self.list_free_format
else:
use_free = True
if self.package.parent.has_package("bas6"):
use_free = self.package.parent.bas6.ifrefm
# mt3d list data is fixed format
if "mt3d" in self.package.parent.version.lower():
use_free = False
fmts = []
for field in self.dtype.descr:
vtype = field[1][1].lower()
if vtype in ("i", "b"):
if use_free:
fmts.append("%9d")
else:
fmts.append("%10d")
elif vtype == "f":
if use_free:
if numpy114:
# Use numpy's floating-point formatter (Dragon4)
fmts.append("%15s")
else:
fmts.append("%15.7E")
else:
fmts.append("%10G")
elif vtype == "o":
if use_free:
fmts.append("%9s")
else:
fmts.append("%10s")
elif vtype == "s":
msg = (
"MfList.fmt_string error: 'str' type found in dtype. "
"This gives unpredictable results when "
"recarray to file - change to 'object' type"
)
raise TypeError(msg)
else:
raise TypeError(
"MfList.fmt_string error: unknown vtype in "
"field: {}".format(field)
)
if use_free:
fmt_string = " " + " ".join(fmts)
else:
fmt_string = "".join(fmts)
return fmt_string
# Private method to cast the data argument
# Should only be called by the constructor
def __cast_data(self, data):
# If data is a list, then all we can do is try to cast it to
# an ndarray, then cast again to a recarray
if isinstance(data, list):
# warnings.warn("MfList casting list to array")
try:
data = np.array(data)
except Exception as e:
raise Exception(
"MfList error: casting list to ndarray: " + str(e)
)
# If data is a dict, the we have to assume it is keyed on kper
if isinstance(data, dict):
if not list(data.keys()):
raise Exception("MfList error: data dict is empty")
for kper, d in data.items():
try:
kper = int(kper)
except Exception as e:
raise Exception(
"MfList error: data dict key "
+ "{0:s} not integer: ".format(kper)
+ str(type(kper))
+ "\n"
+ str(e)
)
# Same as before, just try...
if isinstance(d, list):
# warnings.warn("MfList: casting list to array at " +\
# "kper {0:d}".format(kper))
try:
d = np.array(d)
except Exception as e:
raise Exception(
"MfList error: casting list "
+ "to ndarray: "
+ str(e)
)
# super hack - sick of recarrays already
# if (isinstance(d,np.ndarray) and len(d.dtype.fields) > 1):
# d = d.view(np.recarray)
if isinstance(d, np.recarray):
self.__cast_recarray(kper, d)
elif isinstance(d, np.ndarray):
self.__cast_ndarray(kper, d)
elif isinstance(d, int):
self.__cast_int(kper, d)
elif isinstance(d, str):
self.__cast_str(kper, d)
elif d is None:
self.__data[kper] = -1
self.__vtype[kper] = None
else:
raise Exception(
"MfList error: unsupported data type: "
+ str(type(d))
+ " at kper "
+ "{0:d}".format(kper)
)
# A single recarray - same MfList for all stress periods
elif isinstance(data, np.recarray):
self.__cast_recarray(0, data)
# A single ndarray
elif isinstance(data, np.ndarray):
self.__cast_ndarray(0, data)
# A single filename
elif isinstance(data, str):
self.__cast_str(0, data)
else:
raise Exception(
"MfList error: unsupported data type: " + str(type(data))
)
def __cast_str(self, kper, d):
# If d is a string, assume it is a filename and check that it exists
assert os.path.exists(d), (
"MfList error: dict filename (string) '"
+ d
+ "' value for "
+ "kper {0:d} not found".format(kper)
)
self.__data[kper] = d
self.__vtype[kper] = str
def __cast_int(self, kper, d):
# If d is an integer, then it must be 0 or -1
if d > 0:
raise Exception(
"MfList error: dict integer value for "
"kper {0:10d} must be 0 or -1, "
"not {1:10d}".format(kper, d)
)
if d == 0:
self.__data[kper] = 0
self.__vtype[kper] = None
else:
self.__data[kper] = -1
self.__vtype[kper] = None
def __cast_recarray(self, kper, d):
assert d.dtype == self.__dtype, (
"MfList error: recarray dtype: "
+ str(d.dtype)
+ " doesn't match "
+ "self dtype: "
+ str(self.dtype)
)
self.__data[kper] = d
self.__vtype[kper] = np.recarray
def __cast_ndarray(self, kper, d):
d = np.atleast_2d(d)
if d.dtype != self.__dtype:
assert d.shape[1] == len(self.dtype), (
"MfList error: ndarray "
+ "shape "
+ str(d.shape)
+ " doesn't match dtype "
+ "len: "
+ str(len(self.dtype))
)
# warnings.warn("MfList: ndarray dtype does not match self " +\
# "dtype, trying to cast")
try:
self.__data[kper] = np.core.records.fromarrays(
d.transpose(), dtype=self.dtype
)
except Exception as e:
raise Exception(
"MfList error: casting ndarray to recarray: " + str(e)
)
self.__vtype[kper] = np.recarray
def get_dataframe(self, squeeze=True):
"""
Cast recarrays for stress periods into single
dataframe containing all stress periods.
Parameters
----------
squeeze : bool
Reduce number of columns in dataframe to only include
stress periods where a variable changes.
Returns
-------
df : dataframe
Dataframe of shape nrow = ncells, ncol = nvar x nper. If
the squeeze option is chosen, nper is the number of
stress periods where at least one cells is different,
otherwise it is equal to the number of keys in MfList.data.
Notes
-----
Requires pandas.
"""
try:
import pandas as pd
except Exception as e:
msg = "MfList.get_dataframe() requires pandas"
raise ImportError(msg)
# make a dataframe of all data for all stress periods
names = ["k", "i", "j"]
if "MNW2" in self.package.name:
names += ["wellid"]
# find relevant variable names
# may have to iterate over the first stress period
for per in range(self._model.nper):
if hasattr(self.data[per], "dtype"):
varnames = list(
[n for n in self.data[per].dtype.names if n not in names]
)
break
# create list of dataframes for each stress period
# each with index of k, i, j
dfs = []
for per in self.data.keys():
recs = self.data[per]
if recs is None or len(recs) == 0:
# add an empty dataframe if a stress period is
# empty (e.g. no pumping during a predevelopment
# period)
columns = names + list(
["{}{}".format(c, per) for c in varnames]
)
dfi = pd.DataFrame(data=None, columns=columns)
dfi = dfi.set_index(names)
else:
dfi = pd.DataFrame.from_records(recs)
dfg = dfi.groupby(names)
count = dfg[varnames[0]].count().rename("n")
if (count > 1).values.any():
print(
"Duplicated list entry locations aggregated "
"for kper {}".format(per)
)
for kij in count[count > 1].index.values:
print(" (k,i,j) {}".format(kij))
dfi = dfg.sum() # aggregate
dfi.columns = list(["{}{}".format(c, per) for c in varnames])
dfs.append(dfi)
df = pd.concat(dfs, axis=1)
if squeeze:
keep = []
for var in varnames:
diffcols = list([n for n in df.columns if var in n])
diff = df[diffcols].fillna(0).diff(axis=1)
diff[
"{}0".format(var)
] = 1 # always return the first stress period
changed = diff.sum(axis=0) != 0
keep.append(df.loc[:, changed.index[changed]])
df = pd.concat(keep, axis=1)
df = df.reset_index()
df.insert(len(names), "node", df.i * self._model.ncol + df.j)
return df
def add_record(self, kper, index, values):
# Add a record to possible already set list for a given kper
# index is a list of k,i,j or nodes.
# values is a list of floats.
# The length of index + values must be equal to the number of names
# in dtype
assert len(index) + len(values) == len(self.dtype), (
"MfList.add_record() error: length of index arg +"
+ "length of value arg != length of self dtype"
)
# If we already have something for this kper, then add to it
if kper in list(self.__data.keys()):
if self.vtype[kper] == int:
# If a 0 or -1, reset
self.__data[kper] = self.get_empty(1)
self.__vtype[kper] = np.recarray
elif self.vtype[kper] == str:
# If filename, load into recarray
d = self.__fromfile(self.data[kper])
d.resize(d.shape[0], d.shape[1])
self.__data[kper] = d
self.__vtype[kper] = np.recarray
elif self.vtype[kper] == np.recarray:
# Extend the recarray
self.__data[kper] = np.append(
self.__data[kper], self.get_empty(1)
)
else:
self.__data[kper] = self.get_empty(1)
self.__vtype[kper] = np.recarray
rec = list(index)
rec.extend(list(values))
try:
self.__data[kper][-1] = tuple(rec)
except Exception as e:
raise Exception(
"MfList.add_record() error: adding record to "
+ "recarray: "
+ str(e)
)
def __getitem__(self, kper):
# Get the recarray for a given kper
# If the data entry for kper is a string,
# return the corresponding recarray,
# but don't reset the value in the data dict
# assert kper in list(self.data.keys()), "MfList.__getitem__() kper " + \
# str(kper) + " not in data.keys()"
try:
kper = int(kper)
except Exception as e:
raise Exception(
"MfList error: _getitem__() passed invalid kper index:"
+ str(kper)
)
if kper not in list(self.data.keys()):
if kper == 0:
return self.get_empty()
else:
return self.data[self.__find_last_kper(kper)]
if self.vtype[kper] == int:
if self.data[kper] == 0:
return self.get_empty()
else:
return self.data[self.__find_last_kper(kper)]
if self.vtype[kper] == str:
return self.__fromfile(self.data[kper])
if self.vtype[kper] == np.recarray:
return self.data[kper]
def __setitem__(self, kper, data):
if kper in list(self.__data.keys()):
if self._model.verbose:
print("removing existing data for kper={}".format(kper))
self.data.pop(kper)
# If data is a list, then all we can do is try to cast it to
# an ndarray, then cast again to a recarray
if isinstance(data, list):
# warnings.warn("MfList casting list to array")
try:
data = np.array(data)
except Exception as e:
raise Exception(
"MfList error: casting list to ndarray: " + str(e)
)
# cast data
if isinstance(data, int):
self.__cast_int(kper, data)
elif isinstance(data, np.recarray):
self.__cast_recarray(kper, data)
# A single ndarray
elif isinstance(data, np.ndarray):
self.__cast_ndarray(kper, data)
# A single filename
elif isinstance(data, str):
self.__cast_str(kper, data)
else:
raise Exception(
"MfList error: unsupported data type: " + str(type(data))
)
# raise NotImplementedError("MfList.__setitem__() not implemented")
def __fromfile(self, f):
# d = np.fromfile(f,dtype=self.dtype,count=count)
try:
d = np.genfromtxt(f, dtype=self.dtype)
except Exception as e:
raise Exception(
"MfList.__fromfile() error reading recarray "
+ "from file "
+ str(e)
)
return d
def get_filenames(self):
kpers = list(self.data.keys())
kpers.sort()
filenames = []
first = kpers[0]
for kper in list(range(0, max(self._model.nper, max(kpers) + 1))):
# Fill missing early kpers with 0
if kper < first:
itmp = 0
kper_vtype = int
elif kper in kpers:
kper_vtype = self.__vtype[kper]
if (
self._model.array_free_format
and self._model.external_path is not None
):
# py_filepath = ''
# py_filepath = os.path.join(py_filepath,
# self._model.external_path)
filename = self.package.name[0] + "_{0:04d}.dat".format(kper)
filenames.append(filename)
return filenames
def get_filename(self, kper):
ext = "dat"
if self.binary:
ext = "bin"
return self.package.name[0] + "_{0:04d}.{1}".format(kper, ext)
@property
def binary(self):
return bool(self.__binary)
def write_transient(self, f, single_per=None, forceInternal=False):
# forceInternal overrides isExternal (set below) for cases where
# external arrays are not supported (oh hello MNW1!)
# write the transient sequence described by the data dict
nr, nc, nl, nper = self._model.get_nrow_ncol_nlay_nper()
assert hasattr(f, "read"), (
"MfList.write() error: " + "f argument must be a file handle"
)
kpers = list(self.data.keys())
kpers.sort()
first = kpers[0]
if single_per is None:
loop_over_kpers = list(range(0, max(nper, max(kpers) + 1)))
else:
if not isinstance(single_per, list):
single_per = [single_per]
loop_over_kpers = single_per
for kper in loop_over_kpers:
# Fill missing early kpers with 0
if kper < first:
itmp = 0
kper_vtype = int
elif kper in kpers:
kper_data = self.__data[kper]
kper_vtype = self.__vtype[kper]
if kper_vtype == str:
if not self._model.array_free_format:
kper_data = self.__fromfile(kper_data)
kper_vtype = np.recarray
itmp = self.get_itmp(kper)
if kper_vtype == np.recarray:
itmp = kper_data.shape[0]
elif (kper_vtype == int) or (kper_vtype is None):
itmp = kper_data
# Fill late missing kpers with -1
else:
itmp = -1
kper_vtype = int
f.write(
" {0:9d} {1:9d} # stress period {2:d}\n".format(
itmp, 0, kper + 1
)
)
isExternal = False
if (
self._model.array_free_format
and self._model.external_path is not None
and forceInternal is False
):
isExternal = True
if self.__binary:
isExternal = True
if isExternal:
if kper_vtype == np.recarray:
py_filepath = ""
if self._model.model_ws is not None:
py_filepath = self._model.model_ws
if self._model.external_path is not None:
py_filepath = os.path.join(
py_filepath, self._model.external_path
)
filename = self.get_filename(kper)
py_filepath = os.path.join(py_filepath, filename)
model_filepath = filename
if self._model.external_path is not None:
model_filepath = os.path.join(
self._model.external_path, filename
)
self.__tofile(py_filepath, kper_data)
kper_vtype = str
kper_data = model_filepath
if kper_vtype == np.recarray:
name = f.name
if self.__binary or not numpy114:
f.close()
# switch file append mode to binary
with open(name, "ab+") as f:
self.__tofile(f, kper_data)
# continue back to non-binary
f = open(name, "a")
else:
self.__tofile(f, kper_data)
elif kper_vtype == str:
f.write(" open/close " + kper_data)
if self.__binary:
f.write(" (BINARY)")
f.write("\n")
def __tofile(self, f, data):
# Write the recarray (data) to the file (or file handle) f
assert isinstance(data, np.recarray), (
"MfList.__tofile() data arg " + "not a recarray"
)
# Add one to the kij indices
lnames = [name.lower() for name in self.dtype.names]
# --make copy of data for multiple calls
d = data.copy()
for idx in ["k", "i", "j", "node"]:
if idx in lnames:
d[idx] += 1
if self.__binary:
dtype2 = []
for name in self.dtype.names:
dtype2.append((name, np.float32))
dtype2 = np.dtype(dtype2)
d = np.array(d, dtype=dtype2)
d.tofile(f)
else:
np.savetxt(f, d, fmt=self.fmt_string, delimiter="")
def check_kij(self):
names = self.dtype.names
if ("k" not in names) or ("i" not in names) or ("j" not in names):
warnings.warn(
"MfList.check_kij(): index fieldnames 'k,i,j' "
+ "not found in self.dtype names: "
+ str(names)
)
return
nr, nc, nl, nper = self._model.get_nrow_ncol_nlay_nper()
if nl == 0:
warnings.warn(
"MfList.check_kij(): unable to get dis info from " + "model"
)
return
for kper in list(self.data.keys()):
out_idx = []
data = self[kper]
if data is not None:
k = data["k"]
k_idx = np.where(np.logical_or(k < 0, k >= nl))
if k_idx[0].shape[0] > 0:
out_idx.extend(list(k_idx[0]))
i = data["i"]
i_idx = np.where(np.logical_or(i < 0, i >= nr))
if i_idx[0].shape[0] > 0:
out_idx.extend(list(i_idx[0]))
j = data["j"]
j_idx = np.where(np.logical_or(j < 0, j >= nc))
if j_idx[0].shape[0]:
out_idx.extend(list(j_idx[0]))
if len(out_idx) > 0:
warn_str = (
"MfList.check_kij(): warning the following "
+ "indices are out of bounds in kper "
+ str(kper)
+ ":\n"
)
for idx in out_idx:
d = data[idx]
warn_str += " {0:9d} {1:9d} {2:9d}\n".format(
d["k"] + 1, d["i"] + 1, d["j"] + 1
)
warnings.warn(warn_str)
def __find_last_kper(self, kper):
kpers = list(self.data.keys())
kpers.sort()
last = 0
for kkper in kpers[::-1]:
# if this entry is valid
if self.vtype[kkper] != int or self.data[kkper] != -1:
last = kkper
if kkper <= kper:
break
return kkper
def get_indices(self):
"""
a helper function for plotting - get all unique indices
"""
names = self.dtype.names
        lnames = [name.lower() for name in names]
if "k" not in lnames or "j" not in lnames:
raise NotImplementedError("MfList.get_indices requires kij")
kpers = list(self.data.keys())
kpers.sort()
indices = []
for i, kper in enumerate(kpers):
kper_vtype = self.__vtype[kper]
if (kper_vtype != int) or (kper_vtype is not None):
d = self.data[kper]
if not indices:
indices = list(zip(d["k"], d["i"], d["j"]))
else:
new_indices = list(zip(d["k"], d["i"], d["j"]))
for ni in new_indices:
if ni not in indices:
indices.append(ni)
return indices
def attribute_by_kper(self, attr, function=np.mean, idx_val=None):
assert attr in self.dtype.names
if idx_val is not None:
assert idx_val[0] in self.dtype.names
kpers = list(self.data.keys())
kpers.sort()
values = []
for kper in range(0, max(self._model.nper, max(kpers))):
if kper < min(kpers):
values.append(0)
elif kper > max(kpers) or kper not in kpers:
values.append(values[-1])
else:
kper_data = self.__data[kper]
if idx_val is not None:
kper_data = kper_data[
np.where(kper_data[idx_val[0]] == idx_val[1])
]
# kper_vtype = self.__vtype[kper]
v = function(kper_data[attr])
values.append(v)
return values
def plot(
self,
key=None,
names=None,
kper=0,
filename_base=None,
file_extension=None,
mflay=None,
**kwargs
):
"""
Plot stress period boundary condition (MfList) data for a specified
stress period
Parameters
----------
key : str
MfList dictionary key. (default is None)
names : list
List of names for figure titles. (default is None)
kper : int
MODFLOW zero-based stress period number to return. (default is zero)
filename_base : str
Base file name that will be used to automatically generate file
names for output image files. Plots will be exported as image
files if file_name_base is not None. (default is None)
file_extension : str
Valid matplotlib.pyplot file extension for savefig(). Only used
if filename_base is not None. (default is 'png')
mflay : int
MODFLOW zero-based layer number to return. If None, then all
all layers will be included. (default is None)
**kwargs : dict
axes : list of matplotlib.pyplot.axis
List of matplotlib.pyplot.axis that will be used to plot
data for each layer. If axes=None axes will be generated.
(default is None)
pcolor : bool
Boolean used to determine if matplotlib.pyplot.pcolormesh
plot will be plotted. (default is True)
colorbar : bool
Boolean used to determine if a color bar will be added to
the matplotlib.pyplot.pcolormesh. Only used if pcolor=True.
(default is False)
inactive : bool
Boolean used to determine if a black overlay in inactive
cells in a layer will be displayed. (default is True)
contour : bool
Boolean used to determine if matplotlib.pyplot.contour
plot will be plotted. (default is False)
clabel : bool
Boolean used to determine if matplotlib.pyplot.clabel
will be plotted. Only used if contour=True. (default is False)
grid : bool
Boolean used to determine if the model grid will be plotted
on the figure. (default is False)
masked_values : list
List of unique values to be excluded from the plot.
Returns
----------
out : list
Empty list is returned if filename_base is not None. Otherwise
a list of matplotlib.pyplot.axis is returned.
See Also
--------
Notes
-----
Examples
--------
>>> import flopy
>>> ml = flopy.modflow.Modflow.load('test.nam')
>>> ml.wel.stress_period_data.plot(ml.wel, kper=1)
"""
from flopy.plot import PlotUtilities
axes = PlotUtilities._plot_mflist_helper(
self,
key=key,
names=names,
kper=kper,
filename_base=filename_base,
file_extension=file_extension,
mflay=mflay,
**kwargs
)
return axes
def to_shapefile(self, filename, kper=None):
"""
Export stress period boundary condition (MfList) data for a specified
stress period
Parameters
----------
filename : str
Shapefile name to write
kper : int
MODFLOW zero-based stress period number to return. (default is None)
Returns
----------
None
See Also
--------
Notes
-----
Examples
--------
>>> import flopy
>>> ml = flopy.modflow.Modflow.load('test.nam')
>>> ml.wel.to_shapefile('test_hk.shp', kper=1)
"""
import warnings
warnings.warn(
"Deprecation warning: to_shapefile() is deprecated. use .export()"
)
# if self.sr is None:
# raise Exception("MfList.to_shapefile: SpatialReference not set")
# import flopy.utils.flopy_io as fio
# if kper is None:
# keys = self.data.keys()
# keys.sort()
# else:
# keys = [kper]
# array_dict = {}
# for kk in keys:
# arrays = self.to_array(kk)
# for name, array in arrays.items():
# for k in range(array.shape[0]):
# #aname = name+"{0:03d}_{1:02d}".format(kk, k)
# n = fio.shape_attr_name(name, length=4)
# aname = "{}{:03d}{:03d}".format(n, k+1, int(kk)+1)
# array_dict[aname] = array[k]
# fio.write_grid_shapefile(filename, self.sr, array_dict)
self.export(filename, kper=kper)
def to_array(self, kper=0, mask=False):
"""
Convert stress period boundary condition (MfList) data for a
specified stress period to a 3-D numpy array
Parameters
----------
kper : int
MODFLOW zero-based stress period number to return. (default is zero)
mask : boolean
return array with np.NaN instead of zero
Returns
----------
out : dict of numpy.ndarrays
Dictionary of 3-D numpy arrays containing the stress period data for
a selected stress period. The dictionary keys are the MfList dtype
names for the stress period data ('cond', 'flux', 'bhead', etc.).
See Also
--------
Notes
-----
Examples
--------
>>> import flopy
>>> ml = flopy.modflow.Modflow.load('test.nam')
>>> v = ml.wel.stress_period_data.to_array(kper=1)
"""
i0 = 3
unstructured = False
if "inode" in self.dtype.names:
raise NotImplementedError()
if "node" in self.dtype.names:
if "i" not in self.dtype.names and "j" not in self.dtype.names:
i0 = 1
unstructured = True
arrays = {}
for name in self.dtype.names[i0:]:
if not self.dtype.fields[name][0] == object:
if unstructured:
arr = np.zeros((self._model.nlay * self._model.ncpl,))
else:
arr = np.zeros(
(self._model.nlay, self._model.nrow, self._model.ncol)
)
arrays[name] = arr.copy()
# if this kper is not found
if kper not in self.data.keys():
kpers = list(self.data.keys())
kpers.sort()
# if this kper is before the first entry,
# (maybe) mask and return
if kper < kpers[0]:
if mask:
for name, arr in arrays.items():
                        arrays[name][:] = np.nan
return arrays
# find the last kper
else:
kper = self.__find_last_kper(kper)
sarr = self.data[kper]
if np.isscalar(sarr):
# if there are no entries for this kper
if sarr == 0:
if mask:
for name, arr in arrays.items():
                        arrays[name][:] = np.nan
return arrays
else:
raise Exception("MfList: something bad happened")
for name, arr in arrays.items():
if unstructured:
cnt = np.zeros(
                    (self._model.nlay * self._model.ncpl,), dtype=float
)
else:
cnt = np.zeros(
(self._model.nlay, self._model.nrow, self._model.ncol),
                    dtype=float,
)
# print(name,kper)
for rec in sarr:
if unstructured:
arr[rec["node"]] += rec[name]
cnt[rec["node"]] += 1.0
else:
arr[rec["k"], rec["i"], rec["j"]] += rec[name]
cnt[rec["k"], rec["i"], rec["j"]] += 1.0
# average keys that should not be added
if name not in ("cond", "flux"):
idx = cnt > 0.0
arr[idx] /= cnt[idx]
if mask:
arr = np.ma.masked_where(cnt == 0.0, arr)
                arr[cnt == 0.0] = np.nan
arrays[name] = arr.copy()
# elif mask:
# for name, arr in arrays.items():
# arrays[name][:] = np.NaN
return arrays
@property
def masked_4D_arrays(self):
# get the first kper
arrays = self.to_array(kper=0, mask=True)
# initialize these big arrays
m4ds = {}
for name, array in arrays.items():
m4d = np.zeros(
(
self._model.nper,
self._model.nlay,
self._model.nrow,
self._model.ncol,
)
)
m4d[0, :, :, :] = array
m4ds[name] = m4d
for kper in range(1, self._model.nper):
arrays = self.to_array(kper=kper, mask=True)
for name, array in arrays.items():
m4ds[name][kper, :, :, :] = array
return m4ds
def masked_4D_arrays_itr(self):
# get the first kper
arrays = self.to_array(kper=0, mask=True)
# initialize these big arrays
for name, array in arrays.items():
m4d = np.zeros(
(
self._model.nper,
self._model.nlay,
self._model.nrow,
self._model.ncol,
)
)
m4d[0, :, :, :] = array
for kper in range(1, self._model.nper):
arrays = self.to_array(kper=kper, mask=True)
for tname, array in arrays.items():
if tname == name:
m4d[kper, :, :, :] = array
yield name, m4d
@property
def array(self):
return self.masked_4D_arrays
@classmethod
def from_4d(cls, model, pak_name, m4ds):
"""construct an MfList instance from a dict of
(attribute_name,masked 4D ndarray
Parameters
----------
model : mbase derived type
pak_name : str package name (e.g GHB)
m4ds : {attribute name:4d masked numpy.ndarray}
Returns
-------
MfList instance
"""
sp_data = MfList.masked4D_arrays_to_stress_period_data(
model.get_package(pak_name).get_default_dtype(), m4ds
)
return cls(model.get_package(pak_name), data=sp_data)
@staticmethod
def masked4D_arrays_to_stress_period_data(dtype, m4ds):
""" convert a dictionary of 4-dim masked arrays to
a stress_period_data style dict of recarray
Parameters
----------
dtype : numpy dtype
m4ds : dict {name:masked numpy 4-dim ndarray}
Returns
-------
dict {kper:recarray}
"""
assert isinstance(m4ds, dict)
for name, m4d in m4ds.items():
assert isinstance(m4d, np.ndarray)
assert name in dtype.names
assert m4d.ndim == 4
keys = list(m4ds.keys())
for i1, key1 in enumerate(keys):
a1 = np.isnan(m4ds[key1])
for i2, key2 in enumerate(keys[i1:]):
a2 = np.isnan(m4ds[key2])
if not np.array_equal(a1, a2):
raise Exception(
"Transient2d error: masking not equal"
+ " for {0} and {1}".format(key1, key2)
)
sp_data = {}
for kper in range(m4d.shape[0]):
vals = {}
for name, m4d in m4ds.items():
arr = m4d[kper, :, :, :]
isnan = np.argwhere(~np.isnan(arr))
v = []
for k, i, j in isnan:
v.append(arr[k, i, j])
vals[name] = v
kk = isnan[:, 0]
ii = isnan[:, 1]
jj = isnan[:, 2]
spd = np.recarray(shape=isnan.shape[0], dtype=dtype)
spd["i"] = ii
spd["k"] = kk
spd["j"] = jj
for n, v in vals.items():
spd[n] = v
sp_data[kper] = spd
return sp_data
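

# --- Hedged usage sketch (illustrative addition, not part of flopy itself). ---
# It builds a tiny model whose WEL package exposes an MfList; the names and
# signatures follow flopy's public modflow API.
def _example_mflist():  # pragma: no cover
    """Minimal sketch: inspect the MfList behind a WEL package."""
    import flopy

    ml = flopy.modflow.Modflow(modelname="demo")
    flopy.modflow.ModflowDis(ml, nlay=1, nrow=10, ncol=10, nper=2)
    wel = flopy.modflow.ModflowWel(ml, stress_period_data={0: [[0, 4, 4, -100.0]]})
    spd = wel.stress_period_data     # an MfList instance
    print(spd.mxact)                 # max active boundaries in any stress period
    print(spd[1])                    # kper 1 falls back to the last defined kper
    return spd.to_array(kper=0)      # {'flux': 3-D array} for stress period 0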
| 34.349807 | 81 | 0.49012 | [
"CC0-1.0",
"BSD-3-Clause"
] | aleaf/flopy | flopy/utils/util_list.py | 44,483 | Python |
# -*- coding: utf-8 -*-
import requests
from webs.api.exceptions.customs import ServerError, InvalidAPIRequest, RecordNotFound, RecordAlreadyExists
class RequestMixin(object):
CODE_EXCEPTION_MSG = {
400: InvalidAPIRequest,
404: RecordNotFound,
409: RecordAlreadyExists,
422: InvalidAPIRequest,
500: ServerError,
}
def __init__(self):
self.session = requests.Session()
@property
def _headers(self):
return {
"Content-Type": "application/json",
}
def request(self, server, method, url, json=None, params=None, timeout=60):
try:
response = self.session.request(
method, url, json=json, params=params,
timeout=timeout, headers=self._headers
)
except requests.exceptions.ConnectTimeout:
raise self.CODE_EXCEPTION_MSG[500](f"{server}服务器连接超时!")
except requests.exceptions.ConnectionError:
            raise self.CODE_EXCEPTION_MSG[500](f"{server} server connection error!")
try:
response_data = response.json()
except Exception as e:
            raise ServerError(f"{server} server response parsing failed!")
if not (200 <= response.status_code < 300):
exception = self.CODE_EXCEPTION_MSG[response.status_code] \
if response.status_code in self.CODE_EXCEPTION_MSG else self.CODE_EXCEPTION_MSG[400]
raise exception(f"{server} Response:{response_data.get('error').get('message')}")
return response_data
web_client = RequestMixin()
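

# --- Hedged usage sketch (illustrative addition; the URL and params are assumptions). ---
def _example_request():  # pragma: no cover
    """Minimal sketch: a GET through the shared client.

    Non-2xx responses raise one of the mapped exceptions instead of
    returning an error payload.
    """
    return web_client.request(
        server='engine',
        method='GET',
        url='http://engine:8000/api/v1/tasks',
        params={'page': 1},
    )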
| 30.230769 | 107 | 0.632316 | [
"Apache-2.0"
] | c89758971/crawloop | services/engine/webs/core/requests/request.py | 1,624 | Python |
import os
import re
import codecs
def isValidLine(line):
    if re.search('include "', line) is None or line.find('.PSVita') != -1 or line.find('.PS4') != -1 or line.find('.Switch') != -1 or line.find('.XBoxOne') != -1:
return True
return False
class CreateHeader:
def __init__(self):
self.lines = []
def addLine(self,line):
self.lines.append(line)
def readLines(self,path):
f = codecs.open(path, 'r','utf-8_sig')
line = f.readline()
while line:
if isValidLine(line):
self.lines.append(line.strip(os.linesep))
line = f.readline()
f.close()
def output(self,path):
f = codecs.open(path, 'w','utf-8_sig')
for line in self.lines:
f.write(line + os.linesep)
f.close()
effekseerHeader = CreateHeader()
effekseerHeader.readLines('Effekseer/Effekseer/Effekseer.Base.Pre.h')
effekseerHeader.readLines('Effekseer/Effekseer/Utils/Effekseer.CustomAllocator.h')
effekseerHeader.readLines('Effekseer/Effekseer/Effekseer.Vector2D.h')
effekseerHeader.readLines('Effekseer/Effekseer/Effekseer.Vector3D.h')
effekseerHeader.readLines('Effekseer/Effekseer/Effekseer.Color.h')
effekseerHeader.readLines('Effekseer/Effekseer/Effekseer.RectF.h')
effekseerHeader.readLines('Effekseer/Effekseer/Effekseer.Matrix43.h')
effekseerHeader.readLines('Effekseer/Effekseer/Effekseer.Matrix44.h')
effekseerHeader.readLines('Effekseer/Effekseer/Effekseer.File.h')
effekseerHeader.readLines('Effekseer/Effekseer/Effekseer.DefaultFile.h')
effekseerHeader.readLines('Effekseer/Effekseer/Backend/GraphicsDevice.h')
effekseerHeader.readLines('Effekseer/Effekseer/Effekseer.Resource.h')
effekseerHeader.readLines('Effekseer/Effekseer/Effekseer.Effect.h')
effekseerHeader.readLines('Effekseer/Effekseer/Effekseer.Manager.h')
effekseerHeader.readLines('Effekseer/Effekseer/Effekseer.Setting.h')
effekseerHeader.readLines('Effekseer/Effekseer/Effekseer.Server.h')
effekseerHeader.readLines('Effekseer/Effekseer/Effekseer.Client.h')
effekseerHeader.addLine('')
effekseerHeader.addLine('#include "Effekseer.Modules.h"')
effekseerHeader.addLine('')
effekseerHeader.output('Effekseer/Effekseer.h')
effekseerSimdHeader = CreateHeader()
effekseerSimdHeader.addLine('#pragma once')
effekseerSimdHeader.readLines('Effekseer/Effekseer/SIMD/Base.h')
effekseerSimdHeader.readLines('Effekseer/Effekseer/SIMD/Float4_Gen.h')
effekseerSimdHeader.readLines('Effekseer/Effekseer/SIMD/Float4_NEON.h')
effekseerSimdHeader.readLines('Effekseer/Effekseer/SIMD/Float4_SSE.h')
effekseerSimdHeader.readLines('Effekseer/Effekseer/SIMD/Int4_Gen.h')
effekseerSimdHeader.readLines('Effekseer/Effekseer/SIMD/Int4_NEON.h')
effekseerSimdHeader.readLines('Effekseer/Effekseer/SIMD/Int4_SSE.h')
effekseerSimdHeader.readLines('Effekseer/Effekseer/SIMD/Bridge_Gen.h')
effekseerSimdHeader.readLines('Effekseer/Effekseer/SIMD/Bridge_NEON.h')
effekseerSimdHeader.readLines('Effekseer/Effekseer/SIMD/Bridge_SSE.h')
effekseerSimdHeader.readLines('Effekseer/Effekseer/SIMD/Vec2f.h')
effekseerSimdHeader.readLines('Effekseer/Effekseer/SIMD/Vec3f.h')
effekseerSimdHeader.readLines('Effekseer/Effekseer/SIMD/Vec4f.h')
effekseerSimdHeader.readLines('Effekseer/Effekseer/SIMD/Mat43f.h')
effekseerSimdHeader.readLines('Effekseer/Effekseer/SIMD/Mat44f.h')
effekseerSimdHeader.readLines('Effekseer/Effekseer/SIMD/Quaternionf.h')
effekseerSimdHeader.readLines('Effekseer/Effekseer/SIMD/Utils.h')
effekseerSimdHeader.output('Effekseer/Effekseer.SIMD.h')
effekseerModulesHeader = CreateHeader()
effekseerModulesHeader.addLine('#pragma once')
effekseerModulesHeader.addLine('')
effekseerModulesHeader.addLine('#include "Effekseer.h"')
effekseerModulesHeader.addLine('#include "Effekseer.SIMD.h"')
effekseerModulesHeader.addLine('')
effekseerModulesHeader.addLine('// A header to access internal data of effekseer')
effekseerModulesHeader.readLines('Effekseer/Effekseer/Parameter/Effekseer.Parameters.h')
effekseerModulesHeader.readLines('Effekseer/Effekseer/Renderer/Effekseer.SpriteRenderer.h')
effekseerModulesHeader.readLines('Effekseer/Effekseer/Renderer/Effekseer.RibbonRenderer.h')
effekseerModulesHeader.readLines('Effekseer/Effekseer/Renderer/Effekseer.RingRenderer.h')
effekseerModulesHeader.readLines('Effekseer/Effekseer/Renderer/Effekseer.ModelRenderer.h')
effekseerModulesHeader.readLines('Effekseer/Effekseer/Renderer/Effekseer.TrackRenderer.h')
effekseerModulesHeader.readLines('Effekseer/Effekseer/Effekseer.EffectLoader.h')
effekseerModulesHeader.readLines('Effekseer/Effekseer/Effekseer.TextureLoader.h')
effekseerModulesHeader.readLines('Effekseer/Effekseer/Model/Model.h')
effekseerModulesHeader.readLines('Effekseer/Effekseer/Model/ModelLoader.h')
effekseerModulesHeader.readLines('Effekseer/Effekseer/Effekseer.MaterialLoader.h')
effekseerModulesHeader.readLines('Effekseer/Effekseer/Model/Model.h')
effekseerModulesHeader.readLines('Effekseer/Effekseer/Effekseer.Curve.h')
effekseerModulesHeader.readLines('Effekseer/Effekseer/Effekseer.CurveLoader.h')
effekseerModulesHeader.readLines('Effekseer/Effekseer/Sound/Effekseer.SoundPlayer.h')
effekseerModulesHeader.readLines('Effekseer/Effekseer/Effekseer.SoundLoader.h')
effekseerModulesHeader.output('Effekseer/Effekseer.Modules.h')
effekseerRendererDX9Header = CreateHeader()
effekseerRendererDX9Header.readLines('EffekseerRendererDX9/EffekseerRenderer/EffekseerRendererDX9.Base.Pre.h')
effekseerRendererDX9Header.readLines('EffekseerRendererCommon/EffekseerRenderer.Renderer.h')
effekseerRendererDX9Header.readLines('EffekseerRendererDX9/EffekseerRenderer/EffekseerRendererDX9.Renderer.h')
effekseerRendererDX9Header.output('EffekseerRendererDX9/EffekseerRendererDX9.h')
effekseerRendererDX11Header = CreateHeader()
effekseerRendererDX11Header.readLines('EffekseerRendererDX11/EffekseerRenderer/EffekseerRendererDX11.Base.Pre.h')
effekseerRendererDX11Header.readLines('EffekseerRendererCommon/EffekseerRenderer.Renderer.h')
effekseerRendererDX11Header.readLines('EffekseerRendererDX11/EffekseerRenderer/EffekseerRendererDX11.Renderer.h')
effekseerRendererDX11Header.output('EffekseerRendererDX11/EffekseerRendererDX11.h')
effekseerRendererDX12Header = CreateHeader()
effekseerRendererDX12Header.readLines('EffekseerRendererDX12/EffekseerRenderer/EffekseerRendererDX12.Base.Pre.h')
effekseerRendererDX12Header.readLines('EffekseerRendererCommon/EffekseerRenderer.Renderer.h')
effekseerRendererDX12Header.readLines('EffekseerRendererDX12/EffekseerRenderer/EffekseerRendererDX12.Renderer.h')
effekseerRendererDX12Header.readLines('EffekseerRendererLLGI/Common.h')
effekseerRendererDX12Header.output('EffekseerRendererDX12/EffekseerRendererDX12.h')
effekseerRendererVulkanHeader = CreateHeader()
effekseerRendererVulkanHeader.readLines('EffekseerRendererVulkan/EffekseerRenderer/EffekseerRendererVulkan.Base.Pre.h')
effekseerRendererVulkanHeader.readLines('EffekseerRendererCommon/EffekseerRenderer.Renderer.h')
effekseerRendererVulkanHeader.readLines('EffekseerRendererVulkan/EffekseerRenderer/EffekseerRendererVulkan.Renderer.h')
effekseerRendererVulkanHeader.readLines('EffekseerRendererLLGI/Common.h')
effekseerRendererVulkanHeader.output('EffekseerRendererVulkan/EffekseerRendererVulkan.h')
effekseerRendererGLHeader = CreateHeader()
effekseerRendererGLHeader.readLines('EffekseerRendererGL/EffekseerRenderer/EffekseerRendererGL.Base.Pre.h')
effekseerRendererGLHeader.readLines('EffekseerRendererCommon/EffekseerRenderer.Renderer.h')
effekseerRendererGLHeader.readLines('EffekseerRendererGL/EffekseerRenderer/EffekseerRendererGL.Renderer.h')
effekseerRendererGLHeader.output('EffekseerRendererGL/EffekseerRendererGL.h')
effekseerRendererMetalHeader = CreateHeader()
effekseerRendererMetalHeader.readLines('EffekseerRendererMetal/EffekseerRenderer/EffekseerRendererMetal.Base.Pre.h')
effekseerRendererMetalHeader.readLines('EffekseerRendererCommon/EffekseerRenderer.Renderer.h')
effekseerRendererMetalHeader.readLines('EffekseerRendererMetal/EffekseerRenderer/EffekseerRendererMetal.Renderer.h')
effekseerRendererMetalHeader.readLines('EffekseerRendererLLGI/Common.h')
effekseerRendererMetalHeader.output('EffekseerRendererMetal/EffekseerRendererMetal.h')
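# For reference, a hedged sketch of the CreateHeader helper used throughout this
# script (the real class is defined earlier in the file; only the three method
# names are taken from the calls above, the bodies are illustrative):
class CreateHeaderSketch:
    def __init__(self):
        self.lines = []

    def addLine(self, line):
        self.lines.append(line)

    def readLines(self, path):
        # append the contents of one source header, line by line
        with open(path, encoding='utf-8') as f:
            self.lines.extend(line.rstrip('\n') for line in f)

    def output(self, path):
        # write the accumulated lines out as a single amalgamated header
        with open(path, 'w', encoding='utf-8') as f:
            f.write('\n'.join(self.lines) + '\n')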
| 57.757143 | 160 | 0.852461 | [
"Apache-2.0",
"BSD-3-Clause"
] | Shockblast/Effekseer | Dev/Cpp/CreateHeader.py | 8,086 | Python |
import wx.stc as stc
def highlight(editor, styles, faces):
editor.SetLexer(stc.STC_LEX_YAML)
editor.StyleSetSpec(stc.STC_YAML_DEFAULT, "fore:" +
styles["default"] + ",face:%(helv)s,size:%(size)d" % faces)
editor.StyleSetSpec(stc.STC_YAML_COMMENT, "fore:" +
styles["comment"] + ",face:%(helv)s,size:%(size)d" % faces)
editor.StyleSetSpec(stc.STC_YAML_ERROR, "fore:" +
styles["error"] + ",face:%(helv)s,size:%(size)d" % faces)
editor.StyleSetSpec(stc.STC_YAML_IDENTIFIER, "fore:" +
styles["identifier"] + ",face:%(helv)s,size:%(size)d" % faces)
editor.StyleSetSpec(stc.STC_YAML_NUMBER, "fore:" +
styles["number"] + ",face:%(helv)s,size:%(size)d" % faces)
| 35.043478 | 86 | 0.569479 | [
"MIT"
] | ShardulNalegave/pycode | yamlHighlighter.py | 806 | Python |
import pickle
import os
from tqdm import tqdm
with open('../data/bawe_splits.p', 'rb') as f:
splits = pickle.load(f)
if not os.path.isdir('../data/preprocess/bawe-group'):
os.mkdir('../data/preprocess/bawe-group')
for filename in tqdm(splits['train']):
    # group files by the four-character id prefix of the file name
    # (renamed from `id`, which shadows the Python builtin)
    file_id = filename[:4]
    with open(f'../data/bawe/CORPUS_TXT/{filename}', 'r') as f:
        if not os.path.isdir(f'../data/preprocess/bawe-group/{file_id}'):
            os.mkdir(f'../data/preprocess/bawe-group/{file_id}')
        text = f.read()
    with open(f'../data/preprocess/bawe-group/{file_id}/{filename}', 'w') as wf:
        wf.write(text)
| 29.142857 | 79 | 0.609477 | [
"Apache-2.0"
] | grchristensen/avpd | notebooks/develop/2021-02-18-gc-bawe-data-grouping.py | 612 | Python |
# -*- coding: utf-8 -*-
from flask import Blueprint, jsonify
from flask_service.swagger import spec
__all__ = ['main_app']
main_app = Blueprint('main_app', __name__)
@main_app.route('/api')
def swagger():
"""
Responds with the OpenAPI specification for this application.
"""
return jsonify(spec.to_dict())
@main_app.route('/health')
def health():
"""
    Responds with the service's current health.
Could be used by the liveness probe of a Kubernetes cluster for instance.
"""
# put some logic here to decide if your app is doing well or not
# by default, we'll always return everything is okay!
return ""
@main_app.route('/status')
def status():
"""
    Responds with the service's current status.
Could be used by the readiness probe of a Kubernetes cluster.
"""
# put some logic here to decide if your app is doing well or not
# by default, we'll always return everything is okay!
return "" | 24.2 | 77 | 0.677686 | [
"Apache-2.0"
] | mwprog/atomist-flask-microservice | flask_service/views.py | 968 | Python |
"""Principal Component Analysis Base Classes"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
# Denis A. Engemann <denis-alexander.engemann@inria.fr>
# Kyle Kastner <kastnerkyle@gmail.com>
#
# License: BSD 3 clause
import numpy as np
from scipy import linalg
from ..base import BaseEstimator, TransformerMixin, _ClassNamePrefixFeaturesOutMixin
from ..utils.validation import check_is_fitted
from abc import ABCMeta, abstractmethod
class _BasePCA(
_ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator, metaclass=ABCMeta
):
"""Base class for PCA methods.
Warning: This class should not be used directly.
Use derived classes instead.
"""
def get_covariance(self):
"""Compute data covariance with the generative model.
``cov = components_.T * S**2 * components_ + sigma2 * eye(n_features)``
where S**2 contains the explained variances, and sigma2 contains the
noise variances.
Returns
-------
cov : array of shape=(n_features, n_features)
Estimated covariance of data.
"""
components_ = self.components_
exp_var = self.explained_variance_
if self.whiten:
components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.0)
cov = np.dot(components_.T * exp_var_diff, components_)
cov.flat[:: len(cov) + 1] += self.noise_variance_ # modify diag inplace
return cov
def get_precision(self):
"""Compute data precision matrix with the generative model.
Equals the inverse of the covariance but computed with
the matrix inversion lemma for efficiency.
Returns
-------
precision : array, shape=(n_features, n_features)
Estimated precision of data.
"""
n_features = self.components_.shape[1]
# handle corner cases first
if self.n_components_ == 0:
return np.eye(n_features) / self.noise_variance_
if self.n_components_ == n_features:
return linalg.inv(self.get_covariance())
# Get precision using matrix inversion lemma
components_ = self.components_
exp_var = self.explained_variance_
if self.whiten:
components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.0)
precision = np.dot(components_, components_.T) / self.noise_variance_
precision.flat[:: len(precision) + 1] += 1.0 / exp_var_diff
precision = np.dot(components_.T, np.dot(linalg.inv(precision), components_))
precision /= -(self.noise_variance_ ** 2)
precision.flat[:: len(precision) + 1] += 1.0 / self.noise_variance_
return precision
@abstractmethod
def fit(self, X, y=None):
"""Placeholder for fit. Subclasses should implement this method!
Fit the model with X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples and
`n_features` is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
def transform(self, X):
"""Apply dimensionality reduction to X.
X is projected on the first principal components previously extracted
from a training set.
Parameters
----------
X : array-like of shape (n_samples, n_features)
New data, where `n_samples` is the number of samples
and `n_features` is the number of features.
Returns
-------
X_new : array-like of shape (n_samples, n_components)
Projection of X in the first principal components, where `n_samples`
is the number of samples and `n_components` is the number of the components.
"""
check_is_fitted(self)
X = self._validate_data(X, dtype=[np.float64, np.float32], reset=False)
if self.mean_ is not None:
X = X - self.mean_
X_transformed = np.dot(X, self.components_.T)
if self.whiten:
X_transformed /= np.sqrt(self.explained_variance_)
return X_transformed
def inverse_transform(self, X):
"""Transform data back to its original space.
In other words, return an input `X_original` whose transform would be X.
Parameters
----------
X : array-like of shape (n_samples, n_components)
New data, where `n_samples` is the number of samples
and `n_components` is the number of components.
Returns
-------
        X_original : array-like of shape (n_samples, n_features)
Original data, where `n_samples` is the number of samples
and `n_features` is the number of features.
Notes
-----
If whitening is enabled, inverse_transform will compute the
exact inverse operation, which includes reversing whitening.
"""
if self.whiten:
return (
np.dot(
X,
np.sqrt(self.explained_variance_[:, np.newaxis]) * self.components_,
)
+ self.mean_
)
else:
return np.dot(X, self.components_) + self.mean_
@property
def _n_features_out(self):
"""Number of transformed output features."""
return self.components_.shape[0]
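# A small consistency sketch (uses the public sklearn PCA, a subclass of
# _BasePCA above; the data and sizes are illustrative): get_precision() should
# match a direct inverse of get_covariance(), per the matrix inversion lemma.
if __name__ == "__main__":
    from sklearn.decomposition import PCA

    rng = np.random.RandomState(0)
    X = rng.randn(100, 5)
    pca = PCA(n_components=3).fit(X)
    print(np.allclose(pca.get_precision(), linalg.inv(pca.get_covariance())))
    # expected: True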
| 34.853659 | 88 | 0.618964 | [
"BSD-3-Clause"
] | 40104/Scikit-Learn | sklearn/decomposition/_base.py | 5,716 | Python |
from setuptools import setup
from os import path
def open_file(fname):
return open(path.join(path.dirname(__file__), fname))
setup_requires = ['pbr']
setup(
license='MIT',
setup_requires=setup_requires,
pbr=True,
auto_version="PBR",
    install_requires=open_file('requirements.txt').readlines(),
)
| 21.222222 | 93 | 0.727749 | [
"MIT"
] | CuidaAdmin/django-alexa | setup.py | 382 | Python |
"""
enCount tasks and analyses.
enCount is a Python library for processing RNA-Seq data from ENCODE.
"""
# from ._version import __version__
from . import config # load from myconfig.py if it exists
from . import db
from . import queues
from . import encode
from . import externals
from . import gtfs
from . import fastqs
from . import experiments
from . import mappings
from . import integration | 19.190476 | 68 | 0.756824 | [
"MIT"
] | mstrazar/enCount | enCount/__init__.py | 403 | Python |
from asgiref.sync import sync_to_async
from channels.layers import get_channel_layer
from ....models import Participant
import humps
channel_layer = get_channel_layer()
def get_participant(room_channel_name, channel_name):
participant = Participant.objects.get(
channel_room__channel_name=room_channel_name,
channel_name=channel_name
)
return participant
def get_participant_id(participant):
return participant.id
async def broadcast_avatar_position(room_channel_name, channel_name, json_data):
"""
Sends the new avatar's position to the users of the room.
"""
type = json_data['type']
payload = json_data['payload']
position = payload["position"]
animate = payload["animate"]
# receive the participant that sent this message
participant = await sync_to_async(get_participant)(room_channel_name, channel_name)
participant_id = await sync_to_async(get_participant_id)(participant)
# if this was for an avatar, then set participant's position to the payload data
def set_participant_position():
participant.x = position["x"]
participant.y = position["y"]
participant.direction_x = position["directionX"]
participant.save()
await sync_to_async(set_participant_position)()
await channel_layer.group_send(
room_channel_name,
{
'type': type,
'payload': {
"participant_id": participant_id,
"position": position,
"animate": animate,
}
}
)
async def broadcast_avatar_state(room_channel_name, channel_name, json_data):
"""
Sends the new avatar's state to the users of the room.
"""
type = json_data['type']
payload = json_data['payload']
state = payload['value']
# receive the participant that sent this message
participant = await sync_to_async(get_participant)(room_channel_name, channel_name)
participant_id = await sync_to_async(get_participant_id)(participant)
await channel_layer.group_send(
room_channel_name,
{
'type': humps.decamelize(type),
'payload': {
"participant_id": participant_id,
"state": state
}
}
) | 30.573333 | 87 | 0.66812 | [
"MIT"
] | Shadowsych/html5-msoy | server/websockets/consumers/world/broadcasts/avatar.py | 2,293 | Python |
"""Plot graphs from human-readable file formats."""
| 26 | 51 | 0.730769 | [
"MIT"
] | Sean1708/uniplot | uniplot/__init__.py | 52 | Python |
# https://www.hackerrank.com/challenges/tree-height-of-a-binary-tree/problem
def height(root):
"""
DFS
v = Vertices
e = Edges
d = Depth
Time complexity: O(v + e)
Space complexity: O(d)
"""
if root:
return 1 + max(height(root.left), height(root.right))
else:
return -1
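# A tiny usage sketch (the Node class here is illustrative; HackerRank supplies
# its own binary tree type with .left/.right attributes):
class Node:
    def __init__(self, data):
        self.data = data
        self.left = None
        self.right = None

if __name__ == "__main__":
    root = Node(3)
    root.left = Node(2)
    root.left.left = Node(1)
    print(height(root))   # 2: two edges on the longest root-to-leaf path
    print(height(None))   # -1 by convention for the empty tree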
| 17.421053 | 76 | 0.567976 | [
"MIT"
] | danielfsousa/algorithms-solutions | HackerRank/Data Structures/Trees/height-of-a-binary-tree.py | 331 | Python |
from dbt.clients.system import load_file_contents
from dbt.contracts.files import (
FilePath, ParseFileType, SourceFile, FileHash, AnySourceFile, SchemaSourceFile
)
from dbt.parser.schemas import yaml_from_file
from dbt.parser.search import FilesystemSearcher
# This loads the files contents and creates the SourceFile object
def load_source_file(
path: FilePath, parse_file_type: ParseFileType,
project_name: str) -> AnySourceFile:
file_contents = load_file_contents(path.absolute_path, strip=False)
checksum = FileHash.from_contents(file_contents)
sf_cls = SchemaSourceFile if parse_file_type == ParseFileType.Schema else SourceFile
source_file = sf_cls(path=path, checksum=checksum,
parse_file_type=parse_file_type, project_name=project_name)
source_file.contents = file_contents.strip()
if parse_file_type == ParseFileType.Schema:
source_file.dfy = yaml_from_file(source_file)
return source_file
# Special processing for big seed files
def load_seed_source_file(match: FilePath, project_name) -> SourceFile:
if match.seed_too_large():
# We don't want to calculate a hash of this file. Use the path.
source_file = SourceFile.big_seed(match)
else:
file_contents = load_file_contents(match.absolute_path, strip=False)
checksum = FileHash.from_contents(file_contents)
source_file = SourceFile(path=match, checksum=checksum)
source_file.contents = ''
source_file.parse_file_type = ParseFileType.Seed
source_file.project_name = project_name
return source_file
# Use the FilesystemSearcher to get a bunch of FilePaths, then turn
# them into a bunch of FileSource objects
def get_source_files(project, paths, extension, parse_file_type):
# file path list
fp_list = list(FilesystemSearcher(
project, paths, extension
))
# file block list
fb_list = []
for fp in fp_list:
if parse_file_type == ParseFileType.Seed:
fb_list.append(load_seed_source_file(fp, project.project_name))
else:
fb_list.append(load_source_file(
fp, parse_file_type, project.project_name))
return fb_list
def read_files_for_parser(project, files, dirs, extension, parse_ft):
parser_files = []
source_files = get_source_files(
project, dirs, extension, parse_ft
)
for sf in source_files:
files[sf.file_id] = sf
parser_files.append(sf.file_id)
return parser_files
# This needs to read files for multiple projects, so the 'files'
# dictionary needs to be passed in. What determines the order of
# the various projects? Is the root project always last? Do the
# non-root projects need to be done separately in order?
def read_files(project, files, parser_files):
project_files = {}
project_files['MacroParser'] = read_files_for_parser(
project, files, project.macro_paths, '.sql', ParseFileType.Macro,
)
project_files['ModelParser'] = read_files_for_parser(
project, files, project.source_paths, '.sql', ParseFileType.Model,
)
project_files['SnapshotParser'] = read_files_for_parser(
project, files, project.snapshot_paths, '.sql', ParseFileType.Snapshot,
)
project_files['AnalysisParser'] = read_files_for_parser(
project, files, project.analysis_paths, '.sql', ParseFileType.Analysis,
)
project_files['DataTestParser'] = read_files_for_parser(
project, files, project.test_paths, '.sql', ParseFileType.Test,
)
project_files['SeedParser'] = read_files_for_parser(
project, files, project.data_paths, '.csv', ParseFileType.Seed,
)
project_files['DocumentationParser'] = read_files_for_parser(
project, files, project.docs_paths, '.md', ParseFileType.Documentation,
)
project_files['SchemaParser'] = read_files_for_parser(
project, files, project.all_source_paths, '.yml', ParseFileType.Schema,
)
# Also read .yaml files for schema files. Might be better to change
    # 'read_files_for_parser' to accept a list of extensions in the future.
yaml_files = read_files_for_parser(
project, files, project.all_source_paths, '.yaml', ParseFileType.Schema,
)
project_files['SchemaParser'].extend(yaml_files)
# Store the parser files for this particular project
parser_files[project.project_name] = project_files
| 37.457627 | 88 | 0.722398 | [
"Apache-2.0"
] | JLDLaughlin/dbt | core/dbt/parser/read_files.py | 4,420 | Python |
from typing import List
'''
1. subproblems: dp(amount) = the minimum number of coins needed to make change for `amount` using the given coin denominations
2. guessing: which of the available denominations c_i is used for the last coin
3. relate subproblems: dp(amount) = min(dp(amount - c_i) + 1) for all possible c_i
Time complexity: O(#subproblems * #coins)
'''
class Solution:
# top down solution
def coinChange(self, coins: List[int], amount: int) -> int:
# for amount less than 1, return 0
if amount < 1:
return 0
memo = {}
def helper(coins, amount):
# for subproblems that we have alreay solve and memorized
if amount in memo:
return memo[amount]
# base case, we reach out the bottom of the tree.
if amount == 0:
return 0
# go through all possible coin denomination(breaches in tree)
dp = float('inf')
for coin in coins:
if coin > amount:
continue
# relate subproblems
dp = min(helper(coins, amount - coin) + 1, dp)
memo[amount] = dp
return dp
helper(coins, amount)
return -1 if memo[amount] == float('inf') else memo[amount]
# bottom-up solution, DAG
def coinChange_2(self, coins: List[int], amount: int) -> int:
memo = [float('inf') for i in range(amount + 1)]
# dp[i] = min{dp[i - c_i] + 1} for all c_i
memo[0] = 0
for i in range(amount + 1):
# check all the states that are reachable by coins to state i
for coin in coins:
if i < coin:
continue
memo[i] = min(memo[i], memo[i - coin] + 1)
return -1 if memo[amount] == float('inf') else memo[amount]
x = Solution()
print(x.coinChange([1, 2, 5], 11))    # 3  (5 + 5 + 1)
print(x.coinChange_2([1, 2, 5], 11))  # 3
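# A couple of edge-case checks that follow from the definitions above:
# amount 0 needs 0 coins, and an unreachable amount yields -1 in both
# implementations.
assert x.coinChange([2], 3) == -1 and x.coinChange_2([2], 3) == -1
assert x.coinChange([1, 2, 5], 0) == 0 and x.coinChange_2([1, 2, 5], 0) == 0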
"MIT"
] | sundaycat/Leetcode-Practice | solution/322. coin-change.py | 2,052 | Python |
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
config = {
'description': 'Raster Vector Analysis',
'author': 'Jan Kumor',
'url': 'http://github.com/akumor/python-rastervectoranalysis',
'download_url': 'http://github.com/akumor/python-rastervectoranalysis',
'author_email': 'akumor@users.noreply.github.com',
'version': '0.1',
'install_requires': [''],
'packages': ['rastervectoranalysis'],
'scripts': [],
'name': 'rastervectoranalysis'
}
setup(**config)
| 27.2 | 75 | 0.667279 | [
"Apache-2.0"
] | akumor/python-rastervectoranalysis | setup.py | 544 | Python |
from checkov.common.models.enums import CheckCategories
from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck
class PasswordPolicyLowercaseLetter(BaseResourceValueCheck):
def __init__(self):
name = "Ensure RAM password policy requires at least one lowercase letter"
id = "CKV_ALI_17"
supported_resources = ['alicloud_ram_account_password_policy']
categories = [CheckCategories.IAM]
super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
def get_inspected_key(self):
return 'require_lowercase_characters'
check = PasswordPolicyLowercaseLetter()
| 38.333333 | 106 | 0.778261 | [
"Apache-2.0"
] | Eliran-Turgeman/checkov | checkov/terraform/checks/resource/alicloud/PasswordPolicyLowercaseLetter.py | 690 | Python |
import SimpleITK as sitk
import numpy as np
import torch
import math
import time
import sys
import cv2
from scipy.ndimage.interpolation import zoom
from torch.autograd import Variable
sys.path.append('../lung_nodule_detector')
from training.layers import nms
def load_itk_image(filename):
with open(filename) as f:
contents = f.readlines()
line = [k for k in contents if k.startswith('TransformMatrix')][0]
transformM = np.array(line.split(' = ')[1].split(' ')).astype('float')
transformM = np.round(transformM)
if np.any(transformM != np.array([1, 0, 0, 0, 1, 0, 0, 0, 1])):
isflip = True
else:
isflip = False
itkimage = sitk.ReadImage(filename)
numpyImage = sitk.GetArrayFromImage(itkimage)
numpyOrigin = np.array(list(reversed(itkimage.GetOrigin())))
numpySpacing = np.array(list(reversed(itkimage.GetSpacing())))
return numpyImage, numpyOrigin, numpySpacing, isflip
def lumTrans(img):
lungwin = np.array([-1200.,600.])
newimg = (img-lungwin[0])/(lungwin[1]-lungwin[0])
newimg[newimg<0]=0
newimg[newimg>1]=1
newimg = (newimg*255).astype('uint8')
return newimg
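# A quick sanity check of the windowing above: HU values are clipped to the
# [-1200, 600] lung window and rescaled to [0, 255] (the sample values below
# are illustrative).
if __name__ == "__main__":
    print(lumTrans(np.array([-1500., -1200., -300., 600., 1000.])))
    # -> [0 0 127 255 255]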
def resample(imgs, spacing, new_spacing, progressBar, order=2):
print (len(imgs.shape))
if len(imgs.shape)==3:
new_shape = np.round(imgs.shape * spacing / new_spacing)
true_spacing = spacing * imgs.shape / new_shape
resize_factor = new_shape / imgs.shape
imgs = zoom(imgs, resize_factor, mode = 'nearest',order=order)
progressBar.setValue(40)
return imgs, true_spacing
elif len(imgs.shape)==4:
n = imgs.shape[-1]
newimg = []
for i in range(n):
slice = imgs[:,:,:,i]
newslice,true_spacing = resample(slice,spacing,new_spacing)
newimg.append(newslice)
newimg=np.transpose(np.array(newimg),[1,2,3,0])
return newimg,true_spacing
else:
raise ValueError('wrong shape')
def resample_v1(imgs, spacing, new_spacing, order=2):
print (len(imgs.shape))
if len(imgs.shape)==3:
new_shape = np.round(imgs.shape * spacing / new_spacing)
true_spacing = spacing * imgs.shape / new_shape
resize_factor = new_shape / imgs.shape
imgs = zoom(imgs, resize_factor, mode = 'nearest',order=order)
return imgs, true_spacing
elif len(imgs.shape)==4:
n = imgs.shape[-1]
newimg = []
for i in range(n):
slice = imgs[:,:,:,i]
newslice,true_spacing = resample(slice,spacing,new_spacing)
newimg.append(newslice)
newimg=np.transpose(np.array(newimg),[1,2,3,0])
return newimg,true_spacing
else:
raise ValueError('wrong shape')
def split_data(data, stride, split_comber):
print (data.shape[1:])
nz, nh, nw = data.shape[1:]
pz = int(np.ceil(float(nz) / stride)) * stride
ph = int(np.ceil(float(nh) / stride)) * stride
pw = int(np.ceil(float(nw) / stride)) * stride
data = np.pad(data, [[0, 0], [0, pz - nz], [0, ph - nh], [0, pw - nw]], 'constant', constant_values=0)
    # np.linspace requires an integer sample count in Python 3, so cast the
    # stride-derived sizes explicitly
    xx, yy, zz = np.meshgrid(np.linspace(-0.5, 0.5, int(data.shape[1] / stride)),
                             np.linspace(-0.5, 0.5, int(data.shape[2] / stride)),
                             np.linspace(-0.5, 0.5, int(data.shape[3] / stride)), indexing='ij')
coord = np.concatenate([xx[np.newaxis, ...], yy[np.newaxis, ...], zz[np.newaxis, :]], 0).astype('float32')
data, nzhw = split_comber.split(data)
coord2, nzhw2 = split_comber.split(coord,
side_len=split_comber.side_len / stride,
max_stride=split_comber.max_stride / stride,
margin=split_comber.margin / stride)
assert np.all(nzhw == nzhw2)
data = (data.astype(np.float32) - 128) / 128
return torch.from_numpy(data), torch.from_numpy(coord2), np.array(nzhw)
def convert_prob(pbb):
for label in pbb:
pos_ori = label[1:4]
radious_ori = label[4]
#pos_ori = pos_ori + extendbox[:, 0]
label[1:4] = pos_ori
label[4] = radious_ori
label[0] = sigmoid(label[0])
return pbb
def sigmoid(x):
return 1 / (1 + math.exp(-x))
def predict_nodule(net, data, coord, nzhw, lbb, n_per_run, split_comber, get_pbb, progressBar):
net.eval()
total_label = 0
total_candi = 0
splitlist = list(range(0, len(data) + 1, n_per_run))
if splitlist[-1] != len(data):
splitlist.append(len(data))
outputlist = []
for i in range(len(splitlist) - 1):
with torch.no_grad():
inputdata = Variable(data[splitlist[i]:splitlist[i + 1]]).cuda()
inputcoord = Variable(coord[splitlist[i]:splitlist[i + 1]]).cuda()
output = net(inputdata, inputcoord)
outputlist.append(output.data.cpu().numpy())
progressBar.setValue(10 + (80/len(splitlist) * (i+1)))
output = np.concatenate(outputlist, 0)
output = split_comber.combine(output, nzhw=nzhw)
# fps 1.215909091, sens 0.933333333, thres 0.371853054
thresh = 0.371853054
pbb, mask = get_pbb(output, thresh, ismask=True)
pbb = pbb[pbb[:, 0].argsort()[::-1]]
pbb_cand_list = []
# check overlap under 3mm
for cand in pbb:
is_overlap = False
for appended in pbb_cand_list:
minimum_dist = 3
dist = math.sqrt(
math.pow(appended[1] - cand[1], 2) + math.pow(appended[2] - cand[2], 2) + math.pow(
appended[3] - cand[3], 2))
if (dist < minimum_dist):
is_overlap = True
                break
if not is_overlap:
pbb_cand_list.append(cand)
pbb_cand_list = np.array(pbb_cand_list)
pbb_cand_list_nms = nms(pbb_cand_list, 0.3)
# print (name)
# print (lbb)
world_pbb = convert_prob(pbb_cand_list_nms)
# print (world_pbb)
print("label", len(lbb))
print("z_pos y_pos x_pos size")
for i in range(len(lbb)):
for j in range(len(lbb[i])):
print(round(lbb[i][j], 2), end='\t')
print()
print("candidate", len(world_pbb))
print("prob z_pos y_pos x_pos size")
for i in range(len(world_pbb)):
for j in range(len(world_pbb[i])):
print(round(world_pbb[i][j], 2), end='\t')
print()
total_label += len(lbb)
total_candi += len(world_pbb)
return lbb, world_pbb
def predict_nodule_v1(net, data, coord, nzhw, lbb, n_per_run, split_comber, get_pbb):
net.eval()
total_label = 0
total_candi = 0
splitlist = list(range(0, len(data) + 1, n_per_run))
if splitlist[-1] != len(data):
splitlist.append(len(data))
outputlist = []
for i in range(len(splitlist) - 1):
with torch.no_grad():
inputdata = Variable(data[splitlist[i]:splitlist[i + 1]]).cuda()
inputcoord = Variable(coord[splitlist[i]:splitlist[i + 1]]).cuda()
output = net(inputdata, inputcoord)
outputlist.append(output.data.cpu().numpy())
output = np.concatenate(outputlist, 0)
output = split_comber.combine(output, nzhw=nzhw)
# fps 1.215909091, sens 0.933333333, thres 0.371853054
thresh = 0.371853054
pbb, mask = get_pbb(output, thresh, ismask=True)
pbb = pbb[pbb[:, 0].argsort()[::-1]]
pbb_cand_list = []
# check overlap under 3mm
for cand in pbb:
is_overlap = False
for appended in pbb_cand_list:
minimum_dist = 3
dist = math.sqrt(
math.pow(appended[1] - cand[1], 2) + math.pow(appended[2] - cand[2], 2) + math.pow(
appended[3] - cand[3], 2))
if (dist < minimum_dist):
is_overlap = True
                break
if not is_overlap:
pbb_cand_list.append(cand)
pbb_cand_list = np.array(pbb_cand_list)
pbb_cand_list_nms = nms(pbb_cand_list, 0.3)
# print (name)
# print (lbb)
world_pbb = convert_prob(pbb_cand_list_nms)
# print (world_pbb)
print("label", len(lbb))
print("z_pos y_pos x_pos size")
for i in range(len(lbb)):
for j in range(len(lbb[i])):
print(round(lbb[i][j], 2), end='\t')
print()
print("candidate", len(world_pbb))
print("prob z_pos y_pos x_pos size")
for i in range(len(world_pbb)):
for j in range(len(world_pbb[i])):
print(round(world_pbb[i][j], 2), end='\t')
print()
total_label += len(lbb)
total_candi += len(world_pbb)
return lbb, world_pbb
def draw_nodule_rect(lbb, world_pbb, img_arr):
for i in range(len(lbb)):
label = lbb[i]
# label = np.ceil(label)
r = (label[3] / 2) * 1.3
top_left = (max(int(math.ceil(label[2] - r)), 0),
max(int(math.ceil(label[1] - r)), 0))
bottom_right = (min(int(math.ceil(label[2] + r)), np.shape(img_arr)[1]),
min(int(math.ceil(label[1] + r)), np.shape(img_arr)[2]))
z_range = [max(int(math.ceil(label[0] - r)), 0),
min(int(math.ceil(label[0] + r)), np.shape(img_arr)[0])]
for j in range(z_range[0], z_range[1]):
cv2.rectangle(img_arr[j], top_left, bottom_right, (0, 255, 0), 1)
for i in range(len(world_pbb)):
candidate = world_pbb[i]
r = (candidate[4] / 2) * 1.3
top_left = (max(int(math.ceil(candidate[3] - r)), 0),
max(int(math.ceil(candidate[2] - r)), 0))
text_top_left = (max(int(math.ceil(candidate[3] - r)) - 1, 0),
max(int(math.ceil(candidate[2] - r)) - 1, 0))
bottom_right = (min(int(math.ceil(candidate[3] + r)), np.shape(img_arr)[1]),
min(int(math.ceil(candidate[2] + r)), np.shape(img_arr)[2]))
z_range = [max(int(math.ceil(candidate[1] - r)), 0),
min(int(math.ceil(candidate[1] + r)), np.shape(img_arr)[0])]
font = cv2.FONT_HERSHEY_SIMPLEX
for j in range(z_range[0], z_range[1]):
cv2.rectangle(img_arr[j], top_left, bottom_right, (255, 0, 0), 1)
#cv2.putText(img_arr[j], "c" + str(i) + "_" +str(round(candidate[0], 2)), top_left, font, 0.4, (255, 0, 0), 1, cv2.LINE_AA)
cv2.putText(img_arr[j], "c" + str(i), text_top_left, font, 0.4, (255, 0, 0), 1, cv2.LINE_AA)
def crop_all(target, img_arr, crop_size = 48):
target = np.copy(target)
start = []
for i in range(3):
start.append(int(round(target[i])) - int(crop_size / 2))
pad = []
pad.append([0, 0])
for i in range(3):
leftpad = max(0, -start[i])
rightpad = max(0, start[i] + crop_size - img_arr.shape[i + 1])
pad.append([leftpad, rightpad])
crop = img_arr[:,
max(start[0], 0):min(start[0] + crop_size, img_arr.shape[1]),
max(start[1], 0):min(start[1] + crop_size, img_arr.shape[2]),
max(start[2], 0):min(start[2] + crop_size, img_arr.shape[3])]
crop = np.pad(crop, pad, 'constant', constant_values=0)
for i in range(3):
target[i] = target[i] - start[i]
return crop, target
def crop_nodule_arr_2ch(target, img_arr, crop_size = 48):
img_size = [crop_size, crop_size, crop_size]
crop_img, target = crop_all(target, img_arr, crop_size)
imgs = np.squeeze(crop_img, axis=0)
z = int(target[0])
y = int(target[1])
x = int(target[2])
print (z, y, x)
# z = 24
# y = 24
# x = 24
nodule_size = int(target[3])
margin = max(7, nodule_size * 0.4)
radius = int((nodule_size + margin) / 2)
s_z_pad = 0
e_z_pad = 0
s_y_pad = 0
e_y_pad = 0
s_x_pad = 0
e_x_pad = 0
s_z = max(0, z - radius)
if (s_z == 0):
s_z_pad = -(z - radius)
e_z = min(np.shape(imgs)[0], z + radius)
if (e_z == np.shape(imgs)[0]):
e_z_pad = (z + radius) - np.shape(imgs)[0]
s_y = max(0, y - radius)
if (s_y == 0):
s_y_pad = -(y - radius)
e_y = min(np.shape(imgs)[1], y + radius)
if (e_y == np.shape(imgs)[1]):
e_y_pad = (y + radius) - np.shape(imgs)[1]
s_x = max(0, x - radius)
if (s_x == 0):
s_x_pad = -(x - radius)
e_x = min(np.shape(imgs)[2], x + radius)
if (e_x == np.shape(imgs)[2]):
e_x_pad = (x + radius) - np.shape(imgs)[2]
# print (s_x, e_x, s_y, e_y, s_z, e_z)
# print (np.shape(img_arr[s_z:e_z, s_y:e_y, s_x:e_x]))
nodule_img = imgs[s_z:e_z, s_y:e_y, s_x:e_x]
nodule_img = np.pad(nodule_img, [[s_z_pad, e_z_pad], [s_y_pad, e_y_pad], [s_x_pad, e_x_pad]], 'constant',
constant_values=0)
imgpad_size = [img_size[0] - np.shape(nodule_img)[0],
img_size[1] - np.shape(nodule_img)[1],
img_size[2] - np.shape(nodule_img)[2]]
imgpad = []
imgpad_left = [int(imgpad_size[0] / 2),
int(imgpad_size[1] / 2),
int(imgpad_size[2] / 2)]
imgpad_right = [int(imgpad_size[0] / 2),
int(imgpad_size[1] / 2),
int(imgpad_size[2] / 2)]
for i in range(3):
if (imgpad_size[i] % 2 != 0):
rand = np.random.randint(2)
if rand == 0:
imgpad.append([imgpad_left[i], imgpad_right[i] + 1])
else:
imgpad.append([imgpad_left[i] + 1, imgpad_right[i]])
else:
imgpad.append([imgpad_left[i], imgpad_right[i]])
padding_crop = np.pad(nodule_img, imgpad, 'constant', constant_values=0)
padding_crop = np.expand_dims(padding_crop, axis=0)
crop = np.concatenate((padding_crop, crop_img))
crop = (crop.astype(np.float32) - 128) / 128
return torch.from_numpy(crop), crop
def predict_attribute(attribute_net, crop_img):
attribute_net.eval()
with torch.no_grad():
        # `async` became a reserved keyword in Python 3.7; PyTorch renamed the
        # flag to non_blocking
        crop_img = Variable(crop_img.cuda(non_blocking=True))
output = attribute_net(crop_img)
return output
| 34.55774 | 135 | 0.572129 | [
"MIT"
] | JiazeWang/lung_nodule_integ_viewer | UI_util.py | 14,065 | Python |
# Copyright (c) 2021 Sen Wu. All Rights Reserved.
"""Helper function to set random seed for reproducibility of models."""
import logging
import random
from typing import Optional
import numpy as np
import torch
logger = logging.getLogger(__name__)
def set_random_seed(seed: Optional[int] = None) -> None:
"""Set random seed for random, numpy, and pytorch.
Args:
        seed: The random seed, defaults to `None`, which selects it randomly.
"""
max_value = np.iinfo(np.uint32).max
min_value = np.iinfo(np.uint32).min
try:
seed = int(seed)
logger.info(f"Set random seed to {seed}.")
except (TypeError, ValueError):
seed = random.randint(min_value, max_value)
logger.info(f"No random seed specified, randomly set random seed to {seed}.")
if not (min_value <= seed <= max_value):
new_seed = random.randint(min_value, max_value)
logger.info(
f"Random seed {seed} is not valid, randomly set random seed to {new_seed}."
)
seed = new_seed
# Set random seed for random
random.seed(seed)
# Set random seed for all numpy operations
np.random.seed(seed=seed)
# Set random seed for PyTorch
torch.manual_seed(seed)
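# A quick reproducibility sketch: re-seeding with the same value should make
# all three libraries replay the same draws (the seed 42 is illustrative).
if __name__ == "__main__":
    for _ in range(2):
        set_random_seed(42)
        print(random.random(), np.random.rand(), torch.rand(1).item())
    # both printed lines are identical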
| 27.555556 | 87 | 0.664516 | [
"MIT"
] | KeAWang/emmental | src/emmental/utils/seed.py | 1,240 | Python |
# -*- coding: utf-8 -*-
"""
Copyright (c) 2012 University of Oxford
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from rdfdatabank.lib.auth_entry import list_silos, list_usernames, list_group_usernames, add_silo, add_group_users
def sync_members(g):
# NOTE: g._register_silos() IS AN EXPENSIVE OPERATION.
# THIS FUNCTION IS EXPENSIVE AND SHOULD BE CALLED ONLY IF REALLY NECESSARY
#g = ag.granary
g.state.revert()
g._register_silos()
granary_list = g.silos
granary_list_database = list_silos()
usernames = list_usernames()
for silo in granary_list:
if not silo in granary_list_database:
add_silo(silo)
kw = g.describe_silo(silo)
#Get existing owners, admins, managers and submitters from silo metadata
owners = []
admins = []
managers = []
submitters = []
if 'administrators' in kw and kw['administrators']:
admins = [x.strip() for x in kw['administrators'].split(",") if x]
if 'managers' in kw and kw['managers']:
managers = [x.strip() for x in kw['managers'].split(",") if x]
if 'submitters' in kw and kw['submitters']:
submitters = [x.strip() for x in kw['submitters'].split(",") if x]
# Check users in silo metadata are valid users
owners = [x for x in owners if x in usernames]
admins = [x for x in admins if x in usernames]
managers = [x for x in managers if x in usernames]
submitters = [x for x in submitters if x in usernames]
#Synchronize members in silo metadata with users in database
d_admins = []
d_managers = []
        d_submitters = []
if silo in granary_list_database:
d_admins, d_managers, d_submitters = list_group_usernames(silo)
admins.extend(d_admins)
managers.extend(d_managers)
submitters.extend(d_submitters)
# Ensure users are listed just once in silo metadata and owner is superset
owners.extend(admins)
owners.extend(managers)
owners.extend(submitters)
admins = list(set(admins))
managers = list(set(managers))
submitters = list(set(submitters))
owners = list(set(owners))
# Add users in silo metadata to the database
new_silo_users = []
for a in admins:
if not a in d_admins:
new_silo_users.append((a, 'administrator'))
for a in managers:
if not a in d_managers:
new_silo_users.append((a, 'manager'))
        for a in submitters:
if not a in d_submitters:
new_silo_users.append((a, 'submitter'))
if new_silo_users:
add_group_users(silo, new_silo_users)
#Write members into silo
kw['owners'] = ','.join(owners)
kw['administrators'] = ','.join(admins)
kw['managers'] = ','.join(managers)
kw['submitters'] = ','.join(submitters)
g.describe_silo(silo, **kw)
g.sync()
return
| 39.803922 | 114 | 0.652956 | [
"MIT"
] | dataflow/RDFDatabank | rdfdatabank/lib/data_sync.py | 4,060 | Python |
import mysql.connector
import json
import os
import requests
def getAllFindings(host, database, user, password, table, where):
db = mysql.connector.connect(host=host, database=database, user=user, password=password)
cursor = db.cursor()
cursor.execute("SELECT distinct findingCode, specimenOrganCode FROM " + table + " " + where)
return cursor.fetchall()
def getDrugs(api, filename):
if filename is None:
drugs = getDrugsMapping(api)
else:
if os.path.isfile(filename):
with open(filename, 'r') as drug_file:
drugs = json.loads(drug_file.read())
else:
drugs = getDrugsMapping(api)
with open(filename, 'w') as drug_file:
drug_file.write(json.dumps(drugs))
return drugs
def getDrugsMapping(api):
result = {}
clinicalCompounds = getClinicalCompounds(api)
preclinicalCompounds = getPreclinicalCompounds(api)
# iterate over the clinical and preclinical compounds and match them om inchiKey
for clinicalCompound in clinicalCompounds:
for preclinicalCompound in preclinicalCompounds:
if (clinicalCompound['inchiKey'] is not None) and (clinicalCompound['inchiKey'] == preclinicalCompound['inchiKey']):
inchiKey = clinicalCompound['inchiKey']
if inchiKey not in result:
result[inchiKey] = {
'inchiKey': inchiKey,
'clinicalName': clinicalCompound['name'],
'preclinicalName': preclinicalCompound['name']
}
result[inchiKey][preclinicalCompound['source']] = preclinicalCompound['findingIds']
result[inchiKey][clinicalCompound['source']] = clinicalCompound['findingIds']
return result
def getClinicalCompounds(api):
ct_compounds = api.ClinicalTrials().getAllCompounds();
for ct_compound in ct_compounds:
ct_compound['source'] = 'ClinicalTrials'
ml_compounds = api.Medline().getAllCompounds();
for ml_compound in ml_compounds:
ml_compound['source'] = 'Medline'
fa_compounds = api.Faers().getAllCompounds();
for fa_compound in fa_compounds:
fa_compound['source'] = 'Faers'
dm_compounds = api.DailyMed().getAllCompounds();
for dm_compound in dm_compounds:
dm_compound['source'] = 'DailyMed'
return ct_compounds + ml_compounds + fa_compounds + dm_compounds
def getPreclinicalCompounds(api):
et_compounds = api.eToxSys().getAllCompounds()
for et_compound in et_compounds:
et_compound['source'] = 'eToxSys'
return et_compounds
def getFindingsByIds(api, service, findingIds):
result = []
record_count = 0
query = {
"filter": {
"criteria": [
[
{
"field": {
"dataClassKey": "FINDING",
"name": "id"
},
"primitiveType": "Integer",
"comparisonOperator": "IN",
"values": None
},
]
]
},
"selectedFields": [
{
"dataClassKey": "FINDING",
"names": [
"id",
"specimenOrgan", "specimenOrganCode", "specimenOrganVocabulary",
"findingIdentifier", "finding", "findingCode", "findingVocabulary", "findingType",
"severity", "observation", "frequency",
"dose", "doseUnit",
"timepoint", "timepointUnit",
"treatmentRelated",
"compoundId",
"studyId",
"createdDate", "modifiedDate", "sex"
]
}
],
"offset": 0,
"limit": 500
}
for offset in range(0, len(findingIds), 500):
query['filter']['criteria'][0][0]['values'] = [{'value': findingId} for findingId in findingIds[offset:offset+500]]
        r = requests.post(service.endpoint + 'query', verify=False,
                          headers={"Authorization": f"Bearer {api.get_token()}"},
                          json=query, timeout=None)
        if r.status_code == 401:
            # the token expired: refresh the session and retry this batch once,
            # instead of silently dropping it
            api.reconnect()
            r = requests.post(service.endpoint + 'query', verify=False,
                              headers={"Authorization": f"Bearer {api.get_token()}"},
                              json=query, timeout=None)
        if r.status_code == 200:
            response = json.loads(r.text)
            for record in response['resultData']['data']:
                record['FINDING']['source'] = response['origin']
                result.append(record['FINDING'])
return result
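# The 500-row batching used above, in isolation (a sketch): slicing past the
# end of a list is safe in Python, so the last batch is simply shorter.
if __name__ == "__main__":
    ids = list(range(1234))
    batches = [ids[off:off + 500] for off in range(0, len(ids), 500)]
    print([len(b) for b in batches])  # [500, 500, 234]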
| 35.744186 | 149 | 0.561483 | [
"MIT"
] | erikvanmulligen/etransafe-heatmap | Concordance/condordance_utils.py | 4,611 | Python |
"""
Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy of this
software and associated documentation files (the "Software"), to deal in the Software
without restriction, including without limitation the rights to use, copy, modify,
merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from cfnlint.rules.outputs.Name import Name # pylint: disable=E0401
from .. import BaseRuleTestCase
class TestName(BaseRuleTestCase):
"""Test template outputs Names"""
def setUp(self):
"""Setup"""
super(TestName, self).setUp()
self.collection.register(Name())
self.success_templates = [
'fixtures/templates/good/outputs/name.yaml'
]
def test_file_positive(self):
"""Test Positive"""
self.helper_file_positive()
def test_file_negative(self):
"""Test failure"""
self.helper_file_negative('fixtures/templates/bad/outputs/name.yaml', 1)
| 42.131579 | 87 | 0.723923 | [
"MIT-0"
] | SanderKnape/cfn-python-lint | test/rules/outputs/test_name.py | 1,601 | Python |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=line-too-long
from ._client_factory import iotcentral_service_factory
def load_command_table(self, _):
from azure.cli.core.commands import CliCommandType
iotcentral_sdk = CliCommandType(
operations_tmpl='azure.mgmt.iotcentral.operations#IoTCentaralOperations.{}'
)
update_custom_util = CliCommandType(
operations_tmpl='azure.cli.command_modules.iotcentral.custom#{}')
with self.command_group('iotcentral app', iotcentral_sdk, client_factory=iotcentral_service_factory) as g:
g.custom_command('create', 'iotcentral_app_create')
g.custom_command('list', 'iotcentral_app_list')
g.custom_command('show', 'iotcentral_app_get')
g.generic_update_command('update', getter_name='iotcentral_app_get',
setter_name='iotcentral_app_update', command_type=update_custom_util)
g.custom_command('delete', 'iotcentral_app_delete')
| 46.142857 | 110 | 0.632353 | [
"MIT"
] | 6paklata/azure-cli | src/azure-cli/azure/cli/command_modules/iotcentral/commands.py | 1,292 | Python |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""RNN helpers for TensorFlow models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.util import nest
# pylint: disable=protected-access
_state_size_with_prefix = rnn_cell_impl._state_size_with_prefix
# pylint: enable=protected-access
def _infer_state_dtype(explicit_dtype, state):
"""Infer the dtype of an RNN state.
Args:
explicit_dtype: explicitly declared dtype or None.
state: RNN's hidden state. Must be a Tensor or a nested iterable containing
Tensors.
Returns:
dtype: inferred dtype of hidden state.
Raises:
ValueError: if `state` has heterogeneous dtypes or is empty.
"""
if explicit_dtype is not None:
return explicit_dtype
elif nest.is_sequence(state):
inferred_dtypes = [element.dtype for element in nest.flatten(state)]
if not inferred_dtypes:
raise ValueError("Unable to infer dtype from empty state.")
all_same = all([x == inferred_dtypes[0] for x in inferred_dtypes])
if not all_same:
raise ValueError(
"State has tensors of different inferred_dtypes. Unable to infer a "
"single representative dtype.")
return inferred_dtypes[0]
else:
return state.dtype
def _on_device(fn, device):
"""Build the subgraph defined by lambda `fn` on `device` if it's not None."""
if device:
with ops.device(device):
return fn()
else:
return fn()
# pylint: disable=unused-argument
def _rnn_step(
time, sequence_length, min_sequence_length, max_sequence_length,
zero_output, state, call_cell, state_size, skip_conditionals=False):
"""Calculate one step of a dynamic RNN minibatch.
Returns an (output, state) pair conditioned on the sequence_lengths.
When skip_conditionals=False, the pseudocode is something like:
if t >= max_sequence_length:
return (zero_output, state)
if t < min_sequence_length:
return call_cell()
# Selectively output zeros or output, old state or new state depending
# on if we've finished calculating each row.
new_output, new_state = call_cell()
final_output = np.vstack([
zero_output if time >= sequence_lengths[r] else new_output_r
for r, new_output_r in enumerate(new_output)
])
final_state = np.vstack([
state[r] if time >= sequence_lengths[r] else new_state_r
for r, new_state_r in enumerate(new_state)
])
return (final_output, final_state)
Args:
time: Python int, the current time step
sequence_length: int32 `Tensor` vector of size [batch_size]
min_sequence_length: int32 `Tensor` scalar, min of sequence_length
max_sequence_length: int32 `Tensor` scalar, max of sequence_length
zero_output: `Tensor` vector of shape [output_size]
state: Either a single `Tensor` matrix of shape `[batch_size, state_size]`,
or a list/tuple of such tensors.
call_cell: lambda returning tuple of (new_output, new_state) where
new_output is a `Tensor` matrix of shape `[batch_size, output_size]`.
new_state is a `Tensor` matrix of shape `[batch_size, state_size]`.
state_size: The `cell.state_size` associated with the state.
skip_conditionals: Python bool, whether to skip using the conditional
calculations. This is useful for `dynamic_rnn`, where the input tensor
matches `max_sequence_length`, and using conditionals just slows
everything down.
Returns:
A tuple of (`final_output`, `final_state`) as given by the pseudocode above:
final_output is a `Tensor` matrix of shape [batch_size, output_size]
final_state is either a single `Tensor` matrix, or a tuple of such
matrices (matching length and shapes of input `state`).
Raises:
ValueError: If the cell returns a state tuple whose length does not match
that returned by `state_size`.
"""
# Convert state to a list for ease of use
flat_state = nest.flatten(state)
flat_zero_output = nest.flatten(zero_output)
def _copy_one_through(output, new_output):
copy_cond = (time >= sequence_length)
return _on_device(
lambda: array_ops.where(copy_cond, output, new_output),
device=new_output.op.device)
def _copy_some_through(flat_new_output, flat_new_state):
# Use broadcasting select to determine which values should get
# the previous state & zero output, and which values should get
# a calculated state & output.
flat_new_output = [
_copy_one_through(zero_output, new_output)
for zero_output, new_output in zip(flat_zero_output, flat_new_output)]
flat_new_state = [
_copy_one_through(state, new_state)
for state, new_state in zip(flat_state, flat_new_state)]
return flat_new_output + flat_new_state
def _maybe_copy_some_through():
"""Run RNN step. Pass through either no or some past state."""
new_output, new_state = call_cell()
nest.assert_same_structure(state, new_state)
flat_new_state = nest.flatten(new_state)
flat_new_output = nest.flatten(new_output)
return control_flow_ops.cond(
# if t < min_seq_len: calculate and return everything
time < min_sequence_length, lambda: flat_new_output + flat_new_state,
# else copy some of it through
lambda: _copy_some_through(flat_new_output, flat_new_state))
# TODO(ebrevdo): skipping these conditionals may cause a slowdown,
# but benefits from removing cond() and its gradient. We should
# profile with and without this switch here.
if skip_conditionals:
# Instead of using conditionals, perform the selective copy at all time
# steps. This is faster when max_seq_len is equal to the number of unrolls
# (which is typical for dynamic_rnn).
new_output, new_state = call_cell()
nest.assert_same_structure(state, new_state)
new_state = nest.flatten(new_state)
new_output = nest.flatten(new_output)
final_output_and_state = _copy_some_through(new_output, new_state)
else:
empty_update = lambda: flat_zero_output + flat_state
final_output_and_state = control_flow_ops.cond(
# if t >= max_seq_len: copy all state through, output zeros
time >= max_sequence_length, empty_update,
# otherwise calculation is required: copy some or all of it through
_maybe_copy_some_through)
if len(final_output_and_state) != len(flat_zero_output) + len(flat_state):
raise ValueError("Internal error: state and output were not concatenated "
"correctly.")
final_output = final_output_and_state[:len(flat_zero_output)]
final_state = final_output_and_state[len(flat_zero_output):]
for output, flat_output in zip(final_output, flat_zero_output):
output.set_shape(flat_output.get_shape())
for substate, flat_substate in zip(final_state, flat_state):
substate.set_shape(flat_substate.get_shape())
final_output = nest.pack_sequence_as(
structure=zero_output, flat_sequence=final_output)
final_state = nest.pack_sequence_as(
structure=state, flat_sequence=final_state)
return final_output, final_state
def _reverse_seq(input_seq, lengths):
"""Reverse a list of Tensors up to specified lengths.
Args:
input_seq: Sequence of seq_len tensors of dimension (batch_size, n_features)
or nested tuples of tensors.
lengths: A `Tensor` of dimension batch_size, containing lengths for each
sequence in the batch. If "None" is specified, simply reverses
the list.
Returns:
time-reversed sequence
"""
if lengths is None:
return list(reversed(input_seq))
flat_input_seq = tuple(nest.flatten(input_) for input_ in input_seq)
flat_results = [[] for _ in range(len(input_seq))]
for sequence in zip(*flat_input_seq):
input_shape = tensor_shape.unknown_shape(
ndims=sequence[0].get_shape().ndims)
for input_ in sequence:
input_shape.merge_with(input_.get_shape())
input_.set_shape(input_shape)
# Join into (time, batch_size, depth)
s_joined = array_ops.stack(sequence)
# TODO(schuster, ebrevdo): Remove cast when reverse_sequence takes int32
if lengths is not None:
lengths = math_ops.to_int64(lengths)
# Reverse along dimension 0
s_reversed = array_ops.reverse_sequence(s_joined, lengths, 0, 1)
# Split again into list
result = array_ops.unstack(s_reversed)
for r, flat_result in zip(result, flat_results):
r.set_shape(input_shape)
flat_result.append(r)
results = [nest.pack_sequence_as(structure=input_, flat_sequence=flat_result)
for input_, flat_result in zip(input_seq, flat_results)]
return results
def bidirectional_dynamic_rnn(cell_fw, cell_bw, inputs, sequence_length=None,
initial_state_fw=None, initial_state_bw=None,
dtype=None, parallel_iterations=None,
swap_memory=False, time_major=False, scope=None):
"""Creates a dynamic version of bidirectional recurrent neural network.
Similar to the unidirectional case above (rnn) but takes input and builds
independent forward and backward RNNs. The input_size of forward and
backward cell must match. The initial state for both directions is zero by
default (but can be set optionally) and no intermediate states are ever
returned -- the network is fully unrolled for the given (passed in)
length(s) of the sequence(s) or completely unrolled if length(s) is not
given.
Args:
cell_fw: An instance of RNNCell, to be used for forward direction.
cell_bw: An instance of RNNCell, to be used for backward direction.
inputs: The RNN inputs.
If time_major == False (default), this must be a tensor of shape:
`[batch_size, max_time, input_size]`.
If time_major == True, this must be a tensor of shape:
`[max_time, batch_size, input_size]`.
sequence_length: An int32/int64 vector, size `[batch_size]`,
containing the actual lengths for each of the sequences.
initial_state_fw: (optional) An initial state for the forward RNN.
This must be a tensor of appropriate type and shape
`[batch_size, cell_fw.state_size]`.
If `cell_fw.state_size` is a tuple, this should be a tuple of
tensors having shapes `[batch_size, s] for s in cell_fw.state_size`.
initial_state_bw: (optional) Same as for `initial_state_fw`, but using
the corresponding properties of `cell_bw`.
dtype: (optional) The data type for the initial states and expected output.
Required if initial_states are not provided or RNN states have a
heterogeneous dtype.
parallel_iterations: (Default: 32). The number of iterations to run in
parallel. Those operations which do not have any temporal dependency
and can be run in parallel, will be. This parameter trades off
time for space. Values >> 1 use more memory but take less time,
while smaller values use less memory but computations take longer.
swap_memory: Transparently swap the tensors produced in forward inference
but needed for back prop from GPU to CPU. This allows training RNNs
which would typically not fit on a single GPU, with very minimal (or no)
performance penalty.
time_major: The shape format of the `inputs` and `outputs` Tensors.
If true, these `Tensors` must be shaped `[max_time, batch_size, depth]`.
If false, these `Tensors` must be shaped `[batch_size, max_time, depth]`.
Using `time_major = True` is a bit more efficient because it avoids
transposes at the beginning and end of the RNN calculation. However,
most TensorFlow data is batch-major, so by default this function
accepts input and emits output in batch-major form.
dtype: (optional) The data type for the initial state. Required if
either of the initial states are not provided.
scope: VariableScope for the created subgraph; defaults to
"bidirectional_rnn"
Returns:
A tuple (outputs, output_states) where:
outputs: A tuple (output_fw, output_bw) containing the forward and
the backward rnn output `Tensor`.
If time_major == False (default),
output_fw will be a `Tensor` shaped:
`[batch_size, max_time, cell_fw.output_size]`
and output_bw will be a `Tensor` shaped:
`[batch_size, max_time, cell_bw.output_size]`.
If time_major == True,
output_fw will be a `Tensor` shaped:
`[max_time, batch_size, cell_fw.output_size]`
and output_bw will be a `Tensor` shaped:
`[max_time, batch_size, cell_bw.output_size]`.
It returns a tuple instead of a single concatenated `Tensor`, unlike
in the `bidirectional_rnn`. If the concatenated one is preferred,
the forward and backward outputs can be concatenated as
`tf.concat(outputs, 2)`.
output_states: A tuple (output_state_fw, output_state_bw) containing
the forward and the backward final states of bidirectional rnn.
Raises:
TypeError: If `cell_fw` or `cell_bw` is not an instance of `RNNCell`.
"""
# pylint: disable=protected-access
if not isinstance(cell_fw, rnn_cell_impl._RNNCell):
raise TypeError("cell_fw must be an instance of RNNCell")
if not isinstance(cell_bw, rnn_cell_impl._RNNCell):
raise TypeError("cell_bw must be an instance of RNNCell")
# pylint: enable=protected-access
with vs.variable_scope(scope or "bidirectional_rnn"):
# Forward direction
with vs.variable_scope("fw") as fw_scope:
output_fw, output_state_fw = dynamic_rnn(
cell=cell_fw, inputs=inputs, sequence_length=sequence_length,
initial_state=initial_state_fw, dtype=dtype,
parallel_iterations=parallel_iterations, swap_memory=swap_memory,
time_major=time_major, scope=fw_scope)
# Backward direction
if not time_major:
time_dim = 1
batch_dim = 0
else:
time_dim = 0
batch_dim = 1
with vs.variable_scope("bw") as bw_scope:
inputs_reverse = array_ops.reverse_sequence(
input=inputs, seq_lengths=sequence_length,
seq_dim=time_dim, batch_dim=batch_dim)
tmp, output_state_bw = dynamic_rnn(
cell=cell_bw, inputs=inputs_reverse, sequence_length=sequence_length,
initial_state=initial_state_bw, dtype=dtype,
parallel_iterations=parallel_iterations, swap_memory=swap_memory,
time_major=time_major, scope=bw_scope)
output_bw = array_ops.reverse_sequence(
input=tmp, seq_lengths=sequence_length,
seq_dim=time_dim, batch_dim=batch_dim)
outputs = (output_fw, output_bw)
output_states = (output_state_fw, output_state_bw)
return (outputs, output_states)


def dynamic_rnn(cell, inputs, sequence_length=None, initial_state=None,
                dtype=None, parallel_iterations=None, swap_memory=False,
                time_major=False, scope=None):
"""Creates a recurrent neural network specified by RNNCell `cell`.
This function is functionally identical to the function `rnn` above, but
performs fully dynamic unrolling of `inputs`.
Unlike `rnn`, the input `inputs` is not a Python list of `Tensors`, one for
each frame. Instead, `inputs` may be a single `Tensor` where
the maximum time is either the first or second dimension (see the parameter
`time_major`). Alternatively, it may be a (possibly nested) tuple of
Tensors, each of them having matching batch and time dimensions.
The corresponding output is either a single `Tensor` having the same number
of time steps and batch size, or a (possibly nested) tuple of such tensors,
matching the nested structure of `cell.output_size`.
  The parameter `sequence_length` is optional and is used to copy through state
  and zero out outputs once past a batch element's sequence length. It is
  provided for correctness rather than performance, unlike in `rnn()`.
Args:
cell: An instance of RNNCell.
inputs: The RNN inputs.
If `time_major == False` (default), this must be a `Tensor` of shape:
`[batch_size, max_time, ...]`, or a nested tuple of such
elements.
If `time_major == True`, this must be a `Tensor` of shape:
`[max_time, batch_size, ...]`, or a nested tuple of such
elements.
This may also be a (possibly nested) tuple of Tensors satisfying
this property. The first two dimensions must match across all the inputs,
but otherwise the ranks and other shape components may differ.
In this case, input to `cell` at each time-step will replicate the
structure of these tuples, except for the time dimension (from which the
time is taken).
The input to `cell` at each time step will be a `Tensor` or (possibly
nested) tuple of Tensors each with dimensions `[batch_size, ...]`.
sequence_length: (optional) An int32/int64 vector sized `[batch_size]`.
initial_state: (optional) An initial state for the RNN.
If `cell.state_size` is an integer, this must be
a `Tensor` of appropriate type and shape `[batch_size, cell.state_size]`.
If `cell.state_size` is a tuple, this should be a tuple of
tensors having shapes `[batch_size, s] for s in cell.state_size`.
dtype: (optional) The data type for the initial state and expected output.
Required if initial_state is not provided or RNN state has a heterogeneous
dtype.
parallel_iterations: (Default: 32). The number of iterations to run in
parallel. Those operations which do not have any temporal dependency
and can be run in parallel, will be. This parameter trades off
time for space. Values >> 1 use more memory but take less time,
while smaller values use less memory but computations take longer.
swap_memory: Transparently swap the tensors produced in forward inference
but needed for back prop from GPU to CPU. This allows training RNNs
which would typically not fit on a single GPU, with very minimal (or no)
performance penalty.
time_major: The shape format of the `inputs` and `outputs` Tensors.
If true, these `Tensors` must be shaped `[max_time, batch_size, depth]`.
If false, these `Tensors` must be shaped `[batch_size, max_time, depth]`.
Using `time_major = True` is a bit more efficient because it avoids
transposes at the beginning and end of the RNN calculation. However,
most TensorFlow data is batch-major, so by default this function
accepts input and emits output in batch-major form.
scope: VariableScope for the created subgraph; defaults to "rnn".
Returns:
A pair (outputs, state) where:
outputs: The RNN output `Tensor`.
If time_major == False (default), this will be a `Tensor` shaped:
`[batch_size, max_time, cell.output_size]`.
If time_major == True, this will be a `Tensor` shaped:
`[max_time, batch_size, cell.output_size]`.
Note, if `cell.output_size` is a (possibly nested) tuple of integers
or `TensorShape` objects, then `outputs` will be a tuple having the
same structure as `cell.output_size`, containing Tensors having shapes
corresponding to the shape data in `cell.output_size`.
state: The final state. If `cell.state_size` is an int, this
will be shaped `[batch_size, cell.state_size]`. If it is a
`TensorShape`, this will be shaped `[batch_size] + cell.state_size`.
If it is a (possibly nested) tuple of ints or `TensorShape`, this will
be a tuple having the corresponding shapes.
Raises:
TypeError: If `cell` is not an instance of RNNCell.
ValueError: If inputs is None or an empty list.
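  For example, a minimal usage sketch (the placeholder names `batch_size`,
  `max_time`, `depth` and `num_units` are assumptions for illustration, not
  part of this API):
  ```python
  # Batch-major inputs: [batch_size, max_time, depth].
  inputs = tf.placeholder(tf.float32, [batch_size, max_time, depth])
  sequence_length = tf.placeholder(tf.int32, [batch_size])
  cell = tf.contrib.rnn.LSTMCell(num_units)
  # Past each element's sequence_length, outputs are zeroed and state is
  # copied through unchanged.
  outputs, state = tf.nn.dynamic_rnn(
      cell, inputs, sequence_length=sequence_length, dtype=tf.float32)
  ```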
"""
# pylint: disable=protected-access
if not isinstance(cell, rnn_cell_impl._RNNCell):
raise TypeError("cell must be an instance of RNNCell")
# pylint: enable=protected-access
# By default, time_major==False and inputs are batch-major: shaped
# [batch, time, depth]
# For internal calculations, we transpose to [time, batch, depth]
flat_input = nest.flatten(inputs)
if not time_major:
# (B,T,D) => (T,B,D)
flat_input = tuple(array_ops.transpose(input_, [1, 0, 2])
for input_ in flat_input)
parallel_iterations = parallel_iterations or 32
if sequence_length is not None:
sequence_length = math_ops.to_int32(sequence_length)
if sequence_length.get_shape().ndims not in (None, 1):
raise ValueError(
"sequence_length must be a vector of length batch_size, "
"but saw shape: %s" % sequence_length.get_shape())
sequence_length = array_ops.identity( # Just to find it in the graph.
sequence_length, name="sequence_length")
# Create a new scope in which the caching device is either
# determined by the parent scope, or is set to place the cached
# Variable using the same placement as for the rest of the RNN.
with vs.variable_scope(scope or "rnn") as varscope:
if varscope.caching_device is None:
varscope.set_caching_device(lambda op: op.device)
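    # Inputs are time-major at this point, so shape[1] of each input is the
    # batch dimension; verify that all inputs agree on it.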
input_shape = tuple(array_ops.shape(input_) for input_ in flat_input)
batch_size = input_shape[0][1]
for input_ in input_shape:
if input_[1].get_shape() != batch_size.get_shape():
raise ValueError("All inputs should have the same batch size")
if initial_state is not None:
state = initial_state
else:
if not dtype:
raise ValueError("If no initial_state is provided, dtype must be.")
state = cell.zero_state(batch_size, dtype)
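    # Helper: build a runtime assertion that x has the expected dynamic shape.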
def _assert_has_shape(x, shape):
x_shape = array_ops.shape(x)
packed_shape = array_ops.stack(shape)
return control_flow_ops.Assert(
math_ops.reduce_all(math_ops.equal(x_shape, packed_shape)),
["Expected shape for Tensor %s is " % x.name,
packed_shape, " but saw shape: ", x_shape])
if sequence_length is not None:
# Perform some shape validation
with ops.control_dependencies(
[_assert_has_shape(sequence_length, [batch_size])]):
sequence_length = array_ops.identity(
sequence_length, name="CheckSeqLen")
inputs = nest.pack_sequence_as(structure=inputs, flat_sequence=flat_input)
(outputs, final_state) = _dynamic_rnn_loop(
cell,
inputs,
state,
parallel_iterations=parallel_iterations,
swap_memory=swap_memory,
sequence_length=sequence_length,
dtype=dtype)
# Outputs of _dynamic_rnn_loop are always shaped [time, batch, depth].
# If we are performing batch-major calculations, transpose output back
# to shape [batch, time, depth]
if not time_major:
# (T,B,D) => (B,T,D)
flat_output = nest.flatten(outputs)
flat_output = [array_ops.transpose(output, [1, 0, 2])
for output in flat_output]
outputs = nest.pack_sequence_as(
structure=outputs, flat_sequence=flat_output)
return (outputs, final_state)


def _dynamic_rnn_loop(cell,
                      inputs,
                      initial_state,
                      parallel_iterations,
                      swap_memory,
                      sequence_length=None,
                      dtype=None):
"""Internal implementation of Dynamic RNN.
Args:
cell: An instance of RNNCell.
inputs: A `Tensor` of shape [time, batch_size, input_size], or a nested
tuple of such elements.
initial_state: A `Tensor` of shape `[batch_size, state_size]`, or if
`cell.state_size` is a tuple, then this should be a tuple of
tensors having shapes `[batch_size, s] for s in cell.state_size`.
parallel_iterations: Positive Python int.
swap_memory: A Python boolean
sequence_length: (optional) An `int32` `Tensor` of shape [batch_size].
dtype: (optional) Expected dtype of output. If not specified, inferred from
initial_state.
Returns:
Tuple `(final_outputs, final_state)`.
final_outputs:
A `Tensor` of shape `[time, batch_size, cell.output_size]`. If
`cell.output_size` is a (possibly nested) tuple of ints or `TensorShape`
      objects, then this returns a (possibly nested) tuple of Tensors matching
the corresponding shapes.
final_state:
A `Tensor`, or possibly nested tuple of Tensors, matching in length
and shapes to `initial_state`.
Raises:
ValueError: If the input depth cannot be inferred via shape inference
from the inputs.
"""
state = initial_state
assert isinstance(parallel_iterations, int), "parallel_iterations must be int"
state_size = cell.state_size
flat_input = nest.flatten(inputs)
flat_output_size = nest.flatten(cell.output_size)
# Construct an initial output
input_shape = array_ops.shape(flat_input[0])
time_steps = input_shape[0]
batch_size = input_shape[1]
inputs_got_shape = tuple(input_.get_shape().with_rank_at_least(3)
for input_ in flat_input)
const_time_steps, const_batch_size = inputs_got_shape[0].as_list()[:2]
for shape in inputs_got_shape:
if not shape[2:].is_fully_defined():
raise ValueError(
"Input size (depth of inputs) must be accessible via shape inference,"
" but saw value None.")
got_time_steps = shape[0].value
got_batch_size = shape[1].value
    if const_time_steps != got_time_steps:
      raise ValueError(
          "Time steps are not the same for all elements in the input batch.")
    if const_batch_size != got_batch_size:
      raise ValueError(
          "Batch size is not the same for all elements in the input.")
# Prepare dynamic conditional copying of state & output
def _create_zero_arrays(size):
size = _state_size_with_prefix(size, prefix=[batch_size])
return array_ops.zeros(
array_ops.stack(size), _infer_state_dtype(dtype, state))
flat_zero_output = tuple(_create_zero_arrays(output)
for output in flat_output_size)
zero_output = nest.pack_sequence_as(structure=cell.output_size,
flat_sequence=flat_zero_output)
if sequence_length is not None:
min_sequence_length = math_ops.reduce_min(sequence_length)
max_sequence_length = math_ops.reduce_max(sequence_length)
time = array_ops.constant(0, dtype=dtypes.int32, name="time")
with ops.name_scope("dynamic_rnn") as scope:
base_name = scope
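  # TensorArrays carry the per-time-step inputs and outputs of the while_loop
  # below; one array is created per flattened input/output structure element.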
def _create_ta(name, dtype):
return tensor_array_ops.TensorArray(dtype=dtype,
size=time_steps,
tensor_array_name=base_name + name)
output_ta = tuple(_create_ta("output_%d" % i,
_infer_state_dtype(dtype, state))
for i in range(len(flat_output_size)))
input_ta = tuple(_create_ta("input_%d" % i, flat_input[0].dtype)
for i in range(len(flat_input)))
input_ta = tuple(ta.unstack(input_)
for ta, input_ in zip(input_ta, flat_input))
def _time_step(time, output_ta_t, state):
"""Take a time step of the dynamic RNN.
Args:
time: int32 scalar Tensor.
output_ta_t: List of `TensorArray`s that represent the output.
state: nested tuple of vector tensors that represent the state.
Returns:
The tuple (time + 1, output_ta_t with updated flow, new_state).
"""
input_t = tuple(ta.read(time) for ta in input_ta)
# Restore some shape information
for input_, shape in zip(input_t, inputs_got_shape):
input_.set_shape(shape[1:])
input_t = nest.pack_sequence_as(structure=inputs, flat_sequence=input_t)
call_cell = lambda: cell(input_t, state)
if sequence_length is not None:
(output, new_state) = _rnn_step(
time=time,
sequence_length=sequence_length,
min_sequence_length=min_sequence_length,
max_sequence_length=max_sequence_length,
zero_output=zero_output,
state=state,
call_cell=call_cell,
state_size=state_size,
skip_conditionals=True)
else:
(output, new_state) = call_cell()
    # Flatten the output structure and write each element to its TensorArray.
output = nest.flatten(output)
output_ta_t = tuple(
ta.write(time, out) for ta, out in zip(output_ta_t, output))
return (time + 1, output_ta_t, new_state)
_, output_final_ta, final_state = control_flow_ops.while_loop(
cond=lambda time, *_: time < time_steps,
body=_time_step,
loop_vars=(time, output_ta, state),
parallel_iterations=parallel_iterations,
swap_memory=swap_memory)
  # Stack the per-time-step outputs from the TensorArrays into full Tensors.
final_outputs = tuple(ta.stack() for ta in output_final_ta)
# Restore some shape information
for output, output_size in zip(final_outputs, flat_output_size):
shape = _state_size_with_prefix(
output_size, prefix=[const_time_steps, const_batch_size])
output.set_shape(shape)
final_outputs = nest.pack_sequence_as(
structure=cell.output_size, flat_sequence=final_outputs)
return (final_outputs, final_state)


def raw_rnn(cell, loop_fn,
            parallel_iterations=None, swap_memory=False, scope=None):
"""Creates an `RNN` specified by RNNCell `cell` and loop function `loop_fn`.
**NOTE: This method is still in testing, and the API may change.**
This function is a more primitive version of `dynamic_rnn` that provides
more direct access to the inputs each iteration. It also provides more
control over when to start and finish reading the sequence, and
what to emit for the output.
For example, it can be used to implement the dynamic decoder of a seq2seq
model.
Instead of working with `Tensor` objects, most operations work with
`TensorArray` objects directly.
The operation of `raw_rnn`, in pseudo-code, is basically the following:
```python
time = tf.constant(0, dtype=tf.int32)
(finished, next_input, initial_state, _, loop_state) = loop_fn(
time=time, cell_output=None, cell_state=None, loop_state=None)
emit_ta = TensorArray(dynamic_size=True, dtype=initial_state.dtype)
state = initial_state
while not all(finished):
(output, cell_state) = cell(next_input, state)
(next_finished, next_input, next_state, emit, loop_state) = loop_fn(
time=time + 1, cell_output=output, cell_state=cell_state,
loop_state=loop_state)
# Emit zeros and copy forward state for minibatch entries that are finished.
state = tf.where(finished, state, next_state)
emit = tf.where(finished, tf.zeros_like(emit), emit)
emit_ta = emit_ta.write(time, emit)
    # Mark any newly finished minibatch entries as finished.
finished = tf.logical_or(finished, next_finished)
time += 1
return (emit_ta, state, loop_state)
```
with the additional properties that output and state may be (possibly nested)
tuples, as determined by `cell.output_size` and `cell.state_size`, and
as a result the final `state` and `emit_ta` may themselves be tuples.
A simple implementation of `dynamic_rnn` via `raw_rnn` looks like this:
```python
inputs = tf.placeholder(shape=(max_time, batch_size, input_depth),
dtype=tf.float32)
sequence_length = tf.placeholder(shape=(batch_size,), dtype=tf.int32)
inputs_ta = tf.TensorArray(dtype=tf.float32, size=max_time)
inputs_ta = inputs_ta.unstack(inputs)
cell = tf.contrib.rnn.LSTMCell(num_units)
def loop_fn(time, cell_output, cell_state, loop_state):
emit_output = cell_output # == None for time == 0
if cell_output is None: # time == 0
next_cell_state = cell.zero_state(batch_size, tf.float32)
else:
next_cell_state = cell_state
elements_finished = (time >= sequence_length)
finished = tf.reduce_all(elements_finished)
next_input = tf.cond(
finished,
lambda: tf.zeros([batch_size, input_depth], dtype=tf.float32),
lambda: inputs_ta.read(time))
next_loop_state = None
return (elements_finished, next_input, next_cell_state,
emit_output, next_loop_state)
outputs_ta, final_state, _ = raw_rnn(cell, loop_fn)
outputs = outputs_ta.stack()
```
Args:
cell: An instance of RNNCell.
loop_fn: A callable that takes inputs
`(time, cell_output, cell_state, loop_state)`
and returns the tuple
`(finished, next_input, next_cell_state, emit_output, next_loop_state)`.
Here `time` is an int32 scalar `Tensor`, `cell_output` is a
`Tensor` or (possibly nested) tuple of tensors as determined by
`cell.output_size`, and `cell_state` is a `Tensor`
or (possibly nested) tuple of tensors, as determined by the `loop_fn`
on its first call (and should match `cell.state_size`).
The outputs are: `finished`, a boolean `Tensor` of
shape `[batch_size]`, `next_input`: the next input to feed to `cell`,
`next_cell_state`: the next state to feed to `cell`,
and `emit_output`: the output to store for this iteration.
Note that `emit_output` should be a `Tensor` or (possibly nested)
tuple of tensors with shapes and structure matching `cell.output_size`
      and `cell_output` above. The parameter `cell_state` and output
      `next_cell_state` may each be a single `Tensor` or a (possibly nested)
      tuple of tensors. The parameter `loop_state` and output
      `next_loop_state` may each be a single object or a (possibly nested)
      tuple of `Tensor` and `TensorArray` objects. This last parameter
may be ignored by `loop_fn` and the return value may be `None`. If it
is not `None`, then the `loop_state` will be propagated through the RNN
loop, for use purely by `loop_fn` to keep track of its own state.
The `next_loop_state` parameter returned may be `None`.
The first call to `loop_fn` will be `time = 0`, `cell_output = None`,
`cell_state = None`, and `loop_state = None`. For this call:
The `next_cell_state` value should be the value with which to initialize
the cell's state. It may be a final state from a previous RNN or it
may be the output of `cell.zero_state()`. It should be a
(possibly nested) tuple structure of tensors.
If `cell.state_size` is an integer, this must be
a `Tensor` of appropriate type and shape `[batch_size, cell.state_size]`.
If `cell.state_size` is a `TensorShape`, this must be a `Tensor` of
appropriate type and shape `[batch_size] + cell.state_size`.
If `cell.state_size` is a (possibly nested) tuple of ints or
`TensorShape`, this will be a tuple having the corresponding shapes.
The `emit_output` value may be either `None` or a (possibly nested)
tuple structure of tensors, e.g.,
`(tf.zeros(shape_0, dtype=dtype_0), tf.zeros(shape_1, dtype=dtype_1))`.
If this first `emit_output` return value is `None`,
then the `emit_ta` result of `raw_rnn` will have the same structure and
dtypes as `cell.output_size`. Otherwise `emit_ta` will have the same
structure, shapes (prepended with a `batch_size` dimension), and dtypes
as `emit_output`. The actual values returned for `emit_output` at this
initializing call are ignored. Note, this emit structure must be
consistent across all time steps.
parallel_iterations: (Default: 32). The number of iterations to run in
parallel. Those operations which do not have any temporal dependency
and can be run in parallel, will be. This parameter trades off
time for space. Values >> 1 use more memory but take less time,
while smaller values use less memory but computations take longer.
swap_memory: Transparently swap the tensors produced in forward inference
but needed for back prop from GPU to CPU. This allows training RNNs
which would typically not fit on a single GPU, with very minimal (or no)
performance penalty.
scope: VariableScope for the created subgraph; defaults to "rnn".
Returns:
A tuple `(emit_ta, final_state, final_loop_state)` where:
`emit_ta`: The RNN output `TensorArray`.
       If `loop_fn` returns a (possibly nested) set of Tensors for
       `emit_output` during initialization (inputs `time = 0`,
       `cell_output = None`, and `loop_state = None`), then `emit_ta` will
       have the same structure, dtypes, and shapes as `emit_output`.
If `loop_fn` returns `emit_output = None` during this call,
the structure of `cell.output_size` is used:
If `cell.output_size` is a (possibly nested) tuple of integers
or `TensorShape` objects, then `emit_ta` will be a tuple having the
same structure as `cell.output_size`, containing TensorArrays whose
elements' shapes correspond to the shape data in `cell.output_size`.
`final_state`: The final cell state. If `cell.state_size` is an int, this
will be shaped `[batch_size, cell.state_size]`. If it is a
`TensorShape`, this will be shaped `[batch_size] + cell.state_size`.
If it is a (possibly nested) tuple of ints or `TensorShape`, this will
be a tuple having the corresponding shapes.
`final_loop_state`: The final loop state as returned by `loop_fn`.
Raises:
TypeError: If `cell` is not an instance of RNNCell, or `loop_fn` is not
a `callable`.
"""
# pylint: disable=protected-access
if not isinstance(cell, rnn_cell_impl._RNNCell):
raise TypeError("cell must be an instance of RNNCell")
# pylint: enable=protected-access
if not callable(loop_fn):
raise TypeError("loop_fn must be a callable")
parallel_iterations = parallel_iterations or 32
# Create a new scope in which the caching device is either
# determined by the parent scope, or is set to place the cached
# Variable using the same placement as for the rest of the RNN.
with vs.variable_scope(scope or "rnn") as varscope:
if varscope.caching_device is None:
varscope.set_caching_device(lambda op: op.device)
time = constant_op.constant(0, dtype=dtypes.int32)
(elements_finished, next_input, initial_state, emit_structure,
init_loop_state) = loop_fn(
time, None, None, None) # time, cell_output, cell_state, loop_state
flat_input = nest.flatten(next_input)
# Need a surrogate loop state for the while_loop if none is available.
loop_state = (init_loop_state if init_loop_state is not None
else constant_op.constant(0, dtype=dtypes.int32))
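    # Determine the batch size statically when possible; otherwise fall back
    # to reading it dynamically from the first flattened input.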
input_shape = [input_.get_shape() for input_ in flat_input]
static_batch_size = input_shape[0][0]
for input_shape_i in input_shape:
# Static verification that batch sizes all match
static_batch_size.merge_with(input_shape_i[0])
batch_size = static_batch_size.value
if batch_size is None:
batch_size = array_ops.shape(flat_input[0])[0]
nest.assert_same_structure(initial_state, cell.state_size)
state = initial_state
flat_state = nest.flatten(state)
flat_state = [ops.convert_to_tensor(s) for s in flat_state]
state = nest.pack_sequence_as(structure=state,
flat_sequence=flat_state)
if emit_structure is not None:
flat_emit_structure = nest.flatten(emit_structure)
flat_emit_size = [emit.get_shape() for emit in flat_emit_structure]
flat_emit_dtypes = [emit.dtype for emit in flat_emit_structure]
else:
emit_structure = cell.output_size
flat_emit_size = nest.flatten(emit_structure)
flat_emit_dtypes = [flat_state[0].dtype] * len(flat_emit_size)
flat_emit_ta = [
tensor_array_ops.TensorArray(
dtype=dtype_i, dynamic_size=True, size=0, name="rnn_output_%d" % i)
for i, dtype_i in enumerate(flat_emit_dtypes)]
emit_ta = nest.pack_sequence_as(structure=emit_structure,
flat_sequence=flat_emit_ta)
flat_zero_emit = [
array_ops.zeros(
_state_size_with_prefix(size_i, prefix=[batch_size]),
dtype_i)
for size_i, dtype_i in zip(flat_emit_size, flat_emit_dtypes)]
zero_emit = nest.pack_sequence_as(structure=emit_structure,
flat_sequence=flat_zero_emit)
def condition(unused_time, elements_finished, *_):
return math_ops.logical_not(math_ops.reduce_all(elements_finished))
def body(time, elements_finished, current_input,
emit_ta, state, loop_state):
"""Internal while loop body for raw_rnn.
Args:
time: time scalar.
elements_finished: batch-size vector.
current_input: possibly nested tuple of input tensors.
emit_ta: possibly nested tuple of output TensorArrays.
state: possibly nested tuple of state tensors.
loop_state: possibly nested tuple of loop state tensors.
Returns:
Tuple having the same size as Args but with updated values.
"""
(next_output, cell_state) = cell(current_input, state)
nest.assert_same_structure(state, cell_state)
nest.assert_same_structure(cell.output_size, next_output)
next_time = time + 1
(next_finished, next_input, next_state, emit_output,
next_loop_state) = loop_fn(
next_time, next_output, cell_state, loop_state)
nest.assert_same_structure(state, next_state)
nest.assert_same_structure(current_input, next_input)
nest.assert_same_structure(emit_ta, emit_output)
# If loop_fn returns None for next_loop_state, just reuse the
# previous one.
loop_state = loop_state if next_loop_state is None else next_loop_state
def _copy_some_through(current, candidate):
"""Copy some tensors through via array_ops.where."""
current_flat = nest.flatten(current)
candidate_flat = nest.flatten(candidate)
# pylint: disable=g-long-lambda,cell-var-from-loop
result_flat = [
_on_device(
lambda: array_ops.where(
elements_finished, current_i, candidate_i),
device=candidate_i.op.device)
for (current_i, candidate_i) in zip(current_flat, candidate_flat)]
# pylint: enable=g-long-lambda,cell-var-from-loop
return nest.pack_sequence_as(
structure=current, flat_sequence=result_flat)
emit_output = _copy_some_through(zero_emit, emit_output)
next_state = _copy_some_through(state, next_state)
emit_output_flat = nest.flatten(emit_output)
emit_ta_flat = nest.flatten(emit_ta)
elements_finished = math_ops.logical_or(elements_finished, next_finished)
emit_ta_flat = [
ta.write(time, emit)
for (ta, emit) in zip(emit_ta_flat, emit_output_flat)]
emit_ta = nest.pack_sequence_as(
structure=emit_structure, flat_sequence=emit_ta_flat)
return (next_time, elements_finished, next_input,
emit_ta, next_state, loop_state)
returned = control_flow_ops.while_loop(
condition, body, loop_vars=[
time, elements_finished, next_input,
emit_ta, state, loop_state],
parallel_iterations=parallel_iterations,
swap_memory=swap_memory)
(emit_ta, final_state, final_loop_state) = returned[-3:]
if init_loop_state is None:
final_loop_state = None
return (emit_ta, final_state, final_loop_state)

# ==== End of file: tensorflow/python/ops/rnn.py (gameover27/hiptensorflow, Apache-2.0, 44,670 bytes, Python) ====

"""
Module containing NetPyNE metadata
"""
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
metadata = {
# ---------------------------------------------------------------------------------------------------------------------
# netParams
# ---------------------------------------------------------------------------------------------------------------------
"netParams": {
"label": "Network Parameters",
"suggestions": "",
"help": "",
"hintText": "",
"children": {
"popParams": {
"label": "Population Parameters",
"suggestions": "",
"help": "",
"hintText": "",
"children": {
"cellType": {
"label": "Cell type",
"suggestions": "",
"help": "Arbitrary cell type attribute/tag assigned to all cells in this population; can be used as condition to apply specific cell properties. e.g. 'Pyr' (for pyramidal neurons) or 'FS' (for fast-spiking interneurons)",
"hintText": "",
"type": "str"
},
"numCells": {
"label": "Number of cells",
"suggestions": "",
"help": "The total number of cells in this population.",
"hintText": "number of cells",
"type": "int"
},
"density": {
"label": "Cell density (neurons/mm^3)",
"suggestions": "",
"help": "The cell density in neurons/mm3. The volume occupied by each population can be customized (see xRange, yRange and zRange); otherwise the full network volume will be used (defined in netParams: sizeX, sizeY, sizeZ). density can be expressed as a function of normalized location (xnorm, ynorm or znorm), by providing a string with the variable and any common Python mathematical operators/functions. e.g. '1e5 * exp(-ynorm/2)'. ",
"hintText": "density in neurons/mm3",
"type": "str"
},
"gridSpacing": {
"label": "Grid spacing (um)",
"suggestions": "",
"help": "Fixed grid spacing between cells (in um). Cells will be placed in a grid, with the total number of cells be determined based on spacing and sizeX, sizeY, sizeZ. e.g. a spacing of 20 with sizeX=sizeY=sizeZ=100 will lead to 5*5*5=125 cells.",
"hintText": "fixed grid spacing",
"type": "int"
},
"cellModel": {
"label": "Cell model",
"help": "Can be either 1) an arbitrary cell model attribute/tag assigned to all cells in this population, and used later as a condition to apply specific cell properties. e.g. 'HH' (standard Hodkgin-Huxley type cell model) or 'Izhi2007' (Izhikevich point neuron model), 2) a point process artificial cell, with its parameters defined directly in this population entry, i.e. no need for cell propoerties (e.g. 'NetStim', VecStim', 'IntFire1')",
"suggestions": [
"VecStim",
"NetStim",
"IntFire1"
],
"type": "str"
},
"xRange": {
"label": "X-axis range (um)",
"help": "Range of neuron positions in x-axis (horizontal length), specified as a 2-element list [min, max] using absolute values in um (e.g.[100, 200]).",
"suggestions": "",
"hintText": "",
"type": "list(float)"
},
"xnormRange": {
"label": "X-axis normalized range (0-1)",
"help": "Range of neuron positions in x-axis (horizontal length), specified as a 2-element list [min, max] using normalized values between 0 and 1 as fraction of sizeX (e.g.[0.1,0.2]).",
"suggestions": "",
"hintText": "",
"default": [
0,
1
],
"type": "list(float)"
},
"yRange": {
"label": "Y-axis range (um)",
"help": "Range of neuron positions in y-axis (vertical height=cortical depth), specified as 2-element list [min, max] using absolute values in um (e.g.[100,200]).",
"suggestions": "",
"hintText": "",
"type": "list(float)"
},
"ynormRange": {
"label": "Y-axis normalized range (0-1)",
"help": "Range of neuron positions in y-axis (vertical height=cortical depth), specified as a 2-element list [min, max] using normalized values between 0 and 1 as fraction of sizeY (e.g.[0.1,0.2]).",
"suggestions": "",
"hintText": "",
"type": "list(float)"
},
"zRange": {
"label": "Z-axis range (um)",
"help": "Range of neuron positions in z-axis (horizontal depth), specified as a 2-element list [min, max] using absolute value in um (e.g.[100,200]).",
"suggestions": "",
"hintText": "",
"type": "list(float)"
},
"znormRange": {
"label": "Z-axis normalized range (0-1)",
"help": "Range of neuron positions in z-axis (horizontal depth), specified as a 2-element list [min, max] using normalized values between 0 and 1 as fraction of sizeZ (e.g.[0.1,0.2]).",
"suggestions": "",
"hintText": "",
"type": "list(float)"
},
"interval": {
"label": "Spike interval (ms)",
"help": "Spike interval in ms.",
"suggestions": "",
"hintText": "50",
"type": "float"
},
"rate": {
"label": "Firing rate (Hz)",
"help": "Firing rate in Hz (note this is the inverse of the NetStim interval property).",
"suggestions": "",
"hintText": "",
"type": "float"
},
"noise": {
"label": "Noise fraction (0-1)",
"help": "Fraction of noise in NetStim (0 = deterministic; 1 = completely random).",
"suggestions": "",
"hintText": "0.5",
"type": "list(float)"
},
"start": {
"label": "Start time (ms)",
"help": "Time of first spike in ms (default = 0).",
"suggestions": "",
"hintText": "0",
"type": "list(float)"
},
"number": {
"label": "Max number of spikes",
"help": "Max number of spikes generated (default = 1e12).",
"suggestions": "",
"hintText": "",
"type": "list(float)"
},
"seed": {
"label": "Randomizer seed (optional)",
"help": " Seed for randomizer (optional; defaults to value set in simConfig.seeds['stim'])",
"suggestions": "",
"hintText": "",
"type": "list(float)"
},
"spkTimes": {
"label": "Spike times",
"help": "List of spike times (only for 'VecStim') e.g. [1, 10, 40, 50], range(1,500,10), or any variable containing a Python list.",
"suggestions": "",
"hintText": "",
"type": "list(float)"
},
"pulses": {
"label": "Pulses",
"help": "List of spiking pulses (only for 'VecStim'); each item includes the start (ms), end (ms), rate (Hz), and noise (0 to 1) pulse parameters. ",
"suggestions": "",
"hintText": "",
"type": "list(float)"
}
}
},
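            # Illustrative usage in a model script (the label 'E' and all
            # values are hypothetical, not part of this metadata):
            #   netParams.popParams['E'] = {'cellType': 'PYR', 'numCells': 20,
            #                               'cellModel': 'HH'}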
"scale": {
"label": "scale factor",
"help": "Scale factor multiplier for number of cells (default: 1)",
"suggestions": "",
"hintText": "",
"default": 1,
"type": "float"
},
"shape": {
"label": "network shape",
"help": "Shape of network: 'cuboid', 'cylinder' or 'ellipsoid' (default: 'cuboid')",
"suggestions": "",
"hintText": "",
"options": [
"cuboid",
"cylinder",
"ellipsoid"
],
"default": "cuboid",
"type": "str"
},
"sizeX": {
"label": "x-dimension",
"help": "x-dimension (horizontal length) network size in um (default: 100)",
"suggestions": "",
"hintText": "",
"default": 100,
"type": "float"
},
"sizeY": {
"label": "y-dimension",
"help": "y-dimension (horizontal length) network size in um (default: 100)",
"suggestions": "",
"hintText": "",
"default": 100,
"type": "float"
},
"sizeZ": {
"label": "z-dimension",
"help": "z-dimension (horizontal length) network size in um (default: 100)",
"suggestions": "",
"hintText": "",
"default": 100,
"type": "float"
},
"rotateCellsRandomly": {
"label": "random rotation",
"help": "Random rotation of cells around y-axis [min,max] radians, e.g. [0, 3.0] (default: False)",
"suggestions": "",
"hintText": "",
"type": "list(float)"
},
"defaultWeight": {
"label": "default weight connection",
"help": "Default connection weight (default: 1)",
"suggestions": "",
"hintText": "",
"default": 1,
"type": "float"
},
"defaultDelay": {
"label": "default delay",
"help": "Default connection delay, in ms (default: 1)",
"suggestions": "",
"hintText": "",
"default": 1,
"type": "float"
},
"propVelocity": {
"label": "conduction velocity",
"help": "Conduction velocity in um/ms (e.g. 500 um/ms = 0.5 m/s) (default: 500)",
"suggestions": "",
"hintText": "",
"default": 500,
"type": "float"
},
"scaleConnWeight": {
"label": "connection weight scale factor",
"help": "Connection weight scale factor (excludes NetStims) (default: 1)",
"suggestions": "",
"hintText": "",
"default": 1,
"type": "float"
},
"scaleConnWeightNetStims": {
"label": "connection weight scale factor for NetStims",
"help": "Connection weight scale factor for NetStims (default: 1)",
"suggestions": "",
"hintText": "",
"default": 1,
"type": "float"
},
"scaleConnWeightModels": {
"label": "Connection weight scale factor for each cell model",
"help": "Connection weight scale factor for each cell model, e.g. {'HH': 0.1, 'Izhi': 0.2} (default: {})",
"suggestions": "",
"hintText": "",
"type": "dict"
},
"popTagsCopiedToCells": {
"label": "",
"help": "List of tags that will be copied from the population to the cells (default: ['pop', 'cellModel', 'cellType'])}",
"suggestions": "",
"hintText": "",
"type": "list(float)"
},
# ---------------------------------------------------------------------------------------------------------------------
# netParams.cellParams
# ---------------------------------------------------------------------------------------------------------------------
"cellParams": {
"label": "Cell Parameters",
"suggestions": "",
"help": "",
"hintText": "",
"children": {
"conds": {
"label": "Conds",
"suggestions": "",
"help": "",
"hintText": "",
"children": {
"pop": {
"label": "Population",
"help": "Apply the cell rule only to cells belonging to this population (or list of populations).",
"suggestions": "",
"hintText": "",
"type": "list(str)"
},
"cellType": {
"label": "Cell type",
"suggestions": "",
"help": "Apply the cell rule only to cells with this cell type attribute/tag.",
"hintText": "",
"type": "list(str)"
},
"cellModel": {
"label": "Cell model",
"suggestions": "",
"help": "Apply the cell rule only to cells with this cell model attribute/tag.",
"hintText": "",
"type": "list(str)"
},
"x": {
"label": "Range of x-axis locations",
"suggestions": "",
"help": "Apply the cell rule only to cells within these x-axis locations.",
"hintText": ""
},
"y": {
"label": "Range of y-axis locations",
"suggestions": "",
"help": "Apply the cell rule only to cells within these y-axis locations.",
"hintText": ""
},
"z": {
"label": "Range of z-axis locations",
"suggestions": "",
"help": "Apply the cell rule only to cells within these z-axis locations.",
"hintText": ""
},
"xnorm": {
"label": "Range of normalized x-axis locations",
"suggestions": "",
"help": "Apply the cell rule only to cells within these normalized x-axis locations.",
"hintText": ""
},
"ynorm": {
"label": "Range of normalized y-axis locations",
"suggestions": "",
"help": "Apply the cell rule only to cells within these normalized y-axis locations.",
"hintText": ""
},
"znorm": {
"label": "Range of normalized z-axis locations",
"suggestions": "",
"help": "Apply the cell rule only to cells within these normalized z-axis locations.",
"hintText": ""
}
}
},
"secs": {
"label": "Sections",
"suggestions": "",
"help": "",
"hintText": "",
"children": {
"geom": {
"label": "Cell geometry",
"suggestions": "",
"help": "",
"hintText": "",
"children": {
"diam": {
"label": "Diameter (um)",
"default": 10,
"suggestions": "",
"help": "",
"hintText": "10",
"type": "float"
},
"L": {
"label": "Length (um)",
"default": 50,
"suggestions": "",
"help": "",
"hintText": "50",
"type": "float"
},
"Ra": {
"label": "Axial resistance, Ra (ohm-cm)",
"default": 100,
"suggestions": "",
"help": "",
"hintText": "100",
"type": "float"
},
"cm": {
"label": "Membrane capacitance, cm (uF/cm2)",
"suggestions": "",
"help": "",
"hintText": "1",
"type": "float"
},
"pt3d": {
"label": "3D points",
"suggestions": "",
"help": "",
"hintText": "",
"type": "list(list(float))"
},
"nseg": {
"label": "Number of segments, nseg",
"default": 1,
"suggestions": "",
"help": "",
"hintText": "1",
"type": "float"
}
},
"mechs": {
"label": "Mechanisms",
"help": "Dictionary of density/distributed mechanisms, including the name of the mechanism (e.g. hh or pas) and a list of properties of the mechanism (e.g. {'g': 0.003, 'e': -70}).",
"suggestions": "",
"hintText": "",
"type": "float"
},
"ions": {
"label": "Ions",
"help": "Dictionary of ions, including the name of the ion (e.g. hh or pas) and a list of properties of the ion (e.g. {'e': -70}).",
"suggestions": "",
"hintText": ""
},
"pointps": {
"label": "Point processes",
"help": "Dictionary of point processes (excluding synaptic mechanisms). The key contains an arbitrary label (e.g. 'Izhi') The value contains a dictionary with the point process properties (e.g. {'mod':'Izhi2007a', 'a':0.03, 'b':-2, 'c':-50, 'd':100, 'celltype':1}).",
"suggestions": "",
"hintText": "",
"children": {
"mod": {
"label": "Point process name",
"help": "The name of the NEURON mechanism, e.g. 'Izhi2007a'",
"suggestions": "",
"hintText": "",
"type": "float"
},
"loc": {
"label": "Location (0-1)",
"help": "Section location where to place synaptic mechanism, e.g. 1.0, default=0.5.",
"suggestions": "",
"hintText": "",
"type": "float"
},
"vref": {
"label": "Point process variable for voltage (optional)",
"help": "Internal mechanism variable containing the cell membrane voltage, e.g. 'V'.",
"suggestions": "",
"hintText": "",
"type": "float"
},
"synList": {
"label": "Point process list of synapses (optional)",
"help": "list of internal mechanism synaptic mechanism labels, e.g. ['AMPA', 'NMDA', 'GABAB'].",
"suggestions": "",
"hintText": "",
"type": "float"
}
},
"vinit": {
"label": "Initial membrance voltage, vinit (mV)",
"help": "(optional) Initial membrane voltage (in mV) of the section (default: -65).e.g. cellRule['secs']['soma']['vinit'] = -72",
"suggestions": "",
"hintText": ""
},
"spikeGenLoc": {
"label": "Spike generation location (0-1)",
"help": "(optional) Indicates that this section is responsible for spike generation (instead of the default 'soma'), and provides the location (segment) where spikes are generated.e.g. cellRule['secs']['axon']['spikeGenLoc'] = 1.0.",
"suggestions": "",
"hintText": ""
},
"threshold": {
"label": "Spike threshold voltage (mV)",
"help": "(optional) Threshold voltage (in mV) used to detect a spike originating in this section of the cell. If omitted, defaults to netParams.defaultThreshold = 10.0.e.g. cellRule['secs']['soma']['threshold'] = 5.0.",
"suggestions": "",
"hintText": ""
}
},
"secLists": {
"label": "Section lists (optional) ",
"help": "Dictionary of sections lists (e.g. {'all': ['soma', 'dend']})",
"suggestions": "",
"hintText": ""
}
},
"topol": {
"label": "Topology",
"help": "Topological properties, including parentSec (label of parent section), parentX (parent location where to make connection) and childX (current section child location where to make connection).",
"suggestions": "",
"hintText": "",
"children": {
"parentSec": {
"label": "Parent Section",
"suggestions": [
"soma"
],
"help": "label of parent section",
"hintText": "soma",
"type": "str"
},
"parentX": {
"label": "Parent connection location",
"suggestions": [
0,
1
],
"help": "Parent location where to make connection",
"hintText": "1",
"type": "float"
},
"childX": {
"label": "Child connection location",
"suggestions": [
0,
1
],
"help": "Current section child location where to make connection",
"hintText": "1",
"type": "float"
}
}
}
}
}
}
},
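        # Illustrative usage in a model script (labels and values are hypothetical):
        #   cellRule = {'conds': {'cellType': 'PYR'}, 'secs': {}}
        #   cellRule['secs']['soma'] = {'geom': {'diam': 18.8, 'L': 18.8, 'Ra': 123.0},
        #                               'mechs': {'hh': {'gnabar': 0.12, 'gkbar': 0.036,
        #                                                'gl': 0.003, 'el': -70}}}
        #   netParams.cellParams['PYRrule'] = cellRule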
# ---------------------------------------------------------------------------------------------------------------------
# netParams.synMechParams
# ---------------------------------------------------------------------------------------------------------------------
"synMechParams": {
"label": "Synaptic mechanism parameters",
"suggestions": "",
"help": "",
"hintText": "",
"children": {
"mod": {
"label": "NMODL mechanism name",
"help": "The NMODL mechanism name (e.g. 'ExpSyn'); note this does not always coincide with the name of the mod file.",
"suggestions": "",
"options": [
"ExpSyn",
"Exp2Syn"
],
"hintText": "",
"type": "str"
},
"selfNetCon": {
"label": "Self NetCon parameters",
"help": "Dict with parameters of NetCon between the cell voltage and the synapse, required by some synaptic mechanisms such as the homeostatic synapse (hsyn). e.g. 'selfNetCon': {'sec': 'soma' , threshold: -15, 'weight': -1, 'delay': 0} (by default the source section, 'sec' = 'soma').",
"suggestions": "",
"hintText": ""
},
"tau1": {
"label": "Time constant for exponential 1 (ms)",
"help": "Define the time constant for the first exponential.",
"suggestions": "",
"hintText": "1",
"type": "float"
},
"tau2": {
"label": "Time constant for exponential 2 (ms)",
"help": "Define the time constant for the second exponential.",
"suggestions": "",
"hintText": "5",
"type": "float"
},
"e": {
"label": "Reversal potential (mV)",
"help": "Reversal potential of the synaptic receptors.",
"suggestions": "",
"hintText": "0",
"type": "float"
},
"i": {
"label": "synaptic current (nA)",
"help": "Synaptic current in nA.",
"suggestions": "",
"hintText": "10",
"type": "float"
}
}
},
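        # Illustrative usage in a model script (label and values are hypothetical):
        #   netParams.synMechParams['exc'] = {'mod': 'Exp2Syn', 'tau1': 0.1,
        #                                     'tau2': 5.0, 'e': 0}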
# ---------------------------------------------------------------------------------------------------------------------
# netParams.connParams
# ---------------------------------------------------------------------------------------------------------------------
"connParams": {
"label": "Connectivity parameters",
"suggestions": "",
"help": "",
"hintText": "",
"children": {
"preConds": {
"label": "Conditions for the presynaptic cells",
"help": "Presynaptic cell conditions defined using attributes/tags and the required value e.g. {'cellType': 'PYR'}. Values can be lists, e.g. {'pop': ['Exc1', 'Exc2']}. For location properties, the list values correspond to the min and max values, e.g. {'ynorm': [0.1, 0.6]}.",
"suggestions": "",
"hintText": "",
"children": {
"pop": {
"label": "Population (multiple selection available)",
"suggestions": "",
"help": "Cells belonging to this population (or list of populations) will be connected pre-synaptically.",
"hintText": ""
},
"cellType": {
"label": "Cell type (multiple selection available)",
"suggestions": "",
"help": "Ccells with this cell type attribute/tag will be connected pre-synaptically.",
"hintText": ""
},
"cellModel": {
"label": "Cell model (multiple selection available)",
"suggestions": "",
"help": "Cells with this cell model attribute/tag will be connected pre-synaptically.",
"hintText": ""
},
"x": {
"label": "Range of x-axis locations",
"suggestions": "",
"help": "Cells within these x-axis locations will be connected pre-synaptically.",
"hintText": ""
},
"y": {
"label": "Range of y-axis locations",
"suggestions": "",
"help": "Cells within these y-axis locations will be connected pre-synaptically.",
"hintText": ""
},
"z": {
"label": "Range of z-axis locations",
"suggestions": "",
"help": "Cells within these z-axis locations will be connected pre-synaptically..",
"hintText": ""
},
"xnorm": {
"label": "Range of normalized x-axis locations",
"suggestions": "",
"help": "Cells within these normalized x-axis locations will be connected pre-synaptically.",
"hintText": ""
},
"ynorm": {
"label": "Range of normalized y-axis locations",
"suggestions": "",
"help": "Cells within these normalized y-axis locations will be connected pre-synaptically.",
"hintText": ""
},
"znorm": {
"label": "Range of normalized z-axis locations",
"suggestions": "",
"help": "Cells within these normalized z-axis locations will be connected pre-synaptically.",
"hintText": ""
}
}
},
"postConds": {
"label": "Conditions for the postsynaptic cells",
"help": "Defined as a dictionary with the attributes/tags of the postsynaptic cell and the required values e.g. {'cellType': 'PYR'}. Values can be lists, e.g. {'pop': ['Exc1', 'Exc2']}. For location properties, the list values correspond to the min and max values, e.g. {'ynorm': [0.1, 0.6]}.",
"suggestions": "",
"hintText": "",
"children": {
"pop": {
"label": "Population (multiple selection available)",
"suggestions": "",
"help": "Cells belonging to this population (or list of populations) will be connected post-synaptically.",
"hintText": ""
},
"cellType": {
"label": "Cell type (multiple selection available)",
"suggestions": "",
"help": "Ccells with this cell type attribute/tag will be connected post-synaptically.",
"hintText": ""
},
"cellModel": {
"label": "Cell model (multiple selection available)",
"suggestions": "",
"help": "Cells with this cell model attribute/tag will be connected post-synaptically.",
"hintText": ""
},
"x": {
"label": "Range of x-axis locations",
"suggestions": "",
"help": "Cells within these x-axis locations will be connected post-synaptically.",
"hintText": ""
},
"y": {
"label": "Range of y-axis locations",
"suggestions": "",
"help": "Cells within these y-axis locations will be connected post-synaptically.",
"hintText": ""
},
"z": {
"label": "Range of z-axis locations",
"suggestions": "",
"help": "Cells within these z-axis locations will be connected post-synaptically..",
"hintText": ""
},
"xnorm": {
"label": "Range of normalized x-axis locations",
"suggestions": "",
"help": "Cells within these normalized x-axis locations will be connected post-synaptically.",
"hintText": ""
},
"ynorm": {
"label": "Range of normalized y-axis locations",
"suggestions": "",
"help": "Cells within these normalized y-axis locations will be connected post-synaptically.",
"hintText": ""
},
"znorm": {
"label": "Range of normalized z-axis locations",
"suggestions": "",
"help": "Cells within these normalized z-axis locations will be connected post-synaptically.",
"hintText": ""
}
}
},
"sec": {
"label": "Postsynaptic neuron section",
"help": "Name of target section on the postsynaptic neuron (e.g. 'soma'). If omitted, defaults to 'soma' if exists, otherwise to first section in the cell sections list. If synsPerConn > 1, a list of sections or sectionList can be specified, and synapses will be distributed uniformly along the specified section(s), taking into account the length of each section.",
"suggestions": "",
"hintText": "soma",
"type": "list(str)"
},
"loc": {
"label": "Postsynaptic neuron location (0-1)",
"help": "Location of target synaptic mechanism (e.g. 0.3). If omitted, defaults to 0.5. Can be single value, or list (if have synsPerConn > 1) or list of lists (If have both a list of synMechs and synsPerConn > 1).",
"suggestions": "",
"hintText": "0.5",
"type": "list(float)"
},
"synMech": {
"label": "Synaptic mechanism",
"help": "Label (or list of labels) of target synaptic mechanism on the postsynaptic neuron (e.g. 'AMPA' or ['AMPA', 'NMDA']). If omitted employs first synaptic mechanism in the cell synaptic mechanisms list. If have list, a separate connection is created to each synMech; and a list of weights, delays and or locs can be provided.",
"suggestions": "",
"hintText": ""
},
"synsPerConn": {
"label": "Number of individual synaptic contacts per connection",
"help": "Number of individual synaptic contacts (synapses) per cell-to-cell connection (connection). Can be defined as a function (see Functions as strings). If omitted, defaults to 1.",
"suggestions": "",
"hintText": "",
"default": 1
},
"weight": {
"label": "Weight of synaptic connection",
"help": "Strength of synaptic connection (e.g. 0.01). Associated to a change in conductance, but has different meaning and scale depending on the synaptic mechanism and cell model. Can be defined as a function (see Functions as strings). If omitted, defaults to netParams.defaultWeight = 1.",
"suggestions": "",
"hintText": "",
"type": "func"
},
"delay": {
"label": "Connection delay (ms)",
"help": "Time (in ms) for the presynaptic spike to reach the postsynaptic neuron. Can be defined as a function (see Functions as strings). If omitted, defaults to netParams.defaultDelay = 1.",
"suggestions": "",
"hintText": "",
"type": "func"
},
"probability": {
"label": "Probability of connection (0-1)",
"help": "Probability of connection between each pre and postsynaptic cell (0 to 1). Can be a string that defines as a function, e.g. '0.1*dist_3D+uniform(0.2,0.4)' (see Documentation on 'Functions as strings'). Overrides the convergence, divergence and fromList parameters.",
"suggestions": "0.1",
"hintText": "",
"type": "func"
},
"convergence": {
"label": "Convergence",
"help": "Number of pre-synaptic cells connected to each post-synaptic cell. Can be a string that defines as a function, e.g. '2*dist_3D+uniform(2,4)' (see Documentation on 'Functions as strings'). Overrides the divergence and fromList parameters.",
"suggestions": "5",
"hintText": "",
"type": "func"
},
"divergence": {
"label": "Divergence",
"help": "Number of post-synaptic cells connected to each pre-synaptic cell. Can be a string that defines as a function, e.g. '2*dist_3D+uniform(2,4)' (see Documentation on 'Functions as strings'). Overrides the fromList parameter.",
"suggestions": "5",
"hintText": "",
"type": "func"
},
"connList": {
"label": "Explicit list of one-to-one connections",
"help": "Each connection is indicated with relative ids of cell in pre and post populations, e.g. [[0,1],[3,1]] creates a connection between pre cell 0 and post cell 1; and pre cell 3 and post cell 1. Weights, delays and locs can also be specified as a list for each of the individual cell connection. These lists can be 2D or 3D if combined with multiple synMechs and synsPerConn > 1 (the outer dimension will correspond to the connList).",
"suggestions": "",
"hintText": "list(list(float))"
},
"connFunc": {
"label": "Internal connectivity function to use (not required)",
"help": "Automatically set to probConn, convConn, divConn or fromList, when the probability, convergence, divergence or connList parameters are included, respectively. Otherwise defaults to fullConn, ie. all-to-all connectivity.",
"suggestions": "",
"hintText": ""
},
"shape": {
"label": "Weight shape",
"help": "Modifies the conn weight dynamically during the simulation based on the specified pattern. Contains a dictionary with the following fields: 'switchOnOff' - times at which to switch on and off the weight, 'pulseType' - type of pulse to generate; either 'square' or 'gaussian', 'pulsePeriod' - period (in ms) of the pulse, 'pulseWidth' - width (in ms) of the pulse.",
"suggestions": "",
"hintText": ""
},
"plasticity": {
"label": "Plasticity mechanism",
"help": "Requires 2 fields: mech to specifiy the name of the plasticity mechanism, and params containing a dictionary with the parameters of the mechanism, e.g. {'mech': 'STDP', 'params': {'hebbwt': 0.01, 'antiwt':-0.01, 'wmax': 50, 'RLon': 1 'tauhebb': 10}}.",
"suggestions": "",
"hintText": "",
"type": "dict"
}
}
},
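        # Illustrative usage in a model script (labels and values are hypothetical):
        #   netParams.connParams['E->E'] = {
        #       'preConds': {'pop': 'E'}, 'postConds': {'pop': 'E'},
        #       'probability': 0.1, 'weight': 0.01, 'delay': 5, 'synMech': 'exc'}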
# ---------------------------------------------------------------------------------------------------------------------
# netParams.stimSourceParams
# ---------------------------------------------------------------------------------------------------------------------
"stimSourceParams": {
"label": "Stimulation source parameters",
"suggestions": "",
"help": "",
"hintText": "",
"children": {
"type": {
"label": "Point process used as stimulator",
"help": "Point process used as stimulator; allowed values: 'IClamp', 'VClamp', 'SEClamp', 'NetStim' and 'AlphaSynapse'. Note that NetStims can be added both using this method, or by creating a population of 'cellModel': 'NetStim' and adding the appropriate connections.",
"suggestions": "",
"hintText": "",
"default": "IClamp",
"type": "str"
},
"dur": {
"label": "Current clamp duration (ms)",
"help": "Duration of current clamp injection in ms",
"suggestions": "",
"hintText": "10",
"type": "float"
},
"amp": {
"label": "Current clamp amplitude (nA)",
"help": "Amplitude of current injection in nA",
"suggestions": "",
"hintText": "10",
"type": "float"
},
"del": {
"label": "Current clamp delay (ms)",
"help": "Delay (time when turned on after simulation starts) of current clamp in ms.",
"suggestions": "",
"hintText": "5",
"type": "float"
},
"vClampAmp": {
"label": "Current clamp amplitude (nA)",
"help": "Voltage clamp with three levels. Clamp is on at time 0, and off at time dur[0]+dur[1]+dur[2].",
"suggestions": "",
"hintText": "10",
"type": "list(float)"
},
"vClampDur": {
"label": "Current clamp delay (ms)",
"help": "Voltage clamp with three levels. Clamp is on at time 0, and off at time dur[0]+dur[1]+dur[2].",
"suggestions": "",
"hintText": "5",
"type": "list(float)"
},
"interval": {
"label": "Interval between spikes (ms)",
"help": "Define the mean time interval between spike.",
"suggestions": "10",
"hintText": "",
"type": "float"
},
"rate": {
"label": "Firing rate (Hz)",
"help": "Firing rate in Hz (note this is the inverse of the NetStim interval property).",
"suggestions": "",
"hintText": "",
"type": "float"
},
"rstim": {
"label": "Voltage clamp stimulation resistance",
"help": "Voltage clamp stimulation resistance.",
"suggestions": "",
"hintText": "",
"type": "float"
},
"gain": {
"label": "Voltage clamp amplifier gain",
"help": "Voltage clamp amplifier gain.",
"suggestions": "",
"hintText": "",
"type": "float"
},
"number": {
"label": "Maximum number of spikes",
"help": "Maximum number of spikes generated by the NetStim.",
"suggestions": "",
"hintText": "",
"type": "float"
},
"start": {
"label": "Start time of first spike",
"help": "Define the start time for the first spike.",
"suggestions": "0",
"hintText": "",
"type": "float"
},
"noise": {
"label": "Noise/randomness fraction (0-1)",
"help": "Fractional noise, 0 <= noise <= 1, means that an interval between spikes consists of a fixed interval of duration (1 - noise)*interval plus a negexp interval of mean duration noise*interval. Note that the most likely negexp interval has duration 0.",
"suggestions": "0.5",
"hintText": "",
"type": "float"
},
"tau1": {
"label": "Voltage clamp tau1",
"help": "Voltage clamp tau1.",
"suggestions": "",
"hintText": "",
"type": "float"
},
"tau2": {
"label": "Voltage clamp tau2",
"help": "Voltage clamp tau2.",
"suggestions": "",
"hintText": "",
"type": "float"
},
"i": {
"label": "Voltage clamp current (nA)",
"help": "Voltage clamp injected current in nA.",
"suggestions": "",
"hintText": "",
"type": "float"
},
"onset": {
"label": "Alpha synapse onset time (ms)",
"help": "Alpha synapse onset time.",
"suggestions": "",
"hintText": "",
"type": "float"
},
"tau": {
"label": "Alpha synapse time constant (ms)",
"help": "Alpha synapse time constant (ms).",
"suggestions": "",
"hintText": "",
"type": "float"
},
"gmax": {
"label": "Alpha synapse maximum conductance",
"help": "Alpha synapse maximum conductance.",
"suggestions": "",
"hintText": "",
"type": "float"
},
"e": {
"label": "Alpha synapse equilibrium potential",
"help": "Alpha synapse equilibrium potential.",
"suggestions": "",
"hintText": "",
"type": "float"
},
"rs": {
"label": "Voltage clamp resistance (MOhm)",
"help": "Voltage clamp resistance (MOhm).",
"suggestions": "",
"hintText": "",
"type": "float"
},
"vc": {
"label": "Voltage clamp reference voltage (mV)",
"help": "Voltage clamp reference voltage (mV).",
"suggestions": "",
"hintText": "",
"type": "float"
}
}
},
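        # Illustrative usage in a model script (label and values are hypothetical):
        #   netParams.stimSourceParams['Input1'] = {'type': 'IClamp', 'del': 300,
        #                                           'dur': 100, 'amp': 0.3}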
# ---------------------------------------------------------------------------------------------------------------------
# netParams.stimTargetParams
# ---------------------------------------------------------------------------------------------------------------------
"stimTargetParams": {
"label": "Stimulation target parameters",
"suggestions": "",
"help": "",
"hintText": "",
"children": {
"source": {
"label": "Stimulation source",
"help": "Label of the stimulation source (e.g. 'electrode_current').",
"suggestions": "",
"hintText": ""
},
"conds": {
"label": "Conditions of cells where the stimulation will be applied",
"help": "Conditions of cells where the stimulation will be applied. Can include a field 'cellList' with the relative cell indices within the subset of cells selected (e.g. 'conds': {'cellType':'PYR', 'y':[100,200], 'cellList': [1,2,3]}).",
"suggestions": "",
"hintText": "",
"children": {
"pop": {
"label": "Target population",
"help": "Populations that will receive the stimulation e.g. {'pop': ['Exc1', 'Exc2']}",
"suggestions": "",
"hintText": "",
"type": "list(float)"
},
"cellType": {
"label": "Target cell type",
"suggestions": "",
"help": "Cell types that will receive the stimulation",
"hintText": "",
"type": "str"
},
"cellModel": {
"label": "Target cell model",
"help": "Cell models that will receive the stimulation.",
"suggestions": "",
"type": "str"
},
"x": {
"label": "Range of x-axis locations",
"suggestions": "",
"help": "Cells within this x-axis locations will receive stimulation",
"hintText": ""
},
"y": {
"label": "Range of y-axis locations",
"suggestions": "",
"help": "Cells within this y-axis locations will receive stimulation",
"hintText": ""
},
"z": {
"label": "Range of z-axis locations",
"suggestions": "",
"help": "Cells within this z-axis locations will receive stimulation",
"hintText": ""
},
"xnorm": {
"label": "Range of normalized x-axis locations",
"suggestions": "",
"help": "Cells withing this normalized x-axis locations will receive stimulation",
"hintText": ""
},
"ynorm": {
"label": "Range of normalized y-axis locations",
"suggestions": "",
"help": "Cells within this normalized y-axis locations will receive stimulation",
"hintText": ""
},
"znorm": {
"label": "Range of normalized z-axis locations",
"suggestions": "",
"help": "Cells within this normalized z-axis locations will receive stimulation",
"hintText": ""
},
"cellList": {
"label": "Target cell global indices (gids)",
"help": "Global indices (gids) of neurons to receive stimulation. ([1, 8, 12])",
"suggestions": "",
"hintText": "",
"type": "list(float)"
},
}
},
"sec": {
"label": "Target section",
"help": "Target section (default: 'soma').",
"suggestions": "",
"hintText": "",
"type": "str"
},
"loc": {
"label": "Target location",
"help": "Target location (default: 0.5). Can be defined as a function (see Functions as strings).",
"suggestions": "",
"hintText": "",
"type": "float"
},
"synMech": {
"label": "Target synaptic mechanism",
"help": "Synaptic mechanism label to connect NetStim to. Optional; only for NetStims.",
"suggestions": "",
"hintText": ""
},
"weight": {
"label": "Weight of connection between NetStim and cell",
"help": "Weight of connection between NetStim and cell. Optional; only for NetStims. Can be defined as a function (see Functions as strings).",
"suggestions": "",
"hintText": ""
},
"delay": {
"label": "Delay of connection between NetStim and cell",
"help": "Delay of connection between NetStim and cell (default: 1). Optional; only for NetStims. Can be defined as a function (see Functions as strings).",
"suggestions": "",
"hintText": ""
},
"synsPerConn": {
"label": "Number of synaptic contacts per connection between NetStim and cell",
"help": "Number of synaptic contacts of connection between NetStim and cell (default: 1). Optional; only for NetStims. Can be defined as a function (see Functions as strings).",
"suggestions": "",
"hintText": ""
}
}
},
# ---------------------------------------------------------------------------------------------------------------------
# netParams.importCellParams
# ---------------------------------------------------------------------------------------------------------------------
"importCellParams": {
"label": "Import cell from .hoc or .py templates",
"suggestions": "",
"help": "",
"hintText": "",
"children": {
"fileName": {
"label": "Absolute path to file",
"help": "Absolute path to .hoc or .py template file.",
"suggestions": "",
"hintText": "",
"type": "str"
},
"cellName": {
"label": "Cell template/class name",
"help": "Template or class name defined inside the .hoc or .py file",
"suggestions": "",
"hintText": "",
"type": "str"
},
"label": {
"label": "Cell rule label",
"help": "Give a name to this cell rule.",
"suggestions": "",
"hintText": "",
"type": "str"
},
"importSynMechs": {
"label": "Import synaptic mechanisms",
"help": "If true, synaptic mechanisms will also be imported from the file. (default: False)",
"suggestions": "",
"hintText": "",
"type": "bool"
},
"compileMod": {
"label": "Compile mod files",
"help": "If true, mod files will be compiled before importing the cell. (default: false)",
"suggestions": "",
"hintText": "",
"type": "bool"
},
"modFolder": {
"label": "Path to mod folder",
"help": "Define the absolute path to the folder containing the mod files.",
"suggestions": "",
"hintText": "",
"type": "str"
},
}
}
}
},
# ---------------------------------------------------------------------------------------------------------------------
# simConfig
# ---------------------------------------------------------------------------------------------------------------------
"simConfig": {
"label": "Simulation Configuration",
"suggestions": "",
"help": "",
"hintText": "",
"children": {
"simLabel": {
"label": "Simulation label",
"help": "Choose a label for this simulation",
"suggestions": "",
"type": "str"
},
"duration": {
"label": "Duration (ms)",
"help": "Simulation duration in ms (default: 1000)",
"suggestions": "",
"default": 1000,
"type": "float"
},
"dt": {
"label": "Time step, dt",
"help": "Simulation time step in ms (default: 0.1)",
"suggestions": "",
"default": 0.025,
"type": "float"
},
"seeds": {
"label": "Randomizer seeds",
"help": "Dictionary with random seeds for connectivity, input stimulation, and cell locations (default: {'conn': 1, 'stim': 1, 'loc': 1}).",
"suggestions": "",
"type": "dict"
},
"addSynMechs": {
"label": "Add synaptic mechanisms",
"help": "Whether to add synaptic mechanisms or not (default: True).",
"suggestions": "",
"type": "bool"
},
"includeParamsLabel": {
"label": "Include parameter rule label",
"help": "Include label of parameters rule that created that cell, conn or stim (default: True).",
"suggestions": "",
"type": "bool"
},
"timing": {
"label": "Show timing",
"help": "Show and record timing of each process (default: True).",
"suggestions": "",
"type": "bool"
},
"verbose": {
"label": "Verbose mode",
"help": "Show detailed messages (default: False).",
"suggestions": "",
"type": "bool"
},
"saveFolder": {
"label": "Output folder",
"help": "Path where to save output data (default: '')",
"suggestions": "",
"type": "str"
},
"filename": {
"label": "Output file name",
"help": "Name of file to save model output (default: 'model_output')",
"suggestions": "",
"default": "model_output",
"type": "str"
},
"saveDataInclude": {
"label": "Data to include in output file",
"help": "Data structures to save to file (default: ['netParams', 'netCells', 'netPops', 'simConfig', 'simData'])",
"suggestions": "",
"type": "list(str)"
},
"timestampFilename": {
"label": "Add timestamp to file name",
"help": "Add timestamp to filename to avoid overwriting (default: False)",
"suggestions": "",
"type": "bool"
},
"savePickle": {
"label": "Save as Pickle",
"help": "Save data to pickle file (default: False).",
"suggestions": "",
"type": "bool"
},
"saveJson": {
"label": "Save as JSON",
"help": "Save dat to json file (default: False).",
"suggestions": "",
"type": "bool"
},
"saveMat": {
"label": "Save as MAT",
"help": "Save data to mat file (default: False).",
"suggestions": "",
"type": "bool"
},
"saveHDF5": {
"label": "Save as HDF5",
"help": "Save data to save to HDF5 file (under development) (default: False).",
"suggestions": "",
"type": "bool"
},
"saveDpk": {
"label": "Save as DPK",
"help": "Save data to .dpk pickled file (default: False).",
"suggestions": "",
"type": "bool"
},
"checkErrors": {
"label": "Check parameter errors",
"help": "check for errors (default: False).",
"suggestions": "",
"type": "bool"
},
"checkErrorsVerbose": {
"label": "Check parameter errors verbose mode",
"help": "check errors vervose (default: False)",
"suggestions": "",
"type": "bool"
},
"backupCfgFile": {
"label": "Copy simulation configuration file to this folder:",
"help": "Copy cfg file to folder, eg. ['cfg.py', 'backupcfg/'] (default: []).",
"suggestions": "",
"type": "list(str)"
},
"recordCells": {
"label": "Cells to record traces from",
"help": "List of cells from which to record traces. Can include cell gids (e.g. 5), population labels (e.g. 'S' to record from one cell of the 'S' population), or 'all', to record from all cells. NOTE: All cells selected in the include argument of simConfig.analysis['plotTraces'] will be automatically included in recordCells. (default: []).",
"suggestions": "",
"type": "list(float)"
},
"recordTraces": {
"label": "Traces to record from cells",
"help": "Dict of traces to record (default: {} ; example: {'V_soma': {'sec':'soma','loc':0.5,'var':'v'} }).",
"suggestions": "",
"type": "dict(dict)",
"default": "{\"V_soma\": {\"sec\": \"soma\", \"loc\": 0.5, \"var\": \"v\"}}"
},
"saveCSV": {
"label": "Save as CSV",
"help": "save cvs file (under development) (default: False)",
"suggestions": "",
"type": "bool"
},
"saveDat": {
"label": "Save as DAT ",
"help": "save .dat file (default: False)",
"suggestions": "",
"type": "bool"
},
"saveCellSecs": {
"label": "Store cell sections after simulation",
"help": "Save cell sections after gathering data from nodes post simulation; set to False to reduce memory required (default: True)",
"suggestions": "",
"type": "bool"
},
"saveCellConns": {
"label": "Store cell connections after simulation",
"help": "Save cell connections after gathering data from nodes post simulation; set to False to reduce memory required (default: True)",
"suggestions": "",
"type": "bool"
},
"recordStim": {
"label": "Record spikes of artificial stimulators (NetStims and VecStims)",
"help": "Record spikes of NetStims and VecStims (default: False).",
"suggestions": "",
"type": "bool"
},
"recordLFP": {
"label": "Record LFP electrode locations",
"help": "3D locations of local field potential (LFP) electrodes, e.g. [[50, 100, 50], [50, 200]] (default: False).",
"suggestions": "",
"type": "list(list(float))"
},
"saveLFPCells": {
"label": "Store LFP of individual cells",
"help": "Store LFP generated individually by each cell in sim.allSimData['LFPCells'].",
"suggestions": "",
"type": "bool"
},
"recordStep": {
"label": "Time step for data recording (ms)",
"help": "Step size in ms for data recording (default: 0.1).",
"suggestions": "",
"default": 0.1,
"type": "float"
},
"printRunTime": {
"label": "Interval to print run time at (s)",
"help": "Print run time at interval (in sec) specified here (eg. 0.1) (default: False).",
"suggestions": "",
"type": "float"
},
"printSynsAfterRule": {
"label": "Print total connections",
"help": "Print total connections after each conn rule is applied.",
"suggestions": "",
"type": "bool"
},
"printPopAvgRates": {
"label": "Print population average firing rates",
"help": "Print population avg firing rates after run (default: False).",
"suggestions": "",
"type": "bool"
},
"connRandomSecFromList": {
"label": "Select random sections from list for connection",
"help": "Select random section (and location) from list even when synsPerConn=1 (default: True).",
"suggestions": "",
"type": "bool"
},
"compactConnFormat": {
"label": "Use compact connection format (list instead of dicT)",
"help": "Replace dict format with compact list format for conns (need to provide list of keys to include) (default: False).",
"suggestions": "",
"type": "bool"
},
"gatherOnlySimData": {
"label": "Gather only simulation output data",
"help": "Omits gathering of net and cell data thus reducing gatherData time (default: False).",
"suggestions": "",
"type": "bool"
},
"createPyStruct": {
"label": "Create Python structure",
"help": "Create Python structure (simulator-independent) when instantiating network (default: True).",
"suggestions": "",
"type": "bool"
},
"createNEURONObj": {
"label": "Create NEURON objects",
"help": "Create runnable network in NEURON when instantiating netpyne network metadata (default: True).",
"suggestions": "",
"type": "bool"
},
"cvode_active": {
"label": "use CVode",
"help": "Use CVode variable time step (default: False).",
"suggestions": "",
"type": "bool"
},
"cache_efficient": {
"label": "use CVode cache_efficient",
"help": "Use CVode cache_efficient option to optimize load when running on many cores (default: False).",
"suggestions": "",
"type": "bool"
},
"hParams": {
"label": "Set global parameters (temperature, initial voltage, etc)",
"help": "Dictionary with parameters of h module (default: {'celsius': 6.3, 'v_init': -65.0, 'clamp_resist': 0.001}).",
"suggestions": "",
"type": "dict"
},
"saveTxt": {
"label": "Save as TXT",
"help": "Save data to txt file (under development) (default: False)",
"suggestions": "",
"type": "bool"
},
"saveTiming": {
"label": "Save timing data to file",
"help": " Save timing data to pickle file (default: False).",
"suggestions": "",
"type": "bool"
},
# ---------------------------------------------------------------------------------------------------------------------
# simConfig.analysis
# ---------------------------------------------------------------------------------------------------------------------
"analysis": {
"label": "Analysis",
"suggestions": "",
"help": "",
"hintText": "",
"children": {
"plotRaster": {
"label": "Raster plot",
"suggestions": "",
"help": "Plot raster (spikes over time) of network cells.",
"hintText": "",
"children": {
"include": {
"label": "Cells to include",
"suggestions": "",
"help": "List of cells to include (['all'|,'allCells'|,'allNetStims'|,120|,'L4'|,('L2', 56)|,('L5',[4,5,6])])",
"hintText": "",
"type": "str"
},
"timeRange": {
"label": "Time range [min,max] (ms)",
"suggestions": "",
"help": "Time range of spikes shown; if None shows all ([start,stop])",
"hintText": "",
"type": "list(float)"
},
"maxSpikes": {
"label": "Maximum number of spikes to plot",
"suggestions": "",
"help": "maximum number of spikes that will be plotted (int).",
"hintText": "",
"type": "float"
},
"orderBy": {
"label": "Order by",
"suggestions": "",
"help": "Unique numeric cell property to order y-axis by, e.g. 'gid', 'ynorm', 'y' ('gid'|'y'|'ynorm'|...)",
"hintText": "",
"options": [
"gid",
"y",
"ynorm"
],
"type": "str"
},
"orderInverse": {
"label": "Invert y-axis",
"suggestions": "",
"help": "Invert the y-axis order (True|False)",
"hintText": "",
"type": "bool"
},
"labels": {
"label": "Population labels",
"suggestions": "",
"help": "Show population labels in a legend or overlayed on one side of raster ('legend'|'overlay'))",
"hintText": "",
"type": "str"
},
"popRates": {
"label": "Include population rates",
"suggestions": "",
"help": "Include population rates ('legend'|'overlay')",
"hintText": "",
"options": [
"legend",
"overlay"
],
"type": "str"
},
"spikeHist": {
"label": "Overlay spike histogram",
"suggestions": "",
"help": "overlay line over raster showing spike histogram (spikes/bin) (None|'overlay'|'subplot')",
"hintText": "",
"options": [
"None",
"overlay",
"subplot"
],
"type": "str"
},
"spikeHistBin": {
"label": "Bin size for histogram",
"suggestions": "",
"help": "Size of bin in ms to use for histogram (int)",
"hintText": "",
"type": "float"
},
"syncLines": {
"label": "Synchronization lines",
"suggestions": "",
"help": "calculate synchorny measure and plot vertical lines for each spike to evidence synchrony (True|False)",
"hintText": "",
"type": "bool"
},
"figSize": {
"label": "Figure size",
"suggestions": "",
"help": "Size of figure ((width, height))",
"hintText": "",
"type": "str"
},
"saveData": {
"label": "Save data",
"suggestions": "",
"help": "File name where to save the final data used to generate the figure (None|'fileName').",
"hintText": "",
"type": "str"
},
"saveFig": {
"label": "Save figure file name",
"suggestions": "",
"help": "File name where to save the figure (None|'fileName')",
"hintText": "",
"type": "str"
},
"showFig": {
"label": "Show figure",
"suggestions": "",
"help": "Whether to show the figure or not (True|False).",
"hintText": "",
"type": "bool"
}
}
},
"plotSpikeHist": {
"label": "Plot Spike Histogram",
"suggestions": "",
"help": "Plot spike histogram.",
"hintText": "",
"children": {
"include": {
"label": "Cells to include",
"suggestions": "",
"help": "List of cells to include (['all'|,'allCells'|,'allNetStims'|,120|,'L4'|,('L2', 56)|,('L5',[4,5,6])])",
"hintText": "",
"type": "list"
},
"timeRange": {
"label": "Time range [min,max] (ms)",
"suggestions": "",
"help": "Time range of spikes shown; if None shows all ([start,stop])",
"hintText": "",
"type": "list(float)"
},
"binSize": {
"label": "bin size for histogram",
"suggestions": "",
"help": "Size of bin in ms to use for histogram (int)",
"hintText": "",
"type": "int"
},
"overlay": {
"label": "show overlay",
"suggestions": "",
"help": "Whether to overlay the data lines or plot in separate subplots (True|False)",
"hintText": "",
"type": "bool"
},
"graphType": {
"label": "type of Graph",
"suggestions": "",
"help": " Type of graph to use (line graph or bar plot) ('line'|'bar')",
"hintText": "",
"options": [
"line",
"bar"
],
"type": "str"
},
"yaxis": {
"label": "axis units",
"suggestions": "",
"help": "Units of y axis (firing rate in Hz, or spike count) ('rate'|'count')",
"hintText": "",
"options": [
"rate",
"count"
],
"type": "str"
},
"figSize": {
"label": "Figure size",
"suggestions": "",
"help": "Size of figure ((width, height))",
"hintText": "",
"type": ""
},
"saveData": {
"label": "Save data",
"suggestions": "",
"help": "File name where to save the final data used to generate the figure (None|'fileName').",
"hintText": "",
"type": "str"
},
"saveFig": {
"label": "Save figure file name",
"help": "File name where to save the figure (None|'fileName')",
"hintText": "",
"type": "str"
},
"showFig": {
"label": "Show figure",
"suggestions": "",
"help": "Whether to show the figure or not (True|False).",
"hintText": "",
"type": "bool"
}
}
},
"plotRatePSD": {
"label": "Plot Rate PSD",
"suggestions": "",
"help": "Plot spikes power spectral density (PSD).",
"hintText": "",
"children": {
"include": {
"label": "Cells to include",
"suggestions": "",
"help": "List of cells to include (['all'|,'allCells'|,'allNetStims'|,120|,'L4'|,('L2', 56)|,('L5',[4,5,6])])",
"hintText": "",
"type": "list"
},
"timeRange": {
"label": "Time range [min,max] (ms)",
"suggestions": "",
"help": "Time range of spikes shown; if None shows all ([start,stop])",
"hintText": "",
"type": "list(float)"
},
"binSize": {
"label": "Bin size",
"suggestions": "",
"help": "Size of bin in ms to use (int)",
"hintText": "",
"type": "float"
},
"maxFreq": {
"label": "maximum frequency",
"suggestions": "",
"help": " Maximum frequency to show in plot (float).",
"hintText": "",
"type": "float"
},
"NFFT": {
"label": "Number of point",
"suggestions": "",
"help": "The number of data points used in each block for the FFT (power of 2)",
"hintText": "",
"type": "float"
},
"noverlap": {
"label": "Number of overlap points",
"suggestions": "",
"help": "Number of points of overlap between segments (< nperseg).",
"hintText": "",
"type": "float"
},
"smooth": {
"label": "Window size",
"suggestions": "",
"help": "Window size for smoothing; no smoothing if 0.",
"hintText": "",
"type": "float"
},
"overlay": {
"label": "Overlay data",
"suggestions": "",
"help": "Whether to overlay the data lines or plot in separate subplots (True|False).",
"hintText": "",
"type": "bool"
},
"figSize": {
"label": "Figure size",
"suggestions": "",
"help": "Size of figure ((width, height))",
"hintText": "",
"type": ""
},
"saveData": {
"label": "Save data",
"suggestions": "",
"help": "File name where to save the final data used to generate the figure (None|'fileName').",
"hintText": "",
"type": "str"
},
"saveFig": {
"label": "Save figure file name",
"suggestions": "",
"help": "File name where to save the figure (None|'fileName')",
"hintText": "",
"type": "str"
},
"showFig": {
"label": "Show figure",
"suggestions": "",
"help": "Whether to show the figure or not (True|False).",
"hintText": "",
"type": "bool"
}
}
},
"plotSpikeStats": {
"label": "Plot Spike Statistics",
"suggestions": "",
"help": "Plot spike histogram.",
"hintText": "",
"children": {
"include": {
"label": "Cells to include",
"suggestions": "",
"help": "List of cells to include (['all'|,'allCells'|,'allNetStims'|,120|,'L4'|,('L2', 56)|,('L5',[4,5,6])])",
"hintText": "",
"type": "list"
},
"timeRange": {
"label": "Time range [min,max] (ms)",
"suggestions": "",
"help": "Time range of spikes shown; if None shows all ([start,stop])",
"hintText": "",
"type": "list(float)"
},
"graphType": {
"label": "type of graph",
"suggestions": "",
"help": "Type of graph to use ('boxplot').",
"hintText": "",
"options": [
"boxplot"
],
"type": "str"
},
"stats": {
"label": "meassure type to calculate stats",
"suggestions": "",
"help": "List of types measure to calculate stats over: cell firing rates, interspike interval coefficient of variation (ISI CV), pairwise synchrony, and/or overall synchrony (sync measures calculated using PySpike SPIKE-Synchrony measure) (['rate', |'isicv'| 'pairsync' |'sync'|]).",
"hintText": "",
"options": [
"rate",
"isicv",
"pairsync",
"sync"
],
"type": "str"
},
"popColors": {
"label": "color for each population",
"suggestions": "",
"help": "Dictionary with color (value) used for each population/key.",
"hintText": "",
"type": "dict"
},
"figSize": {
"label": "figure size",
"suggestions": "",
"help": "Size of figure ((width, height)).",
"hintText": "",
"type": ""
},
"saveData": {
"label": "Save data",
"suggestions": "",
"help": "File name where to save the final data used to generate the figure (None|'fileName').",
"hintText": "",
"type": "str"
},
"saveFig": {
"label": "Save figure file name",
"suggestions": "",
"help": "File name where to save the figure (None|'fileName').",
"hintText": "",
"type": "str"
},
"showFig": {
"label": "Show figure",
"suggestions": "",
"help": "Whether to show the figure or not (True|False).",
"hintText": "",
"type": "bool"
}
}
},
"plotTraces": {
"label": "Plot Traces",
"suggestions": "",
"help": "Plot recorded traces (specified in simConfig.recordTraces).",
"hintText": "",
"children": {
"include": {
"label": "Cells to include",
"suggestions": "",
"help": "List of cells to include (['all'|,'allCells'|,'allNetStims'|,120|,'L4'|,('L2', 56)|,('L5',[4,5,6])])",
"hintText": "",
"type": "list(float)"
},
"timeRange": {
"label": "Time range [min,max] (ms)",
"suggestions": "",
"help": "Time range for shown Traces ; if None shows all ([start,stop])",
"hintText": "",
"type": "list(float)"
},
"overlay": {
"label": "overlay data",
"suggestions": "",
"help": "Whether to overlay the data lines or plot in separate subplots (True|False).",
"hintText": "",
"type": "bool"
},
"oneFigPer": {
"label": "plot one figure per cell/trace",
"suggestions": "",
"help": "Whether to plot one figure per cell or per trace (showing multiple cells) ('cell'|'trace').",
"hintText": "",
"options": [
"cell",
"traces"
],
"type": "str"
},
"rerun": {
"label": "re-run simulation",
"suggestions": "",
"help": "rerun simulation so new set of cells gets recorded (True|False).",
"hintText": "",
"type": "bool"
},
"figSize": {
"label": "Figure size",
"suggestions": "",
"help": "Size of figure ((width, height))",
"hintText": "",
"type": ""
},
"saveData": {
"label": "Save data",
"suggestions": "",
"help": "File name where to save the final data used to generate the figure (None|'fileName').",
"hintText": "",
"type": "str"
},
"saveFig": {
"label": "Save figure file name",
"suggestions": "",
"help": "File name where to save the figure (None|'fileName')",
"hintText": "",
"type": "str"
},
"showFig": {
"label": "Show figure",
"suggestions": "",
"help": "Whether to show the figure or not (True|False).",
"hintText": "",
"type": "bool"
}
}
},
"plotLFP": {
"label": "Plot LFP",
"suggestions": "",
"help": "Plot LFP / extracellular electrode recordings (time-resolved, power spectral density, time-frequency and 3D locations).",
"hintText": "",
"children": {
"electrodes": {
"label": "electrode to show",
"suggestions": "",
"help": " List of electrodes to include; 'avg'=avg of all electrodes; 'all'=each electrode separately (['avg', 'all', 0, 1, ...]).",
"hintText": "",
"type": "list"
},
"plots": {
"label": "Select plot types to show (multiple selection available)",
"suggestions": "",
"help": "list of plot types to show (['timeSeries', 'PSD', 'timeFreq', 'locations']).",
"hintText": "",
"options": [
"timeSeries",
"PSD",
"spectrogram",
"locations"
],
"type": "str"
},
"timeRange": {
"label": "Time range [min,max] (ms)",
"suggestions": "",
"help": "Time range for shown Traces ; if None shows all ([start,stop])",
"hintText": "",
"type": "list(float)"
},
"NFFT": {
"label": "NFFT",
"suggestions": "",
"help": "The number of data points used in each block for the FFT (power of 2) (float)",
"hintText": "",
"type": "float"
},
"noverlap": {
"label": "Overlap",
"suggestions": "",
"help": "Number of points of overlap between segments (int, < nperseg).",
"hintText": "",
"type": "float"
},
"maxFreq": {
"label": "Maximum Frequency",
"suggestions": "",
"help": "Maximum frequency shown in plot for PSD and time-freq (float).",
"hintText": "",
"type": "float"
},
"nperseg": {
"label": "Segment length (nperseg)",
"suggestions": "",
"help": "Length of each segment for time-freq (int).",
"hintText": "",
"type": "float"
},
"smooth": {
"label": "Window size",
"suggestions": "",
"help": "Window size for smoothing; no smoothing if 0 (int).",
"hintText": "",
"type": "float"
},
"separation": {
"label": "Separation factor",
"suggestions": "",
"help": "Separation factor between time-resolved LFP plots; multiplied by max LFP value (float).",
"hintText": "",
"type": "float"
},
"includeAxon": {
"label": "Include axon",
"suggestions": "",
"help": "Whether to show the axon in the location plot (boolean).",
"hintText": "",
"type": "bool"
},
"figSize": {
"label": "Figure size",
"suggestions": "",
"help": "Size of figure ((width, height))",
"hintText": "",
"type": ""
},
"saveData": {
"label": "Save data",
"suggestions": "",
"help": "File name where to save the final data used to generate the figure (None|'fileName').",
"hintText": "",
"type": "str"
},
"saveFig": {
"label": "Save figure file name",
"suggestions": "",
"help": "File name where to save the figure (None|'fileName')",
"hintText": "",
"type": "str"
},
"showFig": {
"label": "Show figure",
"suggestions": "",
"help": "Whether to show the figure or not (True|False).",
"hintText": "",
"type": "bool"
}
}
},
"plotShape": {
"label": "Plot Shape",
"suggestions": "",
"help": "",
"hintText": "Plot 3D cell shape using Matplotlib or NEURON Interviews PlotShape.",
"children": {
"includePre": {
"label": "population (or cell by index) to presyn",
"suggestions": "",
"help": "List of cells to include (['all'|,'allCells'|,'allNetStims'|,120|,'L4'|,('L2', 56)|,('L5',[4,5,6])])",
"hintText": "",
"type": "list"
},
"includePost": {
"label": "population (or cell by index) to postsyn",
"suggestions": "",
"help": "List of cells to include (['all'|,'allCells'|,'allNetStims'|,120|,'L4'|,('L2', 56)|,('L5',[4,5,6])])",
"hintText": "",
"type": "list"
},
"synStyle": {
"label": "synaptic marker style",
"suggestions": "",
"help": "Style of marker to show synapses (Matplotlib markers).",
"hintText": "",
"type": "str"
},
"dist": {
"label": "3D distance",
"suggestions": "",
"help": "3D distance (like zoom).",
"hintText": "",
"type": "float"
},
"synSize": {
"label": "synapses marker size",
"suggestions": "",
"help": "Size of marker to show synapses.",
"hintText": "",
"type": "float"
},
"cvar": {
"label": "variable to represent in shape plot",
"suggestions": "",
"help": "Variable to represent in shape plot ('numSyns'|'weightNorm').",
"hintText": "",
"options": [
"numSyns",
"weightNorm"
],
"type": "str"
},
"cvals": {
"label": "value to represent in shape plot",
"suggestions": "",
"help": "List of values to represent in shape plot; must be same as num segments (list of size num segments; ).",
"hintText": "",
"type": "list(float)"
},
"iv": {
"label": "use NEURON iv",
"suggestions": "",
"help": "Use NEURON Interviews (instead of matplotlib) to show shape plot (True|False).",
"hintText": "",
"type": "bool"
},
"ivprops": {
"label": "properties for iv",
"suggestions": "",
"help": "Dict of properties to plot using Interviews (dict).",
"hintText": "",
"type": "dict"
},
"showSyns": {
"label": "show synaptic connections in 3D",
"suggestions": "",
"help": "Show synaptic connections in 3D (True|False).",
"hintText": "",
"type": "bool"
},
"bkgColor": {
"label": "background color",
"suggestions": "",
"help": "RGBA list/tuple with bakcground color eg. (0.5, 0.2, 0.1, 1.0) (list/tuple with 4 floats).",
"hintText": "",
"type": "list(float)"
},
"showElectrodes": {
"label": "show electrodes",
"suggestions": "",
"help": "Show electrodes in 3D (True|False).",
"hintText": "",
"type": "bool"
},
"includeAxon": {
"label": "include Axon in shape plot",
"suggestions": "",
"help": "Include axon in shape plot (True|False).",
"hintText": "",
"type": "bool"
},
"figSize": {
"label": "Figure size",
"suggestions": "",
"help": "Size of figure ((width, height))",
"hintText": "",
"type": ""
},
"saveData": {
"label": "Save data",
"suggestions": "",
"help": "File name where to save the final data used to generate the figure (None|'fileName').",
"hintText": "",
"type": "str"
},
"saveFig": {
"label": "Save figure file name",
"suggestions": "",
"help": "File name where to save the figure (None|'fileName')",
"hintText": "",
"type": "str"
},
"showFig": {
"label": "Show figure",
"suggestions": "",
"help": "Whether to show the figure or not (True|False).",
"hintText": "",
"type": "bool"
}
}
},
"plot2Dnet": {
"label": "Plot 2D net",
"suggestions": "",
"help": "Plot 2D representation of network cell positions and connections.",
"hintText": "",
"children": {
"include": {
"label": "Cells to include",
"suggestions": "",
"help": "List of cells to show (['all'|,'allCells'|,'allNetStims'|,120|,'L4'|,('L2', 56)|,('L5',[4,5,6])]).",
"hintText": "",
"type": "list"
},
"showConns": {
"label": "show connections",
"suggestions": "",
"help": "Whether to show connections or not (True|False).",
"hintText": "",
"type": "bool"
},
"view": {
"label": "perspective view",
"suggestions": "",
"help": "Perspective view, either front ('xy') or top-down ('xz').",
"hintText": "",
"options": [
"xy",
"xz"
],
"type": "str"
},
"figSize": {
"label": "Figure size",
"suggestions": "",
"help": "Size of figure ((width, height))",
"hintText": "",
"type": ""
},
"saveData": {
"label": "Save data",
"suggestions": "",
"help": "File name where to save the final data used to generate the figure (None|'fileName').",
"hintText": "",
"type": "str"
},
"saveFig": {
"label": "Save figure file name",
"suggestions": "",
"help": "File name where to save the figure (None|'fileName')",
"hintText": "",
"type": "str"
},
"showFig": {
"label": "Show figure",
"suggestions": "",
"help": "Whether to show the figure or not (True|False).",
"hintText": "",
"type": "bool"
}
}
},
"plotConn": {
"label": "Plot Connectivity",
"suggestions": "",
"help": "Plot network connectivity.",
"hintText": "",
"children": {
"include": {
"label": "Cells to include",
"suggestions": "",
"help": "List of cells to show (['all'|,'allCells'|,'allNetStims'|,120|,'L4'|,('L2', 56)|,('L5',[4,5,6])]).",
"hintText": "",
"type": "list"
},
"feature": {
"label": "feature to show",
"suggestions": "",
"help": "Feature to show in connectivity matrix; the only features applicable to groupBy='cell' are 'weight', 'delay' and 'numConns'; 'strength' = weight * probability ('weight'|'delay'|'numConns'|'probability'|'strength'|'convergence'|'divergence')g.",
"hintText": "",
"options": [
"weight",
"delay",
"numConns",
"probability",
"strength",
"convergency",
"divergency"
],
"type": "str"
},
"groupBy": {
"label": "group by",
"suggestions": "",
"help": "Show matrix for individual cells or populations ('pop'|'cell').",
"hintText": "",
"options": [
"pop",
"cell"
],
"type": "str"
},
"orderBy": {
"label": "order by",
"suggestions": "",
"help": "Unique numeric cell property to order x and y axes by, e.g. 'gid', 'ynorm', 'y' (requires groupBy='cells') ('gid'|'y'|'ynorm'|...).",
"hintText": "",
"options": [
"gid",
"y",
"ynorm"
],
"type": "str"
},
"figSize": {
"label": "Figure size",
"suggestions": "",
"help": "Size of figure ((width, height))",
"hintText": "",
"type": ""
},
"saveData": {
"label": "Save data",
"suggestions": "",
"help": "File name where to save the final data used to generate the figure (None|'fileName').",
"hintText": "",
"type": "str"
},
"saveFig": {
"label": "Save figure file name",
"suggestions": "",
"help": "File name where to save the figure (None|'fileName')",
"hintText": "",
"type": "str"
},
"showFig": {
"label": "Show figure",
"suggestions": "",
"help": "Whether to show the figure or not (True|False).",
"hintText": "",
"type": "bool"
}
}
},
"granger": {
"label": "Granger",
"suggestions": "",
"help": "Calculate and optionally plot Granger Causality.",
"hintText": "",
"children": {
"cells1": {
"label": "population (or cell by index) to subset 1",
"suggestions": "",
"help": "Subset of cells from which to obtain spike train 1 (['all',|'allCells','allNetStims',|,120,|,'E1'|,('L2', 56)|,('L5',[4,5,6])]).",
"hintText": "",
"type": "list"
},
"cells2": {
"label": "population (or cell by index cell) to subset 2",
"suggestions": "",
"help": "Subset of cells from which to obtain spike train 2 (['all',|'allCells','allNetStims',|,120,|,'E1'|,('L2', 56)|,('L5',[4,5,6])]).",
"hintText": "",
"type": "list"
},
"spks1": {
"label": "spike times to train 1",
"suggestions": "",
"help": "Spike train 1; list of spike times; if omitted then obtains spikes from cells1 (list).",
"hintText": "",
"type": "list"
},
"spks2": {
"label": "spike times to train 2",
"suggestions": "",
"help": "Spike train 2; list of spike times; if omitted then obtains spikes from cells1 (list).",
"hintText": "",
"type": "list"
},
"timeRange": {
"label": "Time range [min,max] (ms)",
"suggestions": "",
"help": "Range of time to calculate nTE in ms ([min, max]).",
"hintText": "",
"type": "list(float)"
},
"binSize": {
"label": "bin size",
"suggestions": "",
"help": "Bin size used to convert spike times into histogram (int).",
"hintText": "",
"type": "float"
},
"label1": {
"label": "label for train 1",
"suggestions": "",
"help": "Label for spike train 1 to use in plot (string).",
"hintText": "",
"type": "str"
},
"label2": {
"label": "label for train 2",
"suggestions": "",
"help": "Label for spike train 2 to use in plot (string).",
"hintText": "",
"type": "str"
},
"figSize": {
"label": "Figure size",
"suggestions": "",
"help": "Size of figure ((width, height))",
"hintText": "",
"type": ""
},
"saveData": {
"label": "Save data",
"suggestions": "",
"help": "File name where to save the final data used to generate the figure (None|'fileName').",
"hintText": "",
"type": "str"
},
"saveFig": {
"label": "Save figure file name",
"suggestions": "",
"help": "File name where to save the figure (None|'fileName')",
"hintText": "",
"type": "str"
},
"showFig": {
"label": "Show figure",
"suggestions": "",
"help": "Whether to show the figure or not (True|False).",
"hintText": "",
"type": "bool"
}
}
},
"nTE": {
"label": "Normalize Transfer Entropy",
"suggestions": "",
"help": "Calculate normalized transfer entropy.",
"hintText": "",
"children": {
"cell1": {
"label": "Cell Subset 1",
"suggestions": "",
"help": "Subset of cells from which to obtain spike train 1 (['all',|'allCells','allNetStims',|,120,|,'E1'|,('L2', 56)|,('L5',[4,5,6])]).",
"hintText": "",
"type": "list"
},
"cell2": {
"label": "Cell Subset 2",
"suggestions": "",
"help": "Subset of cells from which to obtain spike train 2 (['all',|'allCells','allNetStims',|,120,|,'E1'|,('L2', 56)|,('L5',[4,5,6])]).",
"hintText": "",
"type": "list"
},
"spks1": {
"label": "Spike train 1",
"suggestions": "",
"help": "Spike train 1; list of spike times; if omitted then obtains spikes from cells1 (list).",
"hintText": "",
"type": "list(float)"
},
"spks2": {
"label": "Spike train 2",
"suggestions": "",
"help": "Spike train 2; list of spike times; if omitted then obtains spikes from cells1 (list).",
"hintText": "",
"type": "list(float)"
},
"timeRange": {
"label": "Time range [min,max] (ms)",
"suggestions": "",
"help": "Range of time to calculate nTE in ms ([min, max]).",
"hintText": "",
"type": "list(float)"
},
"binSize": {
"label": "Bin size",
"suggestions": "",
"help": "Bin size used to convert spike times into histogram (int).",
"hintText": "",
"type": "float"
},
"numShuffle": {
"label": "Number of Shuffles",
"suggestions": "",
"help": "Number of times to shuffle spike train 1 to calculate TEshuffled; note: nTE = (TE - TEShuffled)/H(X2F|X2P) (int).",
"hintText": "",
"type": "float"
},
"figSize": {
"label": "Figure size",
"suggestions": "",
"help": "Size of figure ((width, height))",
"hintText": "",
"type": ""
},
"saveData": {
"label": "Save data",
"suggestions": "",
"help": "File name where to save the final data used to generate the figure (None|'fileName').",
"hintText": "",
"type": "str"
},
"saveFig": {
"label": "Save figure file name",
"suggestions": "",
"help": "File name where to save the figure (None|'fileName')",
"hintText": "",
"type": "str"
},
"showFig": {
"label": "Show figure",
"suggestions": "",
"help": "Whether to show the figure or not (True|False).",
"hintText": "",
"type": "bool"
}
}
}
}
}
}
}
}
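# A minimal, hypothetical sketch of how this metadata tree can be consumed:
# walk the nested "children" dicts and collect (path, label, type) triples,
# e.g. to build a form or validate user input. It assumes the top-level dict
# above is bound to a name such as `metadata`; everything else is illustrative.
if __name__ == "__main__":
    def walk(node, path=""):
        for key, spec in node.items():
            if not isinstance(spec, dict):
                continue  # skip scalar entries such as "label" or "type"
            field_path = path + "." + key if path else key
            if "label" in spec:
                print(field_path, "|", spec.get("label"), "|", spec.get("type", ""))
            walk(spec.get("children", {}), field_path)
    walk(metadata)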
| 53.659082 | 467 | 0.323153 | ["MIT"] | Anjali-Agarwal8/netpyne | netpyne/metadata/metadata.py | 132,055 | Python |
#!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test mining RPCs
- getmininginfo
- getblocktemplate proposal mode
- submitblock"""
import copy
from binascii import b2a_hex
from decimal import Decimal
from test_framework.blocktools import create_coinbase
from test_framework.mininode import CBlock
from test_framework.test_framework import LivecoinTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error
def b2x(b):
return b2a_hex(b).decode('ascii')
def assert_template(node, block, expect, rehash=True):
if rehash:
block.hashMerkleRoot = block.calc_merkle_root()
rsp = node.getblocktemplate({'data': b2x(block.serialize()), 'mode': 'proposal'})
assert_equal(rsp, expect)
class MiningTest(LivecoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.setup_clean_chain = False
def run_test(self):
node = self.nodes[0]
self.log.info('getmininginfo')
mining_info = node.getmininginfo()
assert_equal(mining_info['blocks'], 200)
assert_equal(mining_info['chain'], 'regtest')
assert_equal(mining_info['currentblocktx'], 0)
assert_equal(mining_info['currentblockweight'], 0)
assert_equal(mining_info['difficulty'], Decimal('4.656542373906925E-10'))
assert_equal(mining_info['networkhashps'], Decimal('0.003333333333333334'))
assert_equal(mining_info['pooledtx'], 0)
# Mine a block to leave initial block download
node.generate(1)
tmpl = node.getblocktemplate()
self.log.info("getblocktemplate: Test capability advertised")
assert 'proposal' in tmpl['capabilities']
assert 'coinbasetxn' not in tmpl
coinbase_tx = create_coinbase(height=int(tmpl["height"]) + 1)
# sequence numbers must not be max for nLockTime to have effect
coinbase_tx.vin[0].nSequence = 2 ** 32 - 2
coinbase_tx.rehash()
block = CBlock()
block.nVersion = tmpl["version"]
block.hashPrevBlock = int(tmpl["previousblockhash"], 16)
block.nTime = tmpl["curtime"]
block.nBits = int(tmpl["bits"], 16)
block.nNonce = 0
block.vtx = [coinbase_tx]
self.log.info("getblocktemplate: Test valid block")
assert_template(node, block, None)
self.log.info("submitblock: Test block decode failure")
assert_raises_rpc_error(-22, "Block decode failed", node.submitblock, b2x(block.serialize()[:-15]))
self.log.info("getblocktemplate: Test bad input hash for coinbase transaction")
bad_block = copy.deepcopy(block)
bad_block.vtx[0].vin[0].prevout.hash += 1
bad_block.vtx[0].rehash()
assert_template(node, bad_block, 'bad-cb-missing')
self.log.info("submitblock: Test invalid coinbase transaction")
assert_raises_rpc_error(-22, "Block does not start with a coinbase", node.submitblock, b2x(bad_block.serialize()))
self.log.info("getblocktemplate: Test truncated final transaction")
assert_raises_rpc_error(-22, "Block decode failed", node.getblocktemplate, {'data': b2x(block.serialize()[:-1]), 'mode': 'proposal'})
self.log.info("getblocktemplate: Test duplicate transaction")
bad_block = copy.deepcopy(block)
bad_block.vtx.append(bad_block.vtx[0])
assert_template(node, bad_block, 'bad-txns-duplicate')
self.log.info("getblocktemplate: Test invalid transaction")
bad_block = copy.deepcopy(block)
bad_tx = copy.deepcopy(bad_block.vtx[0])
bad_tx.vin[0].prevout.hash = 255
bad_tx.rehash()
bad_block.vtx.append(bad_tx)
assert_template(node, bad_block, 'bad-txns-inputs-missingorspent')
self.log.info("getblocktemplate: Test nonfinal transaction")
bad_block = copy.deepcopy(block)
bad_block.vtx[0].nLockTime = 2 ** 32 - 1
bad_block.vtx[0].rehash()
assert_template(node, bad_block, 'bad-txns-nonfinal')
self.log.info("getblocktemplate: Test bad tx count")
# The tx count is immediately after the block header
TX_COUNT_OFFSET = 80
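# (The 80-byte header packs: version 4B + prev block hash 32B +
# merkle root 32B + nTime 4B + nBits 4B + nNonce 4B.)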
bad_block_sn = bytearray(block.serialize())
assert_equal(bad_block_sn[TX_COUNT_OFFSET], 1)
bad_block_sn[TX_COUNT_OFFSET] += 1
assert_raises_rpc_error(-22, "Block decode failed", node.getblocktemplate, {'data': b2x(bad_block_sn), 'mode': 'proposal'})
self.log.info("getblocktemplate: Test bad bits")
bad_block = copy.deepcopy(block)
bad_block.nBits = 469762303 # impossible in the real world
assert_template(node, bad_block, 'bad-diffbits')
self.log.info("getblocktemplate: Test bad merkle root")
bad_block = copy.deepcopy(block)
bad_block.hashMerkleRoot += 1
assert_template(node, bad_block, 'bad-txnmrklroot', False)
self.log.info("getblocktemplate: Test bad timestamps")
bad_block = copy.deepcopy(block)
bad_block.nTime = 2 ** 31 - 1
assert_template(node, bad_block, 'time-too-new')
bad_block.nTime = 0
assert_template(node, bad_block, 'time-too-old')
self.log.info("getblocktemplate: Test not best block")
bad_block = copy.deepcopy(block)
bad_block.hashPrevBlock = 123
assert_template(node, bad_block, 'inconclusive-not-best-prevblk')
if __name__ == '__main__':
MiningTest().main()
| 41.058824 | 141 | 0.681411 | ["MIT"] | elliottminns/livecoin | test/functional/mining_basic.py | 5,584 | Python |
import tarfile
import textwrap
import pytoml
from .app import App
from .exceptions import TockLoaderException
from .tbfh import TBFHeader
class TAB:
'''
Tock Application Bundle object. This class handles the TAB format.
'''
def __init__ (self, tab_path):
self.tab = tarfile.open(tab_path)
def extract_app (self, arch):
'''
Return an `App` object from this TAB. You must specify the desired
MCU architecture so the correct binary can be retrieved.
'''
binary_tarinfo = self.tab.getmember('{}.bin'.format(arch))
binary = self.tab.extractfile(binary_tarinfo).read()
# First get the TBF header from the correct binary in the TAB
tbfh = TBFHeader(binary)
if tbfh.is_valid():
name_or_params = tbfh.get_app_name()
if isinstance(name_or_params, str):
name = name_or_params
else:
start = name_or_params[0]
end = start+name_or_params[1]
name = binary[start:end].decode('utf-8')
# Check that total size actually matches the binary that we got.
if tbfh.get_app_size() < len(binary):
# It's fine if the binary is smaller, but the binary cannot be
# longer than the amount of reserved space (`total_size` in the
# TBF header) for the app.
raise TockLoaderException('Invalid TAB, the app binary is longer than its defined total_size')
return App(tbfh, None, name, binary)
else:
raise TockLoaderException('Invalid TBF found in app in TAB')
def is_compatible_with_board (self, board):
'''
Check if the Tock app is compatible with a particular Tock board.
'''
metadata = self.parse_metadata()
if metadata['tab-version'] == 1:
return 'only-for-boards' not in metadata or \
board in metadata['only-for-boards'] or \
metadata['only-for-boards'] == ''
else:
raise TockLoaderException('Unable to understand version {} of metadata'.format(metadata['tab-version']))
def parse_metadata (self):
'''
Open and parse the included metadata file in the TAB.
'''
metadata_tarinfo = self.tab.getmember('metadata.toml')
metadata_str = self.tab.extractfile(metadata_tarinfo).read().decode('utf-8')
return pytoml.loads(metadata_str)
def get_supported_architectures (self):
'''
Return a list of architectures that this TAB has compiled binaries for.
'''
contained_files = self.tab.getnames()
return [i[:-4] for i in contained_files if i[-4:] == '.bin']
def get_tbf_header (self):
'''
Return a TBFHeader object with the TBF header from the app in the TAB.
TBF headers are not architecture specific, so we pull from a random
binary if there are multiple architectures supported.
'''
# Find a .bin file
for f in self.tab.getnames():
if f[-4:] == '.bin':
binary_tarinfo = self.tab.getmember(f)
binary = self.tab.extractfile(binary_tarinfo).read()
# Get the TBF header from a binary in the TAB
return TBFHeader(binary)
return None
def __str__ (self):
out = ''
metadata = self.parse_metadata()
out += 'TAB: {}\n'.format(metadata['name'])
for k,v in sorted(metadata.items()):
if k == 'name':
continue
out += ' {}: {}\n'.format(k,v)
out += ' supported architectures: {}\n'.format(', '.join(self.get_supported_architectures()))
out += ' TBF Header\n'
out += textwrap.indent(str(self.get_tbf_header()), ' ')
return out
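# A minimal usage sketch (hypothetical file name and architecture; not part
# of the library):
#
#     tab = TAB('blink.tab')                      # hypothetical path
#     print(tab.get_supported_architectures())
#     app = tab.extract_app('cortex-m4')          # hypothetical arch
#     print(app)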
| 32.009709 | 107 | 0.693358 | ["MIT"] | torfmaster/tockloader | tockloader/tab.py | 3,297 | Python |
from __future__ import absolute_import
from __future__ import unicode_literals
import types
import copy
from django import forms
from django.forms.forms import NON_FIELD_ERRORS
from django.core.validators import EMPTY_VALUES
from django.db import models
from django.db.models.fields import FieldDoesNotExist
from django.utils import six
from django.utils.text import capfirst
from django.utils.translation import ugettext as _
from sys import version_info
try:
from django.db.models.constants import LOOKUP_SEP
except ImportError: # pragma: nocover
# Django < 1.5 fallback
from django.db.models.sql.constants import LOOKUP_SEP # noqa
try:
from collections import OrderedDict
except ImportError: # pragma: nocover
# Django < 1.5 fallback
from django.utils.datastructures import SortedDict as OrderedDict # noqa
try:
from django.db.models.related import RelatedObject as ForeignObjectRel
except ImportError: # pragma: nocover
# Django >= 1.8 replaces RelatedObject with ForeignObjectRel
from django.db.models.fields.related import ForeignObjectRel
from .filters import (Filter, CharFilter, BooleanFilter,
ChoiceFilter, DateFilter, DateTimeFilter, TimeFilter, ModelChoiceFilter,
ModelMultipleChoiceFilter, NumberFilter)
ORDER_BY_FIELD = 'o'
# There is a bug with deepcopy in 2.6, patch if we are running python < 2.7
# http://bugs.python.org/issue1515
if version_info < (2, 7, 0):
def _deepcopy_method(x, memo):
return type(x)(x.im_func, copy.deepcopy(x.im_self, memo), x.im_class)
copy._deepcopy_dispatch[types.MethodType] = _deepcopy_method
class STRICTNESS(object):
"""
Values of False & True chosen for backward compatibility reasons.
Originally, these were the only options.
"""
IGNORE = False
RETURN_NO_RESULTS = True
RAISE_VALIDATION_ERROR = "RAISE"
def get_declared_filters(bases, attrs, with_base_filters=True):
filters = []
for filter_name, obj in list(attrs.items()):
if isinstance(obj, Filter):
obj = attrs.pop(filter_name)
if getattr(obj, 'name', None) is None:
obj.name = filter_name
filters.append((filter_name, obj))
filters.sort(key=lambda x: x[1].creation_counter)
if with_base_filters:
for base in bases[::-1]:
if hasattr(base, 'base_filters'):
filters = list(base.base_filters.items()) + filters
else:
for base in bases[::-1]:
if hasattr(base, 'declared_filters'):
filters = list(base.declared_filters.items()) + filters
return OrderedDict(filters)
def get_model_field(model, f):
parts = f.split(LOOKUP_SEP)
opts = model._meta
for name in parts[:-1]:
try:
rel = opts.get_field_by_name(name)[0]
except FieldDoesNotExist:
return None
if isinstance(rel, ForeignObjectRel):
if hasattr(rel, "related_model"):
# django >= 1.8 (ForeignObjectRel)
opts = rel.related_model._meta
else:
# django < 1.8 (RelatedObject)
opts = rel.opts
else:
model = rel.rel.to
opts = model._meta
try:
rel, model, direct, m2m = opts.get_field_by_name(parts[-1])
except FieldDoesNotExist:
return None
return rel
def filters_for_model(model, fields=None, exclude=None, filter_for_field=None,
filter_for_reverse_field=None):
field_dict = OrderedDict()
opts = model._meta
if fields is None:
fields = [f.name for f in sorted(opts.fields + opts.many_to_many)
if not isinstance(f, models.AutoField)]
# Loop through the list of fields.
for f in fields:
# Skip the field if excluded.
if exclude is not None and f in exclude:
continue
field = get_model_field(model, f)
# Do nothing if the field doesn't exist.
if field is None:
field_dict[f] = None
continue
if isinstance(field, ForeignObjectRel):
filter_ = filter_for_reverse_field(field, f)
if filter_:
field_dict[f] = filter_
# If fields is a dictionary, it must contain lists.
elif isinstance(fields, dict):
# Create a filter for each lookup type.
for lookup_type in fields[f]:
filter_ = filter_for_field(field, f, lookup_type)
if filter_:
filter_name = f
# Don't add "exact" to filter names
if lookup_type != 'exact':
filter_name = f + LOOKUP_SEP + lookup_type
field_dict[filter_name] = filter_
# If fields is a list, it contains strings.
else:
filter_ = filter_for_field(field, f)
if filter_:
field_dict[f] = filter_
return field_dict
def get_full_clean_override(together):
def full_clean(form):
def add_error(message):
try:
form.add_error(None, message)
except AttributeError:
form._errors[NON_FIELD_ERRORS] = message
def all_valid(fieldset):
cleaned_data = form.cleaned_data
count = len([i for i in fieldset if cleaned_data.get(i)])
return 0 < count < len(fieldset)
super(form.__class__, form).full_clean()
message = 'Following fields must be together: %s'
if isinstance(together[0], (list, tuple)):
for each in together:
if all_valid(each):
return add_error(message % ','.join(each))
elif all_valid(together):
return add_error(message % ','.join(together))
return full_clean
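# Illustrative example (hypothetical fields): with Meta.together set to
# ('min_price', 'max_price'), a submission that fills in only one of the two
# fields triggers the non-field error "Following fields must be together:
# min_price,max_price"; filling in both (or neither) passes.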
class FilterSetOptions(object):
def __init__(self, options=None):
self.model = getattr(options, 'model', None)
self.fields = getattr(options, 'fields', None)
self.exclude = getattr(options, 'exclude', None)
self.order_by = getattr(options, 'order_by', False)
self.form = getattr(options, 'form', forms.Form)
self.together = getattr(options, 'together', None)
class FilterSetMetaclass(type):
def __new__(cls, name, bases, attrs):
try:
parents = [b for b in bases if issubclass(b, FilterSet)]
except NameError:
# We are defining FilterSet itself here
parents = None
declared_filters = get_declared_filters(bases, attrs, False)
new_class = super(
FilterSetMetaclass, cls).__new__(cls, name, bases, attrs)
if not parents:
return new_class
opts = new_class._meta = FilterSetOptions(
getattr(new_class, 'Meta', None))
if opts.model:
filters = filters_for_model(opts.model, opts.fields, opts.exclude,
new_class.filter_for_field,
new_class.filter_for_reverse_field)
filters.update(declared_filters)
else:
filters = declared_filters
if None in filters.values():
raise TypeError("Meta.fields contains a field that isn't defined "
"on this FilterSet")
new_class.declared_filters = declared_filters
new_class.base_filters = filters
return new_class
FILTER_FOR_DBFIELD_DEFAULTS = {
models.AutoField: {
'filter_class': NumberFilter
},
models.CharField: {
'filter_class': CharFilter
},
models.TextField: {
'filter_class': CharFilter
},
models.BooleanField: {
'filter_class': BooleanFilter
},
models.DateField: {
'filter_class': DateFilter
},
models.DateTimeField: {
'filter_class': DateTimeFilter
},
models.TimeField: {
'filter_class': TimeFilter
},
models.OneToOneField: {
'filter_class': ModelChoiceFilter,
'extra': lambda f: {
'queryset': f.rel.to._default_manager.complex_filter(
f.rel.limit_choices_to),
'to_field_name': f.rel.field_name,
}
},
models.ForeignKey: {
'filter_class': ModelChoiceFilter,
'extra': lambda f: {
'queryset': f.rel.to._default_manager.complex_filter(
f.rel.limit_choices_to),
'to_field_name': f.rel.field_name
}
},
models.ManyToManyField: {
'filter_class': ModelMultipleChoiceFilter,
'extra': lambda f: {
'queryset': f.rel.to._default_manager.complex_filter(
f.rel.limit_choices_to),
}
},
models.DecimalField: {
'filter_class': NumberFilter,
},
models.SmallIntegerField: {
'filter_class': NumberFilter,
},
models.IntegerField: {
'filter_class': NumberFilter,
},
models.PositiveIntegerField: {
'filter_class': NumberFilter,
},
models.PositiveSmallIntegerField: {
'filter_class': NumberFilter,
},
models.FloatField: {
'filter_class': NumberFilter,
},
models.NullBooleanField: {
'filter_class': BooleanFilter,
},
models.SlugField: {
'filter_class': CharFilter,
},
models.EmailField: {
'filter_class': CharFilter,
},
models.FilePathField: {
'filter_class': CharFilter,
},
models.URLField: {
'filter_class': CharFilter,
},
models.IPAddressField: {
'filter_class': CharFilter,
},
models.CommaSeparatedIntegerField: {
'filter_class': CharFilter,
},
}
class BaseFilterSet(object):
filter_overrides = {}
order_by_field = ORDER_BY_FIELD
# What to do on on validation errors
strict = STRICTNESS.RETURN_NO_RESULTS
def __init__(self, data=None, queryset=None, prefix=None, strict=None):
self.is_bound = data is not None
self.data = data or {}
if queryset is None:
queryset = self._meta.model._default_manager.all()
self.queryset = queryset
self.form_prefix = prefix
if strict is not None:
self.strict = strict
self.filters = copy.deepcopy(self.base_filters)
# propagate the model being used through the filters
for filter_ in self.filters.values():
filter_.model = self._meta.model
# Apply the parent to the filters, this will allow the filters to access the filterset
for filter_key, filter_ in six.iteritems(self.filters):
filter_.parent = self
def __iter__(self):
for obj in self.qs:
yield obj
def __len__(self):
return len(self.qs)
def __getitem__(self, key):
return self.qs[key]
@property
def qs(self):
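# Build the filtered queryset lazily and cache it in self._qs; invalid
# input is handled according to self.strict (ignore, return no results,
# or raise a ValidationError).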
if not hasattr(self, '_qs'):
valid = self.is_bound and self.form.is_valid()
if self.is_bound and not valid:
if self.strict == STRICTNESS.RAISE_VALIDATION_ERROR:
raise forms.ValidationError(self.form.errors)
elif bool(self.strict) == STRICTNESS.RETURN_NO_RESULTS:
self._qs = self.queryset.none()
return self._qs
# else STRICTNESS.IGNORE... ignoring
# start with all the results and filter from there
qs = self.queryset.all()
for name, filter_ in six.iteritems(self.filters):
value = None
if valid:
value = self.form.cleaned_data[name]
else:
raw_value = self.form[name].value()
try:
value = self.form.fields[name].clean(raw_value)
except forms.ValidationError:
if self.strict == STRICTNESS.RAISE_VALIDATION_ERROR:
raise
elif bool(self.strict) == STRICTNESS.RETURN_NO_RESULTS:
self._qs = self.queryset.none()
return self._qs
# else STRICTNESS.IGNORE... ignoring
if value is not None: # valid & clean data
qs = filter_.filter(qs, value)
if self._meta.order_by:
order_field = self.form.fields[self.order_by_field]
data = self.form[self.order_by_field].data
ordered_value = None
try:
ordered_value = order_field.clean(data)
except forms.ValidationError:
pass
if ordered_value in EMPTY_VALUES and self.strict:
ordered_value = self.form.fields[self.order_by_field].choices[0][0]
if ordered_value:
qs = qs.order_by(*self.get_order_by(ordered_value))
self._qs = qs
return self._qs
def count(self):
return self.qs.count()
@property
def form(self):
if not hasattr(self, '_form'):
fields = OrderedDict([
(name, filter_.field)
for name, filter_ in six.iteritems(self.filters)])
fields[self.order_by_field] = self.ordering_field
Form = type(str('%sForm' % self.__class__.__name__),
(self._meta.form,), fields)
if self._meta.together:
Form.full_clean = get_full_clean_override(self._meta.together)
if self.is_bound:
self._form = Form(self.data, prefix=self.form_prefix)
else:
self._form = Form(prefix=self.form_prefix)
return self._form
def get_ordering_field(self):
if self._meta.order_by:
if isinstance(self._meta.order_by, (list, tuple)):
if isinstance(self._meta.order_by[0], (list, tuple)):
# e.g. (('field', 'Display name'), ...)
choices = [(f[0], f[1]) for f in self._meta.order_by]
else:
choices = [(f, _('%s (descending)' % capfirst(f[1:])) if f[0] == '-' else capfirst(f))
for f in self._meta.order_by]
else:
# add asc and desc field names
# use the filter's label if provided
choices = []
for f, fltr in self.filters.items():
choices.extend([
(fltr.name or f, fltr.label or capfirst(f)),
("-%s" % (fltr.name or f), _('%s (descending)' % (fltr.label or capfirst(f))))
])
return forms.ChoiceField(label=_("Ordering"), required=False,
choices=choices)
@property
def ordering_field(self):
if not hasattr(self, '_ordering_field'):
self._ordering_field = self.get_ordering_field()
return self._ordering_field
def get_order_by(self, order_choice):
return [order_choice]
@classmethod
def filter_for_field(cls, f, name, lookup_type='exact'):
filter_for_field = dict(FILTER_FOR_DBFIELD_DEFAULTS)
filter_for_field.update(cls.filter_overrides)
default = {
'name': name,
'label': capfirst(f.verbose_name),
'lookup_type': lookup_type
}
if f.choices:
default['choices'] = f.choices
return ChoiceFilter(**default)
data = filter_for_field.get(f.__class__)
if data is None:
# could be a derived field, inspect parents
for class_ in f.__class__.mro():
# skip if class_ is models.Field or object
# 1st item in mro() is original class
if class_ in (f.__class__, models.Field, object):
continue
data = filter_for_field.get(class_)
if data:
break
if data is None:
return
filter_class = data.get('filter_class')
default.update(data.get('extra', lambda f: {})(f))
if filter_class is not None:
return filter_class(**default)
@classmethod
def filter_for_reverse_field(cls, f, name):
rel = f.field.rel
queryset = f.field.model._default_manager.all()
default = {
'name': name,
'label': capfirst(rel.related_name),
'queryset': queryset,
}
if rel.multiple:
return ModelMultipleChoiceFilter(**default)
else:
return ModelChoiceFilter(**default)
class FilterSet(six.with_metaclass(FilterSetMetaclass, BaseFilterSet)):
pass
def filterset_factory(model):
meta = type(str('Meta'), (object,), {'model': model})
filterset = type(str('%sFilterSet' % model._meta.object_name),
(FilterSet,), {'Meta': meta})
return filterset
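# A minimal usage sketch (not part of the module; ``Book`` is a hypothetical
# model): ``filterset_factory`` builds a FilterSet class whose filters are
# derived from the model's fields, and binding GET-style data and reading
# ``qs`` applies the declared filters under the strictness rules above.
#
#     BookFilterSet = filterset_factory(Book)
#     fs = BookFilterSet(data={'title': 'Dune'}, queryset=Book.objects.all())
#     results = fs.qs    # filtered queryset
#     form = fs.form     # the auto-generated filter form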
| 33.884692 | 106 | 0.582962 | ["BSD-3-Clause"] | aioTV/django-filter | django_filters/filterset.py | 17,044 | Python
from BiblioAlly import catalog as cat, domain, translator as bibtex
class IeeeXTranslator(bibtex.Translator):
def _document_from_proto_document(self, proto_document):
bibtex.Translator._translate_kind(proto_document)
kind = proto_document['type']
fields = proto_document['field']
if 'title' in fields:
title = self._unbroken(self._uncurlied(fields['title']))
else:
title = ''
if 'abstract' in fields:
abstract = self._unbroken(self._uncurlied(fields['abstract']))
else:
abstract = ''
year = int(fields['year'])
author_field = ''
if 'author' in fields:
author_field = self._unbroken(self._all_uncurly(fields['author'].replace('}and', ' and')))
        if author_field == '':
            author_field = 'Author, Unnamed'
authors = self._authors_from_field(author_field)
affiliations = self._expand_affiliations(None, authors)
keywords = []
if 'keywords' in fields:
all_keywords = self._all_uncurly(fields['keywords']).split(';')
keyword_names = set()
for keyword_name in all_keywords:
sub_keyword_names = keyword_name.split(',')
for sub_keyword_name in sub_keyword_names:
name = sub_keyword_name.strip().capitalize()
if name not in keyword_names:
keyword_names.add(name)
keyword_names = list(keyword_names)
for keyword_name in keyword_names:
keywords.append(domain.Keyword(name=keyword_name))
document = domain.Document(proto_document['id'].strip(), kind, title, abstract, keywords, year, affiliations)
document.generator = "IEEE Xplore"
if 'doi' in fields:
document.doi = self._uncurlied(fields['doi'])
if 'journal' in fields:
document.journal = self._uncurlied(fields['journal'])
elif 'booktitle' in fields and kind == 'inproceedings':
document.journal = self._uncurlied(fields['booktitle'])
if 'number' in fields:
if len(self._uncurlied(fields['number'])) > 0:
document.number = self._uncurlied(fields['number'])
if 'pages' in fields:
if len(self._uncurlied(fields['pages'])) > 0:
document.pages = self._uncurlied(fields['pages'])
if 'url' in fields:
if len(self._uncurlied(fields['url'])) > 0:
document.url = self._uncurlied(fields['url'])
if 'volume' in fields:
if len(self._uncurlied(fields['volume'])) > 0:
document.volume = self._uncurlied(fields['volume'])
return document
def _proto_document_from_document(self, document: domain.Document):
kind = document.kind
if kind == 'proceedings':
kind = 'inproceedings'
fields = dict()
fields['external_key'] = document.external_key
doc_authors = document.authors
doc_authors.sort(key=lambda doc_author: doc_author.first)
doc_authors.reverse()
all_authors = [(doc_author.author.long_name if doc_author.author.long_name is not None
else doc_author.author.short_name) for doc_author in doc_authors]
fields['author'] = self._curly(all_authors, separator=' and ')
if document.journal is not None:
if document.kind == 'article':
fields['journal'] = self._curly(str(document.journal))
else:
fields['booktitle'] = self._curly(str(document.journal))
fields['title'] = self._curly(document.title)
affiliations = []
for doc_author in doc_authors:
institution = doc_author.institution
if institution is not None:
affiliation = ', '.join([institution.name, institution.country])
affiliations.append(affiliation)
if len(affiliations) > 0:
fields['affiliation'] = self._curly(affiliations, '; ')
fields['year'] = self._curly(str(document.year))
if document.international_number is not None:
fields['ISSN'] = self._curly(str(document.international_number))
if document.publisher is not None:
fields['publisher'] = self._curly(str(document.publisher))
if document.address is not None:
fields['address'] = self._curly(str(document.address))
if document.doi is not None:
fields['doi'] = self._curly(str(document.doi))
        if document.url is not None:
            fields['url'] = self._curly(str(document.url))
fields['abstract'] = self._curly(document.abstract)
if document.pages is not None:
fields['pages'] = self._curly(str(document.pages))
if document.volume is not None:
fields['volume'] = self._curly(str(document.volume))
if document.number is not None:
fields['number'] = self._curly(str(document.number))
if document.language is not None:
fields['language'] = self._curly(str(document.language))
keywords = [keyword.name for keyword in document.keywords]
fields['keywords'] = self._curly(keywords, ';')
if len(document.references) > 0:
fields['references'] = self._curly('; '.join(document.references))
if document.document_type is not None:
fields['document_type'] = self._curly(document.document_type)
fields['source'] = self._curly(document.generator)
proto_document = {
'type': kind,
'fields': fields
}
return proto_document
def _as_bibtex(self, proto_document):
kind = proto_document['type'].upper()
fields = proto_document['fields']
external_key = fields['external_key']
del fields['external_key']
key_value = []
for key, value in fields.items():
key_value.append(f'{key}={value}')
bibtex = f'@{kind}' + '{' + f'{external_key},\n' + ',\n'.join(key_value) + '\n}'
return bibtex
IeeeXplore = "IeeeXplore"
cat.Catalog.translators[IeeeXplore] = IeeeXTranslator
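# A minimal sketch (assumes only the API defined above; the proto-document
# shape and the no-argument constructor are assumptions): translators are
# looked up by name in the catalog registry, and _as_bibtex renders a
# {'type', 'fields'} dict whose 'fields' carry an 'external_key' plus
# curly-wrapped BibTeX values.
#
#     translator = cat.Catalog.translators[IeeeXplore]()
#     proto = {'type': 'article',
#              'fields': {'external_key': 'smith2020',
#                         'title': '{A Title}',
#                         'year': '{2020}'}}
#     print(translator._as_bibtex(proto))
#     # @ARTICLE{smith2020,
#     # title={A Title},
#     # year={2020}
#     # }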
| 44.992806 | 117 | 0.605373 | ["MIT"] | gambit4348/BiblioAlly | BiblioAlly/ieee.py | 6,254 | Python
"""empty message
Revision ID: 780c29109b25
Revises: 911cc5d772fc
Create Date: 2020-08-30 15:22:15.026266
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "780c29109b25"
down_revision = "911cc5d772fc"
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_foreign_key(None, "feedback", "user", ["author_id"], ["id"])
op.drop_column("feedback", "author")
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column("feedback", sa.Column("author", sa.VARCHAR(length=20), nullable=True))
    # NOTE: autogenerate emitted ``None`` for the constraint name; replace it
    # with the real name (or configure a naming_convention) before running,
    # since drop_constraint() requires an explicit name.
    op.drop_constraint(None, "feedback", type_="foreignkey")
# ### end Alembic commands ###
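# A minimal sketch (hypothetical names, not part of this migration): giving
# SQLAlchemy a naming_convention makes autogenerate emit deterministic
# constraint names instead of the None placeholders above, so downgrade()
# can drop the foreign key by name.
#
# from sqlalchemy import MetaData
#
# convention = {
#     "fk": "fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s",
# }
# metadata = MetaData(naming_convention=convention)
# # With this convention the constraint would be named
# # "fk_feedback_author_id_user", and downgrade() could run:
# # op.drop_constraint("fk_feedback_author_id_user", "feedback", type_="foreignkey")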
| 26.806452 | 89 | 0.665463 | ["MIT"] | mutalisk999/Flog | migrations/versions/780c29109b25_.py | 831 | Python
import numpy as np
import os
import cv2
from PIL import Image
import random
import itertools
import matplotlib.pyplot as plt  # plt is used to display images
from tqdm import tqdm
# Parse the parking-slot annotation file
def read_pslot(annt_file):
# print(annt_file)
with open(annt_file, "r") as f:
annt = f.readlines()
# print("annt", annt)
l = []
l_ign = []
for line in annt:
line_annt = line.strip('\n').split(' ')
# print(line_annt)
if len(line_annt) != 13 or line_annt[0] != 'line' or line_annt[-4] == '3':
continue
if line_annt[-4] in ['0', '1']:
l.append(np.array([int(line_annt[i + 1]) for i in range(8)]))
# continue
# if line_annt[-4] in ['1', '5']:
# l_ign.append(np.array([int(line_annt[i + 1]) for i in range(8)]))
# continue
return l, l_ign
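# A sketch of the annotation line format this parser expects (inferred from
# the checks above; the example values are hypothetical): 13 whitespace-
# separated tokens per slot, "line" + eight corner coordinates + four status
# fields, where the fourth-from-last token is the slot status ('3' and
# malformed lines are skipped, '0'/'1' are kept).
#
#     line 120 80 280 82 282 260 118 258 0 0 1 0
#
# read_pslot() returns this slot as
# np.array([120, 80, 280, 82, 282, 260, 118, 258]).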
# Mark the four corner points on the image
def colorize(points_list, img, save_path, item, line, point_color):
save_path = os.path.join(save_path, str(
item.strip('.jpg'))+"_"+str(line)+".jpg")
img2 = img.copy()
# print(save_path)
# points_list = 384 * np.abs(np.array(outputs[0], dtype=np.float))
point_size = 1
thickness = 4 # 可以为 0、4、8
for i in range(4):
cv2.circle(img2, (int(points_list[i][0]), int(points_list[i][1])),
point_size, point_color, thickness)
# print(save_path)
cv2.imwrite(save_path, img2)
# Draw the slot outline
def paint_line(img, dst, cropimg_path, num, name):
img2 = img.copy()
cv2.line(img2, (int(dst[0][0]), int(dst[0][1])), (int(
dst[1][0]), int(dst[1][1])), (255, 0, 0), 5)
cv2.line(img2, (int(dst[1][0]), int(dst[1][1])), (int(
dst[2][0]), int(dst[2][1])), (255, 0, 0), 5)
cv2.line(img2, (int(dst[2][0]), int(dst[2][1])), (int(
dst[3][0]), int(dst[3][1])), (255, 0, 0), 5)
cv2.line(img2, (int(dst[3][0]), int(dst[3][1])), (int(
dst[0][0]), int(dst[0][1])), (255, 0, 0), 5)
    cropimg_path1 = os.path.join(
        cropimg_path, name.strip('.jpg')+'_'+str(num)+'.jpg')
cv2.imwrite(cropimg_path1, img2)
def Crop_pic(ps, img_path, cropimg_path, perspective_path, txt_file, i, trans_path, save_path1, save_path2):
# single pic
img = cv2.imread(img_path)
perspective3 = np.float32([[0, 0], [383, 0], [383, 383], [0, 383]])
perspective3_ = np.float32([[0, 0], [383, 0], [383, 383]])
num = 0
for line in ps:
num = num + 1
        # Randomly generate 4 jittered coordinates
arr0 = random.randint(80, 120)
arr1 = random.randint(80, 120)
arr2 = random.randint(263, 303)
arr3 = random.randint(80, 120)
arr4 = random.randint(263, 303)
arr5 = random.randint(263, 303)
arr6 = random.randint(80, 120)
arr7 = random.randint(263, 303)
perspective0 = np.float32([[line[0], line[1]], [line[2], line[3]], [
line[4], line[5]], [line[6], line[7]]])
perspective0_ = np.float32([[line[0], line[1]], [line[2], line[3]], [
line[4], line[5]]])
colorize(perspective0, img, save_path1, i, num, (0, 255, 0))
perspective1 = np.float32(
[[arr0, arr1], [arr2, arr3], [arr4, arr5], [arr6, arr7]])
perspective1_ = np.float32(
[[arr0, arr1], [arr2, arr3], [arr4, arr5]])
        # Compute the inverse transform matrix
# trans_inv = cv2.getPerspectiveTransform(perspective1, perspective0)
trans_inv = cv2.getAffineTransform(perspective1_, perspective0_)
        # Compute the point coordinates after the inverse projection transform
dst = []
# mat = np.array(
# [[[0, 0], [383, 0], [383, 383], [0, 383]]], dtype=np.float32)
mat = np.array(
[[0, 0, 1], [383, 0, 1], [383, 383, 1], [0, 383, 1]], dtype=np.float32)
mat = mat.transpose()
# dst = cv2.perspectiveTransform(mat, trans_inv)
dst = np.dot(trans_inv, mat)
dst = dst.transpose()
        # Draw the outline on the original image (the file name is now passed
        # explicitly instead of relying on the module-level loop variable)
        paint_line(img, dst, cropimg_path, num, i)
        # Project the parking slot to obtain its image at 384x384 resolution
# perspective2 = np.float32([[dst[0][0][0], dst[0][0][1]], [dst[0][1][0], dst[0][1][1]], [
# dst[0][2][0], dst[0][2][1]], [dst[0][3][0], dst[0][3][1]]])
perspective2_ = np.float32([[dst[0][0], dst[0][1]], [dst[1][0], dst[1][1]], [
dst[2][0], dst[2][1]]])
# trans = cv2.getPerspectiveTransform(perspective2, perspective3)
# dst2 = cv2.warpPerspective(img, trans, (384, 384))
trans = cv2.getAffineTransform(perspective2_, perspective3_)
dst2 = cv2.warpAffine(img, trans, (384, 384))
        # Save the coordinates of the original image's four inner corner points on the 384x384 image
# mat2 = np.array([[[line[0], line[1]], [line[2], line[3]], [
# line[4], line[5]], [line[6], line[7]]]], dtype=np.float32)
mat2 = np.array([[line[0], line[1], 1], [line[2], line[3], 1], [
line[4], line[5], 1], [line[6], line[7], 1]], dtype=np.float32)
mat2 = mat2.transpose()
point = np.dot(trans, mat2)
point = point.transpose()
# point = cv2.perspectiveTransform(mat2, trans)
# point = np.dot(mat2, trans)
perspective_path1 = os.path.join(
perspective_path, i.strip('.jpg')+'_'+str(num)+'.jpg')
# print(perspective_path)
cv2.imwrite(perspective_path1, dst2)
colorize(point, dst2, save_path2, i, num, (0, 255, 0))
        # Record the four corner coordinates
txt_file1 = os.path.join(
txt_file, i.strip('.jpg')+'_'+str(num)+'_OA.txt')
with open(txt_file1, "w") as f:
for j in range(4):
f.write(str(point[j][0]))
f.write(' ')
f.write(str(point[j][1]))
f.write('\n')
        # Record the transform matrix
trans_path1 = os.path.join(
trans_path, i.strip('.jpg')+'_'+str(num)+'.txt')
with open(trans_path1, "w") as ff:
for j in range(2):
for k in range(3):
ff.write(str(trans_inv[j][k]))
ff.write(" ")
# Compute the error between the four predicted points and their ground-truth points
def get_acc(y, y_hat, dis):
    total = 0
for i in range(4):
total += ((y[i][0]-y_hat[i][0])**2 + (y[i][1]-y_hat[i][1])**2)**0.5
total /= 4
if total < dis:
return 1
else:
return 0
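# Worked example (hypothetical numbers): if every predicted corner is offset
# from the ground truth by (3, 4) pixels, each corner error is
# sqrt(3**2 + 4**2) = 5, so the mean error is 5 and get_acc returns 1 only
# for thresholds dis > 5.
#
#     y = [(0, 0), (10, 0), (10, 10), (0, 10)]
#     y_hat = [(3, 4), (13, 4), (13, 14), (3, 14)]
#     get_acc(y, y_hat, 6)   # -> 1
#     get_acc(y, y_hat, 5)   # -> 0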
def output_pic(img_path, output_path, trans_path, fina_path, ps2, pix, point_path):
img_pred = cv2.imread(img_path)
point_pred = []
trans_inv = []
point_pred = np.loadtxt(output_path)
point_pred = 384*np.expand_dims(point_pred, axis=0)
trans_inv = np.loadtxt(trans_path)
trans_inv = trans_inv.reshape(3, 3)
trans_inv = np.mat(trans_inv)
point_ground = np.loadtxt(point_path)
point_ground = np.expand_dims(point_ground, axis=0)
point_ground2 = cv2.perspectiveTransform(point_ground, trans_inv)
point_size = 1
thickness = 4
for i in range(4):
cv2.circle(img_pred, (int(point_ground2[0][i][0]), int(point_ground2[0][i][1])),
point_size, (0, 255, 0), thickness)
cv2.imwrite(fina_path, img_pred)
point_pred2 = cv2.perspectiveTransform(point_pred, trans_inv)
    # red for predicted points
point_color = (0, 0, 255)
point_color2 = (0, 255, 0)
for i in range(4):
cv2.circle(img_pred, (int(point_pred2[0][i][0]), int(point_pred2[0][i][1])),
point_size, point_color, thickness)
cv2.imwrite(fina_path, img_pred)
point_pred3 = point_pred2[0]
ps2 = ps2[0].reshape(4, 2)
tmp = get_acc(point_pred3, point_ground2[0], pix)
return tmp
# Accuracy over the test set at a given pixel threshold
def output(pix):
accuracy = 0
for i in os.listdir(test_dir):
output_path = os.path.join(
"/media/alpha4TB/ziqi/Parking/CNN/output", i.strip('.jpg')+'.txt')
img_path = os.path.join(
"/media/alpha4TB/ziqi/Parking/Ps_locate_dataset/img", i)
trans_inv = os.path.join(
"/media/alpha4TB/ziqi/Parking/Ps_locate_dataset/trans_inv", i.strip('.jpg')+'.txt')
fina_path = os.path.join(
"/media/alpha4TB/ziqi/Parking/Ps_locate_dataset/fina", i)
annt_path2 = os.path.join(
'./Ps_locate_dataset/annt', i.strip('.jpg')+'_OA.txt')
point_path = os.path.join(
"/media/alpha4TB/ziqi/Parking/Ps_locate_dataset/point", i.strip('.jpg')+'_OA.txt')
# print(fina_path)
ps2, _ = read_pslot(annt_path2)
tmp = output_pic(img_path, output_path,
trans_inv, fina_path, ps2, pix, point_path)
accuracy += tmp
return accuracy
if __name__ == "__main__":
data_dir = '/media/home_bak/ziqi/park/Ps_locate_dataset/PLD_BirdView_TrainingDaraSet_All/pic'
label_dir = '/media/home_bak/ziqi/park/Ps_locate_dataset/PLD_BirdView_TrainingDaraSet_All/annotation'
crop_dir = '/media/home_bak/ziqi/park/Ps_locate_dataset/PLD_BirdView_TrainingDaraSet_All/crop_img'
perspective_dir = '/media/home_bak/ziqi/park/Ps_locate_dataset/PLD_BirdView_TrainingDaraSet_All/perspective_img'
txt_dir = '/media/home_bak/ziqi/park/Ps_locate_dataset/PLD_BirdView_TrainingDaraSet_All/point'
cnt = 0
f1 = open(
"/media/home_bak/ziqi/park/Ps_locate_dataset/PLD_BirdView_TrainingDaraSet_All/train_list.txt", "w")
# f2 = open("./Ps_locate_dataset/val_list.txt", "w")
test_dir = "/media/home_bak/ziqi/park/Ps_locate_dataset/PLD_BirdView_TrainingDaraSet_All/test_img"
trans_path = "/media/home_bak/ziqi/park/Ps_locate_dataset/PLD_BirdView_TrainingDaraSet_All/trans_inv"
save_path1 = "/media/home_bak/ziqi/park/Ps_locate_dataset/PLD_BirdView_TrainingDaraSet_All/src_img"
save_path2 = "/media/home_bak/ziqi/park/Ps_locate_dataset/PLD_BirdView_TrainingDaraSet_All/perspective2_img"
pbar = tqdm(total=len(os.listdir(data_dir)))
for i in os.listdir(data_dir):
# print(i)
annt_file = os.path.join(label_dir, i.strip('.jpg')+'_OA.txt')
img_path = os.path.join(data_dir, i)
ps, _ = read_pslot(annt_file)
Crop_pic(ps, img_path, crop_dir,
perspective_dir, txt_dir, i, trans_path, save_path1, save_path2)
pbar.update(1)
pbar.close()
# acc = []
# for k in range(31):
# print("k", k)
# x1 = output(k)
# x1 = 100 * x1 / 743
# acc.append(x1)
# x1 = round(x1, 3)
# print(acc)
# print(len(acc))
    # # set the figure size
    # plt.figure(figsize=(30, 15))
    # # title
    # plt.title("accuracy distribution")
    # # data
    # plt.bar(range(len(acc)), acc)
    # # x-axis label
    # plt.xlabel('pixel')
    # # y-axis label
    # plt.ylabel('accuracy')
    # # # add numeric labels on the bars
# # for a, b in zip(x, acc):
# # plt.text(a, b, b, ha='center', va='bottom', fontsize=10)
# plt.savefig(
# "/media/alpha4TB/ziqi/Parking/Ps_locate_dataset/PLD_BirdView_TrainingDaraSet_All/accuracy.png")
    # Save the file names of the training data
filenames = os.listdir(perspective_dir)
filenames.sort()
print(filenames[0])
for i in os.listdir(perspective_dir):
perspective_path = os.path.join(perspective_dir, i)
f1.write(perspective_path)
f1.write('\n')
f1.close()
| 33.156156 | 116 | 0.580926 | ["Apache-2.0"] | ziqi123/AutoParking | preprocessing/make_dataset_new.py | 11,369 | Python
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/cloud/vision_v1p1beta1/proto/image_annotator.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
from google.cloud.vision_v1p1beta1.proto import geometry_pb2 as google_dot_cloud_dot_vision__v1p1beta1_dot_proto_dot_geometry__pb2
from google.cloud.vision_v1p1beta1.proto import text_annotation_pb2 as google_dot_cloud_dot_vision__v1p1beta1_dot_proto_dot_text__annotation__pb2
from google.cloud.vision_v1p1beta1.proto import web_detection_pb2 as google_dot_cloud_dot_vision__v1p1beta1_dot_proto_dot_web__detection__pb2
from google.rpc import status_pb2 as google_dot_rpc_dot_status__pb2
from google.type import color_pb2 as google_dot_type_dot_color__pb2
from google.type import latlng_pb2 as google_dot_type_dot_latlng__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/cloud/vision_v1p1beta1/proto/image_annotator.proto',
package='google.cloud.vision.v1p1beta1',
syntax='proto3',
  serialized_pb=_b('\n9google/cloud/vision_v1p1beta1/proto/image_annotator.proto\x12\x1dgoogle.cloud.vision.v1p1beta1\x1a\x1cgoogle/api/annotations.proto\x1a\x32google/cloud/vision_v1p1beta1/proto/geometry.proto\x1a\x39google/cloud/vision_v1p1beta1/proto/text_annotation.proto\x1a\x37google/cloud/vision_v1p1beta1/proto/web_detection.proto\x1a\x17google/rpc/status.proto\x1a\x17google/type/color.proto\x1a\x18google/type/latlng.proto\"\xe1\x02\n\x07\x46\x65\x61ture\x12\x39\n\x04type\x18\x01 \x01(\x0e\x32+.google.cloud.vision.v1p1beta1.Feature.Type\x12\x13\n\x0bmax_results\x18\x02 \x01(\x05\x12\r\n\x05model\x18\x03 \x01(\t\"\xf6\x01\n\x04Type\x12\x14\n\x10TYPE_UNSPECIFIED\x10\x00\x12\x12\n\x0e\x46\x41\x43\x45_DETECTION\x10\x01\x12\x16\n\x12LANDMARK_DETECTION\x10\x02\x12\x12\n\x0eLOGO_DETECTION\x10\x03\x12\x13\n\x0fLABEL_DETECTION\x10\x04\x12\x12\n\x0eTEXT_DETECTION\x10\x05\x12\x1b\n\x17\x44OCUMENT_TEXT_DETECTION\x10\x0b\x12\x19\n\x15SAFE_SEARCH_DETECTION\x10\x06\x12\x14\n\x10IMAGE_PROPERTIES\x10\x07\x12\x0e\n\nCROP_HINTS\x10\t\x12\x11\n\rWEB_DETECTION\x10\n\"7\n\x0bImageSource\x12\x15\n\rgcs_image_uri\x18\x01 \x01(\t\x12\x11\n\timage_uri\x18\x02 \x01(\t\"T\n\x05Image\x12\x0f\n\x07\x63ontent\x18\x01 \x01(\x0c\x12:\n\x06source\x18\x02 \x01(\x0b\x32*.google.cloud.vision.v1p1beta1.ImageSource\"\x9b\x0e\n\x0e\x46\x61\x63\x65\x41nnotation\x12\x42\n\rbounding_poly\x18\x01 \x01(\x0b\x32+.google.cloud.vision.v1p1beta1.BoundingPoly\x12\x45\n\x10\x66\x64_bounding_poly\x18\x02 \x01(\x0b\x32+.google.cloud.vision.v1p1beta1.BoundingPoly\x12I\n\tlandmarks\x18\x03 \x03(\x0b\x32\x36.google.cloud.vision.v1p1beta1.FaceAnnotation.Landmark\x12\x12\n\nroll_angle\x18\x04 \x01(\x02\x12\x11\n\tpan_angle\x18\x05 \x01(\x02\x12\x12\n\ntilt_angle\x18\x06 \x01(\x02\x12\x1c\n\x14\x64\x65tection_confidence\x18\x07 \x01(\x02\x12\x1e\n\x16landmarking_confidence\x18\x08 \x01(\x02\x12\x41\n\x0ejoy_likelihood\x18\t \x01(\x0e\x32).google.cloud.vision.v1p1beta1.Likelihood\x12\x44\n\x11sorrow_likelihood\x18\n \x01(\x0e\x32).google.cloud.vision.v1p1beta1.Likelihood\x12\x43\n\x10\x61nger_likelihood\x18\x0b \x01(\x0e\x32).google.cloud.vision.v1p1beta1.Likelihood\x12\x46\n\x13surprise_likelihood\x18\x0c \x01(\x0e\x32).google.cloud.vision.v1p1beta1.Likelihood\x12K\n\x18under_exposed_likelihood\x18\r \x01(\x0e\x32).google.cloud.vision.v1p1beta1.Likelihood\x12\x45\n\x12\x62lurred_likelihood\x18\x0e \x01(\x0e\x32).google.cloud.vision.v1p1beta1.Likelihood\x12\x46\n\x13headwear_likelihood\x18\x0f \x01(\x0e\x32).google.cloud.vision.v1p1beta1.Likelihood\x1a\xc7\x07\n\x08Landmark\x12I\n\x04type\x18\x03 \x01(\x0e\x32;.google.cloud.vision.v1p1beta1.FaceAnnotation.Landmark.Type\x12\x39\n\x08position\x18\x04 \x01(\x0b\x32\'.google.cloud.vision.v1p1beta1.Position\"\xb4\x06\n\x04Type\x12\x14\n\x10UNKNOWN_LANDMARK\x10\x00\x12\x0c\n\x08LEFT_EYE\x10\x01\x12\r\n\tRIGHT_EYE\x10\x02\x12\x18\n\x14LEFT_OF_LEFT_EYEBROW\x10\x03\x12\x19\n\x15RIGHT_OF_LEFT_EYEBROW\x10\x04\x12\x19\n\x15LEFT_OF_RIGHT_EYEBROW\x10\x05\x12\x1a\n\x16RIGHT_OF_RIGHT_EYEBROW\x10\x06\x12\x19\n\x15MIDPOINT_BETWEEN_EYES\x10\x07\x12\x0c\n\x08NOSE_TIP\x10\x08\x12\r\n\tUPPER_LIP\x10\t\x12\r\n\tLOWER_LIP\x10\n\x12\x0e\n\nMOUTH_LEFT\x10\x0b\x12\x0f\n\x0bMOUTH_RIGHT\x10\x0c\x12\x10\n\x0cMOUTH_CENTER\x10\r\x12\x15\n\x11NOSE_BOTTOM_RIGHT\x10\x0e\x12\x14\n\x10NOSE_BOTTOM_LEFT\x10\x0f\x12\x16\n\x12NOSE_BOTTOM_CENTER\x10\x10\x12\x19\n\x15LEFT_EYE_TOP_BOUNDARY\x10\x11\x12\x19\n\x15LEFT_EYE_RIGHT_CORNER\x10\x12\x12\x1c\n\x18LEFT_EYE_BOTTOM_BOUNDARY\x10\x13\x12\x18\n\x14LEFT_EYE_LEFT_CORNER\x10\x14\x12\x1a\n\x16RIGHT_EYE_TOP_BOUNDARY\x10\x15\x12\x1a\n\x16RIGHT_EYE_RIGHT_CORNER\x10\x16\x12\x1d\n\x19RIGHT_EYE_BOTTOM_BOUNDARY\x10\x17\x12\x19\n\x15RIGHT_EYE_LEFT_CORNER\x10\x18\x12\x1f\n\x1bLEFT_EYEBROW_UPPER_MIDPOINT\x10\x19\x12 \n\x1cRIGHT_EYEBROW_UPPER_MIDPOINT\x10\x1a\x12\x14\n\x10LEFT_EAR_TRAGION\x10\x1b\x12\x15\n\x11RIGHT_EAR_TRAGION\x10\x1c\x12\x12\n\x0eLEFT_EYE_PUPIL\x10\x1d\x12\x13\n\x0fRIGHT_EYE_PUPIL\x10\x1e\x12\x15\n\x11\x46OREHEAD_GLABELLA\x10\x1f\x12\x11\n\rCHIN_GNATHION\x10 \x12\x14\n\x10\x43HIN_LEFT_GONION\x10!\x12\x15\n\x11\x43HIN_RIGHT_GONION\x10\"\"4\n\x0cLocationInfo\x12$\n\x07lat_lng\x18\x01 \x01(\x0b\x32\x13.google.type.LatLng\"=\n\x08Property\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t\x12\x14\n\x0cuint64_value\x18\x03 \x01(\x04\"\xbc\x02\n\x10\x45ntityAnnotation\x12\x0b\n\x03mid\x18\x01 \x01(\t\x12\x0e\n\x06locale\x18\x02 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x03 \x01(\t\x12\r\n\x05score\x18\x04 \x01(\x02\x12\x12\n\nconfidence\x18\x05 \x01(\x02\x12\x12\n\ntopicality\x18\x06 \x01(\x02\x12\x42\n\rbounding_poly\x18\x07 \x01(\x0b\x32+.google.cloud.vision.v1p1beta1.BoundingPoly\x12>\n\tlocations\x18\x08 \x03(\x0b\x32+.google.cloud.vision.v1p1beta1.LocationInfo\x12;\n\nproperties\x18\t \x03(\x0b\x32\'.google.cloud.vision.v1p1beta1.Property\"\xbc\x02\n\x14SafeSearchAnnotation\x12\x38\n\x05\x61\x64ult\x18\x01 \x01(\x0e\x32).google.cloud.vision.v1p1beta1.Likelihood\x12\x38\n\x05spoof\x18\x02 \x01(\x0e\x32).google.cloud.vision.v1p1beta1.Likelihood\x12:\n\x07medical\x18\x03 \x01(\x0e\x32).google.cloud.vision.v1p1beta1.Likelihood\x12;\n\x08violence\x18\x04 \x01(\x0e\x32).google.cloud.vision.v1p1beta1.Likelihood\x12\x37\n\x04racy\x18\t \x01(\x0e\x32).google.cloud.vision.v1p1beta1.Likelihood\"a\n\x0bLatLongRect\x12(\n\x0bmin_lat_lng\x18\x01 \x01(\x0b\x32\x13.google.type.LatLng\x12(\n\x0bmax_lat_lng\x18\x02 \x01(\x0b\x32\x13.google.type.LatLng\"U\n\tColorInfo\x12!\n\x05\x63olor\x18\x01 \x01(\x0b\x32\x12.google.type.Color\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x16\n\x0epixel_fraction\x18\x03 \x01(\x02\"T\n\x18\x44ominantColorsAnnotation\x12\x38\n\x06\x63olors\x18\x01 \x03(\x0b\x32(.google.cloud.vision.v1p1beta1.ColorInfo\"c\n\x0fImageProperties\x12P\n\x0f\x64ominant_colors\x18\x01 \x01(\x0b\x32\x37.google.cloud.vision.v1p1beta1.DominantColorsAnnotation\"\x7f\n\x08\x43ropHint\x12\x42\n\rbounding_poly\x18\x01 \x01(\x0b\x32+.google.cloud.vision.v1p1beta1.BoundingPoly\x12\x12\n\nconfidence\x18\x02 \x01(\x02\x12\x1b\n\x13importance_fraction\x18\x03 \x01(\x02\"R\n\x13\x43ropHintsAnnotation\x12;\n\ncrop_hints\x18\x01 \x03(\x0b\x32\'.google.cloud.vision.v1p1beta1.CropHint\"(\n\x0f\x43ropHintsParams\x12\x15\n\raspect_ratios\x18\x01 \x03(\x02\"1\n\x12WebDetectionParams\x12\x1b\n\x13include_geo_results\x18\x02 \x01(\x08\"\x85\x02\n\x0cImageContext\x12\x41\n\rlat_long_rect\x18\x01 \x01(\x0b\x32*.google.cloud.vision.v1p1beta1.LatLongRect\x12\x16\n\x0elanguage_hints\x18\x02 \x03(\t\x12I\n\x11\x63rop_hints_params\x18\x04 \x01(\x0b\x32..google.cloud.vision.v1p1beta1.CropHintsParams\x12O\n\x14web_detection_params\x18\x06 \x01(\x0b\x32\x31.google.cloud.vision.v1p1beta1.WebDetectionParams\"\xc9\x01\n\x14\x41nnotateImageRequest\x12\x33\n\x05image\x18\x01 \x01(\x0b\x32$.google.cloud.vision.v1p1beta1.Image\x12\x38\n\x08\x66\x65\x61tures\x18\x02 \x03(\x0b\x32&.google.cloud.vision.v1p1beta1.Feature\x12\x42\n\rimage_context\x18\x03 \x01(\x0b\x32+.google.cloud.vision.v1p1beta1.ImageContext\"\xc2\x06\n\x15\x41nnotateImageResponse\x12G\n\x10\x66\x61\x63\x65_annotations\x18\x01 \x03(\x0b\x32-.google.cloud.vision.v1p1beta1.FaceAnnotation\x12M\n\x14landmark_annotations\x18\x02 \x03(\x0b\x32/.google.cloud.vision.v1p1beta1.EntityAnnotation\x12I\n\x10logo_annotations\x18\x03 \x03(\x0b\x32/.google.cloud.vision.v1p1beta1.EntityAnnotation\x12J\n\x11label_annotations\x18\x04 \x03(\x0b\x32/.google.cloud.vision.v1p1beta1.EntityAnnotation\x12I\n\x10text_annotations\x18\x05 \x03(\x0b\x32/.google.cloud.vision.v1p1beta1.EntityAnnotation\x12K\n\x14\x66ull_text_annotation\x18\x0c \x01(\x0b\x32-.google.cloud.vision.v1p1beta1.TextAnnotation\x12S\n\x16safe_search_annotation\x18\x06 \x01(\x0b\x32\x33.google.cloud.vision.v1p1beta1.SafeSearchAnnotation\x12S\n\x1bimage_properties_annotation\x18\x08 \x01(\x0b\x32..google.cloud.vision.v1p1beta1.ImageProperties\x12Q\n\x15\x63rop_hints_annotation\x18\x0b \x01(\x0b\x32\x32.google.cloud.vision.v1p1beta1.CropHintsAnnotation\x12\x42\n\rweb_detection\x18\r \x01(\x0b\x32+.google.cloud.vision.v1p1beta1.WebDetection\x12!\n\x05\x65rror\x18\t \x01(\x0b\x32\x12.google.rpc.Status\"c\n\x1a\x42\x61tchAnnotateImagesRequest\x12\x45\n\x08requests\x18\x01 \x03(\x0b\x32\x33.google.cloud.vision.v1p1beta1.AnnotateImageRequest\"f\n\x1b\x42\x61tchAnnotateImagesResponse\x12G\n\tresponses\x18\x01 \x03(\x0b\x32\x34.google.cloud.vision.v1p1beta1.AnnotateImageResponse*e\n\nLikelihood\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x11\n\rVERY_UNLIKELY\x10\x01\x12\x0c\n\x08UNLIKELY\x10\x02\x12\x0c\n\x08POSSIBLE\x10\x03\x12\n\n\x06LIKELY\x10\x04\x12\x0f\n\x0bVERY_LIKELY\x10\x05\x32\xc6\x01\n\x0eImageAnnotator\x12\xb3\x01\n\x13\x42\x61tchAnnotateImages\x12\x39.google.cloud.vision.v1p1beta1.BatchAnnotateImagesRequest\x1a:.google.cloud.vision.v1p1beta1.BatchAnnotateImagesResponse\"%\x82\xd3\xe4\x93\x02\x1f\"\x1a/v1p1beta1/images:annotate:\x01*B\x82\x01\n!com.google.cloud.vision.v1p1beta1B\x13ImageAnnotatorProtoP\x01ZCgoogle.golang.org/genproto/googleapis/cloud/vision/v1p1beta1;vision\xf8\x01\x01\x62\x06proto3')
,
dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_cloud_dot_vision__v1p1beta1_dot_proto_dot_geometry__pb2.DESCRIPTOR,google_dot_cloud_dot_vision__v1p1beta1_dot_proto_dot_text__annotation__pb2.DESCRIPTOR,google_dot_cloud_dot_vision__v1p1beta1_dot_proto_dot_web__detection__pb2.DESCRIPTOR,google_dot_rpc_dot_status__pb2.DESCRIPTOR,google_dot_type_dot_color__pb2.DESCRIPTOR,google_dot_type_dot_latlng__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_LIKELIHOOD = _descriptor.EnumDescriptor(
name='Likelihood',
full_name='google.cloud.vision.v1p1beta1.Likelihood',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='UNKNOWN', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='VERY_UNLIKELY', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='UNLIKELY', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='POSSIBLE', index=3, number=3,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='LIKELY', index=4, number=4,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='VERY_LIKELY', index=5, number=5,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=5631,
serialized_end=5732,
)
_sym_db.RegisterEnumDescriptor(_LIKELIHOOD)
Likelihood = enum_type_wrapper.EnumTypeWrapper(_LIKELIHOOD)
UNKNOWN = 0
VERY_UNLIKELY = 1
UNLIKELY = 2
POSSIBLE = 3
LIKELY = 4
VERY_LIKELY = 5
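# A minimal usage sketch (not part of the generated code): the
# EnumTypeWrapper exposes name/number lookups for the Likelihood enum.
#
#     Likelihood.Name(3)                  # -> 'POSSIBLE'
#     Likelihood.Value('VERY_LIKELY')     # -> 5
#     dict(Likelihood.items())['LIKELY']  # -> 4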
_FEATURE_TYPE = _descriptor.EnumDescriptor(
name='Type',
full_name='google.cloud.vision.v1p1beta1.Feature.Type',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='TYPE_UNSPECIFIED', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='FACE_DETECTION', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='LANDMARK_DETECTION', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='LOGO_DETECTION', index=3, number=3,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='LABEL_DETECTION', index=4, number=4,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TEXT_DETECTION', index=5, number=5,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DOCUMENT_TEXT_DETECTION', index=6, number=11,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SAFE_SEARCH_DETECTION', index=7, number=6,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='IMAGE_PROPERTIES', index=8, number=7,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CROP_HINTS', index=9, number=9,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='WEB_DETECTION', index=10, number=10,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=474,
serialized_end=720,
)
_sym_db.RegisterEnumDescriptor(_FEATURE_TYPE)
_FACEANNOTATION_LANDMARK_TYPE = _descriptor.EnumDescriptor(
name='Type',
full_name='google.cloud.vision.v1p1beta1.FaceAnnotation.Landmark.Type',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='UNKNOWN_LANDMARK', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='LEFT_EYE', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='RIGHT_EYE', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='LEFT_OF_LEFT_EYEBROW', index=3, number=3,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='RIGHT_OF_LEFT_EYEBROW', index=4, number=4,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='LEFT_OF_RIGHT_EYEBROW', index=5, number=5,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='RIGHT_OF_RIGHT_EYEBROW', index=6, number=6,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MIDPOINT_BETWEEN_EYES', index=7, number=7,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='NOSE_TIP', index=8, number=8,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='UPPER_LIP', index=9, number=9,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='LOWER_LIP', index=10, number=10,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MOUTH_LEFT', index=11, number=11,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MOUTH_RIGHT', index=12, number=12,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='MOUTH_CENTER', index=13, number=13,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='NOSE_BOTTOM_RIGHT', index=14, number=14,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='NOSE_BOTTOM_LEFT', index=15, number=15,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='NOSE_BOTTOM_CENTER', index=16, number=16,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='LEFT_EYE_TOP_BOUNDARY', index=17, number=17,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='LEFT_EYE_RIGHT_CORNER', index=18, number=18,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='LEFT_EYE_BOTTOM_BOUNDARY', index=19, number=19,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='LEFT_EYE_LEFT_CORNER', index=20, number=20,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='RIGHT_EYE_TOP_BOUNDARY', index=21, number=21,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='RIGHT_EYE_RIGHT_CORNER', index=22, number=22,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='RIGHT_EYE_BOTTOM_BOUNDARY', index=23, number=23,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='RIGHT_EYE_LEFT_CORNER', index=24, number=24,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='LEFT_EYEBROW_UPPER_MIDPOINT', index=25, number=25,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='RIGHT_EYEBROW_UPPER_MIDPOINT', index=26, number=26,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='LEFT_EAR_TRAGION', index=27, number=27,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='RIGHT_EAR_TRAGION', index=28, number=28,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='LEFT_EYE_PUPIL', index=29, number=29,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='RIGHT_EYE_PUPIL', index=30, number=30,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='FOREHEAD_GLABELLA', index=31, number=31,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CHIN_GNATHION', index=32, number=32,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CHIN_LEFT_GONION', index=33, number=33,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CHIN_RIGHT_GONION', index=34, number=34,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=1865,
serialized_end=2685,
)
_sym_db.RegisterEnumDescriptor(_FACEANNOTATION_LANDMARK_TYPE)
_FEATURE = _descriptor.Descriptor(
name='Feature',
full_name='google.cloud.vision.v1p1beta1.Feature',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='type', full_name='google.cloud.vision.v1p1beta1.Feature.type', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='max_results', full_name='google.cloud.vision.v1p1beta1.Feature.max_results', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='model', full_name='google.cloud.vision.v1p1beta1.Feature.model', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_FEATURE_TYPE,
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=367,
serialized_end=720,
)
_IMAGESOURCE = _descriptor.Descriptor(
name='ImageSource',
full_name='google.cloud.vision.v1p1beta1.ImageSource',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='gcs_image_uri', full_name='google.cloud.vision.v1p1beta1.ImageSource.gcs_image_uri', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='image_uri', full_name='google.cloud.vision.v1p1beta1.ImageSource.image_uri', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=722,
serialized_end=777,
)
_IMAGE = _descriptor.Descriptor(
name='Image',
full_name='google.cloud.vision.v1p1beta1.Image',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='content', full_name='google.cloud.vision.v1p1beta1.Image.content', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='source', full_name='google.cloud.vision.v1p1beta1.Image.source', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=779,
serialized_end=863,
)
_FACEANNOTATION_LANDMARK = _descriptor.Descriptor(
name='Landmark',
full_name='google.cloud.vision.v1p1beta1.FaceAnnotation.Landmark',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='type', full_name='google.cloud.vision.v1p1beta1.FaceAnnotation.Landmark.type', index=0,
number=3, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='position', full_name='google.cloud.vision.v1p1beta1.FaceAnnotation.Landmark.position', index=1,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_FACEANNOTATION_LANDMARK_TYPE,
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1718,
serialized_end=2685,
)
_FACEANNOTATION = _descriptor.Descriptor(
name='FaceAnnotation',
full_name='google.cloud.vision.v1p1beta1.FaceAnnotation',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='bounding_poly', full_name='google.cloud.vision.v1p1beta1.FaceAnnotation.bounding_poly', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='fd_bounding_poly', full_name='google.cloud.vision.v1p1beta1.FaceAnnotation.fd_bounding_poly', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='landmarks', full_name='google.cloud.vision.v1p1beta1.FaceAnnotation.landmarks', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='roll_angle', full_name='google.cloud.vision.v1p1beta1.FaceAnnotation.roll_angle', index=3,
number=4, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='pan_angle', full_name='google.cloud.vision.v1p1beta1.FaceAnnotation.pan_angle', index=4,
number=5, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='tilt_angle', full_name='google.cloud.vision.v1p1beta1.FaceAnnotation.tilt_angle', index=5,
number=6, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='detection_confidence', full_name='google.cloud.vision.v1p1beta1.FaceAnnotation.detection_confidence', index=6,
number=7, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='landmarking_confidence', full_name='google.cloud.vision.v1p1beta1.FaceAnnotation.landmarking_confidence', index=7,
number=8, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='joy_likelihood', full_name='google.cloud.vision.v1p1beta1.FaceAnnotation.joy_likelihood', index=8,
number=9, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='sorrow_likelihood', full_name='google.cloud.vision.v1p1beta1.FaceAnnotation.sorrow_likelihood', index=9,
number=10, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='anger_likelihood', full_name='google.cloud.vision.v1p1beta1.FaceAnnotation.anger_likelihood', index=10,
number=11, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='surprise_likelihood', full_name='google.cloud.vision.v1p1beta1.FaceAnnotation.surprise_likelihood', index=11,
number=12, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='under_exposed_likelihood', full_name='google.cloud.vision.v1p1beta1.FaceAnnotation.under_exposed_likelihood', index=12,
number=13, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='blurred_likelihood', full_name='google.cloud.vision.v1p1beta1.FaceAnnotation.blurred_likelihood', index=13,
number=14, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='headwear_likelihood', full_name='google.cloud.vision.v1p1beta1.FaceAnnotation.headwear_likelihood', index=14,
number=15, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_FACEANNOTATION_LANDMARK, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=866,
serialized_end=2685,
)
_LOCATIONINFO = _descriptor.Descriptor(
name='LocationInfo',
full_name='google.cloud.vision.v1p1beta1.LocationInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='lat_lng', full_name='google.cloud.vision.v1p1beta1.LocationInfo.lat_lng', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2687,
serialized_end=2739,
)
_PROPERTY = _descriptor.Descriptor(
name='Property',
full_name='google.cloud.vision.v1p1beta1.Property',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='google.cloud.vision.v1p1beta1.Property.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='value', full_name='google.cloud.vision.v1p1beta1.Property.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='uint64_value', full_name='google.cloud.vision.v1p1beta1.Property.uint64_value', index=2,
number=3, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2741,
serialized_end=2802,
)
_ENTITYANNOTATION = _descriptor.Descriptor(
name='EntityAnnotation',
full_name='google.cloud.vision.v1p1beta1.EntityAnnotation',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='mid', full_name='google.cloud.vision.v1p1beta1.EntityAnnotation.mid', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='locale', full_name='google.cloud.vision.v1p1beta1.EntityAnnotation.locale', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='description', full_name='google.cloud.vision.v1p1beta1.EntityAnnotation.description', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='score', full_name='google.cloud.vision.v1p1beta1.EntityAnnotation.score', index=3,
number=4, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='confidence', full_name='google.cloud.vision.v1p1beta1.EntityAnnotation.confidence', index=4,
number=5, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='topicality', full_name='google.cloud.vision.v1p1beta1.EntityAnnotation.topicality', index=5,
number=6, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='bounding_poly', full_name='google.cloud.vision.v1p1beta1.EntityAnnotation.bounding_poly', index=6,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='locations', full_name='google.cloud.vision.v1p1beta1.EntityAnnotation.locations', index=7,
number=8, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='properties', full_name='google.cloud.vision.v1p1beta1.EntityAnnotation.properties', index=8,
number=9, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2805,
serialized_end=3121,
)
_SAFESEARCHANNOTATION = _descriptor.Descriptor(
name='SafeSearchAnnotation',
full_name='google.cloud.vision.v1p1beta1.SafeSearchAnnotation',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='adult', full_name='google.cloud.vision.v1p1beta1.SafeSearchAnnotation.adult', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='spoof', full_name='google.cloud.vision.v1p1beta1.SafeSearchAnnotation.spoof', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='medical', full_name='google.cloud.vision.v1p1beta1.SafeSearchAnnotation.medical', index=2,
number=3, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='violence', full_name='google.cloud.vision.v1p1beta1.SafeSearchAnnotation.violence', index=3,
number=4, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='racy', full_name='google.cloud.vision.v1p1beta1.SafeSearchAnnotation.racy', index=4,
number=9, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3124,
serialized_end=3440,
)
_LATLONGRECT = _descriptor.Descriptor(
name='LatLongRect',
full_name='google.cloud.vision.v1p1beta1.LatLongRect',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='min_lat_lng', full_name='google.cloud.vision.v1p1beta1.LatLongRect.min_lat_lng', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='max_lat_lng', full_name='google.cloud.vision.v1p1beta1.LatLongRect.max_lat_lng', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3442,
serialized_end=3539,
)
_COLORINFO = _descriptor.Descriptor(
name='ColorInfo',
full_name='google.cloud.vision.v1p1beta1.ColorInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='color', full_name='google.cloud.vision.v1p1beta1.ColorInfo.color', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='score', full_name='google.cloud.vision.v1p1beta1.ColorInfo.score', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='pixel_fraction', full_name='google.cloud.vision.v1p1beta1.ColorInfo.pixel_fraction', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3541,
serialized_end=3626,
)
_DOMINANTCOLORSANNOTATION = _descriptor.Descriptor(
name='DominantColorsAnnotation',
full_name='google.cloud.vision.v1p1beta1.DominantColorsAnnotation',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='colors', full_name='google.cloud.vision.v1p1beta1.DominantColorsAnnotation.colors', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3628,
serialized_end=3712,
)
_IMAGEPROPERTIES = _descriptor.Descriptor(
name='ImageProperties',
full_name='google.cloud.vision.v1p1beta1.ImageProperties',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='dominant_colors', full_name='google.cloud.vision.v1p1beta1.ImageProperties.dominant_colors', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3714,
serialized_end=3813,
)
_CROPHINT = _descriptor.Descriptor(
name='CropHint',
full_name='google.cloud.vision.v1p1beta1.CropHint',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='bounding_poly', full_name='google.cloud.vision.v1p1beta1.CropHint.bounding_poly', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='confidence', full_name='google.cloud.vision.v1p1beta1.CropHint.confidence', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='importance_fraction', full_name='google.cloud.vision.v1p1beta1.CropHint.importance_fraction', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3815,
serialized_end=3942,
)
_CROPHINTSANNOTATION = _descriptor.Descriptor(
name='CropHintsAnnotation',
full_name='google.cloud.vision.v1p1beta1.CropHintsAnnotation',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='crop_hints', full_name='google.cloud.vision.v1p1beta1.CropHintsAnnotation.crop_hints', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3944,
serialized_end=4026,
)
_CROPHINTSPARAMS = _descriptor.Descriptor(
name='CropHintsParams',
full_name='google.cloud.vision.v1p1beta1.CropHintsParams',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='aspect_ratios', full_name='google.cloud.vision.v1p1beta1.CropHintsParams.aspect_ratios', index=0,
number=1, type=2, cpp_type=6, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=4028,
serialized_end=4068,
)
_WEBDETECTIONPARAMS = _descriptor.Descriptor(
name='WebDetectionParams',
full_name='google.cloud.vision.v1p1beta1.WebDetectionParams',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='include_geo_results', full_name='google.cloud.vision.v1p1beta1.WebDetectionParams.include_geo_results', index=0,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=4070,
serialized_end=4119,
)
_IMAGECONTEXT = _descriptor.Descriptor(
name='ImageContext',
full_name='google.cloud.vision.v1p1beta1.ImageContext',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='lat_long_rect', full_name='google.cloud.vision.v1p1beta1.ImageContext.lat_long_rect', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='language_hints', full_name='google.cloud.vision.v1p1beta1.ImageContext.language_hints', index=1,
number=2, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='crop_hints_params', full_name='google.cloud.vision.v1p1beta1.ImageContext.crop_hints_params', index=2,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='web_detection_params', full_name='google.cloud.vision.v1p1beta1.ImageContext.web_detection_params', index=3,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=4122,
serialized_end=4383,
)
_ANNOTATEIMAGEREQUEST = _descriptor.Descriptor(
name='AnnotateImageRequest',
full_name='google.cloud.vision.v1p1beta1.AnnotateImageRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='image', full_name='google.cloud.vision.v1p1beta1.AnnotateImageRequest.image', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='features', full_name='google.cloud.vision.v1p1beta1.AnnotateImageRequest.features', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='image_context', full_name='google.cloud.vision.v1p1beta1.AnnotateImageRequest.image_context', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=4386,
serialized_end=4587,
)
_ANNOTATEIMAGERESPONSE = _descriptor.Descriptor(
name='AnnotateImageResponse',
full_name='google.cloud.vision.v1p1beta1.AnnotateImageResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='face_annotations', full_name='google.cloud.vision.v1p1beta1.AnnotateImageResponse.face_annotations', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='landmark_annotations', full_name='google.cloud.vision.v1p1beta1.AnnotateImageResponse.landmark_annotations', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='logo_annotations', full_name='google.cloud.vision.v1p1beta1.AnnotateImageResponse.logo_annotations', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='label_annotations', full_name='google.cloud.vision.v1p1beta1.AnnotateImageResponse.label_annotations', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='text_annotations', full_name='google.cloud.vision.v1p1beta1.AnnotateImageResponse.text_annotations', index=4,
number=5, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='full_text_annotation', full_name='google.cloud.vision.v1p1beta1.AnnotateImageResponse.full_text_annotation', index=5,
number=12, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='safe_search_annotation', full_name='google.cloud.vision.v1p1beta1.AnnotateImageResponse.safe_search_annotation', index=6,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='image_properties_annotation', full_name='google.cloud.vision.v1p1beta1.AnnotateImageResponse.image_properties_annotation', index=7,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='crop_hints_annotation', full_name='google.cloud.vision.v1p1beta1.AnnotateImageResponse.crop_hints_annotation', index=8,
number=11, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='web_detection', full_name='google.cloud.vision.v1p1beta1.AnnotateImageResponse.web_detection', index=9,
number=13, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='error', full_name='google.cloud.vision.v1p1beta1.AnnotateImageResponse.error', index=10,
number=9, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=4590,
serialized_end=5424,
)
_BATCHANNOTATEIMAGESREQUEST = _descriptor.Descriptor(
name='BatchAnnotateImagesRequest',
full_name='google.cloud.vision.v1p1beta1.BatchAnnotateImagesRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='requests', full_name='google.cloud.vision.v1p1beta1.BatchAnnotateImagesRequest.requests', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=5426,
serialized_end=5525,
)
_BATCHANNOTATEIMAGESRESPONSE = _descriptor.Descriptor(
name='BatchAnnotateImagesResponse',
full_name='google.cloud.vision.v1p1beta1.BatchAnnotateImagesResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='responses', full_name='google.cloud.vision.v1p1beta1.BatchAnnotateImagesResponse.responses', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=5527,
serialized_end=5629,
)
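# The statements below stitch the descriptors together after construction:
# message-typed fields receive their message_type, enum-typed fields their
# enum_type, and nested types/enums their containing_type, before every
# top-level message is exposed through DESCRIPTOR.message_types_by_name.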
_FEATURE.fields_by_name['type'].enum_type = _FEATURE_TYPE
_FEATURE_TYPE.containing_type = _FEATURE
_IMAGE.fields_by_name['source'].message_type = _IMAGESOURCE
_FACEANNOTATION_LANDMARK.fields_by_name['type'].enum_type = _FACEANNOTATION_LANDMARK_TYPE
_FACEANNOTATION_LANDMARK.fields_by_name['position'].message_type = google_dot_cloud_dot_vision__v1p1beta1_dot_proto_dot_geometry__pb2._POSITION
_FACEANNOTATION_LANDMARK.containing_type = _FACEANNOTATION
_FACEANNOTATION_LANDMARK_TYPE.containing_type = _FACEANNOTATION_LANDMARK
_FACEANNOTATION.fields_by_name['bounding_poly'].message_type = google_dot_cloud_dot_vision__v1p1beta1_dot_proto_dot_geometry__pb2._BOUNDINGPOLY
_FACEANNOTATION.fields_by_name['fd_bounding_poly'].message_type = google_dot_cloud_dot_vision__v1p1beta1_dot_proto_dot_geometry__pb2._BOUNDINGPOLY
_FACEANNOTATION.fields_by_name['landmarks'].message_type = _FACEANNOTATION_LANDMARK
_FACEANNOTATION.fields_by_name['joy_likelihood'].enum_type = _LIKELIHOOD
_FACEANNOTATION.fields_by_name['sorrow_likelihood'].enum_type = _LIKELIHOOD
_FACEANNOTATION.fields_by_name['anger_likelihood'].enum_type = _LIKELIHOOD
_FACEANNOTATION.fields_by_name['surprise_likelihood'].enum_type = _LIKELIHOOD
_FACEANNOTATION.fields_by_name['under_exposed_likelihood'].enum_type = _LIKELIHOOD
_FACEANNOTATION.fields_by_name['blurred_likelihood'].enum_type = _LIKELIHOOD
_FACEANNOTATION.fields_by_name['headwear_likelihood'].enum_type = _LIKELIHOOD
_LOCATIONINFO.fields_by_name['lat_lng'].message_type = google_dot_type_dot_latlng__pb2._LATLNG
_ENTITYANNOTATION.fields_by_name['bounding_poly'].message_type = google_dot_cloud_dot_vision__v1p1beta1_dot_proto_dot_geometry__pb2._BOUNDINGPOLY
_ENTITYANNOTATION.fields_by_name['locations'].message_type = _LOCATIONINFO
_ENTITYANNOTATION.fields_by_name['properties'].message_type = _PROPERTY
_SAFESEARCHANNOTATION.fields_by_name['adult'].enum_type = _LIKELIHOOD
_SAFESEARCHANNOTATION.fields_by_name['spoof'].enum_type = _LIKELIHOOD
_SAFESEARCHANNOTATION.fields_by_name['medical'].enum_type = _LIKELIHOOD
_SAFESEARCHANNOTATION.fields_by_name['violence'].enum_type = _LIKELIHOOD
_SAFESEARCHANNOTATION.fields_by_name['racy'].enum_type = _LIKELIHOOD
_LATLONGRECT.fields_by_name['min_lat_lng'].message_type = google_dot_type_dot_latlng__pb2._LATLNG
_LATLONGRECT.fields_by_name['max_lat_lng'].message_type = google_dot_type_dot_latlng__pb2._LATLNG
_COLORINFO.fields_by_name['color'].message_type = google_dot_type_dot_color__pb2._COLOR
_DOMINANTCOLORSANNOTATION.fields_by_name['colors'].message_type = _COLORINFO
_IMAGEPROPERTIES.fields_by_name['dominant_colors'].message_type = _DOMINANTCOLORSANNOTATION
_CROPHINT.fields_by_name['bounding_poly'].message_type = google_dot_cloud_dot_vision__v1p1beta1_dot_proto_dot_geometry__pb2._BOUNDINGPOLY
_CROPHINTSANNOTATION.fields_by_name['crop_hints'].message_type = _CROPHINT
_IMAGECONTEXT.fields_by_name['lat_long_rect'].message_type = _LATLONGRECT
_IMAGECONTEXT.fields_by_name['crop_hints_params'].message_type = _CROPHINTSPARAMS
_IMAGECONTEXT.fields_by_name['web_detection_params'].message_type = _WEBDETECTIONPARAMS
_ANNOTATEIMAGEREQUEST.fields_by_name['image'].message_type = _IMAGE
_ANNOTATEIMAGEREQUEST.fields_by_name['features'].message_type = _FEATURE
_ANNOTATEIMAGEREQUEST.fields_by_name['image_context'].message_type = _IMAGECONTEXT
_ANNOTATEIMAGERESPONSE.fields_by_name['face_annotations'].message_type = _FACEANNOTATION
_ANNOTATEIMAGERESPONSE.fields_by_name['landmark_annotations'].message_type = _ENTITYANNOTATION
_ANNOTATEIMAGERESPONSE.fields_by_name['logo_annotations'].message_type = _ENTITYANNOTATION
_ANNOTATEIMAGERESPONSE.fields_by_name['label_annotations'].message_type = _ENTITYANNOTATION
_ANNOTATEIMAGERESPONSE.fields_by_name['text_annotations'].message_type = _ENTITYANNOTATION
_ANNOTATEIMAGERESPONSE.fields_by_name['full_text_annotation'].message_type = google_dot_cloud_dot_vision__v1p1beta1_dot_proto_dot_text__annotation__pb2._TEXTANNOTATION
_ANNOTATEIMAGERESPONSE.fields_by_name['safe_search_annotation'].message_type = _SAFESEARCHANNOTATION
_ANNOTATEIMAGERESPONSE.fields_by_name['image_properties_annotation'].message_type = _IMAGEPROPERTIES
_ANNOTATEIMAGERESPONSE.fields_by_name['crop_hints_annotation'].message_type = _CROPHINTSANNOTATION
_ANNOTATEIMAGERESPONSE.fields_by_name['web_detection'].message_type = google_dot_cloud_dot_vision__v1p1beta1_dot_proto_dot_web__detection__pb2._WEBDETECTION
_ANNOTATEIMAGERESPONSE.fields_by_name['error'].message_type = google_dot_rpc_dot_status__pb2._STATUS
_BATCHANNOTATEIMAGESREQUEST.fields_by_name['requests'].message_type = _ANNOTATEIMAGEREQUEST
_BATCHANNOTATEIMAGESRESPONSE.fields_by_name['responses'].message_type = _ANNOTATEIMAGERESPONSE
DESCRIPTOR.message_types_by_name['Feature'] = _FEATURE
DESCRIPTOR.message_types_by_name['ImageSource'] = _IMAGESOURCE
DESCRIPTOR.message_types_by_name['Image'] = _IMAGE
DESCRIPTOR.message_types_by_name['FaceAnnotation'] = _FACEANNOTATION
DESCRIPTOR.message_types_by_name['LocationInfo'] = _LOCATIONINFO
DESCRIPTOR.message_types_by_name['Property'] = _PROPERTY
DESCRIPTOR.message_types_by_name['EntityAnnotation'] = _ENTITYANNOTATION
DESCRIPTOR.message_types_by_name['SafeSearchAnnotation'] = _SAFESEARCHANNOTATION
DESCRIPTOR.message_types_by_name['LatLongRect'] = _LATLONGRECT
DESCRIPTOR.message_types_by_name['ColorInfo'] = _COLORINFO
DESCRIPTOR.message_types_by_name['DominantColorsAnnotation'] = _DOMINANTCOLORSANNOTATION
DESCRIPTOR.message_types_by_name['ImageProperties'] = _IMAGEPROPERTIES
DESCRIPTOR.message_types_by_name['CropHint'] = _CROPHINT
DESCRIPTOR.message_types_by_name['CropHintsAnnotation'] = _CROPHINTSANNOTATION
DESCRIPTOR.message_types_by_name['CropHintsParams'] = _CROPHINTSPARAMS
DESCRIPTOR.message_types_by_name['WebDetectionParams'] = _WEBDETECTIONPARAMS
DESCRIPTOR.message_types_by_name['ImageContext'] = _IMAGECONTEXT
DESCRIPTOR.message_types_by_name['AnnotateImageRequest'] = _ANNOTATEIMAGEREQUEST
DESCRIPTOR.message_types_by_name['AnnotateImageResponse'] = _ANNOTATEIMAGERESPONSE
DESCRIPTOR.message_types_by_name['BatchAnnotateImagesRequest'] = _BATCHANNOTATEIMAGESREQUEST
DESCRIPTOR.message_types_by_name['BatchAnnotateImagesResponse'] = _BATCHANNOTATEIMAGESRESPONSE
DESCRIPTOR.enum_types_by_name['Likelihood'] = _LIKELIHOOD
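# Each concrete message class below is synthesized at import time:
# _reflection.GeneratedProtocolMessageType pairs a Descriptor with
# _message.Message, and _sym_db.RegisterMessage makes the resulting class
# resolvable by its full protobuf name.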
Feature = _reflection.GeneratedProtocolMessageType('Feature', (_message.Message,), dict(
DESCRIPTOR = _FEATURE,
__module__ = 'google.cloud.vision_v1p1beta1.proto.image_annotator_pb2'
,
__doc__ = """Users describe the type of Google Cloud Vision API tasks to perform over
images by using *Feature*\ s. Each Feature indicates a type of image
detection task to perform. Features encode the Cloud Vision API vertical
to operate on and the number of top-scoring results to return.
Attributes:
type:
The feature type.
max_results:
Maximum number of results of this type.
model:
Model to use for the feature. Supported values:
"builtin/stable" (the default if unset) and "builtin/latest".
""",
# @@protoc_insertion_point(class_scope:google.cloud.vision.v1p1beta1.Feature)
))
_sym_db.RegisterMessage(Feature)
ImageSource = _reflection.GeneratedProtocolMessageType('ImageSource', (_message.Message,), dict(
DESCRIPTOR = _IMAGESOURCE,
__module__ = 'google.cloud.vision_v1p1beta1.proto.image_annotator_pb2'
,
__doc__ = """External image source (Google Cloud Storage image location).
Attributes:
gcs_image_uri:
NOTE: For new code ``image_uri`` below is preferred. Google
Cloud Storage image URI, which must be in the following form:
``gs://bucket_name/object_name`` (for details, see `Google
Cloud Storage Request URIs
<https://cloud.google.com/storage/docs/reference-uris>`__).
NOTE: Cloud Storage object versioning is not supported.
image_uri:
Image URI which supports: 1) Google Cloud Storage image URI,
which must be in the following form:
``gs://bucket_name/object_name`` (for details, see `Google
Cloud Storage Request URIs
<https://cloud.google.com/storage/docs/reference-uris>`__).
NOTE: Cloud Storage object versioning is not supported. 2)
Publicly accessible image HTTP/HTTPS URL. This is preferred
over the legacy ``gcs_image_uri`` above. When both
``gcs_image_uri`` and ``image_uri`` are specified,
``image_uri`` takes precedence.
""",
# @@protoc_insertion_point(class_scope:google.cloud.vision.v1p1beta1.ImageSource)
))
_sym_db.RegisterMessage(ImageSource)
Image = _reflection.GeneratedProtocolMessageType('Image', (_message.Message,), dict(
DESCRIPTOR = _IMAGE,
__module__ = 'google.cloud.vision_v1p1beta1.proto.image_annotator_pb2'
,
__doc__ = """Client image to perform Google Cloud Vision API tasks over.
Attributes:
content:
Image content, represented as a stream of bytes. Note: as with
all ``bytes`` fields, protobuffers use a pure binary
representation, whereas JSON representations use base64.
source:
Google Cloud Storage image location. If both ``content`` and
``source`` are provided for an image, ``content`` takes
precedence and is used to perform the image annotation
request.
""",
# @@protoc_insertion_point(class_scope:google.cloud.vision.v1p1beta1.Image)
))
_sym_db.RegisterMessage(Image)
FaceAnnotation = _reflection.GeneratedProtocolMessageType('FaceAnnotation', (_message.Message,), dict(
Landmark = _reflection.GeneratedProtocolMessageType('Landmark', (_message.Message,), dict(
DESCRIPTOR = _FACEANNOTATION_LANDMARK,
__module__ = 'google.cloud.vision_v1p1beta1.proto.image_annotator_pb2'
,
__doc__ = """A face-specific landmark (for example, a face feature).
Attributes:
type:
Face landmark type.
position:
Face landmark position.
""",
# @@protoc_insertion_point(class_scope:google.cloud.vision.v1p1beta1.FaceAnnotation.Landmark)
))
,
DESCRIPTOR = _FACEANNOTATION,
__module__ = 'google.cloud.vision_v1p1beta1.proto.image_annotator_pb2'
,
__doc__ = """A face annotation object contains the results of face detection.
Attributes:
bounding_poly:
The bounding polygon around the face. The coordinates of the
bounding box are in the original image's scale, as returned in
``ImageParams``. The bounding box is computed to "frame" the
face in accordance with human expectations. It is based on the
landmarker results. Note that one or more x and/or y
coordinates may not be generated in the ``BoundingPoly`` (the
polygon will be unbounded) if only a partial face appears in
the image to be annotated.
fd_bounding_poly:
The ``fd_bounding_poly`` bounding polygon is tighter than the
``boundingPoly``, and encloses only the skin part of the face.
Typically, it is used to eliminate the face from any image
analysis that detects the "amount of skin" visible in an
image. It is not based on the landmarker results, only on the
initial face detection, hence the fd (face detection) prefix.
landmarks:
Detected face landmarks.
roll_angle:
Roll angle, which indicates the amount of clockwise/anti-
clockwise rotation of the face relative to the image vertical
about the axis perpendicular to the face. Range [-180,180].
pan_angle:
Yaw angle, which indicates the leftward/rightward angle that
the face is pointing relative to the vertical plane
perpendicular to the image. Range [-180,180].
tilt_angle:
Pitch angle, which indicates the upwards/downwards angle that
the face is pointing relative to the image's horizontal plane.
Range [-180,180].
detection_confidence:
Detection confidence. Range [0, 1].
landmarking_confidence:
Face landmarking confidence. Range [0, 1].
joy_likelihood:
Joy likelihood.
sorrow_likelihood:
Sorrow likelihood.
anger_likelihood:
Anger likelihood.
surprise_likelihood:
Surprise likelihood.
under_exposed_likelihood:
Under-exposed likelihood.
blurred_likelihood:
Blurred likelihood.
headwear_likelihood:
Headwear likelihood.
""",
# @@protoc_insertion_point(class_scope:google.cloud.vision.v1p1beta1.FaceAnnotation)
))
_sym_db.RegisterMessage(FaceAnnotation)
_sym_db.RegisterMessage(FaceAnnotation.Landmark)
LocationInfo = _reflection.GeneratedProtocolMessageType('LocationInfo', (_message.Message,), dict(
DESCRIPTOR = _LOCATIONINFO,
__module__ = 'google.cloud.vision_v1p1beta1.proto.image_annotator_pb2'
,
__doc__ = """Detected entity location information.
Attributes:
lat_lng:
lat/long location coordinates.
""",
# @@protoc_insertion_point(class_scope:google.cloud.vision.v1p1beta1.LocationInfo)
))
_sym_db.RegisterMessage(LocationInfo)
Property = _reflection.GeneratedProtocolMessageType('Property', (_message.Message,), dict(
DESCRIPTOR = _PROPERTY,
__module__ = 'google.cloud.vision_v1p1beta1.proto.image_annotator_pb2'
,
__doc__ = """A ``Property`` consists of a user-supplied name/value pair.
Attributes:
name:
Name of the property.
value:
Value of the property.
uint64_value:
Value of numeric properties.
""",
# @@protoc_insertion_point(class_scope:google.cloud.vision.v1p1beta1.Property)
))
_sym_db.RegisterMessage(Property)
EntityAnnotation = _reflection.GeneratedProtocolMessageType('EntityAnnotation', (_message.Message,), dict(
DESCRIPTOR = _ENTITYANNOTATION,
__module__ = 'google.cloud.vision_v1p1beta1.proto.image_annotator_pb2'
,
__doc__ = """Set of detected entity features.
Attributes:
mid:
Opaque entity ID. Some IDs may be available in `Google
Knowledge Graph Search API
<https://developers.google.com/knowledge-graph/>`__.
locale:
The language code for the locale in which the entity textual
``description`` is expressed.
description:
Entity textual description, expressed in its ``locale``
language.
score:
Overall score of the result. Range [0, 1].
confidence:
The accuracy of the entity detection in an image. For example,
for an image in which the "Eiffel Tower" entity is detected,
this field represents the confidence that there is a tower in
the query image. Range [0, 1].
topicality:
The relevancy of the ICA (Image Content Annotation) label to
the image. For example, the relevancy of "tower" is likely
higher to an image containing the detected "Eiffel Tower" than
to an image containing a detected distant towering building,
even though the confidence that there is a tower in each image
may be the same. Range [0, 1].
bounding_poly:
Image region to which this entity belongs. Not produced for
``LABEL_DETECTION`` features.
locations:
The location information for the detected entity. Multiple
``LocationInfo`` elements can be present because one location
may indicate the location of the scene in the image, and
another location may indicate the location of the place where
the image was taken. Location information is usually present
for landmarks.
properties:
Some entities may have optional user-supplied ``Property``
(name/value) fields, such as a score or string that qualifies the
entity.
""",
# @@protoc_insertion_point(class_scope:google.cloud.vision.v1p1beta1.EntityAnnotation)
))
_sym_db.RegisterMessage(EntityAnnotation)
SafeSearchAnnotation = _reflection.GeneratedProtocolMessageType('SafeSearchAnnotation', (_message.Message,), dict(
DESCRIPTOR = _SAFESEARCHANNOTATION,
__module__ = 'google.cloud.vision_v1p1beta1.proto.image_annotator_pb2'
,
__doc__ = """Set of features pertaining to the image, computed by computer vision
methods over safe-search verticals (for example, adult, spoof, medical,
violence).
Attributes:
adult:
Represents the adult content likelihood for the image. Adult
content may contain elements such as nudity, pornographic
images or cartoons, or sexual activities.
spoof:
Spoof likelihood. The likelihood that a modification was made
to the image's canonical version to make it appear funny or
offensive.
medical:
Likelihood that this is a medical image.
violence:
Likelihood that this image contains violent content.
racy:
Likelihood that the request image contains racy content. Racy
content may include (but is not limited to) skimpy or sheer
clothing, strategically covered nudity, lewd or provocative
poses, or close-ups of sensitive body areas.
""",
# @@protoc_insertion_point(class_scope:google.cloud.vision.v1p1beta1.SafeSearchAnnotation)
))
_sym_db.RegisterMessage(SafeSearchAnnotation)
LatLongRect = _reflection.GeneratedProtocolMessageType('LatLongRect', (_message.Message,), dict(
DESCRIPTOR = _LATLONGRECT,
__module__ = 'google.cloud.vision_v1p1beta1.proto.image_annotator_pb2'
,
__doc__ = """Rectangle determined by min and max ``LatLng`` pairs.
Attributes:
min_lat_lng:
Min lat/long pair.
max_lat_lng:
Max lat/long pair.
""",
# @@protoc_insertion_point(class_scope:google.cloud.vision.v1p1beta1.LatLongRect)
))
_sym_db.RegisterMessage(LatLongRect)
ColorInfo = _reflection.GeneratedProtocolMessageType('ColorInfo', (_message.Message,), dict(
DESCRIPTOR = _COLORINFO,
__module__ = 'google.cloud.vision_v1p1beta1.proto.image_annotator_pb2'
,
__doc__ = """Color information consists of RGB channels, score, and the fraction of
the image that the color occupies in the image.
Attributes:
color:
RGB components of the color.
score:
Image-specific score for this color. Value in range [0, 1].
pixel_fraction:
The fraction of pixels the color occupies in the image. Value
in range [0, 1].
""",
# @@protoc_insertion_point(class_scope:google.cloud.vision.v1p1beta1.ColorInfo)
))
_sym_db.RegisterMessage(ColorInfo)
DominantColorsAnnotation = _reflection.GeneratedProtocolMessageType('DominantColorsAnnotation', (_message.Message,), dict(
DESCRIPTOR = _DOMINANTCOLORSANNOTATION,
__module__ = 'google.cloud.vision_v1p1beta1.proto.image_annotator_pb2'
,
__doc__ = """Set of dominant colors and their corresponding scores.
Attributes:
colors:
RGB color values with their score and pixel fraction.
""",
# @@protoc_insertion_point(class_scope:google.cloud.vision.v1p1beta1.DominantColorsAnnotation)
))
_sym_db.RegisterMessage(DominantColorsAnnotation)
ImageProperties = _reflection.GeneratedProtocolMessageType('ImageProperties', (_message.Message,), dict(
DESCRIPTOR = _IMAGEPROPERTIES,
__module__ = 'google.cloud.vision_v1p1beta1.proto.image_annotator_pb2'
,
__doc__ = """Stores image properties, such as dominant colors.
Attributes:
dominant_colors:
If present, dominant colors completed successfully.
""",
# @@protoc_insertion_point(class_scope:google.cloud.vision.v1p1beta1.ImageProperties)
))
_sym_db.RegisterMessage(ImageProperties)
CropHint = _reflection.GeneratedProtocolMessageType('CropHint', (_message.Message,), dict(
DESCRIPTOR = _CROPHINT,
__module__ = 'google.cloud.vision_v1p1beta1.proto.image_annotator_pb2'
,
__doc__ = """Single crop hint that is used to generate a new crop when serving an
image.
Attributes:
bounding_poly:
The bounding polygon for the crop region. The coordinates of
the bounding box are in the original image's scale, as
returned in ``ImageParams``.
confidence:
Confidence of this being a salient region. Range [0, 1].
importance_fraction:
Fraction of importance of this salient region with respect to
the original image.
""",
# @@protoc_insertion_point(class_scope:google.cloud.vision.v1p1beta1.CropHint)
))
_sym_db.RegisterMessage(CropHint)
CropHintsAnnotation = _reflection.GeneratedProtocolMessageType('CropHintsAnnotation', (_message.Message,), dict(
DESCRIPTOR = _CROPHINTSANNOTATION,
__module__ = 'google.cloud.vision_v1p1beta1.proto.image_annotator_pb2'
,
__doc__ = """Set of crop hints that are used to generate new crops when serving
images.
Attributes:
crop_hints:
Crop hint results.
""",
# @@protoc_insertion_point(class_scope:google.cloud.vision.v1p1beta1.CropHintsAnnotation)
))
_sym_db.RegisterMessage(CropHintsAnnotation)
CropHintsParams = _reflection.GeneratedProtocolMessageType('CropHintsParams', (_message.Message,), dict(
DESCRIPTOR = _CROPHINTSPARAMS,
__module__ = 'google.cloud.vision_v1p1beta1.proto.image_annotator_pb2'
,
__doc__ = """Parameters for crop hints annotation request.
Attributes:
aspect_ratios:
Aspect ratios in floats, representing the ratio of the width
to the height of the image. For example, if the desired aspect
ratio is 4/3, the corresponding float value should be 1.33333.
If not specified, the best possible crop is returned. The
number of provided aspect ratios is limited to a maximum of
16; any aspect ratios provided after the 16th are ignored.
""",
# @@protoc_insertion_point(class_scope:google.cloud.vision.v1p1beta1.CropHintsParams)
))
_sym_db.RegisterMessage(CropHintsParams)
WebDetectionParams = _reflection.GeneratedProtocolMessageType('WebDetectionParams', (_message.Message,), dict(
DESCRIPTOR = _WEBDETECTIONPARAMS,
__module__ = 'google.cloud.vision_v1p1beta1.proto.image_annotator_pb2'
,
__doc__ = """Parameters for web detection request.
Attributes:
include_geo_results:
Whether to include results derived from the geo information in
the image.
""",
# @@protoc_insertion_point(class_scope:google.cloud.vision.v1p1beta1.WebDetectionParams)
))
_sym_db.RegisterMessage(WebDetectionParams)
ImageContext = _reflection.GeneratedProtocolMessageType('ImageContext', (_message.Message,), dict(
DESCRIPTOR = _IMAGECONTEXT,
__module__ = 'google.cloud.vision_v1p1beta1.proto.image_annotator_pb2'
,
__doc__ = """Image context and/or feature-specific parameters.
Attributes:
lat_long_rect:
lat/long rectangle that specifies the location of the image.
language_hints:
List of languages to use for TEXT\_DETECTION. In most cases,
an empty value yields the best results since it enables
automatic language detection. For languages based on the Latin
alphabet, setting ``language_hints`` is not needed. In rare
cases, when the language of the text in the image is known,
setting a hint will help get better results (although it will
be a significant hindrance if the hint is wrong). Text
detection returns an error if one or more of the specified
languages is not one of the `supported languages
</vision/docs/languages>`__.
crop_hints_params:
Parameters for crop hints annotation request.
web_detection_params:
Parameters for web detection.
""",
# @@protoc_insertion_point(class_scope:google.cloud.vision.v1p1beta1.ImageContext)
))
_sym_db.RegisterMessage(ImageContext)
AnnotateImageRequest = _reflection.GeneratedProtocolMessageType('AnnotateImageRequest', (_message.Message,), dict(
DESCRIPTOR = _ANNOTATEIMAGEREQUEST,
__module__ = 'google.cloud.vision_v1p1beta1.proto.image_annotator_pb2'
,
__doc__ = """Request for performing Google Cloud Vision API tasks over a
user-provided image, with user-requested features.
Attributes:
image:
The image to be processed.
features:
Requested features.
image_context:
Additional context that may accompany the image.
""",
# @@protoc_insertion_point(class_scope:google.cloud.vision.v1p1beta1.AnnotateImageRequest)
))
_sym_db.RegisterMessage(AnnotateImageRequest)
AnnotateImageResponse = _reflection.GeneratedProtocolMessageType('AnnotateImageResponse', (_message.Message,), dict(
DESCRIPTOR = _ANNOTATEIMAGERESPONSE,
__module__ = 'google.cloud.vision_v1p1beta1.proto.image_annotator_pb2'
,
__doc__ = """Response to an image annotation request.
Attributes:
face_annotations:
If present, face detection has completed successfully.
landmark_annotations:
If present, landmark detection has completed successfully.
logo_annotations:
If present, logo detection has completed successfully.
label_annotations:
If present, label detection has completed successfully.
text_annotations:
If present, text (OCR) detection has completed successfully.
full_text_annotation:
If present, text (OCR) detection or document (OCR) text
detection has completed successfully. This annotation provides
the structural hierarchy for the OCR detected text.
safe_search_annotation:
If present, safe-search annotation has completed successfully.
image_properties_annotation:
If present, image properties were extracted successfully.
crop_hints_annotation:
If present, crop hints have completed successfully.
web_detection:
If present, web detection has completed successfully.
error:
If set, represents the error message for the operation. Note
that filled-in image annotations are guaranteed to be correct,
even when ``error`` is set.
""",
# @@protoc_insertion_point(class_scope:google.cloud.vision.v1p1beta1.AnnotateImageResponse)
))
_sym_db.RegisterMessage(AnnotateImageResponse)
BatchAnnotateImagesRequest = _reflection.GeneratedProtocolMessageType('BatchAnnotateImagesRequest', (_message.Message,), dict(
DESCRIPTOR = _BATCHANNOTATEIMAGESREQUEST,
__module__ = 'google.cloud.vision_v1p1beta1.proto.image_annotator_pb2'
,
__doc__ = """Multiple image annotation requests are batched into a single service
call.
Attributes:
requests:
Individual image annotation requests for this batch.
""",
# @@protoc_insertion_point(class_scope:google.cloud.vision.v1p1beta1.BatchAnnotateImagesRequest)
))
_sym_db.RegisterMessage(BatchAnnotateImagesRequest)
BatchAnnotateImagesResponse = _reflection.GeneratedProtocolMessageType('BatchAnnotateImagesResponse', (_message.Message,), dict(
DESCRIPTOR = _BATCHANNOTATEIMAGESRESPONSE,
__module__ = 'google.cloud.vision_v1p1beta1.proto.image_annotator_pb2'
,
__doc__ = """Response to a batch image annotation request.
Attributes:
responses:
Individual responses to image annotation requests within the
batch.
""",
# @@protoc_insertion_point(class_scope:google.cloud.vision.v1p1beta1.BatchAnnotateImagesResponse)
))
_sym_db.RegisterMessage(BatchAnnotateImagesResponse)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n!com.google.cloud.vision.v1p1beta1B\023ImageAnnotatorProtoP\001ZCgoogle.golang.org/genproto/googleapis/cloud/vision/v1p1beta1;vision\370\001\001'))
try:
# THESE ELEMENTS WILL BE DEPRECATED.
# Please use the generated *_pb2_grpc.py files instead.
import grpc
from grpc.beta import implementations as beta_implementations
from grpc.beta import interfaces as beta_interfaces
from grpc.framework.common import cardinality
from grpc.framework.interfaces.face import utilities as face_utilities
class ImageAnnotatorStub(object):
"""Service that performs Google Cloud Vision API detection tasks over client
images, such as face, landmark, logo, label, and text detection. The
ImageAnnotator service returns detected entities from the images.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.BatchAnnotateImages = channel.unary_unary(
'/google.cloud.vision.v1p1beta1.ImageAnnotator/BatchAnnotateImages',
request_serializer=BatchAnnotateImagesRequest.SerializeToString,
response_deserializer=BatchAnnotateImagesResponse.FromString,
)
class ImageAnnotatorServicer(object):
"""Service that performs Google Cloud Vision API detection tasks over client
images, such as face, landmark, logo, label, and text detection. The
ImageAnnotator service returns detected entities from the images.
"""
def BatchAnnotateImages(self, request, context):
"""Run image detection and annotation for a batch of images.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_ImageAnnotatorServicer_to_server(servicer, server):
rpc_method_handlers = {
'BatchAnnotateImages': grpc.unary_unary_rpc_method_handler(
servicer.BatchAnnotateImages,
request_deserializer=BatchAnnotateImagesRequest.FromString,
response_serializer=BatchAnnotateImagesResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'google.cloud.vision.v1p1beta1.ImageAnnotator', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
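# A minimal sketch of serving the handlers registered above, assuming a
# concrete ImageAnnotatorServicer subclass (here a hypothetical MyAnnotator)
# and an arbitrary port; `futures` would come from `concurrent.futures`:
#
#   server = grpc.server(futures.ThreadPoolExecutor(max_workers=4))
#   add_ImageAnnotatorServicer_to_server(MyAnnotator(), server)
#   server.add_insecure_port('[::]:50051')
#   server.start()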
class BetaImageAnnotatorServicer(object):
"""The Beta API is deprecated for 0.15.0 and later.
It is recommended to use the GA API (classes and functions in this
file not marked beta) for all further purposes. This class was generated
only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0."""
"""Service that performs Google Cloud Vision API detection tasks over client
images, such as face, landmark, logo, label, and text detection. The
ImageAnnotator service returns detected entities from the images.
"""
def BatchAnnotateImages(self, request, context):
"""Run image detection and annotation for a batch of images.
"""
context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
class BetaImageAnnotatorStub(object):
"""The Beta API is deprecated for 0.15.0 and later.
It is recommended to use the GA API (classes and functions in this
file not marked beta) for all further purposes. This class was generated
only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0."""
"""Service that performs Google Cloud Vision API detection tasks over client
images, such as face, landmark, logo, label, and text detection. The
ImageAnnotator service returns detected entities from the images.
"""
def BatchAnnotateImages(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
"""Run image detection and annotation for a batch of images.
"""
raise NotImplementedError()
BatchAnnotateImages.future = None
def beta_create_ImageAnnotator_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None):
"""The Beta API is deprecated for 0.15.0 and later.
It is recommended to use the GA API (classes and functions in this
file not marked beta) for all further purposes. This function was
generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0"""
request_deserializers = {
('google.cloud.vision.v1p1beta1.ImageAnnotator', 'BatchAnnotateImages'): BatchAnnotateImagesRequest.FromString,
}
response_serializers = {
('google.cloud.vision.v1p1beta1.ImageAnnotator', 'BatchAnnotateImages'): BatchAnnotateImagesResponse.SerializeToString,
}
method_implementations = {
('google.cloud.vision.v1p1beta1.ImageAnnotator', 'BatchAnnotateImages'): face_utilities.unary_unary_inline(servicer.BatchAnnotateImages),
}
server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout)
return beta_implementations.server(method_implementations, options=server_options)
def beta_create_ImageAnnotator_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None):
"""The Beta API is deprecated for 0.15.0 and later.
It is recommended to use the GA API (classes and functions in this
file not marked beta) for all further purposes. This function was
generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0"""
request_serializers = {
('google.cloud.vision.v1p1beta1.ImageAnnotator', 'BatchAnnotateImages'): BatchAnnotateImagesRequest.SerializeToString,
}
response_deserializers = {
('google.cloud.vision.v1p1beta1.ImageAnnotator', 'BatchAnnotateImages'): BatchAnnotateImagesResponse.FromString,
}
cardinalities = {
'BatchAnnotateImages': cardinality.Cardinality.UNARY_UNARY,
}
stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size)
return beta_implementations.dynamic_stub(channel, 'google.cloud.vision.v1p1beta1.ImageAnnotator', cardinalities, options=stub_options)
except ImportError:
pass
# @@protoc_insertion_point(module_scope)
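# A minimal client-side sketch for the classes above, assuming a reachable
# ImageAnnotator endpoint (the address is illustrative); new code should
# prefer the *_pb2_grpc stubs, as noted in the deprecation comment:
#
#   channel = grpc.insecure_channel('localhost:50051')
#   stub = ImageAnnotatorStub(channel)
#   response = stub.BatchAnnotateImages(BatchAnnotateImagesRequest())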
| 42.973206 | 9043 | 0.743392 | [
"Apache-2.0"
] | Alexander-Minyushkin/google-cloud-python | vision/google/cloud/vision_v1p1beta1/proto/image_annotator_pb2.py | 89,814 | Python |
import logging
# noinspection PyPackageRequirements
from telegram.ext import CommandHandler, ConversationHandler
# noinspection PyPackageRequirements
from telegram import ChatAction, Update
from bot import stickersbot
from bot.utils import decorators
from bot.utils import utils
from bot.database.base import session_scope
from bot.database.models.pack import Pack
from bot.strings import Strings
logger = logging.getLogger(__name__)
@decorators.action(ChatAction.TYPING)
@decorators.restricted
@decorators.failwithmessage
def on_list_command(update: Update, _):
logger.info('/list')
# packs = db.get_user_packs(update.effective_user.id, as_namedtuple=True)
with session_scope() as session:
packs = session.query(Pack).filter_by(user_id=update.effective_user.id).order_by(Pack.title).all()
packs = packs[:98] # can't include more than 100 entities
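# each entry links to the pack; the trailing marker flags animated ('a') vs. static ('s') packs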
strings_list = ['<a href="{}">{}</a> ({})'.format(utils.name2link(pack.name), pack.title, 'a' if pack.is_animated else 's') for pack in packs]
if not strings_list:
update.message.reply_text(Strings.LIST_NO_PACKS)
return
update.message.reply_html('• {}'.format('\n• '.join(strings_list)) + Strings.LIST_FOOTER)
return ConversationHandler.END # /list should end whatever conversation the user was having
stickersbot.add_handler(CommandHandler(['list', 'l'], on_list_command))
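# Illustrative rendering of the reply, assuming name2link() builds a
# t.me/addstickers URL for the pack name (both pack names below are made up):
# • <a href="https://t.me/addstickers/cats_by_bot">Cats</a> (s)
# • <a href="https://t.me/addstickers/dogs_by_bot">Dogs</a> (a)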
| 34.975 | 150 | 0.754825 | [
"MIT"
] | Ankit29-A/sticker-thief | bot/handlers/packs/list.py | 1,403 | Python |
# -*- coding: utf-8 -*-
'''
QSDsan: Quantitative Sustainable Design for sanitation and resource recovery systems
This module is developed by:
Joy Zhang <joycheung1994@gmail.com>
This module is under the University of Illinois/NCSA Open Source License.
Please refer to https://github.com/QSD-Group/QSDsan/blob/main/LICENSE.txt
for license details.
'''
from .. import SanUnit, WasteStream, Process, Processes, CompiledProcesses
from ._clarifier import _settling_flux
from sympy import symbols, lambdify, Matrix
from scipy.integrate import solve_ivp
from warnings import warn
from math import floor, ceil
import numpy as np
import pandas as pd
from numba import njit
__all__ = ('CSTR',
'SBR',
# 'PFR',
)
def _add_aeration_to_growth_model(aer, model):
if isinstance(aer, Process):
processes = Processes(model.tuple)
processes.append(aer)
processes.compile()
else:
processes = model
processes.compile()
return processes
# %%
@njit(cache=True)
def dydt_cstr_no_rxn_fixed_aer(QC_ins, dQC_ins, V_arr, Q_e_arr, _dstate, Cs):
Q_ins = QC_ins[:, -1]
C_ins = QC_ins[:, :-1]
flow_in = Q_ins @ C_ins / V_arr
Q_e_arr[:] = Q_ins.sum(axis=0)
_dstate[-1] = dQC_ins[:, -1].sum(axis=0)
flow_out = Q_e_arr * Cs / V_arr
_dstate[:-1] = flow_in - flow_out
@njit(cache=True)
def dydt_cstr_no_rxn_controlled_aer(QC_ins, dQC_ins, V_arr, Q_e_arr, _dstate, Cs):
Q_ins = QC_ins[:, -1]
C_ins = QC_ins[:, :-1]
flow_in = Q_ins @ C_ins / V_arr
Q_e_arr[:] = Q_ins.sum(axis=0)
_dstate[-1] = dQC_ins[:, -1].sum(axis=0)
flow_out = Q_e_arr * Cs / V_arr
_dstate[:-1] = flow_in - flow_out
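# Note: the two njit kernels above currently share the same flow-balance body.
# The fixed-DO behavior lives in the dy_dt wrappers built by CSTR._compile_ODE,
# which clamp Cs at the DO index and zero its derivative; the kernels only
# compute (flow in - flow out)/V for each component.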
#%%
class CSTR(SanUnit):
'''
A single continuous stirred tank reactor.
Parameters
----------
ID : str
ID for the reactor.
ins : :class:`WasteStream`
Influents to the reactor. Can be an array of up to 3 WasteStream objects by
default, typically wastewater to be treated, recycled effluent, recycled
activated sludge.
outs : :class:`WasteStream`
Treated effluent.
split : iterable of float
Volumetric splits of effluent flows if there are more than one effluent.
The default is None.
V_max : float
Designed volume, in [m^3]. The default is 1000.
aeration : float or :class:`Process`, optional
Aeration setting. Either specify a targeted dissolved oxygen concentration
in [mg O2/L] or provide a :class:`Process` object to represent aeration,
or None for no aeration. The default is 2.0.
DO_ID : str, optional
The :class:`Component` ID for dissolved oxygen, only relevant when the
reactor is aerated. The default is 'S_O2'.
suspended_growth_model : :class:`Processes`, optional
The suspended growth biokinetic model. The default is None.
'''
_N_ins = 3
_N_outs = 1
_ins_size_is_fixed = False
_outs_size_is_fixed = False
def __init__(self, ID='', ins=None, outs=(), split=None, thermo=None,
init_with='WasteStream', V_max=1000, aeration=2.0,
DO_ID='S_O2', suspended_growth_model=None,
isdynamic=True, **kwargs):
SanUnit.__init__(self, ID, ins, outs, thermo, init_with, isdynamic=isdynamic)
self._V_max = V_max
self._aeration = aeration
self._DO_ID = DO_ID
self._model = suspended_growth_model
self._concs = None
self._mixed = WasteStream()
self.split = split
for attr, value in kwargs.items():
setattr(self, attr, value)
@property
def V_max(self):
'''[float] The designed maximum liquid volume, not accounting for increased volume due to aeration, in m^3.'''
return self._V_max
@V_max.setter
def V_max(self, Vm):
self._V_max = Vm
@property
def aeration(self):
'''[:class:`Process` or float or NoneType] Aeration model.'''
return self._aeration
@aeration.setter
def aeration(self, ae):
if ae is None or isinstance(ae, Process): self._aeration = ae
elif isinstance(ae, (float, int)):
if ae < 0:
raise ValueError('targeted dissolved oxygen concentration for aeration must be non-negative.')
else:
if ae > 14:
warn(f'targeted dissolved oxygen concentration for {self.ID} might exceed the saturated level.')
self._aeration = ae
else:
raise TypeError(f'aeration must be one of the following types: float, '
f'int, Process, NoneType. Not {type(ae)}')
@property
def suspended_growth_model(self):
'''[:class:`CompiledProcesses` or NoneType] Suspended growth model.'''
return self._model
@suspended_growth_model.setter
def suspended_growth_model(self, model):
if isinstance(model, CompiledProcesses) or model is None: self._model = model
else: raise TypeError(f'suspended_growth_model must be one of the following '
f'types: CompiledProcesses, NoneType. Not {type(model)}')
@property
def DO_ID(self):
'''[str] The `Component` ID for dissolved oxygen used in the suspended growth model and the aeration model.'''
return self._DO_ID
@DO_ID.setter
def DO_ID(self, doid):
if doid not in self.components.IDs:
raise ValueError(f'DO_ID must be in the set of `CompiledComponents` used to set thermo, '
f'i.e., one of {self.components.IDs}.')
self._DO_ID = doid
@property
def split(self):
'''[numpy.1darray or NoneType] The volumetric split of outs.'''
return self._split
@split.setter
def split(self, split):
if split is None: self._split = split
else:
if len(split) != len(self._outs):
raise ValueError('split and outs must have the same size')
self._split = np.array(split)/sum(split)
@property
def state(self):
'''The state of the CSTR, including component concentrations [mg/L] and flow rate [m^3/d].'''
if self._state is None: return None
else:
return dict(zip(list(self.components.IDs) + ['Q'], self._state))
@state.setter
def state(self, QCs):
QCs = np.asarray(QCs)
if QCs.shape != (len(self.components)+1, ):
raise ValueError(f'state must be a 1D array of length {len(self.components) + 1},'
'indicating component concentrations [mg/L] and total flow rate [m^3/d]')
self._state = QCs
def set_init_conc(self, **kwargs):
'''set the initial concentrations [mg/L] of the CSTR.'''
Cs = np.zeros(len(self.components))
cmpx = self.components.index
for k, v in kwargs.items(): Cs[cmpx(k)] = v
self._concs = Cs
def _init_state(self):
mixed = self._mixed
Q = mixed.get_total_flow('m3/d')
if self._concs is not None: Cs = self._concs
else: Cs = mixed.conc
self._state = np.append(Cs, Q).astype('float64')
self._dstate = self._state * 0.
def _update_state(self):
arr = self._state
if self.split is None: self._outs[0].state = arr
else:
for ws, spl in zip(self._outs, self.split):
y = arr.copy()
y[-1] *= spl
ws.state = y
def _update_dstate(self):
arr = self._dstate
if self.split is None: self._outs[0].dstate = arr
else:
for ws, spl in zip(self._outs, self.split):
y = arr.copy()
y[-1] *= spl
ws.dstate = y
def _run(self):
'''Only to converge volumetric flows.'''
mixed = self._mixed # avoid creating multiple new streams
mixed.mix_from(self.ins)
Q = mixed.F_vol # m3/hr
if self.split is None: self.outs[0].copy_like(mixed)
else:
for ws, spl in zip(self._outs, self.split):
ws.copy_like(mixed)
ws.set_total_flow(Q*spl, 'm3/hr')
def get_retained_mass(self, biomass_IDs):
cmps = self.components
mass = cmps.i_mass * self._state[:-1]
return self._V_max * mass[cmps.indices(biomass_IDs)].sum()
@property
def ODE(self):
if self._ODE is None:
self._compile_ODE()
return self._ODE
def _compile_ODE(self):
isa = isinstance
C = list(symbols(self.components.IDs))
m = len(C)
if self._model is None:
warn(f'{self.ID} was initialized without a suspended growth model, '
f'and thus runs as a non-reactive unit')
r = lambda *args: np.zeros(m)
else:
processes = _add_aeration_to_growth_model(self._aeration, self._model)
r_eqs = list(processes.production_rates.rate_of_production)
r = lambdify(C, r_eqs, 'numpy')
_dstate = self._dstate
_update_dstate = self._update_dstate
V_arr = np.full(m, self._V_max)
Q_e_arr = np.zeros(m)
if isa(self._aeration, (float, int)):
i = self.components.index(self._DO_ID)
fixed_DO = self._aeration
def dy_dt(t, QC_ins, QC, dQC_ins):
Cs = QC[:-1]
Cs[i] = fixed_DO
dydt_cstr_no_rxn_controlled_aer(QC_ins, dQC_ins, V_arr, Q_e_arr, _dstate, Cs)
_dstate[:-1] += r(*Cs)
_dstate[i] = 0
_update_dstate()
else:
def dy_dt(t, QC_ins, QC, dQC_ins):
Cs = QC[:-1]
dydt_cstr_no_rxn_fixed_aer(QC_ins, dQC_ins, V_arr, Q_e_arr, _dstate, Cs)
_dstate[:-1] += r(*Cs)
_update_dstate()
self._ODE = dy_dt
def _design(self):
pass
class SBR(SanUnit):
'''
Sequential batch reactors operated in parallel. The number of reactors is
determined by operation cycle and influent flowrate. [1]_
Parameters
----------
ID : str
ID for the reactors. The default is ''.
ins : :class:`WasteStream`
Influent to the reactor. Expected number of influent is 1.
outs : :class:`WasteStream`
Treated effluent and wasted sludge.
surface_area : float, optional
Surface area of the reactor bottom, in [m^2]. The reactor is assumed
to be cylinder. The default is 1500.
height : float, optional
Height of the reactor, in [m]. The default is 4.
operation_cycle : iterable of float, optional
Operation cycle of the SBR, time for each stage specified in [h]. There
are 7 stages: 1 - fill, 2 - fill, 3 - mix, 4 - mix, 5 - settle, 6 - decant,
7 - desludge. The first 4 stages are modeled as a biological reactor.
The 5th stage is modeled as a 1D N-layer settler. The last 2 stages are
assumed inactive. The default is (0.5, 1.5, 2.0, 0, 1.0, 0.5, 0.1).
aeration : iterable of float and/or :class:`Process`, optional
Aeration settings for the first 4 stages of the operation cycle. Either
specify a targeted dissolved oxygen concentration in [mg O2/L] or provide
a :class:`Process` object to represent aeration, or None for no aeration.
The default is (None, None, None, 2.0).
DO_ID : str, optional
The :class:`Component` ID for dissolved oxygen, only relevant when the
reactor is aerated. The default is 'S_O2'.
suspended_growth_model : :class:`Processes`, optional
The suspended growth biokinetic model. The default is None.
N_layer : int, optional
The number of layers to model settling. The default is 10.
pumped_flow : float, optional
Designed effluent flowrate, in [m^3/d]. The default is None.
underflow : float, optional
Designed wasted activated sludge flowrate, in [m^3/d]. The default is None.
X_threshold : float, optional
Threshold suspended solid concentration, in [g/m^3]. The default is 3000.
v_max : float, optional
Maximum theoretical (i.e. Vesilind) settling velocity, in [m/d]. The
default is 474.
v_max_practical : float, optional
Maximum practical settling velocity, in [m/d]. The default is 250.
rh : float, optional
Hindered zone settling parameter in the double-exponential settling velocity
function, in [m^3/g]. The default is 5.76e-4.
rp : float, optional
Flocculant zone settling parameter in the double-exponential settling velocity
function, in [m^3/g]. The default is 2.86e-3.
fns : float, optional
Non-settleable fraction of the suspended solids, dimensionless. Must be within
[0, 1]. The default is 2.28e-3.
cache_state : bool, optional
Whether to store the volume and composition of retained sludge in the tank
from the most recent run. The default is True.
References
----------
.. [1] Takács, I.; Patry, G. G.; Nolasco, D. A Dynamic Model of the Clarification
-Thickening Process. Water Res. 1991, 25 (10), 1263–1271.
https://doi.org/10.1016/0043-1354(91)90066-Y.
'''
_N_ins = 1
_N_outs = 2
def __init__(self, ID='', ins=None, outs=(), thermo=None, init_with='WasteStream',
surface_area=1500, height=4,
operation_cycle=(0.5, 1.5, 2.0, 0, 1.0, 0.5, 0.1),
aeration=(None, None, None, 2.0), DO_ID='S_O2',
suspended_growth_model=None, N_layer=10,
pumped_flow=None, underflow=None,
X_threshold=3000, v_max=474, v_max_practical=250,
rh=5.76e-4, rp=2.86e-3, fns=2.28e-3,
cache_state=True, **kwargs):
SanUnit.__init__(self, ID, ins, outs, thermo, init_with)
self._V = surface_area * height
self._A = surface_area
self._h = height
self._operation_cycle = operation_cycle
self._aeration = aeration
self._DO_ID = DO_ID
self._model = suspended_growth_model
self._N_layer = N_layer
self._Q_e = pumped_flow
self._Q_WAS = underflow
self._X_t = X_threshold
self._v_max = v_max
self._v_max_p = v_max_practical
self._rh = rh
self._rp = rp
self._fns = fns
self._cache_state = cache_state
for attr, value in kwargs.items():
setattr(self, attr, value)
self._init_Vas = None
self._init_Cas = None
self._dynamic_composition = None
@property
def operation_cycle(self):
return dict(zip(('fill_1', 'fill_2', 'mix_1', 'mix_2', 'settle', 'decant', 'desludge'),
self._operation_cycle))
@property
def total_cycle_time(self):
return sum(self._operation_cycle)
@property
def aeration(self):
return dict(zip(('fill_1', 'fill_2', 'mix_1', 'mix_2'),
self._aeration[:4]))
@property
def C_t(self):
if self._dynamic_composition:
return pd.DataFrame(self._dynamic_composition,
columns = ['Time[d]'] + list(self.components.IDs))
else: return None
def _run(self, cache_state=True):
if self._model is None:
raise RuntimeError(f'{self.ID} was initialized without a suspended growth model.')
else:
isa = isinstance
inf = self.ins[0]
Q_in = inf.get_total_flow('m3/d')
eff, sludge = self.outs
eff.copy_like(inf)
sludge.copy_like(inf)
C_in = inf.mass / inf.F_vol * 1e3 # concentrations in g/m3
cmps = self.components
C = list(symbols(cmps.IDs))
if self._init_Vas is not None:
V_0 = self._init_Vas
C_0 = self._init_Cas
else:
V_0 = 0
C_0 = C_in
n = self._N_layer
if self._aeration.count(None) == len(self._aeration):
Vmax = self._V
hj = self._h/n
else:
Vmax = self._V*0.75
hj = self._h*0.75/n
# ********fill and mix/aerate stages***********
T_fill = (Vmax - V_0)/Q_in # maximum total fill time in day
T = [t/24 for t in self._operation_cycle] # operation cycle in day
if T_fill <= T[0]:
schedule = [T_fill, T[0]-T_fill] + T[1:4]
aer = [self._aeration[0], self._aeration[0]] + list(self._aeration[1:4])
fill = [True] + [False]*4
V_total = Vmax
elif T_fill <= T[0]+T[1]:
schedule = [T[0], T_fill-T[0], T[0]+T[1]-T_fill] + T[2:4]
aer = list(self._aeration[:2]) + [self._aeration[1]] + list(self._aeration[2:4])
fill = [True]*2 + [False]*3
V_total = Vmax
else:
schedule = T[:4]
aer = list(self._aeration[:4])
fill = [True]*2 + [False]*2
V_total = Q_in*(T[0]+T[1])+V_0
hj = V_total/self._V*self._h/n
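            # merge consecutive stages that share the same fill state and
            # aeration setting, so each distinct condition is integrated once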
for i in range(1, len(schedule)):
if fill[-i] == fill[-i-1] and aer[-i] == aer[-i-1]:
schedule[-i-1] += schedule[-i]
schedule[-i] = 0
            t_arr = np.array([])
            y_mat = None
            for i in range(len(schedule)):
                if schedule[i] > 0:
                    dC_dt, J_func = self._compile_dC_dt(V_0, Q_in, C_in, C, fill[i], aer[i])
                    if isa(aer[i], (float, int)): C_0[cmps.index(self._DO_ID)] = aer[i]
                    sol = solve_ivp(dC_dt, (0, schedule[i]), C_0, method='BDF', jac=J_func)
                    C_0 = sol.y.transpose()[-1]
                    V_0 += Q_in * schedule[i] * fill[i]
                    # offset each stage's time axis by the end of the previous
                    # stage (zero for the first), so the record is continuous
                    t_offset = t_arr[-1] if t_arr.size > 0 else 0.
                    t_arr = np.concatenate((t_arr, sol.t + t_offset))
                    y_mat = sol.y if y_mat is None else np.hstack((y_mat, sol.y))
self._dynamic_composition = np.vstack((t_arr, y_mat)).transpose()
# *********settle, decant, desludge**********
eff.set_flow(C_0*eff.F_vol, 'g/hr', self.components.IDs)
X_0 = eff.get_TSS()
X_min = X_0 * self._fns
T_settle = T[4]
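            # 1-D layered settling: the flux out of each layer is J = v(X)*X,
            # where `_settling_flux` is assumed to implement the
            # double-exponential (Takács) velocity of [1]:
            #   v(X) = max(0, min(v_max_practical,
            #                     v_max*(exp(-rh*(X-X_min)) - exp(-rp*(X-X_min))))),
            # with X_min = fns*X_0 the non-settleable solids concentration.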
def dX_dt(t, X):
VX = [_settling_flux(x, self._v_max, self._v_max_p, X_min, self._rh, self._rp) for x in X]
J = [VX[j] if X[j+1] <= self._X_t else min(VX[j], VX[j+1]) for j in range(n-1)]
settle_out = np.array(J + [0])
settle_in = np.array([0] + J)
dXdt = (settle_in - settle_out)/hj
return dXdt
sol = solve_ivp(dX_dt, (0, T_settle), np.ones(n)*X_0)
X = sol.y.transpose()[-1]
V_eff = min(T[5]*self._Q_e, V_total*(n-1)/n)
            n_eff = V_eff/V_total*n     # number of layers decanted (layer 0 = top)
w_eff = np.array([1]*floor(n_eff)+[n_eff-floor(n_eff)])
X_eff = np.average(X[:ceil(n_eff)], weights=w_eff)
eff_mass_flow = (X_eff/X_0*cmps.x + (1-cmps.x))*C_0*V_eff/T[5]
eff.set_flow(eff_mass_flow, 'g/d', cmps.IDs)
V_was = min(T[6]*self._Q_WAS, V_total-V_eff)
X_as = (V_total*X_0 - V_eff*X_eff) / (V_total-V_eff)
C_as = (X_as/X_0*cmps.x + (1-cmps.x))*C_0
was_mass_flow = C_as*V_was/T[6]
sludge.set_flow(was_mass_flow, 'g/d', cmps.IDs)
if self._cache_state:
self._init_Vas = V_total - V_eff - V_was
self._init_Cas = C_as
def _design(self):
pass
def _compile_dC_dt(self, V0, Qin, Cin, C, fill, aer):
isa = isinstance
processes = _add_aeration_to_growth_model(aer, self._model)
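        # While filling, the tank behaves as a variable-volume CSTR with
        # V(t) = V0 + Qin*t, so each component balance reads
        #   dC/dt = Qin*(Cin - C)/V(t) + r = (Cin - C)/(t + V0/Qin) + r.
        # Once full (fill=False), only the reaction terms r remain.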
if fill:
t = symbols('t')
mass_balance_terms = list(zip(Cin, C, processes.production_rates.rate_of_production))
C_dot_eqs = [(cin-c)/(t+V0/Qin) + r for cin, c, r in mass_balance_terms]
if isa(aer, (float, int)): C_dot_eqs[self.components.index(self._DO_ID)] = 0
def dC_dt(t, y):
C_dot = lambdify([t]+C, C_dot_eqs)
return C_dot(t, *y)
J = Matrix(dC_dt(t, C)).jacobian(C)
else:
C_dot_eqs = processes.production_rates.rate_of_production
if isa(aer, (float, int)): C_dot_eqs[self.components.index(self._DO_ID)] = 0
def dC_dt(t, y):
C_dot = lambdify(C, C_dot_eqs)
return C_dot(*y)
J = Matrix(dC_dt(None, C)).jacobian(C)
def J_func(t, y):
J_func = lambdify(C, J)
return J_func(*y)
return (dC_dt, J_func)
# class PFR(SanUnit):
# _N_ins = 1
# _N_outs = 2
# def __init__(self, ID='', ins=None, outs=(), **kwargs):
# SanUnit.__init__(self, ID, ins, outs)
# def _run(self, steady_state=True):
# pass
# def _design(self):
#         pass
# vim: set et sw=4 sts=4 fileencoding=utf-8:
#
# Python camera library for the Rasperry-Pi camera module
# Copyright (c) 2013-2017 Dave Jones <dave@waveform.org.uk>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import (
unicode_literals,
print_function,
division,
absolute_import,
)
# Make Py2's str equivalent to Py3's
str = type('')
import warnings
import datetime
import mimetypes
import ctypes as ct
import threading
from fractions import Fraction
from operator import itemgetter
from collections import namedtuple
from . import bcm_host, mmal, mmalobj as mo
from .exc import (
PiCameraError,
PiCameraValueError,
PiCameraRuntimeError,
PiCameraClosed,
PiCameraNotRecording,
PiCameraAlreadyRecording,
PiCameraMMALError,
PiCameraDeprecated,
PiCameraFallback,
)
from .encoders import (
PiVideoFrame,
PiVideoEncoder,
PiRawVideoEncoder,
PiCookedVideoEncoder,
PiRawOneImageEncoder,
PiRawMultiImageEncoder,
PiCookedOneImageEncoder,
PiCookedMultiImageEncoder,
)
from .renderers import (
PiPreviewRenderer,
PiOverlayRenderer,
PiNullSink,
)
from .color import Color
try:
from RPi import GPIO
except ImportError:
# Can't find RPi.GPIO so just null-out the reference
GPIO = None
def docstring_values(values, indent=8):
"""
Formats a dictionary of values for inclusion in a docstring.
"""
return ('\n' + ' ' * indent).join(
"* ``'%s'``" % k
for (k, v) in
sorted(values.items(), key=itemgetter(1)))
class PiCameraMaxResolution(object):
"""
Singleton representing the maximum resolution of the camera module.
"""
PiCameraMaxResolution = PiCameraMaxResolution()
class PiCameraMaxFramerate(object):
"""
Singleton representing the maximum framerate of the camera module.
"""
PiCameraMaxFramerate = PiCameraMaxFramerate()
class PiCamera(object):
"""
Provides a pure Python interface to the Raspberry Pi's camera module.
Upon construction, this class initializes the camera. The *camera_num*
parameter (which defaults to 0) selects the camera module that the instance
will represent. Only the Raspberry Pi compute module currently supports
more than one camera.
The *sensor_mode*, *resolution*, *framerate*, *framerate_range*, and
*clock_mode* parameters provide initial values for the :attr:`sensor_mode`,
:attr:`resolution`, :attr:`framerate`, :attr:`framerate_range`, and
:attr:`clock_mode` attributes of the class (these attributes are all
relatively expensive to set individually, hence setting them all upon
construction is a speed optimization). Please refer to the attribute
documentation for more information and default values.
The *stereo_mode* and *stereo_decimate* parameters configure dual cameras
    on a compute module for stereoscopic mode. These parameters can only be set
at construction time; they cannot be altered later without closing the
:class:`PiCamera` instance and recreating it. The *stereo_mode* parameter
defaults to ``'none'`` (no stereoscopic mode) but can be set to
``'side-by-side'`` or ``'top-bottom'`` to activate a stereoscopic mode. If
the *stereo_decimate* parameter is ``True``, the resolution of the two
cameras will be halved so that the resulting image has the same dimensions
as if stereoscopic mode were not being used.
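    For example, on a compute module with two cameras attached, a
    side-by-side stereoscopic capture might look like this (a minimal
    sketch)::
        import picamera
        with picamera.PiCamera(stereo_mode='side-by-side') as camera:
            camera.capture('stereo.jpg')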
The *led_pin* parameter can be used to specify the GPIO pin which should be
used to control the camera's LED via the :attr:`led` attribute. If this is
not specified, it should default to the correct value for your Pi platform.
You should only need to specify this parameter if you are using a custom
DeviceTree blob (this is only typical on the `Compute Module`_ platform).
No preview or recording is started automatically upon construction. Use
the :meth:`capture` method to capture images, the :meth:`start_recording`
method to begin recording video, or the :meth:`start_preview` method to
start live display of the camera's input.
Several attributes are provided to adjust the camera's configuration. Some
of these can be adjusted while a recording is running, like
:attr:`brightness`. Others, like :attr:`resolution`, can only be adjusted
when the camera is idle.
When you are finished with the camera, you should ensure you call the
:meth:`close` method to release the camera resources::
camera = PiCamera()
try:
# do something with the camera
pass
finally:
camera.close()
The class supports the context manager protocol to make this particularly
easy (upon exiting the :keyword:`with` statement, the :meth:`close` method
is automatically called)::
with PiCamera() as camera:
# do something with the camera
pass
.. versionchanged:: 1.8
Added *stereo_mode* and *stereo_decimate* parameters.
.. versionchanged:: 1.9
Added *resolution*, *framerate*, and *sensor_mode* parameters.
.. versionchanged:: 1.10
Added *led_pin* parameter.
.. versionchanged:: 1.11
Added *clock_mode* parameter, and permitted setting of resolution as
appropriately formatted string.
.. versionchanged:: 1.13
Added *framerate_range* parameter.
.. _Compute Module: https://www.raspberrypi.org/documentation/hardware/computemodule/cmio-camera.md
"""
CAMERA_PREVIEW_PORT = 0
CAMERA_VIDEO_PORT = 1
CAMERA_CAPTURE_PORT = 2
MAX_RESOLUTION = PiCameraMaxResolution # modified by PiCamera.__init__
MAX_FRAMERATE = PiCameraMaxFramerate # modified by PiCamera.__init__
DEFAULT_ANNOTATE_SIZE = 32
CAPTURE_TIMEOUT = 60
METER_MODES = {
'average': mmal.MMAL_PARAM_EXPOSUREMETERINGMODE_AVERAGE,
'spot': mmal.MMAL_PARAM_EXPOSUREMETERINGMODE_SPOT,
'backlit': mmal.MMAL_PARAM_EXPOSUREMETERINGMODE_BACKLIT,
'matrix': mmal.MMAL_PARAM_EXPOSUREMETERINGMODE_MATRIX,
}
EXPOSURE_MODES = {
'off': mmal.MMAL_PARAM_EXPOSUREMODE_OFF,
'auto': mmal.MMAL_PARAM_EXPOSUREMODE_AUTO,
'night': mmal.MMAL_PARAM_EXPOSUREMODE_NIGHT,
'nightpreview': mmal.MMAL_PARAM_EXPOSUREMODE_NIGHTPREVIEW,
'backlight': mmal.MMAL_PARAM_EXPOSUREMODE_BACKLIGHT,
'spotlight': mmal.MMAL_PARAM_EXPOSUREMODE_SPOTLIGHT,
'sports': mmal.MMAL_PARAM_EXPOSUREMODE_SPORTS,
'snow': mmal.MMAL_PARAM_EXPOSUREMODE_SNOW,
'beach': mmal.MMAL_PARAM_EXPOSUREMODE_BEACH,
'verylong': mmal.MMAL_PARAM_EXPOSUREMODE_VERYLONG,
'fixedfps': mmal.MMAL_PARAM_EXPOSUREMODE_FIXEDFPS,
'antishake': mmal.MMAL_PARAM_EXPOSUREMODE_ANTISHAKE,
'fireworks': mmal.MMAL_PARAM_EXPOSUREMODE_FIREWORKS,
}
FLASH_MODES = {
'off': mmal.MMAL_PARAM_FLASH_OFF,
'auto': mmal.MMAL_PARAM_FLASH_AUTO,
'on': mmal.MMAL_PARAM_FLASH_ON,
'redeye': mmal.MMAL_PARAM_FLASH_REDEYE,
'fillin': mmal.MMAL_PARAM_FLASH_FILLIN,
'torch': mmal.MMAL_PARAM_FLASH_TORCH,
}
AWB_MODES = {
'off': mmal.MMAL_PARAM_AWBMODE_OFF,
'auto': mmal.MMAL_PARAM_AWBMODE_AUTO,
'sunlight': mmal.MMAL_PARAM_AWBMODE_SUNLIGHT,
'cloudy': mmal.MMAL_PARAM_AWBMODE_CLOUDY,
'shade': mmal.MMAL_PARAM_AWBMODE_SHADE,
'tungsten': mmal.MMAL_PARAM_AWBMODE_TUNGSTEN,
'fluorescent': mmal.MMAL_PARAM_AWBMODE_FLUORESCENT,
'incandescent': mmal.MMAL_PARAM_AWBMODE_INCANDESCENT,
'flash': mmal.MMAL_PARAM_AWBMODE_FLASH,
'horizon': mmal.MMAL_PARAM_AWBMODE_HORIZON,
}
IMAGE_EFFECTS = {
'none': mmal.MMAL_PARAM_IMAGEFX_NONE,
'negative': mmal.MMAL_PARAM_IMAGEFX_NEGATIVE,
'solarize': mmal.MMAL_PARAM_IMAGEFX_SOLARIZE,
# The following don't work
#'posterize': mmal.MMAL_PARAM_IMAGEFX_POSTERIZE,
#'whiteboard': mmal.MMAL_PARAM_IMAGEFX_WHITEBOARD,
#'blackboard': mmal.MMAL_PARAM_IMAGEFX_BLACKBOARD,
'sketch': mmal.MMAL_PARAM_IMAGEFX_SKETCH,
'denoise': mmal.MMAL_PARAM_IMAGEFX_DENOISE,
'emboss': mmal.MMAL_PARAM_IMAGEFX_EMBOSS,
'oilpaint': mmal.MMAL_PARAM_IMAGEFX_OILPAINT,
'hatch': mmal.MMAL_PARAM_IMAGEFX_HATCH,
'gpen': mmal.MMAL_PARAM_IMAGEFX_GPEN,
'pastel': mmal.MMAL_PARAM_IMAGEFX_PASTEL,
'watercolor': mmal.MMAL_PARAM_IMAGEFX_WATERCOLOUR,
'film': mmal.MMAL_PARAM_IMAGEFX_FILM,
'blur': mmal.MMAL_PARAM_IMAGEFX_BLUR,
'saturation': mmal.MMAL_PARAM_IMAGEFX_SATURATION,
'colorswap': mmal.MMAL_PARAM_IMAGEFX_COLOURSWAP,
'washedout': mmal.MMAL_PARAM_IMAGEFX_WASHEDOUT,
'posterise': mmal.MMAL_PARAM_IMAGEFX_POSTERISE,
'colorpoint': mmal.MMAL_PARAM_IMAGEFX_COLOURPOINT,
'colorbalance': mmal.MMAL_PARAM_IMAGEFX_COLOURBALANCE,
'cartoon': mmal.MMAL_PARAM_IMAGEFX_CARTOON,
'deinterlace1': mmal.MMAL_PARAM_IMAGEFX_DEINTERLACE_DOUBLE,
'deinterlace2': mmal.MMAL_PARAM_IMAGEFX_DEINTERLACE_ADV,
}
DRC_STRENGTHS = {
'off': mmal.MMAL_PARAMETER_DRC_STRENGTH_OFF,
'low': mmal.MMAL_PARAMETER_DRC_STRENGTH_LOW,
'medium': mmal.MMAL_PARAMETER_DRC_STRENGTH_MEDIUM,
'high': mmal.MMAL_PARAMETER_DRC_STRENGTH_HIGH,
}
RAW_FORMATS = {
'yuv',
'rgb',
'rgba',
'bgr',
'bgra',
}
STEREO_MODES = {
'none': mmal.MMAL_STEREOSCOPIC_MODE_NONE,
'side-by-side': mmal.MMAL_STEREOSCOPIC_MODE_SIDE_BY_SIDE,
'top-bottom': mmal.MMAL_STEREOSCOPIC_MODE_BOTTOM,
}
CLOCK_MODES = {
'reset': mmal.MMAL_PARAM_TIMESTAMP_MODE_RESET_STC,
'raw': mmal.MMAL_PARAM_TIMESTAMP_MODE_RAW_STC,
}
_METER_MODES_R = {v: k for (k, v) in METER_MODES.items()}
_EXPOSURE_MODES_R = {v: k for (k, v) in EXPOSURE_MODES.items()}
_FLASH_MODES_R = {v: k for (k, v) in FLASH_MODES.items()}
_AWB_MODES_R = {v: k for (k, v) in AWB_MODES.items()}
_IMAGE_EFFECTS_R = {v: k for (k, v) in IMAGE_EFFECTS.items()}
_DRC_STRENGTHS_R = {v: k for (k, v) in DRC_STRENGTHS.items()}
_STEREO_MODES_R = {v: k for (k, v) in STEREO_MODES.items()}
_CLOCK_MODES_R = {v: k for (k, v) in CLOCK_MODES.items()}
__slots__ = (
'_used_led',
'_led_pin',
'_camera',
'_camera_config',
'_camera_exception',
'_revision',
'_preview',
'_preview_alpha',
'_preview_layer',
'_preview_fullscreen',
'_preview_window',
'_splitter',
'_splitter_connection',
'_encoders_lock',
'_encoders',
'_overlays',
'_raw_format',
'_image_effect_params',
'_exif_tags',
)
def __init__(
self, camera_num=0, stereo_mode='none', stereo_decimate=False,
resolution=None, framerate=None, sensor_mode=0, led_pin=None,
clock_mode='reset', framerate_range=None):
bcm_host.bcm_host_init()
mimetypes.add_type('application/h264', '.h264', False)
mimetypes.add_type('application/mjpeg', '.mjpg', False)
mimetypes.add_type('application/mjpeg', '.mjpeg', False)
self._used_led = False
if GPIO and led_pin is None:
try:
led_pin = {
(0, 0): 2, # compute module (default for cam 0)
(0, 1): 30, # compute module (default for cam 1)
(1, 0): 5, # Pi 1 model B rev 1
(2, 0): 5, # Pi 1 model B rev 2 or model A
(3, 0): 32, # Pi 1 model B+ or Pi 2 model B
}[(GPIO.RPI_REVISION, camera_num)]
except KeyError:
raise PiCameraError(
'Unable to determine default GPIO LED pin for RPi '
'revision %d and camera num %d' % (
GPIO.RPI_REVISION, camera_num))
self._led_pin = led_pin
self._camera = None
self._camera_config = None
self._camera_exception = None
self._preview = None
self._preview_alpha = 255
self._preview_layer = 2
self._preview_fullscreen = True
self._preview_window = None
self._splitter = None
self._splitter_connection = None
self._encoders_lock = threading.Lock()
self._encoders = {}
self._overlays = []
self._raw_format = 'yuv'
self._image_effect_params = None
with mo.MMALCameraInfo() as camera_info:
info = camera_info.control.params[mmal.MMAL_PARAMETER_CAMERA_INFO]
self._revision = 'ov5647'
if camera_info.info_rev > 1:
self._revision = info.cameras[camera_num].camera_name.decode('ascii')
self._exif_tags = {
'IFD0.Model': 'RP_%s' % self._revision,
'IFD0.Make': 'RaspberryPi',
}
if PiCamera.MAX_RESOLUTION is PiCameraMaxResolution:
PiCamera.MAX_RESOLUTION = mo.PiResolution(
info.cameras[camera_num].max_width,
info.cameras[camera_num].max_height,
)
if PiCamera.MAX_FRAMERATE is PiCameraMaxFramerate:
if self._revision.upper() == 'OV5647':
PiCamera.MAX_FRAMERATE = 90
else:
PiCamera.MAX_FRAMERATE = 120
if resolution is None:
# Get screen resolution
w = ct.c_uint32()
h = ct.c_uint32()
if bcm_host.graphics_get_display_size(0, w, h) == -1:
w = 1280
h = 720
else:
w = int(w.value)
h = int(h.value)
resolution = mo.PiResolution(w, h)
elif resolution is PiCameraMaxResolution:
resolution = PiCamera.MAX_RESOLUTION
else:
resolution = mo.to_resolution(resolution)
if framerate_range is None:
if framerate is None:
framerate = 30
elif framerate is PiCameraMaxFramerate:
framerate = PiCamera.MAX_FRAMERATE
else:
framerate = mo.to_fraction(framerate)
elif framerate is not None:
raise PiCameraValueError(
"Can't specify framerate and framerate_range")
else:
try:
low, high = framerate_range
except TypeError:
raise PiCameraValueError(
"framerate_range must have (low, high) values")
if low is PiCameraMaxFramerate:
low = PiCamera.MAX_FRAMERATE
if high is PiCameraMaxFramerate:
high = PiCamera.MAX_FRAMERATE
framerate = (mo.to_fraction(low), mo.to_fraction(high))
try:
stereo_mode = self.STEREO_MODES[stereo_mode]
except KeyError:
raise PiCameraValueError('Invalid stereo mode: %s' % stereo_mode)
try:
clock_mode = self.CLOCK_MODES[clock_mode]
except KeyError:
raise PiCameraValueError('Invalid clock mode: %s' % clock_mode)
try:
self._init_camera(camera_num, stereo_mode, stereo_decimate)
self._configure_camera(sensor_mode, framerate, resolution, clock_mode)
self._init_preview()
self._init_splitter()
self._camera.enable()
self._init_defaults()
except:
self.close()
raise
def _init_led(self):
global GPIO
if GPIO:
try:
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(self._led_pin, GPIO.OUT, initial=GPIO.LOW)
self._used_led = True
except RuntimeError:
# We're probably not running as root. In this case, forget the
# GPIO reference so we don't try anything further
GPIO = None
def _init_camera(self, num, stereo_mode, stereo_decimate):
try:
self._camera = mo.MMALCamera()
except PiCameraMMALError as e:
if e.status == mmal.MMAL_ENOMEM:
raise PiCameraError(
"Camera is not enabled. Try running 'sudo raspi-config' "
"and ensure that the camera has been enabled.")
else:
raise
self._camera_config = self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_CONFIG]
# Don't attempt to set this if stereo mode isn't requested as it'll
# break compatibility on older firmwares
if stereo_mode != mmal.MMAL_STEREOSCOPIC_MODE_NONE:
for p in self._camera.outputs:
mp = mmal.MMAL_PARAMETER_STEREOSCOPIC_MODE_T(
mmal.MMAL_PARAMETER_HEADER_T(
mmal.MMAL_PARAMETER_STEREOSCOPIC_MODE,
ct.sizeof(mmal.MMAL_PARAMETER_STEREOSCOPIC_MODE_T),
),
mode=stereo_mode,
decimate=stereo_decimate,
swap_eyes=False,
)
p.params[mmal.MMAL_PARAMETER_STEREOSCOPIC_MODE] = mp
# Must be done *after* stereo-scopic setting
self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_NUM] = num
def _init_defaults(self):
self.sharpness = 0
self.contrast = 0
self.brightness = 50
self.saturation = 0
self.iso = 0 # auto
self.video_stabilization = False
self.exposure_compensation = 0
self.exposure_mode = 'auto'
self.meter_mode = 'average'
self.awb_mode = 'auto'
self.image_effect = 'none'
self.color_effects = None
self.rotation = 0
self.hflip = self.vflip = False
self.zoom = (0.0, 0.0, 1.0, 1.0)
def _init_splitter(self):
# Create a splitter component for the video port. This is to permit
# video recordings and captures where use_video_port=True to occur
# simultaneously (#26)
self._splitter = mo.MMALSplitter()
self._splitter.inputs[0].connect(
self._camera.outputs[self.CAMERA_VIDEO_PORT]).enable()
def _init_preview(self):
# Create a null-sink component, enable it and connect it to the
# camera's preview port. If nothing is connected to the preview port,
# the camera doesn't measure exposure and captured images gradually
# fade to black (issue #22)
self._preview = PiNullSink(
self, self._camera.outputs[self.CAMERA_PREVIEW_PORT])
def _start_capture(self, port):
# Only enable capture if the port is the camera's still port, or if
# there's a single active encoder on the video splitter
if (
port == self._camera.outputs[self.CAMERA_CAPTURE_PORT] or
len([e for e in self._encoders.values() if e.active]) == 1):
port.params[mmal.MMAL_PARAMETER_CAPTURE] = True
def _stop_capture(self, port):
# Only disable capture if the port is the camera's still port, or if
# there's a single active encoder on the video splitter
if (
port == self._camera.outputs[self.CAMERA_CAPTURE_PORT] or
len([e for e in self._encoders.values() if e.active]) == 1):
port.params[mmal.MMAL_PARAMETER_CAPTURE] = False
def _check_camera_open(self):
"""
Raise an exception if the camera is already closed, or if the camera
has encountered a fatal error.
"""
exc, self._camera_exception = self._camera_exception, None
if exc:
raise exc
if self.closed:
raise PiCameraClosed("Camera is closed")
def _check_recording_stopped(self):
"""
Raise an exception if the camera is currently recording.
"""
if self.recording:
raise PiCameraRuntimeError("Recording is currently running")
def _get_ports(self, from_video_port, splitter_port):
"""
Determine the camera and output ports for given capture options.
See :ref:`camera_hardware` for more information on picamera's usage of
camera, splitter, and encoder ports. The general idea here is that the
capture (still) port operates on its own, while the video port is
always connected to a splitter component, so requests for a video port
also have to specify which splitter port they want to use.
"""
self._check_camera_open()
if from_video_port and (splitter_port in self._encoders):
raise PiCameraAlreadyRecording(
'The camera is already using port %d ' % splitter_port)
camera_port = (
self._camera.outputs[self.CAMERA_VIDEO_PORT]
if from_video_port else
self._camera.outputs[self.CAMERA_CAPTURE_PORT]
)
output_port = (
self._splitter.outputs[splitter_port]
if from_video_port else
camera_port
)
return (camera_port, output_port)
def _get_output_format(self, output):
"""
Given an output object, attempt to determine the requested format.
We attempt to determine the filename of the *output* object and derive
a MIME type from the extension. If *output* has no filename, an error
is raised.
"""
if isinstance(output, bytes):
filename = output.decode('utf-8')
elif isinstance(output, str):
filename = output
else:
try:
filename = output.name
except AttributeError:
raise PiCameraValueError(
'Format must be specified when output has no filename')
(type, encoding) = mimetypes.guess_type(filename, strict=False)
if not type:
raise PiCameraValueError(
'Unable to determine type from filename %s' % filename)
return type
def _get_image_format(self, output, format=None):
"""
Given an output object and an optional format, attempt to determine the
requested image format.
This method is used by all capture methods to determine the requested
output format. If *format* is specified as a MIME-type the "image/"
prefix is stripped. If *format* is not specified, then
:meth:`_get_output_format` will be called to attempt to determine
format from the *output* object.
"""
if isinstance(format, bytes):
format = format.decode('utf-8')
format = format or self._get_output_format(output)
format = (
format[6:] if format.startswith('image/') else
format)
if format == 'x-ms-bmp':
format = 'bmp'
if format == 'raw':
format = self.raw_format
return format
def _get_video_format(self, output, format=None):
"""
Given an output object and an optional format, attempt to determine the
requested video format.
This method is used by all recording methods to determine the requested
output format. If *format* is specified as a MIME-type the "video/" or
"application/" prefix will be stripped. If *format* is not specified,
then :meth:`_get_output_format` will be called to attempt to determine
format from the *output* object.
"""
if isinstance(format, bytes):
format = format.decode('utf-8')
format = format or self._get_output_format(output)
format = (
format[6:] if format.startswith('video/') else
format[12:] if format.startswith('application/') else
format)
return format
def _get_image_encoder(
self, camera_port, output_port, format, resize, **options):
"""
Construct an image encoder for the requested parameters.
This method is called by :meth:`capture` and :meth:`capture_continuous`
to construct an image encoder. The *camera_port* parameter gives the
MMAL camera port that should be enabled for capture by the encoder. The
*output_port* parameter gives the MMAL port that the encoder should
read output from (this may be the same as the camera port, but may be
different if other component(s) like a splitter have been placed in the
pipeline). The *format* parameter indicates the image format and will
be one of:
* ``'jpeg'``
* ``'png'``
* ``'gif'``
* ``'bmp'``
* ``'yuv'``
* ``'rgb'``
* ``'rgba'``
* ``'bgr'``
* ``'bgra'``
The *resize* parameter indicates the size that the encoder should
resize the output to (presumably by including a resizer in the
pipeline). Finally, *options* includes extra keyword arguments that
should be passed verbatim to the encoder.
"""
encoder_class = (
PiRawOneImageEncoder if format in self.RAW_FORMATS else
PiCookedOneImageEncoder)
return encoder_class(
self, camera_port, output_port, format, resize, **options)
def _get_images_encoder(
self, camera_port, output_port, format, resize, **options):
"""
Construct a multi-image encoder for the requested parameters.
This method is largely equivalent to :meth:`_get_image_encoder` with
the exception that the encoder returned should expect to be passed an
iterable of outputs to its :meth:`~PiEncoder.start` method, rather than
a single output object. This method is called by the
:meth:`capture_sequence` method.
All parameters are the same as in :meth:`_get_image_encoder`. Please
refer to the documentation for that method for further information.
"""
encoder_class = (
PiRawMultiImageEncoder if format in self.RAW_FORMATS else
PiCookedMultiImageEncoder)
return encoder_class(
self, camera_port, output_port, format, resize, **options)
def _get_video_encoder(
self, camera_port, output_port, format, resize, **options):
"""
Construct a video encoder for the requested parameters.
This method is called by :meth:`start_recording` and
:meth:`record_sequence` to construct a video encoder. The
*camera_port* parameter gives the MMAL camera port that should be
enabled for capture by the encoder. The *output_port* parameter gives
the MMAL port that the encoder should read output from (this may be the
same as the camera port, but may be different if other component(s)
like a splitter have been placed in the pipeline). The *format*
parameter indicates the video format and will be one of:
* ``'h264'``
* ``'mjpeg'``
The *resize* parameter indicates the size that the encoder should
resize the output to (presumably by including a resizer in the
pipeline). Finally, *options* includes extra keyword arguments that
should be passed verbatim to the encoder.
"""
encoder_class = (
PiRawVideoEncoder if format in self.RAW_FORMATS else
PiCookedVideoEncoder)
return encoder_class(
self, camera_port, output_port, format, resize, **options)
def close(self):
"""
Finalizes the state of the camera.
After successfully constructing a :class:`PiCamera` object, you should
ensure you call the :meth:`close` method once you are finished with the
camera (e.g. in the ``finally`` section of a ``try..finally`` block).
This method stops all recording and preview activities and releases all
resources associated with the camera; this is necessary to prevent GPU
memory leaks.
"""
for port in list(self._encoders):
self.stop_recording(splitter_port=port)
assert not self.recording
for overlay in list(self._overlays):
self.remove_overlay(overlay)
if self._preview:
self._preview.close()
self._preview = None
if self._splitter:
self._splitter.close()
self._splitter = None
if self._camera:
self._camera.close()
self._camera = None
exc, self._camera_exception = self._camera_exception, None
if exc:
raise exc
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, exc_tb):
self.close()
def start_preview(self, **options):
"""
Displays the preview overlay.
This method starts a camera preview as an overlay on the Pi's primary
display (HDMI or composite). A :class:`PiRenderer` instance (more
specifically, a :class:`PiPreviewRenderer`) is constructed with the
keyword arguments captured in *options*, and is returned from the
method (this instance is also accessible from the :attr:`preview`
attribute for as long as the renderer remains active). By default, the
renderer will be opaque and fullscreen.
This means the default preview overrides whatever is currently visible
on the display. More specifically, the preview does not rely on a
graphical environment like X-Windows (it can run quite happily from a
TTY console); it is simply an overlay on the Pi's video output. To stop
the preview and reveal the display again, call :meth:`stop_preview`.
The preview can be started and stopped multiple times during the
lifetime of the :class:`PiCamera` object.
All other camera properties can be modified "live" while the preview is
running (e.g. :attr:`brightness`).
.. note::
Because the default preview typically obscures the screen, ensure
you have a means of stopping a preview before starting one. If the
preview obscures your interactive console you won't be able to
Alt+Tab back to it as the preview isn't in a window. If you are in
an interactive Python session, simply pressing Ctrl+D usually
suffices to terminate the environment, including the camera and its
associated preview.
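        For example, to show a half-transparent preview for ten seconds and
        then reveal the display again::
            import time
            import picamera
            with picamera.PiCamera() as camera:
                camera.start_preview(alpha=128)
                time.sleep(10)
                camera.stop_preview()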
"""
self._check_camera_open()
self._preview.close()
options.setdefault('layer', self._preview_layer)
options.setdefault('alpha', self._preview_alpha)
options.setdefault('fullscreen', self._preview_fullscreen)
options.setdefault('window', self._preview_window)
renderer = PiPreviewRenderer(
self, self._camera.outputs[self.CAMERA_PREVIEW_PORT], **options)
self._preview = renderer
return renderer
def stop_preview(self):
"""
Hides the preview overlay.
If :meth:`start_preview` has previously been called, this method shuts
down the preview display which generally results in the underlying
display becoming visible again. If a preview is not currently running,
no exception is raised - the method will simply do nothing.
"""
self._check_camera_open()
self._preview.close()
self._preview = PiNullSink(
self, self._camera.outputs[self.CAMERA_PREVIEW_PORT])
def add_overlay(self, source, size=None, format=None, **options):
"""
Adds a static overlay to the preview output.
This method creates a new static overlay using the same rendering
mechanism as the preview. Overlays will appear on the Pi's video
output, but will not appear in captures or video recordings. Multiple
overlays can exist; each call to :meth:`add_overlay` returns a new
:class:`PiOverlayRenderer` instance representing the overlay.
The *source* must be an object that supports the :ref:`buffer protocol
<bufferobjects>` in one of the supported unencoded formats: ``'yuv'``,
        ``'rgb'``, ``'rgba'``, ``'bgr'``, or ``'bgra'``. The format can be
specified explicitly with the optional *format* parameter. If not
specified, the method will attempt to guess the format based on the
length of *source* and the *size* (assuming 3 bytes per pixel for RGB,
and 4 bytes for RGBA).
The optional *size* parameter specifies the size of the source image as
a ``(width, height)`` tuple. If this is omitted or ``None`` then the
size is assumed to be the same as the camera's current
:attr:`resolution`.
The length of *source* must take into account that widths are rounded
up to the nearest multiple of 32, and heights to the nearest multiple
of 16. For example, if *size* is ``(1280, 720)``, and *format* is
``'rgb'``, then *source* must be a buffer with length 1280 × 720 × 3
bytes, or 2,764,800 bytes (because 1280 is a multiple of 32, and 720 is
        a multiple of 16, no extra rounding is required). However, if *size* is
``(97, 57)``, and *format* is ``'rgb'`` then *source* must be a buffer
with length 128 × 64 × 3 bytes, or 24,576 bytes (pixels beyond column
97 and row 57 in the source will be ignored).
New overlays default to *layer* 0, whilst the preview defaults to layer
2. Higher numbered layers obscure lower numbered layers, hence new
overlays will be invisible (if the preview is running) by default. You
can make the new overlay visible either by making any existing preview
transparent (with the :attr:`~PiRenderer.alpha` property) or by moving
the overlay into a layer higher than the preview (with the
:attr:`~PiRenderer.layer` property).
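        For example, a solid red overlay placed above the default preview
        layer might be constructed as follows (a minimal sketch)::
            import picamera
            with picamera.PiCamera(resolution=(1280, 720)) as camera:
                camera.start_preview()
                # 1280 and 720 already satisfy the 32/16 rounding rules,
                # so no extra padding of the buffer is required
                buf = b'\xff\x00\x00' * (1280 * 720)
                overlay = camera.add_overlay(
                    buf, size=(1280, 720), format='rgb', layer=3)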
All keyword arguments captured in *options* are passed onto the
:class:`PiRenderer` constructor. All camera properties except
:attr:`resolution` and :attr:`framerate` can be modified while overlays
exist. The reason for these exceptions is that the overlay has a static
resolution and changing the camera's mode would require resizing of the
source.
.. warning::
If too many overlays are added, the display output will be disabled
and a reboot will generally be required to restore the display.
Overlays are composited "on the fly". Hence, a real-time constraint
exists wherein for each horizontal line of HDMI output, the content
of all source layers must be fetched, resized, converted, and
blended to produce the output pixels.
If enough overlays exist (where "enough" is a number dependent on
overlay size, display resolution, bus frequency, and several other
factors making it unrealistic to calculate in advance), this
process breaks down and video output fails. One solution is to add
``dispmanx_offline=1`` to ``/boot/config.txt`` to force the use of
an off-screen buffer. Be aware that this requires more GPU memory
and may reduce the update rate.
.. _RGB: https://en.wikipedia.org/wiki/RGB
.. _RGBA: https://en.wikipedia.org/wiki/RGBA_color_space
.. versionadded:: 1.8
.. versionchanged:: 1.13
Added *format* parameter
"""
self._check_camera_open()
renderer = PiOverlayRenderer(self, source, size, format, **options)
self._overlays.append(renderer)
return renderer
def remove_overlay(self, overlay):
"""
Removes a static overlay from the preview output.
This method removes an overlay which was previously created by
:meth:`add_overlay`. The *overlay* parameter specifies the
:class:`PiRenderer` instance that was returned by :meth:`add_overlay`.
.. versionadded:: 1.8
"""
        if overlay not in self._overlays:
raise PiCameraValueError(
"The specified overlay is not owned by this instance of "
"PiCamera")
overlay.close()
self._overlays.remove(overlay)
def start_recording(
self, output, format=None, resize=None, splitter_port=1, **options):
"""
Start recording video from the camera, storing it in *output*.
If *output* is a string, it will be treated as a filename for a new
file which the video will be written to. If *output* is not a string,
but is an object with a ``write`` method, it is assumed to be a
file-like object and the video data is appended to it (the
implementation only assumes the object has a ``write()`` method - no
other methods are required but ``flush`` will be called at the end of
recording if it is present). If *output* is not a string, and has no
``write`` method it is assumed to be a writeable object implementing
the buffer protocol. In this case, the video frames will be written
sequentially to the underlying buffer (which must be large enough to
accept all frame data).
If *format* is ``None`` (the default), the method will attempt to guess
the required video format from the extension of *output* (if it's a
string), or from the *name* attribute of *output* (if it has one). In
the case that the format cannot be determined, a
:exc:`PiCameraValueError` will be raised.
If *format* is not ``None``, it must be a string specifying the format
that you want the video output in. The format can be a MIME-type or
one of the following strings:
* ``'h264'`` - Write an H.264 video stream
* ``'mjpeg'`` - Write an M-JPEG video stream
* ``'yuv'`` - Write the raw video data to a file in YUV420 format
* ``'rgb'`` - Write the raw video data to a file in 24-bit RGB format
* ``'rgba'`` - Write the raw video data to a file in 32-bit RGBA format
* ``'bgr'`` - Write the raw video data to a file in 24-bit BGR format
* ``'bgra'`` - Write the raw video data to a file in 32-bit BGRA format
If *resize* is not ``None`` (the default), it must be a two-element
tuple specifying the width and height that the video recording should
be resized to. This is particularly useful for recording video using
the full resolution of the camera sensor (which is not possible in
H.264 without down-sizing the output).
The *splitter_port* parameter specifies the port of the built-in
splitter that the video encoder will be attached to. This defaults to
``1`` and most users will have no need to specify anything different.
If you wish to record multiple (presumably resized) streams
simultaneously, specify a value between ``0`` and ``3`` inclusive for
this parameter, ensuring that you do not specify a port that is
currently in use.
Certain formats accept additional options which can be specified
as keyword arguments. The ``'h264'`` format accepts the following
additional options:
* *profile* - The H.264 profile to use for encoding. Defaults to
'high', but can be one of 'baseline', 'main', 'extended', 'high', or
'constrained'.
* *level* - The `H.264 level`_ to use for encoding. Defaults to '4',
but can be any H.264 level up to '4.2'.
* *intra_period* - The key frame rate (the rate at which I-frames are
inserted in the output). Defaults to ``None``, but can be any 32-bit
integer value representing the number of frames between successive
I-frames. The special value 0 causes the encoder to produce a single
initial I-frame, and then only P-frames subsequently. Note that
:meth:`split_recording` will fail in this mode.
* *intra_refresh* - The key frame format (the way in which I-frames
will be inserted into the output stream). Defaults to ``None``, but
can be one of 'cyclic', 'adaptive', 'both', or 'cyclicrows'.
* *inline_headers* - When ``True``, specifies that the encoder should
output SPS/PPS headers within the stream to ensure GOPs (groups of
pictures) are self describing. This is important for streaming
applications where the client may wish to seek within the stream, and
enables the use of :meth:`split_recording`. Defaults to ``True`` if
not specified.
* *sei* - When ``True``, specifies the encoder should include
"Supplemental Enhancement Information" within the output stream.
Defaults to ``False`` if not specified.
* *sps_timing* - When ``True`` the encoder includes the camera's
framerate in the SPS header. Defaults to ``False`` if not specified.
* *motion_output* - Indicates the output destination for motion vector
estimation data. When ``None`` (the default), motion data is not
output. Otherwise, this can be a filename string, a file-like object,
or a writeable buffer object (as with the *output* parameter).
All encoded formats accept the following additional options:
* *bitrate* - The bitrate at which video will be encoded. Defaults to
17000000 (17Mbps) if not specified. The maximum value depends on the
selected `H.264 level`_ and profile. Bitrate 0 indicates the encoder
should not use bitrate control (the encoder is limited by the quality
only).
* *quality* - Specifies the quality that the encoder should attempt
to maintain. For the ``'h264'`` format, use values between 10 and 40
where 10 is extremely high quality, and 40 is extremely low (20-25 is
usually a reasonable range for H.264 encoding). For the ``mjpeg``
format, use JPEG quality values between 1 and 100 (where higher
values are higher quality). Quality 0 is special and seems to be
a "reasonable quality" default.
* *quantization* - Deprecated alias for *quality*.
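        For example, to record ten seconds of H.264 video at 30fps with an
        I-frame every two seconds and a fixed target quality::
            import picamera
            with picamera.PiCamera(framerate=30) as camera:
                camera.start_recording('video.h264', intra_period=60, quality=25)
                camera.wait_recording(10)
                camera.stop_recording()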
.. versionchanged:: 1.0
The *resize* parameter was added, and ``'mjpeg'`` was added as a
recording format
.. versionchanged:: 1.3
The *splitter_port* parameter was added
.. versionchanged:: 1.5
The *quantization* parameter was deprecated in favor of *quality*,
and the *motion_output* parameter was added.
.. versionchanged:: 1.11
Support for buffer outputs was added.
.. _H.264 level: https://en.wikipedia.org/wiki/H.264/MPEG-4_AVC#Levels
"""
if 'quantization' in options:
warnings.warn(
PiCameraDeprecated(
'The quantization option is deprecated; please use '
'quality instead (same value)'))
with self._encoders_lock:
camera_port, output_port = self._get_ports(True, splitter_port)
format = self._get_video_format(output, format)
encoder = self._get_video_encoder(
camera_port, output_port, format, resize, **options)
self._encoders[splitter_port] = encoder
try:
encoder.start(output, options.get('motion_output'))
except Exception as e:
encoder.close()
with self._encoders_lock:
del self._encoders[splitter_port]
raise
def split_recording(self, output, splitter_port=1, **options):
"""
Continue the recording in the specified output; close existing output.
When called, the video encoder will wait for the next appropriate
split point (an inline SPS header), then will cease writing to the
current output (and close it, if it was specified as a filename), and
continue writing to the newly specified *output*.
The *output* parameter is treated as in the :meth:`start_recording`
method (it can be a string, a file-like object, or a writeable
buffer object).
The *motion_output* parameter can be used to redirect the output of the
motion vector data in the same fashion as *output*. If *motion_output*
is ``None`` (the default) then motion vector data will not be
redirected and will continue being written to the output specified by
the *motion_output* parameter given to :meth:`start_recording`.
Alternatively, if you only wish to redirect motion vector data, you can
        set *output* to ``None`` and give a new value for *motion_output*.
The *splitter_port* parameter specifies which port of the video
splitter the encoder you wish to change outputs is attached to. This
defaults to ``1`` and most users will have no need to specify anything
different. Valid values are between ``0`` and ``3`` inclusive.
Note that unlike :meth:`start_recording`, you cannot specify format or
other options as these cannot be changed in the middle of recording.
Only the new *output* (and *motion_output*) can be specified.
Furthermore, the format of the recording is currently limited to H264,
and *inline_headers* must be ``True`` when :meth:`start_recording` is
called (this is the default).
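        For example, to split a recording into two consecutive five-second
        clips::
            import picamera
            with picamera.PiCamera() as camera:
                camera.start_recording('clip1.h264')
                camera.wait_recording(5)
                camera.split_recording('clip2.h264')
                camera.wait_recording(5)
                camera.stop_recording()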
.. versionchanged:: 1.3
The *splitter_port* parameter was added
.. versionchanged:: 1.5
The *motion_output* parameter was added
.. versionchanged:: 1.11
Support for buffer outputs was added.
"""
try:
with self._encoders_lock:
encoder = self._encoders[splitter_port]
except KeyError:
raise PiCameraNotRecording(
'There is no recording in progress on '
'port %d' % splitter_port)
else:
encoder.split(output, options.get('motion_output'))
def request_key_frame(self, splitter_port=1):
"""
Request the encoder generate a key-frame as soon as possible.
When called, the video encoder running on the specified *splitter_port*
will attempt to produce a key-frame (full-image frame) as soon as
possible. The *splitter_port* defaults to ``1``. Valid values are
between ``0`` and ``3`` inclusive.
.. note::
This method is only meaningful for recordings encoded in the H264
format as MJPEG produces full frames for every frame recorded.
Furthermore, there's no guarantee that the *next* frame will be
a key-frame; this is simply a request to produce one as soon as
possible after the call.
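        For example, to request a key-frame at roughly one-second intervals
        while recording::
            import picamera
            with picamera.PiCamera() as camera:
                camera.start_recording('video.h264')
                for _ in range(10):
                    camera.wait_recording(1)
                    camera.request_key_frame()
                camera.stop_recording()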
.. versionadded:: 1.11
"""
try:
with self._encoders_lock:
encoder = self._encoders[splitter_port]
except KeyError:
raise PiCameraNotRecording(
'There is no recording in progress on '
'port %d' % splitter_port)
else:
encoder.request_key_frame()
def wait_recording(self, timeout=0, splitter_port=1):
"""
Wait on the video encoder for timeout seconds.
        It is recommended that this method be called while recording to check
for exceptions. If an error occurs during recording (for example out of
disk space) the recording will stop, but an exception will only be
raised when the :meth:`wait_recording` or :meth:`stop_recording`
methods are called.
If ``timeout`` is 0 (the default) the function will immediately return
(or raise an exception if an error has occurred).
The *splitter_port* parameter specifies which port of the video
splitter the encoder you wish to wait on is attached to. This
defaults to ``1`` and most users will have no need to specify anything
different. Valid values are between ``0`` and ``3`` inclusive.
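        For example, to record for up to a minute while still surfacing any
        encoder error promptly::
            import picamera
            with picamera.PiCamera() as camera:
                camera.start_recording('video.h264')
                try:
                    camera.wait_recording(60)
                finally:
                    camera.stop_recording()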
.. versionchanged:: 1.3
The *splitter_port* parameter was added
"""
assert timeout is not None
try:
with self._encoders_lock:
encoder = self._encoders[splitter_port]
except KeyError:
raise PiCameraNotRecording(
'There is no recording in progress on '
'port %d' % splitter_port)
else:
encoder.wait(timeout)
def stop_recording(self, splitter_port=1):
"""
Stop recording video from the camera.
After calling this method the video encoder will be shut down and
output will stop being written to the file-like object specified with
:meth:`start_recording`. If an error occurred during recording and
:meth:`wait_recording` has not been called since the error then this
method will raise the exception.
The *splitter_port* parameter specifies which port of the video
splitter the encoder you wish to stop is attached to. This defaults to
``1`` and most users will have no need to specify anything different.
Valid values are between ``0`` and ``3`` inclusive.
.. versionchanged:: 1.3
The *splitter_port* parameter was added
"""
try:
with self._encoders_lock:
encoder = self._encoders[splitter_port]
except KeyError:
raise PiCameraNotRecording(
'There is no recording in progress on '
'port %d' % splitter_port)
else:
try:
self.wait_recording(0, splitter_port)
finally:
encoder.close()
with self._encoders_lock:
del self._encoders[splitter_port]
def record_sequence(
self, outputs, format='h264', resize=None, splitter_port=1, **options):
"""
Record a sequence of video clips from the camera.
This method accepts a sequence or iterator of *outputs* each of which
must either be a string specifying a filename for output, or a
file-like object with a ``write`` method.
The method acts as an iterator itself, yielding each item of the
sequence in turn. In this way, the caller can control how long to
record to each item by only permitting the loop to continue when ready
to switch to the next output.
The *format*, *splitter_port*, *resize*, and *options* parameters are
the same as in :meth:`start_recording`, but *format* defaults to
``'h264'``. The format is **not** derived from the filenames in
*outputs* by this method.
For example, to record 3 consecutive 10-second video clips, writing the
output to a series of H.264 files named clip01.h264, clip02.h264, and
clip03.h264 one could use the following::
import picamera
with picamera.PiCamera() as camera:
for filename in camera.record_sequence([
'clip01.h264',
'clip02.h264',
'clip03.h264']):
print('Recording to %s' % filename)
camera.wait_recording(10)
Alternatively, a more flexible method of writing the previous example
(which is easier to expand to a large number of output files) is by
using a generator expression as the input sequence::
import picamera
with picamera.PiCamera() as camera:
for filename in camera.record_sequence(
'clip%02d.h264' % i for i in range(3)):
print('Recording to %s' % filename)
camera.wait_recording(10)
More advanced techniques are also possible by utilising infinite
sequences, such as those generated by :func:`itertools.cycle`. In the
following example, recording is switched between two in-memory streams.
Whilst one stream is recording, the other is being analysed. The script
only stops recording when a video recording meets some criteria defined
by the ``process`` function::
import io
import itertools
import picamera
with picamera.PiCamera() as camera:
analyse = None
for stream in camera.record_sequence(
itertools.cycle((io.BytesIO(), io.BytesIO()))):
if analyse is not None:
if process(analyse):
break
analyse.seek(0)
analyse.truncate()
camera.wait_recording(5)
analyse = stream
.. versionadded:: 1.3
"""
with self._encoders_lock:
camera_port, output_port = self._get_ports(True, splitter_port)
format = self._get_video_format('', format)
encoder = self._get_video_encoder(
camera_port, output_port, format, resize, **options)
self._encoders[splitter_port] = encoder
try:
start = True
for output in outputs:
if start:
start = False
encoder.start(output, options.get('motion_output'))
else:
encoder.split(output)
yield output
finally:
try:
encoder.wait(0)
finally:
encoder.close()
with self._encoders_lock:
del self._encoders[splitter_port]
def capture(
self, output, format=None, use_video_port=False, resize=None,
splitter_port=0, bayer=False, **options):
"""
Capture an image from the camera, storing it in *output*.
If *output* is a string, it will be treated as a filename for a new
file which the image will be written to. If *output* is not a string,
but is an object with a ``write`` method, it is assumed to be a
file-like object and the image data is appended to it (the
implementation only assumes the object has a ``write`` method - no
other methods are required but ``flush`` will be called at the end of
capture if it is present). If *output* is not a string, and has no
``write`` method it is assumed to be a writeable object implementing
the buffer protocol. In this case, the image data will be written
directly to the underlying buffer (which must be large enough to accept
the image data).
If *format* is ``None`` (the default), the method will attempt to guess
the required image format from the extension of *output* (if it's a
string), or from the *name* attribute of *output* (if it has one). In
the case that the format cannot be determined, a
:exc:`PiCameraValueError` will be raised.
If *format* is not ``None``, it must be a string specifying the format
that you want the image output in. The format can be a MIME-type or
one of the following strings:
* ``'jpeg'`` - Write a JPEG file
* ``'png'`` - Write a PNG file
* ``'gif'`` - Write a GIF file
* ``'bmp'`` - Write a Windows bitmap file
* ``'yuv'`` - Write the raw image data to a file in YUV420 format
* ``'rgb'`` - Write the raw image data to a file in 24-bit RGB format
* ``'rgba'`` - Write the raw image data to a file in 32-bit RGBA format
* ``'bgr'`` - Write the raw image data to a file in 24-bit BGR format
* ``'bgra'`` - Write the raw image data to a file in 32-bit BGRA format
* ``'raw'`` - Deprecated option for raw captures; the format is taken
from the deprecated :attr:`raw_format` attribute
The *use_video_port* parameter controls whether the camera's image or
video port is used to capture images. It defaults to ``False`` which
means that the camera's image port is used. This port is slow but
produces better quality pictures. If you need rapid capture up to the
rate of video frames, set this to ``True``.
When *use_video_port* is ``True``, the *splitter_port* parameter
specifies the port of the video splitter that the image encoder will be
attached to. This defaults to ``0`` and most users will have no need to
specify anything different. This parameter is ignored when
*use_video_port* is ``False``. See :ref:`mmal` for more information
about the video splitter.
If *resize* is not ``None`` (the default), it must be a two-element
tuple specifying the width and height that the image should be resized
to.
.. warning::
If *resize* is specified, or *use_video_port* is ``True``, Exif
metadata will **not** be included in JPEG output. This is due to an
underlying firmware limitation.
Certain file formats accept additional options which can be specified
as keyword arguments. Currently, only the ``'jpeg'`` encoder accepts
additional options, which are:
* *quality* - Defines the quality of the JPEG encoder as an integer
ranging from 1 to 100. Defaults to 85. Please note that JPEG quality
is not a percentage and `definitions of quality`_ vary widely.
* *restart* - Defines the restart interval for the JPEG encoder as a
number of JPEG MCUs. The actual restart interval used will be a
multiple of the number of MCUs per row in the resulting image.
* *thumbnail* - Defines the size and quality of the thumbnail to embed
in the Exif metadata. Specifying ``None`` disables thumbnail
generation. Otherwise, specify a tuple of ``(width, height,
quality)``. Defaults to ``(64, 48, 35)``.
* *bayer* - If ``True``, the raw bayer data from the camera's sensor
is included in the Exif metadata.
.. note::
The so-called "raw" formats listed above (``'yuv'``, ``'rgb'``,
etc.) do not represent the raw bayer data from the camera's sensor.
Rather they provide access to the image data after GPU processing,
but before format encoding (JPEG, PNG, etc). Currently, the only
method of accessing the raw bayer data is via the *bayer* parameter
described above.
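        For example, to capture a resized JPEG at a specific quality::
            import time
            import picamera
            with picamera.PiCamera() as camera:
                camera.start_preview()
                time.sleep(2)
                camera.capture('image.jpg', resize=(320, 240), quality=90)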
.. versionchanged:: 1.0
The *resize* parameter was added, and raw capture formats can now
be specified directly
.. versionchanged:: 1.3
The *splitter_port* parameter was added, and *bayer* was added as
an option for the ``'jpeg'`` format
.. versionchanged:: 1.11
Support for buffer outputs was added.
.. _definitions of quality: http://photo.net/learn/jpeg/#qual
"""
if format == 'raw':
warnings.warn(
PiCameraDeprecated(
'The "raw" format option is deprecated; specify the '
'required format directly instead ("yuv", "rgb", etc.)'))
if use_video_port and bayer:
raise PiCameraValueError(
'bayer is only valid with still port captures')
if 'burst' in options:
raise PiCameraValueError(
'burst is only valid with capture_sequence or capture_continuous')
with self._encoders_lock:
camera_port, output_port = self._get_ports(use_video_port, splitter_port)
format = self._get_image_format(output, format)
encoder = self._get_image_encoder(
camera_port, output_port, format, resize, **options)
if use_video_port:
self._encoders[splitter_port] = encoder
try:
if bayer:
camera_port.params[mmal.MMAL_PARAMETER_ENABLE_RAW_CAPTURE] = True
encoder.start(output)
# Wait for the callback to set the event indicating the end of
# image capture
if not encoder.wait(self.CAPTURE_TIMEOUT):
raise PiCameraRuntimeError(
'Timed out waiting for capture to end')
finally:
encoder.close()
with self._encoders_lock:
if use_video_port:
del self._encoders[splitter_port]
def capture_sequence(
self, outputs, format='jpeg', use_video_port=False, resize=None,
splitter_port=0, burst=False, bayer=False, **options):
"""
Capture a sequence of consecutive images from the camera.
This method accepts a sequence or iterator of *outputs* each of which
must either be a string specifying a filename for output, or a
file-like object with a ``write`` method, or a writeable buffer object.
For each item in the sequence or iterator of outputs, the camera
captures a single image as fast as it can.
The *format*, *use_video_port*, *splitter_port*, *resize*, and
*options* parameters are the same as in :meth:`capture`, but *format*
defaults to ``'jpeg'``. The format is **not** derived from the
filenames in *outputs* by this method.
If *use_video_port* is ``False`` (the default), the *burst* parameter
can be used to make still port captures faster. Specifically, this
prevents the preview from switching resolutions between captures which
significantly speeds up consecutive captures from the still port. The
        downside is that this mode currently has several bugs; the major
issue is that if captures are performed too quickly some frames will
come back severely underexposed. It is recommended that users avoid the
*burst* parameter unless they absolutely require it and are prepared to
work around such issues.
For example, to capture 3 consecutive images::
import time
import picamera
with picamera.PiCamera() as camera:
camera.start_preview()
time.sleep(2)
camera.capture_sequence([
'image1.jpg',
'image2.jpg',
'image3.jpg',
])
camera.stop_preview()
If you wish to capture a large number of images, a list comprehension
or generator expression can be used to construct the list of filenames
to use::
import time
import picamera
with picamera.PiCamera() as camera:
camera.start_preview()
time.sleep(2)
camera.capture_sequence([
'image%02d.jpg' % i
for i in range(100)
])
camera.stop_preview()
More complex effects can be obtained by using a generator function to
provide the filenames or output objects.
.. versionchanged:: 1.0
The *resize* parameter was added, and raw capture formats can now
be specified directly
.. versionchanged:: 1.3
The *splitter_port* parameter was added
.. versionchanged:: 1.11
Support for buffer outputs was added.
"""
if use_video_port:
if burst:
raise PiCameraValueError(
'burst is only valid with still port captures')
if bayer:
raise PiCameraValueError(
'bayer is only valid with still port captures')
with self._encoders_lock:
camera_port, output_port = self._get_ports(use_video_port, splitter_port)
format = self._get_image_format('', format)
if use_video_port:
encoder = self._get_images_encoder(
camera_port, output_port, format, resize, **options)
self._encoders[splitter_port] = encoder
else:
encoder = self._get_image_encoder(
camera_port, output_port, format, resize, **options)
try:
if use_video_port:
encoder.start(outputs)
encoder.wait()
else:
if burst:
camera_port.params[mmal.MMAL_PARAMETER_CAMERA_BURST_CAPTURE] = True
try:
for output in outputs:
if bayer:
camera_port.params[mmal.MMAL_PARAMETER_ENABLE_RAW_CAPTURE] = True
encoder.start(output)
if not encoder.wait(self.CAPTURE_TIMEOUT):
raise PiCameraRuntimeError(
'Timed out waiting for capture to end')
finally:
if burst:
camera_port.params[mmal.MMAL_PARAMETER_CAMERA_BURST_CAPTURE] = False
finally:
encoder.close()
with self._encoders_lock:
if use_video_port:
del self._encoders[splitter_port]
def capture_continuous(
self, output, format=None, use_video_port=False, resize=None,
splitter_port=0, burst=False, bayer=False, **options):
"""
Capture images continuously from the camera as an infinite iterator.
This method returns an infinite iterator of images captured
continuously from the camera. If *output* is a string, each captured
image is stored in a file named after *output* after substitution of
two values with the :meth:`~str.format` method. Those two values are:
* ``{counter}`` - a simple incrementor that starts at 1 and increases
by 1 for each image taken
* ``{timestamp}`` - a :class:`~datetime.datetime` instance
The table below contains several example values of *output* and the
sequence of filenames those values could produce:
.. tabularcolumns:: |p{80mm}|p{40mm}|p{10mm}|
+--------------------------------------------+--------------------------------------------+-------+
| *output* Value | Filenames | Notes |
+============================================+============================================+=======+
| ``'image{counter}.jpg'`` | image1.jpg, image2.jpg, image3.jpg, ... | |
+--------------------------------------------+--------------------------------------------+-------+
| ``'image{counter:02d}.jpg'`` | image01.jpg, image02.jpg, image03.jpg, ... | |
+--------------------------------------------+--------------------------------------------+-------+
| ``'image{timestamp}.jpg'`` | image2013-10-05 12:07:12.346743.jpg, | (1) |
|                                            | image2013-10-05 12:07:32.498539.jpg, ...   |       |
+--------------------------------------------+--------------------------------------------+-------+
| ``'image{timestamp:%H-%M-%S-%f}.jpg'`` | image12-10-02-561527.jpg, | |
| | image12-10-14-905398.jpg | |
+--------------------------------------------+--------------------------------------------+-------+
| ``'{timestamp:%H%M%S}-{counter:03d}.jpg'`` | 121002-001.jpg, 121013-002.jpg, | (2) |
| | 121014-003.jpg, ... | |
+--------------------------------------------+--------------------------------------------+-------+
1. Note that because timestamp's default output includes colons (:),
the resulting filenames are not suitable for use on Windows. For
this reason (and the fact that the default contains spaces) it is
strongly recommended you always specify a format when using
``{timestamp}``.
2. You can use both ``{timestamp}`` and ``{counter}`` in a single
format string (multiple times too!) although this tends to be
redundant.
If *output* is not a string, but has a ``write`` method, it is assumed
to be a file-like object and each image is simply written to this
object sequentially. In this case you will likely either want to write
something to the object between the images to distinguish them, or
clear the object between iterations. If *output* is not a string, and
has no ``write`` method, it is assumed to be a writeable object
supporting the buffer protocol; each image is simply written to the
buffer sequentially.
The *format*, *use_video_port*, *splitter_port*, *resize*, and
*options* parameters are the same as in :meth:`capture`.
If *use_video_port* is ``False`` (the default), the *burst* parameter
can be used to make still port captures faster. Specifically, this
prevents the preview from switching resolutions between captures, which
significantly speeds up consecutive captures from the still port. The
downside is that this mode currently has several bugs; the major
issue is that if captures are performed too quickly some frames will
come back severely underexposed. It is recommended that users avoid the
*burst* parameter unless they absolutely require it and are prepared to
work around such issues.
For example, to capture 60 images with a one second delay between them,
writing the output to a series of JPEG files named image01.jpg,
image02.jpg, etc. one could do the following::
import time
import picamera
with picamera.PiCamera() as camera:
camera.start_preview()
try:
for i, filename in enumerate(
camera.capture_continuous('image{counter:02d}.jpg')):
print(filename)
time.sleep(1)
if i == 59:
break
finally:
camera.stop_preview()
Alternatively, to capture JPEG frames as fast as possible into an
in-memory stream, performing some processing on each stream until
some condition is satisfied::
import io
import time
import picamera
with picamera.PiCamera() as camera:
stream = io.BytesIO()
for foo in camera.capture_continuous(stream, format='jpeg'):
# Truncate the stream to the current position (in case
# prior iterations output a longer image)
stream.truncate()
stream.seek(0)
if process(stream):
break
.. versionchanged:: 1.0
The *resize* parameter was added, and raw capture formats can now
be specified directly
.. versionchanged:: 1.3
The *splitter_port* parameter was added
.. versionchanged:: 1.11
Support for buffer outputs was added.
"""
if use_video_port:
if burst:
raise PiCameraValueError(
'burst is only valid with still port captures')
if bayer:
raise PiCameraValueError(
'bayer is only valid with still port captures')
with self._encoders_lock:
camera_port, output_port = self._get_ports(use_video_port, splitter_port)
format = self._get_image_format(output, format)
encoder = self._get_image_encoder(
camera_port, output_port, format, resize, **options)
if use_video_port:
self._encoders[splitter_port] = encoder
try:
if burst:
camera_port.params[mmal.MMAL_PARAMETER_CAMERA_BURST_CAPTURE] = True
try:
if isinstance(output, bytes):
# If we're fed a bytes string, assume it's UTF-8 encoded
# and convert it to Unicode. Technically this is wrong
# (file-systems use all sorts of encodings), but UTF-8 is a
# reasonable default and this keeps compatibility with
Python 2 simple, although it breaks the edge cases of
# non-UTF-8 encoded bytes strings with non-UTF-8 encoded
# file-systems
output = output.decode('utf-8')
if isinstance(output, str):
counter = 1
while True:
filename = output.format(
counter=counter,
timestamp=datetime.datetime.now(),
)
if bayer:
camera_port.params[mmal.MMAL_PARAMETER_ENABLE_RAW_CAPTURE] = True
encoder.start(filename)
if not encoder.wait(self.CAPTURE_TIMEOUT):
raise PiCameraRuntimeError(
'Timed out waiting for capture to end')
yield filename
counter += 1
else:
while True:
if bayer:
camera_port.params[mmal.MMAL_PARAMETER_ENABLE_RAW_CAPTURE] = True
encoder.start(output)
if not encoder.wait(self.CAPTURE_TIMEOUT):
raise PiCameraRuntimeError(
'Timed out waiting for capture to end')
yield output
finally:
if burst:
camera_port.params[mmal.MMAL_PARAMETER_CAMERA_BURST_CAPTURE] = False
finally:
encoder.close()
with self._encoders_lock:
if use_video_port:
del self._encoders[splitter_port]
@property
def closed(self):
"""
Returns ``True`` if the :meth:`close` method has been called.
"""
return not self._camera
@property
def recording(self):
"""
Returns ``True`` if the :meth:`start_recording` method has been called,
and no :meth:`stop_recording` call has been made yet.
"""
return any(
isinstance(e, PiVideoEncoder) and e.active
for e in self._encoders.values()
)
@property
def previewing(self):
"""
Returns ``True`` if the :meth:`start_preview` method has been called,
and no :meth:`stop_preview` call has been made yet.
.. deprecated:: 1.8
Test whether :attr:`preview` is ``None`` instead.
"""
warnings.warn(
PiCameraDeprecated(
'PiCamera.previewing is deprecated; test PiCamera.preview '
'is not None instead'))
return isinstance(self._preview, PiPreviewRenderer)
@property
def revision(self):
"""
Returns a string representing the revision of the Pi's camera module.
At the time of writing, the string returned is 'ov5647' for the V1
module, and 'imx219' for the V2 module.
"""
return self._revision
@property
def exif_tags(self):
"""
Holds a mapping of the Exif tags to apply to captured images.
.. note::
Please note that Exif tagging is only supported with the ``jpeg``
format.
By default several Exif tags are automatically applied to any images
taken with the :meth:`capture` method: ``IFD0.Make`` (which is set to
``RaspberryPi``), ``IFD0.Model`` (which is set to ``RP_OV5647``), and
three timestamp tags: ``IFD0.DateTime``, ``EXIF.DateTimeOriginal``, and
``EXIF.DateTimeDigitized`` which are all set to the current date and
time just before the picture is taken.
If you wish to set additional Exif tags, or override any of the
aforementioned tags, simply add entries to the exif_tags map before
calling :meth:`capture`. For example::
camera.exif_tags['IFD0.Copyright'] = 'Copyright (c) 2013 Foo Industries'
The Exif standard mandates ASCII encoding for all textual values, hence
strings containing non-ASCII characters will cause an encoding error to
be raised when :meth:`capture` is called. If you wish to set binary
values, use a :func:`bytes` value::
camera.exif_tags['EXIF.UserComment'] = b'Something containing\\x00NULL characters'
.. warning::
Binary Exif values are currently ignored; this appears to be a
libmmal or firmware bug.
You may also specify datetime, integer, or float values, all of
which will be converted to appropriate ASCII strings (datetime values
are formatted as ``YYYY:MM:DD HH:MM:SS`` in accordance with the Exif
standard).
The currently supported Exif tags are:
+-------+-------------------------------------------------------------+
| Group | Tags |
+=======+=============================================================+
| IFD0, | ImageWidth, ImageLength, BitsPerSample, Compression, |
| IFD1 | PhotometricInterpretation, ImageDescription, Make, Model, |
| | StripOffsets, Orientation, SamplesPerPixel, RowsPerString, |
| | StripByteCounts, Xresolution, Yresolution, |
| | PlanarConfiguration, ResolutionUnit, TransferFunction, |
| | Software, DateTime, Artist, WhitePoint, |
| | PrimaryChromaticities, JPEGInterchangeFormat, |
| | JPEGInterchangeFormatLength, YcbCrCoefficients, |
| | YcbCrSubSampling, YcbCrPositioning, ReferenceBlackWhite, |
| | Copyright |
+-------+-------------------------------------------------------------+
| EXIF | ExposureTime, FNumber, ExposureProgram, |
| | SpectralSensitivity, ISOSpeedRatings, OECF, ExifVersion, |
| | DateTimeOriginal, DateTimeDigitized, |
| | ComponentsConfiguration, CompressedBitsPerPixel, |
| | ShutterSpeedValue, ApertureValue, BrightnessValue, |
| | ExposureBiasValue, MaxApertureValue, SubjectDistance, |
| | MeteringMode, LightSource, Flash, FocalLength, SubjectArea, |
| | MakerNote, UserComment, SubSecTime, SubSecTimeOriginal, |
| | SubSecTimeDigitized, FlashpixVersion, ColorSpace, |
| | PixelXDimension, PixelYDimension, RelatedSoundFile, |
| | FlashEnergy, SpacialFrequencyResponse, |
| | FocalPlaneXResolution, FocalPlaneYResolution, |
| | FocalPlaneResolutionUnit, SubjectLocation, ExposureIndex, |
| | SensingMethod, FileSource, SceneType, CFAPattern, |
| | CustomRendered, ExposureMode, WhiteBalance, |
| | DigitalZoomRatio, FocalLengthIn35mmFilm, SceneCaptureType, |
| | GainControl, Contrast, Saturation, Sharpness, |
| | DeviceSettingDescription, SubjectDistanceRange, |
| | ImageUniqueID |
+-------+-------------------------------------------------------------+
| GPS | GPSVersionID, GPSLatitudeRef, GPSLatitude, GPSLongitudeRef, |
| | GPSLongitude, GPSAltitudeRef, GPSAltitude, GPSTimeStamp, |
| | GPSSatellites, GPSStatus, GPSMeasureMode, GPSDOP, |
| | GPSSpeedRef, GPSSpeed, GPSTrackRef, GPSTrack, |
| | GPSImgDirectionRef, GPSImgDirection, GPSMapDatum, |
| | GPSDestLatitudeRef, GPSDestLatitude, GPSDestLongitudeRef, |
| | GPSDestLongitude, GPSDestBearingRef, GPSDestBearing, |
| | GPSDestDistanceRef, GPSDestDistance, GPSProcessingMethod, |
| | GPSAreaInformation, GPSDateStamp, GPSDifferential |
+-------+-------------------------------------------------------------+
| EINT | InteroperabilityIndex, InteroperabilityVersion, |
| | RelatedImageFileFormat, RelatedImageWidth, |
| | RelatedImageLength |
+-------+-------------------------------------------------------------+
"""
return self._exif_tags
def _set_led(self, value):
if not self._used_led:
self._init_led()
if not GPIO:
raise PiCameraRuntimeError(
"GPIO library not found, or not accessible; please install "
"RPi.GPIO and run the script as root")
GPIO.output(self._led_pin, bool(value))
led = property(None, _set_led, doc="""
Sets the state of the camera's LED via GPIO.
If a GPIO library is available (only RPi.GPIO is currently supported),
and if the Python process has the necessary privileges (typically this
means running as root via sudo), this property can be used to set the
state of the camera's LED as a boolean value (``True`` is on, ``False``
is off).
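For example, a minimal sketch that turns the LED off (assuming RPi.GPIO
is installed and the script runs with sufficient privileges)::

    import picamera

    camera = picamera.PiCamera()
    camera.led = False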
.. note::
This is a write-only property. While it can be used to control the
camera's LED, you cannot query the state of the camera's LED using
this property.
.. note::
At present, the camera's LED cannot be controlled on the Pi 3
(the GPIOs used to control the camera LED were re-routed to GPIO
expander on the Pi 3).
.. warning::
There are circumstances in which the camera firmware may override
an existing LED setting. For example, in the case that the firmware
resets the camera (as can happen with a CSI-2 timeout), the LED may
also be reset. If you wish to guarantee that the LED remain off at
all times, you may prefer to use the ``disable_camera_led`` option
in `config.txt`_ (this has the added advantage that sudo privileges
and GPIO access are not required, at least for LED control).
.. _config.txt: https://www.raspberrypi.org/documentation/configuration/config-txt.md
""")
def _get_raw_format(self):
warnings.warn(
PiCameraDeprecated(
'PiCamera.raw_format is deprecated; use required format '
'directly with capture methods instead'))
return self._raw_format
def _set_raw_format(self, value):
warnings.warn(
PiCameraDeprecated(
'PiCamera.raw_format is deprecated; use required format '
'directly with capture methods instead'))
if value not in self.RAW_FORMATS:
raise PiCameraValueError("Invalid raw format: %s" % value)
self._raw_format = value
raw_format = property(_get_raw_format, _set_raw_format, doc="""
Retrieves or sets the raw format of the camera's ports.
.. deprecated:: 1.0
Please use ``'yuv'`` or ``'rgb'`` directly as a format in the
various capture methods instead.
""")
def _get_timestamp(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_SYSTEM_TIME]
timestamp = property(_get_timestamp, doc="""
Retrieves the system time according to the camera firmware.
The camera's timestamp is a 64-bit integer representing the number of
microseconds since the last system boot. When the camera's
:attr:`clock_mode` is ``'raw'`` the values returned by this attribute
are comparable to those from the :attr:`frame`
:attr:`~PiVideoFrame.timestamp` attribute.
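For example, a minimal sketch comparing the two clocks during a
recording (the output filename is illustrative)::

    import picamera

    with picamera.PiCamera(clock_mode='raw') as camera:
        camera.start_recording('clocks.h264')
        camera.wait_recording(1)
        f = camera.frame
        if f is not None and f.timestamp is not None:
            # both values count microseconds since the last system boot
            print(camera.timestamp - f.timestamp)
        camera.stop_recording()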
""")
def _get_frame(self):
self._check_camera_open()
for e in self._encoders.values():
try:
return e.frame
except AttributeError:
pass
raise PiCameraRuntimeError(
"Cannot query frame information when camera is not recording")
frame = property(_get_frame, doc="""
Retrieves information about the current frame recorded from the camera.
When video recording is active (after a call to
:meth:`start_recording`), this attribute will return a
:class:`PiVideoFrame` tuple containing information about the current
frame that the camera is recording.
If multiple video recordings are currently in progress (after multiple
calls to :meth:`start_recording` with different values for the
``splitter_port`` parameter), which encoder's frame information is
returned is arbitrary. If you require information from a specific
encoder, you will need to extract it from :attr:`_encoders` explicitly.
Querying this property when the camera is not recording will result in
an exception.
.. note::
There is a small window of time when querying this attribute will
return ``None`` after calling :meth:`start_recording`. If this
attribute returns ``None``, this means that the video encoder has
been initialized, but the camera has not yet returned any frames.
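For example, a minimal sketch that reports the index of the most
recently recorded frame once a second (the output filename and the
five second duration are illustrative)::

    import picamera

    with picamera.PiCamera() as camera:
        camera.start_recording('video.h264')
        for i in range(5):
            camera.wait_recording(1)
            f = camera.frame
            if f is not None:
                print('most recent frame: %d' % f.index)
        camera.stop_recording()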
""")
def _disable_camera(self):
"""
An internal method for disabling the camera, e.g. for re-configuration.
This disables the splitter and preview connections (if they exist).
"""
self._splitter.connection.disable()
self._preview.renderer.connection.disable()
self._camera.disable()
def _enable_camera(self):
"""
An internal method for enabling the camera after re-configuration.
This ensures the splitter configuration is consistent, then re-enables
the camera along with the splitter and preview connections.
"""
self._camera.enable()
self._preview.renderer.connection.enable()
self._splitter.connection.enable()
def _configure_splitter(self):
"""
Ensures all splitter output ports have a sensible format (I420) and
buffer sizes.
This method is used to ensure the splitter configuration is sane,
typically after :meth:`_configure_camera` is called.
"""
self._splitter.inputs[0].copy_from(self._camera.outputs[self.CAMERA_VIDEO_PORT])
self._splitter.inputs[0].commit()
def _control_callback(self, port, buf):
try:
if buf.command == mmal.MMAL_EVENT_ERROR:
raise PiCameraRuntimeError(
"No data recevied from sensor. Check all connections, "
"including the SUNNY chip on the camera board")
elif buf.command != mmal.MMAL_EVENT_PARAMETER_CHANGED:
raise PiCameraRuntimeError(
"Received unexpected camera control callback event, 0x%08x" % buf[0].cmd)
except Exception as exc:
# Pass the exception to the main thread; next time
# check_camera_open() is called this will get raised
self._camera_exception = exc
def _configure_camera(
self, sensor_mode, framerate, resolution, clock_mode,
old_sensor_mode=0):
"""
An internal method for setting a new camera mode, framerate,
resolution, and/or clock_mode.
This method is used by the setters of the :attr:`resolution`,
:attr:`framerate`, and :attr:`sensor_mode` properties. It assumes the
camera is currently disabled. The *old_sensor_mode* and *sensor_mode*
arguments are required to ensure correct operation on older firmwares
(specifically that we don't try to set the sensor mode when both old
and new modes are 0 or automatic).
"""
old_cc = mmal.MMAL_PARAMETER_CAMERA_CONFIG_T.from_buffer_copy(self._camera_config)
old_ports = [
(port.framesize, port.framerate, port.params[mmal.MMAL_PARAMETER_FPS_RANGE])
for port in self._camera.outputs
]
if old_sensor_mode != 0 or sensor_mode != 0:
self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_CUSTOM_SENSOR_CONFIG] = sensor_mode
if not self._camera.control.enabled:
# Initial setup
self._camera.control.enable(self._control_callback)
preview_resolution = resolution
elif (
self._camera.outputs[self.CAMERA_PREVIEW_PORT].framesize ==
self._camera.outputs[self.CAMERA_VIDEO_PORT].framesize
):
preview_resolution = resolution
else:
preview_resolution = self._camera.outputs[self.CAMERA_PREVIEW_PORT].framesize
try:
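# If framerate is a (low, high) range, record the limits and force the
# ports to a variable framerate (0); a plain value fixes both limits.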
try:
fps_low, fps_high = framerate
except TypeError:
fps_low = fps_high = framerate
else:
framerate = 0
fps_range = mmal.MMAL_PARAMETER_FPS_RANGE_T(
mmal.MMAL_PARAMETER_HEADER_T(
mmal.MMAL_PARAMETER_FPS_RANGE,
ct.sizeof(mmal.MMAL_PARAMETER_FPS_RANGE_T)
),
fps_low=mo.to_rational(fps_low),
fps_high=mo.to_rational(fps_high),
)
cc = self._camera_config
cc.max_stills_w = resolution.width
cc.max_stills_h = resolution.height
cc.stills_yuv422 = 0
cc.one_shot_stills = 1
cc.max_preview_video_w = resolution.width
cc.max_preview_video_h = resolution.height
cc.num_preview_video_frames = max(3, fps_high // 10)
cc.stills_capture_circular_buffer_height = 0
cc.fast_preview_resume = 0
cc.use_stc_timestamp = clock_mode
self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_CONFIG] = cc
# Clamp preview resolution to camera's resolution
if (
preview_resolution.width > resolution.width or
preview_resolution.height > resolution.height
):
preview_resolution = resolution
for port in self._camera.outputs:
port.params[mmal.MMAL_PARAMETER_FPS_RANGE] = fps_range
if port.index == self.CAMERA_PREVIEW_PORT:
port.framesize = preview_resolution
else:
port.framesize = resolution
port.framerate = framerate
port.commit()
except:
# If anything goes wrong, restore original resolution and
# framerate otherwise the camera can be left in unusual states
# (camera config not matching ports, etc).
self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_CONFIG] = old_cc
self._camera_config = old_cc
for port, (res, fps, fps_range) in zip(self._camera.outputs, old_ports):
port.framesize = res
port.framerate = fps
port.params[mmal.MMAL_PARAMETER_FPS_RANGE] = fps_range
port.commit()
raise
def _get_framerate(self):
self._check_camera_open()
port_num = (
self.CAMERA_VIDEO_PORT
if self._encoders else
self.CAMERA_PREVIEW_PORT
)
return mo.PiCameraFraction(self._camera.outputs[port_num].framerate)
def _set_framerate(self, value):
self._check_camera_open()
self._check_recording_stopped()
value = mo.to_fraction(value, den_limit=256)
if not (0 < value <= self.MAX_FRAMERATE):
raise PiCameraValueError("Invalid framerate: %.2ffps" % value)
sensor_mode = self.sensor_mode
clock_mode = self.CLOCK_MODES[self.clock_mode]
resolution = self.resolution
self._disable_camera()
self._configure_camera(
sensor_mode=sensor_mode, framerate=value, resolution=resolution,
clock_mode=clock_mode)
self._configure_splitter()
self._enable_camera()
framerate = property(_get_framerate, _set_framerate, doc="""\
Retrieves or sets the framerate at which video-port based image
captures, video recordings, and previews will run.
When queried, the :attr:`framerate` property returns the rate at which
the camera's video and preview ports will operate as a
:class:`~fractions.Fraction` instance (which can be easily converted to
an :class:`int` or :class:`float`). If :attr:`framerate_range` has been
set, then :attr:`framerate` will be 0 which indicates that a dynamic
range of framerates is being used.
.. note::
For backwards compatibility, a derivative of the
:class:`~fractions.Fraction` class is actually used which permits
the value to be treated as a tuple of ``(numerator, denominator)``.
Setting and retrieving framerate as a ``(numerator, denominator)``
tuple is deprecated and will be removed in 2.0. Please use a
:class:`~fractions.Fraction` instance instead (which is just as
accurate and also permits direct use with math operators).
When set, the property configures the camera so that the next call to
recording and previewing methods will use the new framerate. Setting
this property implicitly sets :attr:`framerate_range` so that the low
and high values are equal to the new framerate. The framerate can be
specified as an :ref:`int <typesnumeric>`, :ref:`float <typesnumeric>`,
:class:`~fractions.Fraction`, or a ``(numerator, denominator)`` tuple.
For example, the following definitions are all equivalent::
from fractions import Fraction
camera.framerate = 30
camera.framerate = 30 / 1
camera.framerate = Fraction(30, 1)
camera.framerate = (30, 1) # deprecated
The camera must not be closed, and no recording must be active when the
property is set.
.. note::
This attribute, in combination with :attr:`resolution`, determines
the mode that the camera operates in. The actual sensor framerate
and resolution used by the camera is influenced, but not directly
set, by this property. See :attr:`sensor_mode` for more
information.
The initial value of this property can be specified with the
*framerate* parameter in the :class:`PiCamera` constructor, and will
default to 30 if not specified.
""")
def _get_sensor_mode(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_CUSTOM_SENSOR_CONFIG]
def _set_sensor_mode(self, value):
self._check_camera_open()
self._check_recording_stopped()
try:
if not (0 <= value <= 7):
raise PiCameraValueError(
"Invalid sensor mode: %d (valid range 0..7)" % value)
except TypeError:
raise PiCameraValueError("Invalid sensor mode: %s" % value)
sensor_mode = self.sensor_mode
clock_mode = self.CLOCK_MODES[self.clock_mode]
resolution = self.resolution
framerate = Fraction(self.framerate)
if framerate == 0:
framerate = self.framerate_range
self._disable_camera()
self._configure_camera(
old_sensor_mode=sensor_mode, sensor_mode=value,
framerate=framerate, resolution=resolution,
clock_mode=clock_mode)
self._configure_splitter()
self._enable_camera()
sensor_mode = property(_get_sensor_mode, _set_sensor_mode, doc="""\
Retrieves or sets the input mode of the camera's sensor.
This is an advanced property which can be used to control the camera's
sensor mode. By default, mode 0 is used which allows the camera to
automatically select an input mode based on the requested
:attr:`resolution` and :attr:`framerate`. Valid values are currently
between 0 and 7. The set of valid sensor modes (along with the
heuristic used to select one automatically) are detailed in the
:ref:`camera_modes` section of the documentation.
.. note::
At the time of writing, setting this property does nothing unless
the camera has been initialized with a sensor mode other than 0.
Furthermore, some mode transitions appear to require setting the
property twice (in a row). This appears to be a firmware
limitation.
The initial value of this property can be specified with the
*sensor_mode* parameter in the :class:`PiCamera` constructor, and will
default to 0 if not specified.
.. versionadded:: 1.9
""")
def _get_clock_mode(self):
self._check_camera_open()
return self._CLOCK_MODES_R[self._camera_config.use_stc_timestamp]
def _set_clock_mode(self, value):
self._check_camera_open()
self._check_recording_stopped()
try:
clock_mode = self.CLOCK_MODES[value]
except KeyError:
raise PiCameraValueError("Invalid clock mode %s" % value)
sensor_mode = self.sensor_mode
framerate = Fraction(self.framerate)
if framerate == 0:
framerate = self.framerate_range
resolution = self.resolution
self._disable_camera()
self._configure_camera(
sensor_mode=sensor_mode, framerate=framerate,
resolution=resolution, clock_mode=clock_mode)
self._configure_splitter()
self._enable_camera()
clock_mode = property(_get_clock_mode, _set_clock_mode, doc="""\
Retrieves or sets the mode of the camera's clock.
This is an advanced property which can be used to control the nature of
the frame timestamps available from the :attr:`frame` property. When
this is "reset" (the default) each frame's timestamp will be relative
to the start of the recording. When this is "raw", each frame's
timestamp will be relative to the last initialization of the camera.
The initial value of this property can be specified with the
*clock_mode* parameter in the :class:`PiCamera` constructor, and will
default to "reset" if not specified.
.. versionadded:: 1.11
""")
def _get_resolution(self):
self._check_camera_open()
return mo.PiResolution(
int(self._camera_config.max_stills_w),
int(self._camera_config.max_stills_h)
)
def _set_resolution(self, value):
self._check_camera_open()
self._check_recording_stopped()
value = mo.to_resolution(value)
if not (
(0 < value.width <= self.MAX_RESOLUTION.width) and
(0 < value.height <= self.MAX_RESOLUTION.height)):
raise PiCameraValueError(
"Invalid resolution requested: %r" % (value,))
sensor_mode = self.sensor_mode
clock_mode = self.CLOCK_MODES[self.clock_mode]
framerate = Fraction(self.framerate)
if framerate == 0:
framerate = self.framerate_range
self._disable_camera()
self._configure_camera(
sensor_mode=sensor_mode, framerate=framerate,
resolution=value, clock_mode=clock_mode)
self._configure_splitter()
self._enable_camera()
resolution = property(_get_resolution, _set_resolution, doc="""
Retrieves or sets the resolution at which image captures, video
recordings, and previews will be captured.
When queried, the :attr:`resolution` property returns the resolution at
which the camera will operate as a tuple of ``(width, height)``
measured in pixels. This is the resolution that the :meth:`capture`
method will produce images at, and the resolution that
:meth:`start_recording` will produce videos at.
When set, the property configures the camera so that the next call to
these methods will use the new resolution. The resolution can be
specified as a ``(width, height)`` tuple, as a string formatted
``'WIDTHxHEIGHT'``, or as a string containing a commonly recognized
`display resolution`_ name (e.g. "VGA", "HD", "1080p", etc.). For
example, the following definitions are all equivalent::
camera.resolution = (1280, 720)
camera.resolution = '1280x720'
camera.resolution = '1280 x 720'
camera.resolution = 'HD'
camera.resolution = '720p'
The camera must not be closed, and no recording must be active when the
property is set.
.. note::
This attribute, in combination with :attr:`framerate`, determines
the mode that the camera operates in. The actual sensor framerate
and resolution used by the camera is influenced, but not directly
set, by this property. See :attr:`sensor_mode` for more
information.
The initial value of this property can be specified with the
*resolution* parameter in the :class:`PiCamera` constructor, and will
default to the display's resolution or 1280x720 if the display has
been disabled (with ``tvservice -o``).
.. versionchanged:: 1.11
Resolution permitted to be set as a string. Preview resolution
added as separate property.
.. _display resolution: https://en.wikipedia.org/wiki/Graphics_display_resolution
""")
def _get_framerate_range(self):
self._check_camera_open()
port_num = (
self.CAMERA_VIDEO_PORT
if self._encoders else
self.CAMERA_PREVIEW_PORT
)
mp = self._camera.outputs[port_num].params[mmal.MMAL_PARAMETER_FPS_RANGE]
return mo.PiFramerateRange(
mo.to_fraction(mp.fps_low), mo.to_fraction(mp.fps_high))
def _set_framerate_range(self, value):
self._check_camera_open()
self._check_recording_stopped()
low, high = value
low = mo.to_fraction(low, den_limit=256)
high = mo.to_fraction(high, den_limit=256)
if not (0 < low <= self.MAX_FRAMERATE):
raise PiCameraValueError("Invalid low framerate: %.2ffps" % low)
if not (0 < high <= self.MAX_FRAMERATE):
raise PiCameraValueError("Invalid high framerate: %.2ffps" % high)
if high < low:
raise PiCameraValueError("framerate_range is backwards")
sensor_mode = self.sensor_mode
clock_mode = self.CLOCK_MODES[self.clock_mode]
resolution = self.resolution
self._disable_camera()
self._configure_camera(
sensor_mode=sensor_mode, framerate=(low, high),
resolution=resolution, clock_mode=clock_mode)
self._configure_splitter()
self._enable_camera()
framerate_range = property(_get_framerate_range, _set_framerate_range, doc="""\
Retrieves or sets a range between which the camera's framerate is
allowed to float.
When queried, the :attr:`framerate_range` property returns a
:func:`~collections.namedtuple` derivative with ``low`` and ``high``
components (index 0 and 1 respectively) which specify the limits of the
permitted framerate range.
When set, the property configures the camera so that the next call to
recording and previewing methods will use the new framerate range.
Setting this property will implicitly set the :attr:`framerate`
property to 0 (indicating that a dynamic range of framerates is in use
by the camera).
.. note::
Use of this property prevents use of :attr:`framerate_delta` (there
would be little point in making fractional adjustments to the
framerate when the framerate itself is variable).
The low and high framerates can be specified as :ref:`int
<typesnumeric>`, :ref:`float <typesnumeric>`, or
:class:`~fractions.Fraction` values. For example, the following
definitions are all equivalent::
from fractions import Fraction
camera.framerate_range = (0.16666, 30)
camera.framerate_range = (Fraction(1, 6), 30 / 1)
camera.framerate_range = (Fraction(1, 6), Fraction(30, 1))
The camera must not be closed, and no recording must be active when the
property is set.
.. note::
This attribute, like :attr:`framerate`, determines the mode that
the camera operates in. The actual sensor framerate and resolution
used by the camera is influenced, but not directly set, by this
property. See :attr:`sensor_mode` for more information.
.. versionadded:: 1.13
""")
def _get_framerate_delta(self):
self._check_camera_open()
if self.framerate == 0:
raise PiCameraValueError(
'framerate_delta cannot be used with framerate_range')
port_num = (
self.CAMERA_VIDEO_PORT
if self._encoders else
self.CAMERA_PREVIEW_PORT
)
return self._camera.outputs[port_num].params[mmal.MMAL_PARAMETER_FRAME_RATE] - self.framerate
def _set_framerate_delta(self, value):
self._check_camera_open()
if self.framerate == 0:
raise PiCameraValueError(
'framerate_delta cannot be used with framerate_range')
value = mo.to_fraction(self.framerate + value, den_limit=256)
self._camera.outputs[self.CAMERA_PREVIEW_PORT].params[mmal.MMAL_PARAMETER_FRAME_RATE] = value
self._camera.outputs[self.CAMERA_VIDEO_PORT].params[mmal.MMAL_PARAMETER_FRAME_RATE] = value
framerate_delta = property(_get_framerate_delta, _set_framerate_delta, doc="""\
Retrieves or sets a fractional amount that is added to the camera's
framerate for the purpose of minor framerate adjustments.
When queried, the :attr:`framerate_delta` property returns the amount
that the camera's :attr:`framerate` has been adjusted. This defaults
to 0 (so the camera's framerate is the actual framerate used).
When set, the property adjusts the camera's framerate on the fly. The
property can be set while recordings or previews are in progress. Thus
the framerate used by the camera is actually :attr:`framerate` +
:attr:`framerate_delta`.
.. note::
Framerate deltas can be fractional, with adjustments as small as
1/256th of an fps possible (finer adjustments will be rounded).
With an appropriately tuned PID controller, this can be used to
achieve synchronization between the camera framerate and other
devices.
If the new framerate demands a mode switch (such as moving between a
low framerate and a high framerate mode), currently active recordings
may drop a frame. This should only happen when specifying quite large
deltas, or when framerate is at the boundary of a sensor mode (e.g.
49fps).
The framerate delta can be specified as an :ref:`int <typesnumeric>`,
:ref:`float <typesnumeric>`, :class:`~fractions.Fraction` or a
``(numerator, denominator)`` tuple. For example, the following
definitions are all equivalent::
from fractions import Fraction
camera.framerate_delta = 0.5
camera.framerate_delta = 1 / 2 # in python 3
camera.framerate_delta = Fraction(1, 2)
camera.framerate_delta = (1, 2) # deprecated
.. note::
This property is implicitly reset to 0 when :attr:`framerate` or
:attr:`framerate_range` is set. When :attr:`framerate` is 0
(indicating that :attr:`framerate_range` is set), this property
cannot be used (there would be little point in making fractional
adjustments to the framerate when the framerate itself is
variable).
.. versionadded:: 1.11
""")
def _get_still_stats(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_CAPTURE_STATS_PASS]
def _set_still_stats(self, value):
self._check_camera_open()
self._camera.control.params[mmal.MMAL_PARAMETER_CAPTURE_STATS_PASS] = value
still_stats = property(_get_still_stats, _set_still_stats, doc="""\
Retrieves or sets whether statistics will be calculated from still
frames or the prior preview frame.
When queried, the :attr:`still_stats` property returns a boolean value
indicating when scene statistics will be calculated for still captures
(that is, captures where the *use_video_port* parameter of
:meth:`capture` is ``False``). When this property is ``False`` (the
default), statistics will be calculated from the preceding preview
frame (this also applies when the preview is not visible). When ``True``,
statistics will be calculated from the captured image itself.
When set, the property controls when scene statistics will be
calculated for still captures. The property can be set while recordings
or previews are in progress. The default value is ``False``.
The advantages to calculating scene statistics from the captured image
are that time between startup and capture is reduced as only the AGC
(automatic gain control) has to converge. The downside is that
processing time for captures increases and that white balance and gain
won't necessarily match the preview.
.. warning::
Enabling the still statistics pass will `override fixed white
balance`_ gains (set via :attr:`awb_gains` and :attr:`awb_mode`).
.. _override fixed white balance: https://www.raspberrypi.org/forums/viewtopic.php?p=875772&sid=92fa4ea70d1fe24590a4cdfb4a10c489#p875772
.. versionadded:: 1.9
""")
def _get_saturation(self):
self._check_camera_open()
return int(self._camera.control.params[mmal.MMAL_PARAMETER_SATURATION] * 100)
def _set_saturation(self, value):
self._check_camera_open()
if not (-100 <= value <= 100):
raise PiCameraValueError(
"Invalid saturation value: %d (valid range -100..100)" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_SATURATION] = Fraction(value, 100)
saturation = property(_get_saturation, _set_saturation, doc="""\
Retrieves or sets the saturation setting of the camera.
When queried, the :attr:`saturation` property returns the color
saturation of the camera as an integer between -100 and 100. When set,
the property adjusts the saturation of the camera. Saturation can be
adjusted while previews or recordings are in progress. The default
value is 0.
""")
def _get_sharpness(self):
self._check_camera_open()
return int(self._camera.control.params[mmal.MMAL_PARAMETER_SHARPNESS] * 100)
def _set_sharpness(self, value):
self._check_camera_open()
if not (-100 <= value <= 100):
raise PiCameraValueError(
"Invalid sharpness value: %d (valid range -100..100)" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_SHARPNESS] = Fraction(value, 100)
sharpness = property(_get_sharpness, _set_sharpness, doc="""\
Retrieves or sets the sharpness setting of the camera.
When queried, the :attr:`sharpness` property returns the sharpness
level of the camera (a measure of the amount of post-processing to
reduce or increase image sharpness) as an integer between -100 and 100.
When set, the property adjusts the sharpness of the camera. Sharpness
can be adjusted while previews or recordings are in progress. The
default value is 0.
""")
def _get_contrast(self):
self._check_camera_open()
return int(self._camera.control.params[mmal.MMAL_PARAMETER_CONTRAST] * 100)
def _set_contrast(self, value):
self._check_camera_open()
if not (-100 <= value <= 100):
raise PiCameraValueError(
"Invalid contrast value: %d (valid range -100..100)" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_CONTRAST] = Fraction(value, 100)
contrast = property(_get_contrast, _set_contrast, doc="""\
Retrieves or sets the contrast setting of the camera.
When queried, the :attr:`contrast` property returns the contrast level
of the camera as an integer between -100 and 100. When set, the
property adjusts the contrast of the camera. Contrast can be adjusted
while previews or recordings are in progress. The default value is 0.
""")
def _get_brightness(self):
self._check_camera_open()
return int(self._camera.control.params[mmal.MMAL_PARAMETER_BRIGHTNESS] * 100)
def _set_brightness(self, value):
self._check_camera_open()
if not (0 <= value <= 100):
raise PiCameraValueError(
"Invalid brightness value: %d (valid range 0..100)" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_BRIGHTNESS] = Fraction(value, 100)
brightness = property(_get_brightness, _set_brightness, doc="""\
Retrieves or sets the brightness setting of the camera.
When queried, the :attr:`brightness` property returns the brightness
level of the camera as an integer between 0 and 100. When set, the
property adjusts the brightness of the camera. Brightness can be
adjusted while previews or recordings are in progress. The default
value is 50.
""")
def _get_shutter_speed(self):
self._check_camera_open()
return int(self._camera.control.params[mmal.MMAL_PARAMETER_SHUTTER_SPEED])
def _set_shutter_speed(self, value):
self._check_camera_open()
self._camera.control.params[mmal.MMAL_PARAMETER_SHUTTER_SPEED] = value
shutter_speed = property(_get_shutter_speed, _set_shutter_speed, doc="""\
Retrieves or sets the shutter speed of the camera in microseconds.
When queried, the :attr:`shutter_speed` property returns the shutter
speed of the camera in microseconds, or 0 which indicates that the
speed will be automatically determined by the auto-exposure algorithm.
Faster shutter times naturally require greater amounts of illumination
and vice versa.
When set, the property adjusts the shutter speed of the camera, which
most obviously affects the illumination of subsequently captured
images. Shutter speed can be adjusted while previews or recordings are
running. The default value is 0 (auto).
.. note::
You can query the :attr:`exposure_speed` attribute to determine the
actual shutter speed being used when this attribute is set to 0.
Please note that this capability requires an up to date firmware
(#692 or later).
.. note::
In later firmwares, this attribute is limited by the value of the
:attr:`framerate` attribute. For example, if framerate is set to
30fps, the shutter speed cannot be slower than 33,333µs (1/fps).
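For example, a minimal sketch of a long exposure (the values are
illustrative; the framerate is lowered first so that the slow shutter
speed is permitted)::

    import picamera

    with picamera.PiCamera(framerate=1) as camera:
        camera.shutter_speed = 500000  # 0.5s, within the 1fps limit
        camera.capture('long_exposure.jpg')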
""")
def _get_exposure_speed(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_SETTINGS].exposure
exposure_speed = property(_get_exposure_speed, doc="""\
Retrieves the current shutter speed of the camera.
When queried, this property returns the shutter speed currently being
used by the camera. If you have set :attr:`shutter_speed` to a non-zero
value, then :attr:`exposure_speed` and :attr:`shutter_speed` should be
equal. However, if :attr:`shutter_speed` is set to 0 (auto), then you
can read the actual shutter speed being used from this attribute. The
value is returned as an integer representing a number of microseconds.
This is a read-only property.
.. versionadded:: 1.6
""")
def _get_analog_gain(self):
self._check_camera_open()
return mo.to_fraction(
self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_SETTINGS].analog_gain)
analog_gain = property(_get_analog_gain, doc="""\
Retrieves the current analog gain of the camera.
When queried, this property returns the analog gain currently being
used by the camera. The value represents the analog gain of the sensor
prior to digital conversion. The value is returned as a
:class:`~fractions.Fraction` instance.
.. versionadded:: 1.6
""")
def _get_digital_gain(self):
self._check_camera_open()
return mo.to_fraction(
self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_SETTINGS].digital_gain)
digital_gain = property(_get_digital_gain, doc="""\
Retrieves the current digital gain of the camera.
When queried, this property returns the digital gain currently being
used by the camera. The value represents the digital gain the camera
applies after conversion of the sensor's analog output. The value is
returned as a :class:`~fractions.Fraction` instance.
.. versionadded:: 1.6
""")
def _get_video_denoise(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_VIDEO_DENOISE]
def _set_video_denoise(self, value):
self._check_camera_open()
self._camera.control.params[mmal.MMAL_PARAMETER_VIDEO_DENOISE] = value
video_denoise = property(_get_video_denoise, _set_video_denoise, doc="""\
Retrieves or sets whether denoise will be applied to video recordings.
When queried, the :attr:`video_denoise` property returns a boolean
value indicating whether or not the camera software will apply a
denoise algorithm to video recordings.
When set, the property activates or deactivates the denoise algorithm
for video recordings. The property can be set while recordings or
previews are in progress. The default value is ``True``.
.. versionadded:: 1.7
""")
def _get_image_denoise(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_STILLS_DENOISE]
def _set_image_denoise(self, value):
self._check_camera_open()
self._camera.control.params[mmal.MMAL_PARAMETER_STILLS_DENOISE] = value
image_denoise = property(_get_image_denoise, _set_image_denoise, doc="""\
Retrieves or sets whether denoise will be applied to image captures.
When queried, the :attr:`image_denoise` property returns a boolean
value indicating whether or not the camera software will apply a
denoise algorithm to image captures.
When set, the property activates or deactivates the denoise algorithm
for image captures. The property can be set while recordings or
previews are in progress. The default value is ``True``.
.. versionadded:: 1.7
""")
def _get_drc_strength(self):
self._check_camera_open()
return self._DRC_STRENGTHS_R[
self._camera.control.params[mmal.MMAL_PARAMETER_DYNAMIC_RANGE_COMPRESSION].strength
]
def _set_drc_strength(self, value):
self._check_camera_open()
try:
mp = self._camera.control.params[mmal.MMAL_PARAMETER_DYNAMIC_RANGE_COMPRESSION]
mp.strength = self.DRC_STRENGTHS[value]
except KeyError:
raise PiCameraValueError(
"Invalid dynamic range compression strength: %s" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_DYNAMIC_RANGE_COMPRESSION] = mp
drc_strength = property(_get_drc_strength, _set_drc_strength, doc="""\
Retrieves or sets the dynamic range compression strength of the camera.
When queried, the :attr:`drc_strength` property returns a string
indicating the amount of `dynamic range compression`_ the camera
applies to images.
When set, the attribute adjusts the strength of the dynamic range
compression applied to the camera's output. Valid values are given
in the list below:
{values}
The default value is ``'off'``. All possible values for the attribute
can be obtained from the ``PiCamera.DRC_STRENGTHS`` attribute.
.. warning::
Enabling DRC will `override fixed white balance`_ gains (set via
:attr:`awb_gains` and :attr:`awb_mode`).
.. _dynamic range compression: https://en.wikipedia.org/wiki/Gain_compression
.. _override fixed white balance: https://www.raspberrypi.org/forums/viewtopic.php?p=875772&sid=92fa4ea70d1fe24590a4cdfb4a10c489#p875772
.. versionadded:: 1.6
""".format(values=docstring_values(DRC_STRENGTHS)))
def _get_ISO(self):
warnings.warn(
PiCameraDeprecated(
'PiCamera.ISO is deprecated; use PiCamera.iso instead'))
return self.iso
def _set_ISO(self, value):
warnings.warn(
PiCameraDeprecated(
'PiCamera.ISO is deprecated; use PiCamera.iso instead'))
self.iso = value
ISO = property(_get_ISO, _set_ISO, doc="""
Retrieves or sets the apparent ISO setting of the camera.
.. deprecated:: 1.8
Please use the :attr:`iso` attribute instead.
""")
def _get_iso(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_ISO]
def _set_iso(self, value):
self._check_camera_open()
try:
if not (0 <= value <= 1600):
raise PiCameraValueError(
"Invalid iso value: %d (valid range 0..800)" % value)
except TypeError:
raise PiCameraValueError("Invalid iso value: %s" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_ISO] = value
iso = property(_get_iso, _set_iso, doc="""\
Retrieves or sets the apparent ISO setting of the camera.
When queried, the :attr:`iso` property returns the ISO setting of the
camera, a value which represents the `sensitivity of the camera to
light`_. Lower values (e.g. 100) imply less sensitivity than higher
values (e.g. 400 or 800). Lower sensitivities tend to produce less
"noisy" (smoother) images, but operate poorly in low light conditions.
When set, the property adjusts the sensitivity of the camera (by
adjusting the :attr:`analog_gain` and :attr:`digital_gain`). Valid
values are between 0 (auto) and 1600. The actual value used when iso is
explicitly set will be one of the following values (whichever is
closest): 100, 200, 320, 400, 500, 640, 800.
On the V1 camera module, non-zero ISO values attempt to fix overall
gain at various levels. For example, ISO 100 attempts to provide an
overall gain of 1.0, ISO 200 attempts to provide overall gain of 2.0,
etc. The algorithm prefers analog gain over digital gain to reduce
noise.
On the V2 camera module, ISO 100 attempts to produce overall gain of
~1.84, and ISO 800 attempts to produce overall gain of ~14.72 (the V2
camera module was calibrated against the `ISO film speed`_ standard).
The attribute can be adjusted while previews or recordings are in
progress. The default value is 0 which means automatically determine a
value according to image-taking conditions.
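For example, a minimal sketch for a bright, well-lit scene (the value
is illustrative)::

    camera.iso = 100  # low sensitivity suits bright conditions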
.. note::
Some users on the Pi camera forum have noted that higher ISO values
than 800 (specifically up to 1600) can be achieved in certain
conditions with :attr:`exposure_mode` set to ``'sports'`` and
:attr:`iso` set to 0. It doesn't appear to be possible to manually
request an ISO setting higher than 800, but the picamera library
will permit settings up to 1600 in case the underlying firmware
permits such settings in particular circumstances.
.. note::
Certain :attr:`exposure_mode` values override the ISO setting. For
example, ``'off'`` fixes :attr:`analog_gain` and
:attr:`digital_gain` entirely, preventing this property from
adjusting them when set.
.. _sensitivity of the camera to light: https://en.wikipedia.org/wiki/Film_speed#Digital
.. _ISO film speed: https://en.wikipedia.org/wiki/Film_speed#Current_system:_ISO
""")
def _get_meter_mode(self):
self._check_camera_open()
return self._METER_MODES_R[
self._camera.control.params[mmal.MMAL_PARAMETER_EXP_METERING_MODE].value
]
def _set_meter_mode(self, value):
self._check_camera_open()
try:
mp = self._camera.control.params[mmal.MMAL_PARAMETER_EXP_METERING_MODE]
mp.value = self.METER_MODES[value]
except KeyError:
raise PiCameraValueError("Invalid metering mode: %s" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_EXP_METERING_MODE] = mp
meter_mode = property(_get_meter_mode, _set_meter_mode, doc="""\
Retrieves or sets the metering mode of the camera.
When queried, the :attr:`meter_mode` property returns the method by
which the camera `determines the exposure`_ as one of the following
strings:
{values}
When set, the property adjusts the camera's metering mode. All modes
set up two regions: a center region, and an outer region. The major
`difference between each mode`_ is the size of the center region. The
``'backlit'`` mode has the largest central region (30% of the width),
while ``'spot'`` has the smallest (10% of the width).
The property can be set while recordings or previews are in progress.
The default value is ``'average'``. All possible values for the
attribute can be obtained from the ``PiCamera.METER_MODES`` attribute.
.. _determines the exposure: https://en.wikipedia.org/wiki/Metering_mode
.. _difference between each mode: https://www.raspberrypi.org/forums/viewtopic.php?p=565644#p565644
""".format(values=docstring_values(METER_MODES)))
def _get_video_stabilization(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_VIDEO_STABILISATION]
def _set_video_stabilization(self, value):
self._check_camera_open()
self._camera.control.params[mmal.MMAL_PARAMETER_VIDEO_STABILISATION] = value
video_stabilization = property(
_get_video_stabilization, _set_video_stabilization, doc="""\
Retrieves or sets the video stabilization mode of the camera.
When queried, the :attr:`video_stabilization` property returns a
boolean value indicating whether or not the camera attempts to
compensate for motion.
When set, the property activates or deactivates video stabilization.
The property can be set while recordings or previews are in progress.
The default value is ``False``.
.. note::
The built-in video stabilization only accounts for `vertical and
horizontal motion`_, not rotation.
.. _vertical and horizontal motion: https://www.raspberrypi.org/forums/viewtopic.php?p=342667&sid=ec7d95e887ab74a90ffaab87888c48cd#p342667
""")
def _get_exposure_compensation(self):
self._check_camera_open()
return self._camera.control.params[mmal.MMAL_PARAMETER_EXPOSURE_COMP]
def _set_exposure_compensation(self, value):
self._check_camera_open()
try:
if not (-25 <= value <= 25):
raise PiCameraValueError(
"Invalid exposure compensation value: "
"%d (valid range -25..25)" % value)
except TypeError:
raise PiCameraValueError(
"Invalid exposure compensation value: %s" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_EXPOSURE_COMP] = value
exposure_compensation = property(
_get_exposure_compensation, _set_exposure_compensation, doc="""\
Retrieves or sets the exposure compensation level of the camera.
When queried, the :attr:`exposure_compensation` property returns an
integer value between -25 and 25 indicating the exposure level of the
camera. Larger values result in brighter images.
When set, the property adjusts the camera's exposure compensation
level. Each increment represents 1/6th of a stop. Hence setting the
attribute to 6 increases exposure by 1 stop. The property can be set
while recordings or previews are in progress. The default value is 0.
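For example, a minimal sketch that brightens captures by one stop::

    camera.exposure_compensation = 6  # 6 increments of 1/6 stop = +1 stop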
""")
def _get_exposure_mode(self):
self._check_camera_open()
return self._EXPOSURE_MODES_R[
self._camera.control.params[mmal.MMAL_PARAMETER_EXPOSURE_MODE].value
]
def _set_exposure_mode(self, value):
self._check_camera_open()
try:
mp = self._camera.control.params[mmal.MMAL_PARAMETER_EXPOSURE_MODE]
mp.value = self.EXPOSURE_MODES[value]
except KeyError:
raise PiCameraValueError("Invalid exposure mode: %s" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_EXPOSURE_MODE] = mp
exposure_mode = property(_get_exposure_mode, _set_exposure_mode, doc="""\
Retrieves or sets the exposure mode of the camera.
When queried, the :attr:`exposure_mode` property returns a string
representing the exposure setting of the camera. The possible values
can be obtained from the ``PiCamera.EXPOSURE_MODES`` attribute, and
are as follows:
{values}
When set, the property adjusts the camera's exposure mode. The
property can be set while recordings or previews are in progress. The
default value is ``'auto'``.
.. note::
Exposure mode ``'off'`` is special: this disables the camera's
automatic gain control, fixing the values of :attr:`digital_gain`
and :attr:`analog_gain`.
Please note that these properties are not directly settable
(although they can be influenced by setting :attr:`iso` *prior* to
fixing the gains), and default to low values when the camera is
first initialized. Therefore it is important to let them settle on
higher values before disabling automatic gain control otherwise all
frames captured will appear black.
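A minimal sketch of that sequence (the two second settle time is a
rough guess rather than a firmware requirement)::

    import time
    import picamera

    with picamera.PiCamera() as camera:
        camera.iso = 100
        time.sleep(2)  # let the AGC settle on reasonable gains
        camera.exposure_mode = 'off'
        camera.capture('fixed_gains.jpg')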
""".format(values=docstring_values(EXPOSURE_MODES)))
def _get_flash_mode(self):
self._check_camera_open()
return self._FLASH_MODES_R[
self._camera.control.params[mmal.MMAL_PARAMETER_FLASH].value
]
def _set_flash_mode(self, value):
self._check_camera_open()
try:
mp = self._camera.control.params[mmal.MMAL_PARAMETER_FLASH]
mp.value = self.FLASH_MODES[value]
except KeyError:
raise PiCameraValueError("Invalid flash mode: %s" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_FLASH] = mp
flash_mode = property(_get_flash_mode, _set_flash_mode, doc="""\
Retrieves or sets the flash mode of the camera.
When queried, the :attr:`flash_mode` property returns a string
representing the flash setting of the camera. The possible values can
be obtained from the ``PiCamera.FLASH_MODES`` attribute, and are as
follows:
{values}
When set, the property adjusts the camera's flash mode. The property
can be set while recordings or previews are in progress. The default
value is ``'off'``.
.. note::
You must define which GPIO pins the camera is to use for flash and
privacy indicators. This is done within the `Device Tree
configuration`_ which is considered an advanced topic.
Specifically, you need to define pins ``FLASH_0_ENABLE`` and
optionally ``FLASH_0_INDICATOR`` (for the privacy indicator). More
information can be found in this :ref:`recipe
<flash_configuration>`.
.. _Device Tree configuration: https://www.raspberrypi.org/documentation/configuration/pin-configuration.md
.. versionadded:: 1.10
""".format(values=docstring_values(FLASH_MODES)))
def _get_awb_mode(self):
self._check_camera_open()
return self._AWB_MODES_R[
self._camera.control.params[mmal.MMAL_PARAMETER_AWB_MODE].value
]
def _set_awb_mode(self, value):
self._check_camera_open()
try:
mp = self._camera.control.params[mmal.MMAL_PARAMETER_AWB_MODE]
mp.value = self.AWB_MODES[value]
except KeyError:
raise PiCameraValueError("Invalid auto-white-balance mode: %s" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_AWB_MODE] = mp
awb_mode = property(_get_awb_mode, _set_awb_mode, doc="""\
Retrieves or sets the auto-white-balance mode of the camera.
When queried, the :attr:`awb_mode` property returns a string
representing the auto white balance setting of the camera. The possible
values can be obtained from the ``PiCamera.AWB_MODES`` attribute, and
are as follows:
{values}
When set, the property adjusts the camera's auto-white-balance mode.
The property can be set while recordings or previews are in progress.
The default value is ``'auto'``.
.. note::
AWB mode ``'off'`` is special: this disables the camera's automatic
white balance permitting manual control of the white balance via
the :attr:`awb_gains` property. However, even with AWB disabled,
some attributes (specifically :attr:`still_stats` and
:attr:`drc_strength`) can cause AWB re-calculations.
""".format(values=docstring_values(AWB_MODES)))
def _get_awb_gains(self):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_CAMERA_SETTINGS]
return (
mo.to_fraction(mp.awb_red_gain),
mo.to_fraction(mp.awb_blue_gain),
)
def _set_awb_gains(self, value):
self._check_camera_open()
try:
red_gain, blue_gain = value
except (ValueError, TypeError):
red_gain = blue_gain = value
if not (0.0 <= red_gain <= 8.0 and 0.0 <= blue_gain <= 8.0):
raise PiCameraValueError(
"Invalid gain(s) in (%f, %f) (valid range: 0.0-8.0)" % (
red_gain, blue_gain))
mp = mmal.MMAL_PARAMETER_AWB_GAINS_T(
mmal.MMAL_PARAMETER_HEADER_T(
mmal.MMAL_PARAMETER_CUSTOM_AWB_GAINS,
ct.sizeof(mmal.MMAL_PARAMETER_AWB_GAINS_T)
),
mo.to_rational(red_gain),
mo.to_rational(blue_gain),
)
self._camera.control.params[mmal.MMAL_PARAMETER_CUSTOM_AWB_GAINS] = mp
awb_gains = property(_get_awb_gains, _set_awb_gains, doc="""\
Gets or sets the auto-white-balance gains of the camera.
When queried, this attribute returns a tuple of values representing
the `(red, blue)` balance of the camera. The `red` and `blue` values
are returned :class:`~fractions.Fraction` instances. The values will
be between 0.0 and 8.0.
When set, this attribute adjusts the camera's auto-white-balance gains.
The property can be specified as a single value in which case both red
and blue gains will be adjusted equally, or as a `(red, blue)` tuple.
Values can be specified as an :ref:`int <typesnumeric>`, :ref:`float
<typesnumeric>` or :class:`~fractions.Fraction` and each gain must be
between 0.0 and 8.0. Typical values for the gains are between 0.9 and
1.9. The property can be set while recordings or previews are in
progress.
.. note::
This attribute only has an effect when :attr:`awb_mode` is set to
``'off'``. Also note that even with AWB disabled, some attributes
(specifically :attr:`still_stats` and :attr:`drc_strength`) can
cause AWB re-calculations.
.. versionchanged:: 1.6
Prior to version 1.6, this attribute was write-only.
""")
def _get_image_effect(self):
self._check_camera_open()
return self._IMAGE_EFFECTS_R[
self._camera.control.params[mmal.MMAL_PARAMETER_IMAGE_EFFECT].value
]
def _set_image_effect(self, value):
self._check_camera_open()
try:
mp = self._camera.control.params[mmal.MMAL_PARAMETER_IMAGE_EFFECT]
mp.value = self.IMAGE_EFFECTS[value]
self._image_effect_params = None
except KeyError:
raise PiCameraValueError("Invalid image effect: %s" % value)
self._camera.control.params[mmal.MMAL_PARAMETER_IMAGE_EFFECT] = mp
image_effect = property(_get_image_effect, _set_image_effect, doc="""\
Retrieves or sets the current image effect applied by the camera.
When queried, the :attr:`image_effect` property returns a string
representing the effect the camera will apply to captured video. The
possible values can be obtained from the ``PiCamera.IMAGE_EFFECTS``
attribute, and are as follows:
{values}
When set, the property changes the effect applied by the camera. The
property can be set while recordings or previews are in progress, but
only certain effects work while recording video (notably ``'negative'``
and ``'solarize'``). The default value is ``'none'``.
""".format(values=docstring_values(IMAGE_EFFECTS)))
def _get_image_effect_params(self):
self._check_camera_open()
return self._image_effect_params
def _set_image_effect_params(self, value):
self._check_camera_open()
to_int = lambda x: int(x)
to_byte = lambda x: max(0, min(255, int(x)))
to_bool = lambda x: (0, 1)[bool(x)]
to_8dot8 = lambda x: int(x * 256)
valid_transforms = {
'solarize': [
(to_bool, to_byte, to_byte, to_byte, to_byte),
(to_byte, to_byte, to_byte, to_byte),
(to_bool,),
],
'colorpoint': [
(lambda x: max(0, min(3, int(x))),),
],
'colorbalance': [
(to_8dot8, to_8dot8, to_8dot8, to_8dot8, to_int, to_int),
(to_8dot8, to_8dot8, to_8dot8, to_8dot8),
(to_8dot8, to_8dot8, to_8dot8),
],
'colorswap': [
(to_bool,),
],
'posterise': [
(lambda x: max(2, min(31, int(x))),),
],
'blur': [
(lambda x: max(1, min(2, int(x))),),
],
'film': [
(to_byte, to_byte, to_byte),
],
'watercolor': [
(),
(to_byte, to_byte),
]
}
# Ensure params is a tuple
try:
params = tuple(i for i in value)
except TypeError:
params = (value,)
# Find the parameter combination for the current effect
effect = self.image_effect
param_transforms = [
transforms for transforms in valid_transforms.get(effect, [])
if len(transforms) == len(params)
]
if not param_transforms:
raise PiCameraValueError(
'invalid set of parameters for effect "%s"' % effect)
param_transforms = param_transforms[0]
params = tuple(
transform(p)
for (transform, p) in zip(param_transforms, params)
)
mp = mmal.MMAL_PARAMETER_IMAGEFX_PARAMETERS_T(
mmal.MMAL_PARAMETER_HEADER_T(
mmal.MMAL_PARAMETER_IMAGE_EFFECT_PARAMETERS,
ct.sizeof(mmal.MMAL_PARAMETER_IMAGEFX_PARAMETERS_T)
),
effect=self.IMAGE_EFFECTS[effect],
num_effect_params=len(params),
effect_parameter=params,
)
self._camera.control.params[mmal.MMAL_PARAMETER_IMAGE_EFFECT_PARAMETERS] = mp
self._image_effect_params = value
image_effect_params = property(
_get_image_effect_params, _set_image_effect_params, doc="""\
Retrieves or sets the parameters for the current :attr:`effect
<image_effect>`.
When queried, the :attr:`image_effect_params` property either returns
``None`` (for effects which have no configurable parameters, or if no
parameters have been configured), or a tuple of numeric values up to
six elements long.
When set, the property changes the parameters of the current
:attr:`effect <image_effect>` as a sequence of numbers, or a single
number. Attempting to set parameters on an effect which does not
support parameters, or providing an incompatible set of parameters for
an effect will raise a :exc:`PiCameraValueError` exception.
        The effects which have parameters, and the combinations those
        parameters can take, are as follows:
.. tabularcolumns:: |p{30mm}|p{25mm}|p{75mm}|
+--------------------+----------------+-----------------------------------------+
| Effect | Parameters | Description |
+====================+================+=========================================+
| ``'solarize'`` | *yuv*, | *yuv* controls whether data is |
        |                    | *x0*, *y0*,    | processed as RGB (0) or YUV (1). Input  |
        |                    | *y1*, *y2*     | values from 0 to *x0* - 1 are remapped  |
| | | linearly onto the range 0 to *y0*. |
| | | Values from *x0* to 255 are remapped |
| | | linearly onto the range *y1* to *y2*. |
| +----------------+-----------------------------------------+
| | *x0*, *y0*, | Same as above, but *yuv* defaults to |
| | *y1*, *y2* | 0 (process as RGB). |
| +----------------+-----------------------------------------+
| | *yuv* | Same as above, but *x0*, *y0*, *y1*, |
| | | *y2* default to 128, 128, 128, 0 |
| | | respectively. |
+--------------------+----------------+-----------------------------------------+
| ``'colorpoint'`` | *quadrant* | *quadrant* specifies which quadrant |
| | | of the U/V space to retain chroma |
| | | from: 0=green, 1=red/yellow, 2=blue, |
| | | 3=purple. There is no default; this |
| | | effect does nothing until parameters |
| | | are set. |
+--------------------+----------------+-----------------------------------------+
| ``'colorbalance'`` | *lens*, | *lens* specifies the lens shading |
| | *r*, *g*, *b*, | strength (0.0 to 256.0, where 0.0 |
| | *u*, *v* | indicates lens shading has no effect). |
| | | *r*, *g*, *b* are multipliers for their |
| | | respective color channels (0.0 to |
| | | 256.0). *u* and *v* are offsets added |
| | | to the U/V plane (0 to 255). |
| +----------------+-----------------------------------------+
        |                    | *lens*,        | Same as above but *u* and *v* default   |
        |                    | *r*, *g*, *b*  | to 0.                                   |
| +----------------+-----------------------------------------+
        |                    | *lens*,        | Same as above but *g* also defaults     |
        |                    | *r*, *b*       | to 1.0.                                 |
+--------------------+----------------+-----------------------------------------+
| ``'colorswap'`` | *dir* | If *dir* is 0, swap RGB to BGR. If |
| | | *dir* is 1, swap RGB to BRG. |
+--------------------+----------------+-----------------------------------------+
| ``'posterise'`` | *steps* | Control the quantization steps for the |
| | | image. Valid values are 2 to 32, and |
| | | the default is 4. |
+--------------------+----------------+-----------------------------------------+
| ``'blur'`` | *size* | Specifies the size of the kernel. Valid |
| | | values are 1 or 2. |
+--------------------+----------------+-----------------------------------------+
| ``'film'`` | *strength*, | *strength* specifies the strength of |
| | *u*, *v* | effect. *u* and *v* are offsets added |
| | | to the U/V plane (0 to 255). |
+--------------------+----------------+-----------------------------------------+
| ``'watercolor'`` | *u*, *v* | *u* and *v* specify offsets to add to |
| | | the U/V plane (0 to 255). |
| +----------------+-----------------------------------------+
| | | No parameters indicates no U/V effect. |
+--------------------+----------------+-----------------------------------------+
.. versionadded:: 1.8
""")
def _get_color_effects(self):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_COLOUR_EFFECT]
if mp.enable != mmal.MMAL_FALSE:
return (mp.u, mp.v)
else:
return None
def _set_color_effects(self, value):
self._check_camera_open()
if value is None:
enable = mmal.MMAL_FALSE
u = v = 128
else:
enable = mmal.MMAL_TRUE
try:
u, v = value
except (TypeError, ValueError) as e:
raise PiCameraValueError(
"Invalid color effect (u, v) tuple: %s" % value)
if not ((0 <= u <= 255) and (0 <= v <= 255)):
raise PiCameraValueError(
"(u, v) values must be between 0 and 255")
mp = mmal.MMAL_PARAMETER_COLOURFX_T(
mmal.MMAL_PARAMETER_HEADER_T(
mmal.MMAL_PARAMETER_COLOUR_EFFECT,
ct.sizeof(mmal.MMAL_PARAMETER_COLOURFX_T)
),
enable, u, v
)
self._camera.control.params[mmal.MMAL_PARAMETER_COLOUR_EFFECT] = mp
color_effects = property(_get_color_effects, _set_color_effects, doc="""\
Retrieves or sets the current color effect applied by the camera.
When queried, the :attr:`color_effects` property either returns
``None`` which indicates that the camera is using normal color
settings, or a ``(u, v)`` tuple where ``u`` and ``v`` are integer
values between 0 and 255.
When set, the property changes the color effect applied by the camera.
The property can be set while recordings or previews are in progress.
For example, to make the image black and white set the value to ``(128,
128)``. The default value is ``None``.
""")
def _get_rotation(self):
self._check_camera_open()
return self._camera.outputs[0].params[mmal.MMAL_PARAMETER_ROTATION]
def _set_rotation(self, value):
self._check_camera_open()
try:
value = ((int(value) % 360) // 90) * 90
except ValueError:
raise PiCameraValueError("Invalid rotation angle: %s" % value)
for port in self._camera.outputs:
port.params[mmal.MMAL_PARAMETER_ROTATION] = value
rotation = property(_get_rotation, _set_rotation, doc="""\
Retrieves or sets the current rotation of the camera's image.
When queried, the :attr:`rotation` property returns the rotation
applied to the image. Valid values are 0, 90, 180, and 270.
When set, the property changes the rotation applied to the camera's
input. The property can be set while recordings or previews are in
progress. The default value is ``0``.
""")
def _get_vflip(self):
self._check_camera_open()
return self._camera.outputs[0].params[mmal.MMAL_PARAMETER_MIRROR] in (
mmal.MMAL_PARAM_MIRROR_VERTICAL, mmal.MMAL_PARAM_MIRROR_BOTH)
def _set_vflip(self, value):
self._check_camera_open()
value = {
(False, False): mmal.MMAL_PARAM_MIRROR_NONE,
(True, False): mmal.MMAL_PARAM_MIRROR_VERTICAL,
(False, True): mmal.MMAL_PARAM_MIRROR_HORIZONTAL,
(True, True): mmal.MMAL_PARAM_MIRROR_BOTH,
}[(bool(value), self.hflip)]
for port in self._camera.outputs:
port.params[mmal.MMAL_PARAMETER_MIRROR] = value
vflip = property(_get_vflip, _set_vflip, doc="""\
Retrieves or sets whether the camera's output is vertically flipped.
When queried, the :attr:`vflip` property returns a boolean indicating
whether or not the camera's output is vertically flipped. The property
can be set while recordings or previews are in progress. The default
value is ``False``.
""")
def _get_hflip(self):
self._check_camera_open()
return self._camera.outputs[0].params[mmal.MMAL_PARAMETER_MIRROR] in (
mmal.MMAL_PARAM_MIRROR_HORIZONTAL, mmal.MMAL_PARAM_MIRROR_BOTH)
def _set_hflip(self, value):
self._check_camera_open()
value = {
(False, False): mmal.MMAL_PARAM_MIRROR_NONE,
(True, False): mmal.MMAL_PARAM_MIRROR_VERTICAL,
(False, True): mmal.MMAL_PARAM_MIRROR_HORIZONTAL,
(True, True): mmal.MMAL_PARAM_MIRROR_BOTH,
}[(self.vflip, bool(value))]
for port in self._camera.outputs:
port.params[mmal.MMAL_PARAMETER_MIRROR] = value
hflip = property(_get_hflip, _set_hflip, doc="""\
Retrieves or sets whether the camera's output is horizontally flipped.
When queried, the :attr:`hflip` property returns a boolean indicating
whether or not the camera's output is horizontally flipped. The
property can be set while recordings or previews are in progress. The
default value is ``False``.
""")
def _get_zoom(self):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_INPUT_CROP]
return (
mp.rect.x / 65535.0,
mp.rect.y / 65535.0,
mp.rect.width / 65535.0,
mp.rect.height / 65535.0,
)
def _set_zoom(self, value):
self._check_camera_open()
try:
x, y, w, h = value
except (TypeError, ValueError) as e:
raise PiCameraValueError(
"Invalid zoom rectangle (x, y, w, h) tuple: %s" % value)
mp = mmal.MMAL_PARAMETER_INPUT_CROP_T(
mmal.MMAL_PARAMETER_HEADER_T(
mmal.MMAL_PARAMETER_INPUT_CROP,
ct.sizeof(mmal.MMAL_PARAMETER_INPUT_CROP_T)
),
mmal.MMAL_RECT_T(
max(0, min(65535, int(65535 * x))),
max(0, min(65535, int(65535 * y))),
max(0, min(65535, int(65535 * w))),
max(0, min(65535, int(65535 * h))),
),
)
self._camera.control.params[mmal.MMAL_PARAMETER_INPUT_CROP] = mp
zoom = property(_get_zoom, _set_zoom, doc="""\
Retrieves or sets the zoom applied to the camera's input.
When queried, the :attr:`zoom` property returns a ``(x, y, w, h)``
tuple of floating point values ranging from 0.0 to 1.0, indicating the
proportion of the image to include in the output (this is also known as
the "Region of Interest" or ROI). The default value is ``(0.0, 0.0,
1.0, 1.0)`` which indicates that everything should be included. The
property can be set while recordings or previews are in progress.
The `zoom` is applied to the processed image, after rotation and rescale.
If rotation has been used, zoom is composed of ``(y, x, h, w)`` instead.
        The values `w` and `h` can modify the aspect ratio of the image: use
        equal values for `w` and `h` if you want to keep the aspect ratio the
        same.
""")
def _get_crop(self):
warnings.warn(
PiCameraDeprecated(
'PiCamera.crop is deprecated; use PiCamera.zoom instead'))
return self.zoom
def _set_crop(self, value):
warnings.warn(
PiCameraDeprecated(
'PiCamera.crop is deprecated; use PiCamera.zoom instead'))
self.zoom = value
crop = property(_get_crop, _set_crop, doc="""
Retrieves or sets the zoom applied to the camera's input.
.. deprecated:: 1.8
Please use the :attr:`zoom` attribute instead.
""")
def _get_overlays(self):
self._check_camera_open()
return self._overlays
overlays = property(_get_overlays, doc="""\
Retrieves all active :class:`PiRenderer` overlays.
        If no overlays are currently active, :attr:`overlays` will return an
empty iterable. Otherwise, it will return an iterable of
:class:`PiRenderer` instances which are currently acting as overlays.
Note that the preview renderer is an exception to this: it is *not*
included as an overlay despite being derived from :class:`PiRenderer`.
.. versionadded:: 1.8
""")
def _get_preview(self):
self._check_camera_open()
if isinstance(self._preview, PiPreviewRenderer):
return self._preview
preview = property(_get_preview, doc="""\
Retrieves the :class:`PiRenderer` displaying the camera preview.
If no preview is currently active, :attr:`preview` will return
``None``. Otherwise, it will return the instance of
:class:`PiRenderer` which is currently connected to the camera's
preview port for rendering what the camera sees. You can use the
attributes of the :class:`PiRenderer` class to configure the appearance
of the preview. For example, to make the preview semi-transparent::
import picamera
with picamera.PiCamera() as camera:
camera.start_preview()
camera.preview.alpha = 128
.. versionadded:: 1.8
""")
def _get_preview_alpha(self):
self._check_camera_open()
warnings.warn(
PiCameraDeprecated(
'PiCamera.preview_alpha is deprecated; use '
'PiCamera.preview.alpha instead'))
if self.preview:
return self.preview.alpha
else:
return self._preview_alpha
def _set_preview_alpha(self, value):
self._check_camera_open()
warnings.warn(
PiCameraDeprecated(
'PiCamera.preview_alpha is deprecated; use '
'PiCamera.preview.alpha instead'))
if self.preview:
self.preview.alpha = value
else:
self._preview_alpha = value
preview_alpha = property(_get_preview_alpha, _set_preview_alpha, doc="""\
Retrieves or sets the opacity of the preview window.
.. deprecated:: 1.8
Please use the :attr:`~PiRenderer.alpha` attribute of the
:attr:`preview` object instead.
""")
def _get_preview_layer(self):
self._check_camera_open()
warnings.warn(
PiCameraDeprecated(
'PiCamera.preview_layer is deprecated; '
'use PiCamera.preview.layer instead'))
if self.preview:
return self.preview.layer
else:
return self._preview_layer
def _set_preview_layer(self, value):
self._check_camera_open()
warnings.warn(
PiCameraDeprecated(
'PiCamera.preview_layer is deprecated; '
'use PiCamera.preview.layer instead'))
if self.preview:
self.preview.layer = value
else:
self._preview_layer = value
preview_layer = property(_get_preview_layer, _set_preview_layer, doc="""\
Retrieves or sets the layer of the preview window.
.. deprecated:: 1.8
Please use the :attr:`~PiRenderer.layer` attribute of the
:attr:`preview` object instead.
""")
def _get_preview_fullscreen(self):
self._check_camera_open()
warnings.warn(
PiCameraDeprecated(
'PiCamera.preview_fullscreen is deprecated; '
'use PiCamera.preview.fullscreen instead'))
if self.preview:
return self.preview.fullscreen
else:
return self._preview_fullscreen
def _set_preview_fullscreen(self, value):
self._check_camera_open()
warnings.warn(
PiCameraDeprecated(
'PiCamera.preview_fullscreen is deprecated; '
'use PiCamera.preview.fullscreen instead'))
if self.preview:
self.preview.fullscreen = value
else:
self._preview_fullscreen = value
preview_fullscreen = property(
_get_preview_fullscreen, _set_preview_fullscreen, doc="""\
Retrieves or sets full-screen for the preview window.
.. deprecated:: 1.8
Please use the :attr:`~PiRenderer.fullscreen` attribute of the
:attr:`preview` object instead.
""")
def _get_preview_window(self):
self._check_camera_open()
warnings.warn(
PiCameraDeprecated(
'PiCamera.preview_window is deprecated; '
'use PiCamera.preview.window instead'))
if self.preview:
return self.preview.window
else:
return self._preview_window
def _set_preview_window(self, value):
self._check_camera_open()
warnings.warn(
PiCameraDeprecated(
'PiCamera.preview_window is deprecated; '
'use PiCamera.preview.window instead'))
if self.preview:
self.preview.window = value
else:
self._preview_window = value
preview_window = property(
_get_preview_window, _set_preview_window, doc="""\
Retrieves or sets the size of the preview window.
.. deprecated:: 1.8
Please use the :attr:`~PiRenderer.window` attribute of the
:attr:`preview` object instead.
""")
def _get_annotate_text(self):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
if mp.enable:
return mp.text.decode('ascii')
else:
return ''
def _set_annotate_text(self, value):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
mp.enable = bool(value or mp.show_frame_num)
if mp.enable:
try:
mp.text = value.encode('ascii')
except ValueError as e:
raise PiCameraValueError(str(e))
self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE] = mp
annotate_text = property(_get_annotate_text, _set_annotate_text, doc="""\
Retrieves or sets a text annotation for all output.
When queried, the :attr:`annotate_text` property returns the current
annotation (if no annotation has been set, this is simply a blank
string).
When set, the property immediately applies the annotation to the
preview (if it is running) and to any future captures or video
recording. Strings longer than 255 characters, or strings containing
non-ASCII characters will raise a :exc:`PiCameraValueError`. The
default value is ``''``.
.. versionchanged:: 1.8
Text annotations can now be 255 characters long. The prior limit
was 32 characters.
""")
def _get_annotate_frame_num(self):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
return mp.show_frame_num.value != mmal.MMAL_FALSE
def _set_annotate_frame_num(self, value):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
mp.enable = bool(value or mp.text)
mp.show_frame_num = bool(value)
self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE] = mp
annotate_frame_num = property(
_get_annotate_frame_num, _set_annotate_frame_num, doc="""\
Controls whether the current frame number is drawn as an annotation.
The :attr:`annotate_frame_num` attribute is a bool indicating whether
or not the current frame number is rendered as an annotation, similar
to :attr:`annotate_text`. The default is ``False``.
.. versionadded:: 1.8
""")
def _get_annotate_text_size(self):
self._check_camera_open()
if self._camera.annotate_rev == 3:
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
return mp.text_size or self.DEFAULT_ANNOTATE_SIZE
else:
return self.DEFAULT_ANNOTATE_SIZE
def _set_annotate_text_size(self, value):
self._check_camera_open()
if not (6 <= value <= 160):
raise PiCameraValueError(
"Invalid annotation text size: %d (valid range 6-160)" % value)
if self._camera.annotate_rev == 3:
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
mp.text_size = value
self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE] = mp
elif value != self.DEFAULT_ANNOTATE_SIZE:
warnings.warn(
PiCameraFallback(
"Firmware does not support setting annotation text "
"size; using default (%d) instead" % self.DEFAULT_ANNOTATE_SIZE))
annotate_text_size = property(
_get_annotate_text_size, _set_annotate_text_size, doc="""\
Controls the size of the annotation text.
The :attr:`annotate_text_size` attribute is an int which determines how
large the annotation text will appear on the display. Valid values are
in the range 6 to 160, inclusive. The default is {size}.
.. versionadded:: 1.10
""".format(size=DEFAULT_ANNOTATE_SIZE))
def _get_annotate_foreground(self):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
if self._camera.annotate_rev == 3 and mp.custom_text_color:
return Color.from_yuv_bytes(
mp.custom_text_Y,
mp.custom_text_U,
mp.custom_text_V)
else:
return Color('white')
def _set_annotate_foreground(self, value):
self._check_camera_open()
if not isinstance(value, Color):
raise PiCameraValueError(
'annotate_foreground must be a Color')
elif self._camera.annotate_rev < 3:
if value.rgb_bytes != (255, 255, 255):
warnings.warn(
PiCameraFallback(
"Firmware does not support setting a custom foreground "
"annotation color; using white instead"))
return
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
mp.custom_text_color = True
(
mp.custom_text_Y,
mp.custom_text_U,
mp.custom_text_V,
) = value.yuv_bytes
self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE] = mp
annotate_foreground = property(
_get_annotate_foreground, _set_annotate_foreground, doc="""\
Controls the color of the annotation text.
The :attr:`annotate_foreground` attribute specifies, partially, the
color of the annotation text. The value is specified as a
:class:`Color`. The default is white.
.. note::
The underlying firmware does not directly support setting all
components of the text color, only the Y' component of a `Y'UV`_
tuple. This is roughly (but not precisely) analogous to the
"brightness" of a color, so you may choose to think of this as
setting how bright the annotation text will be relative to its
background. In order to specify just the Y' component when setting
this attribute, you may choose to construct the
:class:`Color` instance as follows::
camera.annotate_foreground = picamera.Color(y=0.2, u=0, v=0)
.. _Y'UV: https://en.wikipedia.org/wiki/YUV
.. versionadded:: 1.10
""")
def _get_annotate_background(self):
self._check_camera_open()
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
if self._camera.annotate_rev == 3:
if mp.enable_text_background:
if mp.custom_background_color:
return Color.from_yuv_bytes(
mp.custom_background_Y,
mp.custom_background_U,
mp.custom_background_V)
else:
return Color('black')
else:
return None
else:
if mp.black_text_background:
return Color('black')
else:
return None
def _set_annotate_background(self, value):
self._check_camera_open()
if value is True:
warnings.warn(
PiCameraDeprecated(
'Setting PiCamera.annotate_background to True is '
'deprecated; use PiCamera.color.Color("black") instead'))
value = Color('black')
elif value is False:
warnings.warn(
PiCameraDeprecated(
'Setting PiCamera.annotate_background to False is '
'deprecated; use None instead'))
value = None
elif value is None:
pass
elif not isinstance(value, Color):
raise PiCameraValueError(
'annotate_background must be a Color or None')
elif self._camera.annotate_rev < 3 and value.rgb_bytes != (0, 0, 0):
warnings.warn(
PiCameraFallback(
"Firmware does not support setting a custom background "
"annotation color; using black instead"))
mp = self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE]
if self._camera.annotate_rev == 3:
if value is None:
mp.enable_text_background = False
else:
mp.enable_text_background = True
mp.custom_background_color = True
(
mp.custom_background_Y,
mp.custom_background_U,
mp.custom_background_V,
) = value.yuv_bytes
else:
if value is None:
mp.black_text_background = False
else:
mp.black_text_background = True
self._camera.control.params[mmal.MMAL_PARAMETER_ANNOTATE] = mp
annotate_background = property(
_get_annotate_background, _set_annotate_background, doc="""\
Controls what background is drawn behind the annotation.
The :attr:`annotate_background` attribute specifies if a background
will be drawn behind the :attr:`annotation text <annotate_text>` and,
if so, what color it will be. The value is specified as a
:class:`Color` or ``None`` if no background should be drawn. The
default is ``None``.
.. note::
For backward compatibility purposes, the value ``False`` will be
treated as ``None``, and the value ``True`` will be treated as the
color black. The "truthiness" of the values returned by the
            attribute is backward compatible although the values themselves
are not.
.. versionadded:: 1.8
.. versionchanged:: 1.10
In prior versions this was a bool value with ``True`` representing
a black background.
""")
# ===== RobertLucian/picamera :: picamera/camera.py (BSD-3-Clause) =====
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""logical_and_impl"""
from mindspore.ops.composite import base
from mindspore.ops import functional as F
# logical_and is a metagraph object which will generate function according to input type
# using ".register" decorator
logical_and = base.MultitypeFuncGraph("logical_and")
@logical_and.register("Number", "Number")
def _logical_and_scala(x, y):
"""
    Return the logical AND of x and y.
Args:
x(Number): Number.
y(Number): Number.
Returns:
        bool, the logical AND of x and y.
"""
return F.bool_and(x.__bool__(), y.__bool__())
@logical_and.register("Tensor", "Tensor")
def _logical_and_tensor(x, y):
"""
    Return the logical AND of x and y.
Args:
x(Tensor): Tensor.
y(Tensor): Tensor.
Returns:
        Tensor, the logical AND of x and y.
"""
return F.logical_and(x, y)
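# Illustrative sketch (not part of the original file): the metagraph
# dispatches on argument types, so the same symbol covers both registrations
# above. In practice it is called from graph-mode code; the lines below are
# shown purely for orientation, with `tensor_a`/`tensor_b` as hypothetical
# MindSpore tensors:
#
#     logical_and(True, False)          # Number/Number path -> False
#     logical_and(tensor_a, tensor_b)   # Tensor/Tensor path, elementwise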
# ===== Gavin-Hoang/mindspore :: mindspore/ops/composite/multitype_ops/logical_and_impl.py (Apache-2.0) =====
from jsonrpc import ServiceProxy
import sys
import string
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
if rpcpass == "":
access = ServiceProxy("http://127.0.0.1:9332")
else:
access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:9332")
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a Sarnath address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a Sarnath address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
print access.gettransaction(data)
except:
print access.gettransaction()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
gen= raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = raw_input("Enter old wallet passphrase: ")
pwd2 = raw_input("Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported"
# ===== iannkwon/Sarnath :: contrib/bitrpc/bitrpc.py (MIT) =====
import matplotlib.pyplot as plt
def plots(epochs, train_acc, test_acc, train_loss, test_loss, train_error, test_error,filename):
plt.style.use('bmh')
fig=plt.figure(figsize=(8,6))
plt.plot(epochs,train_acc, 'r', epochs,test_acc, 'g')
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train_acc', 'test_acc'], loc='upper left')
fig.savefig(filename + '_accuracy.png')
fig=plt.figure(figsize=(8,6))
plt.plot(epochs,train_loss, 'r', epochs,test_loss, 'g')
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train_loss', 'test_loss'], loc='upper left')
fig.savefig(filename + '_loss.png')
fig=plt.figure(figsize=(8,6))
plt.plot(epochs,train_error, 'r', epochs,test_error, 'g')
plt.title('model error rate')
plt.ylabel('error rate')
plt.xlabel('epoch')
plt.legend(['train_error', 'test_error'], loc='upper left')
fig.savefig(filename + '_error.png')
plt.close('all')
def write_csv(filename, train_acc,test_acc,train_loss,test_loss,train_error,test_error,epoch):
if epoch==0:
with open(filename, 'w') as f:
            f.write('train_acc,test_acc,train_loss,test_loss,train_error,test_error\n')
f.write('{0},{1},{2},{3},{4},{5}\n'.format(train_acc[-1],\
test_acc[-1],\
train_loss[-1],\
test_loss[-1],\
train_error[-1],\
test_error[-1]))
else:
with open(filename, 'a') as f:
f.write('{0},{1},{2},{3},{4},{5}\n'.format(train_acc[-1],\
test_acc[-1],\
train_loss[-1],\
test_loss[-1],\
train_error[-1],\
test_error[-1]))
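# A minimal, hypothetical usage sketch: both helpers take per-epoch history
# lists plus a filename prefix / CSV path. The numbers below are made up.
if __name__ == '__main__':
    epochs = [0, 1, 2]
    tr_acc, te_acc = [0.60, 0.75, 0.82], [0.58, 0.70, 0.78]
    tr_loss, te_loss = [1.2, 0.8, 0.6], [1.3, 0.9, 0.7]
    tr_err = [1 - a for a in tr_acc]
    te_err = [1 - a for a in te_acc]
    plots(epochs, tr_acc, te_acc, tr_loss, te_loss, tr_err, te_err, 'demo')
    for epoch in epochs:
        # write_csv records only the last element of each list per call
        write_csv('demo_log.csv', tr_acc[:epoch + 1], te_acc[:epoch + 1],
                  tr_loss[:epoch + 1], te_loss[:epoch + 1],
                  tr_err[:epoch + 1], te_err[:epoch + 1], epoch)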
# ===== gahshiv/DenseNet-pytorch :: data_utils.py (MIT) =====
###############################################################################
#
# Tests for XlsxWriter.
#
# SPDX-License-Identifier: BSD-2-Clause
# Copyright (c), 2013-2022, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename('chart_data_labels17.xlsx')
self.ignore_elements = {'xl/charts/chart1.xml': ['<c:formatCode']}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({'type': 'stock'})
date_format = workbook.add_format({'num_format': 14})
chart.axis_ids = [45740032, 45747200]
data = [
[39083, 39084, 39085, 39086, 39087],
[27.2, 25.03, 19.05, 20.34, 18.5],
[23.49, 19.55, 15.12, 17.84, 16.34],
[25.45, 23.05, 17.32, 20.45, 17.34],
]
for row in range(5):
worksheet.write(row, 0, data[0][row], date_format)
worksheet.write(row, 1, data[1][row])
worksheet.write(row, 2, data[2][row])
worksheet.write(row, 3, data[3][row])
worksheet.set_column('A:D', 11)
chart.add_series({
'categories': '=Sheet1!$A$1:$A$5',
'values': '=Sheet1!$B$1:$B$5',
})
chart.add_series({
'categories': '=Sheet1!$A$1:$A$5',
'values': '=Sheet1!$C$1:$C$5',
})
chart.add_series({
'categories': '=Sheet1!$A$1:$A$5',
'values': '=Sheet1!$D$1:$D$5',
'data_labels': {'value': 1, 'position': 'right'},
})
worksheet.insert_chart('E9', chart)
workbook.close()
self.assertExcelEqual()
# ===== hugovk/XlsxWriter :: xlsxwriter/test/comparison/test_chart_data_labels17.py (BSD-2-Clause) =====
from datetime import timedelta
from typing import NamedTuple, Optional
class ErdAdvantiumKitchenTimerMinMax(NamedTuple):
"""Defines min/max kitchen timer settings"""
min_time: timedelta
max_time: timedelta
raw_value: Optional[str]
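# A hypothetical construction sketch (field values are illustrative only):
#
#     limits = ErdAdvantiumKitchenTimerMinMax(
#         min_time=timedelta(seconds=30),
#         max_time=timedelta(hours=2),
#         raw_value=None,
#     )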
# ===== ChevySSinSD/gehome :: gehomesdk/erd/values/advantium/erd_advantium_kitchen_timer_min_max.py (MIT) =====
#!/usr/bin/env python3
# Copyright (c) 2018-2020 The Ludirium Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Useful util functions for testing the wallet"""
from collections import namedtuple
from test_framework.address import (
byte_to_base58,
key_to_p2pkh,
key_to_p2sh_p2wpkh,
key_to_p2wpkh,
script_to_p2sh,
script_to_p2sh_p2wsh,
script_to_p2wsh,
)
from test_framework.key import ECKey
from test_framework.script import (
CScript,
OP_2,
OP_3,
OP_CHECKMULTISIG,
)
from test_framework.script_util import (
key_to_p2pkh_script,
key_to_p2wpkh_script,
script_to_p2sh_script,
script_to_p2wsh_script,
)
from test_framework.util import hex_str_to_bytes
Key = namedtuple('Key', ['privkey',
'pubkey',
'p2pkh_script',
'p2pkh_addr',
'p2wpkh_script',
'p2wpkh_addr',
'p2sh_p2wpkh_script',
'p2sh_p2wpkh_redeem_script',
'p2sh_p2wpkh_addr'])
Multisig = namedtuple('Multisig', ['privkeys',
'pubkeys',
'p2sh_script',
'p2sh_addr',
'redeem_script',
'p2wsh_script',
'p2wsh_addr',
'p2sh_p2wsh_script',
'p2sh_p2wsh_addr'])
def get_key(node):
"""Generate a fresh key on node
    Returns a named tuple of privkey, pubkey and all addresses and scripts."""
addr = node.getnewaddress()
pubkey = node.getaddressinfo(addr)['pubkey']
return Key(privkey=node.dumpprivkey(addr),
pubkey=pubkey,
p2pkh_script=key_to_p2pkh_script(pubkey).hex(),
p2pkh_addr=key_to_p2pkh(pubkey),
p2wpkh_script=key_to_p2wpkh_script(pubkey).hex(),
p2wpkh_addr=key_to_p2wpkh(pubkey),
p2sh_p2wpkh_script=script_to_p2sh_script(key_to_p2wpkh_script(pubkey)).hex(),
p2sh_p2wpkh_redeem_script=key_to_p2wpkh_script(pubkey).hex(),
p2sh_p2wpkh_addr=key_to_p2sh_p2wpkh(pubkey))
def get_generate_key():
"""Generate a fresh key
    Returns a named tuple of privkey, pubkey and all addresses and scripts."""
eckey = ECKey()
eckey.generate()
privkey = bytes_to_wif(eckey.get_bytes())
pubkey = eckey.get_pubkey().get_bytes().hex()
return Key(privkey=privkey,
pubkey=pubkey,
p2pkh_script=key_to_p2pkh_script(pubkey).hex(),
p2pkh_addr=key_to_p2pkh(pubkey),
p2wpkh_script=key_to_p2wpkh_script(pubkey).hex(),
p2wpkh_addr=key_to_p2wpkh(pubkey),
p2sh_p2wpkh_script=script_to_p2sh_script(key_to_p2wpkh_script(pubkey)).hex(),
p2sh_p2wpkh_redeem_script=key_to_p2wpkh_script(pubkey).hex(),
p2sh_p2wpkh_addr=key_to_p2sh_p2wpkh(pubkey))
def get_multisig(node):
"""Generate a fresh 2-of-3 multisig on node
    Returns a named tuple of privkeys, pubkeys and all addresses and scripts."""
addrs = []
pubkeys = []
for _ in range(3):
addr = node.getaddressinfo(node.getnewaddress())
addrs.append(addr['address'])
pubkeys.append(addr['pubkey'])
script_code = CScript([OP_2] + [hex_str_to_bytes(pubkey) for pubkey in pubkeys] + [OP_3, OP_CHECKMULTISIG])
witness_script = script_to_p2wsh_script(script_code)
return Multisig(privkeys=[node.dumpprivkey(addr) for addr in addrs],
pubkeys=pubkeys,
p2sh_script=script_to_p2sh_script(script_code).hex(),
p2sh_addr=script_to_p2sh(script_code),
redeem_script=script_code.hex(),
p2wsh_script=witness_script.hex(),
p2wsh_addr=script_to_p2wsh(script_code),
p2sh_p2wsh_script=script_to_p2sh_script(witness_script).hex(),
p2sh_p2wsh_addr=script_to_p2sh_p2wsh(script_code))
def test_address(node, address, **kwargs):
"""Get address info for `address` and test whether the returned values are as expected."""
addr_info = node.getaddressinfo(address)
for key, value in kwargs.items():
if value is None:
if key in addr_info.keys():
raise AssertionError("key {} unexpectedly returned in getaddressinfo.".format(key))
elif addr_info[key] != value:
raise AssertionError("key {} value {} did not match expected value {}".format(key, addr_info[key], value))
def bytes_to_wif(b, compressed=True):
if compressed:
b += b'\x01'
return byte_to_base58(b, 239)
def generate_wif_key():
# Makes a WIF privkey for imports
k = ECKey()
k.generate()
return bytes_to_wif(k.get_bytes(), k.is_compressed)
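# A minimal sketch of the node-independent helpers above (get_key and
# get_multisig additionally require a running test node); assumes the
# test_framework package is importable:
if __name__ == '__main__':
    key = get_generate_key()           # fresh key plus derived addresses
    print(key.p2wpkh_addr)
    print(bytes_to_wif(b'\x01' * 32))  # WIF encoding of a fixed 32-byte secret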
# ===== ludirium/ludirium :: test/functional/test_framework/wallet_util.py (MIT) =====
import pymongo
import sys
# establish a connection to the database
# note this uses the now deprecated Connection class, as we did in the lecture.
# MongoClient is the preferred way of connecting.
connection = pymongo.Connection("mongodb://localhost", safe=True)
# get a handle to the school database
db=connection.school
scores = db.scores
query = {}  # empty filter: match an arbitrary document
try:
doc = scores.find_one(query)
except:
print "Unexpected error:", sys.exc_info()[0]
print doc
# ===== hemmerling/nosql-mongodb2013 :: src/m101p/week02/lesson_files/hemmerling_week2_01.py (Apache-2.0) =====
from django.contrib import admin
from .models import Arts, Comments, Tags, ArtworksTags, Stili, Umetnina, Umetnik
# Register your models here.
admin.site.register(Umetnik)
admin.site.register(Umetnina)
admin.site.register(Stili)
admin.site.register(Arts)
admin.site.register(Comments)
admin.site.register(Tags)
admin.site.register(ArtworksTags)
# admin.site.register(ArtworkLikes)
# ===== jaanos/OPB-umetnine :: umetnine/artists/admin.py (MIT) =====
# Copyright Istio Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import collections
import os
import json
import argparse
import subprocess
import shlex
import uuid
from fortio import METRICS_START_SKIP_DURATION, METRICS_END_SKIP_DURATION
import sys
if sys.version_info.major == 2:
from commands import getoutput
else:
from subprocess import getoutput
POD = collections.namedtuple('Pod', ['name', 'namespace', 'ip', 'labels'])
def pod_info(filterstr="", namespace="twopods", multi_ok=True):
cmd = "kubectl -n {namespace} get pod {filterstr} -o json".format(
namespace=namespace, filterstr=filterstr)
op = getoutput(cmd)
o = json.loads(op)
items = o['items']
if not multi_ok and len(items) > 1:
raise Exception("more than one found " + op)
if not items:
raise Exception("no pods found with command [" + cmd + "]")
i = items[0]
return POD(i['metadata']['name'], i['metadata']['namespace'],
i['status']['podIP'], i['metadata']['labels'])
def run_command(command):
process = subprocess.Popen(shlex.split(command))
process.wait()
def run_command_sync(command):
op = getoutput(command)
return op.strip()
class Fortio:
ports = {
"http": {"direct_port": 8077, "port": 8080, "ingress": 80},
"grpc": {"direct_port": 8076, "port": 8079, "ingress": 80},
"direct_envoy": {"direct_port": 8076, "port": 8079},
}
def __init__(
self,
conn=None,
qps=None,
duration=None,
size=None,
mode="http",
mixer_mode="mixer",
mixer_cache=True,
perf_record=False,
server="fortioserver",
client="fortioclient",
additional_args=None,
filter_fn=None,
labels=None,
baseline=False,
serversidecar=False,
clientsidecar=True,
ingress=None,
mesh="istio"):
self.run_id = str(uuid.uuid4()).partition('-')[0]
self.conn = conn
self.qps = qps
self.size = size
self.duration = duration
self.mode = mode
self.ns = os.environ.get("NAMESPACE", "twopods")
# bucket resolution in seconds
self.r = "0.00005"
self.mixer_mode = mixer_mode
self.mixer_cache = mixer_cache
self.perf_record = perf_record
self.server = pod_info("-lapp=" + server, namespace=self.ns)
self.client = pod_info("-lapp=" + client, namespace=self.ns)
self.additional_args = additional_args
self.filter_fn = filter_fn
self.labels = labels
self.run_baseline = baseline
self.run_serversidecar = serversidecar
self.run_clientsidecar = clientsidecar
self.run_ingress = ingress
if mesh == "linkerd":
self.mesh = "linkerd"
elif mesh == "istio":
self.mesh = "istio"
else:
sys.exit("invalid mesh %s, must be istio or linkerd" % mesh)
def nosidecar(self, fortio_cmd):
basestr = "http://{svc}:{port}/echo?size={size}"
if self.mode == "grpc":
basestr = "-payload-size {size} {svc}:{port}"
return fortio_cmd + "_base " + basestr.format(
svc=self.server.ip, port=self.ports[self.mode]["direct_port"], size=self.size)
def serversidecar(self, fortio_cmd):
basestr = "http://{svc}:{port}/echo?size={size}"
if self.mode == "grpc":
basestr = "-payload-size {size} {svc}:{port}"
return fortio_cmd + "_serveronly " + basestr.format(
svc=self.server.ip, port=self.ports[self.mode]["port"], size=self.size)
def bothsidecar(self, fortio_cmd):
basestr = "http://{svc}:{port}/echo?size={size}"
if self.mode == "grpc":
basestr = "-payload-size {size} {svc}:{port}"
return fortio_cmd + "_both " + basestr.format(
svc=self.server.labels["app"], port=self.ports[self.mode]["port"], size=self.size)
def ingress(self, fortio_cmd):
svc = self.run_ingress
if ':' not in svc:
svc += ":{port}".format(port=self.ports[self.mode]["ingress"])
return fortio_cmd + "_ingress http://{svc}/echo?size={size}".format(
svc=svc, size=self.size)
def run(self, conn, qps, size, duration):
size = size or self.size
if duration is None:
duration = self.duration
labels = self.run_id
labels += "_qps_" + str(qps)
labels += "_c_" + str(conn)
labels += "_" + str(size)
# Mixer label
labels += "_"
labels += self.mixer_mode
if self.labels is not None:
labels += "_" + self.labels
grpc = ""
if self.mode == "grpc":
grpc = "-grpc -ping"
fortio_cmd = (
"fortio load -c {conn} -qps {qps} -t {duration}s -a -r {r} {grpc} -httpbufferkb=128 " +
"-labels {labels}").format(
conn=conn,
qps=qps,
duration=duration,
r=self.r,
grpc=grpc,
labels=labels)
if self.run_ingress:
kubectl_exec(self.client.name, self.ingress(fortio_cmd))
if self.perf_record:
run_perf(
self.mesh,
self.server.name,
labels + "_srv_ingress",
duration=40)
if self.run_serversidecar:
kubectl_exec(self.client.name, self.serversidecar(fortio_cmd))
if self.perf_record:
run_perf(
self.mesh,
self.server.name,
labels + "_srv_serveronly",
duration=40)
if self.run_clientsidecar:
kubectl_exec(self.client.name, self.bothsidecar(fortio_cmd))
if self.perf_record:
run_perf(
self.mesh,
self.server.name,
labels + "_srv_bothsidecars",
duration=40)
if self.run_baseline:
kubectl_exec(self.client.name, self.nosidecar(fortio_cmd))
PERFCMD = "/usr/lib/linux-tools/4.4.0-131-generic/perf"
PERFSH = "get_perfdata.sh"
PERFWD = "/etc/istio/proxy/"
def run_perf(mesh, pod, labels, duration=20):
filename = labels + "_perf.data"
filepath = PERFWD + filename
perfpath = PERFWD + PERFSH
# copy executable over
kubectl_cp(PERFSH, pod + ":" + perfpath, mesh + "-proxy")
kubectl_exec(
pod,
"{perf_cmd} {filename} {duration}".format(
perf_cmd=perfpath,
filename=filename,
duration=duration),
container=mesh + "-proxy")
kubectl_cp(pod + ":" + filepath + ".perf", filename + ".perf", mesh + "-proxy")
run_command_sync("../flame/flame.sh " + filename + ".perf")
def kubectl_cp(from_file, to_file, container):
namespace = os.environ.get("NAMESPACE", "twopods")
cmd = "kubectl --namespace {namespace} cp {from_file} {to_file} -c {container}".format(
namespace=namespace,
from_file=from_file,
to_file=to_file,
container=container)
print(cmd)
run_command_sync(cmd)
def kubectl_exec(pod, remote_cmd, runfn=run_command, container=None):
namespace = os.environ.get("NAMESPACE", "twopods")
c = ""
if container is not None:
c = "-c " + container
cmd = "kubectl --namespace {namespace} exec -i -t {pod} {c} -- {remote_cmd}".format(
pod=pod,
remote_cmd=remote_cmd,
c=c,
namespace=namespace)
print(cmd)
runfn(cmd)
def rc(command):
process = subprocess.Popen(command.split(), stdout=subprocess.PIPE)
while True:
output = process.stdout.readline()
if output == '' and process.poll() is not None:
break
if output:
print(output.strip() + "\n")
return process.poll()
def run(args):
min_duration = METRICS_START_SKIP_DURATION + METRICS_END_SKIP_DURATION
if args.duration <= min_duration:
print("Duration must be greater than {min_duration}".format(
min_duration=min_duration))
exit(1)
fortio = Fortio(
conn=args.conn,
qps=args.qps,
duration=args.duration,
size=args.size,
perf_record=args.perf,
labels=args.labels,
baseline=args.baseline,
serversidecar=args.serversidecar,
clientsidecar=args.clientsidecar,
ingress=args.ingress,
mode=args.mode,
mesh=args.mesh,
mixer_mode=args.mixer_mode)
for conn in args.conn:
for qps in args.qps:
fortio.run(conn=conn, qps=qps,
duration=args.duration, size=args.size)
def csv_to_int(s):
return [int(i) for i in s.split(",")]
def get_parser():
parser = argparse.ArgumentParser("Run performance test")
parser.add_argument(
"conn",
help="number of connections, comma separated list",
type=csv_to_int,)
parser.add_argument(
"qps",
help="qps, comma separated list",
type=csv_to_int,)
parser.add_argument(
"duration",
help="duration in seconds of the extract",
type=int)
parser.add_argument(
"--size",
help="size of the payload",
type=int,
default=1024)
parser.add_argument(
"--mesh",
help="istio or linkerd",
default="istio")
parser.add_argument(
"--mixer_mode",
help="run with different mixer configurations: mixer, nomixer, mixerv2",
default="mixer")
parser.add_argument(
"--client",
help="where to run the test from",
default=None)
parser.add_argument(
"--server",
help="pod ip of the server",
default=None)
parser.add_argument(
"--perf",
help="also run perf and produce flame graph",
default=False)
parser.add_argument(
"--ingress",
help="run traffic through ingress",
default=None)
parser.add_argument(
"--labels",
help="extra labels",
default=None)
parser.add_argument(
"--mode",
help="http or grpc",
default="http")
define_bool(parser, "baseline", "run baseline for all", False)
define_bool(parser, "serversidecar",
"run serversidecar-only for all", False)
define_bool(parser, "clientsidecar",
"run clientsidecar and serversidecar for all", True)
return parser
def define_bool(parser, opt, help_arg, default_val):
parser.add_argument(
"--" + opt, help=help_arg, dest=opt, action='store_true')
parser.add_argument(
"--no-" + opt, help="do not " + help_arg, dest=opt, action='store_false')
val = {opt: default_val}
parser.set_defaults(**val)
def main(argv):
args = get_parser().parse_args(argv)
print(args)
return run(args)
if __name__ == "__main__":
import sys
sys.exit(main(sys.argv[1:]))
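# Example invocation sketch (arguments per get_parser above; the values are
# illustrative): 16- and 64-connection runs at 500 and 1000 qps, 240 seconds
# each, with a baseline run added:
#
#     python runner.py 16,64 500,1000 240 --size 1024 --baseline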
# ===== jwendell/tools :: perf/benchmark/runner/runner.py (Apache-2.0) =====
# Usage: testWordsInCorpus.py [language] {corpus file}
# If no corpus file is named, the programme will try to load a corresponding cPickle file.
#
# German corpus: /mounts/data/proj/huiming/SIGMORPHON/dewiki-20151102-pages-articles-multistream.xml
#
# This script finds words that should belong to a paradigm in the corpus and adds them (for training?).
from getEditTrees import editTreesByPos
from getEditTrees import applyOnlyTree
import sys
import pickle as cPickle
toAdd = {} # lemma to things that should be autocompleted
uniquenessCheck = {} # (lemma, form) -> word, avoiding that we add things we are unsure about
# New autocomplete. Finds the union and checks if paradigms can complete each other.
# We assume the union consists of at least 2 edit trees.
# TODO: account for umlauts.
# Returns a dictionary lemma -> (et, tags) with things to add to the original one.
# TODO: something is still wrong here; fix it.
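# Illustrative example (hypothetical data): if "geben" is known with edit
# trees for present, past and participle forms while "nehmen" only has the
# first two, and "nehmen"'s (edit tree, tag) set is a subset of "geben"'s,
# the participle edit tree is applied to "nehmen". The generated word is
# kept only if it occurs more than 3 times in the corpus (at most one
# missing form is tolerated).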
def autoComplete(lemma1, etTag1, lemma2, etTag2, corpusWords):
    etAndTagToAdd = set()
    notFound = 0
    allRight = True
    for (et, form) in etTag1.difference(etTag2):
        result = applyOnlyTree(lemma2, et)
        if result == '#error#':
            allRight = False
            break
        if result not in corpusWords or corpusWords[result] <= 3:  # orig is 3
            notFound += 1
            if notFound == 2:
                allRight = False
                break
        else:
            etAndTagToAdd.add((et, form))
    if allRight and etAndTagToAdd:
        if lemma2 not in toAdd:
            toAdd[lemma2] = set()
        toAdd[lemma2] = toAdd[lemma2].union(etAndTagToAdd)
        for (et, form) in etAndTagToAdd:
            if (lemma2, form) not in uniquenessCheck:
                uniquenessCheck[(lemma2, form)] = set()
            else:
                if applyOnlyTree(lemma2, et) not in uniquenessCheck[(lemma2, form)]:
                    print("yeay")
            uniquenessCheck[(lemma2, form)].add(applyOnlyTree(lemma2, et))
# Lemma 1 has more ETs than lemma 2.
# Returns a dictionary lemma -> (et, tags) with things to add to the original one.
def autoComplete2(lemma1, etTag1, lemma2, etTag2, corpusWords):
etAndTagToAdd = set()
notFound = 0
allRight = True
for (et, form) in etTag1.difference(etTag2):
result = applyOnlyTree(lemma2, et)
if result == '#error#':
allRight = False
break
if result not in corpusWords or corpusWords[result] <=3: # orig is 3
notFound += 1
if notFound == 2:
allRight = False
break
else:
etAndTagToAdd.add((et, form))
if allRight and etAndTagToAdd:
if lemma2 not in toAdd:
toAdd[lemma2] = set()
toAdd[lemma2] = toAdd[lemma2].union(etAndTagToAdd)
for (et, form) in etAndTagToAdd:
if (lemma2, form) not in uniquenessCheck:
uniquenessCheck[(lemma2, form)] = set()
uniquenessCheck[(lemma2, form)].add(applyOnlyTree(lemma2, et))
# Test if the group of (edit tree, tag) combinations for a lemma is a subset of the one for another lemma.
# If yes, try whether the missing edit trees are applicable and whether the corresponding word appears in the corpus.
def getAdditionalWords(lemmaToEtAndTag, corpusWords):
isTrue = 0
isFalse = 0
for lemma1, etTag1 in lemmaToEtAndTag.items():
for lemma2, etTag2 in lemmaToEtAndTag.items():
if len(etTag1) <= 1 or len(etTag2) <= 1: # for now, don't complete things with 0 or only 1 entry. We are just not sure enough.
isFalse += 1
continue
maybeSame = False
if len(etTag1) > len(etTag2)+2:
if len(etTag1) >= 3 and len(etTag2.union(etTag1)) > 1 and etTag2.issubset(etTag1):
maybeSame = True
autoComplete(lemma1, etTag1, lemma2, etTag2, corpusWords)
isTrue += 1
else:
isFalse += 1
elif len(etTag2) > len(etTag1)+2:
if len(etTag2) >= 3 and len(etTag2.union(etTag1)) > 1 and etTag1.issubset(etTag2):
maybeSame = True
autoComplete(lemma2, etTag2, lemma1, etTag1, corpusWords)
isTrue += 1
else:
isFalse += 1
#print(str(len(toAdd)) + ' words have been added.')
#print("Is subset: " + str(isTrue))
#print("No subset: " + str(isFalse))
#sys.exit(0)
noWordsToAdd = 0
for lemma, aSet in toAdd.items():
noWordsToAdd += len(aSet)
'''
for (lemma, form), word in uniquenessCheck.items():
if len(word) > 1:
print(word)
sys.exit(0)
'''
return noWordsToAdd
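# Illustrative sketch (hypothetical data): if lemma "geben" carries the
# (edit tree, tag) pairs {A, B, C, D} and lemma "nehmen" only {A, B}, then
# {A, B} is a subset of {A, B, C, D}, so autoComplete tries to apply the
# missing trees C and D to "nehmen". A completion is only kept when the
# generated word form occurs more than 3 times in the corpus, and at most
# one missing form is tolerated before the pair is rejected.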
def announce(*objs):
    print("# ", *objs, file=sys.stderr)
if __name__ == "__main__":
lang = sys.argv[1]
if len(sys.argv) == 2:
usePickle = True
else:
usePickle = False
posToEt, lemmaToEtAndTag = editTreesByPos(lang)
for lemma, aSet in lemmaToEtAndTag.items():
for (et, form) in aSet:
if (lemma, form) not in uniquenessCheck:
uniquenessCheck[(lemma, form)] = set()
uniquenessCheck[(lemma, form)].add(applyOnlyTree(lemma, et))
#print(applyOnlyTree(lemma, et))
#sys.exit(0)
if not usePickle:
# Read the bonus corpus.
announce('Start reading corpus...')
corpusWords = {} # word to its frequency
with open(sys.argv[2], 'r') as corpus_file:
for line in corpus_file:
#tokens = tokenize.word_tokenize(line.strip())
tokens = line.strip().split(' ')
for token in tokens:
if token not in corpusWords:
corpusWords[token] = 0
corpusWords[token] += 1
announce('Done reading corpus.')
# Store the dictionary to a binary file.
print('Store the dictionary with the corpus words to a binary file...')
save_file = open('/mounts/data/proj/huiming/SIGMORPHON/corpusWords_' + lang, 'wb')
cPickle.dump(corpusWords, save_file, -1)
save_file.close()
print('Done.')
else:
# Load the corpusWords dictionary.
announce('Load the words with cPickle...')
vocListFile = open('/mounts/data/proj/huiming/SIGMORPHON/corpusWords_' + lang, 'rb')
corpusWords = cPickle.load(vocListFile)
vocListFile.close()
announce('Words loaded.')
lastNumber = 0
noWordsToAdd = 1
while noWordsToAdd > lastNumber:
lastNumber = noWordsToAdd
noWordsToAdd = getAdditionalWords(lemmaToEtAndTag, corpusWords)
for lemma, aSet in lemmaToEtAndTag.items():
if lemma in toAdd:
lemmaToEtAndTag[lemma] = lemmaToEtAndTag[lemma].union(toAdd[lemma])
        announce('Number of words to add: ' + str(noWordsToAdd))
# The union did not work well for some reason. Therefore, use toAdd directly.
additionalWordsCounter = 0
with open('/mounts/Users/cisintern/huiming/SIGMORPHON/Code/data/' + lang + '-bigger-task1-train', 'w') as out_file:
with open('/mounts/Users/cisintern/huiming/SIGMORPHON/Code/data/' + lang + '-task1-train', 'r') as original_file:
for line in original_file:
out_file.write(line)
for lemma, etAndTagSet in toAdd.items():
for (et, form) in etAndTagSet:
if len(uniquenessCheck[(lemma, form)]) > 1:
continue
out_file.write(lemma + '\t' + form + '\t' + applyOnlyTree(lemma, et) + '\n')
additionalWordsCounter += 1
print(str(additionalWordsCounter) + ' words have been added.')
| 34.570048 | 132 | 0.654556 | [
"MIT"
] | oncebasun/seq2seq-theano | MyAlgorithm/addWordsToParadigms_old.py | 7,156 | Python |
#!/usr/bin/env python2
from setuptools import setup
from setuptools import find_packages
setup(
name="rover",
version="0.1",
    description="Algorithm for risk and sensor quality aware sensor " +
                "coverage for quadrotors",
author="Alex Wallar",
author_email="wallarelvo@gmail.com",
packages=find_packages(),
install_requires=[
"numpy",
"scipy"
],
data_files=[
(
'config',
['configs/config.json'],
)
]
)
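# Hypothetical usage sketch: from the repository root,
#   pip install .      # regular install
#   pip install -e .   # editable install for development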
| 20.04 | 70 | 0.59481 | [
"Apache-2.0"
] | wallarelvo/rover | setup.py | 501 | Python |
#!/usr/bin/python
#coding:utf-8
import time
import json
import requests
from selenium import webdriver
filename = 'a.csv'
url = 'http://www.icourse163.org/university/view/all.htm#/'
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:57.0) Gecko/20100101 Firefox/57.0', 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'}
# with open(filename, 'w+') as file:
#     file.write("University,Course,Duration,Workload,Content Type,Category\n")
file = open(filename, 'w+')
print("University,Course,Duration,Workload,Content Type,Category")
file.write("University,Course,Duration,Workload,Content Type,Category\n")
browser = webdriver.PhantomJS()
browser2 = webdriver.PhantomJS()
browser3 = webdriver.PhantomJS()
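# three separate headless browsers, one per nesting level:
# university list -> course list -> course detail page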
browser.get(url)
# universities
university = browser.find_elements_by_class_name("u-usity")
for i in university:
university_url = i.get_attribute("href")
university_name = i.find_element_by_id("").get_attribute("alt")
browser2.get(university_url)
    # courses
course = browser2.find_elements_by_class_name("g-cell1")
for j in course:
course_url = "http://www.icourse163.org" + j.get_attribute("data-href")
course_name = j.find_element_by_class_name("card").find_element_by_class_name("f-f0").text
browser3.get(course_url)
        # course information
course_text = browser3.find_elements_by_class_name("block")
try:
k0 = course_text[0].find_element_by_class_name("t2").text
k1 = course_text[1].find_element_by_class_name("t2").text
k2 = course_text[2].find_element_by_class_name("t2").text
k3 = course_text[3].find_element_by_class_name("t2").text
except Exception as e:
k3 = k2
k2 = k1
k1 = None
            k0 = None
finally:
print("%s,%s,%s,%s,%s,%s" % (university_name,course_name,k0,k1,k2,k3))
file.write("%s,%s,%s,%s,%s,%s\n" % (university_name,course_name,k0,k1,k2,k3))
# with open(filename, 'a+') as file:
# file.write("%s,%s,%s,%s,%s,%s\n" % (university_name,course_name,k0,k1,k2,k3))
browser3.close()
browser2.close()
browser.close()
| 32.307692 | 183 | 0.645714 | [
"MIT"
] | yeonzi/163course_spider | spider.py | 2,236 | Python |
#!/usr/bin/env python
# This example uses Uvicorn package that must be installed. However, it can be
# replaced with any other ASGI-compliant server.
#
# NOTE: Python 3.6 requires aiocontextvars package to be installed.
#
# Run: python app_global_request.py
import rollbar
import uvicorn
from rollbar.contrib.starlette import LoggerMiddleware
from starlette.applications import Starlette
from starlette.responses import JSONResponse
# Integrate Rollbar with Starlette application
app = Starlette()
app.add_middleware(LoggerMiddleware) # should be added as the last middleware
async def get_user_agent():
# Global access to the current request object
request = rollbar.get_request()
user_agent = request.headers['User-Agent']
return user_agent
# $ curl -i http://localhost:8888
@app.route('/')
async def root(request):
user_agent = await get_user_agent()
return JSONResponse({'user-agent': user_agent})
if __name__ == '__main__':
uvicorn.run(app, host='localhost', port=8888)
| 26.025641 | 78 | 0.759606 | [
"MIT"
] | Apep8/pyrollbar | rollbar/examples/starlette/app_global_request.py | 1,015 | Python |
# Configuration file for jupyter-notebook.
#------------------------------------------------------------------------------
# Configurable configuration
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# SingletonConfigurable configuration
#------------------------------------------------------------------------------
# A configurable that only allows one instance.
#
# This class is for classes that should only have one instance of itself or
# *any* subclass. To create and retrieve such a class use the
# :meth:`SingletonConfigurable.instance` method.
#------------------------------------------------------------------------------
# Application configuration
#------------------------------------------------------------------------------
# This is an application.
# The date format used by logging formatters for %(asctime)s
# c.Application.log_datefmt = '%Y-%m-%d %H:%M:%S'
# The Logging format template
# c.Application.log_format = '[%(name)s]%(highlevel)s %(message)s'
# Set the log level by value or name.
# c.Application.log_level = 30
#------------------------------------------------------------------------------
# JupyterApp configuration
#------------------------------------------------------------------------------
# Base class for Jupyter applications
# Answer yes to any prompts.
c.JupyterApp.answer_yes = True
# Full path of a config file.
# c.JupyterApp.config_file = u''
# Generate default config file.
# c.JupyterApp.generate_config = False
# Specify a config file to load.
# c.JupyterApp.config_file_name = u''
#------------------------------------------------------------------------------
# NotebookApp configuration
#------------------------------------------------------------------------------
# The number of additional ports to try if the specified port is not available.
c.NotebookApp.port_retries = 0
# Extra variables to supply to jinja templates when rendering.
# c.NotebookApp.jinja_template_vars = traitlets.Undefined
# The url for MathJax.js.
# c.NotebookApp.mathjax_url = ''
# Supply extra arguments that will be passed to Jinja environment.
# c.NotebookApp.jinja_environment_options = traitlets.Undefined
# The IP address the notebook server will listen on.
c.NotebookApp.ip = '*'
# DEPRECATED use base_url
# c.NotebookApp.base_project_url = '/'
# Python modules to load as notebook server extensions. This is an experimental
# API, and may change in future releases.
# c.NotebookApp.server_extensions = traitlets.Undefined
# Note: These extensions require the ~/.jupyter path to exist; otherwise, errors will occur on startup.
c.NotebookApp.server_extensions=['ipyparallel.nbextension']
# The random bytes used to secure cookies. By default this is a new random
# number every time you start the Notebook. Set it to a value in a config file
# to enable logins to persist across server sessions.
#
# Note: Cookie secrets should be kept private, do not share config files with
# cookie_secret stored in plaintext (you can read the value from a file).
# c.NotebookApp.cookie_secret = ''
# The default URL to redirect to from `/`
# c.NotebookApp.default_url = '/tree'
# The port the notebook server will listen on.
c.NotebookApp.port = 8754
# The kernel spec manager class to use. Should be a subclass of
# `jupyter_client.kernelspec.KernelSpecManager`.
#
# The Api of KernelSpecManager is provisional and might change without warning
# between this version of IPython and the next stable one.
# c.NotebookApp.kernel_spec_manager_class = <class 'jupyter_client.kernelspec.KernelSpecManager'>
# Set the Access-Control-Allow-Origin header
#
# Use '*' to allow any origin to access your server.
#
# Takes precedence over allow_origin_pat.
c.NotebookApp.allow_origin = '*'
# The notebook manager class to use.
# c.NotebookApp.contents_manager_class = <class 'notebook.services.contents.filemanager.FileContentsManager'>
# Use a regular expression for the Access-Control-Allow-Origin header
#
# Requests from an origin matching the expression will get replies with:
#
# Access-Control-Allow-Origin: origin
#
# where `origin` is the origin of the request.
#
# Ignored if allow_origin is set.
# c.NotebookApp.allow_origin_pat = ''
# The full path to an SSL/TLS certificate file.
# c.NotebookApp.certfile = u''
# The logout handler class to use.
# c.NotebookApp.logout_handler_class = <class 'notebook.auth.logout.LogoutHandler'>
# The base URL for the notebook server.
#
# Leading and trailing slashes can be omitted, and will automatically be added.
c.NotebookApp.base_url = '/'
# The session manager class to use.
# c.NotebookApp.session_manager_class = <class 'notebook.services.sessions.sessionmanager.SessionManager'>
# Supply overrides for the tornado.web.Application that the IPython notebook
# uses.
# c.NotebookApp.tornado_settings = traitlets.Undefined
# The directory to use for notebooks and kernels.
c.NotebookApp.notebook_dir = u'/root/pipeline/myapps/jupyter/'
# The kernel manager class to use.
# c.NotebookApp.kernel_manager_class = <class 'notebook.services.kernels.kernelmanager.MappingKernelManager'>
# The file where the cookie secret is stored.
# c.NotebookApp.cookie_secret_file = u''
# Supply SSL options for the tornado HTTPServer. See the tornado docs for
# details.
# c.NotebookApp.ssl_options = traitlets.Undefined
#
# c.NotebookApp.file_to_run = ''
# DISABLED: use %pylab or %matplotlib in the notebook to enable matplotlib.
# c.NotebookApp.pylab = 'disabled'
# Whether to enable MathJax for typesetting math/TeX
#
# MathJax is the javascript library IPython uses to render math/LaTeX. It is
# very large, so you may want to disable it if you have a slow internet
# connection, or for offline use of the notebook.
#
# When disabled, equations etc. will appear as their untransformed TeX source.
# c.NotebookApp.enable_mathjax = True
# Reraise exceptions encountered loading server extensions?
# c.NotebookApp.reraise_server_extension_failures = False
# The base URL for websockets, if it differs from the HTTP server (hint: it
# almost certainly doesn't).
#
# Should be in the form of an HTTP origin: ws[s]://hostname[:port]
# c.NotebookApp.websocket_url = ''
# Whether to open in a browser after starting. The specific browser used is
# platform dependent and determined by the python standard library `webbrowser`
# module, unless it is overridden using the --browser (NotebookApp.browser)
# configuration option.
c.NotebookApp.open_browser = False
# Hashed password to use for web authentication.
#
# To generate, type in a python/IPython shell:
#
# from notebook.auth import passwd; passwd()
#
# The string should be of the form type:salt:hashed-password.
# c.NotebookApp.password = u''
# extra paths to look for Javascript notebook extensions
# c.NotebookApp.extra_nbextensions_path = traitlets.Undefined
# Set the Access-Control-Allow-Credentials: true header
# c.NotebookApp.allow_credentials = False
# Extra paths to search for serving static files.
#
# This allows adding javascript/css to be available from the notebook server
# machine, or overriding individual files in the IPython
# c.NotebookApp.extra_static_paths = traitlets.Undefined
# The login handler class to use.
# c.NotebookApp.login_handler_class = <class 'notebook.auth.login.LoginHandler'>
# Whether to trust or not X-Scheme/X-Forwarded-Proto and X-Real-Ip/X-Forwarded-
# For headerssent by the upstream reverse proxy. Necessary if the proxy handles
# SSL
# c.NotebookApp.trust_xheaders = False
# Extra paths to search for serving jinja templates.
#
# Can be used to override templates from notebook.templates.
# c.NotebookApp.extra_template_paths = traitlets.Undefined
# The config manager class to use
# c.NotebookApp.config_manager_class = <class 'notebook.services.config.manager.ConfigManager'>
# The full path to a private key file for usage with SSL/TLS.
# c.NotebookApp.keyfile = u''
# DEPRECATED, use tornado_settings
# c.NotebookApp.webapp_settings = traitlets.Undefined
# Specify what command to use to invoke a web browser when opening the notebook.
# If not specified, the default browser will be determined by the `webbrowser`
# standard library module, which allows setting of the BROWSER environment
# variable to override it.
# c.NotebookApp.browser = u''
#------------------------------------------------------------------------------
# LoggingConfigurable configuration
#------------------------------------------------------------------------------
# A parent class for Configurables that log.
#
# Subclasses have a log trait, and the default behavior is to get the logger
# from the currently running Application.
#------------------------------------------------------------------------------
# ConnectionFileMixin configuration
#------------------------------------------------------------------------------
# Mixin for configurable classes that work with connection files
# set the stdin (ROUTER) port [default: random]
# c.ConnectionFileMixin.stdin_port = 0
# Set the kernel's IP address [default localhost]. If the IP address is
# something other than localhost, then Consoles on other machines will be able
# to connect to the Kernel, so be careful!
# c.ConnectionFileMixin.ip = u''
# JSON file in which to store connection info [default: kernel-<pid>.json]
#
# This file will contain the IP, ports, and authentication key needed to connect
# clients to this kernel. By default, this file will be created in the security
# dir of the current profile, but can be specified by absolute path.
# c.ConnectionFileMixin.connection_file = ''
# set the control (ROUTER) port [default: random]
# c.ConnectionFileMixin.control_port = 0
# set the heartbeat port [default: random]
# c.ConnectionFileMixin.hb_port = 0
# set the shell (ROUTER) port [default: random]
# c.ConnectionFileMixin.shell_port = 0
#
# c.ConnectionFileMixin.transport = 'tcp'
# set the iopub (PUB) port [default: random]
# c.ConnectionFileMixin.iopub_port = 0
#------------------------------------------------------------------------------
# KernelManager configuration
#------------------------------------------------------------------------------
# Manages a single kernel in a subprocess on this host.
#
# This version starts kernels with Popen.
# DEPRECATED: Use kernel_name instead.
#
# The Popen Command to launch the kernel. Override this if you have a custom
# kernel. If kernel_cmd is specified in a configuration file, Jupyter does not
# pass any arguments to the kernel, because it cannot make any assumptions about
# the arguments that the kernel understands. In particular, this means that the
# kernel does not receive the option --debug if it given on the Jupyter command
# line.
# c.KernelManager.kernel_cmd = traitlets.Undefined
# Should we autorestart the kernel if it dies.
# c.KernelManager.autorestart = False
#------------------------------------------------------------------------------
# Session configuration
#------------------------------------------------------------------------------
# Object for handling serialization and sending of messages.
#
# The Session object handles building messages and sending them with ZMQ sockets
# or ZMQStream objects. Objects can communicate with each other over the
# network via Session objects, and only need to work with the dict-based IPython
# message spec. The Session will handle serialization/deserialization, security,
# and metadata.
#
# Sessions support configurable serialization via packer/unpacker traits, and
# signing with HMAC digests via the key/keyfile traits.
#
# Parameters ----------
#
# debug : bool
# whether to trigger extra debugging statements
# packer/unpacker : str : 'json', 'pickle' or import_string
# importstrings for methods to serialize message parts. If just
# 'json' or 'pickle', predefined JSON and pickle packers will be used.
# Otherwise, the entire importstring must be used.
#
# The functions must accept at least valid JSON input, and output *bytes*.
#
# For example, to use msgpack:
# packer = 'msgpack.packb', unpacker='msgpack.unpackb'
# pack/unpack : callables
# You can also set the pack/unpack callables for serialization directly.
# session : bytes
# the ID of this Session object. The default is to generate a new UUID.
# username : unicode
# username added to message headers. The default is to ask the OS.
# key : bytes
# The key used to initialize an HMAC signature. If unset, messages
# will not be signed or checked.
# keyfile : filepath
# The file containing a key. If this is set, `key` will be initialized
# to the contents of the file.
# Username for the Session. Default is your system username.
# c.Session.username = u'username'
# Threshold (in bytes) beyond which a buffer should be sent without copying.
# c.Session.copy_threshold = 65536
# The name of the packer for serializing messages. Should be one of 'json',
# 'pickle', or an import name for a custom callable serializer.
# c.Session.packer = 'json'
# Metadata dictionary, which serves as the default top-level metadata dict for
# each message.
# c.Session.metadata = traitlets.Undefined
# The maximum number of digests to remember.
#
# The digest history will be culled when it exceeds this value.
# c.Session.digest_history_size = 65536
# The UUID identifying this session.
# c.Session.session = u''
# The digest scheme used to construct the message signatures. Must have the form
# 'hmac-HASH'.
# c.Session.signature_scheme = 'hmac-sha256'
# execution key, for signing messages.
# c.Session.key = ''
# Debug output in the Session
# c.Session.debug = False
# The name of the unpacker for unserializing messages. Only used with custom
# functions for `packer`.
# c.Session.unpacker = 'json'
# path to file containing execution key.
# c.Session.keyfile = ''
# Threshold (in bytes) beyond which an object's buffer should be extracted to
# avoid pickling.
# c.Session.buffer_threshold = 1024
# The maximum number of items for a container to be introspected for custom
# serialization. Containers larger than this are pickled outright.
# c.Session.item_threshold = 64
#------------------------------------------------------------------------------
# MultiKernelManager configuration
#------------------------------------------------------------------------------
# A class for managing multiple kernels.
# The name of the default kernel to start
# c.MultiKernelManager.default_kernel_name = 'python2'
# The kernel manager class. This is configurable to allow subclassing of the
# KernelManager for customized behavior.
# c.MultiKernelManager.kernel_manager_class = 'jupyter_client.ioloop.IOLoopKernelManager'
#------------------------------------------------------------------------------
# MappingKernelManager configuration
#------------------------------------------------------------------------------
# A KernelManager that handles notebook mapping and HTTP error handling
#
# c.MappingKernelManager.root_dir = u''
#------------------------------------------------------------------------------
# ContentsManager configuration
#------------------------------------------------------------------------------
# Base class for serving files and directories.
#
# This serves any text or binary file, as well as directories, with special
# handling for JSON notebook documents.
#
# Most APIs take a path argument, which is always an API-style unicode path, and
# always refers to a directory.
#
# - unicode, not url-escaped
# - '/'-separated
# - leading and trailing '/' will be stripped
# - if unspecified, path defaults to '',
# indicating the root path.
# The base name used when creating untitled files.
# c.ContentsManager.untitled_file = 'untitled'
# Python callable or importstring thereof
#
# To be called on a contents model prior to save.
#
# This can be used to process the structure, such as removing notebook outputs
# or other side effects that should not be saved.
#
# It will be called as (all arguments passed by keyword)::
#
# hook(path=path, model=model, contents_manager=self)
#
# - model: the model to be saved. Includes file contents.
# Modifying this dict will affect the file that is stored.
# - path: the API path of the save destination
# - contents_manager: this ContentsManager instance
# c.ContentsManager.pre_save_hook = None
#
# c.ContentsManager.checkpoints_class = <class 'notebook.services.contents.checkpoints.Checkpoints'>
# Glob patterns to hide in file and directory listings.
# c.ContentsManager.hide_globs = traitlets.Undefined
# The base name used when creating untitled notebooks.
# c.ContentsManager.untitled_notebook = 'Untitled'
# The base name used when creating untitled directories.
# c.ContentsManager.untitled_directory = 'Untitled Folder'
#
# c.ContentsManager.checkpoints = traitlets.Undefined
#
# c.ContentsManager.checkpoints_kwargs = traitlets.Undefined
#------------------------------------------------------------------------------
# FileContentsManager configuration
#------------------------------------------------------------------------------
# DEPRECATED, use post_save_hook
# c.FileContentsManager.save_script = False
#
# c.FileContentsManager.root_dir = u''
# Python callable or importstring thereof
#
# to be called on the path of a file just saved.
#
# This can be used to process the file on disk, such as converting the notebook
# to a script or HTML via nbconvert.
#
# It will be called as (all arguments passed by keyword)::
#
# hook(os_path=os_path, model=model, contents_manager=instance)
#
# - path: the filesystem path to the file just written - model: the model
# representing the file - contents_manager: this ContentsManager instance
# c.FileContentsManager.post_save_hook = None
#------------------------------------------------------------------------------
# NotebookNotary configuration
#------------------------------------------------------------------------------
# A class for computing and verifying notebook signatures.
# The number of notebook signatures to cache. When the number of signatures
# exceeds this value, the oldest 25% of signatures will be culled.
# c.NotebookNotary.cache_size = 65535
# The secret key with which notebooks are signed.
# c.NotebookNotary.secret = ''
# The sqlite file in which to store notebook signatures. By default, this will
# be in your Jupyter runtime directory. You can set it to ':memory:' to disable
# sqlite writing to the filesystem.
# c.NotebookNotary.db_file = u''
# The hashing algorithm used to sign notebooks.
# c.NotebookNotary.algorithm = 'sha256'
# The file where the secret key is stored.
# c.NotebookNotary.secret_file = u''
#------------------------------------------------------------------------------
# KernelSpecManager configuration
#------------------------------------------------------------------------------
# Whitelist of allowed kernel names.
#
# By default, all installed kernels are allowed.
# c.KernelSpecManager.whitelist = traitlets.Undefined
| 36.942197 | 109 | 0.668388 | [
"Apache-2.0"
] | TrinathY/pipeline | config/jupyter/jupyter_notebook_config.py | 19,173 | Python |
import asyncio
import logging
import os
import shutil
import warnings
from types import TracebackType
from typing import Any, Coroutine, Dict, List, Optional, Text, Type, TypeVar
import rasa.core.utils
import rasa.utils.io
from rasa.constants import (
DEFAULT_LOG_LEVEL_LIBRARIES,
ENV_LOG_LEVEL_LIBRARIES,
)
from rasa.shared.constants import DEFAULT_LOG_LEVEL, ENV_LOG_LEVEL
import rasa.shared.utils.io
logger = logging.getLogger(__name__)
T = TypeVar("T")
class TempDirectoryPath(str):
"""Represents a path to an temporary directory. When used as a context
manager, it erases the contents of the directory on exit.
"""
def __enter__(self) -> "TempDirectoryPath":
return self
def __exit__(
self,
_exc: Optional[Type[BaseException]],
_value: Optional[Exception],
_tb: Optional[TracebackType],
    ) -> None:
if os.path.exists(self):
shutil.rmtree(self)
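# Usage sketch (hypothetical): wrap a freshly created temp dir so that its
# contents are removed when the block exits:
#   import tempfile
#   with TempDirectoryPath(tempfile.mkdtemp()) as temp_dir:
#       ...  # use temp_dir like any str path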
def read_global_config(path: Text) -> Dict[Text, Any]:
"""Read global Rasa configuration.
Args:
path: Path to the configuration
Returns:
The global configuration
"""
# noinspection PyBroadException
try:
return rasa.shared.utils.io.read_config_file(path)
except Exception:
# if things go south we pretend there is no config
return {}
def set_log_level(log_level: Optional[int] = None):
"""Set log level of Rasa and Tensorflow either to the provided log level or
to the log level specified in the environment variable 'LOG_LEVEL'. If none is set
a default log level will be used."""
if not log_level:
log_level = os.environ.get(ENV_LOG_LEVEL, DEFAULT_LOG_LEVEL)
log_level = logging.getLevelName(log_level)
logging.getLogger("rasa").setLevel(log_level)
update_tensorflow_log_level()
update_asyncio_log_level()
update_apscheduler_log_level()
update_socketio_log_level()
os.environ[ENV_LOG_LEVEL] = logging.getLevelName(log_level)
def update_apscheduler_log_level() -> None:
log_level = os.environ.get(ENV_LOG_LEVEL_LIBRARIES, DEFAULT_LOG_LEVEL_LIBRARIES)
apscheduler_loggers = [
"apscheduler",
"apscheduler.scheduler",
"apscheduler.executors",
"apscheduler.executors.default",
]
for logger_name in apscheduler_loggers:
logging.getLogger(logger_name).setLevel(log_level)
logging.getLogger(logger_name).propagate = False
def update_socketio_log_level() -> None:
log_level = os.environ.get(ENV_LOG_LEVEL_LIBRARIES, DEFAULT_LOG_LEVEL_LIBRARIES)
socketio_loggers = ["websockets.protocol", "engineio.server", "socketio.server"]
for logger_name in socketio_loggers:
logging.getLogger(logger_name).setLevel(log_level)
logging.getLogger(logger_name).propagate = False
def update_tensorflow_log_level() -> None:
"""Set the log level of Tensorflow to the log level specified in the environment
variable 'LOG_LEVEL_LIBRARIES'."""
    # Disables libnvinfer, TensorRT, CUDA, AVX2 and FMA warnings (CPU support).
    # This variable needs to be set before the first import since some warnings
    # are raised on the first import.
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
import tensorflow as tf
log_level = os.environ.get(ENV_LOG_LEVEL_LIBRARIES, DEFAULT_LOG_LEVEL_LIBRARIES)
if log_level == "DEBUG":
tf_log_level = tf.compat.v1.logging.DEBUG
elif log_level == "INFO":
tf_log_level = tf.compat.v1.logging.INFO
elif log_level == "WARNING":
tf_log_level = tf.compat.v1.logging.WARN
else:
tf_log_level = tf.compat.v1.logging.ERROR
tf.compat.v1.logging.set_verbosity(tf_log_level)
logging.getLogger("tensorflow").propagate = False
def update_sanic_log_level(log_file: Optional[Text] = None):
"""Set the log level of sanic loggers to the log level specified in the environment
variable 'LOG_LEVEL_LIBRARIES'."""
from sanic.log import logger, error_logger, access_logger
log_level = os.environ.get(ENV_LOG_LEVEL_LIBRARIES, DEFAULT_LOG_LEVEL_LIBRARIES)
logger.setLevel(log_level)
error_logger.setLevel(log_level)
access_logger.setLevel(log_level)
logger.propagate = False
error_logger.propagate = False
access_logger.propagate = False
if log_file is not None:
formatter = logging.Formatter("%(asctime)s [%(levelname)-5.5s] %(message)s")
file_handler = logging.FileHandler(log_file)
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
error_logger.addHandler(file_handler)
access_logger.addHandler(file_handler)
def update_asyncio_log_level() -> None:
"""Set the log level of asyncio to the log level specified in the environment
variable 'LOG_LEVEL_LIBRARIES'."""
log_level = os.environ.get(ENV_LOG_LEVEL_LIBRARIES, DEFAULT_LOG_LEVEL_LIBRARIES)
logging.getLogger("asyncio").setLevel(log_level)
def set_log_and_warnings_filters() -> None:
"""
Set log filters on the root logger, and duplicate filters for warnings.
Filters only propagate on handlers, not loggers.
"""
for handler in logging.getLogger().handlers:
handler.addFilter(RepeatedLogFilter())
warnings.filterwarnings("once", category=UserWarning)
def obtain_verbosity() -> int:
"""Returns a verbosity level according to the set log level."""
log_level = os.environ.get(ENV_LOG_LEVEL, DEFAULT_LOG_LEVEL)
verbosity = 0
if log_level == "DEBUG":
verbosity = 2
if log_level == "INFO":
verbosity = 1
return verbosity
def sort_list_of_dicts_by_first_key(dicts: List[Dict]) -> List[Dict]:
"""Sorts a list of dictionaries by their first key."""
return sorted(dicts, key=lambda d: list(d.keys())[0])
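# For example, sort_list_of_dicts_by_first_key([{"b": 1}, {"a": 2}])
# returns [{"a": 2}, {"b": 1}].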
def write_global_config_value(name: Text, value: Any) -> None:
"""Read global Rasa configuration."""
# need to use `rasa.constants.GLOBAL_USER_CONFIG_PATH` to allow patching
# in tests
config_path = rasa.constants.GLOBAL_USER_CONFIG_PATH
try:
os.makedirs(os.path.dirname(config_path), exist_ok=True)
c = read_global_config(config_path)
c[name] = value
rasa.core.utils.dump_obj_as_yaml_to_file(
rasa.constants.GLOBAL_USER_CONFIG_PATH, c
)
except Exception as e:
logger.warning(f"Failed to write global config. Error: {e}. Skipping.")
def read_global_config_value(name: Text, unavailable_ok: bool = True) -> Any:
"""Read a value from the global Rasa configuration."""
def not_found():
if unavailable_ok:
return None
else:
raise ValueError(f"Configuration '{name}' key not found.")
# need to use `rasa.constants.GLOBAL_USER_CONFIG_PATH` to allow patching
# in tests
config_path = rasa.constants.GLOBAL_USER_CONFIG_PATH
if not os.path.exists(config_path):
return not_found()
c = read_global_config(config_path)
if name in c:
return c[name]
else:
return not_found()
def update_existing_keys(
original: Dict[Any, Any], updates: Dict[Any, Any]
) -> Dict[Any, Any]:
"""Iterate through all the updates and update a value in the original dictionary.
If the updates contain a key that is not present in the original dict, it will
be ignored."""
updated = original.copy()
for k, v in updates.items():
if k in updated:
updated[k] = v
return updated
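# For example, update_existing_keys({"a": 1, "b": 2}, {"b": 3, "c": 4})
# returns {"a": 1, "b": 3}; the unknown key "c" is ignored.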
class RepeatedLogFilter(logging.Filter):
"""Filter repeated log records."""
last_log = None
def filter(self, record):
current_log = (
record.levelno,
record.pathname,
record.lineno,
record.msg,
record.args,
)
if current_log != self.last_log:
self.last_log = current_log
return True
return False
def run_in_loop(
f: Coroutine[Any, Any, T], loop: Optional[asyncio.AbstractEventLoop] = None
) -> T:
"""Execute the awaitable in the passed loop.
If no loop is passed, the currently existing one is used or a new one is created
if no loop has been started in the current context.
After the awaitable is finished, all remaining tasks on the loop will be
awaited as well (background tasks).
WARNING: don't use this if there are never ending background tasks scheduled.
in this case, this function will never return.
Args:
        f: awaitable to execute
loop: loop to use for the execution
Returns:
return value from the function
"""
if loop is None:
try:
loop = asyncio.get_event_loop()
except RuntimeError:
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
result = loop.run_until_complete(f)
# Let's also finish all running tasks:
pending = asyncio.Task.all_tasks()
loop.run_until_complete(asyncio.gather(*pending))
return result
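# Usage sketch (hypothetical coroutine):
#   async def compute():
#       return 42
#   assert run_in_loop(compute()) == 42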
| 30.01 | 119 | 0.690103 | [
"Apache-2.0"
] | karen-white/rasa | rasa/utils/common.py | 9,003 | Python |
from io import BytesIO
from gtts import gTTS
from PIL import Image
from vkbottle import AudioUploader, Bot, DocUploader, Message, PhotoUploader
bot = Bot("token")
photo_uploader = PhotoUploader(bot.api, generate_attachment_strings=True)
doc_uploader = DocUploader(bot.api, generate_attachment_strings=True)
audio_uploader = AudioUploader(bot.api, generate_attachment_strings=True)
@bot.on.message_handler(text="photo_from_bytes", lower=True)
async def photo_from_bytes(ans: Message):
image = Image.new("RGB", (320, 320), (0, 0, 0))
fp = BytesIO()
image.save(fp, "RGB")
setattr(fp, "name", "image.png")
photo = await photo_uploader.upload_message_photo(fp)
await ans(attachment=photo)
@bot.on.message_handler(text="doc_from_file", lower=True)
async def doc_from_file(ans: Message):
image = Image.new("RGB", (320, 320), (0, 0, 0))
image.save("image.png", "RGB")
photo = await doc_uploader.upload_doc_to_message("image.png", ans.peer_id)
await ans(attachment=photo)
@bot.on.message_handler(text="audio_message")
async def audio(ans: Message):
tts = gTTS(text="бокале монада", lang="ru")
fp = BytesIO()
tts.write_to_fp(fp)
audio_message = await audio_uploader.upload_audio_message(fp, ans.peer_id)
await ans(attachment=audio_message)
if __name__ == "__main__":
bot.run_polling()
| 31.465116 | 78 | 0.731707 | [
"MIT"
] | MooFreak/vkbottle | examples/uploaders.py | 1,365 | Python |
"""
ECB没有偏移量
"""
from Crypto.Cipher import AES
from binascii import b2a_hex, a2b_hex
from utils import DES_decrypt, DES_encrypt
def add_to_16(text):
if len(text.encode('utf-8')) % 16:
add = 16 - (len(text.encode('utf-8')) % 16)
else:
add = 0
text = text + ('\0' * add)
return text.encode('utf-8')
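# e.g. add_to_16('abc') -> b'abc' + b'\x00' * 13 (length 16)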
# encryption function
def encrypt(text):
key = '9999999999999999'.encode('utf-8')
mode = AES.MODE_ECB
text = add_to_16(text)
cryptos = AES.new(key, mode)
cipher_text = cryptos.encrypt(text)
return b2a_hex(cipher_text)
# after decryption, strip the '\0' padding with rstrip()
def decrypt(text):
key = '9999999999999999'.encode('utf-8')
mode = AES.MODE_ECB
cryptor = AES.new(key, mode)
plain_text = cryptor.decrypt(a2b_hex(text))
return bytes.decode(plain_text).rstrip('\0')
if __name__ == '__main__':
    e = DES_encrypt("hello world")  # encrypt
    print(type(e))
    d = DES_decrypt(e)  # decrypt
    print("Encrypted:", e)
    print("Decrypted:", d)
| 22.904762 | 51 | 0.626819 | [
"MIT"
] | peterzheng98/Valentine-Gift | try.py | 1,024 | Python |
"""This module contains the general information for ChassisPowerMonitor ManagedObject."""
from ...imcmo import ManagedObject
from ...imccoremeta import MoPropertyMeta, MoMeta
from ...imcmeta import VersionMeta
class ChassisPowerMonitorConsts:
pass
class ChassisPowerMonitor(ManagedObject):
"""This is ChassisPowerMonitor class."""
consts = ChassisPowerMonitorConsts()
naming_props = set([])
mo_meta = {
"modular": MoMeta("ChassisPowerMonitor", "chassisPowerMonitor", "pwrmonitor", VersionMeta.Version2013e, "OutputOnly", 0xf, [], ["admin", "read-only", "user"], ['equipmentChassis'], [], ["Get"])
}
prop_meta = {
"modular": {
"average": MoPropertyMeta("average", "average", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version2013e, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
"current": MoPropertyMeta("current", "current", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, 0x2, 0, 255, None, [], []),
"maximum": MoPropertyMeta("maximum", "maximum", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"minimum": MoPropertyMeta("minimum", "minimum", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"period": MoPropertyMeta("period", "period", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, 0x4, 0, 255, None, [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, 0x8, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
},
}
prop_map = {
"modular": {
"average": "average",
"childAction": "child_action",
"current": "current",
"dn": "dn",
"maximum": "maximum",
"minimum": "minimum",
"period": "period",
"rn": "rn",
"status": "status",
},
}
def __init__(self, parent_mo_or_dn, **kwargs):
self._dirty_mask = 0
self.average = None
self.child_action = None
self.current = None
self.maximum = None
self.minimum = None
self.period = None
self.status = None
ManagedObject.__init__(self, "ChassisPowerMonitor", parent_mo_or_dn, **kwargs)
| 42.835821 | 234 | 0.619164 | [
"Apache-2.0"
] | CiscoUcs/imcsdk | imcsdk/mometa/chassis/ChassisPowerMonitor.py | 2,870 | Python |
"""
FILE : BiLSTM.py
FUNCTION : None
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
import random
from DataUtils.Common import *
from models.initialize import *
from models.modelHelp import prepare_pack_padded_sequence
torch.manual_seed(seed_num)
random.seed(seed_num)
class BiLSTM(nn.Module):
"""
BiLSTM
"""
def __init__(self, **kwargs):
super(BiLSTM, self).__init__()
for k in kwargs:
self.__setattr__(k, kwargs[k])
V = self.embed_num
D = self.embed_dim
C = self.label_num
paddingId = self.paddingId
self.embed = nn.Embedding(V, D, padding_idx=paddingId)
if self.pretrained_embed:
self.embed.weight.data.copy_(self.pretrained_weight)
else:
init_embedding(self.embed.weight)
self.dropout_embed = nn.Dropout(self.dropout_emb)
self.dropout = nn.Dropout(self.dropout)
self.bilstm = nn.LSTM(input_size=D, hidden_size=self.lstm_hiddens, num_layers=self.lstm_layers,
bidirectional=True, batch_first=True, bias=True)
self.linear = nn.Linear(in_features=self.lstm_hiddens * 2, out_features=C, bias=True)
init_linear(self.linear)
def forward(self, word, sentence_length):
"""
:param word:
:param sentence_length:
:param desorted_indices:
:return:
"""
word, sentence_length, desorted_indices = prepare_pack_padded_sequence(word, sentence_length, device=self.device)
x = self.embed(word) # (N,W,D)
x = self.dropout_embed(x)
packed_embed = pack_padded_sequence(x, sentence_length, batch_first=True)
x, _ = self.bilstm(packed_embed)
x, _ = pad_packed_sequence(x, batch_first=True)
x = x[desorted_indices]
x = self.dropout(x)
x = torch.tanh(x)
logit = self.linear(x)
return logit
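# NOTE: the class below redefines BiLSTM and therefore shadows the
# definition above; importers of this module only see the second one.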
class BiLSTM(nn.Module):
def __init__(self, vocab_size, emb_size, hidden_size, out_size):
""":
vocab_size:
emb_size:
hidden_size:
out_size:
"""
super(BiLSTM, self).__init__()
self.embedding = nn.Embedding(vocab_size, emb_size)
self.bilstm = nn.LSTM(emb_size, hidden_size,
batch_first=True,
bidirectional=True)
self.lin = nn.Linear(2*hidden_size, out_size)
def forward(self, sents_tensor, lengths):
emb = self.embedding(sents_tensor) # [B, L, emb_size]
packed = pack_padded_sequence(emb, lengths, batch_first=True)
rnn_out, _ = self.bilstm(packed)
# rnn_out:[B, L, hidden_size*2]
rnn_out, _ = pad_packed_sequence(rnn_out, batch_first=True)
scores = self.lin(rnn_out) # [B, L, out_size]
return scores
def test(self, sents_tensor, lengths, _):
logits = self.forward(sents_tensor, lengths) # [B, L, out_size]
_, batch_tagids = torch.max(logits, dim=2)
return batch_tagids
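# Usage sketch (hypothetical sizes):
#   model = BiLSTM(vocab_size=1000, emb_size=64, hidden_size=128, out_size=10)
#   sents = torch.randint(0, 1000, (2, 7))   # (batch, max_len)
#   scores = model(sents, [7, 5])            # -> (2, 7, 10)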
| 30.892157 | 121 | 0.623294 | [
"Apache-2.0"
] | Ahmed2xD/NER-with-bilstm-CRF-CNN | models/BiLSTM.py | 3,155 | Python |
'''
@author: kris
'''
# import modules; set up logging
from gensim.models import Word2Vec
from gensim.models import KeyedVectors
from gensim.test.utils import datapath
import numpy as np
import logging, os, sys, gzip
import datetime
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', filename='word2vec.out', level=logging.INFO)
# Path to a file that contains lines with the locations of files
# containing the sentences we want for our Word2Vec model
# Also works with entities that are just stacked line by line
pathsLocator = "./sentencesPaths.txt"
outputPath = "./entity_embeddings.txt"
# Model to load
to_load = '/vol2/cb/crunchbase-201806/embeddings/dim200-iter10-win5/CB_sg1_size200_mincount1_window5_neg15_iter10.wv.vectors.npy'
#'/home/faerberm/mag-training/MAG_sg1_size128_minCount5_window5_neg15_iter10_alpha_cbowMean.wv.vectors.npy'
#'/vol2/cb/crunchbase-201806/embeddings/dim200-iter10-win5/CB_sg1_size200_mincount1_window5_neg15_iter10'
#'MAG_sg1_size128_minCount5_window5_neg15_iter5'
loadKeyedVector = True
#'dbpedia_sg1_size200_mincount1_window5_neg15_iter10'
#'RDF2Vec_sg1_size200_mincount1_window5_neg15_iter20'
#'MAG_sg1_size200_mincount1_window5_neg15_iter15'
#What is the newline character on the machine
newline = '\n'
ignorePrefix = '#'
#What separates one walk from another (aka. one sentence from another)?
walkSeparator = "\t"
#What separates the single 'units' of a given walk?
hopSeparator = '->'
# Mapping dict
entity_mapping_dict = {}
# Mapping file
mapping_file = "/home/noulletk/prog/bmw/dbpedia_full/resources/data/walks/walk_entity_mapping.txt"
mapping_sep = "\t"
hasMapping = False
iterationCounter = {'val': 0}
#Load mappings if there are any
if hasMapping:
for mapping_line in open(mapping_file, mode='rt'):
mapping_tokens = mapping_line.rstrip(newline).split(mapping_sep)
if len(mapping_tokens) == 2:
entity_mapping_dict[mapping_tokens[0]] = mapping_tokens[1]
print("Loaded %s mappings!" % (len(entity_mapping_dict)))
class MySentences:
def __init__(self, iterationCounter):
self.iterationCounter = iterationCounter
def __iter__(self):
print("Running Iteration #%s" % (iterationCounter['val']))
iterationCounter['val'] += 1
# Iterate to find which files are to be read
for fname in open(pathsLocator, mode='rt'): # os.listdir(self.dirname):
sentencesPath = fname.rstrip(newline)
# Ignore commented-out lines
if sentencesPath.startswith(ignorePrefix):
continue
now = datetime.datetime.now()
print("[%s] Grabbing sentences from: %s" % (now.strftime("%Y-%m-%d %H:%M"), sentencesPath))
try:
# Go through all paths
for line in open(sentencesPath, mode='rt'):
# If you're NOT grouping the walks and separating them by tabs
sentence = line.rstrip(newline).split(hopSeparator)
for tokenPos in range(len(sentence)):
token = sentence[tokenPos]
# Give the proper URL for the entity IF it exists, otherwise return the entity itself
sentence[tokenPos] = entity_mapping_dict.get(token, token)
#print(sentence)
yield sentence
except Exception:
print("Failed reading file:")
print(sentencesPath)
#load model
if loadKeyedVector:
print("Loading [KeyedVectors] from: ",to_load)
#model_wv = KeyedVectors.load(to_load, mmap='r')
#model_wv = KeyedVectors.load_word2vec_format(to_load, binary=True)
#model_wv = KeyedVectors.load_word2vec_format(to_load)
model_wv = KeyedVectors.load(to_load)
#model_wv = KeyedVectors.load_word2vec_format(datapath('word2vec_pre_kv_c'), binary=False) # C text format
#model_wv = KeyedVectors.load_word2vec_format(to_load, binary=True, unicode_errors='ignore')
else:
print("Loading [MODEL] from: ",to_load)
model_wv = Word2Vec.load(to_load).wv
print("Vocab keys size:",len(model_wv.vocab.keys()))
print("Outputting entity embeddings to: ",outputPath)
sentences = MySentences(iterationCounter)
#Open the output file for the entity embeddings
outFile = open(outputPath, "w")
#Make a dictionary for in-memory aggregation while going over sentences
default_val = None
entity_embeddings_dict = {}
vocab_keys = model_wv.vocab.keys()
displayCounter = 0
maxDisplay = 10
for voc in vocab_keys:
print(voc)
if displayCounter >= maxDisplay:
break
displayCounter+=1
print("Compute entity embeddings (through combination of word embeddings)...")
counter = 0
'''
for sentence in sentences:
entity = sentence[0]
entity_embedding = None
#Sum over all words' embeddings and then output the resulting embedding
for word in sentence:
word_embedding = model.wv[word]
if default_val is None:
#Initialise default_val if it isn't yet
default_val = np.zeros(word_embedding.shape)
if entity_embedding is None:
entity_embedding = np.zeros(word_embedding.shape)
entity_embedding += word_embedding
entity_embeddings_dict[entity] = entity_embeddings_dict.get(entity, default_val) + entity_embedding
if (counter % 1000000 == 0):
print("Combined word embeddings: ",counter)
print("Last one completed: ",entity)
counter+=1
'''
#Go through all sentences to see which entities we want
for sentence in sentences:
# idea is that the entity is in the document, so we check what it is like and
# since every entity has 'the same' treatment, that we can determine their probabilities based on that
entity = sentence[0]
if hasMapping:
entity = entity_mapping_dict.get(entity, entity)
entity_embedding = None
dict_val = entity_embeddings_dict.get(entity, None)
if (dict_val is None):
if entity in vocab_keys:
entity_embedding = model_wv[entity]
entity_embeddings_dict[entity] = entity_embedding
#Encountered first time, so output it
outFile.write("%s" % entity)
for number in entity_embedding:
outFile.write("\t%s" % number)
outFile.write("\n")
if (counter % 1000000 == 0):
print("Lines passed through: ",counter)
print("Current line's entity: ",entity)
print("Embeddings output: ",len(entity_embeddings_dict))
counter+=1
#print("Output computed entity embeddings!")
#for (entity, entity_embedding) in entity_embeddings_dict.items():
# #Output computed embedding
# outFile.write("%s" % entity)
# for number in entity_embedding:
# outFile.write("\t%s" % number)
# outFile.write("\n")
#Close the output file post finishing output operations
outFile.close()
print("Finished outputting entity embeddings")
| 34.737705 | 129 | 0.757747 | [
"MIT"
] | michaelfaerber/Agnos | scripts/loadModelDoEntityEmbeddingsUnsorted.py | 6,357 | Python |
"""inter-base steganography
producing base32 and base64 decodable strings"""
from base64 import b64encode, b64decode
import string
from itertools import product
from argparse import ArgumentParser
CHARSET = string.printable.encode()
B32_CHARSET = (string.ascii_uppercase + '234567').encode()
B64_CHARSET = (
string.ascii_lowercase +
string.ascii_uppercase +
string.digits +
'+/').encode()
ASCII_LOWER = string.ascii_lowercase.encode()
WHITESPACE = string.whitespace.encode()
ALPHA_SPACE = (
string.ascii_uppercase +
string.ascii_lowercase +
string.whitespace).encode()
ASCII_SUBS = {"a": ["a", "A", "4", "@"],
"b": ["b", "B", "8", "6"],
"c": ["c", "C", "("],
"d": ["d", "D"],
"e": ["e", "E", "3"],
"f": ["f", "F"],
"g": ["g", "G", "6", "9"],
"h": ["h", "H", "#"],
"i": ["i", "I", "1", "|", "!"],
"j": ["j", "J", "]", ";"],
"k": ["k", "K"],
"l": ["l", "L", "1", "|"],
"m": ["m", "M"],
"n": ["n", "N"],
"o": ["o", "O", "0"],
"p": ["p", "P"],
"q": ["q", "Q", "9"],
"r": ["r", "R", "2"],
"s": ["s", "S", "5", "$"],
"t": ["t", "T", "7", "+"],
"u": ["u", "U"],
"v": ["v", "V"],
"w": ["w", "W"],
"x": ["x", "X"],
"y": ["y", "Y"],
"z": ["z", "Z", "2", "%"],
"0": ["0"],
"1": ["1"],
"2": ["2"],
"3": ["3"],
"4": ["4"],
"5": ["5"],
"6": ["6"],
"7": ["7"],
"8": ["8"],
"9": ["9"],
" ": [" ", "\t", "_"]
}
def all_variations(word: str) -> list:
"""
Produce all single-character leet variations of a string
"""
ans = [""]
for leet_letter in [ASCII_SUBS[i] for i in word]:
ans = [x + y for x in ans for y in leet_letter]
return ans
def variation_gen(word: str):
"""
Produces all single-character leet variations of a string
Args:
word: a 3 character string to generate all variations
Returns:
generator: generator for all possible leet variations
"""
return product(*(ASCII_SUBS[i] for i in word))
def all_valid_variations(word: str) -> list:
"""
Returns all leet variations of a triplet which result in a
Base32 only charset words on base64 encoding
Args:
word: An english triplet
Returns:
list: of all valid variations
"""
result = []
for variation in variation_gen(word):
if all(i in B32_CHARSET for i in b64encode(
''.join(variation).encode())):
result.append("".join(variation))
return result
def valid_variation(word: str) -> str:
"""
Generates a single valid variation
Args:
word: the triplet to generate a variation from
Returns:
str: A valid variation of `word` or None otherwise
"""
for variation in variation_gen(word):
if all(i in B32_CHARSET for i in b64encode(
''.join(variation).encode())):
return "".join(variation)
return None
# List to precompute the triplets for which there doesnt exist a valid
# variation
NON_LEET = []
for perm in product(string.ascii_lowercase + ' ' + string.digits, repeat=3):
if not valid_variation(''.join(perm)):
NON_LEET.append(''.join(perm))
def transform(strng: str) -> str:
"""
Transform the string to only lower alpha and numerics and spaces
Converts uppercase to lower case and strips all other characters except
space
"""
for char in string.punctuation + string.whitespace[1:]:
strng = strng.replace(char, '')
return strng.lower() + ' ' * (8 - len(strng) % 8)
def master_encode(strng: str) -> bytes:
"""
Encodes a string to its leet equivalent (sans punctuation) which when
base64 encoded contains only base32 characters
"""
if isinstance(strng, (bytes, bytearray)):
strng = strng.decode()
strng = transform(strng)
result = ''
i = 0
while i < len(strng):
try:
current = strng[i:i + 3]
if current in NON_LEET:
if current[:2] + ' ' not in NON_LEET:
result += valid_variation(current[:2] + ' ')
i += 2
elif current[0] + ' ' not in NON_LEET:
result += valid_variation(current[0] + ' ')
i += 1
elif ' {} '.format(current[0]) not in NON_LEET:
result += valid_variation(' {} '.format(current[0]))
i += 1
elif ' {}'.format(current[0]) not in NON_LEET:
result += valid_variation(' {}'.format(current[0]))
i += 1
else:
i += 1
else:
result += valid_variation(current)
i += 3
except TypeError:
i += 1
return b64encode(result.encode())
if __name__ == "__main__":
PARSER = ArgumentParser(description="")
PARSER.add_argument(
'--input',
help='read a single line directly from input',
action="store_true")
PARSER.add_argument(
'--show',
help='shows the transformed input which results in correct encoding',
action="store_true")
PARSER.add_argument(
'--file',
help='reading text from file for conversion',
action="append")
ARGS = PARSER.parse_args()
TEST_STRING = """Steganography is the practice of concealing a file,
message, image, or video within another file, message, image, or video.
The word steganography comes from Greek steganographia, which combines
the words steganos meaning "covered or concealed", and graphia meaning
"writing". The first recorded use of the term was by Johannes Trithemius
in his Steganographia, a treatise on cryptography and steganography,
disguised as a book on magic. Generally, the hidden messages appear to
be (or to be part of) something else: images, articles, shopping lists,
or some other cover text. For example, the hidden message may be in
invisible ink between the visible lines of a private letter. Some
implementations of steganography that lack a shared secret are forms
of security through obscurity, and key-dependent steganographic schemes
adhere to Kerckhoffs's principle."""
if ARGS.file:
with open(ARGS.file[0], 'rb') as inp_file:
TEST_STRING = inp_file.read()
else:
TEST_STRING = input("input the line to encode:\n")
ENCODED_STRING = master_encode(TEST_STRING)
print("ENCODED STRING: {}".format(ENCODED_STRING))
if ARGS.show:
print("Transformed string: {}".format(b64decode(ENCODED_STRING)))
# WTBVICAJV2VSZSBFWHBFY3RJIG4JOSBGTGFHNSBCVXQJYTFMICAJWTBVIDZFVCBJNSB3ZTFS\
# ZCBCYXNFNSBCYSAJTWJPMDJMZSAJTWVOVCBET25UICAJICB3T3JSWSBJVHMJIGYJVW4JIG4JZXZ\
# FIHIJVCNFTGVTNSAJ
| 33.747664 | 82 | 0.542924 | [
"MIT"
] | deut-erium/BASEic-steganography | encode.py | 7,222 | Python |
import sys
import datetime
def capitalize(string):
return string[0].upper() + string[1:]
action = sys.argv[1]
file_path = sys.argv[2]
project_name = sys.argv[3]
namespace = sys.argv[4]
now = datetime.datetime.now()
date = now.strftime("%m-%d-%Y %H:%M:%S")
args = sys.argv[6:]
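# Invocation sketch (hypothetical values):
#   python parse_new_files.py class ./proj/ MyProject myns Widget int width
# -> action="class", file_path="./proj/", project_name="MyProject",
#    namespace="myns", file_name="Widget", args=["int", "width"]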
username = "Logan Rickert"
def new_class():
file_name = sys.argv[5]
cpp_file_path = file_path + "src/" + file_name + ".cpp"
h_file_path = file_path + "include/" + file_name + ".h"
if len(args) % 2 != 0:
print "You must have an even amount of arguments!"
sys.exit()
parse = []
for arg in xrange(0,len(args),2):
parse.append([args[arg], args[arg + 1]])
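    # e.g. (hypothetical) args = ["int", "age", "string", "name"]
    # yields parse = [["int", "age"], ["string", "name"]]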
cpp_file_contents = None
h_file_contents = None
with open(cpp_file_path, 'r') as f:
cpp_file_contents = f.read()
with open(h_file_path, 'r') as f:
h_file_contents = f.read()
cpp_file_contents = cpp_file_contents.replace(
"{{class_name}}", file_name
)
cpp_file_contents = cpp_file_contents.replace(
"{{namespace}}", namespace
)
cpp_file_contents = cpp_file_contents.replace(
"{{date}}", date
)
cpp_file_contents = cpp_file_contents.replace(
"{{username}}", username
)
if len(args) > 0:
construct_init = file_name + "::" + file_name + "("
for key, value in parse:
construct_init += key + " s" + capitalize(value) + ", "
construct_init = construct_init[:-2] + ") {"
cpp_file_contents = cpp_file_contents.replace(
"{{construct_init}}", construct_init
)
construct_init_equals = ""
for key, value in parse:
construct_init_equals += "\t" + value + " = s" + capitalize(value) + ";\n"
construct_init_equals += "}"
cpp_file_contents = cpp_file_contents.replace(
"{{construct_init_equals}}", construct_init_equals
)
getters_setters = ""
for key, value in parse:
getters_setters += """%s %s::get%s() {
return %s;
}
void %s::set%s(%s s%s) {
%s = s%s;
}
""" % (
key,
file_name,
capitalize(value),
value,
file_name,
capitalize(value),
key,
capitalize(value),
value,
capitalize(value)
)
getters_setters = getters_setters[:-2]
cpp_file_contents = cpp_file_contents.replace(
"{{getters_setters}}", getters_setters
)
else:
cpp_file_contents = cpp_file_contents.replace(
"\n{{construct_init}}\n", ""
)
cpp_file_contents = cpp_file_contents.replace(
"{{construct_init_equals}}\n", ""
)
cpp_file_contents = cpp_file_contents.replace(
"\n{{getters_setters}}\n", ""
)
with open(cpp_file_path, 'w') as f:
f.write(cpp_file_contents)
h_file_contents = h_file_contents.replace(
"{{class_name_caps}}", file_name.upper()
)
h_file_contents = h_file_contents.replace(
"{{class_name}}", file_name
)
h_file_contents = h_file_contents.replace(
"{{username}}", username
)
h_file_contents = h_file_contents.replace(
"{{namespace}}", namespace
)
h_file_contents = h_file_contents.replace(
"{{date}}", date
)
if len(args) > 0:
class_construct_full = file_name + "("
for key, value in parse:
class_construct_full += key + ", "
class_construct_full = class_construct_full[:-2] + ");"
h_file_contents = h_file_contents.replace(
"{{class_construct_full}}", class_construct_full
)
getters_setters = ""
for key, value in parse:
getters_setters += "\t\t" + key + " get" + capitalize(value) + "();\n"
getters_setters += '\n'
for key, value in parse:
getters_setters += "\t\tvoid set" + capitalize(value) + "(" + key + " s" + capitalize(value) + ");\n"
h_file_contents = h_file_contents.replace(
"{{getters_setters}}", getters_setters
)
class_fields = ""
for key, value in parse:
class_fields += "\t\t" + key + " " + value + ";\n"
h_file_contents = h_file_contents.replace(
"{{class_fields}}", class_fields
)
else:
h_file_contents = h_file_contents.replace(
"\n\t\t{{class_construct_full}}", ""
)
h_file_contents = h_file_contents.replace(
"{{getters_setters}}\n", ""
)
h_file_contents = h_file_contents.replace(
"{{class_fields}}", ""
)
with open(h_file_path, 'w') as f:
f.write(h_file_contents)
def new_main():
cpp_file_path = file_path + "/src/Main.cpp"
cpp_file_contents = None
h_file_contents = None
with open(cpp_file_path, 'r') as f:
cpp_file_contents = f.read()
cpp_file_contents = cpp_file_contents.replace(
"{{class_name}}", "Main"
)
cpp_file_contents = cpp_file_contents.replace(
"{{namespace}}", namespace
)
cpp_file_contents = cpp_file_contents.replace(
"{{username}}", username
)
cpp_file_contents = cpp_file_contents.replace(
"{{date}}", date
)
with open(cpp_file_path, 'w') as f:
f.write(cpp_file_contents)
if action == "class":
new_class()
elif action == "namespace" or action == "project":
	new_main()
| 20.828194 | 104 | 0.666244 | ["CC0-1.0"] | LoganRickert/CPP-Builder-And-Documentator | bin/parse_new_files.py | 4,728 | Python |
#!/usr/bin/python3
import time
import datetime
from gpiozero import InputDevice, LED
import subprocess
import requests
# RPI enumeration is:
# pin 5 & 6 are used for the button (3 & ground)
# pin 7 & 9 are used for the LED (4 & ground)
button_pin = 3
led_pin = 4
button = InputDevice(button_pin, pull_up=True)
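# With pull_up=True the pin idles high, so is_active reads True only
# while the button shorts the pin to ground.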
last_active = False
last_press = None
led = LED(led_pin)
led.on()
def button_hold(now, seconds):
if seconds > 3:
print('button hold')
led.blink(.05, .5)
requests.get('http://localhost:8080/home')
time.sleep(2)
subprocess.call(['shutdown', '-h', 'now'], shell=False)
def button_release(now, seconds):
print('button release')
requests.get('http://localhost:8080/button')
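# Poll the button at ~60 Hz, tracking how long it has been held so a
# short press (button_release) is distinguished from a >3 s hold (shutdown).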
while True:
cur_active = button.is_active
now = datetime.datetime.now()
if cur_active and not last_active:
last_press = now
if cur_active:
duration = now - last_press
button_hold(now, duration.total_seconds())
if not cur_active and last_active:
duration = now - last_press
button_release(now, duration.total_seconds())
last_active = cur_active
    time.sleep(1/60)
| 25.456522 | 63 | 0.668659 | ["MIT"] | kylemcdonald/bsp | pi/button/button.py | 1,171 | Python |
import argparse
import torch
from tqdm import tqdm
import vgg.data_loader.data_loaders as module_data
import vgg.model.loss as module_loss
import vgg.model.metric as module_metric
import vgg.model.model as module_arch
from vgg.parse_config import ConfigParser
def main(config):
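    """Evaluate a trained checkpoint on the test split and log loss/metrics."""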
logger = config.get_logger('test')
# setup data_loader instances
data_loader = getattr(module_data, config['data_loader']['type'])(
config['data_loader']['args']['data_dir'],
batch_size=512,
shuffle=False,
validation_split=0.0,
training=False,
num_workers=2
)
# build model architecture
model = config.init_obj('arch', module_arch)
logger.info(model)
# get function handles of loss and metrics
loss_fn = getattr(module_loss, config['loss'])
metric_fns = [getattr(module_metric, met) for met in config['metrics']]
logger.info('Loading checkpoint: {} ...'.format(config.resume))
checkpoint = torch.load(config.resume)
state_dict = checkpoint['state_dict']
if config['n_gpu'] > 1:
model = torch.nn.DataParallel(model)
model.load_state_dict(state_dict)
# prepare model for testing
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = model.to(device)
model.eval()
total_loss = 0.0
total_metrics = torch.zeros(len(metric_fns))
with torch.no_grad():
for i, (data, target) in enumerate(tqdm(data_loader)):
data, target = data.to(device), target.to(device)
output = model(data)
#
# save sample images, or do something with output here
#
# computing loss, metrics on test set
loss = loss_fn(output, target)
batch_size = data.shape[0]
total_loss += loss.item() * batch_size
            for j, metric in enumerate(metric_fns):
                total_metrics[j] += metric(output, target) * batch_size
n_samples = len(data_loader.sampler)
log = {'loss': total_loss / n_samples}
log.update({
met.__name__: total_metrics[i].item() / n_samples for i, met in enumerate(metric_fns)
})
logger.info(log)
if __name__ == '__main__':
args = argparse.ArgumentParser(description='PyTorch Template')
args.add_argument('-c', '--config', default=None, type=str,
help='config file path (default: None)')
args.add_argument('-r', '--resume', default=None, type=str,
help='path to latest checkpoint (default: None)')
args.add_argument('-d', '--device', default=None, type=str,
help='indices of GPUs to enable (default: all)')
config = ConfigParser.from_args(args)
main(config)
| 33.414634 | 93 | 0.641241 | ["MIT"] | mhd53/vgg-from-torch | vgg/test.py | 2,740 | Python |
import os
import argparse
from ops.os_operation import mkdir
import time
def write_slurm_sh_multi_H2(id,command_line, queue_name="learnfair",nodes=1,
gpu_per_node=8,wall_time=3*24*60,username="wang3702",CPU_PER_GPU=8):
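    """Write a multi-node SLURM batch script (one task per node) and submit it
    with sbatch. The script activates the pytorch2 conda env, derives a
    tcp:// dist_url from the first node in SLURM_NODELIST, and launches
    command_line through `srun --label` with --slurm=1 and --dist_url."""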
import time
import datetime
today = datetime.date.today()
formatted_today = today.strftime('%y%m%d')
now = time.strftime("%H:%M:%S")
dependency_handler_path = os.path.join(os.getcwd(), "ops")
dependency_handler_path = os.path.join(dependency_handler_path, "handler.txt")
run_path = os.path.join(os.getcwd(), "log")
mkdir(run_path)
run_path = os.path.abspath(run_path)
prefix = "node%d_gpu%d"%(nodes,gpu_per_node)
batch_file = os.path.join(run_path, prefix+"slurm_job_" + str(id) + ".sh")
output_path = os.path.join(run_path, prefix+"output_" + str(id) + "_" + str(formatted_today + now) + ".log")
error_path = os.path.join(run_path, prefix+"error_" + str(id) + "_" + str(formatted_today + now) + ".log")
with open(batch_file, "w") as file:
file.write("#! /bin/bash\n")#!/bin/bash
file.write("#SBATCH --job-name=%s\n" % id)
file.write("#SBATCH --output=%s\n" % output_path)
file.write("#SBATCH --error=%s\n" % error_path)
file.write("#SBATCH --partition=%s\n"%queue_name)
file.write("#SBATCH --signal=USR1@600\n")
file.write("#SBATCH --nodes=%d\n" % nodes)
file.write("#SBATCH --ntasks-per-node=%d\n" % 1)
file.write("#SBATCH --mem=%dG\n"%(350/8*gpu_per_node))
file.write("#SBATCH --gpus=%d\n" % (nodes * gpu_per_node))
file.write("#SBATCH --gpus-per-node=%d\n" % (gpu_per_node))
file.write("#SBATCH --cpus-per-task=%d\n"%(CPU_PER_GPU*gpu_per_node))
file.write("#SBATCH --time=%d\n"%wall_time)
file.write("#SBATCH --mail-user=%s@fb.com\n"%username)
file.write("#SBATCH --mail-type=FAIL\n")
file.write("#SBATCH --mail-type=end \n")
file.write('#SBATCH --constraint="volta"\n')
report_info = "%s job failed; \t" % id
report_info += "log path: %s; \t" % output_path
report_info += "error record path: %s\t" % error_path
report_info += "command line path: %s\t" % batch_file
file.write('#SBATCH --comment="%s"\n' % (report_info))
with open(dependency_handler_path, 'r') as rfile:
line = rfile.readline()
while line:
file.write(line)
line = rfile.readline()
file.write("export GLOO_SOCKET_IFNAME=\nexport NCCL_SOCKET_IFNAME=\n")
file.write("module load cuda/10.2 cudnn/v7.6.5.32-cuda.10.2 gcc/7.3.0\n")
#file.write("bash /private/home/wang3702/.bashrc\n")
#file.write("module load anaconda3\n")
file.write("/private/home/wang3702/anaconda3/bin/conda init\n")
file.write("CONDA_BASE=$(conda info --base) ; source $CONDA_BASE/etc/profile.d/conda.sh\n")
file.write("conda activate pytorch2\n")
file.write("master_node=${SLURM_NODELIST:0:9}${SLURM_NODELIST:10:4}\n")
file.write('dist_url="tcp://"\n')
file.write("dist_url+=$master_node\n")
file.write("dist_url+=:40000\n")
file.write("export MASTER_ADDR=${SLURM_NODELIST:0:9}${SLURM_NODELIST:10:4}\n")
file.write("export MASTER_PORT=29500\n")
file.write("srun --label "+command_line + " --slurm=1 --dist_url=$dist_url &\n")
file.write("wait $!\n")
file.write("set +x \n")
file.write("echo ..::Job Finished, but No, AGI is to BE Solved::.. \n")
# signal that job is finished
os.system('sbatch ' + batch_file)
def find_checkpoint(current_dir,checkpoint_name):
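    """Recursively search current_dir for an entry named checkpoint_name and
    return its full path, or None if it is not found."""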
if not os.path.isdir(current_dir):
return None
listfiles = os.listdir(current_dir)
for item in listfiles:
sub_dir = os.path.join(current_dir,item)
if item==checkpoint_name:
return sub_dir
elif os.path.isdir(sub_dir):
search_result = find_checkpoint(sub_dir,checkpoint_name)
if search_result is not None:
return search_result
return None
def write_slurm_sh_multi(id,command_line, queue_name="learnfair",nodes=1,
gpu_per_node=8,wall_time=3*24*60,username="wang3702",
CPU_PER_GPU=8,gpu_memory=False,environment=0):
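    """Like write_slurm_sh_multi_H2, but can request 32 GB GPUs via
    gpu_memory=True and pick the conda env (pytorch2/pytorch) via environment."""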
import time
import datetime
today = datetime.date.today()
formatted_today = today.strftime('%y%m%d')
now = time.strftime("%H:%M:%S")
dependency_handler_path = os.path.join(os.getcwd(), "ops")
dependency_handler_path = os.path.join(dependency_handler_path, "handler.txt")
run_path = os.path.join(os.getcwd(), "log")
mkdir(run_path)
run_path = os.path.abspath(run_path)
prefix = "node%d_gpu%d"%(nodes,gpu_per_node)
batch_file = os.path.join(run_path, prefix+"slurm_job_" + str(id) + ".sh")
output_path = os.path.join(run_path, prefix+"output_" + str(id) + "_" + str(formatted_today + now) + ".log")
error_path = os.path.join(run_path, prefix+"error_" + str(id) + "_" + str(formatted_today + now) + ".log")
with open(batch_file, "w") as file:
file.write("#! /bin/bash\n")#!/bin/bash
file.write("#SBATCH --job-name=%s\n" % id)
file.write("#SBATCH --output=%s\n" % output_path)
file.write("#SBATCH --error=%s\n" % error_path)
file.write("#SBATCH --partition=%s\n"%queue_name)
file.write("#SBATCH --signal=USR1@600\n")
file.write("#SBATCH --nodes=%d\n" % nodes)
file.write("#SBATCH --ntasks-per-node=%d\n" % 1)
file.write("#SBATCH --mem=%dG\n"%(350/8*gpu_per_node))#--mem : Specify the real memory required per node.
file.write("#SBATCH --gpus=%d\n" % (nodes * gpu_per_node))
file.write("#SBATCH --gpus-per-node=%d\n" % (gpu_per_node))
file.write("#SBATCH --cpus-per-task=%d\n"%(CPU_PER_GPU*gpu_per_node))
file.write("#SBATCH --time=%d\n"%wall_time)
file.write("#SBATCH --mail-user=%s@fb.com\n"%username)
file.write("#SBATCH --mail-type=FAIL\n")
file.write("#SBATCH --mail-type=end \n")
if gpu_memory is False:
file.write('#SBATCH --constraint="volta"\n')
else:
file.write('#SBATCH --constraint="volta32gb"\n')
#file.write('#SBATCH --constraint="volta"\n')
report_info = "%s job failed; \t" % id
report_info += "log path: %s; \t" % output_path
report_info += "error record path: %s\t" % error_path
report_info += "command line path: %s\t" % batch_file
file.write('#SBATCH --comment="%s"\n' % (report_info))
with open(dependency_handler_path, 'r') as rfile:
line = rfile.readline()
while line:
file.write(line)
line = rfile.readline()
file.write("export GLOO_SOCKET_IFNAME=\nexport NCCL_SOCKET_IFNAME=\n")
file.write("module load cuda/10.2 cudnn/v7.6.5.32-cuda.10.2 gcc/7.3.0\n")
#file.write("bash /private/home/wang3702/.bashrc\n")
file.write("/private/home/wang3702/anaconda3/bin/conda init\n")
#file.write("module load anaconda3\n")
file.write("CONDA_BASE=$(conda info --base) ; source $CONDA_BASE/etc/profile.d/conda.sh\n")
if environment==0:
file.write("conda activate pytorch2\n")
else:
file.write("conda activate pytorch\n")
file.write("master_node=${SLURM_NODELIST:0:9}${SLURM_NODELIST:10:4}\n")
file.write('dist_url="tcp://"\n')
file.write("dist_url+=$master_node\n")
file.write("dist_url+=:40000\n")
file.write("export MASTER_ADDR=${SLURM_NODELIST:0:9}${SLURM_NODELIST:10:4}\n")
file.write("export MASTER_PORT=29500\n")
file.write("srun --label "+command_line + " --slurm=1 --dist_url=$dist_url &\n")
file.write("wait $!\n")
file.write("set +x \n")
file.write("echo ..::Job Finished, but No, AGI is to BE Solved::.. \n")
# signal that job is finished
os.system('sbatch ' + batch_file)
def write_slurm_sh_multi2(id,command_line, queue_name="learnfair",nodes=1,
gpu_per_node=8,wall_time=3*24*60,username="wang3702",CPU_PER_GPU=8,
gpu_memory=False,environment=0):
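    """Variant of write_slurm_sh_multi that builds MASTER_ADDR with a
    3-character node suffix and runs command_line through `srun --label`
    without appending the --slurm/--dist_url flags."""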
import time
import datetime
today = datetime.date.today()
formatted_today = today.strftime('%y%m%d')
now = time.strftime("%H:%M:%S")
dependency_handler_path = os.path.join(os.getcwd(), "ops")
dependency_handler_path = os.path.join(dependency_handler_path, "handler.txt")
run_path = os.path.join(os.getcwd(), "log")
mkdir(run_path)
run_path = os.path.abspath(run_path)
prefix = "node%d_gpu%d"%(nodes,gpu_per_node)
batch_file = os.path.join(run_path, prefix+"slurm_job_" + str(id) + ".sh")
output_path = os.path.join(run_path, prefix+"output_" + str(id) + "_" + str(formatted_today + now) + ".log")
error_path = os.path.join(run_path, prefix+"error_" + str(id) + "_" + str(formatted_today + now) + ".log")
with open(batch_file, "w") as file:
file.write("#! /bin/bash\n")#!/bin/bash
file.write("#SBATCH --job-name=%s\n" % id)
file.write("#SBATCH --output=%s\n" % output_path)
file.write("#SBATCH --error=%s\n" % error_path)
file.write("#SBATCH --partition=%s\n"%queue_name)
file.write("#SBATCH --signal=USR1@600\n")
file.write("#SBATCH --nodes=%d\n" % nodes)
file.write("#SBATCH --ntasks-per-node=%d\n" % 1)
file.write("#SBATCH --mem=%dG\n"%(350/8*gpu_per_node))
file.write("#SBATCH --gpus=%d\n" % (nodes * gpu_per_node))
file.write("#SBATCH --gpus-per-node=%d\n" % (gpu_per_node))
file.write("#SBATCH --cpus-per-task=%d\n"%(CPU_PER_GPU*gpu_per_node))
file.write("#SBATCH --time=%d\n"%wall_time)
file.write("#SBATCH --mail-user=%s@fb.com\n"%username)
file.write("#SBATCH --mail-type=FAIL\n")
file.write("#SBATCH --mail-type=end \n")
if gpu_memory is False:
file.write('#SBATCH --constraint="volta"\n')
else:
file.write('#SBATCH --constraint="volta32gb"\n')
report_info = "%s job failed; \t" % id
report_info += "log path: %s; \t" % output_path
report_info += "error record path: %s\t" % error_path
report_info += "command line path: %s\t" % batch_file
file.write('#SBATCH --comment="%s"\n' % (report_info))
with open(dependency_handler_path, 'r') as rfile:
line = rfile.readline()
while line:
file.write(line)
line = rfile.readline()
file.write("export GLOO_SOCKET_IFNAME=\nexport NCCL_SOCKET_IFNAME=\n")
file.write("module load cuda/10.2 cudnn/v7.6.5.32-cuda.10.2 gcc/7.3.0\n")
#file.write("bash /private/home/wang3702/.bashrc\n")
# file.write("/private/home/wang3702/anaconda3/bin/conda init\n")
file.write("/private/home/wang3702/anaconda3/bin/conda init\n")
#file.write("module load anaconda3\n")
file.write("CONDA_BASE=$(conda info --base) ; source $CONDA_BASE/etc/profile.d/conda.sh\n")
if environment==0:
file.write("conda activate pytorch2\n")
else:
file.write("conda activate pytorch\n")
#file.write("source activate\n")
file.write("master_node=${SLURM_NODELIST:0:9}${SLURM_NODELIST:10:3}\n")
file.write('dist_url="tcp://"\n')
file.write("dist_url+=$master_node\n")
file.write("dist_url+=:40000\n")
file.write("export MASTER_ADDR=${SLURM_NODELIST:0:9}${SLURM_NODELIST:10:3}\n")
file.write("export MASTER_PORT=29500\n")
file.write("srun --label "+command_line + " &\n")
file.write("wait $!\n")
file.write("set +x \n")
file.write("echo ..::Job Finished, but No, AGI is to BE Solved::.. \n")
# signal that job is finished
os.system('sbatch ' + batch_file)
def write_slurm_sh_faster(id,command_line, queue_name="learnfair",nodes=1,
gpu_per_node=8,wall_time=3*24*60,username="wang3702",CPU_PER_GPU=8,
gpu_memory=False,environment=0):
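    """Write a single SLURM batch script that requests one task per GPU
    (ntasks-per-node = gpu_per_node) and executes command_line directly,
    without srun or a distributed rendezvous setup."""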
import time
import datetime
today = datetime.date.today()
formatted_today = today.strftime('%y%m%d')
now = time.strftime("%H:%M:%S")
dependency_handler_path = os.path.join(os.getcwd(), "ops")
dependency_handler_path = os.path.join(dependency_handler_path, "handler.txt")
run_path = os.path.join(os.getcwd(), "log")
mkdir(run_path)
run_path = os.path.abspath(run_path)
batch_file = os.path.join(run_path, "slurm_job_" + str(id) + ".sh")
output_path = os.path.join(run_path, "output_" + str(id) + "_" + str(formatted_today + now) + ".log")
error_path = os.path.join(run_path, "error_" + str(id) + "_" + str(formatted_today + now) + ".log")
with open(batch_file, "w") as file:
file.write("#!/bin/bash\n")#!/bin/bash
file.write("#SBATCH --job-name=%s\n" % id)
file.write("#SBATCH --output=%s\n" % output_path)
file.write("#SBATCH --error=%s\n" % error_path)
file.write("#SBATCH --partition=%s\n"%queue_name)
file.write("#SBATCH --signal=USR1@600\n")
file.write("#SBATCH --nodes=%d\n" % nodes)
file.write("#SBATCH --ntasks-per-node=%d\n" % gpu_per_node)
file.write("#SBATCH --mem=%dG\n"%(int(350/8*gpu_per_node)))
file.write("#SBATCH --gpus=%d\n" % (nodes * gpu_per_node))
file.write("#SBATCH --cpus-per-task=%d\n"%(CPU_PER_GPU))
file.write("#SBATCH --time=%d\n"%wall_time)
file.write("#SBATCH --mail-user=%s@fb.com\n"%username)
file.write("#SBATCH --mail-type=FAIL\n")
file.write("#SBATCH --mail-type=end \n")
if gpu_memory:
file.write('#SBATCH --constraint="volta32gb"\n')
else:
file.write('#SBATCH --constraint="volta"\n')
report_info = "%s job failed; \t" % id
report_info += "log path: %s; \t" % output_path
report_info += "error record path: %s\t" % error_path
report_info += "command line path: %s\t" % batch_file
file.write('#SBATCH --comment="%s"\n' % (report_info))
with open(dependency_handler_path, 'r') as rfile:
line = rfile.readline()
while line:
file.write(line)
line = rfile.readline()
file.write("module load cuda/10.2 cudnn/v7.6.5.32-cuda.10.2 gcc/7.3.0\n")
#file.write("bash /private/home/wang3702/.bashrc\n")
file.write("/private/home/wang3702/anaconda3/bin/conda init\n")
#file.write("module load anaconda3\n")
file.write("CONDA_BASE=$(conda info --base) ; source $CONDA_BASE/etc/profile.d/conda.sh\n")
if environment==0:
file.write("conda activate pytorch2\n")
else:
file.write("conda activate pytorch\n")
#file.write("source activate\n")
file.write(command_line + " &\n")
file.write("wait $!\n")
file.write("set +x \n")
file.write("echo ..::Job Finished, but No, AGI is to BE Solved::.. \n")
# signal that job is finished
os.system('sbatch ' + batch_file)
def write_slurm_sh(id,command_line, queue_name="learnfair",nodes=1,
gpu_per_node=8,wall_time=3*24*60,username="wang3702",CPU_PER_GPU=10):
"""
Args:
id: running id
command_line: command line
outlog_path: saving path
Returns:
"""
import time
import datetime
today = datetime.date.today()
formatted_today = today.strftime('%y%m%d')
now = time.strftime("%H:%M:%S")
dependency_handler_path = os.path.join(os.getcwd(),"ops")
dependency_handler_path = os.path.join(dependency_handler_path,"handler.txt")
run_path = os.path.join(os.getcwd(),"log")
mkdir(run_path)
run_path = os.path.abspath(run_path)
batch_file = os.path.join(run_path,"slurm_job_"+str(id)+".sh")
output_path = os.path.join(run_path,"output_"+str(id)+"_"+str(formatted_today+now)+".log")
error_path = os.path.join(run_path,"error_"+str(id)+"_"+str(formatted_today+now)+".log")
with open(batch_file,"w") as file:
file.write("#!/bin/sh\n")
file.write("#SBATCH --job-name=%s\n"%id)
file.write("#SBATCH --output=%s\n"%output_path)
file.write("#SBATCH --error=%s\n"%error_path)
file.write("#SBATCH --partition=%s\n"%queue_name)
file.write("#SBATCH --signal=USR1@600\n")
file.write("#SBATCH --nodes=%d\n"%nodes )
file.write("#SBATCH --ntasks-per-node=1\n")
file.write("#SBATCH --mem=350G\n")
file.write("#SBATCH --gpus=%d\n"%(nodes*gpu_per_node))
file.write("#SBATCH --gpus-per-node=%d\n" % (gpu_per_node))
file.write("#SBATCH --cpus-per-task=%d\n"%(CPU_PER_GPU*gpu_per_node))
file.write("#SBATCH --time=%d\n"%wall_time)
file.write("#SBATCH --mail-user=%s@fb.com\n"%username)
file.write("#SBATCH --mail-type=FAIL\n")
file.write("#SBATCH --mail-type=end \n")
file.write('#SBATCH --constraint="volta"\n')
report_info ="%s job failed; \t"%id
report_info += "log path: %s; \t"%output_path
report_info += "error record path: %s\t"%error_path
report_info += "command line path: %s\t"%batch_file
file.write('#SBATCH --comment="%s"\n'%(report_info))
with open(dependency_handler_path,'r') as rfile:
line = rfile.readline()
while line:
file.write(line)
line = rfile.readline()
#file.write("bash /private/home/wang3702/.bashrc\n")
# file.write("/private/home/wang3702/anaconda3/bin/conda init\n")
#file.write("CONDA_BASE=$(conda info --base) ; source $CONDA_BASE/etc/profile.d/conda.sh\n")
#file.write("module load anaconda3\n")
#file.write("conda activate pytorch2\n")
file.write("module load cuda/10.2 cudnn/v7.6.5.32-cuda.10.2 gcc/7.3.0\n")
file.write("/private/home/wang3702/anaconda3/bin/conda init\n")
file.write("CONDA_BASE=$(conda info --base) ; source $CONDA_BASE/etc/profile.d/conda.sh\n")
file.write("conda activate pytorch2\n")
file.write(command_line+" &\n")
file.write("wait $!\n")
file.write("set +x \n")
file.write("echo ..::Job Finished, but No, AGI is to BE Solved::.. \n")
# signal that job is finished
os.system('sbatch ' + batch_file)
parser = argparse.ArgumentParser(description='slurm job submission')
parser.add_argument('--data', default="imagenet", type=str, metavar='DIR',
help='path to dataset')
parser.add_argument("--mode",type=int,default=0,help="control mode for training")
parser.add_argument("--type",type=int,default=0,help="running type control")
parser.add_argument("--roi",type=int,default = 20, help="number of rois sampled here")
parser.add_argument("--queue",type=int,default=0, help="queue specified list")
parser.add_argument("-F",type=str, default=None, help="resume path for running again")
parser.add_argument("--comment", type=str,default=None,help="adding comment for script names")
parser.add_argument("--node",type=int,default=1,help="nodes needed for training")
parser.add_argument("--gpu",type=int,default=8,help="number of gpus per node")
args = parser.parse_args()
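# Map the numeric --queue flag onto a SLURM partition name:
# 0=learnfair, 1=dev, 2=scavenge, 3=priority, 4=learnlab, 5=devlab, 6=prioritylab.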
if args.queue ==0:
queue_name = "learnfair"
elif args.queue ==1:
queue_name = "dev"
elif args.queue ==2:
queue_name = "scavenge"
elif args.queue ==3:
queue_name = 'priority'
elif args.queue ==4:
queue_name = 'learnlab'
elif args.queue==5:
queue_name = 'devlab'
elif args.queue==6:
queue_name = 'prioritylab'
dump_path= os.path.join(os.getcwd(),"swav_dump_100")
from ops.os_operation import mkdir
mkdir(dump_path)
import time
import datetime
today = datetime.date.today()
formatted_today = today.strftime('%y%m%d')
now = time.strftime("%H:%M:%S")
dump_path = os.path.join(dump_path, formatted_today + now)
if args.mode==1:
if args.type==0:
# command_line = "python3 main_adco.py --mode=1 --lr=0.06 --data=%s " \
# "--dist_url=tcp://localhost:10031 --epochs=100 " \
# "--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0006 " \
# "--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
# "--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
# "--moco_m=0.999 --moco_k=65536 --moco_t=0.2 --choose=0,1,2,3,4,5,6,7 " \
# "--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=57" % args.data
# write_slurm_sh("baseline_sym_moco_lr0.06_proj", command_line, queue_name)
command_line = "python3 main_adco.py --mode=1 --lr=0.06 --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0006 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=0.2 --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=16 --mask_size=32 " \
"--num_roi=1 " % args.data
write_slurm_sh("baseline_sym_moco_lr0.06", command_line, queue_name)
# command_line = "python3 main_adco.py --mode=1 --lr=0.03 --data=%s " \
# "--dist_url=tcp://localhost:10031 --epochs=100 " \
# "--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
# "--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
# "--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
# "--moco_m=0.999 --moco_k=65536 --moco_t=0.2 --choose=0,1,2,3,4,5,6,7 " \
# "--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=16 --mask_size=32 " \
# "--num_roi=1 --img_size=96 " % args.data
# write_slurm_sh("baseline_sym_moco_input96", command_line, queue_name)
        #running all the baselines with 100 epochs
        #baseline moco
# command_line = "python3 main_adco.py --mode=1 --lr=0.03 --data=%s " \
# "--dist_url=tcp://localhost:10031 --epochs=100 " \
# "--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
# "--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
# "--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
# "--moco_m=0.999 --moco_k=65536 --moco_t=0.2 --choose=0,1,2,3,4,5,6,7 " \
# "--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=35 --mask_size=32 " \
# " --num_roi=1 " % args.data
# write_slurm_sh("baseline_sym_mocobn_100", command_line, queue_name)
# #moco multi baseline
# command_line = "python3 main_adco.py --mode=1 --lr=0.03 --data=%s " \
# "--dist_url=tcp://localhost:10031 --epochs=100 " \
# "--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
# "--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
# "--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
# "--moco_m=0.999 --moco_k=65536 --moco_t=0.2 --choose=0,1,2,3,4,5,6,7 " \
# "--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=18 --nmb_crops 2 6 " \
# "--size_crops 224 96 --min_scale_crops 0.14 0.05 --max_scale_crops 1.0 0.14 " % (args.data)
# write_slurm_sh("multi_moco_baseline_100_new", command_line, queue_name)
# # #moco multi sym baseline
# command_line = "python3 main_adco.py --mode=1 --lr=0.03 --data=%s " \
# "--dist_url=tcp://localhost:10031 --epochs=100 " \
# "--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
# "--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
# "--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
# "--moco_m=0.999 --moco_k=65536 --moco_t=0.2 --choose=0,1,2,3,4,5,6,7 " \
# "--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=20 --nmb_crops 4 " \
# "--size_crops 224 --min_scale_crops 0.14 --max_scale_crops 1.0 " % (args.data)
# write_slurm_sh("2key_multi_moco_baseline_4_224", command_line, queue_name)
# #swav multi baseline
# command_line = "python3 main_adco.py --mode=5 --type=0 --data=%s --epochs 100 --lr=0.6 " \
# "--lr_final 0.0006 --batch_size=256 --warmup_epochs 0 --freeze_prototypes_niters 5005 " \
# "--queue_length 3840 --epoch_queue_starts 15 --dist_url=tcp://localhost:10031 " \
# "--knn_batch_size=256 --cos=1 --momentum=0.9 --weight_decay=1e-6 --world_size=1 " \
# "--rank=0 --multiprocessing_distributed=1 --moco_dim=128 --moco_k=3000 --moco_t=0.1 " \
# "--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --nmb_crops 2 " \
# "--size_crops 224 --min_scale_crops 0.14 --max_scale_crops 1.0 --dump_path %s " % (args.data,dump_path)
# write_slurm_sh("swav_baseline_100_only224", command_line, queue_name)
# command_line = "python3 main_adco.py --mode=5 --type=0 --data=%s --epochs 100 --lr=0.6 " \
# "--lr_final 0.0006 --batch_size=256 --warmup_epochs 0 --freeze_prototypes_niters 5005 " \
# "--queue_length 3840 --epoch_queue_starts 15 --dist_url=tcp://localhost:10031 " \
# "--knn_batch_size=256 --cos=1 --momentum=0.9 --weight_decay=1e-6 --world_size=1 " \
# "--rank=0 --multiprocessing_distributed=1 --moco_dim=128 --moco_k=3000 --moco_t=0.1 " \
# "--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --nmb_crops 2 6 " \
# "--size_crops 224 96 --min_scale_crops 0.14 0.05 --max_scale_crops 1.0 0.14 --dump_path %s " % (
# args.data, dump_path)
# write_slurm_sh("swav_baseline_100", command_line, queue_name)
elif args.type==10:
#half dropout results
command_line = "python3 main_adco.py --mode=1 --lr=0.03 --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=0.2 --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=10 " % args.data
if args.F is not None:
resume_name = os.path.split(os.path.abspath(args.F))[1]
command_line += "--resume=%s"%args.F
write_slurm_sh("halfdropoutnew_resume%s"%resume_name, command_line, queue_name)
else:
write_slurm_sh("halfdropoutnew", command_line, queue_name)
elif args.type==11:
        # confirm that the overlap region really does not work
for mask_size in [96, 160]:
command_line = "python3 main_adco.py --mode=1 --lr=0.03 --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=200 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=0.2 --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=11 --shift_ratio=0 " \
" --mask_size=%d " % (args.data,mask_size)
write_slurm_sh("type11_roimatch_%s"%mask_size, command_line, queue_name)
elif args.type==13:
for mask_size in [96,160]:
command_line = "python3 main_adco.py --mode=1 --lr=0.03 --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=200 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=0.2 --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=13 " \
"--mask_size=%d "%(args.data,mask_size)
write_slurm_sh("type13_singleroi_vs_global_%d"%mask_size,command_line,queue_name)
time.sleep(1)
elif args.type==14:
#roi vs global
for mask_size in [96,160]:
command_line = "python3 main_adco.py --mode=1 --lr=0.03 --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=200 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=0.2 --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=14 " \
"--mask_size=%d "%(args.data,mask_size)
write_slurm_sh("type14_singleroi_vs_global_%d"%mask_size,command_line,queue_name)
elif args.type==16:
for mask_size in [96,128,160]:
command_line = "python3 main_adco.py --mode=1 --lr=0.03 --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=200 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=0.2 --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=16 " \
"--mask_size=%d --num_roi=10 "%(args.data,mask_size)
write_slurm_sh("type16_roi+global_vs_global_%d"%mask_size,command_line,queue_name)
elif args.type==-16:
command_line = "python3 main_adco.py --mode=1 --lr=0.03 --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=200 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=0.2 --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=16 --mask_size=32 --num_roi=1 " % args.data
if args.F is not None:
resume_name = os.path.split(os.path.abspath(args.F))[1]
command_line += " --resume=%s"%args.F
write_slurm_sh("baseline_sym_moco_resume%s"%resume_name, command_line, queue_name)
else:
write_slurm_sh("baseline_sym_moco", command_line,queue_name)
elif args.type==17:
command_line = "python3 main_adco.py --mode=1 --lr=0.03 --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=200 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=0.2 --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=17 --mask_size=32" \
" --num_roi=%d" % (args.data,args.roi)
write_slurm_sh("type17_randroi_%d"%args.roi, command_line,queue_name)
elif args.type==-17:
        # roi vs roi, with global as negative
for roi in [10,20,50,100]:
for mask_size in [32, 96, 160, 196]:
command_line = "python3 main_adco.py --mode=1 --lr=0.03 --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=200 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=0.2 --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=17 --mask_size=%d" \
" --num_roi=%d" % (args.data,mask_size, roi)
write_slurm_sh("type17_randroi_%d_masksize_%d" % (roi,mask_size), command_line,queue_name)
elif args.type==18:
command_line = "python3 main_adco.py --mode=1 --lr=0.03 --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=200 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=0.2 --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=18 --nmb_crops 2 6 " \
"--size_crops 224 96 --min_scale_crops 0.14 0.05 --max_scale_crops 1.0 0.14 "% (args.data)
if args.F is not None:
resume_name = os.path.split(os.path.abspath(args.F))[1]
command_line += "--resume=%s"%args.F
write_slurm_sh("multi_moco_baseline_resume%s"%resume_name, command_line, queue_name)
else:
write_slurm_sh("multi_moco_baseline" , command_line, queue_name)
elif args.type==19:
for roi in [20]:
for mask_size in [32,160]:
command_line = "python3 main_adco.py --mode=1 --lr=0.03 --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=200 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=0.2 --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=19 --mask_size=%d" \
" --num_roi=%d" % (args.data,mask_size, roi)
write_slurm_sh("type19_randroi_%d_masksize_%d" % (roi,mask_size), command_line,queue_name)
elif args.type==20:
command_line = "python3 main_adco.py --mode=1 --lr=0.03 --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=200 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=0.2 --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=20 --nmb_crops 2 6 " \
"--size_crops 224 96 --min_scale_crops 0.14 0.05 --max_scale_crops 1.0 0.14 "% (args.data)
if args.F is not None:
resume_name = os.path.split(os.path.abspath(args.F))[1]
command_line += " --resume=%s"%args.F
write_slurm_sh("2key_multi_moco_baseline_correct_resume%s"%resume_name, command_line, queue_name)
else:
write_slurm_sh("2key_multi_moco_baseline_correct", command_line, queue_name)
elif args.type==21:
for roi in [20]:
for mask_size in [96]:
command_line = "python3 main_adco.py --mode=1 --lr=0.09 --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=200 " \
"--batch_size=768 --knn_batch_size=256 --cos=1 --lr_final=0.0009 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=0.2 --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=21 --mask_size=%d" \
" --num_roi=%d" % (args.data,mask_size, roi)
write_slurm_sh("type21_randroi_%d_masksize_%d" % (roi,mask_size), command_line,queue_name)
elif args.type==22:
for roi in [50]:
for mask_size in [96]:
command_line = "python3 main_adco.py --mode=1 --lr=0.03 --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=200 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=0.2 --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=22 --mask_size=%d" \
" --num_roi=%d" % (args.data, mask_size, roi)
write_slurm_sh("type22_randroi_%d_masksize_%d" % (roi,mask_size), command_line,queue_name)
elif args.type==23:
command_line = "python3 main_adco.py --mode=1 --lr=0.03 --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=200 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=0.2 --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=23 --nmb_crops 2 2 2 2 2 2 2 2" \
" --size_crops 96 112 128 144 160 176 192 208 " % args.data
write_slurm_sh("type23_specifyroi", command_line, queue_name)
elif args.type==-23:
# command_line = "python3 main_adco.py --mode=1 --lr=0.03 --data=%s " \
# "--dist_url=tcp://localhost:10031 --epochs=200 " \
# "--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
# "--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
# "--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
# "--moco_m=0.999 --moco_k=65536 --moco_t=0.2 --choose=0,1,2,3,4,5,6,7 " \
# "--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=23 --nmb_crops 6" \
# " --size_crops 96 " % args.data
# write_slurm_sh("type23_specifyroi_6_96", command_line, queue_name)
min_scale = 64
max_scale = 224
divide_list = [2,4,8,16,32]
pick_times = [1,2,3]
for pick_time in pick_times:
for divide in divide_list:
check_list = ""
num_list = ""
current_scale = min_scale
while current_scale<max_scale:
check_list+=str(current_scale)+" "
num_list+=str(pick_time)+" "
current_scale+=divide
print(check_list)
command_line = "python3 main_adco.py --mode=1 --lr=0.03 --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=200 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=0.2 --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=23 --nmb_crops %s " \
" --size_crops %s " % (args.data,num_list,check_list)
write_slurm_sh("type23_specifyroi_%d_%d"%(pick_time,divide), command_line, queue_name)
elif args.type==24:
for alpha in [0.5, 1.0, 2.0]:
for local_t in [0.1,0.2,0.3]:
command_line = "python3 main_adco.py --mode=1 --lr=0.03 --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=0.2 --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=24 --nmb_crops 1 6" \
" --size_crops 224 96 --local_t=%.4f --alpha=1.0 " % (args.data,local_t)
write_slurm_sh("type24_lg_t_%.3f_alpha_%.2f"%(local_t,alpha), command_line, queue_name)
elif args.type==25:
for alpha in [0.5]:
for local_t in [0.2]:
command_line = "python3 main_adco.py --mode=1 --lr=0.03 --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=0.2 --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=24 --nmb_crops 1 6" \
" --size_crops 224 96 --local_t=%.4f --alpha=%.2f " % (args.data, local_t,alpha)
write_slurm_sh("type25_lgq_t_%.3f_alpha_%.2f" %(local_t,alpha), command_line, queue_name)
elif args.type==26:
for alpha in [0.5,1.0]:
for local_t in [0.2]:
command_line = "python3 main_adco.py --mode=1 --lr=0.03 --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=0.2 --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=26 --nmb_crops 1 6" \
" --size_crops 224 96 --local_t=%.4f --alpha=%.2f " % (args.data, local_t,alpha)
write_slurm_sh("type26_lgq_t_%.3f_alpha_%.2f" %(local_t,alpha), command_line, queue_name)
elif args.type == 27:
min_scale = 96
max_scale = 224
divide_list = [16]
pick_times = [1]
for learning_rate in [0.05]:#[0.02,0.03,0.04,0.05,0.06,0.1,0.15]:
for pick_time in pick_times:
for divide in divide_list:
check_list = ""
num_list = ""
current_scale = min_scale
while current_scale < max_scale:
check_list += str(current_scale) + " "
num_list += str(pick_time) + " "
current_scale += divide
print(check_list)
print(num_list)
for alpha in [0.1,0.15,0.2,0.3]:#[0.3, 0.5, 1.0]:
for local_t in [0.12,0.15,0.18]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=27 --nmb_crops 1 %s" \
" --size_crops 224 %s --local_t=%.4f --alpha=%.2f " % \
(learning_rate,args.data, local_t,num_list, check_list, local_t, alpha)
write_slurm_sh("type27_lgq_t_%.3f_alpha_%.2f_%d_%d_lr%.4f" % (local_t, alpha, pick_time, divide,learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type == -270:
for num_roi in [6,10,20,30]:
for crop_size in [64, 96, 128, 160, 192]:
for learning_rate in [0.05]:
for alpha in [0.1]: # [0.3, 0.5, 1.0]:
for local_t in [0.18]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=27 --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --alpha=%.2f " % \
(learning_rate, args.data,local_t, num_roi,crop_size, local_t, alpha)
write_slurm_sh(
"type27crop_lgq_t_%.3f_alpha_%.2f_%d_%d_lr%.4f" % (local_t, alpha, num_roi,crop_size, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==-271:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.05]:
for alpha in [0.1]: # [0.3, 0.5, 1.0]:
for local_t in [0.18,0.2]:
for moco_dim in [256,512]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=%d " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=27 --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --alpha=%.2f " % \
(learning_rate, args.data,moco_dim, local_t, num_roi, crop_size, local_t, alpha)
write_slurm_sh(
"type27dim_lgq_t_%.3f_alpha_%.2f_%d_%d_lr%.4f_dim%d" % (
local_t, alpha, num_roi, crop_size, learning_rate,moco_dim),
command_line, queue_name)
time.sleep(1)
elif args.type == -27:
        # compute the 6x96 baseline for type 27 as a direct comparison with SwAV
for learning_rate in [0.05]:
for alpha in [0.1]: # [0.3, 0.5, 1.0]:
for local_t in [0.18]:
for moco_dim in [128,256,512]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=27 --nmb_crops 1 6" \
" --size_crops 224 96 --local_t=%.4f --alpha=%.2f " % \
(learning_rate, args.data, local_t,local_t, alpha)
write_slurm_sh("type27baseline_lgq_t_%.3f_alpha_%.2f_6_96_lr%.4f" % (local_t, alpha,learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type == 28:
command_line = "python3 main_adco.py --mode=1 --lr=0.03 --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=0.2 --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=28 --nmb_crops 2 6 " \
"--size_crops 224 96 --min_scale_crops 0.14 0.05 --max_scale_crops 1.0 0.14 " % (args.data)
write_slurm_sh("type28_small_inside", command_line, queue_name)
elif args.type==29:
for learning_rate in [0.03]:
for alpha in [0.5,1.0]:
for local_t in [0.2]:
command_line = "python3 main_adco.py --mode=1 --lr=%.2f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=%.5f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=0.2 --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=29 --nmb_crops 1 6" \
" --size_crops 224 96 --local_t=%.4f --alpha=%.2f " \
"" % (learning_rate,args.data, learning_rate/100,local_t, alpha)
write_slurm_sh("type29_lgq_t_%.3f_alpha_%.2f_lr_%.4f" % (local_t, alpha,learning_rate), command_line, queue_name)
elif args.type==30:
for learning_rate in [0.03]:
for alpha in [0.5,1.0]:
for local_t in [0.2]:
command_line = "python3 main_adco.py --mode=1 --lr=%.2f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=%.5f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=0.2 --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=30 --nmb_crops 6 " \
" --size_crops 96 --local_t=%.4f --alpha=%.2f " \
"" % (learning_rate,args.data, learning_rate/100,local_t, alpha)
write_slurm_sh("type30_lgq_t_%.3f_alpha_%.2f_lr_%.4f" % (local_t, alpha,learning_rate), command_line, queue_name)
elif args.type==31:
for learning_rate in [0.03]:
for alpha in [0.5]:
for local_t in [0.2]:
for num_roi in [5, 10, 20]:
for mask_size in [96]:
command_line = "python3 main_adco.py --mode=1 --lr=%.2f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=%.5f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=0.2 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=31 " \
"--local_t=%.4f --alpha=%.2f --num_roi=%d --mask_size=%d " \
"" % (learning_rate, args.data, learning_rate / 100,
local_t, alpha,num_roi,mask_size)
write_slurm_sh("type31_lgq_t_%.3f_alpha_%.2f_lr_%.4f_roi%d_mask%d" %
(local_t, alpha, learning_rate,num_roi,mask_size),
command_line, queue_name)
elif args.type==32:
for learning_rate in [0.03]:
for alpha in [0.5]:
for local_t in [0.2]:
for num_roi in [5, 10, 20]:
for mask_size in [96]:
command_line = "python3 main_adco.py --mode=1 --lr=%.2f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=%.5f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=0.2 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=32 " \
"--local_t=%.4f --alpha=%.2f --num_roi=%d --mask_size=%d " \
"" % (learning_rate, args.data, learning_rate / 100,
local_t, alpha,num_roi,mask_size)
write_slurm_sh("type32_lgq_t_%.3f_alpha_%.2f_lr_%.4f_roi%d_mask%d" %
(local_t, alpha, learning_rate,num_roi,mask_size),
command_line, queue_name)
elif args.type==33:
for learning_rate in [0.03,0.04,0.05,0.06,0.09,0.12]:
for alpha in [0.5,1.0,2.0,5.0]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=%.4f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=0.2 --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=33 --nmb_crops 2 6 " \
"--size_crops 224 96 --min_scale_crops 0.14 0.05 --max_scale_crops 1.0 0.14 " \
"--alpha=%.4f " \
" " % (learning_rate,args.data,learning_rate/100,alpha)
write_slurm_sh("multimoco_alpha_%.2f_lr_%.4f"%(alpha,learning_rate), command_line, queue_name)
elif args.type==-28:
for learning_rate in [0.06]:
for alpha in [1.0]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=%.4f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=0.2 --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=28 --nmb_crops 2 6 " \
"--size_crops 224 96 --min_scale_crops 0.14 0.05 --max_scale_crops 1.0 0.14 " \
"--alpha=%.4f " \
" " % (learning_rate,args.data,learning_rate/100,alpha)
write_slurm_sh("multimocoinside_alpha_%.2f_lr_%.4f"%(alpha,learning_rate), command_line, queue_name)
elif args.type==34:
min_scale = 96
max_scale = 224
divide_list = [16]
pick_times = [1]
for learning_rate in [0.04, 0.05]:
for pick_time in pick_times:
for divide in divide_list:
check_list = ""
num_list = ""
current_scale = min_scale
while current_scale < max_scale:
check_list += str(current_scale) + " "
num_list += str(pick_time) + " "
current_scale += divide
print(check_list)
print(num_list)
for alpha in [0.1, 0.3, 0.5,1.0]:
for local_t in [0.2]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=0.2 --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=34 --nmb_crops 1 %s" \
" --size_crops 224 %s --local_t=%.4f --alpha=%.2f " % \
(learning_rate, args.data, num_list, check_list, local_t, alpha)
write_slurm_sh("type34_lgq_t_%.3f_alpha_%.2f_%d_%d_lr%.4f" % (
local_t, alpha, pick_time, divide, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type == 36:
min_scale = 96
max_scale = 224
divide_list = [16]
pick_times = [1]
for learning_rate in [0.04,0.05]:#[0.02,0.03,0.04,0.05,0.06,0.1,0.15]:
for pick_time in pick_times:
for divide in divide_list:
check_list = ""
num_list = ""
current_scale = min_scale
while current_scale < max_scale:
check_list += str(current_scale) + " "
num_list += str(pick_time) + " "
current_scale += divide
print(check_list)
print(num_list)
for alpha in [0.1]:#[0.3, 0.5, 1.0]:
for local_t in [0.2]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=36 --nmb_crops 1 %s" \
" --size_crops 224 %s --local_t=%.4f --alpha=%.2f " % \
(learning_rate,args.data, local_t,num_list, check_list, local_t, alpha)
write_slurm_sh("type36_lgq_t_%.3f_alpha_%.2f_%d_%d_lr%.4f" % (local_t, alpha, pick_time, divide,learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==37:
for learning_rate in [0.03,0.04,0.05,0.06]:
for alpha in [0.1,0.3,0.5,1.0]: # [0.3, 0.5, 1.0]:
for local_t in [0.2]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=37 --nmb_crops 1 6" \
" --size_crops 224 96 --local_t=%.4f --alpha=%.2f " % \
(learning_rate, args.data, local_t,local_t, alpha)
write_slurm_sh("type37baseline_lgq_t_%.3f_alpha_%.2f_6_96_lr%.4f" % (local_t, alpha,learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==38:
min_scale = 96
max_scale = 224
divide_list = [16]
pick_times = [1]
for learning_rate in [0.05]: # [0.02,0.03,0.04,0.05,0.06,0.1,0.15]:
for pick_time in pick_times:
for divide in divide_list:
check_list = ""
num_list = ""
current_scale = min_scale
while current_scale < max_scale:
check_list += str(current_scale) + " "
num_list += str(pick_time) + " "
current_scale += divide
print(check_list)
print(num_list)
for alpha in [0]: #[0.1, 0.3, 0.5, 1.0]: # [0.3, 0.5, 1.0]:
for local_t in [0.2]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=38 --nmb_crops 1 %s" \
" --size_crops 224 %s --local_t=%.4f --alpha=%.2f " % \
(learning_rate, args.data, local_t,"", "", local_t, alpha)
write_slurm_sh("type38_lgq_t_%.3f_alpha_%.2f_%d_%d_lr%.4f" % (
local_t, alpha, pick_time, divide, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==-38:
for learning_rate in [0.05]:
for alpha in [0.1,0.3,0.5,1.0]: # [0.3, 0.5, 1.0]:
for local_t in [0.2]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=38 --nmb_crops 1 6" \
" --size_crops 224 96 --local_t=%.4f --alpha=%.2f " % \
(learning_rate, args.data, local_t,local_t, alpha)
write_slurm_sh("type38baseline_lgq_t_%.3f_alpha_%.2f_6_96_lr%.4f" % (local_t, alpha,learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==39:
for learning_rate in [0.05]:
for alpha in [0.1]: # [0.3, 0.5, 1.0]:
for local_t in [0.2]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=39 --nmb_crops 1 6" \
" --size_crops 224 96 --local_t=%.4f --alpha=%.2f " % \
(learning_rate, args.data, local_t,local_t, alpha)
write_slurm_sh("type39baseline_lgq_t_%.3f_alpha_%.2f_6_96_lr%.4f" % (local_t, alpha,learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==40:
for learning_rate in [0.05]:
for alpha in [0.5]: # [0.3, 0.5, 1.0]:
for local_t in [0.2]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=40 --nmb_crops 1 6" \
" --size_crops 224 96 --local_t=%.4f --alpha=%.2f " % \
(learning_rate, args.data, local_t,local_t, alpha)
write_slurm_sh("type40baseline_lgq_t_%.3f_alpha_%.2f_6_96_lr%.4f" % (local_t, alpha,learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==41:
for mask_size in [96]:
command_line = "python3 main_adco.py --mode=1 --lr=0.03 --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=0.2 --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=41 " \
"--mask_size=%d "%(args.data,mask_size)
write_slurm_sh("type41_singleroi_vs_global_%d"%mask_size,command_line,queue_name)
elif args.type==42:
for learning_rate in [0.05]:
for alpha in [0.1,0.5]: # [0.3, 0.5, 1.0]:
for local_t in [0.15,0.2]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=42 --nmb_crops 1 6" \
" --size_crops 224 96 --local_t=%.4f --alpha=%.2f " % \
(learning_rate, args.data, local_t,local_t, alpha)
write_slurm_sh("type42baseline_lgq_t_%.3f_alpha_%.2f_6_96_lr%.4f" % (local_t, alpha,learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==43:
for learning_rate in [0.05]:
for alpha in [0.1,0.5]: # [0.3, 0.5, 1.0]:
for local_t in [0.15,0.2]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=43 --nmb_crops 1 6" \
" --size_crops 224 96 --local_t=%.4f --alpha=%.2f " % \
(learning_rate, args.data, local_t,local_t, alpha)
write_slurm_sh("type43baseline_lgq_t_%.3f_alpha_%.2f_6_96_lr%.4f" % (local_t, alpha,learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type == 44:
# for num_roi in [6]:
# for crop_size in [96]:
# for learning_rate in [0.05]:
# for alpha in [0.1]: # [0.3, 0.5, 1.0]:
# for local_t in [0.15, 0.18, 0.2]:
# for sample_ratio in [2,4]:
# command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
# "--dist_url=tcp://localhost:10031 --epochs=100 " \
# "--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
# "--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
# "--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
# "--moco_m=0.999 --moco_k=65536 --moco_t=%.4f --choose=0,1,2,3,4,5,6,7 " \
# "--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=44 --nmb_crops 1 %d" \
# " --size_crops 224 %d --local_t=%.4f --alpha=%.2f --sample_ratio=%d " % \
# (learning_rate, args.data,local_t, num_roi,crop_size, local_t, alpha,sample_ratio)
# write_slurm_sh(
# "type44crop_lgq_t_%.3f_alpha_%.2f_%d_%d_lr%.4f_ratio%d" % (local_t, alpha, num_roi,crop_size, learning_rate,sample_ratio),
# command_line, queue_name)
# time.sleep(1)
for num_roi in [6]:
for crop_size in [96,192]:
for learning_rate in [0.03,0.05,0.06]:
for alpha in [0.1,0.3,0.5,1.0]: # [0.3, 0.5, 1.0]:
for local_t in [0.20]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=44 --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --alpha=%.2f " % \
(learning_rate, args.data,local_t, num_roi,crop_size, local_t, alpha)
write_slurm_sh(
"type44_lgq_t_%.3f_alpha_%.2f_%d_%d_lr%.4f" % (local_t, alpha, num_roi,crop_size, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==-44:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.05]:
for alpha in [0.1,0.5]: # [0.3, 0.5, 1.0]:
for local_t in [0.20]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=44 --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --alpha=%.2f --align=1 " % \
(learning_rate, args.data,local_t, num_roi,crop_size, local_t, alpha)
write_slurm_sh(
"type44align_lgq_t_%.3f_alpha_%.2f_%d_%d_lr%.4f" % (local_t, alpha, num_roi,crop_size, learning_rate),
command_line, queue_name)
time.sleep(1)
    elif args.type in (45, 46):
for crop_size in [96]:
for learning_rate in [0.03,0.04,0.05]:
for alpha in [0.1,0.3,0.5,1,2]:
for local_t in [0.2]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=%d --mask_size %d" \
" --local_t=%.4f --alpha=%.2f " % \
(learning_rate, args.data, local_t, args.type, crop_size,local_t, alpha)
write_slurm_sh(
"type%d_crop_lgq_t_%.3f_alpha_%.2f_%d_lr%.4f" % (args.type, local_t,alpha,
crop_size, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type ==47:
min_scale = 96
max_scale = 224
divide_list = [16]
pick_times = [1]
for learning_rate in [0.03,0.05]: # [0.02,0.03,0.04,0.05,0.06,0.1,0.15]:
for pick_time in pick_times:
for divide in divide_list:
check_list = ""
num_list = ""
current_scale = min_scale
while current_scale < max_scale:
check_list += str(current_scale) + " "
num_list += str(pick_time) + " "
current_scale += divide
print(check_list)
print(num_list)
for alpha in [0.1,0.5,1.0]: # [0.1, 0.3, 0.5, 1.0]: # [0.3, 0.5, 1.0]:
for local_t in [0.2]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=47 " \
" --size_crops 224 %s --local_t=%.4f --alpha=%.2f " % \
(learning_rate, args.data, local_t, check_list, local_t, alpha)
write_slurm_sh("type47_lgq_t_%.3f_alpha_%.2f_%d_%d_lr%.4f" % (
local_t, alpha, pick_time, divide, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type ==49:
min_scale = 96
max_scale = 224
divide_list = [2,4,8,16,32]
pick_times = [1]
for learning_rate in [0.06]: # [0.02,0.03,0.04,0.05,0.06,0.1,0.15]:
for pick_time in pick_times:
for divide in divide_list:
check_list = ""
num_list = ""
current_scale = min_scale
while current_scale < max_scale:
check_list += str(current_scale) + " "
num_list += str(pick_time) + " "
current_scale += divide
print(check_list)
print(num_list)
for alpha in [0]: # [0.3, 0.5, 1.0]:
for local_t in [0.2]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=49 --nmb_crops 1 %s" \
" --size_crops 224 %s --local_t=%.4f --alpha=%.2f --align=1 " % \
(learning_rate, args.data, local_t, num_list,check_list, local_t, alpha)
write_slurm_sh_faster(
"type49crop_lgq_t_%.3f_alpha_%.2f_divide%d_lr%.4f" % (
local_t, alpha, divide, learning_rate),
command_line, queue_name)
time.sleep(1)
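    # For the type-49 sweep above, the number of local-crop scales is (224 - 96) / divide:
    # divide = 2, 4, 8, 16, 32 yields 64, 32, 16, 8, 4 scales respectively (one crop per scale).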
elif args.type==-49:
        # type -49: only run in the pytorch environment, not the base environment
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.06]:
for alpha in [-0.1,-0.3,-0.5,-1]: # [0.3, 0.5, 1.0]:
for local_t in [0.18]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=49 --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --alpha=%.2f --align=1 " % \
(learning_rate, args.data,local_t, num_roi,crop_size, local_t, alpha)
write_slurm_sh_faster(
"type49align_lgq_t_%.3f_alpha_%.2f_%d_%d_lr%.4f" % (local_t, alpha, num_roi,crop_size, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==50:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.05]:
for alpha in [0, 0.1,0.5,1.0,2.0]: # [0.3, 0.5, 1.0]:
for local_t in [0.20]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=50 --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --alpha=%.2f --align=1 " % \
(learning_rate, args.data,local_t, num_roi,crop_size, local_t, alpha)
write_slurm_sh_faster(
"type50align_lgq_t_%.3f_alpha_%.2f_%d_%d_lr%.4f" % (local_t, alpha, num_roi,crop_size, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==51:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.05]:
for alpha in [0, 0.1,0.5,1.0]: # [0.3, 0.5, 1.0]:
for local_t in [0.20]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=51 --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --alpha=%.2f --align=1 " % \
(learning_rate, args.data,local_t, num_roi,crop_size, local_t, alpha)
write_slurm_sh_faster(
"type51align_lgq_t_%.3f_alpha_%.2f_%d_%d_lr%.4f" % (local_t, alpha, num_roi,crop_size, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==52:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.06]:
for alpha in [0, 0.1,0.2,0.5,1.0]: # [0.3, 0.5, 1.0]:
for local_t in [0.20]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=52 --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --alpha=%.2f --align=1 " % \
(learning_rate, args.data,local_t, num_roi,crop_size, local_t, alpha)
write_slurm_sh_faster(
"type52_1v1_lgq_t_%.3f_alpha_%.2f_%d_%d_lr%.4f" % (local_t, alpha, num_roi,crop_size, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==53:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.05]:
for alpha in [0, 0.1,0.5,1.0]: # [0.3, 0.5, 1.0]:
for local_t in [0.20]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=53 --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --alpha=%.2f --align=1 " % \
(learning_rate, args.data,local_t, num_roi,crop_size, local_t, alpha)
write_slurm_sh_faster(
"type53align_lgq_t_%.3f_alpha_%.2f_%d_%d_lr%.4f" % (local_t, alpha, num_roi,crop_size, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==54:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.05]:
for alpha in [0, 0.1,0.5,1.0]: # [0.3, 0.5, 1.0]:
for local_t in [0.15,0.18,0.20]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=54 --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --alpha=%.2f --align=1 " % \
(learning_rate, args.data,local_t, num_roi,crop_size, local_t, alpha)
write_slurm_sh_faster(
"type54align_lgq_t_%.3f_alpha_%.2f_%d_%d_lr%.4f" % (local_t, alpha, num_roi,crop_size, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==55:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.06]:
for alpha in [0.1]: # [0.3, 0.5, 1.0]:
for local_t in [0.20]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=55 --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --alpha=%.2f --align=1 " % \
(learning_rate, args.data,local_t, num_roi,crop_size, local_t, alpha)
write_slurm_sh_faster(
"type55align_lgq_t_%.3f_alpha_%.2f_%d_%d_lr%.4f" % (local_t, alpha, num_roi,crop_size, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==551:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.06]:
for alpha in [0.1]: # [0.3, 0.5, 1.0]:
for local_t in [0.20]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=55 --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --alpha=%.2f --align=1 " % \
(learning_rate, args.data,local_t, num_roi,crop_size, local_t, alpha)
write_slurm_sh_faster(
"type55align_lgq_t_%.3f_alpha_%.2f_%d_%d_lr%.4f" % (local_t, alpha, num_roi,crop_size, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==550:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.06]:
for alpha in [0.1]: # [0.3, 0.5, 1.0]:
for local_t in [0.20]:
for pred_dim in [256,1024,2048]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=55 --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --alpha=%.2f --align=1 --pred_dim=%d " % \
(learning_rate, args.data,local_t, num_roi,crop_size, local_t, alpha,pred_dim)
write_slurm_sh_faster(
"type55dim%d_lgq_t_%.3f_alpha_%.2f_%d_%d_lr%.4f" % (pred_dim,local_t, alpha, num_roi,crop_size, learning_rate),
command_line, queue_name)
time.sleep(1)
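    # The type-550 sweep above only varies --pred_dim (256/1024/2048), presumably the width of
    # the predictor head; all other hyperparameters stay at the type-55 defaults.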
elif args.type==56:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.05,0.06]:
for alpha in [0, 0.05,0.1,0.2]: # [0.3, 0.5, 1.0]:
for local_t in [0.18, 0.20]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=56 --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --alpha=%.2f --align=1 " % \
(learning_rate, args.data,local_t, num_roi,crop_size, local_t, alpha)
write_slurm_sh_faster(
"type56align_lgq_t_%.3f_alpha_%.2f_%d_%d_lr%.4f" % (local_t, alpha, num_roi,crop_size, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==58:
for learning_rate in [0.06]:
for alpha in [1.0]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=%.4f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=0.2 --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=58 --nmb_crops 2 6 " \
"--size_crops 224 96 --min_scale_crops 0.14 0.05 --max_scale_crops 1.0 0.14 " \
"--alpha=%.4f " \
" " % (learning_rate,args.data,learning_rate/100,alpha)
write_slurm_sh("multimoco_proj_alpha_%.2f_lr_%.4f"%(alpha,learning_rate), command_line, queue_name)
elif args.type==59:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=59 --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --align=1 " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, num_roi, crop_size, local_t)
write_slurm_sh_faster(
"type59_lgq_t_%.3f_%d_%d_lr%.4f" % (local_t, num_roi, crop_size, learning_rate),
command_line, queue_name)
time.sleep(1)
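    # Multi-node scaling for type 59 above: lr, batch_size and knn_batch_size all scale with
    # args.node (e.g. args.node = 2 -> lr 0.12, batch 512), while lr_final stays at 1% of the
    # scaled lr.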
elif args.type==60:
for num_roi in [3,6,10,15,20,25,30]:
for crop_size in [96]:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=60 --num_roi=%d " \
" --mask_size=%d --local_t=%.4f --align=1 " % \
                                           (learning_rate, args.data, epoch, 256,
                                            256, learning_rate / 100,  # single-node sweep: lr and batch stay unscaled
local_t, num_roi, crop_size, local_t)
write_slurm_sh_faster(
"type60_lgq_t_%.3f_%d_%d_lr%.4f" % (local_t, num_roi, crop_size, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==61:
        # indicate_list enumerates (num_roi, crop_size) string pairs for --nmb_crops / --size_crops:
        # ('', '') runs the global-crop-only baseline; ('6', '96') adds six 96px local crops.
indicate_list=[['',''],['6','96']]
for indication in indicate_list:
num_roi = indication[0]
crop_size= indication[1]
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=61 --nmb_crops 1 %s" \
" --size_crops 224 %s --local_t=%.4f --align=1 " % \
(learning_rate, args.data, epoch, 256, 256,
learning_rate / 100,
local_t, num_roi, crop_size, local_t)
write_slurm_sh_faster(
"type61_lgq_t_%.3f_%s_%s_lr%.4f" % (local_t, num_roi, crop_size, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==62:
for learning_rate in [0.06]:
            for alpha in [0, 1.0]:  # alpha=0: only the shuffling influences training
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=%.4f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=0.2 --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=62 --nmb_crops 2 6 " \
"--size_crops 224 96 --min_scale_crops 0.14 0.05 --max_scale_crops 1.0 0.14 " \
"--alpha=%.4f " \
" " % (learning_rate,args.data,learning_rate/100,alpha)
write_slurm_sh("pixelembedshufflemoco_alpha_%.2f_lr_%.4f"%(alpha,learning_rate), command_line, queue_name)
elif args.type==63:
for learning_rate in [0.06]:
            for alpha in [0, 1.0]:  # alpha=0: only the shuffling influences training
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=%.4f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=0.2 --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=63 --nmb_crops 2 6 " \
"--size_crops 224 96 --min_scale_crops 0.14 0.05 --max_scale_crops 1.0 0.14 " \
"--alpha=%.4f " \
" " % (learning_rate,args.data,learning_rate/100,alpha)
write_slurm_sh("pixelGLsync_alpha_%.2f_lr_%.4f"%(alpha,learning_rate), command_line, queue_name)
elif args.type == 64:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.06]:
for alpha in [0,0.1,0.2,0.5, 1.0]: # [0.3, 0.5, 1.0]:
for local_t in [0.20]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=64 --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --alpha=%.2f --align=1 " % \
(learning_rate, args.data, local_t, num_roi, crop_size, local_t, alpha)
write_slurm_sh_faster(
"type64align_lgq_t_%.3f_alpha_%.2f_%d_%d_lr%.4f" % (
local_t, alpha, num_roi, crop_size, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type == 65:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.06]:
for alpha in [0,0.1,0.2,0.5, 1.0]: # [0.3, 0.5, 1.0]:
for local_t in [0.20]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=65 --nmb_crops 1 %d " \
" --size_crops 224 %d --local_t=%.4f --alpha=%.2f --align=1 " % \
(learning_rate, args.data, local_t, num_roi, crop_size, local_t, alpha)
write_slurm_sh_faster(
"type65align_lgq_t_%.3f_alpha_%.2f_%d_%d_lr%.4f" % (
local_t, alpha, num_roi, crop_size, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type == 66:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.06]:
for alpha in [0, 0.1, 0.2, 0.5, 1.0]: # [0.3, 0.5, 1.0]:
for local_t in [0.20]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=66 --nmb_crops 1 %d " \
" --size_crops 224 %d --local_t=%.4f --alpha=%.2f --align=1 " % \
(learning_rate, args.data, local_t, num_roi, crop_size, local_t, alpha)
write_slurm_sh_faster(
"type66align_lgq_t_%.3f_alpha_%.2f_%d_%d_lr%.4f" % (
local_t, alpha, num_roi, crop_size, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type == 67:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.06,0.08,0.09]:
for alpha in [0, 0.1, 0.2, 0.5]: # [0.3, 0.5, 1.0]:
for local_t in [0.20]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=0.0003 " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f --choose=0,1,2,3,4,5,6,7 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=67 --nmb_crops 1 %d " \
" --size_crops 224 %d --local_t=%.4f --alpha=%.2f --align=1 " % \
(learning_rate, args.data, local_t, num_roi, crop_size, local_t, alpha)
write_slurm_sh_faster(
"type67align_lgq_t_%.3f_alpha_%.2f_%d_%d_lr%.4f" % (
local_t, alpha, num_roi, crop_size, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==68:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=68 --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --align=1 " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, num_roi, crop_size, local_t)
write_slurm_sh_faster(
"type68_lgq_t_%.3f_%d_%d_lr%.4f" % (local_t, num_roi, crop_size, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==69:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=69 --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --align=1 " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, num_roi, crop_size, local_t)
write_slurm_sh_faster(
"type69_lgq_t_%.3f_%d_%d_lr%.4f" % (local_t, num_roi, crop_size, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==70:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=70 --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --align=1 " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, num_roi, crop_size, local_t)
write_slurm_sh_faster(
"type70_lgq_t_%.3f_%d_%d_lr%.4f" % (local_t, num_roi, crop_size, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==71:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
for alpha in [0,0.05,0.1,0.2]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=71 --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --align=1 --alpha=%.4f " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, num_roi, crop_size, local_t,alpha)
write_slurm_sh_faster(
"type71_lgq_t_%.3f_%d_%d_lr%.4f_alpha%.4f" % (local_t, num_roi, crop_size, learning_rate,alpha),
command_line, queue_name)
time.sleep(1)
elif args.type==72:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=72 --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --align=1 " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, num_roi, crop_size, local_t)
write_slurm_sh_faster(
"type72_lgq_t_%.3f_%d_%d_lr%.4f" % (local_t, num_roi, crop_size, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==73:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=73 --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --align=1 " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, num_roi, crop_size, local_t)
write_slurm_sh_faster(
"type73_lgq_t_%.3f_%d_%d_lr%.4f" % (local_t, num_roi, crop_size, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==74:
for crop_size in [64,96,128,160,192]:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=74 --mask_size %d " \
" --local_t=%.4f --align=1 " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, crop_size, local_t)
write_slurm_sh_faster(
"type74_lgq_t_%.3f_mask%d_lr%.4f" % (local_t, crop_size, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==75:
for num_roi in [3,6]:
for crop_size in [96]:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=75 --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --align=1 " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, num_roi, crop_size, local_t)
write_slurm_sh_faster(
"type75_lgq_t_%.3f_%d_%d_lr%.4f" % (local_t, num_roi, crop_size, learning_rate),
command_line, queue_name)
time.sleep(1)
    elif args.type in (76, 98):
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
for shuffle_mode in range(9):
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=%d --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --align=1 --shuffle_mode=%d " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, args.type,num_roi, crop_size, local_t,shuffle_mode)
write_slurm_sh_faster(
"type%d_%d_lgq_t_%.3f_%d_%d_lr%.4f" % (args.type,shuffle_mode,local_t, num_roi, crop_size, learning_rate),
command_line, queue_name)
time.sleep(1)
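    # Types 76 and 98 above sweep every shuffle_mode in range(9); type -76 below re-runs only
    # modes 0, 1 and 7 with --mlp_bn_stat=0, presumably to isolate the effect of the MLP BN
    # statistics.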
elif args.type==-76:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
for shuffle_mode in [0,1,7]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=76 --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --align=1 --shuffle_mode=%d --mlp_bn_stat=0 " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, num_roi, crop_size, local_t,shuffle_mode)
write_slurm_sh_faster(
"type76_%d_lgq_t_%.3f_%d_%d_lr%.4f" % (shuffle_mode,local_t, num_roi, crop_size, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==77:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
for shuffle_mode in [0,1,2,3,5,6]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=77 --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --align=1 --shuffle_mode=%d " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, num_roi, crop_size, local_t,shuffle_mode)
write_slurm_sh_faster(
"type77_%d_lgq_t_%.3f_%d_%d_lr%.4f" % (shuffle_mode,local_t, num_roi, crop_size, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==78:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
for shuffle_mode in [0,1,3,4,5,7]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=78 --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --align=1 --shuffle_mode=%d " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, num_roi, crop_size, local_t,shuffle_mode)
write_slurm_sh_faster(
"type78_%d_lgq_t_%.3f_%d_%d_lr%.4f" % (shuffle_mode,local_t, num_roi, crop_size, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==79:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
for shuffle_mode in range(2,11):
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=79 --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --align=1 --shuffle_mode=%d " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, num_roi, crop_size, local_t, shuffle_mode)
write_slurm_sh_faster(
"type79_%d_lgq_t_%.3f_%d_%d_lr%.4f" % (
shuffle_mode, local_t, num_roi, crop_size, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==80:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
for shuffle_mode in [1,5,7]:
for mlp_bn_stat in [0,1]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=80 --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --align=1 --shuffle_mode=%d" \
" --mlp_bn_stat=%d " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, num_roi, crop_size, local_t, shuffle_mode,mlp_bn_stat)
write_slurm_sh_faster(
"type80_%d_lgq_t_%.3f_%d_%d_lr%.4f_bnmode%d" % (
shuffle_mode, local_t, num_roi, crop_size, learning_rate,mlp_bn_stat),
command_line, queue_name)
time.sleep(1)
elif args.type==81:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
for shuffle_mode in [1,5,7]:
for mlp_bn_stat in [1]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=81 --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --align=1 --shuffle_mode=%d" \
" --mlp_bn_stat=%d " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, num_roi, crop_size, local_t, shuffle_mode,mlp_bn_stat)
write_slurm_sh_faster(
"type81_%d_lgq_t_%.3f_%d_%d_lr%.4f_bnmode%d" % (
shuffle_mode, local_t, num_roi, crop_size, learning_rate,mlp_bn_stat),
command_line, queue_name)
time.sleep(1)
elif args.type==82:
for num_roi in [6,16,32,64]:
for crop_size in [96]:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
for shuffle_mode in [1,5]:
for mlp_bn_stat in [1]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=82 --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --align=1 --shuffle_mode=%d" \
" --mlp_bn_stat=%d " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, num_roi, crop_size, local_t, shuffle_mode,mlp_bn_stat)
write_slurm_sh_faster(
"type82_%d_lgq_t_%.3f_%d_%d_lr%.4f_bnmode%d" % (
shuffle_mode, local_t, num_roi, crop_size, learning_rate,mlp_bn_stat),
command_line, queue_name)
time.sleep(1)
    elif args.type in (83, 84):
for num_roi in [1,3,5,10]:
for crop_size in [96]:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
for alpha in [0.1,0.2,0.5,1.0,2.0]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=%d --num_roi %d" \
" --mask_size %d --local_t=%.4f --align=1 --alpha=%f " \
" " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t,args.type, num_roi, crop_size, local_t, alpha)
write_slurm_sh_faster(
"type%d_lgq_t_%.3f_%d_%d_lr%.4f_alpha%f" % (args.type,
local_t, num_roi, crop_size, learning_rate,alpha),
command_line, queue_name)
time.sleep(1)
elif args.type==85:
for num_roi in [6,16,32,64]:
for crop_size in [96]:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
for shuffle_mode in [1,5]:
for mlp_bn_stat in [1]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=85 --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --align=1 --shuffle_mode=%d" \
" --mlp_bn_stat=%d " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, num_roi, crop_size, local_t, shuffle_mode,mlp_bn_stat)
write_slurm_sh_faster(
"type85_%d_lgq_t_%.3f_%d_%d_lr%.4f_bnmode%d" % (
shuffle_mode, local_t, num_roi, crop_size, learning_rate,mlp_bn_stat),
command_line, queue_name)
time.sleep(1)
elif args.type==86:
for num_roi in [6,16,32]:
for crop_size in [96]:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
for shuffle_mode in [1,5,7]:
for mlp_bn_stat in [1]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=86 --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --align=1 --shuffle_mode=%d" % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, num_roi, crop_size, local_t, shuffle_mode)
write_slurm_sh_faster(
"type86_%d_lgq_t_%.3f_%d_%d_lr%.4f" % (
shuffle_mode, local_t, num_roi, crop_size, learning_rate),
command_line, queue_name)
time.sleep(1)
    elif args.type in (87, 88, 93, 94, 95, 96):
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=%d --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --align=1 " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t,args.type, num_roi, crop_size, local_t)
write_slurm_sh_faster(
"type%d_lgq_t_%.3f_%d_%d_lr%.4f" % (args.type,
local_t, num_roi, crop_size, learning_rate),
command_line, queue_name)
time.sleep(1)
    elif args.type in (89, 90):
for num_roi in [1,5,10]:
for crop_size in [96]:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
for alpha in [0.1,0.2,0.5,1.0,2.0]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=%d --num_roi %d" \
" --mask_size %d --local_t=%.4f --align=1 --alpha=%f " \
" " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t,args.type, num_roi, crop_size, local_t, alpha)
write_slurm_sh_faster(
"type%d_lgq_t_%.3f_%d_%d_lr%.4f_alpha%f" % (args.type,
local_t, num_roi, crop_size, learning_rate,alpha),
command_line, queue_name)
time.sleep(1)
elif args.type==91:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=%d " \
" --local_t=%.4f --align=1 " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, args.type, local_t)
write_slurm_sh_faster(
"type%d_lgq_t_%.3f_lr%.4f" % (args.type, local_t, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==92:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
for shuffle_mode in range(4):
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=%d " \
" --local_t=%.4f --align=1 --shuffle_mode=%d " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, args.type, local_t,shuffle_mode)
write_slurm_sh_faster(
"type%d_%d_lgq_t_%.3f_lr%.4f" % (args.type,shuffle_mode, local_t, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==97:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
for shuffle_mode in range(4):
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=97 --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --align=1 --shuffle_mode=%d" % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, num_roi, crop_size, local_t, shuffle_mode)
write_slurm_sh_faster(
"type97_%d_lgq_t_%.3f_%d_%d_lr%.4f" % (
shuffle_mode, local_t, num_roi, crop_size, learning_rate),
command_line, queue_name)
time.sleep(1)
    elif args.type in (99, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113):
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
for shuffle_mode in [1]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=%d " \
" --local_t=%.4f --align=1 --shuffle_mode=%d " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, args.type, local_t,shuffle_mode)
write_slurm_sh_faster(
"type%d_%d_lgq_t_%.3f_lr%.4f" % (args.type,shuffle_mode, local_t, learning_rate),
command_line, queue_name)
time.sleep(1)
    elif args.type in (126, 127, 129, 131):
for learning_rate in [0.03]:
for local_t in [0.2]:
for epoch in [100]:
for shuffle_mode in range(8):
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=%d " \
" --local_t=%.4f --align=1 --shuffle_mode=%d --use_fp16=1 " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, args.type, local_t,shuffle_mode)
write_slurm_sh_faster(
"type%dablation_%d_lgq_t_%.3f_lr%.4f" % (args.type,shuffle_mode, local_t, learning_rate),
command_line, queue_name,environment=1)
time.sleep(1)
    elif args.type in (133, 134):
for learning_rate in [0.03]:
for local_t in [0.2]:
for epoch in [100]:
for shuffle_mode in range(3):
for momentum_weight_decay in [0.9,0.99,0.999]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=%d " \
" --local_t=%.4f --align=1 --shuffle_mode=%d --use_fp16=1 --momentum_stat=%f" % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, args.type, local_t, shuffle_mode,momentum_weight_decay)
write_slurm_sh_faster(
"type%dablation_%d_%f_lgq_t_%.3f_lr%.4f" % (
args.type, shuffle_mode,momentum_weight_decay, local_t, learning_rate),
command_line, queue_name, environment=1)
time.sleep(1)
    elif args.type in (128, 130, 132, 135, 136):
for learning_rate in [0.03]:
for local_t in [0.2]:
for epoch in [100]:
for group_norm_size in [1,2,4,8,16,32,64,128]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=%d " \
" --local_t=%.4f --align=1 --group_norm_size=%d --use_fp16=1 " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, args.type, local_t,group_norm_size)
write_slurm_sh_faster(
"type%dgroupablation_%d_lgq_t_%.3f_lr%.4f" % (args.type,group_norm_size, local_t, learning_rate),
command_line, queue_name,environment=1)
time.sleep(1)
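    # The sweeps above (types 128/130/132/135/136) and type 152 below walk --group_norm_size
    # through powers of two from 1 to 128, i.e. up to half the 256-image batch on the default
    # single node; group_norm_size presumably sets how many samples share normalization
    # statistics. Type 152 differs only in running with environment=0 instead of environment=1.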
elif args.type==152:
for learning_rate in [0.03]:
for local_t in [0.2]:
for epoch in [100]:
for group_norm_size in [1,2,4,8,16,32,64,128]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=%d " \
" --local_t=%.4f --align=1 --group_norm_size=%d --use_fp16=1 " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, args.type, local_t,group_norm_size)
write_slurm_sh_faster(
"type%dgroup_%d_lgq_t_%.3f_lr%.4f" % (args.type,group_norm_size, local_t, learning_rate),
command_line, queue_name,environment=0)
time.sleep(1)
    elif args.type in (137, 138):
for learning_rate in [0.03]:
for local_t in [0.2]:
for epoch in [100]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=%d " \
" --local_t=%.4f --align=1 --use_fp16=1 " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, args.type, local_t)
write_slurm_sh_faster(
"type%d2bnablation_lgq_t_%.3f_lr%.4f" % (args.type,local_t, learning_rate),
command_line, queue_name,environment=1)
time.sleep(1)
elif args.type==118:
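        # type 118: sweeps loco_conv_size (1-4) x loco_conv_stride (1-3) at shuffle_mode 1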
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
for shuffle_mode in [1]:
for conv_size in [1,2,3,4]:
for stride_size in [1,2,3]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=%d " \
" --local_t=%.4f --align=1 --shuffle_mode=%d --loco_conv_size=%d " \
"--loco_conv_stride=%d" % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, args.type, local_t, shuffle_mode,conv_size,stride_size)
write_slurm_sh_faster(
"type%d_%d_conv%d_%d_lr%.4f" % (args.type, shuffle_mode, conv_size,
stride_size,learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==114:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
for group_norm_size in [1,2,4,8]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=%d " \
" --local_t=%.4f --align=1 --group_norm_size=%d " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, args.type, local_t,group_norm_size)
write_slurm_sh_faster(
"type%d_%d_lgq_t_%.3f_lr%.4f" % (args.type,group_norm_size, local_t, learning_rate),
command_line, queue_name)
time.sleep(1)
    elif args.type in (115, 116, 117, 120, 121, 122, 123, 124):
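        # multi-crop setup (2x224 + 6x96) crossed with group_norm_size {1, 8} and alpha {1.0, 3.0};
        # submitted with gpu_memory=True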
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
for group_norm_size in [1,8]:
for alpha in [1.0,3.0]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --min_scale_crops 0.14 0.05" \
" --size_crops 224 96 --nmb_crops 2 6 --max_scale_crops 1.0 0.14 --type=%d " \
" --local_t=%.4f --align=1 --group_norm_size=%d --alpha=%f " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, args.type, local_t,group_norm_size,alpha)
write_slurm_sh_faster(
"type%d_%d_alpha%f_lgq_t_%.3f_lr%.4f" % (args.type,group_norm_size,alpha, local_t, learning_rate),
command_line, queue_name,gpu_memory=True)
time.sleep(1)
elif args.type==-120:
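        # type -120: small-crop-only runs (96px) sweeping num_crops; alpha is tied to the crop count
        # below (num_crops/2 - 1, plus the 1.0 baseline when the two differ)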
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
for group_norm_size in [1]:
for num_crops in [4,8,16,32]:
same_alpha = int(num_crops / 2) - 1
iter_alpha =[same_alpha,1.0] if same_alpha!=1 else [1.0]
for alpha in iter_alpha:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --min_scale_crops 0.14 " \
" --size_crops 96 --nmb_crops %d --max_scale_crops 1.0 --type=%d " \
" --local_t=%.4f --align=1 --group_norm_size=%d --alpha=%f --use_fp16=1" % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, num_crops,abs(args.type), local_t, group_norm_size, alpha)
write_slurm_sh_faster(
"type%d_%d_%d_alpha%f_lgq_t_%.3f_lr%.4f" % (
args.type,num_crops, group_norm_size, alpha, local_t, learning_rate),
command_line, queue_name, gpu_memory=True,environment=1)
time.sleep(1)
    elif args.type in (139, 140, 141, 142, 143, 144, 145, 146, 147):
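        # view-norm ablation over num_crops {4, 8, 16} at a single 96px crop size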
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
for group_norm_size in [1]:
for num_crops in [4,8,16]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --min_scale_crops 0.14 " \
" --size_crops 96 --nmb_crops %d --max_scale_crops 1.0 --type=%d " \
" --local_t=%.4f --align=1 --group_norm_size=%d --use_fp16=1 " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, num_crops,args.type, local_t, group_norm_size)
write_slurm_sh_faster(
"type%dviewnorm_%d_%d_lgq_t_%.3f_lr%.4f" % (
args.type, num_crops,group_norm_size, local_t, learning_rate),
command_line, queue_name, gpu_memory=True,environment=1)
time.sleep(1)
elif args.type==148 or args.type==149 or args.type==150:
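        # crossed sweep of num_crops {4, 8, 16, 32} and crop_size {224, 96} at min_scale_crops 0.2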
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
for group_norm_size in [1]:
for num_crops in [4,8,16,32]:
for crop_size in [224,96]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --min_scale_crops 0.2 " \
" --size_crops %d --nmb_crops %d --max_scale_crops 1.0 --type=%d " \
" --local_t=%.4f --align=1 --group_norm_size=%d --use_fp16=1 " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, crop_size,num_crops, args.type, local_t, group_norm_size)
write_slurm_sh_faster(
"type%dviewnorm_%d_%d_group%d_lgq_t_%.3f_lr%.4f" % (
args.type, num_crops,crop_size, group_norm_size, local_t, learning_rate),
command_line, queue_name, gpu_memory=True, environment=1)
time.sleep(1)
elif args.type==151:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
for group_norm_size in [1]:
for alpha in [1.0]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 " \
" --type=%d --min_scale_crops 0.14 0.05 " \
" --size_crops 224 96 --nmb_crops 4 6 --max_scale_crops 1.0 0.14" \
" --local_t=%.4f --align=1 --group_norm_size=%d --use_fp16=1 --alpha 1.0" % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, args.type, local_t, group_norm_size)
write_slurm_sh_faster(
"type%dmultiquery_viewkey_group%d_lgq_t_%.3f_lr%.4f" % (
args.type, group_norm_size, local_t, learning_rate),
command_line, queue_name, gpu_memory=True, environment=1)
time.sleep(1)
elif args.type==125:
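        # type 125: momentum_stat sweep (0.9/0.99/0.999) with the standard 2x224 + 6x96 multi-crop;
        # note knn_batch_size is fixed at 256 here rather than scaled by args.node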
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
for momentum_stat in [0.9,0.99,0.999]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --min_scale_crops 0.14 0.05" \
" --size_crops 224 96 --nmb_crops 2 6 --max_scale_crops 1.0 0.14 --type=%d " \
" --local_t=%.4f --align=1 --momentum_stat=%f " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,256,
learning_rate * args.node / 100,
local_t, args.type, local_t, momentum_stat)
write_slurm_sh_faster(
"type%d_momentum%f_lgq_t_%.3f_lr%.4f" % (
args.type, momentum_stat, local_t, learning_rate),
command_line, queue_name, gpu_memory=True)
time.sleep(1)
elif args.type==-108:
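        # type -108: large-batch run (batch_size 1024); lr and lr_final are rescaled by batch_size/256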
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
for batch_size in [1024]:
for shuffle_mode in [1]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=%d " \
" --local_t=%.4f --align=1 --shuffle_mode=%d " % \
(learning_rate * batch_size/256, args.data, epoch, batch_size,
256,
learning_rate * batch_size/256/ 100,
local_t, abs(args.type), local_t,shuffle_mode)
write_slurm_sh_faster(
"type%d_%d_lgq_t_%.3f_lr%.4f" % (args.type,shuffle_mode, local_t, learning_rate*batch_size/256),
command_line, queue_name,gpu_memory=True)
time.sleep(1)
elif args.type==100:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
for group_norm_size in [1,2,4,8]:
command_line = "python3 main_adco.py --mode=1 --lr=%f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=%d --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --align=1 --group_norm_size=%d " % \
(learning_rate/2, args.data, epoch, 128,
128,
learning_rate/ 200,
local_t,args.type, num_roi, crop_size, local_t,group_norm_size)
write_slurm_sh_faster(
"type%d_group%d_lgq_t_%.3f_%d_%d_lr%.4f" % (args.type,group_norm_size,
local_t, num_roi, crop_size, learning_rate),
command_line, queue_name,gpu_per_node=args.gpu)
time.sleep(1)
elif args.type==101:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
for group_num in [1,2,4,8]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=101 --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --align=1 --group_norm_size=%d " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, num_roi, crop_size, local_t, group_num)
write_slurm_sh_faster(
"type101_%d_lgq_t_%.3f_%d_%d_lr%.4f" % (
group_num, local_t, num_roi, crop_size, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.type==102:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
for shuffle_mode in [0,1,7]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=%d --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --align=1 --shuffle_mode=%d " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, args.type,num_roi, crop_size, local_t,shuffle_mode)
write_slurm_sh_faster(
"type%d_%d_lgq_t_%.3f_%d_%d_lr%.4f" % (args.type,shuffle_mode,local_t, num_roi, crop_size, learning_rate),
command_line, queue_name)
time.sleep(1)
elif args.mode==2:
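    # mode 2: multi-node variants of the sweeps above, dispatched with write_slurm_sh_multi/_multi2
    # across args.node nodes with args.gpu GPUs per node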
if args.type==58:
for learning_rate in [0.06]:
for alpha in [1.0]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=100 " \
"--batch_size=256 --knn_batch_size=256 --cos=1 --lr_final=%.4f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=0.2 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=58 --nmb_crops 2 6 " \
"--size_crops 224 96 --min_scale_crops 0.14 0.05 --max_scale_crops 1.0 0.14 " \
"--alpha=%.4f " \
" " % (learning_rate,args.data,learning_rate/100,alpha)
write_slurm_sh_multi("multimoco_proj_alpha_%.2f_lr_%.4f"%(alpha,learning_rate), command_line, queue_name,
nodes=args.node,gpu_per_node=args.gpu)
elif args.type==59:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [800]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=59 --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --align=1 " % \
(learning_rate*args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate*args.node / 100,
local_t, num_roi, crop_size, local_t)
write_slurm_sh_multi(
"type59_lgq_t_%.3f_%d_%d_lr%.4f" % (local_t, num_roi, crop_size, learning_rate),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu)
elif args.type==61:
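        # type 61: lr sweep; the empty-string num_roi/crop_size settings effectively drop the second
        # entry from --nmb_crops/--size_crops, giving a no-extra-crop baseline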
for num_roi in ['','6']:
for crop_size in ['','96']:
for learning_rate in [0.04,0.06,0.08]:
for local_t in [0.2]:
for epoch in [100]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=61 --nmb_crops 1 %s" \
" --size_crops 224 %s --local_t=%.4f --align=1 --ngpu=%d " % \
(learning_rate, args.data, epoch, 256,256,
learning_rate / 100,
local_t, num_roi, crop_size, local_t,args.gpu)
write_slurm_sh_multi(
"type61_lgq_t_%.3f_%s_%s_lr%.4f" % (local_t, num_roi, crop_size, learning_rate),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu)
time.sleep(1)
elif args.type==77:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
for shuffle_mode in [5]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=77 --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --align=1 --shuffle_mode=%d " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, num_roi, crop_size, local_t,shuffle_mode)
write_slurm_sh_multi(
"type77_%d_lgq_t_%.3f_%d_%d_lr%.4f" % (shuffle_mode,local_t, num_roi, crop_size, learning_rate*args.node),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu)
time.sleep(1)
elif args.type==87 or args.type==88 or args.type==94:
if args.type==87:
roi_num_list=[32]
elif args.type==88:
roi_num_list = [6,32]
else:
roi_num_list = [0]
for num_roi in roi_num_list:
for crop_size in [96]:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [800]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=%d --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --align=1 " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 128,
learning_rate * args.node / 100,
local_t,args.type, num_roi, crop_size, local_t)
if args.queue<=1:
write_slurm_sh_multi2(
"type%d_lgq_t_%.3f_%d_%d_lr%.4f_epoch%d" % (args.type,
local_t, num_roi, crop_size,
learning_rate, epoch),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu)
else:
write_slurm_sh_multi(
"type%d_lgq_t_%.3f_%d_%d_lr%.4f_epoch%d" % (args.type,
local_t, num_roi, crop_size, learning_rate,epoch),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu)
time.sleep(1)
elif args.type == 100:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
for group_norm_size in [1,2,4,8,16]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=%d --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --align=1 --group_norm_size=%d " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t,args.type, num_roi, crop_size, local_t,group_norm_size)
if args.node>=4:
command_line += " --warmup_epochs=10 "
if args.queue <= 1:
write_slurm_sh_multi2(
"type%d_group%d_lgq_t_%.3f_%d_%d_lr%.4f" % (args.type,group_norm_size,
local_t, num_roi, crop_size, learning_rate),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu)
else:
write_slurm_sh_multi(
"type%d_group%d_lgq_t_%.3f_%d_%d_lr%.4f" % (args.type, group_norm_size,
local_t, num_roi, crop_size,
learning_rate),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu)
time.sleep(1)
elif args.type==101:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [100]:
for group_num in [1,2,4,8,16]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=101 --nmb_crops 1 %d" \
" --size_crops 224 %d --local_t=%.4f --align=1 --group_norm_size=%d " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
args.node * 256,
learning_rate * args.node / 100,
local_t, num_roi, crop_size, local_t, group_num)
if args.node >= 4:
command_line += " --warmup_epochs=10 "
if args.queue <= 1:
write_slurm_sh_multi2(
"type101_%d_lgq_t_%.3f_%d_%d_lr%.4f" % (
group_num, local_t, num_roi, crop_size, learning_rate),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu)
else:
write_slurm_sh_multi(
"type101_%d_lgq_t_%.3f_%d_%d_lr%.4f" % (
group_num, local_t, num_roi, crop_size, learning_rate),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu)
time.sleep(1)
elif args.type==119:
for batch_size in [4096]:
            # placeholder for a removed crop_size loop; `if True:` preserves the original nesting
            if True:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [800]:
for group_num in [1,8,16,32]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --type=%d " \
" --local_t=%.4f --align=1 --group_norm_size=%d --use_fp16=1 " % \
(learning_rate * batch_size / 256, args.data, epoch, batch_size,
256,
learning_rate * batch_size / 256 / 100,
local_t, abs(args.type), local_t,group_num)
command_line += " --warmup_epochs=10 "
write_slurm_sh_multi(
"mocov2bigbatch_type%d_%d_%f_lgq_t_%.3f_epoch%d" % (
args.type, group_num, learning_rate, local_t, epoch),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu, gpu_memory=True,
environment=1)
elif args.type==115 or args.type==120:
for batch_size in [2048]:
for learning_rate in [0.045]:
for local_t in [0.2]:
for epoch in [800]:
for group_norm_size in [64]:
for alpha in [1.0]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=10 --tensorboard=1 --min_scale_crops 0.14 0.05" \
" --size_crops 224 96 --nmb_crops 2 6 --max_scale_crops 1.0 0.14 --type=%d " \
" --local_t=%.4f --align=1 --group_norm_size=%d --alpha=%f --use_fp16=1 " % \
(learning_rate * batch_size/256, args.data, epoch, batch_size,
256,
learning_rate * batch_size/256/ 100,
local_t, args.type, local_t,group_norm_size,alpha)
write_slurm_sh_multi(
"multimoco_type%d_%d_alpha%f_lgq_t_%.3f_lr%.4f" % (args.type,group_norm_size,alpha, local_t, learning_rate),
command_line, queue_name,nodes=args.node, gpu_per_node=args.gpu,gpu_memory=True,environment=1)
time.sleep(1)
elif args.type==149:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [1000]:
for group_norm_size in [1]:
for num_crops in [4]:
for crop_size in [224]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --min_scale_crops 0.2 " \
" --size_crops %d --nmb_crops %d --max_scale_crops 1.0 --type=%d " \
" --local_t=%.4f --align=1 --group_norm_size=%d --use_fp16=1 " % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
512,
learning_rate * args.node / 100,
local_t, crop_size,num_crops, args.type, local_t, group_norm_size)
write_slurm_sh_multi2(
"mocov2_%dview_type%d_%d_%f_lgq_t_%.3f_epoch%d" % (
args.type, num_crops,group_norm_size, learning_rate, local_t, epoch),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu, gpu_memory=False,
environment=0)
time.sleep(1)
elif args.type==151:
for learning_rate in [0.06]:
for local_t in [0.2]:
for epoch in [1000]:
for group_norm_size in [1]:
for alpha in [1.0]:
command_line = "python3 main_adco.py --mode=1 --lr=%.4f --data=%s " \
"--dist_url=tcp://localhost:10031 --epochs=%d " \
"--batch_size=%d --knn_batch_size=%d --cos=1 --lr_final=%.8f " \
"--momentum=0.9 --weight_decay=1e-4 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 " \
"--moco_m=0.999 --moco_k=65536 --moco_t=%.4f " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 " \
" --type=%d --min_scale_crops 0.14 0.05 " \
" --size_crops 224 96 --nmb_crops 4 6 --max_scale_crops 1.0 0.14" \
" --local_t=%.4f --align=1 --group_norm_size=%d --use_fp16=1 --alpha=1.0" % \
(learning_rate * args.node, args.data, epoch, args.node * 256,
512,
learning_rate * args.node / 100,
local_t, args.type, local_t, group_norm_size)
write_slurm_sh_multi(
"type%dmultiquery_viewkey_group%d_lgq_t_%.3f_lr%.4f" % (
args.type, group_norm_size, local_t, learning_rate),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu,
gpu_memory=True, environment=1)
time.sleep(1)
elif args.mode==6:
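    # mode 6: MoCo v3-style pretraining (mode=6 commands: moco_dim=256, mlp_dim=4096, moco_m=0.99,
    # warmup epochs); job names start with "mocov3type"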
if args.type==0 or args.type==1 or args.type==2 or args.type==3:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.9]:
for local_t in [1.0]:
for epoch in [100]:
for batch_size in [512]:
command_line = "python3 main_adco.py --mode=6 --type=%d --data=%s " \
"--epochs=%d --start_epoch=0 --batch_size=%d --lr=0.9 " \
"--weight_decay=1e-6 --dist_url=tcp://localhost:10031 --rank=0 " \
"--multiprocessing_distributed=1 --world_size=1 --moco_dim=256 " \
"--mlp_dim=4096 --moco_m=0.99 --moco_t=%f --warmup_epochs=10 --nmb_crops 1 %d " \
" --size_crops 224 %d --align=1 --knn_neighbor=20 --knn_freq=1 --tensorboard=1 " \
"--knn_batch_size=%d " \
% (
args.type, args.data, epoch, batch_size,local_t, num_roi, crop_size, args.node * 64)
if args.node == 1:
write_slurm_sh_faster("mocov3type%d_lgq_t_%.3f_%d_%d_epoch%d" % (args.type,
local_t, num_roi,
crop_size,
epoch),
command_line, queue_name)
else:
if args.queue <= 1:
write_slurm_sh_multi2(
"mocov3type%d_lgq_t_%.3f_%d_%d_epoch%d" % (args.type,
local_t, num_roi, crop_size,
epoch),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu)
else:
write_slurm_sh_multi(
"mocov3type%d_lgq_t_%.3f_%d_%d_epoch%d" % (
args.type, local_t, num_roi, crop_size, epoch),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu)
time.sleep(1)
elif args.type==4 or args.type==5 or args.type==6:
for num_roi in [1]:
for crop_size in [96]:
for learning_rate in [0.9]:
for local_t in [1.0]:
for epoch in [100]:
for batch_size in [1024]:
for group_norm_size in [1,2,4,8]:
command_line = "python3 main_adco.py --mode=6 --type=%d --data=%s " \
"--epochs=%d --start_epoch=0 --batch_size=%d --lr=%f " \
"--weight_decay=1e-6 --dist_url=tcp://localhost:10031 --rank=0 " \
"--multiprocessing_distributed=1 --world_size=1 --moco_dim=256 " \
"--mlp_dim=4096 --moco_m=0.99 --moco_t=%f --warmup_epochs=10 --nmb_crops 1 %d " \
" --size_crops 224 %d --align=1 --knn_neighbor=20 --knn_freq=1 --tensorboard=1 " \
"--knn_batch_size=%d --group_norm_size=%d " \
% (args.type, args.data, epoch, batch_size, learning_rate,local_t, num_roi,
crop_size, args.node * 64,group_norm_size)
if args.node == 1:
write_slurm_sh_faster("mocov3type%d_%d_%flgq_t_%.3f_%d_%d_epoch%d" % (args.type,
group_norm_size,
learning_rate,
local_t,
num_roi,
crop_size,
epoch),
command_line, queue_name,gpu_memory=True)
else:
if args.queue <= 1:
write_slurm_sh_multi2(
"mocov3type%d_%d_%f_lgq_t_%.3f_%d_%d_epoch%d" % (args.type,group_norm_size,learning_rate,
local_t, num_roi, crop_size,
epoch),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu)
else:
write_slurm_sh_multi(
"mocov3type%d_%d_%f_lgq_t_%.3f_%d_%d_epoch%d" % (
args.type, group_norm_size,learning_rate,local_t, num_roi, crop_size, epoch),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu)
time.sleep(1)
elif args.type==7 or args.type==8:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.9]:
for local_t in [1.0]:
for epoch in [100]:
for batch_size in [1024]:
for group_norm_size in [1,2,4,8]:
command_line = "python3 main_adco.py --mode=6 --type=%d --data=%s " \
"--epochs=%d --start_epoch=0 --batch_size=%d --lr=%f " \
"--weight_decay=1e-6 --dist_url=tcp://localhost:10031 --rank=0 " \
"--multiprocessing_distributed=1 --world_size=1 --moco_dim=256 " \
"--mlp_dim=4096 --moco_m=0.99 --moco_t=%f --warmup_epochs=10 --nmb_crops 1 %d " \
" --size_crops 224 %d --align=1 --knn_neighbor=20 --knn_freq=1 --tensorboard=1 " \
"--knn_batch_size=%d --group_norm_size=%d --use_fp16=1 " \
% (args.type, args.data, epoch, batch_size, learning_rate,local_t, num_roi,
crop_size, args.node * 64,group_norm_size)
if args.node == 1:
write_slurm_sh_faster("mocov3type%d_%d_%flgq_t_%.3f_%d_%d_epoch%d" % (args.type,
group_norm_size,
learning_rate,
local_t,
num_roi,
crop_size,
epoch),
command_line, queue_name,gpu_memory=True,environment=1)
else:
if args.queue <= 1:
write_slurm_sh_multi2(
"mocov3type%d_%d_%f_lgq_t_%.3f_%d_%d_epoch%d" % (args.type,group_norm_size,learning_rate,
local_t, num_roi, crop_size,
epoch),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu,gpu_memory=True,environment=1)
else:
write_slurm_sh_multi(
"mocov3type%d_%d_%f_lgq_t_%.3f_%d_%d_epoch%d" % (
args.type, group_norm_size,learning_rate,local_t, num_roi, crop_size, epoch),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu,gpu_memory=True,environment=1)
time.sleep(1)
elif args.type==-7:
        combine_choice = [[1024, 16]]  # (batch_size, group_norm_size) pairs; alternatives: [[1024,16],[2048,32],[4096,64]]
for num_roi in [10]:
for crop_size in [96]:
for learning_rate in [0.3]:
for local_t in [1.0]:
for epoch in [1000]:
for batch_size,group_norm_size in combine_choice:
command_line = "python3 main_adco.py --mode=6 --type=7 --data=%s " \
"--epochs=%d --start_epoch=0 --batch_size=%d --lr=%f " \
"--weight_decay=1.5e-6 --dist_url=tcp://localhost:10031 --rank=0 " \
"--multiprocessing_distributed=1 --world_size=1 --moco_dim=256 " \
"--mlp_dim=4096 --moco_m=0.996 --moco_t=%f --warmup_epochs=10 --nmb_crops 1 %d " \
" --size_crops 224 %d --align=1 --knn_neighbor=20 --knn_freq=1 --tensorboard=1 " \
"--knn_batch_size=%d --group_norm_size=%d --use_fp16=1 " \
% ( args.data, epoch, batch_size, learning_rate,local_t, num_roi,
crop_size, args.node * 64,group_norm_size)
if args.queue <= 1:
write_slurm_sh_multi2(
"mocov3type%d_%d_%f_lgq_t_%.3f_%d_%d_epoch%d" % (
args.type, group_norm_size, learning_rate,
local_t, num_roi, crop_size,
epoch),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu,
gpu_memory=True, environment=1)
else:
write_slurm_sh_multi(
"mocov3type%d_%d_%f_lgq_t_%.3f_%d_%d_epoch%d" % (
args.type, group_norm_size, learning_rate, local_t, num_roi, crop_size,
epoch),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu,
gpu_memory=True, environment=1)
time.sleep(1)
elif args.type==-13:
        combine_choice = [[4096, 1], [4096, 64]]  # (batch_size, group_norm_size) pairs; alternatives: [[1024,16],[2048,32],[4096,64]]
for num_roi in [20]:
for crop_size in [96]:
for learning_rate in [0.3]:
for local_t in [1.0]:
for epoch in [1000]:
for batch_size,group_norm_size in combine_choice:
command_line = "python3 main_adco.py --mode=6 --type=13 --data=%s " \
"--epochs=%d --start_epoch=0 --batch_size=%d --lr=%f " \
"--weight_decay=1.5e-6 --dist_url=tcp://localhost:10031 --rank=0 " \
"--multiprocessing_distributed=1 --world_size=1 --moco_dim=256 " \
"--mlp_dim=4096 --moco_m=0.996 --moco_t=%f --warmup_epochs=10 --nmb_crops 1 %d " \
" --size_crops 224 %d --align=1 --knn_neighbor=20 --knn_freq=1 --tensorboard=1 " \
"--knn_batch_size=%d --group_norm_size=%d --use_fp16=1 " \
% ( args.data, epoch, batch_size, learning_rate,local_t, num_roi,
crop_size, args.node * 64,group_norm_size)
if args.queue <= 1:
write_slurm_sh_multi2(
"mocov3type%d_%d_%f_lgq_t_%.3f_%d_%d_epoch%d" % (
args.type, group_norm_size, learning_rate,
local_t, num_roi, crop_size,
epoch),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu,
gpu_memory=True, environment=1)
else:
write_slurm_sh_multi(
"mocov3type%d_%d_%f_lgq_t_%.3f_%d_%d_epoch%d" % (
args.type, group_norm_size, learning_rate, local_t, num_roi, crop_size,
epoch),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu,
gpu_memory=True, environment=1)
time.sleep(1)
elif args.type==9 or args.type==10:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.9]:
for local_t in [1.0]:
for epoch in [100]:
for batch_size in [1024]:
for ema_param in [0.001,0.01,0.1]:
command_line = "python3 main_adco.py --mode=6 --type=%d --data=%s " \
"--epochs=%d --start_epoch=0 --batch_size=%d --lr=%f " \
"--weight_decay=1e-6 --dist_url=tcp://localhost:10031 --rank=0 " \
"--multiprocessing_distributed=1 --world_size=1 --moco_dim=256 " \
"--mlp_dim=4096 --moco_m=0.99 --moco_t=%f --warmup_epochs=10 --nmb_crops 1 %d " \
" --size_crops 224 %d --align=1 --knn_neighbor=20 --knn_freq=1 --tensorboard=1 " \
"--knn_batch_size=%d --momentum_stat=%f --use_fp16=1 " \
% (args.type, args.data, epoch, batch_size, learning_rate,local_t, num_roi,
crop_size, args.node * 64,ema_param)
if args.node == 1:
write_slurm_sh_faster("mocov3type%d_%f_%flgq_t_%.3f_%d_%d_epoch%d" % (args.type,
ema_param,
learning_rate,
local_t,
num_roi,
crop_size,
epoch),
command_line, queue_name,gpu_memory=True,environment=1)
else:
if args.queue <= 1:
write_slurm_sh_multi2(
"mocov3type%d_%d_%f_lgq_t_%.3f_%d_%d_epoch%d" % (args.type,group_norm_size,learning_rate,
local_t, num_roi, crop_size,
epoch),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu,gpu_memory=True,environment=1)
else:
write_slurm_sh_multi(
"mocov3type%d_%d_%f_lgq_t_%.3f_%d_%d_epoch%d" % (
args.type, group_norm_size,learning_rate,local_t, num_roi, crop_size, epoch),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu,gpu_memory=True,environment=1)
time.sleep(1)
elif args.type==11:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.9]:
for local_t in [1.0]:
for epoch in [100]:
for batch_size in [1024]:
for ema_param in [0.999]:
for group_norm_size in [1,4,8,16]:
command_line = "python3 main_adco.py --mode=6 --type=%d --data=%s " \
"--epochs=%d --start_epoch=0 --batch_size=%d --lr=%f " \
"--weight_decay=1e-6 --dist_url=tcp://localhost:10031 --rank=0 " \
"--multiprocessing_distributed=1 --world_size=1 --moco_dim=256 " \
"--mlp_dim=4096 --moco_m=0.99 --moco_t=%f --warmup_epochs=10 --nmb_crops 1 %d " \
" --size_crops 224 %d --align=1 --knn_neighbor=20 --knn_freq=1 --tensorboard=1 " \
"--knn_batch_size=%d --momentum_stat=%f --use_fp16=1 --group_norm_size=%d " \
% (args.type, args.data, epoch, batch_size, learning_rate,local_t, num_roi,
crop_size, args.node * 64,ema_param,group_norm_size)
if args.node == 1:
write_slurm_sh_faster(
"mocov3type%d_%f_%d_%flgq_t_%.3f_%d_%d_epoch%d" % (args.type,
group_norm_size,
ema_param,
learning_rate,
local_t,
num_roi,
crop_size,
epoch),
command_line, queue_name, gpu_memory=True, environment=1)
else:
if args.queue <= 1:
write_slurm_sh_multi2(
"mocov3type%d_%d_%f_%f_lgq_t_%.3f_%d_%d_epoch%d" % (
args.type, group_norm_size,ema_param, learning_rate,
local_t, num_roi, crop_size,
epoch),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu,
gpu_memory=True, environment=1)
else:
write_slurm_sh_multi(
"mocov3type%d_%d_%f_%f_lgq_t_%.3f_%d_%d_epoch%d" % (
args.type, group_norm_size,ema_param, learning_rate, local_t, num_roi,
crop_size, epoch),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu,
gpu_memory=True, environment=1)
time.sleep(1)
elif args.type==12:
for num_roi in [6]:
for crop_size in [96]:
for learning_rate in [0.9]:
for local_t in [1.0]:
for epoch in [100]:
for batch_size in [1024]:
for group_norm_size in [8]:
command_line = "python3 main_adco.py --mode=6 --type=%d --data=%s " \
"--epochs=%d --start_epoch=0 --batch_size=%d --lr=%f " \
"--weight_decay=1e-6 --dist_url=tcp://localhost:10031 --rank=0 " \
"--multiprocessing_distributed=1 --world_size=1 --moco_dim=256 " \
"--mlp_dim=4096 --moco_m=0.99 --moco_t=%f --warmup_epochs=10 --nmb_crops 1 %d " \
" --size_crops 224 %d --align=1 --knn_neighbor=20 --knn_freq=1 --tensorboard=1 " \
"--knn_batch_size=%d --group_norm_size=%d --use_fp16=1 " \
% (args.type, args.data, epoch, batch_size, learning_rate,local_t, num_roi,
crop_size, args.node * 64,group_norm_size)
if args.node == 1:
write_slurm_sh_faster("mocov3type%d_%d_%flgq_t_%.3f_%d_%d_epoch%d" % (args.type,
group_norm_size,
learning_rate,
local_t,
num_roi,
crop_size,
epoch),
command_line, queue_name,gpu_memory=True,environment=1)
else:
if args.queue <= 1:
write_slurm_sh_multi2(
"mocov3type%d_%d_%f_lgq_t_%.3f_%d_%d_epoch%d" % (args.type,group_norm_size,learning_rate,
local_t, num_roi, crop_size,
epoch),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu,gpu_memory=False,environment=0)
else:
write_slurm_sh_multi(
"mocov3type%d_%d_%f_lgq_t_%.3f_%d_%d_epoch%d" % (
args.type, group_norm_size,learning_rate,local_t, num_roi, crop_size, epoch),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu,gpu_memory=True,environment=1)
time.sleep(1)
elif args.type==13 or args.type==14 or args.type==15:
for learning_rate in [0.9]:
for local_t in [1.0]:
for epoch in [100]:
for batch_size in [1024]:
for group_norm_size in [1,4,8,16]:
command_line = "python3 main_adco.py --mode=6 --type=%d --data=%s " \
"--epochs=%d --start_epoch=0 --batch_size=%d --lr=%f " \
"--weight_decay=1e-6 --dist_url=tcp://localhost:10031 --rank=0 " \
"--multiprocessing_distributed=1 --world_size=1 --moco_dim=256 " \
"--mlp_dim=4096 --moco_m=0.99 --moco_t=%f --warmup_epochs=10 " \
" --align=1 --knn_neighbor=20 --knn_freq=1 --tensorboard=1 " \
"--knn_batch_size=%d --group_norm_size=%d --use_fp16=1 " \
% (args.type, args.data, epoch, batch_size, learning_rate,
local_t, args.node * 64, group_norm_size)
if args.node == 1:
write_slurm_sh_faster("mocov3type%d_%d_%flgq_t_%.3f_epoch%d" % (args.type,
group_norm_size,
learning_rate,
local_t,
epoch),
command_line, queue_name, gpu_memory=True, environment=1)
else:
if args.queue <= 1:
write_slurm_sh_multi2(
"mocov3type%d_%d_%f_lgq_t_%.3f_epoch%d" % (
args.type, group_norm_size, learning_rate,
local_t, epoch),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu,
gpu_memory=False, environment=0)
else:
write_slurm_sh_multi(
"mocov3type%d_%d_%f_lgq_t_%.3f_epoch%d" % (
args.type, group_norm_size, learning_rate, local_t, epoch),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu,
gpu_memory=True, environment=1)
time.sleep(1)
elif args.type==19:
for learning_rate in [0.9]:
for local_t in [1.0]:
for epoch in [100]:
for batch_size in [1024]:
for group_norm_size in [1,4,8,16,32]:
for key_group_norm_size in [1,4,8,16,32]:
command_line = "python3 main_adco.py --mode=6 --type=%d --data=%s " \
"--epochs=%d --start_epoch=0 --batch_size=%d --lr=%f " \
"--weight_decay=1e-6 --dist_url=tcp://localhost:10031 --rank=0 " \
"--multiprocessing_distributed=1 --world_size=1 --moco_dim=256 " \
"--mlp_dim=4096 --moco_m=0.99 --moco_t=%f --warmup_epochs=10 " \
" --align=1 --knn_neighbor=20 --knn_freq=1 --tensorboard=1 " \
"--knn_batch_size=%d --group_norm_size=%d --key_group=%d " \
"--use_fp16=1 " \
% (args.type, args.data, epoch, batch_size, learning_rate,
local_t, args.node * 64, group_norm_size,key_group_norm_size)
if args.node == 1:
write_slurm_sh_faster("mocov3type%d_%d_%d_%flgq_t_%.3f_epoch%d" % (args.type,
group_norm_size,
key_group_norm_size,
learning_rate,
local_t,
epoch),
command_line, queue_name, gpu_memory=True, environment=1)
else:
if args.queue <= 3:
write_slurm_sh_multi2(
"mocov3type%d_%d_%d_%f_lgq_t_%.3f_epoch%d" % (
args.type, group_norm_size, key_group_norm_size,learning_rate,
local_t, epoch),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu,
gpu_memory=False, environment=0)
else:
write_slurm_sh_multi(
"mocov3type%d_%d_%d_%f_lgq_t_%.3f_epoch%d" % (
args.type, group_norm_size, key_group_norm_size,learning_rate, local_t, epoch),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu,
gpu_memory=True, environment=1)
time.sleep(1)
elif args.type==16:
for learning_rate in [0.9]:
for local_t in [1.0]:
for epoch in [100]:
for batch_size in [1024]:
for crop_size in [4,8,16]:
command_line = "python3 main_adco.py --mode=6 --type=%d --data=%s " \
"--epochs=%d --start_epoch=0 --batch_size=%d --lr=%f " \
"--weight_decay=1e-6 --dist_url=tcp://localhost:10031 --rank=0 " \
"--multiprocessing_distributed=1 --world_size=1 --moco_dim=256 " \
"--mlp_dim=4096 --moco_m=0.99 --moco_t=%f --warmup_epochs=10 " \
" --align=1 --knn_neighbor=20 --knn_freq=1 --tensorboard=1 " \
"--knn_batch_size=%d --group_norm_size=1 --use_fp16=1 " \
"--nmb_crops %d" \
% (args.type, args.data, epoch, batch_size, learning_rate,
local_t, args.node * 64,crop_size )
if args.node == 1:
write_slurm_sh_faster("mocov3type%d_%d_%flgq_t_%.3f_epoch%d" % (args.type,
crop_size,
learning_rate,
local_t,
epoch),
command_line, queue_name, gpu_memory=True, environment=1)
else:
if args.queue <= 1:
write_slurm_sh_multi2(
"mocov3type%d_%d_%f_lgq_t_%.3f_epoch%d" % (
args.type, crop_size, learning_rate,
local_t, epoch),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu,
gpu_memory=False, environment=0)
else:
write_slurm_sh_multi(
"mocov3type%d_%d_%f_lgq_t_%.3f_epoch%d" % (
args.type, crop_size, learning_rate, local_t, epoch),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu,
gpu_memory=True, environment=1)
time.sleep(1)
elif args.type==17 or args.type==18:
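        # types 17/18: ViT-small runs (lr 1.5e-4, weight_decay 0.1, crop_min 0.08); type 18
        # additionally sweeps group_norm_size over powers of two up to 128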
warmup_epoch=10
for learning_rate in [1.5e-4]:
for local_t in [0.2]:
for epoch in [100]:
for batch_size in [1024]:
if args.type==18:
group_list = [1,2,4,8,16,32,64,128]
else:
group_list = [1]
for group_norm_size in group_list:
command_line = "python3 main_adco.py --mode=6 --type=%d --data=%s " \
"--epochs=%d --start_epoch=0 --batch_size=%d --lr=%f " \
"--weight_decay=0.1 --dist_url=tcp://localhost:10031 --rank=0 " \
"--multiprocessing_distributed=1 --world_size=1 --moco_dim=256 " \
"--mlp_dim=4096 --moco_m=0.99 --moco_t=%f " \
" --align=1 --knn_neighbor=20 --knn_freq=1 --tensorboard=1 " \
"--knn_batch_size=%d --group_norm_size=%d --use_fp16=1 " \
"--warmup_epochs %d -a vit_small --crop_min 0.08 " \
% (args.type, args.data, epoch, batch_size, learning_rate,
local_t, 256 , group_norm_size,warmup_epoch)
if args.node == 1:
write_slurm_sh_faster("mocov3type%d_%d_%flgq_t_%.3f_epoch%d" % (args.type,
group_norm_size,
learning_rate,
local_t,
epoch),
command_line, queue_name, gpu_memory=True, environment=1)
else:
if args.queue <= 1:
write_slurm_sh_multi2(
"mocov3type%d_%d_%f_lgq_t_%.3f_epoch%d" % (
args.type, group_norm_size, learning_rate,
local_t, epoch),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu,
gpu_memory=False, environment=0)
else:
write_slurm_sh_multi(
"mocov3type%d_%d_%f_lgq_t_%.3f_epoch%d" % (
args.type, group_norm_size, learning_rate, local_t, epoch),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu,
gpu_memory=True, environment=1)
time.sleep(1)
elif args.mode==7:
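    # mode 7: SimSiam-style runs (moco_dim=2048, mlp_dim=512); job names start with "simsiamtype"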
if args.type==0 or args.type==1 or args.type==2 or args.type==3 or args.type==4:
for num_roi in [16]:
for crop_size in [96]:
for learning_rate in [0.05]:
                    for batch_size in [512]:
for epoch in [100]:
command_line = "python3 main_adco.py --mode=7 --type=%d " \
" --data=%s --epochs=%d --start_epoch=0 --batch_size=%d " \
"--lr=%f --weight_decay=1e-4 --dist_url=tcp://localhost:10031 " \
"--rank=0 --multiprocessing_distributed=1 --world_size=1 " \
"--moco_dim=2048 --mlp_dim=512 --nmb_crops 1 %d --size_crops 224 %d " \
"--align=1 --knn_neighbor=20 --knn_freq=1 --tensorboard=1 --knn_batch_size=%d "\
                                       % (args.type, args.data, epoch, batch_size, learning_rate, num_roi, crop_size, max(64 * args.node, 256))
if args.node==1:
write_slurm_sh_faster("simsiamtype%d_%d_%d_epoch%d" % (args.type, num_roi, crop_size,
epoch),command_line, queue_name,)
else:
if args.queue <= 1:
write_slurm_sh_multi2(
"simsiamtype%d_%d_%d_epoch%d" % (args.type, num_roi, crop_size,
epoch),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu)
else:
write_slurm_sh_multi(
"simsiamtype%d_%d_%d_epoch%d" % (args.type, num_roi, crop_size, epoch),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu)
time.sleep(1)
elif args.type==5 or args.type==6 or args.type==7 or args.type==8 or args.type==9:
for learning_rate in [0.05]:
            for batch_size in [512]:
for epoch in [100]:
for group_norm_size in [1, 2, 4, 8,16,32,64]:
command_line = "python3 main_adco.py --mode=7 --type=%d " \
" --data=%s --epochs=%d --start_epoch=0 --batch_size=%d " \
"--lr=%f --weight_decay=1e-4 --dist_url=tcp://localhost:10031 " \
"--rank=0 --multiprocessing_distributed=1 --world_size=1 " \
"--moco_dim=2048 --mlp_dim=512 --group_norm_size=%d " \
"--align=1 --knn_neighbor=20 --knn_freq=1 --tensorboard=1 --knn_batch_size=%d " \
"--use_fp16=1 " \
                                       % (args.type, args.data, epoch, batch_size, learning_rate, group_norm_size,
max(64 * args.node, 256))
if args.node == 1:
write_slurm_sh_faster("simsiamtype%d_%d_epoch%d" % (args.type,group_norm_size,
epoch), command_line, queue_name,
gpu_memory=True,environment=1)
else:
if args.queue <= 1:
write_slurm_sh_multi2(
"simsiamtype%d_%d_epoch%d" % (args.type,group_norm_size,
epoch),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu,
gpu_memory=True,environment=1)
else:
write_slurm_sh_multi(
"simsiamtype%d_%d_epoch%d" % (args.type,group_norm_size, epoch),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu,
gpu_memory=True,environment=1)
time.sleep(1)
elif args.type==-6:
for learning_rate in [0.05]:
            for batch_size in [256, 512]:
for epoch in [800]:
for group_norm_size in [8]:
command_line = "python3 main_adco.py --mode=7 --type=%d " \
" --data=%s --epochs=%d --start_epoch=0 --batch_size=%d " \
"--lr=%f --weight_decay=1e-4 --dist_url=tcp://localhost:10031 " \
"--rank=0 --multiprocessing_distributed=1 --world_size=1 " \
"--moco_dim=2048 --mlp_dim=512 --group_norm_size=%d " \
"--align=1 --knn_neighbor=20 --knn_freq=1 --tensorboard=1 --knn_batch_size=%d " \
"--use_fp16=1 " \
                                       % (abs(args.type), args.data, epoch, batch_size, learning_rate, group_norm_size,
max(64 * args.node, 256))
if args.node == 1:
write_slurm_sh_faster("simsiamtype%d_%d_epoch%d" % (args.type,group_norm_size,
epoch), command_line, queue_name,
gpu_memory=True )
else:
if args.queue <= 1:
write_slurm_sh_multi2(
"simsiamtype%d_%d_epoch%d" % (args.type,group_norm_size,
epoch),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu)
else:
write_slurm_sh_multi(
"simsiamtype%d_%d_epoch%d" % (args.type,group_norm_size, epoch),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu)
time.sleep(1)
elif args.type==10:
for learning_rate in [0.05]:
            for batch_size in [512]:
for epoch in [100]:
for crop_size in [4, 8,16]:
command_line = "python3 main_adco.py --mode=7 --type=%d " \
" --data=%s --epochs=%d --start_epoch=0 --batch_size=%d " \
"--lr=%f --weight_decay=1e-4 --dist_url=tcp://localhost:10031 " \
"--rank=0 --multiprocessing_distributed=1 --world_size=1 " \
"--moco_dim=2048 --mlp_dim=512 --nmb_crops %d " \
"--align=1 --knn_neighbor=20 --knn_freq=1 --tensorboard=1 --knn_batch_size=%d " \
"--use_fp16=1 " \
                                       % (args.type, args.data, epoch, batch_size, learning_rate, crop_size,
max(64 * args.node, 256))
if args.node == 1:
write_slurm_sh_faster("simsiamtype%d_%d_epoch%d" % (args.type,crop_size,
epoch), command_line, queue_name,
gpu_memory=True,environment=1)
else:
if args.queue <= 1:
write_slurm_sh_multi2(
"simsiamtype%d_%d_epoch%d" % (args.type,crop_size,
epoch),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu,
gpu_memory=True,environment=1)
else:
write_slurm_sh_multi(
"simsiamtype%d_%d_epoch%d" % (args.type,crop_size, epoch),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu,
gpu_memory=True,environment=1)
time.sleep(1)
elif args.mode==5:
    # mode 5: run the SwAV baseline, from scratch or resuming from the checkpoint in args.F
if args.type==0:
if args.F is None:
command_line = "python3 main_adco.py --mode=5 --type=0 --data=%s --epochs 200 --lr=0.6 "\
"--lr_final 0.0006 --batch_size=256 --warmup_epochs 0 --freeze_prototypes_niters 5005 "\
"--queue_length 3840 --epoch_queue_starts 15 --dist_url=tcp://localhost:10031 "\
"--knn_batch_size=256 --cos=1 --momentum=0.9 --weight_decay=1e-6 --world_size=1 "\
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 --moco_k=3000 --moco_t=0.1 "\
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --nmb_crops 2 6 " \
"--size_crops 224 96 --min_scale_crops 0.14 0.05 --max_scale_crops 1.0 0.14 --dump_path %s"%(args.data,dump_path)
write_slurm_sh("swav_baseline" , command_line, queue_name)
else:
args.F= os.path.abspath(args.F)
command_line = "python3 main_adco.py --mode=5 --type=0 --data=%s --epochs 200 --lr=0.6 " \
"--lr_final 0.0006 --batch_size=256 --warmup_epochs 0 --freeze_prototypes_niters 5005 " \
"--queue_length 3840 --epoch_queue_starts 15 --dist_url=tcp://localhost:10031 " \
"--knn_batch_size=256 --cos=1 --momentum=0.9 --weight_decay=1e-6 --world_size=1 " \
"--rank=0 --multiprocessing_distributed=1 --moco_dim=128 --moco_k=3000 --moco_t=0.1 " \
"--knn_neighbor=20 --knn_freq=1 --tensorboard=1 --nmb_crops 2 6 " \
"--size_crops 224 96 --min_scale_crops 0.14 0.05 --max_scale_crops 1.0 0.14 " \
"--resume=%s --dump_path %s " % (args.data,args.F,dump_path)
resume_name= os.path.split(os.path.abspath(args.F))[1]
write_slurm_sh("swav_baseline_resume%s"%resume_name, command_line, queue_name)
elif args.mode==8:
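    # mode 8: Barlow Twins runs via main.py; the alpha loop feeds --lambd, and the learning rate is
    # split into --learning-rate-weights / --learning-rate-biases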
if args.type==0 or args.type==1:
for epoch in [100]:
for batch_size in [2048]:
for lr_w in [0.2]:
for lr_bias in [0.0048]:
for alpha in [0.51]:
command_line="python3 main.py %s --epochs=%d " \
"--batch-size=%d --learning-rate-weights=%f --learning-rate-biases=%f " \
"--weight-decay=1e-6 --lambd=%f --type=%d --knn_neighbor=20 " \
"--knn_freq=1 --knn_batch_size=%d --tensorboard=1 "%(args.data,epoch,
batch_size,lr_w,lr_bias,alpha,args.type,256 )
if args.node==1:
write_slurm_sh_faster("BTtype%d_%d_epoch%d" % (args.type,batch_size,epoch), command_line, queue_name,
gpu_memory=False, environment=0)
else:
write_slurm_sh_multi2(
"BTtype%d_%d_epoch%d" % (args.type, batch_size, epoch),
command_line, queue_name, nodes=args.node, gpu_per_node=args.gpu,
gpu_memory=False, environment=0)
elif args.type==2:
for epoch in [100]:
for batch_size in [1024]:
for lr_w in [0.2]:
for lr_bias in [0.0048]:
for alpha in [0.51]:
for group_size in [2,4,8,16,32]:
command_line = "python3 main.py %s --epochs=%d " \
"--batch-size=%d --learning-rate-weights=%f --learning-rate-biases=%f " \
"--weight-decay=1e-6 --lambd=%f --type=%d --knn_neighbor=20 " \
"--knn_freq=1 --knn_batch_size=%d --tensorboard=1 --group_norm_size=%d " % (args.data, epoch,
batch_size, lr_w,
lr_bias, alpha,
args.type, 256,group_size)
write_slurm_sh_faster("BTtype%d_%d_%d_epoch%d" % (args.type,group_size, batch_size,epoch), command_line, queue_name,
gpu_memory=False, environment=0)
elif args.mode==0:
    # mode 0: linear evaluation / fine-tuning; submits lincls.py jobs labeled with args.comment
    use_bn = args.type
for lr in [20]:
for weight_decay in [1e-6,1e-7,1e-8,1e-9]:
command_line = "python3 lincls.py --data=%s --dist-url=tcp://localhost:10031 " \
"--pretrained='%s' --lr=%.4f --final_lr=%.8f --dataset=ImageNet --use_bn=%d --wd %.8f" % (
args.data, args.F, lr, lr / 100, use_bn,weight_decay)
write_slurm_sh("linear_eval_%s_%.4f_bn%d_wd_%f" % (args.comment, lr, use_bn,weight_decay), command_line, queue_name)
time.sleep(1)
elif args.mode==-2:
use_bn = args.type
    # type 3: L2-normalized linear classifier
for lr in [1.0]:
for weight_decay in [1e-5,1e-6,1e-7,1e-8,1e-9]:
command_line = "python3 lincls.py --data=%s --dist-url=tcp://localhost:10031 --batch-size=4096 " \
"--pretrained='%s' --lr=%.4f --final_lr=%.8f --dataset=ImageNet --use_bn=%d --wd %.8f" % (
args.data, args.F, lr, lr / 100, use_bn, weight_decay)
write_slurm_sh("linearb4096_eval_%s_%.4f_bn%d_wd_%.8f" % (args.comment, lr, use_bn, weight_decay), command_line,
queue_name)
elif args.mode==-1:
command_line = "python3 encode.py --data=%s --dist-url=tcp://localhost:10031 " \
"--pretrained='%s' --dataset=ImageNet " % (args.data, args.F)
write_slurm_sh("encode_%s" % (args.comment), command_line, queue_name)
elif args.mode==-3:
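    # mode -3: AdCo baseline run (memory_lr / mem_t / ad_init flags); note --data is passed twice
    # (=%s and =imagenet), and with argparse the later occurrence typically wins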
command_line = "python3 main_adco.py --sym=0 --lr=0.03 --memory_lr=3 --moco_t=0.12 " \
"--mem_t=0.02 --data=%s --dist_url=tcp://localhost:10001 --mode=0 " \
"--epochs=200 --moco_dim=128 --moco_m=0.999 --moco_k=65536 --cluster=65536 " \
"--knn_neighbor=20 --knn_freq=1 --data=imagenet --batch_size=256 --ad_init=1 "%(args.data)
write_slurm_sh("type0",command_line,queue_name)
elif args.mode==-4:
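    # mode -4: linear evaluation with LARS (lincls_lars.py); the vit_model flag switches the arch
    # to vit_small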
use_bn = args.type
vit_model =True
for lr in [0.05,0.1]:
for weight_decay in [0]:
for model_type in [0]:
command_line ="python lincls_lars.py -a resnet50 --dist-url 'tcp://localhost:10001' " \
"--multiprocessing-distributed --world-size 1 --rank 0 --pretrained='%s' --lr %f --wd %f " \
"--lars --data %s --use_bn=%d --model_type=%d "%(args.F,lr,
weight_decay,args.data,use_bn,model_type)
if vit_model:
command_line +=" --arch vit_small"
write_slurm_sh("linear_larsb4096_eval_%s_bn%d_%.4f_wd_%.8f" % (args.comment, use_bn,lr,weight_decay),
command_line,
queue_name)
elif args.mode==-40:
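    # mode -40: batch variant of mode -4; looks for checkpoint_0099.pth.tar directly in args.F or in
    # its subdirectories and submits one linear-eval job per checkpoint found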
use_bn = args.type
study_dir = os.path.abspath(args.F)
checkpoint_name = "checkpoint_0099.pth.tar"
for item in os.listdir(study_dir):
if item== checkpoint_name:
current_model_path = os.path.join(study_dir,item)
current_dir = study_dir
current_comment = os.path.split(current_dir)[1]
else:
current_dir = os.path.join(study_dir,item)
current_comment = os.path.split(current_dir)[1]
current_model_path = find_checkpoint(current_dir,checkpoint_name)
if current_model_path is None:
print("%s dir did not find checkpoint"%current_dir)
continue
if not os.path.exists(current_model_path):
print("%s model path did not exist"%current_model_path)
continue
print("fintune %s model"%current_model_path)
for lr in [0.05, 0.1]:
for weight_decay in [0]:
for model_type in [0]:
command_line = "python lincls_lars.py -a resnet50 --dist-url 'tcp://localhost:10001' " \
"--multiprocessing-distributed --world-size 1 --rank 0 --pretrained='%s' --lr %f --wd %f " \
"--lars --data %s --use_bn=%d --model_type=%d " % (current_model_path, lr,
weight_decay, args.data, use_bn,
model_type)
write_slurm_sh(
"linear_larsb4096_eval_%s_bn%d_%.4f_wd_%.8f" % (str(args.comment)+current_comment, use_bn, lr, weight_decay),
command_line,
queue_name)
elif args.mode==-5:
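    # mode -5: detection transfer with detectron2-style train_net.py; VOC configs are submitted
    # repeatedly (indexed by kk), COCO configs from the mapping below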
config_dict={}
config_path = os.path.join(os.getcwd(),"detection")
config_path = os.path.join(config_path,"configs")
config_dict['VOC']=os.path.join(config_path,"pascal_voc_R_50_C4_24k_loco.yaml")
config_dict['VOC_freeze'] = os.path.join(config_path, "pascal_voc_R_50_C4_24k_loco_freeze.yaml")
config_dict['COCO'] = os.path.join(config_path,"coco_R_50_C4_2x.yaml_loco.yaml")
config_dict['COCO_freeze'] =os.path.join(config_path,"coco_R_50_C4_2x.yaml_loco_freeze.yaml")
model_path = os.path.abspath(args.F)
model_name = os.path.split(model_path)[1].replace(".pkl","")
for kk in range(5):
for config_now in ['VOC','VOC_freeze']:
command_line = "python detection/train_net.py --config-file %s --num-gpus 8" \
" MODEL.WEIGHTS %s"%(config_dict[config_now],args.F)
write_slurm_sh_faster("detection_%s_run%d_%s" % (config_now, kk,model_name),
command_line, queue_name, gpu_memory=True)
for config_now in ['COCO',"COCO_freeze"]:
command_line = "python detection/train_net.py --config-file %s --num-gpus 8" \
" MODEL.WEIGHTS %s" % (config_dict[config_now], args.F)
write_slurm_sh_faster("detection_%s_%s" % (config_now, model_name),
command_line, queue_name, gpu_memory=True)
elif args.mode==-6:
    # mode -6: linear evaluation following the MoCo v3 protocol (main_lincls.py)
for lr in [0.03,0.06,0.1,0.15,0.12]:
for weight_decay in [0]:
command_line ="python main_lincls.py -a resnet50 --dist-url 'tcp://localhost:10001' " \
"--multiprocessing-distributed --world-size 1 --rank 0 --pretrained='%s' --lr %f --wd %f " \
" %s "%(args.F,lr,weight_decay,args.data)
write_slurm_sh("linear_main_lincls_%s_%.4f_wd_%.8f" % (args.comment, lr,weight_decay),
command_line,
queue_name)
| 72.542391 | 156 | 0.428224 | [
"MIT"
] | wang3702/barlowtwins | run_slurm.py | 266,956 | Python |
# -*- coding: UTF-8 -*-
#
# Copyright 2018 Joachim Lusiardi
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Implements the Secure Remote Password (SRP) algorithm. More information can be found on
https://tools.ietf.org/html/rfc5054. See HomeKit spec page 36 for adjustments imposed by Apple.
"""
import math
import hashlib
import os
class Srp:
def __init__(self):
# generator as defined by 3072bit group of RFC 5054
        self.g = 5
# modulus as defined by 3072bit group of RFC 5054
self.n = int(b'''\
FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08\
8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B\
302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9\
A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6\
49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8\
FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D\
670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C\
180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718\
3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D\
04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D\
B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226\
1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C\
BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC\
E0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF''', 16)
# HomeKit requires SHA-512 (See page 36)
self.h = hashlib.sha512
self.A = None
self.B = None
self.salt = None
self.username = None
self.password = None
@staticmethod
def generate_private_key():
"""
Static function to generate a 16 byte random key.
:return: the key as an integer
"""
# see
# - https://github.com/jlusiardi/homekit_python/issues/185#issuecomment-616344895 and
# - https://cryptography.io/en/latest/random-numbers/
return int.from_bytes(os.urandom(16), byteorder="big")
def _calculate_k(self) -> int:
# calculate k (see https://tools.ietf.org/html/rfc5054#section-2.5.3)
hash_instance = self.h()
n = Srp.to_byte_array(self.n)
        # per RFC 5054, g is left-padded with zero bytes to the byte length of
        # N (384 bytes for the 3072-bit group) before hashing: PAD(g)
        g = bytearray.fromhex(383 * '00' + '05')
hash_instance.update(n)
hash_instance.update(g)
k = int.from_bytes(hash_instance.digest(), "big")
return k
def _calculate_u(self) -> int:
if self.A is None:
raise RuntimeError('Client\'s public key is missing')
if self.B is None:
raise RuntimeError('Server\'s public key is missing')
hash_instance = self.h()
A_b = Srp.to_byte_array(self.A)
B_b = Srp.to_byte_array(self.B)
hash_instance.update(A_b)
hash_instance.update(B_b)
u = int.from_bytes(hash_instance.digest(), "big")
return u
def get_session_key(self) -> int:
hash_instance = self.h()
hash_instance.update(Srp.to_byte_array(self.get_shared_secret()))
hash_value = int.from_bytes(hash_instance.digest(), "big")
return hash_value
    @staticmethod
    def to_byte_array(num: int) -> bytearray:
        """Convert a non-negative integer into a big-endian bytearray without leading zero bytes."""
        return bytearray(num.to_bytes(int(math.ceil(num.bit_length() / 8)), "big"))
    def _calculate_x(self) -> int:
        # x = H(salt | H(username ':' password)), see RFC 5054
        i = (self.username + ':' + self.password).encode()
hash_instance = self.h()
hash_instance.update(i)
hash_value = hash_instance.digest()
hash_instance = self.h()
hash_instance.update(Srp.to_byte_array(self.salt))
hash_instance.update(hash_value)
return int.from_bytes(hash_instance.digest(), "big")
def get_shared_secret(self):
raise NotImplementedError()
class SrpClient(Srp):
"""
Implements all functions that are required to simulate an iOS HomeKit controller
"""
def __init__(self, username: str, password: str):
Srp.__init__(self)
self.username = username
self.password = password
self.salt = None
self.a = self.generate_private_key()
self.A = pow(self.g, self.a, self.n)
self.B = None
    def set_salt(self, salt):
        if isinstance(salt, (bytes, bytearray)):
            self.salt = int.from_bytes(salt, "big")
        else:
            self.salt = salt
    def get_public_key(self):
        # A = g^a mod n, already computed in __init__
        return self.A
    def set_server_public_key(self, B):
        if isinstance(B, (bytes, bytearray)):
            self.B = int.from_bytes(B, "big")
        else:
            self.B = B
    def get_shared_secret(self):
        if self.B is None:
            raise RuntimeError('Server\'s public key is missing')
        u = self._calculate_u()
        x = self._calculate_x()
        k = self._calculate_k()
        # S = (B - k * g^x) ^ (a + u * x) mod n (client side of RFC 5054)
        tmp1 = self.B - (k * pow(self.g, x, self.n))
        tmp2 = self.a + (u * x)
        S = pow(tmp1, tmp2, self.n)
        return S
    def get_proof(self):
        """Compute the client proof M1 = H(H(N) xor H(g) | H(U) | s | A | B | K)."""
        if self.B is None:
            raise RuntimeError('Server\'s public key is missing')
hash_instance = self.h()
hash_instance.update(Srp.to_byte_array(self.n))
hN = bytearray(hash_instance.digest())
hash_instance = self.h()
hash_instance.update(Srp.to_byte_array(self.g))
hg = bytearray(hash_instance.digest())
        # XOR H(N) with H(g) byte by byte
        for index in range(len(hN)):
            hN[index] ^= hg[index]
u = self.username.encode()
hash_instance = self.h()
hash_instance.update(u)
hu = hash_instance.digest()
K = Srp.to_byte_array(self.get_session_key())
hash_instance = self.h()
hash_instance.update(hN)
hash_instance.update(hu)
hash_instance.update(Srp.to_byte_array(self.salt))
hash_instance.update(Srp.to_byte_array(self.A))
hash_instance.update(Srp.to_byte_array(self.B))
hash_instance.update(K)
return int.from_bytes(hash_instance.digest(), "big")
    def verify_servers_proof(self, M):
        if isinstance(M, (bytes, bytearray)):
            tmp = int.from_bytes(M, "big")
        else:
            tmp = M
hash_instance = self.h()
hash_instance.update(Srp.to_byte_array(self.A))
hash_instance.update(Srp.to_byte_array(self.get_proof()))
hash_instance.update(Srp.to_byte_array(self.get_session_key()))
return tmp == int.from_bytes(hash_instance.digest(), "big")
class SrpServer(Srp):
"""
Implements all functions that are required to simulate an iOS HomeKit accessory
"""
def __init__(self, username, password):
Srp.__init__(self)
self.username = username
self.salt = SrpServer._create_salt()
self.password = password
self.verifier = self._get_verifier()
        self.b = self.generate_private_key()
        k = self._calculate_k()
        g_b = pow(self.g, self.b, self.n)
        # B = (k * v + g^b) mod n (server side of RFC 5054)
        self.B = (k * self.verifier + g_b) % self.n
        self.A = None
@staticmethod
def _create_salt() -> int:
# see
# - https://github.com/jlusiardi/homekit_python/issues/185#issuecomment-616344895 and
# - https://cryptography.io/en/latest/random-numbers/
return int.from_bytes(os.urandom(16), byteorder="big")
def _get_verifier(self) -> int:
hash_value = self._calculate_x()
v = pow(self.g, hash_value, self.n)
return v
def set_client_public_key(self, A):
self.A = A
def get_salt(self):
return self.salt
    def get_public_key(self):
        # B = (k * v + g^b) mod n, already computed in __init__
        return self.B
    def get_shared_secret(self):
        if self.A is None:
            raise RuntimeError('Client\'s public key is missing')
        # S = (A * v^u) ^ b mod n (server side of RFC 5054)
        tmp1 = self.A * pow(self.verifier, self._calculate_u(), self.n)
        return pow(tmp1, self.b, self.n)
    def verify_clients_proof(self, m) -> bool:
        """Recompute the client proof M1 locally and compare it against the received value."""
        if self.B is None:
            raise RuntimeError('Server\'s public key is missing')
hash_instance = self.h()
hash_instance.update(Srp.to_byte_array(self.n))
hN = bytearray(hash_instance.digest())
hash_instance = self.h()
hash_instance.update(Srp.to_byte_array(self.g))
hg = bytearray(hash_instance.digest())
        # XOR H(N) with H(g) byte by byte
        for index in range(len(hN)):
            hN[index] ^= hg[index]
u = self.username.encode()
hash_instance = self.h()
hash_instance.update(u)
hu = hash_instance.digest()
K = Srp.to_byte_array(self.get_session_key())
hash_instance = self.h()
hash_instance.update(hN)
hash_instance.update(hu)
hash_instance.update(Srp.to_byte_array(self.salt))
hash_instance.update(Srp.to_byte_array(self.A))
hash_instance.update(Srp.to_byte_array(self.B))
hash_instance.update(K)
return m == int.from_bytes(hash_instance.digest(), "big")
def get_proof(self, m) -> int:
hash_instance = self.h()
hash_instance.update(Srp.to_byte_array(self.A))
hash_instance.update(Srp.to_byte_array(m))
hash_instance.update(Srp.to_byte_array(self.get_session_key()))
return int.from_bytes(hash_instance.digest(), "big")
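
# A minimal end-to-end sketch of the handshake implemented above. This demo
# block is not part of the original module; the username/password pair is an
# illustrative placeholder (HomeKit pair-setup uses 'Pair-Setup' as the
# username and the accessory's setup code as the password).
if __name__ == "__main__":
    username = 'Pair-Setup'
    password = '123-45-678'
    server = SrpServer(username, password)
    client = SrpClient(username, password)

    # the server publishes its salt and public value B; the client answers with A
    client.set_salt(server.get_salt())
    client.set_server_public_key(server.get_public_key())
    server.set_client_public_key(client.get_public_key())

    # both sides must now have derived the same session key
    assert client.get_session_key() == server.get_session_key()

    # the client proves knowledge of the password first, then the server proves back
    m1 = client.get_proof()
    assert server.verify_clients_proof(m1)
    m2 = server.get_proof(m1)
    assert client.verify_servers_proof(m2)
    print("SRP handshake verified")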
| 34.743772 | 95 | 0.651029 | ["Apache-2.0"] | jlusiardi/homekit_client | homekit/crypto/srp.py | 9,763 | Python |