content (stringlengths 7–928k) | avg_line_length (float64 3.5–33.8k) | max_line_length (int64 6–139k) | alphanum_fraction (float64 0.08–0.96) | licenses (sequence) | repository_name (stringlengths 7–104) | path (stringlengths 4–230) | size (int64 7–928k) | lang (stringclasses, 1 value) |
---|---|---|---|---|---|---|---|---|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2020- IBM Inc. All rights reserved
# SPDX-License-Identifier: Apache-2.0
#
"""
"""
from abc import ABC, abstractproperty, abstractmethod
class AbstractType(ABC):
@abstractproperty
def length(self):
pass
@abstractmethod
def __call__(self):
pass
def _get_chunk(self):
return self.locator.content(self.length)
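# Illustrative sketch (not part of the original module): one way a concrete type
# could subclass AbstractType. The `locator` object and its `content(length)`
# method are assumed from `_get_chunk` above; the class and attribute names
# below are hypothetical, not part of the cbexplorer API.
class ExampleFixedWidthType(AbstractType):
    def __init__(self, locator, length=4):
        self.locator = locator
        self._length = length
    @property
    def length(self):
        # Concrete implementation of the abstract `length` property.
        return self._length
    def __call__(self):
        # Read and return `length` bytes through the shared helper.
        return self._get_chunk()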
| 16.68 | 53 | 0.657074 | ["Apache-2.0"] | ambitus/cbexplorer | cbexplorer/types/AbstractType.py | 417 | Python |
"""
Copyright (C) 2018-2019 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import openvino.inference_engine as ie
from ..infer_raw_results import InferRawResults
from ..aggregated_statistics import AggregatedStatistics
class CollectResultsCallback:
def __init__(
self,
network: ie.IENetwork,
exec_network: ie.ExecutableNetwork,
collect_results: bool = True,
collect_layers: set = None,
collect_aggregated_statistics: bool = True,
iterations_count: int = 1,
dataset_size: int = 1):
if not network:
raise ValueError("network is not specified")
if not exec_network:
raise ValueError("exec_network is not specified")
self._network = network
self._exec_network = exec_network
self._aggregated_statistics = None
self._iterations_count = iterations_count
self._dataset_size = dataset_size
self._collect_results = collect_results
self._collect_layers = collect_layers
self._collect_aggregated_statistics = collect_aggregated_statistics
self._infer_raw_results = InferRawResults() if collect_results else None
self._latencies = list()
def callback(self, value, latency = None):
if self._collect_aggregated_statistics:
if not self._aggregated_statistics:
self._aggregated_statistics = AggregatedStatistics(
iterations_count = self._iterations_count,
dataset_size = self._dataset_size)
self._aggregated_statistics.add(self._network, self._exec_network, value)
if self._collect_results:
if self._collect_layers:
collect_value = dict()
for layer_name in value:
if layer_name in self._collect_layers:
collect_value[layer_name] = value[layer_name]
self._infer_raw_results.add(collect_value)
else:
self._infer_raw_results.add(value)
if latency:
self._latencies.append(latency)
@property
def aggregated_statistics(self) -> AggregatedStatistics:
return self._aggregated_statistics
@property
def infer_raw_result(self) -> InferRawResults:
return self._infer_raw_results
@property
def latencies(self) -> list:
return self._latencies
def release(self):
if self._aggregated_statistics:
self._aggregated_statistics.release()
if self._infer_raw_results:
self._infer_raw_results.release()
def get_accuracy_drop(self):
return None
| 35.505618 | 85 | 0.67943 | ["Apache-2.0"] | ChinHuatAng/dldt | tools/calibration/process_dataset_callbacks/collect_results_callback.py | 3,160 | Python |
from django.shortcuts import render
from wiki.models import Page
from django.views.generic.list import ListView
from django.views.generic.detail import DetailView
from django.shortcuts import get_object_or_404
class PageList(ListView):
"""
This view grabs all the pages out of the database and
returns a list of each unique wiki page for the
user to access on the website through 'list.html'
"""
model = Page
def get(self, request):
""" Returns a list of wiki pages. """
pages = Page.objects.all()
context = {'pages': pages}
return render(request, 'list.html', context=context)
class PageDetailView(DetailView):
"""
This view returns a page for a unique wiki using its slug as an identifier
or a 404 message if the page does not exist
"""
model = Page
def get(self, request, slug):
wiki = get_object_or_404(Page, slug=slug)
return render(request, 'page.html', {'wiki': wiki})
def post(self, request, slug):
pass
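# Illustrative sketch (not part of the original file): how these class-based
# views might be wired into a URLconf. The route names and the `wiki/urls.py`
# location are assumptions for the example only.
#
# from django.urls import path
# from wiki.views import PageList, PageDetailView
#
# urlpatterns = [
#     path('', PageList.as_view(), name='wiki-list'),
#     path('<slug:slug>/', PageDetailView.as_view(), name='wiki-page'),
# ]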
| 29.542857 | 79 | 0.675048 | ["MIT"] | ebonnecab/makewiki | wiki/views.py | 1,034 | Python |
import policy
import traceback
import logging
import monitoring
import itertools
from .policy_registry import GetConfig
def ApplyPolicies(g):
config = GetConfig()
enabled = config.get('enabled', True)
if enabled is not None and not enabled:
return
monitoring_db = monitoring.GetDatabase('spinbot')
logging.info('Processing issues, repos')
for i in itertools.chain(*[g.issues(), g.repos()]):
for p in policy.Policies():
if p.applies(i):
err = None
try:
p.apply(g, i)
except Exception as _err:
logging.warning('Failure applying {} to {}: {}'.format(
p, i, traceback.format_exc()
))
err = _err
monitoring_db.write('issues_handled', { 'value': 1 }, tags={
'policy': p.id,
'error': err
})
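# Illustrative sketch (not part of the original file): the minimal interface
# ApplyPolicies assumes of each object returned by policy.Policies() -- an `id`
# attribute, an `applies(item)` predicate and an `apply(g, item)` action. The
# class below is hypothetical and is not registered anywhere.
class ExampleLabelPolicy(object):
    id = 'example_label_policy'
    def applies(self, item):
        # Hypothetical check: only act on items that expose labels.
        return hasattr(item, 'labels')
    def apply(self, g, item):
        # A real policy would act on `item` through the GitHub wrapper `g`;
        # here we only log what would happen.
        logging.info('example policy would process {}'.format(item))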
| 29.242424 | 76 | 0.516062 | ["Apache-2.0"] | pchinmay/spinnaker | spinbot/policy/executor.py | 965 | Python |
"""Polynomial model class used by agents for building stuff.
"""
from torch import nn, optim
import torch
import torch.nn.functional as F
from stock_trading_backend.agent.model import Model
class NNModel(nn.Module):
"""Torch neural network model.
"""
def __init__(self, num_inputs, num_hidden_layers, num_inner_features):
"""Initializer for linear model.
Args:
num_inputs: the dimension of input data.
num_hidden_layers: the number of hidden layers.
num_inner_features: the number of features in the hidden layers
"""
super(NNModel, self).__init__()
self.input_layer = nn.Linear(num_inputs, num_inner_features)
hidden_layers = []
for _ in range(num_hidden_layers):
hidden_layers.append(nn.Linear(num_inner_features, num_inner_features))
hidden_layers.append(nn.ReLU())
self.hidden_layers = nn.Sequential(*hidden_layers)
self.output_layer = nn.Linear(num_inner_features, 1)
def forward(self, input_tensor):
"""Forward pass on the neural network model.
Args:
input_tensor: the input tensor.
Returns:
Tensor with model results.
"""
output = F.relu(self.input_layer(input_tensor))
output = self.hidden_layers(output)
output = self.output_layer(output)
return output
class NeuralNetworkModel(Model):
"""Neural netowrk model class.
"""
name = "neural_network_model"
def __init__(self, learning_rate=1e-3, num_hidden_layers=1, num_inner_features=100):
"""Initializer for model class.
Args:
learning_rate: the learning rate of the model.
num_hidden_layers: number of hidden layers in the network.
num_inner_features: number of features in the hidden layers.
"""
super(NeuralNetworkModel, self).__init__()
self.model = None
self.optimizer = None
self.criterion = nn.MSELoss()
self.learning_rate = learning_rate
self.num_hidden_layers = num_hidden_layers
self.num_inner_features = num_inner_features
self.id_str = "{}_{}_{}_{}".format(self.name, learning_rate, num_hidden_layers,
num_inner_features)
def _init_model(self, num_inputs):
"""Initializes internal linear model.
Args:
num_inputs: number of inputs that model will have.
"""
self.model = NNModel(num_inputs, self.num_hidden_layers, self.num_inner_features)
self.optimizer = optim.Adam(self.model.parameters(), lr=self.learning_rate)
def _predict(self, state_action_tensor):
"""Use provided information to make a prediction.
Args:
state_action_tensor: pytorch tensor with state-action values.
Returns:
Predicted values for observation-action tensors.
"""
if self.model is None:
self._init_model(state_action_tensor.shape[1])
return self.model(state_action_tensor).detach().reshape(-1)
def _train(self, state_action_tensor, expected_values_tensor):
"""Train the model for 1 epoch.
Args:
state_action_tensor: pytorch tensor with state-action expected_values.
expected_values: pytorch tensor with expected values for each state-action.
Returns:
The loss before trainig.
"""
if self.model is None:
self._init_model(state_action_tensor.shape[1])
self.optimizer.zero_grad()
output = self.model(state_action_tensor)
loss = self.criterion(output, expected_values_tensor)
loss_value = loss.data.item()
loss.backward()
self.optimizer.step()
return loss_value
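# Illustrative sketch (not part of the original module): exercising the model on
# random data. The 10-feature state-action tensor, the batch size and the direct
# use of the internal _train/_predict helpers are for demonstration only; agent
# code normally reaches them through the Model base class.
if __name__ == "__main__":
    model = NeuralNetworkModel(learning_rate=1e-3, num_hidden_layers=2, num_inner_features=32)
    state_action = torch.rand(8, 10)
    expected_values = torch.rand(8, 1)
    for _ in range(5):
        loss = model._train(state_action, expected_values)
    print("final training loss:", loss)
    print("predictions:", model._predict(state_action))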
| 34.495495 | 89 | 0.648211 | ["MIT"] | iryzhkov/stock-trading-backend | stock_trading_backend/agent/neural_network_model.py | 3,829 | Python |
#!/usr/bin/python
# Copyright (c) 2020, 2021 Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
# GENERATED FILE - DO NOT EDIT - MANUAL CHANGES WILL BE OVERWRITTEN
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_waas_access_rules_facts
short_description: Fetches details about one or multiple AccessRules resources in Oracle Cloud Infrastructure
description:
- Fetches details about one or multiple AccessRules resources in Oracle Cloud Infrastructure
- Gets the currently configured access rules for the Web Application Firewall configuration of a specified WAAS policy.
The order of the access rules is important. The rules will be checked in the order they are specified and the first matching rule will be used.
version_added: "2.9.0"
author: Oracle (@oracle)
options:
waas_policy_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the WAAS policy.
type: str
required: true
extends_documentation_fragment: [ oracle.oci.oracle, oracle.oci.oracle_name_option ]
"""
EXAMPLES = """
- name: List access_rules
oci_waas_access_rules_facts:
# required
waas_policy_id: "ocid1.waaspolicy.oc1..xxxxxxEXAMPLExxxxxx"
"""
RETURN = """
access_rules:
description:
- List of AccessRules resources
returned: on success
type: complex
contains:
name:
description:
- The unique name of the access rule.
returned: on success
type: str
sample: name_example
criteria:
description:
- The list of access rule criteria. The rule would be applied only for the requests that matched all the listed conditions.
returned: on success
type: complex
contains:
condition:
description:
- "The criteria the access rule and JavaScript Challenge uses to determine if action should be taken on a request.
- **URL_IS:** Matches if the concatenation of request URL path and query is identical to the contents of the `value` field. URL must
start with a `/`.
- **URL_IS_NOT:** Matches if the concatenation of request URL path and query is not identical to the contents of the `value` field.
URL must start with a `/`.
- **URL_STARTS_WITH:** Matches if the concatenation of request URL path and query starts with the contents of the `value` field. URL
must start with a `/`.
- **URL_PART_ENDS_WITH:** Matches if the concatenation of request URL path and query ends with the contents of the `value` field.
- **URL_PART_CONTAINS:** Matches if the concatenation of request URL path and query contains the contents of the `value` field.
- **URL_REGEX:** Matches if the concatenation of request URL path and query is described by the regular expression in the value field.
The value must be a valid regular expression recognized by the PCRE library in Nginx (https://www.pcre.org).
- **URL_DOES_NOT_MATCH_REGEX:** Matches if the concatenation of request URL path and query is not described by the regular expression
in the `value` field. The value must be a valid regular expression recognized by the PCRE library in Nginx (https://www.pcre.org).
- **URL_DOES_NOT_START_WITH:** Matches if the concatenation of request URL path and query does not start with the contents of the
`value` field.
- **URL_PART_DOES_NOT_CONTAIN:** Matches if the concatenation of request URL path and query does not contain the contents of the
`value` field.
- **URL_PART_DOES_NOT_END_WITH:** Matches if the concatenation of request URL path and query does not end with the contents of the
`value` field.
- **IP_IS:** Matches if the request originates from one of the IP addresses contained in the defined address list. The `value` in this
case is string with one or multiple IPs or CIDR notations separated by new line symbol \\\\n
*Example:* \\"1.1.1.1\\\\n1.1.1.2\\\\n1.2.2.1/30\\"
- **IP_IS_NOT:** Matches if the request does not originate from any of the IP addresses contained in the defined address list. The
`value` in this case is string with one or multiple IPs or CIDR notations separated by new line symbol \\\\n
*Example:* \\"1.1.1.1\\\\n1.1.1.2\\\\n1.2.2.1/30\\"
- **IP_IN_LIST:** Matches if the request originates from one of the IP addresses contained in the referenced address list. The `value`
in this case is OCID of the address list.
- **IP_NOT_IN_LIST:** Matches if the request does not originate from any IP address contained in the referenced address list. The
`value` field in this case is OCID of the address list.
- **HTTP_HEADER_CONTAINS:** The HTTP_HEADER_CONTAINS criteria is defined using a compound value separated by a colon: a header field
name and a header field value. `host:test.example.com` is an example of a criteria value where `host` is the header field name and
`test.example.com` is the header field value. A request matches when the header field name is a case insensitive match and the
header field value is a case insensitive, substring match.
*Example:* With a criteria value of `host:test.example.com`, where `host` is the name of the field and `test.example.com` is the value
of the host field, a request with the header values, `Host: www.test.example.com` will match, where as a request with header values of
`host: www.example.com` or `host: test.sub.example.com` will not match.
- **HTTP_METHOD_IS:** Matches if the request method is identical to one of the values listed in field. The `value` in this case is
string with one or multiple HTTP methods separated by new line symbol \\\\n The list of available methods: `GET`, `HEAD`, `POST`,
`PUT`, `DELETE`, `CONNECT`, `OPTIONS`, `TRACE`, `PATCH`"
- "*Example:* \\"GET\\\\nPOST\\""
- "- **HTTP_METHOD_IS_NOT:** Matches if the request is not identical to any of the contents of the `value` field. The `value` in this
case is string with one or multiple HTTP methods separated by new line symbol \\\\n The list of available methods: `GET`, `HEAD`,
`POST`, `PUT`, `DELETE`, `CONNECT`, `OPTIONS`, `TRACE`, `PATCH`"
- "*Example:* \\"GET\\\\nPOST\\""
- "- **COUNTRY_IS:** Matches if the request originates from one of countries in the `value` field. The `value` in this case is string
with one or multiple countries separated by new line symbol \\\\n Country codes are in ISO 3166-1 alpha-2 format. For a list of codes,
see L(ISO's website,https://www.iso.org/obp/ui/#search/code/).
*Example:* \\"AL\\\\nDZ\\\\nAM\\"
- **COUNTRY_IS_NOT:** Matches if the request does not originate from any of countries in the `value` field. The `value` in this case
is string with one or multiple countries separated by new line symbol \\\\n Country codes are in ISO 3166-1 alpha-2 format. For a
list of codes, see L(ISO's website,https://www.iso.org/obp/ui/#search/code/).
*Example:* \\"AL\\\\nDZ\\\\nAM\\"
- **USER_AGENT_IS:** Matches if the requesting user agent is identical to the contents of the `value` field.
*Example:* `Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:35.0) Gecko/20100101 Firefox/35.0`
- **USER_AGENT_IS_NOT:** Matches if the requesting user agent is not identical to the contents of the `value` field.
*Example:* `Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:35.0) Gecko/20100101 Firefox/35.0`"
returned: on success
type: str
sample: URL_IS
value:
description:
- The criteria value.
returned: on success
type: str
sample: value_example
is_case_sensitive:
description:
- When enabled, the condition will be matched with case-sensitive rules.
returned: on success
type: bool
sample: true
action:
description:
- The action to take when the access criteria are met for a rule. If unspecified, defaults to `ALLOW`.
- "- **ALLOW:** Takes no action, just logs the request."
- "- **DETECT:** Takes no action, but creates an alert for the request."
- "- **BLOCK:** Blocks the request by returning specified response code or showing error page."
- "- **BYPASS:** Bypasses some or all challenges."
- "- **REDIRECT:** Redirects the request to the specified URL. These fields are required when `REDIRECT` is selected: `redirectUrl`,
`redirectResponseCode`."
- "- **SHOW_CAPTCHA:** Show a CAPTCHA Challenge page instead of the requested page."
- Regardless of action, no further rules are processed once a rule is matched.
returned: on success
type: str
sample: ALLOW
block_action:
description:
- The method used to block requests if `action` is set to `BLOCK` and the access criteria are met. If unspecified, defaults to
`SET_RESPONSE_CODE`.
returned: on success
type: str
sample: SET_RESPONSE_CODE
block_response_code:
description:
- "The response status code to return when `action` is set to `BLOCK`, `blockAction` is set to `SET_RESPONSE_CODE`, and the access criteria are
met. If unspecified, defaults to `403`. The list of available response codes: `200`, `201`, `202`, `204`, `206`, `300`, `301`, `302`, `303`,
`304`, `307`, `400`, `401`, `403`, `404`, `405`, `408`, `409`, `411`, `412`, `413`, `414`, `415`, `416`, `422`, `444`, `494`, `495`, `496`,
`497`, `499`, `500`, `501`, `502`, `503`, `504`, `507`."
returned: on success
type: int
sample: 56
block_error_page_message:
description:
- The message to show on the error page when `action` is set to `BLOCK`, `blockAction` is set to `SHOW_ERROR_PAGE`, and the access criteria are
met. If unspecified, defaults to 'Access to the website is blocked.'
returned: on success
type: str
sample: block_error_page_message_example
block_error_page_code:
description:
- The error code to show on the error page when `action` is set to `BLOCK`, `blockAction` is set to `SHOW_ERROR_PAGE`, and the access criteria
are met. If unspecified, defaults to 'Access rules'.
returned: on success
type: str
sample: block_error_page_code_example
block_error_page_description:
description:
- The description text to show on the error page when `action` is set to `BLOCK`, `blockAction` is set to `SHOW_ERROR_PAGE`, and the access
criteria are met. If unspecified, defaults to 'Access blocked by website owner. Please contact support.'
returned: on success
type: str
sample: block_error_page_description_example
bypass_challenges:
description:
- The list of challenges to bypass when `action` is set to `BYPASS`. If unspecified or empty, all challenges are bypassed.
- "- **JS_CHALLENGE:** Bypasses JavaScript Challenge."
- "- **DEVICE_FINGERPRINT_CHALLENGE:** Bypasses Device Fingerprint Challenge."
- "- **HUMAN_INTERACTION_CHALLENGE:** Bypasses Human Interaction Challenge."
- "- **CAPTCHA:** Bypasses CAPTCHA Challenge."
returned: on success
type: list
sample: []
redirect_url:
description:
- The target to which the request should be redirected, represented as a URI reference. Required when `action` is `REDIRECT`.
returned: on success
type: str
sample: redirect_url_example
redirect_response_code:
description:
- The response status code to return when `action` is set to `REDIRECT`.
- "- **MOVED_PERMANENTLY:** Used for designating the permanent movement of a page (numerical code - 301)."
- "- **FOUND:** Used for designating the temporary movement of a page (numerical code - 302)."
returned: on success
type: str
sample: MOVED_PERMANENTLY
captcha_title:
description:
- The title used when showing a CAPTCHA challenge when `action` is set to `SHOW_CAPTCHA` and the request is challenged.
returned: on success
type: str
sample: captcha_title_example
captcha_header:
description:
- The text to show in the header when showing a CAPTCHA challenge when `action` is set to `SHOW_CAPTCHA` and the request is challenged.
returned: on success
type: str
sample: captcha_header_example
captcha_footer:
description:
- The text to show in the footer when showing a CAPTCHA challenge when `action` is set to `SHOW_CAPTCHA` and the request is challenged.
returned: on success
type: str
sample: captcha_footer_example
captcha_submit_label:
description:
- The text to show on the label of the CAPTCHA challenge submit button when `action` is set to `SHOW_CAPTCHA` and the request is challenged.
returned: on success
type: str
sample: captcha_submit_label_example
response_header_manipulation:
description:
- An object that represents an action to apply to an HTTP response headers if all rule criteria will be matched regardless of `action` value.
returned: on success
type: complex
contains:
action:
description:
- ""
returned: on success
type: str
sample: EXTEND_HTTP_RESPONSE_HEADER
header:
description:
- A header field name that conforms to RFC 7230.
- "Example: `example_header_name`"
returned: on success
type: str
sample: header_example
value:
description:
- A header field value that conforms to RFC 7230.
- "Example: `example_value`"
returned: on success
type: str
sample: value_example
sample: [{
"name": "name_example",
"criteria": [{
"condition": "URL_IS",
"value": "value_example",
"is_case_sensitive": true
}],
"action": "ALLOW",
"block_action": "SET_RESPONSE_CODE",
"block_response_code": 56,
"block_error_page_message": "block_error_page_message_example",
"block_error_page_code": "block_error_page_code_example",
"block_error_page_description": "block_error_page_description_example",
"bypass_challenges": [],
"redirect_url": "redirect_url_example",
"redirect_response_code": "MOVED_PERMANENTLY",
"captcha_title": "captcha_title_example",
"captcha_header": "captcha_header_example",
"captcha_footer": "captcha_footer_example",
"captcha_submit_label": "captcha_submit_label_example",
"response_header_manipulation": [{
"action": "EXTEND_HTTP_RESPONSE_HEADER",
"header": "header_example",
"value": "value_example"
}]
}]
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.oracle.oci.plugins.module_utils import oci_common_utils
from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (
OCIResourceFactsHelperBase,
get_custom_class,
)
try:
from oci.waas import WaasClient
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
class AccessRulesFactsHelperGen(OCIResourceFactsHelperBase):
"""Supported operations: list"""
def get_required_params_for_list(self):
return [
"waas_policy_id",
]
def list_resources(self):
optional_list_method_params = [
"name",
]
optional_kwargs = dict(
(param, self.module.params[param])
for param in optional_list_method_params
if self.module.params.get(param) is not None
)
return oci_common_utils.list_all_resources(
self.client.list_access_rules,
waas_policy_id=self.module.params.get("waas_policy_id"),
**optional_kwargs
)
AccessRulesFactsHelperCustom = get_custom_class("AccessRulesFactsHelperCustom")
class ResourceFactsHelper(AccessRulesFactsHelperCustom, AccessRulesFactsHelperGen):
pass
def main():
module_args = oci_common_utils.get_common_arg_spec()
module_args.update(
dict(waas_policy_id=dict(type="str", required=True), name=dict(type="str"),)
)
module = AnsibleModule(argument_spec=module_args)
if not HAS_OCI_PY_SDK:
module.fail_json(msg="oci python sdk required for this module.")
resource_facts_helper = ResourceFactsHelper(
module=module,
resource_type="access_rules",
service_client_class=WaasClient,
namespace="waas",
)
result = []
if resource_facts_helper.is_list():
result = resource_facts_helper.list()
else:
resource_facts_helper.fail()
module.exit_json(access_rules=result)
if __name__ == "__main__":
main()
| 53.663043 | 160 | 0.597731 | ["Apache-2.0"] | oracle/oci-ansible-collection | plugins/modules/oci_waas_access_rules_facts.py | 19,748 | Python |
import enum
import warnings
from optuna import exceptions
from optuna import logging
from optuna import type_checking
if type_checking.TYPE_CHECKING:
from datetime import datetime # NOQA
from typing import Any # NOQA
from typing import Dict # NOQA
from typing import Optional # NOQA
from optuna.distributions import BaseDistribution # NOQA
class TrialState(enum.Enum):
"""State of a :class:`~optuna.trial.Trial`.
Attributes:
RUNNING:
The :class:`~optuna.trial.Trial` is running.
COMPLETE:
The :class:`~optuna.trial.Trial` has been finished without any error.
PRUNED:
The :class:`~optuna.trial.Trial` has been pruned with
:class:`~optuna.exceptions.TrialPruned`.
FAIL:
The :class:`~optuna.trial.Trial` has failed due to an uncaught error.
"""
RUNNING = 0
COMPLETE = 1
PRUNED = 2
FAIL = 3
WAITING = 4
def __repr__(self):
# type: () -> str
return str(self)
def is_finished(self):
# type: () -> bool
return self != TrialState.RUNNING and self != TrialState.WAITING
class StudyDirection(enum.Enum):
"""Direction of a :class:`~optuna.study.Study`.
Attributes:
NOT_SET:
Direction has not been set.
MINIMIZE:
:class:`~optuna.study.Study` minimizes the objective function.
MAXIMIZE:
:class:`~optuna.study.Study` maximizes the objective function.
"""
NOT_SET = 0
MINIMIZE = 1
MAXIMIZE = 2
class FrozenTrial(object):
"""Status and results of a :class:`~optuna.trial.Trial`.
Attributes:
number:
Unique and consecutive number of :class:`~optuna.trial.Trial` for each
:class:`~optuna.study.Study`. Note that this field uses zero-based numbering.
state:
:class:`TrialState` of the :class:`~optuna.trial.Trial`.
value:
Objective value of the :class:`~optuna.trial.Trial`.
datetime_start:
Datetime where the :class:`~optuna.trial.Trial` started.
datetime_complete:
Datetime where the :class:`~optuna.trial.Trial` finished.
params:
Dictionary that contains suggested parameters.
distributions:
Dictionary that contains the distributions of :attr:`params`.
user_attrs:
Dictionary that contains the attributes of the :class:`~optuna.trial.Trial` set with
:func:`optuna.trial.Trial.set_user_attr`.
intermediate_values:
Intermediate objective values set with :func:`optuna.trial.Trial.report`.
"""
def __init__(
self,
number, # type: int
state, # type: TrialState
value, # type: Optional[float]
datetime_start, # type: Optional[datetime]
datetime_complete, # type: Optional[datetime]
params, # type: Dict[str, Any]
distributions, # type: Dict[str, BaseDistribution]
user_attrs, # type: Dict[str, Any]
system_attrs, # type: Dict[str, Any]
intermediate_values, # type: Dict[int, float]
trial_id, # type: int
):
# type: (...) -> None
self.number = number
self.state = state
self.value = value
self.datetime_start = datetime_start
self.datetime_complete = datetime_complete
self.params = params
self.user_attrs = user_attrs
self.system_attrs = system_attrs
self.intermediate_values = intermediate_values
self._distributions = distributions
self._trial_id = trial_id
# Ordered list of fields required for `__repr__`, `__hash__` and dataframe creation.
# TODO(hvy): Remove this list in Python 3.6 as the order of `self.__dict__` is preserved.
_ordered_fields = [
'number', 'value', 'datetime_start', 'datetime_complete', 'params', '_distributions',
'user_attrs', 'system_attrs', 'intermediate_values', '_trial_id', 'state', ]
def __eq__(self, other):
# type: (Any) -> bool
if not isinstance(other, FrozenTrial):
return NotImplemented
return other.__dict__ == self.__dict__
def __lt__(self, other):
# type: (Any) -> bool
if not isinstance(other, FrozenTrial):
return NotImplemented
return self.number < other.number
def __le__(self, other):
# type: (Any) -> bool
if not isinstance(other, FrozenTrial):
return NotImplemented
return self.number <= other.number
def __hash__(self):
# type: () -> int
return hash(tuple(getattr(self, field) for field in self._ordered_fields))
def __repr__(self):
# type: () -> str
return ('{cls}({kwargs})'.format(
cls=self.__class__.__name__,
kwargs=', '.join('{field}={value}'.format(
field=field if not field.startswith('_') else field[1:],
value=repr(getattr(self, field))) for field in self._ordered_fields)))
def _validate(self):
# type: () -> None
if self.datetime_start is None:
raise ValueError('`datetime_start` is supposed to be set.')
if self.state.is_finished():
if self.datetime_complete is None:
raise ValueError('`datetime_complete` is supposed to be set for a finished trial.')
else:
if self.datetime_complete is not None:
raise ValueError(
'`datetime_complete` is not supposed to be set for an unfinished trial.')
if self.state == TrialState.COMPLETE and self.value is None:
raise ValueError('`value` is supposed to be set for a complete trial.')
if set(self.params.keys()) != set(self.distributions.keys()):
raise ValueError('Inconsistent parameters {} and distributions {}.'.format(
set(self.params.keys()), set(self.distributions.keys())))
for param_name, param_value in self.params.items():
distribution = self.distributions[param_name]
param_value_in_internal_repr = distribution.to_internal_repr(param_value)
if not distribution._contains(param_value_in_internal_repr):
raise ValueError(
"The value {} of parameter '{}' isn't contained in the distribution {}.".
format(param_value, param_name, distribution))
@property
def distributions(self):
# type: () -> Dict[str, BaseDistribution]
"""Return the distributions for this trial.
Returns:
The distributions.
"""
return self._distributions
@distributions.setter
def distributions(self, value):
# type: (Dict[str, BaseDistribution]) -> None
"""Set the distributions for this trial.
Args:
value: The distributions.
"""
self._distributions = value
@property
def trial_id(self):
# type: () -> int
"""Return the trial ID.
.. deprecated:: 0.19.0
The direct use of this attribute is deprecated and it is recommended that you use
:attr:`~optuna.trial.FrozenTrial.number` instead.
Returns:
The trial ID.
"""
warnings.warn(
'The use of `FrozenTrial.trial_id` is deprecated. '
'Please use `FrozenTrial.number` instead.', DeprecationWarning)
logger = logging.get_logger(__name__)
logger.warning(
'The use of `FrozenTrial.trial_id` is deprecated. '
'Please use `FrozenTrial.number` instead.')
return self._trial_id
@property
def last_step(self):
# type: () -> Optional[int]
if len(self.intermediate_values) == 0:
return None
else:
return max(self.intermediate_values.keys())
class StudySummary(object):
"""Basic attributes and aggregated results of a :class:`~optuna.study.Study`.
See also :func:`optuna.study.get_all_study_summaries`.
Attributes:
study_name:
Name of the :class:`~optuna.study.Study`.
direction:
:class:`StudyDirection` of the :class:`~optuna.study.Study`.
best_trial:
:class:`FrozenTrial` with best objective value in the :class:`~optuna.study.Study`.
user_attrs:
Dictionary that contains the attributes of the :class:`~optuna.study.Study` set with
:func:`optuna.study.Study.set_user_attr`.
system_attrs:
Dictionary that contains the attributes of the :class:`~optuna.study.Study` internally
set by Optuna.
n_trials:
The number of trials ran in the :class:`~optuna.study.Study`.
datetime_start:
Datetime where the :class:`~optuna.study.Study` started.
"""
def __init__(
self,
study_name, # type: str
direction, # type: StudyDirection
best_trial, # type: Optional[FrozenTrial]
user_attrs, # type: Dict[str, Any]
system_attrs, # type: Dict[str, Any]
n_trials, # type: int
datetime_start, # type: Optional[datetime]
study_id, # type: int
):
# type: (...) -> None
self.study_name = study_name
self.direction = direction
self.best_trial = best_trial
self.user_attrs = user_attrs
self.system_attrs = system_attrs
self.n_trials = n_trials
self.datetime_start = datetime_start
self._study_id = study_id
def __eq__(self, other):
# type: (Any) -> bool
if not isinstance(other, StudySummary):
return NotImplemented
return other.__dict__ == self.__dict__
def __lt__(self, other):
# type: (Any) -> bool
if not isinstance(other, StudySummary):
return NotImplemented
return self._study_id < other._study_id
def __le__(self, other):
# type: (Any) -> bool
if not isinstance(other, StudySummary):
return NotImplemented
return self._study_id <= other._study_id
@property
def study_id(self):
# type: () -> int
"""Return the study ID.
.. deprecated:: 0.20.0
The direct use of this attribute is deprecated and it is recommended that you use
:attr:`~optuna.structs.StudySummary.study_name` instead.
Returns:
The study ID.
"""
message = 'The use of `StudySummary.study_id` is deprecated. ' \
'Please use `StudySummary.study_name` instead.'
warnings.warn(message, DeprecationWarning)
logger = logging.get_logger(__name__)
logger.warning(message)
return self._study_id
class TrialPruned(exceptions.TrialPruned):
"""Exception for pruned trials.
.. deprecated:: 0.19.0
This class was moved to :mod:`~optuna.exceptions`. Please use
:class:`~optuna.exceptions.TrialPruned` instead.
"""
def __init__(self, *args, **kwargs):
# type: (Any, Any) -> None
message = 'The use of `optuna.structs.TrialPruned` is deprecated. ' \
'Please use `optuna.exceptions.TrialPruned` instead.'
warnings.warn(message, DeprecationWarning)
logger = logging.get_logger(__name__)
logger.warning(message)
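# Illustrative sketch (not part of the original module): constructing a finished
# FrozenTrial by hand and running the consistency check defined above. The
# parameter name "x" and its UniformDistribution are arbitrary examples.
if __name__ == "__main__":
    from datetime import datetime
    from optuna.distributions import UniformDistribution
    example_trial = FrozenTrial(
        number=0,
        state=TrialState.COMPLETE,
        value=0.5,
        datetime_start=datetime.now(),
        datetime_complete=datetime.now(),
        params={"x": 1.0},
        distributions={"x": UniformDistribution(low=0.0, high=2.0)},
        user_attrs={},
        system_attrs={},
        intermediate_values={0: 1.0, 1: 0.5},
        trial_id=0,
    )
    example_trial._validate()  # raises ValueError if the fields are inconsistent
    print(example_trial.state.is_finished(), example_trial.last_step)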
| 32.02507 | 99 | 0.604593 | ["MIT"] | VladSkripniuk/optuna | optuna/structs.py | 11,497 | Python |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class UsagePlan(pulumi.CustomResource):
api_stages: pulumi.Output[list]
"""
The associated API stages of the usage plan.
* `api_id` (`str`) - API Id of the associated API stage in a usage plan.
* `stage` (`str`) - API stage name of the associated API stage in a usage plan.
"""
arn: pulumi.Output[str]
"""
Amazon Resource Name (ARN)
"""
description: pulumi.Output[str]
"""
The description of a usage plan.
"""
name: pulumi.Output[str]
"""
The name of the usage plan.
"""
product_code: pulumi.Output[str]
"""
The AWS Marketplace product identifier to associate with the usage plan as a SaaS product on AWS Marketplace.
"""
quota_settings: pulumi.Output[dict]
"""
The quota settings of the usage plan.
* `limit` (`float`) - The maximum number of requests that can be made in a given time period.
* `offset` (`float`) - The number of requests subtracted from the given limit in the initial time period.
* `period` (`str`) - The time period in which the limit applies. Valid values are "DAY", "WEEK" or "MONTH".
"""
tags: pulumi.Output[dict]
"""
Key-value map of resource tags
"""
throttle_settings: pulumi.Output[dict]
"""
The throttling limits of the usage plan.
* `burstLimit` (`float`) - The API request burst limit, the maximum rate limit over a time ranging from one to a few seconds, depending upon whether the underlying token bucket is at its full capacity.
* `rate_limit` (`float`) - The API request steady-state rate limit.
"""
def __init__(__self__, resource_name, opts=None, api_stages=None, description=None, name=None, product_code=None, quota_settings=None, tags=None, throttle_settings=None, __props__=None, __name__=None, __opts__=None):
"""
Provides an API Gateway Usage Plan.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
myapi = aws.apigateway.RestApi("myapi")
dev = aws.apigateway.Deployment("dev",
rest_api=myapi.id,
stage_name="dev")
prod = aws.apigateway.Deployment("prod",
rest_api=myapi.id,
stage_name="prod")
my_usage_plan = aws.apigateway.UsagePlan("myUsagePlan",
api_stages=[
{
"api_id": myapi.id,
"stage": dev.stage_name,
},
{
"api_id": myapi.id,
"stage": prod.stage_name,
},
],
description="my description",
product_code="MYCODE",
quota_settings={
"limit": 20,
"offset": 2,
"period": "WEEK",
},
throttle_settings={
"burstLimit": 5,
"rate_limit": 10,
})
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[list] api_stages: The associated API stages of the usage plan.
:param pulumi.Input[str] description: The description of a usage plan.
:param pulumi.Input[str] name: The name of the usage plan.
:param pulumi.Input[str] product_code: The AWS Marketplace product identifier to associate with the usage plan as a SaaS product on AWS Marketplace.
:param pulumi.Input[dict] quota_settings: The quota settings of the usage plan.
:param pulumi.Input[dict] tags: Key-value map of resource tags
:param pulumi.Input[dict] throttle_settings: The throttling limits of the usage plan.
The **api_stages** object supports the following:
* `api_id` (`pulumi.Input[str]`) - API Id of the associated API stage in a usage plan.
* `stage` (`pulumi.Input[str]`) - API stage name of the associated API stage in a usage plan.
The **quota_settings** object supports the following:
* `limit` (`pulumi.Input[float]`) - The maximum number of requests that can be made in a given time period.
* `offset` (`pulumi.Input[float]`) - The number of requests subtracted from the given limit in the initial time period.
* `period` (`pulumi.Input[str]`) - The time period in which the limit applies. Valid values are "DAY", "WEEK" or "MONTH".
The **throttle_settings** object supports the following:
* `burstLimit` (`pulumi.Input[float]`) - The API request burst limit, the maximum rate limit over a time ranging from one to a few seconds, depending upon whether the underlying token bucket is at its full capacity.
* `rate_limit` (`pulumi.Input[float]`) - The API request steady-state rate limit.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['api_stages'] = api_stages
__props__['description'] = description
__props__['name'] = name
__props__['product_code'] = product_code
__props__['quota_settings'] = quota_settings
__props__['tags'] = tags
__props__['throttle_settings'] = throttle_settings
__props__['arn'] = None
super(UsagePlan, __self__).__init__(
'aws:apigateway/usagePlan:UsagePlan',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name, id, opts=None, api_stages=None, arn=None, description=None, name=None, product_code=None, quota_settings=None, tags=None, throttle_settings=None):
"""
Get an existing UsagePlan resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param str id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[list] api_stages: The associated API stages of the usage plan.
:param pulumi.Input[str] arn: Amazon Resource Name (ARN)
:param pulumi.Input[str] description: The description of a usage plan.
:param pulumi.Input[str] name: The name of the usage plan.
:param pulumi.Input[str] product_code: The AWS Marketplace product identifier to associate with the usage plan as a SaaS product on AWS Marketplace.
:param pulumi.Input[dict] quota_settings: The quota settings of the usage plan.
:param pulumi.Input[dict] tags: Key-value map of resource tags
:param pulumi.Input[dict] throttle_settings: The throttling limits of the usage plan.
The **api_stages** object supports the following:
* `api_id` (`pulumi.Input[str]`) - API Id of the associated API stage in a usage plan.
* `stage` (`pulumi.Input[str]`) - API stage name of the associated API stage in a usage plan.
The **quota_settings** object supports the following:
* `limit` (`pulumi.Input[float]`) - The maximum number of requests that can be made in a given time period.
* `offset` (`pulumi.Input[float]`) - The number of requests subtracted from the given limit in the initial time period.
* `period` (`pulumi.Input[str]`) - The time period in which the limit applies. Valid values are "DAY", "WEEK" or "MONTH".
The **throttle_settings** object supports the following:
* `burstLimit` (`pulumi.Input[float]`) - The API request burst limit, the maximum rate limit over a time ranging from one to a few seconds, depending upon whether the underlying token bucket is at its full capacity.
* `rate_limit` (`pulumi.Input[float]`) - The API request steady-state rate limit.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["api_stages"] = api_stages
__props__["arn"] = arn
__props__["description"] = description
__props__["name"] = name
__props__["product_code"] = product_code
__props__["quota_settings"] = quota_settings
__props__["tags"] = tags
__props__["throttle_settings"] = throttle_settings
return UsagePlan(resource_name, opts=opts, __props__=__props__)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
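# Illustrative sketch (not part of the generated file): adopting an existing
# usage plan with the static get() method defined above. The resource name and
# the provider ID are placeholders.
#
# existing_plan = UsagePlan.get("imported-usage-plan", id="usageplan-1234567890")
# pulumi.export("usage_plan_arn", existing_plan.arn)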
| 46.524038 | 225 | 0.644105 | ["ECL-2.0", "Apache-2.0"] | JakeGinnivan/pulumi-aws | sdk/python/pulumi_aws/apigateway/usage_plan.py | 9,677 | Python |
# -*- coding: utf-8 -*-
"""Tests for NullTask plugin."""
import unittest
from pomito.plugins.task import nulltask, TaskPlugin
class NullTaskTests(unittest.TestCase):
"""Tests for NullTask."""
def setUp(self):
self.task = nulltask.NullTask(None)
def test_nulltask_is_a_task_plugin(self):
assert issubclass(nulltask.NullTask, TaskPlugin)
def test_nulltask_initialize_should_not_throw(self):
self.task.initialize()
def test_nulltask_get_tasks_returns_empty_list(self):
assert len(self.task.get_tasks()) == 0
def test_nulltask_get_tasks_by_filter_returns_empty_list(self):
assert len(self.task.get_tasks_by_filter("")) == 0
def test_nulltask_get_task_by_id_returns_none(self):
assert self.task.get_task_by_id(1) is None
| 27.655172 | 67 | 0.724439 | ["MIT"] | codito/pomito | tests/plugins/task/test_nulltask.py | 802 | Python |
#!/usr/bin/python
# Copyright (c) 2017, 2020 Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
# GENERATED FILE - DO NOT EDIT - MANUAL CHANGES WILL BE OVERWRITTEN
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_network_ip_sec_connection_device_status_facts
short_description: Fetches details about a IpSecConnectionDeviceStatus resource in Oracle Cloud Infrastructure
description:
- Fetches details about a IpSecConnectionDeviceStatus resource in Oracle Cloud Infrastructure
- Deprecated. To get the tunnel status, instead use
L(GetIPSecConnectionTunnel,https://docs.cloud.oracle.com/en-us/iaas/api/#/en/iaas/20160918/IPSecConnectionTunnel/GetIPSecConnectionTunnel).
version_added: "2.9"
author: Oracle (@oracle)
options:
ipsc_id:
description:
- The OCID of the IPSec connection.
type: str
aliases: ["id"]
required: true
extends_documentation_fragment: [ oracle.oci.oracle ]
"""
EXAMPLES = """
- name: Get a specific ip_sec_connection_device_status
oci_network_ip_sec_connection_device_status_facts:
ipsc_id: ocid1.ipsc.oc1..xxxxxxEXAMPLExxxxxx
"""
RETURN = """
ip_sec_connection_device_status:
description:
- IpSecConnectionDeviceStatus resource
returned: on success
type: complex
contains:
compartment_id:
description:
- The OCID of the compartment containing the IPSec connection.
returned: on success
type: string
sample: ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx
id:
description:
- The IPSec connection's Oracle ID (OCID).
returned: on success
type: string
sample: ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx
time_created:
description:
- The date and time the IPSec connection was created, in the format defined by L(RFC3339,https://tools.ietf.org/html/rfc3339).
- "Example: `2016-08-25T21:10:29.600Z`"
returned: on success
type: string
sample: 2016-08-25T21:10:29.600Z
tunnels:
description:
- Two L(TunnelStatus,https://docs.cloud.oracle.com/en-us/iaas/api/#/en/iaas/20160918/TunnelStatus/) objects.
returned: on success
type: complex
contains:
ip_address:
description:
- The IP address of Oracle's VPN headend.
- "Example: `203.0.113.50`"
returned: on success
type: string
sample: 203.0.113.50
lifecycle_state:
description:
- The tunnel's current state.
returned: on success
type: string
sample: UP
time_created:
description:
- The date and time the IPSec connection was created, in the format defined by L(RFC3339,https://tools.ietf.org/html/rfc3339).
- "Example: `2016-08-25T21:10:29.600Z`"
returned: on success
type: string
sample: 2016-08-25T21:10:29.600Z
time_state_modified:
description:
- When the state of the tunnel last changed, in the format defined by L(RFC3339,https://tools.ietf.org/html/rfc3339).
- "Example: `2016-08-25T21:10:29.600Z`"
returned: on success
type: string
sample: 2016-08-25T21:10:29.600Z
sample: {
"compartment_id": "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx",
"id": "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx",
"time_created": "2016-08-25T21:10:29.600Z",
"tunnels": [{
"ip_address": "203.0.113.50",
"lifecycle_state": "UP",
"time_created": "2016-08-25T21:10:29.600Z",
"time_state_modified": "2016-08-25T21:10:29.600Z"
}]
}
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.oracle.oci.plugins.module_utils import oci_common_utils
from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (
OCIResourceFactsHelperBase,
get_custom_class,
)
try:
from oci.core import VirtualNetworkClient
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
class IpSecConnectionDeviceStatusFactsHelperGen(OCIResourceFactsHelperBase):
"""Supported operations: get"""
def get_required_params_for_get(self):
return [
"ipsc_id",
]
def get_resource(self):
return oci_common_utils.call_with_backoff(
self.client.get_ip_sec_connection_device_status,
ipsc_id=self.module.params.get("ipsc_id"),
)
IpSecConnectionDeviceStatusFactsHelperCustom = get_custom_class(
"IpSecConnectionDeviceStatusFactsHelperCustom"
)
class ResourceFactsHelper(
IpSecConnectionDeviceStatusFactsHelperCustom,
IpSecConnectionDeviceStatusFactsHelperGen,
):
pass
def main():
module_args = oci_common_utils.get_common_arg_spec()
module_args.update(dict(ipsc_id=dict(aliases=["id"], type="str", required=True),))
module = AnsibleModule(argument_spec=module_args)
if not HAS_OCI_PY_SDK:
module.fail_json(msg="oci python sdk required for this module.")
resource_facts_helper = ResourceFactsHelper(
module=module,
resource_type="ip_sec_connection_device_status",
service_client_class=VirtualNetworkClient,
namespace="core",
)
result = []
if resource_facts_helper.is_get():
result = resource_facts_helper.get()
elif resource_facts_helper.is_list():
result = resource_facts_helper.list()
else:
resource_facts_helper.fail()
module.exit_json(ip_sec_connection_device_status=result)
if __name__ == "__main__":
main()
| 33.905759 | 150 | 0.637431 | ["Apache-2.0"] | A7rMtWE57x/oci-ansible-collection | plugins/modules/oci_network_ip_sec_connection_device_status_facts.py | 6,476 | Python |
from plotly_study.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Font(_BaseTraceHierarchyType):
# color
# -----
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# colorsrc
# --------
@property
def colorsrc(self):
"""
Sets the source reference on plot.ly for color .
The 'colorsrc' property must be specified as a string or
as a plotly_study.grid_objs.Column object
Returns
-------
str
"""
return self["colorsrc"]
@colorsrc.setter
def colorsrc(self, val):
self["colorsrc"] = val
# family
# ------
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser will only be able to apply a font if
it is available on the system which it operates. Provide
multiple font families, separated by commas, to indicate the
preference in which to apply fonts if they aren't available on
the system. The plotly service (at https://plot.ly or on-
premise) generates images on a server, where only a select
number of fonts are installed and supported. These include
"Arial", "Balto", "Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old Standard TT", "Open
Sans", "Overpass", "PT Sans Narrow", "Raleway", "Times New
Roman".
The 'family' property is a string and must be specified as:
- A non-empty string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
# familysrc
# ---------
@property
def familysrc(self):
"""
Sets the source reference on plot.ly for family .
The 'familysrc' property must be specified as a string or
as a plotly_study.grid_objs.Column object
Returns
-------
str
"""
return self["familysrc"]
@familysrc.setter
def familysrc(self, val):
self["familysrc"] = val
# size
# ----
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
# sizesrc
# -------
@property
def sizesrc(self):
"""
Sets the source reference on plot.ly for size .
The 'sizesrc' property must be specified as a string or
as a plotly_study.grid_objs.Column object
Returns
-------
str
"""
return self["sizesrc"]
@sizesrc.setter
def sizesrc(self, val):
self["sizesrc"] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return "streamtube.hoverlabel"
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
color
colorsrc
Sets the source reference on plot.ly for color .
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The
plotly service (at https://plot.ly or on-premise)
generates images on a server, where only a select
number of fonts are installed and supported. These
include "Arial", "Balto", "Courier New", "Droid Sans",,
"Droid Serif", "Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on plot.ly for family .
size
sizesrc
Sets the source reference on plot.ly for size .
"""
def __init__(
self,
arg=None,
color=None,
colorsrc=None,
family=None,
familysrc=None,
size=None,
sizesrc=None,
**kwargs
):
"""
Construct a new Font object
Sets the font used in hover labels.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
plotly_study.graph_objs.streamtube.hoverlabel.Font
color
colorsrc
Sets the source reference on plot.ly for color .
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The
plotly service (at https://plot.ly or on-premise)
generates images on a server, where only a select
number of fonts are installed and supported. These
include "Arial", "Balto", "Courier New", "Droid Sans",,
"Droid Serif", "Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on plot.ly for family .
size
sizesrc
Sets the source reference on plot.ly for size .
Returns
-------
Font
"""
super(Font, self).__init__("font")
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly_study.graph_objs.streamtube.hoverlabel.Font
constructor must be a dict or
an instance of plotly_study.graph_objs.streamtube.hoverlabel.Font"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
# Import validators
# -----------------
from plotly_study.validators.streamtube.hoverlabel import font as v_font
# Initialize validators
# ---------------------
self._validators["color"] = v_font.ColorValidator()
self._validators["colorsrc"] = v_font.ColorsrcValidator()
self._validators["family"] = v_font.FamilyValidator()
self._validators["familysrc"] = v_font.FamilysrcValidator()
self._validators["size"] = v_font.SizeValidator()
self._validators["sizesrc"] = v_font.SizesrcValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("color", None)
self["color"] = color if color is not None else _v
_v = arg.pop("colorsrc", None)
self["colorsrc"] = colorsrc if colorsrc is not None else _v
_v = arg.pop("family", None)
self["family"] = family if family is not None else _v
_v = arg.pop("familysrc", None)
self["familysrc"] = familysrc if familysrc is not None else _v
_v = arg.pop("size", None)
self["size"] = size if size is not None else _v
_v = arg.pop("sizesrc", None)
self["sizesrc"] = sizesrc if sizesrc is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
__all__ = ["Font"]
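# Illustrative sketch (not part of the generated module): constructing a Font
# for streamtube hover labels. The property values are arbitrary examples, and
# to_plotly_json() is inherited from the plotly base types.
if __name__ == "__main__":
    example_font = Font(family="Arial", size=12, color="rgb(40, 40, 40)")
    print(example_font.to_plotly_json())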
| 34.268519 | 88 | 0.565973 | ["MIT"] | lucasiscovici/plotly_py | plotly_study/graph_objs/streamtube/hoverlabel/__init__.py | 11,103 | Python |
class MGDHCPSettings(object):
def __init__(self, session):
super(MGDHCPSettings, self).__init__()
self._session = session
def getNetworkCellularGatewaySettingsDhcp(self, networkId: str):
"""
**List common DHCP settings of MGs**
https://developer.cisco.com/meraki/api/#!get-network-cellular-gateway-settings-dhcp
- networkId (string)
"""
metadata = {
'tags': ['MG DHCP settings'],
'operation': 'getNetworkCellularGatewaySettingsDhcp',
}
resource = f'/networks/{networkId}/cellularGateway/settings/dhcp'
return self._session.get(metadata, resource)
def updateNetworkCellularGatewaySettingsDhcp(self, networkId: str, **kwargs):
"""
**Update common DHCP settings of MGs**
https://developer.cisco.com/meraki/api/#!update-network-cellular-gateway-settings-dhcp
- networkId (string)
- dhcpLeaseTime (string): DHCP Lease time for all MG of the network. It can be '30 minutes', '1 hour', '4 hours', '12 hours', '1 day' or '1 week'.
- dnsNameservers (string): DNS name servers mode for all MG of the network. It can take 4 different values: 'upstream_dns', 'google_dns', 'opendns', 'custom'.
        - dnsCustomNameservers (array): list of fixed IPs representing the DNS name servers when the mode is 'custom'
"""
kwargs.update(locals())
metadata = {
'tags': ['MG DHCP settings'],
'operation': 'updateNetworkCellularGatewaySettingsDhcp',
}
resource = f'/networks/{networkId}/cellularGateway/settings/dhcp'
body_params = ['dhcpLeaseTime', 'dnsNameservers', 'dnsCustomNameservers']
payload = {k: v for (k, v) in kwargs.items() if k in body_params}
return self._session.put(metadata, resource, payload)
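# A minimal usage sketch, assuming this class is exposed on the SDK session
# object (as in other v0 releases of the dashboard API library); the network ID
# and values below are placeholders:
#
#     dashboard = meraki.DashboardAPI(api_key)
#     dashboard.mg_dhcp_settings.updateNetworkCellularGatewaySettingsDhcp(
#         'N_1234567890', dhcpLeaseTime='1 day', dnsNameservers='google_dns')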
| 40.956522 | 166 | 0.636943 | ["MIT"] | NoFliesOnYou/dashboard-api-python | meraki/api/mg_dhcp_settings.py | 1,884 | Python |
"""Config Port Stats message tests."""
from pyof.v0x04.controller2switch.common import PortStats
from tests.test_struct import TestStruct
class TestPortStats(TestStruct):
"""Config Port Stats message tests."""
@classmethod
def setUpClass(cls):
"""Configure raw file and its object in parent class (TestDump)."""
super().setUpClass()
super().set_raw_dump_file('v0x04', 'ofpt_port_stats')
super().set_raw_dump_object(PortStats)
super().set_minimum_size(112)
| 31.9375 | 75 | 0.702544 | ["MIT"] | smythtech/python-openflow-legacy | build/lib/tests/v0x04/test_controller2switch/test_port_stats.py | 511 | Python |
"""Report routes."""
import os
from urllib import parse
import bottle
import requests
from pymongo.database import Database
from database import sessions
from database.datamodels import latest_datamodel
from database.measurements import recent_measurements_by_metric_uuid
from database.reports import insert_new_report, latest_reports
from initialization.report import import_json_report
from model.actions import copy_report
from model.data import ReportData
from model.transformations import hide_credentials, summarize_report
from server_utilities.functions import report_date_time, uuid
from server_utilities.type import ReportId
@bottle.post("/api/v3/report/import")
def post_report_import(database: Database):
"""Import a preconfigured report into the database."""
report = dict(bottle.request.json)
result = import_json_report(database, report)
result["new_report_uuid"] = report["report_uuid"]
return result
@bottle.post("/api/v3/report/new")
def post_report_new(database: Database):
"""Add a new report."""
report_uuid = uuid()
user = sessions.user(database)
report = dict(
report_uuid=report_uuid, title="New report", subjects={},
delta=dict(uuids=[report_uuid], email=user["email"], description=f"{user['user']} created a new report."))
result = insert_new_report(database, report)
result["new_report_uuid"] = report_uuid
return result
@bottle.post("/api/v3/report/<report_uuid>/copy")
def post_report_copy(report_uuid: ReportId, database: Database):
"""Copy a report."""
data_model = latest_datamodel(database)
reports = latest_reports(database)
data = ReportData(data_model, reports, report_uuid)
report_copy = copy_report(data.report, data.datamodel)
user = sessions.user(database)
report_copy["delta"] = dict(
uuids=[report_uuid, report_copy["report_uuid"]], email=user["email"],
description=f"{user['user']} copied the report '{data.report_name}'.")
result = insert_new_report(database, report_copy)
result["new_report_uuid"] = report_copy["report_uuid"]
return result
@bottle.get("/api/v3/report/<report_uuid>/pdf")
def export_report_as_pdf(report_uuid: ReportId):
"""Download the report as pdf."""
renderer_host = os.environ.get("RENDERER_HOST", "renderer")
renderer_port = os.environ.get("RENDERER_PORT", "9000")
render_url = f"http://{renderer_host}:{renderer_port}/api/render"
proxy_host = os.environ.get("PROXY_HOST", "www")
proxy_port = os.environ.get("PROXY_PORT", "80")
query_string = f"?{bottle.request.query_string}" if bottle.request.query_string else ""
report_url = parse.quote(f"http://{proxy_host}:{proxy_port}/{report_uuid}{query_string}")
margins = "&".join([f"pdf.margin.{side}=25" for side in ("top", "bottom", "left", "right")])
# Set pdf scale to 70% or otherwise the dashboard falls off the page
options = f"emulateScreenMedia=false&goto.timeout=60000&pdf.scale=0.7&{margins}"
response = requests.get(f"{render_url}?url={report_url}&{options}")
response.raise_for_status()
bottle.response.content_type = "application/pdf"
return response.content
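# Worked example of the URL composed above, assuming the default environment
# values and a hypothetical report_uuid "abc123":
#   http://renderer:9000/api/render?url=http%3A//www%3A80/abc123
#     &emulateScreenMedia=false&goto.timeout=60000&pdf.scale=0.7
#     &pdf.margin.top=25&pdf.margin.bottom=25&pdf.margin.left=25&pdf.margin.right=25
# (wrapped here for readability; the request is sent as a single URL).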
@bottle.delete("/api/v3/report/<report_uuid>")
def delete_report(report_uuid: ReportId, database: Database):
"""Delete a report."""
data_model = latest_datamodel(database)
reports = latest_reports(database)
data = ReportData(data_model, reports, report_uuid)
data.report["deleted"] = "true"
user = sessions.user(database)
data.report["delta"] = dict(
uuids=[report_uuid], email=user["email"],
description=f"{user['user']} deleted the report '{data.report_name}'.")
return insert_new_report(database, data.report)
@bottle.post("/api/v3/report/<report_uuid>/attribute/<report_attribute>")
def post_report_attribute(report_uuid: ReportId, report_attribute: str, database: Database):
"""Set a report attribute."""
data_model = latest_datamodel(database)
reports = latest_reports(database)
data = ReportData(data_model, reports, report_uuid)
value = dict(bottle.request.json)[report_attribute]
old_value = data.report.get(report_attribute) or ""
data.report[report_attribute] = value
value_change_description = "" if report_attribute == "layout" else f" from '{old_value}' to '{value}'"
user = sessions.user(database)
data.report["delta"] = dict(
uuids=[report_uuid], email=user["email"],
description=f"{user['user']} changed the {report_attribute} of report '{data.report_name}'"
f"{value_change_description}.")
return insert_new_report(database, data.report)
@bottle.get("/api/v3/tagreport/<tag>")
def get_tag_report(tag: str, database: Database):
"""Get a report with all metrics that have the specified tag."""
date_time = report_date_time()
reports = latest_reports(database, date_time)
data_model = latest_datamodel(database, date_time)
subjects = _get_subjects_and_metrics_by_tag(data_model, reports, tag)
tag_report = dict(
title=f'Report for tag "{tag}"', subtitle="Note: tag reports are read-only", report_uuid=f"tag-{tag}",
timestamp=date_time, subjects=subjects)
hide_credentials(data_model, tag_report)
summarize_report(tag_report, recent_measurements_by_metric_uuid(database, date_time), data_model)
return tag_report
def _get_subjects_and_metrics_by_tag(data_model, reports, tag: str):
"""Return all subjects and metrics that have the tag."""
subjects = {}
for report in reports:
for subject_uuid, subject in list(report.get("subjects", {}).items()):
for metric_uuid, metric in list(subject.get("metrics", {}).items()):
if tag not in metric.get("tags", []):
del subject["metrics"][metric_uuid]
if subject.get("metrics", {}):
subject_name = subject.get("name") or data_model["subjects"][subject["type"]]["name"]
subject["name"] = report["title"] + " / " + subject_name
subjects[subject_uuid] = subject
return subjects
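# Usage note: the tag report assembled above is read-only (see its subtitle) and
# is fetched like the other routes in this module, e.g. GET /api/v3/tagreport/security.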
| 44.05036 | 114 | 0.709783 | ["Apache-2.0"] | Gamer1120/quality-time | components/server/src/routes/report.py | 6,123 | Python |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Various ops for augmentation."""
import math
import tensorflow as tf
from tensorflow_addons import image as tfa_image
# Default replace value
REPLACE_VALUE = 128
def blend(image1, image2, factor):
"""Blend image1 and image2 using 'factor'.
A value of factor 0.0 means only image1 is used.
A value of 1.0 means only image2 is used. A value between 0.0 and
1.0 means we linearly interpolate the pixel values between the two
images. A value greater than 1.0 "extrapolates" the difference
between the two pixel values, and we clip the results to values
between 0 and 255.
Args:
image1: An image Tensor.
image2: An image Tensor.
factor: A floating point value above 0.0.
Returns:
A blended image Tensor.
"""
image1 = tf.cast(image1, tf.float32)
image2 = tf.cast(image2, tf.float32)
return tf.saturate_cast(image1 + factor * (image2 - image1), tf.uint8)
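# Worked example of the formula above: pixel values 100 (image1) and 200
# (image2) blend to 150 at factor=0.5, extrapolate to 250 at factor=1.5, and
# anything outside [0, 255] is clipped by the saturate_cast.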
def wrap(image):
"""Returns 'image' with an extra channel set to all 1s."""
shape = tf.shape(image)
extended_channel = tf.ones([shape[0], shape[1], 1], image.dtype)
extended = tf.concat([image, extended_channel], 2)
return extended
def unwrap(image):
"""Unwraps an image produced by wrap.
Where there is a 0 in the last channel for every spatial position,
the rest of the three channels in that spatial dimension are grayed
(set to 128). Operations like translate and shear on a wrapped
Tensor will leave 0s in empty locations. Some transformations look
at the intensity of values to do preprocessing, and we want these
empty pixels to assume the 'average' value, rather than pure black.
Args:
image: A 3D Image Tensor with 4 channels.
Returns:
image: A 3D image Tensor with 3 channels.
"""
image_shape = tf.shape(image)
# Flatten the spatial dimensions.
flattened_image = tf.reshape(image, [-1, image_shape[2]])
# Find all pixels where the last channel is zero.
alpha_channel = tf.expand_dims(flattened_image[:, image_shape[2] - 1], 1)
replace = tf.constant([REPLACE_VALUE, REPLACE_VALUE, REPLACE_VALUE, 1],
image.dtype)
# Where they are zero, fill them in with 'replace'.
flattened_image = tf.where(
tf.equal(alpha_channel, 0),
tf.ones_like(flattened_image, dtype=image.dtype) * replace,
flattened_image)
image = tf.reshape(flattened_image, image_shape)
image = tf.slice(image, [0, 0, 0],
[image_shape[0], image_shape[1], image_shape[2] - 1])
return image
def solarize(image, threshold=128):
# For each pixel in the image, select the pixel
# if the value is less than the threshold.
# Otherwise, subtract 255 from the pixel.
threshold = tf.saturate_cast(threshold, image.dtype)
return tf.where(image < threshold, image, 255 - image)
def solarize_add(image, addition=0, threshold=128):
# For each pixel in the image less than threshold
# we add 'addition' amount to it and then clip the
# pixel value to be between 0 and 255. The value
# of 'addition' is between -128 and 128
threshold = tf.saturate_cast(threshold, image.dtype)
added_im = tf.cast(image, tf.int32) + tf.cast(addition, tf.int32)
added_im = tf.saturate_cast(added_im, tf.uint8)
return tf.where(image < threshold, added_im, image)
def invert(image):
"""Inverts the image pixels."""
return 255 - tf.convert_to_tensor(image)
def invert_blend(image, factor):
"""Implements blend of invert with original image."""
return blend(invert(image), image, factor)
def color(image, factor):
"""Equivalent of PIL Color."""
degenerate = tf.image.grayscale_to_rgb(tf.image.rgb_to_grayscale(image))
return blend(degenerate, image, factor)
def contrast(image, factor):
"""Equivalent of PIL Contrast."""
grayscale_im = tf.image.rgb_to_grayscale(image)
mean = tf.reduce_mean(tf.cast(grayscale_im, tf.float32))
mean = tf.saturate_cast(mean + 0.5, tf.uint8)
degenerate = tf.ones_like(grayscale_im, dtype=tf.uint8) * mean
degenerate = tf.image.grayscale_to_rgb(degenerate)
return blend(degenerate, image, factor)
def brightness(image, factor):
"""Equivalent of PIL Brightness."""
degenerate = tf.zeros_like(image)
return blend(degenerate, image, factor)
def posterize(image, bits):
"""Equivalent of PIL Posterize."""
shift = tf.cast(8 - bits, image.dtype)
return tf.bitwise.left_shift(tf.bitwise.right_shift(image, shift), shift)
def rotate(image, degrees):
"""Equivalent of PIL Rotation."""
# Convert from degrees to radians
degrees_to_radians = math.pi / 180.0
radians = degrees * degrees_to_radians
# In practice, we should randomize the rotation degrees by flipping
# it negatively half the time, but that's done on 'degrees' outside
# of the function.
image = tfa_image.transform_ops.rotate(wrap(image), radians)
return unwrap(image)
def translate_x(image, pixels):
"""Equivalent of PIL Translate in X dimension."""
image = tfa_image.translate_ops.translate(wrap(image), [-pixels, 0])
return unwrap(image)
def translate_y(image, pixels):
"""Equivalent of PIL Translate in Y dimension."""
image = tfa_image.translate_ops.translate(wrap(image), [0, -pixels])
return unwrap(image)
def shear_x(image, level):
"""Equivalent of PIL Shearing in X dimension."""
# Shear parallel to x axis is a projective transform
# with a matrix form of:
# [1 level
# 0 1]
image = tfa_image.transform_ops.transform(
wrap(image), [1., level, 0., 0., 1., 0., 0., 0.])
return unwrap(image)
def shear_y(image, level):
"""Equivalent of PIL Shearing in Y dimension."""
# Shear parallel to y axis is a projective transform
# with a matrix form of:
# [1 0
# level 1]
image = tfa_image.transform_ops.transform(
wrap(image), [1., 0., 0., level, 1., 0., 0., 0.])
return unwrap(image)
def autocontrast(image):
"""Implements Autocontrast function from PIL using TF ops."""
def scale_channel(channel):
"""Scale the 2D image using the autocontrast rule."""
# A possibly cheaper version can be done using cumsum/unique_with_counts
# over the histogram values, rather than iterating over the entire image.
# to compute mins and maxes.
lo = tf.cast(tf.reduce_min(channel), tf.float32)
hi = tf.cast(tf.reduce_max(channel), tf.float32)
# Scale the image, making the lowest value 0 and the highest value 255.
def scale_values(im):
scale = 255.0 / (hi - lo)
offset = -lo * scale
im = tf.cast(im, tf.float32) * scale + offset
return tf.saturate_cast(im, tf.uint8)
result = tf.cond(hi > lo, lambda: scale_values(channel), lambda: channel)
return result
# Assumes RGB for now. Scales each channel independently
# and then stacks the result.
s1 = scale_channel(image[:, :, 0])
s2 = scale_channel(image[:, :, 1])
s3 = scale_channel(image[:, :, 2])
image = tf.stack([s1, s2, s3], 2)
return image
def autocontrast_blend(image, factor):
"""Implements blend of autocontrast with original image."""
return blend(autocontrast(image), image, factor)
def sharpness(image, factor):
"""Implements Sharpness function from PIL using TF ops."""
orig_im = image
image = tf.cast(image, tf.float32)
# Make image 4D for conv operation
image = tf.expand_dims(image, 0)
# SMOOTH PIL Kernel
kernel = tf.constant([[1, 1, 1], [1, 5, 1], [1, 1, 1]],
dtype=tf.float32,
shape=[3, 3, 1, 1]) / 13.
# Tile across channel dimension
kernel = tf.tile(kernel, [1, 1, 3, 1])
strides = [1, 1, 1, 1]
degenerate = tf.nn.depthwise_conv2d(
image, kernel, strides, padding='VALID', dilations=[1, 1])
degenerate = tf.squeeze(tf.saturate_cast(degenerate, tf.uint8), [0])
# For the borders of the resulting image, fill in the values of the
# original image.
mask = tf.ones_like(degenerate)
padded_mask = tf.pad(mask, [[1, 1], [1, 1], [0, 0]])
padded_degenerate = tf.pad(degenerate, [[1, 1], [1, 1], [0, 0]])
result = tf.where(tf.equal(padded_mask, 1), padded_degenerate, orig_im)
# Blend the final result
return blend(result, orig_im, factor)
def equalize(image):
"""Implements Equalize function from PIL using TF ops."""
def scale_channel(im, c):
"""Scale the data in the channel to implement equalize."""
im = tf.cast(im[:, :, c], tf.int32)
# Compute the histogram of the image channel.
histo = tf.histogram_fixed_width(im, [0, 255], nbins=256)
# For the purposes of computing the step, filter out the nonzeros.
nonzero = tf.where(tf.not_equal(histo, 0))
nonzero_histo = tf.reshape(tf.gather(histo, nonzero), [-1])
step = (tf.reduce_sum(nonzero_histo) - nonzero_histo[-1]) // 255
def build_lut(histo, step):
# Compute the cumulative sum, shifting by step // 2
# and then normalization by step.
lut = (tf.cumsum(histo) + (step // 2)) // step
# Shift lut, prepending with 0.
lut = tf.concat([[0], lut[:-1]], 0)
# Clip the counts to be in range. This is done
# in the C code for image.point.
return tf.clip_by_value(lut, 0, 255)
# If step is zero, return the original image. Otherwise, build
# lut from the full histogram and step and then index from it.
result = tf.cond(
tf.equal(step, 0), lambda: im,
lambda: tf.gather(build_lut(histo, step), im))
return tf.cast(result, tf.uint8)
# Assumes RGB for now. Scales each channel independently
# and then stacks the result.
s1 = scale_channel(image, 0)
s2 = scale_channel(image, 1)
s3 = scale_channel(image, 2)
image = tf.stack([s1, s2, s3], 2)
return image
def equalize_blend(image, factor):
"""Implements blend of equalize with original image."""
return blend(equalize(image), image, factor)
def _convolve_image_with_kernel(image, kernel):
num_channels = tf.shape(image)[-1]
kernel = tf.tile(kernel, [1, 1, num_channels, 1])
image = tf.expand_dims(image, axis=0)
convolved_im = tf.nn.depthwise_conv2d(
tf.cast(image, tf.float32), kernel, strides=[1, 1, 1, 1], padding='SAME')
# adding 0.5 for future rounding, same as in PIL:
# https://github.com/python-pillow/Pillow/blob/555e305a60d7fcefd1ad4aa6c8fd879e2f474192/src/libImaging/Filter.c#L101 # pylint: disable=line-too-long
convolved_im = convolved_im + 0.5
return tf.squeeze(convolved_im, axis=0)
def blur(image, factor):
"""Blur with the same kernel as ImageFilter.BLUR."""
# See https://github.com/python-pillow/Pillow/blob/master/src/PIL/ImageFilter.py # pylint: disable=line-too-long
# class BLUR(BuiltinFilter):
# name = "Blur"
# # fmt: off
# filterargs = (5, 5), 16, 0, (
# 1, 1, 1, 1, 1,
# 1, 0, 0, 0, 1,
# 1, 0, 0, 0, 1,
# 1, 0, 0, 0, 1,
# 1, 1, 1, 1, 1,
# )
# # fmt: on
#
# filterargs are following:
# (kernel_size_x, kernel_size_y), divisor, offset, kernel
#
blur_kernel = tf.constant(
[[1., 1., 1., 1., 1.], [1., 0., 0., 0., 1.], [1., 0., 0., 0., 1.],
[1., 0., 0., 0., 1.], [1., 1., 1., 1., 1.]],
dtype=tf.float32,
shape=[5, 5, 1, 1]) / 16.0
blurred_im = _convolve_image_with_kernel(image, blur_kernel)
return blend(image, blurred_im, factor)
def smooth(image, factor):
"""Smooth with the same kernel as ImageFilter.SMOOTH."""
# See https://github.com/python-pillow/Pillow/blob/master/src/PIL/ImageFilter.py # pylint: disable=line-too-long
# class SMOOTH(BuiltinFilter):
# name = "Smooth"
# # fmt: off
# filterargs = (3, 3), 13, 0, (
# 1, 1, 1,
# 1, 5, 1,
# 1, 1, 1,
# )
# # fmt: on
#
# filterargs are following:
# (kernel_size_x, kernel_size_y), divisor, offset, kernel
#
smooth_kernel = tf.constant([[1., 1., 1.], [1., 5., 1.], [1., 1., 1.]],
dtype=tf.float32,
shape=[3, 3, 1, 1]) / 13.0
smoothed_im = _convolve_image_with_kernel(image, smooth_kernel)
return blend(image, smoothed_im, factor)
def rescale(image, level):
"""Rescales image and enlarged cornet."""
# See tf.image.ResizeMethod for full list
size = image.shape[:2]
scale = level * 0.25
scale_height = tf.cast(scale * size[0], tf.int32)
scale_width = tf.cast(scale * size[1], tf.int32)
cropped_image = tf.image.crop_to_bounding_box(
image,
offset_height=scale_height,
offset_width=scale_width,
target_height=size[0] - scale_height,
target_width=size[1] - scale_width)
rescaled = tf.image.resize(cropped_image, size, tf.image.ResizeMethod.BICUBIC)
return tf.saturate_cast(rescaled, tf.uint8)
NAME_TO_FUNC = {
'Identity': tf.identity,
'AutoContrast': autocontrast,
'AutoContrastBlend': autocontrast_blend,
'Equalize': equalize,
'EqualizeBlend': equalize_blend,
'Invert': invert,
'InvertBlend': invert_blend,
'Rotate': rotate,
'Posterize': posterize,
'Solarize': solarize,
'SolarizeAdd': solarize_add,
'Color': color,
'Contrast': contrast,
'Brightness': brightness,
'Sharpness': sharpness,
'ShearX': shear_x,
'ShearY': shear_y,
'TranslateX': translate_x,
'TranslateY': translate_y,
'Blur': blur,
'Smooth': smooth,
'Rescale': rescale,
}
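if __name__ == '__main__':
  # A minimal dispatch sketch (not part of the original file). Most ops above
  # take a uint8 HWC image plus one magnitude argument; a few ('Identity',
  # 'AutoContrast', 'Equalize', 'Invert') take only the image.
  demo = tf.cast(tf.random.uniform([64, 64, 3], 0, 256, dtype=tf.int32), tf.uint8)
  demo = NAME_TO_FUNC['Rotate'](demo, 30.0)      # degrees
  demo = NAME_TO_FUNC['Sharpness'](demo, 1.8)    # blend factor
  print(demo.shape, demo.dtype)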
| 33.474576 | 151 | 0.670524 | ["Apache-2.0"] | google-research/crest | third_party/augment_ops.py | 13,825 | Python |
from harry import get_harry_most_common_word
def test_get_harry_most_common_word():
top_word = get_harry_most_common_word()
assert type(top_word) == tuple
assert top_word[0] == 'dursley'
    assert top_word[1] == 45
| 29.5 | 45 | 0.724576 | ["MIT"] | alex-vegan/100daysofcode-with-python-course | days/day101/Bite 18. Find the most common word/test_harry.py | 236 | Python |
"""
Scrape quotes, books and authors from ``Good Reads`` website.
"""
import bs4
from .utils import *
def get_author_name(soup):
"""Get the author's name from its main page.
Args:
soup (bs4.element.Tag): connection to the author page.
Returns:
string: name of the author.
Examples::
>>> from scrapereads import connect
>>> url = 'https://www.goodreads.com/author/show/1077326'
>>> soup = connect(url)
>>> get_author_name(soup)
J.K. Rowling
"""
author_h1 = soup.find('h1', attrs={'class': 'authorName'})
return author_h1.find('span').text
def get_author_desc(soup):
"""Get the author description / biography.
Args:
soup (bs4.element.Tag): connection to the author page.
Returns:
str: long description of the author.
Examples::
>>> from scrapereads import connect
>>> url = 'https://www.goodreads.com/author/show/1077326'
>>> soup = connect(url)
>>> get_author_desc(soup)
See also: Robert Galbraith
Although she writes under the pen name J.K. Rowling, pronounced like rolling,
her name when her first Harry Potter book was published was simply Joanne Rowling.
...
"""
author_info_desc = soup.find('div', attrs={'class': 'aboutAuthorInfo'})
author_info_long = author_info_desc.findAll('span')[-1]
long_desc = ""
for sentence in author_info_long.children:
if isinstance(sentence, bs4.element.Tag):
if sentence.name == 'br':
long_desc += '\n'
else:
long_desc += sentence.text
else:
long_desc += sentence
long_desc = long_desc.replace('’', "'")
return long_desc
def get_author_info(soup):
"""Get all information from an author (genres, influences, website etc.).
Args:
soup (bs4.element.Tag): author page connection.
Returns:
dict
"""
container = soup.find('div', attrs={'class': 'rightContainer'})
author_info = {}
data_div = container.find('br', attrs={'class': 'clear'})
while data_div:
if data_div.name:
data_class = data_div.get('class')[0]
# Information section is finished
if data_class == 'aboutAuthorInfo':
break
# Key elements
elif data_class == 'dataTitle':
key = data_div.text.strip()
author_info[key] = []
# Born section
if data_div.text == 'Born':
data_div = data_div.next_sibling
author_info[key].append(data_div.strip())
# Influences section
elif data_div.text == 'Influences':
data_div = data_div.next_sibling.next_sibling
data_items = data_div.findAll('span')[-1].findAll('a')
for data_a in data_items:
author_info[key].append(data_a.text.strip())
# Member since section
elif data_div.text == 'Member Since':
data_div = data_div.next_sibling.next_sibling
author_info[key].append(data_div.text.strip())
# Genre, website and other sections
else:
data_items = data_div.findAll('a')
for data_a in data_items:
author_info[key].append(data_a.text.strip())
data_div = data_div.next_sibling
author_info.update({'Description': get_author_desc(soup)})
return author_info
def scrape_quotes_container(soup):
"""Get the quote container from a quote page.
Args:
soup (bs4.element.Tag): connection to the quote page.
Returns:
bs4.element.Tag
"""
return soup.findAll('div', attrs={'class': 'quotes'})
def scrape_quotes(soup):
"""Retrieve all ``<div>`` quote element from a quote page.
Args:
soup (bs4.element.Tag): connection to the quote page.
Returns:
yield bs4.element.Tag
"""
for container_div in scrape_quotes_container(soup):
quote_div = container_div.find('div', attrs={'class': 'quote'})
while quote_div:
if quote_div.name == 'div' and quote_div.get('class') and 'quote' in quote_div.get('class'):
yield quote_div
quote_div = quote_div.next_sibling
def get_quote_text(quote_div):
"""Get the text from a ``<div>`` quote element.
Args:
quote_div (bs4.element.Tag): ``<div>`` quote element to extract the text.
Returns:
string
"""
quote_text = ''
text_iterator = quote_div.find('div', attrs={'class': 'quoteText'}).children
for text in text_iterator:
if text.name == 'br':
quote_text += '\n'
elif not text.name:
quote_text += text.strip()
quote_text = process_quote_text(quote_text)
return quote_text
def scrape_quote_tags(quote_div):
"""Scrape tags from a ``<div>`` quote element.
Args:
quote_div (bs4.element.Tag): ``<div>`` quote element from a quote page.
Returns:
yield ``<a>`` tags
"""
tags_container = quote_div.find('div', attrs={'class': 'greyText smallText left'})
if tags_container:
for tag in tags_container.children:
if tag.name == 'a':
yield tag
return None
def get_quote_book(quote_div):
"""Get the reference (book) from a ``<div>`` quote element.
Args:
quote_div (bs4.element.Tag): ``<div>`` quote element from a quote page.
Returns:
bs4.element.Tag
"""
quote_details = quote_div.find('div', attrs={'class': 'quoteText'})
return quote_details.find('a', attrs={'class': 'authorOrTitle'})
def get_quote_author_name(quote_div):
"""Get the author's name from a ``<div>`` quote element.
Args:
quote_div (bs4.element.Tag): ``<div>`` quote element from a quote page.
Returns:
string
"""
quote_text = quote_div.find('div', attrs={'class': 'quoteText '})
author_name = quote_text.find('span', attrs={'class': 'authorOrTitle'}).text
return remove_punctuation(author_name).title()
def get_quote_likes(quote_div):
"""Get the likes ``<a>`` tag from a ``<div>`` quote element.
Args:
quote_div (bs4.element.Tag): ``<div>`` quote element from a quote page.
Returns:
bs4.element.Tag: ``<a>`` tag for likes.
"""
quote_footer = quote_div.find('div', attrs={'class': 'quoteFooter'})
return quote_footer.find('a', attrs={'class': 'smallText'})
# TODO: deprecate this
def get_quote_name_id(quote_div):
"""Get the name and id of a ``<div>`` quote element.
Args:
quote_div (bs4.element.Tag): ``<div>`` quote element from a quote page.
Returns:
tuple: id and name.
"""
quote_href = get_quote_likes(quote_div).get('href')
quote_id = quote_href.split('/')[-1].split('-')[0]
quote_name = '-'.join(quote_href.split('/')[-1].split('-')[1:])
return quote_id, quote_name
def scrape_author_books(soup):
"""Retrieve books from an author's page.
Args:
soup (bs4.element.Tag): connection to an author books page.
Returns:
yield bs4.element.Tag: ``<tr>`` element.
"""
table_tr = soup.find('tr')
while table_tr:
if table_tr.name == 'tr':
yield table_tr
table_tr = table_tr.next_sibling
def get_author_book_title(book_tr):
"""Get the book title ``<a>`` element from a table ``<tr>`` element from an author page.
Args:
book_tr (bs4.element.Tag): ``<tr>`` book element.
Returns:
bs4.element.Tag: book title ``<a>`` element.
Examples::
>>> for book_tr in scrape_author_books(soup):
... book_title = get_author_book_title(book_tr)
... print(book_title.text.strip(), book_title.get('href'))
The Bell Jar /book/show/6514.The_Bell_Jar
Ariel /book/show/395090.Ariel
The Collected Poems /book/show/31426.The_Collected_Poems
The Unabridged Journals of Sylvia Plath /book/show/11623.The_Unabridged_Journals_of_Sylvia_Plath
"""
return book_tr.find('a', attrs={'class': 'bookTitle'})
def get_author_book_author(book_tr):
"""Get the author ``<a>`` element from a table ``<tr>`` element.
Args:
book_tr (bs4.element.Tag): ``<tr>`` book element.
Returns:
bs4.element.Tag: author name ``<a>`` element.
Examples::
>>> for book_tr in scrape_author_books(soup):
... book_author = get_author_book_author(book_tr)
... print(book_author.text, book_author.get('href'))
Sylvia Plath https://www.goodreads.com/author/show/4379.Sylvia_Plath
Sylvia Plath https://www.goodreads.com/author/show/4379.Sylvia_Plath
Sylvia Plath https://www.goodreads.com/author/show/4379.Sylvia_Plath
Sylvia Plath https://www.goodreads.com/author/show/4379.Sylvia_Plath
Sylvia Plath https://www.goodreads.com/author/show/4379.Sylvia_Plath
"""
return book_tr.find('a', attrs={'class': 'authorName'})
def get_author_book_ratings(book_tr):
"""Get the ratings ``<span>`` element from a table ``<tr>`` element from an author page.
Args:
book_tr (bs4.element.Tag): ``<tr>`` book element.
Returns:
bs4.element.Tag: ratings ``<span>`` element.
Examples::
>>> for book_tr in scrape_author_books(soup):
... ratings_span = get_author_book_ratings(book_tr)
... print(ratings_span.contents[-1])
4.55 avg rating — 2,414 ratings
3.77 avg rating — 1,689 ratings
4.28 avg rating — 892 ratings
4.54 avg rating — 490 ratings
...
"""
return book_tr.find('span', attrs={'class': 'minirating'})
def get_author_book_edition(book_tr):
"""Get the edition ``<a>`` element from a table ``<tr>`` element from an author page.
Args:
book_tr (bs4.element.Tag): ``<tr>`` book element.
Returns:
bs4.element.Tag: book edition ``<a>`` element.
Examples::
>>> for book_tr in scrape_author_books(soup):
... book_edition = get_author_book_edition(book_tr)
... if book_edition:
... print(book_edition.text, book_edition.get('href'))
... print()
493 editions /work/editions/1385044-the-bell-jar
80 editions /work/editions/1185316-ariel
30 editions /work/editions/1003095-the-collected-poems
45 editions /work/editions/3094683-the-unabridged-journals-of-sylvia-plath
...
"""
book_details = book_tr.find('span', attrs={'class': 'greyText smallText uitext'})
return book_details.find('a', attrs={'class': 'greyText'})
def get_author_book_date(book_tr):
"""Get the published date from a table ``<tr>`` element from an author page.
Args:
book_tr (bs4.element.Tag): ``<tr>`` book element.
Returns:
int: date of publication
Examples::
>>> for book_tr in scrape_author_books(soup):
... book_date = get_author_book_date(book_tr)
... print(book_date)
None
None
1958
2009
...
"""
book_details = book_tr.find('span', attrs={'class': 'greyText smallText uitext'})
book_publish = book_details.contents[-1].replace('—', '').replace('\n', '')
book_date = book_publish.replace('published', '').strip()
book_date = eval(book_date) if book_date != '' else None
return book_date
def get_book_quote_page(soup):
"""Find the ``<a>`` element pointing to the quote page of a book.
Args:
soup (bs4.element.Tag):
Returns:
"""
quote_div = soup.findAll('div', attrs={'class': ' clearFloats bigBox'})
if quote_div:
return quote_div[-1].find('a')
return None
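# A minimal end-to-end sketch, assuming `connect` (imported from .utils, as in
# the docstrings above) returns the parsed soup of a Goodreads author quotes page:
#
#     soup = connect('https://www.goodreads.com/author/quotes/1077326')
#     for quote_div in scrape_quotes(soup):
#         print(get_quote_author_name(quote_div), '-', get_quote_text(quote_div))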
| 30.470738 | 108 | 0.600585 | ["MIT"] | arthurdjn/scrape-goodreads | scrapereads/scrape.py | 11,987 | Python |
from pyspark.sql import Column, DataFrame, SparkSession, functions
from pyspark.sql.functions import *
from py4j.java_collections import MapConverter
from delta.tables import *
import shutil
import threading
tableName = "tbltestpython"
# Enable SQL/DML commands and Metastore tables for the current spark session.
# We need to set the following configs
spark = SparkSession.builder \
.appName("quickstart_sql") \
.master("local[*]") \
.config("spark.sql.extensions", "io.delta.sql.DeltaSparkSessionExtension") \
.config("spark.sql.catalog.spark_catalog", "org.apache.spark.sql.delta.catalog.DeltaCatalog") \
.getOrCreate()
# Clear any previous runs
spark.sql("DROP TABLE IF EXISTS " + tableName)
spark.sql("DROP TABLE IF EXISTS newData")
try:
# Create a table
print("############# Creating a table ###############")
spark.sql("CREATE TABLE %s(id LONG) USING delta" % tableName)
spark.sql("INSERT INTO %s VALUES 0, 1, 2, 3, 4" % tableName)
# Read the table
print("############ Reading the table ###############")
spark.sql("SELECT * FROM %s" % tableName).show()
# Upsert (merge) new data
print("########### Upsert new data #############")
spark.sql("CREATE TABLE newData(id LONG) USING parquet")
spark.sql("INSERT INTO newData VALUES 3, 4, 5, 6")
spark.sql('''MERGE INTO {0} USING newData
ON {0}.id = newData.id
WHEN MATCHED THEN
UPDATE SET {0}.id = newData.id
WHEN NOT MATCHED THEN INSERT *
'''.format(tableName))
spark.sql("SELECT * FROM %s" % tableName).show()
# Update table data
print("########## Overwrite the table ###########")
spark.sql("INSERT OVERWRITE %s select * FROM (VALUES 5, 6, 7, 8, 9) x (id)" % tableName)
spark.sql("SELECT * FROM %s" % tableName).show()
# Update every even value by adding 100 to it
print("########### Update to the table(add 100 to every even value) ##############")
spark.sql("UPDATE {0} SET id = (id + 100) WHERE (id % 2 == 0)".format(tableName))
spark.sql("SELECT * FROM %s" % tableName).show()
# Delete every even value
print("######### Delete every even value ##############")
spark.sql("DELETE FROM {0} WHERE (id % 2 == 0)".format(tableName))
spark.sql("SELECT * FROM %s" % tableName).show()
# Read old version of data using time travel
print("######## Read old data using time travel ############")
df = spark.read.format("delta").option("versionAsOf", 0).table(tableName)
df.show()
finally:
# cleanup
spark.sql("DROP TABLE " + tableName)
spark.sql("DROP TABLE IF EXISTS newData")
spark.stop()
| 35.986486 | 99 | 0.615096 | ["Apache-2.0"] | Kimahriman/delta | examples/python/quickstart_sql.py | 2,663 | Python |
import numpy as np
from sigman.analyzer import InvalidArgumentError
procedure_type = 'points'
description = (
"""Procedure calculate time of B point from equation:
RB = 1.233RZ-0.0032RZ^2-31.59
where RZ - time between R and dz/dt max [ms]
RB - time between R and B
Equation was proposed by D.L. Lozano in paper "Where to B in dZ/dt" (2007)
""")
author = 'mzylinski'
arguments = {
}
default_arguments = {
}
output_type = 'B'
required_waves = ['Signal']
required_points = [ 'R','dzdtmax']
def procedure(waves, points, begin_time, end_time, settings):
wave = waves['Signal']
R = points['R']
dzdtmax = points['dzdtmax']
r_x = []
r_y = []
for i in range(0,len(R)-1):
data = wave.data_slice(R.data_x[i], R.data_x[i+1])
RZ = (dzdtmax.data_x[i] - R.data_x[i])/wave.sample_length
RB = 1.233*RZ -0.0032*(RZ*RZ)-31.59
t = int(round(RB))
if (t<0):
t = 0
r_y.append(data[t])
r_x.append(R.data_x[i] + t*wave.sample_length)
return r_x, r_y
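# Worked example of the equation above: for RZ = 150 ms,
# RB = 1.233*150 - 0.0032*150^2 - 31.59 = 184.95 - 72.0 - 31.59 = 81.36 ms,
# i.e. the B point lands roughly 81 ms after the R peak (rounded to the
# nearest sample index by int(round(RB))).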
def interpret_arguments(waves, points, arguments):
output_arguments = {}
for key, item in arguments.items():
try:
output_arguments[key] = float(item)
except:
raise InvalidArgumentError("{} is invalid.".format(arguments[key]))
return output_arguments
def execute(waves, points, begin_time, end_time, arguments):
arguments = interpret_arguments(waves, points, arguments)
    return procedure(waves, points, begin_time, end_time, arguments)
| 26.931034 | 79 | 0.630602 | ["MIT"] | k-cybulski/sigman-project | procedures/points_B_ICG_Lozaano_Equation.py | 1,562 | Python |
"""Welcome to MLToolset, a package to simplify machine learning research!
Author: Ryan Eloff
Contact: ryan.peter.eloff@gmail.com
Date: May 2018
"""
from . import data
from . import nearest_neighbour
from . import neural_blocks
from . import siamese
from . import training
from . import utils
from ._globals import TF_FLOAT
from ._globals import TF_INT
from ._globals import NP_FLOAT
from ._globals import NP_INT
| 19.857143 | 73 | 0.786571 | ["MIT"] | rpeloff/multimodal-one-shot-learning | src/mltoolset/__init__.py | 417 | Python |
"""
This file implements the signature scheme from "Unique Ring Signatures: A Practical
Construction" by Matthew Franklin and Haibin Zhang
"""
import sys
import math
from random import randint
import hashlib
from libsig.AbstractRingSignatureScheme import AbstractRingSignatureScheme
#from AbstractRingSignatureScheme import AbstractRingSignatureScheme
#from libsig import primes
# ----------- HELPER FUNCTIONS -----------
# function to find divisors in order to find generators
def find_divisors(x):
"""
This is the "function to find divisors in order to find generators" module.
This DocTest verifies that the module is correctly calculating all divisors
of a number x.
>>> find_divisors(10)
[1, 2, 5, 10]
>>> find_divisors(112)
[1, 2, 4, 7, 8, 14, 16, 28, 56, 112]
"""
divisors = [ i for i in range(1,x+1) if x % i == 0]
return divisors
# function to find random generator of G
def find_generator(p):
'''
    The order of any element of the group divides the group order (the parameter p here).
    Step 1: Calculate all divisors of p.
    Step 2: For a random element e of G, test whether e to the power of a divisor is 1.
    If no proper divisor gives 1 but e to the power of p does, a generator is found.
'''
# Init
# Generate element which is tested for generator characteristics.
# Saved in list to prevent checking the same element twice.
testGen = randint(1,p)
listTested = []
listTested.append(testGen)
# Step 1.
divisors = find_divisors(p)
# try for all random numbers
# Caution: this leads to a truly random generator but is not very efficient.
while len(listTested) < p-1:
# only test each possible generator once
if testGen in listTested:
# Step 2.
for div in divisors:
testPotency = math.pow(testGen,div) % (p+1)
if testPotency == 1.0 and div != divisors[-1]:
# element does not have the same order like the group,
# therefore try next element
break
elif testPotency == 1.0 and div == divisors[-1]:
# generator is found
return testGen
# try new element
testGen = randint(1,p)
listTested.append(testGen)
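# Worked example: with the group order p = 58 used below (q = 59), the divisors
# are 1, 2, 29 and 58, so g is a generator of Z_59* iff g^2 != 1 and g^29 != 1
# (mod 59) while g^58 == 1; e.g. g = 2 qualifies, since 2^2 = 4 and
# 2^29 = 58 (mod 59).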
def list_to_string(input_list):
'''
convert a list into a concatenated string of all its elements
'''
result = ''.join(map(str,input_list))
return result
# ----------- HELPER FUNCTIONS END -----------
class UniqueRingSignature(AbstractRingSignatureScheme):
'''
    | output: pp = (lambda, q, G, H, H2) with,
| q is prime,
| g is generator of G,
| G is multiplicative Group with prime order q,
| H1 and H2 are two Hash functions H1: {0,1}* -> G,
| (as well as H2: {0,1}* -> Zq which is the same).
'''
    # set prime q (derived from a Sophie Germain prime and therefore a safe prime)
#q = 53
q = 59
# find random generator of G
g = find_generator(q-1)
# hash functions with desired range and the usage of secure hashes
h1 = lambda x: int(hashlib.sha256(str(x).encode()).hexdigest(),16)%(UniqueRingSignature.q)
# this way to share the information should be improved
h2 = lambda x: int(hashlib.sha512(str(x).encode()).hexdigest(),16)%(UniqueRingSignature.q)
# list of public keys
Rp = list()
@staticmethod
def keygen(verbose=False):
#print("---- KeyGen Started ---- \n")
r = randint(1,UniqueRingSignature.q)
# x = g**r % q
x = pow(UniqueRingSignature.g, r,UniqueRingSignature.q)
# y = g**x
y = pow(UniqueRingSignature.g, x, UniqueRingSignature.q)
if verbose == True:
print("KeyGen Config: public key y=" + str(y) + ", private key x=" + str(x) + "\n")
print("---- KeyGen Completed ---- \n")
# Caution! I know, keygen should NOT return the private key, but this is needed to "play" through a whole signature - validation process
return x,y
@staticmethod
def ringsign(x, pubkey, message,verbose=False):
'''
input: x is the privkey from user i,
| all public keys: pubkeys,
| the message
output: (R,m, (H(mR)^xi), c1,t1,...,cn,tn),
| R: all the pubkeys concatenated,
| cj,tj: random number within Zq
'''
# calculate R = pk1,pk2,..,pkn
R = list_to_string(pubkey)
g = UniqueRingSignature.g
q = UniqueRingSignature.q
h1 = UniqueRingSignature.h1
h2 = UniqueRingSignature.h2
# message + pubkeys concatenated
mR = message + str(R)
C = list()
T = list()
A = list()
B = list()
ri = -1
# simulation step
#
for i in pubkey:
# Step 1:
#
a = 0
b = 0
c = 0
t = 0
if pow(g,x,q) != i:
c, t = randint(1,q), randint(1,q)
a = (pow(g, t) * pow(int(i), c)) % q
b = (pow(h1(mR), t) * pow(pow(h1(mR),x),c)) % q
else:
# Step 2:
#
ri = randint(1, q)
a = pow(g, ri, q)
b = pow(h1(mR), ri, q)
# insert to allocate place
c = -1
t = -1
A.append(a)
B.append(b)
C.append(c)
T.append(t)
# for end
# Step 3:
#
cj = 0
# list count from 0
ab = ''.join('{}{}'.format(*t) for t in zip(A,B))
usernr = 0
for i in range(len(pubkey)):
if pubkey[i] != (pow(g,x,q)):
cj = (cj + C[i]) % q
else:
usernr = i
ci = h2(message + R + ab) - (cj % (q-1))
# update ci, this was initialized with -1
C[usernr] = ci
ti = ((ri - (C[usernr]*x)) % (q-1))
if ti < 0:
ti = (q-1) + ti
# update ti, this was initialized with -1
T[usernr] = ti
# Step 4:
#
# concatenate ct: c1,t1,c2,t2,...,cn,tn
ct = ','.join('{},{}'.format(*t) for t in zip(C,T))
# returning result
result = R + ","+message+","+str(pow(h1(mR),x, q))+"," + ct
if verbose == True:
print("RingSign Result: "+ result)
print("---- RingSign Completed ---- \n")
return result
@staticmethod
def verify(R, message, signature,verbose=False):
'''
Input: the public keys R
| the message
| the signature computed with ringsign
Output: whether the message was signed by R or not
'''
g = UniqueRingSignature.g
q = UniqueRingSignature.q
h1 = UniqueRingSignature.h1
h2 = UniqueRingSignature.h2
# parse the signature
parsed = signature.split(",")
tt = int(parsed[2])
cjs = list()
tjs = list()
for i in range(0,int(((len(parsed))/2)-1)):
cjs.append(int(parsed[3+2*i]))
tjs.append(int(parsed[4+2*i]))
#print(str(cjs)+" "+str(tjs) + " "+ str(tt))
# check signature
# sum of all cjs
# =?
# self.pp['h2'](message + R + gyh1)
mR = list_to_string(R)
val1 = sum(cjs) % q
# for all users in R:
# g**tj * yj ** cj , h1(m||R)**tj * tt**cj
gyh1 = ""
for i in range(len(tjs)):
if tjs[i] < 0:
tjs[i] = (q-1) + tjs[i]
if cjs[i] < 0:
cjs[i] = (q-1) + cjs[i]
gy = (pow(g,(tjs[i]),q) * (pow((R[i]),(cjs[i]),q))) % q
h = (pow(int(h1(message + mR)), int(tjs[i])) * pow(tt,int(cjs[i]))) % q
gyh1 = gyh1 + str( gy) + str( h)
val2 = str(h2(message + list_to_string(R) + gyh1))
if int(val1) == int(val2):
if verbose == True:
print("Signature is valid!\n")
print("Common Result: " + str(val1))
print("---- Validation Completed ---- \n")
return True
else:
if verbose == True:
print("Signature is not valid!\n")
print(str(val1) + " != " + str(val2))
print("---- Validation Completed ---- \n")
return False
def local_test(verbose=True):
# verbose output
print(verbose)
# user 1 will signate and validate later,
# therefore his private key is saved for test purposes
privKey1,pubkey = UniqueRingSignature.keygen(verbose)
UniqueRingSignature.Rp.append(pubkey)
a,pubkey = UniqueRingSignature.keygen(verbose)
UniqueRingSignature.Rp.append(pubkey)
# usernr start from 0
# ringsign(self, privkey, usernr, pubkeys, message)
ring = UniqueRingSignature.ringsign(privKey1, UniqueRingSignature.Rp, "asdf", verbose)
if verbose:
print("Result of Signature Validation:")
# verify(pubkeys, message, signature):
UniqueRingSignature.verify(UniqueRingSignature.Rp, "asdf", ring, verbose)
if __name__ == '__main__':
# doctest start
import doctest
doctest.testmod()
if len(sys.argv) > 1:
verbose = False
if sys.argv[1] == "True":
verbose = True
# run a local test
local_test(verbose)
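# Usage note: run as a script this executes the doctests and, when given an
# argument, a full keygen/ringsign/verify round trip, e.g.:
#     python FZZ_unique_ring_signature.py True
# where "True" switches on verbose output.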
| 30.194268 | 144 | 0.528214 | ["MIT"] | vs-uulm/libsig_pets | libsig/FZZ_unique_ring_signature.py | 9,481 | Python |
import crcmod
from selfdrive.car.hyundai.values import CAR, CHECKSUM
hyundai_checksum = crcmod.mkCrcFun(0x11D, initCrc=0xFD, rev=False, xorOut=0xdf)
def create_lkas11(packer, car_fingerprint, bus, apply_steer, steer_req, cnt, enabled, lkas11, hud_alert,
lane_visible, left_lane_depart, right_lane_depart, keep_stock=False):
values = {
"CF_Lkas_Bca_R": lkas11["CF_Lkas_Bca_R"] if keep_stock else 3,
#"CF_Lkas_LdwsSysState": 3 if steer_req else lane_visible,
"CF_Lkas_LdwsSysState": 3 if enabled else 1,
"CF_Lkas_SysWarning": hud_alert,
#"CF_Lkas_LdwsLHWarning": lkas11["CF_Lkas_LdwsLHWarning"],
#"CF_Lkas_LdwsRHWarning": lkas11["CF_Lkas_LdwsRHWarning"],
"CF_Lkas_LdwsLHWarning": left_lane_depart,
"CF_Lkas_LdwsRHWarning": right_lane_depart,
"CF_Lkas_HbaLamp": lkas11["CF_Lkas_HbaLamp"] if keep_stock else 0,
"CF_Lkas_FcwBasReq": lkas11["CF_Lkas_FcwBasReq"] if keep_stock else 0,
"CR_Lkas_StrToqReq": apply_steer,
"CF_Lkas_ActToi": steer_req,
"CF_Lkas_ToiFlt": 0,
"CF_Lkas_HbaSysState": lkas11["CF_Lkas_HbaSysState"] if keep_stock else 1,
"CF_Lkas_FcwOpt": lkas11["CF_Lkas_FcwOpt"] if keep_stock else 0,
"CF_Lkas_HbaOpt": lkas11["CF_Lkas_HbaOpt"] if keep_stock else 3,
"CF_Lkas_MsgCount": cnt,
"CF_Lkas_FcwSysState": lkas11["CF_Lkas_FcwSysState"] if keep_stock else 0,
"CF_Lkas_FcwCollisionWarning": lkas11["CF_Lkas_FcwCollisionWarning"] if keep_stock else 0,
"CF_Lkas_FusionState": lkas11["CF_Lkas_FusionState"] if keep_stock else 0,
"CF_Lkas_Chksum": 0,
"CF_Lkas_FcwOpt_USM": lkas11["CF_Lkas_FcwOpt_USM"] if keep_stock else 2,
"CF_Lkas_LdwsOpt_USM": lkas11["CF_Lkas_LdwsOpt_USM"] if keep_stock else 3,
}
if car_fingerprint == CAR.GENESIS:
values["CF_Lkas_Bca_R"] = 2
values["CF_Lkas_HbaSysState"] = lkas11["CF_Lkas_HbaSysState"] if keep_stock else 0
values["CF_Lkas_HbaOpt"] = lkas11["CF_Lkas_HbaOpt"] if keep_stock else 1
values["CF_Lkas_FcwOpt_USM"] = lkas11["CF_Lkas_FcwOpt_USM"] if keep_stock else 2
values["CF_Lkas_LdwsOpt_USM"] = lkas11["CF_Lkas_LdwsOpt_USM"] if keep_stock else 0
if car_fingerprint == CAR.KIA_OPTIMA:
values["CF_Lkas_Bca_R"] = 0
values["CF_Lkas_HbaOpt"] = lkas11["CF_Lkas_HbaOpt"] if keep_stock else 1
values["CF_Lkas_FcwOpt_USM"] = lkas11["CF_Lkas_FcwOpt_USM"] if keep_stock else 0
if car_fingerprint == CAR.KIA_CARDENZA:
########################################################
#values["CF_Lkas_Bca_R"] = int(left_lane) + (int(right_lane) << 1)
#values["CF_Lkas_FcwOpt_USM"] = 2 if enabled else 1
# FcwOpt_USM 5 = Orange blinking car + lanes
# FcwOpt_USM 4 = Orange car + lanes
# FcwOpt_USM 3 = Green blinking car + lanes
# FcwOpt_USM 2 = Green car + lanes
# FcwOpt_USM 1 = White car + lanes
# FcwOpt_USM 0 = No car + lanes
#values["CF_Lkas_SysWarning"] = 4 if sys_warning else 0
# SysWarning 4 = keep hands on wheel
# SysWarning 5 = keep hands on wheel (red)
# SysWarning 6 = keep hands on wheel (red) + beep
# Note: the warning is hidden while the blinkers are on
#values["CF_Lkas_LdwsOpt_USM"] = 2
########################################################
values["CF_Lkas_Bca_R"] = 0
values["CF_Lkas_FcwOpt_USM"] = 1
values["CF_Lkas_LdwsOpt_USM"] = 3
dat = packer.make_can_msg("LKAS11", 0, values)[2]
if car_fingerprint in CHECKSUM["crc8"]:
# CRC Checksum as seen on 2019 Hyundai Santa Fe
dat = dat[:6] + dat[7:8]
checksum = hyundai_checksum(dat)
elif car_fingerprint in CHECKSUM["6B"]:
# Checksum of first 6 Bytes, as seen on 2018 Kia Sorento
checksum = sum(dat[:6]) % 256
else:
# Checksum of first 6 Bytes and last Byte as seen on 2018 Kia Stinger
checksum = (sum(dat[:6]) + dat[7]) % 256
values["CF_Lkas_Chksum"] = checksum
return packer.make_can_msg("LKAS11", bus, values)
def create_clu11(packer, bus, clu11, button, speed, cnt):
values = {
"CF_Clu_CruiseSwState": button,
"CF_Clu_CruiseSwMain": clu11["CF_Clu_CruiseSwMain"],
"CF_Clu_SldMainSW": clu11["CF_Clu_SldMainSW"],
"CF_Clu_ParityBit1": clu11["CF_Clu_ParityBit1"],
"CF_Clu_VanzDecimal": clu11["CF_Clu_VanzDecimal"],
"CF_Clu_Vanz": speed,
"CF_Clu_SPEED_UNIT": clu11["CF_Clu_SPEED_UNIT"],
"CF_Clu_DetentOut": clu11["CF_Clu_DetentOut"],
"CF_Clu_RheostatLevel": clu11["CF_Clu_RheostatLevel"],
"CF_Clu_CluInfo": clu11["CF_Clu_CluInfo"],
"CF_Clu_AmpInfo": clu11["CF_Clu_AmpInfo"],
"CF_Clu_AliveCnt1": cnt,
}
return packer.make_can_msg("CLU11", bus, values)
def create_scc12(packer, apply_accel, enabled, cnt, scc12):
values = {
"CF_VSM_Prefill": scc12["CF_VSM_Prefill"],
"CF_VSM_DecCmdAct": scc12["CF_VSM_DecCmdAct"],
"CF_VSM_HBACmd": scc12["CF_VSM_HBACmd"],
"CF_VSM_Warn": scc12["CF_VSM_Warn"],
"CF_VSM_Stat": scc12["CF_VSM_Stat"],
"CF_VSM_BeltCmd": scc12["CF_VSM_BeltCmd"],
"ACCFailInfo": scc12["ACCFailInfo"],
"ACCMode": scc12["ACCMode"],
"StopReq": scc12["StopReq"],
"CR_VSM_DecCmd": scc12["CR_VSM_DecCmd"],
"aReqMax": apply_accel if enabled and scc12["ACCMode"] == 1 else scc12["aReqMax"],
"TakeOverReq": scc12["TakeOverReq"],
"PreFill": scc12["PreFill"],
"aReqMin": apply_accel if enabled and scc12["ACCMode"] == 1 else scc12["aReqMin"],
"CF_VSM_ConfMode": scc12["CF_VSM_ConfMode"],
"AEB_Failinfo": scc12["AEB_Failinfo"],
"AEB_Status": scc12["AEB_Status"],
"AEB_CmdAct": scc12["AEB_CmdAct"],
"AEB_StopReq": scc12["AEB_StopReq"],
"CR_VSM_Alive": cnt,
"CR_VSM_ChkSum": 0,
}
dat = packer.make_can_msg("SCC12", 0, values)[2]
values["CR_VSM_ChkSum"] = 16 - sum([sum(divmod(i, 16)) for i in dat]) % 16
return packer.make_can_msg("SCC12", 0, values)
def create_mdps12(packer, car_fingerprint, cnt, mdps12):
values = {
"CR_Mdps_StrColTq": mdps12["CR_Mdps_StrColTq"],
"CF_Mdps_Def": mdps12["CF_Mdps_Def"],
"CF_Mdps_ToiActive": 0,
"CF_Mdps_ToiUnavail": 1,
"CF_Mdps_MsgCount2": cnt,
"CF_Mdps_Chksum2": 0,
"CF_Mdps_ToiFlt": mdps12["CF_Mdps_ToiFlt"],
"CF_Mdps_SErr": mdps12["CF_Mdps_SErr"],
"CR_Mdps_StrTq": mdps12["CR_Mdps_StrTq"],
"CF_Mdps_FailStat": mdps12["CF_Mdps_FailStat"],
"CR_Mdps_OutTq": mdps12["CR_Mdps_OutTq"],
}
dat = packer.make_can_msg("MDPS12", 2, values)[2]
checksum = sum(dat) % 256
values["CF_Mdps_Chksum2"] = checksum
return packer.make_can_msg("MDPS12", 2, values)
| 43.395973 | 104 | 0.689453 | ["MIT"] | zzune/openpilot | selfdrive/car/hyundai/hyundaican.py | 6,466 | Python |
import torch
from facenet_pytorch import MTCNN, InceptionResnetV1
from torchvision import transforms
from Configs import Global_Config
IMAGE_SIZE = 220
mtcnn = MTCNN(
image_size=IMAGE_SIZE, margin=0, min_face_size=20,
thresholds=[0.6, 0.7, 0.7], factor=0.709, post_process=True,
device=Global_Config.device
)
to_pil = transforms.ToPILImage(mode='RGB')
crop_transform = transforms.Compose([transforms.Resize(IMAGE_SIZE),
transforms.CenterCrop(IMAGE_SIZE)])
resnet = InceptionResnetV1(pretrained='vggface2', classify=False).eval().to(Global_Config.device)
class ID_Encoder(torch.nn.Module):
def __init__(self):
super(ID_Encoder, self).__init__()
def crop_tensor_according_to_bboxes(self, images, bboxes):
cropped_batch = []
for idx, image in enumerate(images):
try:
cropped_image = crop_transform(image[:, int(bboxes[idx][0][1]):int(bboxes[idx][0][3]),
int(bboxes[idx][0][0]):int(bboxes[idx][0][2])].unsqueeze(0))
except:
cropped_image = crop_transform(image.unsqueeze(0))
cropped_batch.append(cropped_image)
return torch.cat(cropped_batch, dim=0)
def preprocess_images_to_id_encoder(self, images):
bboxes = [mtcnn.detect(to_pil(image))[0] for image in images]
cropped_images = self.crop_tensor_according_to_bboxes(images, bboxes)
return cropped_images
def forward(self, images):
cropped_images = self.preprocess_images_to_id_encoder(images)
img_embeddings = resnet(cropped_images)
        return img_embeddings
| 38.767442 | 102 | 0.673065 | ["MIT"] | YuGong123/ID-disentanglement-Pytorch | Models/Encoders/ID_Encoder.py | 1,667 | Python |
"""
Current-flow betweenness centrality measures for subsets of nodes.
"""
# Copyright (C) 2010-2011 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
__author__ = """Aric Hagberg (hagberg@lanl.gov)"""
__all__ = ['current_flow_betweenness_centrality_subset',
'edge_current_flow_betweenness_centrality_subset']
import itertools
import networkx as nx
from networkx.algorithms.centrality.flow_matrix import *
def current_flow_betweenness_centrality_subset(G,sources,targets,
normalized=True,
weight='weight',
dtype=float, solver='lu'):
r"""Compute current-flow betweenness centrality for subsets of nodes.
Current-flow betweenness centrality uses an electrical current
model for information spreading in contrast to betweenness
centrality which uses shortest paths.
Current-flow betweenness centrality is also known as
random-walk betweenness centrality [2]_.
Parameters
----------
G : graph
A NetworkX graph
sources: list of nodes
Nodes to use as sources for current
targets: list of nodes
Nodes to use as sinks for current
normalized : bool, optional (default=True)
If True the betweenness values are normalized by b=b/(n-1)(n-2) where
n is the number of nodes in G.
weight : string or None, optional (default='weight')
Key for edge data used as the edge weight.
If None, then use 1 as each edge weight.
dtype: data type (float)
Default data type for internal matrices.
Set to np.float32 for lower memory consumption.
solver: string (default='lu')
Type of linear solver to use for computing the flow matrix.
Options are "full" (uses most memory), "lu" (recommended), and
"cg" (uses least memory).
Returns
-------
nodes : dictionary
Dictionary of nodes with betweenness centrality as the value.
See Also
--------
approximate_current_flow_betweenness_centrality
betweenness_centrality
edge_betweenness_centrality
edge_current_flow_betweenness_centrality
Notes
-----
Current-flow betweenness can be computed in `O(I(n-1)+mn \log n)`
time [1]_, where `I(n-1)` is the time needed to compute the
inverse Laplacian. For a full matrix this is `O(n^3)` but using
sparse methods you can achieve `O(nm{\sqrt k})` where `k` is the
Laplacian matrix condition number.
    The space required is `O(nw)` where `w` is the width of the sparse
    Laplacian matrix. The worst case is `w=n` for `O(n^2)`.
If the edges have a 'weight' attribute they will be used as
weights in this algorithm. Unspecified weights are set to 1.
References
----------
.. [1] Centrality Measures Based on Current Flow.
Ulrik Brandes and Daniel Fleischer,
Proc. 22nd Symp. Theoretical Aspects of Computer Science (STACS '05).
LNCS 3404, pp. 533-544. Springer-Verlag, 2005.
http://www.inf.uni-konstanz.de/algo/publications/bf-cmbcf-05.pdf
.. [2] A measure of betweenness centrality based on random walks,
M. E. J. Newman, Social Networks 27, 39-54 (2005).
"""
from networkx.utils import reverse_cuthill_mckee_ordering
try:
import numpy as np
except ImportError:
raise ImportError('current_flow_betweenness_centrality requires NumPy ',
'http://scipy.org/')
try:
import scipy
except ImportError:
raise ImportError('current_flow_betweenness_centrality requires SciPy ',
'http://scipy.org/')
if G.is_directed():
raise nx.NetworkXError('current_flow_betweenness_centrality() ',
'not defined for digraphs.')
if not nx.is_connected(G):
raise nx.NetworkXError("Graph not connected.")
n = G.number_of_nodes()
ordering = list(reverse_cuthill_mckee_ordering(G))
# make a copy with integer labels according to rcm ordering
# this could be done without a copy if we really wanted to
mapping=dict(zip(ordering,range(n)))
H = nx.relabel_nodes(G,mapping)
betweenness = dict.fromkeys(H,0.0) # b[v]=0 for v in H
for row,(s,t) in flow_matrix_row(H, weight=weight, dtype=dtype,
solver=solver):
for ss in sources:
i=mapping[ss]
for tt in targets:
j=mapping[tt]
betweenness[s]+=0.5*np.abs(row[i]-row[j])
betweenness[t]+=0.5*np.abs(row[i]-row[j])
if normalized:
nb=(n-1.0)*(n-2.0) # normalization factor
else:
nb=2.0
for v in H:
betweenness[v]=betweenness[v]/nb+1.0/(2-n)
return dict((ordering[k],v) for k,v in betweenness.items())
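# A minimal usage sketch (requires NumPy and SciPy):
#
#   >>> G = nx.complete_graph(4)
#   >>> bc = current_flow_betweenness_centrality_subset(G, sources=[0],
#   ...                                                 targets=[3])
#   >>> sorted(bc.keys())
#   [0, 1, 2, 3]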
def edge_current_flow_betweenness_centrality_subset(G, sources, targets,
normalized=True,
weight='weight',
dtype=float, solver='lu'):
"""Compute current-flow betweenness centrality for edges using subsets
of nodes.
Current-flow betweenness centrality uses an electrical current
model for information spreading in contrast to betweenness
centrality which uses shortest paths.
Current-flow betweenness centrality is also known as
random-walk betweenness centrality [2]_.
Parameters
----------
G : graph
A NetworkX graph
sources: list of nodes
Nodes to use as sources for current
targets: list of nodes
Nodes to use as sinks for current
normalized : bool, optional (default=True)
If True the betweenness values are normalized by b=b/(n-1)(n-2) where
n is the number of nodes in G.
weight : string or None, optional (default='weight')
Key for edge data used as the edge weight.
If None, then use 1 as each edge weight.
dtype: data type (float)
Default data type for internal matrices.
Set to np.float32 for lower memory consumption.
solver: string (default='lu')
Type of linear solver to use for computing the flow matrix.
Options are "full" (uses most memory), "lu" (recommended), and
"cg" (uses least memory).
Returns
-------
nodes : dictionary
Dictionary of edge tuples with betweenness centrality as the value.
See Also
--------
betweenness_centrality
edge_betweenness_centrality
current_flow_betweenness_centrality
Notes
-----
Current-flow betweenness can be computed in `O(I(n-1)+mn \log n)`
time [1]_, where `I(n-1)` is the time needed to compute the
inverse Laplacian. For a full matrix this is `O(n^3)` but using
sparse methods you can achieve `O(nm{\sqrt k})` where `k` is the
Laplacian matrix condition number.
    The space required is `O(nw)` where `w` is the width of the sparse
    Laplacian matrix. The worst case is `w=n` for `O(n^2)`.
If the edges have a 'weight' attribute they will be used as
weights in this algorithm. Unspecified weights are set to 1.
References
----------
.. [1] Centrality Measures Based on Current Flow.
Ulrik Brandes and Daniel Fleischer,
Proc. 22nd Symp. Theoretical Aspects of Computer Science (STACS '05).
LNCS 3404, pp. 533-544. Springer-Verlag, 2005.
http://www.inf.uni-konstanz.de/algo/publications/bf-cmbcf-05.pdf
.. [2] A measure of betweenness centrality based on random walks,
M. E. J. Newman, Social Networks 27, 39-54 (2005).
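    Examples
    --------
    A minimal usage sketch; the graph and the source/target choices below are
    illustrative only::

        G = nx.complete_graph(4)
        flow = edge_current_flow_betweenness_centrality_subset(
            G, sources=[0], targets=[3])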
"""
from networkx.utils import reverse_cuthill_mckee_ordering
try:
import numpy as np
except ImportError:
        raise ImportError('current_flow_betweenness_centrality requires NumPy, '
                          'http://scipy.org/')
try:
import scipy
except ImportError:
        raise ImportError('current_flow_betweenness_centrality requires SciPy, '
                          'http://scipy.org/')
if G.is_directed():
        raise nx.NetworkXError('edge_current_flow_betweenness_centrality '
                               'not defined for digraphs.')
if not nx.is_connected(G):
raise nx.NetworkXError("Graph not connected.")
n = G.number_of_nodes()
ordering = list(reverse_cuthill_mckee_ordering(G))
# make a copy with integer labels according to rcm ordering
# this could be done without a copy if we really wanted to
mapping=dict(zip(ordering,range(n)))
H = nx.relabel_nodes(G,mapping)
betweenness=(dict.fromkeys(H.edges(),0.0))
if normalized:
nb=(n-1.0)*(n-2.0) # normalization factor
else:
nb=2.0
for row,(e) in flow_matrix_row(H, weight=weight, dtype=dtype,
solver=solver):
for ss in sources:
i=mapping[ss]
for tt in targets:
j=mapping[tt]
betweenness[e]+=0.5*np.abs(row[i]-row[j])
betweenness[e]/=nb
return dict(((ordering[s],ordering[t]),v)
for (s,t),v in betweenness.items())
# fixture for nose tests
def setup_module(module):
from nose import SkipTest
try:
import numpy
import scipy
except:
raise SkipTest("NumPy not available")
| 36.155303 | 80 | 0.629649 | [
"BSD-3-Clause"
] | AllenDowney/networkx | networkx/algorithms/centrality/current_flow_betweenness_subset.py | 9,545 | Python |
# Generated by Django 3.1.4 on 2021-09-28 13:49
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('store', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='payment',
name='paystack_response',
),
]
| 18.166667 | 47 | 0.590214 | [
"MIT"
] | Joetib/jshop | apps/store/migrations/0002_remove_payment_paystack_response.py | 327 | Python |
#!/usr/bin/python3
import functools
from copy import deepcopy
from .grammar import BASE_NODE_TYPES
class NodeBase:
"""Represents a node within the solidity AST.
Attributes:
depth: Number of nodes between this node and the SourceUnit
offset: Absolute source offsets as a (start, stop) tuple
contract_id: Contract ID as given by the standard compiler JSON
fields: List of attributes for this node
"""
def __init__(self, ast, parent):
self.depth = parent.depth + 1 if parent is not None else 0
self._parent = parent
self._children = set()
src = [int(i) for i in ast["src"].split(":")]
self.offset = (src[0], src[0] + src[1])
self.contract_id = src[2]
self.fields = sorted(ast.keys())
for key, value in ast.items():
if isinstance(value, dict) and value.get("nodeType") == "Block":
value = value["statements"]
elif key == "body" and not value:
value = []
if isinstance(value, dict):
item = node_class_factory(value, self)
if isinstance(item, NodeBase):
self._children.add(item)
setattr(self, key, item)
elif isinstance(value, list):
items = [node_class_factory(i, self) for i in value]
setattr(self, key, items)
self._children.update(i for i in items if isinstance(i, NodeBase))
else:
setattr(self, key, value)
def __hash__(self):
return hash(f"{self.nodeType}{self.depth}{self.offset}")
def __repr__(self):
repr_str = f"<{self.nodeType}"
if hasattr(self, "nodes"):
repr_str += " iterable"
if hasattr(self, "type"):
if isinstance(self.type, str):
repr_str += f" {self.type}"
else:
repr_str += f" {self.type._display()}"
if self._display():
repr_str += f" '{self._display()}'"
else:
repr_str += " object"
return f"{repr_str}>"
def _display(self):
if hasattr(self, "name") and hasattr(self, "value"):
return f"{self.name} = {self.value}"
for attr in ("name", "value", "absolutePath"):
if hasattr(self, attr):
return f"{getattr(self, attr)}"
return ""
def children(
self,
depth=None,
include_self=False,
include_parents=True,
include_children=True,
required_offset=None,
offset_limits=None,
filters=None,
exclude_filter=None,
):
"""Get childen nodes of this node.
Arguments:
depth: Number of levels of children to traverse. 0 returns only this node.
include_self: Includes this node in the results.
include_parents: Includes nodes that match in the results, when they also have
child nodes that match.
            include_children: If False, as soon as a match is found its children will not
                              be included in the search.
required_offset: Only match nodes with a source offset that contains this offset.
offset_limits: Only match nodes when their source offset is contained inside
this source offset.
filters: Dictionary of {attribute: value} that children must match. Can also
be given as a list of dicts, children that match one of the dicts
will be returned.
exclude_filter: Dictionary of {attribute:value} that children cannot match.
Returns:
List of node objects."""
if filters is None:
filters = {}
if exclude_filter is None:
exclude_filter = {}
if isinstance(filters, dict):
filters = [filters]
filter_fn = functools.partial(
_check_filters, required_offset, offset_limits, filters, exclude_filter
)
find_fn = functools.partial(_find_children, filter_fn, include_parents, include_children)
result = find_fn(find_fn, depth, self)
if include_self or not result or result[0] != self:
return result
return result[1:]
def parents(self, depth=-1, filters=None):
"""Get parent nodes of this node.
Arguments:
depth: Depth limit. If given as a negative value, it will be subtracted
from this object's depth.
filters: Dictionary of {attribute: value} that parents must match.
Returns: list of nodes"""
if filters and not isinstance(filters, dict):
raise TypeError("Filters must be a dict")
if depth < 0:
depth = self.depth + depth
if depth >= self.depth or depth < 0:
raise IndexError("Given depth exceeds node depth")
node_list = []
parent = self
while True:
parent = parent._parent
if not filters or _check_filter(parent, filters, {}):
node_list.append(parent)
if parent.depth == depth:
return node_list
def parent(self, depth=-1, filters=None):
"""Get a parent node of this node.
Arguments:
depth: Depth limit. If given as a negative value, it will be subtracted
from this object's depth. The parent at this exact depth is returned.
filters: Dictionary of {attribute: value} that the parent must match.
If a filter value is given, will return the first parent that meets the filters
up to the given depth. If none is found, returns None.
If no filter is given, returns the parent at the given depth."""
if filters and not isinstance(filters, dict):
raise TypeError("Filters must be a dict")
if depth < 0:
depth = self.depth + depth
if depth >= self.depth or depth < 0:
raise IndexError("Given depth exceeds node depth")
parent = self
while parent.depth > depth:
parent = parent._parent
if parent.depth == depth and not filters:
return parent
if filters and _check_filter(parent, filters, {}):
return parent
return None
def is_child_of(self, node):
"""Checks if this object is a child of the given node object."""
if node.depth >= self.depth:
return False
return self.parent(node.depth) == node
def is_parent_of(self, node):
"""Checks if this object is a parent of the given node object."""
if node.depth <= self.depth:
return False
return node.parent(self.depth) == self
def get(self, key, default=None):
"""
Gets an attribute from this node, if that attribute exists.
Arguments:
key: Field name to return. May contain decimals to return a value
from a child node.
default: Default value to return.
Returns: Field value if it exists. Default value if not.
"""
if key is None:
raise TypeError("Cannot match against None")
obj = self
for k in key.split("."):
if isinstance(obj, dict):
obj = obj.get(k)
else:
obj = getattr(obj, k, None)
return obj or default
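# A minimal usage sketch (illustrative only): `ast` is assumed to be a solc AST
# dict for a source unit, as produced by the standard compiler JSON output.
#
#     source_unit = node_class_factory(ast, None)
#     funcs = source_unit.children(filters={"nodeType": "FunctionDefinition"})
#     names = [fn.name for fn in funcs]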
class IterableNodeBase(NodeBase):
def __getitem__(self, key):
if isinstance(key, str):
try:
return next(i for i in self.nodes if getattr(i, "name", None) == key)
except StopIteration:
raise KeyError(key)
return self.nodes[key]
def __iter__(self):
return iter(self.nodes)
def __len__(self):
return len(self.nodes)
def __contains__(self, obj):
return obj in self.nodes
def node_class_factory(ast, parent):
ast = deepcopy(ast)
if not isinstance(ast, dict) or "nodeType" not in ast:
return ast
if "body" in ast:
ast["nodes"] = ast.pop("body")
base_class = IterableNodeBase if "nodes" in ast else NodeBase
base_type = next((k for k, v in BASE_NODE_TYPES.items() if ast["nodeType"] in v), None)
if base_type:
ast["baseNodeType"] = base_type
return type(ast["nodeType"], (base_class,), {})(ast, parent)
def _check_filters(required_offset, offset_limits, filters, exclude, node):
if required_offset and not is_inside_offset(required_offset, node.offset):
return False
if offset_limits and not is_inside_offset(node.offset, offset_limits):
return False
for f in filters:
if _check_filter(node, f, exclude):
return True
return False
def _check_filter(node, filters, exclude):
for key, value in filters.items():
if node.get(key) != value:
return False
for key, value in exclude.items():
if node.get(key) == value:
return False
return True
def _find_children(filter_fn, include_parents, include_children, find_fn, depth, node):
if depth is not None:
depth -= 1
if depth < 0:
return [node] if filter_fn(node) else []
if not include_children and filter_fn(node):
return [node]
node_list = []
for child in node._children:
node_list.extend(find_fn(find_fn, depth, child))
if (include_parents or not node_list) and filter_fn(node):
node_list.insert(0, node)
return node_list
def is_inside_offset(inner, outer):
"""Checks if the first offset is contained in the second offset
Args:
inner: inner offset tuple
outer: outer offset tuple
Returns: bool"""
return outer[0] <= inner[0] <= inner[1] <= outer[1]
| 35.496403 | 97 | 0.588164 | [
"MIT"
] | danhper/py-solc-ast | solcast/nodes.py | 9,868 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from multiprocessing import Pool
import requests
PROCESS_POOL_SIZE = 10
REQUESTS = 10000
BASE_URL = "http://localhost:8888"
RESOURCE_NAME = "resource"
def f(process_number):
resource_name = RESOURCE_NAME
raw_body = '{"title": "%i", "lifetime": 300, "wait": 20}' % process_number
r = requests.post("%s/locks/%s" % (BASE_URL, resource_name), data=raw_body)
if r.status_code != 201:
raise Exception("bad status code %i from post request" % r.status_code)
lock_url = r.headers['Location']
r = requests.delete(lock_url)
if r.status_code != 204:
raise Exception("bad status code %i from delete request" % r.status_code)
if __name__ == '__main__':
pool = Pool(processes=PROCESS_POOL_SIZE)
pool.map(f, range(0, REQUESTS))
| 30.259259 | 81 | 0.679315 | [
"MIT"
] | thefab/restful-distributed-lock-manager | tests/bomb1.py | 817 | Python |
# Copyright (c) 1996-2015 PSERC. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
"""Splits the gencost variable into two pieces if costs are given for Qg.
"""
from sys import stderr
from numpy import array, arange
def pqcost(gencost, ng, on=None):
"""Splits the gencost variable into two pieces if costs are given for Qg.
Checks whether C{gencost} has cost information for reactive power
generation (rows C{ng+1} to C{2*ng}). If so, it returns the first C{ng}
rows in C{pcost} and the last C{ng} rows in C{qcost}. Otherwise, leaves
C{qcost} empty. Also does some error checking.
If C{on} is specified (list of indices of generators which are on line)
it only returns the rows corresponding to these generators.
@author: Ray Zimmerman (PSERC Cornell)
"""
if on is None:
on = arange(ng)
if gencost.shape[0] == ng:
pcost = gencost[on, :]
qcost = array([])
elif gencost.shape[0] == 2 * ng:
pcost = gencost[on, :]
qcost = gencost[on + ng, :]
else:
stderr.write('pqcost: gencost has wrong number of rows\n')
return pcost, qcost
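# A minimal usage sketch (arrays are illustrative); with a 2*ng-row gencost the
# reactive-power rows are split off into qcost, otherwise qcost comes back empty:
#
#     pcost, qcost = pqcost(gencost, ng)             # all generators
#     pcost, qcost = pqcost(gencost, ng, on=[0, 2])  # only generators 0 and 2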
| 31.842105 | 77 | 0.66281 | [
"BSD-3-Clause"
] | AdrienGougeon/pandapower | pandapower/pypower/pqcost.py | 1,210 | Python |
""" Full assembly of the parts to form the complete network """
import torch.nn.functional as F
from .unet_parts import *
from .channels import C
class UNet3D(nn.Module):
def __init__(self, n_channels, n_classes, bilinear=True, apply_sigmoid_to_output=False):
super(UNet3D, self).__init__()
self.n_channels = n_channels
self.n_classes = n_classes
self.bilinear = bilinear
self.inc = DoubleConv3D(n_channels, C[0])
self.down1 = Down(C[0], C[1])
self.down2 = Down(C[1], C[2])
self.down3 = Down(C[2], C[3])
factor = 2 if bilinear else 1
        self.down4 = Down(C[3], C[4] // factor) # switch to DoubleConv if sticking to 8x spatial downsampling
self.up1 = Up(C[4], C[3] // factor, bilinear)
self.up2 = Up(C[3], C[2] // factor, bilinear)
self.up3 = Up(C[2], C[1] // factor, bilinear)
self.up4 = Up(C[1], C[0], bilinear)
self.outc = OutConv(C[0], n_classes) if apply_sigmoid_to_output is False else OutConv(C[0], n_classes, sigmoid=True)
def forward(self, x):
x1 = self.inc(x)
x2 = self.down1(x1)
x3 = self.down2(x2)
x4 = self.down3(x3)
x5 = self.down4(x4)
x = self.up1(x5, x4)
x = self.up2(x, x3)
x = self.up3(x, x2)
x = self.up4(x, x1)
logits = self.outc(x)
return logits
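# A minimal forward-pass sketch (channel count and volume size are illustrative;
# assumes `torch` is importable and the spatial dims are divisible by 16):
#
#     model = UNet3D(n_channels=1, n_classes=1)
#     x = torch.randn(1, 1, 64, 64, 32)   # (batch, channels, D, H, W)
#     y = model(x)                        # same spatial shape, n_classes channels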
| 34.4 | 124 | 0.582849 | [
"MIT"
] | mistermoutan/ModelsGenesis | pytorch/unet_3d/unet_model.py | 1,376 | Python |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import versionutils
from oslo_policy import policy
from neutron.conf.policies import base
DEPRECATED_REASON = (
"The security group API now supports system scope and default roles.")
SG_COLLECTION_PATH = '/security-groups'
SG_RESOURCE_PATH = '/security-groups/{id}'
RULE_COLLECTION_PATH = '/security-group-rules'
RULE_RESOURCE_PATH = '/security-group-rules/{id}'
RULE_ADMIN_OR_SG_OWNER = 'rule:admin_or_sg_owner'
RULE_ADMIN_OWNER_OR_SG_OWNER = 'rule:admin_owner_or_sg_owner'
rules = [
policy.RuleDefault(
name='admin_or_sg_owner',
check_str=base.policy_or(
'rule:context_is_admin',
'tenant_id:%(security_group:tenant_id)s'),
description='Rule for admin or security group owner access'),
policy.RuleDefault(
name='admin_owner_or_sg_owner',
check_str=base.policy_or(
'rule:owner',
RULE_ADMIN_OR_SG_OWNER),
description=('Rule for resource owner, '
'admin or security group owner access')),
# TODO(amotoki): admin_or_owner is the right rule?
# Does an empty string make more sense for create_security_group?
policy.DocumentedRuleDefault(
name='create_security_group',
check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER,
scope_types=['system', 'project'],
description='Create a security group',
operations=[
{
'method': 'POST',
'path': SG_COLLECTION_PATH,
},
],
deprecated_rule=policy.DeprecatedRule(
name='create_security_group',
check_str=base.RULE_ADMIN_OR_OWNER,
deprecated_reason=DEPRECATED_REASON,
deprecated_since=versionutils.deprecated.WALLABY)
),
policy.DocumentedRuleDefault(
name='get_security_group',
check_str=base.SYSTEM_OR_PROJECT_READER,
scope_types=['system', 'project'],
description='Get a security group',
operations=[
{
'method': 'GET',
'path': SG_COLLECTION_PATH,
},
{
'method': 'GET',
'path': SG_RESOURCE_PATH,
},
],
deprecated_rule=policy.DeprecatedRule(
name='get_security_group',
check_str=base.RULE_ANY,
deprecated_reason=DEPRECATED_REASON,
deprecated_since=versionutils.deprecated.WALLABY)
),
policy.DocumentedRuleDefault(
name='update_security_group',
check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER,
scope_types=['system', 'project'],
description='Update a security group',
operations=[
{
'method': 'PUT',
'path': SG_RESOURCE_PATH,
},
],
deprecated_rule=policy.DeprecatedRule(
name='update_security_group',
check_str=base.RULE_ADMIN_OR_OWNER,
deprecated_reason=DEPRECATED_REASON,
deprecated_since=versionutils.deprecated.WALLABY)
),
policy.DocumentedRuleDefault(
name='delete_security_group',
check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER,
scope_types=['system', 'project'],
description='Delete a security group',
operations=[
{
'method': 'DELETE',
'path': SG_RESOURCE_PATH,
},
],
deprecated_rule=policy.DeprecatedRule(
name='delete_security_group',
check_str=base.RULE_ADMIN_OR_OWNER,
deprecated_reason=DEPRECATED_REASON,
deprecated_since=versionutils.deprecated.WALLABY)
),
# TODO(amotoki): admin_or_owner is the right rule?
# Does an empty string make more sense for create_security_group_rule?
policy.DocumentedRuleDefault(
name='create_security_group_rule',
check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER,
scope_types=['system', 'project'],
description='Create a security group rule',
operations=[
{
'method': 'POST',
'path': RULE_COLLECTION_PATH,
},
],
deprecated_rule=policy.DeprecatedRule(
name='create_security_group_rule',
check_str=base.RULE_ADMIN_OR_OWNER,
deprecated_reason=DEPRECATED_REASON,
deprecated_since=versionutils.deprecated.WALLABY)
),
policy.DocumentedRuleDefault(
name='get_security_group_rule',
check_str=base.policy_or(
base.SYSTEM_OR_PROJECT_READER,
base.RULE_SG_OWNER),
scope_types=['system', 'project'],
description='Get a security group rule',
operations=[
{
'method': 'GET',
'path': RULE_COLLECTION_PATH,
},
{
'method': 'GET',
'path': RULE_RESOURCE_PATH,
},
],
deprecated_rule=policy.DeprecatedRule(
name='get_security_group_rule',
check_str=RULE_ADMIN_OWNER_OR_SG_OWNER,
deprecated_reason=DEPRECATED_REASON,
deprecated_since=versionutils.deprecated.WALLABY)
),
policy.DocumentedRuleDefault(
name='delete_security_group_rule',
check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER,
scope_types=['system', 'project'],
description='Delete a security group rule',
operations=[
{
'method': 'DELETE',
'path': RULE_RESOURCE_PATH,
},
],
deprecated_rule=policy.DeprecatedRule(
name='delete_security_group_rule',
check_str=base.RULE_ADMIN_OR_OWNER,
deprecated_reason=DEPRECATED_REASON,
deprecated_since=versionutils.deprecated.WALLABY)
),
]
def list_rules():
return rules
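# A hedged wiring sketch (the exact integration depends on how Neutron loads its
# policies; `conf` stands in for an oslo.config object): these defaults are
# typically handed to an oslo.policy enforcer, e.g.
#
#     enforcer = policy.Enforcer(conf)
#     enforcer.register_defaults(list_rules())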
| 35.021739 | 76 | 0.620112 | [
"Apache-2.0"
] | AurelienLourot/neutron | neutron/conf/policies/security_group.py | 6,444 | Python |
import re
find_image_scheme = re.compile(r'(?P<image_construction><img\b[^>]*src="(?P<image_url>[^"]+?)"[^>]*?\/>)')
# find_link_around_image_scheme = re.compile(r"<a\b[^>]*>(.*?)<img\b(.*?)<\/a>")
def move_image_to_attachment(content, attachment_object):
# collect images from the post body
intext_image_list = re.findall(find_image_scheme, content)
if intext_image_list:
# delete images form text
content = re.sub(find_image_scheme, r"", content)
# insert link to image into attachments
attachment_object += [{
"type": "Document",
"mediaType": "image/jpeg",
"url": image[1],
"name": "null"
} for image in intext_image_list]
return content
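# A minimal usage sketch (HTML and URL are illustrative):
#
#     attachments = []
#     body = move_image_to_attachment(
#         '<p>hello</p><img src="https://example.com/pic.jpg" />', attachments)
#     # body -> '<p>hello</p>'; attachments gains one "Document" entry for the image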
| 28.074074 | 106 | 0.604222 | [
"BSD-3-Clause"
] | autogestion/pubgate-rssbot | rssbot/utils.py | 758 | Python |
import numpy as np
import matplotlib.pyplot as plt
import gym
import random
# hyper parameters
# test 1
# alpha = 0.5
# gamma = 0.95
# epsilon = 0.1
epsilon = 0.1
alpha = 0.1
gamma = 0.1
def update_sarsa_table(sarsa, state, action, reward, next_state, next_action, alpha, gamma):
'''
    update the SARSA state-action value; the main difference from Q-learning is that
    it uses the value of the actually chosen next action (on-policy) rather than the
    maximum over next actions
'''
next_max = sarsa[next_state,next_action] # corresponding action-state value to current action
# print(f'current status is: {type(q[pre_state,action])},{type(alpha)},{type(reward)},{type(gamma)},{type(next_max)}')
sarsa[state,action] = sarsa[state,action] + alpha * (reward + gamma * next_max - sarsa[state,action])
def epsilon_greedy_policy_sarsa(env, state, sarsa, epsilon):
'''
epsilon greedy policy for q learning to generate actions
'''
if random.uniform(0,1) < epsilon:
return env.action_space.sample()
else:
return np.argmax(sarsa[state])
def epsilon_greedy_policy(env, state, q, epsilon):
'''
epsilon greedy policy for q learning to generate actions
'''
if random.uniform(0,1) < epsilon:
return env.action_space.sample()
else:
return np.argmax(q[state])
def update_q_table(q, pre_state, action, reward, next_state, alpha, gamma):
    '''
    Q-learning update: move Q(state, action) toward reward + gamma * max_a Q(next_state, a).
    '''
next_max = np.max(q[next_state]) # max state-action value for next state
# print(f'current status is: {type(q[pre_state,action])},{type(alpha)},{type(reward)},{type(gamma)},{type(next_max)}')
q[pre_state,action] = q[pre_state,action] + alpha * (reward + gamma * next_max - q[pre_state,action])
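# The rule above is the standard tabular Q-learning target:
#     Q(S,A) <- Q(S,A) + alpha * (R + gamma * max_a Q(S',a) - Q(S,A))
# In the double Q-learning loop further below, q1 or q2 is updated with probability
# 0.5 on each step. Note that the canonical double Q-learning update evaluates the
# greedy action of one table with the other table's values; this script reuses the
# single-table update on whichever table is chosen.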
#-----------------------q learning-------------------------------------------
env = gym.make("Taxi-v3")
# initialize q table
q = np.zeros((env.observation_space.n, env.action_space.n))
q_pre = np.zeros((env.observation_space.n, env.action_space.n)) # to check convergence when training
reward_record = []
error_record = []
# loop for each episode:
for episode in range(5000):
r = 0
state = env.reset()
while True:# loop for each step of episode
# choose A from S using policy derived from Q(e.g, epsilon greedy policy)
action = epsilon_greedy_policy(env,state,q,epsilon)
# take action A, observe R, S'
next_state, reward, done, _ = env.step(action)
# update Q(S,A)
update_q_table(q,state,action,reward,next_state,alpha,gamma)
# S<--S'
state = next_state
r += reward
if done:
break
reward_record.append(r)
error = 0
for i in range(q.shape[0]):
error = error + np.sum(np.abs(q[i]-q_pre[i]))
# print(f'{np.abs(q[i]-q_pre[i])},{np.sum(np.abs(q[i]-q_pre[i]))}')
error_record.append(error)
q_pre = np.copy(q)
if episode%100 == 0:
print(f'{episode}th episode: {r}, {error}')
#close game env
env.close()
#plot diagram
# plt.plot(list(range(5000)),reward_record)
# plt.show()
# plt.plot(list(range(5000)),error_record)
# plt.show()
#double q learning
env = gym.make("Taxi-v3")
# initialize q table
q1 = np.zeros((env.observation_space.n, env.action_space.n))
q2 = np.zeros((env.observation_space.n, env.action_space.n))
q1_pre = np.zeros((env.observation_space.n, env.action_space.n)) # to check convergence when training
q2_pre = np.zeros((env.observation_space.n, env.action_space.n)) # to check convergence when training
# reward and error record
d_reward_record = []
d_error_record = []
# loop for each episode:
for episode in range(5000):
r = 0
state = env.reset()
while True:# loop for each step of episode
# choose A from S using policy derived from Q1+Q2(e.g, epsilon greedy policy)
action = epsilon_greedy_policy(env,state,q1+q2,epsilon)
# take action A, observe R, S'
next_state, reward, done, _ = env.step(action)
# with 0.5 probability:
if random.uniform(0,1) < 0.5:
update_q_table(q1,state,action,reward,next_state,alpha,gamma)
else:
update_q_table(q2,state,action,reward,next_state,alpha,gamma)
# S<--S'
state = next_state
r += reward
if done:
break
d_reward_record.append(r)
error = 0
    for i in range(q1.shape[0]):
error = error + 0.5 * np.sum(np.abs(q1[i]-q1_pre[i])) + 0.5 * np.sum(np.abs(q2[i]-q2_pre[i]))
# print(f'{np.abs(q[i]-q_pre[i])},{np.sum(np.abs(q[i]-q_pre[i]))}')
d_error_record.append(error)
q1_pre = np.copy(q1)
q2_pre = np.copy(q2)
if episode%100 == 0:
print(f'{episode}th episode: {r}, {error}')
#close game env
env.close()
#plot diagram
plt.plot(list(range(5000)),reward_record,label='q learning')
plt.plot(list(range(5000)),d_reward_record,label='double q learning')
plt.legend()
plt.show()
plt.plot(list(range(5000)),error_record,label='q learning')
plt.plot(list(range(5000)),d_error_record, label='double q learning')
plt.legend()
plt.show()
| 31.910828 | 122 | 0.645709 | [
"MIT"
] | hadleyhzy34/reinforcement_learning | TD/double_q_learning.py | 5,010 | Python |
"""
Meta Data Extension for Python-Markdown
=======================================
This extension adds Meta Data handling to markdown.
See <https://Python-Markdown.github.io/extensions/meta_data>
for documentation.
Original code Copyright 2007-2008 [Waylan Limberg](http://achinghead.com).
All changes Copyright 2008-2014 The Python Markdown Project
License: [BSD](http://www.opensource.org/licenses/bsd-license.php)
"""
from __future__ import absolute_import
from __future__ import unicode_literals
from . import Extension
from ..preprocessors import Preprocessor
import re
import logging
log = logging.getLogger('MARKDOWN')
# Global Vars
META_RE = re.compile(r'^[ ]{0,3}(?P<key>[A-Za-z0-9_-]+):\s*(?P<value>.*)')
META_MORE_RE = re.compile(r'^[ ]{4,}(?P<value>.*)')
BEGIN_RE = re.compile(r'^-{3}(\s.*)?')
END_RE = re.compile(r'^(-{3}|\.{3})(\s.*)?')
class MetaExtension (Extension):
""" Meta-Data extension for Python-Markdown. """
def extendMarkdown(self, md, md_globals):
""" Add MetaPreprocessor to Markdown instance. """
md.preprocessors.add("meta",
MetaPreprocessor(md),
">normalize_whitespace")
class MetaPreprocessor(Preprocessor):
""" Get Meta-Data. """
def run(self, lines):
""" Parse Meta-Data and store in Markdown.Meta. """
meta = {}
key = None
if lines and BEGIN_RE.match(lines[0]):
lines.pop(0)
while lines:
line = lines.pop(0)
m1 = META_RE.match(line)
if line.strip() == '' or END_RE.match(line):
break # blank line or end of YAML header - done
if m1:
key = m1.group('key').lower().strip()
value = m1.group('value').strip()
try:
meta[key].append(value)
except KeyError:
meta[key] = [value]
else:
m2 = META_MORE_RE.match(line)
if m2 and key:
# Add another line to existing key
meta[key].append(m2.group('value').strip())
else:
lines.insert(0, line)
break # no meta data - done
self.markdown.Meta = meta
return lines
def makeExtension(*args, **kwargs):
return MetaExtension(*args, **kwargs)
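# A minimal usage sketch (document text is illustrative):
#
#     import markdown
#     md = markdown.Markdown(extensions=['markdown.extensions.meta'])
#     html = md.convert("Title: My Doc\nAuthor: Jane\n\nBody text")
#     md.Meta   # -> {'title': ['My Doc'], 'author': ['Jane']}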
| 30.316456 | 74 | 0.561169 | [
"MIT"
] | AzDan/Sac-Portal | venv/lib/python3.6/site-packages/markdown/extensions/meta.py | 2,395 | Python |
from datetime import datetime, timedelta
from typing import List, Optional
from django.conf import settings
from django.core.cache import cache
from django.utils.translation import ugettext as _
from celery.schedules import crontab
from celery.task import periodic_task, task
from celery.utils.log import get_task_logger
from dimagi.utils.couch import CriticalSection
from corehq.apps.domain.models import Domain
from corehq.apps.domain_migration_flags.api import any_migrations_in_progress
from corehq.form_processor.interfaces.dbaccessors import FormAccessors
from corehq.motech.repeaters.dbaccessors import (
get_couch_repeat_record_ids_by_payload_id,
get_sql_repeat_records_by_payload_id,
iter_repeat_record_ids_by_repeater,
)
from corehq.motech.repeaters.models import SQLRepeatRecord
from corehq.sql_db.util import get_db_aliases_for_partitioned_query
from corehq.toggles import CASE_DEDUPE, DISABLE_CASE_UPDATE_RULE_SCHEDULED_TASK
from corehq.util.celery_utils import no_result_task
from corehq.util.decorators import serial_task
from .deduplication import reset_deduplicate_rule, backfill_deduplicate_rule
from .interfaces import FormManagementMode
from .models import (
AUTO_UPDATE_XMLNS,
AutomaticUpdateRule,
CaseDuplicate,
CaseRuleSubmission,
DomainCaseRuleRun,
)
from .utils import (
add_cases_to_case_group,
archive_or_restore_forms,
iter_cases_and_run_rules,
operate_on_payloads,
run_rules_for_case,
)
logger = get_task_logger('data_interfaces')
ONE_HOUR = 60 * 60
def _get_upload_progress_tracker(upload_id):
def _progress_tracker(current, total):
cache.set(upload_id, {
'inProgress': True,
'current': current,
'total': total,
}, ONE_HOUR)
return _progress_tracker
@no_result_task(queue='case_rule_queue', acks_late=True,
soft_time_limit=15 * settings.CELERY_TASK_SOFT_TIME_LIMIT)
def reset_and_backfill_deduplicate_rule_task(domain, rule_id):
if not CASE_DEDUPE.enabled(domain):
return
try:
rule = AutomaticUpdateRule.objects.get(
id=rule_id,
domain=domain,
workflow=AutomaticUpdateRule.WORKFLOW_DEDUPLICATE,
active=True,
deleted=False,
)
except AutomaticUpdateRule.DoesNotExist:
return
AutomaticUpdateRule.clear_caches(rule.domain, AutomaticUpdateRule.WORKFLOW_DEDUPLICATE)
reset_deduplicate_rule(rule)
backfill_deduplicate_rule(domain, rule)
@task(queue='background_queue')
def delete_duplicates_for_cases(case_ids):
CaseDuplicate.bulk_remove_unique_cases(case_ids)
CaseDuplicate.remove_duplicates_for_case_ids(case_ids)
@task(serializer='pickle', ignore_result=True)
def bulk_upload_cases_to_group(upload_id, domain, case_group_id, cases):
results = add_cases_to_case_group(
domain,
case_group_id,
cases,
progress_tracker=_get_upload_progress_tracker(upload_id)
)
cache.set(upload_id, results, ONE_HOUR)
@task(serializer='pickle')
def bulk_form_management_async(archive_or_restore, domain, couch_user, form_ids):
task = bulk_form_management_async
mode = FormManagementMode(archive_or_restore, validate=True)
if not form_ids:
return {'messages': {'errors': [_('No Forms are supplied')]}}
response = archive_or_restore_forms(domain, couch_user.user_id, couch_user.username, form_ids, mode, task)
return response
@periodic_task(serializer='pickle',
run_every=crontab(hour='*', minute=0),
queue=settings.CELERY_PERIODIC_QUEUE,
ignore_result=True
)
def run_case_update_rules(now=None):
domains = (AutomaticUpdateRule
.objects
.filter(active=True, deleted=False, workflow=AutomaticUpdateRule.WORKFLOW_CASE_UPDATE)
.values_list('domain', flat=True)
.distinct()
.order_by('domain'))
hour_to_run = now.hour if now else datetime.utcnow().hour
for domain in domains:
if not any_migrations_in_progress(domain) and not DISABLE_CASE_UPDATE_RULE_SCHEDULED_TASK.enabled(domain):
domain_obj = Domain.get_by_name(domain)
if domain_obj.auto_case_update_hour is None:
domain_hour = settings.RULE_UPDATE_HOUR
else:
domain_hour = domain_obj.auto_case_update_hour
if hour_to_run == domain_hour:
run_case_update_rules_for_domain.delay(domain, now)
@task(serializer='pickle', queue='case_rule_queue')
def run_case_update_rules_for_domain(domain, now=None):
now = now or datetime.utcnow()
domain_rules = AutomaticUpdateRule.by_domain(domain, AutomaticUpdateRule.WORKFLOW_CASE_UPDATE)
all_rule_case_types = set(domain_rules.values_list('case_type', flat=True))
for case_type in all_rule_case_types:
run_record = DomainCaseRuleRun.objects.create(
domain=domain,
started_on=datetime.utcnow(),
status=DomainCaseRuleRun.STATUS_RUNNING,
case_type=case_type
)
for db in get_db_aliases_for_partitioned_query():
run_case_update_rules_for_domain_and_db.delay(domain, now, run_record.pk, case_type, db=db)
@serial_task(
'{domain}-{case_type}-{db}',
timeout=36 * 60 * 60,
max_retries=0,
queue='case_rule_queue',
)
def run_case_update_rules_for_domain_and_db(domain, now, run_id, case_type, db=None):
all_rules = AutomaticUpdateRule.by_domain(domain, AutomaticUpdateRule.WORKFLOW_CASE_UPDATE)
rules = list(all_rules.filter(case_type=case_type))
boundary_date = AutomaticUpdateRule.get_boundary_date(rules, now)
iterator = AutomaticUpdateRule.iter_cases(domain, case_type, boundary_date, db=db)
run = iter_cases_and_run_rules(domain, iterator, rules, now, run_id, case_type, db)
if run.status == DomainCaseRuleRun.STATUS_FINISHED:
for rule in rules:
AutomaticUpdateRule.objects.filter(pk=rule.pk).update(last_run=now)
@task(serializer='pickle', queue='background_queue', acks_late=True, ignore_result=True)
def run_case_update_rules_on_save(case):
key = 'case-update-on-save-case-{case}'.format(case=case.case_id)
with CriticalSection([key]):
update_case = True
if case.xform_ids:
last_form = FormAccessors(case.domain).get_form(case.xform_ids[-1])
update_case = last_form.xmlns != AUTO_UPDATE_XMLNS
if update_case:
rules = AutomaticUpdateRule.by_domain(case.domain,
AutomaticUpdateRule.WORKFLOW_CASE_UPDATE).filter(case_type=case.type)
now = datetime.utcnow()
run_rules_for_case(case, rules, now)
@periodic_task(run_every=crontab(hour=0, minute=0), queue='case_rule_queue', ignore_result=True)
def delete_old_rule_submission_logs():
start = datetime.utcnow()
max_age = start - timedelta(days=90)
CaseRuleSubmission.objects.filter(created_on__lt=max_age).delete()
@task(serializer='pickle')
def task_operate_on_payloads(
record_ids: List[str],
domain: str,
action, # type: Literal['resend', 'cancel', 'requeue'] # 3.8+
use_sql: bool,
):
return operate_on_payloads(record_ids, domain, action, use_sql,
task=task_operate_on_payloads)
@task(serializer='pickle')
def task_generate_ids_and_operate_on_payloads(
payload_id: Optional[str],
repeater_id: Optional[str],
domain: str,
action, # type: Literal['resend', 'cancel', 'requeue'] # 3.8+
use_sql: bool,
) -> dict:
repeat_record_ids = _get_repeat_record_ids(payload_id, repeater_id, domain,
use_sql)
return operate_on_payloads(repeat_record_ids, domain, action, use_sql,
task=task_generate_ids_and_operate_on_payloads)
def _get_repeat_record_ids(
payload_id: Optional[str],
repeater_id: Optional[str],
domain: str,
use_sql: bool,
) -> List[str]:
if not payload_id and not repeater_id:
return []
if payload_id:
if use_sql:
records = get_sql_repeat_records_by_payload_id(domain, payload_id)
return [r.id for r in records]
else:
return get_couch_repeat_record_ids_by_payload_id(domain, payload_id)
else:
if use_sql:
queryset = SQLRepeatRecord.objects.filter(
domain=domain,
repeater__repeater_id=repeater_id,
)
return [r['id'] for r in queryset.values('id')]
else:
return list(iter_repeat_record_ids_by_repeater(domain, repeater_id))
| 35.600823 | 114 | 0.715871 | [
"BSD-3-Clause"
] | akashkj/commcare-hq | corehq/apps/data_interfaces/tasks.py | 8,651 | Python |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
from hermes_python.hermes import Hermes
INTENT_HOW_ARE_YOU = "mikpan:how_are_you"
INTENT_GOOD = "bezzam:feeling_good"
INTENT_BAD = "bezzam:feeling_bad"
INTENT_ALRIGHT = "bezzam:feeling_alright"
INTENT_FILTER_FEELING = [INTENT_GOOD, INTENT_BAD, INTENT_ALRIGHT]
def main():
with Hermes("localhost:1883") as h:
h.subscribe_intent(INTENT_HOW_ARE_YOU, how_are_you_callback) \
.subscribe_intent(INTENT_GOOD, feeling_good_callback) \
.subscribe_intent(INTENT_BAD, feeling_bad_callback) \
.subscribe_intent(INTENT_ALRIGHT, feeling_alright_callback) \
.start()
def how_are_you_callback(hermes, intent_message):
session_id = intent_message.session_id
response = "I'm doing great. How about you?"
hermes.publish_continue_session(session_id, response, INTENT_FILTER_FEELING)
def feeling_good_callback(hermes, intent_message):
session_id = intent_message.session_id
response = "That's awesome! I'm happy to hear that."
hermes.publish_end_session(session_id, response)
def feeling_bad_callback(hermes, intent_message):
session_id = intent_message.session_id
response = "Sorry to hear that. I hope you feel better soon."
hermes.publish_end_session(session_id, response)
def feeling_alright_callback(hermes, intent_message):
session_id = intent_message.session_id
response = "That's cool."
hermes.publish_end_session(session_id, response)
if __name__ == "__main__":
main()
| 31.625 | 80 | 0.755599 | [
"MIT"
] | mikpan/amld19-snips-workshop | V2_action-how-are-you.py | 1,518 | Python |
"""Python 3.9.5"""
import cv2
import HandTrackingModule as htm
def thumbIncrementCheck(lmList: list[list[int]]) -> int:
"""Checks whether your thumb is up or not.
No matter what hand you use.
returns 1 if thumb is up else 0"""
count = 0
t_x = lmList[4][1]
p_x = lmList[17][1]
if t_x > p_x: # If true: RIGHT hand
if lmList[4][1] >= lmList[2][1]:
count += 1
else: # ELse: LEFT hand
if lmList[4][1] <= lmList[2][1]:
count += 1
return count
def textOutput(count, cc) -> str:
"""Returns an appropriate text output depending on
`count` and `cc`."""
text = "NOTHING"
if (count, cc) == (2, 2):
text = "SCISSOR"
elif count == 0:
text = "ROCK"
elif count == 5:
text = "PAPER"
else:
pass
return text
def main():
# cap = cv2.VideoCapture(0) # opens the camera
detector = htm.HandDetector()
while True:
        # cv2.imread returns only the image (there is no success flag to unpack)
        img = cv2.imread("/home/laughinglouds/Pictures/Webcam/2021-04-13-133250.jpg")
img = detector.findHands(img)
lmlist = detector.findPosition(img, draw=True)
# If a hand is not detected value will be 0
# else non-zero (21)
hand_exists = len(lmlist)
tipIDs = [4, 8, 12, 16, 20] # Represents fingertips
dipIDs = [2, 7, 11, 15, 19] # Represents landmarks below the tips
count = 0 # keeps count of how many fingers are up
cc = 0 # for later checking if `Scissor` or not
if hand_exists:
# Looping for the five fingers
for i in range(0, 5):
if i == 0:
count += thumbIncrementCheck(lmlist)
else:
# 8: Index finger
# 12: Middle finger
if (lmlist[tipIDs[i]][2] < lmlist[dipIDs[i]][2]) and (
tipIDs[i] in (8, 12) # if either index or middle
):
count += 1
cc += 1
elif lmlist[tipIDs[i]][2] < lmlist[dipIDs[i]][2]:
count += 1
# print(cc)
else:
count = -1
txt = textOutput(count, cc)
# (10, 140) is coordinate of txt on the screen
cv2.putText(img, str(txt), (10, 140), cv2.FONT_HERSHEY_PLAIN, 3, (0, 0, 255), 3)
cv2.imshow("Image", img)
# close key isn't working for me
# os: linux mint 20.1
if cv2.waitKey(1) & 0xFF == ord("q"):
break
if __name__ == "__main__":
main()
| 29.954023 | 94 | 0.506523 | [
"MIT"
] | laughingclouds/dt-mst-project | forOutput.py | 2,606 | Python |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable= arguments-differ
"""Basic neural network layers."""
from ..block import Block, HybridBlock
from ..utils import _indent
class Sequential(Block):
"""Stacks `Block`s sequentially.
Example::
net = nn.Sequential()
# use net's name_scope to give child Blocks appropriate names.
with net.name_scope():
net.add(nn.Dense(10, activation='relu'))
net.add(nn.Dense(20))
"""
def __init__(self, prefix=None, params=None):
super(Sequential, self).__init__(prefix=prefix, params=params)
def add(self, block):
"""Adds block on top of the stack."""
self.register_child(block)
def forward(self, x):
for block in self._children:
x = block(x)
return x
def __repr__(self):
s = '{name}(\n{modstr}\n)'
modstr = '\n'.join([' ({key}): {block}'.format(key=key,
block=_indent(block.__repr__(), 2))
for key, block in enumerate(self._children)
if isinstance(block, Block)])
return s.format(name=self.__class__.__name__,
modstr=modstr)
def __getitem__(self, i):
return self._children[i]
def __len__(self):
return len(self._children)
class HybridSequential(HybridBlock):
"""Stacks `HybridBlock`s sequentially.
Example::
        net = nn.HybridSequential()
# use net's name_scope to give child Blocks appropriate names.
with net.name_scope():
net.add(nn.Dense(10, activation='relu'))
net.add(nn.Dense(20))
"""
def __init__(self, prefix=None, params=None):
super(HybridSequential, self).__init__(prefix=prefix, params=params)
def add(self, block):
"""Adds block on top of the stack."""
self.register_child(block)
def hybrid_forward(self, F, x):
for block in self._children:
x = block(x)
return x
def __repr__(self):
s = '{name}(\n{modstr}\n)'
modstr = '\n'.join([' ({key}): {block}'.format(key=key,
block=_indent(block.__repr__(), 2))
for key, block in enumerate(self._children)
if isinstance(block, Block)])
return s.format(name=self.__class__.__name__,
modstr=modstr)
def __getitem__(self, i):
return self._children[i]
def __len__(self):
return len(self._children)
class Dense(HybridBlock):
"""Just your regular densely-connected NN layer.
`Dense` implements the operation:
`output = activation(dot(input, weight) + bias)`
where `activation` is the element-wise activation function
passed as the `activation` argument, `weight` is a weights matrix
created by the layer, and `bias` is a bias vector created by the layer
(only applicable if `use_bias` is `True`).
Note: the input must be a tensor with rank 2. Use `flatten` to convert it
to rank 2 manually if necessary.
Parameters
----------
units : int
Dimensionality of the output space.
activation : str
Activation function to use. See help on `Activation` layer.
If you don't specify anything, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias : bool
Whether the layer uses a bias vector.
weight_initializer : str or `Initializer`
Initializer for the `kernel` weights matrix.
bias_initializer: str or `Initializer`
Initializer for the bias vector.
in_units : int, optional
Size of the input data. If not specified, initialization will be
deferred to the first time `forward` is called and `in_units`
will be inferred from the shape of input data.
prefix : str or None
See document of `Block`.
params : ParameterDict or None
See document of `Block`.
Input shape:
A 2D input with shape `(batch_size, in_units)`.
Output shape:
The output would have shape `(batch_size, units)`.
"""
def __init__(self, units, activation=None, use_bias=True,
weight_initializer=None, bias_initializer='zeros',
in_units=0, **kwargs):
super(Dense, self).__init__(**kwargs)
with self.name_scope():
self._units = units
self._in_units = in_units
self.weight = self.params.get('weight', shape=(units, in_units),
init=weight_initializer,
allow_deferred_init=True)
if use_bias:
self.bias = self.params.get('bias', shape=(units,),
init=bias_initializer,
allow_deferred_init=True)
else:
self.bias = None
if activation is not None:
self.act = Activation(activation, prefix=activation+'_')
else:
self.act = None
def hybrid_forward(self, F, x, weight, bias=None):
if bias is None:
act = F.FullyConnected(x, weight, no_bias=True, num_hidden=self._units,
name='fwd')
else:
act = F.FullyConnected(x, weight, bias, num_hidden=self._units,
name='fwd')
if self.act is not None:
act = self.act(act)
return act
def __repr__(self):
s = '{name}({layout}, {act})'
return s.format(name=self.__class__.__name__,
act=self.act if self.act else 'linear',
layout='{0} -> {1}'.format(self._in_units, self._units) if self._in_units
else self._units)
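# A minimal usage sketch for Dense (shapes are illustrative; assumes `import mxnet as mx`):
#
#     net = Dense(10, activation='relu', in_units=5)
#     net.collect_params().initialize()
#     out = net(mx.nd.ones((2, 5)))   # -> shape (2, 10)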
class Activation(HybridBlock):
"""Applies an activation function to input.
Parameters
----------
activation : str
Name of activation function to use.
See :func:`~mxnet.ndarray.Activation` for available choices.
Input shape:
Arbitrary.
Output shape:
Same shape as input.
"""
def __init__(self, activation, **kwargs):
self._act_type = activation
super(Activation, self).__init__(**kwargs)
def _alias(self):
return self._act_type
def hybrid_forward(self, F, x):
return F.Activation(x, act_type=self._act_type, name='fwd')
def __repr__(self):
s = '{name}({_act_type})'
return s.format(name=self.__class__.__name__,
**self.__dict__)
class Dropout(HybridBlock):
"""Applies Dropout to the input.
Dropout consists in randomly setting a fraction `rate` of input units
to 0 at each update during training time, which helps prevent overfitting.
Parameters
----------
rate : float
Fraction of the input units to drop. Must be a number between 0 and 1.
Input shape:
Arbitrary.
Output shape:
Same shape as input.
References
----------
`Dropout: A Simple Way to Prevent Neural Networks from Overfitting
<http://www.cs.toronto.edu/~rsalakhu/papers/srivastava14a.pdf>`_
"""
def __init__(self, rate, **kwargs):
super(Dropout, self).__init__(**kwargs)
self._rate = rate
def hybrid_forward(self, F, x):
return F.Dropout(x, p=self._rate, name='fwd')
def __repr__(self):
s = '{name}(p = {_rate})'
return s.format(name=self.__class__.__name__,
**self.__dict__)
class BatchNorm(HybridBlock):
"""Batch normalization layer (Ioffe and Szegedy, 2014).
Normalizes the input at each batch, i.e. applies a transformation
that maintains the mean activation close to 0 and the activation
standard deviation close to 1.
Parameters
----------
axis : int, default 1
The axis that should be normalized. This is typically the channels
(C) axis. For instance, after a `Conv2D` layer with `layout='NCHW'`,
set `axis=1` in `BatchNorm`. If `layout='NHWC'`, then set `axis=3`.
momentum: float, default 0.9
Momentum for the moving average.
epsilon: float, default 1e-5
Small float added to variance to avoid dividing by zero.
center: bool, default True
If True, add offset of `beta` to normalized tensor.
If False, `beta` is ignored.
scale: bool, default True
If True, multiply by `gamma`. If False, `gamma` is not used.
When the next layer is linear (also e.g. `nn.relu`),
this can be disabled since the scaling
will be done by the next layer.
beta_initializer: str or `Initializer`, default 'zeros'
Initializer for the beta weight.
gamma_initializer: str or `Initializer`, default 'ones'
Initializer for the gamma weight.
moving_mean_initializer: str or `Initializer`, default 'zeros'
Initializer for the moving mean.
moving_variance_initializer: str or `Initializer`, default 'ones'
Initializer for the moving variance.
in_channels : int, default 0
Number of channels (feature maps) in input data. If not specified,
initialization will be deferred to the first time `forward` is called
and `in_channels` will be inferred from the shape of input data.
Input shape:
Arbitrary.
Output shape:
Same shape as input.
"""
def __init__(self, axis=1, momentum=0.9, epsilon=1e-5, center=True, scale=True,
beta_initializer='zeros', gamma_initializer='ones',
running_mean_initializer='zeros', running_variance_initializer='ones',
in_channels=0, **kwargs):
super(BatchNorm, self).__init__(**kwargs)
self._kwargs = {'axis': axis, 'eps': epsilon, 'momentum': momentum,
'fix_gamma': not scale}
if in_channels != 0:
self.in_channels = in_channels
self.gamma = self.params.get('gamma', grad_req='write' if scale else 'null',
shape=(in_channels,), init=gamma_initializer,
allow_deferred_init=True,
differentiable=scale)
self.beta = self.params.get('beta', grad_req='write' if center else 'null',
shape=(in_channels,), init=beta_initializer,
allow_deferred_init=True,
differentiable=center)
self.running_mean = self.params.get('running_mean', grad_req='null',
shape=(in_channels,),
init=running_mean_initializer,
allow_deferred_init=True,
differentiable=False)
self.running_var = self.params.get('running_var', grad_req='null',
shape=(in_channels,),
init=running_variance_initializer,
allow_deferred_init=True,
differentiable=False)
def hybrid_forward(self, F, x, gamma, beta, running_mean, running_var):
return F.BatchNorm(x, gamma, beta, running_mean, running_var,
name='fwd', **self._kwargs)
def __repr__(self):
s = '{name}({content}'
if hasattr(self, 'in_channels'):
s += ', in_channels={0}'.format(self.in_channels)
s += ')'
return s.format(name=self.__class__.__name__,
content=', '.join(['='.join([k, v.__repr__()])
for k, v in self._kwargs.items()]))
class LeakyReLU(HybridBlock):
"""Leaky version of a Rectified Linear Unit.
It allows a small gradient when the unit is not active::
`f(x) = alpha * x for x < 0`,
`f(x) = x for x >= 0`.
Parameters
----------
alpha : float
slope coefficient for the negative half axis. Must be >= 0.
Input shape:
Arbitrary.
Output shape:
Same shape as input.
"""
def __init__(self, alpha, **kwargs):
super(LeakyReLU, self).__init__(**kwargs)
self._alpha = alpha
def hybrid_forward(self, F, x):
return F.LeakyReLU(x, act_type='leaky', slope=self._alpha, name='fwd')
def __repr__(self):
s = '{name}({alpha})'
return s.format(name=self.__class__.__name__,
alpha=self._alpha)
class Embedding(HybridBlock):
"""Turns non-negative integers (indexes/tokens) into dense vectors
of fixed size. eg. [[4], [20]] -> [[0.25, 0.1], [0.6, -0.2]]
Parameters
----------
input_dim : int
Size of the vocabulary, i.e. maximum integer index + 1.
output_dim : int
Dimension of the dense embedding.
dtype : str or np.dtype, default 'float32'
Data type of output embeddings.
weight_initializer : Initializer
Initializer for the `embeddings` matrix.
Input shape:
2D tensor with shape: `(N, M)`.
Output shape:
3D tensor with shape: `(N, M, output_dim)`.
"""
def __init__(self, input_dim, output_dim, dtype='float32',
weight_initializer=None, **kwargs):
super(Embedding, self).__init__(**kwargs)
self._kwargs = {'input_dim': input_dim, 'output_dim': output_dim,
'dtype': dtype}
self.weight = self.params.get('weight', shape=(input_dim, output_dim),
init=weight_initializer,
allow_deferred_init=True)
def hybrid_forward(self, F, x, weight):
return F.Embedding(x, weight, name='fwd', **self._kwargs)
def __repr__(self):
s = '{block_name}({input_dim} -> {output_dim}, {dtype})'
return s.format(block_name=self.__class__.__name__,
**self._kwargs)
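# A minimal usage sketch for Embedding (sizes are illustrative; assumes `import mxnet as mx`):
#
#     embed = Embedding(input_dim=1000, output_dim=16)
#     embed.collect_params().initialize()
#     vecs = embed(mx.nd.array([[4, 20]]))   # -> shape (1, 2, 16)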
class Flatten(HybridBlock):
"""Flattens the input to two dimensional.
Input shape:
Arbitrary shape `(N, a, b, c, ...)`
Output shape:
2D tensor with shape: `(N, a*b*c...)`
"""
def __init__(self, **kwargs):
super(Flatten, self).__init__(**kwargs)
def hybrid_forward(self, F, x):
return x.reshape((0, -1))
def __repr__(self):
return self.__class__.__name__
| 35.637413 | 97 | 0.580325 | [
"Apache-2.0"
] | IIMarch/mxnet | python/mxnet/gluon/nn/basic_layers.py | 15,431 | Python |
import numpy as np
np.random.seed(0)
from bokeh.io import curdoc
from bokeh.layouts import widgetbox, row, column
from bokeh.models import ColumnDataSource, Select, Slider
from bokeh.plotting import figure
from bokeh.palettes import Spectral6
from sklearn import cluster, datasets
from sklearn.neighbors import kneighbors_graph
from sklearn.preprocessing import StandardScaler
# define some helper functions
def clustering(X, algorithm, n_clusters):
# normalize dataset for easier parameter selection
X = StandardScaler().fit_transform(X)
# estimate bandwidth for mean shift
bandwidth = cluster.estimate_bandwidth(X, quantile=0.3)
# connectivity matrix for structured Ward
connectivity = kneighbors_graph(X, n_neighbors=10, include_self=False)
# make connectivity symmetric
connectivity = 0.5 * (connectivity + connectivity.T)
# Generate the new colors:
if algorithm=='MiniBatchKMeans':
model = cluster.MiniBatchKMeans(n_clusters=n_clusters)
elif algorithm=='Birch':
model = cluster.Birch(n_clusters=n_clusters)
elif algorithm=='DBSCAN':
model = cluster.DBSCAN(eps=.2)
elif algorithm=='AffinityPropagation':
model = cluster.AffinityPropagation(damping=.9,
preference=-200)
elif algorithm=='MeanShift':
model = cluster.MeanShift(bandwidth=bandwidth,
bin_seeding=True)
elif algorithm=='SpectralClustering':
model = cluster.SpectralClustering(n_clusters=n_clusters,
eigen_solver='arpack',
affinity="nearest_neighbors")
elif algorithm=='Ward':
model = cluster.AgglomerativeClustering(n_clusters=n_clusters,
linkage='ward',
connectivity=connectivity)
elif algorithm=='AgglomerativeClustering':
model = cluster.AgglomerativeClustering(linkage="average",
affinity="cityblock",
n_clusters=n_clusters,
connectivity=connectivity)
model.fit(X)
if hasattr(model, 'labels_'):
y_pred = model.labels_.astype(np.int)
else:
y_pred = model.predict(X)
return X, y_pred
def get_dataset(dataset, n_samples):
if dataset == 'Noisy Circles':
return datasets.make_circles(n_samples=n_samples,
factor=0.5,
noise=0.05)
elif dataset == 'Noisy Moons':
return datasets.make_moons(n_samples=n_samples,
noise=0.05)
elif dataset == 'Blobs':
return datasets.make_blobs(n_samples=n_samples,
random_state=8)
elif dataset == "No Structure":
return np.random.rand(n_samples, 2), None
# set up initial data
n_samples = 1500
n_clusters = 2
algorithm = 'MiniBatchKMeans'
dataset = 'Noisy Circles'
X, y = get_dataset(dataset, n_samples)
X, y_pred = clustering(X, algorithm, n_clusters)
spectral = np.hstack([Spectral6] * 20)
colors = [spectral[i] for i in y]
# set up plot (styling in theme.yaml)
plot = figure(toolbar_location=None, title=algorithm)
source = ColumnDataSource(data=dict(x=X[:, 0], y=X[:, 1], colors=colors))
plot.circle('x', 'y', fill_color='colors', line_color=None, source=source)
# set up widgets
clustering_algorithms= [
'MiniBatchKMeans',
'AffinityPropagation',
'MeanShift',
'SpectralClustering',
'Ward',
'AgglomerativeClustering',
'DBSCAN',
'Birch'
]
datasets_names = [
'Noisy Circles',
'Noisy Moons',
'Blobs',
'No Structure'
]
algorithm_select = Select(value='MiniBatchKMeans',
title='Select algorithm:',
width=200,
options=clustering_algorithms)
dataset_select = Select(value='Noisy Circles',
title='Select dataset:',
width=200,
options=datasets_names)
samples_slider = Slider(title="Number of samples",
value=1500.0,
start=1000.0,
end=3000.0,
step=100,
width=400)
clusters_slider = Slider(title="Number of clusters",
value=2.0,
start=2.0,
end=10.0,
step=1,
width=400)
# set up callbacks
def update_algorithm_or_clusters(attrname, old, new):
global X
algorithm = algorithm_select.value
n_clusters = int(clusters_slider.value)
X, y_pred = clustering(X, algorithm, n_clusters)
colors = [spectral[i] for i in y_pred]
source.data['colors'] = colors
source.data['x'] = X[:, 0]
source.data['y'] = X[:, 1]
plot.title.text = algorithm
def update_samples_or_dataset(attrname, old, new):
global X, y
dataset = dataset_select.value
algorithm = algorithm_select.value
n_clusters = int(clusters_slider.value)
n_samples = int(samples_slider.value)
X, y = get_dataset(dataset, n_samples)
X, y_pred = clustering(X, algorithm, n_clusters)
colors = [spectral[i] for i in y_pred]
source.data['x'] = X[:, 0]
source.data['y'] = X[:, 1]
source.data['colors'] = colors
algorithm_select.on_change('value', update_algorithm_or_clusters)
clusters_slider.on_change('value', update_algorithm_or_clusters)
dataset_select.on_change('value', update_samples_or_dataset)
samples_slider.on_change('value', update_samples_or_dataset)
# set up layout
selects = row(dataset_select, algorithm_select, width=420)
inputs = column(selects, widgetbox(samples_slider, clusters_slider))
# add to document
curdoc().add_root(row(inputs, plot))
curdoc().title = "Clustering"
| 31.473958 | 74 | 0.608307 | [
"BSD-3-Clause"
] | SiggyF/bokeh | examples/app/clustering/main.py | 6,043 | Python |
import csv
source_file = "Resources/budget_data.csv"
output_file = "Resources/budget_data_analysis.txt"
#initialize months counter, total income, decrease and increase in revenue amounts
number_of_months = 0 # to track the total number of months
income_total = 0 #variable to hold total income as we iterate through the csv
previous_income = 0 #variable to hold previously evaluated value from csv
greatest_profit_increase = ["",0] #list to hold the greatest profit increase, initialized to lowest value 0
greatest_loss_decrease = ["",1000000000000] #list to hold the greatest loss decrease, initialized to highest value
change_in_pl = [] #list to hold change in profit/loss as we iterate through the csv
change_in_income = 0
#print (revenue_decrease)
with open(source_file) as budget_data:
csv_reader = csv.DictReader(budget_data)
for row in csv_reader:
number_of_months = number_of_months + 1
#print(row["Profit/Losses"])
income_total = income_total + int(row["Profit/Losses"])
#print(row)
#trace the changes in amount
change_in_income = int(row["Profit/Losses"]) - previous_income
#print(change_in_income)
#reinitiate the value to the record we completed evaluating
previous_income = int(row["Profit/Losses"])
#print(previous_income)
#greatest increase
if(change_in_income > greatest_profit_increase[1]):
greatest_profit_increase[0] = row["Date"]
greatest_profit_increase[1] = change_in_income
#greatest decrease
if(change_in_income < greatest_loss_decrease[1]):
greatest_loss_decrease[0] = row["Date"]
greatest_loss_decrease[1] = change_in_income
#append to the change_in_pl for sum calculations
#print(int(row['Profit/Losses']))
change_in_pl.append(int(row['Profit/Losses']))
#calculate net profit or loss
net_profit = sum(change_in_pl)
#print(net_profit)
print()
print('Financial Anlysis')
print('--------------------------')
print("Total Months: " + str(number_of_months))
print("Total Income: " + "$" + str(net_profit))
print("Greatest Increase in Profits: " + str(greatest_profit_increase[0]) + " $" + str(greatest_profit_increase[1]))
print("Greatest Decrease in Profits: " + str(greatest_loss_decrease[0]) + " $" + str(greatest_loss_decrease[1]))
#write outup to text file
with open(output_file,"w") as results:
results.write("Total Months: " + str(number_of_months))
results.write("\n")
results.write("Total Income: " + "$" + str(net_profit))
results.write("\n")
results.write("Greatest Increase in Profits: " + str(greatest_profit_increase[0]) + " $" + str(greatest_profit_increase[1]))
results.write("\n")
results.write("Greatest Decrease in Profits: " + str(greatest_loss_decrease[0]) + " $" + str(greatest_loss_decrease[1])) | 45.6875 | 128 | 0.691518 | [
"MIT"
] | abelgk/python-challenge | PyBank/main.py | 2,924 | Python |
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
import os
import subprocess
import sys
import time
import urllib2
import xml.dom.minidom
from webkitpy.common.net.file_uploader import FileUploader
try:
import json
except ImportError:
# python 2.5 compatibility
import webkitpy.thirdparty.simplejson as json
# A JSON results generator for generic tests.
# FIXME: move this code out of the layout_package directory.
_log = logging.getLogger(__name__)
_JSON_PREFIX = "ADD_RESULTS("
_JSON_SUFFIX = ");"
def has_json_wrapper(string):
return string.startswith(_JSON_PREFIX) and string.endswith(_JSON_SUFFIX)
def strip_json_wrapper(json_content):
# FIXME: Kill this code once the server returns json instead of jsonp.
if has_json_wrapper(json_content):
return json_content[len(_JSON_PREFIX):len(json_content) - len(_JSON_SUFFIX)]
return json_content
def load_json(filesystem, file_path):
content = filesystem.read_text_file(file_path)
content = strip_json_wrapper(content)
return json.loads(content)
def write_json(filesystem, json_object, file_path, callback=None):
# Specify separators in order to get compact encoding.
json_string = json.dumps(json_object, separators=(',', ':'))
if callback:
json_string = callback + "(" + json_string + ");"
filesystem.write_text_file(file_path, json_string)
def convert_trie_to_flat_paths(trie, prefix=None):
"""Converts the directory structure in the given trie to flat paths, prepending a prefix to each."""
result = {}
for name, data in trie.iteritems():
if prefix:
name = prefix + "/" + name
if len(data) and not "results" in data:
result.update(convert_trie_to_flat_paths(data, name))
else:
result[name] = data
return result
def add_path_to_trie(path, value, trie):
"""Inserts a single flat directory path and associated value into a directory trie structure."""
if not "/" in path:
trie[path] = value
return
directory, slash, rest = path.partition("/")
if not directory in trie:
trie[directory] = {}
add_path_to_trie(rest, value, trie[directory])
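# Illustrative sketch, not part of the original module (the path and the leaf
# value below are made up): for a leaf dict shaped like the per-test entries
# used elsewhere in this file,
#
#   trie = {}
#   add_path_to_trie("foo/bar/baz.html", {"results": [[1, "P"]], "times": [[1, 0]]}, trie)
#   # trie == {"foo": {"bar": {"baz.html": {"results": [[1, "P"]], "times": [[1, 0]]}}}}
#   convert_trie_to_flat_paths(trie)
#   # == {"foo/bar/baz.html": {"results": [[1, "P"]], "times": [[1, 0]]}}
#
# i.e. the two helpers above invert each other for such leaves.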
def test_timings_trie(port, individual_test_timings):
"""Breaks a test name into chunks by directory and puts the test time as a value in the lowest part, e.g.
foo/bar/baz.html: 1ms
foo/bar/baz1.html: 3ms
becomes
foo: {
bar: {
baz.html: 1,
baz1.html: 3
}
}
"""
trie = {}
for test_result in individual_test_timings:
test = test_result.test_name
add_path_to_trie(test, int(1000 * test_result.test_run_time), trie)
return trie
# FIXME: We already have a TestResult class in test_results.py
class TestResult(object):
"""A simple class that represents a single test result."""
# Test modifier constants.
(NONE, FAILS, FLAKY, DISABLED) = range(4)
def __init__(self, test, failed=False, elapsed_time=0):
self.test_name = test
self.failed = failed
self.test_run_time = elapsed_time
test_name = test
try:
test_name = test.split('.')[1]
except IndexError:
_log.warn("Invalid test name: %s.", test)
pass
if test_name.startswith('FAILS_'):
self.modifier = self.FAILS
elif test_name.startswith('FLAKY_'):
self.modifier = self.FLAKY
elif test_name.startswith('DISABLED_'):
self.modifier = self.DISABLED
else:
self.modifier = self.NONE
def fixable(self):
return self.failed or self.modifier == self.DISABLED
class JSONResultsGeneratorBase(object):
"""A JSON results generator for generic tests."""
MAX_NUMBER_OF_BUILD_RESULTS_TO_LOG = 750
# Min time (seconds) that will be added to the JSON.
MIN_TIME = 1
# Note that in non-chromium tests those chars are used to indicate
# test modifiers (FAILS, FLAKY, etc) but not actual test results.
PASS_RESULT = "P"
SKIP_RESULT = "X"
FAIL_RESULT = "F"
FLAKY_RESULT = "L"
NO_DATA_RESULT = "N"
MODIFIER_TO_CHAR = {TestResult.NONE: PASS_RESULT,
TestResult.DISABLED: SKIP_RESULT,
TestResult.FAILS: FAIL_RESULT,
TestResult.FLAKY: FLAKY_RESULT}
VERSION = 4
VERSION_KEY = "version"
RESULTS = "results"
TIMES = "times"
BUILD_NUMBERS = "buildNumbers"
TIME = "secondsSinceEpoch"
TESTS = "tests"
FIXABLE_COUNT = "fixableCount"
FIXABLE = "fixableCounts"
ALL_FIXABLE_COUNT = "allFixableCount"
RESULTS_FILENAME = "results.json"
TIMES_MS_FILENAME = "times_ms.json"
INCREMENTAL_RESULTS_FILENAME = "incremental_results.json"
URL_FOR_TEST_LIST_JSON = "http://%s/testfile?builder=%s&name=%s&testlistjson=1&testtype=%s&master=%s"
# FIXME: Remove generate_incremental_results once the reference to it in
# http://src.chromium.org/viewvc/chrome/trunk/tools/build/scripts/slave/gtest_slave_utils.py
# has been removed.
def __init__(self, port, builder_name, build_name, build_number,
results_file_base_path, builder_base_url,
test_results_map, svn_repositories=None,
test_results_server=None,
test_type="",
master_name="",
generate_incremental_results=None):
"""Modifies the results.json file. Grabs it off the archive directory
if it is not found locally.
Args
port: port-specific wrapper
builder_name: the builder name (e.g. Webkit).
build_name: the build name (e.g. webkit-rel).
build_number: the build number.
results_file_base_path: Absolute path to the directory containing the
results json file.
builder_base_url: the URL where we have the archived test results.
If this is None no archived results will be retrieved.
test_results_map: A dictionary that maps test_name to TestResult.
svn_repositories: A (json_field_name, svn_path) pair for SVN
repositories that tests rely on. The SVN revision will be
included in the JSON with the given json_field_name.
test_results_server: server that hosts test results json.
test_type: test type string (e.g. 'layout-tests').
master_name: the name of the buildbot master.
"""
self._port = port
self._filesystem = port._filesystem
self._builder_name = builder_name
self._build_name = build_name
self._build_number = build_number
self._builder_base_url = builder_base_url
self._results_directory = results_file_base_path
self._test_results_map = test_results_map
self._test_results = test_results_map.values()
self._svn_repositories = svn_repositories
if not self._svn_repositories:
self._svn_repositories = {}
self._test_results_server = test_results_server
self._test_type = test_type
self._master_name = master_name
self._archived_results = None
def generate_json_output(self):
json_object = self.get_json()
if json_object:
file_path = self._filesystem.join(self._results_directory, self.INCREMENTAL_RESULTS_FILENAME)
write_json(self._filesystem, json_object, file_path)
def generate_times_ms_file(self):
# FIXME: rename to generate_times_ms_file. This needs to be coordinated with
# changing the calls to this on the chromium build slaves.
times = test_timings_trie(self._port, self._test_results_map.values())
file_path = self._filesystem.join(self._results_directory, self.TIMES_MS_FILENAME)
write_json(self._filesystem, times, file_path)
def get_json(self):
"""Gets the results for the results.json file."""
results_json = {}
if not results_json:
results_json, error = self._get_archived_json_results()
if error:
# If there was an error don't write a results.json
# file at all as it would lose all the information on the
# bot.
_log.error("Archive directory is inaccessible. Not "
"modifying or clobbering the results.json "
"file: " + str(error))
return None
builder_name = self._builder_name
if results_json and builder_name not in results_json:
_log.debug("Builder name (%s) is not in the results.json file."
% builder_name)
self._convert_json_to_current_version(results_json)
if builder_name not in results_json:
results_json[builder_name] = (
self._create_results_for_builder_json())
results_for_builder = results_json[builder_name]
self._insert_generic_metadata(results_for_builder)
self._insert_failure_summaries(results_for_builder)
# Update the all failing tests with result type and time.
tests = results_for_builder[self.TESTS]
all_failing_tests = self._get_failed_test_names()
all_failing_tests.update(convert_trie_to_flat_paths(tests))
for test in all_failing_tests:
self._insert_test_time_and_result(test, tests)
return results_json
def set_archived_results(self, archived_results):
self._archived_results = archived_results
def upload_json_files(self, json_files):
"""Uploads the given json_files to the test_results_server (if the
test_results_server is given)."""
if not self._test_results_server:
return
if not self._master_name:
_log.error("--test-results-server was set, but --master-name was not. Not uploading JSON files.")
return
_log.info("Uploading JSON files for builder: %s", self._builder_name)
attrs = [("builder", self._builder_name),
("testtype", self._test_type),
("master", self._master_name)]
files = [(file, self._filesystem.join(self._results_directory, file))
for file in json_files]
url = "http://%s/testfile/upload" % self._test_results_server
uploader = FileUploader(url)
try:
# Set uploading timeout in case appengine server is having problem.
# 120 seconds are more than enough to upload test results.
uploader.upload(attrs, files, 120)
except Exception, err:
_log.error("Upload failed: %s" % err)
return
_log.info("JSON files uploaded.")
def _get_test_timing(self, test_name):
"""Returns test timing data (elapsed time) in second
for the given test_name."""
if test_name in self._test_results_map:
# Floor for now to get time in seconds.
return int(self._test_results_map[test_name].test_run_time)
return 0
def _get_failed_test_names(self):
"""Returns a set of failed test names."""
return set([r.test_name for r in self._test_results if r.failed])
def _get_modifier_char(self, test_name):
"""Returns a single char (e.g. SKIP_RESULT, FAIL_RESULT,
PASS_RESULT, NO_DATA_RESULT, etc) that indicates the test modifier
for the given test_name.
"""
if test_name not in self._test_results_map:
return self.__class__.NO_DATA_RESULT
test_result = self._test_results_map[test_name]
if test_result.modifier in self.MODIFIER_TO_CHAR.keys():
return self.MODIFIER_TO_CHAR[test_result.modifier]
return self.__class__.PASS_RESULT
def _get_result_char(self, test_name):
"""Returns a single char (e.g. SKIP_RESULT, FAIL_RESULT,
PASS_RESULT, NO_DATA_RESULT, etc) that indicates the test result
for the given test_name.
"""
if test_name not in self._test_results_map:
return self.__class__.NO_DATA_RESULT
test_result = self._test_results_map[test_name]
if test_result.modifier == TestResult.DISABLED:
return self.__class__.SKIP_RESULT
if test_result.failed:
return self.__class__.FAIL_RESULT
return self.__class__.PASS_RESULT
# FIXME: Callers should use scm.py instead.
# FIXME: Identify and fix the run-time errors that were observed on Windows
# chromium buildbot when we had updated this code to use scm.py once before.
def _get_svn_revision(self, in_directory):
"""Returns the svn revision for the given directory.
Args:
in_directory: The directory where svn is to be run.
"""
if self._filesystem.exists(self._filesystem.join(in_directory, '.svn')):
# Note: Not thread safe: http://bugs.python.org/issue2320
output = subprocess.Popen(["svn", "info", "--xml"],
cwd=in_directory,
shell=(sys.platform == 'win32'),
stdout=subprocess.PIPE).communicate()[0]
try:
dom = xml.dom.minidom.parseString(output)
return dom.getElementsByTagName('entry')[0].getAttribute(
'revision')
except xml.parsers.expat.ExpatError:
return ""
return ""
def _get_archived_json_results(self):
"""Download JSON file that only contains test
name list from test-results server. This is for generating incremental
JSON so the file generated has info for tests that failed before but
pass or are skipped from current run.
Returns (archived_results, error) tuple where error is None if results
were successfully read.
"""
results_json = {}
old_results = None
error = None
if not self._test_results_server:
return {}, None
results_file_url = (self.URL_FOR_TEST_LIST_JSON %
(urllib2.quote(self._test_results_server),
urllib2.quote(self._builder_name),
self.RESULTS_FILENAME,
urllib2.quote(self._test_type),
urllib2.quote(self._master_name)))
try:
# FIXME: We should talk to the network via a Host object.
results_file = urllib2.urlopen(results_file_url)
info = results_file.info()
old_results = results_file.read()
except urllib2.HTTPError, http_error:
# A non-4xx status code means the bot is hosed for some reason
# and we can't grab the results.json file off of it.
            if (http_error.code < 400 or http_error.code >= 500):
error = http_error
except urllib2.URLError, url_error:
error = url_error
if old_results:
# Strip the prefix and suffix so we can get the actual JSON object.
old_results = strip_json_wrapper(old_results)
try:
results_json = json.loads(old_results)
except:
_log.debug("results.json was not valid JSON. Clobbering.")
# The JSON file is not valid JSON. Just clobber the results.
results_json = {}
else:
_log.debug('Old JSON results do not exist. Starting fresh.')
results_json = {}
return results_json, error
def _insert_failure_summaries(self, results_for_builder):
"""Inserts aggregate pass/failure statistics into the JSON.
This method reads self._test_results and generates
FIXABLE, FIXABLE_COUNT and ALL_FIXABLE_COUNT entries.
Args:
results_for_builder: Dictionary containing the test results for a
single builder.
"""
# Insert the number of tests that failed or skipped.
fixable_count = len([r for r in self._test_results if r.fixable()])
self._insert_item_into_raw_list(results_for_builder,
fixable_count, self.FIXABLE_COUNT)
# Create a test modifiers (FAILS, FLAKY etc) summary dictionary.
entry = {}
for test_name in self._test_results_map.iterkeys():
result_char = self._get_modifier_char(test_name)
entry[result_char] = entry.get(result_char, 0) + 1
# Insert the pass/skip/failure summary dictionary.
self._insert_item_into_raw_list(results_for_builder, entry,
self.FIXABLE)
# Insert the number of all the tests that are supposed to pass.
all_test_count = len(self._test_results)
self._insert_item_into_raw_list(results_for_builder,
all_test_count, self.ALL_FIXABLE_COUNT)
def _insert_item_into_raw_list(self, results_for_builder, item, key):
"""Inserts the item into the list with the given key in the results for
this builder. Creates the list if no such list exists.
Args:
results_for_builder: Dictionary containing the test results for a
single builder.
item: Number or string to insert into the list.
key: Key in results_for_builder for the list to insert into.
"""
if key in results_for_builder:
raw_list = results_for_builder[key]
else:
raw_list = []
raw_list.insert(0, item)
raw_list = raw_list[:self.MAX_NUMBER_OF_BUILD_RESULTS_TO_LOG]
results_for_builder[key] = raw_list
def _insert_item_run_length_encoded(self, item, encoded_results):
"""Inserts the item into the run-length encoded results.
Args:
item: String or number to insert.
encoded_results: run-length encoded results. An array of arrays, e.g.
[[3,'A'],[1,'Q']] encodes AAAQ.
"""
if len(encoded_results) and item == encoded_results[0][1]:
num_results = encoded_results[0][0]
if num_results <= self.MAX_NUMBER_OF_BUILD_RESULTS_TO_LOG:
encoded_results[0][0] = num_results + 1
else:
# Use a list instead of a class for the run-length encoding since
# we want the serialized form to be concise.
encoded_results.insert(0, [1, item])
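    # Illustrative sketch, not from the original file (sample values made up):
    # with encoded_results == [[2, 'P'], [1, 'F']] (newest run first), inserting
    # another 'P' mutates it to [[3, 'P'], [1, 'F']], while inserting 'F'
    # prepends a new run, giving [[1, 'F'], [2, 'P'], [1, 'F']].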
def _insert_generic_metadata(self, results_for_builder):
""" Inserts generic metadata (such as version number, current time etc)
into the JSON.
Args:
results_for_builder: Dictionary containing the test results for
a single builder.
"""
self._insert_item_into_raw_list(results_for_builder,
self._build_number, self.BUILD_NUMBERS)
# Include SVN revisions for the given repositories.
for (name, path) in self._svn_repositories:
self._insert_item_into_raw_list(results_for_builder,
self._get_svn_revision(path),
name + 'Revision')
self._insert_item_into_raw_list(results_for_builder,
int(time.time()),
self.TIME)
def _insert_test_time_and_result(self, test_name, tests):
""" Insert a test item with its results to the given tests dictionary.
Args:
tests: Dictionary containing test result entries.
"""
result = self._get_result_char(test_name)
time = self._get_test_timing(test_name)
this_test = tests
for segment in test_name.split("/"):
if segment not in this_test:
this_test[segment] = {}
this_test = this_test[segment]
if not len(this_test):
self._populate_results_and_times_json(this_test)
if self.RESULTS in this_test:
self._insert_item_run_length_encoded(result, this_test[self.RESULTS])
else:
this_test[self.RESULTS] = [[1, result]]
if self.TIMES in this_test:
self._insert_item_run_length_encoded(time, this_test[self.TIMES])
else:
this_test[self.TIMES] = [[1, time]]
def _convert_json_to_current_version(self, results_json):
"""If the JSON does not match the current version, converts it to the
current version and adds in the new version number.
"""
if self.VERSION_KEY in results_json:
archive_version = results_json[self.VERSION_KEY]
if archive_version == self.VERSION:
return
else:
archive_version = 3
# version 3->4
if archive_version == 3:
num_results = len(results_json.values())
for builder, results in results_json.iteritems():
self._convert_tests_to_trie(results)
results_json[self.VERSION_KEY] = self.VERSION
def _convert_tests_to_trie(self, results):
if not self.TESTS in results:
return
test_results = results[self.TESTS]
test_results_trie = {}
for test in test_results.iterkeys():
single_test_result = test_results[test]
add_path_to_trie(test, single_test_result, test_results_trie)
results[self.TESTS] = test_results_trie
def _populate_results_and_times_json(self, results_and_times):
results_and_times[self.RESULTS] = []
results_and_times[self.TIMES] = []
return results_and_times
def _create_results_for_builder_json(self):
results_for_builder = {}
results_for_builder[self.TESTS] = {}
return results_for_builder
def _remove_items_over_max_number_of_builds(self, encoded_list):
"""Removes items from the run-length encoded list after the final
item that exceeds the max number of builds to track.
Args:
encoded_results: run-length encoded results. An array of arrays, e.g.
[[3,'A'],[1,'Q']] encodes AAAQ.
"""
num_builds = 0
index = 0
for result in encoded_list:
num_builds = num_builds + result[0]
index = index + 1
if num_builds > self.MAX_NUMBER_OF_BUILD_RESULTS_TO_LOG:
return encoded_list[:index]
return encoded_list
def _normalize_results_json(self, test, test_name, tests):
""" Prune tests where all runs pass or tests that no longer exist and
truncate all results to maxNumberOfBuilds.
Args:
test: ResultsAndTimes object for this test.
test_name: Name of the test.
tests: The JSON object with all the test results for this builder.
"""
test[self.RESULTS] = self._remove_items_over_max_number_of_builds(
test[self.RESULTS])
test[self.TIMES] = self._remove_items_over_max_number_of_builds(
test[self.TIMES])
is_all_pass = self._is_results_all_of_type(test[self.RESULTS],
self.PASS_RESULT)
is_all_no_data = self._is_results_all_of_type(test[self.RESULTS],
self.NO_DATA_RESULT)
max_time = max([time[1] for time in test[self.TIMES]])
# Remove all passes/no-data from the results to reduce noise and
# filesize. If a test passes every run, but takes > MIN_TIME to run,
# don't throw away the data.
if is_all_no_data or (is_all_pass and max_time <= self.MIN_TIME):
del tests[test_name]
def _is_results_all_of_type(self, results, type):
"""Returns whether all the results are of the given type
(e.g. all passes)."""
return len(results) == 1 and results[0][1] == type
# Left here not to break anything.
class JSONResultsGenerator(JSONResultsGeneratorBase):
pass
| 38.279879 | 110 | 0.651306 | [
"Apache-2.0"
] | JavaScriptTesting/LJS | WebKit/Tools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator.py | 25,303 | Python |
from io import BytesIO
import pytest
from app import app
def test_otter():
with open('./images/otter.jpeg', 'rb') as img:
img_string = BytesIO(img.read())
response = app.test_client().post('/predict', data={'file': (img_string, 'otter.jpeg')},
content_type="multipart/form-data")
assert response.json['class_name'] == 'otter'
| 32.636364 | 90 | 0.657382 | [
"MIT"
] | tadashi0713/circleci-demo-pytorch-api | tests/test_otter.py | 359 | Python |
################################################################################
#
# MIT License
#
# Copyright (c) 2020 Advanced Micro Devices, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
################################################################################
from __future__ import print_function
import argparse
import sys, os, shutil
from python import *
OUT_DIR='out'
def igemm_flatten(args, config_content):
asm_target = os.path.join(args.dir, os.path.splitext(os.path.basename(args.config_file))[0] + '.s')
emitter = mc_emit_to_file_t(asm_target)
sec_root = config_content.get_section('codegen')[0]
arch = amdgpu_arch_config_t({
'arch' : amdgpu_string_to_arch( sec_root['arch'] ),
'data_type' : AMDGPU_PRECISION_FP32,
'code_object' : amdgpu_string_to_codeobj( sec_root['code_object']) })
# create mc
mc = mc_asm_printer_t(emitter, arch)
mc_set_current(mc)
tunable_dicts = [sec.to_dict() for sec in config_content if sec.get_name().startswith('igemm_')]
for td in tunable_dicts:
td['arch'] = sec_root['arch'] # append arch to each section
codegen_driver_t(mc, tunable_dicts)(split_kernel = args.split_kernel)
# os.chmod(asm_target, 0x777)
def igemm_out_tunable_param(output_file, config_content):
sec_root = config_content.get_section('codegen')[0]
list_emitter = mc_emit_to_file_t(output_file)
list_emitter.open()
tunable_dicts = [sec.to_dict() for sec in config_content if sec.get_name().startswith('igemm_')]
for td in tunable_dicts:
td['arch'] = sec_root['arch'] # append arch to each section
td_item = igemm_gtc_tunable_parameter_t(td)
list_emitter.emit(td_item.output())
list_emitter.close()
def igemm_check_fp16_configs(config_content):
tunable_dicts = [sec.to_dict() for sec in config_content if sec.get_name().startswith('igemm_')]
for td in tunable_dicts:
if "fp16" in td['precision']:
return True
return False
def igemm_check_int8_configs(config_content):
tunable_dicts = [sec.to_dict() for sec in config_content if sec.get_name().startswith('igemm_')]
for td in tunable_dicts:
if "int8" in td['precision']:
return True
return False
def igemm_check_bf16_configs(config_content):
tunable_dicts = [sec.to_dict() for sec in config_content if sec.get_name().startswith('igemm_')]
for td in tunable_dicts:
if "bf16" in td['precision']:
return True
return False
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("config_file", help="config file as input")
parser.add_argument("-d", "--dir", help="directory of output files", default = OUT_DIR)
parser.add_argument("-output", nargs='?', const='tunable_parameter_list.txt', help="output tunable parameter list")
parser.add_argument("-s", "--split_kernel", action="store_true")
args = parser.parse_args()
config_parser = config_parser_t(args.config_file)
#print(os.getcwd())
config_content = config_parser()
#config_content.dump()
#print(args.output)
if args.output:
igemm_out_tunable_param(args.output, config_content)
arch = config_content.get_section('codegen')[0]['arch']
code_object = config_content.get_section('codegen')[0]['code_object']
has_fp16_config = igemm_check_fp16_configs(config_content)
has_int8_config = igemm_check_int8_configs(config_content)
has_bf16_config = igemm_check_bf16_configs(config_content)
if config_content.get_section('codegen')[0]['mode'] in ('flat', 'flatten'):
if os.path.exists(args.dir):
shutil.rmtree(args.dir)
os.mkdir(args.dir)
cxxflags = []
if args.split_kernel:
cxxflags += ["-DIGEMM_SPLIT_KERNEL"]
host_driver(cxxflags=cxxflags, arch=arch, config_file=args.config_file, out_dir=args.dir, has_fp16_config=has_fp16_config, has_int8_config=has_int8_config, has_bf16_config=has_bf16_config)
igemm_flatten(args, config_content)
if config_content.get_section('codegen')[0]['mode'] in ('seq', 'sequencer'):
# config_content.dump()
# igemm_sequence(args, config_content)
if os.path.exists(args.dir):
shutil.rmtree(args.dir)
os.mkdir(args.dir)
sequence_driver(arch=arch, code_object=code_object,
config_content=config_content, out_dir=args.dir )
| 42.875969 | 196 | 0.685952 | [
"MIT"
] | ROCmSoftwarePlatform/iGEMMgen | igemm_codegen.py | 5,531 | Python |
from __future__ import unicode_literals
from boto.ec2.instancetype import InstanceType
from moto.core.responses import BaseResponse
from moto.core.utils import camelcase_to_underscores
from moto.ec2.utils import instance_ids_from_querystring, filters_from_querystring, \
dict_from_querystring, optional_from_querystring
class InstanceResponse(BaseResponse):
def describe_instances(self):
filter_dict = filters_from_querystring(self.querystring)
instance_ids = instance_ids_from_querystring(self.querystring)
if instance_ids:
reservations = self.ec2_backend.get_reservations_by_instance_ids(
instance_ids, filters=filter_dict)
else:
reservations = self.ec2_backend.all_reservations(
make_copy=True, filters=filter_dict)
template = self.response_template(EC2_DESCRIBE_INSTANCES)
return template.render(reservations=reservations)
def run_instances(self):
min_count = int(self.querystring.get('MinCount', ['1'])[0])
image_id = self.querystring.get('ImageId')[0]
user_data = self.querystring.get('UserData')
security_group_names = self._get_multi_param('SecurityGroup')
security_group_ids = self._get_multi_param('SecurityGroupId')
nics = dict_from_querystring("NetworkInterface", self.querystring)
instance_type = self.querystring.get("InstanceType", ["m1.small"])[0]
placement = self.querystring.get(
"Placement.AvailabilityZone", [None])[0]
subnet_id = self.querystring.get("SubnetId", [None])[0]
private_ip = self.querystring.get("PrivateIpAddress", [None])[0]
associate_public_ip = self.querystring.get(
"AssociatePublicIpAddress", [None])[0]
key_name = self.querystring.get("KeyName", [None])[0]
if self.is_not_dryrun('RunInstance'):
new_reservation = self.ec2_backend.add_instances(
image_id, min_count, user_data, security_group_names,
instance_type=instance_type, placement=placement, subnet_id=subnet_id,
key_name=key_name, security_group_ids=security_group_ids,
nics=nics, private_ip=private_ip, associate_public_ip=associate_public_ip)
template = self.response_template(EC2_RUN_INSTANCES)
return template.render(reservation=new_reservation)
def terminate_instances(self):
instance_ids = instance_ids_from_querystring(self.querystring)
if self.is_not_dryrun('TerminateInstance'):
instances = self.ec2_backend.terminate_instances(instance_ids)
template = self.response_template(EC2_TERMINATE_INSTANCES)
return template.render(instances=instances)
def reboot_instances(self):
instance_ids = instance_ids_from_querystring(self.querystring)
if self.is_not_dryrun('RebootInstance'):
instances = self.ec2_backend.reboot_instances(instance_ids)
template = self.response_template(EC2_REBOOT_INSTANCES)
return template.render(instances=instances)
def stop_instances(self):
instance_ids = instance_ids_from_querystring(self.querystring)
if self.is_not_dryrun('StopInstance'):
instances = self.ec2_backend.stop_instances(instance_ids)
template = self.response_template(EC2_STOP_INSTANCES)
return template.render(instances=instances)
def start_instances(self):
instance_ids = instance_ids_from_querystring(self.querystring)
if self.is_not_dryrun('StartInstance'):
instances = self.ec2_backend.start_instances(instance_ids)
template = self.response_template(EC2_START_INSTANCES)
return template.render(instances=instances)
def describe_instance_status(self):
instance_ids = instance_ids_from_querystring(self.querystring)
include_all_instances = optional_from_querystring('IncludeAllInstances',
self.querystring) == 'true'
if instance_ids:
instances = self.ec2_backend.get_multi_instances_by_id(
instance_ids)
elif include_all_instances:
instances = self.ec2_backend.all_instances()
else:
instances = self.ec2_backend.all_running_instances()
template = self.response_template(EC2_INSTANCE_STATUS)
return template.render(instances=instances)
def describe_instance_types(self):
instance_types = [InstanceType(
name='t1.micro', cores=1, memory=644874240, disk=0)]
template = self.response_template(EC2_DESCRIBE_INSTANCE_TYPES)
return template.render(instance_types=instance_types)
def describe_instance_attribute(self):
# TODO this and modify below should raise IncorrectInstanceState if
# instance not in stopped state
attribute = self.querystring.get("Attribute")[0]
key = camelcase_to_underscores(attribute)
instance_ids = instance_ids_from_querystring(self.querystring)
instance_id = instance_ids[0]
instance, value = self.ec2_backend.describe_instance_attribute(
instance_id, key)
if key == "group_set":
template = self.response_template(
EC2_DESCRIBE_INSTANCE_GROUPSET_ATTRIBUTE)
else:
template = self.response_template(EC2_DESCRIBE_INSTANCE_ATTRIBUTE)
return template.render(instance=instance, attribute=attribute, value=value)
def modify_instance_attribute(self):
handlers = [self._dot_value_instance_attribute_handler,
self._block_device_mapping_handler,
self._security_grp_instance_attribute_handler]
for handler in handlers:
success = handler()
if success:
return success
msg = "This specific call to ModifyInstanceAttribute has not been" \
" implemented in Moto yet. Feel free to open an issue at" \
" https://github.com/spulec/moto/issues"
raise NotImplementedError(msg)
def _block_device_mapping_handler(self):
"""
Handles requests which are generated by code similar to:
instance.modify_attribute('blockDeviceMapping', {'/dev/sda1': True})
The querystring contains information similar to:
BlockDeviceMapping.1.Ebs.DeleteOnTermination : ['true']
BlockDeviceMapping.1.DeviceName : ['/dev/sda1']
For now we only support the "BlockDeviceMapping.1.Ebs.DeleteOnTermination"
configuration, but it should be trivial to add anything else.
"""
mapping_counter = 1
mapping_device_name_fmt = 'BlockDeviceMapping.%s.DeviceName'
mapping_del_on_term_fmt = 'BlockDeviceMapping.%s.Ebs.DeleteOnTermination'
while True:
mapping_device_name = mapping_device_name_fmt % mapping_counter
if mapping_device_name not in self.querystring.keys():
break
mapping_del_on_term = mapping_del_on_term_fmt % mapping_counter
del_on_term_value_str = self.querystring[mapping_del_on_term][0]
del_on_term_value = True if 'true' == del_on_term_value_str else False
device_name_value = self.querystring[mapping_device_name][0]
instance_ids = instance_ids_from_querystring(self.querystring)
instance_id = instance_ids[0]
instance = self.ec2_backend.get_instance(instance_id)
if self.is_not_dryrun('ModifyInstanceAttribute'):
block_device_type = instance.block_device_mapping[
device_name_value]
block_device_type.delete_on_termination = del_on_term_value
# +1 for the next device
mapping_counter += 1
if mapping_counter > 1:
return EC2_MODIFY_INSTANCE_ATTRIBUTE
def _dot_value_instance_attribute_handler(self):
attribute_key = None
for key, value in self.querystring.items():
if '.Value' in key:
attribute_key = key
break
if not attribute_key:
return
if self.is_not_dryrun('Modify' + attribute_key.split(".")[0]):
value = self.querystring.get(attribute_key)[0]
normalized_attribute = camelcase_to_underscores(
attribute_key.split(".")[0])
instance_ids = instance_ids_from_querystring(self.querystring)
instance_id = instance_ids[0]
self.ec2_backend.modify_instance_attribute(
instance_id, normalized_attribute, value)
return EC2_MODIFY_INSTANCE_ATTRIBUTE
def _security_grp_instance_attribute_handler(self):
new_security_grp_list = []
for key, value in self.querystring.items():
if 'GroupId.' in key:
new_security_grp_list.append(self.querystring.get(key)[0])
instance_ids = instance_ids_from_querystring(self.querystring)
instance_id = instance_ids[0]
if self.is_not_dryrun('ModifyInstanceSecurityGroups'):
self.ec2_backend.modify_instance_security_groups(
instance_id, new_security_grp_list)
return EC2_MODIFY_INSTANCE_ATTRIBUTE
EC2_RUN_INSTANCES = """<RunInstancesResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<reservationId>{{ reservation.id }}</reservationId>
<ownerId>123456789012</ownerId>
<groupSet>
<item>
<groupId>sg-245f6a01</groupId>
<groupName>default</groupName>
</item>
</groupSet>
<instancesSet>
{% for instance in reservation.instances %}
<item>
<instanceId>{{ instance.id }}</instanceId>
<imageId>{{ instance.image_id }}</imageId>
<instanceState>
<code>0</code>
<name>pending</name>
</instanceState>
<privateDnsName>{{ instance.private_dns }}</privateDnsName>
<publicDnsName>{{ instance.public_dns }}</publicDnsName>
<dnsName>{{ instance.public_dns }}</dnsName>
<reason/>
<keyName>{{ instance.key_name }}</keyName>
<amiLaunchIndex>0</amiLaunchIndex>
<instanceType>{{ instance.instance_type }}</instanceType>
<launchTime>{{ instance.launch_time }}</launchTime>
<placement>
<availabilityZone>{{ instance.placement}}</availabilityZone>
<groupName/>
<tenancy>default</tenancy>
</placement>
<monitoring>
<state>enabled</state>
</monitoring>
{% if instance.nics %}
{% if instance.nics[0].subnet %}
<subnetId>{{ instance.nics[0].subnet.id }}</subnetId>
<vpcId>{{ instance.nics[0].subnet.vpc_id }}</vpcId>
{% endif %}
<privateIpAddress>{{ instance.private_ip }}</privateIpAddress>
{% if instance.public_ip %}
<ipAddress>{{ instance.public_ip }}</ipAddress>
{% endif %}
{% else %}
<subnetId>{{ instance.subnet_id }}</subnetId>
{% endif %}
<sourceDestCheck>{{ instance.source_dest_check }}</sourceDestCheck>
<groupSet>
{% for group in instance.dynamic_group_list %}
<item>
<groupId>{{ group.id }}</groupId>
<groupName>{{ group.name }}</groupName>
</item>
{% endfor %}
</groupSet>
{% if instance.platform %}
<platform>{{ instance.platform }}</platform>
{% endif %}
<virtualizationType>{{ instance.virtualization_type }}</virtualizationType>
<architecture>{{ instance.architecture }}</architecture>
<kernelId>{{ instance.kernel }}</kernelId>
<clientToken/>
<hypervisor>xen</hypervisor>
<ebsOptimized>false</ebsOptimized>
<networkInterfaceSet>
{% for nic in instance.nics.values() %}
<item>
<networkInterfaceId>{{ nic.id }}</networkInterfaceId>
{% if nic.subnet %}
<subnetId>{{ nic.subnet.id }}</subnetId>
<vpcId>{{ nic.subnet.vpc_id }}</vpcId>
{% endif %}
<description>Primary network interface</description>
<ownerId>123456789012</ownerId>
<status>in-use</status>
<macAddress>1b:2b:3c:4d:5e:6f</macAddress>
<privateIpAddress>{{ nic.private_ip_address }}</privateIpAddress>
<sourceDestCheck>{{ instance.source_dest_check }}</sourceDestCheck>
<groupSet>
{% for group in nic.group_set %}
<item>
<groupId>{{ group.id }}</groupId>
<groupName>{{ group.name }}</groupName>
</item>
{% endfor %}
</groupSet>
<attachment>
<attachmentId>{{ nic.attachment_id }}</attachmentId>
<deviceIndex>{{ nic.device_index }}</deviceIndex>
<status>attached</status>
<attachTime>2015-01-01T00:00:00Z</attachTime>
<deleteOnTermination>true</deleteOnTermination>
</attachment>
{% if nic.public_ip %}
<association>
<publicIp>{{ nic.public_ip }}</publicIp>
<ipOwnerId>123456789012</ipOwnerId>
</association>
{% endif %}
<privateIpAddressesSet>
<item>
<privateIpAddress>{{ nic.private_ip_address }}</privateIpAddress>
<primary>true</primary>
{% if nic.public_ip %}
<association>
<publicIp>{{ nic.public_ip }}</publicIp>
<ipOwnerId>123456789012</ipOwnerId>
</association>
{% endif %}
</item>
</privateIpAddressesSet>
</item>
{% endfor %}
</networkInterfaceSet>
</item>
{% endfor %}
</instancesSet>
</RunInstancesResponse>"""
EC2_DESCRIBE_INSTANCES = """<DescribeInstancesResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<requestId>fdcdcab1-ae5c-489e-9c33-4637c5dda355</requestId>
<reservationSet>
{% for reservation in reservations %}
<item>
<reservationId>{{ reservation.id }}</reservationId>
<ownerId>123456789012</ownerId>
<groupSet>
{% for group in reservation.dynamic_group_list %}
<item>
{% if group.id %}
<groupId>{{ group.id }}</groupId>
<groupName>{{ group.name }}</groupName>
{% else %}
<groupId>{{ group }}</groupId>
{% endif %}
</item>
{% endfor %}
</groupSet>
<instancesSet>
{% for instance in reservation.instances %}
<item>
<instanceId>{{ instance.id }}</instanceId>
<imageId>{{ instance.image_id }}</imageId>
<instanceState>
<code>{{ instance._state.code }}</code>
<name>{{ instance._state.name }}</name>
</instanceState>
<privateDnsName>{{ instance.private_dns }}</privateDnsName>
<publicDnsName>{{ instance.public_dns }}</publicDnsName>
<dnsName>{{ instance.public_dns }}</dnsName>
<reason>{{ instance._reason }}</reason>
<keyName>{{ instance.key_name }}</keyName>
<amiLaunchIndex>0</amiLaunchIndex>
<productCodes/>
<instanceType>{{ instance.instance_type }}</instanceType>
<launchTime>{{ instance.launch_time }}</launchTime>
<placement>
<availabilityZone>{{ instance.placement }}</availabilityZone>
<groupName/>
<tenancy>default</tenancy>
</placement>
{% if instance.platform %}
<platform>{{ instance.platform }}</platform>
{% endif %}
<monitoring>
<state>disabled</state>
</monitoring>
{% if instance.nics %}
{% if instance.nics[0].subnet %}
<subnetId>{{ instance.nics[0].subnet.id }}</subnetId>
<vpcId>{{ instance.nics[0].subnet.vpc_id }}</vpcId>
{% endif %}
<privateIpAddress>{{ instance.private_ip }}</privateIpAddress>
{% if instance.nics[0].public_ip %}
<ipAddress>{{ instance.nics[0].public_ip }}</ipAddress>
{% endif %}
{% endif %}
<sourceDestCheck>{{ instance.source_dest_check }}</sourceDestCheck>
<groupSet>
{% for group in instance.dynamic_group_list %}
<item>
{% if group.id %}
<groupId>{{ group.id }}</groupId>
<groupName>{{ group.name }}</groupName>
{% else %}
<groupId>{{ group }}</groupId>
{% endif %}
</item>
{% endfor %}
</groupSet>
<stateReason>
<code>{{ instance._state_reason.code }}</code>
<message>{{ instance._state_reason.message }}</message>
</stateReason>
<architecture>{{ instance.architecture }}</architecture>
<kernelId>{{ instance.kernel }}</kernelId>
<rootDeviceType>ebs</rootDeviceType>
<rootDeviceName>/dev/sda1</rootDeviceName>
<blockDeviceMapping>
{% for device_name,deviceobject in instance.get_block_device_mapping %}
<item>
<deviceName>{{ device_name }}</deviceName>
<ebs>
<volumeId>{{ deviceobject.volume_id }}</volumeId>
<status>{{ deviceobject.status }}</status>
<attachTime>{{ deviceobject.attach_time }}</attachTime>
<deleteOnTermination>{{ deviceobject.delete_on_termination }}</deleteOnTermination>
<size>{{deviceobject.size}}</size>
</ebs>
</item>
{% endfor %}
</blockDeviceMapping>
<virtualizationType>{{ instance.virtualization_type }}</virtualizationType>
<clientToken>ABCDE1234567890123</clientToken>
<tagSet>
{% for tag in instance.get_tags() %}
<item>
<resourceId>{{ tag.resource_id }}</resourceId>
<resourceType>{{ tag.resource_type }}</resourceType>
<key>{{ tag.key }}</key>
<value>{{ tag.value }}</value>
</item>
{% endfor %}
</tagSet>
<hypervisor>xen</hypervisor>
<networkInterfaceSet>
{% for nic in instance.nics.values() %}
<item>
<networkInterfaceId>{{ nic.id }}</networkInterfaceId>
{% if nic.subnet %}
<subnetId>{{ nic.subnet.id }}</subnetId>
<vpcId>{{ nic.subnet.vpc_id }}</vpcId>
{% endif %}
<description>Primary network interface</description>
<ownerId>123456789012</ownerId>
<status>in-use</status>
<macAddress>1b:2b:3c:4d:5e:6f</macAddress>
<privateIpAddress>{{ nic.private_ip_address }}</privateIpAddress>
<sourceDestCheck>{{ instance.source_dest_check }}</sourceDestCheck>
<groupSet>
{% for group in nic.group_set %}
<item>
{% if group.id %}
<groupId>{{ group.id }}</groupId>
<groupName>{{ group.name }}</groupName>
{% else %}
<groupId>{{ group }}</groupId>
{% endif %}
</item>
{% endfor %}
</groupSet>
<attachment>
<attachmentId>{{ nic.attachment_id }}</attachmentId>
<deviceIndex>{{ nic.device_index }}</deviceIndex>
<status>attached</status>
<attachTime>2015-01-01T00:00:00Z</attachTime>
<deleteOnTermination>true</deleteOnTermination>
</attachment>
{% if nic.public_ip %}
<association>
<publicIp>{{ nic.public_ip }}</publicIp>
<ipOwnerId>123456789012</ipOwnerId>
</association>
{% endif %}
<privateIpAddressesSet>
<item>
<privateIpAddress>{{ nic.private_ip_address }}</privateIpAddress>
<primary>true</primary>
{% if nic.public_ip %}
<association>
<publicIp>{{ nic.public_ip }}</publicIp>
<ipOwnerId>123456789012</ipOwnerId>
</association>
{% endif %}
</item>
</privateIpAddressesSet>
</item>
{% endfor %}
</networkInterfaceSet>
</item>
{% endfor %}
</instancesSet>
</item>
{% endfor %}
</reservationSet>
</DescribeInstancesResponse>"""
EC2_TERMINATE_INSTANCES = """
<TerminateInstancesResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<instancesSet>
{% for instance in instances %}
<item>
<instanceId>{{ instance.id }}</instanceId>
<previousState>
<code>16</code>
<name>running</name>
</previousState>
<currentState>
<code>32</code>
<name>shutting-down</name>
</currentState>
</item>
{% endfor %}
</instancesSet>
</TerminateInstancesResponse>"""
EC2_STOP_INSTANCES = """
<StopInstancesResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<instancesSet>
{% for instance in instances %}
<item>
<instanceId>{{ instance.id }}</instanceId>
<previousState>
<code>16</code>
<name>running</name>
</previousState>
<currentState>
<code>64</code>
<name>stopping</name>
</currentState>
</item>
{% endfor %}
</instancesSet>
</StopInstancesResponse>"""
EC2_START_INSTANCES = """
<StartInstancesResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<instancesSet>
{% for instance in instances %}
<item>
<instanceId>{{ instance.id }}</instanceId>
<previousState>
<code>16</code>
<name>running</name>
</previousState>
<currentState>
<code>0</code>
<name>pending</name>
</currentState>
</item>
{% endfor %}
</instancesSet>
</StartInstancesResponse>"""
EC2_REBOOT_INSTANCES = """<RebootInstancesResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<return>true</return>
</RebootInstancesResponse>"""
EC2_DESCRIBE_INSTANCE_ATTRIBUTE = """<DescribeInstanceAttributeResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<instanceId>{{ instance.id }}</instanceId>
<{{ attribute }}>
<value>{{ value }}</value>
</{{ attribute }}>
</DescribeInstanceAttributeResponse>"""
EC2_DESCRIBE_INSTANCE_GROUPSET_ATTRIBUTE = """<DescribeInstanceAttributeResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<instanceId>{{ instance.id }}</instanceId>
<{{ attribute }}>
{% for sg_id in value %}
<item>
<groupId>{{ sg_id }}</groupId>
</item>
{% endfor %}
</{{ attribute }}>
</DescribeInstanceAttributeResponse>"""
EC2_MODIFY_INSTANCE_ATTRIBUTE = """<ModifyInstanceAttributeResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<return>true</return>
</ModifyInstanceAttributeResponse>"""
EC2_INSTANCE_STATUS = """<?xml version="1.0" encoding="UTF-8"?>
<DescribeInstanceStatusResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-15/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<instanceStatusSet>
{% for instance in instances %}
<item>
<instanceId>{{ instance.id }}</instanceId>
<availabilityZone>{{ instance.placement }}</availabilityZone>
<instanceState>
<code>{{ instance.state_code }}</code>
<name>{{ instance.state }}</name>
</instanceState>
{% if instance.state_code == 16 %}
<systemStatus>
<status>ok</status>
<details>
<item>
<name>reachability</name>
<status>passed</status>
</item>
</details>
</systemStatus>
<instanceStatus>
<status>ok</status>
<details>
<item>
<name>reachability</name>
<status>passed</status>
</item>
</details>
</instanceStatus>
{% else %}
<systemStatus>
<status>not-applicable</status>
</systemStatus>
<instanceStatus>
<status>not-applicable</status>
</instanceStatus>
{% endif %}
</item>
{% endfor %}
</instanceStatusSet>
</DescribeInstanceStatusResponse>"""
EC2_DESCRIBE_INSTANCE_TYPES = """<?xml version="1.0" encoding="UTF-8"?>
<DescribeInstanceTypesResponse xmlns="http://api.outscale.com/wsdl/fcuext/2014-04-15/">
<requestId>f8b86168-d034-4e65-b48d-3b84c78e64af</requestId>
<instanceTypeSet>
{% for instance_type in instance_types %}
<item>
<name>{{ instance_type.name }}</name>
<vcpu>{{ instance_type.cores }}</vcpu>
<memory>{{ instance_type.memory }}</memory>
<storageSize>{{ instance_type.disk }}</storageSize>
<storageCount>{{ instance_type.storageCount }}</storageCount>
<maxIpAddresses>{{ instance_type.maxIpAddresses }}</maxIpAddresses>
<ebsOptimizedAvailable>{{ instance_type.ebsOptimizedAvailable }}</ebsOptimizedAvailable>
</item>
{% endfor %}
</instanceTypeSet>
</DescribeInstanceTypesResponse>"""
| 43.666667 | 130 | 0.555556 | [
"Apache-2.0"
] | adtsys-cloud/moto-aws-mock | moto/ec2/responses/instances.py | 28,296 | Python |
import urllib.parse
from functools import partial, wraps
from pathlib import Path
from drfs import config
from drfs.util import prepend_scheme, remove_scheme
def get_fs(path, opts=None, rtype="instance"):
"""Helper to infer filesystem correctly.
Gets filesystem options from settings and updates them with given `opts`.
Parameters
----------
path: str
Path for which we want to infer filesystem.
opts: dict
Kwargs that will be passed to inferred filesystem instance.
rtype: str
Either 'instance' (default) or 'class'.
"""
from drfs.filesystems import FILESYSTEMS
try:
protocol = path.scheme
except AttributeError:
protocol = _get_protocol(path)
try:
cls = FILESYSTEMS[protocol]
if rtype == "class":
return cls
except KeyError:
raise KeyError(
f"No filesystem for protocol {protocol}. Try "
f"installing it. Available protocols are: "
f"{set(FILESYSTEMS.keys())}"
)
config_scheme_key = protocol if protocol else "file"
opts_ = config["fs_opts"][config_scheme_key].get(dict).copy() # type: dict
if opts is not None:
opts_.update(opts)
opts_ = _fix_opts_abfs(cls, path, opts_)
return cls(**opts_)
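# Illustrative usage sketch, not part of the original module (paths and bucket
# names are made up):
#
#   fs = get_fs("s3://my-bucket/data/file.csv")             # filesystem instance
#   fs_cls = get_fs("s3://my-bucket/data", rtype="class")   # filesystem class only
#   local_fs = get_fs("/tmp/file.csv")                      # no scheme -> local backend
#
# Options are taken from config["fs_opts"][<scheme or "file">] and overridden
# by an explicit `opts` dict.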
def _get_protocol(path):
if "://" in str(path):
protocol = urllib.parse.urlparse(str(path)).scheme
else:
# most likely a windows path, basically if in doubt assume local
protocol = ""
return protocol
def _fix_opts_abfs(cls, path, opts: dict):
try:
from drfs.filesystems.azure_blob import AzureBlobFileSystem, extract_abfs_parts
except ImportError:
AzureBlobFileSystem = extract_abfs_parts = None
if (
AzureBlobFileSystem is not None
and cls is AzureBlobFileSystem
and "account_name" not in opts
):
opts = opts.copy()
opts["account_name"] = extract_abfs_parts(path)[0]
return opts
def allow_pathlib(func):
"""Allow methods to receive pathlib.Path objects.
Parameters
----------
func: callable
function to decorate must have the following signature
self, path, *args, **kwargs
Returns
-------
wrapper: callable
"""
@wraps(func)
def wrapper(self, path, *args, **kwargs):
# Can only be used if path is passed as first argument right
# after self
from drfs.path import asstr
p = asstr(path)
return func(self, p, *args, **kwargs)
return wrapper
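# Illustrative sketch (hypothetical class, not from this module): a method
# decorated as
#
#   class MyFileSystem:
#       @allow_pathlib
#       def exists(self, path):
#           ...  # `path` arrives as str even when the caller passed pathlib.Path
#
# can be called with either MyFileSystem().exists(Path("/tmp/x")) or
# MyFileSystem().exists("/tmp/x").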
def return_pathlib(func):
@wraps(func)
def wrapper(self, path, *args, **kwargs):
from drfs.path import aspath
res = func(self, path, *args, **kwargs)
as_path = aspath(res)
return as_path
return wrapper
def return_schemes(func):
"""Make sure method returns full path with scheme."""
@wraps(func)
def wrapper(self, path, *args, **kwargs):
res = func(self, path, *args, **kwargs)
try:
res = list(map(partial(prepend_scheme, self.scheme), res))
except TypeError:
res = prepend_scheme(self.scheme, res)
return res
return wrapper
def maybe_remove_scheme(func):
"""Remove scheme from args and kwargs in case underlying fs does not support it."""
@wraps(func)
def wrapper(self, path, *args, **kwargs):
if not self.supports_scheme:
path = remove_scheme(path, raise_=False)
args = [remove_scheme(a, raise_=False) for a in args]
kwargs = {
k: remove_scheme(v, raise_=False) if isinstance(v, (Path, str)) else v
for k, v in kwargs.items()
}
return func(self, path, *args, **kwargs)
return wrapper
| 26.697183 | 87 | 0.619889 | [
"MIT"
] | datarevenue-berlin/drfs | drfs/filesystems/util.py | 3,791 | Python |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import io
import json
import textwrap
import unittest
from contextlib import redirect_stdout
from airflow.cli import cli_parser
from airflow.cli.commands import plugins_command
from airflow.hooks.base import BaseHook
from airflow.listeners.listener import get_listener_manager
from airflow.plugins_manager import AirflowPlugin
from tests.plugins.test_plugin import AirflowTestPlugin as ComplexAirflowPlugin
from tests.test_utils.mock_plugins import mock_plugin_manager
class PluginHook(BaseHook):
pass
class TestPlugin(AirflowPlugin):
name = "test-plugin-cli"
hooks = [PluginHook]
class TestPluginsCommand(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.parser = cli_parser.get_parser()
@mock_plugin_manager(plugins=[])
def test_should_display_no_plugins(self):
with redirect_stdout(io.StringIO()) as temp_stdout:
plugins_command.dump_plugins(self.parser.parse_args(['plugins', '--output=json']))
stdout = temp_stdout.getvalue()
assert 'No plugins loaded' in stdout
@mock_plugin_manager(plugins=[ComplexAirflowPlugin])
def test_should_display_one_plugins(self):
with redirect_stdout(io.StringIO()) as temp_stdout:
plugins_command.dump_plugins(self.parser.parse_args(['plugins', '--output=json']))
stdout = temp_stdout.getvalue()
print(stdout)
info = json.loads(stdout)
assert info == [
{
'name': 'test_plugin',
'macros': ['tests.plugins.test_plugin.plugin_macro'],
'executors': ['tests.plugins.test_plugin.PluginExecutor'],
'flask_blueprints': [
"<flask.blueprints.Blueprint: name='test_plugin' import_name='tests.plugins.test_plugin'>"
],
'appbuilder_views': [
{
'name': 'Test View',
'category': 'Test Plugin',
'view': 'tests.plugins.test_plugin.PluginTestAppBuilderBaseView',
}
],
'global_operator_extra_links': [
'<tests.test_utils.mock_operators.AirflowLink object>',
'<tests.test_utils.mock_operators.GithubLink object>',
],
'timetables': ['tests.plugins.test_plugin.CustomCronDataIntervalTimetable'],
'operator_extra_links': [
'<tests.test_utils.mock_operators.GoogleLink object>',
'<tests.test_utils.mock_operators.AirflowLink2 object>',
'<tests.test_utils.mock_operators.CustomOpLink object>',
'<tests.test_utils.mock_operators.CustomBaseIndexOpLink object>',
],
'hooks': ['tests.plugins.test_plugin.PluginHook'],
'listeners': ['tests.listeners.empty_listener'],
'source': None,
'appbuilder_menu_items': [
{'name': 'Google', 'href': 'https://www.google.com', 'category': 'Search'},
{
'name': 'apache',
'href': 'https://www.apache.org/',
'label': 'The Apache Software Foundation',
},
],
'ti_deps': ['<TIDep(CustomTestTriggerRule)>'],
}
]
get_listener_manager().clear()
@mock_plugin_manager(plugins=[TestPlugin])
def test_should_display_one_plugins_as_table(self):
with redirect_stdout(io.StringIO()) as temp_stdout:
plugins_command.dump_plugins(self.parser.parse_args(['plugins', '--output=table']))
stdout = temp_stdout.getvalue()
# Remove leading spaces
stdout = "\n".join(line.rstrip(" ") for line in stdout.splitlines())
# Assert that only columns with values are displayed
expected_output = textwrap.dedent(
"""\
name | hooks
================+===================================================
test-plugin-cli | tests.cli.commands.test_plugins_command.PluginHook
"""
)
self.assertEqual(stdout, expected_output)
| 41.677686 | 110 | 0.614515 | [
"Apache-2.0"
] | AMS-Kepler/airflow | tests/cli/commands/test_plugins_command.py | 5,043 | Python |
"""Conversion tool from SQD to FIF.
RawKIT class is adapted from Denis Engemann et al.'s mne_bti2fiff.py.
"""
# Authors: Teon Brooks <teon.brooks@gmail.com>
# Joan Massich <mailsik@gmail.com>
# Christian Brodbeck <christianbrodbeck@nyu.edu>
#
# License: BSD (3-clause)
from collections import defaultdict, OrderedDict
from math import sin, cos
from os import SEEK_CUR, path as op
from struct import unpack
import numpy as np
from scipy import linalg
from ..pick import pick_types
from ...utils import (verbose, logger, warn, fill_doc, _check_option,
_stamp_to_dt)
from ...transforms import apply_trans, als_ras_trans
from ..base import BaseRaw
from ..utils import _mult_cal_one
from ...epochs import BaseEpochs
from ..constants import FIFF
from ..meas_info import _empty_info
from .constants import KIT, LEGACY_AMP_PARAMS
from .coreg import read_mrk
from ...event import read_events
from .._digitization import _set_dig_kit
def _call_digitization(info, mrk, elp, hsp, kit_info):
# Use values from kit_info only if all others are None
if mrk is None and elp is None and hsp is None:
mrk = kit_info.get('mrk', None)
elp = kit_info.get('elp', None)
hsp = kit_info.get('hsp', None)
# prepare mrk
if isinstance(mrk, list):
mrk = [read_mrk(marker) if isinstance(marker, str)
else marker for marker in mrk]
mrk = np.mean(mrk, axis=0)
# setup digitization
if mrk is not None and elp is not None and hsp is not None:
dig_points, dev_head_t = _set_dig_kit(
mrk, elp, hsp, kit_info['eeg_dig'])
info['dig'] = dig_points
info['dev_head_t'] = dev_head_t
elif mrk is not None or elp is not None or hsp is not None:
raise ValueError("mrk, elp and hsp need to be provided as a group "
"(all or none)")
return info
class UnsupportedKITFormat(ValueError):
"""Our reader is not guaranteed to work with old files."""
def __init__(self, sqd_version, *args, **kwargs): # noqa: D102
self.sqd_version = sqd_version
ValueError.__init__(self, *args, **kwargs)
@fill_doc
class RawKIT(BaseRaw):
"""Raw object from KIT SQD file.
Parameters
----------
input_fname : str
Path to the sqd file.
mrk : None | str | array_like, shape (5, 3) | list of str or array_like
Marker points representing the location of the marker coils with
respect to the MEG Sensors, or path to a marker file.
If list, all of the markers will be averaged together.
elp : None | str | array_like, shape (8, 3)
Digitizer points representing the location of the fiducials and the
marker coils with respect to the digitized head shape, or path to a
file containing these points.
hsp : None | str | array, shape (n_points, 3)
Digitizer head shape points, or path to head shape file. If more than
10,000 points are in the head shape, they are automatically decimated.
stim : list of int | '<' | '>' | None
Channel-value correspondence when converting KIT trigger channels to a
Neuromag-style stim channel. For '<', the largest values are assigned
to the first channel (default). For '>', the largest values are
assigned to the last channel. Can also be specified as a list of
trigger channel indexes. If None, no synthesized channel is generated.
slope : '+' | '-'
How to interpret values on KIT trigger channels when synthesizing a
Neuromag-style stim channel. With '+', a positive slope (low-to-high)
is interpreted as an event. With '-', a negative slope (high-to-low)
is interpreted as an event.
stimthresh : float
The threshold level for accepting voltage changes in KIT trigger
channels as a trigger event. If None, stim must also be set to None.
%(preload)s
stim_code : 'binary' | 'channel'
        How to decode trigger values from stim channels. 'binary' reads stim
        channel events as binary code, 'channel' encodes the channel number.
allow_unknown_format : bool
Force reading old data that is not officially supported. Alternatively,
read and re-save the data with the KIT MEG Laboratory application.
%(standardize_names)s
%(verbose)s
Notes
-----
``elp`` and ``hsp`` are usually the exported text files (*.txt) from the
Polhemus FastScan system. hsp refers to the headshape surface points. elp
refers to the points in head-space that corresponds to the HPI points.
Currently, '*.elp' and '*.hsp' files are NOT supported.
See Also
--------
mne.io.Raw : Documentation of attribute and methods.
"""
@verbose
def __init__(self, input_fname, mrk=None, elp=None, hsp=None, stim='>',
slope='-', stimthresh=1, preload=False, stim_code='binary',
allow_unknown_format=False, standardize_names=None,
verbose=None): # noqa: D102
logger.info('Extracting SQD Parameters from %s...' % input_fname)
input_fname = op.abspath(input_fname)
self.preload = False
logger.info('Creating Raw.info structure...')
info, kit_info = get_kit_info(
input_fname, allow_unknown_format, standardize_names)
kit_info['slope'] = slope
kit_info['stimthresh'] = stimthresh
if kit_info['acq_type'] != KIT.CONTINUOUS:
raise TypeError('SQD file contains epochs, not raw data. Wrong '
'reader.')
logger.info('Creating Info structure...')
last_samps = [kit_info['n_samples'] - 1]
self._raw_extras = [kit_info]
self._set_stimchannels(info, stim, stim_code)
super(RawKIT, self).__init__(
info, preload, last_samps=last_samps, filenames=[input_fname],
raw_extras=self._raw_extras, verbose=verbose)
self.info = _call_digitization(
info=self.info, mrk=mrk, elp=elp, hsp=hsp, kit_info=kit_info)
logger.info('Ready.')
def read_stim_ch(self, buffer_size=1e5):
"""Read events from data.
        Parameters
        ----------
        buffer_size : int
            The size of the chunks by which the data are scanned.
Returns
-------
events : array, [samples]
The event vector (1 x samples).
"""
buffer_size = int(buffer_size)
start = int(self.first_samp)
stop = int(self.last_samp + 1)
pick = pick_types(self.info, meg=False, ref_meg=False,
stim=True, exclude=[])
stim_ch = np.empty((1, stop), dtype=np.int64)
for b_start in range(start, stop, buffer_size):
b_stop = b_start + buffer_size
x = self[pick, b_start:b_stop][0]
stim_ch[:, b_start:b_start + x.shape[1]] = x
return stim_ch
def _set_stimchannels(self, info, stim, stim_code):
"""Specify how the trigger channel is synthesized from analog channels.
Has to be done before loading data. For a RawKIT instance that has been
created with preload=True, this method will raise a
NotImplementedError.
Parameters
----------
info : instance of MeasInfo
The measurement info.
stim : list of int | '<' | '>'
Can be submitted as list of trigger channels.
If a list is not specified, the default triggers extracted from
misc channels will be used with specified directionality.
            '<' means that the largest values are assigned to the first
            channel in sequence.
            '>' means that the largest trigger is assigned to the last
            channel in sequence.
stim_code : 'binary' | 'channel'
            How to decode trigger values from stim channels. 'binary' reads stim
            channel events as binary code, 'channel' encodes the channel number.
"""
if self.preload:
raise NotImplementedError("Can't change stim channel after "
"loading data")
_check_option('stim_code', stim_code, ['binary', 'channel'])
if stim is not None:
if isinstance(stim, str):
picks = _default_stim_chs(info)
if stim == '<':
stim = picks[::-1]
elif stim == '>':
stim = picks
else:
raise ValueError("stim needs to be list of int, '>' or "
"'<', not %r" % str(stim))
else:
stim = np.asarray(stim, int)
if stim.max() >= self._raw_extras[0]['nchan']:
raise ValueError(
'Got stim=%s, but sqd file only has %i channels' %
(stim, self._raw_extras[0]['nchan']))
# modify info
nchan = self._raw_extras[0]['nchan'] + 1
info['chs'].append(dict(
cal=KIT.CALIB_FACTOR, logno=nchan, scanno=nchan, range=1.0,
unit=FIFF.FIFF_UNIT_NONE, unit_mul=FIFF.FIFF_UNITM_NONE,
ch_name='STI 014',
coil_type=FIFF.FIFFV_COIL_NONE, loc=np.full(12, np.nan),
kind=FIFF.FIFFV_STIM_CH, coord_frame=FIFF.FIFFV_COORD_UNKNOWN))
info._update_redundant()
self._raw_extras[0]['stim'] = stim
self._raw_extras[0]['stim_code'] = stim_code
def _read_segment_file(self, data, idx, fi, start, stop, cals, mult):
"""Read a chunk of raw data."""
sqd = self._raw_extras[fi]
nchan = sqd['nchan']
data_left = (stop - start) * nchan
conv_factor = sqd['conv_factor']
n_bytes = sqd['dtype'].itemsize
assert n_bytes in (2, 4)
# Read up to 100 MB of data at a time.
blk_size = min(data_left, (100000000 // n_bytes // nchan) * nchan)
with open(self._filenames[fi], 'rb', buffering=0) as fid:
# extract data
pointer = start * nchan * n_bytes
fid.seek(sqd['dirs'][KIT.DIR_INDEX_RAW_DATA]['offset'] + pointer)
stim = sqd['stim']
for blk_start in np.arange(0, data_left, blk_size) // nchan:
blk_size = min(blk_size, data_left - blk_start * nchan)
block = np.fromfile(fid, dtype=sqd['dtype'], count=blk_size)
block = block.reshape(nchan, -1, order='F').astype(float)
blk_stop = blk_start + block.shape[1]
data_view = data[:, blk_start:blk_stop]
block *= conv_factor
# Create a synthetic stim channel
if stim is not None:
stim_ch = _make_stim_channel(
block[stim, :], sqd['slope'], sqd['stimthresh'],
sqd['stim_code'], stim)
block = np.vstack((block, stim_ch))
_mult_cal_one(data_view, block, idx, cals, mult)
# cals are all unity, so can be ignored
def _default_stim_chs(info):
"""Return default stim channels for SQD files."""
return pick_types(info, meg=False, ref_meg=False, misc=True,
exclude=[])[:8]
def _make_stim_channel(trigger_chs, slope, threshold, stim_code,
trigger_values):
"""Create synthetic stim channel from multiple trigger channels."""
if slope == '+':
trig_chs_bin = trigger_chs > threshold
elif slope == '-':
trig_chs_bin = trigger_chs < threshold
else:
raise ValueError("slope needs to be '+' or '-'")
# trigger value
if stim_code == 'binary':
trigger_values = 2 ** np.arange(len(trigger_chs))
elif stim_code != 'channel':
raise ValueError("stim_code must be 'binary' or 'channel', got %s" %
repr(stim_code))
trig_chs = trig_chs_bin * trigger_values[:, np.newaxis]
return np.array(trig_chs.sum(axis=0), ndmin=2)
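# Informal illustration of the coding above (not part of the original module): with
# three trigger channels and stim_code='binary', trigger_values becomes [1, 2, 4], so
# simultaneous events on the first and third channels are encoded as 1 + 4 = 5 in the
# synthetic stim channel. With stim_code='channel', the channel numbers passed in as
# trigger_values are summed instead.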
class EpochsKIT(BaseEpochs):
"""Epochs Array object from KIT SQD file.
Parameters
----------
input_fname : str
Path to the sqd file.
events : str | array, shape (n_events, 3)
Path to events file. If array, it is the events typically returned
by the read_events function. If some events don't match the events
        of interest as specified by event_id, they will be marked as 'IGNORED'
in the drop log.
event_id : int | list of int | dict | None
The id of the event to consider. If dict,
the keys can later be used to access associated events. Example:
dict(auditory=1, visual=3). If int, a dict will be created with
the id as string. If a list, all events with the IDs specified
        in the list are used. If None, all events will be used,
        and a dict is created with string integer names corresponding
to the event id integers.
tmin : float
Start time before event.
baseline : None or tuple of length 2 (default (None, 0))
The time interval to apply baseline correction.
If None do not apply it. If baseline is (a, b)
the interval is between "a (s)" and "b (s)".
If a is None the beginning of the data is used
and if b is None then b is set to the end of the interval.
If baseline is equal to (None, None) all the time
interval is used.
The baseline (a, b) includes both endpoints, i.e. all
timepoints t such that a <= t <= b.
reject : dict | None
Rejection parameters based on peak-to-peak amplitude.
Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg'.
If reject is None then no rejection is done. Example::
reject = dict(grad=4000e-13, # T / m (gradiometers)
mag=4e-12, # T (magnetometers)
eeg=40e-6, # V (EEG channels)
eog=250e-6 # V (EOG channels)
)
flat : dict | None
Rejection parameters based on flatness of signal.
Valid keys are 'grad' | 'mag' | 'eeg' | 'eog' | 'ecg', and values
are floats that set the minimum acceptable peak-to-peak amplitude.
If flat is None then no rejection is done.
reject_tmin : scalar | None
Start of the time window used to reject epochs (with the default None,
the window will start with tmin).
reject_tmax : scalar | None
End of the time window used to reject epochs (with the default None,
the window will end with tmax).
mrk : None | str | array_like, shape = (5, 3) | list of str or array_like
Marker points representing the location of the marker coils with
respect to the MEG Sensors, or path to a marker file.
If list, all of the markers will be averaged together.
elp : None | str | array_like, shape = (8, 3)
Digitizer points representing the location of the fiducials and the
marker coils with respect to the digitized head shape, or path to a
file containing these points.
hsp : None | str | array, shape = (n_points, 3)
Digitizer head shape points, or path to head shape file. If more than
        10,000 points are in the head shape, they are automatically decimated.
allow_unknown_format : bool
Force reading old data that is not officially supported. Alternatively,
read and re-save the data with the KIT MEG Laboratory application.
%(standardize_names)s
%(verbose)s
Notes
-----
``elp`` and ``hsp`` are usually the exported text files (*.txt) from the
Polhemus FastScan system. hsp refers to the headshape surface points. elp
refers to the points in head-space that corresponds to the HPI points.
Currently, '*.elp' and '*.hsp' files are NOT supported.
See Also
--------
mne.Epochs : Documentation of attribute and methods.
"""
@verbose
def __init__(self, input_fname, events, event_id=None, tmin=0,
baseline=None, reject=None, flat=None, reject_tmin=None,
reject_tmax=None, mrk=None, elp=None, hsp=None,
allow_unknown_format=False, standardize_names=None,
verbose=None): # noqa: D102
if isinstance(events, str):
events = read_events(events)
logger.info('Extracting KIT Parameters from %s...' % input_fname)
input_fname = op.abspath(input_fname)
self.info, kit_info = get_kit_info(
input_fname, allow_unknown_format, standardize_names)
kit_info.update(filename=input_fname)
self._raw_extras = [kit_info]
self._filenames = []
if len(events) != self._raw_extras[0]['n_epochs']:
raise ValueError('Event list does not match number of epochs.')
if self._raw_extras[0]['acq_type'] == KIT.EPOCHS:
self._raw_extras[0]['data_length'] = KIT.INT
else:
raise TypeError('SQD file contains raw data, not epochs or '
'average. Wrong reader.')
if event_id is None: # convert to int to make typing-checks happy
event_id = {str(e): int(e) for e in np.unique(events[:, 2])}
for key, val in event_id.items():
if val not in events[:, 2]:
raise ValueError('No matching events found for %s '
'(event id %i)' % (key, val))
data = self._read_kit_data()
assert data.shape == (self._raw_extras[0]['n_epochs'],
self.info['nchan'],
self._raw_extras[0]['frame_length'])
tmax = ((data.shape[2] - 1) / self.info['sfreq']) + tmin
super(EpochsKIT, self).__init__(
self.info, data, events, event_id, tmin, tmax, baseline,
reject=reject, flat=flat, reject_tmin=reject_tmin,
reject_tmax=reject_tmax, filename=input_fname, verbose=verbose)
self.info = _call_digitization(
info=self.info, mrk=mrk, elp=elp, hsp=hsp, kit_info=kit_info)
logger.info('Ready.')
def _read_kit_data(self):
"""Read epochs data.
Returns
-------
        data : array, shape (n_epochs, n_channels, n_samples)
            The epochs data matrix.
"""
info = self._raw_extras[0]
epoch_length = info['frame_length']
n_epochs = info['n_epochs']
n_samples = info['n_samples']
filename = info['filename']
dtype = info['dtype']
nchan = info['nchan']
with open(filename, 'rb', buffering=0) as fid:
fid.seek(info['dirs'][KIT.DIR_INDEX_RAW_DATA]['offset'])
count = n_samples * nchan
data = np.fromfile(fid, dtype=dtype, count=count)
data = data.reshape((n_samples, nchan)).T
data = data * info['conv_factor']
data = data.reshape((nchan, n_epochs, epoch_length))
data = data.transpose((1, 0, 2))
return data
def _read_dir(fid):
return dict(offset=np.fromfile(fid, np.uint32, 1)[0],
size=np.fromfile(fid, np.int32, 1)[0],
max_count=np.fromfile(fid, np.int32, 1)[0],
count=np.fromfile(fid, np.int32, 1)[0])
@verbose
def get_kit_info(rawfile, allow_unknown_format, standardize_names=None,
verbose=None):
"""Extract all the information from the sqd/con file.
Parameters
----------
rawfile : str
KIT file to be read.
allow_unknown_format : bool
Force reading old data that is not officially supported. Alternatively,
read and re-save the data with the KIT MEG Laboratory application.
%(standardize_names)s
%(verbose)s
Returns
-------
info : instance of Info
An Info for the instance.
sqd : dict
A dict containing all the sqd parameter settings.
"""
sqd = dict()
sqd['rawfile'] = rawfile
unsupported_format = False
sqd['dirs'] = dirs = list()
with open(rawfile, 'rb', buffering=0) as fid: # buffering=0 for np bug
#
# directories (0)
#
dirs.append(_read_dir(fid))
dirs.extend(_read_dir(fid) for _ in range(dirs[0]['count'] - 1))
assert len(dirs) == dirs[KIT.DIR_INDEX_DIR]['count']
#
# system (1)
#
fid.seek(dirs[KIT.DIR_INDEX_SYSTEM]['offset'])
# check file format version
version, revision = unpack('2i', fid.read(2 * KIT.INT))
if version < 2 or (version == 2 and revision < 3):
version_string = "V%iR%03i" % (version, revision)
if allow_unknown_format:
unsupported_format = True
logger.warning("Force loading KIT format %s", version_string)
else:
raise UnsupportedKITFormat(
version_string,
"SQD file format %s is not officially supported. "
"Set allow_unknown_format=True to load it anyways." %
(version_string,))
sysid = unpack('i', fid.read(KIT.INT))[0]
# basic info
system_name = unpack('128s', fid.read(128))[0].decode()
# model name
model_name = unpack('128s', fid.read(128))[0].decode()
# channels
sqd['nchan'] = channel_count = unpack('i', fid.read(KIT.INT))[0]
comment = unpack('256s', fid.read(256))[0].decode()
create_time, last_modified_time = unpack('2i', fid.read(2 * KIT.INT))
fid.seek(KIT.INT * 3, SEEK_CUR) # reserved
dewar_style = unpack('i', fid.read(KIT.INT))[0]
fid.seek(KIT.INT * 3, SEEK_CUR) # spare
fll_type = unpack('i', fid.read(KIT.INT))[0]
fid.seek(KIT.INT * 3, SEEK_CUR) # spare
trigger_type = unpack('i', fid.read(KIT.INT))[0]
fid.seek(KIT.INT * 3, SEEK_CUR) # spare
adboard_type = unpack('i', fid.read(KIT.INT))[0]
fid.seek(KIT.INT * 29, SEEK_CUR) # reserved
if version < 2 or (version == 2 and revision <= 3):
adc_range = float(unpack('i', fid.read(KIT.INT))[0])
else:
adc_range = unpack('d', fid.read(KIT.DOUBLE))[0]
adc_polarity, adc_allocated, adc_stored = unpack('3i',
fid.read(3 * KIT.INT))
system_name = system_name.replace('\x00', '')
system_name = system_name.strip().replace('\n', '/')
model_name = model_name.replace('\x00', '')
model_name = model_name.strip().replace('\n', '/')
full_version = f'V{version:d}R{revision:03d}'
logger.debug("SQD file basic information:")
logger.debug("Meg160 version = %s", full_version)
logger.debug("System ID = %i", sysid)
logger.debug("System name = %s", system_name)
logger.debug("Model name = %s", model_name)
logger.debug("Channel count = %i", channel_count)
logger.debug("Comment = %s", comment)
logger.debug("Dewar style = %i", dewar_style)
logger.debug("FLL type = %i", fll_type)
logger.debug("Trigger type = %i", trigger_type)
logger.debug("A/D board type = %i", adboard_type)
logger.debug("ADC range = +/-%s[V]", adc_range / 2.)
logger.debug("ADC allocate = %i[bit]", adc_allocated)
logger.debug("ADC bit = %i[bit]", adc_stored)
# MGH description: 'acquisition (megacq) VectorView system at NMR-MGH'
description = \
f'{system_name} ({sysid}) {full_version} {model_name}'
sqd['dtype'] = np.dtype(getattr(np, f'int{adc_allocated}'))
# check that we can read this file
if fll_type not in KIT.FLL_SETTINGS:
fll_types = sorted(KIT.FLL_SETTINGS.keys())
use_fll_type = fll_types[
np.searchsorted(fll_types, fll_type) - 1]
warn('Unknown site filter settings (FLL) for system '
'"%s" model "%s" (ID %s), will assume FLL %d->%d, check '
'your data for correctness, including channel scales and '
'filter settings!'
% (system_name, model_name, sysid, fll_type, use_fll_type))
fll_type = use_fll_type
#
# channel information (4)
#
chan_dir = dirs[KIT.DIR_INDEX_CHANNELS]
chan_offset, chan_size = chan_dir['offset'], chan_dir['size']
sqd['channels'] = channels = []
exg_gains = list()
for i in range(channel_count):
fid.seek(chan_offset + chan_size * i)
channel_type, = unpack('i', fid.read(KIT.INT))
# System 52 mislabeled reference channels as NULL. This was fixed
# in system 53; not sure about 51...
if sysid == 52 and i < 160 and channel_type == KIT.CHANNEL_NULL:
channel_type = KIT.CHANNEL_MAGNETOMETER_REFERENCE
if channel_type in KIT.CHANNELS_MEG:
if channel_type not in KIT.CH_TO_FIFF_COIL:
raise NotImplementedError(
"KIT channel type %i can not be read. Please contact "
"the mne-python developers." % channel_type)
channels.append({
'type': channel_type,
# (x, y, z, theta, phi) for all MEG channels. Some channel
# types have additional information which we're not using.
'loc': np.fromfile(fid, dtype='d', count=5),
})
if channel_type in KIT.CHANNEL_NAME_NCHAR:
fid.seek(16, SEEK_CUR) # misc fields
channels[-1]['name'] = _read_name(fid, channel_type)
elif channel_type in KIT.CHANNELS_MISC:
channel_no, = unpack('i', fid.read(KIT.INT))
fid.seek(4, SEEK_CUR)
name = _read_name(fid, channel_type)
channels.append({
'type': channel_type,
'no': channel_no,
'name': name,
})
if channel_type in (KIT.CHANNEL_EEG, KIT.CHANNEL_ECG):
offset = 6 if channel_type == KIT.CHANNEL_EEG else 8
fid.seek(offset, SEEK_CUR)
exg_gains.append(np.fromfile(fid, 'd', 1)[0])
elif channel_type == KIT.CHANNEL_NULL:
channels.append({'type': channel_type})
else:
raise IOError("Unknown KIT channel type: %i" % channel_type)
exg_gains = np.array(exg_gains)
#
# Channel sensitivity information: (5)
#
# only sensor channels requires gain. the additional misc channels
# (trigger channels, audio and voice channels) are passed
# through unaffected
fid.seek(dirs[KIT.DIR_INDEX_CALIBRATION]['offset'])
# (offset [Volt], gain [Tesla/Volt]) for each channel
sensitivity = np.fromfile(fid, dtype='d', count=channel_count * 2)
sensitivity.shape = (channel_count, 2)
channel_offset, channel_gain = sensitivity.T
assert (channel_offset == 0).all() # otherwise we have a problem
#
# amplifier gain (7)
#
fid.seek(dirs[KIT.DIR_INDEX_AMP_FILTER]['offset'])
amp_data = unpack('i', fid.read(KIT.INT))[0]
if fll_type >= 100: # Kapper Type
# gain: mask bit
gain1 = (amp_data & 0x00007000) >> 12
gain2 = (amp_data & 0x70000000) >> 28
gain3 = (amp_data & 0x07000000) >> 24
amp_gain = (KIT.GAINS[gain1] * KIT.GAINS[gain2] * KIT.GAINS[gain3])
# filter settings
hpf = (amp_data & 0x00000700) >> 8
lpf = (amp_data & 0x00070000) >> 16
bef = (amp_data & 0x00000003) >> 0
else: # Hanger Type
# gain
input_gain = (amp_data & 0x1800) >> 11
output_gain = (amp_data & 0x0007) >> 0
amp_gain = KIT.GAINS[input_gain] * KIT.GAINS[output_gain]
# filter settings
hpf = (amp_data & 0x007) >> 4
lpf = (amp_data & 0x0700) >> 8
bef = (amp_data & 0xc000) >> 14
hpf_options, lpf_options, bef_options = KIT.FLL_SETTINGS[fll_type]
sqd['highpass'] = KIT.HPFS[hpf_options][hpf]
sqd['lowpass'] = KIT.LPFS[lpf_options][lpf]
sqd['notch'] = KIT.BEFS[bef_options][bef]
#
# Acquisition Parameters (8)
#
fid.seek(dirs[KIT.DIR_INDEX_ACQ_COND]['offset'])
sqd['acq_type'], = acq_type, = unpack('i', fid.read(KIT.INT))
sqd['sfreq'], = unpack('d', fid.read(KIT.DOUBLE))
if acq_type == KIT.CONTINUOUS:
# samples_count, = unpack('i', fid.read(KIT.INT))
fid.seek(KIT.INT, SEEK_CUR)
sqd['n_samples'], = unpack('i', fid.read(KIT.INT))
elif acq_type == KIT.EVOKED or acq_type == KIT.EPOCHS:
sqd['frame_length'], = unpack('i', fid.read(KIT.INT))
sqd['pretrigger_length'], = unpack('i', fid.read(KIT.INT))
sqd['average_count'], = unpack('i', fid.read(KIT.INT))
sqd['n_epochs'], = unpack('i', fid.read(KIT.INT))
if acq_type == KIT.EVOKED:
sqd['n_samples'] = sqd['frame_length']
else:
sqd['n_samples'] = sqd['frame_length'] * sqd['n_epochs']
else:
raise IOError("Invalid acquisition type: %i. Your file is neither "
"continuous nor epoched data." % (acq_type,))
#
# digitization information (12 and 26)
#
dig_dir = dirs[KIT.DIR_INDEX_DIG_POINTS]
cor_dir = dirs[KIT.DIR_INDEX_COREG]
dig = dict()
hsp = list()
if dig_dir['count'] > 0 and cor_dir['count'] > 0:
# directories (0)
fid.seek(dig_dir['offset'])
for _ in range(dig_dir['count']):
name = _read_name(fid, n=8).strip()
# Sometimes there are mismatches (e.g., AFz vs AFZ) between
                # the channel name and its digitized name, so let's be case
# insensitive. It will also prevent collisions with HSP
name = name.lower()
rr = np.fromfile(fid, 'd', 3)
if name:
assert name not in dig
dig[name] = rr
else:
hsp.append(rr)
# nasion, lpa, rpa, HPI in native space
elp = [dig.pop(key) for key in (
'fidnz', 'fidt9', 'fidt10',
'hpi_1', 'hpi_2', 'hpi_3', 'hpi_4')]
if 'hpi_5' in dig and dig['hpi_5'].any():
elp.append(dig.pop('hpi_5'))
elp = np.array(elp)
hsp = np.array(hsp, float).reshape(-1, 3)
assert elp.shape in ((7, 3), (8, 3))
# coregistration
fid.seek(cor_dir['offset'])
mrk = np.zeros((elp.shape[0] - 3, 3))
for _ in range(cor_dir['count']):
done = np.fromfile(fid, np.int32, 1)[0]
fid.seek(16 * KIT.DOUBLE + # meg_to_mri
16 * KIT.DOUBLE, # mri_to_meg
SEEK_CUR)
marker_count = np.fromfile(fid, np.int32, 1)[0]
if not done:
continue
assert marker_count >= len(mrk)
for mi in range(len(mrk)):
mri_type, meg_type, mri_done, meg_done = \
np.fromfile(fid, np.int32, 4)
assert meg_done
fid.seek(3 * KIT.DOUBLE, SEEK_CUR) # mri_pos
mrk[mi] = np.fromfile(fid, 'd', 3)
fid.seek(256, SEEK_CUR) # marker_file (char)
sqd.update(hsp=hsp, elp=elp, mrk=mrk)
all_names = set(ch.get('name', '') for ch in channels)
if standardize_names is None and all_names.difference({'', 'EEG'}):
standardize_names = True
warn('standardize_names defaults to True in 0.21 but will change '
'to False in 0.22', DeprecationWarning)
# precompute conversion factor for reading data
if unsupported_format:
if sysid not in LEGACY_AMP_PARAMS:
raise IOError("Legacy parameters for system ID %i unavailable" %
(sysid,))
adc_range, adc_stored = LEGACY_AMP_PARAMS[sysid]
is_meg = np.array([ch['type'] in KIT.CHANNELS_MEG for ch in channels])
ad_to_volt = adc_range / (2 ** adc_stored)
ad_to_tesla = ad_to_volt / amp_gain * channel_gain
conv_factor = np.where(is_meg, ad_to_tesla, ad_to_volt)
# XXX this is a bit of a hack. Should probably do this more cleanly at
    # some point... the 2 ** (adc_stored - 14) was empirically determined using
    # the test files with known amplitudes. The conv_factors need to be
    # replaced by these values otherwise we're off by a factor of 5000.0
# for the EEG data.
is_exg = [ch['type'] in (KIT.CHANNEL_EEG, KIT.CHANNEL_ECG)
for ch in channels]
exg_gains /= 2 ** (adc_stored - 14)
conv_factor[is_exg] = exg_gains
sqd['conv_factor'] = conv_factor[:, np.newaxis]
# Create raw.info dict for raw fif object with SQD data
info = _empty_info(float(sqd['sfreq']))
info.update(meas_date=_stamp_to_dt((create_time, 0)),
lowpass=sqd['lowpass'],
highpass=sqd['highpass'], kit_system_id=sysid,
description=description)
# Creates a list of dicts of meg channels for raw.info
logger.info('Setting channel info structure...')
info['chs'] = fiff_channels = []
channel_index = defaultdict(lambda: 0)
sqd['eeg_dig'] = OrderedDict()
for idx, ch in enumerate(channels, 1):
if ch['type'] in KIT.CHANNELS_MEG:
ch_name = ch.get('name', '')
if ch_name == '' or standardize_names:
ch_name = 'MEG %03d' % idx
# create three orthogonal vector
# ch_angles[0]: theta, ch_angles[1]: phi
theta, phi = np.radians(ch['loc'][3:])
x = sin(theta) * cos(phi)
y = sin(theta) * sin(phi)
z = cos(theta)
vec_z = np.array([x, y, z])
vec_z /= linalg.norm(vec_z)
vec_x = np.zeros(vec_z.size, dtype=np.float64)
if vec_z[1] < vec_z[2]:
if vec_z[0] < vec_z[1]:
vec_x[0] = 1.0
else:
vec_x[1] = 1.0
elif vec_z[0] < vec_z[2]:
vec_x[0] = 1.0
else:
vec_x[2] = 1.0
vec_x -= np.sum(vec_x * vec_z) * vec_z
vec_x /= linalg.norm(vec_x)
vec_y = np.cross(vec_z, vec_x)
# transform to Neuromag like coordinate space
vecs = np.vstack((ch['loc'][:3], vec_x, vec_y, vec_z))
vecs = apply_trans(als_ras_trans, vecs)
unit = FIFF.FIFF_UNIT_T
loc = vecs.ravel()
else:
ch_type_label = KIT.CH_LABEL[ch['type']]
channel_index[ch_type_label] += 1
ch_type_index = channel_index[ch_type_label]
ch_name = ch.get('name', '')
eeg_name = ch_name.lower()
# some files have all EEG labeled as EEG
if ch_name in ('', 'EEG') or standardize_names:
ch_name = '%s %03i' % (ch_type_label, ch_type_index)
unit = FIFF.FIFF_UNIT_V
loc = np.zeros(12)
if eeg_name and eeg_name in dig:
loc[:3] = sqd['eeg_dig'][eeg_name] = dig[eeg_name]
fiff_channels.append(dict(
cal=KIT.CALIB_FACTOR, logno=idx, scanno=idx, range=KIT.RANGE,
unit=unit, unit_mul=KIT.UNIT_MUL, ch_name=ch_name,
coord_frame=FIFF.FIFFV_COORD_DEVICE,
coil_type=KIT.CH_TO_FIFF_COIL[ch['type']],
kind=KIT.CH_TO_FIFF_KIND[ch['type']], loc=loc))
info._update_redundant()
return info, sqd
def _read_name(fid, ch_type=None, n=None):
n = n if ch_type is None else KIT.CHANNEL_NAME_NCHAR[ch_type]
return fid.read(n).split(b'\x00')[0].decode('utf-8')
@fill_doc
def read_raw_kit(input_fname, mrk=None, elp=None, hsp=None, stim='>',
slope='-', stimthresh=1, preload=False, stim_code='binary',
allow_unknown_format=False, standardize_names=None,
verbose=None):
"""Reader function for Ricoh/KIT conversion to FIF.
Parameters
----------
input_fname : str
Path to the sqd file.
mrk : None | str | array_like, shape (5, 3) | list of str or array_like
Marker points representing the location of the marker coils with
respect to the MEG Sensors, or path to a marker file.
If list, all of the markers will be averaged together.
elp : None | str | array_like, shape (8, 3)
Digitizer points representing the location of the fiducials and the
marker coils with respect to the digitized head shape, or path to a
file containing these points.
hsp : None | str | array, shape (n_points, 3)
Digitizer head shape points, or path to head shape file. If more than
10,000 points are in the head shape, they are automatically decimated.
stim : list of int | '<' | '>'
Channel-value correspondence when converting KIT trigger channels to a
Neuromag-style stim channel. For '<', the largest values are assigned
to the first channel (default). For '>', the largest values are
assigned to the last channel. Can also be specified as a list of
trigger channel indexes.
slope : '+' | '-'
How to interpret values on KIT trigger channels when synthesizing a
Neuromag-style stim channel. With '+', a positive slope (low-to-high)
is interpreted as an event. With '-', a negative slope (high-to-low)
is interpreted as an event.
stimthresh : float
The threshold level for accepting voltage changes in KIT trigger
channels as a trigger event.
%(preload)s
stim_code : 'binary' | 'channel'
        How to decode trigger values from stim channels. 'binary' reads stim
        channel events as binary code, 'channel' encodes the channel number.
allow_unknown_format : bool
Force reading old data that is not officially supported. Alternatively,
read and re-save the data with the KIT MEG Laboratory application.
%(standardize_names)s
%(verbose)s
Returns
-------
raw : instance of RawKIT
A Raw object containing KIT data.
See Also
--------
mne.io.Raw : Documentation of attribute and methods.
Notes
-----
If mrk, hsp or elp are array_like inputs, then the numbers in xyz
coordinates should be in units of meters.
"""
return RawKIT(input_fname=input_fname, mrk=mrk, elp=elp, hsp=hsp,
stim=stim, slope=slope, stimthresh=stimthresh,
preload=preload, stim_code=stim_code,
allow_unknown_format=allow_unknown_format,
standardize_names=standardize_names, verbose=verbose)
@fill_doc
def read_epochs_kit(input_fname, events, event_id=None, mrk=None, elp=None,
hsp=None, allow_unknown_format=False,
standardize_names=None, verbose=None):
"""Reader function for Ricoh/KIT epochs files.
Parameters
----------
input_fname : str
Path to the sqd file.
events : array, shape (n_events, 3)
The events typically returned by the read_events function.
If some events don't match the events of interest as specified
by event_id, they will be marked as 'IGNORED' in the drop log.
event_id : int | list of int | dict | None
The id of the event to consider. If dict,
the keys can later be used to access associated events. Example:
dict(auditory=1, visual=3). If int, a dict will be created with
the id as string. If a list, all events with the IDs specified
        in the list are used. If None, all events will be used,
        and a dict is created with string integer names corresponding
to the event id integers.
mrk : None | str | array_like, shape (5, 3) | list of str or array_like
Marker points representing the location of the marker coils with
respect to the MEG Sensors, or path to a marker file.
If list, all of the markers will be averaged together.
elp : None | str | array_like, shape (8, 3)
Digitizer points representing the location of the fiducials and the
marker coils with respect to the digitized head shape, or path to a
file containing these points.
hsp : None | str | array, shape (n_points, 3)
Digitizer head shape points, or path to head shape file. If more than
10,000 points are in the head shape, they are automatically decimated.
allow_unknown_format : bool
Force reading old data that is not officially supported. Alternatively,
read and re-save the data with the KIT MEG Laboratory application.
%(standardize_names)s
%(verbose)s
Returns
-------
epochs : instance of Epochs
The epochs.
Notes
-----
.. versionadded:: 0.9.0
"""
epochs = EpochsKIT(input_fname=input_fname, events=events,
event_id=event_id, mrk=mrk, elp=elp, hsp=hsp,
allow_unknown_format=allow_unknown_format,
standardize_names=standardize_names,
verbose=verbose)
return epochs
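# Minimal usage sketch (not part of the original module); 'recording.sqd' is a
# placeholder path for any KIT/Ricoh .sqd or .con file.
if __name__ == '__main__':
    raw = read_raw_kit('recording.sqd', preload=True)
    print(raw.info)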
| 43.437759 | 79 | 0.58676 | [
"Apache-2.0"
] | alexisicte/aviate | venv/lib/python3.8/site-packages/mne/io/kit/kit.py | 41,874 | Python |
#!/usr/bin/python3
# --- 001 > U5W2P1_Task6_w1
def solution(n):
    return 2 < n < 7
if __name__ == "__main__":
print('----------start------------')
n = 10
print(solution( n ))
print('------------end------------')
| 19.466667 | 40 | 0.445205 | [
"MIT"
] | MingjunGeng/Code-Knowledge | src/CodeLearn/plaintextCode/BloomTech/BTU5W1/U5W1P2_Task6_w1.py | 292 | Python |
from typing import Optional, Dict
from tabulate import tabulate
import pandas as pd
from mdrsl.utils.value_collection import ValueCollector
class MIDSObjectiveFunctionStatistics:
def __init__(self):
self.last_f0: Optional[int] = None
self.last_f1: Optional[int] = None
self.last_f2: Optional[int] = None
self.last_f3: Optional[int] = None
self.last_f4: Optional[int] = None
self.last_f5: Optional[int] = None
self.last_f6: Optional[int] = None
self.last_f7: Optional[int] = None
self.last_f_total: Optional[int] = None
self.value_collectors = dict(
f0=ValueCollector(),
f1=ValueCollector(),
f2=ValueCollector(),
f3=ValueCollector(),
f4=ValueCollector(),
f5=ValueCollector(),
f6=ValueCollector(),
f_total=ValueCollector()
)
def add_values(self, f0, f1, f2, f3, f4, f5, f6, f_total):
self.last_f0 = f0
self.last_f1 = f1
self.last_f2 = f2
self.last_f3 = f3
self.last_f4 = f4
self.last_f5 = f5
self.last_f6 = f6
self.last_f_total = f_total
self.value_collectors['f0'].add_value(f0)
self.value_collectors['f1'].add_value(f1)
self.value_collectors['f2'].add_value(f2)
self.value_collectors['f3'].add_value(f3)
self.value_collectors['f4'].add_value(f4)
self.value_collectors['f5'].add_value(f5)
self.value_collectors['f6'].add_value(f6)
self.value_collectors['f_total'].add_value(f_total)
def values_to_pandas_dataframe(self) -> Optional[pd.DataFrame]:
if ValueCollector.collect_values:
columns = ['type', 'value']
data = []
for function_name, value_collector in self.value_collectors.items():
for value in value_collector.values:
data.append([function_name, value])
df = pd.DataFrame(data=data, columns=columns)
return df
else:
return None
def values_to_pandas_dataframe2(self) -> Optional[pd.DataFrame]:
if ValueCollector.collect_values:
columns = ['call_index', 'type', 'value']
data = []
for function_name, value_collector in self.value_collectors.items():
for call_index, value in enumerate(value_collector.values):
data.append([call_index, function_name, value])
df = pd.DataFrame(data=data, columns=columns)
return df
else:
return None
def get_last_f_values(self) -> Dict[str, float]:
return dict(
f0=self.last_f0,
f1=self.last_f1,
f2=self.last_f2,
f3=self.last_f3,
f4=self.last_f4,
f5=self.last_f5,
f6=self.last_f6,
f_total=self.last_f_total)
def __str__(self):
table_str = tabulate(
[
['count',
self.value_collectors['f0'].count,
self.value_collectors['f1'].count,
self.value_collectors['f2'].count,
self.value_collectors['f3'].count,
self.value_collectors['f4'].count,
self.value_collectors['f5'].count,
self.value_collectors['f6'].count,
self.value_collectors['f_total'].count
],
['sum',
self.value_collectors['f0'].sum,
self.value_collectors['f1'].sum,
self.value_collectors['f2'].sum,
self.value_collectors['f3'].sum,
self.value_collectors['f4'].sum,
self.value_collectors['f5'].sum,
self.value_collectors['f6'].sum,
self.value_collectors['f_total'].sum
],
['min',
self.value_collectors['f0'].min,
self.value_collectors['f1'].min,
self.value_collectors['f2'].min,
self.value_collectors['f3'].min,
self.value_collectors['f4'].min,
self.value_collectors['f5'].min,
self.value_collectors['f6'].min,
self.value_collectors['f_total'].min
],
['avg',
self.value_collectors['f0'].get_avg(),
self.value_collectors['f1'].get_avg(),
self.value_collectors['f2'].get_avg(),
self.value_collectors['f3'].get_avg(),
self.value_collectors['f4'].get_avg(),
self.value_collectors['f5'].get_avg(),
self.value_collectors['f6'].get_avg(),
self.value_collectors['f_total'].get_avg()
],
['max',
self.value_collectors['f0'].max,
self.value_collectors['f1'].max,
self.value_collectors['f2'].max,
self.value_collectors['f3'].max,
self.value_collectors['f4'].max,
self.value_collectors['f5'].max,
self.value_collectors['f6'].max,
self.value_collectors['f_total'].max
],
['last_val',
self.last_f0,
self.last_f1,
self.last_f2,
self.last_f3,
self.last_f4,
self.last_f5,
self.last_f6,
self.last_f_total
]
],
headers=['type', 'f0', 'f1', 'f2', 'f3', 'f4', 'f5', 'f6', 'f_total']
)
return table_str
if __name__ == '__main__':
vc = ValueCollector()
vc.add_value(1)
vc.add_value(2)
vc.add_value(3)
print(vc)
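    # Hypothetical demo of the statistics collector defined above; the objective
    # function values are made up for illustration.
    stats = MIDSObjectiveFunctionStatistics()
    stats.add_values(f0=0.1, f1=0.2, f2=0.3, f3=0.4, f4=0.5, f5=0.6, f6=0.7, f_total=2.8)
    print(stats.get_last_f_values())
    print(stats)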
| 35.409639 | 81 | 0.525859 | [
"Apache-2.0"
] | joschout/Multi-Directional-Rule-Set-Learning | mdrsl/rule_models/mids/objective_function/mids_objective_function_statistics.py | 5,878 | Python |
# uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: T:\InGame\Gameplay\Scripts\Server\objects\gardening\gardening_commands.py
# Compiled at: 2017-11-18 00:09:10
# Size of source mod 2**32: 1465 bytes
from objects.components import types
from objects.components.types import GARDENING_COMPONENT
from objects.gardening.gardening_component_fruit import GardeningFruitComponent
import services, sims4.commands
@sims4.commands.Command('gardening.cleanup_gardening_objects')
def cleanup_gardening_objects(_connection=None):
for obj in services.object_manager().get_all_objects_with_component_gen(GARDENING_COMPONENT):
gardening_component = obj.get_component(types.GARDENING_COMPONENT)
if not isinstance(gardening_component, GardeningFruitComponent):
continue
if obj.parent is None:
            if not (obj.is_in_inventory() or obj.is_on_active_lot()):
                sims4.commands.output(
                    'Destroyed object {} on open street was found without a parent '
                    'at position {}, parent_type {}.'.format(obj, obj.position, obj.parent_type),
                    _connection)
obj.destroy(source=obj, cause='Fruit/Flower with no parent on open street')
sims4.commands.output('Gardening cleanup complete', _connection)
    return True
| 59.217391 | 237 | 0.769457 | [
"Apache-2.0"
] | velocist/TS4CheatsInfo | Scripts/simulation/objects/gardening/gardening_commands.py | 1,362 | Python |
# coding: utf-8
# /*##########################################################################
#
# Copyright (c) 2015-2016 European Synchrotron Radiation Facility
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# ###########################################################################*/
from __future__ import absolute_import
__authors__ = ["D. Naudet"]
__license__ = "MIT"
__date__ = "15/09/2016"
from ...io.XsocsH5 import ScanPositions
from .ProjectItem import ProjectItem
from .ProjectDef import ItemClassDef
@ItemClassDef('ScanPositionsItem')
class ScanPositionsItem(ProjectItem):
def _createItem(self):
with self.xsocsH5 as h5f:
entries = h5f.entries()
entry = entries[0]
scan_positions = h5f.scan_positions(entry)
pathTpl = self.path + '/' + '{0}'
with self:
itemPath = pathTpl.format('pos_0')
self._set_array_data(itemPath, scan_positions.pos_0)
itemPath = pathTpl.format('pos_1')
self._set_array_data(itemPath, scan_positions.pos_1)
itemPath = pathTpl.format('motor_0')
self._set_scalar_data(itemPath, scan_positions.motor_0)
itemPath = pathTpl.format('motor_1')
self._set_scalar_data(itemPath, scan_positions.motor_1)
itemPath = pathTpl.format('n_0')
self._set_scalar_data(itemPath, scan_positions.shape[0])
itemPath = pathTpl.format('n_1')
self._set_scalar_data(itemPath, scan_positions.shape[1])
def positions(self):
pathTpl = self.path + '/' + '{0}'
with self:
itemPath = pathTpl.format('pos_0')
pos_0 = self._get_array_data(itemPath)
itemPath = pathTpl.format('pos_1')
pos_1 = self._get_array_data(itemPath)
itemPath = pathTpl.format('motor_0')
motor_0 = self._get_scalar_data(itemPath)
itemPath = pathTpl.format('motor_1')
motor_1 = self._get_scalar_data(itemPath)
itemPath = pathTpl.format('n_0')
n_0 = self._get_scalar_data(itemPath)
itemPath = pathTpl.format('n_1')
n_1 = self._get_scalar_data(itemPath)
return ScanPositions(motor_0=motor_0,
pos_0=pos_0,
motor_1=motor_1,
pos_1=pos_1,
shape=(n_0, n_1))
| 43.148148 | 79 | 0.621173 | [
"MIT"
] | omserta/xsocs | xsocs/gui/project/ScanPositionsItem.py | 3,495 | Python |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# @Time : 2022/2/9 12:09 PM
# @Author: zhoumengjie
# @File : tabledrawer.py
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from matplotlib.font_manager import FontProperties
def draw_table(columns_head: list, cell_vals=[]):
    # Set the font and make minus signs display correctly
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False
    # Figure and axes
fig, ax = plt.subplots(figsize=(10, 4), dpi=100)
    # Data
data = [
[100, 200, 300, -100, 350],
[-120, 290, -90, 450, 150]
]
    # Column and row labels
columns = ('一', '二', '三', '四', '五')
rows = ['A', 'B']
    # Plotting parameters
index = np.arange(len(columns)) - 0.1
bar_width = 0.4
    # Colors
colors = ['turquoise', 'coral']
    # Bar charts
bar1 = plt.bar(index, data[0], bar_width, color=colors[0], edgecolor='grey')
bar2 = plt.bar(index + bar_width, data[1], bar_width, color=colors[1], edgecolor='grey')
    # Title, y-axis label and limits
ax.set_title('收益情况', fontsize=16, y=1.1, x=0.44)
ax.set_ylabel('元', fontsize=12, color='black', alpha=0.7, rotation=360)
ax.set_ylim(-150, 500)
    # Show data labels
# ax.bar_label(bar1, label_type='edge')
# ax.bar_label(bar2, label_type='edge')
    # Hide the x and y tick marks
ax.tick_params(axis=u'both', which=u'both', length=0)
plt.xticks([])
table = plt.table(cellText=data, rowLabels=rows,
rowColours=colors,
colLabels=columns, cellLoc='center', loc='bottom',
bbox=[0, -0.4, 1, 0.24])
cellDict = table.get_celld()
for i in range(0, len(columns)):
cellDict[(0, i)].set_height(0.6)
for j in range(1, len(rows) + 1):
cellDict[(j, i)].set_height(0.4)
cellDict[(1, -1)].set_height(0.4)
cellDict[(2, -1)].set_height(0.4)
table.auto_set_font_size(False)
table.set_fontsize(10)
for key, cell in table.get_celld().items():
cell.set_linewidth(0.6)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
name = ['', '']
ax.legend(name, handlelength=0.7, labelspacing=0.6,
bbox_to_anchor=(-0.1, -0.23), loc='upper left', frameon=False)
plt.show()
if __name__ == '__main__':
# draw_table(['A', 'B'], [['中国', '必胜'], ['你好', '谢谢']])
# print(4800 / 1100 / 1000)
data = {
'linux': [1.2, 2.2, 3.1, '中国', 2.0, 1.0, 2.1, 3.5, 4.0, 2.0, ],
'linuxmi': [5.2, 6.7, 7.9, 8.3, 1.2, 5.7, 6.1, 7.2, 8.3, '-', ],
}
df = pd.DataFrame(data)
fig, ax = plt.subplots(figsize=(3, 3))
ax.axis('off')
ax.axis('tight')
ax.table(cellText=df.values,
colLabels=df.columns,
bbox=[0, 0, 1, 1],
)
# plt.savefig('xx.png')
plt.show()
| 26.588785 | 92 | 0.557118 | [
"MIT"
] | vandyzhou/wxcloudrun-django | wxcloudrun/common/tabledrawer.py | 2,969 | Python |
import os
from setuptools import find_packages, setup
this = os.path.dirname(os.path.realpath(__file__))
def read(name):
with open(os.path.join(this, name)) as f:
return f.read()
setup(
name='pyramid_pages',
version='0.0.5',
url='http://github.com/uralbash/pyramid_pages/',
author='Svintsov Dmitry',
author_email='sacrud@uralbash.ru',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
test_suite="nose.collector",
license="MIT",
description='Tree pages for pyramid',
long_description=read('README.rst'),
install_requires=read('requirements.txt'),
tests_require=read('requirements.txt') + read('requirements-test.txt'),
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python',
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Framework :: Pyramid ",
"Topic :: Internet",
"Topic :: Database",
],
)
| 30.866667 | 75 | 0.62491 | [
"MIT"
] | ITCase/ps_pages | setup.py | 1,389 | Python |
from .TapChanger import TapChanger
class RatioTapChanger(TapChanger):
'''
A tap changer that changes the voltage ratio impacting the voltage magnitude but not the phase angle across the transformer.
:tculControlMode: Specifies the regulation control mode (voltage or reactive) of the RatioTapChanger. Default: None
:stepVoltageIncrement: Tap step increment, in per cent of nominal voltage, per step position. Default: 0.0
:RatioTapChangerTable: The ratio tap changer of this tap ratio table. Default: None
:TransformerEnd: Ratio tap changer associated with this transformer end. Default: None
'''
cgmesProfile = TapChanger.cgmesProfile
possibleProfileList = {'class': [cgmesProfile.EQ.value, cgmesProfile.SSH.value, ],
'tculControlMode': [cgmesProfile.EQ.value, ],
'stepVoltageIncrement': [cgmesProfile.EQ.value, ],
'RatioTapChangerTable': [cgmesProfile.EQ.value, ],
'TransformerEnd': [cgmesProfile.EQ.value, ],
}
serializationProfile = {}
__doc__ += '\n Documentation of parent class TapChanger: \n' + TapChanger.__doc__
def __init__(self, tculControlMode = None, stepVoltageIncrement = 0.0, RatioTapChangerTable = None, TransformerEnd = None, *args, **kw_args):
super().__init__(*args, **kw_args)
self.tculControlMode = tculControlMode
self.stepVoltageIncrement = stepVoltageIncrement
self.RatioTapChangerTable = RatioTapChangerTable
self.TransformerEnd = TransformerEnd
def __str__(self):
str = 'class=RatioTapChanger\n'
attributes = self.__dict__
for key in attributes.keys():
str = str + key + '={}\n'.format(attributes[key])
return str
| 39.585366 | 143 | 0.74923 | [
"MPL-2.0",
"MPL-2.0-no-copyleft-exception"
] | CIM-IEC/CIMpy | cimpy/cgmes_v2_4_15/RatioTapChanger.py | 1,623 | Python |
import torch
import math
from torch import nn, Tensor
from torch.nn import functional as F
from semseg.models.backbones import *
from semseg.models.modules.common import ConvModule
class SpatialPath(nn.Module):
def __init__(self, c1, c2) -> None:
super().__init__()
ch = 64
self.conv_7x7 = ConvModule(c1, ch, 7, 2, 3)
self.conv_3x3_1 = ConvModule(ch, ch, 3, 2, 1)
self.conv_3x3_2 = ConvModule(ch, ch, 3, 2, 1)
self.conv_1x1 = ConvModule(ch, c2, 1, 1, 0)
def forward(self, x):
x = self.conv_7x7(x)
x = self.conv_3x3_1(x)
x = self.conv_3x3_2(x)
return self.conv_1x1(x)
class ContextPath(nn.Module):
def __init__(self, backbone: nn.Module) -> None:
super().__init__()
self.backbone = backbone
c3, c4 = self.backbone.channels[-2:]
self.arm16 = AttentionRefinmentModule(c3, 128)
self.arm32 = AttentionRefinmentModule(c4, 128)
self.global_context = nn.Sequential(
nn.AdaptiveAvgPool2d(1),
ConvModule(c4, 128, 1, 1, 0)
)
self.up16 = nn.Upsample(scale_factor=2.0, mode='bilinear', align_corners=True)
self.up32 = nn.Upsample(scale_factor=2.0, mode='bilinear', align_corners=True)
self.refine16 = ConvModule(128, 128, 3, 1, 1)
self.refine32 = ConvModule(128, 128, 3, 1, 1)
def forward(self, x):
_, _, down16, down32 = self.backbone(x) # 4x256x64x128, 4x512x32x64
arm_down16 = self.arm16(down16) # 4x128x64x128
arm_down32 = self.arm32(down32) # 4x128x32x64
global_down32 = self.global_context(down32) # 4x128x1x1
global_down32 = F.interpolate(global_down32, size=down32.size()[2:], mode='bilinear', align_corners=True) # 4x128x32x64
arm_down32 = arm_down32 + global_down32 # 4x128x32x64
arm_down32 = self.up32(arm_down32) # 4x128x64x128
arm_down32 = self.refine32(arm_down32) # 4x128x64x128
arm_down16 = arm_down16 + arm_down32 # 4x128x64x128
arm_down16 = self.up16(arm_down16) # 4x128x128x256
arm_down16 = self.refine16(arm_down16) # 4x128x128x256
return arm_down16, arm_down32
class AttentionRefinmentModule(nn.Module):
def __init__(self, c1, c2) -> None:
super().__init__()
self.conv_3x3 = ConvModule(c1, c2, 3, 1, 1)
self.attention = nn.Sequential(
nn.AdaptiveAvgPool2d(1),
nn.Conv2d(c2, c2, 1, bias=False),
nn.BatchNorm2d(c2),
nn.Sigmoid()
)
def forward(self, x):
fm = self.conv_3x3(x)
fm_se = self.attention(fm)
return fm * fm_se
class FeatureFusionModule(nn.Module):
def __init__(self, c1, c2, reduction=1) -> None:
super().__init__()
self.conv_1x1 = ConvModule(c1, c2, 1, 1, 0)
self.attention = nn.Sequential(
nn.AdaptiveAvgPool2d(1),
nn.Conv2d(c2, c2 // reduction, 1, bias=False),
nn.ReLU(True),
nn.Conv2d(c2 // reduction, c2, 1, bias=False),
nn.Sigmoid()
)
def forward(self, x1, x2):
fm = torch.cat([x1, x2], dim=1)
fm = self.conv_1x1(fm)
fm_se = self.attention(fm)
return fm + fm * fm_se
class Head(nn.Module):
def __init__(self, c1, n_classes, upscale_factor, is_aux=False) -> None:
super().__init__()
ch = 256 if is_aux else 64
c2 = n_classes * upscale_factor * upscale_factor
self.conv_3x3 = ConvModule(c1, ch, 3, 1, 1)
self.conv_1x1 = nn.Conv2d(ch, c2, 1, 1, 0)
self.upscale = nn.PixelShuffle(upscale_factor)
def forward(self, x):
x = self.conv_1x1(self.conv_3x3(x))
return self.upscale(x)
class BiSeNetv1(nn.Module):
def __init__(self, backbone: str = 'ResNet-18', num_classes: int = 19) -> None:
super().__init__()
backbone, variant = backbone.split('-')
self.context_path = ContextPath(eval(backbone)(variant))
self.spatial_path = SpatialPath(3, 128)
self.ffm = FeatureFusionModule(256, 256)
self.output_head = Head(256, num_classes, upscale_factor=8, is_aux=False)
self.context16_head = Head(128, num_classes, upscale_factor=8, is_aux=True)
self.context32_head = Head(128, num_classes, upscale_factor=16, is_aux=True)
self.apply(self._init_weights)
def _init_weights(self, m: nn.Module) -> None:
if isinstance(m, nn.Conv2d):
fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
            fan_out //= m.groups
m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
if m.bias is not None:
nn.init.zeros_(m.bias)
elif isinstance(m, (nn.LayerNorm, nn.BatchNorm2d)):
nn.init.ones_(m.weight)
nn.init.zeros_(m.bias)
def init_pretrained(self, pretrained: str = None) -> None:
if pretrained:
self.context_path.backbone.load_state_dict(torch.load(pretrained, map_location='cpu'), strict=False)
def forward(self, x): # 4x3x1024x2048
spatial_out = self.spatial_path(x) # 4x128x128x256
context16, context32 = self.context_path(x) # 4x128x128x256, 4x128x64x128
fm_fuse = self.ffm(spatial_out, context16) # 4x256x128x256
output = self.output_head(fm_fuse) # 4xn_classesx1024x2048
if self.training:
context_out16 = self.context16_head(context16) # 4xn_classesx1024x2048
context_out32 = self.context32_head(context32) # 4xn_classesx1024x2048
return output, context_out16, context_out32
return output
if __name__ == '__main__':
model = BiSeNetv1('MobileNetV2-1.0', 19)
# model.init_pretrained('checkpoints/backbones/resnet/resnet18.pth')
model.eval()
image = torch.randn(1, 3, 224, 224)
output = model(image)
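    # For this 224x224 input the output is expected to come back at full input
    # resolution, i.e. torch.Size([1, 19, 224, 224]) (informal check; internal channel
    # counts depend on the chosen backbone, but the head upsamples by 8 from 1/8 scale).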
    print(output.shape)
| 36.940476 | 129 | 0.596197 | [
"MIT"
] | Apexsf/test | semseg/models/bisenetv1.py | 6,206 | Python |
# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-
# ex: set sts=4 ts=4 sw=4 et:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the datalad package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
import collections
from collections.abc import Callable
import re
import builtins
import time
import logging
import shutil
import os
import sys
import tempfile
from tempfile import NamedTemporaryFile
import platform
import gc
import glob
import gzip
import stat
import string
import warnings
import os.path as op
from copy import copy as shallow_copy
from contextlib import contextmanager
from functools import (
lru_cache,
wraps,
)
from time import sleep
import inspect
from itertools import tee
# this import is required because other modules import opj from here.
from os.path import join as opj
from os.path import (
abspath,
basename,
commonprefix,
curdir,
dirname,
exists,
expanduser,
expandvars,
isabs,
isdir,
islink,
lexists,
normpath,
pardir,
relpath,
sep,
split,
splitdrive
)
import posixpath
from shlex import (
quote as shlex_quote,
split as shlex_split,
)
# from datalad.dochelpers import get_docstring_split
from datalad.consts import TIMESTAMP_FMT
from datalad.support.exceptions import CapturedException
unicode_srctypes = str, bytes
lgr = logging.getLogger("datalad.utils")
lgr.log(5, "Importing datalad.utils")
#
# Some useful variables
#
platform_system = platform.system().lower()
on_windows = platform_system == 'windows'
on_osx = platform_system == 'darwin'
on_linux = platform_system == 'linux'
on_msys_tainted_paths = on_windows \
and 'MSYS_NO_PATHCONV' not in os.environ \
and os.environ.get('MSYSTEM', '')[:4] in ('MSYS', 'MING')
# Takes ~200msec, so should not be called at import time
@lru_cache() # output should not change through life time of datalad process
def get_linux_distribution():
"""Compatibility wrapper for {platform,distro}.linux_distribution().
"""
if hasattr(platform, "linux_distribution"):
# Use deprecated (but faster) method if it's available.
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
result = platform.linux_distribution()
else:
import distro # We require this for Python 3.8 and above.
result = distro.linux_distribution(full_distribution_name=False)
return result
# Those weren't used for any critical decision making, thus we just set them to None
# Use get_linux_distribution() directly where needed
linux_distribution_name = linux_distribution_release = None
# Maximal length of cmdline string
# Query the system and use hardcoded "knowledge" if None
# probably getconf ARG_MAX might not be available
# The last one would be the most conservative/Windows
CMD_MAX_ARG_HARDCODED = 2097152 if on_linux else 262144 if on_osx else 32767
try:
CMD_MAX_ARG = os.sysconf('SC_ARG_MAX')
assert CMD_MAX_ARG > 0
if CMD_MAX_ARG > CMD_MAX_ARG_HARDCODED * 1e6:
# workaround for some kind of a bug which comes up with python 3.4
# see https://github.com/datalad/datalad/issues/3150
# or on older CentOS with conda and python as new as 3.9
# see https://github.com/datalad/datalad/issues/5943
# TODO: let Yarik know that the world is a paradise now whenever 1e6
# is not large enough
CMD_MAX_ARG = min(CMD_MAX_ARG, CMD_MAX_ARG_HARDCODED)
except Exception as exc:
# ATM (20181005) SC_ARG_MAX available only on POSIX systems
# so exception would be thrown e.g. on Windows, or
# somehow during Debian build for nd14.04 it is coming up with -1:
# https://github.com/datalad/datalad/issues/3015
CMD_MAX_ARG = CMD_MAX_ARG_HARDCODED
lgr.debug(
"Failed to query or got useless SC_ARG_MAX sysconf, "
"will use hardcoded value: %s", exc)
# Even with all careful computations we do, due to necessity to account for
# environment and what not, we still could not figure out "exact" way to
# estimate it, but it was shown that 300k safety margin on linux was sufficient.
# https://github.com/datalad/datalad/pull/2977#issuecomment-436264710
# 300k is ~15%, so to be safe, and for paranoid us we will just use up to 50%
# of the length for "safety margin". We might probably still blow due to
# env vars, unicode, etc... so any hard limit imho is not a proper solution
CMD_MAX_ARG = int(0.5 * CMD_MAX_ARG)
lgr.debug(
"Maximal length of cmdline string (adjusted for safety margin): %d",
CMD_MAX_ARG)
#
# Little helpers
#
# `getargspec` has been deprecated in Python 3.
ArgSpecFake = collections.namedtuple(
"ArgSpecFake", ["args", "varargs", "keywords", "defaults"])
def getargspec(func, *, include_kwonlyargs=False):
"""Compat shim for getargspec deprecated in python 3.
The main difference from inspect.getargspec (and inspect.getfullargspec
for that matter) is that by using inspect.signature we are providing
correct args/defaults for functools.wraps'ed functions.
`include_kwonlyargs` option was added to centralize getting all args,
even the ones which are kwonly (follow the ``*,``).
For internal use and not advised for use in 3rd party code.
Please use inspect.signature directly.
"""
# We use signature, and not getfullargspec, because only signature properly
# "passes" args from a functools.wraps decorated function.
# Note: getfullargspec works Ok on wrapt-decorated functions
f_sign = inspect.signature(func)
# Loop through parameters and compose argspec
args4 = [[], None, None, {}]
# Collect all kwonlyargs into a dedicated dict - name: default
kwonlyargs = {}
# shortcuts
args, defaults = args4[0], args4[3]
P = inspect.Parameter
for p_name, p in f_sign.parameters.items():
if p.kind in (P.POSITIONAL_ONLY, P.POSITIONAL_OR_KEYWORD):
assert not kwonlyargs # yoh: must not come after kwonlyarg
args.append(p_name)
if p.default is not P.empty:
defaults[p_name] = p.default
elif p.kind == P.VAR_POSITIONAL:
args4[1] = p_name
elif p.kind == P.VAR_KEYWORD:
args4[2] = p_name
elif p.kind == P.KEYWORD_ONLY:
assert p.default is not P.empty
kwonlyargs[p_name] = p.default
if kwonlyargs:
if not include_kwonlyargs:
raise ValueError(
'Function has keyword-only parameters or annotations, either use '
'inspect.signature() API which can support them, or provide include_kwonlyargs=True '
'to this function'
)
else:
args.extend(list(kwonlyargs))
defaults.update(kwonlyargs)
# harmonize defaults to how original getargspec returned them -- just a tuple
args4[3] = None if not defaults else tuple(defaults.values())
return ArgSpecFake(*args4)
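# Informal example: for ``def f(a, b=1, *args, **kwargs): pass`` this returns
# ArgSpecFake(args=['a', 'b'], varargs='args', keywords='kwargs', defaults=(1,)).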
def any_re_search(regexes, value):
"""Return if any of regexes (list or str) searches successfully for value"""
for regex in ensure_tuple_or_list(regexes):
if re.search(regex, value):
return True
return False
def not_supported_on_windows(msg=None):
"""A little helper to be invoked to consistently fail whenever functionality is
not supported (yet) on Windows
"""
if on_windows:
raise NotImplementedError("This functionality is not yet implemented for Windows OS"
+ (": %s" % msg if msg else ""))
def get_home_envvars(new_home):
"""Return dict with env variables to be adjusted for a new HOME
Only variables found in current os.environ are adjusted.
Parameters
----------
new_home: str or Path
New home path, in native to OS "schema"
"""
new_home = str(new_home)
out = {'HOME': new_home}
if on_windows:
# requires special handling, since it has a number of relevant variables
# and also Python changed its behavior and started to respect USERPROFILE only
# since python 3.8: https://bugs.python.org/issue36264
out['USERPROFILE'] = new_home
out['HOMEDRIVE'], out['HOMEPATH'] = splitdrive(new_home)
return {v: val for v, val in out.items() if v in os.environ}
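# Illustrative usage (sketch; the returned keys depend on the current os.environ):
#   >>> get_home_envvars('/tmp/newhome')   # on a POSIX box with HOME defined
#   {'HOME': '/tmp/newhome'}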
def shortened_repr(value, l=30):
try:
if hasattr(value, '__repr__') and (value.__repr__ is not object.__repr__):
value_repr = repr(value)
if not value_repr.startswith('<') and len(value_repr) > l:
value_repr = "<<%s++%d chars++%s>>" % (
value_repr[:l - 16],
len(value_repr) - (l - 16 + 4),
value_repr[-4:]
)
            elif value_repr.startswith('<') and value_repr.endswith('>') \
                    and ' object at 0x' in value_repr:
raise ValueError("I hate those useless long reprs")
else:
raise ValueError("gimme class")
except Exception as e:
value_repr = "<%s>" % value.__class__.__name__.split('.')[-1]
return value_repr
def __auto_repr__(obj):
attr_names = tuple()
if hasattr(obj, '__dict__'):
attr_names += tuple(obj.__dict__.keys())
if hasattr(obj, '__slots__'):
attr_names += tuple(obj.__slots__)
items = []
for attr in sorted(set(attr_names)):
if attr.startswith('_'):
continue
value = getattr(obj, attr)
# TODO: should we add this feature to minimize some talktative reprs
# such as of URL?
#if value is None:
# continue
items.append("%s=%s" % (attr, shortened_repr(value)))
return "%s(%s)" % (obj.__class__.__name__, ', '.join(items))
def auto_repr(cls):
"""Decorator for a class to assign it an automagic quick and dirty __repr__
It uses public class attributes to prepare repr of a class
Original idea: http://stackoverflow.com/a/27799004/1265472
"""
cls.__repr__ = __auto_repr__
return cls
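# Illustrative usage (sketch): only public instance attributes make it into the repr
#   >>> @auto_repr
#   ... class Buddy:
#   ...     def __init__(self):
#   ...         self.name = 'abc'
#   ...         self._hidden = 42
#   >>> Buddy()
#   Buddy(name='abc')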
def _is_stream_tty(stream):
try:
# TODO: check on windows if hasattr check would work correctly and
# add value:
return stream.isatty()
except ValueError as exc:
# Who knows why it is a ValueError, but let's try to be specific
# If there is a problem with I/O - non-interactive, otherwise reraise
if "I/O" in str(exc):
return False
raise
def is_interactive():
"""Return True if all in/outs are open and tty.
Note that in a somewhat abnormal case where e.g. stdin is explicitly
closed, and any operation on it would raise a
`ValueError("I/O operation on closed file")` exception, this function
would just return False, since the session cannot be used interactively.
"""
return all(_is_stream_tty(s) for s in (sys.stdin, sys.stdout, sys.stderr))
def get_ipython_shell():
"""Detect if running within IPython and returns its `ip` (shell) object
Returns None if not under ipython (no `get_ipython` function)
"""
try:
return get_ipython()
except NameError:
return None
def md5sum(filename):
"""Compute an MD5 sum for the given file
"""
from datalad.support.digests import Digester
return Digester(digests=['md5'])(filename)['md5']
# unused in -core
def sorted_files(path):
"""Return a (sorted) list of files under path
"""
return sorted(sum([[op.join(r, f)[len(path) + 1:] for f in files]
for r, d, files in os.walk(path)
if not '.git' in r], []))
_encoded_dirsep = r'\\' if on_windows else r'/'
_VCS_REGEX = r'%s\.(?:git|gitattributes|svn|bzr|hg)(?:%s|$)' % (
_encoded_dirsep, _encoded_dirsep)
_DATALAD_REGEX = r'%s\.(?:datalad)(?:%s|$)' % (
_encoded_dirsep, _encoded_dirsep)
def find_files(regex, topdir=curdir, exclude=None, exclude_vcs=True, exclude_datalad=False, dirs=False):
"""Generator to find files matching regex
Parameters
----------
regex: basestring
exclude: basestring, optional
Matches to exclude
exclude_vcs:
If True, excludes commonly known VCS subdirectories. If string, used
as regex to exclude those files (regex: `%r`)
exclude_datalad:
If True, excludes files known to be datalad meta-data files (e.g. under
.datalad/ subdirectory) (regex: `%r`)
topdir: basestring, optional
Directory where to search
dirs: bool, optional
Whether to match directories as well as files
"""
for dirpath, dirnames, filenames in os.walk(topdir):
names = (dirnames + filenames) if dirs else filenames
# TODO: might want to uniformize on windows to use '/'
paths = (op.join(dirpath, name) for name in names)
for path in filter(re.compile(regex).search, paths):
path = path.rstrip(sep)
if exclude and re.search(exclude, path):
continue
if exclude_vcs and re.search(_VCS_REGEX, path):
continue
if exclude_datalad and re.search(_DATALAD_REGEX, path):
continue
yield path
find_files.__doc__ %= (_VCS_REGEX, _DATALAD_REGEX)
def expandpath(path, force_absolute=True):
"""Expand all variables and user handles in a path.
By default return an absolute path
"""
path = expandvars(expanduser(path))
if force_absolute:
path = abspath(path)
return path
def posix_relpath(path, start=None):
"""Behave like os.path.relpath, but always return POSIX paths...
on any platform."""
# join POSIX style
return posixpath.join(
# split and relpath native style
# python2.7 ntpath implementation of relpath cannot handle start=None
*split(
relpath(path, start=start if start is not None else '')))
def is_explicit_path(path):
"""Return whether a path explicitly points to a location
Any absolute path, or relative path starting with either '../' or
'./' is assumed to indicate a location on the filesystem. Any other
path format is not considered explicit."""
path = expandpath(path, force_absolute=False)
return isabs(path) \
or path.startswith(os.curdir + os.sep) \
or path.startswith(os.pardir + os.sep)
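# Illustrative behavior (sketch; POSIX-style paths assumed):
#   >>> is_explicit_path('/tmp/data')   # absolute
#   True
#   >>> is_explicit_path('./data')      # explicitly relative
#   True
#   >>> is_explicit_path('data')        # ambiguous, hence not explicit
#   False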
# handle this dance once, and import pathlib from here
# in all other places
from pathlib import (
Path,
PurePath,
PurePosixPath,
)
def rotree(path, ro=True, chmod_files=True):
"""To make tree read-only or writable
Parameters
----------
path : string
Path to the tree/directory to chmod
ro : bool, optional
Whether to make it R/O (default) or RW
chmod_files : bool, optional
Whether to operate also on files (not just directories)
"""
if ro:
chmod = lambda f: os.chmod(f, os.stat(f).st_mode & ~stat.S_IWRITE)
else:
chmod = lambda f: os.chmod(f, os.stat(f).st_mode | stat.S_IWRITE | stat.S_IREAD)
for root, dirs, files in os.walk(path, followlinks=False):
if chmod_files:
for f in files:
fullf = op.join(root, f)
# might be the "broken" symlink which would fail to stat etc
if exists(fullf):
chmod(fullf)
chmod(root)
def rmtree(path, chmod_files='auto', children_only=False, *args, **kwargs):
"""To remove git-annex .git it is needed to make all files and directories writable again first
Parameters
----------
path: Path or str
Path to remove
chmod_files : string or bool, optional
Whether to make files writable also before removal. Usually it is just
a matter of directories to have write permissions.
If 'auto' it would chmod files on windows by default
children_only : bool, optional
If set, all files and subdirectories would be removed while the path
itself (must be a directory) would be preserved
`*args` :
`**kwargs` :
Passed into shutil.rmtree call
"""
# Give W permissions back only to directories, no need to bother with files
if chmod_files == 'auto':
chmod_files = on_windows
# TODO: yoh thinks that if we could quickly check our Flyweight for
# repos if any of them is under the path, and could call .precommit
# on those to possibly stop batched processes etc, we did not have
# to do it on case by case
# Check for open files
assert_no_open_files(path)
# TODO the whole thing should be reimplemented with pathlib, but for now
# at least accept Path
path = str(path)
if children_only:
if not isdir(path):
raise ValueError("Can remove children only of directories")
for p in os.listdir(path):
rmtree(op.join(path, p))
return
if not (islink(path) or not isdir(path)):
rotree(path, ro=False, chmod_files=chmod_files)
if on_windows:
# shutil fails to remove paths that exceed 260 characters on Windows machines
# that did not enable long path support. A workaround to remove long paths
# anyway is to preprend \\?\ to the path.
# https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file?redirectedfrom=MSDN#win32-file-namespaces
path = r'\\?\ '.strip() + path
_rmtree(path, *args, **kwargs)
else:
# just remove the symlink
unlink(path)
def rmdir(path, *args, **kwargs):
"""os.rmdir with our optional checking for open files"""
assert_no_open_files(path)
os.rmdir(path)
def get_open_files(path, log_open=False):
"""Get open files under a path
Note: This function is very slow on Windows.
Parameters
----------
path : str
File or directory to check for open files under
log_open : bool or int
If set - logger level to use
Returns
-------
dict
path : pid
"""
# Original idea: https://stackoverflow.com/a/11115521/1265472
import psutil
files = {}
# since the ones returned by psutil would not be aware of symlinks in the
# path we should also get realpath for path
# do absolute() in addition to always get an absolute path
# even with non-existing paths on windows
path = str(Path(path).resolve().absolute())
for proc in psutil.process_iter():
try:
open_paths = [p.path for p in proc.open_files()] + [proc.cwd()]
for p in open_paths:
# note: could be done more efficiently so we do not
# renormalize path over and over again etc
if path_startswith(p, path):
files[p] = proc
# Catch a race condition where a process ends
# before we can examine its files
except psutil.NoSuchProcess:
pass
except psutil.AccessDenied:
pass
if files and log_open:
lgr.log(log_open, "Open files under %s: %s", path, files)
return files
_assert_no_open_files_cfg = os.environ.get('DATALAD_ASSERT_NO_OPEN_FILES')
if _assert_no_open_files_cfg:
def assert_no_open_files(path):
files = get_open_files(path, log_open=40)
if _assert_no_open_files_cfg == 'assert':
assert not files, "Got following files still open: %s" % ','.join(files)
elif files:
if _assert_no_open_files_cfg == 'pdb':
import pdb
pdb.set_trace()
elif _assert_no_open_files_cfg == 'epdb':
import epdb
epdb.serve()
pass
# otherwise we would just issue that error message in the log
else:
def assert_no_open_files(*args, **kwargs):
pass
def rmtemp(f, *args, **kwargs):
"""Wrapper to centralize removing of temp files so we could keep them around
It will not remove the temporary file/directory if DATALAD_TESTS_TEMP_KEEP
environment variable is defined
"""
if not os.environ.get('DATALAD_TESTS_TEMP_KEEP'):
if not os.path.lexists(f):
lgr.debug("Path %s does not exist, so can't be removed", f)
return
lgr.log(5, "Removing temp file: %s", f)
# Can also be a directory
if isdir(f):
rmtree(f, *args, **kwargs)
else:
unlink(f)
else:
lgr.info("Keeping temp file: %s", f)
def file_basename(name, return_ext=False):
"""
    Strips up to 2 extensions of length up to 4 characters each, starting with a
    letter (not a digit), so we could get rid of .tar.gz etc
"""
bname = basename(name)
fbname = re.sub(r'(\.[a-zA-Z_]\S{1,4}){0,2}$', '', bname)
if return_ext:
return fbname, bname[len(fbname) + 1:]
else:
return fbname
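# Illustrative behavior (sketch):
#   >>> file_basename('/some/where/data.tar.gz')
#   'data'
#   >>> file_basename('data.tar.gz', return_ext=True)
#   ('data', 'tar.gz')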
# unused in -core
def escape_filename(filename):
"""Surround filename in "" and escape " in the filename
"""
filename = filename.replace('"', r'\"').replace('`', r'\`')
filename = '"%s"' % filename
return filename
# unused in -core
def encode_filename(filename):
"""Encode unicode filename
"""
if isinstance(filename, str):
return filename.encode(sys.getfilesystemencoding())
else:
return filename
# unused in -core
def decode_input(s):
"""Given input string/bytes, decode according to stdin codepage (or UTF-8)
if not defined
If fails -- issue warning and decode allowing for errors
being replaced
"""
if isinstance(s, str):
return s
else:
encoding = sys.stdin.encoding or 'UTF-8'
try:
return s.decode(encoding)
except UnicodeDecodeError as exc:
lgr.warning(
"Failed to decode input string using %s encoding. "
"Decoding allowing for errors", encoding)
return s.decode(encoding, errors='replace')
# unused in -core
if on_windows:
def lmtime(filepath, mtime):
"""Set mtime for files. On Windows a merely adapter to os.utime
"""
os.utime(filepath, (time.time(), mtime))
else:
def lmtime(filepath, mtime):
"""Set mtime for files, while not de-referencing symlinks.
To overcome absence of os.lutime
Works only on linux and OSX ATM
"""
from .cmd import WitlessRunner
# convert mtime to format touch understands [[CC]YY]MMDDhhmm[.SS]
smtime = time.strftime("%Y%m%d%H%M.%S", time.localtime(mtime))
lgr.log(3, "Setting mtime for %s to %s == %s", filepath, mtime, smtime)
WitlessRunner().run(['touch', '-h', '-t', '%s' % smtime, filepath])
filepath = Path(filepath)
rfilepath = filepath.resolve()
if filepath.is_symlink() and rfilepath.exists():
# trust no one - adjust also of the target file
# since it seemed like downloading under OSX (was it using curl?)
# didn't bother with timestamps
lgr.log(3, "File is a symlink to %s Setting mtime for it to %s",
rfilepath, mtime)
os.utime(str(rfilepath), (time.time(), mtime))
# doesn't work on OSX
# Runner().run(['touch', '-h', '-d', '@%s' % mtime, filepath])
def ensure_tuple_or_list(obj):
"""Given an object, wrap into a tuple if not list or tuple
"""
if isinstance(obj, (list, tuple)):
return obj
return (obj,)
def ensure_iter(s, cls, copy=False, iterate=True):
"""Given not a list, would place it into a list. If None - empty list is returned
Parameters
----------
s: list or anything
cls: class
Which iterable class to ensure
copy: bool, optional
If correct iterable is passed, it would generate its shallow copy
iterate: bool, optional
If it is not a list, but something iterable (but not a str)
iterate over it.
"""
if isinstance(s, cls):
return s if not copy else shallow_copy(s)
elif isinstance(s, str):
return cls((s,))
elif iterate and hasattr(s, '__iter__'):
return cls(s)
elif s is None:
return cls()
else:
return cls((s,))
def ensure_list(s, copy=False, iterate=True):
"""Given not a list, would place it into a list. If None - empty list is returned
Parameters
----------
s: list or anything
copy: bool, optional
If list is passed, it would generate a shallow copy of the list
iterate: bool, optional
If it is not a list, but something iterable (but not a str)
iterate over it.
"""
return ensure_iter(s, list, copy=copy, iterate=iterate)
def ensure_list_from_str(s, sep='\n'):
"""Given a multiline string convert it to a list of return None if empty
Parameters
----------
s: str or list
"""
if not s:
return None
if isinstance(s, list):
return s
return s.split(sep)
def ensure_dict_from_str(s, **kwargs):
"""Given a multiline string with key=value items convert it to a dictionary
Parameters
----------
s: str or dict
Returns None if input s is empty
"""
if not s:
return None
if isinstance(s, dict):
return s
out = {}
for value_str in ensure_list_from_str(s, **kwargs):
if '=' not in value_str:
raise ValueError("{} is not in key=value format".format(repr(value_str)))
k, v = value_str.split('=', 1)
if k in out:
err = "key {} was already defined in {}, but new value {} was provided".format(k, out, v)
raise ValueError(err)
out[k] = v
return out
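# Illustrative behavior (sketch):
#   >>> ensure_dict_from_str('k1=v1\nk2=v2')
#   {'k1': 'v1', 'k2': 'v2'}
# while an empty string (or None) yields None.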
def ensure_bytes(s, encoding='utf-8'):
"""Convert/encode unicode string to bytes.
If `s` isn't a string, return it as is.
Parameters
----------
encoding: str, optional
Encoding to use. "utf-8" is the default
"""
if not isinstance(s, str):
return s
return s.encode(encoding)
def ensure_unicode(s, encoding=None, confidence=None):
"""Convert/decode bytestring to unicode.
If `s` isn't a bytestring, return it as is.
Parameters
----------
encoding: str, optional
Encoding to use. If None, "utf-8" is tried, and then if not a valid
UTF-8, encoding will be guessed
confidence: float, optional
A value between 0 and 1, so if guessing of encoding is of lower than
specified confidence, ValueError is raised
"""
if not isinstance(s, bytes):
return s
if encoding is None:
# Figure out encoding, defaulting to 'utf-8' which is our common
# target in contemporary digital society
try:
return s.decode('utf-8')
except UnicodeDecodeError as exc:
lgr.debug("Failed to decode a string as utf-8: %s",
CapturedException(exc))
# And now we could try to guess
from chardet import detect
enc = detect(s)
denc = enc.get('encoding', None)
if denc:
denc_confidence = enc.get('confidence', 0)
if confidence is not None and denc_confidence < confidence:
raise ValueError(
"Failed to auto-detect encoding with high enough "
"confidence. Highest confidence was %s for %s"
% (denc_confidence, denc)
)
lgr.log(5, "Auto-detected encoding to be %s", denc)
return s.decode(denc)
else:
raise ValueError(
"Could not decode value as utf-8, or to guess its encoding: %s"
% repr(s)
)
else:
return s.decode(encoding)
def ensure_bool(s):
"""Convert value into boolean following convention for strings
to recognize on,True,yes as True, off,False,no as False
"""
if isinstance(s, str):
if s.isdigit():
return bool(int(s))
sl = s.lower()
if sl in {'y', 'yes', 'true', 'on'}:
return True
elif sl in {'n', 'no', 'false', 'off'}:
return False
else:
raise ValueError("Do not know how to treat %r as a boolean" % s)
return bool(s)
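# Illustrative behavior (sketch):
#   >>> [ensure_bool(v) for v in ('on', 'yes', '1', 'off', 'no', '0')]
#   [True, True, True, False, False, False]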
def as_unicode(val, cast_types=object):
"""Given an arbitrary value, would try to obtain unicode value of it
For unicode it would return original value, for python2 str or python3
bytes it would use ensure_unicode, for None - an empty (unicode) string,
and for any other type (see `cast_types`) - would apply the unicode
constructor. If value is not an instance of `cast_types`, TypeError
is thrown
Parameters
----------
cast_types: type
Which types to cast to unicode by providing to constructor
"""
if val is None:
return u''
elif isinstance(val, str):
return val
elif isinstance(val, unicode_srctypes):
return ensure_unicode(val)
elif isinstance(val, cast_types):
return str(val)
else:
raise TypeError(
"Value %r is not of any of known or provided %s types"
% (val, cast_types))
def unique(seq, key=None, reverse=False):
"""Given a sequence return a list only with unique elements while maintaining order
This is the fastest solution. See
https://www.peterbe.com/plog/uniqifiers-benchmark
and
http://stackoverflow.com/a/480227/1265472
for more information.
Enhancement -- added ability to compare for uniqueness using a key function
Parameters
----------
seq:
Sequence to analyze
key: callable, optional
Function to call on each element so we could decide not on a full
element, but on its member etc
reverse: bool, optional
      If True, uniqueness is checked in the reverse order, so that the
      later duplicates take precedence
"""
seen = set()
seen_add = seen.add
trans = reversed if reverse else lambda x: x
if not key:
out = [x for x in trans(seq) if not (x in seen or seen_add(x))]
else:
# OPT: could be optimized, since key is called twice, but for our cases
# should be just as fine
out = [x for x in trans(seq) if not (key(x) in seen or seen_add(key(x)))]
return out[::-1] if reverse else out
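# Illustrative behavior (sketch):
#   >>> unique([1, 2, 1, 3])
#   [1, 2, 3]
#   >>> unique(['ab', 'cd', 'e'], key=len)   # uniqueness judged by len()
#   ['ab', 'e']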
def all_same(items):
"""Quick check if all items are the same.
Identical to a check like len(set(items)) == 1 but
should be more efficient while working on generators, since would
return False as soon as any difference detected thus possibly avoiding
unnecessary evaluations
"""
first = True
first_item = None
for item in items:
if first:
first = False
first_item = item
else:
if item != first_item:
return False
# So we return False if was empty
return not first
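# Illustrative behavior (sketch):
#   >>> all_same([1, 1, 1]), all_same([1, 2]), all_same([])
#   (True, False, False)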
def map_items(func, v):
"""A helper to apply `func` to all elements (keys and values) within dict
No type checking of values passed to func is done, so `func`
should be resilient to values which it should not handle
Initial usecase - apply_recursive(url_fragment, ensure_unicode)
"""
# map all elements within item
return v.__class__(
item.__class__(map(func, item))
for item in v.items()
)
def partition(items, predicate=bool):
"""Partition `items` by `predicate`.
Parameters
----------
items : iterable
predicate : callable
A function that will be mapped over each element in `items`. The
elements will partitioned based on whether the return value is false or
true.
Returns
-------
A tuple with two generators, the first for 'false' items and the second for
'true' ones.
Notes
-----
Taken from Peter Otten's snippet posted at
https://nedbatchelder.com/blog/201306/filter_a_list_into_two_parts.html
"""
a, b = tee((predicate(item), item) for item in items)
return ((item for pred, item in a if not pred),
(item for pred, item in b if pred))
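# Illustrative usage (sketch): the first generator carries the "false" items,
# the second one the "true" items
#   >>> odds, evens = partition(range(5), lambda x: x % 2 == 0)
#   >>> list(odds), list(evens)
#   ([1, 3], [0, 2, 4])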
def generate_chunks(container, size):
"""Given a container, generate chunks from it with size up to `size`
"""
# There could be a "smarter" solution but I think this would suffice
assert size > 0, "Size should be non-0 positive"
while container:
yield container[:size]
container = container[size:]
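# Illustrative behavior (sketch):
#   >>> list(generate_chunks([1, 2, 3, 4, 5], 2))
#   [[1, 2], [3, 4], [5]]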
def generate_file_chunks(files, cmd=None):
"""Given a list of files, generate chunks of them to avoid exceeding cmdline length
Parameters
----------
files: list of str
cmd: str or list of str, optional
Command to account for as well
"""
files = ensure_list(files)
cmd = ensure_list(cmd)
maxl = max(map(len, files)) if files else 0
chunk_size = max(
1, # should at least be 1. If blows then - not our fault
(CMD_MAX_ARG
- sum((len(x) + 3) for x in cmd)
- 4 # for '--' below
) // (maxl + 3) # +3 for possible quotes and a space
)
# TODO: additional treatment for "too many arguments"? although
    # as https://github.com/datalad/datalad/issues/1883#issuecomment-436272758
# shows there seems to be no hardcoded limit on # of arguments,
# but may be we decide to go for smth like follow to be on safe side
# chunk_size = min(10240 - len(cmd), chunk_size)
file_chunks = generate_chunks(files, chunk_size)
return file_chunks
#
# Generators helpers
#
def saved_generator(gen):
"""Given a generator returns two generators, where 2nd one just replays
So the first one would be going through the generated items and 2nd one
would be yielding saved items
"""
saved = []
def gen1():
for x in gen: # iterating over original generator
saved.append(x)
yield x
def gen2():
for x in saved: # yielding saved entries
yield x
return gen1(), gen2()
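# Illustrative usage (sketch): the second generator replays only what the first
# one has already produced
#   >>> gen1, gen2 = saved_generator(iter(range(3)))
#   >>> list(gen1)
#   [0, 1, 2]
#   >>> list(gen2)
#   [0, 1, 2]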
#
# Decorators
#
# Originally better_wraps was created to provide `wrapt`-based, instead of
# `functools.wraps` implementation to preserve the correct signature of the
# decorated function. By using inspect.signature in our getargspec, which
# works fine on `functools.wraps`ed functions, we obviated this necessity.
better_wraps = wraps
# Borrowed from pandas
# Copyright: 2011-2014, Lambda Foundry, Inc. and PyData Development Team
# License: BSD-3
def optional_args(decorator):
"""allows a decorator to take optional positional and keyword arguments.
Assumes that taking a single, callable, positional argument means that
it is decorating a function, i.e. something like this::
@my_decorator
def function(): pass
Calls decorator with decorator(f, `*args`, `**kwargs`)"""
@better_wraps(decorator)
def wrapper(*args, **kwargs):
def dec(f):
return decorator(f, *args, **kwargs)
is_decorating = not kwargs and len(args) == 1 and isinstance(args[0], Callable)
if is_decorating:
f = args[0]
args = []
return dec(f)
else:
return dec
return wrapper
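# Illustrative usage (sketch; `tagged` is a hypothetical decorator): the same
# decorator then works both bare and parametrized
#   >>> @optional_args
#   ... def tagged(f, tag='x'):
#   ...     f.tag = tag
#   ...     return f
#   >>> @tagged            # bare use
#   ... def f1(): pass
#   >>> @tagged(tag='y')   # parametrized use
#   ... def f2(): pass
#   >>> f1.tag, f2.tag
#   ('x', 'y')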
# TODO: just provide decorators for tempfile.mk* functions. This is ugly!
def get_tempfile_kwargs(tkwargs=None, prefix="", wrapped=None):
"""Updates kwargs to be passed to tempfile. calls depending on env vars
"""
if tkwargs is None:
tkwargs_ = {}
else:
# operate on a copy of tkwargs to avoid any side-effects
tkwargs_ = tkwargs.copy()
# TODO: don't remember why I had this one originally
# if len(targs)<2 and \
if 'prefix' not in tkwargs_:
tkwargs_['prefix'] = '_'.join(
['datalad_temp'] +
([prefix] if prefix else []) +
([''] if (on_windows or not wrapped) else [wrapped.__name__]))
directory = os.environ.get('TMPDIR')
if directory and 'dir' not in tkwargs_:
tkwargs_['dir'] = directory
return tkwargs_
@optional_args
def line_profile(func):
"""Q&D helper to line profile the function and spit out stats
"""
import line_profiler
prof = line_profiler.LineProfiler()
@wraps(func)
def _wrap_line_profile(*args, **kwargs):
try:
pfunc = prof(func)
return pfunc(*args, **kwargs)
finally:
prof.print_stats()
return _wrap_line_profile
# unused in -core
@optional_args
def collect_method_callstats(func):
"""Figure out methods which call the method repeatedly on the same instance
Use case(s):
- .repo is expensive since does all kinds of checks.
- .config is expensive transitively since it calls .repo each time
TODO:
- fancy one could look through the stack for the same id(self) to see if
that location is already in memo. That would hint to the cases where object
is not passed into underlying functions, causing them to redo the same work
over and over again
- ATM might flood with all "1 lines" calls which are not that informative.
The underlying possibly suboptimal use might be coming from their callers.
It might or not relate to the previous TODO
"""
from collections import defaultdict
import traceback
from time import time
memo = defaultdict(lambda: defaultdict(int)) # it will be a dict of lineno: count
# gross timing
times = []
toppath = dirname(__file__) + sep
@wraps(func)
def _wrap_collect_method_callstats(*args, **kwargs):
try:
self = args[0]
stack = traceback.extract_stack()
caller = stack[-2]
stack_sig = \
"{relpath}:{s.name}".format(
s=caller, relpath=relpath(caller.filename, toppath))
sig = (id(self), stack_sig)
# we will count based on id(self) + wherefrom
memo[sig][caller.lineno] += 1
t0 = time()
return func(*args, **kwargs)
finally:
times.append(time() - t0)
pass
def print_stats():
print("The cost of property {}:".format(func.__name__))
if not memo:
print("None since no calls")
return
# total count
counts = {k: sum(v.values()) for k,v in memo.items()}
total = sum(counts.values())
ids = {self_id for (self_id, _) in memo}
print(" Total: {} calls from {} objects with {} contexts taking {:.2f} sec"
.format(total, len(ids), len(memo), sum(times)))
# now we need to sort by value
for (self_id, caller), count in sorted(counts.items(), key=lambda x: x[1], reverse=True):
print(" {} {}: {} from {} lines"
.format(self_id, caller, count, len(memo[(self_id, caller)])))
# Upon total exit we print the stats
import atexit
atexit.register(print_stats)
return _wrap_collect_method_callstats
# Borrowed from duecredit to wrap duecredit-handling to guarantee failsafe
def never_fail(f):
"""Assure that function never fails -- all exceptions are caught
Returns `None` if function fails internally.
"""
@wraps(f)
def wrapped_func(*args, **kwargs):
try:
return f(*args, **kwargs)
except Exception as e:
lgr.warning(
"DataLad internal failure while running %s: %r. "
"Please report at https://github.com/datalad/datalad/issues"
% (f, e)
)
if os.environ.get('DATALAD_ALLOW_FAIL', False):
return f
else:
return wrapped_func
#
# Context Managers
#
# unused in -core
@contextmanager
def nothing_cm():
"""Just a dummy cm to programmically switch context managers"""
yield
@contextmanager
def swallow_outputs():
"""Context manager to help consuming both stdout and stderr, and print()
stdout is available as cm.out and stderr as cm.err whenever cm is the
yielded context manager.
Internally uses temporary files to guarantee absent side-effects of swallowing
into StringIO which lacks .fileno.
print mocking is necessary for some uses where sys.stdout was already bound
to original sys.stdout, thus mocking it later had no effect. Overriding
print function had desired effect
"""
class StringIOAdapter(object):
"""Little adapter to help getting out/err values
"""
def __init__(self):
kw = get_tempfile_kwargs({}, prefix="outputs")
self._out = NamedTemporaryFile(delete=False, mode='w', **kw)
self._err = NamedTemporaryFile(delete=False, mode='w', **kw)
def _read(self, h):
with open(h.name) as f:
return f.read()
@property
def out(self):
if not self._out.closed:
self._out.flush()
return self._read(self._out)
@property
def err(self):
if not self._err.closed:
self._err.flush()
return self._read(self._err)
@property
def handles(self):
return self._out, self._err
def cleanup(self):
self._out.close()
self._err.close()
out_name = self._out.name
err_name = self._err.name
from datalad import cfg
if cfg.getbool('datalad.log', 'outputs', default=False) \
and lgr.getEffectiveLevel() <= logging.DEBUG:
for s, sname in ((self.out, 'stdout'),
(self.err, 'stderr')):
if s:
pref = os.linesep + "| "
lgr.debug("Swallowed %s:%s%s", sname, pref, s.replace(os.linesep, pref))
else:
lgr.debug("Nothing was swallowed for %s", sname)
del self._out
del self._err
gc.collect()
rmtemp(out_name)
rmtemp(err_name)
def fake_print(*args, **kwargs):
sep = kwargs.pop('sep', ' ')
end = kwargs.pop('end', '\n')
file = kwargs.pop('file', sys.stdout)
if file in (oldout, olderr, sys.stdout, sys.stderr):
# we mock
try:
sys.stdout.write(sep.join(args) + end)
except UnicodeEncodeError as exc:
lgr.error(
"Failed to write to mocked stdout, got %s, continue as it "
"didn't happen", exc)
else:
# must be some other file one -- leave it alone
oldprint(*args, sep=sep, end=end, file=file)
from .ui import ui
# preserve -- they could have been mocked already
oldprint = getattr(builtins, 'print')
oldout, olderr = sys.stdout, sys.stderr
olduiout = ui.out
adapter = StringIOAdapter()
try:
sys.stdout, sys.stderr = adapter.handles
ui.out = adapter.handles[0]
setattr(builtins, 'print', fake_print)
yield adapter
finally:
sys.stdout, sys.stderr, ui.out = oldout, olderr, olduiout
setattr(builtins, 'print', oldprint)
adapter.cleanup()
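# Illustrative usage (sketch):
#
#   with swallow_outputs() as cmo:
#       print("hello")
#       assert 'hello' in cmo.out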
@contextmanager
def swallow_logs(new_level=None, file_=None, name='datalad'):
"""Context manager to consume all logs.
"""
lgr = logging.getLogger(name)
# Keep old settings
old_level = lgr.level
old_handlers = lgr.handlers
# Let's log everything into a string
# TODO: generalize with the one for swallow_outputs
class StringIOAdapter(object):
"""Little adapter to help getting out values
And to stay consistent with how swallow_outputs behaves
"""
def __init__(self):
if file_ is None:
kw = get_tempfile_kwargs({}, prefix="logs")
self._out = NamedTemporaryFile(mode='a', delete=False, **kw)
else:
out_file = file_
# PY3 requires clearly one or another. race condition possible
self._out = open(out_file, 'a')
self._final_out = None
def _read(self, h):
with open(h.name) as f:
return f.read()
@property
def out(self):
if self._final_out is not None:
# we closed and cleaned up already
return self._final_out
else:
self._out.flush()
return self._read(self._out)
@property
def lines(self):
return self.out.split('\n')
@property
def handle(self):
return self._out
def cleanup(self):
# store for access while object exists
self._final_out = self.out
self._out.close()
out_name = self._out.name
del self._out
gc.collect()
if not file_:
rmtemp(out_name)
def assert_logged(self, msg=None, level=None, regex=True, **kwargs):
"""Provide assertion on whether a msg was logged at a given level
If neither `msg` nor `level` provided, checks if anything was logged
at all.
Parameters
----------
msg: str, optional
Message (as a regular expression, if `regex`) to be searched.
If no msg provided, checks if anything was logged at a given level.
level: str, optional
String representing the level to be logged
regex: bool, optional
If False, regular `assert_in` is used
**kwargs: str, optional
Passed to `assert_re_in` or `assert_in`
"""
from datalad.tests.utils import assert_re_in
from datalad.tests.utils import assert_in
if regex:
match = r'\[%s\] ' % level if level else r"\[\S+\] "
else:
match = '[%s] ' % level if level else ''
if msg:
match += msg
if match:
(assert_re_in if regex else assert_in)(match, self.out, **kwargs)
else:
assert not kwargs, "no kwargs to be passed anywhere"
assert self.out, "Nothing was logged!?"
adapter = StringIOAdapter()
# TODO: it does store messages but without any formatting, i.e. even without
# date/time prefix etc. IMHO it should preserve formatting in case if file_ is
# set
swallow_handler = logging.StreamHandler(adapter.handle)
# we want to log levelname so we could test against it
swallow_handler.setFormatter(
logging.Formatter('[%(levelname)s] %(message)s'))
swallow_handler.filters = sum([h.filters for h in old_handlers],
[])
lgr.handlers = [swallow_handler]
if old_level < logging.DEBUG: # so if HEAVYDEBUG etc -- show them!
lgr.handlers += old_handlers
if isinstance(new_level, str):
new_level = getattr(logging, new_level)
if new_level is not None:
lgr.setLevel(new_level)
try:
yield adapter
# TODO: if file_ and there was an exception -- most probably worth logging it?
# although ideally it should be the next log outside added to that file_ ... oh well
finally:
lgr.handlers = old_handlers
lgr.setLevel(old_level)
adapter.cleanup()
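# Illustrative usage (sketch):
#
#   with swallow_logs(new_level=logging.DEBUG) as cml:
#       lgr.debug("fancy message")
#       assert "fancy message" in cml.out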
# TODO: May be melt in with swallow_logs at some point:
@contextmanager
def disable_logger(logger=None):
"""context manager to temporarily disable logging
This is to provide one of swallow_logs' purposes without unnecessarily
creating temp files (see gh-1865)
Parameters
----------
logger: Logger
Logger whose handlers will be ordered to not log anything.
Default: datalad's topmost Logger ('datalad')
"""
class NullFilter(logging.Filter):
"""Filter class to reject all records
"""
def filter(self, record):
return 0
if logger is None:
# default: all of datalad's logging:
logger = logging.getLogger('datalad')
filter_ = NullFilter(logger.name)
[h.addFilter(filter_) for h in logger.handlers]
try:
yield logger
finally:
[h.removeFilter(filter_) for h in logger.handlers]
#
# Additional handlers
#
_sys_excepthook = sys.excepthook # Just in case we ever need original one
def setup_exceptionhook(ipython=False):
"""Overloads default sys.excepthook with our exceptionhook handler.
If interactive, our exceptionhook handler will invoke
pdb.post_mortem; if not interactive, then invokes default handler.
"""
def _datalad_pdb_excepthook(type, value, tb):
import traceback
traceback.print_exception(type, value, tb)
print()
if is_interactive():
import pdb
pdb.post_mortem(tb)
if ipython:
from IPython.core import ultratb
sys.excepthook = ultratb.FormattedTB(mode='Verbose',
# color_scheme='Linux',
call_pdb=is_interactive())
else:
sys.excepthook = _datalad_pdb_excepthook
def ensure_dir(*args):
"""Make sure directory exists.
Joins the list of arguments to an os-specific path to the desired
    directory and creates it if it does not exist yet.
"""
dirname = op.join(*args)
if not exists(dirname):
os.makedirs(dirname)
return dirname
def updated(d, update):
"""Return a copy of the input with the 'update'
Primarily for updating dictionaries
"""
d = d.copy()
d.update(update)
return d
_pwd_mode = None
def _switch_to_getcwd(msg, *args):
global _pwd_mode
_pwd_mode = 'cwd'
lgr.debug(
msg + ". From now on will be returning os.getcwd(). Directory"
" symlinks in the paths will be resolved",
*args
)
    # TODO: we might want to mitigate by going through all flyweighted
# repos and tuning up their .paths to be resolved?
def getpwd():
"""Try to return a CWD without dereferencing possible symlinks
This function will try to use PWD environment variable to provide a current
working directory, possibly with some directories along the path being
symlinks to other directories. Unfortunately, PWD is used/set only by the
    shell, and such functions as `os.chdir` and `os.getcwd` do not use or modify
it, thus `os.getcwd()` returns path with links dereferenced.
While returning current working directory based on PWD env variable we
verify that the directory is the same as `os.getcwd()` after resolving all
symlinks. If that verification fails, we fall back to always use
`os.getcwd()`.
Initial decision to either use PWD env variable or os.getcwd() is done upon
the first call of this function.
"""
global _pwd_mode
if _pwd_mode is None:
# we need to decide!
try:
pwd = os.environ['PWD']
if on_windows and pwd and pwd.startswith('/'):
# It should be a path from MSYS.
# - it might start with a drive letter or not
# - it seems to be "illegal" to have a single letter directories
# under / path, i.e. if created - they aren't found
# - 'ln -s' does not fail to create a "symlink" but it just
# copies!
# so we are not likely to need original PWD purpose on
# those systems
# Verdict:
_pwd_mode = 'cwd'
else:
_pwd_mode = 'PWD'
except KeyError:
_pwd_mode = 'cwd'
if _pwd_mode == 'cwd':
return os.getcwd()
elif _pwd_mode == 'PWD':
try:
cwd = os.getcwd()
except OSError as exc:
if "o such file" in str(exc):
# directory was removed but we promised to be robust and
# still report the path we might know since we are still in PWD
# mode
cwd = None
else:
raise
try:
pwd = os.environ['PWD']
# do absolute() in addition to always get an absolute path
# even with non-existing paths on windows
pwd_real = str(Path(pwd).resolve().absolute())
# This logic would fail to catch the case where chdir did happen
# to the directory where current PWD is pointing to, e.g.
# $> ls -ld $PWD
# lrwxrwxrwx 1 yoh yoh 5 Oct 11 13:27 /home/yoh/.tmp/tmp -> /tmp//
# hopa:~/.tmp/tmp
# $> python -c 'import os; os.chdir("/tmp"); from datalad.utils import getpwd; print(getpwd(), os.getcwd())'
# ('/home/yoh/.tmp/tmp', '/tmp')
# but I guess that should not be too harmful
if cwd is not None and pwd_real != cwd:
_switch_to_getcwd(
"realpath of PWD=%s is %s whenever os.getcwd()=%s",
pwd, pwd_real, cwd
)
return cwd
return pwd
except KeyError:
_switch_to_getcwd("PWD env variable is no longer available")
return cwd # Must not happen, but may be someone
# evil purges PWD from environ?
else:
raise RuntimeError(
"Must have not got here. "
"pwd_mode must be either cwd or PWD. And it is now %r" % (_pwd_mode,)
)
class chpwd(object):
"""Wrapper around os.chdir which also adjusts environ['PWD']
The reason is that otherwise PWD is simply inherited from the shell
and we have no ability to assess directory path without dereferencing
symlinks.
If used as a context manager it allows to temporarily change directory
to the given path
"""
def __init__(self, path, mkdir=False, logsuffix=''):
if path:
pwd = getpwd()
self._prev_pwd = pwd
else:
self._prev_pwd = None
return
if not isabs(path):
path = normpath(op.join(pwd, path))
if not os.path.exists(path) and mkdir:
self._mkdir = True
os.mkdir(path)
else:
self._mkdir = False
lgr.debug("chdir %r -> %r %s", self._prev_pwd, path, logsuffix)
os.chdir(path) # for grep people -- ok, to chdir here!
os.environ['PWD'] = str(path)
def __enter__(self):
# nothing more to do really, chdir was in the constructor
pass
def __exit__(self, exc_type, exc_val, exc_tb):
if self._prev_pwd:
# Need to use self.__class__ so this instance, if the entire
# thing mocked during the test, still would use correct chpwd
self.__class__(self._prev_pwd, logsuffix="(coming back)")
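# Illustrative usage (sketch): as a context manager the change of directory
# (and of the PWD environment variable) is reverted on exit
#
#   with chpwd('/tmp/some/dir', mkdir=True):
#       ...  # work relative to that directory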
def dlabspath(path, norm=False):
"""Symlinks-in-the-cwd aware abspath
os.path.abspath relies on os.getcwd() which would not know about symlinks
in the path
TODO: we might want to norm=True by default to match behavior of
    os.path.abspath?
"""
if not isabs(path):
# if not absolute -- relative to pwd
path = op.join(getpwd(), path)
return normpath(path) if norm else path
def with_pathsep(path):
"""Little helper to guarantee that path ends with /"""
return path + sep if not path.endswith(sep) else path
def get_path_prefix(path, pwd=None):
"""Get path prefix (for current directory)
Returns relative path to the topdir, if we are under topdir, and if not
absolute path to topdir. If `pwd` is not specified - current directory
assumed
"""
pwd = pwd or getpwd()
path = dlabspath(path)
path_ = with_pathsep(path)
pwd_ = with_pathsep(pwd)
common = commonprefix((path_, pwd_))
if common.endswith(sep) and common in {path_, pwd_}:
# we are in subdir or above the path = use relative path
location_prefix = relpath(path, pwd)
# if benign "here" - cut off
if location_prefix in (curdir, curdir + sep):
location_prefix = ''
return location_prefix
else:
# just return absolute path
return path
def _get_normalized_paths(path, prefix):
if isabs(path) != isabs(prefix):
raise ValueError("Both paths must either be absolute or relative. "
"Got %r and %r" % (path, prefix))
path = with_pathsep(path)
prefix = with_pathsep(prefix)
return path, prefix
def path_startswith(path, prefix):
"""Return True if path starts with prefix path
Parameters
----------
path: str
prefix: str
"""
path, prefix = _get_normalized_paths(path, prefix)
return path.startswith(prefix)
def path_is_subpath(path, prefix):
"""Return True if path is a subpath of prefix
It will return False if path == prefix.
Parameters
----------
path: str
prefix: str
"""
path, prefix = _get_normalized_paths(path, prefix)
return (len(prefix) < len(path)) and path.startswith(prefix)
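# Illustrative behavior (sketch):
#   >>> path_startswith('/a/b', '/a'), path_startswith('/a', '/a')
#   (True, True)
#   >>> path_is_subpath('/a/b', '/a'), path_is_subpath('/a', '/a')
#   (True, False)
#   >>> path_startswith('/a/boo', '/a/b')   # no match on partial components
#   False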
def knows_annex(path):
"""Returns whether at a given path there is information about an annex
It is just a thin wrapper around GitRepo.is_with_annex() classmethod
which also checks for `path` to exist first.
This includes actually present annexes, but also uninitialized ones, or
even the presence of a remote annex branch.
"""
from os.path import exists
if not exists(path):
lgr.debug("No annex: test path {0} doesn't exist".format(path))
return False
from datalad.support.gitrepo import GitRepo
return GitRepo(path, init=False, create=False).is_with_annex()
@contextmanager
def make_tempfile(content=None, wrapped=None, **tkwargs):
"""Helper class to provide a temporary file name and remove it at the end (context manager)
Parameters
----------
mkdir : bool, optional (default: False)
If True, temporary directory created using tempfile.mkdtemp()
content : str or bytes, optional
Content to be stored in the file created
wrapped : function, optional
If set, function name used to prefix temporary file name
`**tkwargs`:
All other arguments are passed into the call to tempfile.mk{,d}temp(),
        and the resultant temporary filename is yielded by this context
        manager. If no 'prefix' argument is provided, it will be
constructed using module and function names ('.' replaced with
'_').
To change the used directory without providing keyword argument 'dir' set
DATALAD_TESTS_TEMP_DIR.
Examples
--------
>>> from os.path import exists
>>> from datalad.utils import make_tempfile
>>> with make_tempfile() as fname:
... k = open(fname, 'w').write('silly test')
>>> assert not exists(fname) # was removed
>>> with make_tempfile(content="blah") as fname:
... assert open(fname).read() == "blah"
"""
if tkwargs.get('mkdir', None) and content is not None:
raise ValueError("mkdir=True while providing content makes no sense")
tkwargs_ = get_tempfile_kwargs(tkwargs, wrapped=wrapped)
# if DATALAD_TESTS_TEMP_DIR is set, use that as directory,
# let mktemp handle it otherwise. However, an explicitly provided
# dir=... will override this.
mkdir = tkwargs_.pop('mkdir', False)
filename = {False: tempfile.mktemp,
True: tempfile.mkdtemp}[mkdir](**tkwargs_)
# MIH: not clear to me why we need to perform this (possibly expensive)
# resolve. It was already part of the original implementation
# 008d9ab8cc3e0170c0a9b8479e80dee9ffe6eb7f
filename = Path(filename).resolve()
if content:
(filename.write_bytes
if isinstance(content, bytes)
else filename.write_text)(content)
# TODO globbing below can also be done with pathlib
filename = str(filename)
if __debug__:
lgr.debug(
'Created temporary %s named %s',
'directory' if mkdir else 'file',
filename)
try:
yield filename
finally:
# glob here for all files with the same name (-suffix)
# would be useful whenever we requested .img filename,
# and function creates .hdr as well
# MIH: this is undocumented behavior, and undesired in the general
# case. it should be made conditional and explicit
lsuffix = len(tkwargs_.get('suffix', ''))
filename_ = lsuffix and filename[:-lsuffix] or filename
filenames = glob.glob(filename_ + '*')
if len(filename_) < 3 or len(filenames) > 5:
            # For paranoid yoh who stepped into this already once ;-)
lgr.warning("It is unlikely that it was intended to remove all"
" files matching %r. Skipping" % filename_)
return
for f in filenames:
try:
rmtemp(f)
except OSError: # pragma: no cover
pass
def _path_(*p):
"""Given a path in POSIX" notation, regenerate one in native to the env one"""
if on_windows:
return op.join(*map(lambda x: op.join(*x.split('/')), p))
else:
# Assume that all others as POSIX compliant so nothing to be done
return op.join(*p)
def get_timestamp_suffix(time_=None, prefix='-'):
"""Return a time stamp (full date and time up to second)
primarily to be used for generation of log files names
"""
args = []
if time_ is not None:
if isinstance(time_, int):
time_ = time.gmtime(time_)
args.append(time_)
return time.strftime(prefix + TIMESTAMP_FMT, *args)
# unused in -core
def get_logfilename(dspath, cmd='datalad'):
"""Return a filename to use for logging under a dataset/repository
    The directory would be created if it doesn't exist, but dspath must exist
and be a directory
"""
assert(exists(dspath))
assert(isdir(dspath))
ds_logdir = ensure_dir(dspath, '.git', 'datalad', 'logs') # TODO: use WEB_META_LOG whenever #789 merged
return op.join(ds_logdir, 'crawl-%s.log' % get_timestamp_suffix())
def get_trace(edges, start, end, trace=None):
"""Return the trace/path to reach a node in a tree.
Parameters
----------
edges : sequence(2-tuple)
The tree given by a sequence of edges (parent, child) tuples. The
nodes can be identified by any value and data type that supports
the '==' operation.
start :
Identifier of the start node. Must be present as a value in the parent
location of an edge tuple in order to be found.
end :
Identifier of the target/end node. Must be present as a value in the child
location of an edge tuple in order to be found.
trace : list
Mostly useful for recursive calls, and used internally.
Returns
-------
None or list
Returns a list with the trace to the target (the starts and the target
are not included in the trace, hence if start and end are directly connected
an empty list is returned), or None when no trace to the target can be found,
or start and end are identical.
"""
# the term trace is used to avoid confusion with a path in the sense
# of a filesystem path, but the analogy fits and nodes can be paths
if trace is None:
trace = []
if not edges:
raise ValueError("no edges given")
for cand in edges:
cand_super, cand_sub = cand
if cand_sub in trace:
# only DAGs, skip any cyclic traces
continue
if trace and cand_super != trace[-1]:
# only consider edges that lead off the end of the trace
continue
if not trace and cand_super != start:
            # we got nothing yet, and this edge is not matching the start
continue
if cand_sub == end:
return trace
# dive into potential subnodes
cand_trace = get_trace(
edges,
start,
end,
trace + [cand_sub])
if cand_trace:
return cand_trace
return None
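# Illustrative behavior (sketch):
#   >>> edges = [('a', 'b'), ('b', 'c'), ('c', 'd')]
#   >>> get_trace(edges, 'a', 'd')   # only intermediate nodes are reported
#   ['b', 'c']
#   >>> get_trace(edges, 'a', 'b')   # directly connected
#   []
# while get_trace(edges, 'd', 'a') returns None since no such trace exists.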
def get_dataset_root(path):
"""Return the root of an existent dataset containing a given path
The root path is returned in the same absolute or relative form
as the input argument. If no associated dataset exists, or the
input path doesn't exist, None is returned.
    If `path` is a symlink or something other than a directory, the root
    dataset containing its parent directory will be reported.
    If none can be found, and a symlink at `path` is pointing to a
    dataset, `path` itself will be reported as the root.
Parameters
----------
path : Path-like
Returns
-------
str or None
"""
path = str(path)
suffix = '.git'
altered = None
if islink(path) or not isdir(path):
altered = path
path = dirname(path)
apath = abspath(path)
# while we can still go up
while split(apath)[1]:
if exists(op.join(path, suffix)):
return path
# new test path in the format we got it
path = normpath(op.join(path, os.pardir))
# no luck, next round
apath = abspath(path)
# if we applied dirname() at the top, we give it another go with
# the actual path, if it was itself a symlink, it could be the
# top-level dataset itself
if altered and exists(op.join(altered, suffix)):
return altered
return None
# ATM used in datalad_crawler extension, so do not remove yet
def try_multiple(ntrials, exception, base, f, *args, **kwargs):
"""Call f multiple times making exponentially growing delay between the calls"""
for trial in range(1, ntrials+1):
try:
return f(*args, **kwargs)
except exception as exc:
if trial == ntrials:
raise # just reraise on the last trial
t = base ** trial
lgr.warning("Caught %s on trial #%d. Sleeping %f and retrying",
CapturedException(exc), trial, t)
sleep(t)
@optional_args
def try_multiple_dec(
f, ntrials=None, duration=0.1, exceptions=None, increment_type=None,
exceptions_filter=None,
logger=None,
):
"""Decorator to try function multiple times.
Main purpose is to decorate functions dealing with removal of files/directories
and which might need a few seconds to work correctly on Windows which takes
its time to release files/directories.
Parameters
----------
ntrials: int, optional
duration: float, optional
Seconds to sleep before retrying.
increment_type: {None, 'exponential'}
Note that if it is exponential, duration should typically be > 1.0
so it grows with higher power
exceptions: Exception or tuple of Exceptions, optional
Exception or a tuple of multiple exceptions, on which to retry
exceptions_filter: callable, optional
If provided, this function will be called with a caught exception
instance. If function returns True - we will re-try, if False - exception
will be re-raised without retrying.
logger: callable, optional
Logger to log upon failure. If not provided, will use stock logger
at the level of 5 (heavy debug).
"""
if not exceptions:
exceptions = (OSError, WindowsError, PermissionError) \
if on_windows else OSError
if not ntrials:
# Life goes fast on proper systems, no need to delay it much
ntrials = 100 if on_windows else 10
if logger is None:
def logger(*args, **kwargs):
return lgr.log(5, *args, **kwargs)
assert increment_type in {None, 'exponential'}
@wraps(f)
def _wrap_try_multiple_dec(*args, **kwargs):
t = duration
for trial in range(ntrials):
try:
return f(*args, **kwargs)
except exceptions as exc:
if exceptions_filter and not exceptions_filter(exc):
raise
if trial < ntrials - 1:
if increment_type == 'exponential':
t = duration ** (trial + 1)
logger(
"Caught %s on trial #%d. Sleeping %f and retrying",
CapturedException(exc), trial, t)
sleep(t)
else:
raise
return _wrap_try_multiple_dec
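# Illustrative usage (sketch; `flaky_remove` is a hypothetical function):
#
#   @try_multiple_dec(ntrials=3, duration=0.5, exceptions=OSError)
#   def flaky_remove(path):
#       os.unlink(path)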
@try_multiple_dec
def unlink(f):
"""'Robust' unlink. Would try multiple times
On windows boxes there is evidence for a latency of more than a second
until a file is considered no longer "in-use".
    WindowsError is not known on Linux, so if an IOError or any other
    exception is thrown and the except statement mentions WindowsError,
    a NameError would be raised there instead; see also gh-2533
"""
# Check for open files
assert_no_open_files(f)
return os.unlink(f)
@try_multiple_dec
def _rmtree(*args, **kwargs):
"""Just a helper to decorate shutil.rmtree.
    rmtree defined above does more and ideally should not itself be decorated,
    since it is defined recursively and checks for open files inside etc. --
    that might be too expensive at runtime
"""
return shutil.rmtree(*args, **kwargs)
def slash_join(base, extension):
"""Join two strings with a '/', avoiding duplicate slashes
If any of the strings is None the other is returned as is.
"""
if extension is None:
return base
if base is None:
return extension
return '/'.join(
(base.rstrip('/'),
extension.lstrip('/')))
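# Illustrative behavior (sketch):
#   >>> slash_join('http://example.com/', '/abc')
#   'http://example.com/abc'
#   >>> slash_join(None, 'abc')
#   'abc'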
#
# IO Helpers
#
# unused in -core
def open_r_encdetect(fname, readahead=1000):
"""Return a file object in read mode with auto-detected encoding
This is helpful when dealing with files of unknown encoding.
Parameters
----------
readahead: int, optional
How many bytes to read for guessing the encoding type. If
negative - full file will be read
"""
from chardet import detect
import io
# read some bytes from the file
with open(fname, 'rb') as f:
head = f.read(readahead)
enc = detect(head)
denc = enc.get('encoding', None)
lgr.debug("Auto-detected encoding %s for file %s (confidence: %s)",
denc,
fname,
enc.get('confidence', 'unknown'))
return io.open(fname, encoding=denc)
def read_file(fname, decode=True):
"""A helper to read file passing content via ensure_unicode
Parameters
----------
decode: bool, optional
if False, no ensure_unicode and file content returned as bytes
"""
with open(fname, 'rb') as f:
content = f.read()
return ensure_unicode(content) if decode else content
def read_csv_lines(fname, dialect=None, readahead=16384, **kwargs):
"""A generator of dict records from a CSV/TSV
Automatically guesses the encoding for each record to convert to UTF-8
Parameters
----------
fname: str
Filename
dialect: str, optional
Dialect to specify to csv.reader. If not specified -- guessed from
the file, if fails to guess, "excel-tab" is assumed
readahead: int, optional
How many bytes to read from the file to guess the type
**kwargs
Passed to `csv.reader`
"""
import csv
if dialect is None:
with open(fname) as tsvfile:
# add robustness, use a sniffer
try:
dialect = csv.Sniffer().sniff(tsvfile.read(readahead))
except Exception as exc:
lgr.warning(
'Could not determine file-format, assuming TSV: %s',
CapturedException(exc)
)
dialect = 'excel-tab'
kw = dict(encoding='utf-8')
with open(fname, 'r', **kw) as tsvfile:
        # the file is opened as UTF-8; individual cells are additionally passed
        # through ensure_unicode below:
csv_reader = csv.reader(
tsvfile,
dialect=dialect,
**kwargs
)
header = None
for row in csv_reader:
# decode UTF-8 back to Unicode, cell by cell:
row_unicode = map(ensure_unicode, row)
if header is None:
header = list(row_unicode)
else:
yield dict(zip(header, row_unicode))
def import_modules(modnames, pkg, msg="Failed to import {module}", log=lgr.debug):
"""Helper to import a list of modules without failing if N/A
Parameters
----------
modnames: list of str
List of module names to import
pkg: str
Package under which to import
msg: str, optional
Message template for .format() to log at DEBUG level if import fails.
Keys {module} and {package} will be provided and ': {exception}' appended
log: callable, optional
Logger call to use for logging messages
"""
from importlib import import_module
_globals = globals()
mods_loaded = []
if pkg and not pkg in sys.modules:
# with python 3.5.1 (ok with 3.5.5) somehow kept running into
# Failed to import dlsub1: Parent module 'dltestm1' not loaded
# while running the test. Preloading pkg resolved the issue
import_module(pkg)
for modname in modnames:
try:
_globals[modname] = mod = import_module(
'.{}'.format(modname),
pkg)
mods_loaded.append(mod)
except Exception as exc:
from datalad.support.exceptions import CapturedException
ce = CapturedException(exc)
log((msg + ': {exception}').format(
module=modname, package=pkg, exception=ce.message))
return mods_loaded
def import_module_from_file(modpath, pkg=None, log=lgr.debug):
"""Import provided module given a path
TODO:
- RF/make use of it in pipeline.py which has similar logic
- join with import_modules above?
Parameters
----------
pkg: module, optional
If provided, and modpath is under pkg.__path__, relative import will be
used
"""
assert(modpath.endswith('.py')) # for now just for .py files
log("Importing %s" % modpath)
modname = basename(modpath)[:-3]
relmodpath = None
if pkg:
for pkgpath in pkg.__path__:
if path_is_subpath(modpath, pkgpath):
# for now relying on having .py extension -- assertion above
relmodpath = '.' + relpath(modpath[:-3], pkgpath).replace(sep, '.')
break
try:
if relmodpath:
from importlib import import_module
mod = import_module(relmodpath, pkg.__name__)
else:
dirname_ = dirname(modpath)
try:
sys.path.insert(0, dirname_)
mod = __import__(modname, level=0)
finally:
if dirname_ in sys.path:
sys.path.pop(sys.path.index(dirname_))
else:
log("Expected path %s to be within sys.path, but it was gone!" % dirname_)
except Exception as e:
raise RuntimeError(
"Failed to import module from %s" % modpath) from e
return mod
def get_encoding_info():
"""Return a dictionary with various encoding/locale information"""
import sys, locale
from collections import OrderedDict
return OrderedDict([
('default', sys.getdefaultencoding()),
('filesystem', sys.getfilesystemencoding()),
('locale.prefered', locale.getpreferredencoding()),
])
def get_envvars_info():
from collections import OrderedDict
envs = []
for var, val in os.environ.items():
if (
var.startswith('PYTHON') or
var.startswith('LC_') or
var.startswith('GIT_') or
var in ('LANG', 'LANGUAGE', 'PATH')
):
envs.append((var, val))
return OrderedDict(envs)
# This class is modified from Snakemake (v5.1.4)
class SequenceFormatter(string.Formatter):
"""string.Formatter subclass with special behavior for sequences.
This class delegates formatting of individual elements to another
formatter object. Non-list objects are formatted by calling the
delegate formatter's "format_field" method. List-like objects
(list, tuple, set, frozenset) are formatted by formatting each
element of the list according to the specified format spec using
the delegate formatter and then joining the resulting strings with
a separator (space by default).
"""
def __init__(self, separator=" ", element_formatter=string.Formatter(),
*args, **kwargs):
self.separator = separator
self.element_formatter = element_formatter
def format_element(self, elem, format_spec):
"""Format a single element
For sequences, this is called once for each element in a
sequence. For anything else, it is called on the entire
        object. It is intended to be overridden in subclasses.
"""
return self.element_formatter.format_field(elem, format_spec)
def format_field(self, value, format_spec):
if isinstance(value, (list, tuple, set, frozenset)):
return self.separator.join(self.format_element(v, format_spec)
for v in value)
else:
return self.format_element(value, format_spec)
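# Illustrative sketch: sequences are joined element-by-element, anything else
# is formatted as usual --
#   SequenceFormatter(separator=",").format("{}", ["a", "b", "c"])  # -> "a,b,c"
#   SequenceFormatter(separator=",").format("{}", "abc")            # -> "abc"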
# TODO: eventually we might want to make use of attr module
class File(object):
"""Helper for a file entry in the create_tree/@with_tree
It allows to define additional settings for entries
"""
def __init__(self, name, executable=False):
"""
Parameters
----------
name : str
Name of the file
executable: bool, optional
Make it executable
"""
self.name = name
self.executable = executable
def __str__(self):
return self.name
def create_tree_archive(path, name, load, overwrite=False, archives_leading_dir=True):
"""Given an archive `name`, create under `path` with specified `load` tree
"""
from datalad.support.archives import compress_files
dirname = file_basename(name)
full_dirname = op.join(path, dirname)
os.makedirs(full_dirname)
create_tree(full_dirname, load, archives_leading_dir=archives_leading_dir)
# create archive
if archives_leading_dir:
compress_files([dirname], name, path=path, overwrite=overwrite)
else:
compress_files(list(map(basename, glob.glob(op.join(full_dirname, '*')))),
op.join(pardir, name),
path=op.join(path, dirname),
overwrite=overwrite)
# remove original tree
rmtree(full_dirname)
def create_tree(path, tree, archives_leading_dir=True, remove_existing=False):
"""Given a list of tuples (name, load) create such a tree
if load is a tuple itself -- that would create either a subtree or an archive
with that content and place it into the tree if name ends with .tar.gz
"""
lgr.log(5, "Creating a tree under %s", path)
if not exists(path):
os.makedirs(path)
if isinstance(tree, dict):
tree = tree.items()
for file_, load in tree:
if isinstance(file_, File):
executable = file_.executable
name = file_.name
else:
executable = False
name = file_
full_name = op.join(path, name)
if remove_existing and lexists(full_name):
rmtree(full_name, chmod_files=True)
if isinstance(load, (tuple, list, dict)):
if name.endswith('.tar.gz') or name.endswith('.tar') or name.endswith('.zip'):
create_tree_archive(
path, name, load,
archives_leading_dir=archives_leading_dir)
else:
create_tree(
full_name, load,
archives_leading_dir=archives_leading_dir,
remove_existing=remove_existing)
else:
open_func = open
if full_name.endswith('.gz'):
open_func = gzip.open
elif full_name.split('.')[-1] in ('xz', 'lzma'):
import lzma
open_func = lzma.open
with open_func(full_name, "wb") as f:
f.write(ensure_bytes(load, 'utf-8'))
if executable:
os.chmod(full_name, os.stat(full_name).st_mode | stat.S_IEXEC)
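# Illustrative sketch (paths and names hypothetical): dict/tuple/list values
# become subdirectories, or archives when the name ends in .tar(.gz)/.zip --
#   create_tree('/tmp/demo', {
#       'plain.txt': 'content',
#       'sub': {'file1.txt': 'one'},
#       'payload.tar.gz': {'inside.txt': 'packed'},  # handled by create_tree_archive
#   })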
def get_suggestions_msg(values, known, sep="\n "):
"""Return a formatted string with suggestions for values given the known ones
"""
import difflib
suggestions = []
for value in ensure_list(values): # might not want to do it if we change presentation below
suggestions += difflib.get_close_matches(value, known)
suggestions = unique(suggestions)
msg = "Did you mean any of these?"
if suggestions:
if '\n' in sep:
# if separator includes new line - we add entire separator right away
msg += sep
else:
msg += ' '
return msg + "%s\n" % sep.join(suggestions)
return ''
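# Illustrative sketch (values hypothetical):
#   get_suggestions_msg('datset', ['dataset', 'datasets'])
# returns a "Did you mean any of these?" message listing the close matches
# 'dataset' and 'datasets'; an empty string is returned when nothing is close.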
def bytes2human(n, format='%(value).1f %(symbol)sB'):
"""
    Convert n bytes into a human-readable string based on format.
    This simplified variant always uses the customary symbols ('K', 'M', ...),
see: http://goo.gl/kTQMs
>>> from datalad.utils import bytes2human
>>> bytes2human(1)
'1.0 B'
>>> bytes2human(1024)
'1.0 KB'
>>> bytes2human(1048576)
'1.0 MB'
>>> bytes2human(1099511627776127398123789121)
'909.5 YB'
>>> bytes2human(10000, "%(value).1f %(symbol)s/sec")
'9.8 K/sec'
>>> # precision can be adjusted by playing with %f operator
>>> bytes2human(10000, format="%(value).5f %(symbol)s")
'9.76562 K'
Taken from: http://goo.gl/kTQMs and subsequently simplified
Original Author: Giampaolo Rodola' <g.rodola [AT] gmail [DOT] com>
License: MIT
"""
n = int(n)
if n < 0:
raise ValueError("n < 0")
symbols = ('', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
prefix = {}
for i, s in enumerate(symbols[1:]):
prefix[s] = 1 << (i + 1) * 10
for symbol in reversed(symbols[1:]):
if n >= prefix[symbol]:
value = float(n) / prefix[symbol]
return format % locals()
return format % dict(symbol=symbols[0], value=n)
def quote_cmdlinearg(arg):
"""Perform platform-appropriate argument quoting"""
# https://stackoverflow.com/a/15262019
return '"{}"'.format(
arg.replace('"', '""')
) if on_windows else shlex_quote(arg)
def guard_for_format(arg):
"""Replace { and } with {{ and }}
To be used in cases if arg is not expected to have provided
by user .format() placeholders, but 'arg' might become a part
of a composite passed to .format(), e.g. via 'Run'
"""
return arg.replace('{', '{{').replace('}', '}}')
def join_cmdline(args):
"""Join command line args into a string using quote_cmdlinearg
"""
return ' '.join(map(quote_cmdlinearg, args))
def split_cmdline(s):
"""Perform platform-appropriate command line splitting.
Identical to `shlex.split()` on non-windows platforms.
Modified from https://stackoverflow.com/a/35900070
"""
if not on_windows:
return shlex_split(s)
# the rest is for windows
RE_CMD_LEX = r'''"((?:""|\\["\\]|[^"])*)"?()|(\\\\(?=\\*")|\\")|(&&?|\|\|?|\d?>|[<])|([^\s"&|<>]+)|(\s+)|(.)'''
args = []
accu = None # collects pieces of one arg
for qs, qss, esc, pipe, word, white, fail in re.findall(RE_CMD_LEX, s):
if word:
pass # most frequent
elif esc:
word = esc[1]
elif white or pipe:
if accu is not None:
args.append(accu)
if pipe:
args.append(pipe)
accu = None
continue
elif fail:
raise ValueError("invalid or incomplete shell string")
elif qs:
word = qs.replace('\\"', '"').replace('\\\\', '\\')
            # POSIX input already returned above, so this is the Windows/CMD
            # branch: always collapse CMD-style doubled quotes
            word = word.replace('""', '"')
else:
word = qss # may be even empty; must be last
accu = (accu or '') + word
if accu is not None:
args.append(accu)
return args
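# Illustrative sketch: on POSIX these are plain shlex round-trips --
#   split_cmdline('git commit -m "fix bug"')   # -> ['git', 'commit', '-m', 'fix bug']
#   join_cmdline(['git', 'commit', '-m', 'fix bug'])   # re-quotes the message
# On Windows the regex-based branch above mimics CMD quoting rules instead.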
def get_wrapped_class(wrapped):
"""Determine the command class a wrapped __call__ belongs to"""
mod = sys.modules[wrapped.__module__]
command_class_name = wrapped.__qualname__.split('.')[-2]
_func_class = mod.__dict__[command_class_name]
lgr.debug("Determined class of decorated function: %s", _func_class)
return _func_class
def _make_assure_kludge(fn):
old_name = fn.__name__.replace("ensure", "assure")
@wraps(fn)
def compat_fn(*args, **kwargs):
warnings.warn(
"{} is deprecated and will be removed in a future release. "
"Use {} instead."
.format(old_name, fn.__name__),
DeprecationWarning)
return fn(*args, **kwargs)
compat_fn.__doc__ = ("Note: This function is deprecated. Use {} instead."
.format(fn.__name__))
return compat_fn
assure_tuple_or_list = _make_assure_kludge(ensure_tuple_or_list)
assure_iter = _make_assure_kludge(ensure_iter)
assure_list = _make_assure_kludge(ensure_list)
assure_list_from_str = _make_assure_kludge(ensure_list_from_str)
assure_dict_from_str = _make_assure_kludge(ensure_dict_from_str)
assure_bytes = _make_assure_kludge(ensure_bytes)
assure_unicode = _make_assure_kludge(ensure_unicode)
assure_bool = _make_assure_kludge(ensure_bool)
assure_dir = _make_assure_kludge(ensure_dir)
lgr.log(5, "Done importing datalad.utils")
def check_symlink_capability(path, target):
"""helper similar to datalad.tests.utils.has_symlink_capability
However, for use in a datalad command context, we shouldn't
assume to be able to write to tmpfile and also not import a whole lot from
datalad's test machinery. Finally, we want to know, whether we can create a
symlink at a specific location, not just somewhere. Therefore use
arbitrary path to test-build a symlink and delete afterwards. Suitable
location can therefore be determined by high lever code.
Parameters
----------
path: Path
target: Path
Returns
-------
bool
"""
try:
target.touch()
path.symlink_to(target)
return True
except Exception:
return False
finally:
if path.exists():
path.unlink()
if target.exists():
target.unlink()
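# Illustrative sketch (paths hypothetical): probe whether symlinks can be
# created at a candidate location --
#   from pathlib import Path
#   ok = check_symlink_capability(Path('/tmp/.probe_link'), Path('/tmp/.probe_target'))
# Both probe files are removed again by the function itself.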
| 32.859834 | 123 | 0.616432 | [
"MIT"
] | AKSoo/datalad | datalad/utils.py | 87,210 | Python |
"""
ORY Keto
A cloud native access control server providing best-practice patterns (RBAC, ABAC, ACL, AWS IAM Policies, Kubernetes Roles, ...) via REST APIs. # noqa: E501
The version of the OpenAPI document: v0.0.0
Contact: hi@ory.sh
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
import nulltype # noqa: F401
from ory_keto_client.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
from ory_keto_client.model.delete_ory_access_control_policy_internal_server_error_body import DeleteOryAccessControlPolicyInternalServerErrorBody
globals()['DeleteOryAccessControlPolicyInternalServerErrorBody'] = DeleteOryAccessControlPolicyInternalServerErrorBody
class DeleteOryAccessControlPolicyInternalServerError(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'payload': (DeleteOryAccessControlPolicyInternalServerErrorBody,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'payload': 'Payload', # noqa: E501
}
_composed_schemas = {}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""DeleteOryAccessControlPolicyInternalServerError - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
payload (DeleteOryAccessControlPolicyInternalServerErrorBody): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
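# Illustrative construction sketch (assumes the Body model accepts no required
# constructor arguments, which is typical for these generated models):
#   err = DeleteOryAccessControlPolicyInternalServerError(
#       payload=DeleteOryAccessControlPolicyInternalServerErrorBody())
# In practice the API client builds this object itself when deserializing a
# 500 response.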
| 40.405714 | 161 | 0.606845 | [
"Apache-2.0"
] | Stackwalkerllc/sdk | clients/keto/python/ory_keto_client/model/delete_ory_access_control_policy_internal_server_error.py | 7,071 | Python |
from io import BytesIO
from uniborg import util
from telethon import types
from telethon.errors import PhotoInvalidDimensionsError
from telethon.tl.functions.messages import SendMediaRequest
@borg.on(util.admin_cmd(r"^\.i$"))
async def on_file_to_photo(event):
await event.delete()
target = await event.get_reply_message()
try:
image = target.media.document
except AttributeError:
return
if not image.mime_type.startswith('image/'):
return # This isn't an image
if image.mime_type == 'image/webp':
return # Telegram doesn't let you directly send stickers as photos
if image.size > 10 * 1024 * 1024:
return # We'd get PhotoSaveFileInvalidError otherwise
file = await borg.download_media(target, file=BytesIO())
file.seek(0)
img = await borg.upload_file(file)
img.name = 'image.png'
try:
await borg(SendMediaRequest(
peer=await event.get_input_chat(),
media=types.InputMediaUploadedPhoto(img),
message=target.message,
entities=target.entities,
reply_to_msg_id=target.id
))
except PhotoInvalidDimensionsError:
return
| 29.975 | 75 | 0.676397 | [
"MPL-2.0"
] | anandvfc/UniBorg | stdplugins/file to img.py | 1,199 | Python |
#!/bin/python
import json
import re
import sys
from datetime import datetime
import dateutil.parser
from dateutil.tz import tzutc
from six.moves import range
from mtools.util.pattern import json2pattern
class DateTimeEncoder(json.JSONEncoder):
"""Custom datetime encoder for json output."""
def default(self, obj):
if isinstance(obj, datetime):
return obj.isoformat()
return json.JSONEncoder.default(self, obj)
class LogEvent(object):
"""
Extract information from log line and store properties/variables.
line_str: the original line string
split_tokens: a list of string tokens after splitting line_str using
whitespace as split points
datetime: a datetime object for the logevent. For logfiles created with
version 2.4+, it also contains micro-seconds
duration: the duration of a timed operation in ms
thread: the thread name (e.g. "conn1234") as string
operation: insert, update, remove, query, command, getmore, None
namespace: the namespace of the operation, or None
command: the type of command, if the operation was a "command"
pattern: the query pattern for queries, updates, counts, etc
...
Certain operations also add the number of affected/scanned documents.
If applicable, the following variables are also set, otherwise the
default is None: nscanned, ntoreturn, nreturned, ninserted, nupdated
For performance reason, all fields are evaluated lazily upon first
request.
"""
# datetime handler for json encoding
dthandler = lambda obj: obj.isoformat() if isinstance(obj,
datetime) else None
weekdays = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep',
'Oct', 'Nov', 'Dec']
log_operations = ['query', 'insert', 'update', 'remove', 'getmore',
'command']
log_levels = ['D', 'F', 'E', 'W', 'I', 'U']
log_components = ['-', 'ACCESS', 'COMMAND', 'CONTROL', 'GEO', 'INDEX',
'NETWORK', 'QUERY', 'REPL', 'SHARDING', 'STORAGE',
'JOURNAL', 'WRITE', 'TOTAL']
def __init__(self, doc_or_str):
self._year_rollover = False
if isinstance(doc_or_str, bytes):
doc_or_str = doc_or_str.decode("utf-8")
if isinstance(doc_or_str, str) or (sys.version_info.major == 2 and
isinstance(doc_or_str, unicode)):
# create from string, remove line breaks at end of _line_str
self.from_string = True
self._line_str = doc_or_str.rstrip()
self._profile_doc = None
self._reset()
else:
self.from_string = False
self._profile_doc = doc_or_str
# docs don't need to be parsed lazily, they are fast
self._parse_document()
def _reset(self):
self._split_tokens_calculated = False
self._split_tokens = None
self._duration_calculated = False
self._duration = None
self._datetime_calculated = False
self._datetime = None
self._datetime_nextpos = None
self._datetime_format = None
self._datetime_str = ''
self._thread_calculated = False
self._thread = None
self._operation_calculated = False
self._operation = None
self._namespace = None
self._pattern = None
self._sort_pattern = None
self._command_calculated = False
self._command = None
self._counters_calculated = False
# TODO: refactor from the legacy names to modern
# (eg: nscanned => keysExamined). Currently _extract_counters()
# maps newer property names into legacy equivalents for
# broader log file support.
self._nscanned = None # keysExamined
self._nscannedObjects = None # docsExamined
self._ntoreturn = None
self._nupdated = None # nModified
self._nreturned = None # nReturned or nMatched (updates)
self._ninserted = None # nInserted
self._ndeleted = None # nDeleted
self._numYields = None
self._planSummary = None
self._writeConflicts = None
self._r = None
self._w = None
self._conn = None
self._level_calculated = False
self._level = None
self._component = None
self.merge_marker_str = ''
def set_line_str(self, line_str):
"""
Set line_str.
Line_str is only writeable if LogEvent was created from a string,
not from a system.profile documents.
"""
if not self.from_string:
raise ValueError("can't set line_str for LogEvent created from "
"system.profile documents.")
if line_str != self._line_str:
self._line_str = line_str.rstrip()
self._reset()
def get_line_str(self):
"""Return line_str depending on source, logfile or system.profile."""
if self.from_string:
return ' '.join([s for s in [self.merge_marker_str,
self._datetime_str,
self._line_str] if s])
else:
return ' '.join([s for s in [self._datetime_str,
self._line_str] if s])
line_str = property(get_line_str, set_line_str)
@property
def split_tokens(self):
"""Split string into tokens (lazy)."""
if not self._split_tokens_calculated:
# split into items (whitespace split)
self._split_tokens = self._line_str.split()
self._split_tokens_calculated = True
return self._split_tokens
@property
def duration(self):
"""Calculate duration if available (lazy)."""
if not self._duration_calculated:
self._duration_calculated = True
# split_tokens = self.split_tokens
line_str = self.line_str
if (line_str
and line_str.endswith('ms')
and 'Scheduled new oplog query' not in line_str):
try:
# find duration from end
space_pos = line_str.rfind(" ")
if space_pos == -1:
return
self._duration = int(line_str[line_str.rfind(" ") +
1:-2].replace(',', ''))
except ValueError:
self._duration = None
elif "flushing" in self.line_str:
matchobj = re.search(r'flushing mmaps took (\d+)ms',
self.line_str)
if matchobj:
self._duration = int(matchobj.group(1))
return self._duration
@property
def datetime(self):
"""Extract datetime if available (lazy)."""
if not self._datetime_calculated:
self._datetime_calculated = True
# if no datetime after 10 tokens, break to avoid parsing
# very long lines
split_tokens = self.split_tokens[:10]
for offs in range(len(split_tokens)):
dt = self._match_datetime_pattern(split_tokens[offs:offs + 4])
if dt:
self._datetime = dt
self._datetime_nextpos = offs
if self._datetime_format.startswith("iso8601"):
self._datetime_nextpos += 1
else:
self._datetime_nextpos += 4
# separate datetime str and linestr
self._line_str = (' '.join(self.split_tokens
[self._datetime_nextpos:]))
if self.level:
self._datetime_nextpos += 2
self._reformat_timestamp(self._datetime_format)
break
return self._datetime
@property
def datetime_format(self):
if not self._datetime_calculated:
_ = self.datetime
return self._datetime_format
@property
def datetime_nextpos(self):
if self._datetime_nextpos is None and not self._datetime_calculated:
_ = self.datetime
return self._datetime_nextpos
def set_datetime_hint(self, format, nextpos, rollover):
self._datetime_format = format
self._datetime_nextpos = nextpos
self._year_rollover = rollover
# Fast check if timestamp format changed.
# If it has, trigger datetime evaluation.
if format.startswith('ctime'):
if (len(self.split_tokens) < 4 or
self.split_tokens[self._datetime_nextpos - 4] not in
self.weekdays):
_ = self.datetime
return False
return True
else:
if len(self.split_tokens) == 0:
# empty line, no need to parse datetime
self._datetime_calculated = True
return False
try:
if not (self.split_tokens[self._datetime_nextpos - 1][0]
.isdigit()):
# not the timestamp format that was hinted
_ = self.datetime
return False
except Exception:
pass
return True
def _match_datetime_pattern(self, tokens):
"""
Match the datetime pattern at the beginning of the token list.
There are several formats that this method needs to understand
and distinguish between (see MongoDB's SERVER-7965):
ctime-pre2.4 Wed Dec 31 19:00:00
ctime Wed Dec 31 19:00:00.000
iso8601-utc 1970-01-01T00:00:00.000Z
iso8601-local 1969-12-31T19:00:00.000+0500
"""
# first check: less than 4 tokens can't be ctime
assume_iso8601_format = len(tokens) < 4
# check for ctime-pre-2.4 or ctime format
if not assume_iso8601_format:
weekday, month, day, time = tokens[:4]
if (len(tokens) < 4 or (weekday not in self.weekdays) or
(month not in self.months) or not day.isdigit()):
assume_iso8601_format = True
if assume_iso8601_format:
# sanity check, because the dateutil parser could interpret
# any numbers as a valid date
if not re.match(r'\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}.\d{3}',
tokens[0]):
return None
# convinced that this is a ISO-8601 format, the dateutil parser
# will do the rest
dt = dateutil.parser.parse(tokens[0])
self._datetime_format = "iso8601-utc" \
if tokens[0].endswith('Z') else "iso8601-local"
else:
# assume current year unless self.year_rollover
# is set (from LogFile)
year = datetime.now().year
dt = dateutil.parser.parse(' '.join(tokens[: 4]),
default=datetime(year, 1, 1))
if dt.tzinfo is None:
dt = dt.replace(tzinfo=tzutc())
if self._year_rollover and dt > self._year_rollover:
dt = dt.replace(year=year - 1)
self._datetime_format = "ctime" \
if '.' in tokens[3] else "ctime-pre2.4"
return dt
@property
def thread(self):
"""Extract thread name if available (lazy)."""
if not self._thread_calculated:
self._thread_calculated = True
split_tokens = self.split_tokens
if not self.datetime_nextpos:
return None
if len(split_tokens) <= self.datetime_nextpos:
return None
connection_token = split_tokens[self.datetime_nextpos]
match = re.match(r'^\[([^\]]*)\]$', connection_token)
if match:
self._thread = match.group(1)
if self._thread is not None:
if self._thread in ['initandlisten', 'mongosMain']:
if len(split_tokens) >= 5 and split_tokens[-5][0] == '#':
self._conn = 'conn' + split_tokens[-5][1:]
elif self._thread.startswith('conn'):
self._conn = self._thread
return self._thread
@property
def conn(self):
r"""
Extract conn name if available (lazy).
This value is None for all lines except the log lines related to
connections, that is lines matching '\[conn[0-9]+\]' or
'\[(initandlisten|mongosMain)\] .* connection accepted from'.
"""
self.thread
return self._conn
@property
def operation(self):
"""
Extract operation if available (lazy).
Operations: query, insert, update, remove, getmore, command
"""
if not self._operation_calculated:
self._operation_calculated = True
self._extract_operation_and_namespace()
return self._operation
@property
def namespace(self):
"""Extract namespace if available (lazy)."""
if not self._operation_calculated:
self._operation_calculated = True
self._extract_operation_and_namespace()
return self._namespace
def _extract_operation_and_namespace(self):
"""
Helper method to extract both operation and namespace from a logevent.
It doesn't make sense to only extract one as they appear back to back
in the token list.
"""
split_tokens = self.split_tokens
if not self._datetime_nextpos:
# force evaluation of thread to get access to datetime_offset and
# to protect from changes due to line truncation.
_ = self.thread
if not self._datetime_nextpos or (len(split_tokens) <=
self._datetime_nextpos + 2):
return
op = split_tokens[self._datetime_nextpos + 1].lower()
if op == 'warning:':
# check if this log line got truncated
if ("warning: log line attempted" in self._line_str and
"over max size" in self._line_str):
self._datetime_nextpos = split_tokens.index('...')
op = split_tokens[self._datetime_nextpos + 1]
else:
# unknown warning, bail out
return
if op in self.log_operations:
self._operation = op
self._namespace = split_tokens[self._datetime_nextpos + 2]
@property
def pattern(self):
"""Extract query pattern from operations."""
if not self._pattern:
# trigger evaluation of operation
if (self.operation in ['query', 'getmore', 'update', 'remove'] or
self.command in ['count', 'findandmodify']):
self._pattern = self._find_pattern('query: ')
elif self.command == 'find':
self._pattern = self._find_pattern('filter: ')
return self._pattern
@property
def sort_pattern(self):
"""Extract query pattern from operations."""
if not self._sort_pattern:
# trigger evaluation of operation
if self.operation in ['query', 'getmore']:
self._sort_pattern = self._find_pattern('orderby: ')
return self._sort_pattern
@property
def command(self):
"""Extract query pattern from operations."""
if not self._command_calculated:
self._command_calculated = True
if self.operation == 'command':
try:
command_idx = self.split_tokens.index('command:')
command = self.split_tokens[command_idx + 1]
if command == '{':
# workaround for <= 2.2 log files,
# where command was not listed separately
command = self.split_tokens[command_idx + 2][:-1]
self._command = command.lower()
except ValueError:
pass
return self._command
@property
def nscanned(self):
"""Extract nscanned or keysExamined counter if available (lazy)."""
if not self._counters_calculated:
self._counters_calculated = True
self._extract_counters()
return self._nscanned
@property
def nscannedObjects(self):
"""
Extract counters if available (lazy).
Looks for nscannedObjects or docsExamined.
"""
if not self._counters_calculated:
self._counters_calculated = True
self._extract_counters()
return self._nscannedObjects
@property
def ntoreturn(self):
"""Extract ntoreturn counter if available (lazy)."""
if not self._counters_calculated:
self._counters_calculated = True
self._extract_counters()
return self._ntoreturn
@property
def writeConflicts(self):
"""Extract ntoreturn counter if available (lazy)."""
if not self._counters_calculated:
self._counters_calculated = True
self._extract_counters()
return self._writeConflicts
@property
def nreturned(self):
"""
Extract counters if available (lazy).
Looks for nreturned, nReturned, or nMatched counter.
"""
if not self._counters_calculated:
self._counters_calculated = True
self._extract_counters()
return self._nreturned
@property
def ninserted(self):
"""Extract ninserted or nInserted counter if available (lazy)."""
if not self._counters_calculated:
self._counters_calculated = True
self._extract_counters()
return self._ninserted
@property
def ndeleted(self):
"""Extract ndeleted or nDeleted counter if available (lazy)."""
if not self._counters_calculated:
self._counters_calculated = True
self._extract_counters()
return self._ndeleted
@property
def nupdated(self):
"""Extract nupdated or nModified counter if available (lazy)."""
if not self._counters_calculated:
self._counters_calculated = True
self._extract_counters()
return self._nupdated
@property
def numYields(self):
"""Extract numYields counter if available (lazy)."""
if not self._counters_calculated:
self._counters_calculated = True
self._extract_counters()
return self._numYields
@property
def planSummary(self):
"""Extract numYields counter if available (lazy)."""
if not self._counters_calculated:
self._counters_calculated = True
self._extract_counters()
return self._planSummary
@property
def r(self):
"""Extract read lock (r) counter if available (lazy)."""
if not self._counters_calculated:
self._counters_calculated = True
self._extract_counters()
return self._r
@property
def w(self):
"""Extract write lock (w) counter if available (lazy)."""
if not self._counters_calculated:
self._counters_calculated = True
self._extract_counters()
return self._w
def _extract_counters(self):
"""Extract counters like nscanned and nreturned from the logevent."""
# extract counters (if present)
counters = ['nscanned', 'nscannedObjects', 'ntoreturn', 'nreturned',
'ninserted', 'nupdated', 'ndeleted', 'r', 'w', 'numYields',
'planSummary', 'writeConflicts', 'keyUpdates']
# TODO: refactor mtools to use current counter names throughout
# Transitionary hack: mapping of current names into prior equivalents
counter_equiv = {
'docsExamined': 'nscannedObjects',
'keysExamined': 'nscanned',
'nDeleted': 'ndeleted',
'nInserted': 'ninserted',
'nMatched': 'nreturned',
'nModified': 'nupdated'
}
counters.extend(counter_equiv.keys())
split_tokens = self.split_tokens
# trigger operation evaluation to get access to offset
if self.operation:
for t, token in enumerate(split_tokens[self.datetime_nextpos +
2:]):
for counter in counters:
if token.startswith('%s:' % counter):
try:
# Remap counter to standard name, if applicable
counter = counter_equiv.get(counter, counter)
vars(self)['_' + counter] = int((token.split(':')
[-1]).replace(',',
''))
except ValueError:
# see if this is a pre-2.5.2 numYields with space
# in between (e.g. "numYields: 2")
# https://jira.mongodb.org/browse/SERVER-10101
if (counter == 'numYields' and
token.startswith('numYields')):
try:
self._numYields = int((split_tokens[t + 1 + self.datetime_nextpos + 2]).replace(',', ''))
except ValueError:
pass
if (counter == 'planSummary' and
token.startswith('planSummary')):
try:
self._planSummary = split_tokens[t + 1 + self.datetime_nextpos + 2]
except ValueError:
pass
# token not parsable, skip
break
@property
def level(self):
"""Extract log level if available (lazy)."""
if not self._level_calculated:
self._level_calculated = True
self._extract_level()
return self._level
@property
def component(self):
"""Extract log component if available (lazy)."""
self.level
return self._component
def _extract_level(self):
"""Extract level and component if available (lazy)."""
if self._level is None:
split_tokens = self.split_tokens
if not split_tokens:
self._level = False
self._component = False
return
x = (self.log_levels.index(split_tokens[1])
if split_tokens[1] in self.log_levels else None)
if x is not None:
self._level = split_tokens[1]
self._component = split_tokens[2]
else:
self._level = False
self._component = False
def parse_all(self):
"""
Trigger extraction of all information.
These values are usually evaluated lazily.
"""
tokens = self.split_tokens
duration = self.duration
datetime = self.datetime
thread = self.thread
operation = self.operation
namespace = self.namespace
pattern = self.pattern
nscanned = self.nscanned
nscannedObjects = self.nscannedObjects
ntoreturn = self.ntoreturn
nreturned = self.nreturned
ninserted = self.ninserted
ndeleted = self.ndeleted
nupdated = self.nupdated
numYields = self.numYields
w = self.w
r = self.r
def _find_pattern(self, trigger):
# get start of json query pattern
start_idx = self.line_str.rfind(trigger)
if start_idx == -1:
# no query pattern found
return None
stop_idx = 0
brace_counter = 0
search_str = self.line_str[start_idx + len(trigger):]
for match in re.finditer(r'{|}', search_str):
stop_idx = match.start()
if search_str[stop_idx] == '{':
brace_counter += 1
else:
brace_counter -= 1
if brace_counter == 0:
break
search_str = search_str[:stop_idx + 1].strip()
if search_str:
return json2pattern(search_str)
else:
return None
def _reformat_timestamp(self, format, force=False):
if format not in ['ctime', 'ctime-pre2.4', 'iso8601-utc',
'iso8601-local']:
raise ValueError('invalid datetime format %s, choose from ctime, '
'ctime-pre2.4, iso8601-utc, iso8601-local.')
if ((self.datetime_format is None or
(self.datetime_format == format and
self._datetime_str != '')) and not force):
return
elif self.datetime is None:
return
elif format.startswith('ctime'):
dt_string = (self.weekdays[self.datetime.weekday()] + ' ' +
self.datetime.strftime("%b %d %H:%M:%S"))
# remove zero-padding from day number
tokens = dt_string.split(' ')
if tokens[2].startswith('0'):
tokens[2] = tokens[2].replace('0', ' ', 1)
dt_string = ' '.join(tokens)
if format == 'ctime':
dt_string += '.' + str(int(self.datetime.microsecond /
1000)).zfill(3)
elif format == 'iso8601-local':
dt_string = self.datetime.isoformat()
if self.datetime.utcoffset() is None:
dt_string += '+00:00'
ms_str = str(int(self.datetime.microsecond / 1000)).zfill(3)[:3]
# change isoformat string to have 3 digit milliseconds and no :
# in offset
dt_string = re.sub(r'(\.\d+)?([+-])(\d\d):(\d\d)',
'.%s\\2\\3\\4' % ms_str, dt_string, count=1)
elif format == 'iso8601-utc':
if self.datetime.utcoffset():
dt_string = self.datetime.astimezone(tzutc()).strftime("%Y-%m-"
"%dT%H:"
"%M:%S")
else:
dt_string = self.datetime.strftime("%Y-%m-%dT%H:%M:%S")
dt_string += '.' + str(int(self.datetime.microsecond /
1000)).zfill(3)[:3] + 'Z'
# set new string and format
self._datetime_str = dt_string
self._datetime_format = format
def __str__(self):
"""Default string conversion for LogEvent object is its line_str."""
return str(self.line_str)
def to_dict(self, labels=None):
"""Convert LogEvent object to a dictionary."""
output = {}
if labels is None:
labels = ['line_str', 'split_tokens', 'datetime', 'operation',
'thread', 'namespace', 'nscanned', 'ntoreturn',
'nreturned', 'ninserted', 'nupdated', 'ndeleted',
'duration', 'r', 'w', 'numYields']
for label in labels:
value = getattr(self, label, None)
if value is not None:
output[label] = value
return output
def to_json(self, labels=None):
"""Convert LogEvent object to valid JSON."""
output = self.to_dict(labels)
return json.dumps(output, cls=DateTimeEncoder, ensure_ascii=False)
def _parse_document(self):
"""Parse system.profile doc, copy all values to member variables."""
self._reset()
doc = self._profile_doc
self._split_tokens_calculated = True
self._split_tokens = None
self._duration_calculated = True
self._duration = doc[u'millis']
self._datetime_calculated = True
self._datetime = doc[u'ts']
if self._datetime.tzinfo is None:
self._datetime = self._datetime.replace(tzinfo=tzutc())
self._datetime_format = None
self._reformat_timestamp('ctime', force=True)
self._thread_calculated = True
self._thread = doc['thread']
self._operation_calculated = True
self._operation = doc[u'op']
self._namespace = doc[u'ns']
self._command_calculated = True
if self.operation == 'command':
            # next(iter(...)) works on both Python 2 and 3;
            # dict.keys() is not indexable on Python 3
            self._command = next(iter(doc[u'command']))
# query pattern for system.profile events, all three cases.
# See SERVER-13245
if 'query' in doc:
if 'query' in doc['query'] and isinstance(doc['query']['query'],
dict):
self._pattern = str(doc['query']['query']).replace("'", '"')
elif '$query' in doc['query']:
self._pattern = str(doc['query']['$query']).replace("'", '"')
else:
self._pattern = str(doc['query']).replace("'", '"')
# sort pattern
if ('orderby' in doc['query'] and
isinstance(doc['query']['orderby'], dict)):
self._sort_pattern = str(doc['query']
['orderby']).replace("'", '"')
elif '$orderby' in doc['query']:
self._sort_pattern = str(doc['query']
['$orderby']).replace("'", '"')
else:
self._sort_pattern = None
self._counters_calculated = True
self._nscanned = doc[u'nscanned'] if 'nscanned' in doc else None
self._ntoreturn = doc[u'ntoreturn'] if 'ntoreturn' in doc else None
self._nupdated = doc[u'nupdated'] if 'nupdated' in doc else None
self._nreturned = doc[u'nreturned'] if 'nreturned' in doc else None
self._ninserted = doc[u'ninserted'] if 'ninserted' in doc else None
self._ndeleted = doc[u'ndeleted'] if 'ndeleted' in doc else None
self._numYields = doc[u'numYield'] if 'numYield' in doc else None
if u'lockStats' in doc:
self._r = doc[u'lockStats'][u'timeLockedMicros'][u'r']
self._w = doc[u'lockStats'][u'timeLockedMicros'][u'w']
self._r_acquiring = doc[u'lockStats']['timeAcquiringMicros'][u'r']
self._w_acquiring = doc[u'lockStats']['timeAcquiringMicros'][u'w']
locks = 'w:%i' % self.w if self.w is not None else 'r:%i' % self.r
elif u'locks' in doc:
locks = json.dumps(doc[u'locks'])
else:
locks = ''
# build a fake line_str
payload = ''
if 'query' in doc:
payload += ('query: %s' % str(doc[u'query'])
.replace("u'", "'").replace("'", '"'))
if 'command' in doc:
payload += ('command: %s' % str(doc[u'command'])
.replace("u'", "'").replace("'", '"'))
if 'updateobj' in doc:
payload += (' update: %s' % str(doc[u'updateobj'])
.replace("u'", "'").replace("'", '"'))
scanned = 'nscanned:%i' % self._nscanned if 'nscanned' in doc else ''
yields = 'numYields:%i' % self._numYields if 'numYield' in doc else ''
duration = '%ims' % self.duration if self.duration is not None else ''
self._line_str = ("[{thread}] {operation} {namespace} {payload} "
"{scanned} {yields} locks(micros) {locks} "
"{duration}".format(datetime=self.datetime,
thread=self.thread,
operation=self.operation,
namespace=self.namespace,
payload=payload, scanned=scanned,
yields=yields, locks=locks,
duration=duration))
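# Illustrative sketch (the log line below is a simplified 2.4-style example,
# not taken from a real server):
#   le = LogEvent('Wed Dec 31 19:00:00.123 [conn12] query test.foo '
#                 'query: { a: 1 } nreturned:1 120ms')
#   le.operation   # -> 'query'
#   le.namespace   # -> 'test.foo'
#   le.nreturned   # -> 1
#   le.duration    # -> 120 (ms)
# All properties are evaluated lazily on first access and then cached.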
| 36.661055 | 125 | 0.54015 | [
"Apache-2.0"
] | sindbach/mtools | mtools/util/logevent.py | 32,665 | Python |
import subprocess
import sys
from distutils.version import LooseVersion
from re import fullmatch
def get_shell_version():
try:
for line in (
subprocess.check_output(["gnome-shell", "--version"]).decode().splitlines()
):
m = fullmatch(r"GNOME Shell (?P<version>[0-9.]+)", line)
if m:
return m.group("version")
except BaseException:
print("Warning, cannot retrieve current Gnome Shell version", file=sys.stderr)
def version_comparator(a, b):
if a == b:
return 0
if a is None:
return 1
if b is None:
return -1
a, b = LooseVersion(str(a)), LooseVersion(str(b))
if a < b:
return 1
if a > b:
return -1
return 0
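# Illustrative sketch (versions hypothetical): the comparator sorts
# newest-first and pushes None to the end --
#   version_comparator("3.38.1", "40.0")   # ->  1  (40.0 is considered newer)
#   sorted(versions, key=functools.cmp_to_key(version_comparator))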
| 23.78125 | 87 | 0.576873 | [
"Apache-2.0"
] | essembeh/gnome-extensions-cli | src/gnome_extensions_cli/utils.py | 761 | Python |
import functools
import warnings
def deprecated_alias(**aliases):
def deco(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
rename_kwargs(f.__name__, kwargs, aliases)
return f(*args, **kwargs)
return wrapper
return deco
def rename_kwargs(func_name, kwargs, aliases): # noqa
for alias, new in aliases.items():
if alias in kwargs:
if new in kwargs:
raise TypeError("{} received both {} and {}".format(func_name, alias, new))
warnings.warn("{} is deprecated; use {}".format(alias, new), DeprecationWarning, 3)
if alias == "device":
                if "cuda" in kwargs[alias]:
                    kwargs.pop(alias)
                    kwargs[new] = 1
                elif "cpu" in kwargs[alias]:
kwargs.pop(alias)
kwargs[new] = 0
else:
kwargs[new] = kwargs.pop(alias)
elif alias == "multi_gpu":
kwargs.pop(alias)
else:
kwargs[new] = kwargs.pop(alias)
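# Illustrative sketch (function and parameter names hypothetical):
#   @deprecated_alias(device="num_gpus")
#   def embed(seqs, num_gpus=0):
#       ...
#   embed(seqs, device="cuda:0")
# warns with a DeprecationWarning and forwards the call as embed(seqs, num_gpus=1).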
| 29.868421 | 95 | 0.5163 | [
"Apache-2.0"
] | ahmed-dj/bio-transformers | biotransformers/utils/deprecated.py | 1,135 | Python |
# -*- coding: utf-8 -*-
"""
Calculation of cumulant expressions for non-linear response functions
of the third order for a multilevel three band system.
"""
from quantarhei.symbolic.cumulant import Ugde, Uedg, Uged, Uegd #, ExpdV
from quantarhei.symbolic.cumulant import gg #, g1, g2
from quantarhei.symbolic.cumulant import CumulantExpr
from quantarhei.symbolic.abc import a, b, f, tau, tau1, tau2, tau3, c, d #, e, t, T, tau, x, y
from quantarhei.symbolic.abc import t1, t2, t3
from quantarhei.symbolic.lang import python_code
from quantarhei.symbolic.lang import fortran_code
import time
def evaluate_cumulant(cum, positive_times = [], leading_index=None,
lang = "Python", arrays=None):
"""
"""
t0 = time.time()
A = cum.rewrite(gg)
expr = CumulantExpr(A)
expr = expr.evaluate()
t1 = time.time()
for tt in positive_times:
expr = CumulantExpr(expr)._make_positive(tt)
t2 = time.time()
#a = leading_index[0]
if leading_index is not None:
D = expr._leading_index(leading_index)
expr = D._getExpr()
t3 = time.time()
if lang == "Fortran":
ss = fortran_code(expr.__str__())
elif lang == "Python":
ss = python_code(expr.__str__(),arrays=arrays)
else:
raise Exception("Unknown language")
print(t1-t0)
print(t2-t1)
print(t3-t2)
return ss
def R1g():
"""
"""
A = Ugde(b,t1)*Uedg(b,t1+t2)*Ugde(a,t1+t2+t3)
return evaluate_cumulant(A, positive_times=(t1, t2, t3),
leading_index=a, arrays=["gg"])
def R2g():
"""
"""
A = Uedg(a,t1+t2)*Ugde(b,t1+t2+t3)*Uedg(b,t1)
return evaluate_cumulant(A, positive_times=(t1, t2, t3),
leading_index=a, arrays=["gg"])
def R3g():
"""
"""
A = Uedg(a,t1)*Ugde(b,t1+t2+t3)*Uedg(b,t1+t2)
return evaluate_cumulant(A, positive_times=(t1, t2, t3),
leading_index=a, arrays=["gg"])
def R4g():
"""
"""
A = Ugde(b,t1+t2+t3)*Uedg(b,t1+t2)*Ugde(a,t1)
return evaluate_cumulant(A, positive_times=(t1, t2, t3),
leading_index=a, arrays=["gg"])
def R1fs():
"""
"""
A = (Uedg(a,t1+t2+t3)*Ugde(f,t1+t2+t3)*Uedg(f,t1+t2)
*Ugde(b,t1+t2)*Uedg(b,t1))
return evaluate_cumulant(A, positive_times=(t1, t2, t3),
leading_index=a, arrays=["gg"])
def R2fs():
"""
"""
A = (Ugde(b,t1)*Uedg(b,t1+t2+t3)*Ugde(f,t1+t2+t3)
*Uedg(f,t1+t2)*Ugde(a,t1+t2))
return evaluate_cumulant(A, positive_times=(t1, t2, t3),
leading_index=a, arrays=["gg"])
def print_R1gt():
"""
"""
A = Ugde(b,t3)
print(evaluate_cumulant(A, positive_times=(t1, t2, t3),
leading_index=a, arrays=["gg"]))
B = Ugde(a,t1)
print(evaluate_cumulant(B, positive_times=(t1, t2, t3),
leading_index=a, arrays=["gg"]))
def print_R2gt():
"""
"""
A = Ugde(b,t3)
print(evaluate_cumulant(A, positive_times=(t1, t2, t3),
leading_index=a, arrays=["gg"]))
B = Uedg(a,t1)
print(evaluate_cumulant(B, positive_times=(t1, t2, t3),
leading_index=a, arrays=["gg"]))
def print_R1fst():
"""
"""
A = Uedg(b,t3)*Ugde(f,t3)
print(evaluate_cumulant(A, positive_times=(t1, t2, t3),
leading_index=a, arrays=["gg"]))
B = Uedg(a,t1)
print(evaluate_cumulant(B, positive_times=(t1, t2, t3),
leading_index=a, arrays=["gg"]))
def print_R2fst():
"""
"""
A = Uedg(b,t3)*Ugde(f,t3)
print(evaluate_cumulant(A, positive_times=(t1, t2, t3),
leading_index=a, arrays=["gg"]))
B = Ugde(a,t1)
print(evaluate_cumulant(B, positive_times=(t1, t2, t3),
leading_index=a, arrays=["gg"]))
def print_trans_R2g():
"""
"""
A = (Uedg(a,t1+tau)*Ugde(b,t1+tau)*Uedg(b,t1+t2)*Ugde(b,t1+t2+t3)
*Uedg(b,t1+tau)*Ugde(a,t1+tau)*Uedg(a,t1))
print(evaluate_cumulant(A, positive_times=(t1, t2, t3),
leading_index=a, arrays=["gg"]))
def print_trans_R2g_alt():
"""
"""
#A = (Uedg(a,t1+tau)*Ugde(b,t1+tau)*Uedg(b,t1+t2)*Ugde(b,t1+t2+t3)
# *Uedg(b,t1+tau)*Ugde(a,t1+tau)*Uedg(a,t1))
A = (Uged(a,t1)*Uedg(a,tau1)*Ugde(b,tau1)*Uedg(b,t2)*Ugde(b,t2+t3)*Uedg(b,tau1)*Ugde(a,tau1))
print(evaluate_cumulant(A, positive_times=(t1, t2, t3),
leading_index=a, arrays=["gg"]))
def print_trans_R2g_alt2():
"""
"""
#A = (Uedg(a,t1+tau)*Ugde(b,t1+tau)*Uedg(b,t1+t2)*Ugde(b,t1+t2+t3)
# *Uedg(b,t1+tau)*Ugde(a,t1+tau)*Uedg(a,t1))
#A = (Uged(a,t1)*Uedg(a,tau1)*Ugde(b,tau1)*Uedg(b,t2)*Ugde(b,t2+t3)*Uedg(b,tau1)*Ugde(a,tau1))
A = (Uged(a,t1+tau1)*Uedg(b,t2-tau1)*Ugde(b,t2+t3-tau1)*Uegd(a,tau1))
print(evaluate_cumulant(A, positive_times=(t1, t2, t3),
leading_index=a, arrays=["gg"]))
def generate_nth_order_R2g(states_tuple, times_tuple):
order = len(states_tuple)
if order != len(times_tuple):
raise Exception("Wrong tuple/list length")
# starting state
a = states_tuple[0]
# final state (can be the same as starting)
b = states_tuple[len(states_tuple)-1]
# final time (must be t2)
tt = times_tuple[len(times_tuple)-1]
AL = Uged(a,t1)
Amid = Uedg(b,tt)*Ugde(b,t3+tt)
filL = 1
filR = 1
for k in range(len(times_tuple)-1):
tau = times_tuple[k]
s1 = states_tuple[k]
s2 = states_tuple[k+1]
filL = filL*Uedg(s1,tau)*Ugde(s2,tau)
filR = Uedg(s2,tau)*Ugde(s1,tau)*filR
A = AL*filL*Amid*filR
print(A)
print(evaluate_cumulant(A, positive_times=(t1, tt, t3),
leading_index=a, arrays=["gg"]))
def test():
A = Uged(a,t1+t2)*Ugde(d,t3)*Uegd(a,t2)
print(evaluate_cumulant(A, positive_times=(t1, t2, t3),
leading_index=a, arrays=["gg"]))
def oneex_twoex():
A = Uedg(f,t1)*Ugde(a,t1)
print(evaluate_cumulant(A, positive_times=(t1,), leading_index=a,
arrays="gg"))
# =============================================================================
# print("R1g:")
# st_R1g = "numpy.exp("+R1g()+")"
# print(st_R1g)
#
# print("")
# print("R2g:")
# print(R2g())
#
# print("")
# print("R3g:")
# print(R3g())
#
# print("")
# print("R4g:")
# print(R4g())
#
# print("")
# print("R1fs:")
# print(R1fs())
#
# print("")
# print("R2fs:")
# print(R2fs())
#
# print("")
# print("R1gt")
# print_R1gt()
#
# print("")
# print("R2gt")
# print_R2gt()
#
# print("")
# print("R1fst")
# print_R1fst()
#
# print("")
# print("R2fst")
# print_R2fst()
#
# =============================================================================
#print("")
#print("Trans_R2g")
#print_trans_R2g()
#
#print("")
#print("Trans_R2g_alt")
#print_trans_R2g_alt()
#
#print("")
#print("Trans_R2g_alt2")
#print_trans_R2g_alt2()
#print("***")
#states = (a, c, b) #(a,c,b)
#times = (tau1, tau2, t2) # (tau1,tau2,t2)
#generate_nth_order_R2g(states, times)
#
#print("===")
#A = Uged(a,t1)*Uedg(a,tau1)*Ugde(c,tau1)*Uedg(c,tau2)*Ugde(b,tau2)*Uedg(b,t2)*Ugde(b,t2 + t3)*Uedg(b,tau2)*Ugde(c,tau2)*Uedg(c,tau1)*Ugde(a,tau1)
#
#print(evaluate_cumulant(A, positive_times=(t1, t2, t3),
# leading_index=a, arrays=["gg"]))
#print("***")
#states = (a,b,c, d) #(a,c,b)
#times = (tau1, tau2, tau3, t2) # (tau1,tau2,t2)
#states = (a,c,b)
#times = (tau1,tau2,t2)
#generate_nth_order_R2g(states, times)
#test()
oneex_twoex()
 | 24.616564 | 146 | 0.52947 | [
"MIT"
] | MichalPt/quantarhei | examples/symbolic/test_symbolic_8.py | 8,025 | Python |
# This example is inspired by https://github.com/dasguptar/treelstm.pytorch
import argparse, cPickle, math, os, random
import logging
logging.basicConfig(level=logging.INFO)
import numpy as np
from tqdm import tqdm
import mxnet as mx
from mxnet import gluon
from mxnet.gluon import nn
from mxnet import autograd as ag
from tree_lstm import SimilarityTreeLSTM
from dataset import Vocab, SICKDataIter
parser = argparse.ArgumentParser(description='TreeLSTM for Sentence Similarity on Dependency Trees')
parser.add_argument('--data', default='data/sick/',
help='path to raw dataset. required when preprocessed dataset is not available.')
parser.add_argument('--word_embed', default='data/glove/glove.840B.300d.txt',
help='directory with word embeddings. required when preprocessed dataset is not available.')
parser.add_argument('--batch_size', type=int, default=25,
help='training batch size per device (CPU/GPU).')
parser.add_argument('--epochs', default=50, type=int,
help='number of total epochs to run')
parser.add_argument('--lr', default=0.02, type=float,
help='initial learning rate')
parser.add_argument('--wd', default=0.0001, type=float,
help='weight decay factor')
parser.add_argument('--optimizer', default='adagrad',
help='optimizer (default: adagrad)')
parser.add_argument('--seed', default=123, type=int,
help='random seed (default: 123)')
parser.add_argument('--use-gpu', action='store_true',
help='whether to use GPU.')
opt = parser.parse_args()
logging.info(opt)
context = [mx.gpu(0) if opt.use_gpu else mx.cpu()]
rnn_hidden_size, sim_hidden_size, num_classes = 150, 50, 5
optimizer = opt.optimizer.lower()
mx.random.seed(opt.seed)
np.random.seed(opt.seed)
random.seed(opt.seed)
batch_size = opt.batch_size
# read dataset
if os.path.exists('dataset.cPickle'):
with open('dataset.cPickle', 'rb') as f:
train_iter, dev_iter, test_iter, vocab = cPickle.load(f)
else:
root_dir = opt.data
segments = ['train', 'dev', 'test']
token_files = [os.path.join(root_dir, seg, '%s.toks'%tok)
for tok in ['a', 'b']
for seg in segments]
vocab = Vocab(filepaths=token_files, embedpath=opt.word_embed)
train_iter, dev_iter, test_iter = [SICKDataIter(os.path.join(root_dir, segment), vocab, num_classes)
for segment in segments]
with open('dataset.cPickle', 'wb') as f:
cPickle.dump([train_iter, dev_iter, test_iter, vocab], f)
logging.info('==> SICK vocabulary size : %d ' % vocab.size)
logging.info('==> Size of train data : %d ' % len(train_iter))
logging.info('==> Size of dev data : %d ' % len(dev_iter))
logging.info('==> Size of test data : %d ' % len(test_iter))
# get network
net = SimilarityTreeLSTM(sim_hidden_size, rnn_hidden_size, vocab.size, vocab.embed.shape[1], num_classes)
# use pearson correlation and mean-square error for evaluation
metric = mx.metric.create(['pearsonr', 'mse'])
def to_target(x):
target = np.zeros((1, num_classes))
ceil = int(math.ceil(x))
floor = int(math.floor(x))
if ceil==floor:
target[0][floor-1] = 1
else:
target[0][floor-1] = ceil - x
target[0][ceil-1] = x - floor
return mx.nd.array(target)
def to_score(x):
levels = mx.nd.arange(1, 6, ctx=x.context)
return [mx.nd.sum(levels*mx.nd.exp(x), axis=1).reshape((-1,1))]
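# Worked example (illustrative): a label of 3.4 is spread over the two nearest
# classes by to_target -- target[0][2] = 4 - 3.4 ≈ 0.6 and target[0][3] = 3.4 - 3 ≈ 0.4;
# to_score then maps the network's log-probabilities back to a 1-5 similarity score.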
# when evaluating in validation mode, check and see if pearson-r is improved
# if so, checkpoint and run evaluation on test dataset
def test(ctx, data_iter, best, mode='validation', num_iter=-1):
data_iter.reset()
batches = len(data_iter)
data_iter.set_context(ctx[0])
preds = []
labels = [mx.nd.array(data_iter.labels, ctx=ctx[0]).reshape((-1,1))]
for _ in tqdm(range(batches), desc='Testing in {} mode'.format(mode)):
l_tree, l_sent, r_tree, r_sent, label = data_iter.next()
z = net(mx.nd, l_sent, r_sent, l_tree, r_tree)
preds.append(z)
preds = to_score(mx.nd.concat(*preds, dim=0))
metric.update(preds, labels)
names, values = metric.get()
metric.reset()
for name, acc in zip(names, values):
logging.info(mode+' acc: %s=%f'%(name, acc))
if name == 'pearsonr':
test_r = acc
if mode == 'validation' and num_iter >= 0:
if test_r >= best:
best = test_r
logging.info('New optimum found: {}. Checkpointing.'.format(best))
net.collect_params().save('childsum_tree_lstm_{}.params'.format(num_iter))
test(ctx, test_iter, -1, 'test')
return best
def train(epoch, ctx, train_data, dev_data):
# initialization with context
if isinstance(ctx, mx.Context):
ctx = [ctx]
net.collect_params().initialize(mx.init.Xavier(magnitude=2.24), ctx=ctx[0])
net.embed.weight.set_data(vocab.embed.as_in_context(ctx[0]))
train_data.set_context(ctx[0])
dev_data.set_context(ctx[0])
# set up trainer for optimizing the network.
trainer = gluon.Trainer(net.collect_params(), optimizer, {'learning_rate': opt.lr, 'wd': opt.wd})
best_r = -1
Loss = gluon.loss.KLDivLoss()
for i in range(epoch):
train_data.reset()
num_batches = len(train_data)
# collect predictions and labels for evaluation metrics
preds = []
labels = [mx.nd.array(train_data.labels, ctx=ctx[0]).reshape((-1,1))]
for j in tqdm(range(num_batches), desc='Training epoch {}'.format(i)):
# get next batch
l_tree, l_sent, r_tree, r_sent, label = train_data.next()
# use autograd to record the forward calculation
with ag.record():
# forward calculation. the output is log probability
z = net(mx.nd, l_sent, r_sent, l_tree, r_tree)
# calculate loss
loss = Loss(z, to_target(label).as_in_context(ctx[0]))
# backward calculation for gradients.
loss.backward()
preds.append(z)
# update weight after every batch_size samples
if (j+1) % batch_size == 0:
trainer.step(batch_size)
# translate log-probability to scores, and evaluate
preds = to_score(mx.nd.concat(*preds, dim=0))
metric.update(preds, labels)
names, values = metric.get()
metric.reset()
for name, acc in zip(names, values):
logging.info('training acc at epoch %d: %s=%f'%(i, name, acc))
best_r = test(ctx, dev_data, best_r, num_iter=i)
train(opt.epochs, context, train_iter, dev_iter)
| 39.284884 | 112 | 0.636969 | [
"Apache-2.0"
] | ChidanandKumarKS/mxnet | example/gluon/tree_lstm/main.py | 6,757 | Python |
import time
from threading import Thread, Condition
class StingySpendy:
money = 100
cv = Condition()
def stingy(self):
for i in range(1000000):
self.cv.acquire()
self.money += 10
self.cv.notify()
self.cv.release()
print("Stingy Done")
def spendy(self):
for i in range(500000):
self.cv.acquire()
while self.money < 20:
self.cv.wait()
self.money -= 20
if self.money < 0:
print("Money in bank", self.money)
self.cv.release()
print("Spendy Done")
ss = StingySpendy()
Thread(target=ss.stingy, args=()).start()
Thread(target=ss.spendy, args=()).start()
time.sleep(5)
print("Money in the end", ss.money)
| 23.264706 | 50 | 0.539823 | [
"MIT"
] | ajvill/multithreadinginpython | condition_variables/stingy_spendy_cond_variable.py | 791 | Python |
import os
import glob
import sys
from typing import Optional, List, Union
from .utils.utils import calc_mean_score, save_json, image_dir_to_json, image_file_to_json
from .handlers.model_builder import Nima
from deepinsight_iqa.common.utility import thread_safe_singleton, set_gpu_limit
from deepinsight_iqa.data_pipeline.nima_gen.nima_datagen import NimaDataGenerator as TestDataGenerator
import tensorflow as tf
import six
import logging
logger = logging.getLogger(__name__)
@six.add_metaclass(thread_safe_singleton)
class Prediction:
def __init__(self, weights_file: str, base_model_name: str):
""" Invoke a predict method of this class to predict image quality using nima model
"""
try:
# set_gpu_limit()
self.nima = Nima(base_model_name, weights=None)
self.nima.build()
self.nima.nima_model.load_weights(weights_file)
except Exception as e:
print("Unable to load NIMA weights", str(e))
sys.exit(1)
def predict(
self,
image_source: str,
predictions_file: Optional[str] = None,
img_format: str = 'jpg'
) -> List:
# load samples
if os.path.isfile(image_source):
image_dir, samples = image_file_to_json(image_source)
else:
image_dir = image_source
samples = image_dir_to_json(image_source, img_type='jpg')
# initialize data generator
n_classes = 10
batch_size = 64
data_generator = TestDataGenerator(
samples, image_dir, batch_size, n_classes,
self.nima.preprocessing_function(), img_format=img_format
)
# get predictions
predictions = self.nima.nima_model.predict_generator(
data_generator, workers=1, use_multiprocessing=False, verbose=1)
# calc mean scores and add to samples
for i, sample in enumerate(samples):
sample['mean_score_prediction'] = calc_mean_score(predictions[i])
# print(json.dumps(samples, indent=2))
if predictions_file is not None:
save_json(samples, predictions_file)
return samples
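# Illustrative sketch (paths and model name hypothetical):
#   predictor = Prediction(weights_file="weights_mobilenet.hdf5",
#                          base_model_name="MobileNet")
#   scores = predictor.predict("/data/images", predictions_file="scores.json")
# Each returned sample dict gains a 'mean_score_prediction' entry.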
| 32.942029 | 102 | 0.66564 | [
"Apache-2.0"
] | sandyz1000/deepinsight-iqa | deepinsight_iqa/nima/predict.py | 2,273 | Python |
import os
class MockRequests:
def __init__(self):
return
def get(self, source):
source_no_http = source.replace("http://","")
test_website_path = f"{os.path.dirname(os.path.abspath(__file__))}/test_data/test_website/{source_no_http}"
with open(test_website_path,'r') as website_file:
return MockData(website_file.read())
class MockData:
def __init__(self,text):
self.text = text
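# Illustrative sketch (URL hypothetical): pages are served from the local
# test_data/test_website directory instead of the network --
#   page = MockRequests().get("http://example.com/index.html")
#   page.text   # contents of test_data/test_website/example.com/index.html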
| 26.277778 | 116 | 0.621564 | [
"MIT"
] | OtGabaldon/multiSourceWordMap | tests/mock_requests.py | 473 | Python |
# qubit number=3
# total number=60
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
def bitwise_dot(s: str, t: str) -> str:
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
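# Worked example (illustrative): for s='101', t='011'
#   bitwise_dot('101', '011') -> '1'    (1*0 + 0*1 + 1*1 = 1, mod 2)
#   bitwise_xor('101', '011') -> '011'  (per-position XOR '110', returned reversed)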
def build_oracle(n: int, f: Callable[[str], str]) -> QuantumCircuit:
# implement the oracle O_f
# NOTE: use multi_control_toffoli_gate ('noancilla' mode)
# https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
# https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
# https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
# oracle.draw('mpl', filename=(kernel + '-oracle.png'))
return oracle
def build_circuit(n: int, f: Callable[[str], str]) -> QuantumCircuit:
# implement the Bernstein-Vazirani circuit
zero = np.binary_repr(0, n)
b = f(zero)
# initial n + 1 bits
input_qubit = QuantumRegister(n+1, "qc")
classicals = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classicals)
# inverse last one (can be omitted if using O_f^\pm)
prog.x(input_qubit[n])
# circuit begin
prog.h(input_qubit[1]) # number=1
prog.h(input_qubit[2]) # number=38
prog.cz(input_qubit[0],input_qubit[2]) # number=39
prog.h(input_qubit[2]) # number=40
prog.cx(input_qubit[0],input_qubit[2]) # number=31
prog.h(input_qubit[2]) # number=42
prog.cz(input_qubit[0],input_qubit[2]) # number=43
prog.h(input_qubit[2]) # number=44
prog.h(input_qubit[2]) # number=48
prog.cz(input_qubit[0],input_qubit[2]) # number=49
prog.h(input_qubit[2]) # number=50
prog.cx(input_qubit[0],input_qubit[2]) # number=54
prog.x(input_qubit[2]) # number=55
prog.h(input_qubit[2]) # number=57
prog.cz(input_qubit[0],input_qubit[2]) # number=58
prog.h(input_qubit[2]) # number=59
prog.cx(input_qubit[0],input_qubit[2]) # number=47
prog.cx(input_qubit[0],input_qubit[2]) # number=37
prog.h(input_qubit[2]) # number=51
prog.cz(input_qubit[0],input_qubit[2]) # number=52
prog.h(input_qubit[2]) # number=53
prog.h(input_qubit[2]) # number=25
prog.cz(input_qubit[0],input_qubit[2]) # number=26
prog.h(input_qubit[2]) # number=27
prog.h(input_qubit[1]) # number=7
prog.cz(input_qubit[2],input_qubit[1]) # number=8
prog.rx(0.17592918860102857,input_qubit[2]) # number=34
prog.rx(-0.3989822670059037,input_qubit[1]) # number=30
prog.h(input_qubit[1]) # number=9
prog.h(input_qubit[1]) # number=18
prog.cz(input_qubit[2],input_qubit[1]) # number=19
prog.h(input_qubit[1]) # number=20
prog.y(input_qubit[1]) # number=14
prog.h(input_qubit[1]) # number=22
prog.cz(input_qubit[2],input_qubit[1]) # number=23
prog.h(input_qubit[1]) # number=24
prog.z(input_qubit[2]) # number=3
prog.z(input_qubit[1]) # number=41
prog.x(input_qubit[1]) # number=17
prog.y(input_qubit[2]) # number=5
prog.x(input_qubit[2]) # number=21
# apply H to get superposition
for i in range(n):
prog.h(input_qubit[i])
prog.h(input_qubit[n])
prog.barrier()
# apply oracle O_f
oracle = build_oracle(n, f)
prog.append(
oracle.to_gate(),
[input_qubit[i] for i in range(n)] + [input_qubit[n]])
# apply H back (QFT on Z_2^n)
for i in range(n):
prog.h(input_qubit[i])
prog.barrier()
# measure
return prog
def get_statevector(prog: QuantumCircuit) -> Any:
state_backend = Aer.get_backend('statevector_simulator')
statevec = execute(prog, state_backend).result()
quantum_state = statevec.get_statevector()
qubits = round(log2(len(quantum_state)))
quantum_state = {
"|" + np.binary_repr(i, qubits) + ">": quantum_state[i]
for i in range(2 ** qubits)
}
return quantum_state
def evaluate(backend_str: str, prog: QuantumCircuit, shots: int, b: str) -> Any:
# Q: which backend should we use?
# get state vector
quantum_state = get_statevector(prog)
# get simulate results
# provider = IBMQ.load_account()
# backend = provider.get_backend(backend_str)
# qobj = compile(prog, backend, shots)
# job = backend.run(qobj)
# job.result()
backend = Aer.get_backend(backend_str)
# transpile/schedule -> assemble -> backend.run
results = execute(prog, backend, shots=shots).result()
counts = results.get_counts()
a = Counter(counts).most_common(1)[0][0][::-1]
return {
"measurements": counts,
# "state": statevec,
"quantum_state": quantum_state,
"a": a,
"b": b
}
def bernstein_test_1(rep: str):
"""011 . x + 1"""
a = "011"
b = "1"
return bitwise_xor(bitwise_dot(a, rep), b)
def bernstein_test_2(rep: str):
"""000 . x + 0"""
a = "000"
b = "0"
return bitwise_xor(bitwise_dot(a, rep), b)
def bernstein_test_3(rep: str):
"""111 . x + 1"""
a = "111"
b = "1"
return bitwise_xor(bitwise_dot(a, rep), b)
if __name__ == "__main__":
n = 2
a = "11"
b = "1"
f = lambda rep: \
bitwise_xor(bitwise_dot(a, rep), b)
prog = build_circuit(n, f)
sample_shot =4000
writefile = open("../data/startQiskit_QC292.csv", "w")
# prog.draw('mpl', filename=(kernel + '.png'))
IBMQ.load_account()
provider = IBMQ.get_provider(hub='ibm-q')
provider.backends()
backend = provider.get_backend("ibmq_5_yorktown")
circuit1 = transpile(prog, FakeYorktown())
circuit1.h(qubit=2)
circuit1.x(qubit=3)
circuit1.measure_all()
info = execute(circuit1,backend=backend, shots=sample_shot).result().get_counts()
print(info, file=writefile)
print("results end", file=writefile)
print(circuit1.depth(), file=writefile)
print(circuit1, file=writefile)
writefile.close()
| 31.572072 | 140 | 0.637466 | ["BSD-3-Clause"] | UCLA-SEAL/QDiff | data/p3BR/R2/benchmark/startQiskit_QC292.py | 7,009 | Python |
import socket, threading, sys, traceback, os, tkinter
from ui import Ui_MainWindow
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5 import QtCore, QtGui, QtWidgets
from tkinter import *
from PIL import Image, ImageTk
from tkinter import messagebox, Tk
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
from RtpPacket import RtpPacket
RECV_SIZE = 20480 + 14
HIGHT = 500
CACHE_FILE_NAME = "cache-"
CACHE_FILE_EXT = ".jpg"
class Client:
INIT = 0
READY = 1
PLAYING = 2
state = INIT
SETUP = 0
PLAY = 1
PAUSE = 2
TEARDOWN = 3
FASTER = 4
SLOWER = 5
# Initiation..
def __init__(self, serveraddr, serverport, rtpport, filename):
self.page_main = Ui_MainWindow()
        self.state = self.INIT
self.serverAddr = serveraddr
self.serverPort = int(serverport)
self.rtpPort = int(rtpport)
self.fileName = filename
self.rtspSeq = 0
self.sessionId = 0
self.requestSent = -1
self.teardownAcked = 0
self.connectToServer()
self.frameNbr = 0
self.createWidgets()
def createWidgets(self):
app = QtWidgets.QApplication(sys.argv)
page_tmp = QtWidgets.QMainWindow()
self.page_main.setupUi(page_tmp)
page_tmp.show()
self.page_main.btn_setup.clicked.connect(lambda: self.setupMovie())
self.page_main.btn_play.clicked.connect(lambda: self.playMovie())
self.page_main.btn_pause.clicked.connect(lambda: self.pauseMovie())
self.page_main.btn_teardown.clicked.connect(lambda: self.exitClient())
self.page_main.btn_faster.clicked.connect(lambda: self.fasterMovie())
self.page_main.btn_slower.clicked.connect(lambda: self.slowerMovie())
sys.exit(app.exec_())
def fasterMovie(self):
"""Let movie faster."""
if self.state == self.PLAYING or self.state == self.READY:
self.sendRtspRequest(self.FASTER)
def slowerMovie(self):
"""Let movie slower."""
if self.state == self.PLAYING or self.state == self.READY:
self.sendRtspRequest(self.SLOWER)
def setupMovie(self):
"""Setup init."""
if self.state == self.INIT:
self.sendRtspRequest(self.SETUP)
def exitClient(self):
"""Teardown the client."""
self.sendRtspRequest(self.TEARDOWN)
        os.remove(CACHE_FILE_NAME + str(self.sessionId) + CACHE_FILE_EXT)  # Delete the cache image from video
        sys.exit(0)  # Close the gui window
def pauseMovie(self):
"""Pause movie."""
if self.state == self.PLAYING:
self.sendRtspRequest(self.PAUSE)
def playMovie(self):
"""Play movie."""
if self.state == self.READY:
# Create a new thread to listen for RTP packets
threading.Thread(target=self.listenRtp).start()
self.playEvent = threading.Event()
self.playEvent.clear()
self.sendRtspRequest(self.PLAY)
def listenRtp(self):
"""Listen for RTP packets."""
while 1:
try:
cachename = CACHE_FILE_NAME + str(self.sessionId) + CACHE_FILE_EXT
file = open(cachename, "wb+")
while 1:
data = self.rtpSocket.recv(RECV_SIZE)
if data:
rtpPacket = RtpPacket()
rtpPacket.decode(data)
# self.cutFrameList.append(rtpPacket.getPayload())
currFrameNbr = rtpPacket.seqNum()
file.write(rtpPacket.getPayload())
print("Current Seq Num: " + str(currFrameNbr))
if currFrameNbr > self.frameNbr and rtpPacket.getIfEnd(): # Discard the late packet
self.frameNbr = currFrameNbr
self.updateMovie(cachename)
file.close()
break
except:
# Stop listening upon requesting PAUSE or TEARDOWN
if self.playEvent.isSet():
break
print('Frame receiving failed!')
# Upon receiving ACK for TEARDOWN request,
# close the RTP socket
if self.teardownAcked == 1:
self.rtpSocket.shutdown(socket.SHUT_RDWR)
self.rtpSocket.close()
break
def writeFrame(self):
"""Write the received frame to a temp image file. Return the image file."""
cachename = CACHE_FILE_NAME + str(self.sessionId) + CACHE_FILE_EXT
file = open(cachename, "wb")
for item in self.cutFrameList:
file.write(item)
file.close()
return cachename
def updateMovie(self, imageFile):
"""Update the image file as video frame in the GUI."""
pixmap = QtGui.QPixmap(imageFile)
self.page_main.label_display.setPixmap(pixmap)
self.page_main.label_display.setScaledContents(True)
def connectToServer(self):
"""Connect to the Server. Start a new RTSP/TCP session."""
self.rtspSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
self.rtspSocket.connect((self.serverAddr, self.serverPort))
except:
# tkMessageBox.showwarning('Connection Failed', 'Connection to \'%s\' failed.' %self.serverAddr)
messagebox.showwarning('Connection Failed', 'Connection to \'%s\' failed.' %self.serverAddr)
def sendRtspRequest(self, requestCode):
"""Send RTSP request to the server."""
# Setup
if requestCode == self.SETUP and self.state == self.INIT:
threading.Thread(target=self.recvRtspReply).start()
# Update RTSP sequence number.
self.rtspSeq += 1
# Write the RTSP request to be sent.
request = 'SETUP ' + self.fileName + ' RTSP/1.0\nCSeq: ' + str(self.rtspSeq) + '\nTransport: RTP/UDP; client_port= ' + str(self.rtpPort)
# Keep track of the sent request.
self.requestSent = self.SETUP
# Play
elif requestCode == self.PLAY and self.state == self.READY:
self.rtspSeq += 1
request = 'PLAY ' + self.fileName + ' RTSP/1.0\nCSeq: ' + str(self.rtspSeq) + '\nSession: ' + str(self.sessionId)
self.requestSent = self.PLAY
# Pause
elif requestCode == self.PAUSE and self.state == self.PLAYING:
self.rtspSeq += 1
request = 'PAUSE ' + self.fileName + ' RTSP/1.0\nCSeq: ' + str(self.rtspSeq) + '\nSession: ' + str(self.sessionId)
self.requestSent = self.PAUSE
# Teardown
elif requestCode == self.TEARDOWN and not self.state == self.INIT:
self.rtspSeq += 1
request = 'TEARDOWN ' + self.fileName + ' RTSP/1.0\nCSeq: ' + str(self.rtspSeq) + '\nSession: ' + str(self.sessionId)
self.requestSent = self.TEARDOWN
# Faster
elif requestCode == self.FASTER and (self.state == self.PLAYING or self.state == self.READY):
self.rtspSeq += 1
request = 'FASTER ' + self.fileName + ' RTSP/1.0\nCSeq: ' + str(self.rtspSeq) + '\nSession: ' + str(self.sessionId)
# Slower
elif requestCode == self.SLOWER and (self.state == self.PLAYING or self.state == self.READY):
self.rtspSeq += 1
request = 'SLOWER ' + self.fileName + ' RTSP/1.0\nCSeq: ' + str(self.rtspSeq) + '\nSession: ' + str(self.sessionId)
else:
return
# Send the RTSP request using rtspSocket.
self.rtspSocket.send(request.encode())
print('\nData sent:\n' + request)
def recvRtspReply(self):
"""Receive RTSP reply from the server."""
while True:
reply = self.rtspSocket.recv(1024)
if reply:
self.parseRtspReply(reply.decode("utf-8"))
# Close the RTSP socket upon requesting Teardown
if self.requestSent == self.TEARDOWN:
self.rtspSocket.shutdown(socket.SHUT_RDWR)
self.rtspSocket.close()
break
def parseRtspReply(self, data):
"""Parse the RTSP reply from the server."""
lines = str(data).split('\n')
seqNum = int(lines[1].split(' ')[1])
# Process only if the server reply's sequence number is the same as the request's
if seqNum == self.rtspSeq:
session = int(lines[2].split(' ')[1])
# New RTSP session ID
if self.sessionId == 0:
self.sessionId = session
# Process only if the session ID is the same
if self.sessionId == session:
if int(lines[0].split(' ')[1]) == 200:
if self.requestSent == self.SETUP:
# Update RTSP state.
self.state = self.READY
# Open RTP port.
self.openRtpPort()
elif self.requestSent == self.PLAY:
self.state = self.PLAYING
elif self.requestSent == self.PAUSE:
self.state = self.READY
# The play thread exits. A new thread is created on resume.
self.playEvent.set()
elif self.requestSent == self.TEARDOWN:
self.state = self.INIT
# Flag the teardownAcked to close the socket.
self.teardownAcked = 1
def openRtpPort(self):
"""Open RTP socket binded to a specified port."""
# Create a new datagram socket to receive RTP packets from the server
self.rtpSocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# Set the timeout value of the socket to 0.5sec
self.rtpSocket.settimeout(0.5)
try:
# Bind the socket to the address using the RTP port given by the client user
self.rtpSocket.bind(("", self.rtpPort))
except:
messagebox.showwarning('Unable to Bind', 'Unable to bind PORT=%d' %self.rtpPort)
def handler(self):
"""Handler on explicitly closing the GUI window."""
self.pauseMovie()
if messagebox.askokcancel("Quit?", "Are you sure you want to quit?"):
self.exitClient()
else: # When the user presses cancel, resume playing.
self.playMovie()
if __name__ == "__main__":
try:
# serverAddr = sys.argv[1]
# serverPort = sys.argv[2]
# rtpPort = sys.argv[3]
# fileName = sys.argv[4]
serverAddr = sys.argv[1]
serverPort = sys.argv[4]
rtpPort = sys.argv[3]
fileName = sys.argv[2]
except:
print ("[Usage: ClientLauncher.py Server_name Server_port RTP_port Video_file]\n")
# root = tkinter.Tk()
client = Client(serverAddr, serverPort, rtpPort, fileName)
# client.master.title('RTP Client')
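    # Example launch (host, file and ports are illustrative); the argument order
    # follows the sys.argv parsing above (Server_name Video_file RTP_port Server_port):
    #   python Client_dev.py 127.0.0.1 movie.Mjpeg 25000 8554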
    # root.mainloop()
| 38.13 | 148 | 0.559577 | ["MIT"] | Aiemu/CourseCN-Proj-RTP | Task2/Client_dev.py | 11,439 | Python |
""" core app configuration """
import os
environment = os.getenv('LAMBTASTIC_ENV', 'development')
if environment == 'testing':
from .testing import *
elif environment == 'production':
from .production import *
else:
from .development import *
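# For example, launching the app with LAMBTASTIC_ENV=production in the environment
# (e.g. `LAMBTASTIC_ENV=production python manage.py runserver`, where manage.py is
# an assumed Django entry point) pulls in the production settings instead.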
| 21.416667 | 56 | 0.696498 | ["Unlicense"] | ppold/lambtastic | settings/__init__.py | 257 | Python |
# -*- coding: utf-8 -*-
TIME_OUT = 60
EXCEPT_FILE = ['test.py','login.py','mix.py']
class Api(object):
login = "/api/users/login"
user_info="/api/users/info"
signin = "/api/users/sign/signIn"
map = "/api/RedEnvelope/updateUserMap"
find_redbag = "/api/RedEnvelope/findReds"
get_redbag = "/api/redUser/getRed"
test= "/api/sys/testJson" | 28 | 45 | 0.64011 | [
"MIT"
] | weigun/StressTest | config.py | 364 | Python |
"""
Copyright 2020 Inmanta
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Contact: code@inmanta.com
"""
import os
import common
from inmanta.loader import SourceInfo
from inmanta.module import Project
def test_collect_python_requirements(tmpdir):
# Create project
common.makeproject(tmpdir, "test-project", deps=[("mod1", ""), ("mod2", "")], imports=["mod1", "mod2"])
project_dir = os.path.join(tmpdir, "test-project")
libs_dir = os.path.join(project_dir, "libs")
# Create mod1
common.makemodule(libs_dir, "mod1", project=False)
mod1 = os.path.join(libs_dir, "mod1")
mod1_req_txt = """iplib@git+https://github.com/bartv/python3-iplib
pytest\
>=\
1.5
iplib>=0.0.1
"""
common.add_file(mod1, "requirements.txt", mod1_req_txt, msg="initial commit")
# Create mod2
common.makemodule(libs_dir, "mod2", project=False)
mod2 = os.path.join(libs_dir, "mod2")
mod2_req_txt = """# A comment
dummy-yummy # A comment
# Another comment
"""
common.add_file(mod2, "requirements.txt", mod2_req_txt, msg="initial commit")
project = Project(project_dir, venv_path=os.path.join(project_dir, ".env"))
Project.set(project)
project.load_module("mod1", allow_v1=True)
project.load_module("mod2", allow_v1=True)
reqs = project.collect_python_requirements()
expected_reqs = ["iplib@git+https://github.com/bartv/python3-iplib", "pytest>=1.5", "iplib>=0.0.1", "dummy-yummy"]
assert sorted(reqs) == sorted(expected_reqs)
def test_requirements_from_source_info(tmpdir):
"""Test the code path used by the exporter"""
common.makeproject(tmpdir, "test-project", deps=[("mod1", "")], imports=["mod1"])
project_dir = os.path.join(tmpdir, "test-project")
libs_dir = os.path.join(project_dir, "libs")
common.makemodule(libs_dir, "mod1", project=False)
mod1 = os.path.join(libs_dir, "mod1")
mod1_req_txt = """# I'm a comment
pytest\
>=\
1.5
iplib>=0.0.1
"""
common.add_file(mod1, "requirements.txt", mod1_req_txt, msg="initial commit")
project = Project(project_dir, venv_path=os.path.join(project_dir, ".env"))
Project.set(project)
project.load_module("mod1", allow_v1=True)
requirements = SourceInfo(mod1, "inmanta_plugins.mod1").requires
assert sorted(requirements) == sorted(["pytest>=1.5", "iplib>=0.0.1"])
# This would fail if the comments weren't filtered out
project.virtualenv.install_from_list(requirements)
| 34.988235 | 118 | 0.697377 | ["Apache-2.0"] | inmanta/inmanta-core | tests/moduletool/test_python_dependencies.py | 2,974 | Python |
from __future__ import division
import fa
import sys
import os
from fa import chunker
if __name__ == "__main__":
from sys import stderr
import argparse
parser = argparse.ArgumentParser(description=(
"Create a set of synthetic genomes consisting "
"of subgroups per tax level. Some kmers are unique, "
"some are shared, and this provides a case where we can test"
" the efficacy and behavior of our bitmap method."))
parser.add_argument("-n", "--num-nucleotides-per-leaf",
type=int, default=13000)
parser.add_argument("-N", "--num-nucs-shared-per-subgroup",
type=int, default=2000)
parser.add_argument("-l", "--num-nucs-shared-per-level",
type=int, default=8000)
parser.add_argument("-d", "--tree-depth",
type=int, default=4)
parser.add_argument("-s", "--split-size", type=int,
default=3,
help=("Number of subgroups for "
"each parent node."))
parser.add_argument("--parent-map", "-p",
help="Path to which to write synthetic taxonomy.",
default="nodes.dmp")
parser.add_argument("-S", "--subgroup-size", type=int,
default=3,
help="Number of genomes for each subgroup")
parser.add_argument("-o", "--outdir", default=".", type=str)
parser.add_argument("--name-id-map", "-m", default="synth_nameidmap.txt")
args = parser.parse_args()
# Variables/settings for constructing synthetic genome
# and accessory files.
mult_per_layer = args.split_size * args.subgroup_size
depth = args.tree_depth
nleaves = mult_per_layer ** (depth - 1)
leaf_seqs = [fa.SeqId(fa.gen_seq(args.num_nucleotides_per_leaf), i) for
i in range(nleaves)]
nleaf_seq = len(leaf_seqs)
outdir = args.outdir
if not os.path.isdir(outdir):
if os.path.isfile(outdir):
raise Exception("Path set for outdir ('%s') is a"
" file... Nah, dawg." % outdir)
os.mkdir(outdir)
outdir = outdir + '/' # Append slash
name_id_map = outdir + args.name_id_map
parent_map = outdir + args.parent_map
# Variables for constructing the parent_map dictionary.
pcmap = {}
used_seqids = set(i.taxid() for i in leaf_seqs)
ctax = max(used_seqids) + 1
last_layer = []
for i in range(1, depth):
nchunks = nleaf_seq // (mult_per_layer ** i)
chunk_size = nleaf_seq // nchunks
assert nleaf_seq % chunk_size == 0
for seqsetid, seqset in enumerate(chunker(leaf_seqs, chunk_size)):
print("seqset len: %i" % len(seqset), file=stderr)
add = fa.gen_seq(args.num_nucs_shared_per_level)
for seq in seqset:
seq.seq += add
seq.subsets[i] = seqsetid
for sssid, seqsubset in enumerate(chunker(seqset,
args.subgroup_size)):
# print("seqsubset len: %i" % len(seqsubset), file=stderr)
add = fa.gen_seq(args.num_nucs_shared_per_subgroup)
for seq in seqset:
seq.seq += add
seq.subgroups[i] = seqsetid
if i == 1: # or it not last_layer
# Add leaf node to parent connections
for seq in seqset:
pcmap[seq.taxid()] = ctax + seqsetid
if i > 1:
# Add higher nodes to parent connections
if i == depth - 1:
pcmap.update((el, 1) for el in last_layer)
break
# This leaves the loop on the last layer in the tree
# because the root is 1 by construction
else:
# pcmap.update((tax, i + ctax) for tax in
# last_layer[i:i+mult_per_layer] for
# i in range(mult_per_layer))
for i in range(mult_per_layer):
for tax in last_layer[i:i + mult_per_layer]:
pcmap[tax] = i + ctax
last_layer = [ctax + i for i in range(nchunks)]
used_seqids.update(last_layer)
ctax = max(used_seqids) + 1
del used_seqids
del ctax
del last_layer
{seq.write(outdir + seq.filename()) for seq in leaf_seqs}
print("[1/3] Successfully created synthetic genomes.", file=stderr)
filenames = [outdir + seq.filename() for seq in leaf_seqs]
fa.write_nameid_map(name_id_map, filenames)
print("[2/3] Successfully wrote nameidmap to %s." % name_id_map,
file=stderr)
fa.write_parent_map(parent_map, pcmap)
print("[3/3] Successfully wrote child->parent map.", file=stderr)
stderr.write("Genomes: %s\n" % ', '.join(filenames))
stderr.write("Nameidmap: %s\n" % name_id_map)
stderr.write("Taxonomy: %s\n" % parent_map)
| 43.626087 | 77 | 0.568069 | ["MIT"] | dnbaker/bonsai | sim/main.py | 5,017 | Python |
# Copyright 2019 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests generating test combinations."""
from collections import OrderedDict
# Dependency imports
from tensorflow_probability.python.internal import test_combinations
from tensorflow_probability.python.internal import test_util
class TestingCombinationsTest(test_util.TestCase):
def test_combine(self):
self.assertEqual([{
"a": 1,
"b": 2
}, {
"a": 1,
"b": 3
}, {
"a": 2,
"b": 2
}, {
"a": 2,
"b": 3
}], test_combinations.combine(a=[1, 2], b=[2, 3]))
def test_arguments_sorted(self):
self.assertEqual([
OrderedDict([("aa", 1), ("ab", 2)]),
OrderedDict([("aa", 1), ("ab", 3)]),
OrderedDict([("aa", 2), ("ab", 2)]),
OrderedDict([("aa", 2), ("ab", 3)])
], test_combinations.combine(ab=[2, 3], aa=[1, 2]))
def test_combine_single_parameter(self):
self.assertEqual([{
"a": 1,
"b": 2
}, {
"a": 2,
"b": 2
}], test_combinations.combine(a=[1, 2], b=2))
def test_add(self):
self.assertEqual(
[{
"a": 1
}, {
"a": 2
}, {
"b": 2
}, {
"b": 3
}],
(test_combinations.combine(a=[1, 2]) +
test_combinations.combine(b=[2, 3])))
@test_combinations.generate(
test_combinations.combine(a=[1, 0], b=[2, 3], c=[1]))
class CombineTheTestSuite(test_util.TestCase):
def test_add_things(self, a, b, c):
self.assertLessEqual(3, a + b + c)
self.assertLessEqual(a + b + c, 5)
def test_add_things_one_more(self, a, b, c):
self.assertLessEqual(3, a + b + c)
self.assertLessEqual(a + b + c, 5)
def not_a_test(self, a=0, b=0, c=0):
del a, b, c
self.fail()
def _test_but_private(self, a=0, b=0, c=0):
del a, b, c
self.fail()
# Check that nothing funny happens to a non-callable that starts with "_test".
test_member = 0
if __name__ == "__main__":
test_util.main()
| 26.838384 | 80 | 0.579601 | ["Apache-2.0"] | AI-App/TensorFlow-Probability | tensorflow_probability/python/internal/test_combinations_test.py | 2,657 | Python |
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import subprocess
import os
from celery import Celery
from airflow.config_templates.default_celery import DEFAULT_CELERY_CONFIG
from airflow.exceptions import AirflowException
from airflow import configuration
from xTool.utils.log.logging_mixin import LoggingMixin
from xTool.utils.module_loading import import_string
from xTool.executors.celery_executor import CeleryExecutor
'''
To start the celery worker, run the command:
airflow worker
'''
# Get the path to the config options and import the default celery configuration
if configuration.conf.has_option('celery', 'celery_config_options'):
celery_configuration = import_string(
configuration.conf.get('celery', 'celery_config_options')
)
else:
celery_configuration = DEFAULT_CELERY_CONFIG
# Create a celery client
celery_app_name = configuration.conf.get('celery', 'CELERY_APP_NAME')
app = Celery(
celery_app_name,
config_source=celery_configuration)
@app.task
def execute_command(command):
"""airflow worker 执行shell命令 ."""
log = LoggingMixin().log
log.info("Executing command in Celery: %s", command)
env = os.environ.copy()
try:
        # After the celery worker receives the message, run the shell command it contains
subprocess.check_call(command, shell=True, stderr=subprocess.STDOUT,
close_fds=True, env=env)
except subprocess.CalledProcessError as e:
log.exception('execute_command encountered a CalledProcessError')
log.error(e.output)
raise AirflowException('Celery command failed')
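# For reference, the executor side is expected to enqueue work along the lines of
# `execute_command.apply_async(args=[command], queue=...)`; the exact call lives in
# the imported CeleryExecutor and is not shown here.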
| 34.907692 | 76 | 0.75584 | ["Apache-2.0", "BSD-2-Clause", "MIT", "ECL-2.0", "BSD-3-Clause"] | fengzhongzhu1621/XAirflow | airflow/executors/celery_executor.py | 2,353 | Python |
from django.conf.urls import url
from . import views
app_name = 'reports'
urlpatterns = [
# url(r'^graph/', views.graph, name='graph'),
url(r'^graph/', views.statistics, name='graph'),
url(r'^csv_export/', views.csv_export, name='csv_export'),
]
| 19.142857 | 63 | 0.641791 | ["MIT"] | peachman05/Pwcrew | reports/urls.py | 268 | Python |
"""Preview mixins for Zinnia views"""
from django.http import Http404
from django.utils.translation import ugettext as _
class EntryPreviewMixin(object):
"""
Mixin implementing the preview of Entries.
"""
def get_object(self, queryset=None):
"""
If the status of the entry is not PUBLISHED,
a preview is requested, so we check if the user
has the 'zinnia.can_view_all' permission or if
it's an author of the entry.
"""
obj = super(EntryPreviewMixin, self).get_object(queryset)
if obj.is_visible:
return obj
if (self.request.user.has_perm('zinnia.can_view_all') or
self.request.user.pk in [
author.pk for author in obj.authors.all()]):
return obj
raise Http404(_('No entry found matching the query'))
| 32.884615 | 65 | 0.62924 | ["BSD-3-Clause"] | Admoroux/django-blog-zinnia | zinnia/views/mixins/entry_preview.py | 855 | Python |
from libs import reaction as reactioncommand
class Reaction(reactioncommand.AdminReactionAddCommand):
'''Retries a text command
**Usage**
React to the message you want to re-run with the retry emoji
(The emoji is server-defined; ask your fellow server members for the correct emoji)'''
def matches(self, reaction, user):
return user == reaction.message.author
def action(self, reaction, user, client):
yield from client.on_message(reaction.message)
| 32.066667 | 86 | 0.742204 | [
"MIT"
] | IdeaBot/dev-addons | retry.py | 481 | Python |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = "Marc-Olivier Buob, Maxime Raynal"
__maintainer__ = "Marc-Olivier Buob, Maxime Raynal"
__email__ = "{marc-olivier.buob,maxime.raynal}@nokia.com"
__copyright__ = "Copyright (C) 2020, Nokia"
__license__ = "BSD-3"
from collections import defaultdict
from pybgl.graph import Graph
from pybgl.incidence_automaton import (
IncidenceAutomaton, finals, initial, remove_vertex, vertices
)
from pybgl.depth_first_search import depth_first_search_graph
from pybgl.property_map import make_assoc_property_map
from pybgl.reverse import reverse_graph
def find_reachable_vertices(g: Graph, sources: set) -> set:
"""
Returns the set of vertices of a graph which are reachable
from a set of source vertices.
Args:
g: Graph, an instance of `Graph`
sources: set, a set of integers representing the source vertices
Returns:
The set of vertices that are reachable from the source vertices
"""
map_vcolor = defaultdict(int)
pmap_vcolor = make_assoc_property_map(map_vcolor)
depth_first_search_graph(g, sources, pmap_vcolor=pmap_vcolor)
return set(map_vcolor.keys())
def prune_incidence_automaton(g: IncidenceAutomaton):
"""
Prunes the vertices of an IncidenceAutomaton that cannot be reached
from the intial state, or that cannot reach a final state.
Args:
g: IncidenceAutomaton, an instance of IncidenceAutomaton
"""
to_keep = find_reachable_vertices(g, {initial(g)})
reverse_graph(g)
to_keep &= find_reachable_vertices(g, finals(g))
reverse_graph(g)
to_remove = set(vertices(g)) - to_keep
for q in to_remove:
remove_vertex(q, g)
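# Typical use: build an IncidenceAutomaton, then call prune_incidence_automaton(g)
# to drop every state that is unreachable from the initial state or that cannot
# reach a final state; construction of the automaton itself is outside this module.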
| 35.979592 | 72 | 0.708452 | ["BSD-3-Clause"] | nokia/PyBGL | pybgl/prune_incidence_automaton.py | 1,763 | Python |
"""Module containing examples of report builder functions and classes."""
from collections import OrderedDict
import numpy as np
def example_fn_build_report(report, pvarray):
"""Example function that builds a report when used in the
:py:class:`~pvfactors.engine.PVEngine` with full mode simulations.
Here it will be a dictionary with lists of calculated values.
Parameters
----------
report : dict
Initially ``None``, this will be passed and updated by the function
pvarray : PV array object
PV array with updated calculation values
Returns
-------
report : dict
Report updated with newly calculated values
"""
# Initialize the report
if report is None:
list_keys = ['qinc_front', 'qinc_back', 'iso_front', 'iso_back']
report = OrderedDict({key: [] for key in list_keys})
# Add elements to the report
if pvarray is not None:
pvrow = pvarray.pvrows[1] # use center pvrow
report['qinc_front'].append(
pvrow.front.get_param_weighted('qinc'))
report['qinc_back'].append(
pvrow.back.get_param_weighted('qinc'))
report['iso_front'].append(
pvrow.front.get_param_weighted('isotropic'))
report['iso_back'].append(
pvrow.back.get_param_weighted('isotropic'))
else:
# No calculation was performed, because sun was down
report['qinc_front'].append(np.nan)
report['qinc_back'].append(np.nan)
report['iso_front'].append(np.nan)
report['iso_back'].append(np.nan)
return report
class ExampleReportBuilder(object):
"""A class is required to build reports when running calculations with
multiprocessing because of python constraints"""
@staticmethod
def build(report, pvarray):
"""Method that will build the simulation report. Here we're using the
previously defined
        :py:func:`~pvfactors.report.example_fn_build_report`.
Parameters
----------
report : dict
Initially ``None``, this will be passed and updated by the function
pvarray : PV array object
PV array with updated calculation values
Returns
-------
report : dict
Report updated with newly calculated values
"""
return example_fn_build_report(report, pvarray)
@staticmethod
def merge(reports):
"""Method used to merge multiple reports together. Here it simply
concatenates the lists of values saved in the different reports.
Parameters
----------
reports : list of dict
List of reports that need to be concatenated together
Returns
-------
report : dict
Final report with all concatenated values
"""
report = reports[0]
# Merge only if more than 1 report
if len(reports) > 1:
keys_report = list(reports[0].keys())
for other_report in reports[1:]:
for key in keys_report:
report[key] += other_report[key]
return report
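# Sketch of how these hooks are meant to be plugged into the engine (the exact
# engine call signature below is an assumption based on the docstrings above):
#
#   engine = PVEngine(pvarray)
#   report = engine.run_full_mode(fn_build_report=example_fn_build_report)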
| 32.895833 | 79 | 0.622863 | ["BSD-3-Clause"] | tcapelle/pvfactors | pvfactors/report.py | 3,158 | Python |
from mmcv.runner import load_checkpoint as mmcv_load_checkpoint
from mmcv.runner.checkpoint import load_url_dist
import urllib
mmskeleton_model_urls = {
'st_gcn/kinetics-skeleton': "https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmskeleton/models/st-gcn/st_gcn.kinetics-6fa43f73.pth",
'st_gcn/ntu-xsub': "https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmskeleton/models/st-gcn/st_gcn.ntu-xsub-300b57d4.pth",
'st_gcn/ntu-xview': "https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmskeleton/models/st-gcn/st_gcn.ntu-xview-9ba67746.pth",
'mmdet/htc_dconv_c3-c5_mstrain_400_1400_x101_64x4d_fpn_20e': 'https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmskeleton/models/mmdet/htc_dconv_c3-c5_mstrain_400_1400_x101_64x4d_fpn_20e_20190408-0e50669c.pth',
'pose_estimation/pose_hrnet_w32_256x192': 'https://open-mmlab.s3.ap-northeast-2.amazonaws.com/mmskeleton/models/pose_estimation/pose_hrnet_w32_256x192-76ea353b.pth',
'mmdet/cascade_rcnn_r50_fpn_20e': 'https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/cascade_rcnn_r50_fpn_20e_20181123-db483a09.pth',
} # yapf: disable
def load_checkpoint(model, filename, *args, **kwargs):
try:
filename = get_mmskeleton_url(filename)
return mmcv_load_checkpoint(model, filename, *args, **kwargs)
except (urllib.error.HTTPError, urllib.error.URLError) as e:
raise Exception(url_error_message.format(filename)) from e
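# Example (the model object is assumed to be an already-built torch.nn.Module):
#   load_checkpoint(model, "mmskeleton://st_gcn/kinetics-skeleton", map_location="cpu")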
def get_mmskeleton_url(filename):
if filename.startswith('mmskeleton://'):
model_name = filename[13:]
model_url = (mmskeleton_model_urls[model_name])
return model_url
return filename
def cache_checkpoint(filename):
try:
filename = get_mmskeleton_url(filename)
load_url_dist(get_mmskeleton_url(filename))
except (urllib.error.HTTPError, urllib.error.URLError) as e:
raise Exception(url_error_message.format(filename)) from e
url_error_message = """
==================================================
MMSkeleton failed to load the checkpoint from url:
{}
Please check your network connection, or manually download the checkpoint by following the instructions at:
https://github.com/open-mmlab/mmskeleton/blob/master/doc/MODEL_ZOO.md
""" | 47.638298 | 216 | 0.748995 | [
"Apache-2.0"
] | GlenGGG/DR-GCN | mmskeleton/utils/checkpoint.py | 2,239 | Python |
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 27 17:38:25 2020
@author: Wu Yichen
"""
from PIL import Image
import os
import os.path
import errno
import numpy as np
import sys
import pickle
import torch.utils.data as data
from torchvision.datasets.utils import download_url, check_integrity
import torch
import torch.nn.functional as F
from torch.autograd import Variable as V
import wideresnet as wrn
import torchvision.transforms as transforms
def uniform_mix_C(mixing_ratio, num_classes):
'''
returns a linear interpolation of a uniform matrix and an identity matrix
'''
return mixing_ratio * np.full((num_classes, num_classes), 1 / num_classes) + \
(1 - mixing_ratio) * np.eye(num_classes)
def flip_labels_C(corruption_prob, num_classes, seed=1):
'''
returns a matrix with (1 - corruption_prob) on the diagonals, and corruption_prob
concentrated in only one other entry for each row
'''
np.random.seed(seed)
C = np.eye(num_classes) * (1 - corruption_prob)
row_indices = np.arange(num_classes)
for i in range(num_classes):
C[i][np.random.choice(row_indices[row_indices != i])] = corruption_prob
return C
def flip_labels_C_two(corruption_prob, num_classes, seed=1):
'''
returns a matrix with (1 - corruption_prob) on the diagonals, and corruption_prob
    split evenly across two other entries in each row
'''
np.random.seed(seed)
C = np.eye(num_classes) * (1 - corruption_prob)
row_indices = np.arange(num_classes)
for i in range(num_classes):
C[i][np.random.choice(row_indices[row_indices != i], 2, replace=False)] = corruption_prob / 2
return C
class CIFAR10(data.Dataset):
base_folder = 'cifar-10-batches-py'
url = "http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz"
filename = "cifar-10-python.tar.gz"
tgz_md5 = 'c58f30108f718f92721af3b95e74349a'
train_list = [
['data_batch_1', 'c99cafc152244af753f735de768cd75f'],
['data_batch_2', 'd4bba439e000b95fd0a9bffe97cbabec'],
['data_batch_3', '54ebc095f3ab1f0389bbae665268c751'],
['data_batch_4', '634d18415352ddfa80567beed471001a'],
['data_batch_5', '482c414d41f54cd18b22e5b47cb7c3cb'],
]
test_list = [
['test_batch', '40351d587109b95175f43aff81a1287e'],
]
def __init__(self, root='', train=True, meta=True, num_meta=1000,
corruption_prob=0, corruption_type='unif', transform=None, target_transform=None,
download=False, seed=1):
self.count = 0
self.root = root
self.transform = transform
self.target_transform = target_transform
self.train = train # training set or test set
self.meta = meta
self.corruption_prob = corruption_prob
self.num_meta = num_meta
if download:
self.download()
if not self._check_integrity():
raise RuntimeError('Dataset not found or corrupted.' +
' You can use download=True to download it')
# now load the picked numpy arrays
if self.train:
self.train_data = []
self.train_labels = []
self.train_coarse_labels = []
self.train_labels_true = []
self.soft_labels = []
for fentry in self.train_list:
f = fentry[0]
file = os.path.join(root, self.base_folder, f)
fo = open(file, 'rb')
if sys.version_info[0] == 2:
entry = pickle.load(fo)
else:
entry = pickle.load(fo, encoding='latin1')
self.train_data.append(entry['data'])
if 'labels' in entry:
self.train_labels += entry['labels']
self.train_labels_true += entry['labels']
img_num_list = [int(self.num_meta/10)] * 10
num_classes = 10
else:
self.train_labels += entry['fine_labels']
self.train_labels_true += entry['fine_labels']
self.train_coarse_labels += entry['coarse_labels']
img_num_list = [int(self.num_meta/100)] * 100
num_classes = 100
fo.close()
self.train_data = np.concatenate(self.train_data)
self.train_data = self.train_data.reshape((50000, 3, 32, 32))
self.train_data = self.train_data.transpose((0, 2, 3, 1)) # convert to HWC
data_list_val = {}
for j in range(num_classes):
data_list_val[j] = [i for i, label in enumerate(self.train_labels) if label == j]
idx_to_meta = []
idx_to_train = []
print(img_num_list)
for cls_idx, img_id_list in data_list_val.items():
np.random.shuffle(img_id_list)
img_num = img_num_list[int(cls_idx)]
idx_to_meta.extend(img_id_list[:img_num])
idx_to_train.extend(img_id_list[img_num:])
if meta is True:
self.train_data = self.train_data[idx_to_meta]
self.train_labels = list(np.array(self.train_labels)[idx_to_meta])
else:
self.train_data = self.train_data[idx_to_train]
self.train_labels = list(np.array(self.train_labels)[idx_to_train])
self.train_labels_true = list(np.array(self.train_labels_true)[idx_to_train])
self.soft_labels = list(np.zeros((len(self.train_data),num_classes),dtype=np.float32))
self.prediction = np.zeros((len(self.train_data),10,num_classes),dtype=np.float32)
clean_labels = self.train_labels
np.save('clean_labels.npy', clean_labels)
if corruption_type == 'unif':
C = uniform_mix_C(self.corruption_prob, num_classes)
print(C)
self.C = C
elif corruption_type == 'flip':
C = flip_labels_C(self.corruption_prob, num_classes)
print(C)
self.C = C
elif corruption_type == 'flip2':
C = flip_labels_C_two(self.corruption_prob, num_classes)
print(C)
self.C = C
elif corruption_type == 'hierarchical':
assert num_classes == 100, 'You must use CIFAR-100 with the hierarchical corruption.'
coarse_fine = []
for i in range(20):
coarse_fine.append(set())
for i in range(len(self.train_labels)):
coarse_fine[self.train_coarse_labels[i]].add(self.train_labels[i])
for i in range(20):
coarse_fine[i] = list(coarse_fine[i])
C = np.eye(num_classes) * (1 - corruption_prob)
for i in range(20):
tmp = np.copy(coarse_fine[i])
for j in range(len(tmp)):
tmp2 = np.delete(np.copy(tmp), j)
C[tmp[j], tmp2] += corruption_prob * 1/len(tmp2)
self.C = C
print(C)
elif corruption_type == 'clabels':
net = wrn.WideResNet(40, num_classes, 2, dropRate=0.3).cuda()
model_name = './cifar{}_labeler'.format(num_classes)
net.load_state_dict(torch.load(model_name))
net.eval()
else:
assert False, "Invalid corruption type '{}' given. Must be in {'unif', 'flip', 'hierarchical'}".format(corruption_type)
np.random.seed(seed)
if corruption_type == 'clabels':
mean = [x / 255 for x in [125.3, 123.0, 113.9]]
std = [x / 255 for x in [63.0, 62.1, 66.7]]
test_transform = transforms.Compose(
[transforms.ToTensor(), transforms.Normalize(mean, std)])
# obtain sampling probabilities
sampling_probs = []
print('Starting labeling')
for i in range((len(self.train_labels) // 64) + 1):
current = self.train_data[i*64:(i+1)*64]
current = [Image.fromarray(current[i]) for i in range(len(current))]
current = torch.cat([test_transform(current[i]).unsqueeze(0) for i in range(len(current))], dim=0)
data = V(current).cuda()
logits = net(data)
smax = F.softmax(logits / 5) # temperature of 1
sampling_probs.append(smax.data.cpu().numpy())
sampling_probs = np.concatenate(sampling_probs, 0)
print('Finished labeling 1')
new_labeling_correct = 0
argmax_labeling_correct = 0
for i in range(len(self.train_labels)):
old_label = self.train_labels[i]
new_label = np.random.choice(num_classes, p=sampling_probs[i])
self.train_labels[i] = new_label
if old_label == new_label:
new_labeling_correct += 1
if old_label == np.argmax(sampling_probs[i]):
argmax_labeling_correct += 1
print('Finished labeling 2')
print('New labeling accuracy:', new_labeling_correct / len(self.train_labels))
print('Argmax labeling accuracy:', argmax_labeling_correct / len(self.train_labels))
else:
for i in range(len(self.train_labels)):
self.train_labels_true[i] = self.train_labels[i]
for i in range(len(self.train_labels)):
self.train_labels[i] = np.random.choice(num_classes, p=C[self.train_labels[i]])
print('train',len(self.train_labels))
print('type',type(self.train_labels))
self.corruption_matrix = C
noise_labels = self.train_labels
np.save('noise_labels.npy', noise_labels)
else:
f = self.test_list[0][0]
file = os.path.join(root, self.base_folder, f)
fo = open(file, 'rb')
if sys.version_info[0] == 2:
entry = pickle.load(fo)
else:
entry = pickle.load(fo, encoding='latin1')
self.test_data = entry['data']
if 'labels' in entry:
self.test_labels = entry['labels']
else:
self.test_labels = entry['fine_labels']
fo.close()
self.test_data = self.test_data.reshape((10000, 3, 32, 32))
self.test_data = self.test_data.transpose((0, 2, 3, 1)) # convert to HWC
def label_update(self, results):
self.count += 1
# While updating the noisy label y_i by the probability s, we used the average output probability of the network of the past 10 epochs as s.
idx = (self.count - 1) % 10#10 #10
self.prediction[:, idx] = results
#self.prediction[:] =results
#print(self.prediction)
if self.count == 79: #79
self.soft_labels = self.prediction.mean(axis=1)
#print(self.soft_labels.shape)
#print(self.soft_labels)
#self.soft_labels = list(np.argmax(self.soft_labels, axis=1).astype(np.int64))
if self.count > 79:
self.soft_labels = results
#self.soft_labels = list(np.argmax(self.soft_labels, axis=1).astype(np.int64))
def __getitem__(self, index):
if self.train:
if self.meta:
#print(self.train_labels[index])
img, target, target_true= self.train_data[index], self.train_labels[index],self.train_labels_true[index]
else:
img, target, target_true= self.train_data[index], self.train_labels[index],self.train_labels_true[index]
soft_labels = self.soft_labels[index]
else:
img, target = self.test_data[index], self.test_labels[index]
# doing this so that it is consistent with all other datasets
# to return a PIL Image
img = Image.fromarray(img)
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
if self.train :
if self.meta:
return img, target
else:
return img,target,target_true,soft_labels,index
else:
return img, target
def __len__(self):
if self.train:
if self.meta is True:
return self.num_meta
else:
return 50000 - self.num_meta
else:
return 10000
def _check_integrity(self):
root = self.root
for fentry in (self.train_list + self.test_list):
filename, md5 = fentry[0], fentry[1]
fpath = os.path.join(root, self.base_folder, filename)
if not check_integrity(fpath, md5):
return False
return True
def download(self):
import tarfile
if self._check_integrity():
print('Files already downloaded and verified')
return
root = self.root
download_url(self.url, root, self.filename, self.tgz_md5)
# extract file
cwd = os.getcwd()
tar = tarfile.open(os.path.join(root, self.filename), "r:gz")
os.chdir(root)
tar.extractall()
tar.close()
os.chdir(cwd)
class CIFAR100(CIFAR10):
base_folder = 'cifar-100-python'
url = "http://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz"
filename = "cifar-100-python.tar.gz"
tgz_md5 = 'eb9058c3a382ffc7106e4002c42a8d85'
train_list = [
['train', '16019d7e3df5f24257cddd939b257f8d'],
]
test_list = [
['test', 'f0ef6b0ae62326f3e7ffdfab6717acfc'],
]
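# Illustrative construction of the noisy training split (root path and transform
# are placeholders):
#
#   train_data = CIFAR10(root='./data', train=True, meta=False, num_meta=1000,
#                        corruption_prob=0.4, corruption_type='unif',
#                        transform=None, download=True)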
| 41.125348 | 149 | 0.5424 | ["MIT"] | WuYichen-97/Learning-to-Purify-Noisy-Labels-via-Meta-Soft-Label-Corrector | dataloader.py | 14,764 | Python |
import os
import torch
from typing import List
from dqc.utils.datastruct import CGTOBasis
__all__ = ["loadbasis"]
_dtype = torch.double
_device = torch.device("cpu")
def loadbasis(cmd: str, dtype: torch.dtype = _dtype,
device: torch.device = _device, requires_grad: bool = False) -> \
List[CGTOBasis]:
"""
Load basis from a file and return the list of CGTOBasis.
Arguments
---------
cmd: str
This can be a file path where the basis is stored or a
string in format ``"atomz:basis"``, e.g. ``"1:6-311++G**"``.
dtype: torch.dtype
Tensor data type for ``alphas`` and ``coeffs`` of the GTO basis
device: torch.device
Tensor device for ``alphas`` and ``coeffs``
requires_grad: bool
If ``True``, the ``alphas`` and ``coeffs`` tensors become differentiable
Returns
-------
list of CGTOBasis
List of GTO basis loaded from the given file
"""
res = []
if not os.path.exists(cmd):
file = _get_basis_file(cmd)
else:
file = cmd
# read the content
with open(file, "r") as f:
lines = f.read().split("\n")
# skip the header
while True:
line = lines.pop(0)
if line == "":
continue
if line.startswith("!"):
continue
break
# now it is at the orbital description
while len(lines) > 0:
line = lines.pop(0)
if line.startswith("**"):
break
desc = line.split()
nlines = int(desc[1])
if nlines == 0:
raise RuntimeError("Zero line on basis %s" % file)
# read the exponents and the coefficients
alphas = []
coeffsT = []
for i in range(nlines):
alphacoeff = [_read_float(f) for f in lines.pop(0).split()]
alphas.append(alphacoeff[0])
coeffsT.append(alphacoeff[1:])
# coeffsT: list with shape (nbasis, ncontr)
# coeffs: list with shape (ncontr, nbasis)
coeffs = list(zip(*coeffsT))
ncoeffs = len(coeffs)
angmoms = _expand_angmoms(desc[0], ncoeffs)
# convert to tensor
alpha = torch.tensor(alphas, dtype=dtype, device=device, requires_grad=requires_grad)
for i in range(ncoeffs):
coeff = torch.tensor(coeffs[i], dtype=dtype, device=device, requires_grad=requires_grad)
basis = CGTOBasis(angmom=angmoms[i], alphas=alpha, coeffs=coeff)
basis.wfnormalize_()
res.append(basis)
return res
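# For example, loadbasis("1:3-21G") downloads (if not cached) and parses the 3-21G
# basis for hydrogen, returning one CGTOBasis per contracted shell.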
def _read_float(s: str) -> float:
s = s.replace("D", "E")
return float(s)
def _get_basis_file(cmd: str) -> str:
# parse the string command, check if the basis has already been downloaded
# (download if not), and return the file name
# parse to get the atomz and the basisname
atomz_str, raw_basisname = cmd.split(":")
raw_basisname = raw_basisname.strip()
atomz = int(atomz_str)
# get the path to the database
basisname = _normalize_basisname(raw_basisname)
thisdir = os.path.dirname(os.path.realpath(__file__))
fname = "%02d.gaussian94" % atomz
fdir = os.path.join(thisdir, ".database", basisname)
fpath = os.path.join(fdir, fname)
# if the file does not exist, download it
if not os.path.exists(fpath):
print("The %s basis for atomz %d does not exist, but we will download it" %
(raw_basisname, atomz))
if not os.path.exists(fdir):
os.makedirs(fdir)
_download_basis(fpath, atomz, raw_basisname)
return fpath
def _normalize_basisname(basisname: str) -> str:
b = basisname.lower()
b = b.replace("+", "p")
b = b.replace("*", "s")
b = b.replace("(", "_")
b = b.replace(")", "_")
b = b.replace(",", "_")
return b
def _download_basis(fname: str, atomz: int, basisname: str) -> None:
import basis_set_exchange as bse
s = bse.get_basis(basisname, elements=[atomz], fmt="gaussian94")
with open(fname, "w") as f:
f.write(s)
print("Downloaded to %s" % fname)
def _expand_angmoms(s: str, n: int) -> List[int]:
# convert the angular momentum characters into angmom and returns a list
# of n integer containing the angular momentums
if len(s) == n:
pass
elif n % len(s) == 0:
s = s * (n // len(s))
else:
raise RuntimeError("Do not know how to read orbital %s with %d coefficient columns" %
(s, n))
s = s.lower()
spdfmap = {
"s": 0,
"p": 1,
"d": 2,
"f": 3,
"g": 4,
"h": 5,
"i": 6,
}
angmoms = [spdfmap[c] for c in s]
return angmoms
| 31.647059 | 101 | 0.564436 | ["Apache-2.0"] | Jaikinator/dqc | dqc/api/loadbasis.py | 4,842 | Python |
import datetime
import threading
import contextlib
import pyotp
import qrcode
from errbot import BotPlugin, botcmd, arg_botcmd, cmdfilter
# OTP expires every hour
_OTP_EXPIRE = datetime.timedelta(hours=1)
_BASE_TIME = datetime.datetime(year=datetime.MINYEAR, month=1, day=1)
class otp(BotPlugin):
'''
Implement One Time Passwords for command filtering.
'''
# lock protects storage
lock = threading.Lock()
def activate(self):
super(otp, self).activate()
# Set the data directory for the plugin
self.DATA_DIR = '{0}/ '.format(self.bot_config.BOT_DATA_DIR)
if 'commands' not in self:
self['commands'] = set()
if 'secrets' not in self:
self['secrets'] = dict()
@contextlib.contextmanager
def stored(self, key):
'''
This is a convenience tool to make plugin storage easier.
'''
value = self[key]
try:
yield value
finally:
self[key] = value
def get_configuration_template(self):
return dict(
provision_via_chat=False,
max_retries=10
)
def build_qrcode(self, user, url):
'''Internal method used to build the QRCode image for token provisioning.'''
prefix = self.DATA_DIR
qrcode.make(url).save('{0}{1}-qrcode.png'.format(prefix, user), format='png')
def get_identity(self, message):
'''Wrapper to make sure the correct identity object is used.'''
try:
return message.frm.aclattr
except AttributeError:
return message.frm.person
@botcmd(admin_only=True)
def otp_delete_all(self, message, args):
'''
WARNING: This command removes ALL OTP entries.
'''
self['commands'] = set()
self['secrets'] = dict()
return 'Removed **all** OTP tokens and command filters.'
@arg_botcmd('cmd', type=str, admin_only=True, template='otp_add_command')
def otp_add_command(self, message, cmd=None):
'''
Add a command to OTP command filtering.
'''
with self.lock:
with self.stored('commands') as commands:
commands.add(cmd)
return dict(command=cmd)
#return 'Added {0} to OTP filtered commands.'.format(cmd)
@arg_botcmd('cmd', type=str, admin_only=True, template='otp_remove_command')
def otp_remove_command(self, message, cmd=None):
'''
Remove a command from OTP command filtering.
'''
with self.lock:
with self.stored('commands') as commands:
if cmd not in commands:
return dict(err=True, command=cmd)
commands.remove(cmd)
return dict(err=False, command=cmd)
@botcmd(admin_only=True, template='otp_commands')
def otp_commands(self, message, args):
'''
List the commands that are filtered by OTP.
'''
return dict(commands=self['commands'])
@arg_botcmd('user', type=str, admin_only=True, template='otp_secret_create')
def otp_secret_create(self, message, user=None):
'''
Send a new secret for a user.
'''
secret = pyotp.random_base32()
with self.lock:
with self.stored('secrets') as secrets:
secrets[user] = (secret, 0, _BASE_TIME)
totp = pyotp.TOTP(secret)
url = totp.provisioning_uri(user)
self.build_qrcode(user, url)
if self.config:
if self.config.get('provision_via_chat'):
f = open('{0}{1}-qrcode.png'.format(self.DATA_DIR, user), 'rb')
self.send_stream_request(self.build_identifier(user), f, name='OTP-secret.png')
self.send_templated(self.build_identifier(user), 'otp_secret_create_pm', dict(url=url))
return dict(chat_enrollment=True, user=user)
return dict(chat_enrollment=False, user=user)
@arg_botcmd('otp', type=int, template='otp_auth')
def otp_auth(self, message, otp=None):
'''
Authenticate with OTP to the bot to pass OTP filtering.
'''
# OTP shouldn't be done in a group chat channel.
if message.is_group:
return dict(group_chat=True)
identity = self.get_identity(message)
if identity not in self['secrets']:
return dict(not_enrolled=True)
secret, attempts, _ = self['secrets'][identity]
totp = pyotp.TOTP(secret)
if totp.verify(otp):
with self.lock:
with self.stored('secrets') as secrets:
secret, _, _ = secrets[identity]
secrets[identity] = (secret, 0, datetime.datetime.now())
return dict(success=True)
else:
# Increase the number of attempts, or burn secret
with self.lock:
with self.stored('secrets') as secrets:
secret, attempts, ts = secrets[identity]
if attempts > self.config.get('max_retries'):
secret = ''
secrets[identity] = (secret, attempts+1, ts)
return dict(success=False)
@cmdfilter
def otp_filter(self, message, command, args, dry_run):
'''
Filter commands to determine if user has recently validated with OTP.
'''
with self.lock:
if command in self['commands']:
self.log.info('{0} is protected by OTP. Processing.'.format(command))
identity = self.get_identity(message)
secrets = self['secrets']
if identity not in secrets:
# Command is filtered, user doesn't have an OTP token
self.send_templated(message.frm, 'otp_filter', dict(not_enrolled=True))
return None, None, None
_, _, lastotp = secrets[identity]
if datetime.datetime.now() - lastotp > _OTP_EXPIRE:
self.log.info('{0} has not authenticated with OTP since expire'.format(identity))
self.send_templated(message.frm, 'otp_filter', dict(auth_required=True))
return None, None, None
self.log.info('OTP ok, permit command.')
        return message, command, args
| 30.831461 | 92 | 0.676749 | ["BSD-3-Clause"] | hosom/jarvis | plugins/otp/otp.py | 5,488 | Python |
"""
WSGI config for kweetservice project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "kweetservice.settings")
application = get_wsgi_application()
| 23.588235 | 78 | 0.790524 | ["MIT"] | teunw/JEA6-Kweeter | kweetservice/kweetservice/wsgi.py | 401 | Python |
lengths = {0: 0, 1: 1}
def sequenceLength(n: int) -> int:
global lengths
if n not in lengths:
if n % 2 == 0:
lengths[n] = sequenceLength(n//2) + 1
else:
lengths[n] = sequenceLength(3 * n + 1) + 1
return lengths[n]
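# For example, the chain starting at 13 is 13 -> 40 -> 20 -> 10 -> 5 -> 16 -> 8 -> 4 -> 2 -> 1,
# which has 10 terms, so sequenceLength(13) == 10.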
def solution(n: int = 1000000) -> int:
result = 0
maxLength = 0
for i in range(n):
counter = sequenceLength(i)
if counter > maxLength:
result = i
maxLength = counter
return result
print(solution())
| 22.826087 | 56 | 0.531429 | ["Unlicense"] | gashev/algorithms | project-euler/14/solution.py | 525 | Python |
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" TF 2.0 ConvBERT model."""
import tensorflow as tf
from ...activations_tf import get_tf_activation
from ...file_utils import (
MULTIPLE_CHOICE_DUMMY_INPUTS,
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
)
from ...modeling_tf_outputs import (
TFBaseModelOutput,
TFMaskedLMOutput,
TFMultipleChoiceModelOutput,
TFQuestionAnsweringModelOutput,
TFSequenceClassifierOutput,
TFTokenClassifierOutput,
)
from ...modeling_tf_utils import (
TFMaskedLanguageModelingLoss,
TFMultipleChoiceLoss,
TFPreTrainedModel,
TFQuestionAnsweringLoss,
TFSequenceClassificationLoss,
TFSequenceSummary,
TFTokenClassificationLoss,
get_initializer,
input_processing,
keras_serializable,
shape_list,
)
from ...utils import logging
from .configuration_convbert import ConvBertConfig
logger = logging.get_logger(__name__)
_CHECKPOINT_FOR_DOC = "YituTech/conv-bert-base"
_CONFIG_FOR_DOC = "ConvBertConfig"
_TOKENIZER_FOR_DOC = "ConvBertTokenizer"
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST = [
"YituTech/conv-bert-base",
"YituTech/conv-bert-medium-small",
"YituTech/conv-bert-small",
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
]
# Copied from transformers.models.albert.modeling_tf_albert.TFAlbertEmbeddings with Albert->ConvBert
class TFConvBertEmbeddings(tf.keras.layers.Layer):
"""Construct the embeddings from word, position and token_type embeddings."""
def __init__(self, config: ConvBertConfig, **kwargs):
super().__init__(**kwargs)
self.vocab_size = config.vocab_size
self.type_vocab_size = config.type_vocab_size
self.embedding_size = config.embedding_size
self.max_position_embeddings = config.max_position_embeddings
self.initializer_range = config.initializer_range
self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob)
def build(self, input_shape: tf.TensorShape):
with tf.name_scope("word_embeddings"):
self.weight = self.add_weight(
name="weight",
shape=[self.vocab_size, self.embedding_size],
initializer=get_initializer(self.initializer_range),
)
with tf.name_scope("token_type_embeddings"):
self.token_type_embeddings = self.add_weight(
name="embeddings",
shape=[self.type_vocab_size, self.embedding_size],
initializer=get_initializer(self.initializer_range),
)
with tf.name_scope("position_embeddings"):
self.position_embeddings = self.add_weight(
name="embeddings",
shape=[self.max_position_embeddings, self.embedding_size],
initializer=get_initializer(self.initializer_range),
)
super().build(input_shape)
# Copied from transformers.models.bert.modeling_tf_bert.TFBertEmbeddings.call
def call(
self,
input_ids: tf.Tensor = None,
position_ids: tf.Tensor = None,
token_type_ids: tf.Tensor = None,
inputs_embeds: tf.Tensor = None,
past_key_values_length=0,
training: bool = False,
) -> tf.Tensor:
"""
Applies embedding based on inputs tensor.
Returns:
final_embeddings (`tf.Tensor`): output embedding tensor.
"""
if input_ids is None and inputs_embeds is None:
raise ValueError("Need to provide either `input_ids` or `input_embeds`.")
if input_ids is not None:
inputs_embeds = tf.gather(params=self.weight, indices=input_ids)
input_shape = shape_list(inputs_embeds)[:-1]
if token_type_ids is None:
token_type_ids = tf.fill(dims=input_shape, value=0)
if position_ids is None:
position_ids = tf.expand_dims(
tf.range(start=past_key_values_length, limit=input_shape[1] + past_key_values_length), axis=0
)
position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids)
token_type_embeds = tf.gather(params=self.token_type_embeddings, indices=token_type_ids)
final_embeddings = inputs_embeds + position_embeds + token_type_embeds
final_embeddings = self.LayerNorm(inputs=final_embeddings)
final_embeddings = self.dropout(inputs=final_embeddings, training=training)
return final_embeddings
class TFConvBertSelfAttention(tf.keras.layers.Layer):
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
f"heads ({config.num_attention_heads})"
)
new_num_attention_heads = int(config.num_attention_heads / config.head_ratio)
if new_num_attention_heads < 1:
self.head_ratio = config.num_attention_heads
num_attention_heads = 1
else:
num_attention_heads = new_num_attention_heads
self.head_ratio = config.head_ratio
self.num_attention_heads = num_attention_heads
self.conv_kernel_size = config.conv_kernel_size
assert (
config.hidden_size % self.num_attention_heads == 0
), "hidden_size should be divisible by num_attention_heads"
self.attention_head_size = config.hidden_size // config.num_attention_heads
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = tf.keras.layers.Dense(
self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="query"
)
self.key = tf.keras.layers.Dense(
self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="key"
)
self.value = tf.keras.layers.Dense(
self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="value"
)
self.key_conv_attn_layer = tf.keras.layers.SeparableConv1D(
self.all_head_size,
self.conv_kernel_size,
padding="same",
activation=None,
depthwise_initializer=get_initializer(1 / self.conv_kernel_size),
pointwise_initializer=get_initializer(config.initializer_range),
name="key_conv_attn_layer",
)
self.conv_kernel_layer = tf.keras.layers.Dense(
self.num_attention_heads * self.conv_kernel_size,
activation=None,
name="conv_kernel_layer",
kernel_initializer=get_initializer(config.initializer_range),
)
self.conv_out_layer = tf.keras.layers.Dense(
self.all_head_size,
activation=None,
name="conv_out_layer",
kernel_initializer=get_initializer(config.initializer_range),
)
self.dropout = tf.keras.layers.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x, batch_size):
# Reshape from [batch_size, seq_length, all_head_size] to [batch_size, seq_length, num_attention_heads, attention_head_size]
x = tf.reshape(x, (batch_size, -1, self.num_attention_heads, self.attention_head_size))
return tf.transpose(x, perm=[0, 2, 1, 3])
def call(self, hidden_states, attention_mask, head_mask, output_attentions, training=False):
batch_size = shape_list(hidden_states)[0]
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
mixed_key_conv_attn_layer = self.key_conv_attn_layer(hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer, batch_size)
key_layer = self.transpose_for_scores(mixed_key_layer, batch_size)
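        # Span-based dynamic convolution branch of ConvBERT's mixed attention
        # (a sketch of the intent, per the steps below): the depthwise-separable
        # conv view of the keys is gated by the queries, turned into a
        # per-position kernel of width `conv_kernel_size` (softmaxed over kernel
        # positions), and later applied to an unfolded dense projection of the
        # inputs (`conv_out_layer`).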
conv_attn_layer = tf.multiply(mixed_key_conv_attn_layer, mixed_query_layer)
conv_kernel_layer = self.conv_kernel_layer(conv_attn_layer)
conv_kernel_layer = tf.reshape(conv_kernel_layer, [-1, self.conv_kernel_size, 1])
conv_kernel_layer = tf.nn.softmax(conv_kernel_layer, axis=1)
paddings = tf.constant(
[
[
0,
0,
],
[int((self.conv_kernel_size - 1) / 2), int((self.conv_kernel_size - 1) / 2)],
[0, 0],
]
)
conv_out_layer = self.conv_out_layer(hidden_states)
conv_out_layer = tf.reshape(conv_out_layer, [batch_size, -1, self.all_head_size])
conv_out_layer = tf.pad(conv_out_layer, paddings, "CONSTANT")
unfold_conv_out_layer = tf.stack(
[
tf.slice(conv_out_layer, [0, i, 0], [batch_size, shape_list(mixed_query_layer)[1], self.all_head_size])
for i in range(self.conv_kernel_size)
],
axis=-1,
)
conv_out_layer = tf.reshape(unfold_conv_out_layer, [-1, self.attention_head_size, self.conv_kernel_size])
conv_out_layer = tf.matmul(conv_out_layer, conv_kernel_layer)
conv_out_layer = tf.reshape(conv_out_layer, [-1, self.all_head_size])
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = tf.matmul(
query_layer, key_layer, transpose_b=True
) # (batch size, num_heads, seq_len_q, seq_len_k)
dk = tf.cast(shape_list(key_layer)[-1], attention_scores.dtype) # scale attention_scores
attention_scores = attention_scores / tf.math.sqrt(dk)
if attention_mask is not None:
            # Apply the attention mask (precomputed for all layers in the TFConvBertMainLayer call() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = tf.nn.softmax(attention_scores, axis=-1)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs, training=training)
# Mask heads if we want to
if head_mask is not None:
attention_probs = attention_probs * head_mask
value_layer = tf.reshape(
mixed_value_layer, [batch_size, -1, self.num_attention_heads, self.attention_head_size]
)
value_layer = tf.transpose(value_layer, [0, 2, 1, 3])
context_layer = tf.matmul(attention_probs, value_layer)
context_layer = tf.transpose(context_layer, perm=[0, 2, 1, 3])
conv_out = tf.reshape(conv_out_layer, [batch_size, -1, self.num_attention_heads, self.attention_head_size])
context_layer = tf.concat([context_layer, conv_out], 2)
context_layer = tf.reshape(
context_layer, (batch_size, -1, self.head_ratio * self.all_head_size)
) # (batch_size, seq_len_q, all_head_size)
outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
return outputs
class TFConvBertSelfOutput(tf.keras.layers.Layer):
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.dense = tf.keras.layers.Dense(
config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
)
self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob)
def call(self, hidden_states, input_tensor, training=False):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states, training=training)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class TFConvBertAttention(tf.keras.layers.Layer):
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.self_attention = TFConvBertSelfAttention(config, name="self")
self.dense_output = TFConvBertSelfOutput(config, name="output")
def prune_heads(self, heads):
raise NotImplementedError
def call(self, input_tensor, attention_mask, head_mask, output_attentions, training=False):
self_outputs = self.self_attention(
input_tensor, attention_mask, head_mask, output_attentions, training=training
)
attention_output = self.dense_output(self_outputs[0], input_tensor, training=training)
outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
return outputs
class GroupedLinearLayer(tf.keras.layers.Layer):
def __init__(self, input_size, output_size, num_groups, kernel_initializer, **kwargs):
super().__init__(**kwargs)
self.input_size = input_size
self.output_size = output_size
self.num_groups = num_groups
self.kernel_initializer = kernel_initializer
self.group_in_dim = self.input_size // self.num_groups
self.group_out_dim = self.output_size // self.num_groups
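    # Grouped projection: the feature dimension is split into `num_groups`
    # chunks of `group_in_dim`, each chunk is projected with its own
    # [group_in_dim, group_out_dim] slice of `kernel`, and the per-group
    # outputs are concatenated back to `output_size` (see the
    # reshape/transpose steps in `call`).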
def build(self, input_shape):
self.kernel = self.add_weight(
"kernel",
shape=[self.group_out_dim, self.group_in_dim, self.num_groups],
initializer=self.kernel_initializer,
trainable=True,
)
self.bias = self.add_weight(
"bias", shape=[self.output_size], initializer=self.kernel_initializer, dtype=self.dtype, trainable=True
)
def call(self, hidden_states):
batch_size = shape_list(hidden_states)[0]
x = tf.transpose(tf.reshape(hidden_states, [-1, self.num_groups, self.group_in_dim]), [1, 0, 2])
x = tf.matmul(x, tf.transpose(self.kernel, [2, 1, 0]))
x = tf.transpose(x, [1, 0, 2])
x = tf.reshape(x, [batch_size, -1, self.output_size])
x = tf.nn.bias_add(value=x, bias=self.bias)
return x
class TFConvBertIntermediate(tf.keras.layers.Layer):
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
if config.num_groups == 1:
self.dense = tf.keras.layers.Dense(
config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
)
else:
self.dense = GroupedLinearLayer(
config.hidden_size,
config.intermediate_size,
num_groups=config.num_groups,
kernel_initializer=get_initializer(config.initializer_range),
name="dense",
)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = get_tf_activation(config.hidden_act)
else:
self.intermediate_act_fn = config.hidden_act
def call(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
class TFConvBertOutput(tf.keras.layers.Layer):
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
if config.num_groups == 1:
self.dense = tf.keras.layers.Dense(
config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
)
else:
self.dense = GroupedLinearLayer(
config.intermediate_size,
config.hidden_size,
num_groups=config.num_groups,
kernel_initializer=get_initializer(config.initializer_range),
name="dense",
)
self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob)
def call(self, hidden_states, input_tensor, training=False):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states, training=training)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class TFConvBertLayer(tf.keras.layers.Layer):
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.attention = TFConvBertAttention(config, name="attention")
self.intermediate = TFConvBertIntermediate(config, name="intermediate")
self.bert_output = TFConvBertOutput(config, name="output")
def call(self, hidden_states, attention_mask, head_mask, output_attentions, training=False):
attention_outputs = self.attention(
hidden_states, attention_mask, head_mask, output_attentions, training=training
)
attention_output = attention_outputs[0]
intermediate_output = self.intermediate(attention_output)
layer_output = self.bert_output(intermediate_output, attention_output, training=training)
outputs = (layer_output,) + attention_outputs[1:] # add attentions if we output them
return outputs
class TFConvBertEncoder(tf.keras.layers.Layer):
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.layer = [TFConvBertLayer(config, name=f"layer_._{i}") for i in range(config.num_hidden_layers)]
def call(
self,
hidden_states,
attention_mask,
head_mask,
output_attentions,
output_hidden_states,
return_dict,
training=False,
):
all_hidden_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
for i, layer_module in enumerate(self.layer):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_outputs = layer_module(
hidden_states, attention_mask, head_mask[i], output_attentions, training=training
)
hidden_states = layer_outputs[0]
if output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
# Add last layer
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None)
return TFBaseModelOutput(
last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions
)
class TFConvBertPredictionHeadTransform(tf.keras.layers.Layer):
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.dense = tf.keras.layers.Dense(
config.embedding_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
)
if isinstance(config.hidden_act, str):
self.transform_act_fn = get_tf_activation(config.hidden_act)
else:
self.transform_act_fn = config.hidden_act
self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
def call(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
@keras_serializable
class TFConvBertMainLayer(tf.keras.layers.Layer):
config_class = ConvBertConfig
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.embeddings = TFConvBertEmbeddings(config, name="embeddings")
if config.embedding_size != config.hidden_size:
self.embeddings_project = tf.keras.layers.Dense(config.hidden_size, name="embeddings_project")
self.encoder = TFConvBertEncoder(config, name="encoder")
self.config = config
def get_input_embeddings(self):
return self.embeddings
def set_input_embeddings(self, value):
self.embeddings.weight = value
self.embeddings.vocab_size = value.shape[0]
def _prune_heads(self, heads_to_prune):
"""
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}. See base
        class PreTrainedModel.
"""
raise NotImplementedError
def get_extended_attention_mask(self, attention_mask, input_shape, dtype):
if attention_mask is None:
attention_mask = tf.fill(input_shape, 1)
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
# this attention mask is more simple than the triangular masking of causal attention
# used in OpenAI GPT, we just need to prepare the broadcast dimension here.
extended_attention_mask = tf.reshape(attention_mask, (input_shape[0], 1, 1, input_shape[1]))
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
extended_attention_mask = tf.cast(extended_attention_mask, dtype)
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
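        # e.g. an attention_mask row [1, 1, 0] becomes [0.0, 0.0, -10000.0], so
        # the padded position gets ~zero probability after the softmax.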
return extended_attention_mask
def get_head_mask(self, head_mask):
if head_mask is not None:
raise NotImplementedError
else:
head_mask = [None] * self.config.num_hidden_layers
return head_mask
def call(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
training=False,
**kwargs,
):
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
training=training,
kwargs_call=kwargs,
)
if inputs["input_ids"] is not None and inputs["inputs_embeds"] is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif inputs["input_ids"] is not None:
input_shape = shape_list(inputs["input_ids"])
elif inputs["inputs_embeds"] is not None:
input_shape = shape_list(inputs["inputs_embeds"])[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
if inputs["attention_mask"] is None:
inputs["attention_mask"] = tf.fill(input_shape, 1)
if inputs["token_type_ids"] is None:
inputs["token_type_ids"] = tf.fill(input_shape, 0)
hidden_states = self.embeddings(
inputs["input_ids"],
inputs["position_ids"],
inputs["token_type_ids"],
inputs["inputs_embeds"],
training=inputs["training"],
)
extended_attention_mask = self.get_extended_attention_mask(
inputs["attention_mask"], input_shape, hidden_states.dtype
)
inputs["head_mask"] = self.get_head_mask(inputs["head_mask"])
if hasattr(self, "embeddings_project"):
hidden_states = self.embeddings_project(hidden_states, training=inputs["training"])
hidden_states = self.encoder(
hidden_states,
extended_attention_mask,
inputs["head_mask"],
inputs["output_attentions"],
inputs["output_hidden_states"],
inputs["return_dict"],
training=inputs["training"],
)
return hidden_states
class TFConvBertPreTrainedModel(TFPreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = ConvBertConfig
base_model_prefix = "convbert"
CONVBERT_START_DOCSTRING = r"""
This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
etc.)
This model is also a [tf.keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
    as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matters related to general usage and
behavior.
<Tip>
    TF 2.0 models accept two formats as inputs:
- having all inputs as keyword arguments (like PyTorch models), or
- having all inputs as a list, tuple or dict in the first positional arguments.
    This second option is useful when using the [`tf.keras.Model.fit`] method, which currently requires having all the
    tensors in the first argument of the model call function: `model(inputs)`.
    If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the
    first positional argument:
    - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
- a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
`model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
- a dictionary with one or several input Tensors associated to the input names given in the docstring:
`model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
</Tip>
Args:
config ([`ConvBertConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
CONVBERT_INPUTS_DOCSTRING = r"""
Args:
input_ids (`Numpy array` or `tf.Tensor` of shape `({0})`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`ConvBertTokenizer`]. See [`PreTrainedTokenizer.__call__`] and
[`PreTrainedTokenizer.encode`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
token_type_ids (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
1]`:
- 0 corresponds to a *sentence A* token,
- 1 corresponds to a *sentence B* token.
[What are token type IDs?](../glossary#token-type-ids)
position_ids (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
config.max_position_embeddings - 1]`.
[What are position IDs?](../glossary#position-ids)
head_mask (`Numpy array` or `tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (`tf.Tensor` of shape `({0}, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
model's internal embedding lookup matrix.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
config will be used instead.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
used instead.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. This argument can be used
in eager mode, in graph mode the value will always be set to True.
training (`bool`, *optional*, defaults to `False`):
Whether or not to use the model in training mode (some modules like dropout modules have different
behaviors between training and evaluation).
"""
@add_start_docstrings(
"The bare ConvBERT Model transformer outputting raw hidden-states without any specific head on top.",
CONVBERT_START_DOCSTRING,
)
class TFConvBertModel(TFConvBertPreTrainedModel):
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.convbert = TFConvBertMainLayer(config, name="convbert")
@add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TFBaseModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
training=False,
**kwargs,
):
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
training=training,
kwargs_call=kwargs,
)
outputs = self.convbert(
input_ids=inputs["input_ids"],
attention_mask=inputs["attention_mask"],
token_type_ids=inputs["token_type_ids"],
position_ids=inputs["position_ids"],
head_mask=inputs["head_mask"],
inputs_embeds=inputs["inputs_embeds"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
return outputs
def serving_output(self, output):
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFBaseModelOutput(last_hidden_state=output.last_hidden_state, hidden_states=hs, attentions=attns)
class TFConvBertMaskedLMHead(tf.keras.layers.Layer):
def __init__(self, config, input_embeddings, **kwargs):
super().__init__(**kwargs)
self.vocab_size = config.vocab_size
self.embedding_size = config.embedding_size
self.input_embeddings = input_embeddings
def build(self, input_shape):
self.bias = self.add_weight(shape=(self.vocab_size,), initializer="zeros", trainable=True, name="bias")
super().build(input_shape)
def get_output_embeddings(self):
return self.input_embeddings
def set_output_embeddings(self, value):
self.input_embeddings.weight = value
self.input_embeddings.vocab_size = shape_list(value)[0]
def get_bias(self):
return {"bias": self.bias}
def set_bias(self, value):
self.bias = value["bias"]
self.vocab_size = shape_list(value["bias"])[0]
def call(self, hidden_states):
seq_length = shape_list(tensor=hidden_states)[1]
hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, self.embedding_size])
hidden_states = tf.matmul(a=hidden_states, b=self.input_embeddings.weight, transpose_b=True)
hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, seq_length, self.vocab_size])
hidden_states = tf.nn.bias_add(value=hidden_states, bias=self.bias)
return hidden_states
class TFConvBertGeneratorPredictions(tf.keras.layers.Layer):
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
self.dense = tf.keras.layers.Dense(config.embedding_size, name="dense")
def call(self, generator_hidden_states, training=False):
hidden_states = self.dense(generator_hidden_states)
hidden_states = get_tf_activation("gelu")(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
@add_start_docstrings("""ConvBERT Model with a `language modeling` head on top.""", CONVBERT_START_DOCSTRING)
class TFConvBertForMaskedLM(TFConvBertPreTrainedModel, TFMaskedLanguageModelingLoss):
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, **kwargs)
self.vocab_size = config.vocab_size
self.convbert = TFConvBertMainLayer(config, name="convbert")
self.generator_predictions = TFConvBertGeneratorPredictions(config, name="generator_predictions")
if isinstance(config.hidden_act, str):
self.activation = get_tf_activation(config.hidden_act)
else:
self.activation = config.hidden_act
self.generator_lm_head = TFConvBertMaskedLMHead(config, self.convbert.embeddings, name="generator_lm_head")
def get_lm_head(self):
return self.generator_lm_head
def get_prefix_bias_name(self):
return self.name + "/" + self.generator_lm_head.name
@add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TFMaskedLMOutput,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
labels=None,
training=False,
**kwargs,
):
r"""
labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
            config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked);
            the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
"""
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
labels=labels,
training=training,
kwargs_call=kwargs,
)
generator_hidden_states = self.convbert(
input_ids=inputs["input_ids"],
attention_mask=inputs["attention_mask"],
token_type_ids=inputs["token_type_ids"],
position_ids=inputs["position_ids"],
head_mask=inputs["head_mask"],
inputs_embeds=inputs["inputs_embeds"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
generator_sequence_output = generator_hidden_states[0]
prediction_scores = self.generator_predictions(generator_sequence_output, training=inputs["training"])
prediction_scores = self.generator_lm_head(prediction_scores, training=inputs["training"])
loss = None if inputs["labels"] is None else self.compute_loss(inputs["labels"], prediction_scores)
if not inputs["return_dict"]:
output = (prediction_scores,) + generator_hidden_states[1:]
return ((loss,) + output) if loss is not None else output
return TFMaskedLMOutput(
loss=loss,
logits=prediction_scores,
hidden_states=generator_hidden_states.hidden_states,
attentions=generator_hidden_states.attentions,
)
# Copied from transformers.models.bert.modeling_tf_bert.TFBertForMaskedLM.serving_output
def serving_output(self, output):
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFMaskedLMOutput(logits=output.logits, hidden_states=hs, attentions=attns)
class TFConvBertClassificationHead(tf.keras.layers.Layer):
"""Head for sentence-level classification tasks."""
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.dense = tf.keras.layers.Dense(
config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
)
classifier_dropout = (
config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
)
self.dropout = tf.keras.layers.Dropout(classifier_dropout)
self.out_proj = tf.keras.layers.Dense(
config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="out_proj"
)
self.config = config
def call(self, hidden_states, **kwargs):
x = hidden_states[:, 0, :] # take <s> token (equiv. to [CLS])
x = self.dropout(x)
x = self.dense(x)
x = get_tf_activation(self.config.hidden_act)(x)
x = self.dropout(x)
x = self.out_proj(x)
return x
@add_start_docstrings(
"""
ConvBERT Model transformer with a sequence classification/regression head on top e.g., for GLUE tasks.
""",
CONVBERT_START_DOCSTRING,
)
class TFConvBertForSequenceClassification(TFConvBertPreTrainedModel, TFSequenceClassificationLoss):
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.num_labels = config.num_labels
self.convbert = TFConvBertMainLayer(config, name="convbert")
self.classifier = TFConvBertClassificationHead(config, name="classifier")
@add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TFSequenceClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
labels=None,
training=False,
**kwargs,
):
r"""
labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss); if
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
labels=labels,
training=training,
kwargs_call=kwargs,
)
outputs = self.convbert(
inputs["input_ids"],
attention_mask=inputs["attention_mask"],
token_type_ids=inputs["token_type_ids"],
position_ids=inputs["position_ids"],
head_mask=inputs["head_mask"],
inputs_embeds=inputs["inputs_embeds"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
logits = self.classifier(outputs[0], training=inputs["training"])
loss = None if inputs["labels"] is None else self.compute_loss(inputs["labels"], logits)
if not inputs["return_dict"]:
output = (logits,) + outputs[1:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
def serving_output(self, output):
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFSequenceClassifierOutput(logits=output.logits, hidden_states=hs, attentions=attns)
@add_start_docstrings(
"""
ConvBERT Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
softmax) e.g. for RocStories/SWAG tasks.
""",
CONVBERT_START_DOCSTRING,
)
class TFConvBertForMultipleChoice(TFConvBertPreTrainedModel, TFMultipleChoiceLoss):
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.convbert = TFConvBertMainLayer(config, name="convbert")
self.sequence_summary = TFSequenceSummary(
config, initializer_range=config.initializer_range, name="sequence_summary"
)
self.classifier = tf.keras.layers.Dense(
1, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
)
@property
def dummy_inputs(self):
"""
Dummy inputs to build the network.
Returns:
tf.Tensor with dummy inputs
"""
return {"input_ids": tf.convert_to_tensor(MULTIPLE_CHOICE_DUMMY_INPUTS)}
@add_start_docstrings_to_model_forward(
CONVBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
)
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TFMultipleChoiceModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
labels=None,
training=False,
**kwargs,
):
r"""
labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices]`
where `num_choices` is the size of the second dimension of the input tensors. (See `input_ids` above)
"""
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
labels=labels,
training=training,
kwargs_call=kwargs,
)
if inputs["input_ids"] is not None:
num_choices = shape_list(inputs["input_ids"])[1]
seq_length = shape_list(inputs["input_ids"])[2]
else:
num_choices = shape_list(inputs["inputs_embeds"])[1]
seq_length = shape_list(inputs["inputs_embeds"])[2]
flat_input_ids = tf.reshape(inputs["input_ids"], (-1, seq_length)) if inputs["input_ids"] is not None else None
flat_attention_mask = (
tf.reshape(inputs["attention_mask"], (-1, seq_length)) if inputs["attention_mask"] is not None else None
)
flat_token_type_ids = (
tf.reshape(inputs["token_type_ids"], (-1, seq_length)) if inputs["token_type_ids"] is not None else None
)
flat_position_ids = (
tf.reshape(inputs["position_ids"], (-1, seq_length)) if inputs["position_ids"] is not None else None
)
flat_inputs_embeds = (
tf.reshape(inputs["inputs_embeds"], (-1, seq_length, shape_list(inputs["inputs_embeds"])[3]))
if inputs["inputs_embeds"] is not None
else None
)
outputs = self.convbert(
flat_input_ids,
flat_attention_mask,
flat_token_type_ids,
flat_position_ids,
inputs["head_mask"],
flat_inputs_embeds,
inputs["output_attentions"],
inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
logits = self.sequence_summary(outputs[0], training=inputs["training"])
logits = self.classifier(logits)
reshaped_logits = tf.reshape(logits, (-1, num_choices))
loss = None if inputs["labels"] is None else self.compute_loss(inputs["labels"], reshaped_logits)
if not inputs["return_dict"]:
output = (reshaped_logits,) + outputs[1:]
return ((loss,) + output) if loss is not None else output
return TFMultipleChoiceModelOutput(
loss=loss,
logits=reshaped_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@tf.function(
input_signature=[
{
"input_ids": tf.TensorSpec((None, None, None), tf.int32, name="input_ids"),
"attention_mask": tf.TensorSpec((None, None, None), tf.int32, name="attention_mask"),
"token_type_ids": tf.TensorSpec((None, None, None), tf.int32, name="token_type_ids"),
}
]
)
def serving(self, inputs):
output = self.call(inputs)
return self.serving_output(output)
def serving_output(self, output):
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFMultipleChoiceModelOutput(logits=output.logits, hidden_states=hs, attentions=attns)
@add_start_docstrings(
"""
ConvBERT Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
Named-Entity-Recognition (NER) tasks.
""",
CONVBERT_START_DOCSTRING,
)
class TFConvBertForTokenClassification(TFConvBertPreTrainedModel, TFTokenClassificationLoss):
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.num_labels = config.num_labels
self.convbert = TFConvBertMainLayer(config, name="convbert")
classifier_dropout = (
config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
)
self.dropout = tf.keras.layers.Dropout(classifier_dropout)
self.classifier = tf.keras.layers.Dense(
config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
)
@add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TFTokenClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
labels=None,
training=False,
**kwargs,
):
r"""
labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
"""
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
labels=labels,
training=training,
kwargs_call=kwargs,
)
outputs = self.convbert(
inputs["input_ids"],
attention_mask=inputs["attention_mask"],
token_type_ids=inputs["token_type_ids"],
position_ids=inputs["position_ids"],
head_mask=inputs["head_mask"],
inputs_embeds=inputs["inputs_embeds"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
sequence_output = outputs[0]
sequence_output = self.dropout(sequence_output, training=inputs["training"])
logits = self.classifier(sequence_output)
loss = None if inputs["labels"] is None else self.compute_loss(inputs["labels"], logits)
if not inputs["return_dict"]:
output = (logits,) + outputs[1:]
return ((loss,) + output) if loss is not None else output
return TFTokenClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
def serving_output(self, output):
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFTokenClassifierOutput(logits=output.logits, hidden_states=hs, attentions=attns)
@add_start_docstrings(
"""
ConvBERT Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
""",
CONVBERT_START_DOCSTRING,
)
class TFConvBertForQuestionAnswering(TFConvBertPreTrainedModel, TFQuestionAnsweringLoss):
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.num_labels = config.num_labels
self.convbert = TFConvBertMainLayer(config, name="convbert")
self.qa_outputs = tf.keras.layers.Dense(
config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="qa_outputs"
)
@add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TFQuestionAnsweringModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
start_positions=None,
end_positions=None,
training=False,
**kwargs,
):
r"""
start_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*):
Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
are not taken into account for computing the loss.
end_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*):
Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
are not taken into account for computing the loss.
"""
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
start_positions=start_positions,
end_positions=end_positions,
training=training,
kwargs_call=kwargs,
)
outputs = self.convbert(
inputs["input_ids"],
attention_mask=inputs["attention_mask"],
token_type_ids=inputs["token_type_ids"],
position_ids=inputs["position_ids"],
head_mask=inputs["head_mask"],
inputs_embeds=inputs["inputs_embeds"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
sequence_output = outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = tf.split(logits, 2, axis=-1)
start_logits = tf.squeeze(start_logits, axis=-1)
end_logits = tf.squeeze(end_logits, axis=-1)
loss = None
if inputs["start_positions"] is not None and inputs["end_positions"] is not None:
labels = {"start_position": inputs["start_positions"]}
labels["end_position"] = inputs["end_positions"]
loss = self.compute_loss(labels, (start_logits, end_logits))
if not inputs["return_dict"]:
output = (start_logits, end_logits) + outputs[1:]
return ((loss,) + output) if loss is not None else output
return TFQuestionAnsweringModelOutput(
loss=loss,
start_logits=start_logits,
end_logits=end_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
def serving_output(self, output):
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFQuestionAnsweringModelOutput(
start_logits=output.start_logits, end_logits=output.end_logits, hidden_states=hs, attentions=attns
)
| 40.59154 | 132 | 0.663164 | [
"Apache-2.0"
] | AK391/transformers | src/transformers/models/convbert/modeling_tf_convbert.py | 58,533 | Python |
""" Third party api wrappers"""
import os
import json
import nexmo
import africastalking
username = os.getenv('africastalking_username')
api_key = os.getenv('africastalking_api_key')
africastalking.initialize(username, api_key)
sms = africastalking.SMS
class ProvidersWrapper:
""" Class with all the thirdy party helper functions"""
def send_message(number, message):
client = nexmo.Client(key=os.getenv('nexmokey'), secret=os.getenv('nexmosecret'))
response = client.send_message({
'from': 'Nexmo',
'to': number,
'text': message,
})
if response["messages"][0]["status"] != "0":
response = sms.send(message, ['+' + number])
return response
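# A minimal usage sketch (assumes the nexmo/africastalking environment
# variables above are set; the phone number is illustrative only):
#
#     response = ProvidersWrapper.send_message('254700000000', 'Hello from sibsco')
#     print(response)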
| 27.592593 | 89 | 0.641611 | [
"MIT"
] | kwanj-k/sibsco | providers.py | 745 | Python |
import time
import cv2
import numpy as np
from collections import defaultdict
class Tracker(object):
def __init__(self, pLK=None):
if pLK is None:
# default LK param
pLK = self.pLK0()
self.lk_ = cv2.SparsePyrLKOpticalFlow_create(
**pLK)
self.tmp_ = defaultdict(lambda:None)
def pLK0(self):
"""
Default LK Params.
"""
return dict(
winSize = (12,6),
            maxLevel = 4, # == effective window up to 12*(2**4) x 6*(2**4) = 192x96
crit= (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 100, 0.03),
flags = 0,
minEigThreshold = 1e-3 # TODO : disable eig?
)
def __call__(self,
img1, img2,
pt1, pt2=None,
thresh=2.0,
return_msk=False
):
"""
Arguments:
img1(np.ndarray) : previous image. (color/mono) (HxWx?)
img2(np.ndarray) : current image (color/mono) (HxWx?)
pt1(np.ndarray) : previous points. (Mx2)
pt2(np.ndarray) : [Optional] current points estimate (Mx2)
thresh(float) : Flow Back-projection Error threshold
Returns:
pt2(np.ndarray) : current points. (Mx2)
idx(np.ndarray) : valid tracked indices from pt1 & pt2.
"""
if pt1.size <= 0:
# soft fail
pt2 = np.empty([0,2], dtype=np.float32)
if return_msk:
msk = np.empty([0], dtype=np.bool)
return pt2, msk
idx = np.empty([0], dtype=np.int32)
return pt2, idx
# stat img
h, w = np.shape(img2)[:2]
# convert to grayscale
# TODO : check if already gray/mono
if (np.ndim(img1) == 2) or img1.shape[2] == 1:
# already monochromatic
img1_gray = img1
img2_gray = img2
else:
# handle image # 1 + pre-allocated data cache
if self.tmp_['img1g'] is not None:
cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY, self.tmp_['img1g'])
img1_gray = self.tmp_['img1g']
else:
img1_gray = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
self.tmp_['img1g'] = np.empty_like(img1_gray)
# handle image # 2 + pre-allocated data cache
if self.tmp_['img2g'] is not None:
cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY, self.tmp_['img2g'])
img2_gray = self.tmp_['img2g']
else:
img2_gray = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
self.tmp_['img2g'] = np.empty_like(img2_gray)
# forward flow
if pt2 is not None:
# set initial flow flags
self.lk_.setFlags(self.lk_.getFlags() | cv2.OPTFLOW_USE_INITIAL_FLOW )
pt2, st, _ = self.lk_.calc(
img1_gray, img2_gray, pt1, pt2
)
else:
pt2, st, _ = self.lk_.calc(
img1_gray, img2_gray, pt1, None
)
st_fw = st[:,0].astype(np.bool)
# backward flow
# unset initial flow flags
self.lk_.setFlags(self.lk_.getFlags() & ~cv2.OPTFLOW_USE_INITIAL_FLOW )
pt1_r, st, _ = self.lk_.calc(
img2_gray, img1_gray, pt2, None
)
st_bw = st[:,0].astype(np.bool)
# override error with reprojection error
# (default error doesn't make much sense anyways)
err = np.linalg.norm(pt1 - pt1_r, axis=-1)
# apply mask
msk = np.logical_and.reduce([
# error check
err < thresh,
# bounds check
0 <= pt2[:,0],
0 <= pt2[:,1],
pt2[:,0] < w,
pt2[:,1] < h,
# status check
st_fw,
st_bw,
])
if return_msk:
return pt2, msk
else:
idx = np.where(msk)[0]
return pt2, idx
def main():
from matplotlib import pyplot as plt
# params
w = 2*640
h = 2*480
n = 2*1024
di = 8
dj = 32
track = Tracker()
img1 = np.random.randint(0, 255, size=(h,w,3), dtype=np.uint8)
#img2 = np.random.randint(0, 255, size=(480,640,3), dtype=np.uint8)
img2 = np.roll(img1, di, axis=0)
img2 = np.roll(img2, dj, axis=1)
#img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
#img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
pt1 = np.random.uniform((0,0), (w,h), size=(n,2)).astype(np.float32)
pt2, idx = track(img1, img2, pt1)
#pt2, idx = track(img1, img2, pt1, pt2)
fig, ax = plt.subplots(1,2)
ax[0].imshow(img1, alpha=0.5)
ax[0].plot(pt1[:,0], pt1[:,1], 'r+')
ax[1].imshow(img2, alpha=0.5)
ax[1].plot(pt1[:,0], pt1[:,1], 'bx')
ax[1].plot(pt2[:,0], pt2[:,1], 'r+')
plt.show()
if __name__ == "__main__":
main()
| 31.15 | 83 | 0.505618 | [
"MIT"
] | yycho0108/MoRoL | core/track.py | 4,984 | Python |
import unittest
from PyStacks.PyStacks.template import templateCF
class TestTemplate(unittest.TestCase):
def test_templateCF_Route53Zone(self):
resources = {
'route53_zone': {
'testr53zone': {
'name': 'example.com',
'comment': 'testzonecomment',
'hostedzone': {
'Name': 'testname',
'Tag2': 'testtagstuff'
},
'vpcs': {
'vpc-12345678': 'ap-southeast-2',
'vpc-87654321': 'us-west-2'
}
}
}
}
expected = {
'testr53zone': {
'Type': 'AWS::Route53::HostedZone',
'Properties': {
'HostedZoneConfig': {
'Comment': 'testzonecomment'
},
'HostedZoneTags': [
{
'Key': 'Name',
'Value': 'testname'
},
{
'Key': 'Tag2',
'Value': 'testtagstuff'
}
],
'VPCs': [
{
'VPCId': 'vpc-87654321',
'VPCRegion': 'us-west-2'
},
{
'VPCId': 'vpc-12345678',
'VPCRegion': 'ap-southeast-2'
}
],
'Name': 'example.com'
}
}
}
actual = templateCF(resources, 'resources')
self.assertDictEqual(actual, expected)
def test_templateCF_Route53Record(self):
resources = {
'route53_record': {
'testr53record': {
'comment': 'testcomment',
'zoneid': 'testzoneid',
'recordsets': [
[
'atest',
'A',
'1.2.3.4',
'900',
'0',
'base'
],
[
'cnametest',
'CNAME',
'example.com',
'900',
'0',
'base'
]
]
}
}
}
expected = {
'testr53record': {
'Type': 'AWS::Route53::RecordSetGroup',
'Properties': {
'Comment': 'testcomment',
'HostedZoneId': {
'Fn::ImportValue': {
'Fn::Sub': [
'${DNSStack}-Route53-testzoneid-Zone',
{
'DNSStack': {
'Ref': 'DNSStack'
}
}
]
}
},
'RecordSets': [
{
'Name': 'atest',
'Type': 'A',
'ResourceRecords': ['1.2.3.4'],
'TTL': '900',
'Weight': '0',
'SetIdentifier': 'base'
},
{
'Name': 'cnametest',
'Type': 'CNAME',
'ResourceRecords': ['example.com'],
'TTL': '900',
'Weight': '0',
'SetIdentifier': 'base'
}
]
}
}
}
actual = templateCF(resources, 'resources')
self.assertDictEqual(actual, expected)
if __name__ == '__main__':
unittest.main()
| 32.734848 | 70 | 0.265448 | [
"MIT"
] | 0xack13/PyStacks | PyStacks/test/templates/test_route53.py | 4,321 | Python |
from escpos.printer import Usb
from pathlib import Path
image = Path("/tamamo-no-mae/me-cloudy.png")
printer = Usb(0x0416, 0x5011, 0, profile="ZJ-5870")
printer.image(image)
printer.cut()
# with printer() as that:
# that.write('Hello, world!\n\n')
# # 000000000111111111122222222223
# # 123456789012345678901234567890
# that.write('Soluta sed voluptatem ut\n')
# that.write('facere aut. Modi placeat et\n')
# that.write('eius voluptate sint ut.\n')
# that.write('Facilis minima ex quia quia\n')
# that.write('consectetur ex ipsa. Neque et\n')
# that.write('voluptatem ipsa enim error\n')
# that.write('rthatrehenderit ex dolore.\n')
# that.write('Cupiditate ad voluptatem nisi.\n\n\n\n')
# ZJ-5870 | 37.95 | 58 | 0.670619 | [
"MIT"
] | paulhoule/usb_receipt_printer | demo.py | 759 | Python |
import operator
import numpy
import pytest
import cupy
from cupy import testing
class TestArrayElementwiseOp:
@testing.for_all_dtypes_combination(names=['x_type', 'y_type'])
@testing.numpy_cupy_allclose(rtol=1e-6, accept_error=TypeError)
def check_array_scalar_op(self, op, xp, x_type, y_type, swap=False,
no_bool=False, no_complex=False):
x_dtype = numpy.dtype(x_type)
y_dtype = numpy.dtype(y_type)
if no_bool and x_dtype == '?' and y_dtype == '?':
return xp.array(True)
if no_complex and (x_dtype.kind == 'c' or y_dtype.kind == 'c'):
return xp.array(True)
a = xp.array([[1, 2, 3], [4, 5, 6]], x_type)
if swap:
return op(y_type(3), a)
else:
return op(a, y_type(3))
def test_add_scalar(self):
self.check_array_scalar_op(operator.add)
def test_radd_scalar(self):
self.check_array_scalar_op(operator.add, swap=True)
def test_iadd_scalar(self):
self.check_array_scalar_op(operator.iadd)
def test_sub_scalar(self):
self.check_array_scalar_op(operator.sub, no_bool=True)
def test_rsub_scalar(self):
self.check_array_scalar_op(operator.sub, swap=True, no_bool=True)
def test_isub_scalar(self):
self.check_array_scalar_op(operator.isub, no_bool=True)
def test_mul_scalar(self):
self.check_array_scalar_op(operator.mul)
def test_rmul_scalar(self):
self.check_array_scalar_op(operator.mul, swap=True)
def test_imul_scalar(self):
self.check_array_scalar_op(operator.imul)
def test_truediv_scalar(self):
with numpy.errstate(divide='ignore'):
self.check_array_scalar_op(operator.truediv)
def test_rtruediv_scalar(self):
with numpy.errstate(divide='ignore'):
self.check_array_scalar_op(operator.truediv, swap=True)
def test_itruediv_scalar(self):
with numpy.errstate(divide='ignore'):
self.check_array_scalar_op(operator.itruediv)
def test_floordiv_scalar(self):
with numpy.errstate(divide='ignore'):
self.check_array_scalar_op(operator.floordiv, no_complex=True)
def test_rfloordiv_scalar(self):
with numpy.errstate(divide='ignore'):
self.check_array_scalar_op(operator.floordiv, swap=True,
no_complex=True)
def test_ifloordiv_scalar(self):
with numpy.errstate(divide='ignore'):
self.check_array_scalar_op(operator.ifloordiv, no_complex=True)
def test_pow_scalar(self):
self.check_array_scalar_op(operator.pow)
def test_rpow_scalar(self):
self.check_array_scalar_op(operator.pow, swap=True)
@testing.for_all_dtypes_combination(names=['x_type', 'y_type'])
@testing.numpy_cupy_allclose(atol=1.0, accept_error=TypeError)
def check_ipow_scalar(self, xp, x_type, y_type):
a = xp.array([[1, 2, 3], [4, 5, 6]], x_type)
return operator.ipow(a, y_type(3))
def test_ipow_scalar(self):
self.check_ipow_scalar()
def test_divmod0_scalar(self):
with numpy.errstate(divide='ignore'):
self.check_array_scalar_op(lambda x, y: divmod(x, y)[0],
no_complex=True)
def test_divmod1_scalar(self):
with numpy.errstate(divide='ignore'):
self.check_array_scalar_op(lambda x, y: divmod(x, y)[1],
no_complex=True)
def test_rdivmod0_scalar(self):
with numpy.errstate(divide='ignore'):
self.check_array_scalar_op(lambda x, y: divmod(x, y)[0], swap=True,
no_complex=True)
def test_rdivmod1_scalar(self):
with numpy.errstate(divide='ignore'):
self.check_array_scalar_op(lambda x, y: divmod(x, y)[1], swap=True,
no_complex=True)
def test_lt_scalar(self):
self.check_array_scalar_op(operator.lt, no_complex=False)
def test_le_scalar(self):
self.check_array_scalar_op(operator.le, no_complex=False)
def test_gt_scalar(self):
self.check_array_scalar_op(operator.gt, no_complex=False)
def test_ge_scalar(self):
self.check_array_scalar_op(operator.ge, no_complex=False)
def test_eq_scalar(self):
self.check_array_scalar_op(operator.eq)
def test_ne_scalar(self):
self.check_array_scalar_op(operator.ne)
@testing.for_all_dtypes_combination(names=['x_type', 'y_type'])
@testing.numpy_cupy_allclose(accept_error=TypeError)
def check_array_array_op(self, op, xp, x_type, y_type,
no_bool=False, no_complex=False):
x_dtype = numpy.dtype(x_type)
y_dtype = numpy.dtype(y_type)
if no_bool and x_dtype == '?' and y_dtype == '?':
return xp.array(True)
if no_complex and (x_dtype.kind == 'c' or y_dtype.kind == 'c'):
return xp.array(True)
a = xp.array([[1, 2, 3], [4, 5, 6]], x_type)
b = xp.array([[6, 5, 4], [3, 2, 1]], y_type)
return op(a, b)
def test_add_array(self):
self.check_array_array_op(operator.add)
def test_iadd_array(self):
self.check_array_array_op(operator.iadd)
def test_sub_array(self):
self.check_array_array_op(operator.sub, no_bool=True)
def test_isub_array(self):
self.check_array_array_op(operator.isub, no_bool=True)
def test_mul_array(self):
self.check_array_array_op(operator.mul)
def test_imul_array(self):
self.check_array_array_op(operator.imul)
def test_truediv_array(self):
with numpy.errstate(divide='ignore'):
self.check_array_array_op(operator.truediv)
def test_itruediv_array(self):
with numpy.errstate(divide='ignore'):
self.check_array_array_op(operator.itruediv)
def test_floordiv_array(self):
with numpy.errstate(divide='ignore'):
self.check_array_array_op(operator.floordiv, no_complex=True)
def test_ifloordiv_array(self):
if '1.16.1' <= numpy.lib.NumpyVersion(numpy.__version__) < '1.18.0':
self.skipTest("NumPy Issue #12927")
with numpy.errstate(divide='ignore'):
self.check_array_array_op(operator.ifloordiv, no_complex=True)
@testing.for_all_dtypes_combination(names=['x_type', 'y_type'])
@testing.numpy_cupy_allclose(atol=1e-5, rtol=1e-6, accept_error=TypeError)
def check_pow_array(self, xp, x_type, y_type):
a = xp.array([[1, 2, 3], [4, 5, 6]], x_type)
b = xp.array([[6, 5, 4], [3, 2, 1]], y_type)
return operator.pow(a, b)
def test_pow_array(self):
        # There are some precision issues in HIP that prevent
# checking with atol=0
if cupy.cuda.runtime.is_hip:
self.check_pow_array()
else:
self.check_array_array_op(operator.pow)
@testing.for_all_dtypes_combination(names=['x_type', 'y_type'])
@testing.numpy_cupy_allclose(atol=1.0, accept_error=TypeError)
def check_ipow_array(self, xp, x_type, y_type):
a = xp.array([[1, 2, 3], [4, 5, 6]], x_type)
b = xp.array([[6, 5, 4], [3, 2, 1]], y_type)
return operator.ipow(a, b)
def test_ipow_array(self):
self.check_ipow_array()
def test_divmod0_array(self):
with numpy.errstate(divide='ignore'):
self.check_array_array_op(lambda x, y: divmod(x, y)[0])
def test_divmod1_array(self):
with numpy.errstate(divide='ignore'):
self.check_array_array_op(lambda x, y: divmod(x, y)[1])
def test_lt_array(self):
self.check_array_array_op(operator.lt, no_complex=True)
def test_le_array(self):
self.check_array_array_op(operator.le, no_complex=True)
def test_gt_array(self):
self.check_array_array_op(operator.gt, no_complex=True)
def test_ge_array(self):
self.check_array_array_op(operator.ge, no_complex=True)
def test_eq_array(self):
self.check_array_array_op(operator.eq)
def test_ne_array(self):
self.check_array_array_op(operator.ne)
@testing.for_all_dtypes_combination(names=['x_type', 'y_type'])
@testing.numpy_cupy_allclose(accept_error=TypeError)
def check_array_broadcasted_op(self, op, xp, x_type, y_type,
no_bool=False, no_complex=False):
x_dtype = numpy.dtype(x_type)
y_dtype = numpy.dtype(y_type)
if no_bool and x_dtype == '?' and y_dtype == '?':
return xp.array(True)
if no_complex and (x_dtype.kind == 'c' or y_dtype.kind == 'c'):
return xp.array(True)
a = xp.array([[1, 2, 3], [4, 5, 6]], x_type)
b = xp.array([[1], [2]], y_type)
return op(a, b)
def test_broadcasted_add(self):
self.check_array_broadcasted_op(operator.add)
def test_broadcasted_iadd(self):
self.check_array_broadcasted_op(operator.iadd)
def test_broadcasted_sub(self):
# TODO(unno): sub for boolean array is deprecated in numpy>=1.13
self.check_array_broadcasted_op(operator.sub, no_bool=True)
def test_broadcasted_isub(self):
# TODO(unno): sub for boolean array is deprecated in numpy>=1.13
self.check_array_broadcasted_op(operator.isub, no_bool=True)
def test_broadcasted_mul(self):
self.check_array_broadcasted_op(operator.mul)
def test_broadcasted_imul(self):
self.check_array_broadcasted_op(operator.imul)
def test_broadcasted_truediv(self):
with numpy.errstate(divide='ignore'):
self.check_array_broadcasted_op(operator.truediv)
def test_broadcasted_itruediv(self):
with numpy.errstate(divide='ignore'):
self.check_array_broadcasted_op(operator.itruediv)
def test_broadcasted_floordiv(self):
with numpy.errstate(divide='ignore'):
self.check_array_broadcasted_op(operator.floordiv, no_complex=True)
def test_broadcasted_ifloordiv(self):
if '1.16.1' <= numpy.lib.NumpyVersion(numpy.__version__) < '1.18.0':
self.skipTest("NumPy Issue #12927")
with numpy.errstate(divide='ignore'):
self.check_array_broadcasted_op(operator.ifloordiv,
no_complex=True)
@testing.for_all_dtypes_combination(names=['x_type', 'y_type'])
@testing.numpy_cupy_allclose(atol=1e-5, rtol=1e-6, accept_error=TypeError)
def check_broadcasted_pow(self, xp, x_type, y_type):
a = xp.array([[1, 2, 3], [4, 5, 6]], x_type)
b = xp.array([[1], [2]], y_type)
return operator.pow(a, b)
def test_broadcasted_pow(self):
        # There are some precision issues in HIP that prevent
# checking with atol=0
if cupy.cuda.runtime.is_hip:
self.check_broadcasted_pow()
else:
self.check_array_broadcasted_op(operator.pow)
@testing.for_all_dtypes_combination(names=['x_type', 'y_type'])
@testing.numpy_cupy_allclose(atol=1.0, accept_error=TypeError)
def check_broadcasted_ipow(self, xp, x_type, y_type):
a = xp.array([[1, 2, 3], [4, 5, 6]], x_type)
b = xp.array([[1], [2]], y_type)
return operator.ipow(a, b)
def test_broadcasted_ipow(self):
self.check_broadcasted_ipow()
def test_broadcasted_divmod0(self):
with numpy.errstate(divide='ignore'):
self.check_array_broadcasted_op(lambda x, y: divmod(x, y)[0],
no_complex=True)
def test_broadcasted_divmod1(self):
with numpy.errstate(divide='ignore'):
self.check_array_broadcasted_op(lambda x, y: divmod(x, y)[1],
no_complex=True)
def test_broadcasted_lt(self):
self.check_array_broadcasted_op(operator.lt, no_complex=True)
def test_broadcasted_le(self):
self.check_array_broadcasted_op(operator.le, no_complex=True)
def test_broadcasted_gt(self):
self.check_array_broadcasted_op(operator.gt, no_complex=True)
def test_broadcasted_ge(self):
self.check_array_broadcasted_op(operator.ge, no_complex=True)
def test_broadcasted_eq(self):
self.check_array_broadcasted_op(operator.eq)
def test_broadcasted_ne(self):
self.check_array_broadcasted_op(operator.ne)
@testing.for_all_dtypes_combination(names=['x_type', 'y_type'])
@testing.numpy_cupy_allclose(rtol=1e-6)
def check_array_doubly_broadcasted_op(self, op, xp, x_type, y_type,
no_bool=False, no_complex=False):
x_dtype = numpy.dtype(x_type)
y_dtype = numpy.dtype(y_type)
if no_bool and x_dtype == '?' and y_dtype == '?':
return xp.array(True)
if no_complex and (x_dtype.kind == 'c' or y_dtype.kind == 'c'):
return xp.array(True)
a = xp.array([[[1, 2, 3]], [[4, 5, 6]]], x_type)
b = xp.array([[1], [2], [3]], y_type)
return op(a, b)
def test_doubly_broadcasted_add(self):
self.check_array_doubly_broadcasted_op(operator.add)
def test_doubly_broadcasted_sub(self):
self.check_array_doubly_broadcasted_op(operator.sub, no_bool=True)
def test_doubly_broadcasted_mul(self):
self.check_array_doubly_broadcasted_op(operator.mul)
def test_doubly_broadcasted_truediv(self):
with numpy.errstate(divide='ignore', invalid='ignore'):
self.check_array_doubly_broadcasted_op(operator.truediv)
def test_doubly_broadcasted_floordiv(self):
with numpy.errstate(divide='ignore'):
self.check_array_doubly_broadcasted_op(operator.floordiv,
no_complex=True)
def test_doubly_broadcasted_pow(self):
self.check_array_doubly_broadcasted_op(operator.pow)
def test_doubly_broadcasted_divmod0(self):
with numpy.errstate(divide='ignore'):
self.check_array_doubly_broadcasted_op(
lambda x, y: divmod(x, y)[0],
no_complex=True)
def test_doubly_broadcasted_divmod1(self):
with numpy.errstate(divide='ignore'):
self.check_array_doubly_broadcasted_op(
lambda x, y: divmod(x, y)[1],
no_complex=True)
def test_doubly_broadcasted_lt(self):
self.check_array_doubly_broadcasted_op(operator.lt, no_complex=True)
def test_doubly_broadcasted_le(self):
self.check_array_doubly_broadcasted_op(operator.le, no_complex=True)
def test_doubly_broadcasted_gt(self):
self.check_array_doubly_broadcasted_op(operator.gt, no_complex=True)
def test_doubly_broadcasted_ge(self):
self.check_array_doubly_broadcasted_op(operator.ge, no_complex=True)
def test_doubly_broadcasted_eq(self):
self.check_array_doubly_broadcasted_op(operator.eq)
def test_doubly_broadcasted_ne(self):
self.check_array_doubly_broadcasted_op(operator.ne)
@testing.for_all_dtypes_combination(names=['x_type', 'y_type'])
@testing.numpy_cupy_allclose()
def check_array_reversed_op(self, op, xp, x_type, y_type, no_bool=False):
if no_bool and x_type == numpy.bool_ and y_type == numpy.bool_:
return xp.array(True)
a = xp.array([1, 2, 3, 4, 5], x_type)
b = xp.array([1, 2, 3, 4, 5], y_type)
return op(a, b[::-1])
def test_array_reversed_add(self):
self.check_array_reversed_op(operator.add)
def test_array_reversed_sub(self):
self.check_array_reversed_op(operator.sub, no_bool=True)
def test_array_reversed_mul(self):
self.check_array_reversed_op(operator.mul)
@testing.for_all_dtypes(no_bool=True)
def check_typecast(self, val, dtype):
operators = [
operator.add, operator.sub, operator.mul, operator.truediv]
for op in operators:
with numpy.errstate(divide='ignore', invalid='ignore'):
a = op(val, (testing.shaped_arange((5,), numpy, dtype) - 2))
b = op(val, (testing.shaped_arange((5,), cupy, dtype) - 2))
assert a.dtype == b.dtype
def test_typecast_bool1(self):
self.check_typecast(True)
def test_typecast_bool2(self):
self.check_typecast(False)
def test_typecast_int1(self):
self.check_typecast(0)
def test_typecast_int2(self):
self.check_typecast(-127)
def test_typecast_int3(self):
self.check_typecast(255)
def test_typecast_int4(self):
self.check_typecast(-32768)
def test_typecast_int5(self):
self.check_typecast(65535)
def test_typecast_int6(self):
self.check_typecast(-2147483648)
def test_typecast_int7(self):
self.check_typecast(4294967295)
def test_typecast_float1(self):
self.check_typecast(0.0)
def test_typecast_float2(self):
self.check_typecast(100000.0)
# Skip float16 because of NumPy #19514
@testing.for_all_dtypes(name='x_type', no_float16=True)
@testing.numpy_cupy_allclose()
def check_array_boolarray_op(self, op, xp, x_type):
a = xp.array([[2, 7, 1], [8, 2, 8]], x_type)
# Cast from np.bool8 array should not read bytes
b = xp.array([[3, 1, 4], [-1, -5, -9]], numpy.int8).view(bool)
return op(a, b)
def test_add_array_boolarray(self):
self.check_array_boolarray_op(operator.add)
def test_iadd_array_boolarray(self):
self.check_array_boolarray_op(operator.iadd)
class TestArrayIntElementwiseOp:
@testing.for_all_dtypes_combination(names=['x_type', 'y_type'])
@testing.numpy_cupy_allclose(accept_error=TypeError)
def check_array_scalar_op(self, op, xp, x_type, y_type, swap=False):
a = xp.array([[0, 1, 2], [1, 0, 2]], dtype=x_type)
if swap:
return op(y_type(2), a)
else:
return op(a, y_type(2))
def test_lshift_scalar(self):
self.check_array_scalar_op(operator.lshift)
def test_rlshift_scalar(self):
self.check_array_scalar_op(operator.lshift, swap=True)
def test_rshift_scalar(self):
self.check_array_scalar_op(operator.rshift)
def test_rrshift_scalar(self):
self.check_array_scalar_op(operator.rshift, swap=True)
def test_and_scalar(self):
self.check_array_scalar_op(operator.and_)
def test_rand_scalar(self):
self.check_array_scalar_op(operator.and_, swap=True)
def test_or_scalar(self):
self.check_array_scalar_op(operator.or_)
def test_ror_scalar(self):
self.check_array_scalar_op(operator.or_, swap=True)
def test_xor_scalar(self):
self.check_array_scalar_op(operator.xor)
def test_rxor_scalar(self):
self.check_array_scalar_op(operator.xor, swap=True)
def test_mod_scalar(self):
with numpy.errstate(divide='ignore', invalid='ignore'):
self.check_array_scalar_op(operator.mod)
def test_rmod_scalar(self):
with numpy.errstate(divide='ignore', invalid='ignore'):
self.check_array_scalar_op(operator.mod, swap=True)
@testing.for_all_dtypes_combination(names=['x_type', 'y_type'])
@testing.numpy_cupy_allclose(accept_error=TypeError)
def check_array_scalarzero_op(self, op, xp, x_type, y_type, swap=False):
a = xp.array([[0, 1, 2], [1, 0, 2]], dtype=x_type)
if swap:
return op(y_type(0), a)
else:
return op(a, y_type(0))
def test_lshift_scalarzero(self):
self.check_array_scalarzero_op(operator.lshift)
def test_rlshift_scalarzero(self):
self.check_array_scalarzero_op(operator.lshift, swap=True)
def test_rshift_scalarzero(self):
self.check_array_scalarzero_op(operator.rshift)
def test_rrshift_scalarzero(self):
self.check_array_scalarzero_op(operator.rshift, swap=True)
def test_and_scalarzero(self):
self.check_array_scalarzero_op(operator.and_)
def test_rand_scalarzero(self):
self.check_array_scalarzero_op(operator.and_, swap=True)
def test_or_scalarzero(self):
self.check_array_scalarzero_op(operator.or_)
def test_ror_scalarzero(self):
self.check_array_scalarzero_op(operator.or_, swap=True)
def test_xor_scalarzero(self):
self.check_array_scalarzero_op(operator.xor)
def test_rxor_scalarzero(self):
self.check_array_scalarzero_op(operator.xor, swap=True)
def test_mod_scalarzero(self):
with numpy.errstate(divide='ignore', invalid='ignore'):
self.check_array_scalarzero_op(operator.mod)
def test_rmod_scalarzero(self):
with numpy.errstate(divide='ignore', invalid='ignore'):
self.check_array_scalarzero_op(operator.mod, swap=True)
@testing.for_all_dtypes_combination(names=['x_type', 'y_type'])
@testing.numpy_cupy_allclose(accept_error=TypeError)
def check_array_array_op(self, op, xp, x_type, y_type):
a = xp.array([[0, 1, 2], [1, 0, 2]], dtype=x_type)
b = xp.array([[0, 0, 1], [0, 1, 2]], dtype=y_type)
return op(a, b)
def test_lshift_array(self):
self.check_array_array_op(operator.lshift)
def test_ilshift_array(self):
self.check_array_array_op(operator.ilshift)
def test_rshift_array(self):
self.check_array_array_op(operator.rshift)
def test_irshift_array(self):
self.check_array_array_op(operator.irshift)
def test_and_array(self):
self.check_array_array_op(operator.and_)
def test_iand_array(self):
self.check_array_array_op(operator.iand)
def test_or_array(self):
self.check_array_array_op(operator.or_)
def test_ior_array(self):
self.check_array_array_op(operator.ior)
def test_xor_array(self):
self.check_array_array_op(operator.xor)
def test_ixor_array(self):
self.check_array_array_op(operator.ixor)
def test_mod_array(self):
with numpy.errstate(divide='ignore', invalid='ignore'):
self.check_array_array_op(operator.mod)
def test_imod_array(self):
with numpy.errstate(divide='ignore', invalid='ignore'):
self.check_array_array_op(operator.imod)
@testing.for_all_dtypes_combination(names=['x_type', 'y_type'])
@testing.numpy_cupy_allclose(accept_error=TypeError)
def check_array_broadcasted_op(self, op, xp, x_type, y_type):
a = xp.array([[0, 1, 2], [1, 0, 2], [2, 1, 0]], dtype=x_type)
b = xp.array([[0, 0, 1]], dtype=y_type)
return op(a, b)
def test_broadcasted_lshift(self):
self.check_array_broadcasted_op(operator.lshift)
def test_broadcasted_ilshift(self):
self.check_array_broadcasted_op(operator.ilshift)
def test_broadcasted_rshift(self):
self.check_array_broadcasted_op(operator.rshift)
def test_broadcasted_irshift(self):
self.check_array_broadcasted_op(operator.irshift)
def test_broadcasted_and(self):
self.check_array_broadcasted_op(operator.and_)
def test_broadcasted_iand(self):
self.check_array_broadcasted_op(operator.iand)
def test_broadcasted_or(self):
self.check_array_broadcasted_op(operator.or_)
def test_broadcasted_ior(self):
self.check_array_broadcasted_op(operator.ior)
def test_broadcasted_xor(self):
self.check_array_broadcasted_op(operator.xor)
def test_broadcasted_ixor(self):
self.check_array_broadcasted_op(operator.ixor)
def test_broadcasted_mod(self):
with numpy.errstate(divide='ignore', invalid='ignore'):
self.check_array_broadcasted_op(operator.mod)
def test_broadcasted_imod(self):
with numpy.errstate(divide='ignore', invalid='ignore'):
self.check_array_broadcasted_op(operator.imod)
@testing.for_all_dtypes_combination(names=['x_type', 'y_type'])
@testing.numpy_cupy_allclose(accept_error=TypeError)
def check_array_doubly_broadcasted_op(self, op, xp, x_type, y_type):
a = xp.array([[[0, 1, 2]], [[1, 0, 2]]], dtype=x_type)
b = xp.array([[0], [0], [1]], dtype=y_type)
return op(a, b)
def test_doubly_broadcasted_lshift(self):
self.check_array_doubly_broadcasted_op(operator.lshift)
def test_doubly_broadcasted_rshift(self):
self.check_array_doubly_broadcasted_op(operator.rshift)
def test_doubly_broadcasted_and(self):
self.check_array_doubly_broadcasted_op(operator.and_)
def test_doubly_broadcasted_or(self):
self.check_array_doubly_broadcasted_op(operator.or_)
def test_doubly_broadcasted_xor(self):
self.check_array_doubly_broadcasted_op(operator.xor)
def test_doubly_broadcasted_mod(self):
with numpy.errstate(divide='ignore', invalid='ignore'):
self.check_array_doubly_broadcasted_op(operator.mod)
@pytest.mark.parametrize('value', [
None,
Ellipsis,
object(),
numpy._NoValue,
])
class TestArrayObjectComparison:
@pytest.mark.parametrize('swap', [False, True])
@testing.for_all_dtypes()
@testing.numpy_cupy_array_equal()
def test_eq_object(self, xp, dtype, value, swap):
a = xp.array([[1, 2, 3], [4, 5, 6]], dtype=dtype)
if swap:
return value == a
else:
return a == value
@pytest.mark.parametrize('swap', [False, True])
@testing.for_all_dtypes()
@testing.numpy_cupy_array_equal()
def test_ne_object(self, xp, dtype, value, swap):
a = xp.array([[1, 2, 3], [4, 5, 6]], dtype=dtype)
if swap:
return value != a
else:
return a != value
class HasEq:
def __eq__(self, other):
return (other == 2) | (other == 4)
class HasNe:
def __ne__(self, other):
return (other == 2) | (other == 4)
class HasEqSub(HasEq):
pass
class CustomInt(int):
pass
@pytest.mark.parametrize('dtype', ['int32', 'float64'])
@pytest.mark.parametrize('value', [
HasEq(),
HasNe(), # eq test passes because `==` does not fall back to `__ne__`.
HasEqSub(),
CustomInt(3),
])
class TestArrayObjectComparisonDifficult:
# OK to raise TypeError.
# If CuPy returns a result, it should match with NumPy's result.
def test_eq_object(self, dtype, value):
expected = numpy.array([[1, 2, 3], [4, 5, 6]], dtype=dtype) == value
a = cupy.array([[1, 2, 3], [4, 5, 6]], dtype=dtype)
try:
res = a == value
except TypeError:
pytest.skip()
cupy.testing.assert_array_equal(res, expected)
def test_ne_object(self, dtype, value):
expected = numpy.array([[1, 2, 3], [4, 5, 6]], dtype=dtype) != value
a = cupy.array([[1, 2, 3], [4, 5, 6]], dtype=dtype)
try:
res = a != value
except TypeError:
pytest.skip()
cupy.testing.assert_array_equal(res, expected)
| 34.835052 | 79 | 0.660328 | [
"MIT"
] | Onkar627/cupy | tests/cupy_tests/core_tests/test_ndarray_elementwise_op.py | 27,032 | Python |
def read_fasta(filename):
"""Returns a list of tuples of each header and sequence in a fasta (or multifasta) file.
first element in tuple is header and second the sequence.
Key Arguments:
filename -- fasta file.
"""
tmp_seq = None
seqs_list = []
with open(filename, 'r') as fasta_file:
for line in fasta_file:
line = line.replace('\n','')
if '>' in line:
                if tmp_seq is not None:
seqs_list.append((hd, tmp_seq))
tmp_seq = ''
hd = line.replace('>','')
else:
tmp_seq += line
        if tmp_seq is not None:
            seqs_list.append((hd, tmp_seq))
try:
assert len(seqs_list) > 0
except AssertionError:
print('The selected file is not a Fasta file.')
else:
return seqs_list
def write_fasta(outfile, seq_dict):
"""Writes fasta with dictionary where keys are headers and values sequences.
Key Arguments:
outfile.
"""
step = 70
with open(outfile, 'w') as file:
for header, sequence in seq_dict.items():
sequence_list = [sequence[i - step: i] for i in range(step, len(sequence) + 1, step)]
last = sequence[step * (len(sequence) // step):]
if last != '':
sequence_list.append(last)
sequence = '\n'.join(sequence_list)
file.write('>' + header + '\n' + sequence + '\n')
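# Illustrative sketch (added, not part of the original module): a round trip
# through write_fasta and read_fasta. The file name is an arbitrary placeholder.
def _fasta_roundtrip_example(path='example.fasta'):
    write_fasta(path, {'seq1': 'ACGT' * 30, 'seq2': 'TTGCA' * 10})
    return read_fasta(path)  # -> [('seq1', 'ACGT' * 30), ('seq2', 'TTGCA' * 10)]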
def reads_generator(fasta_file, read_length, k):
"""This function simulates the reads generation from a fasta file with a coverage not less than 50.
It will return a list of tuples. First element in tuple is read ID and second the sequence.
Key Arguments:
fasta_file -- fasta file.
read_length -- size of reads.
"""
reads_list = []
overlap = k - 1
input_header, input_seq = read_fasta(fasta_file)[0]
n = len(input_seq)
for i in range(0, n - overlap, read_length - overlap):
read_seq = input_seq[i: i + read_length]
reads_list.append(read_seq)
return [('{}_{}'.format(input_header, i), read) for i, read in enumerate(reads_list)]
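# Illustrative sketch (added): with read_length=5 and k=3 consecutive reads
# overlap by k - 1 = 2 bases, so a 10-base input yields reads starting at
# positions 0, 3 and 6. The fasta path is a hypothetical placeholder.
def _reads_generator_example(path='example.fasta'):
    return reads_generator(path, read_length=5, k=3)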
def write_fastq(reads_list, filename):
"""This function created a FASTQ file from a list of read generated by the reads_generator function.
Key Arguments:
reads_list -- list of reads generated with reads_generator.
filename -- name of output file WITH EXTENSION.
"""
with open(filename, 'w') as fastq_file:
for read_id, read in reads_list:
fastq_file.write('@{}\n'.format(read_id))
fastq_file.write(read + '\n')
fastq_file.write('+\n')
fastq_file.write('I' * len(read) + '\n') # max possible score
def read_fastq(filename):
"""This function reads a FASTQ file storing the read and its ID in a dictionary where keys are IDs and read value.
This function does not consider + and score lines.
Key Arguments:
filename -- name of FASTQ input file.
"""
reads_dict = dict()
with open(filename, 'r') as fastq_file:
for line in fastq_file:
if '@' in line:
reads_dict[line[1:].replace('\n', '')] = next(
fastq_file).replace('\n', '')
next(fastq_file)
next(fastq_file)
return reads_dict
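# Illustrative sketch (added): simulate reads from a fasta, write them as FASTQ
# and parse the FASTQ back into a {read_id: sequence} dict. The file names are
# arbitrary placeholders.
if __name__ == '__main__':
    example_reads = reads_generator('example.fasta', read_length=50, k=21)
    write_fastq(example_reads, 'example.fastq')
    print(read_fastq('example.fastq'))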
| 37.266667 | 118 | 0.589147 | [
"MIT"
] | Mirindi95/PrIDcon | pridcon/utils.py | 3,354 | Python |
from django import template
from home.models import Recipe, MixingAgent, Base, Ingredient, FacePack, CustomFacePack
import pdb
register = template.Library()
@register.inclusion_tag('facepack.html')
def facepack_display(item_id):
if not item_id:
return
mandatory = []
type = "primary"
for cfp in CustomFacePack.objects.filter(facepack=item_id):
ing = cfp.optional_ingredient if cfp.optional_ingredient else cfp.recipe.mandatory_ingredient
mandatory.append({
'name' : ing.name,
'id' : ing.id,
'r_id' : cfp.recipe.id,
'image' : ing.image,
})
if cfp.optional_ingredient:
type = "secondary"
fp = FacePack.objects.get(pk=item_id)
res = {
'item_id' : item_id,
'name' : fp.name,
'mandatory' : mandatory,
'base' : fp.base.name,
'mixing_agent' : fp.mixing_agent.name,
'image' : fp.image,
'type' : type,
}
return {'item': res }
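# Usage sketch (added comment, assuming the module keeps its common_tags.py
# name): load this library in a template and call the tag with a FacePack id:
#   {% load common_tags %}
#   {% facepack_display item.id %}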
def facepack_display_abs(base_url, item_id):
if not item_id:
return
mandatory = []
type = "primary"
for cfp in CustomFacePack.objects.filter(facepack=item_id):
ing = cfp.optional_ingredient if cfp.optional_ingredient else cfp.recipe.mandatory_ingredient
mandatory.append({
'name' : ing.name,
'id' : ing.id,
'r_id' : cfp.recipe.id,
'image' : ing.image,
})
if cfp.optional_ingredient:
type = "secondary"
fp = FacePack.objects.get(pk=item_id)
res = {
'item_id' : item_id,
'name' : fp.name,
'mandatory' : mandatory,
'base' : fp.base.name,
'mixing_agent' : fp.mixing_agent.name,
'image' : fp.image,
'type' : type,
#'base_url' : request.get_raw_uri().replace(request.get_full_path(),''),
'base_url' : base_url,
}
return {'item': res }
| 31.952381 | 101 | 0.562842 | [
"MIT"
] | dev1farms2face/f2f | f2f/farms2face/home/templatetags/common_tags.py | 2,013 | Python |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""
FlowClient is a Python client to FlowAPI.
"""
from ._version import get_versions
__version__ = get_versions()["version"]
del get_versions
from flowclient.api_query import APIQuery
from .connection import Connection
from flowclient.client import connect
from flowclient.async_api_query import ASyncAPIQuery
from .async_connection import ASyncConnection
from flowclient.async_client import connect_async
from .client import (
get_geography,
get_result,
get_result_by_query_id,
get_geojson_result,
get_status,
query_is_ready,
run_query,
get_available_dates,
)
from .query_specs import (
daily_location_spec,
modal_location_spec,
modal_location_from_dates_spec,
radius_of_gyration_spec,
unique_location_counts_spec,
topup_balance_spec,
subscriber_degree_spec,
topup_amount_spec,
event_count_spec,
displacement_spec,
pareto_interactions_spec,
nocturnal_events_spec,
handset_spec,
random_sample_spec,
unique_locations_spec,
most_frequent_location_spec,
total_active_periods_spec,
location_visits_spec,
majority_location_spec,
coalesced_location_spec,
mobility_classification_spec,
)
from . import aggregates
from .aggregates import (
location_event_counts,
meaningful_locations_aggregate,
meaningful_locations_between_label_od_matrix,
meaningful_locations_between_dates_od_matrix,
flows,
unique_subscriber_counts,
location_introversion,
total_network_objects,
aggregate_network_objects,
spatial_aggregate,
joined_spatial_aggregate,
histogram_aggregate,
active_at_reference_location_counts,
unmoving_at_reference_location_counts,
unmoving_counts,
consecutive_trips_od_matrix,
trips_od_matrix,
labelled_spatial_aggregate,
labelled_flows,
)
__all__ = [
"aggregates",
"connect_async",
"connect",
"get_geography",
"get_result",
"get_result_by_query_id",
"get_geojson_result",
"get_status",
"query_is_ready",
"run_query",
"get_available_dates",
"APIQuery",
"ASyncAPIQuery",
"location_event_counts",
"meaningful_locations_aggregate",
"meaningful_locations_between_label_od_matrix",
"meaningful_locations_between_dates_od_matrix",
"flows",
"unique_subscriber_counts",
"location_introversion",
"total_network_objects",
"aggregate_network_objects",
"spatial_aggregate",
"joined_spatial_aggregate",
"histogram_aggregate",
"active_at_reference_location_counts",
"unique_locations_spec",
"unmoving_at_reference_location_counts",
"unmoving_counts",
"consecutive_trips_od_matrix",
"trips_od_matrix",
"labelled_spatial_aggregate",
"labelled_flows",
]
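# Usage sketch (added; the parameter names below are assumptions and may differ
# between flowclient/FlowAPI versions):
#
#   conn = connect(url="https://flowapi.example.org", token="<access token>")
#   available = get_available_dates(connection=conn)
#   geography = get_geography(connection=conn, aggregation_unit="admin3")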
| 26.221239 | 69 | 0.759703 | [
"MPL-2.0",
"MPL-2.0-no-copyleft-exception"
] | Flowminder/FlowK | flowclient/flowclient/__init__.py | 2,963 | Python |
import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
''' IMPORTS '''
import requests
# Disable insecure warnings
requests.packages.urllib3.disable_warnings()
API_KEY = demisto.getParam('APIKey')
SERVER_URL = 'https://analyze.intezer.com/api'
API_VERSION = '/v2-0'
BASE_URL = SERVER_URL + API_VERSION
IS_AVAILABLE_URL = 'is-available'
ERROR_PREFIX = 'Error from Intezer:'
ACCEPTABLE_HTTP_CODES = {200, 201, 202}
USE_SSL = not demisto.params().get('insecure', False)
http_status_to_error_message = {
400: '400 Bad Request - Wrong or invalid parameters',
401: '401 Unauthorized - Wrong or invalid api key',
403: '403 Forbidden - The account is not allowed to preform this task',
404: '404 Not Found - Analysis was not found',
410: '410 Gone - Analysis no longer exists in the service',
500: '500 Internal Server Error - Internal error',
503: '503 Service Unavailable'
}
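# DBot reputation scale targeted by the mapping below (added comment):
# 0 = unknown, 1 = good, 2 = suspicious, 3 = bad.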
dbot_score_by_verdict = {
'malicious': 3,
'suspicious': 2,
'trusted': 1,
'neutral': 1,
'no_threats': 1
}
''' HELPER FUNCTIONS '''
def handle_response(response, acceptable_http_status_codes):
if response.status_code not in acceptable_http_status_codes:
        error_msg = http_status_to_error_message.get(response.status_code, "Failed to perform request")
return_error(f'{ERROR_PREFIX} {error_msg}')
try:
return response.json()
except json.decoder.JSONDecodeError:
# This error is unlikely to happen, as the return code should indicate of error beforehand
return_error(f'Response returned with no data. This might be an issue with Intezer.\nPlease try again later\n'
f'Response content:\n{response.content}')
def get_session():
response = requests.post(BASE_URL + '/get-access-token', json={'api_key': API_KEY}, verify=USE_SSL)
response = handle_response(response, {200})
session = requests.session()
session.headers['Authorization'] = f'Bearer {response["result"]}'
return session
''' COMMANDS '''
def check_is_available():
url = f'{SERVER_URL}/{IS_AVAILABLE_URL}'
result = SESSION.get(url, verify=USE_SSL)
return 'ok' if result.json()['is_available'] else None
def analyze_by_hash_command():
file_hash = demisto.getArg('file_hash')
response = make_analyze_by_hash_request(file_hash)
handle_analyze_by_hash_response(response, file_hash)
def get_latest_result_command():
file_hash = demisto.getArg('file_hash')
response = make_get_latest_report_request(file_hash)
handle_get_latest_result_response(response, file_hash)
def make_analyze_by_hash_request(file_hash):
data = {'hash': file_hash}
return SESSION.post(BASE_URL + '/analyze-by-hash', json=data, verify=USE_SSL)
def make_get_latest_report_request(file_hash):
return SESSION.get(f'{BASE_URL}/files/{file_hash}', verify=USE_SSL)
def handle_analyze_by_hash_response(response, file_hash):
if response.status_code == 404:
dbot = {
'Vendor': 'Intezer',
'Type': 'hash',
'Indicator': file_hash,
'Score': 0
}
hr = f'Hash {file_hash} does not exist on Intezer genome database'
ec = {'DBotScore': dbot}
return_outputs(hr, ec)
return
elif response.status_code == 400:
return_error('File hash is not valid.\nIntezer file hash reputation supports only SHA-256, '
'SHA-1 and MD5 hash formats.\n')
handle_analyze_response(response)
def handle_get_latest_result_response(response, file_hash):
if response.status_code == 404:
dbot = {
'Vendor': 'Intezer',
'Type': 'hash',
'Indicator': file_hash,
'Score': 0
}
hr = f'Hash {file_hash} does not exist on Intezer genome database'
ec = {'DBotScore': dbot}
return_outputs(hr, ec)
return
elif response.status_code == 400:
return_error('File hash is not valid.\nIntezer file hash reputation supports only SHA-256, '
'SHA-1 and MD5 hash formats.\n')
analysis_result = response.json()
enrich_dbot_and_display_file_analysis_results(analysis_result['result'])
def analyze_by_uploaded_file_command():
response = make_analyze_by_file_request(demisto.getArg('file_entry_id'))
handle_analyze_response(response)
def make_analyze_by_file_request(file_id):
file_data = demisto.getFilePath(file_id)
with open(file_data['path'], 'rb') as file_to_upload:
files = {'file': (file_data['name'], file_to_upload)}
return SESSION.post(BASE_URL + '/analyze', files=files, verify=USE_SSL)
def handle_analyze_response(response):
response = handle_response(response, ACCEPTABLE_HTTP_CODES)
result_url = response['result_url']
analysis_id = result_url.rsplit('/', 1)[-1]
context_json = {'Intezer.Analysis(obj.ID === val.ID)': {'ID': analysis_id, 'Status': 'Created', 'type': 'File'}}
return_outputs('Analysis created successfully: {}'.format(analysis_id), context_json, response)
def check_analysis_status_and_get_results_command():
analysis_type = demisto.args().get('analysis_type', 'File')
analysis_ids = argToList(demisto.args().get('analysis_id'))
indicator_name = demisto.args().get('indicator_name')
for analysis_id in analysis_ids:
response = make_analysis_status_request(analysis_id, analysis_type)
analysis_result = handle_analysis_result(response)
if analysis_result and analysis_type == 'Endpoint':
enrich_dbot_and_display_endpoint_analysis_results(analysis_result, indicator_name)
elif analysis_result and analysis_type == 'File':
enrich_dbot_and_display_file_analysis_results(analysis_result)
def make_analysis_status_request(analysis_id, analysis_type):
analysis_endpoint = 'endpoint-analyses/' if analysis_type == 'Endpoint' else 'analyses/'
result_url = f'{BASE_URL}/{analysis_endpoint}{analysis_id}'
return SESSION.get(result_url, verify=USE_SSL)
def handle_analysis_result(response):
json_response = handle_response(response, ACCEPTABLE_HTTP_CODES)
if response.status_code != 200:
result_url = json_response['result_url']
analysis_id = result_url.rsplit('/', 1)[-1]
context_json = {'Intezer.Analysis(val.ID === obj.ID)': {'ID': analysis_id,
'Status': 'InProgress'}}
return_outputs('Analysis is still in progress', context_json)
return
return json_response['result']
def enrich_dbot_and_display_file_analysis_results(result):
verdict = result.get('verdict')
sha256 = result.get('sha256')
analysis_id = result.get('analysis_id')
dbot = {
'Vendor': 'Intezer',
'Type': 'hash',
'Indicator': sha256,
'Score': dbot_score_by_verdict.get(verdict, 0)
}
file = {'SHA256': sha256, 'Metadata': result, 'ExistsInIntezer': True}
if verdict == 'malicious':
file['Malicious'] = {'Vendor': 'Intezer'}
presentable_result = '## Intezer File analysis result\n'
presentable_result += f' SHA256: {sha256}\n'
presentable_result += f' Verdict: **{verdict}** ({result["sub_verdict"]})\n'
if 'family_name' in result:
presentable_result += f'Family: **{result["family_name"]}**\n'
presentable_result += f'[Analysis Link]({result["analysis_url"]})\n'
demisto.results({
'Type': entryTypes['note'],
'EntryContext': {
outputPaths['dbotscore']: dbot,
outputPaths['file']: file,
'Intezer.Analysis(val.ID === obj.ID)': {'ID': analysis_id, 'Status': 'Done'}},
'HumanReadable': presentable_result,
'ContentsFormat': formats['json'],
'Contents': result
})
def enrich_dbot_and_display_endpoint_analysis_results(result, indicator_name=None):
verdict = result['verdict']
computer_name = result['computer_name']
analysis_id = result['analysis_id']
dbot = {
'Vendor': 'Intezer',
'Type': 'hostname',
'Indicator': indicator_name if indicator_name else computer_name,
'Score': dbot_score_by_verdict.get(verdict, 0)
}
endpoint = {'Metadata': result}
presentable_result = '## Intezer Endpoint analysis result\n'
presentable_result += f'Host Name: {computer_name}\n'
presentable_result += f' Verdict: **{verdict}**\n'
if result.get('families') is not None:
presentable_result += f'Families: **{result["families"]}**\n'
presentable_result += f' Scan Time: {result["scan_start_time"]}\n'
presentable_result += f'[Analysis Link]({result["analysis_url"]})\n'
ec = {
'DBotScore': dbot,
'Endpoint': endpoint,
'Intezer.Analysis(val.ID === obj.ID)': {'ID': analysis_id, 'Status': 'Done'}
}
return_outputs(presentable_result, ec, result)
''' EXECUTION CODE '''
try:
SESSION = get_session()
except Exception as e:
return_error(str(e))
def main():
try:
handle_proxy()
if demisto.command() == 'test-module':
demisto.results(check_is_available())
elif demisto.command() == 'intezer-analyze-by-hash':
analyze_by_hash_command()
elif demisto.command() == 'intezer-analyze-by-file':
analyze_by_uploaded_file_command()
elif demisto.command() == 'intezer-get-latest-report':
get_latest_result_command()
elif demisto.command() == 'intezer-get-analysis-result':
check_analysis_status_and_get_results_command()
except Exception as e:
return_error(str(e))
# python2 uses __builtin__ python3 uses builtins
if __name__ == "__builtin__" or __name__ == "builtins":
main()
| 34.059028 | 118 | 0.672036 | [
"MIT"
] | Axonius/conten | Packs/Intezer/Integrations/IntezerV2/IntezerV2.py | 9,809 | Python |
from dataclasses import dataclass, field
from typing import List, Optional
@dataclass
class Regex:
att: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"pattern": r"[\C\?a-c\?]+",
}
)
@dataclass
class Doc:
class Meta:
name = "doc"
elem: List[Regex] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "",
}
)
| 17.178571 | 40 | 0.503119 | [
"MIT"
] | tefra/xsdata-w3c-tests | output/models/ms_data/regex/re_g18_xsd/re_g18.py | 481 | Python |
from django.db import models
from django.contrib.auth.models import AbstractBaseUser, \
BaseUserManager, PermissionsMixin
class UserManager(BaseUserManager):
def create_user(self, email, password=None, **extra_fields):
"""Creates and saves a new user"""
if not email:
raise ValueError('User must have an email address')
user = self.model(email=self.normalize_email(email), **extra_fields)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email, password):
"""Creates and saves a new super user"""
user = self.create_user(email, password)
user.is_superuser = True
user.is_staff = True
user.save(using=self._db)
return user
class User(AbstractBaseUser, PermissionsMixin):
"""Custom user model that supports using email instead of username"""
email = models.EmailField(max_length=255, unique=True)
name = models.CharField(max_length=255)
is_active = models.BooleanField(default=True)
is_staff = models.BooleanField(default=False)
objects = UserManager()
USERNAME_FIELD = 'email'
| 31.128205 | 76 | 0.667216 | [
"MIT"
] | StoikovOleh/recipe-app-api | app/core/models.py | 1,214 | Python |
from django.apps import AppConfig
class EnquiriesConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'src.enquiries'
| 22 | 56 | 0.766234 | [
"BSD-3-Clause"
] | kkamara/django-app | src/enquiries/apps.py | 154 | Python |
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for manipulating Nodes via the DB API"""
import datetime
import mock
from oslo_utils import timeutils
from oslo_utils import uuidutils
import six
from ironic.common import exception
from ironic.common import states
from ironic.tests.unit.db import base
from ironic.tests.unit.db import utils
class DbNodeTestCase(base.DbTestCase):
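    # Note on the helpers (added comment): utils.create_test_node() persists a
    # node with overridable default values through the DB API, and
    # base.DbTestCase provides a fresh test database plus self.dbapi for each
    # test.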
def test_create_node(self):
node = utils.create_test_node()
self.assertEqual([], node.tags)
self.assertEqual([], node.traits)
def test_create_node_with_tags(self):
self.assertRaises(exception.InvalidParameterValue,
utils.create_test_node,
tags=['tag1', 'tag2'])
def test_create_node_with_traits(self):
self.assertRaises(exception.InvalidParameterValue,
utils.create_test_node,
traits=['trait1', 'trait2'])
def test_create_node_already_exists(self):
utils.create_test_node()
self.assertRaises(exception.NodeAlreadyExists,
utils.create_test_node)
def test_create_node_instance_already_associated(self):
instance = uuidutils.generate_uuid()
utils.create_test_node(uuid=uuidutils.generate_uuid(),
instance_uuid=instance)
self.assertRaises(exception.InstanceAssociated,
utils.create_test_node,
uuid=uuidutils.generate_uuid(),
instance_uuid=instance)
def test_create_node_name_duplicate(self):
node = utils.create_test_node(name='spam')
self.assertRaises(exception.DuplicateName,
utils.create_test_node,
name=node.name)
def test_get_node_by_id(self):
node = utils.create_test_node()
self.dbapi.set_node_tags(node.id, ['tag1', 'tag2'])
utils.create_test_node_traits(node_id=node.id,
traits=['trait1', 'trait2'])
res = self.dbapi.get_node_by_id(node.id)
self.assertEqual(node.id, res.id)
self.assertEqual(node.uuid, res.uuid)
self.assertItemsEqual(['tag1', 'tag2'], [tag.tag for tag in res.tags])
self.assertItemsEqual(['trait1', 'trait2'],
[trait.trait for trait in res.traits])
def test_get_node_by_uuid(self):
node = utils.create_test_node()
self.dbapi.set_node_tags(node.id, ['tag1', 'tag2'])
utils.create_test_node_traits(node_id=node.id,
traits=['trait1', 'trait2'])
res = self.dbapi.get_node_by_uuid(node.uuid)
self.assertEqual(node.id, res.id)
self.assertEqual(node.uuid, res.uuid)
self.assertItemsEqual(['tag1', 'tag2'], [tag.tag for tag in res.tags])
self.assertItemsEqual(['trait1', 'trait2'],
[trait.trait for trait in res.traits])
def test_get_node_by_name(self):
node = utils.create_test_node()
self.dbapi.set_node_tags(node.id, ['tag1', 'tag2'])
utils.create_test_node_traits(node_id=node.id,
traits=['trait1', 'trait2'])
res = self.dbapi.get_node_by_name(node.name)
self.assertEqual(node.id, res.id)
self.assertEqual(node.uuid, res.uuid)
self.assertEqual(node.name, res.name)
self.assertItemsEqual(['tag1', 'tag2'], [tag.tag for tag in res.tags])
self.assertItemsEqual(['trait1', 'trait2'],
[trait.trait for trait in res.traits])
def test_get_node_that_does_not_exist(self):
self.assertRaises(exception.NodeNotFound,
self.dbapi.get_node_by_id, 99)
self.assertRaises(exception.NodeNotFound,
self.dbapi.get_node_by_uuid,
'12345678-9999-0000-aaaa-123456789012')
self.assertRaises(exception.NodeNotFound,
self.dbapi.get_node_by_name,
'spam-eggs-bacon-spam')
def test_get_nodeinfo_list_defaults(self):
node_id_list = []
for i in range(1, 6):
node = utils.create_test_node(uuid=uuidutils.generate_uuid())
node_id_list.append(node.id)
res = [i[0] for i in self.dbapi.get_nodeinfo_list()]
self.assertEqual(sorted(res), sorted(node_id_list))
def test_get_nodeinfo_list_with_cols(self):
uuids = {}
extras = {}
for i in range(1, 6):
uuid = uuidutils.generate_uuid()
extra = {'foo': i}
node = utils.create_test_node(extra=extra, uuid=uuid)
uuids[node.id] = uuid
extras[node.id] = extra
res = self.dbapi.get_nodeinfo_list(columns=['id', 'extra', 'uuid'])
self.assertEqual(extras, dict((r[0], r[1]) for r in res))
self.assertEqual(uuids, dict((r[0], r[2]) for r in res))
def test_get_nodeinfo_list_with_filters(self):
node1 = utils.create_test_node(
driver='driver-one',
instance_uuid=uuidutils.generate_uuid(),
reservation='fake-host',
uuid=uuidutils.generate_uuid())
node2 = utils.create_test_node(
driver='driver-two',
uuid=uuidutils.generate_uuid(),
maintenance=True,
fault='boom',
resource_class='foo',
conductor_group='group1')
node3 = utils.create_test_node(
driver='driver-one',
uuid=uuidutils.generate_uuid(),
reservation='another-fake-host')
res = self.dbapi.get_nodeinfo_list(filters={'driver': 'driver-one'})
self.assertEqual(sorted([node1.id, node3.id]),
sorted([r[0] for r in res]))
res = self.dbapi.get_nodeinfo_list(filters={'driver': 'bad-driver'})
self.assertEqual([], [r[0] for r in res])
res = self.dbapi.get_nodeinfo_list(filters={'associated': True})
self.assertEqual([node1.id], [r[0] for r in res])
res = self.dbapi.get_nodeinfo_list(filters={'associated': False})
self.assertEqual(sorted([node2.id, node3.id]),
sorted([r[0] for r in res]))
res = self.dbapi.get_nodeinfo_list(filters={'reserved': True})
self.assertEqual(sorted([node1.id, node3.id]),
sorted([r[0] for r in res]))
res = self.dbapi.get_nodeinfo_list(filters={'reserved': False})
self.assertEqual([node2.id], [r[0] for r in res])
res = self.dbapi.get_nodeinfo_list(filters={'maintenance': True})
self.assertEqual([node2.id], [r.id for r in res])
res = self.dbapi.get_nodeinfo_list(filters={'maintenance': False})
self.assertEqual(sorted([node1.id, node3.id]),
sorted([r.id for r in res]))
res = self.dbapi.get_nodeinfo_list(filters={'fault': 'boom'})
self.assertEqual([node2.id], [r.id for r in res])
res = self.dbapi.get_nodeinfo_list(filters={'fault': 'moob'})
self.assertEqual([], [r.id for r in res])
res = self.dbapi.get_nodeinfo_list(filters={'resource_class': 'foo'})
self.assertEqual([node2.id], [r.id for r in res])
res = self.dbapi.get_nodeinfo_list(
filters={'conductor_group': 'group1'})
self.assertEqual([node2.id], [r.id for r in res])
res = self.dbapi.get_nodeinfo_list(
filters={'conductor_group': 'group2'})
self.assertEqual([], [r.id for r in res])
res = self.dbapi.get_nodeinfo_list(
filters={'reserved_by_any_of': ['fake-host',
'another-fake-host']})
self.assertEqual(sorted([node1.id, node3.id]),
sorted([r.id for r in res]))
res = self.dbapi.get_nodeinfo_list(filters={'id': node1.id})
self.assertEqual([node1.id], [r.id for r in res])
res = self.dbapi.get_nodeinfo_list(filters={'uuid': node1.uuid})
self.assertEqual([node1.id], [r.id for r in res])
# ensure unknown filters explode
filters = {'bad_filter': 'foo'}
self.assertRaisesRegex(ValueError,
'bad_filter',
self.dbapi.get_nodeinfo_list,
filters=filters)
# even with good filters present
filters = {'bad_filter': 'foo', 'id': node1.id}
self.assertRaisesRegex(ValueError,
'bad_filter',
self.dbapi.get_nodeinfo_list,
filters=filters)
@mock.patch.object(timeutils, 'utcnow', autospec=True)
def test_get_nodeinfo_list_provision(self, mock_utcnow):
past = datetime.datetime(2000, 1, 1, 0, 0)
next = past + datetime.timedelta(minutes=8)
present = past + datetime.timedelta(minutes=10)
mock_utcnow.return_value = past
# node with provision_updated timeout
node1 = utils.create_test_node(uuid=uuidutils.generate_uuid(),
provision_updated_at=past)
# node with None in provision_updated_at
node2 = utils.create_test_node(uuid=uuidutils.generate_uuid(),
provision_state=states.DEPLOYWAIT)
# node without timeout
utils.create_test_node(uuid=uuidutils.generate_uuid(),
provision_updated_at=next)
mock_utcnow.return_value = present
res = self.dbapi.get_nodeinfo_list(filters={'provisioned_before': 300})
self.assertEqual([node1.id], [r[0] for r in res])
res = self.dbapi.get_nodeinfo_list(filters={'provision_state':
states.DEPLOYWAIT})
self.assertEqual([node2.id], [r[0] for r in res])
@mock.patch.object(timeutils, 'utcnow', autospec=True)
def test_get_nodeinfo_list_inspection(self, mock_utcnow):
past = datetime.datetime(2000, 1, 1, 0, 0)
next = past + datetime.timedelta(minutes=8)
present = past + datetime.timedelta(minutes=10)
mock_utcnow.return_value = past
# node with provision_updated timeout
node1 = utils.create_test_node(uuid=uuidutils.generate_uuid(),
inspection_started_at=past)
# node with None in provision_updated_at
node2 = utils.create_test_node(uuid=uuidutils.generate_uuid(),
provision_state=states.INSPECTING)
# node without timeout
utils.create_test_node(uuid=uuidutils.generate_uuid(),
inspection_started_at=next)
mock_utcnow.return_value = present
res = self.dbapi.get_nodeinfo_list(
filters={'inspection_started_before': 300})
self.assertEqual([node1.id], [r[0] for r in res])
res = self.dbapi.get_nodeinfo_list(filters={'provision_state':
states.INSPECTING})
self.assertEqual([node2.id], [r[0] for r in res])
def test_get_nodeinfo_list_description(self):
node1 = utils.create_test_node(uuid=uuidutils.generate_uuid(),
description='Hello')
node2 = utils.create_test_node(uuid=uuidutils.generate_uuid(),
description='World!')
res = self.dbapi.get_nodeinfo_list(
filters={'description_contains': 'Hello'})
self.assertEqual([node1.id], [r[0] for r in res])
res = self.dbapi.get_nodeinfo_list(filters={'description_contains':
'World!'})
self.assertEqual([node2.id], [r[0] for r in res])
def test_get_node_list(self):
uuids = []
for i in range(1, 6):
node = utils.create_test_node(uuid=uuidutils.generate_uuid())
uuids.append(six.text_type(node['uuid']))
res = self.dbapi.get_node_list()
res_uuids = [r.uuid for r in res]
six.assertCountEqual(self, uuids, res_uuids)
for r in res:
self.assertEqual([], r.tags)
self.assertEqual([], r.traits)
def test_get_node_list_with_filters(self):
ch1 = utils.create_test_chassis(uuid=uuidutils.generate_uuid())
ch2 = utils.create_test_chassis(uuid=uuidutils.generate_uuid())
node1 = utils.create_test_node(
driver='driver-one',
instance_uuid=uuidutils.generate_uuid(),
reservation='fake-host',
uuid=uuidutils.generate_uuid(),
chassis_id=ch1['id'])
node2 = utils.create_test_node(
driver='driver-two',
uuid=uuidutils.generate_uuid(),
chassis_id=ch2['id'],
maintenance=True,
fault='boom',
resource_class='foo',
conductor_group='group1',
power_state='power on')
res = self.dbapi.get_node_list(filters={'chassis_uuid': ch1['uuid']})
self.assertEqual([node1.id], [r.id for r in res])
res = self.dbapi.get_node_list(filters={'chassis_uuid': ch2['uuid']})
self.assertEqual([node2.id], [r.id for r in res])
res = self.dbapi.get_node_list(filters={'driver': 'driver-one'})
self.assertEqual([node1.id], [r.id for r in res])
res = self.dbapi.get_node_list(filters={'driver': 'bad-driver'})
self.assertEqual([], [r.id for r in res])
res = self.dbapi.get_node_list(filters={'associated': True})
self.assertEqual([node1.id], [r.id for r in res])
res = self.dbapi.get_node_list(filters={'associated': False})
self.assertEqual([node2.id], [r.id for r in res])
res = self.dbapi.get_node_list(filters={'reserved': True})
self.assertEqual([node1.id], [r.id for r in res])
res = self.dbapi.get_node_list(filters={'reserved': False})
self.assertEqual([node2.id], [r.id for r in res])
res = self.dbapi.get_node_list(filters={'maintenance': True})
self.assertEqual([node2.id], [r.id for r in res])
res = self.dbapi.get_node_list(filters={'maintenance': False})
self.assertEqual([node1.id], [r.id for r in res])
res = self.dbapi.get_nodeinfo_list(filters={'fault': 'boom'})
self.assertEqual([node2.id], [r.id for r in res])
res = self.dbapi.get_nodeinfo_list(filters={'fault': 'moob'})
self.assertEqual([], [r.id for r in res])
res = self.dbapi.get_node_list(filters={'resource_class': 'foo'})
self.assertEqual([node2.id], [r.id for r in res])
res = self.dbapi.get_node_list(filters={'conductor_group': 'group1'})
self.assertEqual([node2.id], [r.id for r in res])
res = self.dbapi.get_node_list(filters={'conductor_group': 'group2'})
self.assertEqual([], [r.id for r in res])
res = self.dbapi.get_node_list(filters={'id': node1.id})
self.assertEqual([node1.id], [r.id for r in res])
res = self.dbapi.get_node_list(filters={'uuid': node1.uuid})
self.assertEqual([node1.id], [r.id for r in res])
uuids = [uuidutils.generate_uuid(),
node1.uuid,
uuidutils.generate_uuid()]
res = self.dbapi.get_node_list(filters={'uuid_in': uuids})
self.assertEqual([node1.id], [r.id for r in res])
res = self.dbapi.get_node_list(filters={'with_power_state': True})
self.assertEqual([node2.id], [r.id for r in res])
res = self.dbapi.get_node_list(filters={'with_power_state': False})
self.assertEqual([node1.id], [r.id for r in res])
# ensure unknown filters explode
filters = {'bad_filter': 'foo'}
self.assertRaisesRegex(ValueError,
'bad_filter',
self.dbapi.get_node_list,
filters=filters)
# even with good filters present
filters = {'bad_filter': 'foo', 'id': node1.id}
self.assertRaisesRegex(ValueError,
'bad_filter',
self.dbapi.get_node_list,
filters=filters)
def test_get_node_list_description(self):
node1 = utils.create_test_node(uuid=uuidutils.generate_uuid(),
description='Hello')
node2 = utils.create_test_node(uuid=uuidutils.generate_uuid(),
description='World!')
res = self.dbapi.get_node_list(filters={
'description_contains': 'Hello'})
self.assertEqual([node1.id], [r.id for r in res])
res = self.dbapi.get_node_list(filters={
'description_contains': 'World!'})
self.assertEqual([node2.id], [r.id for r in res])
def test_get_node_list_chassis_not_found(self):
self.assertRaises(exception.ChassisNotFound,
self.dbapi.get_node_list,
{'chassis_uuid': uuidutils.generate_uuid()})
def test_get_node_by_instance(self):
node = utils.create_test_node(
instance_uuid='12345678-9999-0000-aaaa-123456789012')
self.dbapi.set_node_tags(node.id, ['tag1', 'tag2'])
utils.create_test_node_traits(node_id=node.id,
traits=['trait1', 'trait2'])
res = self.dbapi.get_node_by_instance(node.instance_uuid)
self.assertEqual(node.uuid, res.uuid)
self.assertItemsEqual(['tag1', 'tag2'], [tag.tag for tag in res.tags])
self.assertItemsEqual(['trait1', 'trait2'],
[trait.trait for trait in res.traits])
def test_get_node_by_instance_wrong_uuid(self):
utils.create_test_node(
instance_uuid='12345678-9999-0000-aaaa-123456789012')
self.assertRaises(exception.InstanceNotFound,
self.dbapi.get_node_by_instance,
'12345678-9999-0000-bbbb-123456789012')
def test_get_node_by_instance_invalid_uuid(self):
self.assertRaises(exception.InvalidUUID,
self.dbapi.get_node_by_instance,
'fake_uuid')
def test_destroy_node(self):
node = utils.create_test_node()
self.dbapi.destroy_node(node.id)
self.assertRaises(exception.NodeNotFound,
self.dbapi.get_node_by_id, node.id)
def test_destroy_node_by_uuid(self):
node = utils.create_test_node()
self.dbapi.destroy_node(node.uuid)
self.assertRaises(exception.NodeNotFound,
self.dbapi.get_node_by_uuid, node.uuid)
def test_destroy_node_that_does_not_exist(self):
self.assertRaises(exception.NodeNotFound,
self.dbapi.destroy_node,
'12345678-9999-0000-aaaa-123456789012')
def test_ports_get_destroyed_after_destroying_a_node(self):
node = utils.create_test_node()
port = utils.create_test_port(node_id=node.id)
self.dbapi.destroy_node(node.id)
self.assertRaises(exception.PortNotFound,
self.dbapi.get_port_by_id, port.id)
def test_ports_get_destroyed_after_destroying_a_node_by_uuid(self):
node = utils.create_test_node()
port = utils.create_test_port(node_id=node.id)
self.dbapi.destroy_node(node.uuid)
self.assertRaises(exception.PortNotFound,
self.dbapi.get_port_by_id, port.id)
def test_tags_get_destroyed_after_destroying_a_node(self):
node = utils.create_test_node()
tag = utils.create_test_node_tag(node_id=node.id)
self.assertTrue(self.dbapi.node_tag_exists(node.id, tag.tag))
self.dbapi.destroy_node(node.id)
self.assertRaises(exception.NodeNotFound,
self.dbapi.node_tag_exists, node.id, tag.tag)
def test_tags_get_destroyed_after_destroying_a_node_by_uuid(self):
node = utils.create_test_node()
tag = utils.create_test_node_tag(node_id=node.id)
self.assertTrue(self.dbapi.node_tag_exists(node.id, tag.tag))
self.dbapi.destroy_node(node.uuid)
self.assertRaises(exception.NodeNotFound,
self.dbapi.node_tag_exists, node.id, tag.tag)
def test_volume_connector_get_destroyed_after_destroying_a_node(self):
node = utils.create_test_node()
connector = utils.create_test_volume_connector(node_id=node.id)
self.dbapi.destroy_node(node.id)
self.assertRaises(exception.VolumeConnectorNotFound,
self.dbapi.get_volume_connector_by_id, connector.id)
def test_volume_connector_get_destroyed_after_destroying_a_node_uuid(self):
node = utils.create_test_node()
connector = utils.create_test_volume_connector(node_id=node.id)
self.dbapi.destroy_node(node.uuid)
self.assertRaises(exception.VolumeConnectorNotFound,
self.dbapi.get_volume_connector_by_id, connector.id)
def test_volume_target_gets_destroyed_after_destroying_a_node(self):
node = utils.create_test_node()
target = utils.create_test_volume_target(node_id=node.id)
self.dbapi.destroy_node(node.id)
self.assertRaises(exception.VolumeTargetNotFound,
self.dbapi.get_volume_target_by_id, target.id)
def test_volume_target_gets_destroyed_after_destroying_a_node_uuid(self):
node = utils.create_test_node()
target = utils.create_test_volume_target(node_id=node.id)
self.dbapi.destroy_node(node.uuid)
self.assertRaises(exception.VolumeTargetNotFound,
self.dbapi.get_volume_target_by_id, target.id)
def test_traits_get_destroyed_after_destroying_a_node(self):
node = utils.create_test_node()
trait = utils.create_test_node_trait(node_id=node.id)
self.assertTrue(self.dbapi.node_trait_exists(node.id, trait.trait))
self.dbapi.destroy_node(node.id)
self.assertRaises(exception.NodeNotFound,
self.dbapi.node_trait_exists, node.id, trait.trait)
def test_traits_get_destroyed_after_destroying_a_node_by_uuid(self):
node = utils.create_test_node()
trait = utils.create_test_node_trait(node_id=node.id)
self.assertTrue(self.dbapi.node_trait_exists(node.id, trait.trait))
self.dbapi.destroy_node(node.uuid)
self.assertRaises(exception.NodeNotFound,
self.dbapi.node_trait_exists, node.id, trait.trait)
def test_allocations_get_destroyed_after_destroying_a_node_by_uuid(self):
node = utils.create_test_node()
allocation = utils.create_test_allocation(node_id=node.id)
self.dbapi.destroy_node(node.uuid)
self.assertRaises(exception.AllocationNotFound,
self.dbapi.get_allocation_by_id, allocation.id)
def test_update_node(self):
node = utils.create_test_node()
old_extra = node.extra
new_extra = {'foo': 'bar'}
self.assertNotEqual(old_extra, new_extra)
res = self.dbapi.update_node(node.id, {'extra': new_extra})
self.assertEqual(new_extra, res.extra)
self.assertEqual([], res.tags)
self.assertEqual([], res.traits)
def test_update_node_with_tags(self):
node = utils.create_test_node()
tag = utils.create_test_node_tag(node_id=node.id)
old_extra = node.extra
new_extra = {'foo': 'bar'}
self.assertNotEqual(old_extra, new_extra)
res = self.dbapi.update_node(node.id, {'extra': new_extra})
self.assertEqual([tag.tag], [t.tag for t in res.tags])
def test_update_node_with_traits(self):
node = utils.create_test_node()
trait = utils.create_test_node_trait(node_id=node.id)
old_extra = node.extra
new_extra = {'foo': 'bar'}
self.assertNotEqual(old_extra, new_extra)
res = self.dbapi.update_node(node.id, {'extra': new_extra})
self.assertEqual([trait.trait], [t.trait for t in res.traits])
def test_update_node_not_found(self):
node_uuid = uuidutils.generate_uuid()
new_extra = {'foo': 'bar'}
self.assertRaises(exception.NodeNotFound, self.dbapi.update_node,
node_uuid, {'extra': new_extra})
def test_update_node_uuid(self):
node = utils.create_test_node()
self.assertRaises(exception.InvalidParameterValue,
self.dbapi.update_node, node.id,
{'uuid': ''})
def test_update_node_associate_and_disassociate(self):
node = utils.create_test_node()
new_i_uuid = uuidutils.generate_uuid()
res = self.dbapi.update_node(node.id, {'instance_uuid': new_i_uuid})
self.assertEqual(new_i_uuid, res.instance_uuid)
res = self.dbapi.update_node(node.id, {'instance_uuid': None})
self.assertIsNone(res.instance_uuid)
def test_update_node_instance_already_associated(self):
node1 = utils.create_test_node(uuid=uuidutils.generate_uuid())
new_i_uuid = uuidutils.generate_uuid()
self.dbapi.update_node(node1.id, {'instance_uuid': new_i_uuid})
node2 = utils.create_test_node(uuid=uuidutils.generate_uuid())
self.assertRaises(exception.InstanceAssociated,
self.dbapi.update_node,
node2.id,
{'instance_uuid': new_i_uuid})
@mock.patch.object(timeutils, 'utcnow', autospec=True)
def test_update_node_provision(self, mock_utcnow):
mocked_time = datetime.datetime(2000, 1, 1, 0, 0)
mock_utcnow.return_value = mocked_time
node = utils.create_test_node()
res = self.dbapi.update_node(node.id, {'provision_state': 'fake'})
self.assertEqual(mocked_time,
timeutils.normalize_time(res['provision_updated_at']))
def test_update_node_name_duplicate(self):
node1 = utils.create_test_node(uuid=uuidutils.generate_uuid(),
name='spam')
node2 = utils.create_test_node(uuid=uuidutils.generate_uuid())
self.assertRaises(exception.DuplicateName,
self.dbapi.update_node,
node2.id,
{'name': node1.name})
def test_update_node_no_provision(self):
node = utils.create_test_node()
res = self.dbapi.update_node(node.id, {'extra': {'foo': 'bar'}})
self.assertIsNone(res['provision_updated_at'])
self.assertIsNone(res['inspection_started_at'])
@mock.patch.object(timeutils, 'utcnow', autospec=True)
def test_update_node_inspection_started_at(self, mock_utcnow):
mocked_time = datetime.datetime(2000, 1, 1, 0, 0)
mock_utcnow.return_value = mocked_time
node = utils.create_test_node(uuid=uuidutils.generate_uuid(),
inspection_started_at=mocked_time)
res = self.dbapi.update_node(node.id, {'provision_state': 'fake'})
result = res['inspection_started_at']
self.assertEqual(mocked_time,
timeutils.normalize_time(result))
self.assertIsNone(res['inspection_finished_at'])
@mock.patch.object(timeutils, 'utcnow', autospec=True)
def test_update_node_inspection_finished_at(self, mock_utcnow):
mocked_time = datetime.datetime(2000, 1, 1, 0, 0)
mock_utcnow.return_value = mocked_time
node = utils.create_test_node(uuid=uuidutils.generate_uuid(),
inspection_finished_at=mocked_time)
res = self.dbapi.update_node(node.id, {'provision_state': 'fake'})
result = res['inspection_finished_at']
self.assertEqual(mocked_time,
timeutils.normalize_time(result))
self.assertIsNone(res['inspection_started_at'])
def test_reserve_node(self):
node = utils.create_test_node()
self.dbapi.set_node_tags(node.id, ['tag1', 'tag2'])
utils.create_test_node_traits(node_id=node.id,
traits=['trait1', 'trait2'])
uuid = node.uuid
r1 = 'fake-reservation'
# reserve the node
res = self.dbapi.reserve_node(r1, uuid)
self.assertItemsEqual(['tag1', 'tag2'], [tag.tag for tag in res.tags])
self.assertItemsEqual(['trait1', 'trait2'],
[trait.trait for trait in res.traits])
# check reservation
res = self.dbapi.get_node_by_uuid(uuid)
self.assertEqual(r1, res.reservation)
def test_release_reservation(self):
node = utils.create_test_node()
uuid = node.uuid
r1 = 'fake-reservation'
self.dbapi.reserve_node(r1, uuid)
# release reservation
self.dbapi.release_node(r1, uuid)
res = self.dbapi.get_node_by_uuid(uuid)
self.assertIsNone(res.reservation)
def test_reservation_of_reserved_node_fails(self):
node = utils.create_test_node()
uuid = node.uuid
r1 = 'fake-reservation'
r2 = 'another-reservation'
# reserve the node
self.dbapi.reserve_node(r1, uuid)
# another host fails to reserve or release
self.assertRaises(exception.NodeLocked,
self.dbapi.reserve_node,
r2, uuid)
self.assertRaises(exception.NodeLocked,
self.dbapi.release_node,
r2, uuid)
def test_reservation_after_release(self):
node = utils.create_test_node()
uuid = node.uuid
r1 = 'fake-reservation'
r2 = 'another-reservation'
self.dbapi.reserve_node(r1, uuid)
self.dbapi.release_node(r1, uuid)
# another host succeeds
self.dbapi.reserve_node(r2, uuid)
res = self.dbapi.get_node_by_uuid(uuid)
self.assertEqual(r2, res.reservation)
def test_reservation_in_exception_message(self):
node = utils.create_test_node()
uuid = node.uuid
r = 'fake-reservation'
self.dbapi.reserve_node(r, uuid)
exc = self.assertRaises(exception.NodeLocked, self.dbapi.reserve_node,
'another', uuid)
self.assertIn(r, str(exc))
def test_reservation_non_existent_node(self):
node = utils.create_test_node()
self.dbapi.destroy_node(node.id)
self.assertRaises(exception.NodeNotFound,
self.dbapi.reserve_node, 'fake', node.id)
self.assertRaises(exception.NodeNotFound,
self.dbapi.reserve_node, 'fake', node.uuid)
def test_release_non_existent_node(self):
node = utils.create_test_node()
self.dbapi.destroy_node(node.id)
self.assertRaises(exception.NodeNotFound,
self.dbapi.release_node, 'fake', node.id)
self.assertRaises(exception.NodeNotFound,
self.dbapi.release_node, 'fake', node.uuid)
def test_release_non_locked_node(self):
node = utils.create_test_node()
self.assertIsNone(node.reservation)
self.assertRaises(exception.NodeNotLocked,
self.dbapi.release_node, 'fake', node.id)
self.assertRaises(exception.NodeNotLocked,
self.dbapi.release_node, 'fake', node.uuid)
@mock.patch.object(timeutils, 'utcnow', autospec=True)
def test_touch_node_provisioning(self, mock_utcnow):
test_time = datetime.datetime(2000, 1, 1, 0, 0)
mock_utcnow.return_value = test_time
node = utils.create_test_node()
# assert provision_updated_at is None
self.assertIsNone(node.provision_updated_at)
self.dbapi.touch_node_provisioning(node.uuid)
node = self.dbapi.get_node_by_uuid(node.uuid)
# assert provision_updated_at has been updated
self.assertEqual(test_time,
timeutils.normalize_time(node.provision_updated_at))
def test_touch_node_provisioning_not_found(self):
self.assertRaises(
exception.NodeNotFound,
self.dbapi.touch_node_provisioning, uuidutils.generate_uuid())
def test_get_node_by_port_addresses(self):
wrong_node = utils.create_test_node(
driver='driver-one',
uuid=uuidutils.generate_uuid())
node = utils.create_test_node(
driver='driver-two',
uuid=uuidutils.generate_uuid())
addresses = []
for i in (1, 2, 3):
address = '52:54:00:cf:2d:4%s' % i
utils.create_test_port(uuid=uuidutils.generate_uuid(),
node_id=node.id, address=address)
if i > 1:
addresses.append(address)
utils.create_test_port(uuid=uuidutils.generate_uuid(),
node_id=wrong_node.id,
address='aa:bb:cc:dd:ee:ff')
res = self.dbapi.get_node_by_port_addresses(addresses)
self.assertEqual(node.uuid, res.uuid)
self.assertEqual([], res.traits)
def test_get_node_by_port_addresses_not_found(self):
node = utils.create_test_node(
driver='driver',
uuid=uuidutils.generate_uuid())
utils.create_test_port(uuid=uuidutils.generate_uuid(),
node_id=node.id,
address='aa:bb:cc:dd:ee:ff')
self.assertRaisesRegex(exception.NodeNotFound,
'was not found',
self.dbapi.get_node_by_port_addresses,
['11:22:33:44:55:66'])
def test_get_node_by_port_addresses_multiple_found(self):
node1 = utils.create_test_node(
driver='driver',
uuid=uuidutils.generate_uuid())
node2 = utils.create_test_node(
driver='driver',
uuid=uuidutils.generate_uuid())
addresses = ['52:54:00:cf:2d:4%s' % i for i in (1, 2)]
utils.create_test_port(uuid=uuidutils.generate_uuid(),
node_id=node1.id,
address=addresses[0])
utils.create_test_port(uuid=uuidutils.generate_uuid(),
node_id=node2.id,
address=addresses[1])
self.assertRaisesRegex(exception.NodeNotFound,
'Multiple nodes',
self.dbapi.get_node_by_port_addresses,
addresses)
| 41.608645 | 79 | 0.610411 | [
"Apache-2.0"
] | Rachit7194/ironic | ironic/tests/unit/db/test_nodes.py | 35,617 | Python |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import sys
import warnings
from functools import reduce
from threading import RLock
if sys.version >= '3':
basestring = unicode = str
else:
from itertools import imap as map
from pyspark import since
from pyspark.rdd import RDD, ignore_unicode_prefix
from pyspark.sql.catalog import Catalog
from pyspark.sql.conf import RuntimeConfig
from pyspark.sql.dataframe import DataFrame
from pyspark.sql.readwriter import DataFrameReader
from pyspark.sql.streaming import DataStreamReader
from pyspark.sql.types import Row, DataType, StringType, StructType, _verify_type, \
_infer_schema, _has_nulltype, _merge_type, _create_converter, _parse_datatype_string
from pyspark.sql.utils import install_exception_handler
__all__ = ["SparkSession"]
def _monkey_patch_RDD(sparkSession):
def toDF(self, schema=None, sampleRatio=None):
"""
Converts current :class:`RDD` into a :class:`DataFrame`
This is a shorthand for ``spark.createDataFrame(rdd, schema, sampleRatio)``
:param schema: a :class:`pyspark.sql.types.StructType` or list of names of columns
        :param sampleRatio: the sample ratio of rows used for inferring
:return: a DataFrame
>>> rdd.toDF().collect()
[Row(name=u'Alice', age=1)]
"""
return sparkSession.createDataFrame(self, schema, sampleRatio)
RDD.toDF = toDF
class SparkSession(object):
"""The entry point to programming Spark with the Dataset and DataFrame API.
    A SparkSession can be used to create :class:`DataFrame`, register :class:`DataFrame` as
tables, execute SQL over tables, cache tables, and read parquet files.
To create a SparkSession, use the following builder pattern:
>>> spark = SparkSession.builder \\
... .master("local") \\
... .appName("Word Count") \\
... .config("spark.some.config.option", "some-value") \\
... .getOrCreate()
"""
class Builder(object):
"""Builder for :class:`SparkSession`.
"""
_lock = RLock()
_options = {}
@since(2.0)
def config(self, key=None, value=None, conf=None):
"""Sets a config option. Options set using this method are automatically propagated to
both :class:`SparkConf` and :class:`SparkSession`'s own configuration.
For an existing SparkConf, use `conf` parameter.
>>> from pyspark.conf import SparkConf
>>> SparkSession.builder.config(conf=SparkConf())
<pyspark.sql.session...
For a (key, value) pair, you can omit parameter names.
>>> SparkSession.builder.config("spark.some.config.option", "some-value")
<pyspark.sql.session...
:param key: a key name string for configuration property
:param value: a value for configuration property
:param conf: an instance of :class:`SparkConf`
"""
with self._lock:
if conf is None:
self._options[key] = str(value)
else:
for (k, v) in conf.getAll():
self._options[k] = v
return self
@since(2.0)
def master(self, master):
"""Sets the Spark master URL to connect to, such as "local" to run locally, "local[4]"
to run locally with 4 cores, or "spark://master:7077" to run on a Spark standalone
cluster.
:param master: a url for spark master
"""
return self.config("spark.master", master)
@since(2.0)
def appName(self, name):
"""Sets a name for the application, which will be shown in the Spark web UI.
If no application name is set, a randomly generated name will be used.
:param name: an application name
"""
return self.config("spark.app.name", name)
@since(2.0)
def enableHiveSupport(self):
"""Enables Hive support, including connectivity to a persistent Hive metastore, support
for Hive serdes, and Hive user-defined functions.
"""
return self.config("spark.sql.catalogImplementation", "hive")
@since(2.0)
def getOrCreate(self):
"""Gets an existing :class:`SparkSession` or, if there is no existing one, creates a
new one based on the options set in this builder.
This method first checks whether there is a valid global default SparkSession, and if
            yes, returns that one. If no valid global default SparkSession exists, the method
creates a new SparkSession and assigns the newly created SparkSession as the global
default.
>>> s1 = SparkSession.builder.config("k1", "v1").getOrCreate()
>>> s1.conf.get("k1") == s1.sparkContext.getConf().get("k1") == "v1"
True
In case an existing SparkSession is returned, the config options specified
in this builder will be applied to the existing SparkSession.
>>> s2 = SparkSession.builder.config("k2", "v2").getOrCreate()
>>> s1.conf.get("k1") == s2.conf.get("k1")
True
>>> s1.conf.get("k2") == s2.conf.get("k2")
True
"""
with self._lock:
from pyspark.context import SparkContext
from pyspark.conf import SparkConf
session = SparkSession._instantiatedSession
if session is None or session._sc._jsc is None:
sparkConf = SparkConf()
for key, value in self._options.items():
sparkConf.set(key, value)
sc = SparkContext.getOrCreate(sparkConf)
# This SparkContext may be an existing one.
for key, value in self._options.items():
# we need to propagate the confs
# before we create the SparkSession. Otherwise, confs like
# warehouse path and metastore url will not be set correctly (
# these confs cannot be changed once the SparkSession is created).
sc._conf.set(key, value)
session = SparkSession(sc)
for key, value in self._options.items():
session.conf.set(key, value)
for key, value in self._options.items():
session.sparkContext._conf.set(key, value)
return session
builder = Builder()
_instantiatedSession = None
@ignore_unicode_prefix
def __init__(self, sparkContext, jsparkSession=None):
"""Creates a new SparkSession.
>>> from datetime import datetime
>>> spark = SparkSession(sc)
>>> allTypes = sc.parallelize([Row(i=1, s="string", d=1.0, l=1,
... b=True, list=[1, 2, 3], dict={"s": 0}, row=Row(a=1),
... time=datetime(2014, 8, 1, 14, 1, 5))])
>>> df = allTypes.toDF()
>>> df.createOrReplaceTempView("allTypes")
>>> spark.sql('select i+1, d+1, not b, list[1], dict["s"], time, row.a '
... 'from allTypes where b and i > 0').collect()
[Row((i + CAST(1 AS BIGINT))=2, (d + CAST(1 AS DOUBLE))=2.0, (NOT b)=False, list[1]=2, \
dict[s]=0, time=datetime.datetime(2014, 8, 1, 14, 1, 5), a=1)]
>>> df.rdd.map(lambda x: (x.i, x.s, x.d, x.l, x.b, x.time, x.row.a, x.list)).collect()
[(1, u'string', 1.0, 1, True, datetime.datetime(2014, 8, 1, 14, 1, 5), 1, [1, 2, 3])]
"""
from pyspark.sql.context import SQLContext
self._sc = sparkContext
self._jsc = self._sc._jsc
self._jvm = self._sc._jvm
if jsparkSession is None:
jsparkSession = self._jvm.SparkSession(self._jsc.sc())
self._jsparkSession = jsparkSession
self._jwrapped = self._jsparkSession.sqlContext()
self._wrapped = SQLContext(self._sc, self, self._jwrapped)
_monkey_patch_RDD(self)
install_exception_handler()
# If we had an instantiated SparkSession attached with a SparkContext
# which is stopped now, we need to renew the instantiated SparkSession.
# Otherwise, we will use invalid SparkSession when we call Builder.getOrCreate.
if SparkSession._instantiatedSession is None \
or SparkSession._instantiatedSession._sc._jsc is None:
SparkSession._instantiatedSession = self
@since(2.0)
def newSession(self):
"""
        Returns a new :class:`SparkSession` as a new session, which has separate SQLConf,
registered temporary views and UDFs, but shared SparkContext and
table cache.
"""
return self.__class__(self._sc, self._jsparkSession.newSession())
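    # Illustrative sketch (not part of the original source): temporary views are
    # session-scoped, so a view registered in `spark` is not visible from the
    # session returned by newSession(); the `spark` and `df` names are assumed.
    #
    #   other = spark.newSession()
    #   df.createOrReplaceTempView("people")
    #   spark.sql("SELECT * FROM people").count()   # resolves in the owning session
    #   other.sql("SELECT * FROM people")            # fails: view not registered here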
@property
@since(2.0)
def sparkContext(self):
"""Returns the underlying :class:`SparkContext`."""
return self._sc
@property
@since(2.0)
def version(self):
"""The version of Spark on which this application is running."""
return self._jsparkSession.version()
@property
@since(2.0)
def conf(self):
"""Runtime configuration interface for Spark.
This is the interface through which the user can get and set all Spark and Hadoop
configurations that are relevant to Spark SQL. When getting the value of a config,
this defaults to the value set in the underlying :class:`SparkContext`, if any.
"""
if not hasattr(self, "_conf"):
self._conf = RuntimeConfig(self._jsparkSession.conf())
return self._conf
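    # Illustrative sketch (not part of the original source): reading and writing
    # runtime configuration through this property; the `spark` name is assumed.
    #
    #   spark.conf.set("spark.sql.shuffle.partitions", "8")
    #   spark.conf.get("spark.sql.shuffle.partitions")   # -> u'8'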
@property
@since(2.0)
def catalog(self):
"""Interface through which the user may create, drop, alter or query underlying
databases, tables, functions etc.
"""
if not hasattr(self, "_catalog"):
self._catalog = Catalog(self)
return self._catalog
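    # Illustrative sketch (not part of the original source): typical catalog calls;
    # the `spark` name and a previously registered "people" view are assumed.
    #
    #   spark.catalog.listDatabases()
    #   spark.catalog.listTables("default")
    #   spark.catalog.dropTempView("people")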
@property
@since(2.0)
def udf(self):
"""Returns a :class:`UDFRegistration` for UDF registration.
:return: :class:`UDFRegistration`
"""
from pyspark.sql.context import UDFRegistration
return UDFRegistration(self._wrapped)
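    # Illustrative sketch (not part of the original source): registering a Python
    # function so it can be called from SQL; the `spark` name is assumed.
    #
    #   from pyspark.sql.types import IntegerType
    #   spark.udf.register("plus_one", lambda x: x + 1, IntegerType())
    #   spark.sql("SELECT plus_one(41)").collect()   # one row with value 42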
@since(2.0)
def range(self, start, end=None, step=1, numPartitions=None):
"""
        Create a :class:`DataFrame` with a single :class:`pyspark.sql.types.LongType` column named
``id``, containing elements in a range from ``start`` to ``end`` (exclusive) with
step value ``step``.
:param start: the start value
:param end: the end value (exclusive)
:param step: the incremental step (default: 1)
:param numPartitions: the number of partitions of the DataFrame
:return: :class:`DataFrame`
>>> spark.range(1, 7, 2).collect()
[Row(id=1), Row(id=3), Row(id=5)]
If only one argument is specified, it will be used as the end value.
>>> spark.range(3).collect()
[Row(id=0), Row(id=1), Row(id=2)]
"""
if numPartitions is None:
numPartitions = self._sc.defaultParallelism
if end is None:
jdf = self._jsparkSession.range(0, int(start), int(step), int(numPartitions))
else:
jdf = self._jsparkSession.range(int(start), int(end), int(step), int(numPartitions))
return DataFrame(jdf, self._wrapped)
def _inferSchemaFromList(self, data):
"""
Infer schema from list of Row or tuple.
:param data: list of Row or tuple
:return: :class:`pyspark.sql.types.StructType`
"""
if not data:
raise ValueError("can not infer schema from empty dataset")
first = data[0]
if type(first) is dict:
warnings.warn("inferring schema from dict is deprecated,"
"please use pyspark.sql.Row instead")
schema = reduce(_merge_type, map(_infer_schema, data))
if _has_nulltype(schema):
raise ValueError("Some of types cannot be determined after inferring")
return schema
def _inferSchema(self, rdd, samplingRatio=None):
"""
Infer schema from an RDD of Row or tuple.
:param rdd: an RDD of Row or tuple
:param samplingRatio: sampling ratio, or no sampling (default)
:return: :class:`pyspark.sql.types.StructType`
"""
first = rdd.first()
if not first:
raise ValueError("The first row in RDD is empty, "
"can not infer schema")
if type(first) is dict:
warnings.warn("Using RDD of dict to inferSchema is deprecated. "
"Use pyspark.sql.Row instead")
if samplingRatio is None:
schema = _infer_schema(first)
if _has_nulltype(schema):
for row in rdd.take(100)[1:]:
schema = _merge_type(schema, _infer_schema(row))
if not _has_nulltype(schema):
break
else:
raise ValueError("Some of types cannot be determined by the "
"first 100 rows, please try again with sampling")
else:
if samplingRatio < 0.99:
rdd = rdd.sample(False, float(samplingRatio))
schema = rdd.map(_infer_schema).reduce(_merge_type)
return schema
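    # Illustrative sketch (not part of the original source): without samplingRatio
    # only the first row(s) are examined, while a ratio samples across the RDD;
    # `spark` and `rdd` (an RDD of Row objects) are assumed.
    #
    #   spark.createDataFrame(rdd)                      # infer from the first row(s)
    #   spark.createDataFrame(rdd, samplingRatio=0.3)   # infer from a ~30% sample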
def _createFromRDD(self, rdd, schema, samplingRatio):
"""
Create an RDD for DataFrame from an existing RDD, returns the RDD and schema.
"""
if schema is None or isinstance(schema, (list, tuple)):
struct = self._inferSchema(rdd, samplingRatio)
converter = _create_converter(struct)
rdd = rdd.map(converter)
if isinstance(schema, (list, tuple)):
for i, name in enumerate(schema):
struct.fields[i].name = name
struct.names[i] = name
schema = struct
elif not isinstance(schema, StructType):
raise TypeError("schema should be StructType or list or None, but got: %s" % schema)
# convert python objects to sql data
rdd = rdd.map(schema.toInternal)
return rdd, schema
def _createFromLocal(self, data, schema):
"""
Create an RDD for DataFrame from a list or pandas.DataFrame, returns
the RDD and schema.
"""
        # make sure data can be consumed multiple times
if not isinstance(data, list):
data = list(data)
if schema is None or isinstance(schema, (list, tuple)):
struct = self._inferSchemaFromList(data)
converter = _create_converter(struct)
data = map(converter, data)
if isinstance(schema, (list, tuple)):
for i, name in enumerate(schema):
struct.fields[i].name = name
struct.names[i] = name
schema = struct
elif not isinstance(schema, StructType):
raise TypeError("schema should be StructType or list or None, but got: %s" % schema)
# convert python objects to sql data
data = [schema.toInternal(row) for row in data]
return self._sc.parallelize(data), schema
@since(2.0)
@ignore_unicode_prefix
def createDataFrame(self, data, schema=None, samplingRatio=None, verifySchema=True):
"""
Creates a :class:`DataFrame` from an :class:`RDD`, a list or a :class:`pandas.DataFrame`.
When ``schema`` is a list of column names, the type of each column
will be inferred from ``data``.
When ``schema`` is ``None``, it will try to infer the schema (column names and types)
from ``data``, which should be an RDD of :class:`Row`,
or :class:`namedtuple`, or :class:`dict`.
When ``schema`` is :class:`pyspark.sql.types.DataType` or
:class:`pyspark.sql.types.StringType`, it must match the
real data, or an exception will be thrown at runtime. If the given schema is not
:class:`pyspark.sql.types.StructType`, it will be wrapped into a
:class:`pyspark.sql.types.StructType` as its only field, and the field name will be "value",
each record will also be wrapped into a tuple, which can be converted to row later.
        If schema inference is needed, ``samplingRatio`` is used to determine the ratio of
rows used for schema inference. The first row will be used if ``samplingRatio`` is ``None``.
        :param data: an RDD of any kind of SQL data representation (e.g. row, tuple, int, boolean,
etc.), or :class:`list`, or :class:`pandas.DataFrame`.
:param schema: a :class:`pyspark.sql.types.DataType` or a
:class:`pyspark.sql.types.StringType` or a list of
            column names, default is ``None``. The data type string format equals
:class:`pyspark.sql.types.DataType.simpleString`, except that top level struct type can
omit the ``struct<>`` and atomic types use ``typeName()`` as their format, e.g. use
``byte`` instead of ``tinyint`` for :class:`pyspark.sql.types.ByteType`. We can also use
``int`` as a short name for ``IntegerType``.
:param samplingRatio: the sample ratio of rows used for inferring
:param verifySchema: verify data types of every row against schema.
:return: :class:`DataFrame`
.. versionchanged:: 2.0.1
Added verifySchema.
>>> l = [('Alice', 1)]
>>> spark.createDataFrame(l).collect()
[Row(_1=u'Alice', _2=1)]
>>> spark.createDataFrame(l, ['name', 'age']).collect()
[Row(name=u'Alice', age=1)]
>>> d = [{'name': 'Alice', 'age': 1}]
>>> spark.createDataFrame(d).collect()
[Row(age=1, name=u'Alice')]
>>> rdd = sc.parallelize(l)
>>> spark.createDataFrame(rdd).collect()
[Row(_1=u'Alice', _2=1)]
>>> df = spark.createDataFrame(rdd, ['name', 'age'])
>>> df.collect()
[Row(name=u'Alice', age=1)]
>>> from pyspark.sql import Row
>>> Person = Row('name', 'age')
>>> person = rdd.map(lambda r: Person(*r))
>>> df2 = spark.createDataFrame(person)
>>> df2.collect()
[Row(name=u'Alice', age=1)]
>>> from pyspark.sql.types import *
>>> schema = StructType([
... StructField("name", StringType(), True),
... StructField("age", IntegerType(), True)])
>>> df3 = spark.createDataFrame(rdd, schema)
>>> df3.collect()
[Row(name=u'Alice', age=1)]
>>> spark.createDataFrame(df.toPandas()).collect() # doctest: +SKIP
[Row(name=u'Alice', age=1)]
>>> spark.createDataFrame(pandas.DataFrame([[1, 2]])).collect() # doctest: +SKIP
[Row(0=1, 1=2)]
>>> spark.createDataFrame(rdd, "a: string, b: int").collect()
[Row(a=u'Alice', b=1)]
>>> rdd = rdd.map(lambda row: row[1])
>>> spark.createDataFrame(rdd, "int").collect()
[Row(value=1)]
>>> spark.createDataFrame(rdd, "boolean").collect() # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
Py4JJavaError: ...
"""
if isinstance(data, DataFrame):
raise TypeError("data is already a DataFrame")
if isinstance(schema, basestring):
schema = _parse_datatype_string(schema)
try:
import pandas
has_pandas = True
except Exception:
has_pandas = False
if has_pandas and isinstance(data, pandas.DataFrame):
if schema is None:
schema = [str(x) for x in data.columns]
data = [r.tolist() for r in data.to_records(index=False)]
verify_func = _verify_type if verifySchema else lambda _, t: True
if isinstance(schema, StructType):
def prepare(obj):
verify_func(obj, schema)
return obj
elif isinstance(schema, DataType):
dataType = schema
schema = StructType().add("value", schema)
def prepare(obj):
verify_func(obj, dataType)
return obj,
else:
if isinstance(schema, list):
schema = [x.encode('utf-8') if not isinstance(x, str) else x for x in schema]
prepare = lambda obj: obj
if isinstance(data, RDD):
rdd, schema = self._createFromRDD(data.map(prepare), schema, samplingRatio)
else:
rdd, schema = self._createFromLocal(map(prepare, data), schema)
jrdd = self._jvm.SerDeUtil.toJavaArray(rdd._to_java_object_rdd())
jdf = self._jsparkSession.applySchemaToPythonRDD(jrdd.rdd(), schema.json())
df = DataFrame(jdf, self._wrapped)
df._schema = schema
return df
@ignore_unicode_prefix
@since(2.0)
def sql(self, sqlQuery):
"""Returns a :class:`DataFrame` representing the result of the given query.
:return: :class:`DataFrame`
>>> df.createOrReplaceTempView("table1")
>>> df2 = spark.sql("SELECT field1 AS f1, field2 as f2 from table1")
>>> df2.collect()
[Row(f1=1, f2=u'row1'), Row(f1=2, f2=u'row2'), Row(f1=3, f2=u'row3')]
"""
return DataFrame(self._jsparkSession.sql(sqlQuery), self._wrapped)
@since(2.0)
def table(self, tableName):
"""Returns the specified table as a :class:`DataFrame`.
:return: :class:`DataFrame`
>>> df.createOrReplaceTempView("table1")
>>> df2 = spark.table("table1")
>>> sorted(df.collect()) == sorted(df2.collect())
True
"""
return DataFrame(self._jsparkSession.table(tableName), self._wrapped)
@property
@since(2.0)
def read(self):
"""
Returns a :class:`DataFrameReader` that can be used to read data
in as a :class:`DataFrame`.
:return: :class:`DataFrameReader`
"""
return DataFrameReader(self._wrapped)
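    # Illustrative sketch (not part of the original source): loading data through
    # the returned reader; the paths are hypothetical.
    #
    #   df = spark.read.json("examples/src/main/resources/people.json")
    #   df = spark.read.format("csv").option("header", "true").load("/tmp/data.csv")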
@property
@since(2.0)
def readStream(self):
"""
Returns a :class:`DataStreamReader` that can be used to read data streams
as a streaming :class:`DataFrame`.
.. note:: Experimental.
:return: :class:`DataStreamReader`
"""
return DataStreamReader(self._wrapped)
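    # Illustrative sketch (not part of the original source): building a streaming
    # DataFrame from a socket source; host and port values are hypothetical.
    #
    #   lines = spark.readStream.format("socket") \
    #       .option("host", "localhost").option("port", 9999).load()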
@property
@since(2.0)
def streams(self):
"""Returns a :class:`StreamingQueryManager` that allows managing all the
        :class:`StreamingQuery` instances active on this context.
.. note:: Experimental.
:return: :class:`StreamingQueryManager`
"""
from pyspark.sql.streaming import StreamingQueryManager
return StreamingQueryManager(self._jsparkSession.streams())
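    # Illustrative sketch (not part of the original source): inspecting active
    # streaming queries; `query` is assumed to come from a writeStream.start() call.
    #
    #   [q.name for q in spark.streams.active]
    #   spark.streams.get(query.id).awaitTermination(timeout=10)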
@since(2.0)
def stop(self):
"""Stop the underlying :class:`SparkContext`.
"""
self._sc.stop()
SparkSession._instantiatedSession = None
@since(2.0)
def __enter__(self):
"""
Enable 'with SparkSession.builder.(...).getOrCreate() as session: app' syntax.
"""
return self
@since(2.0)
def __exit__(self, exc_type, exc_val, exc_tb):
"""
Enable 'with SparkSession.builder.(...).getOrCreate() as session: app' syntax.
Specifically stop the SparkSession on exit of the with block.
"""
self.stop()
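    # Illustrative sketch (not part of the original source): using the session as a
    # context manager so stop() is called automatically when the block exits.
    #
    #   with SparkSession.builder.master("local").appName("demo").getOrCreate() as session:
    #       session.range(5).count()
    #   # the underlying SparkContext is stopped here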
def _test():
import os
import doctest
from pyspark.context import SparkContext
from pyspark.sql import Row
import pyspark.sql.session
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.sql.session.__dict__.copy()
sc = SparkContext('local[4]', 'PythonTest')
globs['sc'] = sc
globs['spark'] = SparkSession(sc)
globs['rdd'] = rdd = sc.parallelize(
[Row(field1=1, field2="row1"),
Row(field1=2, field2="row2"),
Row(field1=3, field2="row3")])
globs['df'] = rdd.toDF()
(failure_count, test_count) = doctest.testmod(
pyspark.sql.session, globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
globs['sc'].stop()
if failure_count:
exit(-1)
if __name__ == "__main__":
_test()
| 38.833846 | 100 | 0.598605 | [
"Apache-2.0"
] | DislabNJU/Spark | python/pyspark/sql/session.py | 25,242 | Python |
""" director subsystem's configuration
- config-file schema
- settings
"""
from typing import Dict
import trafaret as T
from aiohttp import ClientSession, web
from yarl import URL
from servicelib.application_keys import APP_CLIENT_SESSION_KEY, APP_CONFIG_KEY
APP_DIRECTOR_API_KEY = __name__ + ".director_api"
CONFIG_SECTION_NAME = "director"
schema = T.Dict(
{
T.Key("enabled", default=True, optional=True): T.Bool(),
T.Key("host", default="director",): T.String(),
T.Key("port", default=8001): T.ToInt(),
T.Key("version", default="v0"): T.Regexp(
regexp=r"^v\d+"
        ),  # director API version basepath
}
)
def build_api_url(config: Dict) -> URL:
api_baseurl = URL.build(
scheme="http", host=config["host"], port=config["port"]
).with_path(config["version"])
return api_baseurl
def get_config(app: web.Application) -> Dict:
return app[APP_CONFIG_KEY][CONFIG_SECTION_NAME]
def get_client_session(app: web.Application) -> ClientSession:
return app[APP_CLIENT_SESSION_KEY]
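# Illustrative sketch (not part of the original source): how these helpers might be
# wired together during application setup; the `app` instance and a config dict
# already validated against `schema` are assumed.
#
#   cfg = get_config(app)
#   app[APP_DIRECTOR_API_KEY] = build_api_url(cfg)   # e.g. http://director:8001/v0
#   session = get_client_session(app)                # shared aiohttp ClientSession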
| 24.976744 | 78 | 0.679702 | [
"MIT"
] | KZzizzle/osparc-simcore | services/web/server/src/simcore_service_webserver/director/config.py | 1,074 | Python |