[Dataset table header: columns are repo_name | path | copies | size | content | license]
=== annarev/tensorflow | tensorflow/python/keras/distribute/keras_rnn_model_correctness_test.py | copies: 4 | size: 5523 ===
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Correctness tests for tf.keras RNN models using DistributionStrategy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python import keras
from tensorflow.python import tf2
from tensorflow.python.distribute import central_storage_strategy
from tensorflow.python.distribute import combinations as ds_combinations
from tensorflow.python.distribute import multi_process_runner
from tensorflow.python.distribute import tpu_strategy
from tensorflow.python.eager import context
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.distribute import keras_correctness_test_base
from tensorflow.python.keras.layers import recurrent as rnn_v1
from tensorflow.python.keras.layers import recurrent_v2 as rnn_v2
from tensorflow.python.keras.mixed_precision import policy
from tensorflow.python.keras.optimizer_v2 import gradient_descent as gradient_descent_keras
class _DistributionStrategyRnnModelCorrectnessTest(
keras_correctness_test_base
.TestDistributionStrategyEmbeddingModelCorrectnessBase):
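# Subclasses override _get_layer_class() to supply the RNN layer under test
# (GRU or LSTM; the v2 implementation when TF2 behavior is enabled, otherwise v1).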
def _get_layer_class(self):
raise NotImplementedError
def get_model(self,
max_words=10,
initial_weights=None,
distribution=None,
input_shapes=None):
del input_shapes
rnn_cls = self._get_layer_class()
with keras_correctness_test_base.MaybeDistributionScope(distribution):
word_ids = keras.layers.Input(
shape=(max_words,), dtype=np.int32, name='words')
word_embed = keras.layers.Embedding(input_dim=20, output_dim=10)(word_ids)
rnn_embed = rnn_cls(units=4, return_sequences=False)(word_embed)
dense_output = keras.layers.Dense(2)(rnn_embed)
preds = keras.layers.Softmax(dtype='float32')(dense_output)
model = keras.Model(inputs=[word_ids], outputs=[preds])
if initial_weights:
model.set_weights(initial_weights)
optimizer_fn = gradient_descent_keras.SGD
model.compile(
optimizer=optimizer_fn(learning_rate=0.1),
loss='sparse_categorical_crossentropy',
metrics=['sparse_categorical_accuracy'])
return model
@testing_utils.run_all_without_tensor_float_32(
'Uses Dense layers, which call matmul')
class DistributionStrategyGruModelCorrectnessTest(
_DistributionStrategyRnnModelCorrectnessTest):
def _get_layer_class(self):
if tf2.enabled():
if not context.executing_eagerly():
self.skipTest("GRU v2 and legacy graph mode don't work together.")
return rnn_v2.GRU
else:
return rnn_v1.GRU
@ds_combinations.generate(
keras_correctness_test_base.test_combinations_for_embedding_model() +
keras_correctness_test_base.multi_worker_mirrored_eager())
def test_gru_model_correctness(self, distribution, use_numpy,
use_validation_data):
self.run_correctness_test(distribution, use_numpy, use_validation_data)
@testing_utils.run_all_without_tensor_float_32(
'Uses Dense layers, which call matmul')
class DistributionStrategyLstmModelCorrectnessTest(
_DistributionStrategyRnnModelCorrectnessTest):
def _get_layer_class(self):
if tf2.enabled():
if not context.executing_eagerly():
self.skipTest("LSTM v2 and legacy graph mode don't work together.")
return rnn_v2.LSTM
else:
return rnn_v1.LSTM
@ds_combinations.generate(
keras_correctness_test_base.test_combinations_for_embedding_model() +
keras_correctness_test_base.multi_worker_mirrored_eager())
def test_lstm_model_correctness(self, distribution, use_numpy,
use_validation_data):
self.run_correctness_test(distribution, use_numpy, use_validation_data)
@ds_combinations.generate(
keras_correctness_test_base.test_combinations_for_embedding_model() +
keras_correctness_test_base.multi_worker_mirrored_eager())
@testing_utils.enable_v2_dtype_behavior
def test_lstm_model_correctness_mixed_precision(self, distribution, use_numpy,
use_validation_data):
if isinstance(distribution,
(central_storage_strategy.CentralStorageStrategy,
central_storage_strategy.CentralStorageStrategyV1)):
self.skipTest('CentralStorageStrategy is not supported by '
'mixed precision.')
if isinstance(distribution,
(tpu_strategy.TPUStrategy, tpu_strategy.TPUStrategyV1)):
policy_name = 'mixed_bfloat16'
else:
policy_name = 'mixed_float16'
with policy.policy_scope(policy_name):
self.run_correctness_test(distribution, use_numpy, use_validation_data)
if __name__ == '__main__':
multi_process_runner.test_main()
=== license: apache-2.0 ===
=== infobloxopen/infoblox-netmri | infoblox_netmri/api/broker/v3_3_0/infra_device_broker.py | copies: 16 | size: 210714 ===
from ..broker import Broker
class InfraDeviceBroker(Broker):
controller = "infra_devices"
def index(self, **kwargs):
"""Lists the available infra devices. Any of the inputs listed may be be used to narrow the list; other inputs will be ignored. Of the various ways to query lists, using this method is most efficient.
**Inputs**
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DeviceFirstOccurrenceTime: The date/time that this device was first seen on the network.
:type DeviceFirstOccurrenceTime: DateTime
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceFirstOccurrenceTime: The date/time that this device was first seen on the network.
:type DeviceFirstOccurrenceTime: Array of DateTime
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DeviceID: An internal NetMRI identifier for the device.
:type DeviceID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceID: An internal NetMRI identifier for the device.
:type DeviceID: Array of Integer
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DeviceIPDotted: The management IP address of the device, in dotted (or colon-delimited for IPv6) format.
:type DeviceIPDotted: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceIPDotted: The management IP address of the device, in dotted (or colon-delimited for IPv6) format.
:type DeviceIPDotted: Array of String
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DeviceIPNumeric: The numerical value of the device IP address.
:type DeviceIPNumeric: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceIPNumeric: The numerical value of the device IP address.
:type DeviceIPNumeric: Array of Integer
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DeviceName: The NetMRI name of the device; this will be either the same as DeviceSysName or DeviceDNSName, depending on your NetMRI configuration.
:type DeviceName: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceName: The NetMRI name of the device; this will be either the same as DeviceSysName or DeviceDNSName, depending on your NetMRI configuration.
:type DeviceName: Array of String
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DeviceRebootTime: The date/time this device was last rebooted.
:type DeviceRebootTime: DateTime
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceRebootTime: The date/time this device was last rebooted.
:type DeviceRebootTime: Array of DateTime
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DeviceType: The NetMRI-determined device type.
:type DeviceType: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceType: The NetMRI-determined device type.
:type DeviceType: Array of String
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param ParentDeviceID: The internal NetMRI identifier for the device containing this virtual device.
:type ParentDeviceID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param ParentDeviceID: The internal NetMRI identifier for the device containing this virtual device.
:type ParentDeviceID: Array of Integer
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param VirtualNetworkID: The internal NetMRI identifier of the Virtual Network to which the management address of this device belongs.
:type VirtualNetworkID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param VirtualNetworkID: The internal NetMRI identifier of the Virtual Network to which the management address of this device belongs.
:type VirtualNetworkID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceGroupID: The internal NetMRI identifier of the device groups to which to limit the results.
:type DeviceGroupID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param timestamp: The data returned will represent the infra devices as of this date and time. If omitted, the result will indicate the most recently collected data.
:type timestamp: DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param methods: A list of infra device methods. The listed methods will be called on each infra device returned and included in the output. Available methods are: DeviceCommunitySecure, DeviceRank, DeviceCommunity, DeviceFirstOccurrence, group, parent_device, gateway_device, running_config, running_config_text, saved_config, saved_config_text, running_config_diff, saved_config_diff, virtual_child_count, asset_type, device_setting, data_collection_status, control_capabilities, network_name, interfaces, issue_details, device_routes, device_physicals, if_addrs, config_revisions, detected_changes, device_ports, privileged_polling, DeviceStartTime, DeviceEndTime, cap_description_ind, cap_admin_status_ind, cap_vlan_assignment_ind, cap_voice_vlan_ind, cap_net_provisioning_ind, cap_net_vlan_provisioning_ind, cap_net_deprovisioning_ind, cap_description_na_reason, cap_admin_status_na_reason, cap_vlan_assignment_na_reason, cap_voice_vlan_na_reason, cap_net_provisioning_na_reason, cap_net_vlan_provisioning_na_reason, cap_net_deprovisioning_na_reason, chassis_serial_number, available_mgmt_ips, rawSysDescr, rawSysVersion, rawSysModel, data_source, device.
:type methods: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: parent_device, device_setting, data_collection_status, interfaces, issue_details, device_routes, device_physicals, if_addrs, config_revisions, detected_changes, device_ports, data_source, device.
:type include: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit parameter for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` DeviceID
:param sort: The data field(s) to use for sorting the output. Default is DeviceID. Valid values are DataSourceID, DeviceID, InfraDeviceStartTime, InfraDeviceEndTime, InfraDeviceChangedCols, DeviceIPDotted, DeviceIPNumeric, DeviceName, DeviceType, DeviceAssurance, DeviceVendor, DeviceModel, DeviceVersion, DeviceSysName, DeviceSysDescr, DeviceSysLocation, DeviceSysContact, DeviceDNSName, DeviceConfigTimestamp, DeviceFirstOccurrenceTime, InfraDeviceTimestamp, DeviceSAAVersion, DeviceRebootTime, DeviceRunningConfigLastChangedTime, DeviceSavedConfigLastChangedTime, DeviceConfigLastCheckedTime, DevicePolicyScheduleMode, DeviceAddlInfo, DeviceMAC, ParentDeviceID, DeviceContextName, DeviceNetBIOSName, DeviceOUI, MgmtServerDeviceID, NetworkDeviceInd, RoutingInd, SwitchingInd, VirtualInd, FilteringInd, FilterProvisionData, VirtualNetworkID, VirtualNetworkingInd, DeviceUniqueKey.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each InfraDevice. Valid values are DataSourceID, DeviceID, InfraDeviceStartTime, InfraDeviceEndTime, InfraDeviceChangedCols, DeviceIPDotted, DeviceIPNumeric, DeviceName, DeviceType, DeviceAssurance, DeviceVendor, DeviceModel, DeviceVersion, DeviceSysName, DeviceSysDescr, DeviceSysLocation, DeviceSysContact, DeviceDNSName, DeviceConfigTimestamp, DeviceFirstOccurrenceTime, InfraDeviceTimestamp, DeviceSAAVersion, DeviceRebootTime, DeviceRunningConfigLastChangedTime, DeviceSavedConfigLastChangedTime, DeviceConfigLastCheckedTime, DevicePolicyScheduleMode, DeviceAddlInfo, DeviceMAC, ParentDeviceID, DeviceContextName, DeviceNetBIOSName, DeviceOUI, MgmtServerDeviceID, NetworkDeviceInd, RoutingInd, SwitchingInd, VirtualInd, FilteringInd, FilterProvisionData, VirtualNetworkID, VirtualNetworkingInd, DeviceUniqueKey. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param NetworkID: The network id to which results would be limited.
:type NetworkID: Integer
| ``api version min:`` 2.10
| ``api version max:`` None
| ``required:`` False
| ``default:`` False
:param detail_ind: A flag to indicate whether discovery times should be included or not
:type detail_ind: Boolean
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return infra_devices: An array of the InfraDevice objects that match the specified input criteria.
:rtype infra_devices: Array of InfraDevice
"""
return self.api_list_request(self._get_method_fullname("index"), kwargs)
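# Illustrative call sketch (an assumption for clarity, not part of the broker itself):
# given an authenticated InfobloxNetMRI client object named `client`, the broker is
# typically obtained via `client.get_broker('InfraDevice')`; both the `client` name
# and that helper call are assumptions here, not guaranteed by this module.
#
#   broker = client.get_broker('InfraDevice')
#   routers = broker.index(DeviceType=['Router'],   # narrow by NetMRI-determined type
#                          limit=500,               # page size (default 1000, max 10000)
#                          sort=['DeviceName'],
#                          dir=['asc'])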
def search(self, **kwargs):
"""Lists the available infra devices matching the input criteria. This method provides a more flexible search interface than the index method, but searching using this method is more demanding on the system and will not perform to the same level as the index method. The input fields listed below will be used as in the index method, to filter the result, along with the optional query string and XML filter described below.
**Inputs**
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DataSourceID: The internal NetMRI identifier for the collector NetMRI that collected this data record.
:type DataSourceID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DataSourceID: The internal NetMRI identifier for the collector NetMRI that collected this data record.
:type DataSourceID: Array of Integer
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DeviceAddlInfo: Additional information about the device; IP phones will contain the extension in this field.
:type DeviceAddlInfo: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceAddlInfo: Additional information about the device; IP phones will contain the extension in this field.
:type DeviceAddlInfo: Array of String
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DeviceAssurance: The assurance level of the device type value.
:type DeviceAssurance: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceAssurance: The assurance level of the device type value.
:type DeviceAssurance: Array of Integer
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DeviceConfigLastCheckedTime: The date/time of the last attempted retrieval of the device's configuration file.
:type DeviceConfigLastCheckedTime: DateTime
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceConfigLastCheckedTime: The date/time of the last attempted retrieval of the device's configuration file.
:type DeviceConfigLastCheckedTime: Array of DateTime
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DeviceConfigTimestamp: The date/time the configuration file was last successfully retrieved for this device.
:type DeviceConfigTimestamp: DateTime
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceConfigTimestamp: The date/time the configuration file was last successfully retrieved for this device.
:type DeviceConfigTimestamp: Array of DateTime
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DeviceContextName: The name of the virtual context of this virtual device.
:type DeviceContextName: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceContextName: The name of the virtual context of this virtual device.
:type DeviceContextName: Array of String
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DeviceDNSName: The device name as reported by DNS.
:type DeviceDNSName: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceDNSName: The device name as reported by DNS.
:type DeviceDNSName: Array of String
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DeviceFirstOccurrenceTime: The date/time that this device was first seen on the network.
:type DeviceFirstOccurrenceTime: DateTime
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceFirstOccurrenceTime: The date/time that this device was first seen on the network.
:type DeviceFirstOccurrenceTime: Array of DateTime
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DeviceID: An internal NetMRI identifier for the device.
:type DeviceID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceID: An internal NetMRI identifier for the device.
:type DeviceID: Array of Integer
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DeviceIPDotted: The management IP address of the device, in dotted (or colon-delimited for IPv6) format.
:type DeviceIPDotted: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceIPDotted: The management IP address of the device, in dotted (or colon-delimited for IPv6) format.
:type DeviceIPDotted: Array of String
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DeviceIPNumeric: The numerical value of the device IP address.
:type DeviceIPNumeric: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceIPNumeric: The numerical value of the device IP address.
:type DeviceIPNumeric: Array of Integer
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DeviceMAC: The MAC of the interface corresponding to the management IP, if available. Otherwise, it is the lowest numbered non-zero MAC for any interface on the device. If no interface records are available for the device, the lowest non-zero MAC address corresponding to the management IP address found in the global ARP table will be used.
:type DeviceMAC: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceMAC: The MAC of the interface corresponding to the management IP, if available. Otherwise, it is the lowest numbered non-zero MAC for any interface on the device. If no interface records are available for the device, the lowest non-zero MAC address corresponding to the management IP address found in the global ARP table will be used.
:type DeviceMAC: Array of String
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DeviceModel: The device model name.
:type DeviceModel: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceModel: The device model name.
:type DeviceModel: Array of String
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DeviceName: The NetMRI name of the device; this will be either the same as DeviceSysName or DeviceDNSName, depending on your NetMRI configuration.
:type DeviceName: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceName: The NetMRI name of the device; this will be either the same as DeviceSysName or DeviceDNSName, depending on your NetMRI configuration.
:type DeviceName: Array of String
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DeviceNetBIOSName: The NetBIOS name of the device.
:type DeviceNetBIOSName: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceNetBIOSName: The NetBIOS name of the device.
:type DeviceNetBIOSName: Array of String
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DeviceOUI: The NetMRI-determined device vendor using OUI.
:type DeviceOUI: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceOUI: The NetMRI-determined device vendor using OUI.
:type DeviceOUI: Array of String
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DevicePolicyScheduleMode: Not currently used.
:type DevicePolicyScheduleMode: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DevicePolicyScheduleMode: Not currently used.
:type DevicePolicyScheduleMode: Array of String
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DeviceRebootTime: The date/time this device was last rebooted.
:type DeviceRebootTime: DateTime
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceRebootTime: The date/time this device was last rebooted.
:type DeviceRebootTime: Array of DateTime
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DeviceRunningConfigLastChangedTime: The date/time, as reported by SNMP, that the device's running configuration was last changed.
:type DeviceRunningConfigLastChangedTime: DateTime
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceRunningConfigLastChangedTime: The date/time, as reported by SNMP, that the device's running configuration was last changed.
:type DeviceRunningConfigLastChangedTime: Array of DateTime
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DeviceSAAVersion: The SAA version running on this device.
:type DeviceSAAVersion: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceSAAVersion: The SAA version running on this device.
:type DeviceSAAVersion: Array of String
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DeviceSavedConfigLastChangedTime: The date/time, as reported by SNMP, that the device's saved configuration was last changed.
:type DeviceSavedConfigLastChangedTime: DateTime
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceSavedConfigLastChangedTime: The date/time, as reported by SNMP, that the device's saved configuration was last changed.
:type DeviceSavedConfigLastChangedTime: Array of DateTime
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DeviceSysContact: The Device sysContact as reported by SNMP.
:type DeviceSysContact: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceSysContact: The Device sysContact as reported by SNMP.
:type DeviceSysContact: Array of String
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DeviceSysDescr: The device sysDescr as reported by SNMP.
:type DeviceSysDescr: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceSysDescr: The device sysDescr as reported by SNMP.
:type DeviceSysDescr: Array of String
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DeviceSysLocation: The device sysLocation as reported by SNMP.
:type DeviceSysLocation: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceSysLocation: The device sysLocation as reported by SNMP.
:type DeviceSysLocation: Array of String
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DeviceSysName: The device name as reported by SNMP.
:type DeviceSysName: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceSysName: The device name as reported by SNMP.
:type DeviceSysName: Array of String
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DeviceType: The NetMRI-determined device type.
:type DeviceType: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceType: The NetMRI-determined device type.
:type DeviceType: Array of String
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DeviceUniqueKey: Unique key which allows detecting duplicates across different Virtual Networks.
:type DeviceUniqueKey: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceUniqueKey: Unique key which allows detecting duplicates across different Virtual Networks.
:type DeviceUniqueKey: Array of String
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DeviceVendor: The device vendor name.
:type DeviceVendor: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceVendor: The device vendor name.
:type DeviceVendor: Array of String
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DeviceVersion: The device OS version.
:type DeviceVersion: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceVersion: The device OS version.
:type DeviceVersion: Array of String
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param FilterProvisionData: Internal data - do not modify, may change without warning.
:type FilterProvisionData: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param FilterProvisionData: Internal data - do not modify, may change without warning.
:type FilterProvisionData: Array of String
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param FilteringInd: A flag indicating whether this device is eligible for Security Device Controller
:type FilteringInd: Boolean
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param FilteringInd: A flag indicating whether this device is eligible for Security Device Controller
:type FilteringInd: Array of Boolean
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param InfraDeviceChangedCols: The fields that changed between this revision of the record and the previous revision.
:type InfraDeviceChangedCols: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param InfraDeviceChangedCols: The fields that changed between this revision of the record and the previous revision.
:type InfraDeviceChangedCols: Array of String
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param InfraDeviceEndTime: The ending effective time of this revision of this record, or empty if still in effect.
:type InfraDeviceEndTime: DateTime
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param InfraDeviceEndTime: The ending effective time of this revision of this record, or empty if still in effect.
:type InfraDeviceEndTime: Array of DateTime
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param InfraDeviceStartTime: The starting effective time of this revision of the record.
:type InfraDeviceStartTime: DateTime
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param InfraDeviceStartTime: The starting effective time of this revision of the record.
:type InfraDeviceStartTime: Array of DateTime
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param InfraDeviceTimestamp: The date and time this record was collected.
:type InfraDeviceTimestamp: DateTime
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param InfraDeviceTimestamp: The date and time this record was collected.
:type InfraDeviceTimestamp: Array of DateTime
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param MgmtServerDeviceID: The Device ID of the management server for the device
:type MgmtServerDeviceID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param MgmtServerDeviceID: The Device ID of the management server for the device
:type MgmtServerDeviceID: Array of Integer
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param NetworkDeviceInd: A flag indicating whether this device is a network device or an end host.
:type NetworkDeviceInd: Boolean
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param NetworkDeviceInd: A flag indicating whether this device is a network device or an end host.
:type NetworkDeviceInd: Array of Boolean
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param ParentDeviceID: The internal NetMRI identifier for the device containing this virtual device.
:type ParentDeviceID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param ParentDeviceID: The internal NetMRI identifier for the device containing this virtual device.
:type ParentDeviceID: Array of Integer
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param RoutingInd: A flag indicating whether this device is configured with any routing capability and whether a routing table was retrieved from this device.
:type RoutingInd: Boolean
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param RoutingInd: A flag indicating whether this device is configured with any routing capability and whether a routing table was retrieved from this device.
:type RoutingInd: Array of Boolean
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param SwitchingInd: A flag indicating whether a switch port forwarding table was retrieved from this device.
:type SwitchingInd: Boolean
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param SwitchingInd: A flag indicating whether a switch port forwarding table was retrieved from this device.
:type SwitchingInd: Array of Boolean
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param VirtualInd: A flag indicating if the source device is a virtual device.
:type VirtualInd: Boolean
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param VirtualInd: A flag indicating if the source device is a virtual device.
:type VirtualInd: Array of Boolean
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param VirtualNetworkID: The internal NetMRI identifier of the Virtual Network to which the management address of this device belongs.
:type VirtualNetworkID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param VirtualNetworkID: The internal NetMRI identifier of the Virtual Network to which the management address of this device belongs.
:type VirtualNetworkID: Array of Integer
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param VirtualNetworkingInd: Set to null, 0 or 1. 0 indicates this is not a VRF-aware device; 1 indicates it is VRF-aware.
:type VirtualNetworkingInd: Boolean
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param VirtualNetworkingInd: Set to null, 0 or 1. 0 indicates this is not a VRF-aware device; 1 indicates it is VRF-aware.
:type VirtualNetworkingInd: Array of Boolean
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceGroupID: The internal NetMRI identifier of the device groups to which to limit the results.
:type DeviceGroupID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param timestamp: The data returned will represent the infra devices as of this date and time. If omitted, the result will indicate the most recently collected data.
:type timestamp: DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param methods: A list of infra device methods. The listed methods will be called on each infra device returned and included in the output. Available methods are: DeviceCommunitySecure, DeviceRank, DeviceCommunity, DeviceFirstOccurrence, group, parent_device, gateway_device, running_config, running_config_text, saved_config, saved_config_text, running_config_diff, saved_config_diff, virtual_child_count, asset_type, device_setting, data_collection_status, control_capabilities, network_name, interfaces, issue_details, device_routes, device_physicals, if_addrs, config_revisions, detected_changes, device_ports, privileged_polling, DeviceStartTime, DeviceEndTime, cap_description_ind, cap_admin_status_ind, cap_vlan_assignment_ind, cap_voice_vlan_ind, cap_net_provisioning_ind, cap_net_vlan_provisioning_ind, cap_net_deprovisioning_ind, cap_description_na_reason, cap_admin_status_na_reason, cap_vlan_assignment_na_reason, cap_voice_vlan_na_reason, cap_net_provisioning_na_reason, cap_net_vlan_provisioning_na_reason, cap_net_deprovisioning_na_reason, chassis_serial_number, available_mgmt_ips, rawSysDescr, rawSysVersion, rawSysModel, data_source, device.
:type methods: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: parent_device, device_setting, data_collection_status, interfaces, issue_details, device_routes, device_physicals, if_addrs, config_revisions, detected_changes, device_ports, data_source, device.
:type include: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit parameter for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` DeviceID
:param sort: The data field(s) to use for sorting the output. Default is DeviceID. Valid values are DataSourceID, DeviceID, InfraDeviceStartTime, InfraDeviceEndTime, InfraDeviceChangedCols, DeviceIPDotted, DeviceIPNumeric, DeviceName, DeviceType, DeviceAssurance, DeviceVendor, DeviceModel, DeviceVersion, DeviceSysName, DeviceSysDescr, DeviceSysLocation, DeviceSysContact, DeviceDNSName, DeviceConfigTimestamp, DeviceFirstOccurrenceTime, InfraDeviceTimestamp, DeviceSAAVersion, DeviceRebootTime, DeviceRunningConfigLastChangedTime, DeviceSavedConfigLastChangedTime, DeviceConfigLastCheckedTime, DevicePolicyScheduleMode, DeviceAddlInfo, DeviceMAC, ParentDeviceID, DeviceContextName, DeviceNetBIOSName, DeviceOUI, MgmtServerDeviceID, NetworkDeviceInd, RoutingInd, SwitchingInd, VirtualInd, FilteringInd, FilterProvisionData, VirtualNetworkID, VirtualNetworkingInd, DeviceUniqueKey.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each InfraDevice. Valid values are DataSourceID, DeviceID, InfraDeviceStartTime, InfraDeviceEndTime, InfraDeviceChangedCols, DeviceIPDotted, DeviceIPNumeric, DeviceName, DeviceType, DeviceAssurance, DeviceVendor, DeviceModel, DeviceVersion, DeviceSysName, DeviceSysDescr, DeviceSysLocation, DeviceSysContact, DeviceDNSName, DeviceConfigTimestamp, DeviceFirstOccurrenceTime, InfraDeviceTimestamp, DeviceSAAVersion, DeviceRebootTime, DeviceRunningConfigLastChangedTime, DeviceSavedConfigLastChangedTime, DeviceConfigLastCheckedTime, DevicePolicyScheduleMode, DeviceAddlInfo, DeviceMAC, ParentDeviceID, DeviceContextName, DeviceNetBIOSName, DeviceOUI, MgmtServerDeviceID, NetworkDeviceInd, RoutingInd, SwitchingInd, VirtualInd, FilteringInd, FilterProvisionData, VirtualNetworkID, VirtualNetworkingInd, DeviceUniqueKey. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param NetworkID: The network id to which results would be limited.
:type NetworkID: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param query: This value will be matched against infra devices, looking to see if one or more of the listed attributes contain the passed value. You may also surround the value with '/' and '/' to perform a regular expression search rather than a containment operation. Any record that matches will be returned. The attributes searched are: DataSourceID, DeviceAddlInfo, DeviceAssurance, DeviceConfigLastCheckedTime, DeviceConfigTimestamp, DeviceContextName, DeviceDNSName, DeviceFirstOccurrenceTime, DeviceID, DeviceIPDotted, DeviceIPNumeric, DeviceMAC, DeviceModel, DeviceName, DeviceNetBIOSName, DeviceOUI, DevicePolicyScheduleMode, DeviceRebootTime, DeviceRunningConfigLastChangedTime, DeviceSAAVersion, DeviceSavedConfigLastChangedTime, DeviceSysContact, DeviceSysDescr, DeviceSysLocation, DeviceSysName, DeviceType, DeviceUniqueKey, DeviceVendor, DeviceVersion, FilterProvisionData, FilteringInd, InfraDeviceChangedCols, InfraDeviceEndTime, InfraDeviceStartTime, InfraDeviceTimestamp, MgmtServerDeviceID, NetworkDeviceInd, ParentDeviceID, RoutingInd, SwitchingInd, VirtualInd, VirtualNetworkID, VirtualNetworkingInd.
:type query: String
| ``api version min:`` 2.3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param xml_filter: A SetFilter XML structure to further refine the search. The SetFilter will be applied AFTER any search query or field values, but before any limit options. The limit and pagination will be enforced after the filter. Note that this kind of filter may be costly and inefficient if it is not combined with database-level filtering.
:type xml_filter: String
| ``api version min:`` 2.10
| ``api version max:`` None
| ``required:`` False
| ``default:`` False
:param detail_ind: A flag to indicate whether discovery times should be included or not
:type detail_ind: Boolean
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return infra_devices: An array of the InfraDevice objects that match the specified input criteria.
:rtype infra_devices: Array of InfraDevice
"""
return self.api_list_request(self._get_method_fullname("search"), kwargs)
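# Illustrative call sketch (an assumption for clarity, not part of the broker itself):
# search() accepts the same field filters as index() plus a free-form `query` string;
# per the docstring above, wrapping the value in '/' performs a regular-expression
# match instead of a containment match. `broker` is assumed to be an InfraDeviceBroker
# obtained as in the index() sketch.
#
#   matches = broker.search(query='/^core-/',        # regex against the searchable fields
#                           DeviceVendor=['Cisco'],  # additional field filter
#                           limit=100)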
def find(self, **kwargs):
"""Lists the available infra devices matching the input specification. This provides the most flexible search specification of all the query mechanisms, enabling searching using comparison operations other than equality. However, it is more complex to use and will not perform as efficiently as the index or search methods. In the input descriptions below, 'field names' refers to the following fields: DataSourceID, DeviceAddlInfo, DeviceAssurance, DeviceConfigLastCheckedTime, DeviceConfigTimestamp, DeviceContextName, DeviceDNSName, DeviceFirstOccurrenceTime, DeviceID, DeviceIPDotted, DeviceIPNumeric, DeviceMAC, DeviceModel, DeviceName, DeviceNetBIOSName, DeviceOUI, DevicePolicyScheduleMode, DeviceRebootTime, DeviceRunningConfigLastChangedTime, DeviceSAAVersion, DeviceSavedConfigLastChangedTime, DeviceSysContact, DeviceSysDescr, DeviceSysLocation, DeviceSysName, DeviceType, DeviceUniqueKey, DeviceVendor, DeviceVersion, FilterProvisionData, FilteringInd, InfraDeviceChangedCols, InfraDeviceEndTime, InfraDeviceStartTime, InfraDeviceTimestamp, MgmtServerDeviceID, NetworkDeviceInd, ParentDeviceID, RoutingInd, SwitchingInd, VirtualInd, VirtualNetworkID, VirtualNetworkingInd.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DataSourceID: The operator to apply to the field DataSourceID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DataSourceID: The internal NetMRI identifier for the collector NetMRI that collected this data record. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DataSourceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DataSourceID: If op_DataSourceID is specified, the field named in this input will be compared to the value in DataSourceID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DataSourceID must be specified if op_DataSourceID is specified.
:type val_f_DataSourceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DataSourceID: If op_DataSourceID is specified, this value will be compared to the value in DataSourceID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DataSourceID must be specified if op_DataSourceID is specified.
:type val_c_DataSourceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DeviceAddlInfo: The operator to apply to the field DeviceAddlInfo. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DeviceAddlInfo: Additional information about the device; IP phones will contain the extension in this field. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DeviceAddlInfo: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DeviceAddlInfo: If op_DeviceAddlInfo is specified, the field named in this input will be compared to the value in DeviceAddlInfo using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DeviceAddlInfo must be specified if op_DeviceAddlInfo is specified.
:type val_f_DeviceAddlInfo: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DeviceAddlInfo: If op_DeviceAddlInfo is specified, this value will be compared to the value in DeviceAddlInfo using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DeviceAddlInfo must be specified if op_DeviceAddlInfo is specified.
:type val_c_DeviceAddlInfo: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DeviceAssurance: The operator to apply to the field DeviceAssurance. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DeviceAssurance: The assurance level of the device type value. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DeviceAssurance: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DeviceAssurance: If op_DeviceAssurance is specified, the field named in this input will be compared to the value in DeviceAssurance using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DeviceAssurance must be specified if op_DeviceAssurance is specified.
:type val_f_DeviceAssurance: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DeviceAssurance: If op_DeviceAssurance is specified, this value will be compared to the value in DeviceAssurance using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DeviceAssurance must be specified if op_DeviceAssurance is specified.
:type val_c_DeviceAssurance: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DeviceConfigLastCheckedTime: The operator to apply to the field DeviceConfigLastCheckedTime. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DeviceConfigLastCheckedTime: The date/time of the last attempted retrieval of the device's configuration file. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DeviceConfigLastCheckedTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DeviceConfigLastCheckedTime: If op_DeviceConfigLastCheckedTime is specified, the field named in this input will be compared to the value in DeviceConfigLastCheckedTime using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DeviceConfigLastCheckedTime must be specified if op_DeviceConfigLastCheckedTime is specified.
:type val_f_DeviceConfigLastCheckedTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DeviceConfigLastCheckedTime: If op_DeviceConfigLastCheckedTime is specified, this value will be compared to the value in DeviceConfigLastCheckedTime using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DeviceConfigLastCheckedTime must be specified if op_DeviceConfigLastCheckedTime is specified.
:type val_c_DeviceConfigLastCheckedTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DeviceConfigTimestamp: The operator to apply to the field DeviceConfigTimestamp. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DeviceConfigTimestamp: The date/time the configuration file was last successfully retrieved for this device. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DeviceConfigTimestamp: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DeviceConfigTimestamp: If op_DeviceConfigTimestamp is specified, the field named in this input will be compared to the value in DeviceConfigTimestamp using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DeviceConfigTimestamp must be specified if op_DeviceConfigTimestamp is specified.
:type val_f_DeviceConfigTimestamp: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DeviceConfigTimestamp: If op_DeviceConfigTimestamp is specified, this value will be compared to the value in DeviceConfigTimestamp using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DeviceConfigTimestamp must be specified if op_DeviceConfigTimestamp is specified.
:type val_c_DeviceConfigTimestamp: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DeviceContextName: The operator to apply to the field DeviceContextName. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DeviceContextName: The name of the virtual context of this virtual device. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DeviceContextName: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DeviceContextName: If op_DeviceContextName is specified, the field named in this input will be compared to the value in DeviceContextName using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DeviceContextName must be specified if op_DeviceContextName is specified.
:type val_f_DeviceContextName: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DeviceContextName: If op_DeviceContextName is specified, this value will be compared to the value in DeviceContextName using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DeviceContextName must be specified if op_DeviceContextName is specified.
:type val_c_DeviceContextName: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DeviceDNSName: The operator to apply to the field DeviceDNSName. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DeviceDNSName: The device name as reported by DNS. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DeviceDNSName: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DeviceDNSName: If op_DeviceDNSName is specified, the field named in this input will be compared to the value in DeviceDNSName using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DeviceDNSName must be specified if op_DeviceDNSName is specified.
:type val_f_DeviceDNSName: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DeviceDNSName: If op_DeviceDNSName is specified, this value will be compared to the value in DeviceDNSName using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DeviceDNSName must be specified if op_DeviceDNSName is specified.
:type val_c_DeviceDNSName: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DeviceFirstOccurrenceTime: The operator to apply to the field DeviceFirstOccurrenceTime. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DeviceFirstOccurrenceTime: The date/time that this device was first seen on the network. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DeviceFirstOccurrenceTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DeviceFirstOccurrenceTime: If op_DeviceFirstOccurrenceTime is specified, the field named in this input will be compared to the value in DeviceFirstOccurrenceTime using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DeviceFirstOccurrenceTime must be specified if op_DeviceFirstOccurrenceTime is specified.
:type val_f_DeviceFirstOccurrenceTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DeviceFirstOccurrenceTime: If op_DeviceFirstOccurrenceTime is specified, this value will be compared to the value in DeviceFirstOccurrenceTime using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DeviceFirstOccurrenceTime must be specified if op_DeviceFirstOccurrenceTime is specified.
:type val_c_DeviceFirstOccurrenceTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
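
The ``between`` operator mentioned throughout this reference takes its range endpoints as a comma-delimited string with an even number of values. A hedged sketch of a date-range filter on DeviceFirstOccurrenceTime (connection details assumed, as in the earlier sketch)::

    from infoblox_netmri.client import InfobloxNetMRI  # assumed import path

    client = InfobloxNetMRI(host="netmri.example.com",
                            username="admin", password="secret")
    broker = client.get_broker("InfraDevice")  # assumed broker accessor

    # Devices first seen during the first half of 2023: with "between" the
    # comma-delimited value is treated as an array of range endpoints.
    devices = broker.find(
        op_DeviceFirstOccurrenceTime="between",
        val_c_DeviceFirstOccurrenceTime="2023-01-01 00:00:00,2023-06-30 23:59:59",
    )
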
:param op_DeviceID: The operator to apply to the field DeviceID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DeviceID: An internal NetMRI identifier for the device. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DeviceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DeviceID: If op_DeviceID is specified, the field named in this input will be compared to the value in DeviceID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DeviceID must be specified if op_DeviceID is specified.
:type val_f_DeviceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DeviceID: If op_DeviceID is specified, this value will be compared to the value in DeviceID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DeviceID must be specified if op_DeviceID is specified.
:type val_c_DeviceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DeviceIPDotted: The operator to apply to the field DeviceIPDotted. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DeviceIPDotted: The management IP address of the device, in dotted (or colon-delimited for IPv6) format. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DeviceIPDotted: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DeviceIPDotted: If op_DeviceIPDotted is specified, the field named in this input will be compared to the value in DeviceIPDotted using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DeviceIPDotted must be specified if op_DeviceIPDotted is specified.
:type val_f_DeviceIPDotted: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DeviceIPDotted: If op_DeviceIPDotted is specified, this value will be compared to the value in DeviceIPDotted using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DeviceIPDotted must be specified if op_DeviceIPDotted is specified.
:type val_c_DeviceIPDotted: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DeviceIPNumeric: The operator to apply to the field DeviceIPNumeric. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DeviceIPNumeric: The numerical value of the device IP address. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DeviceIPNumeric: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DeviceIPNumeric: If op_DeviceIPNumeric is specified, the field named in this input will be compared to the value in DeviceIPNumeric using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DeviceIPNumeric must be specified if op_DeviceIPNumeric is specified.
:type val_f_DeviceIPNumeric: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DeviceIPNumeric: If op_DeviceIPNumeric is specified, this value will be compared to the value in DeviceIPNumeric using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DeviceIPNumeric must be specified if op_DeviceIPNumeric is specified.
:type val_c_DeviceIPNumeric: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DeviceMAC: The operator to apply to the field DeviceMAC. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DeviceMAC: The MAC of the interface corresponding to the management IP, if available. Otherwise, it is the lowest numbered non-zero MAC for any interface on the device. If no interface records are available for the device, the lowest non-zero MAC address corresponding to the management IP address found in the global ARP table will be used. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DeviceMAC: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DeviceMAC: If op_DeviceMAC is specified, the field named in this input will be compared to the value in DeviceMAC using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DeviceMAC must be specified if op_DeviceMAC is specified.
:type val_f_DeviceMAC: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DeviceMAC: If op_DeviceMAC is specified, this value will be compared to the value in DeviceMAC using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DeviceMAC must be specified if op_DeviceMAC is specified.
:type val_c_DeviceMAC: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DeviceModel: The operator to apply to the field DeviceModel. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DeviceModel: The device model name. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DeviceModel: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DeviceModel: If op_DeviceModel is specified, the field named in this input will be compared to the value in DeviceModel using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DeviceModel must be specified if op_DeviceModel is specified.
:type val_f_DeviceModel: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DeviceModel: If op_DeviceModel is specified, this value will be compared to the value in DeviceModel using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DeviceModel must be specified if op_DeviceModel is specified.
:type val_c_DeviceModel: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DeviceName: The operator to apply to the field DeviceName. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DeviceName: The NetMRI name of the device; this will be either the same as DeviceSysName or DeviceDNSName, depending on your NetMRI configuration. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DeviceName: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DeviceName: If op_DeviceName is specified, the field named in this input will be compared to the value in DeviceName using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DeviceName must be specified if op_DeviceName is specified.
:type val_f_DeviceName: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DeviceName: If op_DeviceName is specified, this value will be compared to the value in DeviceName using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DeviceName must be specified if op_DeviceName is specified.
:type val_c_DeviceName: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
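
Regular-expression matching via the ``rlike`` operator follows the same pattern; a short sketch (client setup assumed as above, and the pattern itself is illustrative only)::

    from infoblox_netmri.client import InfobloxNetMRI  # assumed import path

    client = InfobloxNetMRI(host="netmri.example.com",
                            username="admin", password="secret")
    broker = client.get_broker("InfraDevice")  # assumed broker accessor

    # Devices whose NetMRI name matches a naming convention such as
    # core-rtr01 or core-sw12.
    devices = broker.find(op_DeviceName="rlike",
                          val_c_DeviceName="^core-(rtr|sw)[0-9]+$")
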
:param op_DeviceNetBIOSName: The operator to apply to the field DeviceNetBIOSName. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DeviceNetBIOSName: The NetBIOS name of the device. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DeviceNetBIOSName: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DeviceNetBIOSName: If op_DeviceNetBIOSName is specified, the field named in this input will be compared to the value in DeviceNetBIOSName using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DeviceNetBIOSName must be specified if op_DeviceNetBIOSName is specified.
:type val_f_DeviceNetBIOSName: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DeviceNetBIOSName: If op_DeviceNetBIOSName is specified, this value will be compared to the value in DeviceNetBIOSName using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DeviceNetBIOSName must be specified if op_DeviceNetBIOSName is specified.
:type val_c_DeviceNetBIOSName: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DeviceOUI: The operator to apply to the field DeviceOUI. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DeviceOUI: The NetMRI-determined device vendor using OUI. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DeviceOUI: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DeviceOUI: If op_DeviceOUI is specified, the field named in this input will be compared to the value in DeviceOUI using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DeviceOUI must be specified if op_DeviceOUI is specified.
:type val_f_DeviceOUI: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DeviceOUI: If op_DeviceOUI is specified, this value will be compared to the value in DeviceOUI using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DeviceOUI must be specified if op_DeviceOUI is specified.
:type val_c_DeviceOUI: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DevicePolicyScheduleMode: The operator to apply to the field DevicePolicyScheduleMode. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DevicePolicyScheduleMode: Not currently used. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DevicePolicyScheduleMode: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DevicePolicyScheduleMode: If op_DevicePolicyScheduleMode is specified, the field named in this input will be compared to the value in DevicePolicyScheduleMode using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DevicePolicyScheduleMode must be specified if op_DevicePolicyScheduleMode is specified.
:type val_f_DevicePolicyScheduleMode: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DevicePolicyScheduleMode: If op_DevicePolicyScheduleMode is specified, this value will be compared to the value in DevicePolicyScheduleMode using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DevicePolicyScheduleMode must be specified if op_DevicePolicyScheduleMode is specified.
:type val_c_DevicePolicyScheduleMode: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DeviceRebootTime: The operator to apply to the field DeviceRebootTime. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DeviceRebootTime: The date/time this device was last rebooted. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DeviceRebootTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DeviceRebootTime: If op_DeviceRebootTime is specified, the field named in this input will be compared to the value in DeviceRebootTime using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DeviceRebootTime must be specified if op_DeviceRebootTime is specified.
:type val_f_DeviceRebootTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DeviceRebootTime: If op_DeviceRebootTime is specified, this value will be compared to the value in DeviceRebootTime using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DeviceRebootTime must be specified if op_DeviceRebootTime is specified.
:type val_c_DeviceRebootTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DeviceRunningConfigLastChangedTime: The operator to apply to the field DeviceRunningConfigLastChangedTime. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DeviceRunningConfigLastChangedTime: The date/time, as reported by SNMP, that the device's running configuration was last changed. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DeviceRunningConfigLastChangedTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DeviceRunningConfigLastChangedTime: If op_DeviceRunningConfigLastChangedTime is specified, the field named in this input will be compared to the value in DeviceRunningConfigLastChangedTime using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DeviceRunningConfigLastChangedTime must be specified if op_DeviceRunningConfigLastChangedTime is specified.
:type val_f_DeviceRunningConfigLastChangedTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DeviceRunningConfigLastChangedTime: If op_DeviceRunningConfigLastChangedTime is specified, this value will be compared to the value in DeviceRunningConfigLastChangedTime using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DeviceRunningConfigLastChangedTime must be specified if op_DeviceRunningConfigLastChangedTime is specified.
:type val_c_DeviceRunningConfigLastChangedTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DeviceSAAVersion: The operator to apply to the field DeviceSAAVersion. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DeviceSAAVersion: The SAA version running on this device. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DeviceSAAVersion: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DeviceSAAVersion: If op_DeviceSAAVersion is specified, the field named in this input will be compared to the value in DeviceSAAVersion using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DeviceSAAVersion must be specified if op_DeviceSAAVersion is specified.
:type val_f_DeviceSAAVersion: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DeviceSAAVersion: If op_DeviceSAAVersion is specified, this value will be compared to the value in DeviceSAAVersion using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DeviceSAAVersion must be specified if op_DeviceSAAVersion is specified.
:type val_c_DeviceSAAVersion: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DeviceSavedConfigLastChangedTime: The operator to apply to the field DeviceSavedConfigLastChangedTime. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DeviceSavedConfigLastChangedTime: The date/time, as reported by SNMP, that the device's saved configuration was last changed. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DeviceSavedConfigLastChangedTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DeviceSavedConfigLastChangedTime: If op_DeviceSavedConfigLastChangedTime is specified, the field named in this input will be compared to the value in DeviceSavedConfigLastChangedTime using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DeviceSavedConfigLastChangedTime must be specified if op_DeviceSavedConfigLastChangedTime is specified.
:type val_f_DeviceSavedConfigLastChangedTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DeviceSavedConfigLastChangedTime: If op_DeviceSavedConfigLastChangedTime is specified, this value will be compared to the value in DeviceSavedConfigLastChangedTime using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DeviceSavedConfigLastChangedTime must be specified if op_DeviceSavedConfigLastChangedTime is specified.
:type val_c_DeviceSavedConfigLastChangedTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
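
The ``val_f_`` variant compares a field against another field rather than a constant. A hedged sketch that flags devices whose running configuration changed after the saved configuration (client setup assumed as above)::

    from infoblox_netmri.client import InfobloxNetMRI  # assumed import path

    client = InfobloxNetMRI(host="netmri.example.com",
                            username="admin", password="secret")
    broker = client.get_broker("InfraDevice")  # assumed broker accessor

    # val_f_ names another field to compare against, so this returns devices
    # whose running config is newer than the last saved config (i.e. devices
    # that likely have unsaved changes).
    devices = broker.find(
        op_DeviceRunningConfigLastChangedTime=">",
        val_f_DeviceRunningConfigLastChangedTime="DeviceSavedConfigLastChangedTime",
    )
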
:param op_DeviceSysContact: The operator to apply to the field DeviceSysContact. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DeviceSysContact: The Device sysContact as reported by SNMP. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DeviceSysContact: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DeviceSysContact: If op_DeviceSysContact is specified, the field named in this input will be compared to the value in DeviceSysContact using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DeviceSysContact must be specified if op_DeviceSysContact is specified.
:type val_f_DeviceSysContact: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DeviceSysContact: If op_DeviceSysContact is specified, this value will be compared to the value in DeviceSysContact using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DeviceSysContact must be specified if op_DeviceSysContact is specified.
:type val_c_DeviceSysContact: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DeviceSysDescr: The operator to apply to the field DeviceSysDescr. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DeviceSysDescr: The device sysDescr as reported by SNMP. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DeviceSysDescr: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DeviceSysDescr: If op_DeviceSysDescr is specified, the field named in this input will be compared to the value in DeviceSysDescr using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DeviceSysDescr must be specified if op_DeviceSysDescr is specified.
:type val_f_DeviceSysDescr: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DeviceSysDescr: If op_DeviceSysDescr is specified, this value will be compared to the value in DeviceSysDescr using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DeviceSysDescr must be specified if op_DeviceSysDescr is specified.
:type val_c_DeviceSysDescr: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DeviceSysLocation: The operator to apply to the field DeviceSysLocation. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DeviceSysLocation: The device sysLocation as reported by SNMP. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DeviceSysLocation: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DeviceSysLocation: If op_DeviceSysLocation is specified, the field named in this input will be compared to the value in DeviceSysLocation using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DeviceSysLocation must be specified if op_DeviceSysLocation is specified.
:type val_f_DeviceSysLocation: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DeviceSysLocation: If op_DeviceSysLocation is specified, this value will be compared to the value in DeviceSysLocation using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DeviceSysLocation must be specified if op_DeviceSysLocation is specified.
:type val_c_DeviceSysLocation: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DeviceSysName: The operator to apply to the field DeviceSysName. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DeviceSysName: The device name as reported by SNMP. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DeviceSysName: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DeviceSysName: If op_DeviceSysName is specified, the field named in this input will be compared to the value in DeviceSysName using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DeviceSysName must be specified if op_DeviceSysName is specified.
:type val_f_DeviceSysName: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DeviceSysName: If op_DeviceSysName is specified, this value will be compared to the value in DeviceSysName using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DeviceSysName must be specified if op_DeviceSysName is specified.
:type val_c_DeviceSysName: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DeviceType: The operator to apply to the field DeviceType. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DeviceType: The NetMRI-determined device type. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DeviceType: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DeviceType: If op_DeviceType is specified, the field named in this input will be compared to the value in DeviceType using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DeviceType must be specified if op_DeviceType is specified.
:type val_f_DeviceType: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DeviceType: If op_DeviceType is specified, this value will be compared to the value in DeviceType using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DeviceType must be specified if op_DeviceType is specified.
:type val_c_DeviceType: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DeviceUniqueKey: The operator to apply to the field DeviceUniqueKey. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DeviceUniqueKey: A unique key that allows detecting duplicates across different Virtual Networks. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DeviceUniqueKey: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DeviceUniqueKey: If op_DeviceUniqueKey is specified, the field named in this input will be compared to the value in DeviceUniqueKey using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DeviceUniqueKey must be specified if op_DeviceUniqueKey is specified.
:type val_f_DeviceUniqueKey: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DeviceUniqueKey: If op_DeviceUniqueKey is specified, this value will be compared to the value in DeviceUniqueKey using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DeviceUniqueKey must be specified if op_DeviceUniqueKey is specified.
:type val_c_DeviceUniqueKey: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DeviceVendor: The operator to apply to the field DeviceVendor. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DeviceVendor: The device vendor name. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DeviceVendor: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DeviceVendor: If op_DeviceVendor is specified, the field named in this input will be compared to the value in DeviceVendor using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DeviceVendor must be specified if op_DeviceVendor is specified.
:type val_f_DeviceVendor: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DeviceVendor: If op_DeviceVendor is specified, this value will be compared to the value in DeviceVendor using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DeviceVendor must be specified if op_DeviceVendor is specified.
:type val_c_DeviceVendor: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DeviceVersion: The operator to apply to the field DeviceVersion. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DeviceVersion: The device OS version. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DeviceVersion: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DeviceVersion: If op_DeviceVersion is specified, the field named in this input will be compared to the value in DeviceVersion using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DeviceVersion must be specified if op_DeviceVersion is specified.
:type val_f_DeviceVersion: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DeviceVersion: If op_DeviceVersion is specified, this value will be compared to the value in DeviceVersion using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DeviceVersion must be specified if op_DeviceVersion is specified.
:type val_c_DeviceVersion: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_FilterProvisionData: The operator to apply to the field FilterProvisionData. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. FilterProvisionData: Internal data - do not modify, may change without warning. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_FilterProvisionData: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_FilterProvisionData: If op_FilterProvisionData is specified, the field named in this input will be compared to the value in FilterProvisionData using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_FilterProvisionData must be specified if op_FilterProvisionData is specified.
:type val_f_FilterProvisionData: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_FilterProvisionData: If op_FilterProvisionData is specified, this value will be compared to the value in FilterProvisionData using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_FilterProvisionData must be specified if op_FilterProvisionData is specified.
:type val_c_FilterProvisionData: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_FilteringInd: The operator to apply to the field FilteringInd. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. FilteringInd: A flag indicating whether this device is eligible for Security Device Controller. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_FilteringInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_FilteringInd: If op_FilteringInd is specified, the field named in this input will be compared to the value in FilteringInd using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_FilteringInd must be specified if op_FilteringInd is specified.
:type val_f_FilteringInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_FilteringInd: If op_FilteringInd is specified, this value will be compared to the value in FilteringInd using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_FilteringInd must be specified if op_FilteringInd is specified.
:type val_c_FilteringInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_InfraDeviceChangedCols: The operator to apply to the field InfraDeviceChangedCols. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. InfraDeviceChangedCols: The fields that changed between this revision of the record and the previous revision. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_InfraDeviceChangedCols: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_InfraDeviceChangedCols: If op_InfraDeviceChangedCols is specified, the field named in this input will be compared to the value in InfraDeviceChangedCols using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_InfraDeviceChangedCols must be specified if op_InfraDeviceChangedCols is specified.
:type val_f_InfraDeviceChangedCols: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_InfraDeviceChangedCols: If op_InfraDeviceChangedCols is specified, this value will be compared to the value in InfraDeviceChangedCols using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_InfraDeviceChangedCols must be specified if op_InfraDeviceChangedCols is specified.
:type val_c_InfraDeviceChangedCols: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_InfraDeviceEndTime: The operator to apply to the field InfraDeviceEndTime. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. InfraDeviceEndTime: The ending effective time of this revision of this record, or empty if still in effect. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_InfraDeviceEndTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_InfraDeviceEndTime: If op_InfraDeviceEndTime is specified, the field named in this input will be compared to the value in InfraDeviceEndTime using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_InfraDeviceEndTime must be specified if op_InfraDeviceEndTime is specified.
:type val_f_InfraDeviceEndTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_InfraDeviceEndTime: If op_InfraDeviceEndTime is specified, this value will be compared to the value in InfraDeviceEndTime using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_InfraDeviceEndTime must be specified if op_InfraDeviceEndTime is specified.
:type val_c_InfraDeviceEndTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_InfraDeviceStartTime: The operator to apply to the field InfraDeviceStartTime. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. InfraDeviceStartTime: The starting effective time of this revision of the record. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_InfraDeviceStartTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_InfraDeviceStartTime: If op_InfraDeviceStartTime is specified, the field named in this input will be compared to the value in InfraDeviceStartTime using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_InfraDeviceStartTime must be specified if op_InfraDeviceStartTime is specified.
:type val_f_InfraDeviceStartTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_InfraDeviceStartTime: If op_InfraDeviceStartTime is specified, this value will be compared to the value in InfraDeviceStartTime using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_InfraDeviceStartTime must be specified if op_InfraDeviceStartTime is specified.
:type val_c_InfraDeviceStartTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_InfraDeviceTimestamp: The operator to apply to the field InfraDeviceTimestamp. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. InfraDeviceTimestamp: The date and time this record was collected. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_InfraDeviceTimestamp: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_InfraDeviceTimestamp: If op_InfraDeviceTimestamp is specified, the field named in this input will be compared to the value in InfraDeviceTimestamp using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_InfraDeviceTimestamp must be specified if op_InfraDeviceTimestamp is specified.
:type val_f_InfraDeviceTimestamp: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_InfraDeviceTimestamp: If op_InfraDeviceTimestamp is specified, this value will be compared to the value in InfraDeviceTimestamp using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_InfraDeviceTimestamp must be specified if op_InfraDeviceTimestamp is specified.
:type val_c_InfraDeviceTimestamp: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_MgmtServerDeviceID: The operator to apply to the field MgmtServerDeviceID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. MgmtServerDeviceID: The Device ID of the management server for the device. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_MgmtServerDeviceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_MgmtServerDeviceID: If op_MgmtServerDeviceID is specified, the field named in this input will be compared to the value in MgmtServerDeviceID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_MgmtServerDeviceID must be specified if op_MgmtServerDeviceID is specified.
:type val_f_MgmtServerDeviceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_MgmtServerDeviceID: If op_MgmtServerDeviceID is specified, this value will be compared to the value in MgmtServerDeviceID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_MgmtServerDeviceID must be specified if op_MgmtServerDeviceID is specified.
:type val_c_MgmtServerDeviceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_NetworkDeviceInd: The operator to apply to the field NetworkDeviceInd. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. NetworkDeviceInd: A flag indicating whether this device is a network device or an end host. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_NetworkDeviceInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_NetworkDeviceInd: If op_NetworkDeviceInd is specified, the field named in this input will be compared to the value in NetworkDeviceInd using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_NetworkDeviceInd must be specified if op_NetworkDeviceInd is specified.
:type val_f_NetworkDeviceInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_NetworkDeviceInd: If op_NetworkDeviceInd is specified, this value will be compared to the value in NetworkDeviceInd using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_NetworkDeviceInd must be specified if op_NetworkDeviceInd is specified.
:type val_c_NetworkDeviceInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_ParentDeviceID: The operator to apply to the field ParentDeviceID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. ParentDeviceID: The internal NetMRI identifier for the device containing this virtual device. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_ParentDeviceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_ParentDeviceID: If op_ParentDeviceID is specified, the field named in this input will be compared to the value in ParentDeviceID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_ParentDeviceID must be specified if op_ParentDeviceID is specified.
:type val_f_ParentDeviceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_ParentDeviceID: If op_ParentDeviceID is specified, this value will be compared to the value in ParentDeviceID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_ParentDeviceID must be specified if op_ParentDeviceID is specified.
:type val_c_ParentDeviceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_RoutingInd: The operator to apply to the field RoutingInd. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. RoutingInd: A flag indicating whether this device is configured with any routing capability and whether a routing table was retrieved from this device. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_RoutingInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_RoutingInd: If op_RoutingInd is specified, the field named in this input will be compared to the value in RoutingInd using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_RoutingInd must be specified if op_RoutingInd is specified.
:type val_f_RoutingInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_RoutingInd: If op_RoutingInd is specified, this value will be compared to the value in RoutingInd using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_RoutingInd must be specified if op_RoutingInd is specified.
:type val_c_RoutingInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_SwitchingInd: The operator to apply to the field SwitchingInd. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. SwitchingInd: A flag indicating whether a switch port forwarding table was retrieved from this device. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_SwitchingInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_SwitchingInd: If op_SwitchingInd is specified, the field named in this input will be compared to the value in SwitchingInd using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_SwitchingInd must be specified if op_SwitchingInd is specified.
:type val_f_SwitchingInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_SwitchingInd: If op_SwitchingInd is specified, this value will be compared to the value in SwitchingInd using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_SwitchingInd must be specified if op_SwitchingInd is specified.
:type val_c_SwitchingInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
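
Several ``op_``/``val_c_`` pairs can be combined in a single call. For example, a hedged sketch selecting devices that both route and switch (client setup assumed as above)::

    from infoblox_netmri.client import InfobloxNetMRI  # assumed import path

    client = InfobloxNetMRI(host="netmri.example.com",
                            username="admin", password="secret")
    broker = client.get_broker("InfraDevice")  # assumed broker accessor

    # Devices for which both a routing table and a switch port forwarding
    # table were retrieved.
    devices = broker.find(op_RoutingInd="=", val_c_RoutingInd="1",
                          op_SwitchingInd="=", val_c_SwitchingInd="1")
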
:param op_VirtualInd: The operator to apply to the field VirtualInd. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. VirtualInd: A flag indicating if the source device is a virtual device. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_VirtualInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_VirtualInd: If op_VirtualInd is specified, the field named in this input will be compared to the value in VirtualInd using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_VirtualInd must be specified if op_VirtualInd is specified.
:type val_f_VirtualInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_VirtualInd: If op_VirtualInd is specified, this value will be compared to the value in VirtualInd using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_VirtualInd must be specified if op_VirtualInd is specified.
:type val_c_VirtualInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_VirtualNetworkID: The operator to apply to the field VirtualNetworkID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. VirtualNetworkID: The internal NetMRI identifier of the Virtual Network to which the management address of this device belongs. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_VirtualNetworkID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_VirtualNetworkID: If op_VirtualNetworkID is specified, the field named in this input will be compared to the value in VirtualNetworkID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_VirtualNetworkID must be specified if op_VirtualNetworkID is specified.
:type val_f_VirtualNetworkID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_VirtualNetworkID: If op_VirtualNetworkID is specified, this value will be compared to the value in VirtualNetworkID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_VirtualNetworkID must be specified if op_VirtualNetworkID is specified.
:type val_c_VirtualNetworkID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_VirtualNetworkingInd: The operator to apply to the field VirtualNetworkingInd. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. VirtualNetworkingInd: Set to null, 0 or 1. 0 indicates this is not a VRF Aware device. 1 indicates it is VRF Aware. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_VirtualNetworkingInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_VirtualNetworkingInd: If op_VirtualNetworkingInd is specified, the field named in this input will be compared to the value in VirtualNetworkingInd using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_VirtualNetworkingInd must be specified if op_VirtualNetworkingInd is specified.
:type val_f_VirtualNetworkingInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_VirtualNetworkingInd: If op_VirtualNetworkingInd is specified, this value will be compared to the value in VirtualNetworkingInd using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_VirtualNetworkingInd must be specified if op_VirtualNetworkingInd is specified.
:type val_c_VirtualNetworkingInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_available_mgmt_ips: The operator to apply to the field available_mgmt_ips. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. available_mgmt_ips: Available Management IPs for a device. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_available_mgmt_ips: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_available_mgmt_ips: If op_available_mgmt_ips is specified, the field named in this input will be compared to the value in available_mgmt_ips using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_available_mgmt_ips must be specified if op_available_mgmt_ips is specified.
:type val_f_available_mgmt_ips: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_available_mgmt_ips: If op_available_mgmt_ips is specified, this value will be compared to the value in available_mgmt_ips using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_available_mgmt_ips must be specified if op_available_mgmt_ips is specified.
:type val_c_available_mgmt_ips: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_cap_admin_status_ind: The operator to apply to the field cap_admin_status_ind. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. cap_admin_status_ind: Capability of changing the Admin Status of an interface of this device. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_cap_admin_status_ind: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_cap_admin_status_ind: If op_cap_admin_status_ind is specified, the field named in this input will be compared to the value in cap_admin_status_ind using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_cap_admin_status_ind must be specified if op_cap_admin_status_ind is specified.
:type val_f_cap_admin_status_ind: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_cap_admin_status_ind: If op_cap_admin_status_ind is specified, this value will be compared to the value in cap_admin_status_ind using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_cap_admin_status_ind must be specified if op_cap_admin_status_ind is specified.
:type val_c_cap_admin_status_ind: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_cap_admin_status_na_reason: The operator to apply to the field cap_admin_status_na_reason. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. cap_admin_status_na_reason: Reason why the Admin Status of an interface of this device cannot be changed. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_cap_admin_status_na_reason: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_cap_admin_status_na_reason: If op_cap_admin_status_na_reason is specified, the field named in this input will be compared to the value in cap_admin_status_na_reason using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_cap_admin_status_na_reason must be specified if op_cap_admin_status_na_reason is specified.
:type val_f_cap_admin_status_na_reason: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_cap_admin_status_na_reason: If op_cap_admin_status_na_reason is specified, this value will be compared to the value in cap_admin_status_na_reason using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_cap_admin_status_na_reason must be specified if op_cap_admin_status_na_reason is specified.
:type val_c_cap_admin_status_na_reason: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_cap_description_ind: The operator to apply to the field cap_description_ind. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. cap_description_ind: Capability of changing the description of an interface of this device. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_cap_description_ind: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_cap_description_ind: If op_cap_description_ind is specified, the field named in this input will be compared to the value in cap_description_ind using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_cap_description_ind must be specified if op_cap_description_ind is specified.
:type val_f_cap_description_ind: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_cap_description_ind: If op_cap_description_ind is specified, this value will be compared to the value in cap_description_ind using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_cap_description_ind must be specified if op_cap_description_ind is specified.
:type val_c_cap_description_ind: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_cap_description_na_reason: The operator to apply to the field cap_description_na_reason. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. cap_description_na_reason: Reason why the description of an interface of this device cannot be changed. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_cap_description_na_reason: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_cap_description_na_reason: If op_cap_description_na_reason is specified, the field named in this input will be compared to the value in cap_description_na_reason using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_cap_description_na_reason must be specified if op_cap_description_na_reason is specified.
:type val_f_cap_description_na_reason: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_cap_description_na_reason: If op_cap_description_na_reason is specified, this value will be compared to the value in cap_description_na_reason using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_cap_description_na_reason must be specified if op_cap_description_na_reason is specified.
:type val_c_cap_description_na_reason: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_cap_net_deprovisioning_ind: The operator to apply to the field cap_net_deprovisioning_ind. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. cap_net_deprovisioning_ind: Capability of de-provisioning a network from this device. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_cap_net_deprovisioning_ind: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_cap_net_deprovisioning_ind: If op_cap_net_deprovisioning_ind is specified, the field named in this input will be compared to the value in cap_net_deprovisioning_ind using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_cap_net_deprovisioning_ind must be specified if op_cap_net_deprovisioning_ind is specified.
:type val_f_cap_net_deprovisioning_ind: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_cap_net_deprovisioning_ind: If op_cap_net_deprovisioning_ind is specified, this value will be compared to the value in cap_net_deprovisioning_ind using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_cap_net_deprovisioning_ind must be specified if op_cap_net_deprovisioning_ind is specified.
:type val_c_cap_net_deprovisioning_ind: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_cap_net_deprovisioning_na_reason: The operator to apply to the field cap_net_deprovisioning_na_reason. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. cap_net_deprovisioning_na_reason: Reason why a network cannot be de-provisioned from this device. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_cap_net_deprovisioning_na_reason: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_cap_net_deprovisioning_na_reason: If op_cap_net_deprovisioning_na_reason is specified, the field named in this input will be compared to the value in cap_net_deprovisioning_na_reason using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_cap_net_deprovisioning_na_reason must be specified if op_cap_net_deprovisioning_na_reason is specified.
:type val_f_cap_net_deprovisioning_na_reason: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_cap_net_deprovisioning_na_reason: If op_cap_net_deprovisioning_na_reason is specified, this value will be compared to the value in cap_net_deprovisioning_na_reason using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_cap_net_deprovisioning_na_reason must be specified if op_cap_net_deprovisioning_na_reason is specified.
:type val_c_cap_net_deprovisioning_na_reason: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_cap_net_provisioning_ind: The operator to apply to the field cap_net_provisioning_ind. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. cap_net_provisioning_ind: Capability of provisioning a network on an interface of this device. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_cap_net_provisioning_ind: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_cap_net_provisioning_ind: If op_cap_net_provisioning_ind is specified, the field named in this input will be compared to the value in cap_net_provisioning_ind using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_cap_net_provisioning_ind must be specified if op_cap_net_provisioning_ind is specified.
:type val_f_cap_net_provisioning_ind: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_cap_net_provisioning_ind: If op_cap_net_provisioning_ind is specified, this value will be compared to the value in cap_net_provisioning_ind using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_cap_net_provisioning_ind must be specified if op_cap_net_provisioning_ind is specified.
:type val_c_cap_net_provisioning_ind: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_cap_net_provisioning_na_reason: The operator to apply to the field cap_net_provisioning_na_reason. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. cap_net_provisioning_na_reason: Reason why a network cannot be provisioned on an interface of this device. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_cap_net_provisioning_na_reason: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_cap_net_provisioning_na_reason: If op_cap_net_provisioning_na_reason is specified, the field named in this input will be compared to the value in cap_net_provisioning_na_reason using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_cap_net_provisioning_na_reason must be specified if op_cap_net_provisioning_na_reason is specified.
:type val_f_cap_net_provisioning_na_reason: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_cap_net_provisioning_na_reason: If op_cap_net_provisioning_na_reason is specified, this value will be compared to the value in cap_net_provisioning_na_reason using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_cap_net_provisioning_na_reason must be specified if op_cap_net_provisioning_na_reason is specified.
:type val_c_cap_net_provisioning_na_reason: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_cap_net_vlan_provisioning_ind: The operator to apply to the field cap_net_vlan_provisioning_ind. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. cap_net_vlan_provisioning_ind: Capability of creating a VLAN and provisioning a network on its virtual interface. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_cap_net_vlan_provisioning_ind: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_cap_net_vlan_provisioning_ind: If op_cap_net_vlan_provisioning_ind is specified, the field named in this input will be compared to the value in cap_net_vlan_provisioning_ind using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_cap_net_vlan_provisioning_ind must be specified if op_cap_net_vlan_provisioning_ind is specified.
:type val_f_cap_net_vlan_provisioning_ind: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_cap_net_vlan_provisioning_ind: If op_cap_net_vlan_provisioning_ind is specified, this value will be compared to the value in cap_net_vlan_provisioning_ind using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_cap_net_vlan_provisioning_ind must be specified if op_cap_net_vlan_provisioning_ind is specified.
:type val_c_cap_net_vlan_provisioning_ind: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_cap_net_vlan_provisioning_na_reason: The operator to apply to the field cap_net_vlan_provisioning_na_reason. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. cap_net_vlan_provisioning_na_reason: Reason why a VLAN cannot be created and a network provisioned on its virtual interface. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_cap_net_vlan_provisioning_na_reason: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_cap_net_vlan_provisioning_na_reason: If op_cap_net_vlan_provisioning_na_reason is specified, the field named in this input will be compared to the value in cap_net_vlan_provisioning_na_reason using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_cap_net_vlan_provisioning_na_reason must be specified if op_cap_net_vlan_provisioning_na_reason is specified.
:type val_f_cap_net_vlan_provisioning_na_reason: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_cap_net_vlan_provisioning_na_reason: If op_cap_net_vlan_provisioning_na_reason is specified, this value will be compared to the value in cap_net_vlan_provisioning_na_reason using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_cap_net_vlan_provisioning_na_reason must be specified if op_cap_net_vlan_provisioning_na_reason is specified.
:type val_c_cap_net_vlan_provisioning_na_reason: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_cap_vlan_assignment_ind: The operator to apply to the field cap_vlan_assignment_ind. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. cap_vlan_assignment_ind: Capability of assigning a regular data VLAN to an interface of this device. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_cap_vlan_assignment_ind: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_cap_vlan_assignment_ind: If op_cap_vlan_assignment_ind is specified, the field named in this input will be compared to the value in cap_vlan_assignment_ind using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_cap_vlan_assignment_ind must be specified if op_cap_vlan_assignment_ind is specified.
:type val_f_cap_vlan_assignment_ind: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_cap_vlan_assignment_ind: If op_cap_vlan_assignment_ind is specified, this value will be compared to the value in cap_vlan_assignment_ind using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_cap_vlan_assignment_ind must be specified if op_cap_vlan_assignment_ind is specified.
:type val_c_cap_vlan_assignment_ind: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_cap_vlan_assignment_na_reason: The operator to apply to the field cap_vlan_assignment_na_reason. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. cap_vlan_assignment_na_reason: Reason why a regular data VLAN cannot be assigned to an interface of this device. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_cap_vlan_assignment_na_reason: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_cap_vlan_assignment_na_reason: If op_cap_vlan_assignment_na_reason is specified, the field named in this input will be compared to the value in cap_vlan_assignment_na_reason using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_cap_vlan_assignment_na_reason must be specified if op_cap_vlan_assignment_na_reason is specified.
:type val_f_cap_vlan_assignment_na_reason: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_cap_vlan_assignment_na_reason: If op_cap_vlan_assignment_na_reason is specified, this value will be compared to the value in cap_vlan_assignment_na_reason using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_cap_vlan_assignment_na_reason must be specified if op_cap_vlan_assignment_na_reason is specified.
:type val_c_cap_vlan_assignment_na_reason: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_cap_voice_vlan_ind: The operator to apply to the field cap_voice_vlan_ind. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. cap_voice_vlan_ind: Capability of assigning a voice VLAN to an interface of this device. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_cap_voice_vlan_ind: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_cap_voice_vlan_ind: If op_cap_voice_vlan_ind is specified, the field named in this input will be compared to the value in cap_voice_vlan_ind using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_cap_voice_vlan_ind must be specified if op_cap_voice_vlan_ind is specified.
:type val_f_cap_voice_vlan_ind: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_cap_voice_vlan_ind: If op_cap_voice_vlan_ind is specified, this value will be compared to the value in cap_voice_vlan_ind using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_cap_voice_vlan_ind must be specified if op_cap_voice_vlan_ind is specified.
:type val_c_cap_voice_vlan_ind: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_cap_voice_vlan_na_reason: The operator to apply to the field cap_voice_vlan_na_reason. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. cap_voice_vlan_na_reason: Reason why a voice VLAN cannot be assigned to an interface of this device. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_cap_voice_vlan_na_reason: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_cap_voice_vlan_na_reason: If op_cap_voice_vlan_na_reason is specified, the field named in this input will be compared to the value in cap_voice_vlan_na_reason using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_cap_voice_vlan_na_reason must be specified if op_cap_voice_vlan_na_reason is specified.
:type val_f_cap_voice_vlan_na_reason: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_cap_voice_vlan_na_reason: If op_cap_voice_vlan_na_reason is specified, this value will be compared to the value in cap_voice_vlan_na_reason using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_cap_voice_vlan_na_reason must be specified if op_cap_voice_vlan_na_reason is specified.
:type val_c_cap_voice_vlan_na_reason: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_chassis_serial_number: The operator to apply to the field chassis_serial_number. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. chassis_serial_number: The combined comma separated serial numbers reported by the chassis snmp MIB. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_chassis_serial_number: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_chassis_serial_number: If op_chassis_serial_number is specified, the field named in this input will be compared to the value in chassis_serial_number using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_chassis_serial_number must be specified if op_chassis_serial_number is specified.
:type val_f_chassis_serial_number: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_chassis_serial_number: If op_chassis_serial_number is specified, this value will be compared to the value in chassis_serial_number using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_chassis_serial_number must be specified if op_chassis_serial_number is specified.
:type val_c_chassis_serial_number: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_privileged_polling: The operator to apply to the field privileged_polling. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. privileged_polling: A flag indicating whether to poll the device in privileged mode. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_privileged_polling: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_privileged_polling: If op_privileged_polling is specified, the field named in this input will be compared to the value in privileged_polling using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_privileged_polling must be specified if op_privileged_polling is specified.
:type val_f_privileged_polling: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_privileged_polling: If op_privileged_polling is specified, this value will be compared to the value in privileged_polling using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_privileged_polling must be specified if op_privileged_polling is specified.
:type val_c_privileged_polling: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_rawSysDescr: The operator to apply to the field rawSysDescr. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. rawSysDescr: Unprocessed Device Description value as returned by SNMP. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_rawSysDescr: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_rawSysDescr: If op_rawSysDescr is specified, the field named in this input will be compared to the value in rawSysDescr using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_rawSysDescr must be specified if op_rawSysDescr is specified.
:type val_f_rawSysDescr: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_rawSysDescr: If op_rawSysDescr is specified, this value will be compared to the value in rawSysDescr using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_rawSysDescr must be specified if op_rawSysDescr is specified.
:type val_c_rawSysDescr: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_rawSysModel: The operator to apply to the field rawSysModel. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. rawSysModel: Unprocessed Device Model value as returned by SNMP. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_rawSysModel: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_rawSysModel: If op_rawSysModel is specified, the field named in this input will be compared to the value in rawSysModel using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_rawSysModel must be specified if op_rawSysModel is specified.
:type val_f_rawSysModel: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_rawSysModel: If op_rawSysModel is specified, this value will be compared to the value in rawSysModel using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_rawSysModel must be specified if op_rawSysModel is specified.
:type val_c_rawSysModel: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_rawSysVersion: The operator to apply to the field rawSysVersion. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. rawSysVersion: Unprocessed Device Version value as returned by SNMP. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_rawSysVersion: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_rawSysVersion: If op_rawSysVersion is specified, the field named in this input will be compared to the value in rawSysVersion using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_rawSysVersion must be specified if op_rawSysVersion is specified.
:type val_f_rawSysVersion: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_rawSysVersion: If op_rawSysVersion is specified, this value will be compared to the value in rawSysVersion using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_rawSysVersion must be specified if op_rawSysVersion is specified.
:type val_c_rawSysVersion: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceGroupID: The internal NetMRI identifier of the device groups to which to limit the results.
:type DeviceGroupID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param timestamp: The data returned will represent the infra devices as of this date and time. If omitted, the result will indicate the most recently collected data.
:type timestamp: DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param methods: A list of infra device methods. The listed methods will be called on each infra device returned and included in the output. Available methods are: DeviceCommunitySecure, DeviceRank, DeviceCommunity, DeviceFirstOccurrence, group, parent_device, gateway_device, running_config, running_config_text, saved_config, saved_config_text, running_config_diff, saved_config_diff, virtual_child_count, asset_type, device_setting, data_collection_status, control_capabilities, network_name, interfaces, issue_details, device_routes, device_physicals, if_addrs, config_revisions, detected_changes, device_ports, privileged_polling, DeviceStartTime, DeviceEndTime, cap_description_ind, cap_admin_status_ind, cap_vlan_assignment_ind, cap_voice_vlan_ind, cap_net_provisioning_ind, cap_net_vlan_provisioning_ind, cap_net_deprovisioning_ind, cap_description_na_reason, cap_admin_status_na_reason, cap_vlan_assignment_na_reason, cap_voice_vlan_na_reason, cap_net_provisioning_na_reason, cap_net_vlan_provisioning_na_reason, cap_net_deprovisioning_na_reason, chassis_serial_number, available_mgmt_ips, rawSysDescr, rawSysVersion, rawSysModel, data_source, device.
:type methods: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: parent_device, device_setting, data_collection_status, interfaces, issue_details, device_routes, device_physicals, if_addrs, config_revisions, detected_changes, device_ports, data_source, device.
:type include: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. This record will always be included in the returned page, although it may not be the first record. See :limit for more information, and the usage example at the end of this method's documentation.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` DeviceID
:param sort: The data field(s) to use for sorting the output. Default is DeviceID. Valid values are DataSourceID, DeviceID, InfraDeviceStartTime, InfraDeviceEndTime, InfraDeviceChangedCols, DeviceIPDotted, DeviceIPNumeric, DeviceName, DeviceType, DeviceAssurance, DeviceVendor, DeviceModel, DeviceVersion, DeviceSysName, DeviceSysDescr, DeviceSysLocation, DeviceSysContact, DeviceDNSName, DeviceConfigTimestamp, DeviceFirstOccurrenceTime, InfraDeviceTimestamp, DeviceSAAVersion, DeviceRebootTime, DeviceRunningConfigLastChangedTime, DeviceSavedConfigLastChangedTime, DeviceConfigLastCheckedTime, DevicePolicyScheduleMode, DeviceAddlInfo, DeviceMAC, ParentDeviceID, DeviceContextName, DeviceNetBIOSName, DeviceOUI, MgmtServerDeviceID, NetworkDeviceInd, RoutingInd, SwitchingInd, VirtualInd, FilteringInd, FilterProvisionData, VirtualNetworkID, VirtualNetworkingInd, DeviceUniqueKey.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each InfraDevice. Valid values are DataSourceID, DeviceID, InfraDeviceStartTime, InfraDeviceEndTime, InfraDeviceChangedCols, DeviceIPDotted, DeviceIPNumeric, DeviceName, DeviceType, DeviceAssurance, DeviceVendor, DeviceModel, DeviceVersion, DeviceSysName, DeviceSysDescr, DeviceSysLocation, DeviceSysContact, DeviceDNSName, DeviceConfigTimestamp, DeviceFirstOccurrenceTime, InfraDeviceTimestamp, DeviceSAAVersion, DeviceRebootTime, DeviceRunningConfigLastChangedTime, DeviceSavedConfigLastChangedTime, DeviceConfigLastCheckedTime, DevicePolicyScheduleMode, DeviceAddlInfo, DeviceMAC, ParentDeviceID, DeviceContextName, DeviceNetBIOSName, DeviceOUI, MgmtServerDeviceID, NetworkDeviceInd, RoutingInd, SwitchingInd, VirtualInd, FilteringInd, FilterProvisionData, VirtualNetworkID, VirtualNetworkingInd, DeviceUniqueKey. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param NetworkID: The network id to which results would be limited.
:type NetworkID: Integer
| ``api version min:`` 2.3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param xml_filter: A SetFilter XML structure to further refine the search. The SetFilter will be applied AFTER any search query or field values, but before any limit options. The limit and pagination will be enforced after the filter. Note that this kind of filter may be costly and inefficient if it is not combined with database-level filtering.
:type xml_filter: String
| ``api version min:`` 2.10
| ``api version max:`` None
| ``required:`` False
| ``default:`` False
:param detail_ind: A flag indicating whether discovery times should be included in the output.
:type detail_ind: Boolean
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return infra_devices: An array of the InfraDevice objects that match the specified input criteria.
:rtype infra_devices: Array of InfraDevice
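**Example**
A minimal, illustrative sketch of calling this method. It assumes ``broker`` is an InfraDevice broker obtained from an already-authenticated NetMRI API client; the client setup, the variable name, the vendor value, and the attribute-style access on the returned records are assumptions for illustration only::

    # Page through Cisco infrastructure devices, 25 records at a time,
    # returning only a few selected attributes per device.
    devices = broker.find(
        op_DeviceVendor='=',
        val_c_DeviceVendor='Cisco',
        sort=['DeviceName'],
        dir=['asc'],
        start=0,
        limit=25,
        select=['DeviceID', 'DeviceName', 'DeviceIPDotted'],
    )
    for device in devices:
        print(device.DeviceID, device.DeviceName, device.DeviceIPDotted)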
"""
return self.api_list_request(self._get_method_fullname("find"), kwargs)
def show(self, **kwargs):
"""Shows the details for the specified infra device.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param DeviceID: An internal NetMRI identifier for the device.
:type DeviceID: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param methods: A list of infra device methods. The listed methods will be called on each infra device returned and included in the output. Available methods are: DeviceCommunitySecure, DeviceRank, DeviceCommunity, DeviceFirstOccurrence, group, parent_device, gateway_device, running_config, running_config_text, saved_config, saved_config_text, running_config_diff, saved_config_diff, virtual_child_count, asset_type, device_setting, data_collection_status, control_capabilities, network_name, interfaces, issue_details, device_routes, device_physicals, if_addrs, config_revisions, detected_changes, device_ports, privileged_polling, DeviceStartTime, DeviceEndTime, cap_description_ind, cap_admin_status_ind, cap_vlan_assignment_ind, cap_voice_vlan_ind, cap_net_provisioning_ind, cap_net_vlan_provisioning_ind, cap_net_deprovisioning_ind, cap_description_na_reason, cap_admin_status_na_reason, cap_vlan_assignment_na_reason, cap_voice_vlan_na_reason, cap_net_provisioning_na_reason, cap_net_vlan_provisioning_na_reason, cap_net_deprovisioning_na_reason, chassis_serial_number, available_mgmt_ips, rawSysDescr, rawSysVersion, rawSysModel, data_source, device.
:type methods: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: parent_device, device_setting, data_collection_status, interfaces, issue_details, device_routes, device_physicals, if_addrs, config_revisions, detected_changes, device_ports, data_source, device.
:type include: Array of String
| ``api version min:`` 2.10
| ``api version max:`` None
| ``required:`` False
| ``default:`` False
:param detail_ind: A flag indicating whether discovery times should be included in the output.
:type detail_ind: Boolean
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return infra_device: The infra device identified by the specified DeviceID.
:rtype infra_device: InfraDevice
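**Example**
An illustrative sketch, assuming ``broker`` is this broker obtained from an authenticated client and ``1234`` is an existing DeviceID (both assumptions); the listed methods are requested so their values are returned along with the device, per the ``methods`` input above::

    device = broker.show(
        DeviceID=1234,
        methods=['chassis_serial_number', 'available_mgmt_ips'],
    )
    print(device.DeviceName)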
"""
return self.api_request(self._get_method_fullname("show"), kwargs)
def update(self, **kwargs):
"""Updates an existing infra device.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param DeviceID: An internal NetMRI identifier for the device.
:type DeviceID: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return id: The id of the updated infra device.
:rtype id: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return model: The class name of the updated infra device.
:rtype model: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return uri: A URI that may be used to retrieve the updated infra device.
:rtype uri: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return infra_device: The updated infra device.
:rtype infra_device: InfraDevice
"""
return self.api_request(self._get_method_fullname("update"), kwargs)
def running_config_text(self, **kwargs):
"""The contents of the newest saved running config.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param DeviceID: An internal NetMRI identifier for the device.
:type DeviceID: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return : The contents of the newest saved running config.
:rtype : String
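**Example**
Illustrative only; ``broker`` and the DeviceID value are assumptions. ``saved_config_text`` is the companion method documented later in this class::

    running = broker.running_config_text(DeviceID=1234)
    startup = broker.saved_config_text(DeviceID=1234)
    if running != startup:
        print('Running and startup configurations differ for device 1234')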
"""
return self.api_request(self._get_method_fullname("running_config_text"), kwargs)
def saved_config_text(self, **kwargs):
"""The contents of the newest saved startup config.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param DeviceID: An internal NetMRI identifier for the device.
:type DeviceID: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return : The contents of the newest saved startup config.
:rtype : String
"""
return self.api_request(self._get_method_fullname("saved_config_text"), kwargs)
def DeviceCommunity(self, **kwargs):
"""The community string.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param DeviceID: An internal NetMRI identifier for the device.
:type DeviceID: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return : The community string.
:rtype : String
"""
return self.api_request(self._get_method_fullname("DeviceCommunity"), kwargs)
def chassis_serial_number(self, **kwargs):
"""The combined comma separated serial numbers reported by the chassis snmp MIB.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param DeviceID: An internal NetMRI identifier for the device.
:type DeviceID: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return : The combined comma separated serial numbers reported by the chassis snmp MIB.
:rtype : String
"""
return self.api_request(self._get_method_fullname("chassis_serial_number"), kwargs)
def available_mgmt_ips(self, **kwargs):
"""Available Management IPs for a device.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param DeviceID: An internal NetMRI identifier for the device.
:type DeviceID: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return : Available Management IPs for a device.
:rtype : Array
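**Example**
Illustrative only; ``broker`` and the DeviceID value are assumptions. The return value is documented as an Array, so it is iterated directly::

    for ip in broker.available_mgmt_ips(DeviceID=1234):
        print(ip)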
"""
return self.api_request(self._get_method_fullname("available_mgmt_ips"), kwargs)
def data_source(self, **kwargs):
"""The NetMRI device that collected this record.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param DeviceID: An internal NetMRI identifier for the device.
:type DeviceID: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return : The NetMRI device that collected this record.
:rtype : DataSource
"""
return self.api_request(self._get_method_fullname("data_source"), kwargs)
def parent_device(self, **kwargs):
"""The device containing this virtual device.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param DeviceID: An internal NetMRI identifier for the device.
:type DeviceID: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return : The device containing this virtual device.
:rtype : InfraDevice
"""
return self.api_request(self._get_method_fullname("parent_device"), kwargs)
def gateway_device(self, **kwargs):
"""Returns the default gateway router for this device, based on the following in order of preference: device routing table, device configuration file, device subnet and common conventions.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param DeviceID: An internal NetMRI identifier for the device.
:type DeviceID: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return : Returns the default gateway router for this device, based on the following in order of preference: device routing table, device configuration file, device subnet and common conventions.
:rtype : InfraDevice
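**Example**
Illustrative sketch; ``broker``, the DeviceID value, and the defensive None check are assumptions. The result is another InfraDevice, so its attributes can be read in the same way::

    gateway = broker.gateway_device(DeviceID=1234)
    if gateway is not None:
        print(gateway.DeviceName, gateway.DeviceIPDotted)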
"""
return self.api_request(self._get_method_fullname("gateway_device"), kwargs)
def running_config(self, **kwargs):
"""Returns the ConfigRevision object corresponding to the device's current running configuration.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param DeviceID: An internal NetMRI identifier for the device.
:type DeviceID: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return : Returns the ConfigRevision object corresponding to the device's current running configuration.
:rtype : ConfigRevision
"""
return self.api_request(self._get_method_fullname("running_config"), kwargs)
def saved_config(self, **kwargs):
"""Returns the ConfigRevision object corresponding to the device's current startup configuration.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param DeviceID: An internal NetMRI identifier for the device.
:type DeviceID: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return : Returns the ConfigRevision object corresponding to the device's current startup configuration.
:rtype : ConfigRevision
"""
return self.api_request(self._get_method_fullname("saved_config"), kwargs)
def device(self, **kwargs):
"""The general Device object corresponding to this infrastructure device.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param DeviceID: An internal NetMRI identifier for the device.
:type DeviceID: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return : The general Device object corresponding to this infrastructure device.
:rtype : Device
"""
return self.api_request(self._get_method_fullname("device"), kwargs)
def asset_type(self, **kwargs):
"""The physical/virtual aspect of the device (Virtual Host, Virtual Device, or Physical Device).
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param DeviceID: An internal NetMRI identifier for the device.
:type DeviceID: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return : The physical/virtual aspect of the device (Virtual Host, Virtual Device, or Physical Device).
:rtype : String
"""
return self.api_request(self._get_method_fullname("asset_type"), kwargs)
def DeviceCommunitySecure(self, **kwargs):
"""The secured community name
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param DeviceID: An internal NetMRI identifier for the device.
:type DeviceID: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return : The secured community name
:rtype : String
"""
return self.api_request(self._get_method_fullname("DeviceCommunitySecure"), kwargs)
def DeviceRank(self, **kwargs):
"""The rank of this device among its virtual device siblings.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param DeviceID: An internal NetMRI identifier for the device.
:type DeviceID: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return : The rank of this device among its virtual device siblings.
:rtype : Integer
"""
return self.api_request(self._get_method_fullname("DeviceRank"), kwargs)
def DeviceFirstOccurrence(self, **kwargs):
"""The first occurrence of this device
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param DeviceID: An internal NetMRI identifier for the device.
:type DeviceID: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return : The first occurrence of this device
:rtype : DateTime
"""
return self.api_request(self._get_method_fullname("DeviceFirstOccurrence"), kwargs)
def virtual_child_count(self, **kwargs):
"""The number of virtual devices hosted on this device
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param DeviceID: An internal NetMRI identifier for the device.
:type DeviceID: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return : The number of virtual devices hosted on this device
:rtype : Integer
"""
return self.api_request(self._get_method_fullname("virtual_child_count"), kwargs)
def device_setting(self, **kwargs):
"""The settings information for this device
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param DeviceID: An internal NetMRI identifier for the device.
:type DeviceID: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return : The settings information for this device
:rtype : DeviceSetting
"""
return self.api_request(self._get_method_fullname("device_setting"), kwargs)
def data_collection_status(self, **kwargs):
"""All information about collection of data for this device
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param DeviceID: An internal NetMRI identifier for the device.
:type DeviceID: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return : All information about collection of data for this device
:rtype : DataCollectionStatus
"""
return self.api_request(self._get_method_fullname("data_collection_status"), kwargs)
def running_config_diff(self, **kwargs):
"""The differences between the current and previous running config.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param DeviceID: An internal NetMRI identifier for the device.
:type DeviceID: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return : The differences between the current and previous running config.
:rtype : String
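**Example**
Illustrative only; ``broker`` and the DeviceID value are assumptions. An empty diff is treated here as meaning no change::

    diff = broker.running_config_diff(DeviceID=1234)
    if diff:
        print('Running configuration changed since the previous revision:')
        print(diff)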
"""
return self.api_request(self._get_method_fullname("running_config_diff"), kwargs)
def saved_config_diff(self, **kwargs):
"""The differences between the current and previous saved config.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param DeviceID: An internal NetMRI identifier for the device.
:type DeviceID: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return : The differences between the current and previous saved config.
:rtype : String
"""
return self.api_request(self._get_method_fullname("saved_config_diff"), kwargs)
def network_name(self, **kwargs):
"""A Network View assigned to the device.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param DeviceID: An internal NetMRI identifier for the device.
:type DeviceID: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return : A Network View assigned to the device.
:rtype : String
"""
return self.api_request(self._get_method_fullname("network_name"), kwargs)
def control_capabilities(self, **kwargs):
"""Capabilities of configuring the interfaces of this device.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param DeviceID: An internal NetMRI identifier for the device.
:type DeviceID: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return : Capabilities of configuring the interfaces of this device.
:rtype : String
"""
return self.api_request(self._get_method_fullname("control_capabilities"), kwargs)
def cap_description_ind(self, **kwargs):
"""Capability of changing the description of an interface of this device.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param DeviceID: An internal NetMRI identifier for the device.
:type DeviceID: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return : Capability of changing the description of an interface of this device.
:rtype : Boolean
"""
return self.api_request(self._get_method_fullname("cap_description_ind"), kwargs)
def cap_admin_status_ind(self, **kwargs):
"""Capability of changing the Admin Status of an interface of this device.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param DeviceID: An internal NetMRI identifier for the device.
:type DeviceID: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return : Capability of changing the Admin Status of an interface of this device.
:rtype : Boolean
"""
return self.api_request(self._get_method_fullname("cap_admin_status_ind"), kwargs)
def cap_vlan_assignment_ind(self, **kwargs):
"""Capability of assigning a regular data VLAN to an interface of this device.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param DeviceID: An internal NetMRI identifier for the device.
:type DeviceID: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return : Capability of assigning a regular data VLAN to an interface of this device.
:rtype : Boolean
"""
return self.api_request(self._get_method_fullname("cap_vlan_assignment_ind"), kwargs)
def cap_voice_vlan_ind(self, **kwargs):
"""Capability of assigning a voice VLAN to an interface of this device.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param DeviceID: An internal NetMRI identifier for the device.
:type DeviceID: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return : Capability of assigning a voice VLAN to an interface of this device.
:rtype : Boolean
"""
return self.api_request(self._get_method_fullname("cap_voice_vlan_ind"), kwargs)
def cap_net_provisioning_ind(self, **kwargs):
"""Capability of provisioning a network on an interface of this device.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param DeviceID: An internal NetMRI identifier for the device.
:type DeviceID: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return : Capability of provisioning a network on an interface of this device.
:rtype : Boolean
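**Example**
An illustrative sketch of checking this capability before attempting a provisioning workflow; ``broker`` and the DeviceID value are assumptions. ``cap_net_provisioning_na_reason`` is the companion method documented later in this class::

    if broker.cap_net_provisioning_ind(DeviceID=1234):
        print('Device supports network provisioning on its interfaces')
    else:
        reason = broker.cap_net_provisioning_na_reason(DeviceID=1234)
        print('Network provisioning not available:', reason)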
"""
return self.api_request(self._get_method_fullname("cap_net_provisioning_ind"), kwargs)
def cap_net_vlan_provisioning_ind(self, **kwargs):
"""Capability of creating a VLAN and provisioning a network on its virtual interface.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param DeviceID: An internal NetMRI identifier for the device.
:type DeviceID: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
        :return : Capability of creating a VLAN and provisioning a network on its virtual interface.
:rtype : Boolean
"""
return self.api_request(self._get_method_fullname("cap_net_vlan_provisioning_ind"), kwargs)
def cap_net_deprovisioning_ind(self, **kwargs):
"""Capability of de-provisioning a network from this device.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param DeviceID: An internal NetMRI identifier for the device.
:type DeviceID: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return : Capability of de-provisioning a network from this device.
:rtype : Boolean
"""
return self.api_request(self._get_method_fullname("cap_net_deprovisioning_ind"), kwargs)
def cap_description_na_reason(self, **kwargs):
"""Reason of non ability of changing the description of an interface of this device.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param DeviceID: An internal NetMRI identifier for the device.
:type DeviceID: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
        :return : Reason why the description of an interface of this device cannot be changed.
:rtype : String
"""
return self.api_request(self._get_method_fullname("cap_description_na_reason"), kwargs)
def cap_admin_status_na_reason(self, **kwargs):
"""Reason of non ability of changing the Admin Status of an interface of this device.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param DeviceID: An internal NetMRI identifier for the device.
:type DeviceID: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
        :return : Reason why the Admin Status of an interface of this device cannot be changed.
:rtype : String
"""
return self.api_request(self._get_method_fullname("cap_admin_status_na_reason"), kwargs)
def cap_vlan_assignment_na_reason(self, **kwargs):
"""Reason of non ability of assigning a regular data VLAN to an interface of this device.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param DeviceID: An internal NetMRI identifier for the device.
:type DeviceID: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
        :return : Reason why a regular data VLAN cannot be assigned to an interface of this device.
:rtype : String
"""
return self.api_request(self._get_method_fullname("cap_vlan_assignment_na_reason"), kwargs)
def cap_voice_vlan_na_reason(self, **kwargs):
"""Reason of non ability of assigning a voice VLAN to an interface of this device.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param DeviceID: An internal NetMRI identifier for the device.
:type DeviceID: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
        :return : Reason why a voice VLAN cannot be assigned to an interface of this device.
:rtype : String
"""
return self.api_request(self._get_method_fullname("cap_voice_vlan_na_reason"), kwargs)
def cap_net_provisioning_na_reason(self, **kwargs):
"""Reason of non ability of provisioning a network on an interface of this device.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param DeviceID: An internal NetMRI identifier for the device.
:type DeviceID: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
        :return : Reason why a network cannot be provisioned on an interface of this device.
:rtype : String
"""
return self.api_request(self._get_method_fullname("cap_net_provisioning_na_reason"), kwargs)
def cap_net_vlan_provisioning_na_reason(self, **kwargs):
"""Reason of non ability of creating a VLAN and provision a network on its virtual interface.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param DeviceID: An internal NetMRI identifier for the device.
:type DeviceID: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
        :return : Reason why a VLAN cannot be created and a network provisioned on its virtual interface.
:rtype : String
"""
return self.api_request(self._get_method_fullname("cap_net_vlan_provisioning_na_reason"), kwargs)
def cap_net_deprovisioning_na_reason(self, **kwargs):
"""Reason of non ability of de-provisioning a network from this device.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param DeviceID: An internal NetMRI identifier for the device.
:type DeviceID: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
        :return : Reason why a network cannot be de-provisioned from this device.
:rtype : String
"""
return self.api_request(self._get_method_fullname("cap_net_deprovisioning_na_reason"), kwargs)
def privileged_polling(self, **kwargs):
"""A flag indicating whether to poll the device in privileged mode.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param DeviceID: An internal NetMRI identifier for the device.
:type DeviceID: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return : A flag indicating whether to poll the device in privileged mode.
:rtype : Boolean
"""
return self.api_request(self._get_method_fullname("privileged_polling"), kwargs)
def rawSysModel(self, **kwargs):
"""Unprocessed Device Model value as returned by SNMP
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param DeviceID: An internal NetMRI identifier for the device.
:type DeviceID: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return : Unprocessed Device Model value as returned by SNMP
:rtype : String
"""
return self.api_request(self._get_method_fullname("rawSysModel"), kwargs)
def rawSysVersion(self, **kwargs):
"""Unprocessed Device Version value as returned by SNMP
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param DeviceID: An internal NetMRI identifier for the device.
:type DeviceID: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return : Unprocessed Device Version value as returned by SNMP
:rtype : String
"""
return self.api_request(self._get_method_fullname("rawSysVersion"), kwargs)
def rawSysDescr(self, **kwargs):
"""Unprocessed Device Description value as returned by SNMP
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param DeviceID: An internal NetMRI identifier for the device.
:type DeviceID: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return : Unprocessed Device Description value as returned by SNMP
:rtype : String
"""
return self.api_request(self._get_method_fullname("rawSysDescr"), kwargs)
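    # Hypothetical usage sketch for the capability accessors defined above; "broker"
    # stands for an instance of this broker class obtained from an authenticated
    # NetMRI API client, and device_id is a placeholder DeviceID value:
    #
    #     if not broker.cap_admin_status_ind(DeviceID=device_id):
    #         print broker.cap_admin_status_na_reason(DeviceID=device_id)
    #     raw_descr = broker.rawSysDescr(DeviceID=device_id)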
| apache-2.0 |
balister/gnuradio | gr-qtgui/examples/pyqt_waterfall_c.py | 38 | 6535 | #!/usr/bin/env python
#
# Copyright 2012 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, filter
from gnuradio import blocks
import sys
try:
from gnuradio import qtgui
from PyQt4 import QtGui, QtCore
import sip
except ImportError:
sys.stderr.write("Error: Program requires PyQt4 and gr-qtgui.\n")
sys.exit(1)
try:
from gnuradio import analog
except ImportError:
sys.stderr.write("Error: Program requires gr-analog.\n")
sys.exit(1)
try:
from gnuradio import channels
except ImportError:
sys.stderr.write("Error: Program requires gr-channels.\n")
sys.exit(1)
class dialog_box(QtGui.QWidget):
def __init__(self, display, control):
QtGui.QWidget.__init__(self, None)
self.setWindowTitle('PyQt Test GUI')
self.boxlayout = QtGui.QBoxLayout(QtGui.QBoxLayout.LeftToRight, self)
self.boxlayout.addWidget(display, 1)
self.boxlayout.addWidget(control)
self.resize(800, 500)
class control_box(QtGui.QWidget):
def __init__(self, parent=None):
QtGui.QWidget.__init__(self, parent)
self.setWindowTitle('Control Panel')
self.setToolTip('Control the signals')
QtGui.QToolTip.setFont(QtGui.QFont('OldEnglish', 10))
self.layout = QtGui.QFormLayout(self)
# Control the first signal
self.freq1Edit = QtGui.QLineEdit(self)
self.freq1Edit.setMinimumWidth(100)
self.layout.addRow("Signal 1 Frequency:", self.freq1Edit)
self.connect(self.freq1Edit, QtCore.SIGNAL("editingFinished()"),
self.freq1EditText)
self.amp1Edit = QtGui.QLineEdit(self)
self.amp1Edit.setMinimumWidth(100)
self.layout.addRow("Signal 1 Amplitude:", self.amp1Edit)
self.connect(self.amp1Edit, QtCore.SIGNAL("editingFinished()"),
self.amp1EditText)
# Control the second signal
self.freq2Edit = QtGui.QLineEdit(self)
self.freq2Edit.setMinimumWidth(100)
self.layout.addRow("Signal 2 Frequency:", self.freq2Edit)
self.connect(self.freq2Edit, QtCore.SIGNAL("editingFinished()"),
self.freq2EditText)
self.amp2Edit = QtGui.QLineEdit(self)
self.amp2Edit.setMinimumWidth(100)
self.layout.addRow("Signal 2 Amplitude:", self.amp2Edit)
self.connect(self.amp2Edit, QtCore.SIGNAL("editingFinished()"),
self.amp2EditText)
self.quit = QtGui.QPushButton('Close', self)
self.quit.setMinimumWidth(100)
self.layout.addWidget(self.quit)
self.connect(self.quit, QtCore.SIGNAL('clicked()'),
QtGui.qApp, QtCore.SLOT('quit()'))
def attach_signal1(self, signal):
self.signal1 = signal
self.freq1Edit.setText(QtCore.QString("%1").arg(self.signal1.frequency()))
self.amp1Edit.setText(QtCore.QString("%1").arg(self.signal1.amplitude()))
def attach_signal2(self, signal):
self.signal2 = signal
self.freq2Edit.setText(QtCore.QString("%1").arg(self.signal2.frequency()))
self.amp2Edit.setText(QtCore.QString("%1").arg(self.signal2.amplitude()))
def freq1EditText(self):
try:
newfreq = float(self.freq1Edit.text())
self.signal1.set_frequency(newfreq)
except ValueError:
print "Bad frequency value entered"
def amp1EditText(self):
try:
newamp = float(self.amp1Edit.text())
self.signal1.set_amplitude(newamp)
except ValueError:
print "Bad amplitude value entered"
def freq2EditText(self):
try:
newfreq = float(self.freq2Edit.text())
self.signal2.set_frequency(newfreq)
except ValueError:
print "Bad frequency value entered"
def amp2EditText(self):
try:
newamp = float(self.amp2Edit.text())
self.signal2.set_amplitude(newamp)
except ValueError:
print "Bad amplitude value entered"
class my_top_block(gr.top_block):
def __init__(self):
gr.top_block.__init__(self)
Rs = 8000
f1 = 100
f2 = 2000
npts = 2048
taps = filter.firdes.complex_band_pass_2(1, Rs, 1500, 2500, 100, 60)
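        # Hedged note: assuming the usual firdes.complex_band_pass_2 argument order
        # (gain, sample_rate, low_cutoff, high_cutoff, transition_width, attenuation_dB),
        # these taps form a unity-gain 1500-2500 Hz complex band-pass filter at the 8 kHz
        # rate, so the filtered waterfall input connected below should retain only the
        # 2 kHz tone and reject the 100 Hz tone.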
self.qapp = QtGui.QApplication(sys.argv)
ss = open(gr.prefix() + '/share/gnuradio/themes/dark.qss')
sstext = ss.read()
ss.close()
self.qapp.setStyleSheet(sstext)
src1 = analog.sig_source_c(Rs, analog.GR_SIN_WAVE, f1, 0.1, 0)
src2 = analog.sig_source_c(Rs, analog.GR_SIN_WAVE, f2, 0.1, 0)
src = blocks.add_cc()
channel = channels.channel_model(0.01)
thr = blocks.throttle(gr.sizeof_gr_complex, 100*npts)
filt = filter.fft_filter_ccc(1, taps)
self.snk1 = qtgui.waterfall_sink_c(npts, filter.firdes.WIN_BLACKMAN_hARRIS,
0, Rs,
"Complex Waterfall Example", 2)
self.connect(src1, (src,0))
self.connect(src2, (src,1))
self.connect(src, channel, thr, (self.snk1, 0))
self.connect(thr, filt, (self.snk1, 1))
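        # The waterfall sink was created with two inputs: input 0 receives the raw
        # two-tone sum after the channel model, while input 1 receives the same
        # signal after the band-pass filter, so both views can be compared side by side.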
self.ctrl_win = control_box()
self.ctrl_win.attach_signal1(src1)
self.ctrl_win.attach_signal2(src2)
# Get the reference pointer to the SpectrumDisplayForm QWidget
pyQt = self.snk1.pyqwidget()
# Wrap the pointer as a PyQt SIP object
# This can now be manipulated as a PyQt4.QtGui.QWidget
pyWin = sip.wrapinstance(pyQt, QtGui.QWidget)
#pyWin.show()
self.main_box = dialog_box(pyWin, self.ctrl_win)
self.main_box.show()
if __name__ == "__main__":
    tb = my_top_block()
tb.start()
tb.qapp.exec_()
tb.stop()
| gpl-3.0 |
mysee1989/parallel-ssh | psshlib/cli.py | 58 | 4364 | # Copyright (c) 2009-2012, Andrew McNabb
# Copyright (c) 2003-2008, Brent N. Chun
import optparse
import os
import shlex
import sys
import textwrap
from psshlib import version
_DEFAULT_PARALLELISM = 32
_DEFAULT_TIMEOUT = 0 # "infinity" by default
def common_parser():
"""
Create a basic OptionParser with arguments common to all pssh programs.
"""
# The "resolve" conflict handler avoids errors from the hosts option
# conflicting with the help option.
parser = optparse.OptionParser(conflict_handler='resolve',
version=version.VERSION)
# Ensure that options appearing after the command are sent to ssh.
parser.disable_interspersed_args()
parser.epilog = "Example: pssh -h nodes.txt -l irb2 -o /tmp/foo uptime"
parser.add_option('-h', '--hosts', dest='host_files', action='append',
metavar='HOST_FILE',
help='hosts file (each line "[user@]host[:port]")')
parser.add_option('-H', '--host', dest='host_strings', action='append',
metavar='HOST_STRING',
help='additional host entries ("[user@]host[:port]")')
parser.add_option('-l', '--user', dest='user',
help='username (OPTIONAL)')
parser.add_option('-p', '--par', dest='par', type='int',
help='max number of parallel threads (OPTIONAL)')
parser.add_option('-o', '--outdir', dest='outdir',
help='output directory for stdout files (OPTIONAL)')
parser.add_option('-e', '--errdir', dest='errdir',
help='output directory for stderr files (OPTIONAL)')
parser.add_option('-t', '--timeout', dest='timeout', type='int',
help='timeout (secs) (0 = no timeout) per host (OPTIONAL)')
parser.add_option('-O', '--option', dest='options', action='append',
metavar='OPTION', help='SSH option (OPTIONAL)')
parser.add_option('-v', '--verbose', dest='verbose', action='store_true',
help='turn on warning and diagnostic messages (OPTIONAL)')
parser.add_option('-A', '--askpass', dest='askpass', action='store_true',
help='Ask for a password (OPTIONAL)')
parser.add_option('-x', '--extra-args', action='callback', type='string',
metavar='ARGS', callback=shlex_append, dest='extra',
help='Extra command-line arguments, with processing for '
'spaces, quotes, and backslashes')
parser.add_option('-X', '--extra-arg', dest='extra', action='append',
metavar='ARG', help='Extra command-line argument')
return parser
def common_defaults(**kwargs):
defaults = dict(par=_DEFAULT_PARALLELISM, timeout=_DEFAULT_TIMEOUT)
defaults.update(**kwargs)
envvars = [('user', 'PSSH_USER'),
('par', 'PSSH_PAR'),
('outdir', 'PSSH_OUTDIR'),
('errdir', 'PSSH_ERRDIR'),
('timeout', 'PSSH_TIMEOUT'),
('verbose', 'PSSH_VERBOSE'),
('print_out', 'PSSH_PRINT'),
('askpass', 'PSSH_ASKPASS'),
('inline', 'PSSH_INLINE'),
('recursive', 'PSSH_RECURSIVE'),
('archive', 'PSSH_ARCHIVE'),
('compress', 'PSSH_COMPRESS'),
('localdir', 'PSSH_LOCALDIR'),
]
    for option, var in envvars:
value = os.getenv(var)
if value:
defaults[option] = value
value = os.getenv('PSSH_OPTIONS')
if value:
defaults['options'] = [value]
value = os.getenv('PSSH_HOSTS')
if value:
message1 = ('Warning: the PSSH_HOSTS environment variable is '
'deprecated. Please use the "-h" option instead, and consider '
'creating aliases for convenience. For example:')
message2 = " alias pssh_abc='pssh -h /path/to/hosts_abc'"
sys.stderr.write(textwrap.fill(message1))
sys.stderr.write('\n')
sys.stderr.write(message2)
sys.stderr.write('\n')
defaults['host_files'] = [value]
return defaults
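# Illustrative sketch of how the two helpers above are typically combined by a
# front-end script; the function name, hosts file and argv values below are
# made-up placeholders rather than part of pssh.
def _example_parse_pssh_args(argv=None):
    parser = common_parser()
    # Environment-aware defaults (PSSH_* variables) merged with an explicit timeout.
    parser.set_defaults(**common_defaults(timeout=60))
    opts, args = parser.parse_args(argv or ['-h', 'hosts.txt', '-l', 'irb2', 'uptime'])
    return opts, args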
def shlex_append(option, opt_str, value, parser):
"""An optparse callback similar to the append action.
The given value is processed with shlex, and the resulting list is
concatenated to the option's dest list.
"""
lst = getattr(parser.values, option.dest)
if lst is None:
lst = []
setattr(parser.values, option.dest, lst)
lst.extend(shlex.split(value))
| bsd-3-clause |
saadbinakhlaq/django-oscar | oscar/apps/customer/migrations/0003_auto__add_productalert.py | 16 | 19453 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
from oscar.core.compat import AUTH_USER_MODEL, AUTH_USER_MODEL_NAME
class Migration(SchemaMigration):
depends_on = (
('catalogue', '0001_initial'),
)
def forwards(self, orm):
# Adding model 'ProductAlert'
db.create_table('customer_productalert', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('product', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['catalogue.Product'])),
('user', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='alerts', null=True, to=orm[AUTH_USER_MODEL])),
('email', self.gf('django.db.models.fields.EmailField')(db_index=True, max_length=75, null=True, blank=True)),
('key', self.gf('django.db.models.fields.CharField')(max_length=128, null=True, db_index=True)),
('status', self.gf('django.db.models.fields.CharField')(default='Active', max_length=20)),
('date_created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('date_confirmed', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('date_cancelled', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('date_closed', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
))
db.send_create_signal('customer', ['ProductAlert'])
def backwards(self, orm):
# Deleting model 'ProductAlert'
db.delete_table('customer_productalert')
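    # Hedged note: with South installed, this migration is normally applied with a
    # command along the lines of "./manage.py migrate customer", and rolled back by
    # migrating to the previous migration number, which triggers backwards() above;
    # the exact invocation depends on the project setup.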
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
AUTH_USER_MODEL: {
'Meta': {'object_name': AUTH_USER_MODEL_NAME},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 9, 26, 13, 49, 39, 401244)'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 9, 26, 13, 49, 39, 401151)'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'catalogue.attributeentity': {
'Meta': {'object_name': 'AttributeEntity'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'entities'", 'to': "orm['catalogue.AttributeEntityType']"})
},
'catalogue.attributeentitytype': {
'Meta': {'object_name': 'AttributeEntityType'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'})
},
'catalogue.attributeoption': {
'Meta': {'object_name': 'AttributeOption'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'options'", 'to': "orm['catalogue.AttributeOptionGroup']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'option': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'catalogue.attributeoptiongroup': {
'Meta': {'object_name': 'AttributeOptionGroup'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'catalogue.category': {
'Meta': {'ordering': "['full_name']", 'object_name': 'Category'},
'depth': ('django.db.models.fields.PositiveIntegerField', [], {}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'full_name': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'numchild': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'path': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '1024', 'db_index': 'True'})
},
'catalogue.option': {
'Meta': {'object_name': 'Option'},
'code': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'Required'", 'max_length': '128'})
},
'catalogue.product': {
'Meta': {'ordering': "['-date_created']", 'object_name': 'Product'},
'attributes': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.ProductAttribute']", 'through': "orm['catalogue.ProductAttributeValue']", 'symmetrical': 'False'}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.Category']", 'through': "orm['catalogue.ProductCategory']", 'symmetrical': 'False'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_discountable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'variants'", 'null': 'True', 'to': "orm['catalogue.Product']"}),
'product_class': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.ProductClass']", 'null': 'True'}),
'product_options': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.Option']", 'symmetrical': 'False', 'blank': 'True'}),
'recommended_products': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.Product']", 'symmetrical': 'False', 'through': "orm['catalogue.ProductRecommendation']", 'blank': 'True'}),
'related_products': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'relations'", 'blank': 'True', 'to': "orm['catalogue.Product']"}),
'score': ('django.db.models.fields.FloatField', [], {'default': '0.0', 'db_index': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'db_index': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'upc': ('django.db.models.fields.CharField', [], {'max_length': '64', 'unique': 'True', 'null': 'True', 'blank': 'True'})
},
'catalogue.productattribute': {
'Meta': {'ordering': "['code']", 'object_name': 'ProductAttribute'},
'code': ('django.db.models.fields.SlugField', [], {'max_length': '128', 'db_index': 'True'}),
'entity_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.AttributeEntityType']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'option_group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.AttributeOptionGroup']", 'null': 'True', 'blank': 'True'}),
'product_class': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'attributes'", 'null': 'True', 'to': "orm['catalogue.ProductClass']"}),
'required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'text'", 'max_length': '20'})
},
'catalogue.productattributevalue': {
'Meta': {'object_name': 'ProductAttributeValue'},
'attribute': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.ProductAttribute']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'attribute_values'", 'to': "orm['catalogue.Product']"}),
'value_boolean': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'value_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'value_entity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.AttributeEntity']", 'null': 'True', 'blank': 'True'}),
'value_float': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'value_integer': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'value_option': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.AttributeOption']", 'null': 'True', 'blank': 'True'}),
'value_richtext': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'value_text': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
'catalogue.productcategory': {
'Meta': {'ordering': "['-is_canonical']", 'object_name': 'ProductCategory'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.Category']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_canonical': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.Product']"})
},
'catalogue.productclass': {
'Meta': {'ordering': "['name']", 'object_name': 'ProductClass'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'options': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.Option']", 'symmetrical': 'False', 'blank': 'True'}),
'requires_shipping': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'}),
'track_stock': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'catalogue.productrecommendation': {
'Meta': {'object_name': 'ProductRecommendation'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'primary': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'primary_recommendations'", 'to': "orm['catalogue.Product']"}),
'ranking': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'recommendation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.Product']"})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'customer.communicationeventtype': {
'Meta': {'object_name': 'CommunicationEventType'},
'category': ('django.db.models.fields.CharField', [], {'default': "u'Order related'", 'max_length': '255'}),
'code': ('django.db.models.fields.SlugField', [], {'max_length': '128', 'db_index': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'email_body_html_template': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'email_body_template': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'email_subject_template': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'sms_template': ('django.db.models.fields.CharField', [], {'max_length': '170', 'blank': 'True'})
},
'customer.email': {
'Meta': {'object_name': 'Email'},
'body_html': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'body_text': ('django.db.models.fields.TextField', [], {}),
'date_sent': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'subject': ('django.db.models.fields.TextField', [], {'max_length': '255'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'emails'", 'to': "orm['{0}']".format(AUTH_USER_MODEL)})
},
'customer.notification': {
'Meta': {'ordering': "('-date_sent',)", 'object_name': 'Notification'},
'body': ('django.db.models.fields.TextField', [], {}),
'category': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'date_read': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'date_sent': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'default': "'Inbox'", 'max_length': '32'}),
'recipient': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'notifications'", 'to': "orm['{0}']".format(AUTH_USER_MODEL)}),
'sender': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['{0}']".format(AUTH_USER_MODEL), 'null': 'True'}),
'subject': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'customer.productalert': {
'Meta': {'object_name': 'ProductAlert'},
'date_cancelled': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_closed': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_confirmed': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'db_index': 'True', 'max_length': '75', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.Product']"}),
'status': ('django.db.models.fields.CharField', [], {'default': "'Active'", 'max_length': '20'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'alerts'", 'null': 'True', 'to': "orm['{0}']".format(AUTH_USER_MODEL)})
}
}
complete_apps = ['customer']
| bsd-3-clause |
maestrano/openerp | openerp/addons/base_report_designer/plugin/openerp_report_designer/bin/script/Repeatln.py | 90 | 13231 | #########################################################################
#
# Copyright (c) 2003-2004 Danny Brewer d29583@groovegarden.com
# Copyright (C) 2004-2010 OpenERP SA (<http://openerp.com>).
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# See: http://www.gnu.org/licenses/lgpl.html
#
#############################################################################
import uno
import string
import unohelper
import xmlrpclib
from com.sun.star.task import XJobExecutor
if __name__<>"package":
from lib.gui import *
from lib.error import ErrorDialog
from lib.functions import *
from ServerParameter import *
from lib.logreport import *
from lib.rpc import *
from LoginTest import *
database="test_db1"
uid = 3
#class RepeatIn:
class RepeatIn( unohelper.Base, XJobExecutor ):
def __init__(self, sObject="", sVariable="", sFields="", sDisplayName="", bFromModify=False):
# Interface Design
LoginTest()
self.logobj=Logger()
if not loginstatus and __name__=="package":
exit(1)
self.win = DBModalDialog(60, 50, 180, 250, "RepeatIn Builder")
self.win.addFixedText("lblVariable", 2, 12, 60, 15, "Objects to loop on :")
self.win.addComboBox("cmbVariable", 180-120-2, 10, 120, 15,True, itemListenerProc=self.cmbVariable_selected)
self.insVariable = self.win.getControl( "cmbVariable" )
self.win.addFixedText("lblFields", 10, 32, 60, 15, "Field to loop on :")
self.win.addComboListBox("lstFields", 180-120-2, 30, 120, 150, False,itemListenerProc=self.lstbox_selected)
self.insField = self.win.getControl( "lstFields" )
self.win.addFixedText("lblName", 12, 187, 60, 15, "Variable name :")
self.win.addEdit("txtName", 180-120-2, 185, 120, 15,)
self.win.addFixedText("lblUName", 8, 207, 60, 15, "Displayed name :")
self.win.addEdit("txtUName", 180-120-2, 205, 120, 15,)
self.win.addButton('btnOK',-2 ,-10,45,15,'Ok', actionListenerProc = self.btnOk_clicked )
self.win.addButton('btnCancel',-2 - 45 - 5 ,-10,45,15,'Cancel', actionListenerProc = self.btnCancel_clicked )
global passwd
self.password = passwd
global url
self.sock=RPCSession(url)
# Variable Declaration
self.sValue=None
self.sObj=None
self.aSectionList=[]
self.sGVariable=sVariable
self.sGDisplayName=sDisplayName
self.aItemList=[]
self.aComponentAdd=[]
self.aObjectList=[]
self.aListRepeatIn=[]
self.aVariableList=[]
        # Call method to perform enumeration on the report document
EnumDocument(self.aItemList,self.aComponentAdd)
        # Check whether Field-1 and Field-4 are available; if they are,
        # fill the combo box with the corresponding values.
desktop = getDesktop()
doc = desktop.getCurrentComponent()
docinfo = doc.getDocumentInfo()
        # Check whether Field-1 is available; if not, exit the application.
self.sMyHost= ""
if not docinfo.getUserFieldValue(3) == "" and not docinfo.getUserFieldValue(0)=="":
self.sMyHost= docinfo.getUserFieldValue(0)
self.count=0
oParEnum = doc.getTextFields().createEnumeration()
while oParEnum.hasMoreElements():
oPar = oParEnum.nextElement()
if oPar.supportsService("com.sun.star.text.TextField.DropDown"):
self.count += 1
getList(self.aObjectList, self.sMyHost,self.count)
cursor = doc.getCurrentController().getViewCursor()
text = cursor.getText()
tcur = text.createTextCursorByRange(cursor)
self.aVariableList.extend( filter( lambda obj: obj[:obj.find(" ")] == "List", self.aObjectList ) )
for i in range(len(self.aItemList)):
try:
anItem = self.aItemList[i][1]
component = self.aComponentAdd[i]
if component == "Document":
sLVal = anItem[anItem.find(",'") + 2:anItem.find("')")]
self.aVariableList.extend( filter( lambda obj: obj[:obj.find("(")] == sLVal, self.aObjectList ) )
if tcur.TextSection:
getRecersiveSection(tcur.TextSection,self.aSectionList)
if component in self.aSectionList:
sLVal = anItem[anItem.find(",'") + 2:anItem.find("')")]
self.aVariableList.extend( filter( lambda obj: obj[:obj.find("(")] == sLVal, self.aObjectList ) )
if tcur.TextTable:
if not component == "Document" and component[component.rfind(".") + 1:] == tcur.TextTable.Name:
VariableScope( tcur, self.aVariableList, self.aObjectList, self.aComponentAdd, self.aItemList, component )
except :
import traceback,sys
info = reduce(lambda x, y: x+y, traceback.format_exception(sys.exc_type, sys.exc_value, sys.exc_traceback))
self.logobj.log_write('RepeatIn', LOG_ERROR, info)
self.bModify=bFromModify
if self.bModify==True:
if sObject=="":
self.insVariable.setText("List of "+docinfo.getUserFieldValue(3))
self.insField.addItem("objects",self.win.getListBoxItemCount("lstFields"))
self.win.setEditText("txtName", sVariable)
self.win.setEditText("txtUName",sDisplayName)
self.sValue= "objects"
else:
sItem=""
for anObject in self.aObjectList:
if anObject[:anObject.find("(")] == sObject:
sItem = anObject
self.insVariable.setText( sItem )
genTree(
sItem[sItem.find("(")+1:sItem.find(")")],
self.aListRepeatIn,
self.insField,
self.sMyHost,
2,
ending=['one2many','many2many'],
recur=['one2many','many2many']
)
self.sValue= self.win.getListBoxItem("lstFields",self.aListRepeatIn.index(sFields))
for var in self.aVariableList:
if var[:8] <> 'List of ':
self.model_ids = self.sock.execute(database, uid, self.password, 'ir.model' , 'search', [('model','=',var[var.find("(")+1:var.find(")")])])
else:
self.model_ids = self.sock.execute(database, uid, self.password, 'ir.model' , 'search', [('model','=',var[8:])])
fields=['name','model']
self.model_res = self.sock.execute(database, uid, self.password, 'ir.model', 'read', self.model_ids,fields)
if self.model_res <> []:
if var[:8]<>'List of ':
self.insVariable.addItem(var[:var.find("(")+1] + self.model_res[0]['name'] + ")" ,self.insVariable.getItemCount())
else:
self.insVariable.addItem('List of ' + self.model_res[0]['name'] ,self.insVariable.getItemCount())
else:
self.insVariable.addItem(var ,self.insVariable.getItemCount())
self.win.doModalDialog("lstFields",self.sValue)
else:
ErrorDialog("Please Select Appropriate module" ,"Create new report from: \nOpenERP -> Open a New Report")
self.win.endExecute()
def lstbox_selected(self, oItemEvent):
sItem=self.win.getListBoxSelectedItem("lstFields")
sMain=self.aListRepeatIn[self.win.getListBoxSelectedItemPos("lstFields")]
if self.bModify==True:
self.win.setEditText("txtName", self.sGVariable)
self.win.setEditText("txtUName",self.sGDisplayName)
else:
self.win.setEditText("txtName",sMain[sMain.rfind("/")+1:])
self.win.setEditText("txtUName","|-."+sItem[sItem.rfind("/")+1:]+".-|")
def cmbVariable_selected(self, oItemEvent):
if self.count > 0 :
desktop=getDesktop()
doc =desktop.getCurrentComponent()
docinfo=doc.getDocumentInfo()
self.win.removeListBoxItems("lstFields", 0, self.win.getListBoxItemCount("lstFields"))
sItem=self.win.getComboBoxText("cmbVariable")
for var in self.aVariableList:
if var[:8]=='List of ':
if var[:8]==sItem[:8]:
sItem = var
elif var[:var.find("(")+1] == sItem[:sItem.find("(")+1]:
sItem = var
self.aListRepeatIn=[]
data = ( sItem[sItem.rfind(" ") + 1:] == docinfo.getUserFieldValue(3) ) and docinfo.getUserFieldValue(3) or sItem[sItem.find("(")+1:sItem.find(")")]
genTree( data, self.aListRepeatIn, self.insField, self.sMyHost, 2, ending=['one2many','many2many'], recur=['one2many','many2many'] )
self.win.selectListBoxItemPos("lstFields", 0, True )
else:
sItem=self.win.getComboBoxText("cmbVariable")
for var in self.aVariableList:
if var[:8]=='List of ' and var[:8] == sItem[:8]:
sItem = var
if sItem.find(".")==-1:
temp=sItem[sItem.rfind("x_"):]
else:
temp=sItem[sItem.rfind(".")+1:]
self.win.setEditText("txtName",temp)
self.win.setEditText("txtUName","|-."+temp+".-|")
self.insField.addItem("objects",self.win.getListBoxItemCount("lstFields"))
self.win.selectListBoxItemPos("lstFields", 0, True )
def btnOk_clicked(self, oActionEvent):
desktop=getDesktop()
doc = desktop.getCurrentComponent()
cursor = doc.getCurrentController().getViewCursor()
selectedItem = self.win.getListBoxSelectedItem( "lstFields" )
selectedItemPos = self.win.getListBoxSelectedItemPos( "lstFields" )
txtName = self.win.getEditText( "txtName" )
txtUName = self.win.getEditText( "txtUName" )
if selectedItem != "" and txtName != "" and txtUName != "":
sKey=u""+ txtUName
if selectedItem == "objects":
sValue=u"[[ repeatIn(" + selectedItem + ",'" + txtName + "') ]]"
else:
sObjName=self.win.getComboBoxText("cmbVariable")
sObjName=sObjName[:sObjName.find("(")]
sValue=u"[[ repeatIn(" + sObjName + self.aListRepeatIn[selectedItemPos].replace("/",".") + ",'" + txtName +"') ]]"
if self.bModify == True:
oCurObj = cursor.TextField
oCurObj.Items = (sKey,sValue)
oCurObj.update()
else:
oInputList = doc.createInstance("com.sun.star.text.TextField.DropDown")
if self.win.getListBoxSelectedItem("lstFields") == "objects":
oInputList.Items = (sKey,sValue)
doc.Text.insertTextContent(cursor,oInputList,False)
else:
sValue=u"[[ repeatIn(" + sObjName + self.aListRepeatIn[selectedItemPos].replace("/",".") + ",'" + txtName +"') ]]"
if cursor.TextTable==None:
oInputList.Items = (sKey,sValue)
doc.Text.insertTextContent(cursor,oInputList,False)
else:
oInputList.Items = (sKey,sValue)
widget = ( cursor.TextTable or selectedItem <> 'objects' ) and cursor.TextTable.getCellByName( cursor.Cell.CellName ) or doc.Text
widget.insertTextContent(cursor,oInputList,False)
self.win.endExecute()
else:
ErrorDialog("Please fill appropriate data in Object Field or Name field \nor select particular value from the list of fields.")
def btnCancel_clicked(self, oActionEvent):
self.win.endExecute()
if __name__<>"package" and __name__=="__main__":
RepeatIn()
elif __name__=="package":
g_ImplementationHelper = unohelper.ImplementationHelper()
g_ImplementationHelper.addImplementation( RepeatIn, "org.openoffice.openerp.report.repeatln", ("com.sun.star.task.Job",),)
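    # The helper above registers RepeatIn as a UNO component implementing the
    # com.sun.star.task.Job service, which is presumably how the report designer
    # add-on invokes this dialog from inside OpenOffice.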
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
IRI-Research/django | django/contrib/gis/db/backends/postgis/introspection.py | 12 | 4610 | from django.db.backends.postgresql_psycopg2.introspection import DatabaseIntrospection
from django.contrib.gis.gdal import OGRGeomType
class GeoIntrospectionError(Exception):
pass
class PostGISIntrospection(DatabaseIntrospection):
# Reverse dictionary for PostGIS geometry types not populated until
# introspection is actually performed.
postgis_types_reverse = {}
ignored_tables = DatabaseIntrospection.ignored_tables + [
'geography_columns',
'geometry_columns',
'raster_columns',
'spatial_ref_sys',
'raster_overviews',
]
def get_postgis_types(self):
"""
Returns a dictionary with keys that are the PostgreSQL object
identification integers for the PostGIS geometry and/or
geography types (if supported).
"""
cursor = self.connection.cursor()
# The OID integers associated with the geometry type may
# be different across versions; hence, this is why we have
# to query the PostgreSQL pg_type table corresponding to the
# PostGIS custom data types.
oid_sql = 'SELECT "oid" FROM "pg_type" WHERE "typname" = %s'
try:
cursor.execute(oid_sql, ('geometry',))
GEOM_TYPE = cursor.fetchone()[0]
postgis_types = {GEOM_TYPE: 'GeometryField'}
if self.connection.ops.geography:
cursor.execute(oid_sql, ('geography',))
GEOG_TYPE = cursor.fetchone()[0]
# The value for the geography type is actually a tuple
# to pass in the `geography=True` keyword to the field
# definition.
postgis_types[GEOG_TYPE] = ('GeometryField', {'geography': True})
finally:
cursor.close()
return postgis_types
def get_field_type(self, data_type, description):
if not self.postgis_types_reverse:
# If the PostGIS types reverse dictionary is not populated, do so
# now. In order to prevent unnecessary requests upon connection
# intialization, the `data_types_reverse` dictionary is not updated
# with the PostGIS custom types until introspection is actually
# performed -- in other words, when this function is called.
self.postgis_types_reverse = self.get_postgis_types()
self.data_types_reverse.update(self.postgis_types_reverse)
return super(PostGISIntrospection, self).get_field_type(data_type, description)
def get_geometry_type(self, table_name, geo_col):
"""
The geometry type OID used by PostGIS does not indicate the particular
type of field that a geometry column is (e.g., whether it's a
PointField or a PolygonField). Thus, this routine queries the PostGIS
        metadata tables to determine the geometry type.
"""
cursor = self.connection.cursor()
try:
try:
# First seeing if this geometry column is in the `geometry_columns`
cursor.execute('SELECT "coord_dimension", "srid", "type" '
'FROM "geometry_columns" '
'WHERE "f_table_name"=%s AND "f_geometry_column"=%s',
(table_name, geo_col))
row = cursor.fetchone()
if not row:
raise GeoIntrospectionError
except GeoIntrospectionError:
if self.connection.ops.geography:
cursor.execute('SELECT "coord_dimension", "srid", "type" '
'FROM "geography_columns" '
'WHERE "f_table_name"=%s AND "f_geography_column"=%s',
(table_name, geo_col))
row = cursor.fetchone()
if not row:
raise Exception('Could not find a geometry or geography column for "%s"."%s"' %
(table_name, geo_col))
# OGRGeomType does not require GDAL and makes it easy to convert
# from OGC geom type name to Django field.
field_type = OGRGeomType(row[2]).django
# Getting any GeometryField keyword arguments that are not the default.
dim = row[0]
srid = row[1]
field_params = {}
if srid != 4326:
field_params['srid'] = srid
if dim != 2:
field_params['dim'] = dim
finally:
cursor.close()
return field_type, field_params
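# Illustrative sketch of consuming the (field_type, field_params) pair returned by
# get_geometry_type() above; the helper name is hypothetical and not part of Django.
def _example_render_geo_field(field_type, field_params):
    # e.g. ('PointField', {'srid': 3857, 'dim': 3}) -> "models.PointField(dim=3, srid=3857)"
    kwargs = ', '.join('%s=%r' % item for item in sorted(field_params.items()))
    return 'models.%s(%s)' % (field_type, kwargs)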
| bsd-3-clause |
qiuzhong/crosswalk-test-suite | webapi/tct-jsenhance-html5-tests/inst.wgt.py | 372 | 6809 | #!/usr/bin/env python
import os
import shutil
import glob
import time
import sys
import subprocess
import string
from optparse import OptionParser, make_option
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
PKG_NAME = os.path.basename(SCRIPT_DIR)
PARAMETERS = None
#XW_ENV = "export DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/5000/dbus/user_bus_socket"
SRC_DIR = ""
PKG_SRC_DIR = ""
def doCMD(cmd):
    # No need to handle timeouts in this short script; let the calling tool do it.
print "-->> \"%s\"" % cmd
output = []
cmd_return_code = 1
cmd_proc = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
while True:
output_line = cmd_proc.stdout.readline().strip("\r\n")
cmd_return_code = cmd_proc.poll()
if output_line == '' and cmd_return_code is not None:
break
sys.stdout.write("%s\n" % output_line)
sys.stdout.flush()
output.append(output_line)
return (cmd_return_code, output)
def updateCMD(cmd=None):
if "pkgcmd" in cmd:
cmd = "su - %s -c '%s;%s'" % (PARAMETERS.user, XW_ENV, cmd)
return cmd
def getUSERID():
if PARAMETERS.mode == "SDB":
cmd = "sdb -s %s shell id -u %s" % (
PARAMETERS.device, PARAMETERS.user)
else:
cmd = "ssh %s \"id -u %s\"" % (
PARAMETERS.device, PARAMETERS.user)
return doCMD(cmd)
def getPKGID(pkg_name=None):
if PARAMETERS.mode == "SDB":
cmd = "sdb -s %s shell %s" % (
PARAMETERS.device, updateCMD('pkgcmd -l'))
else:
cmd = "ssh %s \"%s\"" % (
PARAMETERS.device, updateCMD('pkgcmd -l'))
(return_code, output) = doCMD(cmd)
if return_code != 0:
return None
test_pkg_id = None
for line in output:
if line.find("[" + pkg_name + "]") != -1:
pkgidIndex = line.split().index("pkgid")
test_pkg_id = line.split()[pkgidIndex + 1].strip("[]")
break
return test_pkg_id
def doRemoteCMD(cmd=None):
if PARAMETERS.mode == "SDB":
cmd = "sdb -s %s shell %s" % (PARAMETERS.device, updateCMD(cmd))
else:
cmd = "ssh %s \"%s\"" % (PARAMETERS.device, updateCMD(cmd))
return doCMD(cmd)
def doRemoteCopy(src=None, dest=None):
if PARAMETERS.mode == "SDB":
cmd_prefix = "sdb -s %s push" % PARAMETERS.device
cmd = "%s %s %s" % (cmd_prefix, src, dest)
else:
cmd = "scp -r %s %s:/%s" % (src, PARAMETERS.device, dest)
(return_code, output) = doCMD(cmd)
doRemoteCMD("sync")
if return_code != 0:
return True
else:
return False
def uninstPKGs():
action_status = True
for root, dirs, files in os.walk(SCRIPT_DIR):
if root.endswith("mediasrc"):
continue
for file in files:
if file.endswith(".wgt"):
pkg_id = getPKGID(os.path.basename(os.path.splitext(file)[0]))
if not pkg_id:
action_status = False
continue
(return_code, output) = doRemoteCMD(
"pkgcmd -u -t wgt -q -n %s" % pkg_id)
for line in output:
if "Failure" in line:
action_status = False
break
(return_code, output) = doRemoteCMD(
"rm -rf %s" % PKG_SRC_DIR)
if return_code != 0:
action_status = False
return action_status
def instPKGs():
action_status = True
(return_code, output) = doRemoteCMD(
"mkdir -p %s" % PKG_SRC_DIR)
if return_code != 0:
action_status = False
for root, dirs, files in os.walk(SCRIPT_DIR):
if root.endswith("mediasrc"):
continue
for file in files:
if file.endswith(".wgt"):
if not doRemoteCopy(
os.path.join(root, file), "%s/%s" % (SRC_DIR, file)):
action_status = False
(return_code, output) = doRemoteCMD(
"pkgcmd -i -t wgt -q -p %s/%s" % (SRC_DIR, file))
doRemoteCMD("rm -rf %s/%s" % (SRC_DIR, file))
for line in output:
if "Failure" in line:
action_status = False
break
# Do some special copy/delete... steps
'''
(return_code, output) = doRemoteCMD(
"mkdir -p %s/tests" % PKG_SRC_DIR)
if return_code != 0:
action_status = False
if not doRemoteCopy("specname/tests", "%s/tests" % PKG_SRC_DIR):
action_status = False
'''
return action_status
def main():
try:
usage = "usage: inst.py -i"
opts_parser = OptionParser(usage=usage)
opts_parser.add_option(
"-m", dest="mode", action="store", help="Specify mode")
opts_parser.add_option(
"-s", dest="device", action="store", help="Specify device")
opts_parser.add_option(
"-i", dest="binstpkg", action="store_true", help="Install package")
opts_parser.add_option(
"-u", dest="buninstpkg", action="store_true", help="Uninstall package")
opts_parser.add_option(
"-a", dest="user", action="store", help="User name")
global PARAMETERS
(PARAMETERS, args) = opts_parser.parse_args()
except Exception as e:
print "Got wrong option: %s, exit ..." % e
sys.exit(1)
if not PARAMETERS.user:
PARAMETERS.user = "app"
global SRC_DIR, PKG_SRC_DIR
SRC_DIR = "/home/%s/content" % PARAMETERS.user
PKG_SRC_DIR = "%s/tct/opt/%s" % (SRC_DIR, PKG_NAME)
if not PARAMETERS.mode:
PARAMETERS.mode = "SDB"
if PARAMETERS.mode == "SDB":
if not PARAMETERS.device:
(return_code, output) = doCMD("sdb devices")
for line in output:
if str.find(line, "\tdevice") != -1:
PARAMETERS.device = line.split("\t")[0]
break
else:
PARAMETERS.mode = "SSH"
if not PARAMETERS.device:
print "No device provided"
sys.exit(1)
user_info = getUSERID()
re_code = user_info[0]
if re_code == 0:
global XW_ENV
userid = user_info[1][0]
XW_ENV = "export DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/%s/dbus/user_bus_socket" % str(
userid)
else:
print "[Error] cmd commands error : %s" % str(user_info[1])
sys.exit(1)
if PARAMETERS.binstpkg and PARAMETERS.buninstpkg:
print "-i and -u are conflict"
sys.exit(1)
if PARAMETERS.buninstpkg:
if not uninstPKGs():
sys.exit(1)
else:
if not instPKGs():
sys.exit(1)
if __name__ == "__main__":
main()
sys.exit(0)
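# Typical invocations, based on the options defined in main() above:
#   python inst.wgt.py -m SDB -s <device-serial> -i # install the .wgt packages
#   python inst.wgt.py -m SDB -s <device-serial> -u # uninstall them
# "-a <user>" overrides the default "app" user; "-m SSH" targets a device over ssh.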
| bsd-3-clause |
maxziv/SEApp | server/lib/werkzeug/test.py | 77 | 32616 | # -*- coding: utf-8 -*-
"""
werkzeug.test
~~~~~~~~~~~~~
This module implements a client to WSGI applications for testing.
:copyright: (c) 2011 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import sys
import urlparse
import mimetypes
from time import time
from random import random
from itertools import chain
from tempfile import TemporaryFile
from cStringIO import StringIO
from cookielib import CookieJar
from urllib2 import Request as U2Request
from werkzeug._internal import _empty_stream, _get_environ
from werkzeug.wrappers import BaseRequest
from werkzeug.urls import url_encode, url_fix, iri_to_uri, _unquote
from werkzeug.wsgi import get_host, get_current_url, ClosingIterator
from werkzeug.utils import dump_cookie
from werkzeug.datastructures import FileMultiDict, MultiDict, \
CombinedMultiDict, Headers, FileStorage
def stream_encode_multipart(values, use_tempfile=True, threshold=1024 * 500,
boundary=None, charset='utf-8'):
"""Encode a dict of values (either strings or file descriptors or
    :class:`FileStorage` objects) into a multipart encoded string stored
in a file descriptor.
"""
if boundary is None:
boundary = '---------------WerkzeugFormPart_%s%s' % (time(), random())
_closure = [StringIO(), 0, False]
if use_tempfile:
def write(string):
stream, total_length, on_disk = _closure
if on_disk:
stream.write(string)
else:
length = len(string)
if length + _closure[1] <= threshold:
stream.write(string)
else:
new_stream = TemporaryFile('wb+')
new_stream.write(stream.getvalue())
new_stream.write(string)
_closure[0] = new_stream
_closure[2] = True
_closure[1] = total_length + length
else:
write = _closure[0].write
if not isinstance(values, MultiDict):
values = MultiDict(values)
for key, values in values.iterlists():
for value in values:
write('--%s\r\nContent-Disposition: form-data; name="%s"' %
(boundary, key))
reader = getattr(value, 'read', None)
if reader is not None:
filename = getattr(value, 'filename',
getattr(value, 'name', None))
content_type = getattr(value, 'content_type', None)
if content_type is None:
content_type = filename and \
mimetypes.guess_type(filename)[0] or \
'application/octet-stream'
if filename is not None:
write('; filename="%s"\r\n' % filename)
else:
write('\r\n')
write('Content-Type: %s\r\n\r\n' % content_type)
while 1:
chunk = reader(16384)
if not chunk:
break
write(chunk)
else:
if isinstance(value, unicode):
value = value.encode(charset)
write('\r\n\r\n' + str(value))
write('\r\n')
write('--%s--\r\n' % boundary)
length = int(_closure[0].tell())
_closure[0].seek(0)
return _closure[0], length, boundary
def encode_multipart(values, boundary=None, charset='utf-8'):
"""Like `stream_encode_multipart` but returns a tuple in the form
(``boundary``, ``data``) where data is a bytestring.
"""
stream, length, boundary = stream_encode_multipart(
values, use_tempfile=False, boundary=boundary, charset=charset)
return boundary, stream.read()
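# Illustrative sketch of using encode_multipart() above, relying only on this
# module's own imports; the field names and file contents are made-up placeholders.
def _example_encode_multipart():
    body_boundary, data = encode_multipart({
        'name': u'value',
        'upload': StringIO('file contents'), # file-like objects are streamed
    })
    content_type = 'multipart/form-data; boundary="%s"' % body_boundary
    return content_type, data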
def File(fd, filename=None, mimetype=None):
"""Backwards compat."""
from warnings import warn
warn(DeprecationWarning('werkzeug.test.File is deprecated, use the '
'EnvironBuilder or FileStorage instead'))
return FileStorage(fd, filename=filename, content_type=mimetype)
class _TestCookieHeaders(object):
"""A headers adapter for cookielib
"""
def __init__(self, headers):
self.headers = headers
def getheaders(self, name):
headers = []
name = name.lower()
for k, v in self.headers:
if k.lower() == name:
headers.append(v)
return headers
class _TestCookieResponse(object):
"""Something that looks like a httplib.HTTPResponse, but is actually just an
adapter for our test responses to make them available for cookielib.
"""
def __init__(self, headers):
self.headers = _TestCookieHeaders(headers)
def info(self):
return self.headers
class _TestCookieJar(CookieJar):
"""A cookielib.CookieJar modified to inject and read cookie headers from
and to wsgi environments, and wsgi application responses.
"""
def inject_wsgi(self, environ):
"""Inject the cookies as client headers into the server's wsgi
environment.
"""
cvals = []
for cookie in self:
cvals.append('%s=%s' % (cookie.name, cookie.value))
if cvals:
environ['HTTP_COOKIE'] = '; '.join(cvals)
def extract_wsgi(self, environ, headers):
"""Extract the server's set-cookie headers as cookies into the
cookie jar.
"""
self.extract_cookies(
_TestCookieResponse(headers),
U2Request(get_current_url(environ)),
)
def _iter_data(data):
"""Iterates over a dict or multidict yielding all keys and values.
This is used to iterate over the data passed to the
:class:`EnvironBuilder`.
"""
if isinstance(data, MultiDict):
for key, values in data.iterlists():
for value in values:
yield key, value
else:
for key, values in data.iteritems():
if isinstance(values, list):
for value in values:
yield key, value
else:
yield key, values
class EnvironBuilder(object):
"""This class can be used to conveniently create a WSGI environment
for testing purposes. It can be used to quickly create WSGI environments
or request objects from arbitrary data.
The signature of this class is also used in some other places as of
Werkzeug 0.5 (:func:`create_environ`, :meth:`BaseResponse.from_values`,
:meth:`Client.open`). Because of this most of the functionality is
available through the constructor alone.
Files and regular form data can be manipulated independently of each
other with the :attr:`form` and :attr:`files` attributes, but are
passed with the same argument to the constructor: `data`.
`data` can be any of these values:
- a `str`: If it's a string it is converted into a :attr:`input_stream`,
the :attr:`content_length` is set and you have to provide a
:attr:`content_type`.
- a `dict`: If it's a dict the keys have to be strings and the values
any of the following objects:
- a :class:`file`-like object. These are converted into
:class:`FileStorage` objects automatically.
- a tuple. The :meth:`~FileMultiDict.add_file` method is called
with the tuple items as positional arguments.
.. versionadded:: 0.6
`path` and `base_url` can now be unicode strings that are encoded using
the :func:`iri_to_uri` function.
:param path: the path of the request. In the WSGI environment this will
end up as `PATH_INFO`. If the `query_string` is not defined
and there is a question mark in the `path` everything after
it is used as query string.
:param base_url: the base URL is a URL that is used to extract the WSGI
URL scheme, host (server name + server port) and the
script root (`SCRIPT_NAME`).
:param query_string: an optional string or dict with URL parameters.
:param method: the HTTP method to use, defaults to `GET`.
:param input_stream: an optional input stream. Do not specify this and
`data`. As soon as an input stream is set you can't
modify :attr:`args` and :attr:`files` unless you
set the :attr:`input_stream` to `None` again.
:param content_type: The content type for the request. As of 0.5 you
don't have to provide this when specifying files
and form data via `data`.
:param content_length: The content length for the request. You don't
have to specify this when providing data via
`data`.
:param errors_stream: an optional error stream that is used for
`wsgi.errors`. Defaults to :data:`stderr`.
:param multithread: controls `wsgi.multithread`. Defaults to `False`.
:param multiprocess: controls `wsgi.multiprocess`. Defaults to `False`.
:param run_once: controls `wsgi.run_once`. Defaults to `False`.
:param headers: an optional list or :class:`Headers` object of headers.
:param data: a string or dict of form data. See explanation above.
:param environ_base: an optional dict of environment defaults.
:param environ_overrides: an optional dict of environment overrides.
:param charset: the charset used to encode unicode data.
"""
#: the server protocol to use. defaults to HTTP/1.1
server_protocol = 'HTTP/1.1'
#: the wsgi version to use. defaults to (1, 0)
wsgi_version = (1, 0)
#: the default request class for :meth:`get_request`
request_class = BaseRequest
def __init__(self, path='/', base_url=None, query_string=None,
method='GET', input_stream=None, content_type=None,
content_length=None, errors_stream=None, multithread=False,
multiprocess=False, run_once=False, headers=None, data=None,
environ_base=None, environ_overrides=None, charset='utf-8'):
if query_string is None and '?' in path:
path, query_string = path.split('?', 1)
self.charset = charset
if isinstance(path, unicode):
path = iri_to_uri(path, charset)
self.path = path
if base_url is not None:
if isinstance(base_url, unicode):
base_url = iri_to_uri(base_url, charset)
else:
base_url = url_fix(base_url, charset)
self.base_url = base_url
if isinstance(query_string, basestring):
self.query_string = query_string
else:
if query_string is None:
query_string = MultiDict()
elif not isinstance(query_string, MultiDict):
query_string = MultiDict(query_string)
self.args = query_string
self.method = method
if headers is None:
headers = Headers()
elif not isinstance(headers, Headers):
headers = Headers(headers)
self.headers = headers
self.content_type = content_type
if errors_stream is None:
errors_stream = sys.stderr
self.errors_stream = errors_stream
self.multithread = multithread
self.multiprocess = multiprocess
self.run_once = run_once
self.environ_base = environ_base
self.environ_overrides = environ_overrides
self.input_stream = input_stream
self.content_length = content_length
self.closed = False
if data:
if input_stream is not None:
raise TypeError('can\'t provide input stream and data')
if isinstance(data, basestring):
self.input_stream = StringIO(data)
if self.content_length is None:
self.content_length = len(data)
else:
for key, value in _iter_data(data):
if isinstance(value, (tuple, dict)) or \
hasattr(value, 'read'):
self._add_file_from_data(key, value)
else:
self.form.setlistdefault(key).append(value)
def _add_file_from_data(self, key, value):
"""Called in the EnvironBuilder to add files from the data dict."""
if isinstance(value, tuple):
self.files.add_file(key, *value)
elif isinstance(value, dict):
from warnings import warn
warn(DeprecationWarning('it\'s no longer possible to pass dicts '
'as `data`. Use tuples or FileStorage '
'objects instead'), stacklevel=2)
value = dict(value)
mimetype = value.pop('mimetype', None)
if mimetype is not None:
value['content_type'] = mimetype
self.files.add_file(key, **value)
else:
self.files.add_file(key, value)
def _get_base_url(self):
return urlparse.urlunsplit((self.url_scheme, self.host,
self.script_root, '', '')).rstrip('/') + '/'
def _set_base_url(self, value):
if value is None:
scheme = 'http'
netloc = 'localhost'
script_root = ''
else:
scheme, netloc, script_root, qs, anchor = urlparse.urlsplit(value)
if qs or anchor:
raise ValueError('base url must not contain a query string '
'or fragment')
self.script_root = script_root.rstrip('/')
self.host = netloc
self.url_scheme = scheme
base_url = property(_get_base_url, _set_base_url, doc='''
The base URL is a URL that is used to extract the WSGI
URL scheme, host (server name + server port) and the
script root (`SCRIPT_NAME`).''')
del _get_base_url, _set_base_url
def _get_content_type(self):
ct = self.headers.get('Content-Type')
if ct is None and not self._input_stream:
if self.method in ('POST', 'PUT', 'PATCH'):
if self._files:
return 'multipart/form-data'
return 'application/x-www-form-urlencoded'
return None
return ct
def _set_content_type(self, value):
if value is None:
self.headers.pop('Content-Type', None)
else:
self.headers['Content-Type'] = value
content_type = property(_get_content_type, _set_content_type, doc='''
The content type for the request. Reflected from and to the
:attr:`headers`. Do not set if you set :attr:`files` or
:attr:`form` for auto detection.''')
del _get_content_type, _set_content_type
def _get_content_length(self):
return self.headers.get('Content-Length', type=int)
def _set_content_length(self, value):
if value is None:
self.headers.pop('Content-Length', None)
else:
self.headers['Content-Length'] = str(value)
content_length = property(_get_content_length, _set_content_length, doc='''
The content length as integer. Reflected from and to the
:attr:`headers`. Do not set if you set :attr:`files` or
:attr:`form` for auto detection.''')
del _get_content_length, _set_content_length
def form_property(name, storage, doc):
key = '_' + name
def getter(self):
if self._input_stream is not None:
raise AttributeError('an input stream is defined')
rv = getattr(self, key)
if rv is None:
rv = storage()
setattr(self, key, rv)
return rv
def setter(self, value):
self._input_stream = None
setattr(self, key, value)
return property(getter, setter, doc)
form = form_property('form', MultiDict, doc='''
A :class:`MultiDict` of form values.''')
files = form_property('files', FileMultiDict, doc='''
A :class:`FileMultiDict` of uploaded files. You can use the
:meth:`~FileMultiDict.add_file` method to add new files to the
dict.''')
del form_property
def _get_input_stream(self):
return self._input_stream
def _set_input_stream(self, value):
self._input_stream = value
self._form = self._files = None
input_stream = property(_get_input_stream, _set_input_stream, doc='''
An optional input stream. If you set this it will clear
:attr:`form` and :attr:`files`.''')
del _get_input_stream, _set_input_stream
def _get_query_string(self):
if self._query_string is None:
if self._args is not None:
return url_encode(self._args, charset=self.charset)
return ''
return self._query_string
def _set_query_string(self, value):
self._query_string = value
self._args = None
query_string = property(_get_query_string, _set_query_string, doc='''
The query string. If you set this to a string :attr:`args` will
no longer be available.''')
del _get_query_string, _set_query_string
def _get_args(self):
if self._query_string is not None:
raise AttributeError('a query string is defined')
if self._args is None:
self._args = MultiDict()
return self._args
def _set_args(self, value):
self._query_string = None
self._args = value
args = property(_get_args, _set_args, doc='''
The URL arguments as :class:`MultiDict`.''')
del _get_args, _set_args
@property
def server_name(self):
"""The server name (read-only, use :attr:`host` to set)"""
return self.host.split(':', 1)[0]
@property
def server_port(self):
"""The server port as integer (read-only, use :attr:`host` to set)"""
pieces = self.host.split(':', 1)
if len(pieces) == 2 and pieces[1].isdigit():
return int(pieces[1])
elif self.url_scheme == 'https':
return 443
return 80
def __del__(self):
self.close()
def close(self):
"""Closes all files. If you put real :class:`file` objects into the
:attr:`files` dict you can call this method to automatically close
them all in one go.
"""
if self.closed:
return
try:
files = self.files.itervalues()
except AttributeError:
files = ()
for f in files:
try:
f.close()
except Exception, e:
pass
self.closed = True
def get_environ(self):
"""Return the built environ."""
input_stream = self.input_stream
content_length = self.content_length
content_type = self.content_type
if input_stream is not None:
start_pos = input_stream.tell()
input_stream.seek(0, 2)
end_pos = input_stream.tell()
input_stream.seek(start_pos)
content_length = end_pos - start_pos
elif content_type == 'multipart/form-data':
values = CombinedMultiDict([self.form, self.files])
input_stream, content_length, boundary = \
stream_encode_multipart(values, charset=self.charset)
content_type += '; boundary="%s"' % boundary
elif content_type == 'application/x-www-form-urlencoded':
values = url_encode(self.form, charset=self.charset)
content_length = len(values)
input_stream = StringIO(values)
else:
input_stream = _empty_stream
result = {}
if self.environ_base:
result.update(self.environ_base)
def _path_encode(x):
if isinstance(x, unicode):
x = x.encode(self.charset)
return _unquote(x)
result.update({
'REQUEST_METHOD': self.method,
'SCRIPT_NAME': _path_encode(self.script_root),
'PATH_INFO': _path_encode(self.path),
'QUERY_STRING': self.query_string,
'SERVER_NAME': self.server_name,
'SERVER_PORT': str(self.server_port),
'HTTP_HOST': self.host,
'SERVER_PROTOCOL': self.server_protocol,
'CONTENT_TYPE': content_type or '',
'CONTENT_LENGTH': str(content_length or '0'),
'wsgi.version': self.wsgi_version,
'wsgi.url_scheme': self.url_scheme,
'wsgi.input': input_stream,
'wsgi.errors': self.errors_stream,
'wsgi.multithread': self.multithread,
'wsgi.multiprocess': self.multiprocess,
'wsgi.run_once': self.run_once
})
for key, value in self.headers.to_list(self.charset):
result['HTTP_%s' % key.upper().replace('-', '_')] = value
if self.environ_overrides:
result.update(self.environ_overrides)
return result
def get_request(self, cls=None):
"""Returns a request with the data. If the request class is not
specified :attr:`request_class` is used.
:param cls: The request wrapper to use.
"""
if cls is None:
cls = self.request_class
return cls(self.get_environ())
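# --- Illustrative usage sketch added by the editor; not part of the original
# module. A rough example of driving the EnvironBuilder above; the path, form
# field, and header are placeholders chosen for the demonstration.
def _example_environ_builder():
    """Create a WSGI environ and a request object for a simple form POST."""
    builder = EnvironBuilder(path='/submit', method='POST',
                             data={'field': 'value'},
                             headers={'X-Example': 'yes'})
    try:
        environ = builder.get_environ()
        request = builder.get_request()
    finally:
        builder.close()
    return environ, request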
class ClientRedirectError(Exception):
"""
If a redirect loop is detected when using follow_redirects=True with
    the :class:`Client`, then this exception is raised.
"""
class Client(object):
"""This class allows to send requests to a wrapped application.
The response wrapper can be a class or factory function that takes
three arguments: app_iter, status and headers. The default response
wrapper just returns a tuple.
Example::
class ClientResponse(BaseResponse):
...
client = Client(MyApplication(), response_wrapper=ClientResponse)
The use_cookies parameter indicates whether cookies should be stored and
sent for subsequent requests. This is True by default, but passing False
will disable this behaviour.
    If you want to request some subdomain of your application you may set
    `allow_subdomain_redirects` to `True`; otherwise no external redirects
    are allowed.
.. versionadded:: 0.5
`use_cookies` is new in this version. Older versions did not provide
builtin cookie support.
"""
def __init__(self, application, response_wrapper=None, use_cookies=True,
allow_subdomain_redirects=False):
self.application = application
if response_wrapper is None:
response_wrapper = lambda a, s, h: (a, s, h)
self.response_wrapper = response_wrapper
if use_cookies:
self.cookie_jar = _TestCookieJar()
else:
self.cookie_jar = None
self.redirect_client = None
self.allow_subdomain_redirects = allow_subdomain_redirects
def set_cookie(self, server_name, key, value='', max_age=None,
expires=None, path='/', domain=None, secure=None,
httponly=False, charset='utf-8'):
"""Sets a cookie in the client's cookie jar. The server name
is required and has to match the one that is also passed to
the open call.
"""
assert self.cookie_jar is not None, 'cookies disabled'
header = dump_cookie(key, value, max_age, expires, path, domain,
secure, httponly, charset)
environ = create_environ(path, base_url='http://' + server_name)
headers = [('Set-Cookie', header)]
self.cookie_jar.extract_wsgi(environ, headers)
def delete_cookie(self, server_name, key, path='/', domain=None):
"""Deletes a cookie in the test client."""
self.set_cookie(server_name, key, expires=0, max_age=0,
path=path, domain=domain)
def open(self, *args, **kwargs):
"""Takes the same arguments as the :class:`EnvironBuilder` class with
some additions: You can provide a :class:`EnvironBuilder` or a WSGI
environment as only argument instead of the :class:`EnvironBuilder`
arguments and two optional keyword arguments (`as_tuple`, `buffered`)
that change the type of the return value or the way the application is
executed.
.. versionchanged:: 0.5
If a dict is provided as file in the dict for the `data` parameter
the content type has to be called `content_type` now instead of
`mimetype`. This change was made for consistency with
:class:`werkzeug.FileWrapper`.
The `follow_redirects` parameter was added to :func:`open`.
Additional parameters:
:param as_tuple: Returns a tuple in the form ``(environ, result)``
:param buffered: Set this to True to buffer the application run.
This will automatically close the application for
you as well.
:param follow_redirects: Set this to True if the `Client` should
follow HTTP redirects.
"""
as_tuple = kwargs.pop('as_tuple', False)
buffered = kwargs.pop('buffered', False)
follow_redirects = kwargs.pop('follow_redirects', False)
environ = None
if not kwargs and len(args) == 1:
if isinstance(args[0], EnvironBuilder):
environ = args[0].get_environ()
elif isinstance(args[0], dict):
environ = args[0]
if environ is None:
builder = EnvironBuilder(*args, **kwargs)
try:
environ = builder.get_environ()
finally:
builder.close()
if self.cookie_jar is not None:
self.cookie_jar.inject_wsgi(environ)
rv = run_wsgi_app(self.application, environ, buffered=buffered)
if self.cookie_jar is not None:
self.cookie_jar.extract_wsgi(environ, rv[2])
# handle redirects
redirect_chain = []
status_code = int(rv[1].split(None, 1)[0])
while status_code in (301, 302, 303, 305, 307) and follow_redirects:
if not self.redirect_client:
# assume that we're not using the user defined response wrapper
# so that we don't need any ugly hacks to get the status
# code from the response.
self.redirect_client = Client(self.application)
self.redirect_client.cookie_jar = self.cookie_jar
redirect = dict(rv[2])['Location']
scheme, netloc, script_root, qs, anchor = urlparse.urlsplit(redirect)
base_url = urlparse.urlunsplit((scheme, netloc, '', '', '')).rstrip('/') + '/'
cur_server_name = netloc.split(':', 1)[0].split('.')
real_server_name = get_host(environ).split(':', 1)[0].split('.')
if self.allow_subdomain_redirects:
allowed = cur_server_name[-len(real_server_name):] == real_server_name
else:
allowed = cur_server_name == real_server_name
if not allowed:
raise RuntimeError('%r does not support redirect to '
'external targets' % self.__class__)
redirect_chain.append((redirect, status_code))
# the redirect request should be a new request, and not be based on
# the old request
redirect_kwargs = {
'path': script_root,
'base_url': base_url,
'query_string': qs,
'as_tuple': True,
'buffered': buffered,
'follow_redirects': False,
}
environ, rv = self.redirect_client.open(**redirect_kwargs)
status_code = int(rv[1].split(None, 1)[0])
# Prevent loops
if redirect_chain[-1] in redirect_chain[:-1]:
raise ClientRedirectError("loop detected")
response = self.response_wrapper(*rv)
if as_tuple:
return environ, response
return response
def get(self, *args, **kw):
"""Like open but method is enforced to GET."""
kw['method'] = 'GET'
return self.open(*args, **kw)
def patch(self, *args, **kw):
"""Like open but method is enforced to PATCH."""
kw['method'] = 'PATCH'
return self.open(*args, **kw)
def post(self, *args, **kw):
"""Like open but method is enforced to POST."""
kw['method'] = 'POST'
return self.open(*args, **kw)
def head(self, *args, **kw):
"""Like open but method is enforced to HEAD."""
kw['method'] = 'HEAD'
return self.open(*args, **kw)
def put(self, *args, **kw):
"""Like open but method is enforced to PUT."""
kw['method'] = 'PUT'
return self.open(*args, **kw)
def delete(self, *args, **kw):
"""Like open but method is enforced to DELETE."""
kw['method'] = 'DELETE'
return self.open(*args, **kw)
def __repr__(self):
return '<%s %r>' % (
self.__class__.__name__,
self.application
)
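# --- Illustrative usage sketch added by the editor; not part of the original
# module. It runs the Client above against a throwaway WSGI callable; the
# application and URL are made up for the example.
def _example_client_roundtrip():
    """Issue a GET request against a trivial WSGI app and collect the body."""
    def hello_app(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return ['Hello from %s' % environ['PATH_INFO']]
    client = Client(hello_app)
    app_iter, status, headers = client.get('/demo')
    return status, ''.join(app_iter)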
def create_environ(*args, **kwargs):
"""Create a new WSGI environ dict based on the values passed. The first
parameter should be the path of the request which defaults to '/'. The
second one can either be an absolute path (in that case the host is
localhost:80) or a full path to the request with scheme, netloc port and
the path to the script.
This accepts the same arguments as the :class:`EnvironBuilder`
constructor.
.. versionchanged:: 0.5
This function is now a thin wrapper over :class:`EnvironBuilder` which
was added in 0.5. The `headers`, `environ_base`, `environ_overrides`
and `charset` parameters were added.
"""
builder = EnvironBuilder(*args, **kwargs)
try:
return builder.get_environ()
finally:
builder.close()
def run_wsgi_app(app, environ, buffered=False):
"""Return a tuple in the form (app_iter, status, headers) of the
application output. This works best if you pass it an application that
returns an iterator all the time.
Sometimes applications may use the `write()` callable returned
by the `start_response` function. This tries to resolve such edge
cases automatically. But if you don't get the expected output you
should set `buffered` to `True` which enforces buffering.
If passed an invalid WSGI application the behavior of this function is
undefined. Never pass non-conforming WSGI applications to this function.
:param app: the application to execute.
:param buffered: set to `True` to enforce buffering.
:return: tuple in the form ``(app_iter, status, headers)``
"""
environ = _get_environ(environ)
response = []
buffer = []
def start_response(status, headers, exc_info=None):
if exc_info is not None:
raise exc_info[0], exc_info[1], exc_info[2]
response[:] = [status, headers]
return buffer.append
app_iter = app(environ, start_response)
# when buffering we emit the close call early and convert the
# application iterator into a regular list
if buffered:
close_func = getattr(app_iter, 'close', None)
try:
app_iter = list(app_iter)
finally:
if close_func is not None:
close_func()
# otherwise we iterate the application iter until we have
# a response, chain the already received data with the already
# collected data and wrap it in a new `ClosingIterator` if
# we have a close callable.
else:
while not response:
buffer.append(app_iter.next())
if buffer:
close_func = getattr(app_iter, 'close', None)
app_iter = chain(buffer, app_iter)
if close_func is not None:
app_iter = ClosingIterator(app_iter, close_func)
return app_iter, response[0], response[1]
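# --- Illustrative usage sketch added by the editor; not part of the original
# module. It combines `create_environ` and `run_wsgi_app` against a minimal
# application; all names below are placeholders.
def _example_run_wsgi_app():
    """Build an environ for '/ping' and execute a tiny app against it."""
    def ping_app(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return ['pong']
    environ = create_environ('/ping', 'http://localhost/')
    app_iter, status, headers = run_wsgi_app(ping_app, environ)
    return status, list(app_iter)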
| apache-2.0 |
sjperkins/tensorflow | tensorflow/examples/learn/random_forest_mnist.py | 73 | 4030 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A stand-alone example for tf.learn's random forest model on mnist."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import tempfile
# pylint: disable=g-backslash-continuation
from tensorflow.contrib.learn.python.learn\
import metric_spec
from tensorflow.contrib.learn.python.learn.estimators\
import estimator
from tensorflow.contrib.tensor_forest.client\
import eval_metrics
from tensorflow.contrib.tensor_forest.client\
import random_forest
from tensorflow.contrib.tensor_forest.python\
import tensor_forest
from tensorflow.examples.tutorials.mnist import input_data
from tensorflow.python.platform import app
FLAGS = None
def build_estimator(model_dir):
"""Build an estimator."""
params = tensor_forest.ForestHParams(
num_classes=10, num_features=784,
num_trees=FLAGS.num_trees, max_nodes=FLAGS.max_nodes)
graph_builder_class = tensor_forest.RandomForestGraphs
if FLAGS.use_training_loss:
graph_builder_class = tensor_forest.TrainingLossForest
# Use the SKCompat wrapper, which gives us a convenient way to split
# in-memory data like MNIST into batches.
return estimator.SKCompat(random_forest.TensorForestEstimator(
params, graph_builder_class=graph_builder_class,
model_dir=model_dir))
def train_and_eval():
"""Train and evaluate the model."""
model_dir = tempfile.mkdtemp() if not FLAGS.model_dir else FLAGS.model_dir
print('model directory = %s' % model_dir)
est = build_estimator(model_dir)
mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=False)
est.fit(x=mnist.train.images, y=mnist.train.labels,
batch_size=FLAGS.batch_size)
metric_name = 'accuracy'
metric = {metric_name:
metric_spec.MetricSpec(
eval_metrics.get_metric(metric_name),
prediction_key=eval_metrics.get_prediction_key(metric_name))}
results = est.score(x=mnist.test.images, y=mnist.test.labels,
batch_size=FLAGS.batch_size,
metrics=metric)
for key in sorted(results):
print('%s: %s' % (key, results[key]))
def main(_):
train_and_eval()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--model_dir',
type=str,
default='',
help='Base directory for output models.'
)
parser.add_argument(
'--data_dir',
type=str,
default='/tmp/data/',
help='Directory for storing data'
)
parser.add_argument(
'--train_steps',
type=int,
default=1000,
help='Number of training steps.'
)
parser.add_argument(
'--batch_size',
type=str,
default=1000,
help='Number of examples in a training batch.'
)
parser.add_argument(
'--num_trees',
type=int,
default=100,
help='Number of trees in the forest.'
)
parser.add_argument(
'--max_nodes',
type=int,
default=1000,
help='Max total nodes in a single tree.'
)
parser.add_argument(
'--use_training_loss',
type=bool,
default=False,
help='If true, use training loss as termination criteria.'
)
FLAGS, unparsed = parser.parse_known_args()
app.run(main=main, argv=[sys.argv[0]] + unparsed)
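# --- Editor's note (not part of the original example): a typical invocation
# of this script might look like the following; the flag values are arbitrary
# and only illustrate the expected types.
#
#   python random_forest_mnist.py --model_dir=/tmp/rf_model \
#       --data_dir=/tmp/data --num_trees=50 --max_nodes=500 --batch_size=500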
| apache-2.0 |
mrramazani/mysql-replicant-python-1 | lib/tests/test_commands.py | 2 | 4096 | # Copyright (c) 2010, Mats Kindahl, Charles Bell, and Lars Thalmann
# All rights reserved.
#
# Use of this source code is goverened by a BSD licence that can be
# found in the LICENCE file.
import sys
import os.path
here = os.path.dirname(os.path.abspath(__file__))
rootpath = os.path.split(here)[0]
sys.path.append(rootpath)
import re
import unittest
from mysql.replicant.roles import (
Final,
Master,
)
from mysql.replicant.server import (
User,
)
from mysql.replicant.commands import (
change_master,
fetch_master_position,
fetch_slave_position,
slave_wait_and_stop,
slave_wait_for_pos,
)
from tests.utils import load_deployment
class TestCommands(unittest.TestCase):
"""Test case to test various commands"""
def __init__(self, methodNames, options={}):
super(TestCommands, self).__init__(methodNames)
my = load_deployment(options['deployment'])
self.master = my.master
self.masters = my.servers[0:1]
self.slaves = my.servers[2:3]
def setUp(self):
master_role = Master(User("repl_user", "xyzzy"))
for master in self.masters:
master_role.imbue(master)
final_role = Final(self.masters[0])
for slave in self.slaves:
try:
final_role.imbue(slave)
except IOError:
pass
def testChangeMaster(self):
"Test the change_master function"
for slave in self.slaves:
change_master(slave, self.master)
self.master.sql("DROP TABLE IF EXISTS t1", db="test")
self.master.sql("CREATE TABLE t1 (a INT)", db="test")
self.master.disconnect()
for slave in self.slaves:
result = slave.sql("SHOW TABLES", db="test")
def testSlaveWaitForPos(self):
"Test the slave_wait_for_pos function"
slave = self.slaves[0]
master = self.master
slave.sql("STOP SLAVE")
pos1 = fetch_master_position(master)
change_master(slave, master, pos1)
slave.sql("START SLAVE")
master.sql("DROP TABLE IF EXISTS t1", db="test")
master.sql("CREATE TABLE t1 (a INT)", db="test")
master.sql("INSERT INTO t1 VALUES (1),(2)", db="test")
pos2 = fetch_master_position(master)
slave_wait_for_pos(slave, pos2)
pos3 = fetch_slave_position(slave)
self.assert_(pos3 >= pos2)
def testSlaveWaitAndStop(self):
"Test the slave_wait_and_stop function"
slave = self.slaves[0]
master = self.master
slave.sql("STOP SLAVE")
pos1 = fetch_master_position(master)
change_master(slave, master, pos1)
slave.sql("START SLAVE")
master.sql("DROP TABLE IF EXISTS t1", db="test")
master.sql("CREATE TABLE t1 (a INT)", db="test")
master.sql("INSERT INTO t1 VALUES (1),(2)", db="test")
pos2 = fetch_master_position(master)
master.sql("INSERT INTO t1 VALUES (3),(4)", db="test")
pos3 = fetch_master_position(master)
slave_wait_and_stop(slave, pos2)
pos4 = fetch_slave_position(slave)
self.assertEqual(pos2, pos4)
row = slave.sql("SELECT COUNT(*) AS count FROM t1", db="test")
self.assertEqual(row['count'], 2)
slave.sql("START SLAVE")
slave_wait_and_stop(slave, pos3)
row = slave.sql("SELECT COUNT(*) AS count FROM t1", db="test")
self.assertEqual(row['count'], 4)
def testSlaveStatusWaitUntil(self):
"Test slave_status_wait_until"
slave = self.slaves[0]
master = self.master
slave.sql("STOP SLAVE")
pos1 = fetch_master_position(master)
change_master(slave, master, pos1)
slave.sql("START SLAVE")
def suite(options={}):
if not options['deployment']:
return None
suite = unittest.TestSuite()
for test in unittest.defaultTestLoader.getTestCaseNames(TestCommands):
suite.addTest(TestCommands(test, options))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='suite')
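# --- Editor's note (not part of the original module): these tests expect a
# deployment name to be passed through `options`; a rough sketch of running
# them programmatically (the deployment name is a placeholder):
#
#   import unittest
#   from tests import test_commands
#   runner = unittest.TextTestRunner()
#   runner.run(test_commands.suite({'deployment': 'my-deployment'}))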
| bsd-3-clause |
Antiun/odoomrp-wip | stock_move_purchase_price/__openerp__.py | 27 | 1368 |
# -*- encoding: utf-8 -*-
##############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
{
"name": "Stock move purchase price",
"version": "1.0",
"depends": ["base", "stock", "purchase"],
"author": "OdooMRP team,"
"AvanzOSC,"
"Serv. Tecnol. Avanzados - Pedro M. Baeza",
"contributors": ["Mikel Arregi <mikelarregi@avanzosc.es>"],
"category": "purchase",
"description": """
Adds price column on backorder lines
""",
'data': ['views/stock_picking_line_info.xml'],
"installable": True,
"auto_install": False,
}
| agpl-3.0 |
maciek263/django2 | myvenv/Lib/site-packages/django/contrib/admin/templatetags/admin_modify.py | 139 | 2353 | from django import template
register = template.Library()
@register.inclusion_tag('admin/prepopulated_fields_js.html', takes_context=True)
def prepopulated_fields_js(context):
"""
Creates a list of prepopulated_fields that should render Javascript for
the prepopulated fields for both the admin form and inlines.
"""
prepopulated_fields = []
if 'adminform' in context:
prepopulated_fields.extend(context['adminform'].prepopulated_fields)
if 'inline_admin_formsets' in context:
for inline_admin_formset in context['inline_admin_formsets']:
for inline_admin_form in inline_admin_formset:
if inline_admin_form.original is None:
prepopulated_fields.extend(inline_admin_form.prepopulated_fields)
context.update({'prepopulated_fields': prepopulated_fields})
return context
@register.inclusion_tag('admin/submit_line.html', takes_context=True)
def submit_row(context):
"""
Displays the row of buttons for delete and save.
"""
opts = context['opts']
change = context['change']
is_popup = context['is_popup']
save_as = context['save_as']
ctx = {
'opts': opts,
'show_delete_link': (
not is_popup and context['has_delete_permission'] and
change and context.get('show_delete', True)
),
'show_save_as_new': not is_popup and change and save_as,
'show_save_and_add_another': (
context['has_add_permission'] and not is_popup and
(not save_as or context['add'])
),
'show_save_and_continue': not is_popup and context['has_change_permission'],
'is_popup': is_popup,
'show_save': True,
'preserved_filters': context.get('preserved_filters'),
}
if context.get('original') is not None:
ctx['original'] = context['original']
return ctx
@register.filter
def cell_count(inline_admin_form):
"""Returns the number of cells used in a tabular inline"""
count = 1 # Hidden cell with hidden 'id' field
for fieldset in inline_admin_form:
# Loop through all the fields (one per cell)
for line in fieldset:
for field in line:
count += 1
if inline_admin_form.formset.can_delete:
# Delete checkbox
count += 1
return count
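# --- Editor's note (not part of the original module): these tags and filters
# are used from admin templates. A sketch of the template side, following the
# admin's own conventions, might look like:
#
#   {% load admin_modify %}
#   {% prepopulated_fields_js %}
#   {% submit_row %}
#   <td colspan="{{ inline_admin_form|cell_count }}">...</td>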
| mit |
10clouds/edx-platform | lms/djangoapps/verify_student/views.py | 1 | 66191 | """
Views for the verification flow
"""
import datetime
import decimal
import json
import logging
import urllib
from pytz import UTC
from ipware.ip import get_ip
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.core.mail import send_mail
from django.core.urlresolvers import reverse
from django.db import transaction
from django.http import HttpResponse, HttpResponseBadRequest, Http404
from django.contrib.auth.models import User
from django.shortcuts import redirect
from django.utils import timezone
from django.utils.decorators import method_decorator
from django.utils.translation import ugettext as _, ugettext_lazy
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_POST
from django.views.generic.base import View
import analytics
from eventtracking import tracker
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey, UsageKey
from commerce.utils import audit_log, EcommerceService
from course_modes.models import CourseMode
from courseware.url_helpers import get_redirect_url
from edx_rest_api_client.exceptions import SlumberBaseException
from edxmako.shortcuts import render_to_response, render_to_string
from embargo import api as embargo_api
from microsite_configuration import microsite
from openedx.core.djangoapps.commerce.utils import ecommerce_api_client
from openedx.core.djangoapps.user_api.accounts import NAME_MIN_LENGTH
from openedx.core.djangoapps.user_api.accounts.api import update_account_settings
from openedx.core.djangoapps.user_api.errors import UserNotFound, AccountValidationError
from openedx.core.djangoapps.credit.api import set_credit_requirement_status
from student.models import CourseEnrollment
from shoppingcart.models import Order, CertificateItem
from shoppingcart.processors import (
get_signed_purchase_params, get_purchase_endpoint
)
from lms.djangoapps.verify_student.ssencrypt import has_valid_signature
from lms.djangoapps.verify_student.models import (
VerificationDeadline,
SoftwareSecurePhotoVerification,
VerificationCheckpoint,
VerificationStatus,
IcrvStatusEmailsConfiguration,
)
from lms.djangoapps.verify_student.image import decode_image_data, InvalidImageData
from util.json_request import JsonResponse
from util.date_utils import get_default_time_display
from util.db import outer_atomic
from xmodule.modulestore.django import modulestore
from django.contrib.staticfiles.storage import staticfiles_storage
log = logging.getLogger(__name__)
class PayAndVerifyView(View):
"""
View for the "verify and pay" flow.
This view is somewhat complicated, because the user
can enter it from a number of different places:
* From the "choose your track" page.
* After completing payment.
* From the dashboard in order to complete verification.
* From the dashboard in order to upgrade to a verified track.
The page will display different steps and requirements
depending on:
* Whether the user has submitted a photo verification recently.
* Whether the user has paid for the course.
* How the user reached the page (mostly affects messaging)
We are also super-paranoid about how users reach this page.
If they somehow aren't enrolled, or the course doesn't exist,
or they've unenrolled, or they've already paid/verified,
... then we try to redirect them to the page with the
most appropriate messaging (including the dashboard).
Note that this page does NOT handle re-verification
(photo verification that was denied or had an error);
that is handled by the "reverify" view.
"""
# Step definitions
#
# These represent the numbered steps a user sees in
# the verify / payment flow.
#
# Steps can either be:
# - displayed or hidden
# - complete or incomplete
#
# For example, when a user enters the verification/payment
# flow for the first time, the user will see steps
# for both payment and verification. As the user
# completes these steps (for example, submitting a photo)
# the steps will be marked "complete".
#
# If a user has already verified for another course,
# then the verification steps will be hidden,
# since the user has already completed them.
#
# If a user re-enters the flow from another application
# (for example, after completing payment through
# a third-party payment processor), then the user
# will resume the flow at an intermediate step.
#
INTRO_STEP = 'intro-step'
MAKE_PAYMENT_STEP = 'make-payment-step'
PAYMENT_CONFIRMATION_STEP = 'payment-confirmation-step'
FACE_PHOTO_STEP = 'face-photo-step'
ID_PHOTO_STEP = 'id-photo-step'
REVIEW_PHOTOS_STEP = 'review-photos-step'
ENROLLMENT_CONFIRMATION_STEP = 'enrollment-confirmation-step'
ALL_STEPS = [
INTRO_STEP,
MAKE_PAYMENT_STEP,
PAYMENT_CONFIRMATION_STEP,
FACE_PHOTO_STEP,
ID_PHOTO_STEP,
REVIEW_PHOTOS_STEP,
ENROLLMENT_CONFIRMATION_STEP
]
PAYMENT_STEPS = [
MAKE_PAYMENT_STEP,
PAYMENT_CONFIRMATION_STEP
]
VERIFICATION_STEPS = [
FACE_PHOTO_STEP,
ID_PHOTO_STEP,
REVIEW_PHOTOS_STEP,
ENROLLMENT_CONFIRMATION_STEP
]
# These steps can be skipped using the ?skip-first-step GET param
SKIP_STEPS = [
INTRO_STEP,
]
STEP_TITLES = {
INTRO_STEP: ugettext_lazy("Intro"),
MAKE_PAYMENT_STEP: ugettext_lazy("Make payment"),
PAYMENT_CONFIRMATION_STEP: ugettext_lazy("Payment confirmation"),
FACE_PHOTO_STEP: ugettext_lazy("Take photo"),
ID_PHOTO_STEP: ugettext_lazy("Take a photo of your ID"),
REVIEW_PHOTOS_STEP: ugettext_lazy("Review your info"),
ENROLLMENT_CONFIRMATION_STEP: ugettext_lazy("Enrollment confirmation"),
}
# Messages
#
    # Depending on how the user reached the page,
# we will display different text messaging.
# For example, we show users who are upgrading
# slightly different copy than users who are verifying
# for the first time.
#
FIRST_TIME_VERIFY_MSG = 'first-time-verify'
VERIFY_NOW_MSG = 'verify-now'
VERIFY_LATER_MSG = 'verify-later'
UPGRADE_MSG = 'upgrade'
PAYMENT_CONFIRMATION_MSG = 'payment-confirmation'
# Requirements
#
# These explain to the user what he or she
# will need to successfully pay and/or verify.
#
# These are determined by the steps displayed
# to the user; for example, if the user does not
# need to complete the verification steps,
# then the photo ID and webcam requirements are hidden.
#
ACCOUNT_ACTIVATION_REQ = "account-activation-required"
PHOTO_ID_REQ = "photo-id-required"
WEBCAM_REQ = "webcam-required"
STEP_REQUIREMENTS = {
ID_PHOTO_STEP: [PHOTO_ID_REQ, WEBCAM_REQ],
FACE_PHOTO_STEP: [WEBCAM_REQ],
}
# Deadline types
VERIFICATION_DEADLINE = "verification"
UPGRADE_DEADLINE = "upgrade"
@method_decorator(login_required)
def get(
self, request, course_id,
always_show_payment=False,
current_step=None,
message=FIRST_TIME_VERIFY_MSG
):
"""
Render the payment and verification flow.
Arguments:
request (HttpRequest): The request object.
course_id (unicode): The ID of the course the user is trying
to enroll in.
Keyword Arguments:
always_show_payment (bool): If True, show the payment steps
even if the user has already paid. This is useful
for users returning to the flow after paying.
current_step (string): The current step in the flow.
message (string): The messaging to display.
Returns:
HttpResponse
Raises:
Http404: The course does not exist or does not
have a verified mode.
"""
# Parse the course key
# The URL regex should guarantee that the key format is valid.
course_key = CourseKey.from_string(course_id)
course = modulestore().get_course(course_key)
# Verify that the course exists
if course is None:
log.warn(u"Could not find course with ID %s.", course_id)
raise Http404
# Check whether the user has access to this course
# based on country access rules.
redirect_url = embargo_api.redirect_if_blocked(
course_key,
user=request.user,
ip_address=get_ip(request),
url=request.path
)
if redirect_url:
return redirect(redirect_url)
# If the verification deadline has passed
# then show the user a message that he/she can't verify.
#
# We're making the assumptions (enforced in Django admin) that:
#
# 1) Only verified modes have verification deadlines.
#
# 2) If set, verification deadlines are always AFTER upgrade deadlines, because why would you
# let someone upgrade into a verified track if they can't complete verification?
#
verification_deadline = VerificationDeadline.deadline_for_course(course.id)
response = self._response_if_deadline_passed(course, self.VERIFICATION_DEADLINE, verification_deadline)
if response is not None:
log.info(u"Verification deadline for '%s' has passed.", course.id)
return response
# Retrieve the relevant course mode for the payment/verification flow.
#
# WARNING: this is technical debt! A much better way to do this would be to
# separate out the payment flow and use the product SKU to figure out what
# the user is trying to purchase.
#
# Nonetheless, for the time being we continue to make the really ugly assumption
# that at some point there was a paid course mode we can query for the price.
relevant_course_mode = self._get_paid_mode(course_key)
# If we can find a relevant course mode, then log that we're entering the flow
# Otherwise, this course does not support payment/verification, so respond with a 404.
if relevant_course_mode is not None:
if CourseMode.is_verified_mode(relevant_course_mode):
log.info(
u"Entering payment and verification flow for user '%s', course '%s', with current step '%s'.",
request.user.id, course_id, current_step
)
else:
log.info(
u"Entering payment flow for user '%s', course '%s', with current step '%s'",
request.user.id, course_id, current_step
)
else:
# Otherwise, there has never been a verified/paid mode,
# so return a page not found response.
log.warn(
u"No paid/verified course mode found for course '%s' for verification/payment flow request",
course_id
)
raise Http404
# If the user is trying to *pay* and the upgrade deadline has passed,
# then they shouldn't be able to enter the flow.
#
# NOTE: This should match the availability dates used by the E-Commerce service
# to determine whether a user can purchase a product. The idea is that if the service
# won't fulfill the order, we shouldn't even let the user get into the payment flow.
#
user_is_trying_to_pay = message in [self.FIRST_TIME_VERIFY_MSG, self.UPGRADE_MSG]
if user_is_trying_to_pay:
upgrade_deadline = relevant_course_mode.expiration_datetime
response = self._response_if_deadline_passed(course, self.UPGRADE_DEADLINE, upgrade_deadline)
if response is not None:
log.info(u"Upgrade deadline for '%s' has passed.", course.id)
return response
# Check whether the user has verified, paid, and enrolled.
# A user is considered "paid" if he or she has an enrollment
# with a paid course mode (such as "verified").
# For this reason, every paid user is enrolled, but not
# every enrolled user is paid.
        # If the course mode is not verified (i.e. only paid) then already_verified is always True
already_verified = (
self._check_already_verified(request.user)
if CourseMode.is_verified_mode(relevant_course_mode)
else True
)
already_paid, is_enrolled = self._check_enrollment(request.user, course_key)
# Redirect the user to a more appropriate page if the
# messaging won't make sense based on the user's
# enrollment / payment / verification status.
redirect_response = self._redirect_if_necessary(
message,
already_verified,
already_paid,
is_enrolled,
course_key,
user_is_trying_to_pay,
request.user,
relevant_course_mode.sku
)
if redirect_response is not None:
return redirect_response
display_steps = self._display_steps(
always_show_payment,
already_verified,
already_paid,
relevant_course_mode
)
requirements = self._requirements(display_steps, request.user.is_active)
if current_step is None:
current_step = display_steps[0]['name']
# Allow the caller to skip the first page
# This is useful if we want the user to be able to
# use the "back" button to return to the previous step.
# This parameter should only work for known skip-able steps
if request.GET.get('skip-first-step') and current_step in self.SKIP_STEPS:
display_step_names = [step['name'] for step in display_steps]
current_step_idx = display_step_names.index(current_step)
if (current_step_idx + 1) < len(display_steps):
current_step = display_steps[current_step_idx + 1]['name']
courseware_url = ""
if not course.start or course.start < datetime.datetime.today().replace(tzinfo=UTC):
courseware_url = reverse(
'course_root',
kwargs={'course_id': unicode(course_key)}
)
full_name = (
request.user.profile.name
if request.user.profile.name
else ""
)
# If the user set a contribution amount on another page,
# use that amount to pre-fill the price selection form.
contribution_amount = request.session.get(
'donation_for_course', {}
).get(unicode(course_key), '')
# Remember whether the user is upgrading
# so we can fire an analytics event upon payment.
request.session['attempting_upgrade'] = (message == self.UPGRADE_MSG)
# Determine the photo verification status
verification_good_until = self._verification_valid_until(request.user)
# get available payment processors
if relevant_course_mode.sku:
# transaction will be conducted via ecommerce service
processors = ecommerce_api_client(request.user).payment.processors.get()
else:
# transaction will be conducted using legacy shopping cart
processors = [settings.CC_PROCESSOR_NAME]
# Render the top-level page
context = {
'contribution_amount': contribution_amount,
'course': course,
'course_key': unicode(course_key),
'checkpoint_location': request.GET.get('checkpoint'),
'course_mode': relevant_course_mode,
'courseware_url': courseware_url,
'current_step': current_step,
'disable_courseware_js': True,
'display_steps': display_steps,
'is_active': json.dumps(request.user.is_active),
'message_key': message,
'platform_name': settings.PLATFORM_NAME,
'processors': processors,
'requirements': requirements,
'user_full_name': full_name,
'verification_deadline': (
get_default_time_display(verification_deadline)
if verification_deadline else ""
),
'already_verified': already_verified,
'verification_good_until': verification_good_until,
'capture_sound': staticfiles_storage.url("audio/camera_capture.wav"),
'nav_hidden': True,
'is_ab_testing': 'begin-flow' in request.path,
}
return render_to_response("verify_student/pay_and_verify.html", context)
def _redirect_if_necessary(
self, message, already_verified, already_paid, is_enrolled, course_key, # pylint: disable=bad-continuation
user_is_trying_to_pay, user, sku # pylint: disable=bad-continuation
):
"""Redirect the user to a more appropriate page if necessary.
In some cases, a user may visit this page with
verification / enrollment / payment state that
we don't anticipate. For example, a user may unenroll
from the course after paying for it, then visit the
"verify now" page to complete verification.
When this happens, we try to redirect the user to
the most appropriate page.
Arguments:
message (string): The messaging of the page. Should be a key
in `MESSAGES`.
already_verified (bool): Whether the user has submitted
a verification request recently.
already_paid (bool): Whether the user is enrolled in a paid
course mode.
is_enrolled (bool): Whether the user has an active enrollment
in the course.
course_key (CourseKey): The key for the course.
Returns:
HttpResponse or None
"""
url = None
course_kwargs = {'course_id': unicode(course_key)}
if already_verified and already_paid:
# If they've already paid and verified, there's nothing else to do,
# so redirect them to the dashboard.
if message != self.PAYMENT_CONFIRMATION_MSG:
url = reverse('dashboard')
elif message in [self.VERIFY_NOW_MSG, self.VERIFY_LATER_MSG, self.PAYMENT_CONFIRMATION_MSG]:
if is_enrolled:
# If the user is already enrolled but hasn't yet paid,
# then the "upgrade" messaging is more appropriate.
if not already_paid:
url = reverse('verify_student_upgrade_and_verify', kwargs=course_kwargs)
else:
# If the user is NOT enrolled, then send him/her
# to the first time verification page.
url = reverse('verify_student_start_flow', kwargs=course_kwargs)
elif message == self.UPGRADE_MSG:
if is_enrolled:
if already_paid:
# If the student has paid, but not verified, redirect to the verification flow.
url = reverse('verify_student_verify_now', kwargs=course_kwargs)
else:
url = reverse('verify_student_start_flow', kwargs=course_kwargs)
if user_is_trying_to_pay and user.is_active:
            # If the user is trying to pay, has activated their account, and the ecommerce service
            # is enabled, redirect them to the ecommerce checkout page.
ecommerce_service = EcommerceService()
if ecommerce_service.is_enabled(user):
url = ecommerce_service.checkout_page_url(sku)
# Redirect if necessary, otherwise implicitly return None
if url is not None:
return redirect(url)
def _get_paid_mode(self, course_key):
"""
Retrieve the paid course mode for a course.
The returned course mode may or may not be expired.
Unexpired modes are preferred to expired modes.
Arguments:
course_key (CourseKey): The location of the course.
Returns:
CourseMode tuple
"""
# Retrieve all the modes at once to reduce the number of database queries
all_modes, unexpired_modes = CourseMode.all_and_unexpired_modes_for_courses([course_key])
# Retrieve the first mode that matches the following criteria:
# * Unexpired
# * Price > 0
# * Not credit
for mode in unexpired_modes[course_key]:
if mode.min_price > 0 and not CourseMode.is_credit_mode(mode):
return mode
# Otherwise, find the first non credit expired paid mode
for mode in all_modes[course_key]:
if mode.min_price > 0 and not CourseMode.is_credit_mode(mode):
return mode
# Otherwise, return None and so the view knows to respond with a 404.
return None
def _display_steps(self, always_show_payment, already_verified, already_paid, course_mode):
"""Determine which steps to display to the user.
Includes all steps by default, but removes steps
if the user has already completed them.
Arguments:
always_show_payment (bool): If True, display the payment steps
even if the user has already paid.
already_verified (bool): Whether the user has submitted
a verification request recently.
already_paid (bool): Whether the user is enrolled in a paid
course mode.
Returns:
list
"""
display_steps = self.ALL_STEPS
remove_steps = set()
if already_verified or not CourseMode.is_verified_mode(course_mode):
remove_steps |= set(self.VERIFICATION_STEPS)
if already_paid and not always_show_payment:
remove_steps |= set(self.PAYMENT_STEPS)
else:
# The "make payment" step doubles as an intro step,
# so if we're showing the payment step, hide the intro step.
remove_steps |= set([self.INTRO_STEP])
return [
{
'name': step,
'title': unicode(self.STEP_TITLES[step]),
}
for step in display_steps
if step not in remove_steps
]
def _requirements(self, display_steps, is_active):
"""Determine which requirements to show the user.
For example, if the user needs to submit a photo
verification, tell the user that she will need
a photo ID and a webcam.
Arguments:
display_steps (list): The steps to display to the user.
is_active (bool): If False, adds a requirement to activate the user account.
Returns:
dict: Keys are requirement names, values are booleans
indicating whether to show the requirement.
"""
all_requirements = {
self.ACCOUNT_ACTIVATION_REQ: not is_active,
self.PHOTO_ID_REQ: False,
self.WEBCAM_REQ: False,
}
display_steps = set(step['name'] for step in display_steps)
for step, step_requirements in self.STEP_REQUIREMENTS.iteritems():
if step in display_steps:
for requirement in step_requirements:
all_requirements[requirement] = True
return all_requirements
def _verification_valid_until(self, user, date_format="%m/%d/%Y"):
"""
Check whether the user has a valid or pending verification.
Arguments:
user:
date_format: optional parameter for formatting datetime
object to string in response
Returns:
datetime object in string format
"""
photo_verifications = SoftwareSecurePhotoVerification.verification_valid_or_pending(user)
# return 'expiration_datetime' of latest photo verification if found,
# otherwise implicitly return ''
if photo_verifications:
return photo_verifications[0].expiration_datetime.strftime(date_format)
return ''
def _check_already_verified(self, user):
"""Check whether the user has a valid or pending verification.
Note that this includes cases in which the user's verification
has not been accepted (either because it hasn't been processed,
or there was an error).
This should return True if the user has done their part:
submitted photos within the expiration period.
"""
return SoftwareSecurePhotoVerification.user_has_valid_or_pending(user)
def _check_enrollment(self, user, course_key):
"""Check whether the user has an active enrollment and has paid.
If a user is enrolled in a paid course mode, we assume
that the user has paid.
Arguments:
user (User): The user to check.
course_key (CourseKey): The key of the course to check.
Returns:
Tuple `(has_paid, is_active)` indicating whether the user
has paid and whether the user has an active account.
"""
enrollment_mode, is_active = CourseEnrollment.enrollment_mode_for_user(user, course_key)
has_paid = False
if enrollment_mode is not None and is_active:
all_modes = CourseMode.modes_for_course_dict(course_key, include_expired=True)
course_mode = all_modes.get(enrollment_mode)
has_paid = (course_mode and course_mode.min_price > 0)
return (has_paid, bool(is_active))
def _response_if_deadline_passed(self, course, deadline_name, deadline_datetime):
"""
Respond with some error messaging if the deadline has passed.
Arguments:
course (Course): The course the user is trying to enroll in.
deadline_name (str): One of the deadline constants.
deadline_datetime (datetime): The deadline.
Returns: HttpResponse or None
"""
if deadline_name not in [self.VERIFICATION_DEADLINE, self.UPGRADE_DEADLINE]:
log.error("Invalid deadline name %s. Skipping check for whether the deadline passed.", deadline_name)
return None
deadline_passed = (
deadline_datetime is not None and
deadline_datetime < datetime.datetime.now(UTC)
)
if deadline_passed:
context = {
'course': course,
'deadline_name': deadline_name,
'deadline': (
get_default_time_display(deadline_datetime)
if deadline_datetime else ""
)
}
return render_to_response("verify_student/missed_deadline.html", context)
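# --- Editor's illustrative sketch (not part of the original module): a rough
# idea of how a view like this is typically wired into a URLconf. The regex
# helper, extra kwargs, and exact pattern below are assumptions for
# illustration only, not the project's actual URL configuration; only the
# route name 'verify_student_start_flow' is taken from the code above.
#
# from django.conf.urls import url
#
# urlpatterns = [
#     url(r'^course/{}/start/$'.format(settings.COURSE_ID_PATTERN),
#         PayAndVerifyView.as_view(),
#         {'message': PayAndVerifyView.FIRST_TIME_VERIFY_MSG},
#         name='verify_student_start_flow'),
# ]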
class SubscriptionView(View):
@method_decorator(login_required)
def get(self, request):
"""
Render the payment flow.
Arguments:
request (HttpRequest): The request object.
Returns:
HttpResponse
Raises:
Http404: The course does not exist.
"""
# Get the course key
course_key = CourseKey.from_string(settings.SUBSCRIPTION_COURSE_KEY)
course = modulestore().get_course(course_key)
# Verify that the course exists
if course is None:
log.warn(u"Could not find subscription dummy course with ID %s.", course_key)
raise Http404
# Check whether the user has access to subscription course
# based on country access rules.
redirect_url = embargo_api.redirect_if_blocked(
course_key,
user=request.user,
ip_address=get_ip(request),
url=request.path
)
if redirect_url:
return redirect(redirect_url)
# Retrieve the relevant course mode for the payment flow.
relevant_course_mode = self._get_paid_mode(course_key)
# If we can find a relevant course mode, then log that we're entering the flow
# Otherwise, this course does not support payment, so respond with a 404.
if relevant_course_mode is not None:
log.info(
u"Entering payment flow for user '%s', course '%s', mode '%s'",
request.user.id, course_key, relevant_course_mode
)
else:
log.warn(
u"No paid course mode found for subscription course '%s' for payment flow request",
course_key
)
raise Http404
already_paid, is_active = self._check_enrollment(request.user, course_key)
# Redirect the user to a more appropriate page if the
# messaging won't make sense based on the user's
# enrollment / payment status.
redirect_response = self._redirect_if_necessary(already_paid,
is_active)
if redirect_response is not None:
return redirect_response
full_name = (
request.user.profile.name
if request.user.profile.name
else ""
)
# get available payment processors
if relevant_course_mode.sku:
# transaction will be conducted via ecommerce service
processors = ecommerce_api_client(request.user).payment.processors.get()
else:
# transaction will be conducted using legacy shopping cart
processors = [settings.CC_PROCESSOR_NAME]
log.info(
u"Available payment processors for subscription payment flow '%s'",
processors
)
# Render the top-level page
context = {
'course': course,
'course_key': unicode(course_key),
'course_mode': relevant_course_mode,
'disable_courseware_js': True,
'is_active': json.dumps(request.user.is_active),
'platform_name': settings.PLATFORM_NAME,
'processors': processors,
'user_full_name': full_name,
'nav_hidden': False
}
return render_to_response("verify_student/subscription/pay.html", context)
def _redirect_if_necessary(self, already_paid, is_active):
"""Redirect the user to a more appropriate page if necessary.
Arguments:
already_paid (bool): Whether the user is enrolled in a paid
course mode.
is_active (bool): Whether the user is enrolled
Returns:
HttpResponse or None
"""
url = None
if already_paid and is_active:
# If they've already paid and verified, there's nothing else to do,
# so redirect them to the dashboard.
url = reverse('dashboard')
# Redirect if necessary, otherwise implicitly return None
if url is not None:
return redirect(url)
def _get_paid_mode(self, course_key):
"""
Retrieve the paid course mode for subscription course.
Arguments:
course_key (CourseKey): The location of the course.
Returns:
CourseMode tuple
"""
# Retrieve all the modes at once to reduce the number of database queries
all_modes, unexpired_modes = CourseMode.all_and_unexpired_modes_for_courses([course_key])
# Retrieve the first mode that matches the following criteria:
# * Unexpired
# * Price > 0
# * Not credit
for mode in unexpired_modes[course_key]:
if mode.min_price > 0 and not CourseMode.is_credit_mode(mode):
return mode
# Otherwise, find the first expired mode
for mode in all_modes[course_key]:
if mode.min_price > 0:
return mode
# Otherwise, return None and so the view knows to respond with a 404.
return None
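    # Illustrative sketch (not part of the original code): how the selection order above
    # plays out. Given a hypothetical course with an unexpired "verified" mode
    # (min_price=50) and an expired "professional" mode (min_price=100), this method
    # returns the unexpired "verified" mode; the expired mode is only a fallback, and
    # None is returned when no paid mode exists, which the caller turns into a 404:
    #
    #     relevant_course_mode = self._get_paid_mode(course_key)
    #     if relevant_course_mode is None:
    #         raise Http404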
def _check_enrollment(self, user, course_key):
"""Check whether the user has an active enrollment and has paid.
If a user is enrolled in a paid course mode, we assume
that the user has paid.
Arguments:
user (User): The user to check.
course_key (CourseKey): The key of the course to check.
Returns:
Tuple `(has_paid, is_active)` indicating whether the user
has paid and whether the user has an active account.
"""
enrollment_mode, is_active = CourseEnrollment.enrollment_mode_for_user(user, course_key)
has_paid = False
if enrollment_mode is not None and is_active:
all_modes = CourseMode.modes_for_course_dict(course_key, include_expired=True)
course_mode = all_modes.get(enrollment_mode)
has_paid = (course_mode and course_mode.min_price > 0)
return (has_paid, bool(is_active))
def checkout_with_ecommerce_service(user, course_key, course_mode, processor):
""" Create a new basket and trigger immediate checkout, using the E-Commerce API. """
course_id = unicode(course_key)
try:
api = ecommerce_api_client(user)
# Make an API call to create the order and retrieve the results
result = api.baskets.post({
'products': [{'sku': course_mode.sku}],
'checkout': True,
'payment_processor_name': processor
})
# Pass the payment parameters directly from the API response.
return result.get('payment_data')
except SlumberBaseException:
params = {'username': user.username, 'mode': course_mode.slug, 'course_id': course_id}
log.exception('Failed to create order for %(username)s %(mode)s mode of %(course_id)s', params)
raise
finally:
audit_log(
'checkout_requested',
course_id=course_id,
mode=course_mode.slug,
processor_name=processor,
user_id=user.id
)
def checkout_with_shoppingcart(request, user, course_key, course_mode, amount):
""" Create an order and trigger checkout using shoppingcart."""
cart = Order.get_cart_for_user(user)
cart.clear()
enrollment_mode = course_mode.slug
CertificateItem.add_to_order(cart, course_key, amount, enrollment_mode)
# Change the order's status so that we don't accidentally modify it later.
# We need to do this to ensure that the parameters we send to the payment system
# match what we store in the database.
# (Ordinarily we would do this client-side when the user submits the form, but since
# the JavaScript on this page does that immediately, we make the change here instead.
# This avoids a second AJAX call and some additional complication of the JavaScript.)
# If a user later re-enters the verification / payment flow, she will create a new order.
cart.start_purchase()
callback_url = request.build_absolute_uri(
reverse("shoppingcart.views.postpay_callback")
)
payment_data = {
'payment_processor_name': settings.CC_PROCESSOR_NAME,
'payment_page_url': get_purchase_endpoint(),
'payment_form_data': get_signed_purchase_params(
cart,
callback_url=callback_url,
extra_data=[unicode(course_key), course_mode.slug]
),
}
return payment_data
@require_POST
@login_required
def create_order(request):
"""
This endpoint is named 'create_order' for backward compatibility, but its
actual use is to add a single product to the user's cart and request
immediate checkout.
"""
course_id = request.POST['course_id']
course_id = CourseKey.from_string(course_id)
donation_for_course = request.session.get('donation_for_course', {})
contribution = request.POST.get("contribution", donation_for_course.get(unicode(course_id), 0))
try:
amount = decimal.Decimal(contribution).quantize(decimal.Decimal('.01'), rounding=decimal.ROUND_DOWN)
except decimal.InvalidOperation:
return HttpResponseBadRequest(_("Selected price is not valid number."))
current_mode = None
sku = request.POST.get('sku', None)
if sku:
try:
current_mode = CourseMode.objects.get(sku=sku)
except CourseMode.DoesNotExist:
log.exception(u'Failed to find CourseMode with SKU [%s].', sku)
if not current_mode:
        # If more than one paid mode (a mode with min_price > 0, e.g. verified/professional/no-id-professional)
        # exists for the course, choose the first one.
paid_modes = CourseMode.paid_modes_for_course(course_id)
if paid_modes:
if len(paid_modes) > 1:
log.warn(u"Multiple paid course modes found for course '%s' for create order request", course_id)
current_mode = paid_modes[0]
# Make sure this course has a paid mode
if not current_mode:
log.warn(u"Create order requested for course '%s' without a paid mode.", course_id)
return HttpResponseBadRequest(_("This course doesn't support paid certificates"))
if CourseMode.is_professional_mode(current_mode):
amount = current_mode.min_price
if amount < current_mode.min_price:
return HttpResponseBadRequest(_("No selected price or selected price is below minimum."))
if current_mode.sku:
# if request.POST doesn't contain 'processor' then the service's default payment processor will be used.
payment_data = checkout_with_ecommerce_service(
request.user,
course_id,
current_mode,
request.POST.get('processor')
)
else:
payment_data = checkout_with_shoppingcart(request, request.user, course_id, current_mode, amount)
if 'processor' not in request.POST:
# (XCOM-214) To be removed after release.
# the absence of this key in the POST payload indicates that the request was initiated from
# a stale js client, which expects a response containing only the 'payment_form_data' part of
# the payment data result.
payment_data = payment_data['payment_form_data']
return HttpResponse(json.dumps(payment_data), content_type="application/json")
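# Illustrative sketch (not part of the original code): how the "contribution" value is
# normalized in create_order above. Decimal.quantize with ROUND_DOWN truncates to cents
# rather than rounding, and a non-numeric value raises decimal.InvalidOperation, which
# the view maps to an HTTP 400 response.
#
#     import decimal
#     amount = decimal.Decimal("49.999").quantize(
#         decimal.Decimal('.01'), rounding=decimal.ROUND_DOWN
#     )
#     assert amount == decimal.Decimal('49.99')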
class SubmitPhotosView(View):
"""
End-point for submitting photos for verification.
"""
@method_decorator(transaction.non_atomic_requests)
def dispatch(self, *args, **kwargs): # pylint: disable=missing-docstring
return super(SubmitPhotosView, self).dispatch(*args, **kwargs)
@method_decorator(login_required)
@method_decorator(outer_atomic(read_committed=True))
def post(self, request):
"""
Submit photos for verification.
This end-point is used for the following cases:
* Initial verification through the pay-and-verify flow.
* Initial verification initiated from a checkpoint within a course.
* Re-verification initiated from a checkpoint within a course.
POST Parameters:
face_image (str): base64-encoded image data of the user's face.
photo_id_image (str): base64-encoded image data of the user's photo ID.
full_name (str): The user's full name, if the user is requesting a name change as well.
course_key (str): Identifier for the course, if initiated from a checkpoint.
checkpoint (str): Location of the checkpoint in the course.
"""
# If the user already has an initial verification attempt, we can re-use the photo ID
# the user submitted with the initial attempt. This is useful for the in-course reverification
# case in which users submit only the face photo and have it matched against their ID photos
# submitted with the initial verification.
initial_verification = SoftwareSecurePhotoVerification.get_initial_verification(request.user)
# Validate the POST parameters
params, response = self._validate_parameters(request, bool(initial_verification))
if response is not None:
return response
# If necessary, update the user's full name
if "full_name" in params:
response = self._update_full_name(request.user, params["full_name"])
if response is not None:
return response
# Retrieve the image data
# Validation ensures that we'll have a face image, but we may not have
# a photo ID image if this is a reverification.
face_image, photo_id_image, response = self._decode_image_data(
params["face_image"], params.get("photo_id_image")
)
        # If we have a photo ID image, we do not want to use the initial verification image.
if photo_id_image is not None:
initial_verification = None
if response is not None:
return response
# Submit the attempt
attempt = self._submit_attempt(request.user, face_image, photo_id_image, initial_verification)
# If this attempt was submitted at a checkpoint, then associate
# the attempt with the checkpoint.
submitted_at_checkpoint = "checkpoint" in params and "course_key" in params
if submitted_at_checkpoint:
checkpoint = self._associate_attempt_with_checkpoint(
request.user, attempt,
params["course_key"],
params["checkpoint"]
)
# If the submission came from an in-course checkpoint
if initial_verification is not None and submitted_at_checkpoint:
self._fire_event(request.user, "edx.bi.reverify.submitted", {
"category": "verification",
"label": unicode(params["course_key"]),
"checkpoint": checkpoint.checkpoint_name,
})
# Send a URL that the client can redirect to in order
# to return to the checkpoint in the courseware.
redirect_url = get_redirect_url(params["course_key"], params["checkpoint"])
return JsonResponse({"url": redirect_url})
# Otherwise, the submission came from an initial verification flow.
else:
self._fire_event(request.user, "edx.bi.verify.submitted", {"category": "verification"})
self._send_confirmation_email(request.user)
redirect_url = None
return JsonResponse({})
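    # Illustrative sketch (not part of the original code): the minimal POST payloads this
    # endpoint accepts, based on the parameter validation below. The values are
    # placeholders, not real base64 image data.
    #
    #     Initial verification:
    #         {"face_image": "<base64 data>", "photo_id_image": "<base64 data>"}
    #     In-course reverification (the photo ID from the initial attempt is reused):
    #         {"face_image": "<base64 data>", "course_key": "<course id>", "checkpoint": "<usage id>"}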
def _validate_parameters(self, request, has_initial_verification):
"""
Check that the POST parameters are valid.
Arguments:
request (HttpRequest): The request object.
has_initial_verification (bool): Whether the user has an initial verification attempt.
Returns:
            Tuple `(params, response)`, where `params` is a dict of validated parameters
            (or None on error) and `response` is an error HttpResponse (or None on success).
"""
# Pull out the parameters we care about.
params = {
param_name: request.POST[param_name]
for param_name in [
"face_image",
"photo_id_image",
"course_key",
"checkpoint",
"full_name"
]
if param_name in request.POST
}
# If the user already has an initial verification attempt, then we don't
# require the user to submit a photo ID image, since we can re-use the photo ID
# image from the initial attempt.
# If we don't have an initial verification OR a photo ID image, something has gone
# terribly wrong in the JavaScript. Log this as an error so we can track it down.
if "photo_id_image" not in params and not has_initial_verification:
log.error(
(
"User %s does not have an initial verification attempt "
"and no photo ID image data was provided. "
"This most likely means that the JavaScript client is not "
"correctly constructing the request to submit photos."
), request.user.id
)
return None, HttpResponseBadRequest(
_("Photo ID image is required if the user does not have an initial verification attempt.")
)
# The face image is always required.
if "face_image" not in params:
msg = _("Missing required parameter face_image")
return None, HttpResponseBadRequest(msg)
# If provided, parse the course key and checkpoint location
if "course_key" in params:
try:
params["course_key"] = CourseKey.from_string(params["course_key"])
except InvalidKeyError:
return None, HttpResponseBadRequest(_("Invalid course key"))
if "checkpoint" in params:
try:
params["checkpoint"] = UsageKey.from_string(params["checkpoint"]).replace(
course_key=params["course_key"]
)
except InvalidKeyError:
return None, HttpResponseBadRequest(_("Invalid checkpoint location"))
return params, None
def _update_full_name(self, user, full_name):
"""
Update the user's full name.
Arguments:
user (User): The user to update.
full_name (unicode): The user's updated full name.
Returns:
HttpResponse or None
"""
try:
update_account_settings(user, {"name": full_name})
except UserNotFound:
return HttpResponseBadRequest(_("No profile found for user"))
except AccountValidationError:
msg = _(
"Name must be at least {min_length} characters long."
).format(min_length=NAME_MIN_LENGTH)
return HttpResponseBadRequest(msg)
def _decode_image_data(self, face_data, photo_id_data=None):
"""
Decode image data sent with the request.
Arguments:
face_data (str): base64-encoded face image data.
Keyword Arguments:
photo_id_data (str): base64-encoded photo ID image data.
Returns:
tuple of (str, str, HttpResponse)
"""
try:
# Decode face image data (used for both an initial and re-verification)
face_image = decode_image_data(face_data)
# Decode the photo ID image data if it's provided
photo_id_image = (
decode_image_data(photo_id_data)
if photo_id_data is not None else None
)
return face_image, photo_id_image, None
except InvalidImageData:
msg = _("Image data is not valid.")
return None, None, HttpResponseBadRequest(msg)
def _submit_attempt(self, user, face_image, photo_id_image=None, initial_verification=None):
"""
Submit a verification attempt.
Arguments:
user (User): The user making the attempt.
face_image (str): Decoded face image data.
Keyword Arguments:
photo_id_image (str or None): Decoded photo ID image data.
initial_verification (SoftwareSecurePhotoVerification): The initial verification attempt.
"""
attempt = SoftwareSecurePhotoVerification(user=user)
# We will always have face image data, so upload the face image
attempt.upload_face_image(face_image)
# If an ID photo wasn't submitted, re-use the ID photo from the initial attempt.
# Earlier validation rules ensure that at least one of these is available.
if photo_id_image is not None:
attempt.upload_photo_id_image(photo_id_image)
elif initial_verification is None:
# Earlier validation should ensure that we never get here.
log.error(
"Neither a photo ID image or initial verification attempt provided. "
"Parameter validation in the view should prevent this from happening!"
)
# Submit the attempt
attempt.mark_ready()
attempt.submit(copy_id_photo_from=initial_verification)
return attempt
def _associate_attempt_with_checkpoint(self, user, attempt, course_key, usage_id):
"""
Associate the verification attempt with a checkpoint within a course.
Arguments:
user (User): The user making the attempt.
attempt (SoftwareSecurePhotoVerification): The verification attempt.
course_key (CourseKey): The identifier for the course.
usage_key (UsageKey): The location of the checkpoint within the course.
Returns:
VerificationCheckpoint
"""
checkpoint = VerificationCheckpoint.get_or_create_verification_checkpoint(course_key, usage_id)
checkpoint.add_verification_attempt(attempt)
VerificationStatus.add_verification_status(checkpoint, user, "submitted")
return checkpoint
def _send_confirmation_email(self, user):
"""
Send an email confirming that the user submitted photos
for initial verification.
"""
context = {
'full_name': user.profile.name,
'platform_name': microsite.get_value("PLATFORM_NAME", settings.PLATFORM_NAME)
}
subject = _("Verification photos received")
message = render_to_string('emails/photo_submission_confirmation.txt', context)
from_address = microsite.get_value('default_from_email', settings.DEFAULT_FROM_EMAIL)
to_address = user.email
try:
send_mail(subject, message, from_address, [to_address], fail_silently=False)
except: # pylint: disable=bare-except
# We catch all exceptions and log them.
# It would be much, much worse to roll back the transaction due to an uncaught
# exception than to skip sending the notification email.
log.exception("Could not send notification email for initial verification for user %s", user.id)
def _fire_event(self, user, event_name, parameters):
"""
Fire an analytics event.
Arguments:
user (User): The user who submitted photos.
event_name (str): Name of the analytics event.
parameters (dict): Event parameters.
Returns: None
"""
if settings.LMS_SEGMENT_KEY:
tracking_context = tracker.get_tracker().resolve_context()
context = {
'ip': tracking_context.get('ip'),
'Google Analytics': {
'clientId': tracking_context.get('client_id')
}
}
analytics.track(user.id, event_name, parameters, context=context)
def _compose_message_reverification_email(
course_key, user_id, related_assessment_location, status, request
): # pylint: disable=invalid-name
"""
Compose subject and message for photo reverification email.
Args:
course_key(CourseKey): CourseKey object
user_id(str): User Id
related_assessment_location(str): Location of reverification XBlock
        status(str): Approval status
        request(HttpRequest): The request, used to build absolute links in the email
Returns:
        None if any error occurred, otherwise a tuple of (subject, message) strings
"""
try:
usage_key = UsageKey.from_string(related_assessment_location)
reverification_block = modulestore().get_item(usage_key)
course = modulestore().get_course(course_key)
redirect_url = get_redirect_url(course_key, usage_key.replace(course_key=course_key))
subject = "Re-verification Status"
context = {
"status": status,
"course_name": course.display_name_with_default_escaped,
"assessment": reverification_block.related_assessment
}
        # Allowed attempts defaults to 1 if not set on the reverification block
allowed_attempts = reverification_block.attempts + 1
used_attempts = VerificationStatus.get_user_attempts(user_id, course_key, related_assessment_location)
left_attempts = allowed_attempts - used_attempts
is_attempt_allowed = left_attempts > 0
verification_open = True
if reverification_block.due:
verification_open = timezone.now() <= reverification_block.due
context["left_attempts"] = left_attempts
context["is_attempt_allowed"] = is_attempt_allowed
context["verification_open"] = verification_open
context["due_date"] = get_default_time_display(reverification_block.due)
context['platform_name'] = settings.PLATFORM_NAME
context["used_attempts"] = used_attempts
context["allowed_attempts"] = allowed_attempts
context["support_link"] = microsite.get_value('email_from_address', settings.CONTACT_EMAIL)
re_verification_link = reverse(
'verify_student_incourse_reverify',
args=(
unicode(course_key),
related_assessment_location
)
)
context["course_link"] = request.build_absolute_uri(redirect_url)
context["reverify_link"] = request.build_absolute_uri(re_verification_link)
message = render_to_string('emails/reverification_processed.txt', context)
log.info(
"Sending email to User_Id=%s. Attempts left for this user are %s. "
"Allowed attempts %s. "
"Due Date %s",
str(user_id), left_attempts, allowed_attempts, str(reverification_block.due)
)
return subject, message
# Catch all exception to avoid raising back to view
except: # pylint: disable=bare-except
log.exception("The email for re-verification sending failed for user_id %s", user_id)
def _send_email(user_id, subject, message):
""" Send email to given user
Args:
user_id(str): User Id
subject(str): Subject lines of emails
message(str): Email message body
Returns:
None
"""
from_address = microsite.get_value(
'email_from_address',
settings.DEFAULT_FROM_EMAIL
)
user = User.objects.get(id=user_id)
user.email_user(subject, message, from_address)
def _set_user_requirement_status(attempt, namespace, status, reason=None):
"""Sets the status of a credit requirement for the user,
based on a verification checkpoint.
"""
checkpoint = None
try:
checkpoint = VerificationCheckpoint.objects.get(photo_verification=attempt)
except VerificationCheckpoint.DoesNotExist:
log.error("Unable to find checkpoint for user with id %d", attempt.user.id)
if checkpoint is not None:
try:
set_credit_requirement_status(
attempt.user.username,
checkpoint.course_id,
namespace,
checkpoint.checkpoint_location,
status=status,
reason=reason,
)
except Exception: # pylint: disable=broad-except
# Catch exception if unable to add credit requirement
# status for user
log.error("Unable to add Credit requirement status for user with id %d", attempt.user.id)
@require_POST
@csrf_exempt # SS does its own message signing, and their API won't have a cookie value
def results_callback(request):
"""
Software Secure will call this callback to tell us whether a user is
verified to be who they said they are.
"""
body = request.body
try:
body_dict = json.loads(body)
except ValueError:
log.exception("Invalid JSON received from Software Secure:\n\n{}\n".format(body))
return HttpResponseBadRequest("Invalid JSON. Received:\n\n{}".format(body))
if not isinstance(body_dict, dict):
log.error("Reply from Software Secure is not a dict:\n\n{}\n".format(body))
return HttpResponseBadRequest("JSON should be dict. Received:\n\n{}".format(body))
headers = {
"Authorization": request.META.get("HTTP_AUTHORIZATION", ""),
"Date": request.META.get("HTTP_DATE", "")
}
has_valid_signature(
"POST",
headers,
body_dict,
settings.VERIFY_STUDENT["SOFTWARE_SECURE"]["API_ACCESS_KEY"],
settings.VERIFY_STUDENT["SOFTWARE_SECURE"]["API_SECRET_KEY"]
)
_response, access_key_and_sig = headers["Authorization"].split(" ")
access_key = access_key_and_sig.split(":")[0]
# This is what we should be doing...
#if not sig_valid:
# return HttpResponseBadRequest("Signature is invalid")
# This is what we're doing until we can figure out why we disagree on sigs
if access_key != settings.VERIFY_STUDENT["SOFTWARE_SECURE"]["API_ACCESS_KEY"]:
return HttpResponseBadRequest("Access key invalid")
receipt_id = body_dict.get("EdX-ID")
result = body_dict.get("Result")
reason = body_dict.get("Reason", "")
error_code = body_dict.get("MessageType", "")
try:
attempt = SoftwareSecurePhotoVerification.objects.get(receipt_id=receipt_id)
except SoftwareSecurePhotoVerification.DoesNotExist:
log.error("Software Secure posted back for receipt_id %s, but not found", receipt_id)
return HttpResponseBadRequest("edX ID {} not found".format(receipt_id))
if result == "PASS":
log.debug("Approving verification for %s", receipt_id)
attempt.approve()
status = "approved"
_set_user_requirement_status(attempt, 'reverification', 'satisfied')
elif result == "FAIL":
log.debug("Denying verification for %s", receipt_id)
attempt.deny(json.dumps(reason), error_code=error_code)
status = "denied"
_set_user_requirement_status(
attempt, 'reverification', 'failed', json.dumps(reason)
)
elif result == "SYSTEM FAIL":
log.debug("System failure for %s -- resetting to must_retry", receipt_id)
attempt.system_error(json.dumps(reason), error_code=error_code)
status = "error"
log.error("Software Secure callback attempt for %s failed: %s", receipt_id, reason)
else:
log.error("Software Secure returned unknown result %s", result)
return HttpResponseBadRequest(
"Result {} not understood. Known results: PASS, FAIL, SYSTEM FAIL".format(result)
)
checkpoints = VerificationCheckpoint.objects.filter(photo_verification=attempt).all()
VerificationStatus.add_status_from_checkpoints(checkpoints=checkpoints, user=attempt.user, status=status)
# Trigger ICRV email only if ICRV status emails config is enabled
icrv_status_emails = IcrvStatusEmailsConfiguration.current()
if icrv_status_emails.enabled and checkpoints:
user_id = attempt.user.id
course_key = checkpoints[0].course_id
related_assessment_location = checkpoints[0].checkpoint_location
subject, message = _compose_message_reverification_email(
course_key, user_id, related_assessment_location, status, request
)
_send_email(user_id, subject, message)
return HttpResponse("OK!")
class ReverifyView(View):
"""
Reverification occurs when a user's initial verification is denied
or expires. When this happens, users can re-submit photos through
the re-verification flow.
Unlike in-course reverification, this flow requires users to submit
*both* face and ID photos. In contrast, during in-course reverification,
students submit only face photos, which are matched against the ID photo
the user submitted during initial verification.
"""
@method_decorator(login_required)
def get(self, request):
"""
Render the reverification flow.
Most of the work is done client-side by composing the same
Backbone views used in the initial verification flow.
"""
status, _ = SoftwareSecurePhotoVerification.user_status(request.user)
        # If the user has no initial verification, or if the verification
        # is still pending, has expired, or must be redone, then allow the
        # user to submit photo verification.
# A photo verification is marked as 'pending' if its status is either
# 'submitted' or 'must_retry'.
if status in ["none", "must_reverify", "expired", "pending"]:
context = {
"user_full_name": request.user.profile.name,
"platform_name": settings.PLATFORM_NAME,
"capture_sound": staticfiles_storage.url("audio/camera_capture.wav"),
}
return render_to_response("verify_student/reverify.html", context)
else:
context = {
"status": status
}
return render_to_response("verify_student/reverify_not_allowed.html", context)
class InCourseReverifyView(View):
"""
The in-course reverification view.
In-course reverification occurs while a student is taking a course.
At points in the course, students are prompted to submit face photos,
which are matched against the ID photos the user submitted during their
initial verification.
Students are prompted to enter this flow from an "In Course Reverification"
XBlock (courseware component) that course authors add to the course.
See https://github.com/edx/edx-reverification-block for more details.
"""
@method_decorator(login_required)
def get(self, request, course_id, usage_id):
"""Display the view for face photo submission.
Args:
request(HttpRequest): HttpRequest object
course_id(str): A string of course id
usage_id(str): Location of Reverification XBlock in courseware
Returns:
HttpResponse
"""
user = request.user
course_key = CourseKey.from_string(course_id)
course = modulestore().get_course(course_key)
if course is None:
log.error(u"Could not find course '%s' for in-course reverification.", course_key)
raise Http404
try:
checkpoint = VerificationCheckpoint.objects.get(course_id=course_key, checkpoint_location=usage_id)
except VerificationCheckpoint.DoesNotExist:
log.error(
u"No verification checkpoint exists for the "
u"course '%s' and checkpoint location '%s'.",
course_key, usage_id
)
raise Http404
initial_verification = SoftwareSecurePhotoVerification.get_initial_verification(user)
if not initial_verification:
return self._redirect_to_initial_verification(user, course_key, usage_id)
# emit the reverification event
self._track_reverification_events('edx.bi.reverify.started', user.id, course_id, checkpoint.checkpoint_name)
context = {
'course_key': unicode(course_key),
'course_name': course.display_name_with_default_escaped,
'checkpoint_name': checkpoint.checkpoint_name,
'platform_name': settings.PLATFORM_NAME,
'usage_id': usage_id,
'capture_sound': staticfiles_storage.url("audio/camera_capture.wav"),
}
return render_to_response("verify_student/incourse_reverify.html", context)
def _track_reverification_events(self, event_name, user_id, course_id, checkpoint):
"""Track re-verification events for a user against a reverification
checkpoint of a course.
Arguments:
event_name (str): Name of event being tracked
user_id (str): The ID of the user
course_id (unicode): ID associated with the course
checkpoint (str): Checkpoint name
Returns:
None
"""
log.info(
u"In-course reverification: event %s occurred for user '%s' in course '%s' at checkpoint '%s'",
event_name, user_id, course_id, checkpoint
)
if settings.LMS_SEGMENT_KEY:
tracking_context = tracker.get_tracker().resolve_context()
analytics.track(
user_id,
event_name,
{
'category': "verification",
'label': unicode(course_id),
'checkpoint': checkpoint
},
context={
'ip': tracking_context.get('ip'),
'Google Analytics': {
'clientId': tracking_context.get('client_id')
}
}
)
def _redirect_to_initial_verification(self, user, course_key, checkpoint):
"""
Redirect because the user does not have an initial verification.
We will redirect the user to the initial verification flow,
passing the identifier for this checkpoint. When the user
submits a verification attempt, it will count for *both*
the initial and checkpoint verification.
Arguments:
user (User): The user who made the request.
course_key (CourseKey): The identifier for the course for which
the user is attempting to re-verify.
checkpoint (string): Location of the checkpoint in the courseware.
Returns:
HttpResponse
"""
log.info(
u"User %s does not have an initial verification, so "
u"he/she will be redirected to the \"verify later\" flow "
u"for the course %s.",
user.id, course_key
)
base_url = reverse('verify_student_verify_now', kwargs={'course_id': unicode(course_key)})
params = urllib.urlencode({"checkpoint": checkpoint})
full_url = u"{base}?{params}".format(base=base_url, params=params)
return redirect(full_url)
| agpl-3.0 |
cedrichu/kernel_tcp_stack | scripts/tracing/draw_functrace.py | 14676 | 3560 | #!/usr/bin/python
"""
Copyright 2008 (c) Frederic Weisbecker <fweisbec@gmail.com>
Licensed under the terms of the GNU GPL License version 2
This script parses a trace provided by the function tracer in
kernel/trace/trace_functions.c
The resulting trace is processed into a tree to produce a more human-readable
view of the call stack by drawing a textual but hierarchical tree of
calls. Only the functions' names and the call times are provided.
Usage:
Be sure that you have CONFIG_FUNCTION_TRACER
# mount -t debugfs nodev /sys/kernel/debug
# echo function > /sys/kernel/debug/tracing/current_tracer
$ cat /sys/kernel/debug/tracing/trace_pipe > ~/raw_trace_func
Wait for a while, but not too long; the script is a bit slow.
Break the pipe (Ctrl + Z)
$ scripts/draw_functrace.py < raw_trace_func > draw_functrace
Then you have your drawn trace in draw_functrace
"""
import sys, re
class CallTree:
""" This class provides a tree representation of the functions
call stack. If a function has no parent in the kernel (interrupt,
syscall, kernel thread...) then it is attached to a virtual parent
called ROOT.
"""
ROOT = None
def __init__(self, func, time = None, parent = None):
self._func = func
self._time = time
if parent is None:
self._parent = CallTree.ROOT
else:
self._parent = parent
self._children = []
def calls(self, func, calltime):
""" If a function calls another one, call this method to insert it
into the tree at the appropriate place.
@return: A reference to the newly created child node.
"""
child = CallTree(func, calltime, self)
self._children.append(child)
return child
def getParent(self, func):
""" Retrieve the last parent of the current node that
has the name given by func. If this function is not
		found in any parent, then create it as a new child of root.
@return: A reference to the parent.
"""
tree = self
while tree != CallTree.ROOT and tree._func != func:
tree = tree._parent
if tree == CallTree.ROOT:
child = CallTree.ROOT.calls(func, None)
return child
return tree
def __repr__(self):
return self.__toString("", True)
def __toString(self, branch, lastChild):
if self._time is not None:
s = "%s----%s (%s)\n" % (branch, self._func, self._time)
else:
s = "%s----%s\n" % (branch, self._func)
i = 0
if lastChild:
branch = branch[:-1] + " "
while i < len(self._children):
if i != len(self._children) - 1:
s += "%s" % self._children[i].__toString(branch +\
" |", False)
else:
s += "%s" % self._children[i].__toString(branch +\
" |", True)
i += 1
return s
class BrokenLineException(Exception):
"""If the last line is not complete because of the pipe breakage,
we want to stop the processing and ignore this line.
"""
pass
class CommentLineException(Exception):
""" If the line is a comment (as in the beginning of the trace file),
just ignore it.
"""
pass
def parseLine(line):
line = line.strip()
if line.startswith("#"):
raise CommentLineException
m = re.match("[^]]+?\\] +([0-9.]+): (\\w+) <-(\\w+)", line)
if m is None:
raise BrokenLineException
return (m.group(1), m.group(2), m.group(3))
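# Illustrative sketch (not part of the original script): what parseLine extracts from a
# typical function-tracer line. The sample line below is an assumed trace format that
# matches the regular expression above.
#
#     line = "           bash-4251  [001]  6912.424016: sys_read <-system_call_fastpath"
#     parseLine(line)  # -> ("6912.424016", "sys_read", "system_call_fastpath")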
def main():
CallTree.ROOT = CallTree("Root (Nowhere)", None, None)
tree = CallTree.ROOT
for line in sys.stdin:
try:
calltime, callee, caller = parseLine(line)
except BrokenLineException:
break
except CommentLineException:
continue
tree = tree.getParent(caller)
tree = tree.calls(callee, calltime)
print CallTree.ROOT
if __name__ == "__main__":
main()
| gpl-2.0 |
pcolmant/repanier | repanier/views/unsubscribe_view.py | 1 | 1150 | from django.http import HttpResponseRedirect
from django.shortcuts import render
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_protect
from repanier.models.customer import Customer
from repanier.tools import get_repanier_template_name
@csrf_protect
@never_cache
def unsubscribe_view(request, customer_id, token):
"""
User is immediately unsubscribed
if they came from an unexpired unsubscribe link.
"""
customer = Customer.objects.filter(id=customer_id).order_by("?").first()
if customer is not None and customer.check_token(token):
# unsubscribe them
# customer.save(update_fields=['subscribe_to_email'])
        # use the queryset update below because calling save() above would invoke the "pre_save" function, which resets valid_email to None
if customer.subscribe_to_email:
Customer.objects.filter(id=customer.id).order_by("?").update(
subscribe_to_email=False
)
template_name = get_repanier_template_name("registration/unsubscribe.html")
return render(request, template_name)
else:
return HttpResponseRedirect("/")
| gpl-3.0 |
gregdek/ansible | lib/ansible/modules/network/junos/junos_lldp_interface.py | 29 | 4865 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Ansible by Red Hat, inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: junos_lldp_interface
version_added: "2.4"
author: "Ganesh Nalawade (@ganeshrn)"
short_description: Manage LLDP interfaces configuration on Juniper JUNOS network devices
description:
- This module provides declarative management of LLDP interfaces
configuration on Juniper JUNOS network devices.
options:
name:
description:
- Name of the interface LLDP should be configured on.
state:
description:
- Value of C(present) ensures given LLDP configured on given I(interfaces)
and is enabled, for value of C(absent) LLDP configuration on given I(interfaces) deleted.
Value C(enabled) ensures LLDP protocol is enabled on given I(interfaces) and
for value of C(disabled) it ensures LLDP is disabled on given I(interfaces).
default: present
choices: ['present', 'absent', 'enabled', 'disabled']
active:
description:
- Specifies whether or not the configuration is active or deactivated
default: True
type: bool
requirements:
- ncclient (>=v0.5.2)
notes:
- This module requires the netconf system service be enabled on
the remote device being managed.
- Tested against vSRX JUNOS version 15.1X49-D15.4, vqfx-10000 JUNOS Version 15.1X53-D60.4.
- Recommended connection is C(netconf). See L(the Junos OS Platform Options,../network/user_guide/platform_junos.html).
- This module also works with C(local) connections for legacy playbooks.
extends_documentation_fragment: junos
"""
EXAMPLES = """
- name: Configure LLDP on specific interfaces
junos_lldp_interface:
name: ge-0/0/5
state: present
- name: Disable LLDP on specific interfaces
junos_lldp_interface:
name: ge-0/0/5
state: disabled
- name: Enable LLDP on specific interfaces
junos_lldp_interface:
name: ge-0/0/5
state: enabled
- name: Delete LLDP configuration on specific interfaces
junos_lldp_interface:
name: ge-0/0/5
state: present
- name: Deactivate LLDP on specific interfaces
junos_lldp_interface:
name: ge-0/0/5
state: present
active: False
- name: Activate LLDP on specific interfaces
junos_lldp_interface:
name: ge-0/0/5
state: present
active: True
"""
RETURN = """
diff.prepared:
description: Configuration difference before and after applying change.
returned: when configuration is changed and diff option is enabled.
type: str
sample: >
[edit protocols lldp]
+ interface ge-0/0/5;
"""
import collections
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.junos.junos import junos_argument_spec
from ansible.module_utils.network.junos.junos import load_config, map_params_to_obj, map_obj_to_ele, tostring
from ansible.module_utils.network.junos.junos import commit_configuration, discard_changes, locked_config
USE_PERSISTENT_CONNECTION = True
def main():
""" main entry point for module execution
"""
argument_spec = dict(
name=dict(),
state=dict(default='present', choices=['present', 'absent', 'enabled', 'disabled']),
active=dict(default=True, type='bool')
)
argument_spec.update(junos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
warnings = list()
result = {'changed': False}
if warnings:
result['warnings'] = warnings
top = 'protocols/lldp/interface'
param_to_xpath_map = collections.OrderedDict()
param_to_xpath_map.update([
('name', {'xpath': 'name', 'is_key': True}),
('disable', {'xpath': 'disable', 'tag_only': True})
])
item = module.params.copy()
state = item.get('state')
item['disable'] = True if state in ('disabled', 'absent') else False
if state in ('enabled', 'disabled'):
item['state'] = 'present'
want = map_params_to_obj(module, param_to_xpath_map, param=item)
ele = map_obj_to_ele(module, want, top, param=item)
with locked_config(module):
diff = load_config(module, tostring(ele), warnings, action='merge')
commit = not module.check_mode
if diff:
if commit:
commit_configuration(module)
else:
discard_changes(module)
result['changed'] = True
if module._diff:
result['diff'] = {'prepared': diff}
module.exit_json(**result)
if __name__ == "__main__":
main()
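# Illustrative sketch (not part of the original module): a rough idea of the configuration
# element built from param_to_xpath_map for name=ge-0/0/5 and state=disabled. The exact
# XML produced by map_obj_to_ele is assumed here, not verified.
#
#     <protocols>
#       <lldp>
#         <interface>
#           <name>ge-0/0/5</name>
#           <disable/>
#         </interface>
#       </lldp>
#     </protocols>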
| gpl-3.0 |
rodorad/spark-tk | integration-tests/tests/test_frame_take.py | 14 | 3764 | # vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from setup import tc
def _make_frame(tc):
schema = [('name',str), ('age', int), ('tenure', int), ('phone', str)]
rows = [['Fred', 39, 16, '555-1234'], ['Susan', 33, 3, '555-0202'], ['Thurston', 65, 26, '555-4510'], ['Judy', 44, 14, '555-2183']]
frame = tc.frame.create(rows, schema)
return frame
def test_take_python_backend(tc):
frame = _make_frame(tc)
data1 = frame.take(2, columns=['name', 'phone'])
assert(data1 == [['Fred', '555-1234'], ['Susan', '555-0202']])
data2 = frame.take(2, offset=2)
assert(data2 == [['Thurston', 65, 26, '555-4510'], ['Judy', 44, 14, '555-2183']])
data3 = frame.take(2, offset=2, columns=['name', 'tenure'])
assert(data3 == [['Thurston', 26], ['Judy', 14]])
data4 = frame.take(0, offset=2, columns=['name', 'tenure'])
assert(data4 == [])
data5 = frame.take(10)
assert(data5 == [[u'Fred', 39, 16, u'555-1234'], [u'Susan', 33, 3, u'555-0202'], [u'Thurston', 65, 26, u'555-4510'], [u'Judy', 44, 14, u'555-2183']])
data6 = frame.take(3, offset=3)
assert(data6 == [[u'Judy', 44, 14, u'555-2183']])
data7 = frame.take(3, offset=3, columns=['name', 'tenure'])
assert(data7 == [['Judy', 14]])
data8 = frame.take(2, offset=6, columns=['name', 'tenure']) # offset beyond
assert(data8 == [])
def test_take_scala_backend(tc):
frame = _make_frame(tc)
frame._scala
data1 = frame.take(2, columns=['name', 'phone'])
assert(data1 == [[u'Fred', u'555-1234'], [u'Susan', u'555-0202']])
data2 = frame.take(2, offset=2)
assert(data2 == [[u'Thurston', 65, 26, u'555-4510'], [u'Judy', 44, 14, u'555-2183']])
data3 = frame.take(2, offset=2, columns=['name', 'tenure'])
assert(data3 == [[u'Thurston', 26], [u'Judy', 14]])
data4 = frame.take(0, offset=2, columns=['name', 'tenure'])
assert(data4 == [])
data5 = frame.take(10)
assert(data5 == [[u'Fred', 39, 16, u'555-1234'], [u'Susan', 33, 3, u'555-0202'], [u'Thurston', 65, 26, u'555-4510'], [u'Judy', 44, 14, u'555-2183']])
data6 = frame.take(3, offset=3)
assert(data6 == [[u'Judy', 44, 14, u'555-2183']])
data7 = frame.take(3, offset=3, columns=['name', 'tenure'])
assert(data7 == [['Judy', 14]])
data8 = frame.take(2, offset=6, columns=['name', 'tenure']) # offset beyond
assert(data8 == [])
def test_take_python_backend_negative(tc):
frame = _make_frame(tc)
try:
frame.take(-1)
except ValueError:
pass
else:
raise RuntimeError("expected bad arugment error")
try:
frame.take(3, offset=-10)
except ValueError:
pass
else:
raise RuntimeError("expected bad arugment error")
def test_take_scala_backend_negative(tc):
frame = _make_frame(tc)
frame._scala
try:
frame.take(-1)
except ValueError:
pass
else:
raise RuntimeError("expected bad arugment error")
try:
frame.take(3, offset=-10)
except ValueError:
pass
else:
raise RuntimeError("expected bad arugment error")
| apache-2.0 |
infobloxopen/infoblox-netmri | infoblox_netmri/api/broker/v3_0_0/iprg_broker.py | 17 | 54667 | from ..broker import Broker
class IprgBroker(Broker):
controller = "iprgs"
def show(self, **kwargs):
"""Shows the details for the specified iprg.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param IprgID: The internal NetMRI identifier for this HSRP/VRRP Group.
:type IprgID: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param methods: A list of iprg methods. The listed methods will be called on each iprg returned and included in the output. Available methods are: active_member, data_source.
:type methods: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: active_member, data_source.
:type include: Array of String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return iprg: The iprg identified by the specified IprgID.
:rtype iprg: Iprg
"""
return self.api_request(self._get_method_fullname("show"), kwargs)
def index(self, **kwargs):
"""Lists the available iprgs. Any of the inputs listed may be be used to narrow the list; other inputs will be ignored. Of the various ways to query lists, using this method is most efficient.
**Inputs**
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param IprgID: The internal NetMRI identifier for this HSRP/VRRP Group.
:type IprgID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param IprgID: The internal NetMRI identifier for this HSRP/VRRP Group.
:type IprgID: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param IprgIPNumeric: The numerical value of the HSRP/VRRP virtual IP address.
:type IprgIPNumeric: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param IprgIPNumeric: The numerical value of the HSRP/VRRP virtual IP address.
:type IprgIPNumeric: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param IprgNumber: The HSRP or VRRP group number.
:type IprgNumber: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param IprgNumber: The HSRP or VRRP group number.
:type IprgNumber: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceGroupID: The internal NetMRI identifier of the device groups to which to limit the results.
:type DeviceGroupID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param timestamp: The data returned will represent the iprgs as of this date and time. If omitted, the result will indicate the most recently collected data.
:type timestamp: DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param methods: A list of iprg methods. The listed methods will be called on each iprg returned and included in the output. Available methods are: active_member, data_source.
:type methods: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: active_member, data_source.
:type include: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` IprgID
:param sort: The data field(s) to use for sorting the output. Default is IprgID. Valid values are IprgID, DataSourceID, IprgStartTime, IprgEndTime, IprgTimestamp, IprgChangedCols, ActiveIprgMemberID, IprgNumber, IprgIPDotted, IprgIPNumeric, IprgMAC, IprgAuth, IprgType, IprgActiveLastChanged.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each Iprg. Valid values are IprgID, DataSourceID, IprgStartTime, IprgEndTime, IprgTimestamp, IprgChangedCols, ActiveIprgMemberID, IprgNumber, IprgIPDotted, IprgIPNumeric, IprgMAC, IprgAuth, IprgType, IprgActiveLastChanged. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return iprgs: An array of the Iprg objects that match the specified input criteria.
:rtype iprgs: Array of Iprg
"""
return self.api_list_request(self._get_method_fullname("index"), kwargs)
def search(self, **kwargs):
"""Lists the available iprgs matching the input criteria. This method provides a more flexible search interface than the index method, but searching using this method is more demanding on the system and will not perform to the same level as the index method. The input fields listed below will be used as in the index method, to filter the result, along with the optional query string and XML filter described below.
**Inputs**
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param ActiveIprgMemberID: The internal NetMRI identifier for the HSRP/VRRP group membership details of the active router.
:type ActiveIprgMemberID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param ActiveIprgMemberID: The internal NetMRI identifier for the HSRP/VRRP group membership details of the active router.
:type ActiveIprgMemberID: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DataSourceID: The internal NetMRI identifier for the collector NetMRI that collected this data record.
:type DataSourceID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DataSourceID: The internal NetMRI identifier for the collector NetMRI that collected this data record.
:type DataSourceID: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param IprgActiveLastChanged: The date and time of the last change of the active or master router for this group.
:type IprgActiveLastChanged: DateTime
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param IprgActiveLastChanged: The date and time of the last change of the active or master router for this group.
:type IprgActiveLastChanged: Array of DateTime
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param IprgAuth: The authentication method for this HSRP or VRRP group.
:type IprgAuth: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param IprgAuth: The authentication method for this HSRP or VRRP group.
:type IprgAuth: Array of String
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param IprgChangedCols: The fields that changed between this revision of the record and the previous revision.
:type IprgChangedCols: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param IprgChangedCols: The fields that changed between this revision of the record and the previous revision.
:type IprgChangedCols: Array of String
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param IprgEndTime: The ending effective time of this revision of this record, or empty if still in effect.
:type IprgEndTime: DateTime
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param IprgEndTime: The ending effective time of this revision of this record, or empty if still in effect.
:type IprgEndTime: Array of DateTime
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param IprgID: The internal NetMRI identifier for this HSRP/VRRP Group.
:type IprgID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param IprgID: The internal NetMRI identifier for this HSRP/VRRP Group.
:type IprgID: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param IprgIPDotted: The virtual IP address for this HSRP/VRRP group, in dotted (or colon-delimited for IPv6) format.
:type IprgIPDotted: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param IprgIPDotted: The virtual IP address for this HSRP/VRRP group, in dotted (or colon-delimited for IPv6) format.
:type IprgIPDotted: Array of String
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param IprgIPNumeric: The numerical value of the HSRP/VRRP virtual IP address.
:type IprgIPNumeric: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param IprgIPNumeric: The numerical value of the HSRP/VRRP virtual IP address.
:type IprgIPNumeric: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param IprgMAC: The virtual MAC for this HSRP or VRRP group.
:type IprgMAC: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param IprgMAC: The virtual MAC for this HSRP or VRRP group.
:type IprgMAC: Array of String
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param IprgNumber: The HSRP or VRRP group number.
:type IprgNumber: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param IprgNumber: The HSRP or VRRP group number.
:type IprgNumber: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param IprgStartTime: The starting effective time of this revision of the record.
:type IprgStartTime: DateTime
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param IprgStartTime: The starting effective time of this revision of the record.
:type IprgStartTime: Array of DateTime
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param IprgTimestamp: The date and time this record was collected or calculated.
:type IprgTimestamp: DateTime
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param IprgTimestamp: The date and time this record was collected or calculated.
:type IprgTimestamp: Array of DateTime
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param IprgType: Designates if this is an HSRP group or a VRRP group.
:type IprgType: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param IprgType: Designates if this is an HSRP group or a VRRP group.
:type IprgType: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceGroupID: The internal NetMRI identifier of the device groups to which to limit the results.
:type DeviceGroupID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param timestamp: The data returned will represent the iprgs as of this date and time. If omitted, the result will indicate the most recently collected data.
:type timestamp: DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param methods: A list of iprg methods. The listed methods will be called on each iprg returned and included in the output. Available methods are: active_member, data_source.
:type methods: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: active_member, data_source.
:type include: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` IprgID
:param sort: The data field(s) to use for sorting the output. Default is IprgID. Valid values are IprgID, DataSourceID, IprgStartTime, IprgEndTime, IprgTimestamp, IprgChangedCols, ActiveIprgMemberID, IprgNumber, IprgIPDotted, IprgIPNumeric, IprgMAC, IprgAuth, IprgType, IprgActiveLastChanged.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each Iprg. Valid values are IprgID, DataSourceID, IprgStartTime, IprgEndTime, IprgTimestamp, IprgChangedCols, ActiveIprgMemberID, IprgNumber, IprgIPDotted, IprgIPNumeric, IprgMAC, IprgAuth, IprgType, IprgActiveLastChanged. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param query: This value will be matched against iprgs, looking to see if one or more of the listed attributes contain the passed value. You may also surround the value with '/' and '/' to perform a regular expression search rather than a containment operation. Any record that matches will be returned. The attributes searched are: ActiveIprgMemberID, DataSourceID, IprgActiveLastChanged, IprgAuth, IprgChangedCols, IprgEndTime, IprgID, IprgIPDotted, IprgIPNumeric, IprgMAC, IprgNumber, IprgStartTime, IprgTimestamp, IprgType.
:type query: String
| ``api version min:`` 2.3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
            :param xml_filter: A SetFilter XML structure to further refine the search. The SetFilter will be applied AFTER any search query or field values, but before any limit options. The limit and pagination will be enforced after the filter. Note that this kind of filter may be costly and inefficient if it is not combined with database-level filtering.
:type xml_filter: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return iprgs: An array of the Iprg objects that match the specified input criteria.
:rtype iprgs: Array of Iprg
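            **Example (illustrative)**
            A minimal usage sketch; ``broker`` stands for an instance of this class obtained from an authenticated NetMRI API client, and the variable name and attribute-style access on the results are assumptions rather than part of this file::
                groups = broker.search(IprgType='hsrp', limit=10, sort=['IprgNumber'])
                for group in groups:
                    print(group.IprgNumber, group.IprgIPDotted)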
"""
return self.api_list_request(self._get_method_fullname("search"), kwargs)
def find(self, **kwargs):
"""Lists the available iprgs matching the input specification. This provides the most flexible search specification of all the query mechanisms, enabling searching using comparison operations other than equality. However, it is more complex to use and will not perform as efficiently as the index or search methods. In the input descriptions below, 'field names' refers to the following fields: ActiveIprgMemberID, DataSourceID, IprgActiveLastChanged, IprgAuth, IprgChangedCols, IprgEndTime, IprgID, IprgIPDotted, IprgIPNumeric, IprgMAC, IprgNumber, IprgStartTime, IprgTimestamp, IprgType.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_ActiveIprgMemberID: The operator to apply to the field ActiveIprgMemberID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. ActiveIprgMemberID: The internal NetMRI identifier for the HSRP/VRRP group membership details of the active router. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_ActiveIprgMemberID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_ActiveIprgMemberID: If op_ActiveIprgMemberID is specified, the field named in this input will be compared to the value in ActiveIprgMemberID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_ActiveIprgMemberID must be specified if op_ActiveIprgMemberID is specified.
:type val_f_ActiveIprgMemberID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_ActiveIprgMemberID: If op_ActiveIprgMemberID is specified, this value will be compared to the value in ActiveIprgMemberID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_ActiveIprgMemberID must be specified if op_ActiveIprgMemberID is specified.
:type val_c_ActiveIprgMemberID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DataSourceID: The operator to apply to the field DataSourceID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DataSourceID: The internal NetMRI identifier for the collector NetMRI that collected this data record. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_DataSourceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DataSourceID: If op_DataSourceID is specified, the field named in this input will be compared to the value in DataSourceID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DataSourceID must be specified if op_DataSourceID is specified.
:type val_f_DataSourceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DataSourceID: If op_DataSourceID is specified, this value will be compared to the value in DataSourceID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DataSourceID must be specified if op_DataSourceID is specified.
:type val_c_DataSourceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_IprgActiveLastChanged: The operator to apply to the field IprgActiveLastChanged. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. IprgActiveLastChanged: The date and time of the last change of the active or master router for this group. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_IprgActiveLastChanged: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_IprgActiveLastChanged: If op_IprgActiveLastChanged is specified, the field named in this input will be compared to the value in IprgActiveLastChanged using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_IprgActiveLastChanged must be specified if op_IprgActiveLastChanged is specified.
:type val_f_IprgActiveLastChanged: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_IprgActiveLastChanged: If op_IprgActiveLastChanged is specified, this value will be compared to the value in IprgActiveLastChanged using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_IprgActiveLastChanged must be specified if op_IprgActiveLastChanged is specified.
:type val_c_IprgActiveLastChanged: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_IprgAuth: The operator to apply to the field IprgAuth. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. IprgAuth: The authentication method for this HSRP or VRRP group. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_IprgAuth: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_IprgAuth: If op_IprgAuth is specified, the field named in this input will be compared to the value in IprgAuth using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_IprgAuth must be specified if op_IprgAuth is specified.
:type val_f_IprgAuth: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_IprgAuth: If op_IprgAuth is specified, this value will be compared to the value in IprgAuth using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_IprgAuth must be specified if op_IprgAuth is specified.
:type val_c_IprgAuth: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_IprgChangedCols: The operator to apply to the field IprgChangedCols. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. IprgChangedCols: The fields that changed between this revision of the record and the previous revision. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_IprgChangedCols: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_IprgChangedCols: If op_IprgChangedCols is specified, the field named in this input will be compared to the value in IprgChangedCols using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_IprgChangedCols must be specified if op_IprgChangedCols is specified.
:type val_f_IprgChangedCols: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_IprgChangedCols: If op_IprgChangedCols is specified, this value will be compared to the value in IprgChangedCols using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_IprgChangedCols must be specified if op_IprgChangedCols is specified.
:type val_c_IprgChangedCols: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_IprgEndTime: The operator to apply to the field IprgEndTime. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. IprgEndTime: The ending effective time of this revision of this record, or empty if still in effect. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_IprgEndTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_IprgEndTime: If op_IprgEndTime is specified, the field named in this input will be compared to the value in IprgEndTime using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_IprgEndTime must be specified if op_IprgEndTime is specified.
:type val_f_IprgEndTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_IprgEndTime: If op_IprgEndTime is specified, this value will be compared to the value in IprgEndTime using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_IprgEndTime must be specified if op_IprgEndTime is specified.
:type val_c_IprgEndTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_IprgID: The operator to apply to the field IprgID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. IprgID: The internal NetMRI identifier for this HSRP/VRRP Group. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_IprgID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_IprgID: If op_IprgID is specified, the field named in this input will be compared to the value in IprgID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_IprgID must be specified if op_IprgID is specified.
:type val_f_IprgID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_IprgID: If op_IprgID is specified, this value will be compared to the value in IprgID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_IprgID must be specified if op_IprgID is specified.
:type val_c_IprgID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_IprgIPDotted: The operator to apply to the field IprgIPDotted. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. IprgIPDotted: The virtual IP address for this HSRP/VRRP group, in dotted (or colon-delimited for IPv6) format. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_IprgIPDotted: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_IprgIPDotted: If op_IprgIPDotted is specified, the field named in this input will be compared to the value in IprgIPDotted using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_IprgIPDotted must be specified if op_IprgIPDotted is specified.
:type val_f_IprgIPDotted: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_IprgIPDotted: If op_IprgIPDotted is specified, this value will be compared to the value in IprgIPDotted using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_IprgIPDotted must be specified if op_IprgIPDotted is specified.
:type val_c_IprgIPDotted: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_IprgIPNumeric: The operator to apply to the field IprgIPNumeric. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. IprgIPNumeric: The numerical value of the HSRP/VRRP virtual IP address. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_IprgIPNumeric: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_IprgIPNumeric: If op_IprgIPNumeric is specified, the field named in this input will be compared to the value in IprgIPNumeric using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_IprgIPNumeric must be specified if op_IprgIPNumeric is specified.
:type val_f_IprgIPNumeric: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_IprgIPNumeric: If op_IprgIPNumeric is specified, this value will be compared to the value in IprgIPNumeric using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_IprgIPNumeric must be specified if op_IprgIPNumeric is specified.
:type val_c_IprgIPNumeric: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_IprgMAC: The operator to apply to the field IprgMAC. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. IprgMAC: The virtual MAC for this HSRP or VRRP group. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_IprgMAC: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_IprgMAC: If op_IprgMAC is specified, the field named in this input will be compared to the value in IprgMAC using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_IprgMAC must be specified if op_IprgMAC is specified.
:type val_f_IprgMAC: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_IprgMAC: If op_IprgMAC is specified, this value will be compared to the value in IprgMAC using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_IprgMAC must be specified if op_IprgMAC is specified.
:type val_c_IprgMAC: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_IprgNumber: The operator to apply to the field IprgNumber. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. IprgNumber: The HSRP or VRRP group number. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_IprgNumber: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_IprgNumber: If op_IprgNumber is specified, the field named in this input will be compared to the value in IprgNumber using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_IprgNumber must be specified if op_IprgNumber is specified.
:type val_f_IprgNumber: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_IprgNumber: If op_IprgNumber is specified, this value will be compared to the value in IprgNumber using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_IprgNumber must be specified if op_IprgNumber is specified.
:type val_c_IprgNumber: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_IprgStartTime: The operator to apply to the field IprgStartTime. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. IprgStartTime: The starting effective time of this revision of the record. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_IprgStartTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_IprgStartTime: If op_IprgStartTime is specified, the field named in this input will be compared to the value in IprgStartTime using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_IprgStartTime must be specified if op_IprgStartTime is specified.
:type val_f_IprgStartTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_IprgStartTime: If op_IprgStartTime is specified, this value will be compared to the value in IprgStartTime using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_IprgStartTime must be specified if op_IprgStartTime is specified.
:type val_c_IprgStartTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_IprgTimestamp: The operator to apply to the field IprgTimestamp. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. IprgTimestamp: The date and time this record was collected or calculated. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_IprgTimestamp: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_IprgTimestamp: If op_IprgTimestamp is specified, the field named in this input will be compared to the value in IprgTimestamp using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_IprgTimestamp must be specified if op_IprgTimestamp is specified.
:type val_f_IprgTimestamp: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_IprgTimestamp: If op_IprgTimestamp is specified, this value will be compared to the value in IprgTimestamp using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_IprgTimestamp must be specified if op_IprgTimestamp is specified.
:type val_c_IprgTimestamp: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_IprgType: The operator to apply to the field IprgType. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. IprgType: Designates if this is an HSRP group or a VRRP group. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_IprgType: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_IprgType: If op_IprgType is specified, the field named in this input will be compared to the value in IprgType using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_IprgType must be specified if op_IprgType is specified.
:type val_f_IprgType: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_IprgType: If op_IprgType is specified, this value will be compared to the value in IprgType using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_IprgType must be specified if op_IprgType is specified.
:type val_c_IprgType: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceGroupID: The internal NetMRI identifier of the device groups to which to limit the results.
:type DeviceGroupID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param timestamp: The data returned will represent the iprgs as of this date and time. If omitted, the result will indicate the most recently collected data.
:type timestamp: DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param methods: A list of iprg methods. The listed methods will be called on each iprg returned and included in the output. Available methods are: active_member, data_source.
:type methods: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: active_member, data_source.
:type include: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
            :param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit parameter for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` IprgID
:param sort: The data field(s) to use for sorting the output. Default is IprgID. Valid values are IprgID, DataSourceID, IprgStartTime, IprgEndTime, IprgTimestamp, IprgChangedCols, ActiveIprgMemberID, IprgNumber, IprgIPDotted, IprgIPNumeric, IprgMAC, IprgAuth, IprgType, IprgActiveLastChanged.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each Iprg. Valid values are IprgID, DataSourceID, IprgStartTime, IprgEndTime, IprgTimestamp, IprgChangedCols, ActiveIprgMemberID, IprgNumber, IprgIPDotted, IprgIPNumeric, IprgMAC, IprgAuth, IprgType, IprgActiveLastChanged. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
| ``api version min:`` 2.3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
            :param xml_filter: A SetFilter XML structure to further refine the search. The SetFilter will be applied AFTER any search query or field values, but before any limit options. The limit and pagination will be enforced after the filter. Note that this kind of filter may be costly and inefficient if it is not combined with database-level filtering.
:type xml_filter: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return iprgs: An array of the Iprg objects that match the specified input criteria.
:rtype iprgs: Array of Iprg
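            **Example (illustrative)**
            A minimal sketch of an operator-style query; ``broker`` stands for an instance of this class and the literal values shown are assumptions::
                old_groups = broker.find(op_IprgTimestamp='<',
                                         val_c_IprgTimestamp='2014-01-01 00:00:00',
                                         select=['IprgID', 'IprgNumber', 'IprgType'])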
"""
return self.api_list_request(self._get_method_fullname("find"), kwargs)
def data_source(self, **kwargs):
"""The collector NetMRI that collected this data record.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param IprgID: The internal NetMRI identifier for this HSRP/VRRP Group.
:type IprgID: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return : The collector NetMRI that collected this data record.
:rtype : DataSource
"""
return self.api_request(self._get_method_fullname("data_source"), kwargs)
def active_member(self, **kwargs):
"""The HSRP/VRRP group membership details of the active router.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param IprgID: The internal NetMRI identifier for this HSRP/VRRP Group.
:type IprgID: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return : The HSRP/VRRP group membership details of the active router.
:rtype : IprgMember
"""
return self.api_request(self._get_method_fullname("active_member"), kwargs)
| apache-2.0 |
subhrm/google-code-jam-solutions | solutions/2013/Q/A/A.py | 1 | 1485 | # 2013-Q-A : Tic-Tac-Toe-Tomek
def check_row(row):
if "." not in row:
s = list(set(row))
if len(s) == 1:
return "{} won".format(s[0])
elif ("T" in s) and (len(s) == 2):
for c in s:
if c != "T":
return "{} won".format(s[0])
return ""
def solve(ip):
res = "Game has not completed"
dot_found = False
# Check rows
for row in ip:
if "." in row:
dot_found = True
r = check_row(row)
if len(r) > 0:
# print("Row: ", row, " ", r)
return r
# check col
for j in range(4):
row = [ip[0][j], ip[1][j], ip[2][j], ip[3][j]]
r = check_row(row)
if len(r) > 0:
# print("Col: :" ,j," ", row, " ",r)
return r
# Check diag-1
row = [ip[0][0], ip[1][1], ip[2][2], ip[3][3]]
r = check_row(row)
if len(r) > 0:
# print("Diag1: ", row, " ",r)
return r
# Check diag-2
row = [ip[0][3], ip[1][2], ip[2][1], ip[3][0]]
r = check_row(row)
if len(r) > 0:
# print("Diag2: ", row, " ",r)
return r
if not dot_found:
return "Draw"
return res
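# Illustrative check (not part of the original submission): a board whose first
# column is all "X" is reported as a win for X even though empty cells remain.
#     board = [list("XO.T"), list("XX.."), list("XOO."), list("X..T")]
#     solve(board)  # -> "X won"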
if __name__ == "__main__":
tc = int(input())
for ti in range(tc):
ip = []
for _ in range(4):
ip.append([c for c in input().strip()])
_ = input()
res = solve(ip)
print("Case #{0}: {1}".format(ti + 1, res))
| mit |
etuna-SBF-kog/Stadsparken | env/lib/python2.7/site-packages/django/contrib/sitemaps/tests/base.py | 87 | 1028 | import os
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.sites.models import Site
from django.test import TestCase
class SitemapTestsBase(TestCase):
protocol = 'http'
domain = 'example.com' if Site._meta.installed else 'testserver'
urls = 'django.contrib.sitemaps.tests.urls.http'
def setUp(self):
self.base_url = '%s://%s' % (self.protocol, self.domain)
self.old_USE_L10N = settings.USE_L10N
self.old_TEMPLATE_DIRS = settings.TEMPLATE_DIRS
settings.TEMPLATE_DIRS = (
os.path.join(os.path.dirname(__file__), 'templates'),
)
self.old_Site_meta_installed = Site._meta.installed
# Create a user that will double as sitemap content
User.objects.create_user('testuser', 'test@example.com', 's3krit')
def tearDown(self):
settings.USE_L10N = self.old_USE_L10N
settings.TEMPLATE_DIRS = self.old_TEMPLATE_DIRS
Site._meta.installed = self.old_Site_meta_installed
| gpl-3.0 |
tuxfux-hlp-notes/python-batches | archieves/batch-65/16-files/sheets/lib/python2.7/site-packages/pip/_vendor/packaging/specifiers.py | 1107 | 28025 | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import abc
import functools
import itertools
import re
from ._compat import string_types, with_metaclass
from .version import Version, LegacyVersion, parse
class InvalidSpecifier(ValueError):
"""
An invalid specifier was found, users should refer to PEP 440.
"""
class BaseSpecifier(with_metaclass(abc.ABCMeta, object)):
@abc.abstractmethod
def __str__(self):
"""
Returns the str representation of this Specifier like object. This
should be representative of the Specifier itself.
"""
@abc.abstractmethod
def __hash__(self):
"""
Returns a hash value for this Specifier like object.
"""
@abc.abstractmethod
def __eq__(self, other):
"""
Returns a boolean representing whether or not the two Specifier like
objects are equal.
"""
@abc.abstractmethod
def __ne__(self, other):
"""
Returns a boolean representing whether or not the two Specifier like
objects are not equal.
"""
@abc.abstractproperty
def prereleases(self):
"""
Returns whether or not pre-releases as a whole are allowed by this
specifier.
"""
@prereleases.setter
def prereleases(self, value):
"""
Sets whether or not pre-releases as a whole are allowed by this
specifier.
"""
@abc.abstractmethod
def contains(self, item, prereleases=None):
"""
Determines if the given item is contained within this specifier.
"""
@abc.abstractmethod
def filter(self, iterable, prereleases=None):
"""
Takes an iterable of items and filters them so that only items which
are contained within this specifier are allowed in it.
"""
class _IndividualSpecifier(BaseSpecifier):
_operators = {}
def __init__(self, spec="", prereleases=None):
match = self._regex.search(spec)
if not match:
raise InvalidSpecifier("Invalid specifier: '{0}'".format(spec))
self._spec = (
match.group("operator").strip(),
match.group("version").strip(),
)
# Store whether or not this Specifier should accept prereleases
self._prereleases = prereleases
def __repr__(self):
pre = (
", prereleases={0!r}".format(self.prereleases)
if self._prereleases is not None
else ""
)
return "<{0}({1!r}{2})>".format(
self.__class__.__name__,
str(self),
pre,
)
def __str__(self):
return "{0}{1}".format(*self._spec)
def __hash__(self):
return hash(self._spec)
def __eq__(self, other):
if isinstance(other, string_types):
try:
other = self.__class__(other)
except InvalidSpecifier:
return NotImplemented
elif not isinstance(other, self.__class__):
return NotImplemented
return self._spec == other._spec
def __ne__(self, other):
if isinstance(other, string_types):
try:
other = self.__class__(other)
except InvalidSpecifier:
return NotImplemented
elif not isinstance(other, self.__class__):
return NotImplemented
return self._spec != other._spec
def _get_operator(self, op):
return getattr(self, "_compare_{0}".format(self._operators[op]))
def _coerce_version(self, version):
if not isinstance(version, (LegacyVersion, Version)):
version = parse(version)
return version
@property
def operator(self):
return self._spec[0]
@property
def version(self):
return self._spec[1]
@property
def prereleases(self):
return self._prereleases
@prereleases.setter
def prereleases(self, value):
self._prereleases = value
def __contains__(self, item):
return self.contains(item)
def contains(self, item, prereleases=None):
# Determine if prereleases are to be allowed or not.
if prereleases is None:
prereleases = self.prereleases
# Normalize item to a Version or LegacyVersion, this allows us to have
# a shortcut for ``"2.0" in Specifier(">=2")
item = self._coerce_version(item)
# Determine if we should be supporting prereleases in this specifier
# or not, if we do not support prereleases than we can short circuit
# logic if this version is a prereleases.
if item.is_prerelease and not prereleases:
return False
# Actually do the comparison to determine if this item is contained
# within this Specifier or not.
return self._get_operator(self.operator)(item, self.version)
def filter(self, iterable, prereleases=None):
yielded = False
found_prereleases = []
kw = {"prereleases": prereleases if prereleases is not None else True}
# Attempt to iterate over all the values in the iterable and if any of
# them match, yield them.
for version in iterable:
parsed_version = self._coerce_version(version)
if self.contains(parsed_version, **kw):
# If our version is a prerelease, and we were not set to allow
                # prereleases, then we'll store it for later in case nothing
# else matches this specifier.
if (parsed_version.is_prerelease and not
(prereleases or self.prereleases)):
found_prereleases.append(version)
# Either this is not a prerelease, or we should have been
                # accepting prereleases from the beginning.
else:
yielded = True
yield version
# Now that we've iterated over everything, determine if we've yielded
# any values, and if we have not and we have any prereleases stored up
# then we will go ahead and yield the prereleases.
if not yielded and found_prereleases:
for version in found_prereleases:
yield version
class LegacySpecifier(_IndividualSpecifier):
_regex_str = (
r"""
(?P<operator>(==|!=|<=|>=|<|>))
\s*
(?P<version>
[^,;\s)]* # Since this is a "legacy" specifier, and the version
# string can be just about anything, we match everything
# except for whitespace, a semi-colon for marker support,
# a closing paren since versions can be enclosed in
# them, and a comma since it's a version separator.
)
"""
)
_regex = re.compile(
r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE)
_operators = {
"==": "equal",
"!=": "not_equal",
"<=": "less_than_equal",
">=": "greater_than_equal",
"<": "less_than",
">": "greater_than",
}
def _coerce_version(self, version):
if not isinstance(version, LegacyVersion):
version = LegacyVersion(str(version))
return version
def _compare_equal(self, prospective, spec):
return prospective == self._coerce_version(spec)
def _compare_not_equal(self, prospective, spec):
return prospective != self._coerce_version(spec)
def _compare_less_than_equal(self, prospective, spec):
return prospective <= self._coerce_version(spec)
def _compare_greater_than_equal(self, prospective, spec):
return prospective >= self._coerce_version(spec)
def _compare_less_than(self, prospective, spec):
return prospective < self._coerce_version(spec)
def _compare_greater_than(self, prospective, spec):
return prospective > self._coerce_version(spec)
def _require_version_compare(fn):
@functools.wraps(fn)
def wrapped(self, prospective, spec):
if not isinstance(prospective, Version):
return False
return fn(self, prospective, spec)
return wrapped
class Specifier(_IndividualSpecifier):
_regex_str = (
r"""
(?P<operator>(~=|==|!=|<=|>=|<|>|===))
(?P<version>
(?:
# The identity operators allow for an escape hatch that will
# do an exact string match of the version you wish to install.
# This will not be parsed by PEP 440 and we cannot determine
# any semantic meaning from it. This operator is discouraged
# but included entirely as an escape hatch.
(?<====) # Only match for the identity operator
\s*
[^\s]* # We just match everything, except for whitespace
# since we are only testing for strict identity.
)
|
(?:
# The (non)equality operators allow for wild card and local
# versions to be specified so we have to define these two
# operators separately to enable that.
(?<===|!=) # Only match for equals and not equals
\s*
v?
(?:[0-9]+!)? # epoch
[0-9]+(?:\.[0-9]+)* # release
(?: # pre release
[-_\.]?
(a|b|c|rc|alpha|beta|pre|preview)
[-_\.]?
[0-9]*
)?
(?: # post release
(?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
)?
# You cannot use a wild card and a dev or local version
# together so group them with a | and make them optional.
(?:
(?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release
(?:\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*)? # local
|
\.\* # Wild card syntax of .*
)?
)
|
(?:
# The compatible operator requires at least two digits in the
# release segment.
(?<=~=) # Only match for the compatible operator
\s*
v?
(?:[0-9]+!)? # epoch
[0-9]+(?:\.[0-9]+)+ # release (We have a + instead of a *)
(?: # pre release
[-_\.]?
(a|b|c|rc|alpha|beta|pre|preview)
[-_\.]?
[0-9]*
)?
(?: # post release
(?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
)?
(?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release
)
|
(?:
# All other operators only allow a sub set of what the
# (non)equality operators do. Specifically they do not allow
# local versions to be specified nor do they allow the prefix
# matching wild cards.
(?<!==|!=|~=) # We have special cases for these
# operators so we want to make sure they
# don't match here.
\s*
v?
(?:[0-9]+!)? # epoch
[0-9]+(?:\.[0-9]+)* # release
(?: # pre release
[-_\.]?
(a|b|c|rc|alpha|beta|pre|preview)
[-_\.]?
[0-9]*
)?
(?: # post release
(?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
)?
(?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release
)
)
"""
)
_regex = re.compile(
r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE)
_operators = {
"~=": "compatible",
"==": "equal",
"!=": "not_equal",
"<=": "less_than_equal",
">=": "greater_than_equal",
"<": "less_than",
">": "greater_than",
"===": "arbitrary",
}
@_require_version_compare
def _compare_compatible(self, prospective, spec):
# Compatible releases have an equivalent combination of >= and ==. That
# is that ~=2.2 is equivalent to >=2.2,==2.*. This allows us to
# implement this in terms of the other specifiers instead of
# implementing it ourselves. The only thing we need to do is construct
# the other specifiers.
# We want everything but the last item in the version, but we want to
# ignore post and dev releases and we want to treat the pre-release as
# it's own separate segment.
prefix = ".".join(
list(
itertools.takewhile(
lambda x: (not x.startswith("post") and not
x.startswith("dev")),
_version_split(spec),
)
)[:-1]
)
# Add the prefix notation to the end of our string
prefix += ".*"
return (self._get_operator(">=")(prospective, spec) and
self._get_operator("==")(prospective, prefix))
@_require_version_compare
def _compare_equal(self, prospective, spec):
# We need special logic to handle prefix matching
if spec.endswith(".*"):
# In the case of prefix matching we want to ignore local segment.
prospective = Version(prospective.public)
# Split the spec out by dots, and pretend that there is an implicit
# dot in between a release segment and a pre-release segment.
spec = _version_split(spec[:-2]) # Remove the trailing .*
# Split the prospective version out by dots, and pretend that there
# is an implicit dot in between a release segment and a pre-release
# segment.
prospective = _version_split(str(prospective))
# Shorten the prospective version to be the same length as the spec
# so that we can determine if the specifier is a prefix of the
# prospective version or not.
prospective = prospective[:len(spec)]
# Pad out our two sides with zeros so that they both equal the same
# length.
spec, prospective = _pad_version(spec, prospective)
else:
# Convert our spec string into a Version
spec = Version(spec)
# If the specifier does not have a local segment, then we want to
# act as if the prospective version also does not have a local
# segment.
if not spec.local:
prospective = Version(prospective.public)
return prospective == spec
@_require_version_compare
def _compare_not_equal(self, prospective, spec):
return not self._compare_equal(prospective, spec)
@_require_version_compare
def _compare_less_than_equal(self, prospective, spec):
return prospective <= Version(spec)
@_require_version_compare
def _compare_greater_than_equal(self, prospective, spec):
return prospective >= Version(spec)
@_require_version_compare
def _compare_less_than(self, prospective, spec):
# Convert our spec to a Version instance, since we'll want to work with
# it as a version.
spec = Version(spec)
# Check to see if the prospective version is less than the spec
# version. If it's not we can short circuit and just return False now
# instead of doing extra unneeded work.
if not prospective < spec:
return False
# This special case is here so that, unless the specifier itself
        # includes a pre-release version, we do not accept pre-release
# versions for the version mentioned in the specifier (e.g. <3.1 should
# not match 3.1.dev0, but should match 3.0.dev0).
if not spec.is_prerelease and prospective.is_prerelease:
if Version(prospective.base_version) == Version(spec.base_version):
return False
# If we've gotten to here, it means that prospective version is both
# less than the spec version *and* it's not a pre-release of the same
# version in the spec.
return True
@_require_version_compare
def _compare_greater_than(self, prospective, spec):
# Convert our spec to a Version instance, since we'll want to work with
# it as a version.
spec = Version(spec)
# Check to see if the prospective version is greater than the spec
# version. If it's not we can short circuit and just return False now
# instead of doing extra unneeded work.
if not prospective > spec:
return False
# This special case is here so that, unless the specifier itself
        # includes a post-release version, we do not accept
# post-release versions for the version mentioned in the specifier
# (e.g. >3.1 should not match 3.0.post0, but should match 3.2.post0).
if not spec.is_postrelease and prospective.is_postrelease:
if Version(prospective.base_version) == Version(spec.base_version):
return False
# Ensure that we do not allow a local version of the version mentioned
        # in the specifier, which is technically greater than, to match.
if prospective.local is not None:
if Version(prospective.base_version) == Version(spec.base_version):
return False
# If we've gotten to here, it means that prospective version is both
# greater than the spec version *and* it's not a pre-release of the
# same version in the spec.
return True
def _compare_arbitrary(self, prospective, spec):
return str(prospective).lower() == str(spec).lower()
@property
def prereleases(self):
# If there is an explicit prereleases set for this, then we'll just
# blindly use that.
if self._prereleases is not None:
return self._prereleases
# Look at all of our specifiers and determine if they are inclusive
# operators, and if they are if they are including an explicit
# prerelease.
operator, version = self._spec
if operator in ["==", ">=", "<=", "~=", "==="]:
# The == specifier can include a trailing .*, if it does we
            # want to remove it before parsing.
if operator == "==" and version.endswith(".*"):
version = version[:-2]
# Parse the version, and if it is a pre-release than this
# specifier allows pre-releases.
if parse(version).is_prerelease:
return True
return False
@prereleases.setter
def prereleases(self, value):
self._prereleases = value
_prefix_regex = re.compile(r"^([0-9]+)((?:a|b|c|rc)[0-9]+)$")
def _version_split(version):
result = []
for item in version.split("."):
match = _prefix_regex.search(item)
if match:
result.extend(match.groups())
else:
result.append(item)
return result
def _pad_version(left, right):
left_split, right_split = [], []
# Get the release segment of our versions
left_split.append(list(itertools.takewhile(lambda x: x.isdigit(), left)))
right_split.append(list(itertools.takewhile(lambda x: x.isdigit(), right)))
# Get the rest of our versions
left_split.append(left[len(left_split[0]):])
right_split.append(right[len(right_split[0]):])
# Insert our padding
left_split.insert(
1,
["0"] * max(0, len(right_split[0]) - len(left_split[0])),
)
right_split.insert(
1,
["0"] * max(0, len(left_split[0]) - len(right_split[0])),
)
return (
list(itertools.chain(*left_split)),
list(itertools.chain(*right_split)),
)
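# Illustrative behaviour (comment added for clarity, not in the upstream source):
# _pad_version(["1", "2"], ["1", "10", "3"]) returns (["1", "2", "0"], ["1", "10", "3"]);
# the shorter release segment is padded with zeros so both sides can be compared
# component by component.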
class SpecifierSet(BaseSpecifier):
def __init__(self, specifiers="", prereleases=None):
        # Split on , to break each individual specifier into its own item, and
# strip each item to remove leading/trailing whitespace.
specifiers = [s.strip() for s in specifiers.split(",") if s.strip()]
# Parsed each individual specifier, attempting first to make it a
# Specifier and falling back to a LegacySpecifier.
parsed = set()
for specifier in specifiers:
try:
parsed.add(Specifier(specifier))
except InvalidSpecifier:
parsed.add(LegacySpecifier(specifier))
# Turn our parsed specifiers into a frozen set and save them for later.
self._specs = frozenset(parsed)
# Store our prereleases value so we can use it later to determine if
# we accept prereleases or not.
self._prereleases = prereleases
def __repr__(self):
pre = (
", prereleases={0!r}".format(self.prereleases)
if self._prereleases is not None
else ""
)
return "<SpecifierSet({0!r}{1})>".format(str(self), pre)
def __str__(self):
return ",".join(sorted(str(s) for s in self._specs))
def __hash__(self):
return hash(self._specs)
def __and__(self, other):
if isinstance(other, string_types):
other = SpecifierSet(other)
elif not isinstance(other, SpecifierSet):
return NotImplemented
specifier = SpecifierSet()
specifier._specs = frozenset(self._specs | other._specs)
if self._prereleases is None and other._prereleases is not None:
specifier._prereleases = other._prereleases
elif self._prereleases is not None and other._prereleases is None:
specifier._prereleases = self._prereleases
elif self._prereleases == other._prereleases:
specifier._prereleases = self._prereleases
else:
raise ValueError(
"Cannot combine SpecifierSets with True and False prerelease "
"overrides."
)
return specifier
def __eq__(self, other):
if isinstance(other, string_types):
other = SpecifierSet(other)
elif isinstance(other, _IndividualSpecifier):
other = SpecifierSet(str(other))
elif not isinstance(other, SpecifierSet):
return NotImplemented
return self._specs == other._specs
def __ne__(self, other):
if isinstance(other, string_types):
other = SpecifierSet(other)
elif isinstance(other, _IndividualSpecifier):
other = SpecifierSet(str(other))
elif not isinstance(other, SpecifierSet):
return NotImplemented
return self._specs != other._specs
def __len__(self):
return len(self._specs)
def __iter__(self):
return iter(self._specs)
@property
def prereleases(self):
# If we have been given an explicit prerelease modifier, then we'll
# pass that through here.
if self._prereleases is not None:
return self._prereleases
# If we don't have any specifiers, and we don't have a forced value,
# then we'll just return None since we don't know if this should have
# pre-releases or not.
if not self._specs:
return None
# Otherwise we'll see if any of the given specifiers accept
# prereleases, if any of them do we'll return True, otherwise False.
return any(s.prereleases for s in self._specs)
@prereleases.setter
def prereleases(self, value):
self._prereleases = value
def __contains__(self, item):
return self.contains(item)
def contains(self, item, prereleases=None):
# Ensure that our item is a Version or LegacyVersion instance.
if not isinstance(item, (LegacyVersion, Version)):
item = parse(item)
# Determine if we're forcing a prerelease or not, if we're not forcing
# one for this particular filter call, then we'll use whatever the
# SpecifierSet thinks for whether or not we should support prereleases.
if prereleases is None:
prereleases = self.prereleases
# We can determine if we're going to allow pre-releases by looking to
# see if any of the underlying items supports them. If none of them do
# and this item is a pre-release then we do not allow it and we can
# short circuit that here.
# Note: This means that 1.0.dev1 would not be contained in something
        #       like >=1.0.devabc, however it would be in >=1.0.devabc,>0.0.dev0
if not prereleases and item.is_prerelease:
return False
# We simply dispatch to the underlying specs here to make sure that the
# given version is contained within all of them.
# Note: This use of all() here means that an empty set of specifiers
# will always return True, this is an explicit design decision.
return all(
s.contains(item, prereleases=prereleases)
for s in self._specs
)
def filter(self, iterable, prereleases=None):
# Determine if we're forcing a prerelease or not, if we're not forcing
# one for this particular filter call, then we'll use whatever the
# SpecifierSet thinks for whether or not we should support prereleases.
if prereleases is None:
prereleases = self.prereleases
# If we have any specifiers, then we want to wrap our iterable in the
# filter method for each one, this will act as a logical AND amongst
# each specifier.
if self._specs:
for spec in self._specs:
iterable = spec.filter(iterable, prereleases=bool(prereleases))
return iterable
# If we do not have any specifiers, then we need to have a rough filter
# which will filter out any pre-releases, unless there are no final
# releases, and which will filter out LegacyVersion in general.
else:
filtered = []
found_prereleases = []
for item in iterable:
                # Ensure that we have some kind of Version class for this item.
if not isinstance(item, (LegacyVersion, Version)):
parsed_version = parse(item)
else:
parsed_version = item
# Filter out any item which is parsed as a LegacyVersion
if isinstance(parsed_version, LegacyVersion):
continue
# Store any item which is a pre-release for later unless we've
# already found a final version or we are accepting prereleases
if parsed_version.is_prerelease and not prereleases:
if not filtered:
found_prereleases.append(item)
else:
filtered.append(item)
# If we've found no items except for pre-releases, then we'll go
# ahead and use the pre-releases
if not filtered and found_prereleases and prereleases is None:
return found_prereleases
return filtered
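# Illustrative usage sketch (comment added for clarity, not in the upstream source):
#     from pip._vendor.packaging.specifiers import SpecifierSet
#     spec = SpecifierSet(">=1.0,<2.0")
#     spec.contains("1.4")       # -> True
#     spec.contains("2.0.dev1")  # -> False; pre-releases are excluded unless a
#                                #    specifier in the set opts into them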
| gpl-3.0 |
intersense/stencil-overlapped-tiling-ics | scripts/utils.py | 2 | 10594 | #
# Support routines for experiment harness
#
import os
import subprocess
import sys
import time
try:
import yaml
except:
print('Please install PyYAML')
sys.exit(1)
def _parse_range(value):
if isinstance(value, int):
return [value]
elif len(value) == 3:
return range(value[0], value[1]+1, value[2])
elif len(value) == 2:
return range(value[0], value[1]+1, 1)
elif len(value) == 1:
return [value[0]]
else:
print('Unable to handle object')
sys.exit(1)
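# Illustrative behaviour (comment added for clarity): range specs include the
# upper bound, e.g. _parse_range([0, 16, 4]) -> [0, 4, 8, 12, 16],
# _parse_range([2, 5]) -> [2, 3, 4, 5], and _parse_range(8) -> [8].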
def _query_range(data, name, default):
try:
value = data[name]
return _parse_range(value)
except KeyError:
return default
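# Illustrative experiment file layout consumed by run_experiment(); the key names
# mirror the lookups below, while the concrete values are only an example:
#     dimensions: 2
#     outfile: results.dat
#     binary: ./stencil-2d.x
#     phase_limit: 0
#     counters: "inst_issued,gld_request"
#     parameters:
#       problem_size: [128, 512, 128]   # [start, stop, step], stop inclusive
#       time_steps: [64]
#       elements_per_thread: [1, 4]     # [start, stop] with implicit step 1
#       time_tile_size: [1, 4]
#       block_size_x: [16, 32, 16]
#       block_size_y: [16]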
def run_experiment(filename):
try:
handle = open(filename)
    except IOError as e:
print(e)
sys.exit(1)
data = yaml.load(handle)
handle.close()
dimensions = data['dimensions']
outfile = data['outfile']
binary = data['binary']
parameters = data['parameters']
elements_per_thread = _query_range(parameters, 'elements_per_thread', [1])
problem_size = _query_range(parameters, 'problem_size', [128])
time_steps = _query_range(parameters, 'time_steps', [64])
time_tile_size = _query_range(parameters, 'time_tile_size', [1])
block_size_x = _query_range(parameters, 'block_size_x', [16])
if dimensions > 1:
block_size_y = _query_range(parameters, 'block_size_y', [16])
if dimensions > 2:
block_size_z = _query_range(parameters, 'block_size_z', [8])
else:
block_size_z = [1]
else:
block_size_y = [1]
block_size_z = [1]
try:
phase_limit = data['phase_limit']
except:
phase_limit = 0
try:
counters = data['counters'].split(',')
except:
counters = []
output = open(outfile, 'w')
num_runs = len(problem_size) * len(time_steps) * len(elements_per_thread) \
* len(time_tile_size) * len(block_size_x) * len(block_size_y) \
* len(block_size_z)
print('Number of Runs: %d' % num_runs)
curr = 0
total_start = time.time()
# Run through each permutation
for ps in problem_size:
for ts in time_steps:
for elems in elements_per_thread:
for tt in time_tile_size:
for bsx in block_size_x:
for bsy in block_size_y:
for bsz in block_size_z:
# Before each run, blow away the nv cache
os.system('rm -rf ~/.nv/')
curr = curr + 1
print('Running %d of %d' % (curr, num_runs))
args = [binary,
'-n',
'%d' % ps,
'-t',
'%d' % ts,
'-e',
'%d' % elems,
'-s',
'%d' % tt,
'-x',
'%d' % bsx,
'-p',
'%d' % phase_limit,
'-w',
'/tmp/temp-kernel.cl']
if dimensions > 1:
args.append('-y')
args.append('%d' % bsy)
if dimensions > 2:
args.append('-z')
args.append('%d' % bsz)
args = ' '.join(args)
proc = subprocess.Popen(args,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# Keep a watchdog on the process
start_time = time.time()
while proc.poll() == None:
time.sleep(0.1)
now = time.time()
elapsed = now - start_time
if elapsed > 15.0:
print('Watchdog timer expired!')
proc.terminate()
proc.wait()
break
end_time = time.time()
if proc.returncode != 0:
print('- FAILURE:')
print(proc.stdout.read())
print(proc.stderr.read())
else:
for line in proc.stdout.readlines():
output.write('%d#%s' % (curr, line))
output.flush()
elapsed = end_time - start_time
total = time.time() - total_start
if proc.returncode == 0:
ret = subprocess.call('~/projects/llvm/tests/dump-cl-binary.x /tmp/temp-kernel.cl', shell=True)
assert(ret == 0)
ret = subprocess.call('ptxas -arch sm_20 /tmp/temp-kernel.cl.ptx -o /tmp/temp-kernel.o', shell=True)
assert(ret == 0)
proc = subprocess.Popen('cuobjdump -sass /tmp/temp-kernel.o', shell=True, stdout=subprocess.PIPE)
(sass_out, _) = proc.communicate()
assert(proc.returncode == 0)
num_fadd = sass_out.count('FADD')
num_fmul = sass_out.count('FMUL')
num_ffma = sass_out.count('FFMA')
num_mufu = sass_out.count('MUFU')
num_fsetp = sass_out.count('FSETP')
output.write('%d#num_fp: %d\n' % (curr, (num_fadd+num_fmul+num_ffma+num_fsetp)))
output.write('%d#num_sfu: %d\n' % (curr, num_mufu))
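# Added explanatory comment: num_fp approximates the count of single-precision
# floating-point instructions (FADD/FMUL/FFMA/FSETP) in the generated SASS and
# num_sfu counts special-function-unit ops (MUFU); both lines are keyed by the
# run index 'curr' so they can be correlated with the timing output above.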
for cnt in counters:
if proc.returncode == 0:
with open('/tmp/experiment-profiler.conf', 'w') as conf:
conf.write(cnt)
prof_args = 'COMPUTE_PROFILE=1 COMPUTE_PROFILE_CONFIG=/tmp/experiment-profiler.conf COMPUTE_PROFILE_LOG=/tmp/prof.log COMPUTE_PROFILE_CSV=1 %s' % args
proc = subprocess.Popen(prof_args,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# Keep a watchdog on the process
start_time = time.time()
while proc.poll() == None:
time.sleep(0.1)
now = time.time()
elapsed = now - start_time
if elapsed > 15.0:
print('Watchdog timer expired!')
proc.terminate()
proc.wait()
break
end_time = time.time()
if proc.returncode != 0:
print('- FAILURE:')
print(proc.stdout.read())
print(proc.stderr.read())
else:
all_values = []
with open('/tmp/prof.log') as log:
for line in log.readlines():
line = line.strip()
if line.startswith('kernel_func'):
value = line.split(',')[-1]
all_values.append(float(value))
#for line in proc.stdout.readlines():
# output.write('%d#%s' % (curr, line))
value_avg = float(sum(all_values)) / float(len(all_values))
output.write('%d#%s: %f\n' % (curr, cnt.strip(), value_avg))
output.flush()
elapsed = end_time - start_time
total = time.time() - total_start
seconds_per_run = total / float(curr)
remaining_runs = float(num_runs) - float(curr)
remaining_secs = seconds_per_run * \
remaining_runs
remaining_secs = int(remaining_secs)
remaining_mins = remaining_secs / 60
remaining_secs = remaining_secs % 60
remaining_hrs = remaining_mins / 60
remaining_mins = remaining_mins % 60;
print('Elapsed: %f Total: %f Remaining: %d:%d:%d' % \
(elapsed, total, remaining_hrs,
remaining_mins, remaining_secs))
output.close()
| mit |
doantranhoang/namebench | nb_third_party/dns/rdtypes/ANY/GPOS.py | 248 | 5304 | # Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import dns.exception
import dns.rdata
import dns.tokenizer
def _validate_float_string(what):
if what[0] == '-' or what[0] == '+':
what = what[1:]
if what.isdigit():
return
(left, right) = what.split('.')
if left == '' and right == '':
raise dns.exception.FormError
if not left == '' and not left.isdigit():
raise dns.exception.FormError
if not right == '' and not right.isdigit():
raise dns.exception.FormError
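# Illustration (added comment; not part of the original module):
#   '-32', '+12.5' and '.5' validate silently;
#   '12a.5' and '.' raise dns.exception.FormError;
#   a non-numeric string without a '.' (e.g. 'abc') escapes as a ValueError
#   from the tuple unpacking above rather than a FormError.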
class GPOS(dns.rdata.Rdata):
"""GPOS record
@ivar latitude: latitude
@type latitude: string
@ivar longitude: longitude
@type longitude: string
@ivar altitude: altitude
@type altitude: string
@see: RFC 1712"""
__slots__ = ['latitude', 'longitude', 'altitude']
def __init__(self, rdclass, rdtype, latitude, longitude, altitude):
super(GPOS, self).__init__(rdclass, rdtype)
if isinstance(latitude, float) or \
isinstance(latitude, int) or \
isinstance(latitude, long):
latitude = str(latitude)
if isinstance(longitude, float) or \
isinstance(longitude, int) or \
isinstance(longitude, long):
longitude = str(longitude)
if isinstance(altitude, float) or \
isinstance(altitude, int) or \
isinstance(altitude, long):
altitude = str(altitude)
_validate_float_string(latitude)
_validate_float_string(longitude)
_validate_float_string(altitude)
self.latitude = latitude
self.longitude = longitude
self.altitude = altitude
def to_text(self, origin=None, relativize=True, **kw):
return '%s %s %s' % (self.latitude, self.longitude, self.altitude)
def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
latitude = tok.get_string()
longitude = tok.get_string()
altitude = tok.get_string()
tok.get_eol()
return cls(rdclass, rdtype, latitude, longitude, altitude)
from_text = classmethod(from_text)
def to_wire(self, file, compress = None, origin = None):
l = len(self.latitude)
assert l < 256
byte = chr(l)
file.write(byte)
file.write(self.latitude)
l = len(self.longitude)
assert l < 256
byte = chr(l)
file.write(byte)
file.write(self.longitude)
l = len(self.altitude)
assert l < 256
byte = chr(l)
file.write(byte)
file.write(self.altitude)
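# Wire-format note (added for clarity; not in the original source): each of
# the three coordinate strings is emitted as a one-byte length followed by its
# ASCII text, so a latitude of '12.5' is written as '\x04' + '12.5'.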
def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
l = ord(wire[current])
current += 1
rdlen -= 1
if l > rdlen:
raise dns.exception.FormError
latitude = wire[current : current + l]
current += l
rdlen -= l
l = ord(wire[current])
current += 1
rdlen -= 1
if l > rdlen:
raise dns.exception.FormError
longitude = wire[current : current + l]
current += l
rdlen -= l
l = ord(wire[current])
current += 1
rdlen -= 1
if l != rdlen:
raise dns.exception.FormError
altitude = wire[current : current + l]
return cls(rdclass, rdtype, latitude, longitude, altitude)
from_wire = classmethod(from_wire)
def _cmp(self, other):
v = cmp(self.latitude, other.latitude)
if v == 0:
v = cmp(self.longitude, other.longitude)
if v == 0:
v = cmp(self.altitude, other.altitude)
return v
def _get_float_latitude(self):
return float(self.latitude)
def _set_float_latitude(self, value):
self.latitude = str(value)
float_latitude = property(_get_float_latitude, _set_float_latitude,
doc="latitude as a floating point value")
def _get_float_longitude(self):
return float(self.longitude)
def _set_float_longitude(self, value):
self.longitude = str(value)
float_longitude = property(_get_float_longitude, _set_float_longitude,
doc="longitude as a floating point value")
def _get_float_altitude(self):
return float(self.altitude)
def _set_float_altitude(self, value):
self.altitude = str(value)
float_altitude = property(_get_float_altitude, _set_float_altitude,
doc="altitude as a floating point value")
| apache-2.0 |
nikhil9/QGC | libs/mavlink/share/pyshared/pymavlink/examples/magtest.py | 30 | 3949 | #!/usr/bin/env python
'''
rotate APMs on bench to test magnetometers
'''
import sys, os, time
from math import radians
# allow import from the parent directory, where mavlink.py is
sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)), '..'))
import mavlink, mavutil
from optparse import OptionParser
parser = OptionParser("rotate.py [options]")
parser.add_option("--device1", dest="device1", default=None, help="mavlink device1")
parser.add_option("--device2", dest="device2", default=None, help="mavlink device2")
parser.add_option("--baudrate", dest="baudrate", type='int',
help="master port baud rate", default=115200)
(opts, args) = parser.parse_args()
if opts.device1 is None or opts.device2 is None:
print("You must specify a mavlink device")
sys.exit(1)
def set_attitude(rc3, rc4):
global mav1, mav2
values = [ 65535 ] * 8
values[2] = rc3
values[3] = rc4
mav1.mav.rc_channels_override_send(mav1.target_system, mav1.target_component, *values)
mav2.mav.rc_channels_override_send(mav2.target_system, mav2.target_component, *values)
# create a mavlink instance
mav1 = mavutil.mavlink_connection(opts.device1, baud=opts.baudrate)
# create a mavlink instance
mav2 = mavutil.mavlink_connection(opts.device2, baud=opts.baudrate)
print("Waiting for HEARTBEAT")
mav1.wait_heartbeat()
mav2.wait_heartbeat()
print("Heartbeat from APM (system %u component %u)" % (mav1.target_system, mav1.target_system))
print("Heartbeat from APM (system %u component %u)" % (mav2.target_system, mav2.target_system))
print("Waiting for MANUAL mode")
mav1.recv_match(type='SYS_STATUS', condition='SYS_STATUS.mode==2 and SYS_STATUS.nav_mode==4', blocking=True)
mav2.recv_match(type='SYS_STATUS', condition='SYS_STATUS.mode==2 and SYS_STATUS.nav_mode==4', blocking=True)
print("Setting declination")
mav1.mav.param_set_send(mav1.target_system, mav1.target_component,
'COMPASS_DEC', radians(12.33))
mav2.mav.param_set_send(mav2.target_system, mav2.target_component,
'COMPASS_DEC', radians(12.33))
set_attitude(1060, 1160)
event = mavutil.periodic_event(30)
pevent = mavutil.periodic_event(0.3)
rc3_min = 1060
rc3_max = 1850
rc4_min = 1080
rc4_max = 1500
rc3 = rc3_min
rc4 = 1160
delta3 = 2
delta4 = 1
use_pitch = 1
MAV_ACTION_CALIBRATE_GYRO = 17
mav1.mav.action_send(mav1.target_system, mav1.target_component, MAV_ACTION_CALIBRATE_GYRO)
mav2.mav.action_send(mav2.target_system, mav2.target_component, MAV_ACTION_CALIBRATE_GYRO)
print("Waiting for gyro calibration")
mav1.recv_match(type='ACTION_ACK')
mav2.recv_match(type='ACTION_ACK')
print("Resetting mag offsets")
mav1.mav.set_mag_offsets_send(mav1.target_system, mav1.target_component, 0, 0, 0)
mav2.mav.set_mag_offsets_send(mav2.target_system, mav2.target_component, 0, 0, 0)
def TrueHeading(SERVO_OUTPUT_RAW):
p = float(SERVO_OUTPUT_RAW.servo3_raw - rc3_min) / (rc3_max - rc3_min)
return 172 + p*(326 - 172)
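# Added note (not in the original script): this linearly maps the servo-3 PWM
# range [rc3_min, rc3_max] = [1060, 1850] onto a bench heading of 172..326
# degrees; e.g. the midpoint servo3_raw == 1455 gives 172 + 0.5*154 = 249.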
while True:
mav1.recv_msg()
mav2.recv_msg()
if event.trigger():
if not use_pitch:
rc4 = 1160
set_attitude(rc3, rc4)
rc3 += delta3
if rc3 > rc3_max or rc3 < rc3_min:
delta3 = -delta3
use_pitch ^= 1
rc4 += delta4
if rc4 > rc4_max or rc4 < rc4_min:
delta4 = -delta4
if pevent.trigger():
print "hdg1: %3u hdg2: %3u ofs1: %4u, %4u, %4u ofs2: %4u, %4u, %4u" % (
mav1.messages['VFR_HUD'].heading,
mav2.messages['VFR_HUD'].heading,
mav1.messages['SENSOR_OFFSETS'].mag_ofs_x,
mav1.messages['SENSOR_OFFSETS'].mag_ofs_y,
mav1.messages['SENSOR_OFFSETS'].mag_ofs_z,
mav2.messages['SENSOR_OFFSETS'].mag_ofs_x,
mav2.messages['SENSOR_OFFSETS'].mag_ofs_y,
mav2.messages['SENSOR_OFFSETS'].mag_ofs_z,
)
time.sleep(0.01)
# 314M 326G
# 160M 172G
| agpl-3.0 |
BeiLuoShiMen/nupic | tests/integration/nupic/opf/opf_checkpoint_test/opf_checkpoint_test.py | 23 | 16466 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import csv
import os
import shutil
from nupic.data.file_record_stream import FileRecordStream
from nupic.frameworks.opf.experiment_runner import runExperiment
from nupic.support import initLogging
from nupic.support.unittesthelpers.testcasebase import (
unittest, TestCaseBase as HelperTestCaseBase)
_EXPERIMENT_BASE = os.path.join(os.path.abspath(
os.path.dirname(__file__)), "experiments")
class MyTestCaseBase(HelperTestCaseBase):
def shortDescription(self):
""" Override to force unittest framework to use test method names instead
of docstrings in the report.
"""
return None
@staticmethod
def getOpfNonTemporalPredictionFilepath(experimentDir, taskLabel):
path = os.path.join(experimentDir,
"inference",
"%s.nontemporal.predictionLog.csv" % taskLabel)
return os.path.abspath(path)
@staticmethod
def getOpfTemporalPredictionFilepath(experimentDir, taskLabel):
path = os.path.join(experimentDir,
"inference",
"%s.temporal.predictionLog.csv" % taskLabel)
return os.path.abspath(path)
def compareOPFPredictionFiles(self, path1, path2, temporal,
maxMismatches=None):
""" Compare temporal or non-temporal predictions for the given experiment
that just finished executing
experimentName: e.g., "gym"; this string will be used to form
a directory path to the experiments.
maxMismatches: Maximum number of row mismatches to report before
terminating the comparison; None means: report all
mismatches
Returns: True if equal; False if different
"""
experimentLabel = "%s prediction comparison" % \
("Temporal" if temporal else "Non-Temporal")
print "%s: Performing comparison of OPF prediction CSV files %r and %r" % (
experimentLabel, path1, path2)
# Open CSV readers
#
self.assertTrue(
os.path.isfile(path1),
msg="OPF prediction file path1 %s doesn't exist or is not a file" % (
path1))
(opf1CsvReader, opf1FieldNames) = self._openOpfPredictionCsvFile(path1)
self.assertTrue(
os.path.isfile(path2),
msg="OPF prediction file path2 %s doesn't exist or is not a file" % (
path2))
(opf2CsvReader, opf2FieldNames) = self._openOpfPredictionCsvFile(path2)
self.assertEqual(len(opf1FieldNames), len(opf2FieldNames),
("%s: Mismatch in number of prediction columns: "
"opf1: %s, opf2: %s") % (
experimentLabel, len(opf1FieldNames),
len(opf2FieldNames)))
self.assertEqual(opf1FieldNames, opf2FieldNames)
# Each data row is assumed to be arranged as follows:
#
# reset, actual-field1, prediction-field1, actual-field2,
# prediction-field2, etc.
#
# Presently, we only compare the predicted values that need to match.
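# For example (illustrative values only), a two-field data row could look
# like ['0', '5.5', '5.2', '40.0', '41.3']; only columns 2 and 4 (the
# '...predicted' columns) are compared below.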
opf1EOF = False
opf2EOF = False
opf1CurrentDataRowIndex = -1
opf2CurrentDataRowIndex = -1
if temporal:
# Skip the first data rows for temporal tests, since they don't contain
# prediction values.
_skipOpf1Row = opf1CsvReader.next()
opf1CurrentDataRowIndex += 1
_skipOpf2Row = opf2CsvReader.next()
opf2CurrentDataRowIndex += 1
fieldsIndexesToCompare = tuple(xrange(2, len(opf1FieldNames), 2))
self.assertGreater(len(fieldsIndexesToCompare), 0)
print ("%s: Comparing fields at indexes: %s; "
"opf1Labels: %s; opf2Labels: %s") % (
experimentLabel,
fieldsIndexesToCompare,
[opf1FieldNames[i] for i in fieldsIndexesToCompare],
[opf2FieldNames[i] for i in fieldsIndexesToCompare])
for i in fieldsIndexesToCompare:
self.assertTrue(opf1FieldNames[i].endswith("predicted"),
msg="%r doesn't end with 'predicted'" % opf1FieldNames[i])
self.assertTrue(opf2FieldNames[i].endswith("predicted"),
msg="%r doesn't end with 'predicted'" % opf2FieldNames[i])
mismatchCount = 0
while True:
try:
opf1Row = opf1CsvReader.next()
except StopIteration:
opf1EOF = True
else:
opf1CurrentDataRowIndex += 1
try:
opf2Row = opf2CsvReader.next()
except StopIteration:
opf2EOF = True
else:
opf2CurrentDataRowIndex += 1
if opf1EOF != opf2EOF:
print ("%s: ERROR: Data row counts mismatch: "
"opf1EOF: %s, opf1CurrentDataRowIndex: %s; "
"opf2EOF: %s, opf2CurrentDataRowIndex: %s") % (
experimentLabel,
opf1EOF, opf1CurrentDataRowIndex,
opf2EOF, opf2CurrentDataRowIndex)
return False
if opf1EOF and opf2EOF:
# Done with both prediction datasets
break
# Compare the rows
self.assertEqual(len(opf1Row), len(opf2Row))
for i in fieldsIndexesToCompare:
opf1FloatValue = float(opf1Row[i])
opf2FloatValue = float(opf2Row[i])
if opf1FloatValue != opf2FloatValue:
mismatchCount += 1
print ("%s: ERROR: mismatch in "
"prediction values: dataRowIndex: %s, fieldIndex: %s (%r); "
"opf1FieldValue: <%s>, opf2FieldValue: <%s>; "
"opf1FieldValueAsFloat: %s, opf2FieldValueAsFloat: %s; "
"opf1Row: %s, opf2Row: %s") % (
experimentLabel,
opf1CurrentDataRowIndex,
i,
opf1FieldNames[i],
opf1Row[i],
opf2Row[i],
opf1FloatValue,
opf2FloatValue,
opf1Row,
opf2Row)
# Stop comparison if we exceeded the allowed number of mismatches
if maxMismatches is not None and mismatchCount >= maxMismatches:
break
if mismatchCount != 0:
print "%s: ERROR: there were %s mismatches between %r and %r" % (
experimentLabel, mismatchCount, path1, path2)
return False
# A difference here would indicate a logic error in this method
self.assertEqual(opf1CurrentDataRowIndex, opf2CurrentDataRowIndex)
print ("%s: Comparison of predictions "
"completed: OK; number of prediction rows examined: %s; "
"path1: %r; path2: %r") % \
(experimentLabel,
opf1CurrentDataRowIndex + 1,
path1,
path2)
return True
def _openOpfPredictionCsvFile(self, filepath):
""" Open an OPF prediction CSV file and advance it to the first data row
Returns: the tuple (csvReader, fieldNames), where 'csvReader' is the
csv reader object, and 'fieldNames' is a sequence of field
names.
"""
# Open the OPF prediction file
csvReader = self._openCsvFile(filepath)
# Advance it past the three NUPIC header lines
names = csvReader.next()
_types = csvReader.next()
_specials = csvReader.next()
return (csvReader, names)
@staticmethod
def _openCsvFile(filepath):
# We'll be operating on csvs with arbitrarily long fields
size = 2**27
csv.field_size_limit(size)
rawFileObj = open(filepath, 'rU')
csvReader = csv.reader(rawFileObj, dialect='excel')
return csvReader
def _testSamePredictions(self, experiment, predSteps, checkpointAt,
predictionsFilename, additionalFields=None):
""" Test that we get the same predictions out from the following two
scenarios:
a_plus_b: Run the network for 'a' iterations followed by 'b' iterations
a, followed by b: Run the network for 'a' iterations, save it, load it
back in, then run for 'b' iterations.
Parameters:
-----------------------------------------------------------------------
experiment: base directory of the experiment. This directory should
contain the following:
base.py
a_plus_b/description.py
a/description.py
b/description.py
The sub-directory description files should import the
base.py and only change the first and last record used
from the data file.
predSteps: Number of steps ahead predictions are for
checkpointAt: Number of iterations that 'a' runs for.
IMPORTANT: This must match the number of records that
a/description.py runs for - it is NOT dynamically stuffed into
the a/description.py.
predictionsFilename: The name of the predictions file that the OPF
generates for this experiment (for example
'DefaultTask.NontemporalMultiStep.predictionLog.csv')
"""
# Get the 3 sub-experiment directories
aPlusBExpDir = os.path.join(_EXPERIMENT_BASE, experiment, "a_plus_b")
aExpDir = os.path.join(_EXPERIMENT_BASE, experiment, "a")
bExpDir = os.path.join(_EXPERIMENT_BASE, experiment, "b")
# Run a+b
_aPlusBExp = runExperiment(args=[aPlusBExpDir])
# Run a, the copy the saved checkpoint into the b directory
_aExp = runExperiment(args=[aExpDir])
if os.path.exists(os.path.join(bExpDir, 'savedmodels')):
shutil.rmtree(os.path.join(bExpDir, 'savedmodels'))
shutil.copytree(src=os.path.join(aExpDir, 'savedmodels'),
dst=os.path.join(bExpDir, 'savedmodels'))
_bExp = runExperiment(args=[bExpDir, '--load=DefaultTask'])
# Now, compare the predictions at the end of a+b to those in b.
aPlusBPred = FileRecordStream(os.path.join(aPlusBExpDir, 'inference',
predictionsFilename))
bPred = FileRecordStream(os.path.join(bExpDir, 'inference',
predictionsFilename))
colNames = [x[0] for x in aPlusBPred.getFields()]
actValueColIdx = colNames.index('multiStepPredictions.actual')
predValueColIdx = colNames.index('multiStepPredictions.%d' % (predSteps))
# Skip past the 'a' records in aPlusB
for i in range(checkpointAt):
aPlusBPred.next()
# Now, read through the records that don't have predictions yet
for i in range(predSteps):
aPlusBPred.next()
bPred.next()
# Now, compare predictions in the two files
rowIdx = checkpointAt + predSteps + 4 - 1
epsilon = 0.0001
while True:
rowIdx += 1
try:
rowAPB = aPlusBPred.next()
rowB = bPred.next()
# Compare actuals
self.assertEqual(rowAPB[actValueColIdx], rowB[actValueColIdx],
"Mismatch in actual values: row %d of a+b has %s and row %d of "
"b has %s" % (rowIdx, rowAPB[actValueColIdx], rowIdx-checkpointAt,
rowB[actValueColIdx]))
# Compare predictions, within nearest epsilon
predAPB = eval(rowAPB[predValueColIdx])
predB = eval(rowB[predValueColIdx])
# Sort with highest probabilities first
predAPB = [(a, b) for b, a in predAPB.items()]
predB = [(a, b) for b, a in predB.items()]
predAPB.sort(reverse=True)
predB.sort(reverse=True)
if additionalFields is not None:
for additionalField in additionalFields:
fieldIdx = colNames.index(additionalField)
self.assertEqual(rowAPB[fieldIdx], rowB[fieldIdx],
"Mismatch in field \'%s\' values: row %d of a+b has value: (%s)\n"
" and row %d of b has value: %s" % \
(additionalField, rowIdx, rowAPB[fieldIdx],
rowIdx-checkpointAt, rowB[fieldIdx]))
self.assertEqual(len(predAPB), len(predB),
"Mismatch in predicted values: row %d of a+b has %d predictions: "
"\n (%s) and row %d of b has %d predictions:\n (%s)" % \
(rowIdx, len(predAPB), predAPB, rowIdx-checkpointAt, len(predB),
predB))
for i in range(len(predAPB)):
(aProb, aValue) = predAPB[i]
(bProb, bValue) = predB[i]
self.assertLess(abs(aValue-bValue), epsilon,
"Mismatch in predicted values: row %d of a+b predicts value %s "
"and row %d of b predicts %s" % (rowIdx, aValue,
rowIdx-checkpointAt, bValue))
self.assertLess(abs(aProb-bProb), epsilon,
"Mismatch in probabilities: row %d of a+b predicts %s with "
"probability %s and row %d of b predicts %s with probability %s" \
% (rowIdx, aValue, aProb, rowIdx-checkpointAt, bValue, bProb))
except StopIteration:
break
print "Predictions match!"
@staticmethod
def _testBackwardsCompatibility(experiment, checkpointName):
""" Test that we can load in a checkpoint saved by an earlier version of
the OPF.
Parameters:
-----------------------------------------------------------------------
experiment: Directory of the experiment.
checkpointName: which checkpoint to verify
"""
# Get the experiment directories
expDir = os.path.join(_EXPERIMENT_BASE, experiment)
# Copy the pertinent checkpoint
if os.path.exists(os.path.join(expDir, 'savedmodels')):
shutil.rmtree(os.path.join(expDir, 'savedmodels'))
shutil.copytree(src=os.path.join(expDir, checkpointName),
dst=os.path.join(expDir, 'savedmodels'))
# Run it from the checkpoint
_aPlusBExp = runExperiment(args=[expDir, '--load=DefaultTask',
'--noCheckpoint'])
class PositiveTests(MyTestCaseBase):
def test_NonTemporalMultiStep(self):
""" Test that we get the same predictions out of a model that was
saved and reloaded from a checkpoint as we do from one that runs
continuously.
"""
self._testSamePredictions(
experiment="non_temporal_multi_step", predSteps=24, checkpointAt=250,
predictionsFilename=
"DefaultTask.NontemporalMultiStep.predictionLog.csv")
@unittest.skip("Currently Fails: NUP-1864")
def test_TemporalMultiStep(self):
""" Test that we get the same predictions out of a model that was
saved and reloaded from a checkpoint as we do from one that runs
continuously.
"""
self._testSamePredictions(experiment="temporal_multi_step", predSteps=24,
checkpointAt=250,
predictionsFilename='DefaultTask.TemporalMultiStep.predictionLog.csv')
@unittest.skip("Currently Fails: NUP-1864")
def test_TemporalAnomaly(self):
""" Test that we get the same predictions out of a model that was
saved and reloaded from a checkpoint as we do from one that runs
continuously.
"""
self._testSamePredictions(experiment="temporal_anomaly", predSteps=1,
checkpointAt=250,
predictionsFilename='DefaultTask.TemporalAnomaly.predictionLog.csv',
additionalFields=['anomalyScore'])
def test_BackwardsCompatibility(self):
""" Test that we can load in a checkpoint saved by an earlier version of
the OPF.
"""
self._testBackwardsCompatibility(
os.path.join('backwards_compatibility', 'a'),
'savedmodels_2012-10-05')
if __name__ == "__main__":
initLogging(verbose=True)
unittest.main()
| agpl-3.0 |
jazkarta/edx-platform | lms/djangoapps/shoppingcart/migrations/0004_auto__add_field_orderitem_fulfilled_time.py | 114 | 8621 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'OrderItem.fulfilled_time'
db.add_column('shoppingcart_orderitem', 'fulfilled_time',
self.gf('django.db.models.fields.DateTimeField')(null=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'OrderItem.fulfilled_time'
db.delete_column('shoppingcart_orderitem', 'fulfilled_time')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'shoppingcart.certificateitem': {
'Meta': {'object_name': 'CertificateItem', '_ormbases': ['shoppingcart.OrderItem']},
'course_enrollment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['student.CourseEnrollment']"}),
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'mode': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'orderitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['shoppingcart.OrderItem']", 'unique': 'True', 'primary_key': 'True'})
},
'shoppingcart.order': {
'Meta': {'object_name': 'Order'},
'bill_to_cardtype': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'bill_to_ccnum': ('django.db.models.fields.CharField', [], {'max_length': '8', 'blank': 'True'}),
'bill_to_city': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'bill_to_country': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'bill_to_first': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'bill_to_last': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'bill_to_postalcode': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'bill_to_state': ('django.db.models.fields.CharField', [], {'max_length': '8', 'blank': 'True'}),
'bill_to_street1': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'bill_to_street2': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'currency': ('django.db.models.fields.CharField', [], {'default': "'usd'", 'max_length': '8'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'processor_reply_dump': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'purchase_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'cart'", 'max_length': '32'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'shoppingcart.orderitem': {
'Meta': {'object_name': 'OrderItem'},
'currency': ('django.db.models.fields.CharField', [], {'default': "'usd'", 'max_length': '8'}),
'fulfilled_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'line_desc': ('django.db.models.fields.CharField', [], {'default': "'Misc. Item'", 'max_length': '1024'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shoppingcart.Order']"}),
'qty': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'cart'", 'max_length': '32'}),
'unit_cost': ('django.db.models.fields.DecimalField', [], {'default': '0.0', 'max_digits': '30', 'decimal_places': '2'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'shoppingcart.paidcourseregistration': {
'Meta': {'object_name': 'PaidCourseRegistration', '_ormbases': ['shoppingcart.OrderItem']},
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'mode': ('django.db.models.fields.SlugField', [], {'default': "'honor'", 'max_length': '50'}),
'orderitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['shoppingcart.OrderItem']", 'unique': 'True', 'primary_key': 'True'})
},
'student.courseenrollment': {
'Meta': {'ordering': "('user', 'course_id')", 'unique_together': "(('user', 'course_id'),)", 'object_name': 'CourseEnrollment'},
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'mode': ('django.db.models.fields.CharField', [], {'default': "'honor'", 'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
}
}
complete_apps = ['shoppingcart']
| agpl-3.0 |
google/mirandum | alerts/youtubesubs/support.py | 1 | 2287 | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import httplib2
from youtubesubs.models import *
from googaccount.models import CredentialsModel
from django.contrib.auth.models import User
from oauth2client.contrib.django_orm import Storage
BASE_URL = "https://www.googleapis.com/youtube/v3/"
def run_youtubesubs(ffu):
added = 0
storage = Storage(CredentialsModel, 'id', ffu.credentials, 'credential')
credential = storage.get()
if credential is None or credential.invalid == True:
raise Exception("bad creds")
return added
http = httplib2.Http()
http = credential.authorize(http)
resp, data = http.request("%ssubscriptions?part=subscriberSnippet&myRecentSubscribers=true&maxResults=25" % BASE_URL)
data = json.loads(data)
if 'error' in data:
raise Exception("Error fetching youtubesubs: %s" % json.dumps(data['error']))
events = []
if 'items' in data:
for i in data['items']:
unique_id = i['subscriberSnippet']['channelId']
if YoutubeSubEvent.objects.filter(external_id=i['id'], updater=ffu).count() > 0:
break
details = json.dumps(i)
try:
ffe = YoutubeSubEvent(external_id=i['id'], updater=ffu, details=details)
events.append(ffe)
except Exception, E:
print "Failed in individual youtubesubs run: %s\nData:\n%s" % (E, details)
added += 1
for event in reversed(events):
try:
event.save()
added += 1
except Exception, E:
print "Failed in individual sponsor run: %s\nData:\n%s" % (E, ffe.details)
return added
| apache-2.0 |
blinktrade/bitex | apps/ws_gateway/rest_api_handler.py | 10 | 2993 | import tornado.web
import tornado.httpclient
import calendar
import json
from market_data_helper import MarketDataSubscriber
class RestApiHandler(tornado.web.RequestHandler):
def head(self, version, symbol, resource):
self._process_request(version, symbol, resource)
def get(self, version, symbol, resource):
self._process_request(version, symbol, resource)
def _send_ticker(self, symbol):
md_subscriber = MarketDataSubscriber.get(symbol, self.application)
ticker = {
"pair": symbol,
"high": md_subscriber.inst_status.max_price / 1e8,
"low": md_subscriber.inst_status.min_price / 1e8,
"last": md_subscriber.inst_status.last_price / 1e8,
"vol_" + symbol[3:].lower(): md_subscriber.inst_status.volume_price / 1e8,
"vol": md_subscriber.inst_status.volume_size / 1e8,
"buy": md_subscriber.inst_status.bid / 1e8,
"sell": md_subscriber.inst_status.ask / 1e8
}
self.write( json.dumps(ticker))
def _send_order_book(self, symbol):
md_subscriber = MarketDataSubscriber.get(symbol, self.application)
bids = []
asks = []
for order in md_subscriber.buy_side:
bids.append([order['price']/1e8, order['qty']/1e8, order['user_id']])
for order in md_subscriber.sell_side:
asks.append([order['price']/1e8, order['qty']/1e8, order['user_id']])
self.write(
{
'pair': symbol,
'bids': bids,
'asks': asks
}
)
def _send_trades(self, symbol, since):
md_subscriber = MarketDataSubscriber.get(symbol, self.application)
trades = []
for trade in md_subscriber.get_trades(symbol, since):
trades.append({
'tid': trade.id,
'price': trade.price/1e8,
'amount': trade.size/1e8,
'date': calendar.timegm(trade.created.timetuple()),
})
self.write(json.dumps(trades))
def _process_request(self, version, symbol, resource):
currency = self.get_argument("crypto_currency", default='BTC', strip=False)
since = self.get_argument("since", default=0, strip=False)
callback = self.get_argument("callback", default='', strip=False)
if not callback:
callback = self.get_argument("jsonp", default='', strip=False)
instrument = '%s%s'%(currency, symbol)
if callback:
self.write( callback + '(' )
if version == 'v1':
if resource == 'orderbook':
self._send_order_book(instrument)
elif resource == 'trades':
self._send_trades(instrument, float(since))
elif resource == 'ticker':
self._send_ticker(instrument)
else:
self.send_error(404)
else:
self.send_error(404)
if callback:
self.write( ');' )
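# Added note (illustrative; the URL routing is defined elsewhere): a v1
# 'ticker' request with ?callback=cb has its JSON wrapped as cb({...});,
# while omitting the callback writes the bare JSON document.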
| gpl-3.0 |
redhat-cip/tempest | tempest/tests/common/test_cred_provider.py | 22 | 5191 | # Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from tempest_lib import auth
from tempest_lib import exceptions as lib_exc
from tempest_lib.services.identity.v2 import token_client as v2_client
from tempest_lib.services.identity.v3 import token_client as v3_client
from tempest.common import cred_provider
from tempest.common import tempest_fixtures as fixtures
from tempest import config
from tempest.tests import base
from tempest.tests import fake_config
from tempest.tests import fake_identity
class ConfiguredV2CredentialsTests(base.TestCase):
attributes = {
'username': 'fake_username',
'password': 'fake_password',
'tenant_name': 'fake_tenant_name'
}
identity_response = fake_identity._fake_v2_response
credentials_class = auth.KeystoneV2Credentials
tokenclient_class = v2_client.TokenClientJSON
identity_version = 'v2'
def setUp(self):
super(ConfiguredV2CredentialsTests, self).setUp()
self.useFixture(fake_config.ConfigFixture())
self.stubs.Set(config, 'TempestConfigPrivate', fake_config.FakePrivate)
self.stubs.Set(self.tokenclient_class, 'raw_request',
self.identity_response)
def _get_credentials(self, attributes=None):
if attributes is None:
attributes = self.attributes
return self.credentials_class(**attributes)
def _check(self, credentials, credentials_class, filled):
# Check the right version of credentials has been returned
self.assertIsInstance(credentials, credentials_class)
# Check the id attributes are filled in
attributes = [x for x in credentials.ATTRIBUTES if (
'_id' in x and x != 'domain_id')]
for attr in attributes:
if filled:
self.assertIsNotNone(getattr(credentials, attr))
else:
self.assertIsNone(getattr(credentials, attr))
def _verify_credentials(self, credentials_class, filled=True,
identity_version=None):
for ctype in cred_provider.CREDENTIAL_TYPES:
if identity_version is None:
creds = cred_provider.get_configured_credentials(
credential_type=ctype, fill_in=filled)
else:
creds = cred_provider.get_configured_credentials(
credential_type=ctype, fill_in=filled,
identity_version=identity_version)
self._check(creds, credentials_class, filled)
def test_create(self):
creds = self._get_credentials()
self.assertEqual(self.attributes, creds._initial)
def test_create_invalid_attr(self):
self.assertRaises(lib_exc.InvalidCredentials,
self._get_credentials,
attributes=dict(invalid='fake'))
def test_get_configured_credentials(self):
self.useFixture(fixtures.LockFixture('auth_version'))
self._verify_credentials(credentials_class=self.credentials_class)
def test_get_configured_credentials_unfilled(self):
self.useFixture(fixtures.LockFixture('auth_version'))
self._verify_credentials(credentials_class=self.credentials_class,
filled=False)
def test_get_configured_credentials_version(self):
# version specified and not loaded from config
self.useFixture(fixtures.LockFixture('auth_version'))
self._verify_credentials(credentials_class=self.credentials_class,
identity_version=self.identity_version)
def test_is_valid(self):
creds = self._get_credentials()
self.assertTrue(creds.is_valid())
class ConfiguredV3CredentialsTests(ConfiguredV2CredentialsTests):
attributes = {
'username': 'fake_username',
'password': 'fake_password',
'project_name': 'fake_project_name',
'user_domain_name': 'fake_domain_name'
}
credentials_class = auth.KeystoneV3Credentials
identity_response = fake_identity._fake_v3_response
tokenclient_class = v3_client.V3TokenClientJSON
identity_version = 'v3'
def setUp(self):
super(ConfiguredV3CredentialsTests, self).setUp()
# Additional config items reset by cfg fixture after each test
cfg.CONF.set_default('auth_version', 'v3', group='identity')
# Identity group items
for prefix in ['', 'alt_', 'admin_']:
cfg.CONF.set_default(prefix + 'domain_name', 'fake_domain_name',
group='identity')
| apache-2.0 |
nuxleus/cherokee-webserver | admin/wizards/static.py | 3 | 4262 | # -*- coding: utf-8 -*-
#
# Cherokee-admin's Common Static wizard
#
# Authors:
# Taher Shihadeh <taher@octality.com>
# Alvaro Lopez Ortega <alvaro@alobbs.com>
#
# Copyright (C) 2010 Alvaro Lopez Ortega
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of version 2 of the GNU General Public
# License as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
#
#
# Tested:
# 2010/04/13: Cherokee 0.99.41
# 2010/06/15: Cherokee 1.0.3b
#
import re
import CTK
import Wizard
from util import *
from configured import *
NOTE_WELCOME_H1 = N_("Welcome to the Common Static wizard")
NOTE_WELCOME_P1 = N_("This wizard adds a rule to optimally serve the most common static files.")
NOTE_CREATE_H1 = N_("Current rules have been checked")
NOTE_CREATE_OK = N_("The process is very simple. Let the wizard take over and don't worry about a thing.")
NOTE_CREATE_ERR = N_("Common files have been already configured, so there is nothing to be done.")
PREFIX = 'tmp!wizard!static'
URL_APPLY = r'/wizard/vserver/static/apply'
class Commit:
def Commit_Rule (self):
vsrv_num = CTK.cfg.get_val ('%s!vsrv_num'%(PREFIX))
vsrv_pre = 'vserver!%s' %(vsrv_num)
x, rule_pre = cfg_vsrv_rule_get_next (vsrv_pre)
Wizard.AddUsualStaticFiles (rule_pre)
# Clean up
CTK.cfg.normalize ('%s!rule'%(vsrv_pre))
del (CTK.cfg[PREFIX])
return CTK.cfg_reply_ajax_ok()
def __call__ (self):
if CTK.post.pop('final'):
CTK.cfg_apply_post()
return self.Commit_Rule()
return CTK.cfg_apply_post()
class Create:
def _check_if_valid (self):
vsrv_num = CTK.cfg.get_val ('%s!vsrv_num'%(PREFIX))
vsrv_pre = 'vserver!%s' %(vsrv_num)
rules = CTK.cfg.keys('%s!rule'%(vsrv_pre))
for r in rules:
if CTK.cfg.get_val ('%s!rule!%s!match'%(vsrv_pre, r)) == 'fullpath':
files = Wizard.USUAL_STATIC_FILES[:]
entries = CTK.cfg.keys('%s!rule!%s!match!fullpath'%(vsrv_pre, r))
for e in entries:
f = CTK.cfg.get_val('%s!rule!%s!match!fullpath!%s'%(vsrv_pre, r, e))
try:
files.remove(f)
except ValueError:
pass
if not len(files):
return False
return True
def __call__ (self):
submit = CTK.Submitter (URL_APPLY)
submit += CTK.Hidden('final', '1')
cont = CTK.Container()
cont += CTK.RawHTML ('<h2>%s</h2>'%(_(NOTE_CREATE_H1)))
cont += submit
if self._check_if_valid ():
cont += CTK.RawHTML ('<p>%s</p>'%(_(NOTE_CREATE_OK)))
cont += CTK.DruidButtonsPanel_PrevCreate_Auto()
else:
cont += CTK.RawHTML ('<p>%s</p>'%(_(NOTE_CREATE_ERR)))
cont += CTK.DruidButtonsPanel_Cancel()
return cont.Render().toStr()
class Welcome:
def __call__ (self):
cont = CTK.Container()
cont += CTK.RawHTML ('<h2>%s</h2>' %(_(NOTE_WELCOME_H1)))
cont += Wizard.Icon ('static', {'class': 'wizard-descr'})
box = CTK.Box ({'class': 'wizard-welcome'})
box += CTK.RawHTML ('<p>%s</p>' %(_(NOTE_WELCOME_P1)))
cont += box
# Send the VServer num
tmp = re.findall (r'^/wizard/vserver/(\d+)/', CTK.request.url)
submit = CTK.Submitter (URL_APPLY)
submit += CTK.Hidden('%s!vsrv_num'%(PREFIX), tmp[0])
cont += submit
cont += CTK.DruidButtonsPanel_Next_Auto()
return cont.Render().toStr()
# Rule
CTK.publish ('^/wizard/vserver/(\d+)/static$', Welcome)
CTK.publish ('^/wizard/vserver/(\d+)/static/2', Create)
CTK.publish (r'^%s$'%(URL_APPLY), Commit, method="POST")
| gpl-2.0 |
ScottehMax/pyMon | chathandler.py | 1 | 10364 | from threading import Thread
from Queue import Queue
from itertools import groupby
import time
import os
import imp
import inspect
import logging
import redis
import concurrent.futures as futures
from utils import condense
class ChatHandler:
"""Deals with most of the chat messages."""
def __init__(self, cb):
# these instance variables are just for convenience
self.user = cb.user
self.config = cb.config
self.ws = cb
self.thread_pool_executor = futures.ThreadPoolExecutor(max_workers=20)
self.triggers = []
self.join_time = {}
self.current_users = cb.currentusers
self.battling = False # self.config.get('Chatbot', 'battle')
try:
redis_uname = self.config.get('External', 'redis_uname')
redis_pass = self.config.get('External', 'redis_pass')
redis_server = self.config.get('External', 'redis_server')
redis_url = os.getenv('REDISTOGO_URL', 'redis://%s:%s@%s' %
(redis_uname, redis_pass, redis_server))
self.redis = redis.from_url(redis_url)
# self.redis = redis.from_url('redis://127.0.0.1:6379')
except Exception as e:
print e
print "Redis connection failed (ignore if you're not using redis)"
self.initialise_triggers(self.config)
self.initialise_queue()
def initialise_triggers(self, config):
"""Loads all triggers as specified in config."""
trigger_list = config.get('Chatbot', 'triggers').split(',')
print trigger_list # debug
for trigger_filename in trigger_list:
modname, ext = os.path.splitext(trigger_filename)
trigger_file, path, descr = imp.find_module(modname, ['./triggers'])
if trigger_file:
mod = imp.load_module(modname, trigger_file, path, descr)
# This isn't very good... investigate a better solution.
self.triggers.append([x for x in inspect.getmembers(mod)[0:2] if x[0] != 'Trigger'][0][1](self))
else:
print 'Error loading Trigger %s' % trigger_filename
print self.triggers
def initialise_queue(self):
self.queue = Queue()
self.queue_worker = Thread(target=self.run_queue,
name='message_queue',
args=[self.queue])
self.queue_worker.daemon = True
# queue_worker.start()
# self.battle_thread = Thread(target=self.battling_queue,
# name='battling_queue')
# battle_thread.start()
def run_queue(self, queue):
while True:
msg = queue.get()
if msg is not None:
self.ws.send(msg)
time.sleep(0.6)
def battling_queue(self):
while True:
while self.battling:
print 'Searching for a new battle...'
self.queue_message('|/utm')
self.queue_message('|/search randombattle')
time.sleep(30)
def queue_message(self, msg):
try:
self.queue.put(msg)
except AttributeError:
print 'Queue not initialised'
def send_msg(self, room, msg):
if len(room) > 0 and room[0] == '>':
room = room[1:]
message = "%s|%s" % (room, msg)
self.queue_message(message)
def send_pm(self, target, msg):
self.send_msg('', '/pm %s, %s' % (target, msg))
def call_trigger_response(self, trigger, m_info):
try:
response = trigger.response(m_info)
return response
except Exception as e:
logging.error("Crashed: %s, %s, %s, %s - %s" %
(e.message, e.args, trigger, type(e), m_info))
self.send_pm(self.ws.master,
"Crashed: %s, %s, %s, %s" %
(e.message, e.args, trigger, type(e)))
def future_callback(self, future):
response = future.result()
room = future.room
m_info = future.m_info
if response:
who = m_info['who']
if type(response) != list:
response = [response]
for s_response in response:
if m_info['where'] == 'pm':
s_response = '/pm %s, %s' % (who, s_response)
self.send_msg(room, s_response)
def handle(self, msg, room):
room = room.replace('>', '')
m_info = self.make_msg_info(msg, room, self.ws)
if m_info['where'] == ':':
# : messages contain an UNIX timestamp of when the room was joined
# nvm, PS's time is usually off
self.join_time[room] = str(int(time.time()))
# Prevents the chatbot from responding to messages
# sent before it entered the room
if (m_info.get('who') and
((m_info.get('when') and
int(m_info.get('when')) > int(self.join_time[room])) or
m_info.get('where') in {'j', 'l', 'pm'})):
for trigger in self.triggers:
# print 'testing trigger %s' % trigger
try:
if trigger.match(m_info):
print 'match %s' % trigger
future = self.thread_pool_executor.submit(self.call_trigger_response, trigger, m_info)
future.room = room
future.m_info = m_info
future.add_done_callback(self.future_callback)
except Exception as e:
logging.error("Crashed in match: %s, %s, %s, %s - %s" %
(e.message, e.args, trigger, type(e), m_info))
self.send_pm(self.ws.master,
"Crashed in match: %s, %s, %s" %
(e.message, e.args, trigger))
# User list is currently hardcoded here. Might move this to triggers later on
if m_info['where'] == 'j' and condense(m_info['who']) not in map(condense, self.current_users[room]):
self.current_users[room].append(msg[1])
elif m_info['where'] == 'l':
for user in self.current_users[room]:
if condense(user) == condense(msg[1]):
self.current_users[room].remove(user)
elif m_info['where'] == 'n':
# |N| messages are of the format |N|(rank)newname|oldnameid
# Rank is a blank space if the nick is a regular user
# i.e. |N|@Scotteh|stretcher
newuser, olduser, userfound = msg[1], msg[2], False
for user in self.current_users[room]:
if condense(user) == condense(msg[2]):
self.current_users[room].remove(user)
userfound = True
if userfound:
self.current_users[room].append(msg[1])
elif m_info['where'] == 'users':
# Resets the userlist for the room if it exists, and creates a new one
# |users| messages are only sent on room join
self.current_users[room] = []
for user in msg[1].split(',')[1:]:
self.current_users[room].append(user)
if m_info['where'] == 'raw' and int(time.time()) > int(self.join_time[room]):
print (int(time.time()), self.join_time[room])
# Get checker. Hardcoded.
getmap = {2: 'dubs',
3: 'trips',
4: 'quads',
5: 'quints',
6: 'sexts',
7: 'septs',
8: 'octs',
9: 'nons',
10: 'decs'}
if m_info['all'][1].startswith('<div class="infobox">Roll '):
if '+' in m_info['all'][1] or ')-' in m_info['all'][1]:
# dirty cheaters, trying to fake GETs
return
raw_msg = msg[1][21:-6] # Strips the leading HTML
# Don't try and understand the next line, it takes raw_msg as input and
# creates a list of size 2 lists splitting the raw_msg and showing the consecutive
# characters, and returns the amount of consecutive characters at the end
# '11223344441122' => [['1', 2], ['2', 2], ['3', 2], ['4', 4], ['1', 2], ['2', 2]]
get = getmap.get([[k,len(list(g))] for k, g in groupby(raw_msg)][-1][1])
if get:
self.send_msg(room, 'nice ' + get)
def make_msg_info(self, msg, room, ws):
info = {'where': msg[0],
'ws': ws,
'all': msg,
'ch': self,
'me': self.user
}
info['where'] = info['where'].lower()
if info['where'] == 'c:':
info.update({'where': 'c',
'room': room,
'who': msg[2].decode('utf-8')[1:].encode('utf-8'),
'allwho': msg[2],
'when': str(int(time.time())),
'what': '|'.join(msg[3:])})
elif info['where'] == 'c':
info.update({'room': room,
'who': msg[1].decode('utf-8')[1:].encode('utf-8'),
'allwho': msg[1],
'what': '|'.join(msg[2:])})
elif info['where'] == 'j' or info['where'] == 'l':
info.update({'room': room,
'who': msg[1][1:],
'allwho': msg[1],
'what': ''})
elif info['where'] == 'n':
info.update({'room': room,
'who': msg[1][1:],
'allwho': msg[1],
'oldname': msg[2],
'what': ''})
elif info['where'] == 'users':
info.update({'room': room,
'who': '',
'what': msg[1]})
elif info['where'] == 'pm':
info.update({'who': msg[1][1:],
'allwho': msg[1],
'target': msg[2][1:],
'what': msg[3]})
return info
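# Example of the resulting structure (hypothetical message, added for
# illustration): a chat line split as ['c:', '1465123456', '+Scotteh', 'hi']
# for room 'lobby' yields roughly {'where': 'c', 'room': 'lobby',
# 'who': 'Scotteh', 'what': 'hi', 'when': <current unix time>, ...}.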
| mit |
lulandco/SickRage | lib/hachoir_parser/audio/midi.py | 84 | 9168 | """
Musical Instrument Digital Interface (MIDI) audio file parser.
Documentation:
- Standard MIDI File Format, Dustin Caldwell (downloaded on wotsit.org)
Author: Victor Stinner
Creation: 27 december 2006
"""
from hachoir_parser import Parser
from hachoir_core.field import (FieldSet, Bits, ParserError,
String, UInt32, UInt24, UInt16, UInt8, Enum, RawBits, RawBytes)
from hachoir_core.endian import BIG_ENDIAN
from hachoir_core.text_handler import textHandler, hexadecimal
from hachoir_core.tools import createDict, humanDurationNanosec
from hachoir_parser.common.tracker import NOTE_NAME
MAX_FILESIZE = 10 * 1024 * 1024
class Integer(Bits):
def __init__(self, parent, name, description=None):
Bits.__init__(self, parent, name, 8, description)
stream = parent.stream
addr = self.absolute_address
value = 0
while True:
bits = stream.readBits(addr, 8, parent.endian)
value = (value << 7) + (bits & 127)
if not(bits & 128):
break
addr += 8
self._size += 8
if 32 < self._size:
raise ParserError("Integer size is bigger than 32-bit")
self.createValue = lambda: value
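# Added note (not part of the original parser): this reads a standard MIDI
# variable-length quantity -- seven payload bits per byte, high bit set on all
# but the last byte -- e.g. the bytes 0x81 0x48 decode to (1 << 7) + 0x48 = 200.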
def parseNote(parser):
yield Enum(UInt8(parser, "note", "Note number"), NOTE_NAME)
yield UInt8(parser, "velocity")
def parseControl(parser):
yield UInt8(parser, "control", "Controller number")
yield UInt8(parser, "value", "New value")
def parsePatch(parser):
yield UInt8(parser, "program", "New program number")
def parseChannel(parser, size=1):
yield UInt8(parser, "channel", "Channel number")
def parsePitch(parser):
yield UInt8(parser, "bottom", "(least sig) 7 bits of value")
yield UInt8(parser, "top", "(most sig) 7 bits of value")
def parseText(parser, size):
yield String(parser, "text", size)
def parseSMPTEOffset(parser, size):
yield RawBits(parser, "padding", 1)
yield Enum(Bits(parser, "frame_rate", 2),
{0:"24 fps", 1:"25 fps", 2:"30 fps (drop frame)", 3:"30 fps"})
yield Bits(parser, "hour", 5)
yield UInt8(parser, "minute")
yield UInt8(parser, "second")
yield UInt8(parser, "frame")
yield UInt8(parser, "subframe", "100 subframes per frame")
def formatTempo(field):
return humanDurationNanosec(field.value*1000)
def parseTempo(parser, size):
yield textHandler(UInt24(parser, "microsec_quarter", "Microseconds per quarter note"), formatTempo)
def parseTimeSignature(parser, size):
yield UInt8(parser, "numerator", "Numerator of time signature")
yield UInt8(parser, "denominator", "denominator of time signature 2=quarter 3=eighth, etc.")
yield UInt8(parser, "nb_tick", "Number of ticks in metronome click")
yield UInt8(parser, "nb_32nd_note", "Number of 32nd notes to the quarter note")
class Command(FieldSet):
COMMAND = {}
for channel in xrange(16):
COMMAND[0x80+channel] = ("Note off (channel %u)" % channel, parseNote)
COMMAND[0x90+channel] = ("Note on (channel %u)" % channel, parseNote)
COMMAND[0xA0+channel] = ("Key after-touch (channel %u)" % channel, parseNote)
COMMAND[0xB0+channel] = ("Control change (channel %u)" % channel, parseControl)
COMMAND[0xC0+channel] = ("Program (patch) change (channel %u)" % channel, parsePatch)
COMMAND[0xD0+channel] = ("Channel after-touch (channel %u)" % channel, parseChannel)
COMMAND[0xE0+channel] = ("Pitch wheel change (channel %u)" % channel, parsePitch)
COMMAND_DESC = createDict(COMMAND, 0)
COMMAND_PARSER = createDict(COMMAND, 1)
META_COMMAND_TEXT = 1
META_COMMAND_NAME = 3
META_COMMAND = {
0x00: ("Sets the track's sequence number", None),
0x01: ("Text event", parseText),
0x02: ("Copyright info", parseText),
0x03: ("Sequence or Track name", parseText),
0x04: ("Track instrument name", parseText),
0x05: ("Lyric", parseText),
0x06: ("Marker", parseText),
0x07: ("Cue point", parseText),
0x20: ("MIDI Channel Prefix", parseChannel),
0x2F: ("End of the track", None),
0x51: ("Set tempo", parseTempo),
0x54: ("SMPTE offset", parseSMPTEOffset),
0x58: ("Time Signature", parseTimeSignature),
0x59: ("Key signature", None),
0x7F: ("Sequencer specific information", None),
}
META_COMMAND_DESC = createDict(META_COMMAND, 0)
META_COMMAND_PARSER = createDict(META_COMMAND, 1)
def __init__(self, *args, **kwargs):
if 'prev_command' in kwargs:
self.prev_command = kwargs['prev_command']
del kwargs['prev_command']
else:
self.prev_command = None
self.command = None
FieldSet.__init__(self, *args, **kwargs)
def createFields(self):
yield Integer(self, "time", "Delta time in ticks")
next = self.stream.readBits(self.absolute_address+self.current_size, 8, self.root.endian)
if next & 0x80 == 0:
# "Running Status" command
if self.prev_command is None:
raise ParserError("Running Status command not preceded by another command.")
self.command = self.prev_command.command
else:
yield Enum(textHandler(UInt8(self, "command"), hexadecimal), self.COMMAND_DESC)
self.command = self["command"].value
if self.command == 0xFF:
yield Enum(textHandler(UInt8(self, "meta_command"), hexadecimal), self.META_COMMAND_DESC)
yield UInt8(self, "data_len")
size = self["data_len"].value
if size:
command = self["meta_command"].value
if command in self.META_COMMAND_PARSER:
parser = self.META_COMMAND_PARSER[command]
else:
parser = None
if parser:
for field in parser(self, size):
yield field
else:
yield RawBytes(self, "data", size)
else:
if self.command not in self.COMMAND_PARSER:
raise ParserError("Unknown command: %s" % self["command"].display)
parser = self.COMMAND_PARSER[self.command]
for field in parser(self):
yield field
def createDescription(self):
if "meta_command" in self:
return self["meta_command"].display
else:
return self.COMMAND_DESC[self.command]
class Track(FieldSet):
def __init__(self, *args):
FieldSet.__init__(self, *args)
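        # Total size in bits: 4-byte "MTrk" marker plus 4-byte length field,
        # plus the payload length announced in the "size" field.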
self._size = (8 + self["size"].value) * 8
def createFields(self):
yield String(self, "marker", 4, "Track marker (MTrk)", charset="ASCII")
yield UInt32(self, "size")
cur = None
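        # The "if True" switch below always takes the command-parsing branch;
        # the unreachable else branch would instead expose the whole track
        # payload as raw bytes.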
if True:
while not self.eof:
cur = Command(self, "command[]", prev_command=cur)
yield cur
else:
size = self["size"].value
if size:
yield RawBytes(self, "raw", size)
def createDescription(self):
command = self["command[0]"]
if "meta_command" in command \
and command["meta_command"].value in (Command.META_COMMAND_TEXT, Command.META_COMMAND_NAME) \
and "text" in command:
return command["text"].value.strip("\r\n")
else:
return ""
class Header(FieldSet):
static_size = 10*8
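    # 10 bytes: 4-byte chunk size field plus three 16-bit values
    # (file format, track count, delta-time division).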
FILE_FORMAT = {
0: "Single track",
1: "Multiple tracks, synchronous",
2: "Multiple tracks, asynchronous",
}
def createFields(self):
yield UInt32(self, "size")
yield Enum(UInt16(self, "file_format"), self.FILE_FORMAT)
yield UInt16(self, "nb_track")
yield UInt16(self, "delta_time", "Delta-time ticks per quarter note")
def createDescription(self):
return "%s; %s tracks" % (
self["file_format"].display, self["nb_track"].value)
class MidiFile(Parser):
MAGIC = "MThd"
PARSER_TAGS = {
"id": "midi",
"category": "audio",
"file_ext": ["mid", "midi"],
"mime": (u"audio/mime", ),
"magic": ((MAGIC, 0),),
"min_size": 64,
"description": "MIDI audio"
}
endian = BIG_ENDIAN
def validate(self):
if self.stream.readBytes(0, 4) != self.MAGIC:
return "Invalid signature"
if self["header/size"].value != 6:
return "Invalid header size"
return True
def createFields(self):
yield String(self, "signature", 4, r"MIDI signature (MThd)", charset="ASCII")
yield Header(self, "header")
while not self.eof:
yield Track(self, "track[]")
def createDescription(self):
return "MIDI audio: %s" % self["header"].description
def createContentSize(self):
count = self["/header/nb_track"].value - 1
start = self["track[%u]" % count].absolute_address
# Search "End of track" of last track
end = self.stream.searchBytes("\xff\x2f\x00", start, MAX_FILESIZE*8)
if end is not None:
return end + 3*8
return None
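# Hedged usage sketch (not part of the parser itself): with the hachoir
# framework this module is written for, a MIDI file can typically be opened
# and inspected along the following lines. The import path is an assumption
# about the hachoir 1.x (Python 2) layout and may differ between versions:
#
#     from hachoir_parser import createParser
#     parser = createParser(u"song.mid")
#     print parser["header/nb_track"].value    # declared number of tracks
#     for track in parser.array("track"):
#         print track.description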
| gpl-3.0 |
rapidpro/tracpro | tracpro/orgs_ext/views.py | 1 | 4725 | from __future__ import absolute_import, unicode_literals
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.shortcuts import redirect
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from dash.orgs.views import OrgCRUDL, InferOrgMixin, OrgPermsMixin
from dash.utils import ms_to_datetime
from dateutil.relativedelta import relativedelta
from smartmin.templatetags.smartmin import format_datetime
from smartmin.views import SmartUpdateView, SmartFormView
from . import constants
from . import forms
from . import tasks
class MakeAdminsIntoStaffMixin(object):
# Make sure all admins are staff users.
def post_save(self, obj):
obj.get_org_admins().filter(is_staff=False).update(is_staff=True)
return super(MakeAdminsIntoStaffMixin, self).post_save(obj)
class OrgExtCRUDL(OrgCRUDL):
actions = ('create', 'update', 'list', 'home', 'edit', 'chooser',
'choose', 'fetchruns')
class Chooser(OrgCRUDL.Chooser):
def dispatch(self, request, *args, **kwargs):
if request.org:
# We have an org, no need for the chooser view
return redirect(reverse('home.home'))
return super(OrgExtCRUDL.Chooser, self).dispatch(request, *args, **kwargs)
class Create(MakeAdminsIntoStaffMixin, OrgCRUDL.Create):
form_class = forms.OrgExtForm
fields = ('name', 'available_languages', 'language',
'timezone', 'subdomain', 'api_token', 'google_analytics', 'show_spoof_data',
'logo', 'administrators')
class List(OrgCRUDL.List):
default_order = ('name',)
class Update(MakeAdminsIntoStaffMixin, OrgCRUDL.Update):
form_class = forms.OrgExtForm
fields = ('is_active', 'name', 'available_languages', 'language',
'contact_fields', 'timezone', 'subdomain', 'api_token', 'google_analytics',
'show_spoof_data', 'logo', 'administrators',
'how_to_handle_sameday_responses',
)
class Home(OrgCRUDL.Home):
fields = ('name', 'timezone', 'api_token', 'google_analytics', 'last_contact_sync',
'last_flow_run_fetch')
field_config = {
'api_token': {
'label': _("RapidPro API Token"),
},
}
permission = 'orgs.org_home'
title = _("My Organization")
def get_google_analytics(self, obj):
return obj.get_config("google_analytics", "")
def get_last_contact_sync(self, obj):
result = obj.get_task_result(constants.TaskType.sync_contacts)
if result:
return "%s (%d created, %d updated, %d deleted, %d failed)" % (
format_datetime(ms_to_datetime(result['time'])),
result['counts']['created'],
result['counts']['updated'],
result['counts']['deleted'],
result['counts']['failed'],
)
else:
return None
def get_last_flow_run_fetch(self, obj):
result = obj.get_task_result(constants.TaskType.fetch_runs)
if result:
return "%s (%d fetched)" % (
format_datetime(ms_to_datetime(result['time'])),
result.get('counts', {}).get('fetched', 0)
)
else:
return None
class Edit(InferOrgMixin, OrgPermsMixin, SmartUpdateView):
fields = ('name', 'timezone', 'contact_fields', 'logo', 'google_analytics')
form_class = forms.OrgExtForm
permission = 'orgs.org_edit'
success_url = '@orgs_ext.org_home'
title = _("Edit My Organization")
class Fetchruns(InferOrgMixin, OrgPermsMixin, SmartFormView):
form_class = forms.FetchRunsForm
permission = 'orgs.org_fetch_runs'
success_url = '@orgs_ext.org_home'
title = _("Fetch past runs for my organization")
template_name = 'polls/fetch_runs.html'
def form_valid(self, form):
org = self.get_object()
howfarback = relativedelta(days=form.cleaned_data['days'])
since = timezone.now() - howfarback
email = self.request.user.email
tasks.fetch_runs.delay(org.id, since, email)
success_message = _("We have scheduled a fetch in the background. An email will be "
"sent to {email} when the fetch has completed.").format(email=email)
messages.success(self.request, success_message)
return super(SmartFormView, self).form_valid(form)
| bsd-3-clause |
asimonet/website | reveal.js/node_modules/node-gyp/gyp/pylib/gyp/MSVSUtil.py | 1812 | 9537 | # Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility functions shared amongst the Windows generators."""
import copy
import os
# A dictionary mapping supported target types to extensions.
TARGET_TYPE_EXT = {
'executable': 'exe',
'loadable_module': 'dll',
'shared_library': 'dll',
'static_library': 'lib',
}
def _GetLargePdbShimCcPath():
"""Returns the path of the large_pdb_shim.cc file."""
this_dir = os.path.abspath(os.path.dirname(__file__))
src_dir = os.path.abspath(os.path.join(this_dir, '..', '..'))
win_data_dir = os.path.join(src_dir, 'data', 'win')
large_pdb_shim_cc = os.path.join(win_data_dir, 'large-pdb-shim.cc')
return large_pdb_shim_cc
def _DeepCopySomeKeys(in_dict, keys):
"""Performs a partial deep-copy on |in_dict|, only copying the keys in |keys|.
Arguments:
in_dict: The dictionary to copy.
keys: The keys to be copied. If a key is in this list and doesn't exist in
|in_dict| this is not an error.
Returns:
The partially deep-copied dictionary.
"""
d = {}
for key in keys:
if key not in in_dict:
continue
d[key] = copy.deepcopy(in_dict[key])
return d
def _SuffixName(name, suffix):
"""Add a suffix to the end of a target.
Arguments:
name: name of the target (foo#target)
suffix: the suffix to be added
Returns:
Target name with suffix added (foo_suffix#target)
"""
parts = name.rsplit('#', 1)
parts[0] = '%s_%s' % (parts[0], suffix)
return '#'.join(parts)
def _ShardName(name, number):
"""Add a shard number to the end of a target.
Arguments:
name: name of the target (foo#target)
number: shard number
Returns:
Target name with shard added (foo_1#target)
"""
return _SuffixName(name, str(number))
def ShardTargets(target_list, target_dicts):
"""Shard some targets apart to work around the linkers limits.
Arguments:
target_list: List of target pairs: 'base/base.gyp:base'.
target_dicts: Dict of target properties keyed on target pair.
Returns:
Tuple of the new sharded versions of the inputs.
"""
# Gather the targets to shard, and how many pieces.
targets_to_shard = {}
for t in target_dicts:
shards = int(target_dicts[t].get('msvs_shard', 0))
if shards:
targets_to_shard[t] = shards
# Shard target_list.
new_target_list = []
for t in target_list:
if t in targets_to_shard:
for i in range(targets_to_shard[t]):
new_target_list.append(_ShardName(t, i))
else:
new_target_list.append(t)
# Shard target_dict.
new_target_dicts = {}
for t in target_dicts:
if t in targets_to_shard:
for i in range(targets_to_shard[t]):
name = _ShardName(t, i)
new_target_dicts[name] = copy.copy(target_dicts[t])
new_target_dicts[name]['target_name'] = _ShardName(
new_target_dicts[name]['target_name'], i)
sources = new_target_dicts[name].get('sources', [])
new_sources = []
for pos in range(i, len(sources), targets_to_shard[t]):
new_sources.append(sources[pos])
new_target_dicts[name]['sources'] = new_sources
else:
new_target_dicts[t] = target_dicts[t]
# Shard dependencies.
for t in new_target_dicts:
for deptype in ('dependencies', 'dependencies_original'):
dependencies = copy.copy(new_target_dicts[t].get(deptype, []))
new_dependencies = []
for d in dependencies:
if d in targets_to_shard:
for i in range(targets_to_shard[d]):
new_dependencies.append(_ShardName(d, i))
else:
new_dependencies.append(d)
new_target_dicts[t][deptype] = new_dependencies
return (new_target_list, new_target_dicts)
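# Illustrative sketch (hypothetical values): a target 'base/base.gyp:base#target'
# with 'msvs_shard': 2 is replaced by 'base/base.gyp:base_0#target' and
# 'base/base.gyp:base_1#target', its sources are dealt out round-robin between
# the shards, and every dependent target is rewritten to depend on each shard.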
def _GetPdbPath(target_dict, config_name, vars):
"""Returns the path to the PDB file that will be generated by a given
configuration.
The lookup proceeds as follows:
- Look for an explicit path in the VCLinkerTool configuration block.
- Look for an 'msvs_large_pdb_path' variable.
- Use '<(PRODUCT_DIR)/<(product_name).(exe|dll).pdb' if 'product_name' is
specified.
- Use '<(PRODUCT_DIR)/<(target_name).(exe|dll).pdb'.
Arguments:
target_dict: The target dictionary to be searched.
config_name: The name of the configuration of interest.
vars: A dictionary of common GYP variables with generator-specific values.
Returns:
The path of the corresponding PDB file.
"""
config = target_dict['configurations'][config_name]
msvs = config.setdefault('msvs_settings', {})
linker = msvs.get('VCLinkerTool', {})
pdb_path = linker.get('ProgramDatabaseFile')
if pdb_path:
return pdb_path
variables = target_dict.get('variables', {})
pdb_path = variables.get('msvs_large_pdb_path', None)
if pdb_path:
return pdb_path
pdb_base = target_dict.get('product_name', target_dict['target_name'])
pdb_base = '%s.%s.pdb' % (pdb_base, TARGET_TYPE_EXT[target_dict['type']])
pdb_path = vars['PRODUCT_DIR'] + '/' + pdb_base
return pdb_path
def InsertLargePdbShims(target_list, target_dicts, vars):
"""Insert a shim target that forces the linker to use 4KB pagesize PDBs.
This is a workaround for targets with PDBs greater than 1GB in size, the
limit for the 1KB pagesize PDBs created by the linker by default.
Arguments:
target_list: List of target pairs: 'base/base.gyp:base'.
target_dicts: Dict of target properties keyed on target pair.
vars: A dictionary of common GYP variables with generator-specific values.
Returns:
Tuple of the shimmed version of the inputs.
"""
# Determine which targets need shimming.
targets_to_shim = []
for t in target_dicts:
target_dict = target_dicts[t]
# We only want to shim targets that have msvs_large_pdb enabled.
if not int(target_dict.get('msvs_large_pdb', 0)):
continue
# This is intended for executable, shared_library and loadable_module
# targets where every configuration is set up to produce a PDB output.
# If any of these conditions is not true then the shim logic will fail
# below.
targets_to_shim.append(t)
large_pdb_shim_cc = _GetLargePdbShimCcPath()
for t in targets_to_shim:
target_dict = target_dicts[t]
target_name = target_dict.get('target_name')
base_dict = _DeepCopySomeKeys(target_dict,
['configurations', 'default_configuration', 'toolset'])
# This is the dict for copying the source file (part of the GYP tree)
# to the intermediate directory of the project. This is necessary because
# we can't always build a relative path to the shim source file (on Windows
# GYP and the project may be on different drives), and Ninja hates absolute
# paths (it ends up generating the .obj and .obj.d alongside the source
# file, polluting GYPs tree).
copy_suffix = 'large_pdb_copy'
copy_target_name = target_name + '_' + copy_suffix
full_copy_target_name = _SuffixName(t, copy_suffix)
shim_cc_basename = os.path.basename(large_pdb_shim_cc)
shim_cc_dir = vars['SHARED_INTERMEDIATE_DIR'] + '/' + copy_target_name
shim_cc_path = shim_cc_dir + '/' + shim_cc_basename
copy_dict = copy.deepcopy(base_dict)
copy_dict['target_name'] = copy_target_name
copy_dict['type'] = 'none'
copy_dict['sources'] = [ large_pdb_shim_cc ]
copy_dict['copies'] = [{
'destination': shim_cc_dir,
'files': [ large_pdb_shim_cc ]
}]
# This is the dict for the PDB generating shim target. It depends on the
# copy target.
shim_suffix = 'large_pdb_shim'
shim_target_name = target_name + '_' + shim_suffix
full_shim_target_name = _SuffixName(t, shim_suffix)
shim_dict = copy.deepcopy(base_dict)
shim_dict['target_name'] = shim_target_name
shim_dict['type'] = 'static_library'
shim_dict['sources'] = [ shim_cc_path ]
shim_dict['dependencies'] = [ full_copy_target_name ]
# Set up the shim to output its PDB to the same location as the final linker
# target.
for config_name, config in shim_dict.get('configurations').iteritems():
pdb_path = _GetPdbPath(target_dict, config_name, vars)
# A few keys that we don't want to propagate.
for key in ['msvs_precompiled_header', 'msvs_precompiled_source', 'test']:
config.pop(key, None)
msvs = config.setdefault('msvs_settings', {})
# Update the compiler directives in the shim target.
compiler = msvs.setdefault('VCCLCompilerTool', {})
compiler['DebugInformationFormat'] = '3'
compiler['ProgramDataBaseFileName'] = pdb_path
# Set the explicit PDB path in the appropriate configuration of the
# original target.
config = target_dict['configurations'][config_name]
msvs = config.setdefault('msvs_settings', {})
linker = msvs.setdefault('VCLinkerTool', {})
linker['GenerateDebugInformation'] = 'true'
linker['ProgramDatabaseFile'] = pdb_path
# Add the new targets. They must go to the beginning of the list so that
# the dependency generation works as expected in ninja.
target_list.insert(0, full_copy_target_name)
target_list.insert(0, full_shim_target_name)
target_dicts[full_copy_target_name] = copy_dict
target_dicts[full_shim_target_name] = shim_dict
# Update the original target to depend on the shim target.
target_dict.setdefault('dependencies', []).append(full_shim_target_name)
return (target_list, target_dicts)
| cc0-1.0 |
kisna72/django | tests/fixtures/models.py | 128 | 3287 | """
Fixtures.
Fixtures are a way of loading data into the database in bulk. Fixture data
can be stored in any serializable format (including JSON and XML). Fixtures
are identified by name, and are stored in either a directory named 'fixtures'
in the application directory, or in one of the directories named in the
``FIXTURE_DIRS`` setting.
"""
from django.contrib.auth.models import Permission
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Category(models.Model):
title = models.CharField(max_length=100)
description = models.TextField()
def __str__(self):
return self.title
class Meta:
ordering = ('title',)
@python_2_unicode_compatible
class Article(models.Model):
headline = models.CharField(max_length=100, default='Default headline')
pub_date = models.DateTimeField()
def __str__(self):
return self.headline
class Meta:
ordering = ('-pub_date', 'headline')
@python_2_unicode_compatible
class Blog(models.Model):
name = models.CharField(max_length=100)
featured = models.ForeignKey(Article, related_name='fixtures_featured_set')
articles = models.ManyToManyField(Article, blank=True,
related_name='fixtures_articles_set')
def __str__(self):
return self.name
@python_2_unicode_compatible
class Tag(models.Model):
name = models.CharField(max_length=100)
tagged_type = models.ForeignKey(ContentType, related_name="fixtures_tag_set")
tagged_id = models.PositiveIntegerField(default=0)
tagged = GenericForeignKey(ct_field='tagged_type', fk_field='tagged_id')
def __str__(self):
return '<%s: %s> tagged "%s"' % (self.tagged.__class__.__name__,
self.tagged, self.name)
class PersonManager(models.Manager):
def get_by_natural_key(self, name):
return self.get(name=name)
@python_2_unicode_compatible
class Person(models.Model):
objects = PersonManager()
name = models.CharField(max_length=100)
def __str__(self):
return self.name
class Meta:
ordering = ('name',)
def natural_key(self):
return (self.name,)
class SpyManager(PersonManager):
def get_queryset(self):
return super(SpyManager, self).get_queryset().filter(cover_blown=False)
class Spy(Person):
objects = SpyManager()
cover_blown = models.BooleanField(default=False)
@python_2_unicode_compatible
class Visa(models.Model):
person = models.ForeignKey(Person)
permissions = models.ManyToManyField(Permission, blank=True)
def __str__(self):
return '%s %s' % (self.person.name,
', '.join(p.name for p in self.permissions.all()))
@python_2_unicode_compatible
class Book(models.Model):
name = models.CharField(max_length=100)
authors = models.ManyToManyField(Person)
def __str__(self):
authors = ' and '.join(a.name for a in self.authors.all())
return '%s by %s' % (self.name, authors) if authors else self.name
class Meta:
ordering = ('name',)
| bsd-3-clause |
Debian/openjfx | modules/web/src/main/native/Tools/Scripts/webkitpy/replay/main.py | 2 | 5264 | # Copyright (C) 2011 Google Inc. All rights reserved.
# Copyright (C) 2014 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import os.path
import shutil
import sys
import tempfile
from webkitpy.common.checkout.scm.detection import detect_scm_system
from webkitpy.common.system.executive import ScriptError
class InputGeneratorTests:
def __init__(self, reset_results, executive):
self.reset_results = reset_results
self.executive = executive
def generate_from_json(self, json_file, output_directory):
cmd = ['python',
'JavaScriptCore/replay/scripts/CodeGeneratorReplayInputs.py',
'--outputDir', output_directory,
'--force',
'--framework', 'Test',
'--test',
json_file]
exit_code = 0
try:
stderr_output = self.executive.run_command(cmd)
if stderr_output:
self.write_error_file(json_file, output_directory, stderr_output)
except ScriptError, e:
print e.output
exit_code = e.exit_code
return exit_code
def write_error_file(self, input_filepath, output_directory, error_output):
output_filepath = os.path.join(output_directory, os.path.basename(input_filepath) + '-error')
with open(output_filepath, "w") as output_file:
output_file.write(error_output)
def detect_changes(self, work_directory, reference_directory):
changes_found = False
for output_file in os.listdir(work_directory):
cmd = ['diff',
'-u',
'-N',
os.path.join(reference_directory, output_file),
os.path.join(work_directory, output_file)]
exit_code = 0
try:
output = self.executive.run_command(cmd)
except ScriptError, e:
output = e.output
exit_code = e.exit_code
if exit_code or output:
print 'FAIL: %s' % output_file
print output
changes_found = True
else:
print 'PASS: %s' % output_file
return changes_found
def run_tests(self, input_directory, reference_directory):
work_directory = reference_directory
passed = True
for input_file in os.listdir(input_directory):
(name, extension) = os.path.splitext(input_file)
if extension != '.json':
continue
            # Generate output into the work directory (the reference directory
            # when resetting results, otherwise a fresh temporary directory).
if not self.reset_results:
work_directory = tempfile.mkdtemp()
if self.generate_from_json(os.path.join(input_directory, input_file), work_directory):
passed = False
if self.reset_results:
print "Reset results for test: %s" % (input_file)
continue
# Detect changes
if self.detect_changes(work_directory, reference_directory):
passed = False
shutil.rmtree(work_directory)
return passed
def main(self):
current_scm = detect_scm_system(os.path.dirname(os.path.abspath(__file__)))
os.chdir(os.path.join(current_scm.checkout_root, 'Source'))
all_tests_passed = True
input_directory = os.path.join('JavaScriptCore', 'replay', 'scripts', 'tests')
reference_directory = os.path.join('JavaScriptCore', 'replay', 'scripts', 'tests', 'expected')
if not self.run_tests(input_directory, reference_directory):
all_tests_passed = False
print ''
if all_tests_passed:
print 'All tests PASS!'
return 0
else:
print 'Some tests FAIL! (To update the reference files, execute "run-input-generator-tests --reset-results")'
return -1
| gpl-2.0 |
yujikato/DIRAC | src/DIRAC/RequestManagementSystem/Service/test/OperationHandlerBaseTests.py | 2 | 2636 | ########################################################################
# $HeadURL $
# File: OperationHandlerBaseTests.py
# Author: Krzysztof.Ciba@NOSPAMgmail.com
# Date: 2013/03/25 08:09:08
########################################################################
""" :mod: OperationHandlerBaseTests
===============================
.. module: OperationHandlerBaseTests
:synopsis: unittests for OperationHandlerBase
.. moduleauthor:: Krzysztof.Ciba@NOSPAMgmail.com
unittests for OperationHandlerBase
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__RCSID__ = "$Id $"
# #
# @file OperationHandlerBaseTests.py
# @author Krzysztof.Ciba@NOSPAMgmail.com
# @date 2013/03/25 08:09:21
# @brief Definition of OperationHandlerBaseTests class.
# # imports
import unittest
from DIRAC.RequestManagementSystem.private.OperationHandlerBase import OperationHandlerBase
from DIRAC.RequestManagementSystem.Client.Request import Request
from DIRAC.RequestManagementSystem.Client.Operation import Operation
from DIRAC.DataManagementSystem.Client.DataManager import DataManager
########################################################################
class OperationHandlerBaseTests( unittest.TestCase ):
"""
.. class:: OperationHandlerBaseTests
"""
def setUp( self ):
""" test set up """
self.req = Request()
self.req.RequestName = "testRequest"
self.op = Operation( {"Type" : "ForwardDISET", "Arguments" : "foobar" } )
self.req.addOperation( self.op )
self.baseOp = OperationHandlerBase()
def tearDown( self ):
""" test tear down """
del self.baseOp
del self.op
del self.req
def testOperationHandlerBase( self ):
""" base op test """
self.baseOp.setOperation( self.op )
# # log is there
self.assertEqual( "log" in dir( self.baseOp ), True, "log missing" )
# # operation is there
self.assertEqual( "operation" in dir( self.baseOp ), True, "operation is missing" )
# # request is there
self.assertEqual( "request" in dir( self.baseOp ), True, "request is missing" )
# # __call__ not implemented
self.assertRaises( NotImplementedError, self.baseOp )
# # replica manager
self.assertEqual( isinstance( self.baseOp.dm, DataManager ), True, "DataManager is missing" )
# # tests execution
if __name__ == "__main__":
testLoader = unittest.TestLoader()
OperationHandlerBaseTests = testLoader.loadTestsFromTestCase( OperationHandlerBaseTests )
suite = unittest.TestSuite( [ OperationHandlerBaseTests ] )
unittest.TextTestRunner( verbosity = 3 ).run( suite )
| gpl-3.0 |
StackStorm/mistral | mistral/tests/unit/utils/test_inspect_utils.py | 1 | 2068 | # Copyright 2014 - Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from mistral.actions import std_actions
from mistral.tests.unit import base
from mistral.utils import inspect_utils as i_u
from mistral.workflow import commands
class ClassWithProperties(object):
a = 1
@property
def prop(self):
pass
class InspectUtilsTest(base.BaseTest):
def test_get_parameters_str(self):
action_class = std_actions.HTTPAction
parameters_str = i_u.get_arg_list_as_str(action_class.__init__)
http_action_params = (
'url, method="GET", params=null, body=null, '
'headers=null, cookies=null, auth=null, '
'timeout=null, allow_redirects=null, '
'proxies=null, verify=null'
)
self.assertEqual(http_action_params, parameters_str)
def test_get_parameters_str_all_mandatory(self):
clazz = commands.RunTask
parameters_str = i_u.get_arg_list_as_str(clazz.__init__)
self.assertEqual(
'wf_ex, wf_spec, task_spec, ctx, triggered_by=null',
parameters_str
)
def test_get_parameters_str_with_function_parameter(self):
def test_func(foo, bar=None, test_func=time.sleep):
pass
parameters_str = i_u.get_arg_list_as_str(test_func)
self.assertEqual("foo, bar=null", parameters_str)
def test_get_public_fields(self):
attrs = i_u.get_public_fields(ClassWithProperties)
self.assertEqual(attrs, {'a': 1})
| apache-2.0 |
wangxinxi/litecoin | test/functional/import-rescan.py | 10 | 8952 | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test wallet import RPCs.
Test rescan behavior of importaddress, importpubkey, importprivkey, and
importmulti RPCs with different types of keys and rescan options.
In the first part of the test, node 0 creates an address for each type of
import RPC call and sends BTC to it. Then other nodes import the addresses,
and the test makes listtransactions and getbalance calls to confirm that the
importing node either did or did not execute rescans picking up the send
transactions.
In the second part of the test, node 0 sends more BTC to each address, and the
test makes more listtransactions and getbalance calls to confirm that the
importing nodes pick up the new transactions regardless of whether rescans
happened previously.
"""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (assert_raises_rpc_error, connect_nodes, sync_blocks, assert_equal, set_node_times)
import collections
import enum
import itertools
Call = enum.Enum("Call", "single multi")
Data = enum.Enum("Data", "address pub priv")
Rescan = enum.Enum("Rescan", "no yes late_timestamp")
class Variant(collections.namedtuple("Variant", "call data rescan prune")):
"""Helper for importing one key and verifying scanned transactions."""
def try_rpc(self, func, *args, **kwargs):
if self.expect_disabled:
assert_raises_rpc_error(-4, "Rescan is disabled in pruned mode", func, *args, **kwargs)
else:
return func(*args, **kwargs)
def do_import(self, timestamp):
"""Call one key import RPC."""
if self.call == Call.single:
if self.data == Data.address:
response = self.try_rpc(self.node.importaddress, self.address["address"], self.label,
self.rescan == Rescan.yes)
elif self.data == Data.pub:
response = self.try_rpc(self.node.importpubkey, self.address["pubkey"], self.label,
self.rescan == Rescan.yes)
elif self.data == Data.priv:
response = self.try_rpc(self.node.importprivkey, self.key, self.label, self.rescan == Rescan.yes)
assert_equal(response, None)
elif self.call == Call.multi:
response = self.node.importmulti([{
"scriptPubKey": {
"address": self.address["address"]
},
"timestamp": timestamp + TIMESTAMP_WINDOW + (1 if self.rescan == Rescan.late_timestamp else 0),
"pubkeys": [self.address["pubkey"]] if self.data == Data.pub else [],
"keys": [self.key] if self.data == Data.priv else [],
"label": self.label,
"watchonly": self.data != Data.priv
}], {"rescan": self.rescan in (Rescan.yes, Rescan.late_timestamp)})
assert_equal(response, [{"success": True}])
def check(self, txid=None, amount=None, confirmations=None):
"""Verify that getbalance/listtransactions return expected values."""
balance = self.node.getbalance(self.label, 0, True)
assert_equal(balance, self.expected_balance)
txs = self.node.listtransactions(self.label, 10000, 0, True)
assert_equal(len(txs), self.expected_txs)
if txid is not None:
tx, = [tx for tx in txs if tx["txid"] == txid]
assert_equal(tx["account"], self.label)
assert_equal(tx["address"], self.address["address"])
assert_equal(tx["amount"], amount)
assert_equal(tx["category"], "receive")
assert_equal(tx["label"], self.label)
assert_equal(tx["txid"], txid)
assert_equal(tx["confirmations"], confirmations)
assert_equal("trusted" not in tx, True)
# Verify the transaction is correctly marked watchonly depending on
# whether the transaction pays to an imported public key or
# imported private key. The test setup ensures that transaction
# inputs will not be from watchonly keys (important because
# involvesWatchonly will be true if either the transaction output
# or inputs are watchonly).
if self.data != Data.priv:
assert_equal(tx["involvesWatchonly"], True)
else:
assert_equal("involvesWatchonly" not in tx, True)
# List of Variants for each way a key or address could be imported.
IMPORT_VARIANTS = [Variant(*variants) for variants in itertools.product(Call, Data, Rescan, (False, True))]
# List of nodes to import keys to. Half the nodes will have pruning disabled,
# half will have it enabled. Different nodes will be used for imports that are
# expected to cause rescans, and imports that are not expected to cause
# rescans, in order to prevent rescans during later imports picking up
# transactions associated with earlier imports. This makes it easier to keep
# track of expected balances and transactions.
ImportNode = collections.namedtuple("ImportNode", "prune rescan")
IMPORT_NODES = [ImportNode(*fields) for fields in itertools.product((False, True), repeat=2)]
# Rescans start at the earliest block up to 2 hours before the key timestamp.
TIMESTAMP_WINDOW = 2 * 60 * 60
class ImportRescanTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2 + len(IMPORT_NODES)
def setup_network(self):
extra_args = [[] for _ in range(self.num_nodes)]
for i, import_node in enumerate(IMPORT_NODES, 2):
if import_node.prune:
extra_args[i] += ["-prune=1"]
self.add_nodes(self.num_nodes, extra_args)
self.start_nodes()
for i in range(1, self.num_nodes):
connect_nodes(self.nodes[i], 0)
def run_test(self):
# Create one transaction on node 0 with a unique amount and label for
# each possible type of wallet import RPC.
for i, variant in enumerate(IMPORT_VARIANTS):
variant.label = "label {} {}".format(i, variant)
variant.address = self.nodes[1].validateaddress(self.nodes[1].getnewaddress(variant.label))
variant.key = self.nodes[1].dumpprivkey(variant.address["address"])
variant.initial_amount = 10 - (i + 1) / 4.0
variant.initial_txid = self.nodes[0].sendtoaddress(variant.address["address"], variant.initial_amount)
# Generate a block containing the initial transactions, then another
# block further in the future (past the rescan window).
self.nodes[0].generate(1)
assert_equal(self.nodes[0].getrawmempool(), [])
timestamp = self.nodes[0].getblockheader(self.nodes[0].getbestblockhash())["time"]
set_node_times(self.nodes, timestamp + TIMESTAMP_WINDOW + 1)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
# For each variation of wallet key import, invoke the import RPC and
# check the results from getbalance and listtransactions.
for variant in IMPORT_VARIANTS:
variant.expect_disabled = variant.rescan == Rescan.yes and variant.prune and variant.call == Call.single
expect_rescan = variant.rescan == Rescan.yes and not variant.expect_disabled
variant.node = self.nodes[2 + IMPORT_NODES.index(ImportNode(variant.prune, expect_rescan))]
variant.do_import(timestamp)
if expect_rescan:
variant.expected_balance = variant.initial_amount
variant.expected_txs = 1
variant.check(variant.initial_txid, variant.initial_amount, 2)
else:
variant.expected_balance = 0
variant.expected_txs = 0
variant.check()
# Create new transactions sending to each address.
fee = self.nodes[0].getnetworkinfo()["relayfee"]
for i, variant in enumerate(IMPORT_VARIANTS):
variant.sent_amount = 10 - (2 * i + 1) / 8.0
variant.sent_txid = self.nodes[0].sendtoaddress(variant.address["address"], variant.sent_amount)
# Generate a block containing the new transactions.
self.nodes[0].generate(1)
assert_equal(self.nodes[0].getrawmempool(), [])
sync_blocks(self.nodes)
# Check the latest results from getbalance and listtransactions.
for variant in IMPORT_VARIANTS:
if not variant.expect_disabled:
variant.expected_balance += variant.sent_amount
variant.expected_txs += 1
variant.check(variant.sent_txid, variant.sent_amount, 1)
else:
variant.check()
if __name__ == "__main__":
ImportRescanTest().main()
| mit |
roxyboy/scikit-learn | sklearn/linear_model/stochastic_gradient.py | 130 | 50966 | # Authors: Peter Prettenhofer <peter.prettenhofer@gmail.com> (main author)
# Mathieu Blondel (partial_fit support)
#
# License: BSD 3 clause
"""Classification and regression using Stochastic Gradient Descent (SGD)."""
import numpy as np
import scipy.sparse as sp
from abc import ABCMeta, abstractmethod
from ..externals.joblib import Parallel, delayed
from .base import LinearClassifierMixin, SparseCoefMixin
from ..base import BaseEstimator, RegressorMixin
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import (check_array, check_random_state, check_X_y,
deprecated)
from ..utils.extmath import safe_sparse_dot
from ..utils.multiclass import _check_partial_fit_first_call
from ..utils.validation import check_is_fitted
from ..externals import six
from .sgd_fast import plain_sgd, average_sgd
from ..utils.fixes import astype
from ..utils.seq_dataset import ArrayDataset, CSRDataset
from ..utils import compute_class_weight
from .sgd_fast import Hinge
from .sgd_fast import SquaredHinge
from .sgd_fast import Log
from .sgd_fast import ModifiedHuber
from .sgd_fast import SquaredLoss
from .sgd_fast import Huber
from .sgd_fast import EpsilonInsensitive
from .sgd_fast import SquaredEpsilonInsensitive
LEARNING_RATE_TYPES = {"constant": 1, "optimal": 2, "invscaling": 3,
"pa1": 4, "pa2": 5}
PENALTY_TYPES = {"none": 0, "l2": 2, "l1": 1, "elasticnet": 3}
SPARSE_INTERCEPT_DECAY = 0.01
"""For sparse data intercept updates are scaled by this decay factor to avoid
intercept oscillation."""
DEFAULT_EPSILON = 0.1
"""Default value of ``epsilon`` parameter. """
class BaseSGD(six.with_metaclass(ABCMeta, BaseEstimator, SparseCoefMixin)):
"""Base class for SGD classification and regression."""
def __init__(self, loss, penalty='l2', alpha=0.0001, C=1.0,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=0.1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
warm_start=False, average=False):
self.loss = loss
self.penalty = penalty
self.learning_rate = learning_rate
self.epsilon = epsilon
self.alpha = alpha
self.C = C
self.l1_ratio = l1_ratio
self.fit_intercept = fit_intercept
self.n_iter = n_iter
self.shuffle = shuffle
self.random_state = random_state
self.verbose = verbose
self.eta0 = eta0
self.power_t = power_t
self.warm_start = warm_start
self.average = average
self._validate_params()
self.coef_ = None
if self.average > 0:
self.standard_coef_ = None
self.average_coef_ = None
# iteration count for learning rate schedule
# must not be int (e.g. if ``learning_rate=='optimal'``)
self.t_ = None
def set_params(self, *args, **kwargs):
super(BaseSGD, self).set_params(*args, **kwargs)
self._validate_params()
return self
@abstractmethod
def fit(self, X, y):
"""Fit model."""
def _validate_params(self):
"""Validate input params. """
if not isinstance(self.shuffle, bool):
raise ValueError("shuffle must be either True or False")
if self.n_iter <= 0:
raise ValueError("n_iter must be > zero")
if not (0.0 <= self.l1_ratio <= 1.0):
raise ValueError("l1_ratio must be in [0, 1]")
if self.alpha < 0.0:
raise ValueError("alpha must be >= 0")
if self.learning_rate in ("constant", "invscaling"):
if self.eta0 <= 0.0:
raise ValueError("eta0 must be > 0")
# raises ValueError if not registered
self._get_penalty_type(self.penalty)
self._get_learning_rate_type(self.learning_rate)
if self.loss not in self.loss_functions:
raise ValueError("The loss %s is not supported. " % self.loss)
def _get_loss_function(self, loss):
"""Get concrete ``LossFunction`` object for str ``loss``. """
try:
loss_ = self.loss_functions[loss]
loss_class, args = loss_[0], loss_[1:]
if loss in ('huber', 'epsilon_insensitive',
'squared_epsilon_insensitive'):
args = (self.epsilon, )
return loss_class(*args)
except KeyError:
raise ValueError("The loss %s is not supported. " % loss)
def _get_learning_rate_type(self, learning_rate):
try:
return LEARNING_RATE_TYPES[learning_rate]
except KeyError:
raise ValueError("learning rate %s "
"is not supported. " % learning_rate)
def _get_penalty_type(self, penalty):
penalty = str(penalty).lower()
try:
return PENALTY_TYPES[penalty]
except KeyError:
raise ValueError("Penalty %s is not supported. " % penalty)
def _validate_sample_weight(self, sample_weight, n_samples):
"""Set the sample weight array."""
if sample_weight is None:
# uniform sample weights
sample_weight = np.ones(n_samples, dtype=np.float64, order='C')
else:
# user-provided array
sample_weight = np.asarray(sample_weight, dtype=np.float64,
order="C")
if sample_weight.shape[0] != n_samples:
raise ValueError("Shapes of X and sample_weight do not match.")
return sample_weight
def _allocate_parameter_mem(self, n_classes, n_features, coef_init=None,
intercept_init=None):
"""Allocate mem for parameters; initialize if provided."""
if n_classes > 2:
# allocate coef_ for multi-class
if coef_init is not None:
coef_init = np.asarray(coef_init, order="C")
if coef_init.shape != (n_classes, n_features):
raise ValueError("Provided ``coef_`` does not match dataset. ")
self.coef_ = coef_init
else:
self.coef_ = np.zeros((n_classes, n_features),
dtype=np.float64, order="C")
# allocate intercept_ for multi-class
if intercept_init is not None:
intercept_init = np.asarray(intercept_init, order="C")
if intercept_init.shape != (n_classes, ):
raise ValueError("Provided intercept_init "
"does not match dataset.")
self.intercept_ = intercept_init
else:
self.intercept_ = np.zeros(n_classes, dtype=np.float64,
order="C")
else:
# allocate coef_ for binary problem
if coef_init is not None:
coef_init = np.asarray(coef_init, dtype=np.float64,
order="C")
coef_init = coef_init.ravel()
if coef_init.shape != (n_features,):
raise ValueError("Provided coef_init does not "
"match dataset.")
self.coef_ = coef_init
else:
self.coef_ = np.zeros(n_features,
dtype=np.float64,
order="C")
# allocate intercept_ for binary problem
if intercept_init is not None:
intercept_init = np.asarray(intercept_init, dtype=np.float64)
if intercept_init.shape != (1,) and intercept_init.shape != ():
raise ValueError("Provided intercept_init "
"does not match dataset.")
self.intercept_ = intercept_init.reshape(1,)
else:
self.intercept_ = np.zeros(1, dtype=np.float64, order="C")
# initialize average parameters
if self.average > 0:
self.standard_coef_ = self.coef_
self.standard_intercept_ = self.intercept_
self.average_coef_ = np.zeros(self.coef_.shape,
dtype=np.float64,
order="C")
self.average_intercept_ = np.zeros(self.standard_intercept_.shape,
dtype=np.float64,
order="C")
def _make_dataset(X, y_i, sample_weight):
"""Create ``Dataset`` abstraction for sparse and dense inputs.
This also returns the ``intercept_decay`` which is different
for sparse datasets.
"""
if sp.issparse(X):
dataset = CSRDataset(X.data, X.indptr, X.indices, y_i, sample_weight)
intercept_decay = SPARSE_INTERCEPT_DECAY
else:
dataset = ArrayDataset(X, y_i, sample_weight)
intercept_decay = 1.0
return dataset, intercept_decay
def _prepare_fit_binary(est, y, i):
"""Initialization for fit_binary.
Returns y, coef, intercept.
"""
y_i = np.ones(y.shape, dtype=np.float64, order="C")
y_i[y != est.classes_[i]] = -1.0
average_intercept = 0
average_coef = None
if len(est.classes_) == 2:
if not est.average:
coef = est.coef_.ravel()
intercept = est.intercept_[0]
else:
coef = est.standard_coef_.ravel()
intercept = est.standard_intercept_[0]
average_coef = est.average_coef_.ravel()
average_intercept = est.average_intercept_[0]
else:
if not est.average:
coef = est.coef_[i]
intercept = est.intercept_[i]
else:
coef = est.standard_coef_[i]
intercept = est.standard_intercept_[i]
average_coef = est.average_coef_[i]
average_intercept = est.average_intercept_[i]
return y_i, coef, intercept, average_coef, average_intercept
def fit_binary(est, i, X, y, alpha, C, learning_rate, n_iter,
pos_weight, neg_weight, sample_weight):
"""Fit a single binary classifier.
The i'th class is considered the "positive" class.
"""
# if average is not true, average_coef, and average_intercept will be
# unused
y_i, coef, intercept, average_coef, average_intercept = \
_prepare_fit_binary(est, y, i)
assert y_i.shape[0] == y.shape[0] == sample_weight.shape[0]
dataset, intercept_decay = _make_dataset(X, y_i, sample_weight)
penalty_type = est._get_penalty_type(est.penalty)
learning_rate_type = est._get_learning_rate_type(learning_rate)
# XXX should have random_state_!
random_state = check_random_state(est.random_state)
# numpy mtrand expects a C long which is a signed 32 bit integer under
# Windows
seed = random_state.randint(0, np.iinfo(np.int32).max)
if not est.average:
return plain_sgd(coef, intercept, est.loss_function,
penalty_type, alpha, C, est.l1_ratio,
dataset, n_iter, int(est.fit_intercept),
int(est.verbose), int(est.shuffle), seed,
pos_weight, neg_weight,
learning_rate_type, est.eta0,
est.power_t, est.t_, intercept_decay)
else:
standard_coef, standard_intercept, average_coef, \
average_intercept = average_sgd(coef, intercept, average_coef,
average_intercept,
est.loss_function, penalty_type,
alpha, C, est.l1_ratio, dataset,
n_iter, int(est.fit_intercept),
int(est.verbose), int(est.shuffle),
seed, pos_weight, neg_weight,
learning_rate_type, est.eta0,
est.power_t, est.t_,
intercept_decay,
est.average)
if len(est.classes_) == 2:
est.average_intercept_[0] = average_intercept
else:
est.average_intercept_[i] = average_intercept
return standard_coef, standard_intercept
class BaseSGDClassifier(six.with_metaclass(ABCMeta, BaseSGD,
LinearClassifierMixin)):
loss_functions = {
"hinge": (Hinge, 1.0),
"squared_hinge": (SquaredHinge, 1.0),
"perceptron": (Hinge, 0.0),
"log": (Log, ),
"modified_huber": (ModifiedHuber, ),
"squared_loss": (SquaredLoss, ),
"huber": (Huber, DEFAULT_EPSILON),
"epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
"squared_epsilon_insensitive": (SquaredEpsilonInsensitive,
DEFAULT_EPSILON),
}
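    # Each entry maps a loss name to (loss class, default constructor
    # arguments); for the epsilon-based losses the default is replaced by
    # self.epsilon in _get_loss_function.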
@abstractmethod
def __init__(self, loss="hinge", penalty='l2', alpha=0.0001, l1_ratio=0.15,
fit_intercept=True, n_iter=5, shuffle=True, verbose=0,
epsilon=DEFAULT_EPSILON, n_jobs=1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
class_weight=None, warm_start=False, average=False):
super(BaseSGDClassifier, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
self.class_weight = class_weight
self.classes_ = None
self.n_jobs = int(n_jobs)
def _partial_fit(self, X, y, alpha, C,
loss, learning_rate, n_iter,
classes, sample_weight,
coef_init, intercept_init):
X, y = check_X_y(X, y, 'csr', dtype=np.float64, order="C")
n_samples, n_features = X.shape
self._validate_params()
_check_partial_fit_first_call(self, classes)
n_classes = self.classes_.shape[0]
# Allocate datastructures from input arguments
self._expanded_class_weight = compute_class_weight(self.class_weight,
self.classes_, y)
sample_weight = self._validate_sample_weight(sample_weight, n_samples)
if self.coef_ is None or coef_init is not None:
self._allocate_parameter_mem(n_classes, n_features,
coef_init, intercept_init)
elif n_features != self.coef_.shape[-1]:
raise ValueError("Number of features %d does not match previous data %d."
% (n_features, self.coef_.shape[-1]))
self.loss_function = self._get_loss_function(loss)
if self.t_ is None:
self.t_ = 1.0
# delegate to concrete training procedure
if n_classes > 2:
self._fit_multiclass(X, y, alpha=alpha, C=C,
learning_rate=learning_rate,
sample_weight=sample_weight, n_iter=n_iter)
elif n_classes == 2:
self._fit_binary(X, y, alpha=alpha, C=C,
learning_rate=learning_rate,
sample_weight=sample_weight, n_iter=n_iter)
else:
raise ValueError("The number of class labels must be "
"greater than one.")
return self
def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,
intercept_init=None, sample_weight=None):
if hasattr(self, "classes_"):
self.classes_ = None
X, y = check_X_y(X, y, 'csr', dtype=np.float64, order="C")
n_samples, n_features = X.shape
# labels can be encoded as float, int, or string literals
# np.unique sorts in asc order; largest class id is positive class
classes = np.unique(y)
if self.warm_start and self.coef_ is not None:
if coef_init is None:
coef_init = self.coef_
if intercept_init is None:
intercept_init = self.intercept_
else:
self.coef_ = None
self.intercept_ = None
if self.average > 0:
self.standard_coef_ = self.coef_
self.standard_intercept_ = self.intercept_
self.average_coef_ = None
self.average_intercept_ = None
# Clear iteration count for multiple call to fit.
self.t_ = None
self._partial_fit(X, y, alpha, C, loss, learning_rate, self.n_iter,
classes, sample_weight, coef_init, intercept_init)
return self
def _fit_binary(self, X, y, alpha, C, sample_weight,
learning_rate, n_iter):
"""Fit a binary classifier on X and y. """
coef, intercept = fit_binary(self, 1, X, y, alpha, C,
learning_rate, n_iter,
self._expanded_class_weight[1],
self._expanded_class_weight[0],
sample_weight)
self.t_ += n_iter * X.shape[0]
# need to be 2d
if self.average > 0:
if self.average <= self.t_ - 1:
self.coef_ = self.average_coef_.reshape(1, -1)
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_.reshape(1, -1)
self.standard_intercept_ = np.atleast_1d(intercept)
self.intercept_ = self.standard_intercept_
else:
self.coef_ = coef.reshape(1, -1)
# intercept is a float, need to convert it to an array of length 1
self.intercept_ = np.atleast_1d(intercept)
def _fit_multiclass(self, X, y, alpha, C, learning_rate,
sample_weight, n_iter):
"""Fit a multi-class classifier by combining binary classifiers
Each binary classifier predicts one class versus all others. This
strategy is called OVA: One Versus All.
"""
# Use joblib to fit OvA in parallel.
result = Parallel(n_jobs=self.n_jobs, backend="threading",
verbose=self.verbose)(
delayed(fit_binary)(self, i, X, y, alpha, C, learning_rate,
n_iter, self._expanded_class_weight[i], 1.,
sample_weight)
for i in range(len(self.classes_)))
for i, (_, intercept) in enumerate(result):
self.intercept_[i] = intercept
self.t_ += n_iter * X.shape[0]
if self.average > 0:
if self.average <= self.t_ - 1.0:
self.coef_ = self.average_coef_
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_
self.standard_intercept_ = np.atleast_1d(intercept)
self.intercept_ = self.standard_intercept_
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Subset of the training data
y : numpy array, shape (n_samples,)
Subset of the target values
classes : array, shape (n_classes,)
Classes across all calls to partial_fit.
Can be obtained by via `np.unique(y_all)`, where y_all is the
target vector of the entire dataset.
This argument is required for the first call to partial_fit
and can be omitted in the subsequent calls.
Note that y doesn't need to contain all labels in `classes`.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self : returns an instance of self.
"""
if self.class_weight in ['balanced', 'auto']:
raise ValueError("class_weight '{0}' is not supported for "
"partial_fit. In order to use 'balanced' weights, "
"use compute_class_weight('{0}', classes, y). "
"In place of y you can us a large enough sample "
"of the full training set target to properly "
"estimate the class frequency distributions. "
"Pass the resulting weights as the class_weight "
"parameter.".format(self.class_weight))
return self._partial_fit(X, y, alpha=self.alpha, C=1.0, loss=self.loss,
learning_rate=self.learning_rate, n_iter=1,
classes=classes, sample_weight=sample_weight,
coef_init=None, intercept_init=None)
def fit(self, X, y, coef_init=None, intercept_init=None, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data
y : numpy array, shape (n_samples,)
Target values
coef_init : array, shape (n_classes, n_features)
The initial coefficients to warm-start the optimization.
intercept_init : array, shape (n_classes,)
The initial intercept to warm-start the optimization.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed. These weights will
be multiplied with class_weight (passed through the
            constructor) if class_weight is specified.
Returns
-------
self : returns an instance of self.
"""
return self._fit(X, y, alpha=self.alpha, C=1.0,
loss=self.loss, learning_rate=self.learning_rate,
coef_init=coef_init, intercept_init=intercept_init,
sample_weight=sample_weight)
class SGDClassifier(BaseSGDClassifier, _LearntSelectorMixin):
"""Linear classifiers (SVM, logistic regression, a.o.) with SGD training.
This estimator implements regularized linear models with stochastic
gradient descent (SGD) learning: the gradient of the loss is estimated
each sample at a time and the model is updated along the way with a
decreasing strength schedule (aka learning rate). SGD allows minibatch
(online/out-of-core) learning, see the partial_fit method.
For best results using the default learning rate schedule, the data should
have zero mean and unit variance.
This implementation works with data represented as dense or sparse arrays
of floating point values for the features. The model it fits can be
controlled with the loss parameter; by default, it fits a linear support
vector machine (SVM).
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using either the squared euclidean norm
L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
parameter update crosses the 0.0 value because of the regularizer, the
update is truncated to 0.0 to allow for learning sparse models and achieve
online feature selection.
Read more in the :ref:`User Guide <sgd>`.
Parameters
----------
loss : str, 'hinge', 'log', 'modified_huber', 'squared_hinge',\
'perceptron', or a regression loss: 'squared_loss', 'huber',\
'epsilon_insensitive', or 'squared_epsilon_insensitive'
The loss function to be used. Defaults to 'hinge', which gives a
linear SVM.
The 'log' loss gives logistic regression, a probabilistic classifier.
'modified_huber' is another smooth loss that brings tolerance to
outliers as well as probability estimates.
'squared_hinge' is like hinge but is quadratically penalized.
'perceptron' is the linear loss used by the perceptron algorithm.
The other losses are designed for regression but can be useful in
classification as well; see SGDRegressor for a description.
penalty : str, 'none', 'l2', 'l1', or 'elasticnet'
The penalty (aka regularization term) to be used. Defaults to 'l2'
which is the standard regularizer for linear SVM models. 'l1' and
'elasticnet' might bring sparsity to the model (feature selection)
not achievable with 'l2'.
alpha : float
Constant that multiplies the regularization term. Defaults to 0.0001
l1_ratio : float
The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
Defaults to 0.15.
fit_intercept : bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
n_iter : int, optional
The number of passes over the training data (aka epochs). The number
of iterations is set to 1 if using partial_fit.
Defaults to 5.
shuffle : bool, optional
Whether or not the training data should be shuffled after each epoch.
Defaults to True.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level
epsilon : float
Epsilon in the epsilon-insensitive loss functions; only if `loss` is
'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
For 'huber', determines the threshold at which it becomes less
important to get the prediction exactly right.
For epsilon-insensitive, any differences between the current prediction
and the correct label are ignored if they are less than this threshold.
n_jobs : integer, optional
The number of CPUs to use to do the OVA (One Versus All, for
multi-class problems) computation. -1 means 'all CPUs'. Defaults
to 1.
learning_rate : string, optional
The learning rate schedule:
constant: eta = eta0
optimal: eta = 1.0 / (t + t0) [default]
invscaling: eta = eta0 / pow(t, power_t)
where t0 is chosen by a heuristic proposed by Leon Bottou.
eta0 : double
The initial learning rate for the 'constant' or 'invscaling'
schedules. The default value is 0.0 as eta0 is not used by the
default schedule 'optimal'.
power_t : double
The exponent for inverse scaling learning rate [default 0.5].
class_weight : dict, {class_label: weight} or "balanced" or None, optional
Preset for the class_weight fit parameter.
Weights associated with classes. If not given, all classes
are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
average : bool or int, optional
When set to True, computes the averaged SGD weights and stores the
result in the ``coef_`` attribute. If set to an int greater than 1,
averaging will begin once the total number of samples seen reaches
average. So average=10 will begin averaging after seeing 10 samples.
Attributes
----------
coef_ : array, shape (1, n_features) if n_classes == 2 else (n_classes,\
n_features)
Weights assigned to the features.
intercept_ : array, shape (1,) if n_classes == 2 else (n_classes,)
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> Y = np.array([1, 1, 2, 2])
>>> clf = linear_model.SGDClassifier()
>>> clf.fit(X, Y)
... #doctest: +NORMALIZE_WHITESPACE
SGDClassifier(alpha=0.0001, average=False, class_weight=None, epsilon=0.1,
eta0=0.0, fit_intercept=True, l1_ratio=0.15,
learning_rate='optimal', loss='hinge', n_iter=5, n_jobs=1,
penalty='l2', power_t=0.5, random_state=None, shuffle=True,
verbose=0, warm_start=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
LinearSVC, LogisticRegression, Perceptron
"""
def __init__(self, loss="hinge", penalty='l2', alpha=0.0001, l1_ratio=0.15,
fit_intercept=True, n_iter=5, shuffle=True, verbose=0,
epsilon=DEFAULT_EPSILON, n_jobs=1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
class_weight=None, warm_start=False, average=False):
super(SGDClassifier, self).__init__(
loss=loss, penalty=penalty, alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept, n_iter=n_iter, shuffle=shuffle,
verbose=verbose, epsilon=epsilon, n_jobs=n_jobs,
random_state=random_state, learning_rate=learning_rate, eta0=eta0,
power_t=power_t, class_weight=class_weight, warm_start=warm_start,
average=average)
def _check_proba(self):
check_is_fitted(self, "t_")
if self.loss not in ("log", "modified_huber"):
raise AttributeError("probability estimates are not available for"
" loss=%r" % self.loss)
@property
def predict_proba(self):
"""Probability estimates.
This method is only available for log loss and modified Huber loss.
Multiclass probability estimates are derived from binary (one-vs.-rest)
estimates by simple normalization, as recommended by Zadrozny and
Elkan.
Binary probability estimates for loss="modified_huber" are given by
(clip(decision_function(X), -1, 1) + 1) / 2.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples, n_classes)
Returns the probability of the sample for each class in the model,
where classes are ordered as they are in `self.classes_`.
References
----------
Zadrozny and Elkan, "Transforming classifier scores into multiclass
probability estimates", SIGKDD'02,
http://www.research.ibm.com/people/z/zadrozny/kdd2002-Transf.pdf
The justification for the formula in the loss="modified_huber"
case is in the appendix B in:
http://jmlr.csail.mit.edu/papers/volume2/zhang02c/zhang02c.pdf
"""
self._check_proba()
return self._predict_proba
def _predict_proba(self, X):
if self.loss == "log":
return self._predict_proba_lr(X)
elif self.loss == "modified_huber":
binary = (len(self.classes_) == 2)
scores = self.decision_function(X)
if binary:
prob2 = np.ones((scores.shape[0], 2))
prob = prob2[:, 1]
else:
prob = scores
np.clip(scores, -1, 1, prob)
prob += 1.
prob /= 2.
if binary:
prob2[:, 0] -= prob
prob = prob2
else:
# the above might assign zero to all classes, which doesn't
# normalize neatly; work around this to produce uniform
# probabilities
prob_sum = prob.sum(axis=1)
all_zero = (prob_sum == 0)
if np.any(all_zero):
prob[all_zero, :] = 1
prob_sum[all_zero] = len(self.classes_)
# normalize
prob /= prob_sum.reshape((prob.shape[0], -1))
return prob
else:
raise NotImplementedError("predict_(log_)proba only supported when"
" loss='log' or loss='modified_huber' "
"(%r given)" % self.loss)
@property
def predict_log_proba(self):
"""Log of probability estimates.
This method is only available for log loss and modified Huber loss.
When loss="modified_huber", probability estimates may be hard zeros
and ones, so taking the logarithm is not possible.
See ``predict_proba`` for details.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Returns
-------
T : array-like, shape (n_samples, n_classes)
Returns the log-probability of the sample for each class in the
model, where classes are ordered as they are in
`self.classes_`.
"""
self._check_proba()
return self._predict_log_proba
def _predict_log_proba(self, X):
return np.log(self.predict_proba(X))
class BaseSGDRegressor(BaseSGD, RegressorMixin):
loss_functions = {
"squared_loss": (SquaredLoss, ),
"huber": (Huber, DEFAULT_EPSILON),
"epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
"squared_epsilon_insensitive": (SquaredEpsilonInsensitive,
DEFAULT_EPSILON),
}
@abstractmethod
def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=DEFAULT_EPSILON, random_state=None,
learning_rate="invscaling", eta0=0.01, power_t=0.25,
warm_start=False, average=False):
super(BaseSGDRegressor, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
def _partial_fit(self, X, y, alpha, C, loss, learning_rate,
n_iter, sample_weight,
coef_init, intercept_init):
X, y = check_X_y(X, y, "csr", copy=False, order='C', dtype=np.float64)
y = astype(y, np.float64, copy=False)
n_samples, n_features = X.shape
self._validate_params()
# Allocate datastructures from input arguments
sample_weight = self._validate_sample_weight(sample_weight, n_samples)
if self.coef_ is None:
self._allocate_parameter_mem(1, n_features,
coef_init, intercept_init)
elif n_features != self.coef_.shape[-1]:
raise ValueError("Number of features %d does not match previous data %d."
% (n_features, self.coef_.shape[-1]))
if self.average > 0 and self.average_coef_ is None:
self.average_coef_ = np.zeros(n_features,
dtype=np.float64,
order="C")
self.average_intercept_ = np.zeros(1,
dtype=np.float64,
order="C")
self._fit_regressor(X, y, alpha, C, loss, learning_rate,
sample_weight, n_iter)
return self
def partial_fit(self, X, y, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Subset of training data
y : numpy array of shape (n_samples,)
Subset of target values
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self : returns an instance of self.
"""
return self._partial_fit(X, y, self.alpha, C=1.0,
loss=self.loss,
learning_rate=self.learning_rate, n_iter=1,
sample_weight=sample_weight,
coef_init=None, intercept_init=None)
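    # Editorial note: partial_fit() performs a single epoch (n_iter=1) over the
    # data it is given, so repeated calls implement incremental / out-of-core
    # learning, whereas fit() below clears t_ and runs self.n_iter epochs.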
def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,
intercept_init=None, sample_weight=None):
if self.warm_start and self.coef_ is not None:
if coef_init is None:
coef_init = self.coef_
if intercept_init is None:
intercept_init = self.intercept_
else:
self.coef_ = None
self.intercept_ = None
if self.average > 0:
self.standard_intercept_ = self.intercept_
self.standard_coef_ = self.coef_
self.average_coef_ = None
self.average_intercept_ = None
# Clear iteration count for multiple call to fit.
self.t_ = None
return self._partial_fit(X, y, alpha, C, loss, learning_rate,
self.n_iter, sample_weight,
coef_init, intercept_init)
def fit(self, X, y, coef_init=None, intercept_init=None,
sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data
y : numpy array, shape (n_samples,)
Target values
coef_init : array, shape (n_features,)
The initial coefficients to warm-start the optimization.
intercept_init : array, shape (1,)
The initial intercept to warm-start the optimization.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : returns an instance of self.
"""
return self._fit(X, y, alpha=self.alpha, C=1.0,
loss=self.loss, learning_rate=self.learning_rate,
coef_init=coef_init,
intercept_init=intercept_init,
sample_weight=sample_weight)
@deprecated(" and will be removed in 0.19.")
def decision_function(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
return self._decision_function(X)
def _decision_function(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
check_is_fitted(self, ["t_", "coef_", "intercept_"], all_or_any=all)
X = check_array(X, accept_sparse='csr')
scores = safe_sparse_dot(X, self.coef_.T,
dense_output=True) + self.intercept_
return scores.ravel()
def predict(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
return self._decision_function(X)
def _fit_regressor(self, X, y, alpha, C, loss, learning_rate,
sample_weight, n_iter):
dataset, intercept_decay = _make_dataset(X, y, sample_weight)
loss_function = self._get_loss_function(loss)
penalty_type = self._get_penalty_type(self.penalty)
learning_rate_type = self._get_learning_rate_type(learning_rate)
if self.t_ is None:
self.t_ = 1.0
random_state = check_random_state(self.random_state)
# numpy mtrand expects a C long which is a signed 32 bit integer under
# Windows
seed = random_state.randint(0, np.iinfo(np.int32).max)
if self.average > 0:
self.standard_coef_, self.standard_intercept_, \
self.average_coef_, self.average_intercept_ =\
average_sgd(self.standard_coef_,
self.standard_intercept_[0],
self.average_coef_,
self.average_intercept_[0],
loss_function,
penalty_type,
alpha, C,
self.l1_ratio,
dataset,
n_iter,
int(self.fit_intercept),
int(self.verbose),
int(self.shuffle),
seed,
1.0, 1.0,
learning_rate_type,
self.eta0, self.power_t, self.t_,
intercept_decay, self.average)
self.average_intercept_ = np.atleast_1d(self.average_intercept_)
self.standard_intercept_ = np.atleast_1d(self.standard_intercept_)
self.t_ += n_iter * X.shape[0]
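            # use the averaged weights only once at least ``average`` samples have
            # been seen (t_ - 1.0 counts the samples processed so far); until then
            # keep the plain SGD weights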
if self.average <= self.t_ - 1.0:
self.coef_ = self.average_coef_
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_
self.intercept_ = self.standard_intercept_
else:
self.coef_, self.intercept_ = \
plain_sgd(self.coef_,
self.intercept_[0],
loss_function,
penalty_type,
alpha, C,
self.l1_ratio,
dataset,
n_iter,
int(self.fit_intercept),
int(self.verbose),
int(self.shuffle),
seed,
1.0, 1.0,
learning_rate_type,
self.eta0, self.power_t, self.t_,
intercept_decay)
self.t_ += n_iter * X.shape[0]
self.intercept_ = np.atleast_1d(self.intercept_)
class SGDRegressor(BaseSGDRegressor, _LearntSelectorMixin):
"""Linear model fitted by minimizing a regularized empirical loss with SGD
SGD stands for Stochastic Gradient Descent: the gradient of the loss is
    estimated one sample at a time and the model is updated along the way with
a decreasing strength schedule (aka learning rate).
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using either the squared euclidean norm
L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
parameter update crosses the 0.0 value because of the regularizer, the
update is truncated to 0.0 to allow for learning sparse models and achieve
online feature selection.
This implementation works with data represented as dense numpy arrays of
floating point values for the features.
Read more in the :ref:`User Guide <sgd>`.
Parameters
----------
loss : str, 'squared_loss', 'huber', 'epsilon_insensitive', \
or 'squared_epsilon_insensitive'
The loss function to be used. Defaults to 'squared_loss' which refers
to the ordinary least squares fit. 'huber' modifies 'squared_loss' to
focus less on getting outliers correct by switching from squared to
linear loss past a distance of epsilon. 'epsilon_insensitive' ignores
errors less than epsilon and is linear past that; this is the loss
function used in SVR. 'squared_epsilon_insensitive' is the same but
becomes squared loss past a tolerance of epsilon.
penalty : str, 'none', 'l2', 'l1', or 'elasticnet'
The penalty (aka regularization term) to be used. Defaults to 'l2'
which is the standard regularizer for linear SVM models. 'l1' and
'elasticnet' might bring sparsity to the model (feature selection)
not achievable with 'l2'.
alpha : float
Constant that multiplies the regularization term. Defaults to 0.0001
l1_ratio : float
The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
Defaults to 0.15.
fit_intercept : bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
n_iter : int, optional
The number of passes over the training data (aka epochs). The number
of iterations is set to 1 if using partial_fit.
Defaults to 5.
shuffle : bool, optional
Whether or not the training data should be shuffled after each epoch.
Defaults to True.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level.
epsilon : float
Epsilon in the epsilon-insensitive loss functions; only if `loss` is
'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
For 'huber', determines the threshold at which it becomes less
important to get the prediction exactly right.
For epsilon-insensitive, any differences between the current prediction
and the correct label are ignored if they are less than this threshold.
learning_rate : string, optional
The learning rate:
constant: eta = eta0
optimal: eta = 1.0/(alpha * t)
invscaling: eta = eta0 / pow(t, power_t) [default]
eta0 : double, optional
The initial learning rate [default 0.01].
power_t : double, optional
The exponent for inverse scaling learning rate [default 0.25].
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
average : bool or int, optional
When set to True, computes the averaged SGD weights and stores the
result in the ``coef_`` attribute. If set to an int greater than 1,
averaging will begin once the total number of samples seen reaches
        average. So ``average=10`` will begin averaging after seeing 10 samples.
Attributes
----------
coef_ : array, shape (n_features,)
Weights assigned to the features.
intercept_ : array, shape (1,)
The intercept term.
average_coef_ : array, shape (n_features,)
Averaged weights assigned to the features.
average_intercept_ : array, shape (1,)
The averaged intercept term.
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = linear_model.SGDRegressor()
>>> clf.fit(X, y)
... #doctest: +NORMALIZE_WHITESPACE
SGDRegressor(alpha=0.0001, average=False, epsilon=0.1, eta0=0.01,
fit_intercept=True, l1_ratio=0.15, learning_rate='invscaling',
loss='squared_loss', n_iter=5, penalty='l2', power_t=0.25,
random_state=None, shuffle=True, verbose=0, warm_start=False)
See also
--------
Ridge, ElasticNet, Lasso, SVR
"""
def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=DEFAULT_EPSILON, random_state=None,
learning_rate="invscaling", eta0=0.01, power_t=0.25,
warm_start=False, average=False):
super(SGDRegressor, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
| bsd-3-clause |
drawks/ansible | lib/ansible/modules/network/avi/avi_webhook.py | 28 | 3928 | #!/usr/bin/python
#
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Eric Anderson (eanderson@avinetworks.com)
# module_check: supported
#
# Copyright: (c) 2017 Gaurav Rastogi, <grastogi@avinetworks.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_webhook
author: Gaurav Rastogi (@grastogi23) <grastogi@avinetworks.com>
short_description: Module for setup of Webhook Avi RESTful Object
description:
- This module is used to configure Webhook object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.4"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent", "present"]
avi_api_update_method:
description:
- Default method for object update is HTTP PUT.
- Setting to patch will override that behavior to use HTTP PATCH.
version_added: "2.5"
default: put
choices: ["put", "patch"]
avi_api_patch_op:
description:
- Patch operation to use when using avi_api_update_method as patch.
version_added: "2.5"
choices: ["add", "replace", "delete"]
callback_url:
description:
- Callback url for the webhook.
- Field introduced in 17.1.1.
description:
description:
- Field introduced in 17.1.1.
name:
description:
- The name of the webhook profile.
- Field introduced in 17.1.1.
required: true
tenant_ref:
description:
- It is a reference to an object of type tenant.
- Field introduced in 17.1.1.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Uuid of the webhook profile.
- Field introduced in 17.1.1.
verification_token:
description:
            - Verification token sent back with the callback as query parameters.
- Field introduced in 17.1.1.
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Example to create Webhook object
avi_webhook:
controller: 10.10.25.42
username: admin
password: something
state: present
name: sample_webhook
"""
RETURN = '''
obj:
description: Webhook (api/webhook) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.network.avi.avi import (
avi_common_argument_spec, avi_ansible_api, HAS_AVI)
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
state=dict(default='present',
choices=['absent', 'present']),
avi_api_update_method=dict(default='put',
choices=['put', 'patch']),
avi_api_patch_op=dict(choices=['add', 'replace', 'delete']),
callback_url=dict(type='str',),
description=dict(type='str',),
name=dict(type='str', required=True),
tenant_ref=dict(type='str',),
url=dict(type='str',),
uuid=dict(type='str',),
verification_token=dict(type='str',),
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(
argument_spec=argument_specs, supports_check_mode=True)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk>=17.1) or requests is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
return avi_ansible_api(module, 'webhook',
set([]))
if __name__ == '__main__':
main()
| gpl-3.0 |
DIRACGrid/DIRAC | src/DIRAC/ResourceStatusSystem/Utilities/RssConfiguration.py | 2 | 3285 | """
:mod: RssConfiguration
Module that collects utility functions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__RCSID__ = '$Id$'
from DIRAC import S_OK
from DIRAC.Core.Utilities import List
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
from DIRAC.ResourceStatusSystem.PolicySystem.StateMachine import RSSMachine
from DIRAC.ResourceStatusSystem.Utilities import Utils
_rssConfigPath = 'ResourceStatus'
class RssConfiguration(object):
"""
RssConfiguration::
{
Config:
{
State : Active | InActive,
Cache : 300,
FromAddress : 'email@site.domain'
        StatusTypes :
{
default : all,
StorageElement: ReadAccess, WriteAccess, CheckAccess, RemoveAccess
}
}
}
"""
def __init__(self):
self.opsHelper = Operations()
def getConfigState(self, default='InActive'):
"""
Gets from <pathToRSSConfiguration>/Config the value of State
"""
return self.opsHelper.getValue('%s/Config/State' % _rssConfigPath, default)
def getConfigCache(self, default=300):
"""
Gets from <pathToRSSConfiguration>/Config the value of Cache
"""
return self.opsHelper.getValue('%s/Config/Cache' % _rssConfigPath, default)
def getConfigFromAddress(self, default=None):
"""
Gets from <pathToRSSConfiguration>/Config the value of FromAddress
"""
return self.opsHelper.getValue('%s/Config/FromAddress' % _rssConfigPath, default)
def getConfigStatusType(self, elementType=None):
"""
Gets all the status types per elementType, if not given, it takes default
from CS. If not, hardcoded variable DEFAULT.
"""
_DEFAULTS = ('all', )
res = self.opsHelper.getOptionsDict('%s/Config/StatusTypes' % _rssConfigPath)
if res['OK']:
if elementType in res['Value']:
return List.fromChar(res['Value'][elementType])
if 'default' in res['Value']:
return List.fromChar(res['Value']['default'])
return _DEFAULTS
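# Minimal usage sketch (editorial example; assumes a working Operations() CS helper):
#
#   rssConfig = RssConfiguration()
#   rssConfig.getConfigState()                       # e.g. 'Active' or 'InActive'
#   rssConfig.getConfigStatusType('StorageElement')  # e.g. ['ReadAccess', 'WriteAccess', ...]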
def getPolicies():
"""
Returns from the OperationsHelper: <_rssConfigPath>/Policies
"""
return Utils.getCSTree('%s/Policies' % _rssConfigPath)
def getPolicyActions():
"""
Returns from the OperationsHelper: <_rssConfigPath>/PolicyActions
"""
return Utils.getCSTree('%s/PolicyActions' % _rssConfigPath)
def getnotificationGroups():
"""
  Returns from the OperationsHelper: <_rssConfigPath>/Config
"""
return Utils.getCSTree('%s/Config' % _rssConfigPath)
def getNotifications():
"""
Returns from the OperationsHelper: <_rssConfigPath>/Notification
"""
return Utils.getCSTree('%s/Notification' % _rssConfigPath)
def getValidElements():
"""
Returns from the OperationsHelper: <_rssConfigPath>/GeneralConfig/ValidElements
"""
_DEFAULTS = ('Site', 'Resource', 'Node')
# result = Operations().getValue( '%s/GeneralConfig/ValidElements' % _rssConfigPath )
# if result is not None:
# return List.fromChar( result )
return _DEFAULTS
def getValidStatus():
"""
Returns a list of statuses as were defined on the RSS(State)Machine
"""
validStatus = RSSMachine(None).getStates()
return S_OK(validStatus)
| gpl-3.0 |
miipl-naveen/optibizz | addons/account_analytic_default/account_analytic_default.py | 57 | 9022 | # -*- coding: utf-8 -*-
###############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
class account_analytic_default(osv.osv):
_name = "account.analytic.default"
_description = "Analytic Distribution"
_rec_name = "analytic_id"
_order = "sequence"
_columns = {
'sequence': fields.integer('Sequence', help="Gives the sequence order when displaying a list of analytic distribution"),
'analytic_id': fields.many2one('account.analytic.account', 'Analytic Account'),
'product_id': fields.many2one('product.product', 'Product', ondelete='cascade', help="Select a product which will use analytic account specified in analytic default (e.g. create new customer invoice or Sales order if we select this product, it will automatically take this as an analytic account)"),
'partner_id': fields.many2one('res.partner', 'Partner', ondelete='cascade', help="Select a partner which will use analytic account specified in analytic default (e.g. create new customer invoice or Sales order if we select this partner, it will automatically take this as an analytic account)"),
'user_id': fields.many2one('res.users', 'User', ondelete='cascade', help="Select a user which will use analytic account specified in analytic default."),
'company_id': fields.many2one('res.company', 'Company', ondelete='cascade', help="Select a company which will use analytic account specified in analytic default (e.g. create new customer invoice or Sales order if we select this company, it will automatically take this as an analytic account)"),
'date_start': fields.date('Start Date', help="Default start date for this Analytic Account."),
'date_stop': fields.date('End Date', help="Default end date for this Analytic Account."),
}
def account_get(self, cr, uid, product_id=None, partner_id=None, user_id=None, date=None, company_id=None, context=None):
domain = []
if product_id:
domain += ['|', ('product_id', '=', product_id)]
domain += [('product_id','=', False)]
if partner_id:
domain += ['|', ('partner_id', '=', partner_id)]
domain += [('partner_id', '=', False)]
if company_id:
domain += ['|', ('company_id', '=', company_id)]
domain += [('company_id', '=', False)]
if user_id:
domain += ['|',('user_id', '=', user_id)]
domain += [('user_id','=', False)]
if date:
domain += ['|', ('date_start', '<=', date), ('date_start', '=', False)]
domain += ['|', ('date_stop', '>=', date), ('date_stop', '=', False)]
best_index = -1
res = False
for rec in self.browse(cr, uid, self.search(cr, uid, domain, context=context), context=context):
index = 0
if rec.product_id: index += 1
if rec.partner_id: index += 1
if rec.company_id: index += 1
if rec.user_id: index += 1
if rec.date_start: index += 1
if rec.date_stop: index += 1
if index > best_index:
res = rec
best_index = index
return res
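# Editorial note (not part of the original module): account_get() collects every
# analytic default whose set fields match the given product/partner/company/user/date
# and returns the most specific one, i.e. the rule with the highest number of
# filled-in criteria. For example, a rule with both product_id and partner_id set
# wins over a rule with only product_id set when both match the call.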
class account_invoice_line(osv.osv):
_inherit = "account.invoice.line"
_description = "Invoice Line"
def product_id_change(self, cr, uid, ids, product, uom_id, qty=0, name='', type='out_invoice', partner_id=False, fposition_id=False, price_unit=False, currency_id=False, company_id=None, context=None):
res_prod = super(account_invoice_line, self).product_id_change(cr, uid, ids, product, uom_id, qty, name, type, partner_id, fposition_id, price_unit, currency_id=currency_id, company_id=company_id, context=context)
rec = self.pool.get('account.analytic.default').account_get(cr, uid, product, partner_id, uid, time.strftime('%Y-%m-%d'), company_id=company_id, context=context)
if rec:
res_prod['value'].update({'account_analytic_id': rec.analytic_id.id})
else:
res_prod['value'].update({'account_analytic_id': False})
return res_prod
class stock_picking(osv.osv):
_inherit = "stock.picking"
def _get_account_analytic_invoice(self, cursor, user, picking, move_line):
partner_id = picking.partner_id and picking.partner_id.id or False
rec = self.pool.get('account.analytic.default').account_get(cursor, user, move_line.product_id.id, partner_id, user, time.strftime('%Y-%m-%d'))
if rec:
return rec.analytic_id.id
return super(stock_picking, self)._get_account_analytic_invoice(cursor, user, picking, move_line)
class sale_order_line(osv.osv):
_inherit = "sale.order.line"
# Method overridden to set the analytic account by default on criterion match
def invoice_line_create(self, cr, uid, ids, context=None):
create_ids = super(sale_order_line, self).invoice_line_create(cr, uid, ids, context=context)
if not ids:
return create_ids
sale_line = self.browse(cr, uid, ids[0], context=context)
inv_line_obj = self.pool.get('account.invoice.line')
anal_def_obj = self.pool.get('account.analytic.default')
for line in inv_line_obj.browse(cr, uid, create_ids, context=context):
rec = anal_def_obj.account_get(cr, uid, line.product_id.id, sale_line.order_id.partner_id.id, sale_line.order_id.user_id.id, time.strftime('%Y-%m-%d'), context=context)
if rec:
inv_line_obj.write(cr, uid, [line.id], {'account_analytic_id': rec.analytic_id.id}, context=context)
return create_ids
class product_product(osv.Model):
_inherit = 'product.product'
def _rules_count(self, cr, uid, ids, field_name, arg, context=None):
Analytic = self.pool['account.analytic.default']
return {
product_id: Analytic.search_count(cr, uid, [('product_id', '=', product_id)], context=context)
for product_id in ids
}
_columns = {
'rules_count': fields.function(_rules_count, string='# Analytic Rules', type='integer'),
}
class product_template(osv.Model):
_inherit = 'product.template'
def _rules_count(self, cr, uid, ids, field_name, arg, context=None):
Analytic = self.pool['account.analytic.default']
res = {}
for product_tmpl_id in self.browse(cr, uid, ids, context=context):
res[product_tmpl_id.id] = sum([p.rules_count for p in product_tmpl_id.product_variant_ids])
return res
_columns = {
'rules_count': fields.function(_rules_count, string='# Analytic Rules', type='integer'),
}
def action_view_rules(self, cr, uid, ids, context=None):
products = self._get_products(cr, uid, ids, context=context)
result = self._get_act_window_dict(cr, uid, 'account_analytic_default.action_product_default_list', context=context)
result['domain'] = "[('product_id','in',[" + ','.join(map(str, products)) + "])]"
# Remove context so it is not going to filter on product_id with active_id of template
result['context'] = "{}"
return result
class stock_move(osv.Model):
_inherit = 'stock.move'
def _create_invoice_line_from_vals(self, cr, uid, move, invoice_line_vals, context=None):
        # It will set the default analytic account on the invoice line
partner_id = self.pool['account.invoice'].browse(cr, uid, invoice_line_vals.get('invoice_id'), context=context).partner_id.id
if 'account_analytic_id' not in invoice_line_vals or not invoice_line_vals.get('account_analytic_id'):
rec = self.pool['account.analytic.default'].account_get(cr, uid, move.product_id.id, partner_id, uid, time.strftime('%Y-%m-%d'), company_id=move.company_id.id, context=context)
if rec:
invoice_line_vals.update({'account_analytic_id': rec.analytic_id.id})
res = super(stock_move, self)._create_invoice_line_from_vals(cr, uid, move, invoice_line_vals, context=context)
return res
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
cloudspaces/eyeos-u1db | APISync/APISync.py | 1 | 12160 | from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
from settings import settings
from mongodb import mongoDb
import time
from urlparse import urlparse
import json
import sys, os
class RequestHandler(BaseHTTPRequestHandler):
def __init__(self, *args, **kwargs):
self.comments = mongoDb("localhost",27017,"comments")
self.calendars = mongoDb("localhost",27017,"calendars")
self.eyedocs = mongoDb("localhost",27017,"eyedocs")
BaseHTTPRequestHandler.__init__(self, *args, **kwargs)
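    # Overview (editorial comment): this handler exposes simple REST resources
    # backed by MongoDB collections:
    #   /comment                                  -> 'comments' database
    #   /event, /calendar, /calEvents, /calUser   -> 'calendars' database
    #   /lockFile, /unLockFile, /updateTime       -> 'eyedocs' database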
def do_POST(self):
#print self.path
#print self.headers
postdata = self.getPostData()
if self.path.startswith('/comment'):
if postdata.has_key('id') and postdata.has_key('user') and postdata.has_key('text') and postdata.has_key('cloud'):
time_created = time.strftime("%Y%m%d%H%M%S")
response = self.comments.insertComment(postdata['id'],postdata['user'],postdata['text'].decode('hex'),postdata['cloud'],time_created)
else:
response = {"error":400,"descripcion":"Parametros incorrectos"}
elif self.path.startswith('/event'):
if postdata.has_key('user') and postdata.has_key('calendar') and postdata.has_key('cloud') and \
postdata.has_key('isallday') and postdata.has_key('timestart') and postdata.has_key('timeend') and \
postdata.has_key('repetition') and postdata.has_key('finaltype') and postdata.has_key('finalvalue') and \
postdata.has_key('subject') and postdata.has_key('location') and postdata.has_key('description') and \
postdata.has_key('repeattype'):
response = self.calendars.insertEvent(postdata['user'],postdata['calendar'],postdata['cloud'],postdata['isallday'],
postdata['timestart'],postdata['timeend'],postdata['repetition'],
postdata['finaltype'],postdata['finalvalue'],postdata['subject'].decode('hex'),postdata['location'].decode('hex'),
postdata['description'].decode('hex'),postdata['repeattype'])
else:
response = {"error":400,"descripcion":"Parametros incorrectos"}
elif self.path.startswith('/calendar'):
if postdata.has_key('user') and postdata.has_key('name') and postdata.has_key('cloud') and postdata.has_key('description') and \
postdata.has_key('timezone'):
response = self.calendars.insertCalendar(postdata['user'],postdata['name'],postdata['cloud'],postdata['description'],postdata['timezone'])
else:
response = {"error":400,"descripcion":"Parametros incorrectos"}
elif self.path.startswith('/lockFile'):
if postdata.has_key('id') and postdata.has_key('cloud') and postdata.has_key('user') and postdata.has_key('ipserver') and \
postdata.has_key('datetime') and postdata.has_key('timelimit'):
interop = None
if postdata.has_key('interop'):
interop = postdata['interop']
response = self.eyedocs.lockFile(postdata['id'],postdata['cloud'],postdata['user'],postdata['ipserver'],postdata['datetime'].decode('hex'),int(postdata['timelimit']),interop)
else:
response = {"error":400,"descripcion":"Parametros incorrectos"}
else:
response = {"error":400,"descripcion":"Recurso no encontrado"}
self.sendData(response)
def do_DELETE(self):
params = self.path.split('/')
if self.path.startswith('/comment'):
if len(params) == 6:
id = params[2]
user = params[3]
cloud = params[4]
time_created = params[5]
response = self.comments.deleteComment(id,user,cloud,time_created)
else:
response = {"error":400,"descripcion":"Parametros incorrectos"}
elif self.path.startswith('/event'):
if len(params) == 8:
user = params[2]
calendar = params[3]
cloud = params[4]
timestart = params[5]
timeend = params[6]
isallday = params[7]
response = self.calendars.deleteEvent(user,calendar,cloud,timestart,timeend,isallday)
else:
response = {"error":400,"descripcion":"Parametros incorrectos"}
elif self.path.startswith('/calendar'):
if len(params) == 5:
user = params[2]
name = params[3]
cloud = params[4]
response = self.calendars.deleteCalendar(user,name,cloud)
else:
response = {"error":400,"descripcion":"Parametros incorrectos"}
elif self.path.startswith('/calUser'):
if len(params) == 4:
user = params[2]
cloud = params[3]
response = self.calendars.deleteCalendarsUser(user,cloud)
self.sendDataArray(response)
return
else:
response = {"error":400,"descripcion":"Parametros incorrectos"}
else:
response = {"error":400,"descripcion":"Recurso no encontrado"}
self.sendData(response)
def do_GET(self):
params = self.path.split('/')
if self.path.startswith('/comment'):
if len(params) == 4 or len(params) == 5:
id = params[2]
cloud = params[3]
interop = None
if len(params) == 5:
interop = params[4]
data = self.comments.getComments(id,cloud,interop)
self.sendDataArray(data)
else:
response = {"error":400,"descripcion":"Parametros incorrectos"}
self.sendData(response)
elif self.path.startswith('/event'):
if len(params) == 5:
user = params[2]
calendar = params[3]
cloud = params[4]
data = self.calendars.getEvents(user,calendar,cloud)
self.sendDataArray(data)
else:
response = {"error":400,"descripcion":"Parametros incorrectos"}
self.sendData(response)
elif self.path.startswith('/calendar'):
if len(params) == 4:
user = params[2]
cloud = params[3]
data = self.calendars.getCalendars(user,cloud)
self.sendDataArray(data)
else:
response = {"error":400,"descripcion":"Parametros incorrectos"}
self.sendData(response)
elif self.path.startswith('/calEvents'):
if len(params) == 4:
user = params[2]
cloud = params[3]
data = self.calendars.getCalendarsAndEvents(user,cloud)
self.sendDataArray(data)
else:
response = {"error":400,"descripcion":"Parametros incorrectos"}
self.sendData(response)
elif self.path.startswith('/lockFile'):
if len(params) == 4 or len(params) == 5:
id = params[2]
cloud = params[3]
interop = None
if len(params) == 5:
interop = params[4]
data = self.eyedocs.getMetadataFile(id,cloud,interop)
self.sendDataArray(data)
else:
response = {"error":400,"descripcion":"Parametros incorrectos"}
self.sendData(response)
else:
response = {"error":400,"descripcion":"Recurso no encontrado"}
self.sendData(response)
def do_PUT(self):
postdata = self.getPostData()
if self.path.startswith('/event'):
if postdata.has_key('user') and postdata.has_key('calendar') and postdata.has_key('cloud') and \
postdata.has_key('isallday') and postdata.has_key('timestart') and postdata.has_key('timeend') and \
postdata.has_key('repetition') and postdata.has_key('finaltype') and postdata.has_key('finalvalue') and \
postdata.has_key('subject') and postdata.has_key('location') and postdata.has_key('description') and \
postdata.has_key('repeattype'):
response = self.calendars.updateEvent(postdata['user'],postdata['calendar'],postdata['cloud'],postdata['isallday'],
postdata['timestart'],postdata['timeend'],postdata['repetition'],
postdata['finaltype'],postdata['finalvalue'],postdata['subject'].decode('hex'),postdata['location'].decode('hex'),
postdata['description'].decode('hex'),postdata['repeattype'])
else:
response = {"error":400,"descripcion":"Parametros incorrectos"}
elif self.path.startswith('/calendar'):
if postdata.has_key('user') and postdata.has_key('name') and postdata.has_key('cloud') and postdata.has_key('description') and \
postdata.has_key('timezone'):
response = self.calendars.updateCalendar(postdata['user'],postdata['name'],postdata['cloud'],postdata['description'],postdata['timezone'])
else:
response = {"error":400,"descripcion":"Parametros incorrectos"}
elif self.path.startswith('/updateTime'):
if postdata.has_key('id') and postdata.has_key('cloud') and postdata.has_key('user') and postdata.has_key('ipserver') and \
postdata.has_key('datetime'):
response = self.eyedocs.updateDateTime(postdata['id'],postdata['cloud'],postdata['user'],postdata['ipserver'],postdata['datetime'].decode('hex'))
else:
response = {"error":400,"descripcion":"Parametros incorrectos"}
elif self.path.startswith('/unLockFile'):
if postdata.has_key('id') and postdata.has_key('cloud') and postdata.has_key('user') and postdata.has_key('ipserver') and \
postdata.has_key('datetime'):
response = self.eyedocs.unLockFile(postdata['id'],postdata['cloud'],postdata['user'],postdata['ipserver'],postdata['datetime'].decode('hex'))
else:
response = {"error":400,"descripcion":"Parametros incorrectos"}
else:
response = {"error":400,"descripcion":"Recurso no encontrado"}
self.sendData(response)
def getPostData(self):
data = {}
try:
length = int(self.headers.getheader('content-length'))
postdata = self.rfile.read(length)
data = dict((itm.split('=')[0],itm.split('=')[1]) for itm in postdata.split('&'))
except:
pass
return data
def sendData(self,response):
if response.has_key('error'):
self.send_response(response['error'],response['descripcion'])
else:
self.send_response(200,"OK")
self.end_headers()
self.wfile.write(json.dumps(response))
def sendDataArray(self,data):
self.send_response(200,"OK")
self.end_headers()
self.wfile.write(json.dumps(data))
def createPid(pid):
try:
file = open('/var/run/serverOauth.pid', 'w')
file.write(str(pid))
file.close()
except IOError as e:
print >>sys.stderr, "Error create file pid:%d (%s)" % (e.errno, e.strerror)
os.kill(int(pid), 9)
sys.exit(0)
try:
try:
pid = os.fork()
if pid > 0:
createPid(str(pid))
sys.exit(0)
except OSError, e:
print >>sys.stderr, "fork #1 failed: %d (%s)" % (e.errno, e.strerror)
sys.exit(1)
server = HTTPServer((settings['Server']['host'], settings['Server']['port']), RequestHandler)
print 'Server APISYNC running...'
server.serve_forever()
except KeyboardInterrupt:
server.socket.close()
| agpl-3.0 |
rowemoore/odoo | addons/sale_crm/__openerp__.py | 260 | 2036 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Opportunity to Quotation',
'version': '1.0',
'category': 'Hidden',
'description': """
This module adds a shortcut on one or several opportunity cases in the CRM.
===========================================================================
This shortcut allows you to generate a sales order based on the selected case.
If different cases are open (a list), it generates one sales order per case.
The case is then closed and linked to the generated sales order.
We suggest installing this module if you have installed both the sale and the crm
modules.
""",
'author': 'OpenERP SA',
'website': 'https://www.odoo.com/page/crm',
'depends': ['sale', 'crm', 'web_kanban_gauge'],
'data': [
'wizard/crm_make_sale_view.xml',
'sale_crm_view.xml',
'security/sale_crm_security.xml',
'security/ir.model.access.csv',
],
'demo': [],
'test': ['test/sale_crm.yml'],
'installable': True,
'auto_install': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
x303597316/hue | desktop/core/ext-py/Django-1.6.10/django/contrib/admindocs/tests/test_fields.py | 111 | 1192 | from __future__ import absolute_import, unicode_literals
from django.contrib.admindocs import views
from django.db import models
from django.db.models import fields
from django.utils import unittest
from django.utils.translation import ugettext as _
class CustomField(models.Field):
description = "A custom field type"
class DescriptionLackingField(models.Field):
pass
class TestFieldType(unittest.TestCase):
def setUp(self):
pass
def test_field_name(self):
self.assertRaises(AttributeError,
views.get_readable_field_data_type, "NotAField"
)
def test_builtin_fields(self):
self.assertEqual(
views.get_readable_field_data_type(fields.BooleanField()),
_('Boolean (Either True or False)')
)
def test_custom_fields(self):
self.assertEqual(
views.get_readable_field_data_type(CustomField()),
'A custom field type'
)
self.assertEqual(
views.get_readable_field_data_type(DescriptionLackingField()),
_('Field of type: %(field_type)s') % {
'field_type': 'DescriptionLackingField'
}
)
| apache-2.0 |
Open365/Open365 | lib/EyeosApps/EyeosApps.py | 1 | 4242 | import json
import re
from lib.EyeosApps.EyeosAppsStorage import EyeosAppsStorage
from lib.EyeosApps.EyeosAppsJsonValidator import EyeosAppsJsonValidator
from lib.EyeosApi.Application import Application
from lib.Settings import Settings
from lib.Wrappers.Logger import Logger
from lib.Errors.EyeosAPIError import EyeosAPIError
from lib.EyeosApi.Principals import Principals
from lib.EyeosApi.Login import Login
class EyeosApps:
DEFAULT_APPS_DIR = './eyeos_apps/default'
def __init__(self, injected_open=None, injected_eyeos_apps_storage=None, injected_json=None,
injected_eyeos_apps_validator=None,
injected_application_api=None,
injected_principals_api=None,
injected_base_group=None,
injected_login_api=None):
self.eyeos_apps_storage = injected_eyeos_apps_storage or EyeosAppsStorage()
self.open = injected_open or open
self.json = injected_json or json
self.logger = Logger(__name__)
self.eyeos_apps_validator = injected_eyeos_apps_validator or EyeosAppsJsonValidator()
self.application_api = injected_application_api or Application()
self.principals_api = injected_principals_api or Principals()
self.principal_base_group = injected_base_group or Settings().getSettings()['principalservice']['base_group']
self.login_api = injected_login_api or Login()
def install(self, apps_directory, admin_user, password, domain):
apps_file = apps_directory + '/apps.json'
with self.open(apps_file, 'r') as f:
try:
apps = self.json.load(f)
except ValueError as e:
raise ValueError("File {0} does not contain valid JSON".format(apps_file)) from e
self.eyeos_apps_validator.validate(apps)
try:
eyeos_card = self.login_api.authenticate(admin_user, password, domain)
self.logger.info('Emptying previous apps...')
self.application_api.empty_apps(eyeos_card)
self.logger.info('Saving apps...')
self.eyeos_apps_storage.save(apps)
self.application_api.save(apps, eyeos_card)
group = self.principals_api.get_systemgroup(self.principal_base_group, eyeos_card)
group = self._generate_new_permissions(apps, group)
self.principals_api.put_systemgroup(group, eyeos_card)
except (ValueError, KeyError, EyeosAPIError) as e:
self.logger.error(e)
exit(1)
def _generate_new_permissions(self, applications, group):
""" existing permissions like eyeos.application.* should be removed
and then add new eyeos.applications.whatever permissions for each eyeos_apps applications.
permissions like eyeos.admin.*.edit should be removed and add new ones from the
control_panel applications
the eyeos.admin.*.edit permissions might pose a problem in the future if there appear new
permissions not tied to an application. That's a problem for future everyone :S
"""
permissions = group['permissions']
i = 0
while i < len(permissions):
permission = permissions[i]
if (re.match(r'^eyeos\.application\.[^.]*$', permission['id']) or
re.match(r'^eyeos\.admin\.[^.]*\.edit$', permission['id'])):
permissions.remove(permission)
continue
i += 1
for eyeos_application in applications['eyeosApps']:
permissions.append({
"id": "eyeos.application." + eyeos_application['appID'],
"name": 'Execute ' + eyeos_application['name'],
"description": "Run " + eyeos_application['name'] + " application",
"enabled": True
})
for admin_application in applications['controlPanelApps']:
permissions.append({
"id": "eyeos.admin." + admin_application['appID'] + '.edit',
"name": 'Manage ' + admin_application['name'],
"description": "Manage " + admin_application['name'] + " in admin panel",
"enabled": False
})
return group
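# Editorial sketch (not part of the original module): given an apps.json with one
# eyeosApps entry {"appID": "files", "name": "Files"} and one controlPanelApps entry
# {"appID": "users", "name": "Users"}, _generate_new_permissions() drops any previous
# eyeos.application.* / eyeos.admin.*.edit entries from the group and appends
# eyeos.application.files (enabled) and eyeos.admin.users.edit (disabled by default).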
| agpl-3.0 |
mollstam/UnrealPy | UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/Python-2.7.10/Demo/classes/Rev.py | 47 | 2052 | '''
A class which presents the reverse of a sequence without duplicating it.
From: "Steven D. Majewski" <sdm7g@elvis.med.virginia.edu>
It works on mutable or immutable sequences.
>>> chars = list(Rev('Hello World!'))
>>> print ''.join(chars)
!dlroW olleH
The .forw attribute is so you can use anonymous sequences in __init__, and still
keep a reference to the forward sequence.
If you give it a non-anonymous mutable sequence, the reverse sequence
will track the updated values. (But not reassignment! - another
good reason to use anonymous values in creating the sequence to avoid
confusion. Maybe it should be changed to copy the input sequence to break
the connection completely?)
>>> nnn = range(3)
>>> rnn = Rev(nnn)
>>> for n in rnn: print n
...
2
1
0
>>> for n in range(4, 6): nnn.append(n) # update nnn
...
>>> for n in rnn: print n # prints reversed updated values
...
5
4
2
1
0
>>> nnn = nnn[1:-1]
>>> nnn
[1, 2, 4]
>>> for n in rnn: print n # prints reversed values of old nnn
...
5
4
2
1
0
#
>>> WH = Rev('Hello World!')
>>> print WH.forw, WH.back
Hello World! !dlroW olleH
>>> nnn = Rev(range(1, 10))
>>> print nnn.forw
[1, 2, 3, 4, 5, 6, 7, 8, 9]
>>> print nnn.back
[9, 8, 7, 6, 5, 4, 3, 2, 1]
>>> rrr = Rev(nnn)
>>> rrr
<1, 2, 3, 4, 5, 6, 7, 8, 9>
'''
class Rev:
def __init__(self, seq):
self.forw = seq
self.back = self
def __len__(self):
return len(self.forw)
def __getitem__(self, j):
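        # item j of the reversed view is item -(j + 1) of the underlying sequence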
return self.forw[-(j + 1)]
def __repr__(self):
seq = self.forw
if isinstance(seq, list):
wrap = '[]'
sep = ', '
elif isinstance(seq, tuple):
wrap = '()'
sep = ', '
elif isinstance(seq, str):
wrap = ''
sep = ''
else:
wrap = '<>'
sep = ', '
outstrs = [str(item) for item in self.back]
return wrap[:1] + sep.join(outstrs) + wrap[-1:]
def _test():
import doctest, Rev
return doctest.testmod(Rev)
if __name__ == "__main__":
_test()
| mit |
c7zero/chipsec | chipsec/helper/efi/__init__.py | 9 | 1143 | #CHIPSEC: Platform Security Assessment Framework
#Copyright (c) 2010-2015, Intel Corporation
#
#This program is free software; you can redistribute it and/or
#modify it under the terms of the GNU General Public License
#as published by the Free Software Foundation; Version 2.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with this program; if not, write to the Free Software
#Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#Contact information:
#chipsec@intel.com
#
##################################################################################
#
# List of all extension modules: add your module name here
#
##################################################################################
import sys
if sys.platform.startswith('uefi') or sys.platform.startswith('EFI'):
__all__ = [ "efihelper" ]
else:
__all__ = [ ]
| gpl-2.0 |
piem/aubio | python/demos/demo_bench_yin.py | 4 | 1767 | #! /usr/bin/env python
import numpy as np
from aubio import pitch
import pylab as plt
buf_size = 2048 * 1
hop_size = buf_size // 4
samplerate = 44100
minfreq = 40
maxfreq = 6000
def sinewave(freq, duration, samplerate = samplerate):
""" generate a sinewave """
length = hop_size
while length < duration * samplerate:
length += hop_size
return np.sin( 2. * np.pi * np.arange(length) * freq / samplerate ).astype("float32")
def get_stats_for_pitch_method(method, freqs, samplerate = samplerate):
""" for a given pitch method and a list of frequency, generate a sinewave
and get mean deviation """
means = np.zeros(len(freqs))
medians = np.zeros(len(freqs))
for freq, fn in zip(freqs, range(len(freqs))):
s = sinewave(freq, .50).reshape(-1, hop_size)
#s = (sinewave(freq, .50) + .0*sinewave(freq/2., .50)).reshape(-1, hop_size)
p = pitch(method, buf_size, hop_size, samplerate = samplerate)
candidates = np.zeros(len(s))
#samples = np.zeros(buf_size)
for frame, i in zip(s, range(len(s))):
candidates[i] = p(frame)[0]
# skip first few candidates
candidates = candidates[4:]
means[fn] = np.mean(candidates[candidates != 0] - freq)
medians[fn] = np.median(candidates[candidates != 0] - freq)
print (freq, means[fn], medians[fn])
return means, medians
if __name__ == '__main__':
freqs = np.arange(minfreq, maxfreq, 1.)
modes = ["yin", "yinfft"]
for mode in modes:
means, medians = get_stats_for_pitch_method(mode, freqs)
plt.figure()
plt.plot(freqs, means, 'g-')
plt.plot(freqs, medians, 'r--')
#plt.savefig(mode + '_deviations_test.png', dpi=300)
plt.show()
| gpl-3.0 |
salivatears/ansible | contrib/inventory/windows_azure.py | 119 | 11306 | #!/usr/bin/env python
'''
Windows Azure external inventory script
=======================================
Generates inventory that Ansible can understand by making API request to
Windows Azure using the azure python library.
NOTE: This script assumes Ansible is being executed where azure is already
installed.
pip install azure
Adapted from the ansible Linode plugin by Dan Slimmon.
'''
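# Example invocations (editorial; assume windows_azure.ini or the
# AZURE_SUBSCRIPTION_ID / AZURE_CERT_PATH environment variables are configured):
#
#   ./windows_azure.py --list
#   ./windows_azure.py --host <instance_name>
#   ./windows_azure.py --list-images 'Ubuntu'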
# (c) 2013, John Whitbeck
#
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
######################################################################
# Standard imports
import re
import sys
import argparse
import os
from urlparse import urlparse
from time import time
try:
import json
except ImportError:
import simplejson as json
try:
import azure
from azure import WindowsAzureError
from azure.servicemanagement import ServiceManagementService
except ImportError as e:
print("failed=True msg='`azure` library required for this script'")
sys.exit(1)
# Imports for ansible
import ConfigParser
class AzureInventory(object):
def __init__(self):
"""Main execution path."""
# Inventory grouped by display group
self.inventory = {}
# Index of deployment name -> host
self.index = {}
self.host_metadata = {}
# Cache setting defaults.
# These can be overridden in settings (see `read_settings`).
cache_dir = os.path.expanduser('~')
self.cache_path_cache = os.path.join(cache_dir, '.ansible-azure.cache')
self.cache_path_index = os.path.join(cache_dir, '.ansible-azure.index')
self.cache_max_age = 0
# Read settings and parse CLI arguments
self.read_settings()
self.read_environment()
self.parse_cli_args()
# Initialize Azure ServiceManagementService
self.sms = ServiceManagementService(self.subscription_id, self.cert_path)
# Cache
if self.args.refresh_cache:
self.do_api_calls_update_cache()
elif not self.is_cache_valid():
self.do_api_calls_update_cache()
if self.args.list_images:
data_to_print = self.json_format_dict(self.get_images(), True)
elif self.args.list or self.args.host:
# Display list of nodes for inventory
if len(self.inventory) == 0:
data = json.loads(self.get_inventory_from_cache())
else:
data = self.inventory
if self.args.host:
data_to_print = self.get_host(self.args.host)
else:
# Add the `['_meta']['hostvars']` information.
hostvars = {}
if len(data) > 0:
for host in set([h for hosts in data.values() for h in hosts if h]):
hostvars[host] = self.get_host(host, jsonify=False)
data['_meta'] = {'hostvars': hostvars}
# JSONify the data.
data_to_print = self.json_format_dict(data, pretty=True)
print(data_to_print)
def get_host(self, hostname, jsonify=True):
"""Return information about the given hostname, based on what
the Windows Azure API provides.
"""
if hostname not in self.host_metadata:
return "No host found: %s" % json.dumps(self.host_metadata)
if jsonify:
return json.dumps(self.host_metadata[hostname])
return self.host_metadata[hostname]
def get_images(self):
images = []
for image in self.sms.list_os_images():
if str(image.label).lower().find(self.args.list_images.lower()) >= 0:
images.append(vars(image))
return json.loads(json.dumps(images, default=lambda o: o.__dict__))
def is_cache_valid(self):
"""Determines if the cache file has expired, or if it is still valid."""
if os.path.isfile(self.cache_path_cache):
mod_time = os.path.getmtime(self.cache_path_cache)
current_time = time()
if (mod_time + self.cache_max_age) > current_time:
if os.path.isfile(self.cache_path_index):
return True
return False
def read_settings(self):
"""Reads the settings from the .ini file."""
config = ConfigParser.SafeConfigParser()
config.read(os.path.dirname(os.path.realpath(__file__)) + '/windows_azure.ini')
# Credentials related
if config.has_option('azure', 'subscription_id'):
self.subscription_id = config.get('azure', 'subscription_id')
if config.has_option('azure', 'cert_path'):
self.cert_path = config.get('azure', 'cert_path')
# Cache related
if config.has_option('azure', 'cache_path'):
cache_path = os.path.expandvars(os.path.expanduser(config.get('azure', 'cache_path')))
self.cache_path_cache = os.path.join(cache_path, 'ansible-azure.cache')
self.cache_path_index = os.path.join(cache_path, 'ansible-azure.index')
if config.has_option('azure', 'cache_max_age'):
self.cache_max_age = config.getint('azure', 'cache_max_age')
def read_environment(self):
''' Reads the settings from environment variables '''
# Credentials
if os.getenv("AZURE_SUBSCRIPTION_ID"):
self.subscription_id = os.getenv("AZURE_SUBSCRIPTION_ID")
if os.getenv("AZURE_CERT_PATH"):
self.cert_path = os.getenv("AZURE_CERT_PATH")
def parse_cli_args(self):
"""Command line argument processing"""
parser = argparse.ArgumentParser(
description='Produce an Ansible Inventory file based on Azure',
)
parser.add_argument('--list', action='store_true', default=True,
help='List nodes (default: True)')
parser.add_argument('--list-images', action='store',
help='Get all available images.')
parser.add_argument('--refresh-cache',
action='store_true', default=False,
                            help='Force refresh of the cache by making API requests to Azure '
'(default: False - use cache files)',
)
parser.add_argument('--host', action='store',
help='Get all information about an instance.')
self.args = parser.parse_args()
def do_api_calls_update_cache(self):
"""Do API calls, and save data in cache files."""
self.add_cloud_services()
self.write_to_cache(self.inventory, self.cache_path_cache)
self.write_to_cache(self.index, self.cache_path_index)
def add_cloud_services(self):
"""Makes an Azure API call to get the list of cloud services."""
try:
for cloud_service in self.sms.list_hosted_services():
self.add_deployments(cloud_service)
except WindowsAzureError as e:
print("Looks like Azure's API is down:")
print("")
print(e)
sys.exit(1)
def add_deployments(self, cloud_service):
"""Makes an Azure API call to get the list of virtual machines
associated with a cloud service.
"""
try:
            for deployment in self.sms.get_hosted_service_properties(cloud_service.service_name, embed_detail=True).deployments.deployments:
self.add_deployment(cloud_service, deployment)
except WindowsAzureError as e:
print("Looks like Azure's API is down:")
print("")
print(e)
sys.exit(1)
def add_deployment(self, cloud_service, deployment):
"""Adds a deployment to the inventory and index"""
for role in deployment.role_instance_list.role_instances:
try:
# Default port 22 unless port found with name 'SSH'
port = '22'
for ie in role.instance_endpoints.instance_endpoints:
if ie.name == 'SSH':
port = ie.public_port
break
except AttributeError as e:
pass
finally:
self.add_instance(role.instance_name, deployment, port, cloud_service, role.instance_status)
def add_instance(self, hostname, deployment, ssh_port, cloud_service, status):
"""Adds an instance to the inventory and index"""
dest = urlparse(deployment.url).hostname
# Add to index
self.index[hostname] = deployment.name
self.host_metadata[hostname] = dict(ansible_ssh_host=dest,
ansible_ssh_port=int(ssh_port),
instance_status=status,
private_id=deployment.private_id)
# List of all azure deployments
self.push(self.inventory, "azure", hostname)
# Inventory: Group by service name
self.push(self.inventory, self.to_safe(cloud_service.service_name), hostname)
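        # Inventory: hosts that expose SSH on the default port share a common group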
if int(ssh_port) == 22:
self.push(self.inventory, "Cloud_services", hostname)
# Inventory: Group by region
self.push(self.inventory, self.to_safe(cloud_service.hosted_service_properties.location), hostname)
def push(self, my_dict, key, element):
"""Pushed an element onto an array that may not have been defined in the dict."""
if key in my_dict:
my_dict[key].append(element);
else:
my_dict[key] = [element]
def get_inventory_from_cache(self):
"""Reads the inventory from the cache file and returns it as a JSON object."""
cache = open(self.cache_path_cache, 'r')
json_inventory = cache.read()
return json_inventory
def load_index_from_cache(self):
"""Reads the index from the cache file and sets self.index."""
cache = open(self.cache_path_index, 'r')
json_index = cache.read()
self.index = json.loads(json_index)
def write_to_cache(self, data, filename):
"""Writes data in JSON format to a file."""
json_data = self.json_format_dict(data, True)
cache = open(filename, 'w')
cache.write(json_data)
cache.close()
def to_safe(self, word):
"""Escapes any characters that would be invalid in an ansible group name."""
return re.sub("[^A-Za-z0-9\-]", "_", word)
def json_format_dict(self, data, pretty=False):
"""Converts a dict to a JSON object and dumps it as a formatted string."""
if pretty:
return json.dumps(data, sort_keys=True, indent=2)
else:
return json.dumps(data)
AzureInventory()
| gpl-3.0 |
interactiveinstitute/watthappened | python_modules/http_parser/pyparser.py | 1 | 14254 | # -*- coding: utf-8 -
#
# This file is part of http-parser released under the MIT license.
# See the NOTICE for more information.
import os
import re
import sys
if sys.version_info >= (3,):
import urllib.parse as urlparse
else:
import urlparse
import zlib
from http_parser.util import (b, bytes_to_str, IOrderedDict, StringIO,
unquote, MAXSIZE)
METHOD_RE = re.compile("[A-Z0-9$-_.]{3,20}")
VERSION_RE = re.compile(r"HTTP/(\d+)\.(\d+)")
STATUS_RE = re.compile("(\d{3})\s*(\w*)")
HEADER_RE = re.compile("[\x00-\x1F\x7F()<>@,;:\[\]={} \t\\\\\"]")
# errors
BAD_FIRST_LINE = 0
INVALID_HEADER = 1
INVALID_CHUNK = 2
class InvalidRequestLine(Exception):
""" error raised when first line is invalid """
class InvalidHeader(Exception):
""" error raised on invalid header """
class InvalidChunkSize(Exception):
""" error raised when we parse an invalid chunk size """
class HttpParser(object):
def __init__(self, kind=2, decompress=False):
self.kind = kind
self.decompress = decompress
# errors vars
self.errno = None
self.errstr = ""
# protected variables
self._buf = []
self._version = None
self._method = None
self._status_code = None
self._status = None
self._reason = None
self._url = None
self._path = None
self._query_string = None
self._fragment= None
self._headers = IOrderedDict()
self._environ = dict()
self._chunked = False
self._body = []
self._trailers = None
self._partial_body = False
self._clen = None
self._clen_rest = None
# private events
self.__on_firstline = False
self.__on_headers_complete = False
self.__on_message_begin = False
self.__on_message_complete = False
self.__decompress_obj = None
def get_version(self):
return self._version
def get_method(self):
return self._method
def get_status_code(self):
return self._status_code
def get_url(self):
return self._url
def get_path(self):
return self._path
def get_query_string(self):
return self._query_string
def get_fragment(self):
return self._fragment
def get_headers(self):
return self._headers
def get_wsgi_environ(self):
if not self.__on_headers_complete:
return None
environ = self._environ.copy()
# clean special keys
for key in ("CONTENT_LENGTH", "CONTENT_TYPE", "SCRIPT_NAME"):
hkey = "HTTP_%s" % key
if hkey in environ:
environ[key] = environ.pop(hkey)
script_name = environ.get('SCRIPT_NAME',
os.environ.get("SCRIPT_NAME", ""))
if script_name:
path_info = self._path.split(script_name, 1)[1]
environ.update({
"PATH_INFO": unquote(path_info),
"SCRIPT_NAME": script_name})
else:
environ['SCRIPT_NAME'] = ""
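        # Honour reverse-proxy headers when deciding the WSGI url scheme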
if environ.get('HTTP_X_FORWARDED_PROTOCOL', '').lower() == "ssl":
environ['wsgi.url_scheme'] = "https"
elif environ.get('HTTP_X_FORWARDED_SSL', '').lower() == "on":
environ['wsgi.url_scheme'] = "https"
else:
environ['wsgi.url_scheme'] = "http"
return environ
def recv_body(self):
""" return last chunk of the parsed body"""
body = b("").join(self._body)
self._body = []
self._partial_body = False
return body
def recv_body_into(self, barray):
""" Receive the last chunk of the parsed bodyand store the data
in a buffer rather than creating a new string. """
l = len(barray)
body = b("").join(self._body)
m = min(len(body), l)
data, rest = body[:m], body[m:]
barray[0:m] = data
if not rest:
self._body = []
self._partial_body = False
else:
self._body = [rest]
return m
def is_upgrade(self):
""" Do we get upgrade header in the request. Useful for
websockets """
return self._headers.get('connection', "") == "upgrade"
def is_headers_complete(self):
""" return True if all headers have been parsed. """
return self.__on_headers_complete
def is_partial_body(self):
""" return True if a chunk of body have been parsed """
return self._partial_body
def is_message_begin(self):
""" return True if the parsing start """
return self.__on_message_begin
def is_message_complete(self):
""" return True if the parsing is done (we get EOF) """
return self.__on_message_complete
def is_chunked(self):
""" return True if Transfer-Encoding header value is chunked"""
return self._chunked
def should_keep_alive(self):
""" return True if the connection should be kept alive
"""
hconn = self._headers.get('connection', "").lower()
if hconn == "close":
return False
elif hconn == "keep-alive":
return True
return self._version == (1, 1)
def execute(self, data, length):
# end of body can be passed manually by putting a length of 0
if length == 0:
            self.__on_message_complete = True
return length
# start to parse
nb_parsed = 0
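        # Parse in stages: request/status line first, then headers, then body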
while True:
if not self.__on_firstline:
idx = data.find(b("\r\n"))
if idx < 0:
self._buf.append(data)
return len(data)
else:
self.__on_firstline = True
self._buf.append(data[:idx])
first_line = bytes_to_str(b("").join(self._buf))
nb_parsed = nb_parsed + idx + 2
rest = data[idx+2:]
data = b("")
if self._parse_firstline(first_line):
self._buf = [rest]
else:
return nb_parsed
elif not self.__on_headers_complete:
if data:
self._buf.append(data)
data = b("")
try:
to_parse = b("").join(self._buf)
ret = self._parse_headers(to_parse)
if not ret:
return length
nb_parsed = nb_parsed + (len(to_parse) - ret)
except InvalidHeader as e:
self.errno = INVALID_HEADER
self.errstr = str(e)
return nb_parsed
elif not self.__on_message_complete:
if not self.__on_message_begin:
self.__on_message_begin = True
if data:
self._buf.append(data)
data = b("")
ret = self._parse_body()
if ret is None:
return length
elif ret < 0:
return ret
elif ret == 0:
self.__on_message_complete = True
return length
else:
nb_parsed = max(length, ret)
else:
return 0
def _parse_firstline(self, line):
try:
if self.kind == 2: # auto detect
try:
self._parse_request_line(line)
except InvalidRequestLine:
self._parse_response_line(line)
elif self.kind == 1:
self._parse_response_line(line)
elif self.kind == 0:
self._parse_request_line(line)
except InvalidRequestLine as e:
self.errno = BAD_FIRST_LINE
self.errstr = str(e)
return False
return True
def _parse_response_line(self, line):
bits = line.split(None, 1)
if len(bits) != 2:
raise InvalidRequestLine(line)
# version
matchv = VERSION_RE.match(bits[0])
if matchv is None:
raise InvalidRequestLine("Invalid HTTP version: %s" % bits[0])
self._version = (int(matchv.group(1)), int(matchv.group(2)))
# status
matchs = STATUS_RE.match(bits[1])
if matchs is None:
raise InvalidRequestLine("Invalid status %" % bits[1])
self._status = bits[1]
self._status_code = int(matchs.group(1))
self._reason = matchs.group(2)
def _parse_request_line(self, line):
bits = line.split(None, 2)
if len(bits) != 3:
raise InvalidRequestLine(line)
# Method
if not METHOD_RE.match(bits[0]):
raise InvalidRequestLine("invalid Method: %s" % bits[0])
self._method = bits[0].upper()
# URI
self._url = bits[1]
parts = urlparse.urlsplit(bits[1])
self._path = parts.path or ""
self._query_string = parts.query or ""
self._fragment = parts.fragment or ""
# Version
match = VERSION_RE.match(bits[2])
if match is None:
raise InvalidRequestLine("Invalid HTTP version: %s" % bits[2])
self._version = (int(match.group(1)), int(match.group(2)))
# update environ
        if hasattr(self, '_environ'):
self._environ.update({
"PATH_INFO": self._path,
"QUERY_STRING": self._query_string,
"RAW_URI": self._url,
"REQUEST_METHOD": self._method,
"SERVER_PROTOCOL": bits[2]})
def _parse_headers(self, data):
idx = data.find(b("\r\n\r\n"))
if idx < 0: # we don't have all headers
return False
# Split lines on \r\n keeping the \r\n on each line
lines = [bytes_to_str(line) + "\r\n" for line in
data[:idx].split(b("\r\n"))]
# Parse headers into key/value pairs paying attention
# to continuation lines.
while len(lines):
# Parse initial header name : value pair.
curr = lines.pop(0)
if curr.find(":") < 0:
raise InvalidHeader("invalid line %s" % curr.strip())
name, value = curr.split(":", 1)
name = name.rstrip(" \t").upper()
if HEADER_RE.search(name):
raise InvalidHeader("invalid header name %s" % name)
name, value = name.strip(), [value.lstrip()]
# Consume value continuation lines
while len(lines) and lines[0].startswith((" ", "\t")):
value.append(lines.pop(0))
value = ''.join(value).rstrip()
# multiple headers
if name in self._headers:
value = "%s, %s" % (self._headers[name], value)
# store new header value
self._headers[name] = value
# update WSGI environ
key = 'HTTP_%s' % name.upper().replace('-','_')
self._environ[key] = value
# detect now if body is sent by chunks.
clen = self._headers.get('content-length')
te = self._headers.get('transfer-encoding', '').lower()
if clen is not None:
try:
self._clen_rest = self._clen = int(clen)
except ValueError:
pass
else:
self._chunked = (te == 'chunked')
if not self._chunked:
self._clen_rest = MAXSIZE
# detect encoding and set decompress object
encoding = self._headers.get('content-encoding')
if self.decompress:
if encoding == "gzip":
self.__decompress_obj = zlib.decompressobj(16+zlib.MAX_WBITS)
elif encoding == "deflate":
self.__decompress_obj = zlib.decompressobj()
rest = data[idx+4:]
self._buf = [rest]
self.__on_headers_complete = True
return len(rest)
def _parse_body(self):
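        # Plain bodies are tracked against Content-Length; chunked bodies are
        # consumed one chunk at a time below.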
if not self._chunked:
body_part = b("").join(self._buf)
self._clen_rest -= len(body_part)
# maybe decompress
if self.__decompress_obj is not None:
body_part = self.__decompress_obj.decompress(body_part)
self._partial_body = True
self._body.append(body_part)
self._buf = []
if self._clen_rest <= 0:
self.__on_message_complete = True
return
else:
data = b("").join(self._buf)
try:
size, rest = self._parse_chunk_size(data)
except InvalidChunkSize as e:
self.errno = INVALID_CHUNK
self.errstr = "invalid chunk size [%s]" % str(e)
return -1
if size == 0:
return size
if size is None or len(rest) < size:
return None
body_part, rest = rest[:size], rest[size:]
if len(rest) < 2:
self.errno = INVALID_CHUNK
self.errstr = "chunk missing terminator [%s]" % data
return -1
# maybe decompress
if self.__decompress_obj is not None:
body_part = self.__decompress_obj.decompress(body_part)
self._partial_body = True
self._body.append(body_part)
self._buf = [rest[2:]]
return len(rest)
def _parse_chunk_size(self, data):
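        # A chunk starts with "<hex-size>[;extensions]\r\n"; a size of 0 marks
        # the last chunk, which may be followed by trailer headers.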
idx = data.find(b("\r\n"))
if idx < 0:
return None, None
line, rest_chunk = data[:idx], data[idx+2:]
chunk_size = line.split(b(";"), 1)[0].strip()
try:
chunk_size = int(chunk_size, 16)
except ValueError:
raise InvalidChunkSize(chunk_size)
if chunk_size == 0:
self._parse_trailers(rest_chunk)
return 0, None
return chunk_size, rest_chunk
def _parse_trailers(self, data):
idx = data.find(b("\r\n\r\n"))
if data[:2] == b("\r\n"):
self._trailers = self._parse_headers(data[:idx])
| mit |
emmuchira/kps_erp | erpnext/setup/install.py | 9 | 2238 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import print_function, unicode_literals
import frappe
from frappe import _
from frappe.desk.page.setup_wizard.setup_wizard import add_all_roles_to
from frappe.custom.doctype.custom_field.custom_field import create_custom_field
default_mail_footer = """<div style="padding: 7px; text-align: right; color: #888"><small>Sent via
<a style="color: #888" href="http://erpnext.org">ERPNext</a></div>"""
def after_install():
frappe.get_doc({'doctype': "Role", "role_name": "Analytics"}).insert()
set_single_defaults()
create_compact_item_print_custom_field()
create_print_zero_amount_taxes_custom_field()
add_all_roles_to("Administrator")
frappe.db.commit()
def check_setup_wizard_not_completed():
if frappe.db.get_default('desktop:home_page') == 'desktop':
print()
print("ERPNext can only be installed on a fresh site where the setup wizard is not completed")
print("You can reinstall this site (after saving your data) using: bench --site [sitename] reinstall")
print()
return False
def set_single_defaults():
for dt in ('Accounts Settings', 'Print Settings', 'HR Settings', 'Buying Settings',
'Selling Settings', 'Stock Settings', 'Daily Work Summary Settings'):
default_values = frappe.db.sql("""select fieldname, `default` from `tabDocField`
where parent=%s""", dt)
if default_values:
try:
b = frappe.get_doc(dt, dt)
for fieldname, value in default_values:
b.set(fieldname, value)
b.save()
except frappe.MandatoryError:
pass
except frappe.ValidationError:
pass
frappe.db.set_default("date_format", "dd-mm-yyyy")
def create_compact_item_print_custom_field():
create_custom_field('Print Settings', {
'label': _('Compact Item Print'),
'fieldname': 'compact_item_print',
'fieldtype': 'Check',
'default': 1,
'insert_after': 'with_letterhead'
})
def create_print_zero_amount_taxes_custom_field():
create_custom_field('Print Settings', {
'label': _('Print taxes with zero amount'),
'fieldname': 'print_taxes_with_zero_amount',
'fieldtype': 'Check',
'default': 0,
'insert_after': 'allow_print_for_cancelled'
}) | gpl-3.0 |
aszlig/LastWatch | lastwatch.py | 1 | 19341 | #!/usr/bin/env python
# This is LastWatch, a last.fm scrobbler which uses inotify to detect
# the songs played by the audio player of your choice :-)
#
# Copyright (c) 2008 aszlig <"^[0-9]+$"@redmoonstudios.de>
#
# LastWatch is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# LastWatch is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with LastWatch. If not, see <http://www.gnu.org/licenses/>.
LASTWATCH_VERSION = "0.4.1"
LASTFM_API_KEY = '3db903c7c55cf3da762c0476e7da00a8'
LASTFM_API_SECRET = 'c6557b5e328f9d3d6e676f125f98a367'
DEBUG = False
import re
import sys
import time
import os
import signal
import ConfigParser
import pylast
from gettext import gettext as _
from textwrap import wrap
from optparse import OptionParser
from pyinotify import ThreadedNotifier, WatchManager, EventsCodes, ProcessEvent
from mutagen import File as MutagenFile
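# RE_FORMAT tokenizes a filename format string into %x modifiers, literal
# runs and escaped "%%" sequences.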
RE_FORMAT = re.compile('(?<!%)%(?P<mod>[a-zA-Z])|([^%]+)|(?P<esc>%%)')
class FilenameParser(object):
match_all = r'.*'
match_name = r'\d*(?:\D+\d*)+'
match_num = r'\d{1,3}'
ftable = {
'a': (match_name, 'artist', _("Artist")),
'A': (match_name, 'album', _("Album")),
't': (match_name, 'title', _("Song title")),
'n': (match_num, 'number', _("Track number")),
'i': (match_all, 'ignore', _("Ignore this value")),
}
def __init__(self, filename):
self.filename = os.path.abspath(filename)
def make_node(self, farg):
"""
Creates a node which is a tuple consisting of (type, content)
and adds named groups to the regular expressions for numeric
and string data types.
"""
modifier = farg.group('mod')
if farg.group('esc'):
self.node_groups.append(('plain', '%'))
elif not modifier:
self.node_groups.append(('plain', farg.group()))
elif modifier in self.ftable:
opts = self.ftable[modifier]
if opts[1] == 'ignore':
append = r'\s*%s\s*' % opts[0]
else:
append = r'\s*(?P<%s>%s)\s*' % (opts[1], opts[0])
self.node_groups.append(('re', append))
else:
raise ValueError(_("Modifier not found: %%%s") % modifier)
def merge_nodes(self):
new_nodegroups = []
last_type = None
for node in self.node_groups:
if last_type and last_type[0] == node[0]:
last_type = (last_type[0], last_type[1] + node[1])
continue
elif last_type:
new_nodegroups.append(last_type)
last_type = node
new_nodegroups.append(last_type)
self.node_groups = new_nodegroups
def match_re_plain(self, node, regex):
"""
Match <regex><plain>...more...
"""
found = self._filename.lower().find(node[1])
if found == -1:
errmsg = _("Couldn't find next plain token (%(token)r) "
"after regex %(regex)r on %(text)r.")
errmsg %= {'token': node[1], 'regex': regex,
'text': self._filename}
raise LookupError(errmsg)
to_match = self._filename[:found]
cutoff_at = found + len(node[1])
self._filename = self._filename[cutoff_at:]
match = re.match(regex, to_match)
if match:
self.matches.append(match)
else:
errmsg = _("The regex %(regex)r did not match on %(text)r.")
errmsg %= {'regex': regex, 'text': to_match}
raise LookupError(errmsg)
def match_plain(self, node):
"""
Match <plain>...more...
"""
if not self._filename.lower().startswith(node[1]):
errmsg = _("Unfortunately, %(text)r did not start with %(token)r.")
errmsg %= {'text': self._filename, 'token': node[1]}
raise LookupError(errmsg)
self._filename = self._filename[len(node[1]):]
def prepare_filename(self, format):
"""
Cut the pathname to the last path segments we're trying to
match and strip off the extension.
"""
filename = os.path.splitext(self.filename)[0]
format_depth = format.count('/')
if format_depth == 0:
new_path = os.path.basename(filename)
else:
head, tail = os.path.split(filename)
new_path = tail
for x in range(0, format_depth):
head, tail = os.path.split(head)
new_path = os.path.join(tail, new_path)
self._filename = new_path
def parse_current(self, format="%n. %t %% htuoheu %a"):
"""
Tries to match a format string on the current filename.
See self.ftable for a list of modifiers.
Parsing is done by tokenizing the format string into plaintext
nodes and regular expression nodes which will be merged and
matched one after one.
"""
self.node_groups = []
self.matches = []
format = format.lstrip('/')
self.prepare_filename(format)
format = format.replace('/', os.path.sep)
RE_FORMAT.sub(self.make_node, format)
self.merge_nodes()
re_buffer = r''
for node in self.node_groups:
if node[0] == 're':
re_buffer += node[1]
elif node[0] == 'plain' and re_buffer:
self.match_re_plain(node, r'^%s$' % re_buffer)
re_buffer = r''
elif node[0] == 'plain' and not re_buffer:
self.match_plain(node)
else:
errmsg = _("Node is neither 'plain' nor 're', "
"which is really weird O_o")
raise LookupError(errmsg)
# last ...<regex> to parse
if re_buffer:
match = re.match(r'^%s$' % re_buffer, self._filename)
if match:
self.matches.append(match)
else:
errmsg = _("Whoops, we can't match the last "
"regex %(regex)r on %(text)r")
errmsg %= {'regex': re_buffer, 'text': self._filename}
raise LookupError(errmsg)
def mergedicts(x, y):
x.update(y.groupdict())
return x
results = reduce(mergedicts, self.matches, {})
        # sanitize: coerce numeric fields and undo underscore separators
        for result in results:
            item = results[result]
            if item.isdigit():
                item = int(item)
            else:
                item = item.replace('_', ' ')
            results[result] = item
return results
def parse(self):
"""
Tries several format strings on the current filename and return
the first one matching, otherwise raise a LookupError.
"""
formats = (
'%i - %A (%i)/%n. %a - %t',
'%a/%A - %i/%n %i - [%i] %t (%i kbps)',
'%a - %i - %A/ - %i - %n %t',
'%n - %a - %t',
'%a/%A/%i_%n_%t',
'%a_-_%A_-_%t',
'%a_-_%n_-_%t',
'%a_-_%t',
'%a - %A - %t',
'%a - %n - %t',
'%a - %t',
'%a/%A/%n. %t',
'%a - %i - %A/%i - %i - %n %t',
'%a - %i - %A/%i - %t',
'%a/%n_-_%t_-_%A',
'%a/%n_-_%A_-_%t',
'%n-%a-%t',
'%a - %i - %A/%n - %t',
'%a-%t',
'%a - %i - %A/%n %t',
'%a/%A/%t',
)
for check in formats:
try:
return self.parse_current(format=check)
except LookupError:
pass
errmsg = _("Couldn't find any title information based "
"on the path of %(file)r.")
errmsg %= {'file': self.filename}
raise LookupError(errmsg)
class TitleNotFound(Exception):
pass
class Songinfo(dict):
TAG_TRANSLATE = {
'title': ('TIT2',),
'artist': ('TPE1', 'TPE2',),
'album': ('TALB',),
}
def __init__(self, filename):
self._filename = filename
self._match = None
dict.__init__(self)
def __str__(self):
artist = self.get('artist', _("Unknown Artist"))
title = self.get('title', _("Unknown Title"))
return "%s - %s" % (artist, title)
def fetch_info(self, optional=('album',)):
"""
Check the file type and call the corresponding method to get
title info :-)
"""
self._audio = MutagenFile(self._filename)
required = ('artist', 'title')
info = {
'length': (int(self._audio.info.length)
if self._audio.info.length else 0),
}
for tag in required + optional:
try:
info[tag] = self.get_taginfo(tag)
except TitleNotFound:
if tag in optional:
continue
raise
self.update(info)
def get_alternative_tag(self, tags):
for tag in tags:
item = self._audio.get(tag, None)
if item and isinstance(item, list):
return item[0]
elif item:
return item
return None
def get_from_fname(self, what):
if self._match is not None:
match = self._match
else:
try:
parser = FilenameParser(self._filename)
match = parser.parse()
except LookupError:
raise TitleNotFound(self._filename)
if match:
self._match = match
try:
return match[what]
except KeyError:
pass
raise TitleNotFound(self._filename)
def get_taginfo(self, what):
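        # Try the tag directly, then alternate ID3 frame names, and finally
        # fall back to parsing the filename.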
item = self._audio.get(what, None)
if item and isinstance(item, list):
return item[0]
elif not item and what in self.TAG_TRANSLATE:
item = self.get_alternative_tag(self.TAG_TRANSLATE[what])
if not item:
item = self.get_from_fname(what)
if item:
return item
else:
return item
elif item:
return item
else:
item = self.get_from_fname(what)
if item:
return item
raise TitleNotFound(self._filename)
def to_lastfm(filename, start_time, runtime, settings, dry_run=False):
"""
Check if we meet the conditions and submit the song info to last.fm.
"""
try:
song = Songinfo(filename)
song.fetch_info()
except TitleNotFound, e:
print "Title for %s not found!" % e
return
lfm = pylast.LastFMNetwork(
api_key=LASTFM_API_KEY,
api_secret=LASTFM_API_SECRET,
username=settings.get('lastfm', 'user'),
password_hash=pylast.md5(settings.get('lastfm', 'passwd')),
)
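    # last.fm submission rules: skip tracks of 30 seconds or less and only
    # scrobble once at least half the track (or 240 seconds) has been played.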
if song['length'] <= 30:
return
if not (runtime >= 240 or song['length'] * 50 / 100 <= runtime):
return
if dry_run:
print _("Would submit %s to last.fm "
"with a total runtime of %d seconds.") % (song, runtime)
else:
print _("Will submit %s to last.fm "
"with a total runtime of %d seconds.") % (song, runtime)
lfm.scrobble(
artist=song['artist'],
title=song['title'],
timestamp=int(start_time),
)
class Music(object):
def __init__(self, settings, dry_run=False):
self._running = {}
self._dry_run = dry_run
self._settings = settings
def gc(self, current, rotate=3): # FIXME: what if rotate is 1 or 0?
"""
Garbage collector - will ensure that the maintained file
dictionary doesn't start to grow to the size of an entire
planetary system :-D
"""
if len(self._running) < rotate:
return
for fn, st in self._running.iteritems():
if fn == current:
continue
if st == 'delete' or st < self._running.get(current, 0):
if DEBUG:
print "GC: " + _("Removing %s") % fn
del self._running[fn]
else:
continue
return self.gc(current, rotate)
def start(self, filename):
if self._running.get(filename, None) == 'munge':
self._running[filename] = 'delete'
return
self.gc(filename)
self._running[filename] = time.time()
print _("Started %s!") % filename
def stop(self, filename):
if filename not in self._running:
return
if self._running[filename] in ('delete', 'munge'):
return
start_time = self._running[filename]
runtime = time.time() - start_time
if runtime <= 30:
if DEBUG:
print _("File %s discarded!") % filename
del self._running[filename]
return
to_lastfm(filename, start_time, runtime, self._settings,
dry_run=self._dry_run)
self._running[filename] = 'munge'
print _("Stopped %s!") % filename
class Handler(ProcessEvent):
def __init__(self, settings, dry_run=False):
self._active = False
self._music = Music(settings, dry_run=dry_run)
def set_active(self):
self._active = True
def process_default(self, event_k):
if not self._active:
return
if DEBUG:
print _("Untrapped event: %s") % event_k
def allowed_file(self, event_k):
"""
We can only handle OGG, MP3 and FLAC files, so we'll check if
the suffix matches in the data we got back from inotify =)
"""
suffix = os.path.splitext(event_k.name)[1][1:].lower()
if suffix in ('ogg', 'oga', 'mp3', 'flac'):
return True
return False
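    # An IN_OPEN event marks the player starting a track; IN_CLOSE_NOWRITE
    # (a file opened read-only being closed) marks the end of playback.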
def process_IN_OPEN(self, event_k):
if self._active and self.allowed_file(event_k):
self._music.start(os.path.join(event_k.path, event_k.name))
def process_IN_CLOSE_NOWRITE(self, event_k):
if self._active and self.allowed_file(event_k):
self._music.stop(os.path.join(event_k.path, event_k.name))
def lastwatch(paths, settings, dry_run=False):
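    # Newer pyinotify exposes the event flags via FLAG_COLLECTIONS; fall back
    # to the legacy EventsCodes attributes when that mapping is unavailable.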
flags = EventsCodes.FLAG_COLLECTIONS.get('OP_FLAGS', None)
if flags:
mask = flags.get('IN_OPEN') | flags.get('IN_CLOSE_NOWRITE')
mask |= flags.get('IN_CREATE') | flags.get('IN_MOVED_TO')
else:
mask = EventsCodes.IN_OPEN | EventsCodes.IN_CLOSE_NOWRITE
mask |= EventsCodes.IN_CREATE | EventsCodes.IN_MOVED_TO
wm = WatchManager()
handler = Handler(settings, dry_run=dry_run)
watcher = ThreadedNotifier(wm, handler)
watcher.start()
try:
for path in paths:
path = os.path.realpath(path)
sys.stdout.write(_("Indexing %s for watching...") % path)
sys.stdout.flush()
wm.add_watch(path, mask, rec=True, auto_add=True)
sys.stdout.write(_(" done.") + "\n")
print _("You have successfully launched Lastwatch.")
print "\n".join(wrap(_("The directories you have specified will be "
"monitored as long as this process is running, "
"the flowers are blooming and the earth "
"revolves around the sun..."), 80))
# flowers to devhell ;-)
handler.set_active()
while True:
time.sleep(1)
except KeyboardInterrupt:
watcher.stop()
print _("LastWatch stopped.")
return
except Exception, err:
print err
def daemonize(stdin='/dev/null', stdout='/dev/null', stderr='/dev/null'):
"""
Fork the current process and redirect all file descriptors
to the appropriate devices or files (default is /dev/null).
"""
try:
pid = os.fork()
if pid > 0:
sys.exit(0)
except OSError, err:
sys.stderr.write(_("We cannot escape into the background: %s")
% err.strerror + "\n")
sys.exit(1)
# flush the standard output queue
for f in sys.stdout, sys.stderr:
f.flush()
si = file(stdin, 'r')
so = file(stdout, 'a+')
se = file(stderr, 'a+', 0)
# redirect them all to /dev/null (or any other file/device)
os.dup2(si.fileno(), sys.stdin.fileno())
os.dup2(so.fileno(), sys.stdout.fileno())
os.dup2(se.fileno(), sys.stderr.fileno())
def suicide(signum, frame):
watcher.stop()
sys.exit(0)
class Settings(ConfigParser.ConfigParser):
def __init__(self, cfgfile):
ConfigParser.ConfigParser.__init__(self)
self._cfgfile = cfgfile
self.read(cfgfile)
if not self.has_section('lastfm'):
self.prompt_credentials()
def prompt_credentials(self):
uname = raw_input("last.fm username: ")
passwd = raw_input("last.fm password: ")
self.add_section('lastfm')
self.set('lastfm', 'user', uname)
self.set('lastfm', 'passwd', passwd)
with open(self._cfgfile, 'wb') as cfg:
self.write(cfg)
class LWOpts(OptionParser):
def __init__(self):
usage = _("Usage: %prog [options] directories...")
version = _("%%prog version %s") % LASTWATCH_VERSION
OptionParser.__init__(self, usage=usage, version=version)
self.add_option(
"-v", "--verbose",
action="store_true", dest="verbose", default=False,
help=_("Be verbose about what's happening (especially "
"about the garbage collector).")
)
self.add_option(
"-n", "--dry-run",
action="store_true", dest="dryrun", default=False,
help=_("Do not submit any titles to last.fm.")
)
self.add_option(
"-b", "--background",
action="store_true", dest="detach", default=False,
help=_("Fork into the background.")
)
self.add_option(
"-c", "--config", metavar="FILE",
dest="cfgfile", default="~/.lastwatchrc",
help=_("Specify configuration file at FILE instead of "
"the default location at \"%default\".")
)
def main():
parser = LWOpts()
options, args = parser.parse_args()
if len(args) < 1:
parser.error(_("No directories specified!"))
if options.detach:
daemonize()
signal.signal(signal.SIGINT, suicide)
    if options.verbose:
        global DEBUG
        DEBUG = True
settings = Settings(os.path.expanduser(options.cfgfile))
if options.dryrun:
lastwatch(args, settings, dry_run=True)
else:
lastwatch(args, settings)
if __name__ == '__main__':
main()
| gpl-3.0 |
lakshmi-kannan/st2 | st2common/st2common/transport/reactor.py | 9 | 4006 | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from kombu import Exchange, Queue
from st2common import log as logging
from st2common.constants.trace import TRACE_CONTEXT
from st2common.models.api.trace import TraceContext
from st2common.transport import publishers
from st2common.transport import utils as transport_utils
__all__ = [
'TriggerCUDPublisher',
'TriggerInstancePublisher',
'TriggerDispatcher',
'get_sensor_cud_queue',
'get_trigger_cud_queue',
'get_trigger_instances_queue'
]
LOG = logging.getLogger(__name__)
# Exchange for Trigger CUD events
TRIGGER_CUD_XCHG = Exchange('st2.trigger', type='topic')
# Exchange for TriggerInstance events
TRIGGER_INSTANCE_XCHG = Exchange('st2.trigger_instances_dispatch', type='topic')
# Exchange for Sensor CUD events
SENSOR_CUD_XCHG = Exchange('st2.sensor', type='topic')
class SensorCUDPublisher(publishers.CUDPublisher):
"""
    Publisher responsible for publishing Sensor model CUD events.
"""
def __init__(self, urls):
super(SensorCUDPublisher, self).__init__(urls, SENSOR_CUD_XCHG)
class TriggerCUDPublisher(publishers.CUDPublisher):
"""
Publisher responsible for publishing Trigger model CUD events.
"""
def __init__(self, urls):
super(TriggerCUDPublisher, self).__init__(urls, TRIGGER_CUD_XCHG)
class TriggerInstancePublisher(object):
def __init__(self, urls):
self._publisher = publishers.PoolPublisher(urls=urls)
def publish_trigger(self, payload=None, routing_key=None):
# TODO: We should use trigger reference as a routing key
self._publisher.publish(payload, TRIGGER_INSTANCE_XCHG, routing_key)
class TriggerDispatcher(object):
"""
This trigger dispatcher dispatches trigger instances to a message queue (RabbitMQ).
"""
def __init__(self, logger=LOG):
self._publisher = TriggerInstancePublisher(urls=transport_utils.get_messaging_urls())
self._logger = logger
def dispatch(self, trigger, payload=None, trace_context=None):
"""
Method which dispatches the trigger.
:param trigger: Full name / reference of the trigger.
:type trigger: ``str`` or ``object``
:param payload: Trigger payload.
:type payload: ``dict``
:param trace_context: Trace context to associate with Trigger.
:type trace_context: ``TraceContext``
"""
assert isinstance(payload, (type(None), dict))
assert isinstance(trace_context, (type(None), TraceContext))
payload = {
'trigger': trigger,
'payload': payload,
TRACE_CONTEXT: trace_context
}
routing_key = 'trigger_instance'
self._logger.debug('Dispatching trigger (trigger=%s,payload=%s)', trigger, payload)
self._publisher.publish_trigger(payload=payload, routing_key=routing_key)
def get_trigger_cud_queue(name, routing_key, exclusive=False):
return Queue(name, TRIGGER_CUD_XCHG, routing_key=routing_key, exclusive=exclusive)
def get_trigger_instances_queue(name, routing_key):
return Queue(name, TRIGGER_INSTANCE_XCHG, routing_key=routing_key)
def get_sensor_cud_queue(name, routing_key):
return Queue(name, SENSOR_CUD_XCHG, routing_key=routing_key)
| apache-2.0 |
redhat-openstack/cinder | cinder/tests/utils.py | 6 | 4261 | # Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from cinder import context
from cinder import db
def get_test_admin_context():
return context.get_admin_context()
def create_volume(ctxt,
host='test_host',
display_name='test_volume',
display_description='this is a test volume',
status='available',
migration_status=None,
size=1,
availability_zone='fake_az',
volume_type_id=None,
replication_status='disabled',
replication_extended_status=None,
replication_driver_data=None,
consistencygroup_id=None,
**kwargs):
"""Create a volume object in the DB."""
vol = {}
vol['size'] = size
vol['host'] = host
vol['user_id'] = ctxt.user_id
vol['project_id'] = ctxt.project_id
vol['status'] = status
vol['migration_status'] = migration_status
vol['display_name'] = display_name
vol['display_description'] = display_description
vol['attach_status'] = 'detached'
vol['availability_zone'] = availability_zone
if consistencygroup_id:
vol['consistencygroup_id'] = consistencygroup_id
if volume_type_id:
vol['volume_type_id'] = volume_type_id
for key in kwargs:
vol[key] = kwargs[key]
vol['replication_status'] = replication_status
vol['replication_extended_status'] = replication_extended_status
vol['replication_driver_data'] = replication_driver_data
return db.volume_create(ctxt, vol)
def create_snapshot(ctxt,
volume_id,
display_name='test_snapshot',
display_description='this is a test snapshot',
status='creating'):
vol = db.volume_get(ctxt, volume_id)
snap = {}
snap['volume_id'] = volume_id
snap['user_id'] = ctxt.user_id
snap['project_id'] = ctxt.project_id
snap['status'] = status
snap['volume_size'] = vol['size']
snap['display_name'] = display_name
snap['display_description'] = display_description
return db.snapshot_create(ctxt, snap)
def create_consistencygroup(ctxt,
host='test_host',
name='test_cg',
description='this is a test cg',
status='available',
availability_zone='fake_az',
volume_type_id=None,
**kwargs):
"""Create a consistencygroup object in the DB."""
cg = {}
cg['host'] = host
cg['user_id'] = ctxt.user_id
cg['project_id'] = ctxt.project_id
cg['status'] = status
cg['name'] = name
cg['description'] = description
cg['availability_zone'] = availability_zone
if volume_type_id:
cg['volume_type_id'] = volume_type_id
for key in kwargs:
cg[key] = kwargs[key]
return db.consistencygroup_create(ctxt, cg)
def create_cgsnapshot(ctxt,
name='test_cgsnap',
description='this is a test cgsnap',
status='available',
consistencygroup_id=None,
**kwargs):
"""Create a cgsnapshot object in the DB."""
cgsnap = {}
cgsnap['user_id'] = ctxt.user_id
cgsnap['project_id'] = ctxt.project_id
cgsnap['status'] = status
cgsnap['name'] = name
cgsnap['description'] = description
cgsnap['consistencygroup_id'] = consistencygroup_id
for key in kwargs:
cgsnap[key] = kwargs[key]
return db.cgsnapshot_create(ctxt, cgsnap)
| apache-2.0 |
RalfJung/dudel | dudel/__init__.py | 1 | 1320 | from raven.contrib.flask import Sentry
from flask import Flask
from flask.ext.babel import Babel
from flask.ext.sqlalchemy import SQLAlchemy
from flask.ext.markdown import Markdown
from flask.ext.login import LoginManager
from flask.ext.gravatar import Gravatar
from flask.ext.migrate import Migrate, MigrateCommand
from flask.ext.script import Manager
from flask.ext.mail import Mail
import pytz
app = Flask(__name__)
app.config.from_pyfile("../config.py.example", silent=True)
app.config.from_pyfile("../config.py", silent=True)
app.jinja_env.add_extension('pyjade.ext.jinja.PyJadeExtension')
manager = Manager(app)
db = SQLAlchemy(app)
markdown = Markdown(app, safe_mode="escape")
login_manager = LoginManager(app)
sentry = Sentry(app)
gravatar = Gravatar(app, size=48, rating='g', default='identicon', force_default=False, use_ssl=True, base_url=None)
babel = Babel(app)
supported_languages = ['en', 'de']
migrate = Migrate(app, db)
manager.add_command("db", MigrateCommand)
mail = Mail(app)
default_timezone = pytz.timezone(app.config["DEFAULT_TIMEZONE"])
from dudel.util import load_icons
ICONS = load_icons("dudel/icons.txt")
import dudel.assets
import dudel.models
import dudel.forms
import dudel.filters
import dudel.views
import dudel.admin
import dudel.plugins.ldapauth
login_manager.login_view = "login"
| gpl-3.0 |
hectord/lettuce | tests/integration/lib/Django-1.2.5/django/conf/locale/sr_Latn/formats.py | 136 | 1702 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
DATE_FORMAT = 'j. F Y.'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = 'j. F Y. H:i'
YEAR_MONTH_FORMAT = 'F Y.'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'j.m.Y.'
SHORT_DATETIME_FORMAT = 'j.m.Y. H:i'
FIRST_DAY_OF_WEEK = 1
DATE_INPUT_FORMATS = (
'%d.%m.%Y.', '%d.%m.%y.', # '25.10.2006.', '25.10.06.'
'%d. %m. %Y.', '%d. %m. %y.', # '25. 10. 2006.', '25. 10. 06.'
'%Y-%m-%d', # '2006-10-25'
# '%d. %b %y.', '%d. %B %y.', # '25. Oct 06.', '25. October 06.'
# '%d. %b \'%y.', '%d. %B \'%y.', # '25. Oct '06.', '25. October '06.'
# '%d. %b %Y.', '%d. %B %Y.', # '25. Oct 2006.', '25. October 2006.'
)
TIME_INPUT_FORMATS = (
'%H:%M:%S', # '14:30:59'
'%H:%M', # '14:30'
)
DATETIME_INPUT_FORMATS = (
'%d.%m.%Y. %H:%M:%S', # '25.10.2006. 14:30:59'
'%d.%m.%Y. %H:%M', # '25.10.2006. 14:30'
'%d.%m.%Y.', # '25.10.2006.'
'%d.%m.%y. %H:%M:%S', # '25.10.06. 14:30:59'
'%d.%m.%y. %H:%M', # '25.10.06. 14:30'
'%d.%m.%y.', # '25.10.06.'
'%d. %m. %Y. %H:%M:%S', # '25. 10. 2006. 14:30:59'
'%d. %m. %Y. %H:%M', # '25. 10. 2006. 14:30'
'%d. %m. %Y.', # '25. 10. 2006.'
'%d. %m. %y. %H:%M:%S', # '25. 10. 06. 14:30:59'
'%d. %m. %y. %H:%M', # '25. 10. 06. 14:30'
'%d. %m. %y.', # '25. 10. 06.'
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
)
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
| gpl-3.0 |
CedricVallee/pythonFinancialAnalyst | FinancialAnalystV3/4.fitModels/Fitter.py | 1 | 1447 | # -*- coding: utf-8 -*-
"""
Created on Sat Mar 12 2016
Author: Cedric Vallee
"""
import pandas as pd
from sklearn.cross_validation import train_test_split
from sklearn.metrics import classification_report
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
# Function to split data between a training and a testing set
def split(df,test_ratio):
return train_test_split(df, test_size = test_ratio)
# Function to create Classification Report (Group 7)
def fit_model(train, test, target, variables, classifier):
tarTrain = train.as_matrix(target)
varTrain = train.as_matrix(variables)
classifier.fit(varTrain,tarTrain)
varTest = test.as_matrix(variables)
predictions = classifier.predict(varTest)
# Print confusion matrix
tab = pd.crosstab(test['Actual'], predictions, rownames=['Actual'], colnames=['Predicted'], margins=True)
print(tab)
# Print accuracy, precision, recall, F measure
print(classification_report(test['Actual'], predictions))
a=accuracy_score(test['Actual'],predictions)
p=precision_score(test['Actual'],predictions, pos_label = "pos")
r=recall_score(test['Actual'].values,predictions, pos_label = "pos")
f=f1_score(test['Actual'].values,predictions, pos_label = "pos")
print "Accuracy = ",a,"\nPrecision =",p,"\nRecall = ",r,"\nF-Score = ",f
| mit |
gvpeek/django_football | django_football/django_football/urls.py | 1 | 1148 | from django.conf.urls import patterns, include, url
from django.conf.urls.static import static
from django.conf import settings
import core.views
import leagues.views
import teams.views
import stats.views
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
url(r'^$', core.views.index, name='index'),
url(r'^universe/', include('core.urls')),
url(r'^league/', include('leagues.urls')),
url(r'^people/', include('people.urls')),
url(r'team/', include('teams.urls')),
url(r'^stats/', include('stats.urls')),
url(r'^admin/', include(admin.site.urls)),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
)
# Uncomment the next line to serve media files in dev.
# urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
import debug_toolbar
urlpatterns += patterns('',
url(r'^__debug__/', include(debug_toolbar.urls)),
)
| mit |
fedora-conary/conary | conary_test/cvctest/usergrouptest.py | 2 | 19583 | #
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from conary.build import cook, policy, usergroup
from conary import files, versions
from conary_test import rephelp
class PwClass:
def restore(self):
files.userCache.nameLookupFn = self.real
def getpwnam(self, user):
if user == 'root':
return (None, None, 0)
f = file(self.root + '/etc/passwd')
assert (f.readlines() == [
'root:*:0:0:root:/root:/bin/bash\n',
'foo:$1$XzHooEIT$hszQQcxv6tokTs46604IW1:1000:1000::/usr/share/foo:/bin/foosh\n',
])
return (None, None, 1000)
def __init__(self, root):
self.real = files.userCache.nameLookupFn
self.root = root
files.userCache.nameLookupFn = self.getpwnam
class UserGroupInfoRecipeTest(rephelp.RepositoryHelper):
def testUserInfoRecipe(self):
recipestr1 = """
class TestUser(UserInfoRecipe):
clearBuildReqs()
name = 'info-foo'
version = '1'
def setup(r):
r.macros.foo = 'foo'
r.macros.bar = 'bar'
r.User('%(foo)s', 1000, group='%(bar)s', homedir='%(datadir)s/%(foo)s',
shell='%(essentialbindir)s/foosh',
saltedPassword='$1$XzHooEIT$hszQQcxv6tokTs46604IW1')
"""
self.reset()
# do one test with logBuild because this code path is important
# and has broken more than once
(built, d) = self.buildRecipe(recipestr1, "TestUser", logBuild=True)
self.assertEquals(len(built), 2)
for p in built:
self.updatePkg(self.workDir, p[0], p[1])
f = file(self.workDir + '/etc/conary/userinfo/foo')
assert (f.readlines() == [
'PREFERRED_UID=1000\n',
'GROUP=bar\n',
'HOMEDIR=/usr/share/foo\n',
'SHELL=/bin/foosh\n',
'PASSWORD=$1$XzHooEIT$hszQQcxv6tokTs46604IW1\n'])
f.close()
f = file(self.workDir + '/etc/passwd')
assert (f.readlines() == [
'root:*:0:0:root:/root:/bin/bash\n',
'foo:$1$XzHooEIT$hszQQcxv6tokTs46604IW1:1000:1000::/usr/share/foo:/bin/foosh\n',
])
f.close()
f = file(self.workDir + '/etc/group')
assert (f.readlines() == [
'root:*:0:root\n',
'bar:*:1000:\n',
])
f.close()
# test that the right dependencies are attached
pathsFound = []
repos = self.openRepository()
for name, version, flavor in built:
version = versions.VersionFromString(version)
trove = repos.getTrove(name, version, flavor)
for pathId, path, fileId, version, fileObj in repos.iterFilesInTrove(
trove.getName(), trove.getVersion(), trove.getFlavor(),
withFiles=True):
prov = str(fileObj.provides())
req = str(fileObj.requires())
pathsFound.append(path)
if path == '/etc/conary/userinfo/foo':
self.failUnless(prov.find('userinfo: foo') != -1)
self.failUnless(req.find('groupinfo: bar') != -1)
elif path == '/etc/conary/groupinfo/bar':
self.failUnless(prov.find('groupinfo: bar') != -1)
self.failUnless('/etc/conary/userinfo/foo' in pathsFound)
self.failUnless('/etc/conary/groupinfo/bar' in pathsFound)
# now test installing the info-foo package along with a package
# which requires it from a single change set, and make sure ownership
# would have been set properly
foorecipe = """
class FooRecipe(PackageRecipe):
clearBuildReqs()
name = 'foo'
version = '1'
def setup(r):
r.Create('/foo', contents = "contents")
r.Ownership('foo', 'root', '/foo')
"""
(built, d) = self.buildRecipe(foorecipe, "FooRecipe", logBuild=True)
csPath = self.workDir + '/test.ccs'
self.resetRoot()
# this makes sure that the /etc/passwd is correct before we try and
# lookup foo in the user database. what a hack.
self.resetRoot()
c = PwClass(self.rootDir)
self.mimicRoot()
self.updatePkg(self.rootDir, 'foo', resolve=True)
c.restore()
self.realRoot()
f = file(self.rootDir + '/etc/passwd')
assert (f.readlines() == [
'root:*:0:0:root:/root:/bin/bash\n',
'foo:$1$XzHooEIT$hszQQcxv6tokTs46604IW1:1000:1000::/usr/share/foo:/bin/foosh\n',
])
recipestr2 = """
class TestBadUser(UserInfoRecipe):
clearBuildReqs()
name = 'info-foo'
version = '1'
def setup(r):
r.User('blah', 1000, group='bar', homedir='%(datadir)s/foo',
shell='%(essentialbindir)s/foosh')
"""
self.assertRaises(usergroup.UserGroupError, self.buildRecipe,
recipestr2, "TestBadUser")
recipestr3 = """
class TestBad2User(UserInfoRecipe):
clearBuildReqs()
name = 'info-foo'
version = '1'
def setup(r):
r.User('foo', 1000, group='bar', homedir='%(datadir)s/foo',
shell='%(essentialbindir)s/foosh')
r.User('foo', 1000, group='bar', homedir='%(datadir)s/foo',
shell='%(essentialbindir)s/foosh')
"""
self.assertRaises(usergroup.UserGroupError, self.buildRecipe,
recipestr3, "TestBad2User")
def testUserInfoWithExistingDefaultGroup(self):
recipestr1 = """
class TestUser(UserInfoRecipe):
clearBuildReqs()
name = 'info-foo'
version = '1'
clearBuildReqs()
def setup(r):
r.macros.foo = 'foo'
r.macros.bar = 'bar'
r.User('%(foo)s', 1000, group='root')
"""
self.reset()
(built, d) = self.buildRecipe(recipestr1, "TestUser")
self.updatePkg(self.workDir, 'info-foo', resolve=True)
f = file(self.workDir + '/etc/conary/userinfo/foo')
assert (f.readlines() == [
'PREFERRED_UID=1000\n',
'GROUP=root\n',
'SHELL=/sbin/nologin\n',])
f.close()
f = file(self.workDir + '/etc/passwd')
assert (f.readlines() == [
'root:*:0:0:root:/root:/bin/bash\n',
'foo:*:1000:0::/:/sbin/nologin\n',
])
f.close()
f = file(self.workDir + '/etc/group')
assert (f.readlines() == [
'root:*:0:root\n',
])
f.close()
def testUserInfoRecipeWithSupplemental(self):
recipestr0 = """
class TestSupplementalGroupUser(UserInfoRecipe):
clearBuildReqs()
name = 'info-bar'
version = '1'
def setup(r):
r.User('bar', 999)
"""
recipestr1 = """
class TestSupplementalGroupUser(UserInfoRecipe):
clearBuildReqs()
name = 'info-baz'
version = '1'
def setup(r):
r.User('baz', 998)
"""
recipestr2 = """
class TestUser(UserInfoRecipe):
clearBuildReqs()
name = 'info-foo'
version = '1'
def setup(r):
r.macros.foo = 'foo'
r.macros.bar = 'bar'
r.macros.baz = 'baz'
r.User('%(foo)s', 1000, groupid=998, # test group ID allocation
supplemental=['%(bar)s', '%(baz)s'])
"""
recipestr3 = """
class TestUser(UserInfoRecipe):
clearBuildReqs()
name = 'info-fdsa'
version = '1'
def setup(r):
r.macros.fdsa = 'fdsa'
r.macros.bar = 'bar'
r.macros.baz = 'baz'
r.User('%(fdsa)s', 1000, groupid=998, # test ID allocation
supplemental=['%(bar)s'])
"""
self.reset()
for recipestr in [recipestr0, recipestr1]:
built, d = self.buildRecipe(recipestr, "TestSupplementalGroupUser")
for p in built:
self.updatePkg(self.workDir, p[0], p[1], resolve=True)
for recipestr in [recipestr2, recipestr3]:
built, d = self.buildRecipe(recipestr, "TestUser")
for p in built:
self.updatePkg(self.workDir, p[0], p[1], resolve=True)
f = file(self.workDir + '/etc/conary/userinfo/foo')
assert (f.readlines() == [
'PREFERRED_UID=1000\n',
'GROUPID=998\n',
'SHELL=/sbin/nologin\n',
'SUPPLEMENTAL=bar,baz\n',])
f.close()
f = file(self.workDir + '/etc/passwd')
assert (f.readlines() == [
'root:*:0:0:root:/root:/bin/bash\n',
'fdsa:*:1:2::/:/sbin/nologin\n',
'baz:*:998:998::/:/sbin/nologin\n',
'bar:*:999:999::/:/sbin/nologin\n',
'foo:*:1000:1::/:/sbin/nologin\n',
])
f.close()
f = file(self.workDir + '/etc/group')
assert (f.readlines() == [
'root:*:0:root\n',
'foo:*:1:\n',
'fdsa:*:2:\n',
'baz:*:998:foo\n',
'bar:*:999:foo,fdsa\n',
])
f.close()
def testGroupInfoRecipe(self):
recipestr1 = """
class TestGroup(GroupInfoRecipe):
name = 'info-foo'
version = '1'
clearBuildReqs()
def setup(r):
r.macros.foo = 'foo'
r.Group('%(foo)s', 1000)
"""
self.reset()
(built, d) = self.buildRecipe(recipestr1, "TestGroup")
for p in built:
self.updatePkg(self.workDir, p[0], p[1])
f = file(self.workDir + '/etc/conary/groupinfo/foo')
assert (f.readlines() == ['PREFERRED_GID=1000\n'])
f.close()
f = file(self.workDir + '/etc/group')
assert (f.readlines() == [
'root:*:0:root\n',
'foo:*:1000:\n',
])
f.close()
recipestr2 = """
class TestBadGroup(GroupInfoRecipe):
name = 'info-foo'
version = '1'
clearBuildReqs()
def setup(r):
r.Group('blah', 1000)
"""
self.assertRaises(usergroup.UserGroupError, self.buildRecipe,
recipestr2, "TestBadGroup")
recipestr3 = """
class TestBad2Group(GroupInfoRecipe):
name = 'info-foo'
version = '1'
clearBuildReqs()
def setup(r):
r.Group('foo', 1000)
r.SupplementalGroup('foo', 'bar', 999)
"""
self.assertRaises(usergroup.UserGroupError, self.buildRecipe,
recipestr3, "TestBad2Group")
recipestr4 = """
class TestBad2Group(GroupInfoRecipe):
name = 'info-foo'
version = '1'
clearBuildReqs()
def setup(r):
r.Group('foo', 1000)
r.Group('foo', 999)
"""
self.assertRaises(usergroup.UserGroupError, self.buildRecipe,
recipestr4, "TestBad2Group")
def testSupplementalGroupInfoRecipe(self):
recipestr0 = """
class TestSupplementalGroupUser(UserInfoRecipe):
clearBuildReqs()
name = 'info-bar'
version = '1'
def setup(r):
r.User('bar', 999)
"""
recipestr1 = """
class TestSupplementalGroup(GroupInfoRecipe):
clearBuildReqs()
name = 'info-foo'
version = '1'
def setup(r):
r.macros.foo = 'foo'
r.macros.bar = 'bar'
r.SupplementalGroup('%(bar)s', '%(foo)s', 1000)
"""
self.reset()
# satisfy dependency
builtb, d = self.buildRecipe(recipestr0, "TestSupplementalGroupUser")
for p in builtb:
self.updatePkg(self.workDir, p[0], p[1])
# now the group we are testing
(built, d) = self.buildRecipe(recipestr1, "TestSupplementalGroup")
for p in built:
self.updatePkg(self.workDir, p[0], p[1])
f = file(self.workDir + '/etc/conary/groupinfo/foo')
assert (f.readlines() == [
'PREFERRED_GID=1000\n',
'USER=bar\n'])
f.close()
f = file(self.workDir + '/etc/group')
assert (f.readlines() == [
'root:*:0:root\n',
'bar:*:999:\n',
'foo:*:1000:bar\n',
])
f.close()
# now test if a group already exists
self.resetWork()
for p in builtb:
self.updatePkg(self.workDir, p[0], p[1])
f = file(self.workDir + '/etc/group', 'a')
f.write('asdf:*:1000:\n')
f.close()
for p in built:
self.updatePkg(self.workDir, p[0], p[1])
f = file(self.workDir + '/etc/group')
assert (f.readlines() == [
'root:*:0:root\n',
'foo:*:1:bar\n',
'bar:*:999:\n',
'asdf:*:1000:\n',
])
def testBadPassword(self):
recipestr1 = """
class TestBadPassword(UserInfoRecipe):
clearBuildReqs()
name = 'info-foo'
version = '1'
clearBuildReqs()
def setup(r):
r.User('foo', 1000, group='bar', homedir='%(datadir)s/foo',
saltedPassword='foo')
"""
e = self.assertRaises(usergroup.UserGroupError,
self.buildRecipe, recipestr1, "TestBadPassword")
self.assertEqual(str(e),
'"foo" is not a valid md5 salted password. Use md5pw (installed with conary) to create a valid password.')
def testSupplementalGroupInfoRecipeOrdering(self):
recipestr0 = """
class TestSupplementalGroupUser(UserInfoRecipe):
clearBuildReqs()
name = 'info-adm'
version = '1'
def setup(r):
r.User('adm', 3, group='adm', groupid=4,
supplemental=['sys'],
homedir='%(localstatedir)s/adm')
"""
recipestr1 = """
class TestSupplementalGroup(GroupInfoRecipe):
clearBuildReqs()
name = 'info-sys'
version = '1'
def setup(r):
r.Group('sys', 3)
"""
built, d = self.buildRecipe(recipestr0, "TestSupplementalGroupUser")
built, d = self.buildRecipe(recipestr1, "TestSupplementalGroup")
rc, str = self.captureOutput(self.updatePkg, self.workDir,
'info-adm', resolve=True)
f = file(self.workDir + '/etc/group')
lines = f.readlines()
assert (lines == [
'root:*:0:root\n',
'sys:*:3:adm\n',
'adm:*:4:\n',
])
f.close()
f = file(self.workDir + '/etc/passwd')
lines = f.readlines()
assert (lines == [
'root:*:0:0:root:/root:/bin/bash\n',
'adm:*:3:4::/var/adm:/sbin/nologin\n'
])
f.close()
def testUserInfoRecipeWithExternalGroup(self):
recipestr1 = """
class TestUser(UserInfoRecipe):
clearBuildReqs()
name = 'info-foo'
version = '1'
def setup(r):
r.macros.foo = 'foo'
r.macros.bar = 'bar'
r.User('%(foo)s', 1000, group='%(bar)s', provideGroup=False)
"""
self.reset()
# do one test with logBuild because this code path is important
# and has broken
(built, d) = self.buildRecipe(recipestr1, "TestUser", logBuild=True)
# test that the right dependencies are attached
repos = self.openRepository()
(name, version, flavor) = built[0]
version = versions.VersionFromString(version)
trove = repos.getTrove(name, version, flavor)
pathsFound = []
for pathId, path, fileId, version, fileObj in repos.iterFilesInTrove(
trove.getName(), trove.getVersion(), trove.getFlavor(),
withFiles=True):
req = str(fileObj.requires())
prov = str(fileObj.provides())
pathsFound.append(path)
if path == '/etc/conary/userinfo/foo':
assert prov.find('userinfo: foo') != -1, prov
assert req.find('groupinfo: bar') != -1, req
assert '/etc/conary/userinfo/foo' in pathsFound, pathsFound
def testBadCommand(self):
recipestr1 = """
class TestUser(UserInfoRecipe):
clearBuildReqs()
name = 'info-foo'
version = '1'
def setup(r):
r.macros.foo = 'foo'
r.macros.bar = 'bar'
r.User('%(foo)s', 1000, group='%(bar)s', provideGroup=False)
# ensure unifying Info Recipes with PackageRecipe didn't allow for
# more than we intended CNY-2723
r.Create('/etc/foo')
"""
err = self.assertRaises(cook.CookError, self.buildRecipe,
recipestr1, "TestUser", logBuild=True)
self.assertFalse("AttributeError: 'TestUser' object has no " \
"attribute 'Create'" not in str(err))
def testUserPolicyInvocation(self):
recipestr1 = r"""
class TestUserInfo(UserInfoRecipe):
name = 'info-foo'
version = '0'
clearBuildReqs()
def setup(r):
r.User('foo', 11)
# policies exist, even though they're not advertised. ensure they can't
# do any harm
r.PackageSpec('manpage', '.*')
"""
built, d = self.buildRecipe(recipestr1, "TestUserInfo")
self.assertEquals(built[0][0], 'info-foo:group')
self.assertEquals(built[1][0], 'info-foo:user')
def testUserPolicyInvocation2(self):
recipestr1 = r"""
class TestUserInfo(UserInfoRecipe):
name = 'info-foo'
version = '0'
clearBuildReqs()
def setup(r):
r.User('foo', 11)
# policies exist, even though they're not advertised. ensure they can't
# do any harm
r.ComponentSpec('manpage:foo', '.*')
"""
built, d = self.buildRecipe(recipestr1, "TestUserInfo")
self.assertEquals(built[0][0], 'info-foo:group')
self.assertEquals(built[1][0], 'info-foo:user')
def testUserMissingParams(self):
recipestr1 = r"""
class TestUserInfo(UserInfoRecipe):
name = 'info-test'
version = '0'
clearBuildReqs()
def setup(r):
r.User()
"""
err = self.assertRaises(cook.CookError, self.buildRecipe,
recipestr1, "TestUserInfo")
recipestr2 = r"""
class TestUserInfo(UserInfoRecipe):
name = 'info-test'
version = '0'
clearBuildReqs()
def setup(r):
r.User('foo')
"""
err = self.assertRaises(cook.CookError, self.buildRecipe,
recipestr2, "TestUserInfo")
def testGroupMissingParams(self):
recipestr1 = r"""
class TestGroupInfo(GroupInfoRecipe):
name = 'info-test'
version = '0'
clearBuildReqs()
def setup(r):
r.Group()
"""
err = self.assertRaises(cook.CookError, self.buildRecipe,
recipestr1, "TestGroupInfo")
recipestr2 = r"""
class TestGroupInfo(GroupInfoRecipe):
name = 'info-test'
version = '0'
clearBuildReqs()
def setup(r):
r.Group('foo')
"""
err = self.assertRaises(cook.CookError, self.buildRecipe,
recipestr2, "TestGroupInfo")
recipestr3 = r"""
class TestGroupInfo(GroupInfoRecipe):
name = 'info-test'
version = '0'
clearBuildReqs()
def setup(r):
r.Group(badParam = 'foo')
"""
err = self.assertRaises(cook.CookError, self.buildRecipe,
recipestr3, "TestGroupInfo")
def testBasePolicyClass(self):
class DummyPolicy(policy.UserGroupBasePolicy):
def __init__(x): pass
def error(x, msg):
self.assertEquals(msg, 'Do not directly invoke DummyPolicy')
pol = DummyPolicy()
pol.updateArgs('test')
| apache-2.0 |
patriciolobos/desa8 | openerp/addons/base_report_designer/plugin/openerp_report_designer/bin/script/ModifyExistingReport.py | 384 | 8450 | #########################################################################
#
# Copyright (c) 2003-2004 Danny Brewer d29583@groovegarden.com
# Copyright (C) 2004-2010 OpenERP SA (<http://openerp.com>).
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# See: http://www.gnu.org/licenses/lgpl.html
#
#############################################################################
import uno
import string
import unohelper
import xmlrpclib
import base64, tempfile
from com.sun.star.task import XJobExecutor
import os
import sys
if __name__<>'package':
from lib.gui import *
from lib.error import *
from LoginTest import *
from lib.logreport import *
from lib.rpc import *
database="test"
uid = 3
class ModifyExistingReport(unohelper.Base, XJobExecutor):
def __init__(self, ctx):
self.ctx = ctx
self.module = "openerp_report"
self.version = "0.1"
LoginTest()
if not loginstatus and __name__=="package":
exit(1)
self.win = DBModalDialog(60, 50, 180, 120, "Modify Existing Report")
self.win.addFixedText("lblReport", 2, 3, 60, 15, "Report Selection")
self.win.addComboListBox("lstReport", -1,15,178,80 , False )
self.lstReport = self.win.getControl( "lstReport" )
desktop=getDesktop()
doc = desktop.getCurrentComponent()
docinfo=doc.getDocumentInfo()
self.logobj=Logger()
self.hostname = docinfo.getUserFieldValue(0)
global passwd
self.password = passwd
global url
self.sock=RPCSession(url)
        # Open a new connection to the server
ids = self.sock.execute(database, uid, self.password, 'ir.module.module', 'search', [('name','=','base_report_designer'),('state', '=', 'installed')])
if not len(ids):
ErrorDialog("Please install base_report_designer module.", "", "Module Uninstalled Error!")
exit(1)
ids = self.sock.execute(database, uid, self.password, 'ir.actions.report.xml', 'search', [('report_xsl', '=', False),('report_xml', '=', False)])
fields=['id', 'name','report_name','model']
self.reports = self.sock.execute(database, uid, self.password, 'ir.actions.report.xml', 'read', ids, fields)
self.report_with_id = []
for report in self.reports:
if report['name']<>"":
model_ids = self.sock.execute(database, uid, self.password, 'ir.model' , 'search', [('model','=', report['model'])])
model_res_other =self.sock.execute(database, uid, self.password, 'ir.model', 'read', model_ids, [ 'name', 'model' ] )
if model_res_other <> []:
name = model_res_other[0]['name'] + " - " + report['name']
else:
name = report['name'] + " - " + report['model']
self.report_with_id.append( (report['id'], name, report['model'] ) )
self.report_with_id.sort( lambda x, y: cmp( x[1], y[1] ) )
for id, report_name, model_name in self.report_with_id:
self.lstReport.addItem( report_name, self.lstReport.getItemCount() )
self.win.addButton('btnSave',10,-5,50,15,'Open Report' ,actionListenerProc = self.btnOk_clicked )
self.win.addButton('btnCancel',-10 ,-5,50,15,'Cancel' ,actionListenerProc = self.btnCancel_clicked )
self.win.addButton('btnDelete',15 -80 ,-5,50,15,'Delete Report',actionListenerProc = self.btnDelete_clicked)
self.win.doModalDialog("lstReport",self.report_with_id[0][1] )
def btnOk_clicked(self, oActionEvent):
try:
desktop=getDesktop()
doc = desktop.getCurrentComponent()
docinfo=doc.getDocumentInfo()
selectedItemPos = self.win.getListBoxSelectedItemPos( "lstReport" )
id = self.report_with_id[ selectedItemPos ][0]
res = self.sock.execute(database, uid, self.password, 'ir.actions.report.xml', 'report_get', id)
if res['file_type'] in ['sxw','odt'] :
file_type = res['file_type']
else :
file_type = 'sxw'
fp_name = tempfile.mktemp('.'+file_type)
fp_name1="r"+fp_name
fp_path=os.path.join(fp_name1).replace("\\","/")
fp_win=fp_path[1:]
filename = ( os.name == 'nt' and fp_win or fp_name )
if res['report_sxw_content']:
write_data_to_file( filename, base64.decodestring(res['report_sxw_content']))
url = "file:///%s" % filename
arr=Array(makePropertyValue("MediaType","application/vnd.sun.xml.writer"),)
oDoc2 = desktop.loadComponentFromURL(url, "openerp", 55, arr)
docinfo2=oDoc2.getDocumentInfo()
docinfo2.setUserFieldValue(0, self.hostname)
docinfo2.setUserFieldValue(1,self.password)
docinfo2.setUserFieldValue(2,id)
docinfo2.setUserFieldValue(3,self.report_with_id[selectedItemPos][2])
oParEnum = oDoc2.getTextFields().createEnumeration()
while oParEnum.hasMoreElements():
oPar = oParEnum.nextElement()
if oPar.supportsService("com.sun.star.text.TextField.DropDown"):
oPar.SelectedItem = oPar.Items[0]
oPar.update()
if oDoc2.isModified():
if oDoc2.hasLocation() and not oDoc2.isReadonly():
oDoc2.store()
ErrorDialog("Download is completed.","Your file has been placed here :\n ."+ fp_name,"Download Message !")
obj=Logger()
obj.log_write('Modify Existing Report',LOG_INFO, ':successful download report %s using database %s' % (self.report_with_id[selectedItemPos][2], database))
except Exception, e:
ErrorDialog("The report could not be downloaded.", "Report: %s\nDetails: %s" % ( fp_name, str(e) ),"Download Message !")
import traceback,sys
info = reduce(lambda x, y: x+y, traceback.format_exception(sys.exc_type, sys.exc_value, sys.exc_traceback))
self.logobj.log_write('ModifyExistingReport', LOG_ERROR, info)
self.win.endExecute()
def btnCancel_clicked(self, oActionEvent):
self.win.endExecute()
def btnDelete_clicked(self, oActionEvent):
desktop=getDesktop()
doc = desktop.getCurrentComponent()
docinfo=doc.getDocumentInfo()
selectedItemPos = self.win.getListBoxSelectedItemPos( "lstReport" )
name=self.win.getListBoxSelectedItem ("lstReport")
id = self.report_with_id[ selectedItemPos ][0]
temp = self.sock.execute(database, uid, self.password, 'ir.actions.report.xml', 'unlink', id,)
str_value='ir.actions.report.xml,'+str(id)
ids = self.sock.execute(database, uid, self.password, 'ir.values' , 'search',[('value','=',str_value)])
if ids:
rec = self.sock.execute(database, uid, self.password, 'ir.values', 'unlink', ids,)
else :
pass
if temp:
ErrorDialog("Report", "The report could not be deleted:\n"+name+".", "Message !")
self.logobj.log_write('Delete Report', LOG_INFO, ': report %s successfully deleted using database %s.' % (name, database))
else:
ErrorDialog("Report", "The report could not be deleted:\n"+name+".", "Message !")
self.win.endExecute()
if __name__<>"package" and __name__=="__main__":
ModifyExistingReport(None)
elif __name__=="package":
g_ImplementationHelper.addImplementation( ModifyExistingReport, "org.openoffice.openerp.report.modifyreport", ("com.sun.star.task.Job",),)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
denkab/FrameworkBenchmarks | frameworks/Python/AsyncIO/yocto_http/hello/endpoints/world.py | 53 | 1234 | import logging
import asyncio
import ujson
from ..services import queries_number
from ..services.world import get_random_record, get_random_records, update_random_records, get_fortunes
from ..services import redis
LOG = logging.getLogger(__name__)
@asyncio.coroutine
def db(request):
"""Test type 2: Single database query"""
container = request.app.ah_container
return ujson.dumps((yield from get_random_record(container)))
@asyncio.coroutine
def queries(request):
"""Test type 3: Multiple database queries"""
container = request.app.ah_container
limit = queries_number(request.params.get('queries', 1))
return ujson.dumps((yield from get_random_records(container, limit)))
@asyncio.coroutine
def fortunes(request):
"""Test type 4: Fortunes"""
container = request.app.ah_container
template = request.app['j2_env'].get_template('fortunes.html.j2')
return template.render({'fortunes': (yield from get_fortunes(container))})
@asyncio.coroutine
def updates(request):
"""Test type 5: Database updates"""
container = request.app.ah_container
limit = queries_number(request.params.get('queries', 1))
return ujson.dumps((yield from update_random_records(container, limit))) | bsd-3-clause |
kawamon/hue | desktop/libs/libzookeeper/src/libzookeeper/conf.py | 2 | 2261 | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from future import standard_library
standard_library.install_aliases()
import logging
import sys
from desktop.lib.conf import Config, coerce_string
if sys.version_info[0] > 2:
from urllib.parse import urlparse
new_str = str
else:
from urlparse import urlparse
LOG = logging.getLogger(__name__)
def zkensemble():
"""
Try to guess the value if no values are specified.
"""
from django.conf import settings
if 'zookeeper' in settings.INSTALLED_APPS:
try:
# Backward compatibility until Hue 4
from zookeeper.conf import CLUSTERS
clusters = CLUSTERS.get()
if clusters['default'].HOST_PORTS.get() != 'localhost:2181':
return '%s' % clusters['default'].HOST_PORTS.get()
except:
LOG.warning('Could not get zookeeper ensemble from the zookeeper app')
if 'search' in settings.INSTALLED_APPS:
try:
from search.conf import SOLR_URL
parsed = urlparse(SOLR_URL.get())
return "%s:2181" % (parsed.hostname or 'localhost')
except:
LOG.warning('Could not get zookeeper ensemble from the search app')
return "localhost:2181"
ENSEMBLE=Config(
"ensemble",
help="ZooKeeper ensemble. Comma separated list of Host/Port, e.g. localhost:2181,localhost:2182,localhost:2183",
dynamic_default=zkensemble,
type=coerce_string,
)
PRINCIPAL_NAME=Config(
"principal_name",
help="Name of Kerberos principal when using security",
default="zookeeper",
type=str,
)
| apache-2.0 |
wzhfy/spark | examples/src/main/python/mllib/gradient_boosting_regression_example.py | 27 | 2404 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Gradient Boosted Trees Regression Example.
"""
from pyspark import SparkContext
# $example on$
from pyspark.mllib.tree import GradientBoostedTrees, GradientBoostedTreesModel
from pyspark.mllib.util import MLUtils
# $example off$
if __name__ == "__main__":
sc = SparkContext(appName="PythonGradientBoostedTreesRegressionExample")
# $example on$
# Load and parse the data file.
data = MLUtils.loadLibSVMFile(sc, "data/mllib/sample_libsvm_data.txt")
# Split the data into training and test sets (30% held out for testing)
(trainingData, testData) = data.randomSplit([0.7, 0.3])
# Train a GradientBoostedTrees model.
# Notes: (a) Empty categoricalFeaturesInfo indicates all features are continuous.
# (b) Use more iterations in practice.
model = GradientBoostedTrees.trainRegressor(trainingData,
categoricalFeaturesInfo={}, numIterations=3)
# Evaluate model on test instances and compute test error
predictions = model.predict(testData.map(lambda x: x.features))
labelsAndPredictions = testData.map(lambda lp: lp.label).zip(predictions)
testMSE = labelsAndPredictions.map(lambda lp: (lp[0] - lp[1]) * (lp[0] - lp[1])).sum() /\
float(testData.count())
print('Test Mean Squared Error = ' + str(testMSE))
print('Learned regression GBT model:')
print(model.toDebugString())
# Save and load model
model.save(sc, "target/tmp/myGradientBoostingRegressionModel")
sameModel = GradientBoostedTreesModel.load(sc, "target/tmp/myGradientBoostingRegressionModel")
# $example off$
| apache-2.0 |
Manojkumar91/odoo_inresto | addons/website_mail/models/mail_message.py | 7 | 4677 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import SUPERUSER_ID
from openerp.tools import html2plaintext
from openerp.tools.translate import _
from openerp.osv import osv, fields, expression
from openerp.exceptions import AccessError
class MailMessage(osv.Model):
_inherit = 'mail.message'
def _get_description_short(self, cr, uid, ids, name, arg, context=None):
res = dict.fromkeys(ids, False)
for message in self.browse(cr, uid, ids, context=context):
if message.subject:
res[message.id] = message.subject
else:
plaintext_ct = '' if not message.body else html2plaintext(message.body)
res[message.id] = plaintext_ct[:30] + '%s' % (' [...]' if len(plaintext_ct) >= 30 else '')
return res
_columns = {
'description': fields.function(
_get_description_short, type='char',
help='Message description: either the subject, or the beginning of the body'
),
'website_published': fields.boolean(
'Published', help="Visible on the website as a comment", copy=False,
),
}
def default_get(self, cr, uid, fields_list, context=None):
defaults = super(MailMessage, self).default_get(cr, uid, fields_list, context=context)
# Note: explicitly implemented in default_get() instead of _defaults,
# to avoid setting to True for all existing messages during upgrades.
# TODO: this default should probably be dynamic according to the model
# on which the messages are attached, thus moved to create().
if 'website_published' in fields_list:
defaults.setdefault('website_published', True)
return defaults
def _search(self, cr, uid, args, offset=0, limit=None, order=None,
context=None, count=False, access_rights_uid=None):
""" Override that adds specific access rights of mail.message, to restrict
messages to published messages for public users. """
if uid != SUPERUSER_ID:
group_ids = self.pool.get('res.users').browse(cr, uid, uid, context=context).groups_id
group_user_id = self.pool.get("ir.model.data").get_object_reference(cr, uid, 'base', 'group_public')[1]
if group_user_id in [group.id for group in group_ids]:
args = expression.AND([[('website_published', '=', True)], list(args)])
return super(MailMessage, self)._search(cr, uid, args, offset=offset, limit=limit, order=order,
context=context, count=count, access_rights_uid=access_rights_uid)
def check_access_rule(self, cr, uid, ids, operation, context=None):
""" Add Access rules of mail.message for non-employee user:
- read:
- raise if the type is comment and subtype NULL (internal note)
"""
if uid != SUPERUSER_ID:
group_ids = self.pool.get('res.users').browse(cr, uid, uid, context=context).groups_id
group_user_id = self.pool.get("ir.model.data").get_object_reference(cr, uid, 'base', 'group_public')[1]
if group_user_id in [group.id for group in group_ids]:
cr.execute('SELECT id FROM "%s" WHERE website_published IS FALSE AND id = ANY (%%s)' % (self._table), (ids,))
if cr.fetchall():
raise AccessError(_('The requested operation cannot be completed due to security restrictions. Please contact your system administrator.\n\n(Document type: %s, Operation: %s)') % (self._description, operation))
return super(MailMessage, self).check_access_rule(cr, uid, ids=ids, operation=operation, context=context)
| agpl-3.0 |
ftzm/muscleup | django/muscleup/urls.py | 1 | 4708 | """muscleup URL Configuration
The `urlpatterns` list routes URLs to api_views. For more info please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function api_views
1. Add an import: from my_app import api_views
2. Add a URL to urlpatterns: url(r'^$', api_views.home, name='home')
Class-based api_views
1. Add an import: from other_app.api_views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from rest_framework_jwt.views import obtain_jwt_token
from core.views import api_views, web_views
api_v1_patterns = [
url(r'^exercises/$', api_views.ExerciseList.as_view()),
url(r'^exercises/(?P<pk>[0-9]+)/$',
api_views.ExerciseDetail.as_view(),
name='exercises-detail'),
url(r'^upgrades/$', api_views.UpgradeList.as_view()),
url(r'^upgrades/(?P<pk>[0-9]+)/$',
api_views.UpgradeDetail.as_view(),
name='upgrades-detail'),
url(r'^exercises/(?P<pk>[0-9]+)/sets/$',
api_views.ExerciseSetList.as_view()),
url(r'^exercises/(?P<exercise_pk>[0-9]+)/sets/(?P<pk>[0-9]+)/$',
api_views.ExerciseSetDetail.as_view(),
name='exercises-sets-detail'),
url(r'^workouts/$', api_views.WorkoutList.as_view()),
url(r'^workouts/(?P<pk>[0-9]+)/$',
api_views.WorkoutDetail.as_view(),
name='workouts-detail'),
url(r'^workouts/(?P<pk>[0-9]+)/sets/$', api_views.WorkoutSetList.as_view()),
url(r'^workouts/(?P<workout_pk>[0-9]+)/sets/(?P<pk>[0-9]+)/$',
api_views.WorkoutSetDetail.as_view(),
name='workouts-sets-detail'),
url(r'^progressions/$', api_views.ProgressionList.as_view()),
url(r'^progressions/(?P<pk>[0-9]+)/$',
api_views.ProgressionDetail.as_view(),
name='progressions-detail'),
url(r'^progressions/(?P<progression_pk>[0-9]+)/progressionslots/$',
api_views.ProgressionSlotList.as_view(),
name='progressions-progressionslots-list'),
url(r'^progressions/(?P<progression_pk>[0-9]+)/progressionslots/' \
'(?P<pk>[0-9]+)/',
api_views.ProgressionSlotDetail.as_view(),
name='progressions-progressionslots-detail'),
url(r'^routines/$', api_views.RoutineList.as_view()),
url(r'^routinesexpanded/$', api_views.RoutineListExpanded.as_view()),
url(r'^routines/(?P<pk>[0-9]+)/$',
api_views.RoutineDetail.as_view(),
name='routines-detail'),
url(r'^routines/(?P<routine_pk>[0-9]+)/routinedays/$',
api_views.RoutineDayList.as_view(),
name='routines-routineslots-list'),
url(r'^routines/(?P<routine_pk>[0-9]+)/routinedays/' \
'(?P<pk>[0-9]+)/$',
api_views.RoutineDayDetail.as_view(),
name='routines-routinedays-detail'),
url(r'^routines/(?P<routine_pk>[0-9]+)/routinedays/' \
'(?P<pk>[0-9]+)/slots/$',
api_views.RoutineDaySlotList.as_view(),
name='routines-routinedays-slot-list'),
url(r'^routines/(?P<routine_pk>[0-9]+)/routinedays/' \
'(?P<routineday_pk>[0-9]+)/slots/(?P<pk>[0-9]+)/$',
api_views.RoutineDaySlotDetail.as_view(),
name='routines-routinedays-slot-detail'),
]
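# A few example request paths matched by the patterns above (illustrative only),
# once mounted under the 'api-v1/' prefix in urlpatterns below:
#   /api-v1/exercises/                          -> ExerciseList
#   /api-v1/exercises/5/                        -> ExerciseDetail
#   /api-v1/routines/3/routinedays/7/slots/12/  -> RoutineDaySlotDetail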
urlpatterns = [
url(r'^api-v1/', include(api_v1_patterns)),
url(r'^admin/', admin.site.urls),
url(r'^api-auth/', include('rest_framework.urls',
namespace='rest_framework')),
url(r'^progress/$', web_views.Progress.as_view(), name='progress'),
url(r'^exercises/$', web_views.Exercises.as_view(), name='exercises'),
url(r'^add_exercise/$', web_views.AddExercise.as_view(),
name='add_exercise'),
url(r'^routines/$', web_views.Routines.as_view(), name='routines'),
url(r'^delete_exercise/(?P<pk>[0-9]+)/$',
web_views.DeleteExercise.as_view(),
name='delete_exercise'),
url(r'^delete_routine/(?P<pk>[0-9]+)/$',
web_views.DeleteRoutine.as_view(),
name='delete_routine'),
url(r'^add_routinedayslot/$',
web_views.AddRoutinedayslot.as_view(),
name='add_routinedayslot'),
url(r'^delete_routinedayslot/(?P<pk>[0-9]+)/$',
web_views.DeleteRoutinedayslot.as_view(),
name='delete_routinedayslot'),
url(r'^login/$', web_views.Login.as_view(), name='login'),
url(r'^logout/$', web_views.Logout.as_view(), name='logout'),
url(r'^$', web_views.Home.as_view(), name='home'),
    # JWT authentication token
url(r'^api-token-auth/', obtain_jwt_token),
]
| gpl-3.0 |
jumpstarter-io/nova | nova/keymgr/key.py | 156 | 2587 | # Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Base Key and SymmetricKey Classes
This module defines the Key and SymmetricKey classes. The Key class is the base
class to represent all encryption keys. The basis for this class was copied
from Java.
"""
import abc
import six
@six.add_metaclass(abc.ABCMeta)
class Key(object):
"""Base class to represent all keys."""
@abc.abstractmethod
def get_algorithm(self):
"""Returns the key's algorithm.
Returns the key's algorithm. For example, "DSA" indicates that this key
is a DSA key and "AES" indicates that this key is an AES key.
"""
pass
@abc.abstractmethod
def get_format(self):
"""Returns the encoding format.
Returns the key's encoding format or None if this key is not encoded.
"""
pass
@abc.abstractmethod
def get_encoded(self):
"""Returns the key in the format specified by its encoding."""
pass
class SymmetricKey(Key):
"""This class represents symmetric keys."""
def __init__(self, alg, key):
"""Create a new SymmetricKey object.
The arguments specify the algorithm for the symmetric encryption and
the bytes for the key.
"""
self.alg = alg
self.key = key
def get_algorithm(self):
"""Returns the algorithm for symmetric encryption."""
return self.alg
def get_format(self):
"""This method returns 'RAW'."""
return "RAW"
def get_encoded(self):
"""Returns the key in its encoded format."""
return self.key
def __eq__(self, other):
if isinstance(other, SymmetricKey):
return (self.alg == other.alg and
self.key == other.key)
return NotImplemented
def __ne__(self, other):
result = self.__eq__(other)
if result is NotImplemented:
return result
return not result
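# A minimal usage sketch of SymmetricKey (illustrative only; the key material
# below is a placeholder, not a real key).
def _symmetric_key_example():
    key = SymmetricKey('AES', b'\x00' * 32)
    assert key.get_algorithm() == 'AES'
    assert key.get_format() == 'RAW'
    assert key == SymmetricKey('AES', b'\x00' * 32)
    return key.get_encoded()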
| apache-2.0 |
OpenDroneMap/WebODM | app/tests/test_api_admin.py | 1 | 8467 | from django.contrib.auth.models import User, Group
from rest_framework import status
from rest_framework.test import APIClient
from rest_framework_jwt.settings import api_settings
from django.contrib.auth.hashers import check_password
from .classes import BootTestCase
from app.api.admin import UserSerializer, GroupSerializer
class TestApi(BootTestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_user(self):
##
## Super user operation
##
client = APIClient()
super_user_name = 'testsuperuser'
super_user_pass = 'test1234'
# Get token
res = client.post('/api/token-auth/', {
'username': super_user_name,
'password': super_user_pass,
})
self.assertEqual(res.status_code, status.HTTP_200_OK)
super_user_token = res.data['token']
client = APIClient(HTTP_AUTHORIZATION="{0} {1}".format(api_settings.JWT_AUTH_HEADER_PREFIX, super_user_token))
# Can create (active) user
res = client.post('/api/admin/users/', {'username': 'testuser999', 'email': 'testuser999@test.com', 'password': 'test999', 'is_active': True})
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
user = User.objects.get(username='testuser999')
self.assertTrue(user != None)
self.assertFalse(user.is_superuser)
self.assertTrue(user.is_active)
# Can get user
created_user_id = user.id
res = client.get('/api/admin/users/{}/'.format(created_user_id))
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data['username'], user.username)
self.assertEqual(res.data['email'], user.email)
self.assertEqual(res.data['password'], user.password)
self.assertTrue(check_password('test999', user.password))
# Can update user
res = client.put('/api/admin/users/{}/'.format(created_user_id), {'username': 'testuser888', 'email': 'testuser888@test.com', 'password': 'test888'})
self.assertEqual(res.status_code, status.HTTP_200_OK)
user = User.objects.filter(id=created_user_id).first()
self.assertTrue(user != None and (not user.is_superuser))
res = client.get('/api/admin/users/{}/'.format(created_user_id)) # ReGet user
self.assertEqual(res.data['username'], user.username)
self.assertEqual(res.data['email'], user.email)
self.assertEqual(res.data['password'], user.password)
# Can find user by email
res = client.get('/api/admin/users/?email=testuser888@test.com')
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data['count'], 1)
result = res.data['results'][0]
self.assertEqual(result['id'], user.id)
self.assertEqual(result['username'], user.username)
self.assertEqual(result['email'], 'testuser888@test.com')
# Can delete user
res = client.delete('/api/admin/users/{}/'.format(created_user_id))
self.assertEqual(res.status_code, status.HTTP_204_NO_CONTENT)
user = User.objects.filter(id=created_user_id).first()
self.assertTrue(user is None)
##
## user operation
##
client = APIClient()
user_name = 'testuser'
user_pass = 'test1234'
# Get token
res = client.post('/api/token-auth/', {
'username': user_name,
'password': user_pass,
})
self.assertEqual(res.status_code, status.HTTP_200_OK)
user_token = res.data['token']
client = APIClient(HTTP_AUTHORIZATION="{0} {1}".format(api_settings.JWT_AUTH_HEADER_PREFIX, user_token))
# Can't create user
res = client.post('/api/admin/users/', {'username': 'testuser999', 'email': 'testuser999@test.com', 'password': 'test999', 'is_active': True})
self.assertEqual(res.status_code, status.HTTP_403_FORBIDDEN)
user = User.objects.filter(username='testuser999').first()
self.assertTrue(user is None)
user = User.objects.get(username=user_name)
# Can't get user
res = client.get('/api/admin/users/{}/'.format(user.id))
self.assertEqual(res.status_code, status.HTTP_403_FORBIDDEN)
# Can't update user
res = client.put('/api/admin/users/{}/'.format(user.id), {'password': 'changed'})
self.assertEqual(res.status_code, status.HTTP_403_FORBIDDEN)
# Can't delete user
res = client.delete('/api/admin/users/{}/'.format(user.id))
self.assertEqual(res.status_code, status.HTTP_403_FORBIDDEN)
def test_group(self):
##
        ## Super user operation
##
client = APIClient()
super_user_name = 'testsuperuser'
super_user_pass = 'test1234'
# Get token
res = client.post('/api/token-auth/', {
'username': super_user_name,
'password': super_user_pass,
})
self.assertEqual(res.status_code, status.HTTP_200_OK)
super_user_token = res.data['token']
client = APIClient(HTTP_AUTHORIZATION="{0} {1}".format(api_settings.JWT_AUTH_HEADER_PREFIX, super_user_token))
# Can create group
res = client.post('/api/admin/groups/', {'name': 'Test', 'permissions': [53, 54]})
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
group = Group.objects.get(name='Test')
self.assertTrue(group != None)
serializer = GroupSerializer(group)
self.assertEqual([53, 54], serializer.data['permissions'])
# Can get group
created_group_id = group.id
res = client.get('/api/admin/groups/{}/'.format(created_group_id))
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data['name'], group.name)
# Can update group
res = client.put('/api/admin/groups/{}/'.format(created_group_id), {'name': 'TestTest', 'permissions': [37, 38]})
self.assertEqual(res.status_code, status.HTTP_200_OK)
group = Group.objects.filter(id=created_group_id).first()
self.assertTrue(group != None)
serializer = GroupSerializer(group)
res = client.get('/api/admin/groups/{}/'.format(created_group_id)) # ReGet group
self.assertEqual('TestTest', serializer.data['name'])
self.assertEqual([37, 38], serializer.data['permissions'])
# Can find group by name
res = client.get('/api/admin/groups/?name=TestTest')
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data['count'], 1)
result = res.data['results'][0]
self.assertEqual(result['id'], group.id)
self.assertEqual(result['name'], 'TestTest')
# Can delete group
res = client.delete('/api/admin/groups/{}/'.format(created_group_id))
self.assertTrue(res.status_code == status.HTTP_204_NO_CONTENT)
group = Group.objects.filter(id=created_group_id).first()
self.assertTrue(group is None)
##
## user operation
##
client = APIClient()
user_name = 'testuser'
user_pass = 'test1234'
# Get token
res = client.post('/api/token-auth/', {
'username': user_name,
'password': user_pass,
})
self.assertEqual(res.status_code, status.HTTP_200_OK)
user_token = res.data['token']
client = APIClient(HTTP_AUTHORIZATION="{0} {1}".format(api_settings.JWT_AUTH_HEADER_PREFIX, user_token))
# Can't create group
res = client.post('/api/admin/groups/', {'name': 'Test', 'permissions': [53, 54]})
self.assertEqual(res.status_code, status.HTTP_403_FORBIDDEN)
group = Group.objects.filter(name='Test').first()
self.assertTrue(group is None)
group = Group.objects.get(name='Default')
# Can't get group
res = client.get('/api/admin/groups/{}/'.format(group.id))
self.assertEqual(res.status_code, status.HTTP_403_FORBIDDEN)
# Can't update group
res = client.put('/api/admin/groups/{}/'.format(group.id), {'name': 'TestTest', 'permissions': [37, 38]})
self.assertEqual(res.status_code, status.HTTP_403_FORBIDDEN)
# Can't delete group
res = client.delete('/api/admin/groups/{}/'.format(group.id))
self.assertEqual(res.status_code, status.HTTP_403_FORBIDDEN)
| mpl-2.0 |
fighterlyt/bite-project | server/models/storage.py | 17 | 8299 | # Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Describes metadata for the test storage back-end.
We currently use Google Docs as a storage back-end, but
this should be easy to change in the future.
"""
__author__ = 'michaelwill@google.com (Michael Williamson)'
import json
import logging
import re
import uuid
from google.appengine.ext import db
from config import settings
DEFAULT_NUMBER_PER_BATCH = 500
# In the bite test code, any references to legacy wtf ids have
# been replaced with the string 'legacy-<id>'.
# The most common occurrence of this was where a test
# case had a 'call()' statement
# defined inline. This is a regular expression to
# determine if the id passed to an AppEngine handler
# is a legacy id or not.
LEGACY_ID_REGEX = re.compile(r'legacy-([0-9]+)')
class StorageMetadata(db.Model):
"""Stores metadata associated with persistent bite text objects."""
# An id that is independent of the AppEngine datastore.
# When switching out the storage model, be sure to keep
# this field the same, as it may be referenced in
# the test code.
id = db.StringProperty(required=True)
# TODO(michaelwill): Ideally this would be
# a reference property to our project model,
# but at the moment, our project implementation
# is not ready for prime time.
project = db.StringProperty(required=True)
# There are still certain situations where having
# the old legacy wtf id around is useful, but new
# tests should not set this property.
legacy_wtf_id = db.StringProperty()
test = db.TextProperty(required=False)
docs_resource_url = db.StringProperty(required=False)
docs_resource_id = db.StringProperty(required=False)
test_name = db.StringProperty(required=True)
def GetText(self):
"""Retrieves the active revision text blob for this storage entity."""
return self._GetActiveTestVersion()
def _GetActiveTestVersion(self):
"""Gets the active test version."""
result = ''
if self.test:
test = json.loads(self.test)
result = test['active']
return result
def Update(self, new_project, new_name, new_contents):
"""Updates the metadata and Google Docs using a transaction."""
db.run_in_transaction(self._UpdateTransaction,
new_project, new_name, new_contents)
def _UpdateTransaction(self, new_project, new_name, new_contents):
"""This transaction ensures the metadata and Google Docs are in sync."""
self.project = new_project
self.test_name = new_name
self.test = self._UpdateTestMetadata(new_contents)
self.put()
def _UpdateTestMetadata(self, new_contents):
"""Updates the test metadata stored."""
result = ''
if self.test:
cur_test = json.loads(self.test)
cur_test['backup2'] = ''
cur_test['backup1'] = ''
cur_test['active'] = new_contents
result = json.dumps(cur_test)
return result
class ZipData(db.Model):
"""Stores the zip string data."""
json_str = db.TextProperty(required=True)
class ScriptStep(db.Model):
"""Stores the screenshot for a step."""
script_id = db.StringProperty()
step_index = db.StringProperty()
image_url = db.TextProperty()
class ScriptActivity(db.Model):
"""Stores the script activity."""
loaded_times = db.IntegerProperty()
modified = db.DateTimeProperty(required=False, auto_now=True)
def IncreaseAndGetLoadedTimes(id):
"""Gets the total loaded times."""
instance = ScriptActivity.get_or_insert(id + '_activity', loaded_times=0)
instance.loaded_times += 1
instance.put()
return instance.loaded_times
def AddNewScriptStep(id, index, data):
"""Adds a new script step."""
new_step = ScriptStep(script_id=id,
step_index=index,
image_url=data)
new_step.put()
def GetAllSteps(id):
"""Gets all of the screenshots of a script."""
return db.GqlQuery('SELECT * FROM ScriptStep WHERE script_id = :1', id)
def DeleteAllSteps(id):
"""Deletes all of the screenshots of a script."""
keys = db.GqlQuery('SELECT __key__ FROM ScriptStep WHERE script_id = :1', id)
db.delete(keys)
def DeleteAllStepsByScriptIds(ids):
"""Deletes all of the screenshots of the given scripts."""
for id in ids:
DeleteAllSteps(id)
def SaveZipData(json_str):
"""Saves the zip data to db."""
zip = ZipData(json_str=json_str)
return str(zip.put())
def LoadZipByKeyStr(key_str):
"""Load the zip data by key string."""
return ZipData.get(db.Key(key_str))
def GetTestString(contents):
"""Gets the test contents to be saved in the metadata."""
return json.dumps(
{'active': contents,
'backup1': '',
'backup2': ''});
def Save(project, new_test_name, contents):
"""Saves both new metadata and a new docs object."""
return db.run_in_transaction(
_SaveTransaction, project, new_test_name, contents)
def _SaveTransaction(project, new_test_name, contents):
"""Carries out the actual save operation, retrying if necessary."""
storage_metadata = StorageMetadata(
id=GetUniqueId(),
project=project,
docs_resource_url='',
docs_resource_id='',
test_name=new_test_name,
test=GetTestString(contents))
storage_metadata.put()
return storage_metadata
def FetchById(id_string):
"""Fetches a storage metadata instance by its id field.
This function also supports passing a legacy wtf id,
identified by a 'legacy-' tag in front of the numerical
id. If a legacy id is detected, we query using that instead
of the normal storage id.
Args:
id_string: Either a pure numerical id string, or one prefixed
with the string 'legacy-'.
Returns:
The corresponding StorageMetadata instance or None if no
instance is found for the given id.
"""
q = StorageMetadata.all()
match = LEGACY_ID_REGEX.search(id_string)
if match:
legacy_id = match.group(1)
q.filter('legacy_wtf_id = ', legacy_id)
else:
q.filter('id = ', id_string)
return q.get()
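# Small illustration (added for clarity, not used elsewhere) of how
# LEGACY_ID_REGEX separates legacy wtf ids such as 'legacy-12345' from the
# native ids produced by GetUniqueId().
def _ExtractLegacyId(id_string):
  """Returns the numeric legacy id as a string, or None for native ids."""
  match = LEGACY_ID_REGEX.search(id_string)
  if match:
    return match.group(1)
  return None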
def FetchByIds(ids):
"""Fetches the metadata instances by ids."""
metadata = []
for id in ids:
metadata.append(FetchById(id))
return metadata
def DeleteMetadata(instances):
"""Deletes all of the metadata."""
def BatchDelete(instances):
db.delete(instances)
while instances:
if len(instances) <= DEFAULT_NUMBER_PER_BATCH:
BatchDelete(instances)
del instances
break
else:
BatchDelete(instances[:DEFAULT_NUMBER_PER_BATCH])
del instances[:DEFAULT_NUMBER_PER_BATCH]
def FetchByDocsResourceId(resource_id):
"""Fetchs a storage metadata instance by its docs resource id."""
q = StorageMetadata.all()
q.filter('docs_resource_id = ', resource_id)
return q.get()
def FetchByProjectAndTestName(project_name, test_name):
"""Fetches the first test with the given name."""
q = StorageMetadata.all()
q.filter('project = ', project_name)
q.filter('test_name = ', test_name)
return q.get()
def FetchByProject(project_name):
"""Fetches a list of metadata objects by their project."""
q = StorageMetadata.all()
q.filter('project = ', project_name)
response_objects = []
for storage_metadata in q:
response_objects.append(storage_metadata)
return response_objects
def AddPreexisting(project, test_name, resource_url, resource_id,
legacy_wtf_id=None):
"""Adds the metadata for a storage instance that already exists in Docs."""
metadata = StorageMetadata(
id=GetUniqueId(),
project=project, test_name=test_name, docs_resource_url=resource_url,
docs_resource_id=resource_id, legacy_wtf_id=legacy_wtf_id)
metadata.put()
return metadata
def GetUniqueId():
"""Returns a unique 128 bit identifier as a string."""
return str(uuid.uuid4())
| apache-2.0 |
sumpfgottheit/pdu1800_data_provider | pygame32/pygame/tests/_dummymovietest.py | 11 | 4007 | #################################### IMPORTS ###################################
from __future__ import generators
if __name__ == '__main__':
import sys
import os
pkg_dir = os.path.split(os.path.abspath(__file__))[0]
parent_dir, pkg_name = os.path.split(pkg_dir)
is_pygame_pkg = (pkg_name == 'tests' and
os.path.split(parent_dir)[1] == 'pygame')
if not is_pygame_pkg:
sys.path.insert(0, parent_dir)
else:
is_pygame_pkg = __name__.startswith('pygame.tests.')
if is_pygame_pkg:
from pygame.tests.test_utils import test_not_implemented, unittest, trunk_relative_path
else:
from test.test_utils import test_not_implemented, unittest
import pygame
import pygame._dummybackend as gmovie
from pygame.locals import *
import os
import sys
import time
################################### CONSTANTS ##################################
filename = "War3.avi"
class MovieTypeTest( unittest.TestCase ):
def test_init(self):
pygame.display.init()
pygame.mixer.quit()
movie_file = trunk_relative_path('examples/data/blue.mpg')
movie = gmovie.Movie(movie_file)
self.assertEqual(movie, True)
#screen = pygame.display.get_surface()
#movie = pygame.gmovie.Movie(filename, screen)
#self.assertEqual(movie, True)
del movie
def test_play_pause(self):
pygame.display.init()
pygame.mixer.quit()
movie_file = trunk_relative_path('examples/data/blue.mpg')
movie = gmovie.Movie(movie_file)
self.assertEqual(movie.playing, False)
movie.play(-1)
self.assertEqual(movie.playing, True)
self.assertEqual(movie.paused, False)
movie.pause()
self.assertEqual(movie.playing, False)
self.assertEqual(movie.paused, True)
movie.pause()
self.assertEqual(movie.playing, True)
self.assertEqual(movie.paused, False)
del movie
def test_stop(self):
pygame.display.init()
pygame.mixer.quit()
movie_file = trunk_relative_path('examples/data/blue.mpg')
movie = gmovie.Movie(movie_file)
self.assertEqual(movie.playing, False)
movie.play(-1)
self.assertEqual(movie.playing, True)
self.assertEqual(movie.paused, False)
movie.stop()
self.assertEqual(movie.playing, False)
self.assertEqual(movie.paused, False)
del movie
def test_rewind(self):
pygame.display.init()
pygame.mixer.quit()
movie_file = trunk_relative_path('examples/data/blue.mpg')
movie = gmovie.Movie(movie_file)
movie.play(-1)
time.sleep(2)
#equivalent to stop without a time-argument
movie.rewind()
self.assertEqual(movie.playing, False)
self.assertEqual(movie.paused, False)
del movie
def test_width(self):
pygame.display.init()
pygame.mixer.quit()
movie_file = trunk_relative_path('examples/data/blue.mpg')
movie = gmovie.Movie(movie_file)
self.assertEqual(movie.width, 200)
del movie
def test_height(self):
pygame.display.init()
pygame.mixer.quit()
movie_file = trunk_relative_path('examples/data/blue.mpg')
movie = gmovie.Movie(movie_file)
self.assertEqual(movie.height, 200)
del movie
def test_resize(self):
pygame.display.init()
pygame.mixer.quit()
movie_file = trunk_relative_path('examples/data/blue.mpg')
movie = gmovie.Movie(movie_file)
movie.play(-1)
movie.resize(movie.width/2, movie.height/2)
        # the resized movie should now report the halved dimensions
self.assertEqual(movie.height, 100)
self.assertEqual(movie.width, 100)
del movie
| mit |
dpyro/servo | tests/wpt/css-tests/tools/html5lib/html5lib/tests/test_treeadapters.py | 451 | 1852 | from __future__ import absolute_import, division, unicode_literals
from . import support # flake8: noqa
import html5lib
from html5lib.treeadapters import sax
from html5lib.treewalkers import getTreeWalker
def test_to_sax():
handler = support.TracingSaxHandler()
tree = html5lib.parse("""<html xml:lang="en">
<title>Directory Listing</title>
<a href="/"><b/></p>
""", treebuilder="etree")
walker = getTreeWalker("etree")
sax.to_sax(walker(tree), handler)
expected = [
'startDocument',
('startElementNS', ('http://www.w3.org/1999/xhtml', 'html'),
'html', {(None, 'xml:lang'): 'en'}),
('startElementNS', ('http://www.w3.org/1999/xhtml', 'head'), 'head', {}),
('startElementNS', ('http://www.w3.org/1999/xhtml', 'title'), 'title', {}),
('characters', 'Directory Listing'),
('endElementNS', ('http://www.w3.org/1999/xhtml', 'title'), 'title'),
('characters', '\n '),
('endElementNS', ('http://www.w3.org/1999/xhtml', 'head'), 'head'),
('startElementNS', ('http://www.w3.org/1999/xhtml', 'body'), 'body', {}),
('startElementNS', ('http://www.w3.org/1999/xhtml', 'a'), 'a', {(None, 'href'): '/'}),
('startElementNS', ('http://www.w3.org/1999/xhtml', 'b'), 'b', {}),
('startElementNS', ('http://www.w3.org/1999/xhtml', 'p'), 'p', {}),
('endElementNS', ('http://www.w3.org/1999/xhtml', 'p'), 'p'),
('characters', '\n '),
('endElementNS', ('http://www.w3.org/1999/xhtml', 'b'), 'b'),
('endElementNS', ('http://www.w3.org/1999/xhtml', 'a'), 'a'),
('endElementNS', ('http://www.w3.org/1999/xhtml', 'body'), 'body'),
('endElementNS', ('http://www.w3.org/1999/xhtml', 'html'), 'html'),
'endDocument',
]
assert expected == handler.visited
| mpl-2.0 |
romain-li/edx-platform | lms/djangoapps/instructor/features/data_download.py | 17 | 3724 | """
Define steps for instructor dashboard - data download tab
acceptance tests.
"""
# pylint: disable=missing-docstring
# pylint: disable=redefined-outer-name
from lettuce import world, step
from nose.tools import assert_in, assert_regexp_matches
from terrain.steps import reload_the_page
from django.utils import http
@step(u'I see a table of student profiles')
def find_student_profile_table(step): # pylint: disable=unused-argument
# Find the grading configuration display
world.wait_for_visible('#data-student-profiles-table')
# Wait for the data table to be populated
world.wait_for(lambda _: world.css_text('#data-student-profiles-table') not in [u'', u'Loading'])
if world.role == 'instructor':
expected_data = [
world.instructor.username,
world.instructor.email,
world.instructor.profile.name,
world.instructor.profile.gender,
world.instructor.profile.goals
]
elif world.role == 'staff':
expected_data = [
world.staff.username,
world.staff.email,
world.staff.profile.name,
world.staff.profile.gender,
world.staff.profile.goals
]
for datum in expected_data:
assert_in(datum, world.css_text('#data-student-profiles-table'))
@step(u"I do not see a button to 'List enrolled students' profile information'")
def no_student_profile_table(step): # pylint: disable=unused-argument
world.is_css_not_present('input[name="list-profiles"]')
@step(u"I see the grading configuration for the course")
def find_grading_config(step): # pylint: disable=unused-argument
# Find the grading configuration display
world.wait_for_visible('#data-grade-config-text')
# expected config is the default grading configuration from common/lib/xmodule/xmodule/course_module.py
expected_config = u"""-----------------------------------------------------------------------------
Course grader:
<class 'xmodule.graders.WeightedSubsectionsGrader'>
Graded sections:
subgrader=<class 'xmodule.graders.AssignmentFormatGrader'>, type=Homework, category=Homework, weight=0.15
subgrader=<class 'xmodule.graders.AssignmentFormatGrader'>, type=Lab, category=Lab, weight=0.15
subgrader=<class 'xmodule.graders.AssignmentFormatGrader'>, type=Midterm Exam, category=Midterm Exam, weight=0.3
subgrader=<class 'xmodule.graders.AssignmentFormatGrader'>, type=Final Exam, category=Final Exam, weight=0.4
-----------------------------------------------------------------------------
Listing grading context for course {}
graded sections:
[]
all graded blocks:
length=0""".format(world.course_key)
assert_in(expected_config, world.css_text('#data-grade-config-text'))
def verify_report_is_generated(report_name_substring):
# Need to reload the page to see the reports table updated
reload_the_page(step)
world.wait_for_visible('#report-downloads-table')
# Find table and assert a .csv file is present
quoted_id = http.urlquote(world.course_key).replace('/', '_')
expected_file_regexp = quoted_id + '_' + report_name_substring + r'_\d{4}-\d{2}-\d{2}-\d{4}\.csv'
assert_regexp_matches(
world.css_html('#report-downloads-table'), expected_file_regexp,
msg="Expected report filename was not found."
)
@step(u"I see a grade report csv file in the reports table")
def find_grade_report_csv_link(step): # pylint: disable=unused-argument
verify_report_is_generated('grade_report')
@step(u"I see a student profile csv file in the reports table")
def find_student_profile_report_csv_link(step): # pylint: disable=unused-argument
verify_report_is_generated('student_profile_info')
| agpl-3.0 |
luisitobarcito/mini-DNN | Solvers.py | 1 | 7066 | """
DNN is a little python module to demonstrate
the basic elements of a deep neural network
in action.
"""
import numpy as np
import matplotlib.pyplot as plt
from pointwise_activations import func_list
from loss_functions import loss_list
class Solver(object):
"""
Solver object contains the method employed to update the network
parameters based on the gradient information.
"""
lr_rate = None
rate_decay = None
def __init__(self, params):
for prm_name in params.keys():
setattr(self, prm_name, params[prm_name])
def resetAux(self, net):
pass
    def solver_func(self, layer):
        pass
    def step(self, net, Xin, T, loss_func):
pass
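# Hedged usage sketch (illustrative): a typical training loop with one of the
# concrete solvers defined below. `net`, `Xin` and `T` are assumed to be a
# network object exposing forward/backward/updateParam plus its inputs and
# targets, and 'square' is assumed to be a key of loss_list; adjust the names
# to the actual package.
#   solver = SGDSolver({'lr_rate': 0.01, 'momentum': 0.9})
#   solver.resetAux(net)
#   for epoch in range(n_epochs):
#       objective = solver.step(net, Xin, T, 'square')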
class SGDSolver(Solver):
momentum = None
def __init__(self, params):
for prm_name in params.keys():
setattr(self, prm_name, params[prm_name])
if self.momentum is None:
self.momentum = 0
def step(self, net, Xin, T, loss_func):
loss = loss_list[loss_func][0]
lossPrime = loss_list[loss_func][1]
net.forward(Xin)
objective = np.mean(loss(T, net.Xout), axis=0)
net.backward(lossPrime(T, net.Xout) / T.shape[0])
net.updateParam(self.solver_func)
return objective
def solver_func(self, layer):
for paramname in layer.params.keys():
layer.deltas[paramname] = self.momentum * layer.deltas[paramname] - self.lr_rate * layer.grads[paramname]
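# The standalone demo below (added for illustration) isolates the classical
# momentum rule that SGDSolver.solver_func applies to every parameter tensor.
def _momentum_update_demo():
    """Runs delta = mu * delta - lr * grad; w += delta on a toy quadratic."""
    w = np.array([5.0, -3.0])
    delta = np.zeros_like(w)
    lr, mu = 0.1, 0.9
    for _ in range(200):
        grad = 2.0 * w  # gradient of the toy objective ||w||^2
        delta = mu * delta - lr * grad
        w = w + delta
    return w  # ends close to the minimum at [0, 0]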
class NAGSolver(SGDSolver):
""" Nesterov Accelerated gradient
"""
def setAux(self, net):
for layer in net.layers:
for paramname in layer.params.keys():
layer.params_aux[paramname] = layer.params[paramname] + self.momentum * layer.deltas[paramname]
def step(self, net, Xin, T, loss_func):
loss = loss_list[loss_func][0]
lossPrime = loss_list[loss_func][1]
net.forward(Xin)
objective = np.mean(loss(T, net.Xout), axis=0)
self.setAux(net)
net.forward(Xin, aux=True)
net.backward(lossPrime(T, net.Xout) / T.shape[0], aux=True)
net.updateParam(self.solver_func)
return objective
class RMSPropSolver(Solver):
""" RMS propagation
W_aux and b_aux hold the MS gradients
"""
rms_forget = None
def __init__(self, params):
for prm_name in params.keys():
setattr(self, prm_name, params[prm_name])
if hasattr(self, 'momentum'):
print 'Ignoring momentum parameter for RMSPropSolver'
if self.rms_forget is None:
self.rms_forget = 0.99
def resetAux(self, net):
for layer in net.layers:
for paramname in layer.params.keys():
layer.params_aux[paramname] = np.ones_like(layer.params[paramname])
def step(self, net, Xin, T, loss_func):
loss = loss_list[loss_func][0]
lossPrime = loss_list[loss_func][1]
net.forward(Xin)
objective = np.mean(loss(T, net.Xout), axis=0)
net.backward(lossPrime(T, net.Xout) / T.shape[0])
net.updateParam(self.solver_func)
return objective
def solver_func(self, layer):
for paramname in layer.params.keys():
layer.params_aux[paramname] = self.rms_forget * layer.params_aux[paramname] + (1 - self.rms_forget) * (layer.grads[paramname]**2)
layer.deltas[paramname] = - self.lr_rate * layer.grads[paramname] / np.sqrt(layer.params_aux[paramname])
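# Per parameter, solver_func above implements (with rho = rms_forget):
#   ms    <- rho * ms + (1 - rho) * grad ** 2
#   delta <- -lr_rate * grad / sqrt(ms)
# so the step size is normalized by a running RMS of recent gradients.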
class AdaGradSolver(Solver):
""" AdaDeltaSolver
params_aux hold the MS gradients
"""
def __init__(self, params):
for prm_name in params.keys():
setattr(self, prm_name, params[prm_name])
def resetAux(self, net):
for layer in net.layers:
for paramname in layer.params.keys():
layer.params_aux[paramname] = np.zeros_like(layer.params[paramname])
def step(self, net, Xin, T, loss_func):
loss = loss_list[loss_func][0]
lossPrime = loss_list[loss_func][1]
net.forward(Xin)
objective = np.mean(loss(T, net.Xout), axis=0)
net.backward(lossPrime(T, net.Xout) / T.shape[0])
net.updateParam(self.solver_func)
return objective
def solver_func(self, layer):
for paramname in layer.params.keys():
layer.params_aux[paramname] = layer.params_aux[paramname] + (layer.grads[paramname]**2)
layer.deltas[paramname] = - self.lr_rate * layer.grads[paramname] / np.sqrt(layer.params_aux[paramname])
class AdaDeltaSolver(Solver):
""" AdaDeltaSolver
W_aux and b_aux hold the MS gradients
"""
rms_forget = None
ada_eps = None
deltas_aux = None
grads_aux = None
def __init__(self, params):
for prm_name in params.keys():
setattr(self, prm_name, params[prm_name])
if hasattr(self, 'lr_rate'):
print 'Ignoring learning rate for AdaDeltaSolver'
if self.rms_forget is None:
self.rms_forget = 0.99
if self.ada_eps is None:
self.ada_eps = 1e-10
self.deltas_aux = []
self.grads_aux = []
def resetAux(self, net):
for layer in net.layers:
deltas_aux = {}
grads_aux = {}
for paramname in layer.params.keys():
deltas_aux[paramname] = np.zeros_like(layer.params[paramname])
grads_aux[paramname] = np.zeros_like(layer.params[paramname])
layer.params_aux[paramname] = np.zeros_like(layer.params[paramname])
self.deltas_aux += [deltas_aux]
self.grads_aux += [grads_aux]
def updateAux(self, net):
layer_count = 0
for layer in net.layers:
for paramname in layer.params.keys():
self.grads_aux[layer_count][paramname] = self.rms_forget * self.grads_aux[layer_count][paramname] + (1 - self.rms_forget) * (layer.grads[paramname]**2)
self.deltas_aux[layer_count][paramname] = self.rms_forget * self.deltas_aux[layer_count][paramname] + (1 - self.rms_forget) * (layer.deltas[paramname]**2)
layer.params_aux[paramname] = np.sqrt(self.deltas_aux[layer_count][paramname] + self.ada_eps) / np.sqrt(self.grads_aux[layer_count][paramname] + self.ada_eps)
layer_count += 1
def step(self, net, Xin, T, loss_func):
loss = loss_list[loss_func][0]
lossPrime = loss_list[loss_func][1]
net.forward(Xin)
objective = np.mean(loss(T, net.Xout), axis=0)
net.backward(lossPrime(T, net.Xout) / T.shape[0])
self.updateAux(net)
net.updateParam(self.solver_func)
return objective
def solver_func(self, layer):
for paramname in layer.params.keys():
layer.deltas[paramname] = -layer.grads[paramname] * layer.params_aux[paramname]
| mit |
QianBIG/odoo | addons/account_analytic_plans/wizard/__init__.py | 445 | 1117 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import analytic_plan_create_model
import account_crossovered_analytic
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
facaiy/spark | examples/src/main/python/mllib/naive_bayes_example.py | 106 | 2285 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
NaiveBayes Example.
Usage:
`spark-submit --master local[4] examples/src/main/python/mllib/naive_bayes_example.py`
"""
from __future__ import print_function
import shutil
from pyspark import SparkContext
# $example on$
from pyspark.mllib.classification import NaiveBayes, NaiveBayesModel
from pyspark.mllib.util import MLUtils
# $example off$
if __name__ == "__main__":
sc = SparkContext(appName="PythonNaiveBayesExample")
# $example on$
# Load and parse the data file.
data = MLUtils.loadLibSVMFile(sc, "data/mllib/sample_libsvm_data.txt")
# Split data approximately into training (60%) and test (40%)
training, test = data.randomSplit([0.6, 0.4])
# Train a naive Bayes model.
model = NaiveBayes.train(training, 1.0)
# Make prediction and test accuracy.
predictionAndLabel = test.map(lambda p: (model.predict(p.features), p.label))
accuracy = 1.0 * predictionAndLabel.filter(lambda pl: pl[0] == pl[1]).count() / test.count()
print('model accuracy {}'.format(accuracy))
# Save and load model
output_dir = 'target/tmp/myNaiveBayesModel'
shutil.rmtree(output_dir, ignore_errors=True)
model.save(sc, output_dir)
sameModel = NaiveBayesModel.load(sc, output_dir)
predictionAndLabel = test.map(lambda p: (sameModel.predict(p.features), p.label))
accuracy = 1.0 * predictionAndLabel.filter(lambda pl: pl[0] == pl[1]).count() / test.count()
print('sameModel accuracy {}'.format(accuracy))
# $example off$
| apache-2.0 |
mozilla/rna | rna/utils.py | 2 | 1891 | import json
from django.core.exceptions import ObjectDoesNotExist
from django.http import HttpResponse
from .models import Note, Release
def get_last_modified_date(*args, **kwargs):
"""Returns the date of the last modified Note or Release.
For use with Django's last_modified decorator.
"""
try:
latest_note = Note.objects.latest()
latest_release = Release.objects.latest()
except ObjectDoesNotExist:
return None
return max(latest_note.modified, latest_release.modified)
def migrate_versions():
for r in Release.objects.filter(version__endswith='.0.0').only(
'channel', 'version'):
if r.channel == 'Release':
Release.objects.filter(id=r.id).update(version=r.version[:-2])
elif r.channel == 'Aurora':
Release.objects.filter(id=r.id).update(version=r.version[:-2] + 'a2')
elif r.channel == 'Beta':
Release.objects.filter(id=r.id).update(version=r.version[:-2] + 'beta')
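# Illustrative example of the rewrite above (version values are hypothetical):
# a Release row with version '29.0.0' becomes '29.0', an Aurora row becomes
# '29.0a2', and a Beta row becomes '29.0beta'.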
def get_duplicate_product_versions():
version_ids = {}
duplicates = {}
for product in Release.PRODUCTS:
version_ids[product] = {}
for r in Release.objects.filter(product=product):
version_ids[product].setdefault(r.version, [])
version_ids[product][r.version].append(r.id)
if len(version_ids[product][r.version]) > 1:
duplicates[(product, r.version)] = version_ids[product][
r.version]
return duplicates
class HttpResponseJSON(HttpResponse):
def __init__(self, data, status=None, cors=False):
super(HttpResponseJSON, self).__init__(content=json.dumps(data),
content_type='application/json',
status=status)
if cors:
self['Access-Control-Allow-Origin'] = '*'
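# Minimal usage sketch (hypothetical view, not part of rna): any Django view
# can return the helper above directly.
def example_release_json_view(request):
    # Payload and status are illustrative; cors=True adds the wildcard
    # Access-Control-Allow-Origin header set in __init__ above.
    return HttpResponseJSON({'product': 'Firefox', 'version': '29.0'},
                            status=200, cors=True)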
| mpl-2.0 |
dhruv13J/scikit-learn | benchmarks/bench_plot_nmf.py | 206 | 5890 | """
Benchmarks of Non-Negative Matrix Factorization
"""
from __future__ import print_function
from collections import defaultdict
import gc
from time import time
import numpy as np
from scipy.linalg import norm
from sklearn.decomposition.nmf import NMF, _initialize_nmf
from sklearn.datasets.samples_generator import make_low_rank_matrix
from sklearn.externals.six.moves import xrange
def alt_nnmf(V, r, max_iter=1000, tol=1e-3, R=None):
'''
    W, H = alt_nnmf(V, r, max_iter=1000, tol=1e-3, R=None)
    Implements Lee & Seung's multiplicative-update algorithm.
Parameters
----------
V : 2-ndarray, [n_samples, n_features]
input matrix
r : integer
number of latent features
max_iter : integer, optional
maximum number of iterations (default: 1000)
tol : double
tolerance threshold for early exit (when the update factor is within
tol of 1., the function exits)
R : integer, optional
random seed
Returns
-------
    W : 2-ndarray, [n_samples, r]
        Component part of the factorization
    H : 2-ndarray, [r, n_features]
        Data part of the factorization
Reference
---------
"Algorithms for Non-negative Matrix Factorization"
by Daniel D Lee, Sebastian H Seung
(available at http://citeseer.ist.psu.edu/lee01algorithms.html)
'''
# Nomenclature in the function follows Lee & Seung
eps = 1e-5
n, m = V.shape
if R == "svd":
W, H = _initialize_nmf(V, r)
elif R is None:
R = np.random.mtrand._rand
W = np.abs(R.standard_normal((n, r)))
H = np.abs(R.standard_normal((r, m)))
for i in xrange(max_iter):
updateH = np.dot(W.T, V) / (np.dot(np.dot(W.T, W), H) + eps)
H *= updateH
updateW = np.dot(V, H.T) / (np.dot(W, np.dot(H, H.T)) + eps)
W *= updateW
if i % 10 == 0:
max_update = max(updateW.max(), updateH.max())
if abs(1. - max_update) < tol:
break
return W, H
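# Quick illustrative call (kept as a comment so the benchmark script itself is
# unchanged; the shapes and rank below are arbitrary):
#
#   X = np.abs(np.random.RandomState(0).standard_normal((20, 10)))
#   W, H = alt_nnmf(X, r=5, tol=1e-3)
#   print("error: %.4f" % norm(X - np.dot(W, H)))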
def report(error, time):
print("Frobenius loss: %.5f" % error)
print("Took: %.2fs" % time)
print()
def benchmark(samples_range, features_range, rank=50, tolerance=1e-5):
it = 0
timeset = defaultdict(lambda: [])
err = defaultdict(lambda: [])
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
print("%2d samples, %2d features" % (n_samples, n_features))
print('=======================')
X = np.abs(make_low_rank_matrix(n_samples, n_features,
effective_rank=rank, tail_strength=0.2))
gc.collect()
print("benchmarking nndsvd-nmf: ")
tstart = time()
m = NMF(n_components=30, tol=tolerance, init='nndsvd').fit(X)
tend = time() - tstart
timeset['nndsvd-nmf'].append(tend)
err['nndsvd-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking nndsvda-nmf: ")
tstart = time()
m = NMF(n_components=30, init='nndsvda',
tol=tolerance).fit(X)
tend = time() - tstart
timeset['nndsvda-nmf'].append(tend)
err['nndsvda-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking nndsvdar-nmf: ")
tstart = time()
m = NMF(n_components=30, init='nndsvdar',
tol=tolerance).fit(X)
tend = time() - tstart
timeset['nndsvdar-nmf'].append(tend)
err['nndsvdar-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking random-nmf")
tstart = time()
m = NMF(n_components=30, init=None, max_iter=1000,
tol=tolerance).fit(X)
tend = time() - tstart
timeset['random-nmf'].append(tend)
err['random-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking alt-random-nmf")
tstart = time()
W, H = alt_nnmf(X, r=30, R=None, tol=tolerance)
tend = time() - tstart
timeset['alt-random-nmf'].append(tend)
err['alt-random-nmf'].append(np.linalg.norm(X - np.dot(W, H)))
report(norm(X - np.dot(W, H)), tend)
return timeset, err
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
axes3d
import matplotlib.pyplot as plt
samples_range = np.linspace(50, 500, 3).astype(np.int)
features_range = np.linspace(50, 500, 3).astype(np.int)
timeset, err = benchmark(samples_range, features_range)
for i, results in enumerate((timeset, err)):
fig = plt.figure('scikit-learn Non-Negative Matrix Factorization benchmark results')
ax = fig.gca(projection='3d')
for c, (label, timings) in zip('rbgcm', sorted(results.iteritems())):
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
# plot the actual surface
ax.plot_surface(X, Y, Z, rstride=8, cstride=8, alpha=0.3,
color=c)
# dummy point plot to stick the legend to since surface plot do not
# support legends (yet?)
ax.plot([1], [1], [1], color=c, label=label)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
zlabel = 'Time (s)' if i == 0 else 'reconstruction error'
ax.set_zlabel(zlabel)
ax.legend()
plt.show()
| bsd-3-clause |
omni5cience/django-inlineformfield | .tox/py27/lib/python2.7/site-packages/django/contrib/gis/db/models/sql/query.py | 32 | 4938 | from django.db import connections
from django.db.models.query import sql
from django.contrib.gis.db.models.constants import ALL_TERMS
from django.contrib.gis.db.models.fields import GeometryField
from django.contrib.gis.db.models.sql import aggregates as gis_aggregates
from django.contrib.gis.db.models.sql.conversion import AreaField, DistanceField, GeomField
from django.contrib.gis.db.models.sql.where import GeoWhereNode
from django.contrib.gis.geometry.backend import Geometry
from django.contrib.gis.measure import Area, Distance
class GeoQuery(sql.Query):
"""
A single spatial SQL query.
"""
    # Overriding the valid query terms.
query_terms = ALL_TERMS
aggregates_module = gis_aggregates
compiler = 'GeoSQLCompiler'
#### Methods overridden from the base Query class ####
def __init__(self, model, where=GeoWhereNode):
super(GeoQuery, self).__init__(model, where)
# The following attributes are customized for the GeoQuerySet.
# The GeoWhereNode and SpatialBackend classes contain backend-specific
# routines and functions.
self.custom_select = {}
self.transformed_srid = None
self.extra_select_fields = {}
def clone(self, *args, **kwargs):
obj = super(GeoQuery, self).clone(*args, **kwargs)
# Customized selection dictionary and transformed srid flag have
# to also be added to obj.
obj.custom_select = self.custom_select.copy()
obj.transformed_srid = self.transformed_srid
obj.extra_select_fields = self.extra_select_fields.copy()
return obj
def convert_values(self, value, field, connection):
"""
Using the same routines that Oracle does we can convert our
extra selection objects into Geometry and Distance objects.
TODO: Make converted objects 'lazy' for less overhead.
"""
if connection.ops.oracle:
# Running through Oracle's first.
value = super(GeoQuery, self).convert_values(value, field or GeomField(), connection)
if value is None:
# Output from spatial function is NULL (e.g., called
# function on a geometry field with NULL value).
pass
elif isinstance(field, DistanceField):
# Using the field's distance attribute, can instantiate
# `Distance` with the right context.
value = Distance(**{field.distance_att: value})
elif isinstance(field, AreaField):
value = Area(**{field.area_att: value})
elif isinstance(field, (GeomField, GeometryField)) and value:
value = Geometry(value)
elif field is not None:
return super(GeoQuery, self).convert_values(value, field, connection)
return value
def get_aggregation(self, using, force_subq=False):
# Remove any aggregates marked for reduction from the subquery
# and move them to the outer AggregateQuery.
connection = connections[using]
for alias, aggregate in self.aggregate_select.items():
if isinstance(aggregate, gis_aggregates.GeoAggregate):
if not getattr(aggregate, 'is_extent', False) or connection.ops.oracle:
self.extra_select_fields[alias] = GeomField()
return super(GeoQuery, self).get_aggregation(using, force_subq)
def resolve_aggregate(self, value, aggregate, connection):
"""
        Overridden from the base Query class to handle the conversion of
        GeoAggregate objects.
"""
if isinstance(aggregate, self.aggregates_module.GeoAggregate):
if aggregate.is_extent:
if aggregate.is_extent == '3D':
return connection.ops.convert_extent3d(value)
else:
return connection.ops.convert_extent(value)
else:
return connection.ops.convert_geom(value, aggregate.source)
else:
return super(GeoQuery, self).resolve_aggregate(value, aggregate, connection)
# Private API utilities, subject to change.
def _geo_field(self, field_name=None):
"""
Returns the first Geometry field encountered; or specified via the
`field_name` keyword. The `field_name` may be a string specifying
the geometry field on this GeoQuery's model, or a lookup string
to a geometry field via a ForeignKey relation.
"""
if field_name is None:
# Incrementing until the first geographic field is found.
for fld in self.model._meta.fields:
if isinstance(fld, GeometryField):
return fld
return False
else:
# Otherwise, check by the given field name -- which may be
# a lookup to a _related_ geographic field.
return GeoWhereNode._check_geo_field(self.model._meta, field_name)
| mit |
Unow/edx-platform | lms/djangoapps/courseware/management/commands/clean_xml.py | 30 | 4219 | import lxml.etree
import os
import sys
import traceback
from fs.osfs import OSFS
from path import path
from django.core.management.base import BaseCommand
from xmodule.modulestore.xml import XMLModuleStore
def traverse_tree(course):
'''Load every descriptor in course. Return bool success value.'''
queue = [course]
while len(queue) > 0:
node = queue.pop()
queue.extend(node.get_children())
return True
def export(course, export_dir):
"""Export the specified course to course_dir. Creates dir if it doesn't exist.
Overwrites files, does not clean out dir beforehand.
"""
fs = OSFS(export_dir, create=True)
if not fs.isdirempty('.'):
print ('WARNING: Directory {dir} not-empty.'
' May clobber/confuse things'.format(dir=export_dir))
try:
course.runtime.export_fs = fs
root = lxml.etree.Element('root')
course.add_xml_to_node(root)
with fs.open('course.xml', mode='w') as f:
root.write(f)
return True
except:
print 'Export failed!'
traceback.print_exc()
return False
def import_with_checks(course_dir, verbose=True):
all_ok = True
print "Attempting to load '{0}'".format(course_dir)
course_dir = path(course_dir)
data_dir = course_dir.dirname()
course_dirs = [course_dir.basename()]
# No default class--want to complain if it doesn't find plugins for any
# module.
modulestore = XMLModuleStore(data_dir,
default_class=None,
course_dirs=course_dirs)
def str_of_err(tpl):
(msg, exc_str) = tpl
return '{msg}\n{exc}'.format(msg=msg, exc=exc_str)
courses = modulestore.get_courses()
n = len(courses)
if n != 1:
print 'ERROR: Expect exactly 1 course. Loaded {n}: {lst}'.format(
n=n, lst=courses)
return (False, None)
course = courses[0]
errors = modulestore.get_course_errors(course.id)
if len(errors) != 0:
all_ok = False
print '\n'
print "=" * 40
print 'ERRORs during import:'
print '\n'.join(map(str_of_err, errors))
print "=" * 40
print '\n'
#print course
validators = (
traverse_tree,
)
print "=" * 40
print "Running validators..."
for validate in validators:
print 'Running {0}'.format(validate.__name__)
all_ok = validate(course) and all_ok
if all_ok:
print 'Course passes all checks!'
else:
print "Course fails some checks. See above for errors."
return all_ok, course
def check_roundtrip(course_dir):
'''Check that import->export leaves the course the same'''
print "====== Roundtrip import ======="
(ok, course) = import_with_checks(course_dir)
if not ok:
raise Exception("Roundtrip import failed!")
print "====== Roundtrip export ======="
export_dir = course_dir + ".rt"
export(course, export_dir)
# dircmp doesn't do recursive diffs.
# diff = dircmp(course_dir, export_dir, ignore=[], hide=[])
print "======== Roundtrip diff: ========="
sys.stdout.flush() # needed to make diff appear in the right place
os.system("diff -r {0} {1}".format(course_dir, export_dir))
print "======== ideally there is no diff above this ======="
def clean_xml(course_dir, export_dir, force):
(ok, course) = import_with_checks(course_dir)
if ok or force:
if not ok:
print "WARNING: Exporting despite errors"
export(course, export_dir)
check_roundtrip(export_dir)
else:
print "Did NOT export"
class Command(BaseCommand):
help = """Imports specified course.xml, validate it, then exports in
a canonical format.
Usage: clean_xml PATH-TO-COURSE-DIR PATH-TO-OUTPUT-DIR [force]
If 'force' is specified as the last argument, exports even if there
were import errors.
"""
def handle(self, *args, **options):
n = len(args)
if n < 2 or n > 3:
print Command.help
return
force = False
if n == 3 and args[2] == 'force':
force = True
clean_xml(args[0], args[1], force)
| agpl-3.0 |
kingvuplus/test | lib/python/Plugins/Extensions/DVDBurn/TitleCutter.py | 98 | 3312 | from Plugins.Extensions.CutListEditor.ui import CutListEditor
from Components.ServiceEventTracker import ServiceEventTracker
from enigma import iPlayableService, iServiceInformation
from Tools.Directories import fileExists
class TitleCutter(CutListEditor):
def __init__(self, session, t):
CutListEditor.__init__(self, session, t.source)
self.skin = CutListEditor.skin
self.session = session
self.t = t
self.__event_tracker = ServiceEventTracker(screen=self, eventmap=
{
iPlayableService.evUpdatedInfo: self.getPMTInfo,
iPlayableService.evCuesheetChanged: self.refillList
})
self.onExecBegin.remove(self.showTutorial)
def getPMTInfo(self):
service = self.session.nav.getCurrentService()
audio = service and service.audioTracks()
n = audio and audio.getNumberOfTracks() or 0
if n > 0:
from DVDTitle import ConfigFixedText
from TitleProperties import languageChoices
from Components.config import config, ConfigSubsection, ConfigSubList, ConfigSelection, ConfigYesNo
self.t.properties.audiotracks = ConfigSubList()
for x in range(n):
i = audio.getTrackInfo(x)
DVB_lang = i.getLanguage()
description = i.getDescription()
pid = str(i.getPID())
if description == "MPEG":
description = "MP2"
print "[audiotrack] pid:", pid, "description:", description, "language:", DVB_lang, "count:", x, "active:", (x < 8)
self.t.properties.audiotracks.append(ConfigSubsection())
self.t.properties.audiotracks[-1].active = ConfigYesNo(default = (x < 8))
self.t.properties.audiotracks[-1].format = ConfigFixedText(description)
self.t.properties.audiotracks[-1].language = ConfigSelection(choices = languageChoices.choices, default=languageChoices.getLanguage(DVB_lang))
self.t.properties.audiotracks[-1].pid = ConfigFixedText(pid)
self.t.properties.audiotracks[-1].DVB_lang = ConfigFixedText(DVB_lang)
sAspect = service.info().getInfo(iServiceInformation.sAspect)
if sAspect in ( 1, 2, 5, 6, 9, 0xA, 0xD, 0xE ):
aspect = "4:3"
else:
aspect = "16:9"
self.t.properties.aspect.setValue(aspect)
self.t.VideoType = service.info().getInfo(iServiceInformation.sVideoType)
def checkAndGrabThumb(self):
if not fileExists(self.t.inputfile.rsplit('.',1)[0] + ".png"):
CutListEditor.grabFrame(self)
def exit(self):
if self.t.VideoType == -1:
self.getPMTInfo()
self.checkAndGrabThumb()
self.session.nav.stopService()
self.close(self.cut_list[:])
class CutlistReader(TitleCutter):
skin = """
<screen position="0,0" size="720,576">
<eLabel position="0,0" size="720,576" zPosition="1" backgroundColor="#000000" />
<widget name="Video" position="0,0" size="100,75" />
<widget name="SeekState" position="0,0" />
<widget source="cutlist" position="0,0" render="Listbox" >
<convert type="TemplatedMultiContent">
{"template": [
MultiContentEntryText(text = 1),
MultiContentEntryText(text = 2)
],
"fonts": [gFont("Regular", 18)],
"itemHeight": 20
}
</convert>
</widget>
<widget name="Timeline" position="0,0" />
</screen>"""
def __init__(self, session, t):
TitleCutter.__init__(self, session, t)
self.skin = CutlistReader.skin
def getPMTInfo(self):
TitleCutter.getPMTInfo(self)
TitleCutter.checkAndGrabThumb(self)
self.close(self.cut_list[:])
| gpl-2.0 |
wschwa/Mr-Orange-Sick-Beard | lib/guessit/transfo/post_process.py | 2 | 2416 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# GuessIt - A library for guessing information from filenames
# Copyright (c) 2012 Nicolas Wack <wackou@gmail.com>
#
# GuessIt is free software; you can redistribute it and/or modify it under
# the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# GuessIt is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Lesser GNU General Public License for more details.
#
# You should have received a copy of the Lesser GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import unicode_literals
from guessit.patterns import subtitle_exts
from guessit.textutils import reorder_title
import logging
log = logging.getLogger(__name__)
def process(mtree):
# 1- try to promote language to subtitle language where it makes sense
for node in mtree.nodes():
if 'language' not in node.guess:
continue
def promote_subtitle():
# pylint: disable=W0631
node.guess.set('subtitleLanguage', node.guess['language'],
confidence=node.guess.confidence('language'))
del node.guess['language']
# - if we matched a language in a file with a sub extension and that
# the group is the last group of the filename, it is probably the
# language of the subtitle
# (eg: 'xxx.english.srt')
if (mtree.node_at((-1,)).value.lower() in subtitle_exts and
node == mtree.leaves()[-2]):
promote_subtitle()
# - if a language is in an explicit group just preceded by "st",
# it is a subtitle language (eg: '...st[fr-eng]...')
try:
idx = node.node_idx
previous = mtree.node_at((idx[0], idx[1] - 1)).leaves()[-1]
if previous.value.lower()[-2:] == 'st':
promote_subtitle()
except IndexError:
pass
# 2- ", the" at the end of a series title should be prepended to it
for node in mtree.nodes():
if 'series' not in node.guess:
continue
node.guess['series'] = reorder_title(node.guess['series'])
| gpl-3.0 |
johnfrenchxyz/508-checklist | node_modules/gulp-sass/node_modules/node-sass/node_modules/node-gyp/gyp/pylib/gyp/generator/ninja.py | 1284 | 100329 | # Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
import copy
import hashlib
import json
import multiprocessing
import os.path
import re
import signal
import subprocess
import sys
import gyp
import gyp.common
from gyp.common import OrderedSet
import gyp.msvs_emulation
import gyp.MSVSUtil as MSVSUtil
import gyp.xcode_emulation
from cStringIO import StringIO
from gyp.common import GetEnvironFallback
import gyp.ninja_syntax as ninja_syntax
generator_default_variables = {
'EXECUTABLE_PREFIX': '',
'EXECUTABLE_SUFFIX': '',
'STATIC_LIB_PREFIX': 'lib',
'STATIC_LIB_SUFFIX': '.a',
'SHARED_LIB_PREFIX': 'lib',
# Gyp expects the following variables to be expandable by the build
# system to the appropriate locations. Ninja prefers paths to be
# known at gyp time. To resolve this, introduce special
# variables starting with $! and $| (which begin with a $ so gyp knows it
# should be treated specially, but is otherwise an invalid
# ninja/shell variable) that are passed to gyp here but expanded
# before writing out into the target .ninja files; see
# ExpandSpecial.
# $! is used for variables that represent a path and that can only appear at
# the start of a string, while $| is used for variables that can appear
# anywhere in a string.
'INTERMEDIATE_DIR': '$!INTERMEDIATE_DIR',
'SHARED_INTERMEDIATE_DIR': '$!PRODUCT_DIR/gen',
'PRODUCT_DIR': '$!PRODUCT_DIR',
'CONFIGURATION_NAME': '$|CONFIGURATION_NAME',
# Special variables that may be used by gyp 'rule' targets.
# We generate definitions for these variables on the fly when processing a
# rule.
'RULE_INPUT_ROOT': '${root}',
'RULE_INPUT_DIRNAME': '${dirname}',
'RULE_INPUT_PATH': '${source}',
'RULE_INPUT_EXT': '${ext}',
'RULE_INPUT_NAME': '${name}',
}
# Placates pylint.
generator_additional_non_configuration_keys = []
generator_additional_path_sections = []
generator_extra_sources_for_rules = []
generator_filelist_paths = None
generator_supports_multiple_toolsets = gyp.common.CrossCompileRequested()
def StripPrefix(arg, prefix):
if arg.startswith(prefix):
return arg[len(prefix):]
return arg
def QuoteShellArgument(arg, flavor):
"""Quote a string such that it will be interpreted as a single argument
by the shell."""
# Rather than attempting to enumerate the bad shell characters, just
# whitelist common OK ones and quote anything else.
if re.match(r'^[a-zA-Z0-9_=.\\/-]+$', arg):
return arg # No quoting necessary.
if flavor == 'win':
return gyp.msvs_emulation.QuoteForRspFile(arg)
return "'" + arg.replace("'", "'" + '"\'"' + "'") + "'"
def Define(d, flavor):
"""Takes a preprocessor define and returns a -D parameter that's ninja- and
shell-escaped."""
if flavor == 'win':
    # cl.exe replaces literal # characters with = in preprocessor definitions for
# some reason. Octal-encode to work around that.
d = d.replace('#', '\\%03o' % ord('#'))
return QuoteShellArgument(ninja_syntax.escape('-D' + d), flavor)
def AddArch(output, arch):
"""Adds an arch string to an output path."""
output, extension = os.path.splitext(output)
return '%s.%s%s' % (output, arch, extension)
class Target(object):
"""Target represents the paths used within a single gyp target.
Conceptually, building a single target A is a series of steps:
1) actions/rules/copies generates source/resources/etc.
2) compiles generates .o files
3) link generates a binary (library/executable)
4) bundle merges the above in a mac bundle
(Any of these steps can be optional.)
From a build ordering perspective, a dependent target B could just
depend on the last output of this series of steps.
But some dependent commands sometimes need to reach inside the box.
For example, when linking B it needs to get the path to the static
library generated by A.
This object stores those paths. To keep things simple, member
variables only store concrete paths to single files, while methods
compute derived values like "the last output of the target".
"""
def __init__(self, type):
# Gyp type ("static_library", etc.) of this target.
self.type = type
# File representing whether any input dependencies necessary for
# dependent actions have completed.
self.preaction_stamp = None
# File representing whether any input dependencies necessary for
# dependent compiles have completed.
self.precompile_stamp = None
# File representing the completion of actions/rules/copies, if any.
self.actions_stamp = None
# Path to the output of the link step, if any.
self.binary = None
# Path to the file representing the completion of building the bundle,
# if any.
self.bundle = None
# On Windows, incremental linking requires linking against all the .objs
# that compose a .lib (rather than the .lib itself). That list is stored
# here. In this case, we also need to save the compile_deps for the target,
    # so that the target that directly depends on the .objs can also depend
# on those.
self.component_objs = None
self.compile_deps = None
# Windows only. The import .lib is the output of a build step, but
# because dependents only link against the lib (not both the lib and the
# dll) we keep track of the import library here.
self.import_lib = None
def Linkable(self):
"""Return true if this is a target that can be linked against."""
return self.type in ('static_library', 'shared_library')
def UsesToc(self, flavor):
"""Return true if the target should produce a restat rule based on a TOC
file."""
# For bundles, the .TOC should be produced for the binary, not for
# FinalOutput(). But the naive approach would put the TOC file into the
# bundle, so don't do this for bundles for now.
if flavor == 'win' or self.bundle:
return False
return self.type in ('shared_library', 'loadable_module')
def PreActionInput(self, flavor):
"""Return the path, if any, that should be used as a dependency of
any dependent action step."""
if self.UsesToc(flavor):
return self.FinalOutput() + '.TOC'
return self.FinalOutput() or self.preaction_stamp
def PreCompileInput(self):
"""Return the path, if any, that should be used as a dependency of
any dependent compile step."""
return self.actions_stamp or self.precompile_stamp
def FinalOutput(self):
"""Return the last output of the target, which depends on all prior
steps."""
return self.bundle or self.binary or self.actions_stamp
# A small discourse on paths as used within the Ninja build:
# All files we produce (both at gyp and at build time) appear in the
# build directory (e.g. out/Debug).
#
# Paths within a given .gyp file are always relative to the directory
# containing the .gyp file. Call these "gyp paths". This includes
# sources as well as the starting directory a given gyp rule/action
# expects to be run from. We call the path from the source root to
# the gyp file the "base directory" within the per-.gyp-file
# NinjaWriter code.
#
# All paths as written into the .ninja files are relative to the build
# directory. Call these paths "ninja paths".
#
# We translate between these two notions of paths with two helper
# functions:
#
# - GypPathToNinja translates a gyp path (i.e. relative to the .gyp file)
# into the equivalent ninja path.
#
# - GypPathToUniqueOutput translates a gyp path into a ninja path to write
# an output file; the result can be namespaced such that it is unique
# to the input file name as well as the output target name.
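# As a concrete (illustrative) example: with base_dir 'foo' and build_dir
# 'out/Debug', build_to_base is '../../foo', so for a target named 'targ':
#
#   GypPathToNinja('bar/quux.cc')        -> '../../foo/bar/quux.cc'
#   GypPathToUniqueOutput('bar/quux.o')  -> 'obj/foo/bar/targ.quux.o'
#
# (both results are relative to the build directory; host-toolset targets use
# an 'obj.host' prefix instead of 'obj').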
class NinjaWriter(object):
def __init__(self, hash_for_rules, target_outputs, base_dir, build_dir,
output_file, toplevel_build, output_file_name, flavor,
toplevel_dir=None):
"""
base_dir: path from source root to directory containing this gyp file,
by gyp semantics, all input paths are relative to this
build_dir: path from source root to build output
toplevel_dir: path to the toplevel directory
"""
self.hash_for_rules = hash_for_rules
self.target_outputs = target_outputs
self.base_dir = base_dir
self.build_dir = build_dir
self.ninja = ninja_syntax.Writer(output_file)
self.toplevel_build = toplevel_build
self.output_file_name = output_file_name
self.flavor = flavor
self.abs_build_dir = None
if toplevel_dir is not None:
self.abs_build_dir = os.path.abspath(os.path.join(toplevel_dir,
build_dir))
self.obj_ext = '.obj' if flavor == 'win' else '.o'
if flavor == 'win':
# See docstring of msvs_emulation.GenerateEnvironmentFiles().
self.win_env = {}
for arch in ('x86', 'x64'):
self.win_env[arch] = 'environment.' + arch
# Relative path from build output dir to base dir.
build_to_top = gyp.common.InvertRelativePath(build_dir, toplevel_dir)
self.build_to_base = os.path.join(build_to_top, base_dir)
# Relative path from base dir to build dir.
base_to_top = gyp.common.InvertRelativePath(base_dir, toplevel_dir)
self.base_to_build = os.path.join(base_to_top, build_dir)
def ExpandSpecial(self, path, product_dir=None):
"""Expand specials like $!PRODUCT_DIR in |path|.
If |product_dir| is None, assumes the cwd is already the product
dir. Otherwise, |product_dir| is the relative path to the product
dir.
"""
PRODUCT_DIR = '$!PRODUCT_DIR'
if PRODUCT_DIR in path:
if product_dir:
path = path.replace(PRODUCT_DIR, product_dir)
else:
path = path.replace(PRODUCT_DIR + '/', '')
path = path.replace(PRODUCT_DIR + '\\', '')
path = path.replace(PRODUCT_DIR, '.')
INTERMEDIATE_DIR = '$!INTERMEDIATE_DIR'
if INTERMEDIATE_DIR in path:
int_dir = self.GypPathToUniqueOutput('gen')
# GypPathToUniqueOutput generates a path relative to the product dir,
# so insert product_dir in front if it is provided.
path = path.replace(INTERMEDIATE_DIR,
os.path.join(product_dir or '', int_dir))
CONFIGURATION_NAME = '$|CONFIGURATION_NAME'
path = path.replace(CONFIGURATION_NAME, self.config_name)
return path
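  # Illustrative: ExpandSpecial('$!PRODUCT_DIR/gen/foo.h') -> 'gen/foo.h' when
  # the cwd is already the product dir, and -> '../../gen/foo.h' when called
  # with product_dir='../..'; '$|CONFIGURATION_NAME' expands to the current
  # config name, e.g. 'Debug'.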
def ExpandRuleVariables(self, path, root, dirname, source, ext, name):
if self.flavor == 'win':
path = self.msvs_settings.ConvertVSMacros(
path, config=self.config_name)
path = path.replace(generator_default_variables['RULE_INPUT_ROOT'], root)
path = path.replace(generator_default_variables['RULE_INPUT_DIRNAME'],
dirname)
path = path.replace(generator_default_variables['RULE_INPUT_PATH'], source)
path = path.replace(generator_default_variables['RULE_INPUT_EXT'], ext)
path = path.replace(generator_default_variables['RULE_INPUT_NAME'], name)
return path
def GypPathToNinja(self, path, env=None):
"""Translate a gyp path to a ninja path, optionally expanding environment
variable references in |path| with |env|.
See the above discourse on path conversions."""
if env:
if self.flavor == 'mac':
path = gyp.xcode_emulation.ExpandEnvVars(path, env)
elif self.flavor == 'win':
path = gyp.msvs_emulation.ExpandMacros(path, env)
if path.startswith('$!'):
expanded = self.ExpandSpecial(path)
if self.flavor == 'win':
expanded = os.path.normpath(expanded)
return expanded
if '$|' in path:
path = self.ExpandSpecial(path)
assert '$' not in path, path
return os.path.normpath(os.path.join(self.build_to_base, path))
def GypPathToUniqueOutput(self, path, qualified=True):
"""Translate a gyp path to a ninja path for writing output.
If qualified is True, qualify the resulting filename with the name
of the target. This is necessary when e.g. compiling the same
path twice for two separate output targets.
See the above discourse on path conversions."""
path = self.ExpandSpecial(path)
assert not path.startswith('$'), path
# Translate the path following this scheme:
# Input: foo/bar.gyp, target targ, references baz/out.o
# Output: obj/foo/baz/targ.out.o (if qualified)
# obj/foo/baz/out.o (otherwise)
# (and obj.host instead of obj for cross-compiles)
#
# Why this scheme and not some other one?
# 1) for a given input, you can compute all derived outputs by matching
# its path, even if the input is brought via a gyp file with '..'.
# 2) simple files like libraries and stamps have a simple filename.
obj = 'obj'
if self.toolset != 'target':
obj += '.' + self.toolset
path_dir, path_basename = os.path.split(path)
assert not os.path.isabs(path_dir), (
"'%s' can not be absolute path (see crbug.com/462153)." % path_dir)
if qualified:
path_basename = self.name + '.' + path_basename
return os.path.normpath(os.path.join(obj, self.base_dir, path_dir,
path_basename))
def WriteCollapsedDependencies(self, name, targets, order_only=None):
"""Given a list of targets, return a path for a single file
representing the result of building all the targets or None.
Uses a stamp file if necessary."""
assert targets == filter(None, targets), targets
if len(targets) == 0:
assert not order_only
return None
if len(targets) > 1 or order_only:
stamp = self.GypPathToUniqueOutput(name + '.stamp')
targets = self.ninja.build(stamp, 'stamp', targets, order_only=order_only)
self.ninja.newline()
return targets[0]
def _SubninjaNameForArch(self, arch):
output_file_base = os.path.splitext(self.output_file_name)[0]
return '%s.%s.ninja' % (output_file_base, arch)
def WriteSpec(self, spec, config_name, generator_flags):
"""The main entry point for NinjaWriter: write the build rules for a spec.
Returns a Target object, which represents the output paths for this spec.
Returns None if there are no outputs (e.g. a settings-only 'none' type
target)."""
self.config_name = config_name
self.name = spec['target_name']
self.toolset = spec['toolset']
config = spec['configurations'][config_name]
self.target = Target(spec['type'])
self.is_standalone_static_library = bool(
spec.get('standalone_static_library', 0))
# Track if this target contains any C++ files, to decide if gcc or g++
# should be used for linking.
self.uses_cpp = False
self.is_mac_bundle = gyp.xcode_emulation.IsMacBundle(self.flavor, spec)
self.xcode_settings = self.msvs_settings = None
if self.flavor == 'mac':
self.xcode_settings = gyp.xcode_emulation.XcodeSettings(spec)
if self.flavor == 'win':
self.msvs_settings = gyp.msvs_emulation.MsvsSettings(spec,
generator_flags)
arch = self.msvs_settings.GetArch(config_name)
self.ninja.variable('arch', self.win_env[arch])
self.ninja.variable('cc', '$cl_' + arch)
self.ninja.variable('cxx', '$cl_' + arch)
self.ninja.variable('cc_host', '$cl_' + arch)
self.ninja.variable('cxx_host', '$cl_' + arch)
self.ninja.variable('asm', '$ml_' + arch)
if self.flavor == 'mac':
self.archs = self.xcode_settings.GetActiveArchs(config_name)
if len(self.archs) > 1:
self.arch_subninjas = dict(
(arch, ninja_syntax.Writer(
OpenOutput(os.path.join(self.toplevel_build,
self._SubninjaNameForArch(arch)),
'w')))
for arch in self.archs)
# Compute predepends for all rules.
# actions_depends is the dependencies this target depends on before running
# any of its action/rule/copy steps.
# compile_depends is the dependencies this target depends on before running
# any of its compile steps.
actions_depends = []
compile_depends = []
# TODO(evan): it is rather confusing which things are lists and which
# are strings. Fix these.
if 'dependencies' in spec:
for dep in spec['dependencies']:
if dep in self.target_outputs:
target = self.target_outputs[dep]
actions_depends.append(target.PreActionInput(self.flavor))
compile_depends.append(target.PreCompileInput())
actions_depends = filter(None, actions_depends)
compile_depends = filter(None, compile_depends)
actions_depends = self.WriteCollapsedDependencies('actions_depends',
actions_depends)
compile_depends = self.WriteCollapsedDependencies('compile_depends',
compile_depends)
self.target.preaction_stamp = actions_depends
self.target.precompile_stamp = compile_depends
# Write out actions, rules, and copies. These must happen before we
# compile any sources, so compute a list of predependencies for sources
# while we do it.
extra_sources = []
mac_bundle_depends = []
self.target.actions_stamp = self.WriteActionsRulesCopies(
spec, extra_sources, actions_depends, mac_bundle_depends)
# If we have actions/rules/copies, we depend directly on those, but
# otherwise we depend on dependent target's actions/rules/copies etc.
# We never need to explicitly depend on previous target's link steps,
# because no compile ever depends on them.
compile_depends_stamp = (self.target.actions_stamp or compile_depends)
# Write out the compilation steps, if any.
link_deps = []
sources = extra_sources + spec.get('sources', [])
if sources:
if self.flavor == 'mac' and len(self.archs) > 1:
# Write subninja file containing compile and link commands scoped to
# a single arch if a fat binary is being built.
for arch in self.archs:
self.ninja.subninja(self._SubninjaNameForArch(arch))
pch = None
if self.flavor == 'win':
gyp.msvs_emulation.VerifyMissingSources(
sources, self.abs_build_dir, generator_flags, self.GypPathToNinja)
pch = gyp.msvs_emulation.PrecompiledHeader(
self.msvs_settings, config_name, self.GypPathToNinja,
self.GypPathToUniqueOutput, self.obj_ext)
else:
pch = gyp.xcode_emulation.MacPrefixHeader(
self.xcode_settings, self.GypPathToNinja,
lambda path, lang: self.GypPathToUniqueOutput(path + '-' + lang))
link_deps = self.WriteSources(
self.ninja, config_name, config, sources, compile_depends_stamp, pch,
spec)
# Some actions/rules output 'sources' that are already object files.
obj_outputs = [f for f in sources if f.endswith(self.obj_ext)]
if obj_outputs:
if self.flavor != 'mac' or len(self.archs) == 1:
link_deps += [self.GypPathToNinja(o) for o in obj_outputs]
else:
print "Warning: Actions/rules writing object files don't work with " \
"multiarch targets, dropping. (target %s)" % spec['target_name']
elif self.flavor == 'mac' and len(self.archs) > 1:
link_deps = collections.defaultdict(list)
compile_deps = self.target.actions_stamp or actions_depends
if self.flavor == 'win' and self.target.type == 'static_library':
self.target.component_objs = link_deps
self.target.compile_deps = compile_deps
# Write out a link step, if needed.
output = None
is_empty_bundle = not link_deps and not mac_bundle_depends
if link_deps or self.target.actions_stamp or actions_depends:
output = self.WriteTarget(spec, config_name, config, link_deps,
compile_deps)
if self.is_mac_bundle:
mac_bundle_depends.append(output)
# Bundle all of the above together, if needed.
if self.is_mac_bundle:
output = self.WriteMacBundle(spec, mac_bundle_depends, is_empty_bundle)
if not output:
return None
assert self.target.FinalOutput(), output
return self.target
def _WinIdlRule(self, source, prebuild, outputs):
"""Handle the implicit VS .idl rule for one source file. Fills |outputs|
with files that are generated."""
outdir, output, vars, flags = self.msvs_settings.GetIdlBuildData(
source, self.config_name)
outdir = self.GypPathToNinja(outdir)
def fix_path(path, rel=None):
path = os.path.join(outdir, path)
dirname, basename = os.path.split(source)
root, ext = os.path.splitext(basename)
path = self.ExpandRuleVariables(
path, root, dirname, source, ext, basename)
if rel:
path = os.path.relpath(path, rel)
return path
vars = [(name, fix_path(value, outdir)) for name, value in vars]
output = [fix_path(p) for p in output]
vars.append(('outdir', outdir))
vars.append(('idlflags', flags))
input = self.GypPathToNinja(source)
self.ninja.build(output, 'idl', input,
variables=vars, order_only=prebuild)
outputs.extend(output)
def WriteWinIdlFiles(self, spec, prebuild):
"""Writes rules to match MSVS's implicit idl handling."""
assert self.flavor == 'win'
if self.msvs_settings.HasExplicitIdlRulesOrActions(spec):
return []
outputs = []
for source in filter(lambda x: x.endswith('.idl'), spec['sources']):
self._WinIdlRule(source, prebuild, outputs)
return outputs
def WriteActionsRulesCopies(self, spec, extra_sources, prebuild,
mac_bundle_depends):
"""Write out the Actions, Rules, and Copies steps. Return a path
representing the outputs of these steps."""
outputs = []
if self.is_mac_bundle:
mac_bundle_resources = spec.get('mac_bundle_resources', [])[:]
else:
mac_bundle_resources = []
extra_mac_bundle_resources = []
if 'actions' in spec:
outputs += self.WriteActions(spec['actions'], extra_sources, prebuild,
extra_mac_bundle_resources)
if 'rules' in spec:
outputs += self.WriteRules(spec['rules'], extra_sources, prebuild,
mac_bundle_resources,
extra_mac_bundle_resources)
if 'copies' in spec:
outputs += self.WriteCopies(spec['copies'], prebuild, mac_bundle_depends)
if 'sources' in spec and self.flavor == 'win':
outputs += self.WriteWinIdlFiles(spec, prebuild)
stamp = self.WriteCollapsedDependencies('actions_rules_copies', outputs)
if self.is_mac_bundle:
xcassets = self.WriteMacBundleResources(
extra_mac_bundle_resources + mac_bundle_resources, mac_bundle_depends)
partial_info_plist = self.WriteMacXCassets(xcassets, mac_bundle_depends)
self.WriteMacInfoPlist(partial_info_plist, mac_bundle_depends)
return stamp
def GenerateDescription(self, verb, message, fallback):
"""Generate and return a description of a build step.
|verb| is the short summary, e.g. ACTION or RULE.
|message| is a hand-written description, or None if not available.
|fallback| is the gyp-level name of the step, usable as a fallback.
"""
if self.toolset != 'target':
verb += '(%s)' % self.toolset
if message:
return '%s %s' % (verb, self.ExpandSpecial(message))
else:
return '%s %s: %s' % (verb, self.name, fallback)
def WriteActions(self, actions, extra_sources, prebuild,
extra_mac_bundle_resources):
# Actions cd into the base directory.
env = self.GetToolchainEnv()
all_outputs = []
for action in actions:
# First write out a rule for the action.
name = '%s_%s' % (action['action_name'], self.hash_for_rules)
description = self.GenerateDescription('ACTION',
action.get('message', None),
name)
is_cygwin = (self.msvs_settings.IsRuleRunUnderCygwin(action)
if self.flavor == 'win' else False)
args = action['action']
depfile = action.get('depfile', None)
if depfile:
depfile = self.ExpandSpecial(depfile, self.base_to_build)
pool = 'console' if int(action.get('ninja_use_console', 0)) else None
rule_name, _ = self.WriteNewNinjaRule(name, args, description,
is_cygwin, env, pool,
depfile=depfile)
inputs = [self.GypPathToNinja(i, env) for i in action['inputs']]
if int(action.get('process_outputs_as_sources', False)):
extra_sources += action['outputs']
if int(action.get('process_outputs_as_mac_bundle_resources', False)):
extra_mac_bundle_resources += action['outputs']
outputs = [self.GypPathToNinja(o, env) for o in action['outputs']]
# Then write out an edge using the rule.
self.ninja.build(outputs, rule_name, inputs,
order_only=prebuild)
all_outputs += outputs
self.ninja.newline()
return all_outputs
def WriteRules(self, rules, extra_sources, prebuild,
mac_bundle_resources, extra_mac_bundle_resources):
env = self.GetToolchainEnv()
all_outputs = []
for rule in rules:
# Skip a rule with no action and no inputs.
if 'action' not in rule and not rule.get('rule_sources', []):
continue
# First write out a rule for the rule action.
name = '%s_%s' % (rule['rule_name'], self.hash_for_rules)
args = rule['action']
description = self.GenerateDescription(
'RULE',
rule.get('message', None),
('%s ' + generator_default_variables['RULE_INPUT_PATH']) % name)
is_cygwin = (self.msvs_settings.IsRuleRunUnderCygwin(rule)
if self.flavor == 'win' else False)
pool = 'console' if int(rule.get('ninja_use_console', 0)) else None
rule_name, args = self.WriteNewNinjaRule(
name, args, description, is_cygwin, env, pool)
# TODO: if the command references the outputs directly, we should
# simplify it to just use $out.
# Rules can potentially make use of some special variables which
# must vary per source file.
# Compute the list of variables we'll need to provide.
special_locals = ('source', 'root', 'dirname', 'ext', 'name')
needed_variables = set(['source'])
for argument in args:
for var in special_locals:
if '${%s}' % var in argument:
needed_variables.add(var)
def cygwin_munge(path):
# pylint: disable=cell-var-from-loop
if is_cygwin:
return path.replace('\\', '/')
return path
inputs = [self.GypPathToNinja(i, env) for i in rule.get('inputs', [])]
# If there are n source files matching the rule, and m additional rule
# inputs, then adding 'inputs' to each build edge written below will
# write m * n inputs. Collapsing reduces this to m + n.
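      # (Illustratively: 100 rule sources with 10 extra inputs would otherwise
      # emit 100 * 10 = 1000 dependency entries, versus 100 + 10 = 110 after
      # collapsing.)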
sources = rule.get('rule_sources', [])
num_inputs = len(inputs)
if prebuild:
num_inputs += 1
if num_inputs > 2 and len(sources) > 2:
inputs = [self.WriteCollapsedDependencies(
rule['rule_name'], inputs, order_only=prebuild)]
prebuild = []
# For each source file, write an edge that generates all the outputs.
for source in sources:
source = os.path.normpath(source)
dirname, basename = os.path.split(source)
root, ext = os.path.splitext(basename)
# Gather the list of inputs and outputs, expanding $vars if possible.
outputs = [self.ExpandRuleVariables(o, root, dirname,
source, ext, basename)
for o in rule['outputs']]
if int(rule.get('process_outputs_as_sources', False)):
extra_sources += outputs
was_mac_bundle_resource = source in mac_bundle_resources
if was_mac_bundle_resource or \
int(rule.get('process_outputs_as_mac_bundle_resources', False)):
extra_mac_bundle_resources += outputs
# Note: This is n_resources * n_outputs_in_rule. Put to-be-removed
# items in a set and remove them all in a single pass if this becomes
# a performance issue.
if was_mac_bundle_resource:
mac_bundle_resources.remove(source)
extra_bindings = []
for var in needed_variables:
if var == 'root':
extra_bindings.append(('root', cygwin_munge(root)))
elif var == 'dirname':
# '$dirname' is a parameter to the rule action, which means
# it shouldn't be converted to a Ninja path. But we don't
# want $!PRODUCT_DIR in there either.
dirname_expanded = self.ExpandSpecial(dirname, self.base_to_build)
extra_bindings.append(('dirname', cygwin_munge(dirname_expanded)))
elif var == 'source':
# '$source' is a parameter to the rule action, which means
# it shouldn't be converted to a Ninja path. But we don't
# want $!PRODUCT_DIR in there either.
source_expanded = self.ExpandSpecial(source, self.base_to_build)
extra_bindings.append(('source', cygwin_munge(source_expanded)))
elif var == 'ext':
extra_bindings.append(('ext', ext))
elif var == 'name':
extra_bindings.append(('name', cygwin_munge(basename)))
else:
assert var == None, repr(var)
outputs = [self.GypPathToNinja(o, env) for o in outputs]
if self.flavor == 'win':
# WriteNewNinjaRule uses unique_name for creating an rsp file on win.
extra_bindings.append(('unique_name',
hashlib.md5(outputs[0]).hexdigest()))
self.ninja.build(outputs, rule_name, self.GypPathToNinja(source),
implicit=inputs,
order_only=prebuild,
variables=extra_bindings)
all_outputs.extend(outputs)
return all_outputs
def WriteCopies(self, copies, prebuild, mac_bundle_depends):
outputs = []
env = self.GetToolchainEnv()
for copy in copies:
for path in copy['files']:
# Normalize the path so trailing slashes don't confuse us.
path = os.path.normpath(path)
basename = os.path.split(path)[1]
src = self.GypPathToNinja(path, env)
dst = self.GypPathToNinja(os.path.join(copy['destination'], basename),
env)
outputs += self.ninja.build(dst, 'copy', src, order_only=prebuild)
if self.is_mac_bundle:
# gyp has mac_bundle_resources to copy things into a bundle's
# Resources folder, but there's no built-in way to copy files to other
# places in the bundle. Hence, some targets use copies for this. Check
# if this file is copied into the current bundle, and if so add it to
# the bundle depends so that dependent targets get rebuilt if the copy
# input changes.
if dst.startswith(self.xcode_settings.GetBundleContentsFolderPath()):
mac_bundle_depends.append(dst)
return outputs
def WriteMacBundleResources(self, resources, bundle_depends):
"""Writes ninja edges for 'mac_bundle_resources'."""
xcassets = []
for output, res in gyp.xcode_emulation.GetMacBundleResources(
generator_default_variables['PRODUCT_DIR'],
self.xcode_settings, map(self.GypPathToNinja, resources)):
output = self.ExpandSpecial(output)
if os.path.splitext(output)[-1] != '.xcassets':
isBinary = self.xcode_settings.IsBinaryOutputFormat(self.config_name)
self.ninja.build(output, 'mac_tool', res,
variables=[('mactool_cmd', 'copy-bundle-resource'), \
('binary', isBinary)])
bundle_depends.append(output)
else:
xcassets.append(res)
return xcassets
def WriteMacXCassets(self, xcassets, bundle_depends):
"""Writes ninja edges for 'mac_bundle_resources' .xcassets files.
This add an invocation of 'actool' via the 'mac_tool.py' helper script.
It assumes that the assets catalogs define at least one imageset and
thus an Assets.car file will be generated in the application resources
directory. If this is not the case, then the build will probably be done
at each invocation of ninja."""
if not xcassets:
return
extra_arguments = {}
settings_to_arg = {
'XCASSETS_APP_ICON': 'app-icon',
'XCASSETS_LAUNCH_IMAGE': 'launch-image',
}
settings = self.xcode_settings.xcode_settings[self.config_name]
for settings_key, arg_name in settings_to_arg.iteritems():
value = settings.get(settings_key)
if value:
extra_arguments[arg_name] = value
partial_info_plist = None
if extra_arguments:
partial_info_plist = self.GypPathToUniqueOutput(
'assetcatalog_generated_info.plist')
extra_arguments['output-partial-info-plist'] = partial_info_plist
outputs = []
outputs.append(
os.path.join(
self.xcode_settings.GetBundleResourceFolder(),
'Assets.car'))
if partial_info_plist:
outputs.append(partial_info_plist)
keys = QuoteShellArgument(json.dumps(extra_arguments), self.flavor)
extra_env = self.xcode_settings.GetPerTargetSettings()
env = self.GetSortedXcodeEnv(additional_settings=extra_env)
env = self.ComputeExportEnvString(env)
bundle_depends.extend(self.ninja.build(
outputs, 'compile_xcassets', xcassets,
variables=[('env', env), ('keys', keys)]))
return partial_info_plist
def WriteMacInfoPlist(self, partial_info_plist, bundle_depends):
"""Write build rules for bundle Info.plist files."""
info_plist, out, defines, extra_env = gyp.xcode_emulation.GetMacInfoPlist(
generator_default_variables['PRODUCT_DIR'],
self.xcode_settings, self.GypPathToNinja)
if not info_plist:
return
out = self.ExpandSpecial(out)
if defines:
# Create an intermediate file to store preprocessed results.
intermediate_plist = self.GypPathToUniqueOutput(
os.path.basename(info_plist))
defines = ' '.join([Define(d, self.flavor) for d in defines])
info_plist = self.ninja.build(
intermediate_plist, 'preprocess_infoplist', info_plist,
variables=[('defines',defines)])
env = self.GetSortedXcodeEnv(additional_settings=extra_env)
env = self.ComputeExportEnvString(env)
if partial_info_plist:
intermediate_plist = self.GypPathToUniqueOutput('merged_info.plist')
info_plist = self.ninja.build(
intermediate_plist, 'merge_infoplist',
[partial_info_plist, info_plist])
keys = self.xcode_settings.GetExtraPlistItems(self.config_name)
keys = QuoteShellArgument(json.dumps(keys), self.flavor)
isBinary = self.xcode_settings.IsBinaryOutputFormat(self.config_name)
self.ninja.build(out, 'copy_infoplist', info_plist,
variables=[('env', env), ('keys', keys),
('binary', isBinary)])
bundle_depends.append(out)
def WriteSources(self, ninja_file, config_name, config, sources, predepends,
precompiled_header, spec):
"""Write build rules to compile all of |sources|."""
if self.toolset == 'host':
self.ninja.variable('ar', '$ar_host')
self.ninja.variable('cc', '$cc_host')
self.ninja.variable('cxx', '$cxx_host')
self.ninja.variable('ld', '$ld_host')
self.ninja.variable('ldxx', '$ldxx_host')
self.ninja.variable('nm', '$nm_host')
self.ninja.variable('readelf', '$readelf_host')
if self.flavor != 'mac' or len(self.archs) == 1:
return self.WriteSourcesForArch(
self.ninja, config_name, config, sources, predepends,
precompiled_header, spec)
else:
return dict((arch, self.WriteSourcesForArch(
self.arch_subninjas[arch], config_name, config, sources, predepends,
precompiled_header, spec, arch=arch))
for arch in self.archs)
def WriteSourcesForArch(self, ninja_file, config_name, config, sources,
predepends, precompiled_header, spec, arch=None):
"""Write build rules to compile all of |sources|."""
extra_defines = []
if self.flavor == 'mac':
cflags = self.xcode_settings.GetCflags(config_name, arch=arch)
cflags_c = self.xcode_settings.GetCflagsC(config_name)
cflags_cc = self.xcode_settings.GetCflagsCC(config_name)
cflags_objc = ['$cflags_c'] + \
self.xcode_settings.GetCflagsObjC(config_name)
cflags_objcc = ['$cflags_cc'] + \
self.xcode_settings.GetCflagsObjCC(config_name)
elif self.flavor == 'win':
asmflags = self.msvs_settings.GetAsmflags(config_name)
cflags = self.msvs_settings.GetCflags(config_name)
cflags_c = self.msvs_settings.GetCflagsC(config_name)
cflags_cc = self.msvs_settings.GetCflagsCC(config_name)
extra_defines = self.msvs_settings.GetComputedDefines(config_name)
      # See comment at cc_command for why there are two .pdb files.
pdbpath_c = pdbpath_cc = self.msvs_settings.GetCompilerPdbName(
config_name, self.ExpandSpecial)
if not pdbpath_c:
obj = 'obj'
if self.toolset != 'target':
obj += '.' + self.toolset
pdbpath = os.path.normpath(os.path.join(obj, self.base_dir, self.name))
pdbpath_c = pdbpath + '.c.pdb'
pdbpath_cc = pdbpath + '.cc.pdb'
self.WriteVariableList(ninja_file, 'pdbname_c', [pdbpath_c])
self.WriteVariableList(ninja_file, 'pdbname_cc', [pdbpath_cc])
self.WriteVariableList(ninja_file, 'pchprefix', [self.name])
else:
cflags = config.get('cflags', [])
cflags_c = config.get('cflags_c', [])
cflags_cc = config.get('cflags_cc', [])
# Respect environment variables related to build, but target-specific
# flags can still override them.
if self.toolset == 'target':
cflags_c = (os.environ.get('CPPFLAGS', '').split() +
os.environ.get('CFLAGS', '').split() + cflags_c)
cflags_cc = (os.environ.get('CPPFLAGS', '').split() +
os.environ.get('CXXFLAGS', '').split() + cflags_cc)
elif self.toolset == 'host':
cflags_c = (os.environ.get('CPPFLAGS_host', '').split() +
os.environ.get('CFLAGS_host', '').split() + cflags_c)
cflags_cc = (os.environ.get('CPPFLAGS_host', '').split() +
os.environ.get('CXXFLAGS_host', '').split() + cflags_cc)
defines = config.get('defines', []) + extra_defines
self.WriteVariableList(ninja_file, 'defines',
[Define(d, self.flavor) for d in defines])
if self.flavor == 'win':
self.WriteVariableList(ninja_file, 'asmflags',
map(self.ExpandSpecial, asmflags))
self.WriteVariableList(ninja_file, 'rcflags',
[QuoteShellArgument(self.ExpandSpecial(f), self.flavor)
for f in self.msvs_settings.GetRcflags(config_name,
self.GypPathToNinja)])
include_dirs = config.get('include_dirs', [])
env = self.GetToolchainEnv()
if self.flavor == 'win':
include_dirs = self.msvs_settings.AdjustIncludeDirs(include_dirs,
config_name)
self.WriteVariableList(ninja_file, 'includes',
[QuoteShellArgument('-I' + self.GypPathToNinja(i, env), self.flavor)
for i in include_dirs])
if self.flavor == 'win':
midl_include_dirs = config.get('midl_include_dirs', [])
midl_include_dirs = self.msvs_settings.AdjustMidlIncludeDirs(
midl_include_dirs, config_name)
self.WriteVariableList(ninja_file, 'midl_includes',
[QuoteShellArgument('-I' + self.GypPathToNinja(i, env), self.flavor)
for i in midl_include_dirs])
pch_commands = precompiled_header.GetPchBuildCommands(arch)
if self.flavor == 'mac':
# Most targets use no precompiled headers, so only write these if needed.
for ext, var in [('c', 'cflags_pch_c'), ('cc', 'cflags_pch_cc'),
('m', 'cflags_pch_objc'), ('mm', 'cflags_pch_objcc')]:
include = precompiled_header.GetInclude(ext, arch)
if include: ninja_file.variable(var, include)
arflags = config.get('arflags', [])
self.WriteVariableList(ninja_file, 'cflags',
map(self.ExpandSpecial, cflags))
self.WriteVariableList(ninja_file, 'cflags_c',
map(self.ExpandSpecial, cflags_c))
self.WriteVariableList(ninja_file, 'cflags_cc',
map(self.ExpandSpecial, cflags_cc))
if self.flavor == 'mac':
self.WriteVariableList(ninja_file, 'cflags_objc',
map(self.ExpandSpecial, cflags_objc))
self.WriteVariableList(ninja_file, 'cflags_objcc',
map(self.ExpandSpecial, cflags_objcc))
self.WriteVariableList(ninja_file, 'arflags',
map(self.ExpandSpecial, arflags))
ninja_file.newline()
outputs = []
has_rc_source = False
for source in sources:
filename, ext = os.path.splitext(source)
ext = ext[1:]
obj_ext = self.obj_ext
if ext in ('cc', 'cpp', 'cxx'):
command = 'cxx'
self.uses_cpp = True
elif ext == 'c' or (ext == 'S' and self.flavor != 'win'):
command = 'cc'
elif ext == 's' and self.flavor != 'win': # Doesn't generate .o.d files.
command = 'cc_s'
elif (self.flavor == 'win' and ext == 'asm' and
not self.msvs_settings.HasExplicitAsmRules(spec)):
command = 'asm'
# Add the _asm suffix because msvs can handle .cc and .asm files with the
# same name without collision, so the generated objects need distinct names.
obj_ext = '_asm.obj'
elif self.flavor == 'mac' and ext == 'm':
command = 'objc'
elif self.flavor == 'mac' and ext == 'mm':
command = 'objcxx'
self.uses_cpp = True
elif self.flavor == 'win' and ext == 'rc':
command = 'rc'
obj_ext = '.res'
has_rc_source = True
else:
# Ignore unhandled extensions.
continue
input = self.GypPathToNinja(source)
output = self.GypPathToUniqueOutput(filename + obj_ext)
if arch is not None:
output = AddArch(output, arch)
implicit = precompiled_header.GetObjDependencies([input], [output], arch)
variables = []
if self.flavor == 'win':
variables, output, implicit = precompiled_header.GetFlagsModifications(
input, output, implicit, command, cflags_c, cflags_cc,
self.ExpandSpecial)
ninja_file.build(output, command, input,
implicit=[gch for _, _, gch in implicit],
order_only=predepends, variables=variables)
outputs.append(output)
if has_rc_source:
resource_include_dirs = config.get('resource_include_dirs', include_dirs)
self.WriteVariableList(ninja_file, 'resource_includes',
[QuoteShellArgument('-I' + self.GypPathToNinja(i, env), self.flavor)
for i in resource_include_dirs])
self.WritePchTargets(ninja_file, pch_commands)
ninja_file.newline()
return outputs
def WritePchTargets(self, ninja_file, pch_commands):
"""Writes ninja rules to compile prefix headers."""
if not pch_commands:
return
for gch, lang_flag, lang, input in pch_commands:
var_name = {
'c': 'cflags_pch_c',
'cc': 'cflags_pch_cc',
'm': 'cflags_pch_objc',
'mm': 'cflags_pch_objcc',
}[lang]
map = { 'c': 'cc', 'cc': 'cxx', 'm': 'objc', 'mm': 'objcxx', }
cmd = map.get(lang)
ninja_file.build(gch, cmd, input, variables=[(var_name, lang_flag)])
def WriteLink(self, spec, config_name, config, link_deps):
"""Write out a link step. Fills out target.binary. """
if self.flavor != 'mac' or len(self.archs) == 1:
return self.WriteLinkForArch(
self.ninja, spec, config_name, config, link_deps)
else:
output = self.ComputeOutput(spec)
inputs = [self.WriteLinkForArch(self.arch_subninjas[arch], spec,
config_name, config, link_deps[arch],
arch=arch)
for arch in self.archs]
extra_bindings = []
build_output = output
if not self.is_mac_bundle:
self.AppendPostbuildVariable(extra_bindings, spec, output, output)
# TODO(yyanagisawa): more work needed to fix:
# https://code.google.com/p/gyp/issues/detail?id=411
if (spec['type'] in ('shared_library', 'loadable_module') and
not self.is_mac_bundle):
extra_bindings.append(('lib', output))
self.ninja.build([output, output + '.TOC'], 'solipo', inputs,
variables=extra_bindings)
else:
self.ninja.build(build_output, 'lipo', inputs, variables=extra_bindings)
return output
def WriteLinkForArch(self, ninja_file, spec, config_name, config,
link_deps, arch=None):
"""Write out a link step. Fills out target.binary. """
command = {
'executable': 'link',
'loadable_module': 'solink_module',
'shared_library': 'solink',
}[spec['type']]
command_suffix = ''
implicit_deps = set()
solibs = set()
order_deps = set()
if 'dependencies' in spec:
# Two kinds of dependencies:
# - Linkable dependencies (like a .a or a .so): add them to the link line.
# - Non-linkable dependencies (like a rule that generates a file
# and writes a stamp file): add them to implicit_deps
extra_link_deps = set()
for dep in spec['dependencies']:
target = self.target_outputs.get(dep)
if not target:
continue
linkable = target.Linkable()
if linkable:
new_deps = []
if (self.flavor == 'win' and
target.component_objs and
self.msvs_settings.IsUseLibraryDependencyInputs(config_name)):
new_deps = target.component_objs
if target.compile_deps:
order_deps.add(target.compile_deps)
elif self.flavor == 'win' and target.import_lib:
new_deps = [target.import_lib]
elif target.UsesToc(self.flavor):
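# Shared libraries that use a table-of-contents file are linked by path but
# depended on via their .TOC, so dependents only relink when the exported
# interface changes (see the TOC comments in the solink rules later in this
# file).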
solibs.add(target.binary)
implicit_deps.add(target.binary + '.TOC')
else:
new_deps = [target.binary]
for new_dep in new_deps:
if new_dep not in extra_link_deps:
extra_link_deps.add(new_dep)
link_deps.append(new_dep)
final_output = target.FinalOutput()
if not linkable or final_output != target.binary:
implicit_deps.add(final_output)
extra_bindings = []
if self.uses_cpp and self.flavor != 'win':
extra_bindings.append(('ld', '$ldxx'))
output = self.ComputeOutput(spec, arch)
if arch is None and not self.is_mac_bundle:
self.AppendPostbuildVariable(extra_bindings, spec, output, output)
is_executable = spec['type'] == 'executable'
# The ldflags config key is not used on mac or win. On those platforms
# linker flags are set via xcode_settings and msvs_settings, respectively.
env_ldflags = os.environ.get('LDFLAGS', '').split()
if self.flavor == 'mac':
ldflags = self.xcode_settings.GetLdflags(config_name,
self.ExpandSpecial(generator_default_variables['PRODUCT_DIR']),
self.GypPathToNinja, arch)
ldflags = env_ldflags + ldflags
elif self.flavor == 'win':
manifest_base_name = self.GypPathToUniqueOutput(
self.ComputeOutputFileName(spec))
ldflags, intermediate_manifest, manifest_files = \
self.msvs_settings.GetLdflags(config_name, self.GypPathToNinja,
self.ExpandSpecial, manifest_base_name,
output, is_executable,
self.toplevel_build)
ldflags = env_ldflags + ldflags
self.WriteVariableList(ninja_file, 'manifests', manifest_files)
implicit_deps = implicit_deps.union(manifest_files)
if intermediate_manifest:
self.WriteVariableList(
ninja_file, 'intermediatemanifest', [intermediate_manifest])
command_suffix = _GetWinLinkRuleNameSuffix(
self.msvs_settings.IsEmbedManifest(config_name))
def_file = self.msvs_settings.GetDefFile(self.GypPathToNinja)
if def_file:
implicit_deps.add(def_file)
else:
# Respect environment variables related to build, but target-specific
# flags can still override them.
ldflags = env_ldflags + config.get('ldflags', [])
if is_executable and len(solibs):
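# Give executables that link against locally-built shared libraries an rpath
# relative to the binary (e.g. -Wl,-rpath=$ORIGIN/lib/ for the target
# toolset) so they can run from the build directory without extra
# environment setup.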
rpath = 'lib/'
if self.toolset != 'target':
rpath += self.toolset
ldflags.append(r'-Wl,-rpath=\$$ORIGIN/%s' % rpath)
ldflags.append('-Wl,-rpath-link=%s' % rpath)
self.WriteVariableList(ninja_file, 'ldflags',
map(self.ExpandSpecial, ldflags))
library_dirs = config.get('library_dirs', [])
if self.flavor == 'win':
library_dirs = [self.msvs_settings.ConvertVSMacros(l, config_name)
for l in library_dirs]
library_dirs = ['/LIBPATH:' + QuoteShellArgument(self.GypPathToNinja(l),
self.flavor)
for l in library_dirs]
else:
library_dirs = [QuoteShellArgument('-L' + self.GypPathToNinja(l),
self.flavor)
for l in library_dirs]
libraries = gyp.common.uniquer(map(self.ExpandSpecial,
spec.get('libraries', [])))
if self.flavor == 'mac':
libraries = self.xcode_settings.AdjustLibraries(libraries, config_name)
elif self.flavor == 'win':
libraries = self.msvs_settings.AdjustLibraries(libraries)
self.WriteVariableList(ninja_file, 'libs', library_dirs + libraries)
linked_binary = output
if command in ('solink', 'solink_module'):
extra_bindings.append(('soname', os.path.split(output)[1]))
extra_bindings.append(('lib',
gyp.common.EncodePOSIXShellArgument(output)))
if self.flavor != 'win':
link_file_list = output
if self.is_mac_bundle:
# 'Dependency Framework.framework/Versions/A/Dependency Framework' ->
# 'Dependency Framework.framework.rsp'
link_file_list = self.xcode_settings.GetWrapperName()
if arch:
link_file_list += '.' + arch
link_file_list += '.rsp'
# If an rspfile contains spaces, ninja surrounds the filename with
# quotes and then passes it to open(), creating a file with quotes in
# its name (and when looking for the rsp file, the name goes through
# bash, which strips the quotes) :-/
link_file_list = link_file_list.replace(' ', '_')
extra_bindings.append(
('link_file_list',
gyp.common.EncodePOSIXShellArgument(link_file_list)))
if self.flavor == 'win':
extra_bindings.append(('binary', output))
if ('/NOENTRY' not in ldflags and
not self.msvs_settings.GetNoImportLibrary(config_name)):
self.target.import_lib = output + '.lib'
extra_bindings.append(('implibflag',
'/IMPLIB:%s' % self.target.import_lib))
pdbname = self.msvs_settings.GetPDBName(
config_name, self.ExpandSpecial, output + '.pdb')
output = [output, self.target.import_lib]
if pdbname:
output.append(pdbname)
elif not self.is_mac_bundle:
output = [output, output + '.TOC']
else:
command = command + '_notoc'
elif self.flavor == 'win':
extra_bindings.append(('binary', output))
pdbname = self.msvs_settings.GetPDBName(
config_name, self.ExpandSpecial, output + '.pdb')
if pdbname:
output = [output, pdbname]
if len(solibs):
extra_bindings.append(('solibs', gyp.common.EncodePOSIXShellList(solibs)))
ninja_file.build(output, command + command_suffix, link_deps,
implicit=list(implicit_deps),
order_only=list(order_deps),
variables=extra_bindings)
return linked_binary
def WriteTarget(self, spec, config_name, config, link_deps, compile_deps):
extra_link_deps = any(self.target_outputs.get(dep).Linkable()
for dep in spec.get('dependencies', [])
if dep in self.target_outputs)
if spec['type'] == 'none' or (not link_deps and not extra_link_deps):
# TODO(evan): don't call this function for 'none' target types, as
# it doesn't do anything, and we fake out a 'binary' with a stamp file.
self.target.binary = compile_deps
self.target.type = 'none'
elif spec['type'] == 'static_library':
self.target.binary = self.ComputeOutput(spec)
if (self.flavor not in ('mac', 'openbsd', 'netbsd', 'win') and not
self.is_standalone_static_library):
self.ninja.build(self.target.binary, 'alink_thin', link_deps,
order_only=compile_deps)
else:
variables = []
if self.xcode_settings:
libtool_flags = self.xcode_settings.GetLibtoolflags(config_name)
if libtool_flags:
variables.append(('libtool_flags', libtool_flags))
if self.msvs_settings:
libflags = self.msvs_settings.GetLibFlags(config_name,
self.GypPathToNinja)
variables.append(('libflags', libflags))
if self.flavor != 'mac' or len(self.archs) == 1:
self.AppendPostbuildVariable(variables, spec,
self.target.binary, self.target.binary)
self.ninja.build(self.target.binary, 'alink', link_deps,
order_only=compile_deps, variables=variables)
else:
inputs = []
for arch in self.archs:
output = self.ComputeOutput(spec, arch)
self.arch_subninjas[arch].build(output, 'alink', link_deps[arch],
order_only=compile_deps,
variables=variables)
inputs.append(output)
# TODO: It's not clear if libtool_flags should be passed to the alink
# call that combines single-arch .a files into a fat .a file.
self.AppendPostbuildVariable(variables, spec,
self.target.binary, self.target.binary)
self.ninja.build(self.target.binary, 'alink', inputs,
# FIXME: test proving order_only=compile_deps isn't
# needed.
variables=variables)
else:
self.target.binary = self.WriteLink(spec, config_name, config, link_deps)
return self.target.binary
def WriteMacBundle(self, spec, mac_bundle_depends, is_empty):
assert self.is_mac_bundle
package_framework = spec['type'] in ('shared_library', 'loadable_module')
output = self.ComputeMacBundleOutput()
if is_empty:
output += '.stamp'
variables = []
self.AppendPostbuildVariable(variables, spec, output, self.target.binary,
is_command_start=not package_framework)
if package_framework and not is_empty:
variables.append(('version', self.xcode_settings.GetFrameworkVersion()))
self.ninja.build(output, 'package_framework', mac_bundle_depends,
variables=variables)
else:
self.ninja.build(output, 'stamp', mac_bundle_depends,
variables=variables)
self.target.bundle = output
return output
def GetToolchainEnv(self, additional_settings=None):
"""Returns the variables toolchain would set for build steps."""
env = self.GetSortedXcodeEnv(additional_settings=additional_settings)
if self.flavor == 'win':
env = self.GetMsvsToolchainEnv(
additional_settings=additional_settings)
return env
def GetMsvsToolchainEnv(self, additional_settings=None):
"""Returns the variables Visual Studio would set for build steps."""
return self.msvs_settings.GetVSMacroEnv('$!PRODUCT_DIR',
config=self.config_name)
def GetSortedXcodeEnv(self, additional_settings=None):
"""Returns the variables Xcode would set for build steps."""
assert self.abs_build_dir
abs_build_dir = self.abs_build_dir
return gyp.xcode_emulation.GetSortedXcodeEnv(
self.xcode_settings, abs_build_dir,
os.path.join(abs_build_dir, self.build_to_base), self.config_name,
additional_settings)
def GetSortedXcodePostbuildEnv(self):
"""Returns the variables Xcode would set for postbuild steps."""
postbuild_settings = {}
# CHROMIUM_STRIP_SAVE_FILE is a chromium-specific hack.
# TODO(thakis): It would be nice to have some general mechanism instead.
strip_save_file = self.xcode_settings.GetPerTargetSetting(
'CHROMIUM_STRIP_SAVE_FILE')
if strip_save_file:
postbuild_settings['CHROMIUM_STRIP_SAVE_FILE'] = strip_save_file
return self.GetSortedXcodeEnv(additional_settings=postbuild_settings)
def AppendPostbuildVariable(self, variables, spec, output, binary,
is_command_start=False):
"""Adds a 'postbuild' variable if there is a postbuild for |output|."""
postbuild = self.GetPostbuildCommand(spec, output, binary, is_command_start)
if postbuild:
variables.append(('postbuilds', postbuild))
def GetPostbuildCommand(self, spec, output, output_binary, is_command_start):
"""Returns a shell command that runs all the postbuilds, and removes
|output| if any of them fails. If |is_command_start| is False, then the
returned string will start with ' && '."""
if not self.xcode_settings or spec['type'] == 'none' or not output:
return ''
output = QuoteShellArgument(output, self.flavor)
postbuilds = gyp.xcode_emulation.GetSpecPostbuildCommands(spec, quiet=True)
if output_binary is not None:
postbuilds = self.xcode_settings.AddImplicitPostbuilds(
self.config_name,
os.path.normpath(os.path.join(self.base_to_build, output)),
QuoteShellArgument(
os.path.normpath(os.path.join(self.base_to_build, output_binary)),
self.flavor),
postbuilds, quiet=True)
if not postbuilds:
return ''
# Postbuilds expect to be run in the gyp file's directory, so insert an
# implicit postbuild to cd into that directory first.
postbuilds.insert(0, gyp.common.EncodePOSIXShellList(
['cd', self.build_to_base]))
env = self.ComputeExportEnvString(self.GetSortedXcodePostbuildEnv())
# G will be non-null if any postbuild fails. Run all postbuilds in a
# subshell.
commands = env + ' (' + \
' && '.join([ninja_syntax.escape(command) for command in postbuilds])
command_string = (commands + '); G=$$?; '
# Remove the final output if any postbuild failed.
'((exit $$G) || rm -rf %s) ' % output + '&& exit $$G)')
if is_command_start:
return '(' + command_string + ' && '
else:
return '$ && (' + command_string
def ComputeExportEnvString(self, env):
"""Given an environment, returns a string looking like
'export FOO=foo; export BAR="${FOO} bar";'
that exports |env| to the shell."""
export_str = []
for k, v in env:
export_str.append('export %s=%s;' %
(k, ninja_syntax.escape(gyp.common.EncodePOSIXShellArgument(v))))
return ' '.join(export_str)
def ComputeMacBundleOutput(self):
"""Return the 'output' (full output path) to a bundle output directory."""
assert self.is_mac_bundle
path = generator_default_variables['PRODUCT_DIR']
return self.ExpandSpecial(
os.path.join(path, self.xcode_settings.GetWrapperName()))
def ComputeOutputFileName(self, spec, type=None):
"""Compute the filename of the final output for the current target."""
if not type:
type = spec['type']
default_variables = copy.copy(generator_default_variables)
CalculateVariables(default_variables, {'flavor': self.flavor})
# Compute filename prefix: the product prefix, or a default for
# the product type.
DEFAULT_PREFIX = {
'loadable_module': default_variables['SHARED_LIB_PREFIX'],
'shared_library': default_variables['SHARED_LIB_PREFIX'],
'static_library': default_variables['STATIC_LIB_PREFIX'],
'executable': default_variables['EXECUTABLE_PREFIX'],
}
prefix = spec.get('product_prefix', DEFAULT_PREFIX.get(type, ''))
# Compute filename extension: the product extension, or a default
# for the product type.
DEFAULT_EXTENSION = {
'loadable_module': default_variables['SHARED_LIB_SUFFIX'],
'shared_library': default_variables['SHARED_LIB_SUFFIX'],
'static_library': default_variables['STATIC_LIB_SUFFIX'],
'executable': default_variables['EXECUTABLE_SUFFIX'],
}
extension = spec.get('product_extension')
if extension:
extension = '.' + extension
else:
extension = DEFAULT_EXTENSION.get(type, '')
if 'product_name' in spec:
# If we were given an explicit name, use that.
target = spec['product_name']
else:
# Otherwise, derive a name from the target name.
target = spec['target_name']
if prefix == 'lib':
# Snip out an extra 'lib' from libs if appropriate.
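# e.g. a target already named 'libfoo' with prefix 'lib' would otherwise end
# up as 'liblibfoo' plus the platform suffix; stripping it here keeps the
# final name as 'libfoo' plus the suffix.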
target = StripPrefix(target, 'lib')
if type in ('static_library', 'loadable_module', 'shared_library',
'executable'):
return '%s%s%s' % (prefix, target, extension)
elif type == 'none':
return '%s.stamp' % target
else:
raise Exception('Unhandled output type %s' % type)
def ComputeOutput(self, spec, arch=None):
"""Compute the path for the final output of the spec."""
type = spec['type']
if self.flavor == 'win':
override = self.msvs_settings.GetOutputName(self.config_name,
self.ExpandSpecial)
if override:
return override
if arch is None and self.flavor == 'mac' and type in (
'static_library', 'executable', 'shared_library', 'loadable_module'):
filename = self.xcode_settings.GetExecutablePath()
else:
filename = self.ComputeOutputFileName(spec, type)
if arch is None and 'product_dir' in spec:
path = os.path.join(spec['product_dir'], filename)
return self.ExpandSpecial(path)
# Some products go into the output root, libraries go into shared library
# dir, and everything else goes into the normal place.
type_in_output_root = ['executable', 'loadable_module']
if self.flavor == 'mac' and self.toolset == 'target':
type_in_output_root += ['shared_library', 'static_library']
elif self.flavor == 'win' and self.toolset == 'target':
type_in_output_root += ['shared_library']
if arch is not None:
# Make sure partial executables don't end up in a bundle or the regular
# output directory.
archdir = 'arch'
if self.toolset != 'target':
archdir = os.path.join('arch', '%s' % self.toolset)
return os.path.join(archdir, AddArch(filename, arch))
elif type in type_in_output_root or self.is_standalone_static_library:
return filename
elif type == 'shared_library':
libdir = 'lib'
if self.toolset != 'target':
libdir = os.path.join('lib', '%s' % self.toolset)
return os.path.join(libdir, filename)
else:
return self.GypPathToUniqueOutput(filename, qualified=False)
def WriteVariableList(self, ninja_file, var, values):
assert not isinstance(values, str)
if values is None:
values = []
ninja_file.variable(var, ' '.join(values))
def WriteNewNinjaRule(self, name, args, description, is_cygwin, env, pool,
depfile=None):
"""Write out a new ninja "rule" statement for a given command.
Returns the name of the new rule, and a copy of |args| with variables
expanded."""
if self.flavor == 'win':
args = [self.msvs_settings.ConvertVSMacros(
arg, self.base_to_build, config=self.config_name)
for arg in args]
description = self.msvs_settings.ConvertVSMacros(
description, config=self.config_name)
elif self.flavor == 'mac':
# |env| is an empty list on non-mac.
args = [gyp.xcode_emulation.ExpandEnvVars(arg, env) for arg in args]
description = gyp.xcode_emulation.ExpandEnvVars(description, env)
# TODO: we shouldn't need to qualify names; we do it because
# currently the ninja rule namespace is global, but it really
# should be scoped to the subninja.
rule_name = self.name
if self.toolset == 'target':
rule_name += '.' + self.toolset
rule_name += '.' + name
rule_name = re.sub('[^a-zA-Z0-9_]', '_', rule_name)
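# e.g. with self.name 'base', toolset 'target' and name 'copy-files', the
# assembled 'base.target.copy-files' above is sanitized here to
# 'base_target_copy_files'.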
# Remove variable references, but not if they refer to the magic rule
# variables. This is not quite right, as it also protects these for
# actions, not just for rules where they are valid. Good enough.
protect = [ '${root}', '${dirname}', '${source}', '${ext}', '${name}' ]
protect = '(?!' + '|'.join(map(re.escape, protect)) + ')'
description = re.sub(protect + r'\$', '_', description)
# gyp dictates that commands are run from the base directory.
# cd into the directory before running, and adjust paths in
# the arguments to point to the proper locations.
rspfile = None
rspfile_content = None
args = [self.ExpandSpecial(arg, self.base_to_build) for arg in args]
if self.flavor == 'win':
rspfile = rule_name + '.$unique_name.rsp'
# The cygwin case handles this inside the bash sub-shell.
run_in = '' if is_cygwin else ' ' + self.build_to_base
if is_cygwin:
rspfile_content = self.msvs_settings.BuildCygwinBashCommandLine(
args, self.build_to_base)
else:
rspfile_content = gyp.msvs_emulation.EncodeRspFileList(args)
command = ('%s gyp-win-tool action-wrapper $arch ' % sys.executable +
rspfile + run_in)
else:
env = self.ComputeExportEnvString(env)
command = gyp.common.EncodePOSIXShellList(args)
command = 'cd %s; ' % self.build_to_base + env + command
# GYP rules/actions express being no-ops by not touching their outputs.
# Avoid executing downstream dependencies in this case by specifying
# restat=1 to ninja.
self.ninja.rule(rule_name, command, description, depfile=depfile,
restat=True, pool=pool,
rspfile=rspfile, rspfile_content=rspfile_content)
self.ninja.newline()
return rule_name, args
def CalculateVariables(default_variables, params):
"""Calculate additional variables for use in the build (called by gyp)."""
global generator_additional_non_configuration_keys
global generator_additional_path_sections
flavor = gyp.common.GetFlavor(params)
if flavor == 'mac':
default_variables.setdefault('OS', 'mac')
default_variables.setdefault('SHARED_LIB_SUFFIX', '.dylib')
default_variables.setdefault('SHARED_LIB_DIR',
generator_default_variables['PRODUCT_DIR'])
default_variables.setdefault('LIB_DIR',
generator_default_variables['PRODUCT_DIR'])
# Copy additional generator configuration data from Xcode, which is shared
# by the Mac Ninja generator.
import gyp.generator.xcode as xcode_generator
generator_additional_non_configuration_keys = getattr(xcode_generator,
'generator_additional_non_configuration_keys', [])
generator_additional_path_sections = getattr(xcode_generator,
'generator_additional_path_sections', [])
global generator_extra_sources_for_rules
generator_extra_sources_for_rules = getattr(xcode_generator,
'generator_extra_sources_for_rules', [])
elif flavor == 'win':
exts = gyp.MSVSUtil.TARGET_TYPE_EXT
default_variables.setdefault('OS', 'win')
default_variables['EXECUTABLE_SUFFIX'] = '.' + exts['executable']
default_variables['STATIC_LIB_PREFIX'] = ''
default_variables['STATIC_LIB_SUFFIX'] = '.' + exts['static_library']
default_variables['SHARED_LIB_PREFIX'] = ''
default_variables['SHARED_LIB_SUFFIX'] = '.' + exts['shared_library']
# Copy additional generator configuration data from VS, which is shared
# by the Windows Ninja generator.
import gyp.generator.msvs as msvs_generator
generator_additional_non_configuration_keys = getattr(msvs_generator,
'generator_additional_non_configuration_keys', [])
generator_additional_path_sections = getattr(msvs_generator,
'generator_additional_path_sections', [])
gyp.msvs_emulation.CalculateCommonVariables(default_variables, params)
else:
operating_system = flavor
if flavor == 'android':
operating_system = 'linux' # Keep this legacy behavior for now.
default_variables.setdefault('OS', operating_system)
default_variables.setdefault('SHARED_LIB_SUFFIX', '.so')
default_variables.setdefault('SHARED_LIB_DIR',
os.path.join('$!PRODUCT_DIR', 'lib'))
default_variables.setdefault('LIB_DIR',
os.path.join('$!PRODUCT_DIR', 'obj'))
def ComputeOutputDir(params):
"""Returns the path from the toplevel_dir to the build output directory."""
# generator_dir: relative path from pwd to where make puts build files.
# Makes migrating from make to ninja easier; ninja itself doesn't put anything here.
generator_dir = os.path.relpath(params['options'].generator_output or '.')
# output_dir: relative path from generator_dir to the build directory.
output_dir = params.get('generator_flags', {}).get('output_dir', 'out')
# Relative path from source root to our output files. e.g. "out"
return os.path.normpath(os.path.join(generator_dir, output_dir))
def CalculateGeneratorInputInfo(params):
"""Called by __init__ to initialize generator values based on params."""
# E.g. "out/gypfiles"
toplevel = params['options'].toplevel_dir
qualified_out_dir = os.path.normpath(os.path.join(
toplevel, ComputeOutputDir(params), 'gypfiles'))
global generator_filelist_paths
generator_filelist_paths = {
'toplevel': toplevel,
'qualified_out_dir': qualified_out_dir,
}
def OpenOutput(path, mode='w'):
"""Open |path| for writing, creating directories if necessary."""
gyp.common.EnsureDirExists(path)
return open(path, mode)
def CommandWithWrapper(cmd, wrappers, prog):
wrapper = wrappers.get(cmd, '')
if wrapper:
return wrapper + ' ' + prog
return prog
def GetDefaultConcurrentLinks():
"""Returns a best-guess for a number of concurrent links."""
pool_size = int(os.environ.get('GYP_LINK_CONCURRENCY', 0))
if pool_size:
return pool_size
if sys.platform in ('win32', 'cygwin'):
import ctypes
class MEMORYSTATUSEX(ctypes.Structure):
_fields_ = [
("dwLength", ctypes.c_ulong),
("dwMemoryLoad", ctypes.c_ulong),
("ullTotalPhys", ctypes.c_ulonglong),
("ullAvailPhys", ctypes.c_ulonglong),
("ullTotalPageFile", ctypes.c_ulonglong),
("ullAvailPageFile", ctypes.c_ulonglong),
("ullTotalVirtual", ctypes.c_ulonglong),
("ullAvailVirtual", ctypes.c_ulonglong),
("sullAvailExtendedVirtual", ctypes.c_ulonglong),
]
stat = MEMORYSTATUSEX()
stat.dwLength = ctypes.sizeof(stat)
ctypes.windll.kernel32.GlobalMemoryStatusEx(ctypes.byref(stat))
# VS 2015 uses 20% more working set than VS 2013 and can consume all RAM
# on a 64 GB machine.
mem_limit = max(1, stat.ullTotalPhys / (5 * (2 ** 30))) # total / 5GB
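# e.g. 32 GiB of physical RAM allows mem_limit = 6 concurrent links here,
# before the GYP_LINK_CONCURRENCY_MAX cap below is applied.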
hard_cap = max(1, int(os.environ.get('GYP_LINK_CONCURRENCY_MAX', 2**32)))
return min(mem_limit, hard_cap)
elif sys.platform.startswith('linux'):
if os.path.exists("/proc/meminfo"):
with open("/proc/meminfo") as meminfo:
memtotal_re = re.compile(r'^MemTotal:\s*(\d*)\s*kB')
for line in meminfo:
match = memtotal_re.match(line)
if not match:
continue
# Allow 8Gb per link on Linux because Gold is quite memory hungry
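# e.g. a MemTotal of 16777216 kB (16 GiB) yields max(1, 16 GiB / 8 GiB) = 2
# concurrent links.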
return max(1, int(match.group(1)) / (8 * (2 ** 20)))
return 1
elif sys.platform == 'darwin':
try:
avail_bytes = int(subprocess.check_output(['sysctl', '-n', 'hw.memsize']))
# A static library debug build of Chromium's unit_tests takes ~2.7GB, so
# 4GB per ld process allows for some more bloat.
return max(1, avail_bytes / (4 * (2 ** 30))) # total / 4GB
except:
return 1
else:
# TODO(scottmg): Implement this for other platforms.
return 1
def _GetWinLinkRuleNameSuffix(embed_manifest):
"""Returns the suffix used to select an appropriate linking rule depending on
whether the manifest embedding is enabled."""
return '_embed' if embed_manifest else ''
def _AddWinLinkRules(master_ninja, embed_manifest):
"""Adds link rules for Windows platform to |master_ninja|."""
def FullLinkCommand(ldcmd, out, binary_type):
resource_name = {
'exe': '1',
'dll': '2',
}[binary_type]
return '%(python)s gyp-win-tool link-with-manifests $arch %(embed)s ' \
'%(out)s "%(ldcmd)s" %(resname)s $mt $rc "$intermediatemanifest" ' \
'$manifests' % {
'python': sys.executable,
'out': out,
'ldcmd': ldcmd,
'resname': resource_name,
'embed': embed_manifest }
rule_name_suffix = _GetWinLinkRuleNameSuffix(embed_manifest)
use_separate_mspdbsrv = (
int(os.environ.get('GYP_USE_SEPARATE_MSPDBSRV', '0')) != 0)
dlldesc = 'LINK%s(DLL) $binary' % rule_name_suffix.upper()
dllcmd = ('%s gyp-win-tool link-wrapper $arch %s '
'$ld /nologo $implibflag /DLL /OUT:$binary '
'@$binary.rsp' % (sys.executable, use_separate_mspdbsrv))
dllcmd = FullLinkCommand(dllcmd, '$binary', 'dll')
master_ninja.rule('solink' + rule_name_suffix,
description=dlldesc, command=dllcmd,
rspfile='$binary.rsp',
rspfile_content='$libs $in_newline $ldflags',
restat=True,
pool='link_pool')
master_ninja.rule('solink_module' + rule_name_suffix,
description=dlldesc, command=dllcmd,
rspfile='$binary.rsp',
rspfile_content='$libs $in_newline $ldflags',
restat=True,
pool='link_pool')
# Note that ldflags goes at the end so that it has the option of
# overriding default settings earlier in the command line.
exe_cmd = ('%s gyp-win-tool link-wrapper $arch %s '
'$ld /nologo /OUT:$binary @$binary.rsp' %
(sys.executable, use_separate_mspdbsrv))
exe_cmd = FullLinkCommand(exe_cmd, '$binary', 'exe')
master_ninja.rule('link' + rule_name_suffix,
description='LINK%s $binary' % rule_name_suffix.upper(),
command=exe_cmd,
rspfile='$binary.rsp',
rspfile_content='$in_newline $libs $ldflags',
pool='link_pool')
def GenerateOutputForConfig(target_list, target_dicts, data, params,
config_name):
options = params['options']
flavor = gyp.common.GetFlavor(params)
generator_flags = params.get('generator_flags', {})
# build_dir: relative path from source root to our output files.
# e.g. "out/Debug"
build_dir = os.path.normpath(
os.path.join(ComputeOutputDir(params), config_name))
toplevel_build = os.path.join(options.toplevel_dir, build_dir)
master_ninja_file = OpenOutput(os.path.join(toplevel_build, 'build.ninja'))
master_ninja = ninja_syntax.Writer(master_ninja_file, width=120)
# Put build-time support tools in out/{config_name}.
gyp.common.CopyTool(flavor, toplevel_build)
# Grab make settings for CC/CXX.
# The rules are
# - The priority from low to high is gcc/g++, the 'make_global_settings' in
# gyp, the environment variable.
# - If there is no 'make_global_settings' for CC.host/CXX.host or
# 'CC_host'/'CXX_host' environment variable, cc_host/cxx_host should be set
# to cc/cxx.
if flavor == 'win':
ar = 'lib.exe'
# cc and cxx must be set to the correct architecture by overriding with one
# of cl_x86 or cl_x64 below.
cc = 'UNSET'
cxx = 'UNSET'
ld = 'link.exe'
ld_host = '$ld'
else:
ar = 'ar'
cc = 'cc'
cxx = 'c++'
ld = '$cc'
ldxx = '$cxx'
ld_host = '$cc_host'
ldxx_host = '$cxx_host'
ar_host = 'ar'
cc_host = None
cxx_host = None
cc_host_global_setting = None
cxx_host_global_setting = None
clang_cl = None
nm = 'nm'
nm_host = 'nm'
readelf = 'readelf'
readelf_host = 'readelf'
build_file, _, _ = gyp.common.ParseQualifiedTarget(target_list[0])
make_global_settings = data[build_file].get('make_global_settings', [])
build_to_root = gyp.common.InvertRelativePath(build_dir,
options.toplevel_dir)
wrappers = {}
for key, value in make_global_settings:
if key == 'AR':
ar = os.path.join(build_to_root, value)
if key == 'AR.host':
ar_host = os.path.join(build_to_root, value)
if key == 'CC':
cc = os.path.join(build_to_root, value)
if cc.endswith('clang-cl'):
clang_cl = cc
if key == 'CXX':
cxx = os.path.join(build_to_root, value)
if key == 'CC.host':
cc_host = os.path.join(build_to_root, value)
cc_host_global_setting = value
if key == 'CXX.host':
cxx_host = os.path.join(build_to_root, value)
cxx_host_global_setting = value
if key == 'LD':
ld = os.path.join(build_to_root, value)
if key == 'LD.host':
ld_host = os.path.join(build_to_root, value)
if key == 'NM':
nm = os.path.join(build_to_root, value)
if key == 'NM.host':
nm_host = os.path.join(build_to_root, value)
if key == 'READELF':
readelf = os.path.join(build_to_root, value)
if key == 'READELF.host':
readelf_host = os.path.join(build_to_root, value)
if key.endswith('_wrapper'):
wrappers[key[:-len('_wrapper')]] = os.path.join(build_to_root, value)
# Support wrappers from environment variables too.
for key, value in os.environ.iteritems():
if key.lower().endswith('_wrapper'):
key_prefix = key[:-len('_wrapper')]
key_prefix = re.sub(r'\.HOST$', '.host', key_prefix)
wrappers[key_prefix] = os.path.join(build_to_root, value)
if flavor == 'win':
configs = [target_dicts[qualified_target]['configurations'][config_name]
for qualified_target in target_list]
shared_system_includes = None
if not generator_flags.get('ninja_use_custom_environment_files', 0):
shared_system_includes = \
gyp.msvs_emulation.ExtractSharedMSVSSystemIncludes(
configs, generator_flags)
cl_paths = gyp.msvs_emulation.GenerateEnvironmentFiles(
toplevel_build, generator_flags, shared_system_includes, OpenOutput)
for arch, path in cl_paths.iteritems():
if clang_cl:
# If we have selected clang-cl, use that instead.
path = clang_cl
command = CommandWithWrapper('CC', wrappers,
QuoteShellArgument(path, 'win'))
if clang_cl:
# Use clang-cl to cross-compile for x86 or x86_64.
command += (' -m32' if arch == 'x86' else ' -m64')
master_ninja.variable('cl_' + arch, command)
cc = GetEnvironFallback(['CC_target', 'CC'], cc)
master_ninja.variable('cc', CommandWithWrapper('CC', wrappers, cc))
cxx = GetEnvironFallback(['CXX_target', 'CXX'], cxx)
master_ninja.variable('cxx', CommandWithWrapper('CXX', wrappers, cxx))
if flavor == 'win':
master_ninja.variable('ld', ld)
master_ninja.variable('idl', 'midl.exe')
master_ninja.variable('ar', ar)
master_ninja.variable('rc', 'rc.exe')
master_ninja.variable('ml_x86', 'ml.exe')
master_ninja.variable('ml_x64', 'ml64.exe')
master_ninja.variable('mt', 'mt.exe')
else:
master_ninja.variable('ld', CommandWithWrapper('LINK', wrappers, ld))
master_ninja.variable('ldxx', CommandWithWrapper('LINK', wrappers, ldxx))
master_ninja.variable('ar', GetEnvironFallback(['AR_target', 'AR'], ar))
if flavor != 'mac':
# Mac does not use readelf/nm for .TOC generation, so avoid polluting
# the master ninja with extra unused variables.
master_ninja.variable(
'nm', GetEnvironFallback(['NM_target', 'NM'], nm))
master_ninja.variable(
'readelf', GetEnvironFallback(['READELF_target', 'READELF'], readelf))
if generator_supports_multiple_toolsets:
if not cc_host:
cc_host = cc
if not cxx_host:
cxx_host = cxx
master_ninja.variable('ar_host', GetEnvironFallback(['AR_host'], ar_host))
master_ninja.variable('nm_host', GetEnvironFallback(['NM_host'], nm_host))
master_ninja.variable('readelf_host',
GetEnvironFallback(['READELF_host'], readelf_host))
cc_host = GetEnvironFallback(['CC_host'], cc_host)
cxx_host = GetEnvironFallback(['CXX_host'], cxx_host)
# An environment variable can be referenced in 'make_global_settings', like
# ['CC.host', '$(CC)'] or ['CXX.host', '$(CXX)']; transform those references here.
if '$(CC)' in cc_host and cc_host_global_setting:
cc_host = cc_host_global_setting.replace('$(CC)', cc)
if '$(CXX)' in cxx_host and cxx_host_global_setting:
cxx_host = cxx_host_global_setting.replace('$(CXX)', cxx)
master_ninja.variable('cc_host',
CommandWithWrapper('CC.host', wrappers, cc_host))
master_ninja.variable('cxx_host',
CommandWithWrapper('CXX.host', wrappers, cxx_host))
if flavor == 'win':
master_ninja.variable('ld_host', ld_host)
else:
master_ninja.variable('ld_host', CommandWithWrapper(
'LINK', wrappers, ld_host))
master_ninja.variable('ldxx_host', CommandWithWrapper(
'LINK', wrappers, ldxx_host))
master_ninja.newline()
master_ninja.pool('link_pool', depth=GetDefaultConcurrentLinks())
master_ninja.newline()
deps = 'msvc' if flavor == 'win' else 'gcc'
if flavor != 'win':
master_ninja.rule(
'cc',
description='CC $out',
command=('$cc -MMD -MF $out.d $defines $includes $cflags $cflags_c '
'$cflags_pch_c -c $in -o $out'),
depfile='$out.d',
deps=deps)
master_ninja.rule(
'cc_s',
description='CC $out',
command=('$cc $defines $includes $cflags $cflags_c '
'$cflags_pch_c -c $in -o $out'))
master_ninja.rule(
'cxx',
description='CXX $out',
command=('$cxx -MMD -MF $out.d $defines $includes $cflags $cflags_cc '
'$cflags_pch_cc -c $in -o $out'),
depfile='$out.d',
deps=deps)
else:
# TODO(scottmg) Using separate pdb names is a test to see if it works around
# http://crbug.com/142362. It seems there's a race between the creation of
# the .pdb by the precompiled header step for .cc and the compilation of
# .c files. This should be handled by mspdbsrv, but on rare occasions it
# errors out with
# c1xx : fatal error C1033: cannot open program database
# By making the rules target separate pdb files this might be avoided.
cc_command = ('ninja -t msvc -e $arch ' +
'-- '
'$cc /nologo /showIncludes /FC '
'@$out.rsp /c $in /Fo$out /Fd$pdbname_c ')
cxx_command = ('ninja -t msvc -e $arch ' +
'-- '
'$cxx /nologo /showIncludes /FC '
'@$out.rsp /c $in /Fo$out /Fd$pdbname_cc ')
master_ninja.rule(
'cc',
description='CC $out',
command=cc_command,
rspfile='$out.rsp',
rspfile_content='$defines $includes $cflags $cflags_c',
deps=deps)
master_ninja.rule(
'cxx',
description='CXX $out',
command=cxx_command,
rspfile='$out.rsp',
rspfile_content='$defines $includes $cflags $cflags_cc',
deps=deps)
master_ninja.rule(
'idl',
description='IDL $in',
command=('%s gyp-win-tool midl-wrapper $arch $outdir '
'$tlb $h $dlldata $iid $proxy $in '
'$midl_includes $idlflags' % sys.executable))
master_ninja.rule(
'rc',
description='RC $in',
# Note: $in must be last otherwise rc.exe complains.
command=('%s gyp-win-tool rc-wrapper '
'$arch $rc $defines $resource_includes $rcflags /fo$out $in' %
sys.executable))
master_ninja.rule(
'asm',
description='ASM $out',
command=('%s gyp-win-tool asm-wrapper '
'$arch $asm $defines $includes $asmflags /c /Fo $out $in' %
sys.executable))
if flavor != 'mac' and flavor != 'win':
master_ninja.rule(
'alink',
description='AR $out',
command='rm -f $out && $ar rcs $arflags $out $in')
master_ninja.rule(
'alink_thin',
description='AR $out',
command='rm -f $out && $ar rcsT $arflags $out $in')
# This allows targets that only need to depend on $lib's API to declare an
# order-only dependency on $lib.TOC and avoid relinking such downstream
# dependencies when $lib changes only in non-public ways.
# The resulting string leaves an uninterpolated %(suffix)s placeholder which
# is filled in by the final substitution below.
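# Concretely: the relink always runs, but $lib.TOC is only rewritten when its
# contents (SONAME plus exported symbols) change, so edges that depend on the
# TOC stay up to date and downstream targets skip relinking.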
mtime_preserving_solink_base = (
'if [ ! -e $lib -o ! -e $lib.TOC ]; then '
'%(solink)s && %(extract_toc)s > $lib.TOC; else '
'%(solink)s && %(extract_toc)s > $lib.tmp && '
'if ! cmp -s $lib.tmp $lib.TOC; then mv $lib.tmp $lib.TOC ; '
'fi; fi'
% { 'solink':
'$ld -shared $ldflags -o $lib -Wl,-soname=$soname %(suffix)s',
'extract_toc':
('{ $readelf -d $lib | grep SONAME ; '
'$nm -gD -f p $lib | cut -f1-2 -d\' \'; }')})
master_ninja.rule(
'solink',
description='SOLINK $lib',
restat=True,
command=mtime_preserving_solink_base % {'suffix': '@$link_file_list'},
rspfile='$link_file_list',
rspfile_content=
'-Wl,--whole-archive $in $solibs -Wl,--no-whole-archive $libs',
pool='link_pool')
master_ninja.rule(
'solink_module',
description='SOLINK(module) $lib',
restat=True,
command=mtime_preserving_solink_base % {'suffix': '@$link_file_list'},
rspfile='$link_file_list',
rspfile_content='-Wl,--start-group $in -Wl,--end-group $solibs $libs',
pool='link_pool')
master_ninja.rule(
'link',
description='LINK $out',
command=('$ld $ldflags -o $out '
'-Wl,--start-group $in -Wl,--end-group $solibs $libs'),
pool='link_pool')
elif flavor == 'win':
master_ninja.rule(
'alink',
description='LIB $out',
command=('%s gyp-win-tool link-wrapper $arch False '
'$ar /nologo /ignore:4221 /OUT:$out @$out.rsp' %
sys.executable),
rspfile='$out.rsp',
rspfile_content='$in_newline $libflags')
_AddWinLinkRules(master_ninja, embed_manifest=True)
_AddWinLinkRules(master_ninja, embed_manifest=False)
else:
master_ninja.rule(
'objc',
description='OBJC $out',
command=('$cc -MMD -MF $out.d $defines $includes $cflags $cflags_objc '
'$cflags_pch_objc -c $in -o $out'),
depfile='$out.d',
deps=deps)
master_ninja.rule(
'objcxx',
description='OBJCXX $out',
command=('$cxx -MMD -MF $out.d $defines $includes $cflags $cflags_objcc '
'$cflags_pch_objcc -c $in -o $out'),
depfile='$out.d',
deps=deps)
master_ninja.rule(
'alink',
description='LIBTOOL-STATIC $out, POSTBUILDS',
command='rm -f $out && '
'./gyp-mac-tool filter-libtool libtool $libtool_flags '
'-static -o $out $in'
'$postbuilds')
master_ninja.rule(
'lipo',
description='LIPO $out, POSTBUILDS',
command='rm -f $out && lipo -create $in -output $out$postbuilds')
master_ninja.rule(
'solipo',
description='SOLIPO $out, POSTBUILDS',
command=(
'rm -f $lib $lib.TOC && lipo -create $in -output $lib$postbuilds &&'
'%(extract_toc)s > $lib.TOC'
% { 'extract_toc':
'{ otool -l $lib | grep LC_ID_DYLIB -A 5; '
'nm -gP $lib | cut -f1-2 -d\' \' | grep -v U$$; true; }'}))
# Record the public interface of $lib in $lib.TOC. See the corresponding
# comment in the posix section above for details.
solink_base = '$ld %(type)s $ldflags -o $lib %(suffix)s'
mtime_preserving_solink_base = (
'if [ ! -e $lib -o ! -e $lib.TOC ] || '
# Always force dependent targets to relink if this library
# reexports something. Handling this correctly would require
# recursive TOC dumping but this is rare in practice, so punt.
'otool -l $lib | grep -q LC_REEXPORT_DYLIB ; then '
'%(solink)s && %(extract_toc)s > $lib.TOC; '
'else '
'%(solink)s && %(extract_toc)s > $lib.tmp && '
'if ! cmp -s $lib.tmp $lib.TOC; then '
'mv $lib.tmp $lib.TOC ; '
'fi; '
'fi'
% { 'solink': solink_base,
'extract_toc':
'{ otool -l $lib | grep LC_ID_DYLIB -A 5; '
'nm -gP $lib | cut -f1-2 -d\' \' | grep -v U$$; true; }'})
solink_suffix = '@$link_file_list$postbuilds'
master_ninja.rule(
'solink',
description='SOLINK $lib, POSTBUILDS',
restat=True,
command=mtime_preserving_solink_base % {'suffix': solink_suffix,
'type': '-shared'},
rspfile='$link_file_list',
rspfile_content='$in $solibs $libs',
pool='link_pool')
master_ninja.rule(
'solink_notoc',
description='SOLINK $lib, POSTBUILDS',
restat=True,
command=solink_base % {'suffix':solink_suffix, 'type': '-shared'},
rspfile='$link_file_list',
rspfile_content='$in $solibs $libs',
pool='link_pool')
master_ninja.rule(
'solink_module',
description='SOLINK(module) $lib, POSTBUILDS',
restat=True,
command=mtime_preserving_solink_base % {'suffix': solink_suffix,
'type': '-bundle'},
rspfile='$link_file_list',
rspfile_content='$in $solibs $libs',
pool='link_pool')
master_ninja.rule(
'solink_module_notoc',
description='SOLINK(module) $lib, POSTBUILDS',
restat=True,
command=solink_base % {'suffix': solink_suffix, 'type': '-bundle'},
rspfile='$link_file_list',
rspfile_content='$in $solibs $libs',
pool='link_pool')
master_ninja.rule(
'link',
description='LINK $out, POSTBUILDS',
command=('$ld $ldflags -o $out '
'$in $solibs $libs$postbuilds'),
pool='link_pool')
master_ninja.rule(
'preprocess_infoplist',
description='PREPROCESS INFOPLIST $out',
command=('$cc -E -P -Wno-trigraphs -x c $defines $in -o $out && '
'plutil -convert xml1 $out $out'))
master_ninja.rule(
'copy_infoplist',
description='COPY INFOPLIST $in',
command='$env ./gyp-mac-tool copy-info-plist $in $out $binary $keys')
master_ninja.rule(
'merge_infoplist',
description='MERGE INFOPLISTS $in',
command='$env ./gyp-mac-tool merge-info-plist $out $in')
master_ninja.rule(
'compile_xcassets',
description='COMPILE XCASSETS $in',
command='$env ./gyp-mac-tool compile-xcassets $keys $in')
master_ninja.rule(
'mac_tool',
description='MACTOOL $mactool_cmd $in',
command='$env ./gyp-mac-tool $mactool_cmd $in $out $binary')
master_ninja.rule(
'package_framework',
description='PACKAGE FRAMEWORK $out, POSTBUILDS',
command='./gyp-mac-tool package-framework $out $version$postbuilds '
'&& touch $out')
if flavor == 'win':
master_ninja.rule(
'stamp',
description='STAMP $out',
command='%s gyp-win-tool stamp $out' % sys.executable)
master_ninja.rule(
'copy',
description='COPY $in $out',
command='%s gyp-win-tool recursive-mirror $in $out' % sys.executable)
else:
master_ninja.rule(
'stamp',
description='STAMP $out',
command='${postbuilds}touch $out')
master_ninja.rule(
'copy',
description='COPY $in $out',
command='rm -rf $out && cp -af $in $out')
master_ninja.newline()
all_targets = set()
for build_file in params['build_files']:
for target in gyp.common.AllTargets(target_list,
target_dicts,
os.path.normpath(build_file)):
all_targets.add(target)
all_outputs = set()
# target_outputs is a map from qualified target name to a Target object.
target_outputs = {}
# target_short_names is a map from target short name to a list of Target
# objects.
target_short_names = {}
# Short names of targets that were skipped because they didn't contain
# anything interesting.
# NOTE: there may be overlap between this and non_empty_target_names.
empty_target_names = set()
# Set of non-empty short target names.
# NOTE: there may be overlap between this and empty_target_names.
non_empty_target_names = set()
for qualified_target in target_list:
# qualified_target is like: third_party/icu/icu.gyp:icui18n#target
build_file, name, toolset = \
gyp.common.ParseQualifiedTarget(qualified_target)
this_make_global_settings = data[build_file].get('make_global_settings', [])
assert make_global_settings == this_make_global_settings, (
"make_global_settings needs to be the same for all targets. %s vs. %s" %
(this_make_global_settings, make_global_settings))
spec = target_dicts[qualified_target]
if flavor == 'mac':
gyp.xcode_emulation.MergeGlobalXcodeSettingsToSpec(data[build_file], spec)
# If build_file is a symlink, we must not follow it because there's a chance
# it could point to a path above toplevel_dir, and we cannot correctly deal
# with that case at the moment.
build_file = gyp.common.RelativePath(build_file, options.toplevel_dir,
False)
qualified_target_for_hash = gyp.common.QualifiedTarget(build_file, name,
toolset)
hash_for_rules = hashlib.md5(qualified_target_for_hash).hexdigest()
base_path = os.path.dirname(build_file)
obj = 'obj'
if toolset != 'target':
obj += '.' + toolset
output_file = os.path.join(obj, base_path, name + '.ninja')
ninja_output = StringIO()
writer = NinjaWriter(hash_for_rules, target_outputs, base_path, build_dir,
ninja_output,
toplevel_build, output_file,
flavor, toplevel_dir=options.toplevel_dir)
target = writer.WriteSpec(spec, config_name, generator_flags)
if ninja_output.tell() > 0:
# Only create files for ninja files that actually have contents.
with OpenOutput(os.path.join(toplevel_build, output_file)) as ninja_file:
ninja_file.write(ninja_output.getvalue())
ninja_output.close()
master_ninja.subninja(output_file)
if target:
if name != target.FinalOutput() and spec['toolset'] == 'target':
target_short_names.setdefault(name, []).append(target)
target_outputs[qualified_target] = target
if qualified_target in all_targets:
all_outputs.add(target.FinalOutput())
non_empty_target_names.add(name)
else:
empty_target_names.add(name)
if target_short_names:
# Write a short name to build this target. This benefits both the
# "build chrome" case as well as the gyp tests, which expect to be
# able to run actions and build libraries by their short name.
master_ninja.newline()
master_ninja.comment('Short names for targets.')
for short_name in target_short_names:
master_ninja.build(short_name, 'phony', [x.FinalOutput() for x in
target_short_names[short_name]])
# Write phony targets for any empty targets that weren't written yet. As
# short names are not necessarily unique, only do this for short names that
# haven't already been output for another target.
empty_target_names = empty_target_names - non_empty_target_names
if empty_target_names:
master_ninja.newline()
master_ninja.comment('Empty targets (output for completeness).')
for name in sorted(empty_target_names):
master_ninja.build(name, 'phony')
if all_outputs:
master_ninja.newline()
master_ninja.build('all', 'phony', list(all_outputs))
master_ninja.default(generator_flags.get('default_target', 'all'))
master_ninja_file.close()
def PerformBuild(data, configurations, params):
options = params['options']
for config in configurations:
builddir = os.path.join(options.toplevel_dir, 'out', config)
arguments = ['ninja', '-C', builddir]
print 'Building [%s]: %s' % (config, arguments)
subprocess.check_call(arguments)
def CallGenerateOutputForConfig(arglist):
# Ignore the interrupt signal so that the parent process catches it and
# kills all multiprocessing children.
signal.signal(signal.SIGINT, signal.SIG_IGN)
(target_list, target_dicts, data, params, config_name) = arglist
GenerateOutputForConfig(target_list, target_dicts, data, params, config_name)
def GenerateOutput(target_list, target_dicts, data, params):
# Update target_dicts for iOS device builds.
target_dicts = gyp.xcode_emulation.CloneConfigurationForDeviceAndEmulator(
target_dicts)
user_config = params.get('generator_flags', {}).get('config', None)
if gyp.common.GetFlavor(params) == 'win':
target_list, target_dicts = MSVSUtil.ShardTargets(target_list, target_dicts)
target_list, target_dicts = MSVSUtil.InsertLargePdbShims(
target_list, target_dicts, generator_default_variables)
if user_config:
GenerateOutputForConfig(target_list, target_dicts, data, params,
user_config)
else:
config_names = target_dicts[target_list[0]]['configurations'].keys()
if params['parallel']:
try:
pool = multiprocessing.Pool(len(config_names))
arglists = []
for config_name in config_names:
arglists.append(
(target_list, target_dicts, data, params, config_name))
pool.map(CallGenerateOutputForConfig, arglists)
except KeyboardInterrupt, e:
pool.terminate()
raise e
else:
for config_name in config_names:
GenerateOutputForConfig(target_list, target_dicts, data, params,
config_name)
| mit |
TheApacheCats/yum | yum/sqlutils.py | 10 | 6422 | #!/usr/bin/python -tt
# This program is free software; you can redistribute it and/or modify
# it under the terms of version 2 of the GNU General Public License
# as published by the Free Software Foundation
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# Copyright 2005 Duke University
"""
utility functions to handle differences in pysqlite versions
These are from Wichert Akkerman <wichert@deephackmode.org>'s python-dhm
http://www.wiggy.net/code/python-dhm
"""
try:
import sqlite3 as sqlite
except ImportError:
import sqlite
class TokenizeError(Exception):
"""Tokenizer error class"""
pass
def Tokenize(str, whitespace=" \t\r\n", quotes="\"", escapes="\\"):
"""String tokenizer
This function tokenizes a string while taking quotation and
escaping into account.
>>> import dhm.strtools
>>> dhm.strtools.Tokenize("this is a test")
['this', 'is', 'a', 'test']
>>> dhm.strtools.Tokenize("this \"is a\" test")
['this', 'is a', 'test']
>>> dhm.strtools.Tokenize("this \\\"is\\\" a test")
['this', '"is"', 'a', 'test']
>>> dhm.strtools.Tokenize("this \"is a test")
Traceback (most recent call last):
File "<stdin>", line 1, in ?
File "/usr/local/lib/python2.2/site-packages/dhm/strtools.py", line 80, in Tokenize
raise TokenizeError, "Unexpected end of string in quoted text"
dhm.strtools.TokenizeError: Unexpected end of string in quoted text
@param str: string to tokenize
@type str: string
@param whitespace: whitespace characters separating tokens
@type whitespace: string
@param quotes: legal quoting characters
@type quotes: string
@param escapes: characters which can escape quoting characters
@type escapes: string
@return: list of tokens
@rtype: sequence of strings
"""
(buffer, tokens, curtoken, quote)=(str, [], None, None)
try:
while buffer:
if buffer[0]==quote:
quote=None
elif (quote==None) and (buffer[0] in quotes):
quote=buffer[0]
elif buffer[0] in whitespace:
if quote!=None:
curtoken+=buffer[0]
else:
tokens.append(curtoken)
curtoken=None
while buffer[1] in whitespace:
buffer=buffer[1:]
elif buffer[0] in escapes:
if curtoken==None:
curtoken=buffer[1]
else:
curtoken+=buffer[1]
buffer=buffer[1:]
else:
if curtoken==None:
curtoken=buffer[0]
else:
curtoken+=buffer[0]
buffer=buffer[1:]
except IndexError:
raise TokenizeError, "Unexpected end of string"
if quote:
raise TokenizeError, "Unexpected end of string in quoted text"
if curtoken!=None:
tokens.append(curtoken)
return tokens
def QmarkToPyformat(query, params):
"""Convert from qmark to pyformat parameter style.
The Python DB-API 2.0 specifies several possible parameter
styles that can be used by drivers. This function converts from the
qmark style to the pyformat style.
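For example, a single qmark parameter is rewritten like this:
>>> QmarkToPyformat("select * from foo where name=?", ["bar"])
('select * from foo where name=%(param1)s', {'param1': 'bar'})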
@param query: SQL query to transform
@type query: string
@param params: arguments to query
@type params: sequence of strings
@return: converted query and parameters
@rtype: tuple with the new command and a dictionary of arguments
"""
tokens=Tokenize(query, quotes="'")
output=[]
count=1
for token in tokens:
if token.endswith("?"):
output.append(token[:-1] + "%%(param%d)s" % count)
count+=1
elif token.endswith("?,") or token.endswith("?)"):
ntoken = token[:-2] + "%%(param%d)s" % count
ntoken += token[-1]
output.append(ntoken)
count+=1
else:
output.append(token)
dict={}
count=1
for param in params:
dict["param%d" % count]=param
count+=1
return (" ".join(output), dict)
def executeSQLPyFormat(cursor, query, params=None):
"""
Execute a python < 2.5 (external sqlite module) style query.
@param cursor: A sqlite cursor
@param query: The query to execute
@param params: An optional list of parameters to the query
"""
if params is None:
return cursor.execute(query)
# Leading whitespace confuses QmarkToPyformat()
query = query.strip()
(q, p) = QmarkToPyformat(query, params)
return cursor.execute(q, p)
def executeSQLQmark(cursor, query, params=None):
"""
Execute a python 2.5 (sqlite3) style query.
@param cursor: A sqlite cursor
@param query: The query to execute
@param params: An optional list of parameters to the query
"""
if params is None:
return cursor.execute(query)
return cursor.execute(query, params)
if sqlite.version_info[0] > 1:
executeSQL = executeSQLQmark
else:
executeSQL = executeSQLPyFormat
def sql_esc(pattern):
""" Apply SQLite escaping, if needed. Returns pattern and esc. """
esc = ''
if "_" in pattern or "%" in pattern:
esc = ' ESCAPE "!"'
pattern = pattern.replace("!", "!!")
pattern = pattern.replace("%", "!%")
pattern = pattern.replace("_", "!_")
return (pattern, esc)
def sql_esc_glob(patterns):
""" Converts patterns to SQL LIKE format, if required (or gives up if
not possible). """
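# e.g. ["yum*", "foo?.rpm"] becomes [('yum%', ''), ('foo_.rpm', '')]; any
# pattern containing '[' makes the whole call return [] so the caller falls
# back to loading everything.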
ret = []
for pattern in patterns:
if '[' in pattern: # LIKE only has % and _, so [abc] can't be done.
return [] # So load everything
# Convert to SQL LIKE format
(pattern, esc) = sql_esc(pattern)
pattern = pattern.replace("*", "%")
pattern = pattern.replace("?", "_")
ret.append((pattern, esc))
return ret
| gpl-2.0 |
keisuke-umezawa/chainer | chainerx_cc/examples/imagenet_py/train_imagenet.py | 7 | 5221 | #!/usr/bin/env python
"""Example code of learning a large scale convnet from ILSVRC2012 dataset.
Prerequisite: To run this example, crop the center of ILSVRC2012 training and
validation images, scale them to 256x256 and convert them to RGB, and make
two lists of space-separated CSV whose first column is the full path to the image
and second column is the zero-origin label (this format is the same as that used
ImageDataLayer).
"""
import argparse
import time
import chainer.iterators
import numpy as np
import chainerx as chx
from image_dataset import PreprocessedDataset
import resnet50
def get_imagenet(dataset_iter):
x, t = zip(*next(dataset_iter))
return chx.array(x), chx.array(t)
def compute_loss(y, t):
# softmax cross entropy
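# t holds integer class labels; comparing them against arange(1000) builds a
# one-hot mask, so summing mask * log_softmax and negating gives the negative
# log-likelihood, averaged over the batch via the 1 / y.shape[0] factor.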
score = chx.log_softmax(y, axis=1)
mask = (t[:, chx.newaxis] == chx.arange(
1000, dtype=t.dtype)).astype(score.dtype)
# TODO(beam2d): implement mean
return -(score * mask).sum() * (1 / y.shape[0])
def evaluate(model, X_test, Y_test, eval_size, batch_size):
N_test = X_test.shape[0] if eval_size is None else eval_size
if N_test > X_test.shape[0]:
raise ValueError(
'Test size can be no larger than {}'.format(X_test.shape[0]))
with chx.no_backprop_mode():
total_loss = chx.array(0, dtype=chx.float32)
num_correct = chx.array(0, dtype=chx.int64)
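# Walk the held-out data in minibatches, accumulating the total loss and the
# number of correct top-1 predictions, then normalize both by N_test below.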
for i in range(0, N_test, batch_size):
x = X_test[i:min(i + batch_size, N_test)]
t = Y_test[i:min(i + batch_size, N_test)]
y = model(x)
total_loss += compute_loss(y, t) * batch_size
num_correct += (y.argmax(axis=1).astype(t.dtype)
== t).astype(chx.int32).sum()
mean_loss = float(total_loss) / N_test
accuracy = int(num_correct) / N_test
return mean_loss, accuracy
def main():
parser = argparse.ArgumentParser(
description='Learning convnet from ILSVRC2012 dataset')
parser.add_argument('train', help='Path to training image-label list file')
parser.add_argument('val', help='Path to validation image-label list file')
parser.add_argument(
'--batchsize', '-B', type=int, default=32,
help='Learning minibatch size')
parser.add_argument(
'--epoch', '-E', type=int, default=10,
help='Number of epochs to train')
parser.add_argument(
'--iteration', '-I', type=int, default=None,
help='Number of iterations to train. Epoch is ignored if specified.')
parser.add_argument(
'--loaderjob', '-j', type=int,
help='Number of parallel data loading processes')
parser.add_argument(
'--mean', '-m', default='mean.npy',
help='Mean file (computed by compute_mean.py)')
parser.add_argument(
'--root', '-R', default='.',
help='Root directory path of image files')
parser.add_argument(
'--val_batchsize', '-b', type=int, default=250,
help='Validation minibatch size')
parser.set_defaults(test=False)
parser.add_argument(
'--device', '-d', default='native', help='Device to use')
args = parser.parse_args()
chx.set_default_device(args.device)
batch_size = args.batchsize
eval_size = args.val_batchsize
# Prepare model
model = resnet50.ResNet50()
# Prepare datasets and mean file
mean = np.load(args.mean)
train = PreprocessedDataset(args.train, args.root, mean, model.insize)
test = PreprocessedDataset(args.val, args.root, mean, model.insize, False)
train_iter = chainer.iterators.MultiprocessIterator(
train, batch_size, n_processes=args.loaderjob)
test_iter = chainer.iterators.MultiprocessIterator(
test, eval_size, n_processes=args.loaderjob)
N = len(train)
# Train
model.require_grad()
it = 0
epoch = 0
is_finished = False
start = time.time()
while not is_finished:
for i in range(0, N // batch_size):
x, t = get_imagenet(train_iter)
y = model(x)
loss = compute_loss(y, t)
loss.backward()
model.update(lr=0.01)
it += 1
if args.iteration is not None:
x_test, t_test = get_imagenet(test_iter)
mean_loss, accuracy = evaluate(
model, x_test, t_test, eval_size, batch_size)
elapsed_time = time.time() - start
print(
'iteration {}... loss={},\taccuracy={},\telapsed_time={}'
.format(it, mean_loss, accuracy, elapsed_time))
if it >= args.iteration:
is_finished = True
break
epoch += 1
if args.iteration is None:
x_test, t_test = get_imagenet(test_iter)
mean_loss, accuracy = evaluate(
model, x_test, t_test, eval_size, batch_size)
elapsed_time = time.time() - start
print(
'epoch {}... loss={},\taccuracy={},\telapsed_time={}'
.format(epoch, mean_loss, accuracy, elapsed_time))
if epoch >= args.epoch:
is_finished = True
if __name__ == '__main__':
main()
| mit |
ragnarstroberg/imsrg | src/pybind11/tests/test_pytypes.py | 4 | 5777 | import pytest
import sys
from pybind11_tests import pytypes as m
from pybind11_tests import debug_enabled
def test_list(capture, doc):
with capture:
l = m.get_list()
assert l == ["overwritten"]
l.append("value2")
m.print_list(l)
assert capture.unordered == """
Entry at position 0: value
list item 0: overwritten
list item 1: value2
"""
assert doc(m.get_list) == "get_list() -> list"
assert doc(m.print_list) == "print_list(arg0: list) -> None"
def test_set(capture, doc):
s = m.get_set()
assert s == {"key1", "key2", "key3"}
with capture:
s.add("key4")
m.print_set(s)
assert capture.unordered == """
key: key1
key: key2
key: key3
key: key4
"""
    assert doc(m.get_set) == "get_set() -> set"
    assert doc(m.print_set) == "print_set(arg0: set) -> None"
def test_dict(capture, doc):
d = m.get_dict()
assert d == {"key": "value"}
with capture:
d["key2"] = "value2"
m.print_dict(d)
assert capture.unordered == """
key: key, value=value
key: key2, value=value2
"""
assert doc(m.get_dict) == "get_dict() -> dict"
assert doc(m.print_dict) == "print_dict(arg0: dict) -> None"
assert m.dict_keyword_constructor() == {"x": 1, "y": 2, "z": 3}
def test_str(doc):
assert m.str_from_string().encode().decode() == "baz"
assert m.str_from_bytes().encode().decode() == "boo"
assert doc(m.str_from_bytes) == "str_from_bytes() -> str"
class A(object):
def __str__(self):
return "this is a str"
def __repr__(self):
return "this is a repr"
assert m.str_from_object(A()) == "this is a str"
assert m.repr_from_object(A()) == "this is a repr"
s1, s2 = m.str_format()
assert s1 == "1 + 2 = 3"
assert s1 == s2
def test_bytes(doc):
assert m.bytes_from_string().decode() == "foo"
assert m.bytes_from_str().decode() == "bar"
assert doc(m.bytes_from_str) == "bytes_from_str() -> {}".format(
"bytes" if sys.version_info[0] == 3 else "str"
)
def test_capsule(capture):
pytest.gc_collect()
with capture:
a = m.return_capsule_with_destructor()
del a
pytest.gc_collect()
assert capture.unordered == """
creating capsule
destructing capsule
"""
with capture:
a = m.return_capsule_with_destructor_2()
del a
pytest.gc_collect()
assert capture.unordered == """
creating capsule
destructing capsule: 1234
"""
with capture:
a = m.return_capsule_with_name_and_destructor()
del a
pytest.gc_collect()
assert capture.unordered == """
created capsule (1234, 'pointer type description')
destructing capsule (1234, 'pointer type description')
"""
def test_accessors():
class SubTestObject:
attr_obj = 1
attr_char = 2
class TestObject:
basic_attr = 1
begin_end = [1, 2, 3]
d = {"operator[object]": 1, "operator[char *]": 2}
sub = SubTestObject()
def func(self, x, *args):
return self.basic_attr + x + sum(args)
d = m.accessor_api(TestObject())
assert d["basic_attr"] == 1
assert d["begin_end"] == [1, 2, 3]
assert d["operator[object]"] == 1
assert d["operator[char *]"] == 2
assert d["attr(object)"] == 1
assert d["attr(char *)"] == 2
assert d["missing_attr_ptr"] == "raised"
assert d["missing_attr_chain"] == "raised"
assert d["is_none"] is False
assert d["operator()"] == 2
assert d["operator*"] == 7
assert m.tuple_accessor(tuple()) == (0, 1, 2)
d = m.accessor_assignment()
assert d["get"] == 0
assert d["deferred_get"] == 0
assert d["set"] == 1
assert d["deferred_set"] == 1
assert d["var"] == 99
def test_constructors():
"""C++ default and converting constructors are equivalent to type calls in Python"""
types = [str, bool, int, float, tuple, list, dict, set]
expected = {t.__name__: t() for t in types}
assert m.default_constructors() == expected
data = {
str: 42,
bool: "Not empty",
int: "42",
float: "+1e3",
tuple: range(3),
list: range(3),
dict: [("two", 2), ("one", 1), ("three", 3)],
set: [4, 4, 5, 6, 6, 6],
memoryview: b'abc'
}
inputs = {k.__name__: v for k, v in data.items()}
expected = {k.__name__: k(v) for k, v in data.items()}
assert m.converting_constructors(inputs) == expected
assert m.cast_functions(inputs) == expected
def test_implicit_casting():
"""Tests implicit casting when assigning or appending to dicts and lists."""
z = m.get_implicit_casting()
assert z['d'] == {
'char*_i1': 'abc', 'char*_i2': 'abc', 'char*_e': 'abc', 'char*_p': 'abc',
'str_i1': 'str', 'str_i2': 'str1', 'str_e': 'str2', 'str_p': 'str3',
'int_i1': 42, 'int_i2': 42, 'int_e': 43, 'int_p': 44
}
assert z['l'] == [3, 6, 9, 12, 15]
def test_print(capture):
with capture:
m.print_function()
assert capture == """
Hello, World!
1 2.0 three True -- multiple args
*args-and-a-custom-separator
no new line here -- next print
flush
py::print + str.format = this
"""
assert capture.stderr == "this goes to stderr"
with pytest.raises(RuntimeError) as excinfo:
m.print_failure()
assert str(excinfo.value) == "make_tuple(): unable to convert " + (
"argument of type 'UnregisteredType' to Python object"
if debug_enabled else
"arguments to Python object (compile in debug mode for details)"
)
| gpl-2.0 |
flavour/Turkey | modules/unit_tests/s3/s3navigation.py | 17 | 6912 | # -*- coding: utf-8 -*-
#
# S3Navigation Unit Tests
#
# To run this script use:
# python web2py.py -S eden -M -R applications/eden/modules/unit_tests/s3/s3navigation.py
#
import unittest
from s3 import S3NavigationItem as M
class SelectTests(unittest.TestCase):
""" Tests for S3NavigationItem selection/deselection """
# -------------------------------------------------------------------------
def setUp(self):
items = {
"a1": M(tags=["a1"]),
"a11": M(tags=["a1", "a11"]),
"a12": M(tags=["a1", "a12"]),
"a2": M(tags=["a2"]),
"a21": M(tags=["a2", "a21"]),
"a22": M(tags=["a2", "a22"]),
}
self.menu = M()(items["a1"](items["a11"], items["a12"]),
items["a2"](items["a21"], items["a22"]),
)
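        # Resulting hierarchy: root -> a1 -> (a11, a12) and root -> a2 -> (a21, a22),
        # with each node tagged so the tests can select nodes by tag.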
self.items = items
# -------------------------------------------------------------------------
def tearDown(self):
self.menu = None
self.items = None
# -------------------------------------------------------------------------
def testSelectLeaf(self):
""" Test selection of a leaf node """
menu = self.menu
items = self.items
assertTrue = self.assertTrue
assertIsNone = self.assertIsNone
menu.select(tag="a11")
assertTrue(menu.selected)
assertTrue(items["a1"].selected)
assertTrue(items["a11"].selected)
assertIsNone(items["a12"].selected)
assertIsNone(items["a2"].selected)
assertIsNone(items["a21"].selected)
assertIsNone(items["a22"].selected)
# -------------------------------------------------------------------------
def testSelectBranch(self):
""" Test selection of a branch """
menu = self.menu
items = self.items
assertTrue = self.assertTrue
assertIsNone = self.assertIsNone
menu.select(tag="a2")
assertTrue(menu.selected)
assertIsNone(items["a1"].selected)
assertIsNone(items["a11"].selected)
assertIsNone(items["a12"].selected)
assertTrue(items["a2"].selected)
assertTrue(items["a21"].selected)
assertIsNone(items["a22"].selected)
# -------------------------------------------------------------------------
def testSelectSpecificNode(self):
""" Test selection of specific nodes """
menu = self.menu
items = self.items
assertTrue = self.assertTrue
assertIsNone = self.assertIsNone
items["a2"].select()
assertTrue(menu.selected)
assertIsNone(items["a1"].selected)
assertIsNone(items["a11"].selected)
assertIsNone(items["a12"].selected)
assertTrue(items["a2"].selected)
assertIsNone(items["a21"].selected)
assertIsNone(items["a22"].selected)
items["a12"].select()
assertTrue(menu.selected)
assertTrue(items["a1"].selected)
assertIsNone(items["a11"].selected)
assertTrue(items["a12"].selected)
assertIsNone(items["a2"].selected)
assertIsNone(items["a21"].selected)
assertIsNone(items["a22"].selected)
# -------------------------------------------------------------------------
def testSelectNonexistentTag(self):
""" Test selection with non-existent tag """
menu = self.menu
items = self.items
assertTrue = self.assertTrue
assertIsNone = self.assertIsNone
# Make a selection
menu.select(tag="a21")
assertTrue(menu.selected)
assertIsNone(items["a1"].selected)
assertIsNone(items["a11"].selected)
assertIsNone(items["a12"].selected)
assertTrue(items["a2"].selected)
assertTrue(items["a21"].selected)
assertIsNone(items["a22"].selected)
# Use a non-existent tag
menu.select(tag="nonexistent")
# Nothing should be selected
assertIsNone(menu.selected)
assertIsNone(items["a1"].selected)
assertIsNone(items["a11"].selected)
assertIsNone(items["a12"].selected)
assertIsNone(items["a2"].selected)
assertIsNone(items["a21"].selected)
assertIsNone(items["a22"].selected)
# -------------------------------------------------------------------------
def testDeselectAll(self):
""" Test deselection """
menu = self.menu
items = self.items
assertTrue = self.assertTrue
assertIsNone = self.assertIsNone
# Make a selection
menu.select(tag="a21")
assertTrue(menu.selected)
assertIsNone(items["a1"].selected)
assertIsNone(items["a11"].selected)
assertIsNone(items["a12"].selected)
assertTrue(items["a2"].selected)
assertTrue(items["a21"].selected)
assertIsNone(items["a22"].selected)
# Deselect all => should completely remove all selections
menu.deselect_all()
assertIsNone(menu.selected)
assertIsNone(items["a1"].selected)
assertIsNone(items["a11"].selected)
assertIsNone(items["a12"].selected)
assertIsNone(items["a2"].selected)
assertIsNone(items["a21"].selected)
assertIsNone(items["a22"].selected)
# -------------------------------------------------------------------------
def testSwitchSelection(self):
""" Test consecutive manual selects """
menu = self.menu
items = self.items
assertTrue = self.assertTrue
assertIsNone = self.assertIsNone
# First selection
menu.select(tag="a11")
assertTrue(menu.selected)
assertTrue(items["a1"].selected)
assertTrue(items["a11"].selected)
assertIsNone(items["a12"].selected)
assertIsNone(items["a2"].selected)
assertIsNone(items["a21"].selected)
assertIsNone(items["a22"].selected)
# Second selection => should completely reset the first
menu.select(tag="a22")
assertTrue(menu.selected)
assertIsNone(items["a1"].selected)
assertIsNone(items["a11"].selected)
assertIsNone(items["a12"].selected)
assertTrue(items["a2"].selected)
assertIsNone(items["a21"].selected)
assertTrue(items["a22"].selected)
# =============================================================================
def run_suite(*test_classes):
""" Run the test suite """
loader = unittest.TestLoader()
suite = unittest.TestSuite()
for test_class in test_classes:
tests = loader.loadTestsFromTestCase(test_class)
suite.addTests(tests)
if suite is not None:
unittest.TextTestRunner(verbosity=2).run(suite)
return
if __name__ == "__main__":
run_suite(
SelectTests,
)
# END ========================================================================
| mit |
jiivan/genoomy | genoome/genoome/settings/production.py | 1 | 3466 | """Production settings and globals."""
from __future__ import absolute_import
from os import environ
from .base import *
# Normally you should not import ANYTHING from Django directly
# into your settings, but ImproperlyConfigured is an exception.
from django.core.exceptions import ImproperlyConfigured
def get_env_setting(setting):
""" Get the environment setting or return exception """
try:
return environ[setting]
except KeyError:
error_msg = "Set the %s env variable" % setting
raise ImproperlyConfigured(error_msg)
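# Example usage (see SECRET_KEY below): fail fast at import time when a required
# environment variable is not set.
#   SECRET_KEY = get_env_setting('DJANGO_SECRET_KEY')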
########## HOST CONFIGURATION
# See: https://docs.djangoproject.com/en/1.5/releases/1.5/#allowed-hosts-required-in-production
ALLOWED_HOSTS = ['.genoomy.com']
########## END HOST CONFIGURATION
########## EMAIL CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-host
EMAIL_HOST = environ.get('EMAIL_HOST', 'mail.genoomy.com')
# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-host-password
EMAIL_HOST_PASSWORD = environ.get('EMAIL_HOST_PASSWORD', 'Genomy12345')
# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-host-user
EMAIL_HOST_USER = environ.get('EMAIL_HOST_USER', 'contact@genoomy.com')
# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-port
EMAIL_PORT = environ.get('EMAIL_PORT', 587)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-subject-prefix
EMAIL_SUBJECT_PREFIX = '[%s] ' % SITE_NAME
# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-use-tls
EMAIL_USE_TLS = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#server-email
SERVER_EMAIL = EMAIL_HOST_USER
########## END EMAIL CONFIGURATION
########## DATABASE CONFIGURATION
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'genoome_db',
'USER': 'genoome_user',
'PASSWORD': 'genoome_password',
'HOST': '127.0.0.1',
'PORT': '5432',
}
}
########## END DATABASE CONFIGURATION
########## CACHE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#caches
CACHES = {
'default': {
'BACKEND': 'redis_cache.RedisCache',
'LOCATION': 'localhost:6379',
'OPTIONS': {
'DB': 2,
},
}
}
########## END CACHE CONFIGURATION
########## LOGGING CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#logging
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING['handlers']['file'] = {
'level': 'DEBUG',
'filename': '/var/log/django/genoome.log',
'class': 'logging.handlers.RotatingFileHandler',
'maxBytes': 5242880,
'formatter': 'verbose'
}
LOGGING['root'] = {
'handlers': ['file',],
'level': 'DEBUG',
}
########## SECRET CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
SECRET_KEY = get_env_setting('DJANGO_SECRET_KEY')
########## END SECRET CONFIGURATION
########## PAYPAL CONFIGURATION
PAYPAL_RECEIVER_EMAIL = "info@genoomy.com"
PAYPAL_TEST = False
########## END PAYPAL CONFIGURATION
| mit |
HeinleinSupport/check_mk | netifaces/web/plugins/wato/check_parameters_netifaces_rbl.py | 1 | 1118 | #!/usr/bin/env python
# -*- encoding: utf-8; py-indent-offset: 4 -*-
register_check_parameters(
subgroup_applications,
"netifaces_rbl",
_("RBL"),
Dictionary(
title = _("List of RBLs to check against"),
help = _('The check <tt>netifaces.rbl</tt> monitors IP addresses of the host against the RBLs defined here.'),
elements = [
( 'warn',
ListOfStrings(
title = _("WARN"),
help = _('This list contains the RBLs that generate a WARNING state.'),
),
),
( 'crit',
ListOfStrings(
title = _('CRIT'),
help = _('This list contains the RBLs that generate a CRITICAL state.'),
default_value = ['ix.dnsbl.manitu.net', 'bl.spamcop.net', 'zen.spamhaus.org'],
),
),
],
),
TextAscii(
title = _("Interface Address"),
help = _("The IP address as returned by the netifaces agent plugin."),
allow_empty = False
),
match_type = 'dict',
)
| gpl-2.0 |
smartforceplus/SmartForceplus | addons/board/board.py | 70 | 6623 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2010-2013 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from operator import itemgetter
from textwrap import dedent
from openerp import tools
from openerp.osv import fields, osv
class board_board(osv.osv):
_name = 'board.board'
_description = "Board"
_auto = False
_columns = {}
@tools.cache()
def list(self, cr, uid, context=None):
Actions = self.pool.get('ir.actions.act_window')
Menus = self.pool.get('ir.ui.menu')
IrValues = self.pool.get('ir.values')
act_ids = Actions.search(cr, uid, [('res_model', '=', self._name)], context=context)
refs = ['%s,%s' % (Actions._name, act_id) for act_id in act_ids]
# cannot search "action" field on menu (non stored function field without search_fnct)
irv_ids = IrValues.search(cr, uid, [
('model', '=', 'ir.ui.menu'),
('key', '=', 'action'),
('key2', '=', 'tree_but_open'),
('value', 'in', refs),
], context=context)
menu_ids = map(itemgetter('res_id'), IrValues.read(cr, uid, irv_ids, ['res_id'], context=context))
menu_names = Menus.name_get(cr, uid, menu_ids, context=context)
return [dict(id=m[0], name=m[1]) for m in menu_names]
def _clear_list_cache(self):
self.list.clear_cache(self)
def create(self, cr, user, vals, context=None):
return 0
def fields_view_get(self, cr, user, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
"""
Overrides orm field_view_get.
@return: Dictionary of Fields, arch and toolbar.
"""
res = {}
res = super(board_board, self).fields_view_get(cr, user, view_id, view_type,
context, toolbar=toolbar, submenu=submenu)
CustView = self.pool.get('ir.ui.view.custom')
vids = CustView.search(cr, user, [('user_id', '=', user), ('ref_id', '=', view_id)], context=context)
if vids:
view_id = vids[0]
arch = CustView.browse(cr, user, view_id, context=context)
res['custom_view_id'] = view_id
res['arch'] = arch.arch
res['arch'] = self._arch_preprocessing(cr, user, res['arch'], context=context)
res['toolbar'] = {'print': [], 'action': [], 'relate': []}
return res
def _arch_preprocessing(self, cr, user, arch, context=None):
from lxml import etree
def remove_unauthorized_children(node):
for child in node.iterchildren():
if child.tag == 'action' and child.get('invisible'):
node.remove(child)
else:
child = remove_unauthorized_children(child)
return node
def encode(s):
if isinstance(s, unicode):
return s.encode('utf8')
return s
archnode = etree.fromstring(encode(arch))
return etree.tostring(remove_unauthorized_children(archnode), pretty_print=True)
class board_create(osv.osv_memory):
def board_create(self, cr, uid, ids, context=None):
assert len(ids) == 1
this = self.browse(cr, uid, ids[0], context=context)
view_arch = dedent("""<?xml version="1.0"?>
<form string="%s" version="7.0">
<board style="2-1">
<column/>
<column/>
</board>
</form>
""".strip() % (this.name,))
view_id = self.pool.get('ir.ui.view').create(cr, uid, {
'name': this.name,
'model': 'board.board',
'priority': 16,
'type': 'form',
'arch': view_arch,
}, context=context)
action_id = self.pool.get('ir.actions.act_window').create(cr, uid, {
'name': this.name,
'view_type': 'form',
'view_mode': 'form',
'res_model': 'board.board',
'usage': 'menu',
'view_id': view_id,
'help': dedent('''<div class="oe_empty_custom_dashboard">
<p>
<b>This dashboard is empty.</b>
</p><p>
To add the first report into this dashboard, go to any
menu, switch to list or graph view, and click <i>'Add to
Dashboard'</i> in the extended search options.
</p><p>
You can filter and group data before inserting into the
dashboard using the search options.
</p>
</div>
''')
}, context=context)
menu_id = self.pool.get('ir.ui.menu').create(cr, uid, {
'name': this.name,
'parent_id': this.menu_parent_id.id,
'action': 'ir.actions.act_window,%s' % (action_id,)
}, context=context)
self.pool.get('board.board')._clear_list_cache()
return {
'type': 'ir.actions.client',
'tag': 'reload',
'params': {
'menu_id': menu_id
},
}
def _default_menu_parent_id(self, cr, uid, context=None):
_, menu_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'base', 'menu_reporting_dashboard')
return menu_id
_name = "board.create"
_description = "Board Creation"
_columns = {
'name': fields.char('Board Name', required=True),
'menu_parent_id': fields.many2one('ir.ui.menu', 'Parent Menu', required=True),
}
_defaults = {
'menu_parent_id': _default_menu_parent_id,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
sarantapichos/faircoop-market | addons/website_hr_recruitment/controllers/main.py | 170 | 5916 | # -*- coding: utf-8 -*-
import base64
from openerp import SUPERUSER_ID
from openerp import http
from openerp.tools.translate import _
from openerp.http import request
from openerp.addons.website.models.website import slug
class website_hr_recruitment(http.Controller):
@http.route([
'/jobs',
'/jobs/country/<model("res.country"):country>',
'/jobs/department/<model("hr.department"):department>',
'/jobs/country/<model("res.country"):country>/department/<model("hr.department"):department>',
'/jobs/office/<int:office_id>',
'/jobs/country/<model("res.country"):country>/office/<int:office_id>',
'/jobs/department/<model("hr.department"):department>/office/<int:office_id>',
'/jobs/country/<model("res.country"):country>/department/<model("hr.department"):department>/office/<int:office_id>',
], type='http', auth="public", website=True)
def jobs(self, country=None, department=None, office_id=None, **kwargs):
env = request.env(context=dict(request.env.context, show_address=True, no_tag_br=True))
Country = env['res.country']
Jobs = env['hr.job']
# List jobs available to current UID
job_ids = Jobs.search([], order="website_published desc,no_of_recruitment desc").ids
# Browse jobs as superuser, because address is restricted
jobs = Jobs.sudo().browse(job_ids)
# Deduce departments and offices of those jobs
departments = set(j.department_id for j in jobs if j.department_id)
offices = set(j.address_id for j in jobs if j.address_id)
countries = set(o.country_id for o in offices if o.country_id)
# Default search by user country
if not (country or department or office_id or kwargs.get('all_countries')):
country_code = request.session['geoip'].get('country_code')
if country_code:
countries_ = Country.search([('code', '=', country_code)])
country = countries_[0] if countries_ else None
if not any(j for j in jobs if j.address_id and j.address_id.country_id == country):
country = False
# Filter the matching one
if country and not kwargs.get('all_countries'):
jobs = (j for j in jobs if j.address_id is None or j.address_id.country_id and j.address_id.country_id.id == country.id)
if department:
jobs = (j for j in jobs if j.department_id and j.department_id.id == department.id)
if office_id:
jobs = (j for j in jobs if j.address_id and j.address_id.id == office_id)
# Render page
return request.website.render("website_hr_recruitment.index", {
'jobs': jobs,
'countries': countries,
'departments': departments,
'offices': offices,
'country_id': country,
'department_id': department,
'office_id': office_id,
})
@http.route('/jobs/add', type='http', auth="user", website=True)
def jobs_add(self, **kwargs):
job = request.env['hr.job'].create({
'name': _('New Job Offer'),
})
return request.redirect("/jobs/detail/%s?enable_editor=1" % slug(job))
@http.route('/jobs/detail/<model("hr.job"):job>', type='http', auth="public", website=True)
def jobs_detail(self, job, **kwargs):
return request.render("website_hr_recruitment.detail", {
'job': job,
'main_object': job,
})
@http.route('/jobs/apply/<model("hr.job"):job>', type='http', auth="public", website=True)
def jobs_apply(self, job):
error = {}
default = {}
if 'website_hr_recruitment_error' in request.session:
error = request.session.pop('website_hr_recruitment_error')
default = request.session.pop('website_hr_recruitment_default')
return request.render("website_hr_recruitment.apply", {
'job': job,
'error': error,
'default': default,
})
@http.route('/jobs/thankyou', methods=['POST'], type='http', auth="public", website=True)
def jobs_thankyou(self, **post):
error = {}
for field_name in ["partner_name", "phone", "email_from"]:
if not post.get(field_name):
error[field_name] = 'missing'
if error:
request.session['website_hr_recruitment_error'] = error
ufile = post.pop('ufile')
if ufile:
error['ufile'] = 'reset'
request.session['website_hr_recruitment_default'] = post
return request.redirect('/jobs/apply/%s' % post.get("job_id"))
# public user can't create applicants (duh)
env = request.env(user=SUPERUSER_ID)
value = {
'source_id' : env.ref('hr_recruitment.source_website_company').id,
'name': '%s\'s Application' % post.get('partner_name'),
}
for f in ['email_from', 'partner_name', 'description']:
value[f] = post.get(f)
for f in ['department_id', 'job_id']:
value[f] = int(post.get(f) or 0)
# Retro-compatibility for saas-3. "phone" field should be replace by "partner_phone" in the template in trunk.
value['partner_phone'] = post.pop('phone', False)
applicant_id = env['hr.applicant'].create(value).id
if post['ufile']:
attachment_value = {
'name': post['ufile'].filename,
'res_name': value['partner_name'],
'res_model': 'hr.applicant',
'res_id': applicant_id,
'datas': base64.encodestring(post['ufile'].read()),
'datas_fname': post['ufile'].filename,
}
env['ir.attachment'].create(attachment_value)
return request.render("website_hr_recruitment.thankyou", {})
# vim :et:
| agpl-3.0 |
chyyuu/distorm | python/distorm3/sample.py | 36 | 1845 | # Mario Vilas, http://breakingcode.wordpress.com
# Licensed Under GPLv3
# Example code
import distorm3
import sys
import optparse
# Parse the command line arguments
usage = 'Usage: %prog [--b16 | --b32 | --b64] filename [offset]'
parser = optparse.OptionParser(usage=usage)
parser.add_option( '--b16', help='80286 decoding',
action='store_const', dest='dt', const=distorm3.Decode16Bits )
parser.add_option( '--b32', help='IA-32 decoding [default]',
action='store_const', dest='dt', const=distorm3.Decode32Bits )
parser.add_option( '--b64', help='AMD64 decoding',
action='store_const', dest='dt', const=distorm3.Decode64Bits )
parser.set_defaults(dt=distorm3.Decode32Bits)
options, args = parser.parse_args(sys.argv)
if len(args) < 2:
parser.error('missing parameter: filename')
filename = args[1]
offset = 0
length = None
if len(args) == 3:
try:
offset = int(args[2], 10)
except ValueError:
parser.error('invalid offset: %s' % args[2])
if offset < 0:
parser.error('invalid offset: %s' % args[2])
elif len(args) > 3:
parser.error('too many parameters')
# Read the code from the file
try:
code = open(filename, 'rb').read()
except Exception as e:
parser.error('error reading file %s: %s' % (filename, e))
# Print each decoded instruction
# This shows how to use the Decode generator
iterable = distorm3.DecodeGenerator(offset, code, options.dt)
for (offset, size, instruction, hexdump) in iterable:
print("%.8x: %-32s %s" % (offset, hexdump, instruction))
# It could also be used as a returned list:
# l = distorm3.Decode(offset, code, options.dt)
# for (offset, size, instruction, hexdump) in l:
# print("%.8x: %-32s %s" % (offset, hexdump, instruction))
| gpl-3.0 |
coto/beecoss | bp_includes/external/pytz/gae.py | 74 | 3068 | """
A pytz version that runs smoothly on Google App Engine.
Based on http://appengine-cookbook.appspot.com/recipe/caching-pytz-helper/
To use, add pytz to your path normally, but import it from the gae module:
from pytz.gae import pytz
Applied patches:
- The zoneinfo dir is removed from pytz, as this module includes a ziped
version of it.
- pytz is monkey patched to load zoneinfos from a zipfile.
- pytz is patched to not check all zoneinfo files when loaded. This is
sad, I wish that was lazy, so it could be monkey patched. As it is,
the zipfile patch doesn't work and it'll spend resources checking
hundreds of files that we know aren't there.
pytz caches loaded zoneinfos, and this module will additionally cache them
in memcache to avoid unzipping constantly. The cache key includes the
OLSON_VERSION so it is invalidated when pytz is updated.
"""
import os
import logging
import pytz
import zipfile
from cStringIO import StringIO
# Fake memcache for when we're not running under the SDK, likely a script.
class memcache(object):
@classmethod
def add(*args, **kwargs):
pass
@classmethod
def get(*args, **kwargs):
return None
try:
# Don't use memcache outside of Google App Engine or with GAE's dev server.
if not os.environ.get('SERVER_SOFTWARE', '').startswith('Development'):
from google.appengine.api import memcache
except ImportError:
pass
zoneinfo = None
zoneinfo_path = os.path.abspath(os.path.join(os.path.dirname(__file__),
'zoneinfo.zip'))
def get_zoneinfo():
"""Cache the opened zipfile in the module."""
global zoneinfo
if zoneinfo is None:
zoneinfo = zipfile.ZipFile(zoneinfo_path)
return zoneinfo
class TimezoneLoader(object):
"""A loader that that reads timezones using ZipFile."""
def __init__(self):
self.available = {}
def open_resource(self, name):
"""Opens a resource from the zoneinfo subdir for reading."""
name_parts = name.lstrip('/').split('/')
if os.path.pardir in name_parts:
raise ValueError('Bad path segment: %r' % os.path.pardir)
cache_key = 'pytz.zoneinfo.%s.%s' % (pytz.OLSON_VERSION, name)
zonedata = memcache.get(cache_key)
if zonedata is None:
zonedata = get_zoneinfo().read('zoneinfo/' + '/'.join(name_parts))
memcache.add(cache_key, zonedata)
logging.info('Added timezone to memcache: %s' % cache_key)
else:
logging.info('Loaded timezone from memcache: %s' % cache_key)
return StringIO(zonedata)
def resource_exists(self, name):
"""Return true if the given resource exists"""
if name not in self.available:
try:
get_zoneinfo().getinfo('zoneinfo/' + name)
self.available[name] = True
except KeyError:
self.available[name] = False
return self.available[name]
pytz.loader = TimezoneLoader()
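# Per the patches described in the module docstring, once ``pytz.loader`` is
# replaced here, pytz resolves zoneinfo data through this zip-backed loader
# (with memcache caching when available) instead of reading individual files.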
| lgpl-3.0 |
farjump/gnu-binutils | gdb/testsuite/gdb.perf/disassemble.py | 46 | 1514 | # Copyright (C) 2013-2015 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from perftest import perftest
class Disassemble(perftest.TestCaseWithBasicMeasurements):
def __init__(self):
super (Disassemble, self).__init__ ("disassemble")
def warm_up(self):
do_test_command = "disassemble ada_evaluate_subexp"
gdb.execute (do_test_command, False, True)
def _do_test(self, c):
for func in ["evaluate_subexp_standard", "handle_inferior_event", "c_parse_internal"]:
do_test_command = "disassemble %s" % func
for _ in range(c+1):
gdb.execute (do_test_command, False, True)
def execute_test(self):
for i in range(3):
# Flush code cache.
gdb.execute("set code-cache off");
gdb.execute("set code-cache on");
self.measure.measure(lambda: self._do_test(i), i)
| gpl-2.0 |
pratapvardhan/pandas | pandas/core/base.py | 1 | 41615 | """
Base and utility classes for pandas objects.
"""
import warnings
import textwrap
from pandas import compat
from pandas.compat import builtins
import numpy as np
from pandas.core.dtypes.missing import isna
from pandas.core.dtypes.generic import ABCDataFrame, ABCSeries, ABCIndexClass
from pandas.core.dtypes.common import (
is_datetimelike,
is_object_dtype,
is_list_like,
is_scalar,
is_extension_type,
is_extension_array_dtype)
from pandas.util._validators import validate_bool_kwarg
from pandas.errors import AbstractMethodError
from pandas.core import common as com, algorithms
import pandas.core.nanops as nanops
import pandas._libs.lib as lib
from pandas.compat.numpy import function as nv
from pandas.compat import PYPY
from pandas.util._decorators import (Appender, cache_readonly,
deprecate_kwarg, Substitution)
from pandas.core.accessor import DirNamesMixin
_shared_docs = dict()
_indexops_doc_kwargs = dict(klass='IndexOpsMixin', inplace='',
unique='IndexOpsMixin', duplicated='IndexOpsMixin')
class StringMixin(object):
"""implements string methods so long as object defines a `__unicode__`
method.
Handles Python2/3 compatibility transparently.
"""
# side note - this could be made into a metaclass if more than one
# object needs
# ----------------------------------------------------------------------
# Formatting
def __unicode__(self):
raise AbstractMethodError(self)
def __str__(self):
"""
Return a string representation for a particular Object
Invoked by str(df) in both py2/py3.
Yields Bytestring in Py2, Unicode String in py3.
"""
if compat.PY3:
return self.__unicode__()
return self.__bytes__()
def __bytes__(self):
"""
Return a string representation for a particular object.
Invoked by bytes(obj) in py3 only.
Yields a bytestring in both py2/py3.
"""
from pandas.core.config import get_option
encoding = get_option("display.encoding")
return self.__unicode__().encode(encoding, 'replace')
def __repr__(self):
"""
Return a string representation for a particular object.
Yields Bytestring in Py2, Unicode String in py3.
"""
return str(self)
class PandasObject(StringMixin, DirNamesMixin):
"""baseclass for various pandas objects"""
@property
def _constructor(self):
"""class constructor (for this class it's just `__class__`"""
return self.__class__
def __unicode__(self):
"""
Return a string representation for a particular object.
Invoked by unicode(obj) in py2 only. Yields a Unicode String in both
py2/py3.
"""
# Should be overwritten by base classes
return object.__repr__(self)
def _reset_cache(self, key=None):
"""
Reset cached properties. If ``key`` is passed, only clears that key.
"""
if getattr(self, '_cache', None) is None:
return
if key is None:
self._cache.clear()
else:
self._cache.pop(key, None)
def __sizeof__(self):
"""
Generates the total memory usage for an object that returns
either a value or Series of values
"""
if hasattr(self, 'memory_usage'):
mem = self.memory_usage(deep=True)
if not is_scalar(mem):
mem = mem.sum()
return int(mem)
# no memory_usage attribute, so fall back to
# object's 'sizeof'
return super(PandasObject, self).__sizeof__()
class NoNewAttributesMixin(object):
"""Mixin which prevents adding new attributes.
Prevents additional attributes via xxx.attribute = "something" after a
call to `self.__freeze()`. Mainly used to prevent the user from using
    wrong attributes on an accessor (`Series.cat/.str/.dt`).
If you really want to add a new attribute at a later time, you need to use
`object.__setattr__(self, key, value)`.
"""
def _freeze(self):
"""Prevents setting additional attributes"""
object.__setattr__(self, "__frozen", True)
# prevent adding any attribute via s.xxx.new_attribute = ...
def __setattr__(self, key, value):
# _cache is used by a decorator
# We need to check both 1.) cls.__dict__ and 2.) getattr(self, key)
# because
# 1.) getattr is false for attributes that raise errors
# 2.) cls.__dict__ doesn't traverse into base classes
if (getattr(self, "__frozen", False) and not
(key == "_cache" or
key in type(self).__dict__ or
getattr(self, key, None) is not None)):
raise AttributeError("You cannot add any new attribute '{key}'".
format(key=key))
object.__setattr__(self, key, value)
class GroupByError(Exception):
pass
class DataError(GroupByError):
pass
class SpecificationError(GroupByError):
pass
class SelectionMixin(object):
"""
mixin implementing the selection & aggregation interface on a group-like
object sub-classes need to define: obj, exclusions
"""
_selection = None
_internal_names = ['_cache', '__setstate__']
_internal_names_set = set(_internal_names)
_builtin_table = {
builtins.sum: np.sum,
builtins.max: np.max,
builtins.min: np.min
}
_cython_table = {
builtins.sum: 'sum',
builtins.max: 'max',
builtins.min: 'min',
np.all: 'all',
np.any: 'any',
np.sum: 'sum',
np.mean: 'mean',
np.prod: 'prod',
np.std: 'std',
np.var: 'var',
np.median: 'median',
np.max: 'max',
np.min: 'min',
np.cumprod: 'cumprod',
np.cumsum: 'cumsum'
}
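    # For example, passing the Python builtin ``sum`` to ``.agg`` is first mapped to
    # ``np.sum`` via _builtin_table, and numpy reductions such as ``np.sum`` are then
    # dispatched to the cython 'sum' implementation via _cython_table.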
@property
def _selection_name(self):
"""
return a name for myself; this would ideally be called
the 'name' property, but we cannot conflict with the
Series.name property which can be set
"""
if self._selection is None:
return None # 'result'
else:
return self._selection
@property
def _selection_list(self):
if not isinstance(self._selection, (list, tuple, ABCSeries,
ABCIndexClass, np.ndarray)):
return [self._selection]
return self._selection
@cache_readonly
def _selected_obj(self):
if self._selection is None or isinstance(self.obj, ABCSeries):
return self.obj
else:
return self.obj[self._selection]
@cache_readonly
def ndim(self):
return self._selected_obj.ndim
@cache_readonly
def _obj_with_exclusions(self):
if self._selection is not None and isinstance(self.obj,
ABCDataFrame):
return self.obj.reindex(columns=self._selection_list)
if len(self.exclusions) > 0:
return self.obj.drop(self.exclusions, axis=1)
else:
return self.obj
def __getitem__(self, key):
if self._selection is not None:
raise Exception('Column(s) {selection} already selected'
.format(selection=self._selection))
if isinstance(key, (list, tuple, ABCSeries, ABCIndexClass,
np.ndarray)):
if len(self.obj.columns.intersection(key)) != len(key):
bad_keys = list(set(key).difference(self.obj.columns))
raise KeyError("Columns not found: {missing}"
.format(missing=str(bad_keys)[1:-1]))
return self._gotitem(list(key), ndim=2)
elif not getattr(self, 'as_index', False):
if key not in self.obj.columns:
raise KeyError("Column not found: {key}".format(key=key))
return self._gotitem(key, ndim=2)
else:
if key not in self.obj:
raise KeyError("Column not found: {key}".format(key=key))
return self._gotitem(key, ndim=1)
def _gotitem(self, key, ndim, subset=None):
"""
sub-classes to define
return a sliced object
Parameters
----------
key : string / list of selections
ndim : 1,2
requested ndim of result
subset : object, default None
subset to act on
"""
raise AbstractMethodError(self)
def aggregate(self, func, *args, **kwargs):
raise AbstractMethodError(self)
agg = aggregate
def _try_aggregate_string_function(self, arg, *args, **kwargs):
"""
if arg is a string, then try to operate on it:
- try to find a function (or attribute) on ourselves
- try to find a numpy function
- raise
"""
assert isinstance(arg, compat.string_types)
f = getattr(self, arg, None)
if f is not None:
if callable(f):
return f(*args, **kwargs)
# people may try to aggregate on a non-callable attribute
# but don't let them think they can pass args to it
assert len(args) == 0
assert len([kwarg for kwarg in kwargs
if kwarg not in ['axis', '_level']]) == 0
return f
f = getattr(np, arg, None)
if f is not None:
return f(self, *args, **kwargs)
raise ValueError("{arg} is an unknown string function".format(arg=arg))
def _aggregate(self, arg, *args, **kwargs):
"""
provide an implementation for the aggregators
Parameters
----------
arg : string, dict, function
*args : args to pass on to the function
**kwargs : kwargs to pass on to the function
Returns
-------
tuple of result, how
Notes
-----
        how can be a string describing the required post-processing, or
None if not required
"""
is_aggregator = lambda x: isinstance(x, (list, tuple, dict))
is_nested_renamer = False
_axis = kwargs.pop('_axis', None)
if _axis is None:
_axis = getattr(self, 'axis', 0)
_level = kwargs.pop('_level', None)
if isinstance(arg, compat.string_types):
return self._try_aggregate_string_function(arg, *args,
**kwargs), None
if isinstance(arg, dict):
# aggregate based on the passed dict
if _axis != 0: # pragma: no cover
raise ValueError('Can only pass dict with axis=0')
obj = self._selected_obj
def nested_renaming_depr(level=4):
# deprecation of nested renaming
# GH 15931
warnings.warn(
("using a dict with renaming "
"is deprecated and will be removed in a future "
"version"),
FutureWarning, stacklevel=level)
# if we have a dict of any non-scalars
# eg. {'A' : ['mean']}, normalize all to
# be list-likes
if any(is_aggregator(x) for x in compat.itervalues(arg)):
new_arg = compat.OrderedDict()
for k, v in compat.iteritems(arg):
if not isinstance(v, (tuple, list, dict)):
new_arg[k] = [v]
else:
new_arg[k] = v
# the keys must be in the columns
# for ndim=2, or renamers for ndim=1
# ok for now, but deprecated
# {'A': { 'ra': 'mean' }}
# {'A': { 'ra': ['mean'] }}
# {'ra': ['mean']}
# not ok
# {'ra' : { 'A' : 'mean' }}
if isinstance(v, dict):
is_nested_renamer = True
if k not in obj.columns:
msg = ('cannot perform renaming for {key} with a '
'nested dictionary').format(key=k)
raise SpecificationError(msg)
nested_renaming_depr(4 + (_level or 0))
elif isinstance(obj, ABCSeries):
nested_renaming_depr()
elif isinstance(obj, ABCDataFrame) and \
k not in obj.columns:
raise KeyError(
"Column '{col}' does not exist!".format(col=k))
arg = new_arg
else:
# deprecation of renaming keys
# GH 15931
keys = list(compat.iterkeys(arg))
if (isinstance(obj, ABCDataFrame) and
len(obj.columns.intersection(keys)) != len(keys)):
nested_renaming_depr()
from pandas.core.reshape.concat import concat
def _agg_1dim(name, how, subset=None):
"""
aggregate a 1-dim with how
"""
colg = self._gotitem(name, ndim=1, subset=subset)
if colg.ndim != 1:
raise SpecificationError("nested dictionary is ambiguous "
"in aggregation")
return colg.aggregate(how, _level=(_level or 0) + 1)
def _agg_2dim(name, how):
"""
aggregate a 2-dim with how
"""
colg = self._gotitem(self._selection, ndim=2,
subset=obj)
return colg.aggregate(how, _level=None)
def _agg(arg, func):
"""
run the aggregations over the arg with func
return an OrderedDict
"""
result = compat.OrderedDict()
for fname, agg_how in compat.iteritems(arg):
result[fname] = func(fname, agg_how)
return result
# set the final keys
keys = list(compat.iterkeys(arg))
result = compat.OrderedDict()
# nested renamer
if is_nested_renamer:
result = list(_agg(arg, _agg_1dim).values())
if all(isinstance(r, dict) for r in result):
result, results = compat.OrderedDict(), result
for r in results:
result.update(r)
keys = list(compat.iterkeys(result))
else:
if self._selection is not None:
keys = None
# some selection on the object
elif self._selection is not None:
sl = set(self._selection_list)
# we are a Series like object,
# but may have multiple aggregations
if len(sl) == 1:
result = _agg(arg, lambda fname,
agg_how: _agg_1dim(self._selection, agg_how))
# we are selecting the same set as we are aggregating
elif not len(sl - set(keys)):
result = _agg(arg, _agg_1dim)
# we are a DataFrame, with possibly multiple aggregations
else:
result = _agg(arg, _agg_2dim)
# no selection
else:
try:
result = _agg(arg, _agg_1dim)
except SpecificationError:
# we are aggregating expecting all 1d-returns
# but we have 2d
result = _agg(arg, _agg_2dim)
# combine results
def is_any_series():
# return a boolean if we have *any* nested series
return any(isinstance(r, ABCSeries)
for r in compat.itervalues(result))
def is_any_frame():
# return a boolean if we have *any* nested series
return any(isinstance(r, ABCDataFrame)
for r in compat.itervalues(result))
if isinstance(result, list):
return concat(result, keys=keys, axis=1, sort=True), True
elif is_any_frame():
# we have a dict of DataFrames
# return a MI DataFrame
return concat([result[k] for k in keys],
keys=keys, axis=1), True
elif isinstance(self, ABCSeries) and is_any_series():
# we have a dict of Series
# return a MI Series
try:
result = concat(result)
except TypeError:
# we want to give a nice error here if
# we have non-same sized objects, so
# we don't automatically broadcast
raise ValueError("cannot perform both aggregation "
"and transformation operations "
"simultaneously")
return result, True
# fall thru
from pandas import DataFrame, Series
try:
result = DataFrame(result)
except ValueError:
# we have a dict of scalars
result = Series(result,
name=getattr(self, 'name', None))
return result, True
elif is_list_like(arg) and arg not in compat.string_types:
# we require a list, but not an 'str'
return self._aggregate_multiple_funcs(arg,
_level=_level,
_axis=_axis), None
else:
result = None
f = self._is_cython_func(arg)
if f and not args and not kwargs:
return getattr(self, f)(), None
# caller can react
return result, True
def _aggregate_multiple_funcs(self, arg, _level, _axis):
from pandas.core.reshape.concat import concat
if _axis != 0:
raise NotImplementedError("axis other than 0 is not supported")
if self._selected_obj.ndim == 1:
obj = self._selected_obj
else:
obj = self._obj_with_exclusions
results = []
keys = []
# degenerate case
if obj.ndim == 1:
for a in arg:
try:
colg = self._gotitem(obj.name, ndim=1, subset=obj)
results.append(colg.aggregate(a))
# make sure we find a good name
name = com._get_callable_name(a) or a
keys.append(name)
except (TypeError, DataError):
pass
except SpecificationError:
raise
# multiples
else:
for index, col in enumerate(obj):
try:
colg = self._gotitem(col, ndim=1,
subset=obj.iloc[:, index])
results.append(colg.aggregate(arg))
keys.append(col)
except (TypeError, DataError):
pass
except ValueError:
# cannot aggregate
continue
except SpecificationError:
raise
# if we are empty
if not len(results):
raise ValueError("no results")
try:
return concat(results, keys=keys, axis=1, sort=False)
except TypeError:
# we are concatting non-NDFrame objects,
# e.g. a list of scalars
from pandas.core.dtypes.cast import is_nested_object
from pandas import Series
result = Series(results, index=keys, name=self.name)
if is_nested_object(result):
raise ValueError("cannot combine transform and "
"aggregation operations")
return result
def _shallow_copy(self, obj=None, obj_type=None, **kwargs):
""" return a new object with the replacement attributes """
if obj is None:
obj = self._selected_obj.copy()
if obj_type is None:
obj_type = self._constructor
if isinstance(obj, obj_type):
obj = obj.obj
for attr in self._attributes:
if attr not in kwargs:
kwargs[attr] = getattr(self, attr)
return obj_type(obj, **kwargs)
def _is_cython_func(self, arg):
""" if we define an internal function for this argument, return it """
return self._cython_table.get(arg)
def _is_builtin_func(self, arg):
"""
        if we define a builtin function for this argument, return it,
otherwise return the arg
"""
return self._builtin_table.get(arg, arg)
class GroupByMixin(object):
""" provide the groupby facilities to the mixed object """
@staticmethod
def _dispatch(name, *args, **kwargs):
""" dispatch to apply """
def outer(self, *args, **kwargs):
def f(x):
x = self._shallow_copy(x, groupby=self._groupby)
return getattr(x, name)(*args, **kwargs)
return self._groupby.apply(f)
outer.__name__ = name
return outer
def _gotitem(self, key, ndim, subset=None):
"""
sub-classes to define
return a sliced object
Parameters
----------
key : string / list of selections
ndim : 1,2
requested ndim of result
subset : object, default None
subset to act on
"""
# create a new object to prevent aliasing
if subset is None:
subset = self.obj
# we need to make a shallow copy of ourselves
# with the same groupby
kwargs = dict([(attr, getattr(self, attr))
for attr in self._attributes])
self = self.__class__(subset,
groupby=self._groupby[key],
parent=self,
**kwargs)
self._reset_cache()
if subset.ndim == 2:
if is_scalar(key) and key in subset or is_list_like(key):
self._selection = key
return self
class IndexOpsMixin(object):
""" common ops mixin to support a unified interface / docs for Series /
Index
"""
# ndarray compatibility
__array_priority__ = 1000
def transpose(self, *args, **kwargs):
""" return the transpose, which is by definition self """
nv.validate_transpose(args, kwargs)
return self
T = property(transpose, doc="return the transpose, which is by "
"definition self")
@property
def shape(self):
""" return a tuple of the shape of the underlying data """
return self._values.shape
@property
def ndim(self):
""" return the number of dimensions of the underlying data,
by definition 1
"""
return 1
def item(self):
""" return the first element of the underlying data as a python
scalar
"""
try:
return self.values.item()
except IndexError:
# copy numpy's message here because Py26 raises an IndexError
raise ValueError('can only convert an array of size 1 to a '
'Python scalar')
@property
def data(self):
""" return the data pointer of the underlying data """
warnings.warn("{obj}.data is deprecated and will be removed "
"in a future version".format(obj=type(self).__name__),
FutureWarning, stacklevel=2)
return self.values.data
@property
def itemsize(self):
""" return the size of the dtype of the item of the underlying data """
warnings.warn("{obj}.itemsize is deprecated and will be removed "
"in a future version".format(obj=type(self).__name__),
FutureWarning, stacklevel=2)
return self._ndarray_values.itemsize
@property
def nbytes(self):
""" return the number of bytes in the underlying data """
return self._values.nbytes
@property
def strides(self):
""" return the strides of the underlying data """
warnings.warn("{obj}.strides is deprecated and will be removed "
"in a future version".format(obj=type(self).__name__),
FutureWarning, stacklevel=2)
return self._ndarray_values.strides
@property
def size(self):
""" return the number of elements in the underlying data """
return self._values.size
@property
def flags(self):
""" return the ndarray.flags for the underlying data """
warnings.warn("{obj}.flags is deprecated and will be removed "
"in a future version".format(obj=type(self).__name__),
FutureWarning, stacklevel=2)
return self.values.flags
@property
def base(self):
""" return the base object if the memory of the underlying data is
shared
"""
warnings.warn("{obj}.base is deprecated and will be removed "
"in a future version".format(obj=type(self).__name__),
FutureWarning, stacklevel=2)
return self.values.base
@property
def _ndarray_values(self):
# type: () -> np.ndarray
"""The data as an ndarray, possibly losing information.
The expectation is that this is cheap to compute, and is primarily
used for interacting with our indexers.
- categorical -> codes
"""
if is_extension_array_dtype(self):
return self.values._ndarray_values
return self.values
@property
def empty(self):
return not self.size
def max(self):
"""
Return the maximum value of the Index.
Returns
-------
scalar
Maximum value.
See Also
--------
Index.min : Return the minimum value in an Index.
Series.max : Return the maximum value in a Series.
DataFrame.max : Return the maximum values in a DataFrame.
Examples
--------
>>> idx = pd.Index([3, 2, 1])
>>> idx.max()
3
>>> idx = pd.Index(['c', 'b', 'a'])
>>> idx.max()
'c'
For a MultiIndex, the maximum is determined lexicographically.
>>> idx = pd.MultiIndex.from_product([('a', 'b'), (2, 1)])
>>> idx.max()
('b', 2)
"""
return nanops.nanmax(self.values)
def argmax(self, axis=None):
"""
return a ndarray of the maximum argument indexer
See also
--------
numpy.ndarray.argmax
"""
return nanops.nanargmax(self.values)
def min(self):
"""
Return the minimum value of the Index.
Returns
-------
scalar
Minimum value.
See Also
--------
Index.max : Return the maximum value of the object.
Series.min : Return the minimum value in a Series.
DataFrame.min : Return the minimum values in a DataFrame.
Examples
--------
>>> idx = pd.Index([3, 2, 1])
>>> idx.min()
1
>>> idx = pd.Index(['c', 'b', 'a'])
>>> idx.min()
'a'
For a MultiIndex, the minimum is determined lexicographically.
>>> idx = pd.MultiIndex.from_product([('a', 'b'), (2, 1)])
>>> idx.min()
('a', 1)
"""
return nanops.nanmin(self.values)
def argmin(self, axis=None):
"""
return a ndarray of the minimum argument indexer
See also
--------
numpy.ndarray.argmin
"""
return nanops.nanargmin(self.values)
def tolist(self):
"""
Return a list of the values.
These are each a scalar type, which is a Python scalar
(for str, int, float) or a pandas scalar
(for Timestamp/Timedelta/Interval/Period)
See Also
--------
numpy.ndarray.tolist
"""
if is_datetimelike(self._values):
return [com._maybe_box_datetimelike(x) for x in self._values]
elif is_extension_array_dtype(self._values):
return list(self._values)
else:
return self._values.tolist()
def __iter__(self):
"""
Return an iterator of the values.
These are each a scalar type, which is a Python scalar
(for str, int, float) or a pandas scalar
(for Timestamp/Timedelta/Interval/Period)
"""
return iter(self.tolist())
@cache_readonly
def hasnans(self):
""" return if I have any nans; enables various perf speedups """
return isna(self).any()
def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None,
filter_type=None, **kwds):
""" perform the reduction type operation if we can """
func = getattr(self, name, None)
if func is None:
raise TypeError("{klass} cannot perform the operation {op}".format(
klass=self.__class__.__name__, op=name))
return func(**kwds)
def _map_values(self, mapper, na_action=None):
"""An internal function that maps values using the input
correspondence (which can be a dict, Series, or function).
Parameters
----------
mapper : function, dict, or Series
The input correspondence object
na_action : {None, 'ignore'}
If 'ignore', propagate NA values, without passing them to the
mapping function
Returns
-------
applied : Union[Index, MultiIndex], inferred
The output of the mapping function applied to the index.
If the function returns a tuple with more than one element
a MultiIndex will be returned.
"""
# we can fastpath dict/Series to an efficient map
# as we know that we are not going to have to yield
# python types
if isinstance(mapper, dict):
if hasattr(mapper, '__missing__'):
# If a dictionary subclass defines a default value method,
# convert mapper to a lookup function (GH #15999).
dict_with_default = mapper
mapper = lambda x: dict_with_default[x]
else:
# Dictionary does not have a default. Thus it's safe to
                # convert to a Series for efficiency.
# we specify the keys here to handle the
# possibility that they are tuples
from pandas import Series
mapper = Series(mapper)
if isinstance(mapper, ABCSeries):
# Since values were input this means we came from either
# a dict or a series and mapper should be an index
if is_extension_type(self.dtype):
values = self._values
else:
values = self.values
indexer = mapper.index.get_indexer(values)
new_values = algorithms.take_1d(mapper._values, indexer)
return new_values
# we must convert to python types
if is_extension_type(self.dtype):
values = self._values
if na_action is not None:
raise NotImplementedError
map_f = lambda values, f: values.map(f)
else:
values = self.astype(object)
values = getattr(values, 'values', values)
if na_action == 'ignore':
def map_f(values, f):
return lib.map_infer_mask(values, f,
isna(values).view(np.uint8))
else:
map_f = lib.map_infer
# mapper is a function
new_values = map_f(values, mapper)
return new_values
def value_counts(self, normalize=False, sort=True, ascending=False,
bins=None, dropna=True):
"""
Return a Series containing counts of unique values.
The resulting object will be in descending order so that the
first element is the most frequently-occurring element.
Excludes NA values by default.
Parameters
----------
normalize : boolean, default False
If True then the object returned will contain the relative
frequencies of the unique values.
sort : boolean, default True
Sort by values.
ascending : boolean, default False
Sort in ascending order.
bins : integer, optional
Rather than count values, group them into half-open bins,
a convenience for ``pd.cut``, only works with numeric data.
dropna : boolean, default True
Don't include counts of NaN.
Returns
-------
counts : Series
See Also
--------
Series.count: number of non-NA elements in a Series
DataFrame.count: number of non-NA elements in a DataFrame
Examples
--------
>>> index = pd.Index([3, 1, 2, 3, 4, np.nan])
>>> index.value_counts()
3.0 2
4.0 1
2.0 1
1.0 1
dtype: int64
With `normalize` set to `True`, returns the relative frequency by
dividing all values by the sum of values.
>>> s = pd.Series([3, 1, 2, 3, 4, np.nan])
>>> s.value_counts(normalize=True)
3.0 0.4
4.0 0.2
2.0 0.2
1.0 0.2
dtype: float64
**bins**
Bins can be useful for going from a continuous variable to a
categorical variable; instead of counting unique
occurrences of values, divide the index into the specified
number of half-open bins.
>>> s.value_counts(bins=3)
(2.0, 3.0] 2
(0.996, 2.0] 2
(3.0, 4.0] 1
dtype: int64
**dropna**
With `dropna` set to `False` we can also see NaN index values.
>>> s.value_counts(dropna=False)
3.0 2
NaN 1
4.0 1
2.0 1
1.0 1
dtype: int64
"""
from pandas.core.algorithms import value_counts
result = value_counts(self, sort=sort, ascending=ascending,
normalize=normalize, bins=bins, dropna=dropna)
return result
def unique(self):
values = self._values
if hasattr(values, 'unique'):
result = values.unique()
else:
from pandas.core.algorithms import unique1d
result = unique1d(values)
return result
def nunique(self, dropna=True):
"""
Return number of unique elements in the object.
Excludes NA values by default.
Parameters
----------
dropna : boolean, default True
Don't include NaN in the count.
Returns
-------
nunique : int
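Examples
--------
>>> idx = pd.Index([1, 1, 3, np.nan])
>>> idx.nunique()
2
>>> idx.nunique(dropna=False)
3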
"""
uniqs = self.unique()
n = len(uniqs)
if dropna and isna(uniqs).any():
n -= 1
return n
@property
def is_unique(self):
"""
Return boolean if values in the object are unique
Returns
-------
is_unique : boolean
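Examples
--------
>>> pd.Index([1, 2, 3]).is_unique
True
>>> pd.Index([1, 1, 2]).is_unique
False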
"""
return self.nunique() == len(self)
@property
def is_monotonic(self):
"""
Return boolean if values in the object are
monotonic_increasing
.. versionadded:: 0.19.0
Returns
-------
is_monotonic : boolean
"""
from pandas import Index
return Index(self).is_monotonic
is_monotonic_increasing = is_monotonic
@property
def is_monotonic_decreasing(self):
"""
Return boolean if values in the object are
monotonic_decreasing
.. versionadded:: 0.19.0
Returns
-------
is_monotonic_decreasing : boolean
"""
from pandas import Index
return Index(self).is_monotonic_decreasing
def memory_usage(self, deep=False):
"""
Memory usage of the values
Parameters
----------
deep : bool
Introspect the data deeply, interrogate
`object` dtypes for system-level memory consumption
Returns
-------
bytes used
Notes
-----
Memory usage does not include memory consumed by elements that
are not components of the array if deep=False or if used on PyPy
See Also
--------
numpy.ndarray.nbytes
"""
if hasattr(self.values, 'memory_usage'):
return self.values.memory_usage(deep=deep)
v = self.values.nbytes
if deep and is_object_dtype(self) and not PYPY:
v += lib.memory_usage_of_objects(self.values)
return v
@Substitution(
values='', order='', size_hint='',
sort=textwrap.dedent("""\
sort : boolean, default False
Sort `uniques` and shuffle `labels` to maintain the
relationship.
"""))
@Appender(algorithms._shared_docs['factorize'])
def factorize(self, sort=False, na_sentinel=-1):
return algorithms.factorize(self, sort=sort, na_sentinel=na_sentinel)
_shared_docs['searchsorted'] = (
"""Find indices where elements should be inserted to maintain order.
Find the indices into a sorted %(klass)s `self` such that, if the
corresponding elements in `value` were inserted before the indices,
the order of `self` would be preserved.
Parameters
----------
value : array_like
Values to insert into `self`.
side : {'left', 'right'}, optional
If 'left', the index of the first suitable location found is given.
If 'right', return the last such index. If there is no suitable
index, return either 0 or N (where N is the length of `self`).
sorter : 1-D array_like, optional
Optional array of integer indices that sort `self` into ascending
order. They are typically the result of ``np.argsort``.
Returns
-------
indices : array of ints
Array of insertion points with the same shape as `value`.
See Also
--------
numpy.searchsorted
Notes
-----
Binary search is used to find the required insertion points.
Examples
--------
>>> x = pd.Series([1, 2, 3])
>>> x
0 1
1 2
2 3
dtype: int64
>>> x.searchsorted(4)
array([3])
>>> x.searchsorted([0, 4])
array([0, 3])
>>> x.searchsorted([1, 3], side='left')
array([0, 2])
>>> x.searchsorted([1, 3], side='right')
array([1, 3])
>>> x = pd.Categorical(['apple', 'bread', 'bread',
...                     'cheese', 'milk'], ordered=True)
>>> x
[apple, bread, bread, cheese, milk]
Categories (4, object): [apple < bread < cheese < milk]
>>> x.searchsorted('bread')
array([1]) # Note: an array, not a scalar
>>> x.searchsorted(['bread'], side='right')
array([3])
""")
@Substitution(klass='IndexOpsMixin')
@Appender(_shared_docs['searchsorted'])
@deprecate_kwarg(old_arg_name='key', new_arg_name='value')
def searchsorted(self, value, side='left', sorter=None):
# needs coercion on the key (DatetimeIndex does already)
return self.values.searchsorted(value, side=side, sorter=sorter)
def drop_duplicates(self, keep='first', inplace=False):
inplace = validate_bool_kwarg(inplace, 'inplace')
if isinstance(self, ABCIndexClass):
if self.is_unique:
return self._shallow_copy()
duplicated = self.duplicated(keep=keep)
result = self[np.logical_not(duplicated)]
if inplace:
return self._update_inplace(result)
else:
return result
def duplicated(self, keep='first'):
from pandas.core.algorithms import duplicated
if isinstance(self, ABCIndexClass):
if self.is_unique:
return np.zeros(len(self), dtype=np.bool)
return duplicated(self, keep=keep)
else:
return self._constructor(duplicated(self, keep=keep),
index=self.index).__finalize__(self)
# ----------------------------------------------------------------------
# abstracts
def _update_inplace(self, result, **kwargs):
raise AbstractMethodError(self)
| bsd-3-clause |
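# A minimal sketch of the mapping behaviour documented in `_map_values` above,
# expressed through the public pandas API; the sample values are illustrative.
# A dict or Series mapper is aligned through an indexer (missing keys become
# NaN), while a plain function is applied element-wise.
import pandas as pd

idx = pd.Index(['cat', 'dog', 'cat', 'bird'])
print(idx.map({'cat': 0, 'dog': 1}))                        # 'bird' -> NaN
print(idx.map(pd.Series({'cat': 0, 'dog': 1, 'bird': 2})))  # Series mapper
print(idx.map(str.upper))                                   # element-wise function
print(idx.value_counts(), idx.nunique())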
MIPS/external-chromium_org | third_party/closure_linter/closure_linter/requireprovidesorter.py | 137 | 9826 | #!/usr/bin/env python
#
# Copyright 2011 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains logic for sorting goog.provide and goog.require statements.
Closurized JavaScript files use goog.provide and goog.require statements at the
top of the file to manage dependencies. These statements should be sorted
alphabetically; however, it is common for them to be accompanied by inline
comments or suppression annotations. In order to sort these statements without
disrupting their comments and annotations, the association between statements
and comments/annotations must be maintained while sorting.
RequireProvideSorter: Handles checking/fixing of provide/require statements.
"""
from closure_linter import javascripttokens
from closure_linter import tokenutil
# Shorthand
Type = javascripttokens.JavaScriptTokenType
class RequireProvideSorter(object):
"""Checks for and fixes alphabetization of provide and require statements.
When alphabetizing, comments on the same line or comments directly above a
goog.provide or goog.require statement are associated with that statement and
stay with the statement as it gets sorted.
"""
def CheckProvides(self, token):
"""Checks alphabetization of goog.provide statements.
Iterates over tokens in given token stream, identifies goog.provide tokens,
and checks that they occur in alphabetical order by the object being
provided.
Args:
token: A token in the token stream before any goog.provide tokens.
Returns:
A tuple containing the first provide token in the token stream and a list
of provided objects sorted alphabetically. For example:
(JavaScriptToken, ['object.a', 'object.b', ...])
None is returned if all goog.provide statements are already sorted.
"""
provide_tokens = self._GetRequireOrProvideTokens(token, 'goog.provide')
provide_strings = self._GetRequireOrProvideTokenStrings(provide_tokens)
sorted_provide_strings = sorted(provide_strings)
if provide_strings != sorted_provide_strings:
return [provide_tokens[0], sorted_provide_strings]
return None
def CheckRequires(self, token):
"""Checks alphabetization of goog.require statements.
Iterates over tokens in given token stream, identifies goog.require tokens,
and checks that they occur in alphabetical order by the dependency being
required.
Args:
token: A token in the token stream before any goog.require tokens.
Returns:
A tuple containing the first require token in the token stream and a list
of required dependencies sorted alphabetically. For example:
(JavaScriptToken, ['object.a', 'object.b', ...])
None is returned if all goog.require statements are already sorted.
"""
require_tokens = self._GetRequireOrProvideTokens(token, 'goog.require')
require_strings = self._GetRequireOrProvideTokenStrings(require_tokens)
sorted_require_strings = sorted(require_strings)
if require_strings != sorted_require_strings:
return (require_tokens[0], sorted_require_strings)
return None
def FixProvides(self, token):
"""Sorts goog.provide statements in the given token stream alphabetically.
Args:
token: The first token in the token stream.
"""
self._FixProvidesOrRequires(
self._GetRequireOrProvideTokens(token, 'goog.provide'))
def FixRequires(self, token):
"""Sorts goog.require statements in the given token stream alphabetically.
Args:
token: The first token in the token stream.
"""
self._FixProvidesOrRequires(
self._GetRequireOrProvideTokens(token, 'goog.require'))
def _FixProvidesOrRequires(self, tokens):
"""Sorts goog.provide or goog.require statements.
Args:
tokens: A list of goog.provide or goog.require tokens in the order they
appear in the token stream. i.e. the first token in this list must
be the first goog.provide or goog.require token.
"""
strings = self._GetRequireOrProvideTokenStrings(tokens)
sorted_strings = sorted(strings)
# Make a separate pass to remove any blank lines between goog.require/
# goog.provide tokens.
first_token = tokens[0]
last_token = tokens[-1]
i = last_token
while i != first_token:
if i.type is Type.BLANK_LINE:
tokenutil.DeleteToken(i)
i = i.previous
# A map from required/provided object name to tokens that make up the line
# it was on, including any comments immediately before it or after it on the
# same line.
tokens_map = self._GetTokensMap(tokens)
# Iterate over the map removing all tokens.
for name in tokens_map:
tokens_to_delete = tokens_map[name]
for i in tokens_to_delete:
tokenutil.DeleteToken(i)
# Re-add all tokens in the map in alphabetical order.
insert_after = tokens[0].previous
for string in sorted_strings:
for i in tokens_map[string]:
tokenutil.InsertTokenAfter(i, insert_after)
insert_after = i
def _GetRequireOrProvideTokens(self, token, token_string):
"""Gets all goog.provide or goog.require tokens in the given token stream.
Args:
token: The first token in the token stream.
token_string: One of 'goog.provide' or 'goog.require' to indicate which
tokens to find.
Returns:
A list of goog.provide or goog.require tokens in the order they appear in
the token stream.
"""
tokens = []
while token:
if token.type == Type.IDENTIFIER:
if token.string == token_string:
tokens.append(token)
elif token.string not in ['goog.require', 'goog.provide']:
# The goog.provide and goog.require identifiers are at the top of the
# file. So if any other identifier is encountered, return.
break
token = token.next
return tokens
def _GetRequireOrProvideTokenStrings(self, tokens):
"""Gets a list of strings corresponding to the given list of tokens.
The string will be the next string in the token stream after each token in
tokens. This is used to find the object being provided/required by a given
goog.provide or goog.require token.
Args:
tokens: A list of goog.provide or goog.require tokens.
Returns:
A list of object names that are being provided or required by the given
list of tokens. For example:
['object.a', 'object.c', 'object.b']
"""
token_strings = []
for token in tokens:
name = tokenutil.Search(token, Type.STRING_TEXT).string
token_strings.append(name)
return token_strings
def _GetTokensMap(self, tokens):
"""Gets a map from object name to tokens associated with that object.
Starting from the goog.provide/goog.require token, searches backwards in the
token stream for any lines that start with a comment. These lines are
associated with the goog.provide/goog.require token. Also associates any
tokens on the same line as the goog.provide/goog.require token with that
token.
Args:
tokens: A list of goog.provide or goog.require tokens.
Returns:
A dictionary that maps object names to the tokens associated with the
goog.provide or goog.require of that object name. For example:
{
'object.a': [JavaScriptToken, JavaScriptToken, ...],
'object.b': [...]
}
The list of tokens includes any comment lines above the goog.provide or
goog.require statement and everything after the statement on the same
line. For example, all of the following would be associated with
'object.a':
/** @suppress {extraRequire} */
goog.require('object.a'); // Some comment.
"""
tokens_map = {}
for token in tokens:
object_name = tokenutil.Search(token, Type.STRING_TEXT).string
# If the previous line starts with a comment, presume that the comment
# relates to the goog.require or goog.provide and keep them together when
# sorting.
first_token = token
previous_first_token = tokenutil.GetFirstTokenInPreviousLine(first_token)
while previous_first_token.IsAnyType(Type.COMMENT_TYPES):
first_token = previous_first_token
previous_first_token = tokenutil.GetFirstTokenInPreviousLine(
first_token)
# Find the last token on the line.
last_token = tokenutil.GetLastTokenInSameLine(token)
all_tokens = self._GetTokenList(first_token, last_token)
tokens_map[object_name] = all_tokens
return tokens_map
def _GetTokenList(self, first_token, last_token):
"""Gets a list of all tokens from first_token to last_token, inclusive.
Args:
first_token: The first token to get.
last_token: The last token to get.
Returns:
A list of all tokens between first_token and last_token, including both
first_token and last_token.
Raises:
Exception: If the token stream ends before last_token is reached.
"""
token_list = []
token = first_token
while token != last_token:
if not token:
raise Exception('ran out of tokens')
token_list.append(token)
token = token.next
token_list.append(last_token)
return token_list
| bsd-3-clause |
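# A minimal, token-free sketch of the sorting idea implemented by
# RequireProvideSorter above: comment lines immediately preceding a
# goog.require statement stay attached to it, and the blocks are reordered
# alphabetically by the required name. The regex and helper are illustrative
# only and do not use the linter's token stream.
import re

_REQUIRE_RE = re.compile(r"goog\.require\('([^']+)'\)")

def sort_requires(lines):
    blocks, pending = [], []
    for line in lines:
        match = _REQUIRE_RE.search(line)
        if match:
            # Attach any preceding comment lines to this require statement.
            blocks.append((match.group(1), pending + [line]))
            pending = []
        else:
            pending.append(line)
    ordered = []
    for _, block in sorted(blocks, key=lambda item: item[0]):
        ordered.extend(block)
    return ordered + pending

print(sort_requires([
    "/** @suppress {extraRequire} */",
    "goog.require('object.b');",
    "goog.require('object.a');  // inline comment stays on its line",
]))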
wainersm/buildbot | worker/buildbot_worker/test/unit/test_scripts_runner.py | 9 | 14700 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
import mock
from twisted.python import log
from twisted.python import usage
from twisted.trial import unittest
from buildbot_worker.scripts import runner
from buildbot_worker.test.util import misc
class OptionsMixin(object):
def assertOptions(self, opts, exp):
got = dict([(k, opts[k]) for k in exp])
if got != exp:
msg = []
for k in exp:
if opts[k] != exp[k]:
msg.append(" %s: expected %r, got %r" %
(k, exp[k], opts[k]))
self.fail("did not get expected options\n" + ("\n".join(msg)))
class BaseDirTestsMixin(object):
"""
Common tests for Options classes with 'basedir' parameter
"""
GETCWD_PATH = "test-dir"
ABSPATH_PREFIX = "test-prefix-"
MY_BASEDIR = "my-basedir"
# the options class to instantiate for test cases
options_class = None
def setUp(self):
self.patch(os, "getcwd", lambda: self.GETCWD_PATH)
self.patch(os.path, "abspath", lambda path: self.ABSPATH_PREFIX + path)
def parse(self, *args):
assert self.options_class is not None
opts = self.options_class()
opts.parseOptions(args)
return opts
def test_defaults(self):
opts = self.parse()
self.assertEqual(opts["basedir"],
self.ABSPATH_PREFIX + self.GETCWD_PATH,
"unexpected basedir path")
def test_basedir_arg(self):
opts = self.parse(self.MY_BASEDIR)
self.assertEqual(opts["basedir"],
self.ABSPATH_PREFIX + self.MY_BASEDIR,
"unexpected basedir path")
def test_too_many_args(self):
self.assertRaisesRegex(usage.UsageError,
"I wasn't expecting so many arguments",
self.parse, "arg1", "arg2")
class TestMakerBase(BaseDirTestsMixin, unittest.TestCase):
"""
Test buildbot_worker.scripts.runner.MakerBase class.
"""
options_class = runner.MakerBase
class TestStopOptions(BaseDirTestsMixin, unittest.TestCase):
"""
Test buildbot_worker.scripts.runner.StopOptions class.
"""
options_class = runner.StopOptions
def test_synopsis(self):
opts = runner.StopOptions()
self.assertIn('buildbot-worker stop', opts.getSynopsis())
class TestStartOptions(OptionsMixin, BaseDirTestsMixin, unittest.TestCase):
"""
Test buildbot_worker.scripts.runner.StartOptions class.
"""
options_class = runner.StartOptions
def test_synopsis(self):
opts = runner.StartOptions()
self.assertIn('buildbot-worker start', opts.getSynopsis())
def test_all_args(self):
opts = self.parse("--quiet", "--nodaemon", self.MY_BASEDIR)
self.assertOptions(opts,
dict(quiet=True, nodaemon=True,
basedir=self.ABSPATH_PREFIX + self.MY_BASEDIR))
class TestRestartOptions(OptionsMixin, BaseDirTestsMixin, unittest.TestCase):
"""
Test buildbot_worker.scripts.runner.RestartOptions class.
"""
options_class = runner.RestartOptions
def test_synopsis(self):
opts = runner.RestartOptions()
self.assertIn('buildbot-worker restart', opts.getSynopsis())
def test_all_args(self):
opts = self.parse("--quiet", "--nodaemon", self.MY_BASEDIR)
self.assertOptions(opts,
dict(quiet=True, nodaemon=True,
basedir=self.ABSPATH_PREFIX + self.MY_BASEDIR))
class TestCreateWorkerOptions(OptionsMixin, unittest.TestCase):
"""
Test buildbot_worker.scripts.runner.CreateWorkerOptions class.
"""
req_args = ["bdir", "mstr:5678", "name", "pswd"]
def parse(self, *args):
opts = runner.CreateWorkerOptions()
opts.parseOptions(args)
return opts
def test_defaults(self):
self.assertRaisesRegex(usage.UsageError,
"incorrect number of arguments",
self.parse)
def test_synopsis(self):
opts = runner.CreateWorkerOptions()
self.assertIn('buildbot-worker create-worker', opts.getSynopsis())
def test_min_args(self):
# patch runner.MakerBase.postOptions() so that 'basedir'
# argument will not be converted to absolute path
self.patch(runner.MakerBase, "postOptions", mock.Mock())
self.assertOptions(self.parse(*self.req_args),
dict(basedir="bdir", host="mstr", port=5678,
name="name", passwd="pswd"))
def test_all_args(self):
# patch runner.MakerBase.postOptions() so that 'basedir'
# argument will not be converted to absolute path
self.patch(runner.MakerBase, "postOptions", mock.Mock())
opts = self.parse("--force", "--relocatable", "--no-logrotate",
"--keepalive=4", "--umask=0o22",
"--maxdelay=3", "--numcpus=4", "--log-size=2", "--log-count=1",
"--allow-shutdown=file", *self.req_args)
self.assertOptions(opts,
{"force": True,
"relocatable": True,
"no-logrotate": True,
"umask": "0o22",
"maxdelay": 3,
"numcpus": "4",
"log-size": 2,
"log-count": "1",
"allow-shutdown": "file",
"basedir": "bdir",
"host": "mstr",
"port": 5678,
"name": "name",
"passwd": "pswd"})
def test_master_url(self):
self.assertRaisesRegex(usage.UsageError,
"<master> is not a URL - do not use URL",
self.parse, "a", "http://b.c", "d", "e")
def test_inv_keepalive(self):
self.assertRaisesRegex(usage.UsageError,
"keepalive parameter needs to be a number",
self.parse, "--keepalive=X", *self.req_args)
def test_inv_maxdelay(self):
self.assertRaisesRegex(usage.UsageError,
"maxdelay parameter needs to be a number",
self.parse, "--maxdelay=X", *self.req_args)
def test_inv_log_size(self):
self.assertRaisesRegex(usage.UsageError,
"log-size parameter needs to be a number",
self.parse, "--log-size=X", *self.req_args)
def test_inv_log_count(self):
self.assertRaisesRegex(usage.UsageError,
"log-count parameter needs to be a number or None",
self.parse, "--log-count=X", *self.req_args)
def test_inv_numcpus(self):
self.assertRaisesRegex(usage.UsageError,
"numcpus parameter needs to be a number or None",
self.parse, "--numcpus=X", *self.req_args)
def test_inv_umask(self):
self.assertRaisesRegex(usage.UsageError,
"umask parameter needs to be a number or None",
self.parse, "--umask=X", *self.req_args)
def test_inv_allow_shutdown(self):
self.assertRaisesRegex(usage.UsageError,
"allow-shutdown needs to be one of 'signal' or 'file'",
self.parse, "--allow-shutdown=X", *self.req_args)
def test_too_few_args(self):
self.assertRaisesRegex(usage.UsageError,
"incorrect number of arguments",
self.parse, "arg1", "arg2")
def test_too_many_args(self):
self.assertRaisesRegex(usage.UsageError,
"incorrect number of arguments",
self.parse, "extra_arg", *self.req_args)
def test_validateMasterArgument_no_port(self):
"""
test calling CreateWorkerOptions.validateMasterArgument()
on <master> argument without port specified.
"""
opts = runner.CreateWorkerOptions()
self.assertEqual(opts.validateMasterArgument("mstrhost"),
("mstrhost", 9989),
"incorrect master host and/or port")
def test_validateMasterArgument_empty_master(self):
"""
test calling CreateWorkerOptions.validateMasterArgument()
on <master> without host part specified.
"""
opts = runner.CreateWorkerOptions()
self.assertRaisesRegex(usage.UsageError,
"invalid <master> argument ':1234'",
opts.validateMasterArgument, ":1234")
def test_validateMasterArgument_inv_port(self):
"""
test calling CreateWorkerOptions.validateMasterArgument()
on <master> argument with an unparsable port part.
"""
opts = runner.CreateWorkerOptions()
self.assertRaisesRegex(usage.UsageError,
"invalid master port 'apple', "
"needs to be a number",
opts.validateMasterArgument, "host:apple")
def test_validateMasterArgument_ok(self):
"""
test calling CreateWorkerOptions.validateMasterArgument()
on <master> with both host and port parts specified.
"""
opts = runner.CreateWorkerOptions()
self.assertEqual(opts.validateMasterArgument("mstrhost:4321"),
("mstrhost", 4321),
"incorrect master host and/or port")
class TestOptions(misc.StdoutAssertionsMixin, unittest.TestCase):
"""
Test buildbot_worker.scripts.runner.Options class.
"""
def setUp(self):
self.setUpStdoutAssertions()
def parse(self, *args):
opts = runner.Options()
opts.parseOptions(args)
return opts
def test_defaults(self):
self.assertRaisesRegex(usage.UsageError,
"must specify a command",
self.parse)
def test_version(self):
exception = self.assertRaises(SystemExit, self.parse, '--version')
self.assertEqual(exception.code, 0, "unexpected exit code")
self.assertInStdout('worker version:')
def test_verbose(self):
self.patch(log, 'startLogging', mock.Mock())
self.assertRaises(usage.UsageError, self.parse, "--verbose")
log.startLogging.assert_called_once_with(sys.stderr)
# used by TestRun.test_run_good to patch in a callback
functionPlaceholder = None
class TestRun(misc.StdoutAssertionsMixin, unittest.TestCase):
"""
Test buildbot_worker.scripts.runner.run()
"""
def setUp(self):
self.setUpStdoutAssertions()
class TestSubCommand(usage.Options):
subcommandFunction = __name__ + ".functionPlaceholder"
optFlags = [["test-opt", None, None]]
class TestOptions(usage.Options):
"""
Option class that emulates usage error. The 'suboptions' flag
enables emulation of usage error in a sub-option.
"""
optFlags = [["suboptions", None, None]]
def postOptions(self):
if self["suboptions"]:
self.subOptions = "SubOptionUsage"
raise usage.UsageError("usage-error-message")
def __str__(self):
return "GeneralUsage"
def test_run_good(self):
"""
Test successful invocation of worker command.
"""
self.patch(sys, "argv", ["command", 'test', '--test-opt'])
# patch runner module to use our test subcommand class
self.patch(runner.Options, 'subCommands',
[['test', None, self.TestSubCommand, None]])
# trace calls to subcommand function
subcommand_func = mock.Mock(return_value=42)
self.patch(sys.modules[__name__],
"functionPlaceholder",
subcommand_func)
# check that subcommand function called with correct arguments
# and that its return value is used as the exit code
exception = self.assertRaises(SystemExit, runner.run)
subcommand_func.assert_called_once_with({'test-opt': 1})
self.assertEqual(exception.code, 42, "unexpected exit code")
def test_run_bad_noargs(self):
"""
Test handling of invalid command line arguments.
"""
self.patch(sys, "argv", ["command"])
# patch runner module to use test Options class
self.patch(runner, "Options", self.TestOptions)
exception = self.assertRaises(SystemExit, runner.run)
self.assertEqual(exception.code, 1, "unexpected exit code")
self.assertStdoutEqual("command: usage-error-message\n\n"
"GeneralUsage\n",
"unexpected error message on stdout")
def test_run_bad_suboption(self):
"""
Test handling of invalid command line arguments in a suboption.
"""
self.patch(sys, "argv", ["command", "--suboptions"])
# patch runner module to use test Options class
self.patch(runner, "Options", self.TestOptions)
exception = self.assertRaises(SystemExit, runner.run)
self.assertEqual(exception.code, 1, "unexpected exit code")
# check that we get error message for a sub-option
self.assertStdoutEqual("command: usage-error-message\n\n"
"SubOptionUsage\n",
"unexpected error message on stdout")
| gpl-2.0 |
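# A minimal sketch of the twisted.python.usage.Options pattern exercised by the
# tests above: flags and parameters are declared declaratively and
# parseOptions() fills the option dictionary. The option names here are made up
# for illustration.
from twisted.python import usage

class ExampleOptions(usage.Options):
    optFlags = [["quiet", "q", "Suppress log output."]]
    optParameters = [["basedir", "d", ".", "Base directory of the worker."]]

opts = ExampleOptions()
opts.parseOptions(["--quiet", "--basedir", "/tmp/worker"])
print(opts["quiet"], opts["basedir"])   # 1 /tmp/worker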
jhaux/tensorflow | tensorflow/contrib/keras/python/keras/layers/core.py | 14 | 27677 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Core Keras layers.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import types as python_types
import numpy as np
from tensorflow.contrib.keras.python.keras import activations
from tensorflow.contrib.keras.python.keras import backend as K
from tensorflow.contrib.keras.python.keras import constraints
from tensorflow.contrib.keras.python.keras import initializers
from tensorflow.contrib.keras.python.keras import regularizers
from tensorflow.contrib.keras.python.keras.engine import InputSpec
from tensorflow.contrib.keras.python.keras.engine import Layer
from tensorflow.contrib.keras.python.keras.utils.generic_utils import deserialize_keras_object
from tensorflow.contrib.keras.python.keras.utils.generic_utils import func_dump
from tensorflow.contrib.keras.python.keras.utils.generic_utils import func_load
from tensorflow.python.framework import tensor_shape
from tensorflow.python.layers import core as tf_core_layers
from tensorflow.python.util import tf_inspect
class Masking(Layer):
"""Masks a sequence by using a mask value to skip timesteps.
For each timestep in the input tensor (dimension #1 in the tensor),
if all values in the input tensor at that timestep
are equal to `mask_value`, then the timestep will be masked (skipped)
in all downstream layers (as long as they support masking).
If any downstream layer does not support masking yet receives such
an input mask, an exception will be raised.
Example:
Consider a Numpy data array `x` of shape `(samples, timesteps, features)`,
to be fed to a LSTM layer.
You want to mask timestep #3 and #5 because you lack data for
these timesteps. You can:
- set `x[:, 3, :] = 0.` and `x[:, 5, :] = 0.`
- insert a `Masking` layer with `mask_value=0.` before the LSTM layer:
```python
model = Sequential()
model.add(Masking(mask_value=0., input_shape=(timesteps, features)))
model.add(LSTM(32))
```
"""
def __init__(self, mask_value=0., **kwargs):
super(Masking, self).__init__(**kwargs)
self.supports_masking = True
self.mask_value = mask_value
def compute_mask(self, inputs, mask=None):
return K.any(K.not_equal(inputs, self.mask_value), axis=-1)
def call(self, inputs):
boolean_mask = K.any(
K.not_equal(inputs, self.mask_value), axis=-1, keepdims=True)
return inputs * K.cast(boolean_mask, K.floatx())
def get_config(self):
config = {'mask_value': self.mask_value}
base_config = super(Masking, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class Dropout(tf_core_layers.Dropout, Layer):
"""Applies Dropout to the input.
Dropout consists in randomly setting
a fraction `rate` of input units to 0 at each update during training time,
which helps prevent overfitting.
Arguments:
rate: float between 0 and 1. Fraction of the input units to drop.
noise_shape: 1D integer tensor representing the shape of the
binary dropout mask that will be multiplied with the input.
For instance, if your inputs have shape
`(batch_size, timesteps, features)` and
you want the dropout mask to be the same for all timesteps,
you can use `noise_shape=(batch_size, 1, features)`.
seed: A Python integer to use as random seed.
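Example:
```python
model = Sequential()
model.add(Dense(64, activation='relu', input_shape=(16,)))
# randomly zero half of the previous layer's activations during training
model.add(Dropout(0.5))
```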
"""
def __init__(self, rate, noise_shape=None, seed=None, **kwargs):
self.supports_masking = True
# Inheritance call order:
# 1) tf.layers.Dropout, 2) keras.layers.Layer, 3) tf.layers.Layer
super(Dropout, self).__init__(rate=rate, noise_shape=noise_shape, seed=seed, **kwargs)
def call(self, inputs, training=None):
if training is None:
training = K.learning_phase()
output = super(Dropout, self).call(inputs, training=training)
if training is K.learning_phase():
output._uses_learning_phase = True # pylint: disable=protected-access
return output
def get_config(self):
config = {'rate': self.rate}
base_config = super(Dropout, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class SpatialDropout1D(Dropout):
"""Spatial 1D version of Dropout.
This version performs the same function as Dropout, however it drops
entire 1D feature maps instead of individual elements. If adjacent frames
within feature maps are strongly correlated (as is normally the case in
early convolution layers) then regular dropout will not regularize the
activations and will otherwise just result in an effective learning rate
decrease. In this case, SpatialDropout1D will help promote independence
between feature maps and should be used instead.
Arguments:
rate: float between 0 and 1. Fraction of the input units to drop.
Input shape:
3D tensor with shape:
`(samples, timesteps, channels)`
Output shape:
Same as input
References:
- [Efficient Object Localization Using Convolutional
Networks](https://arxiv.org/abs/1411.4280)
"""
def __init__(self, rate, **kwargs):
super(SpatialDropout1D, self).__init__(rate, **kwargs)
self.input_spec = InputSpec(ndim=3)
def _get_noise_shape(self, inputs):
input_shape = K.shape(inputs)
noise_shape = (input_shape[0], 1, input_shape[2])
return noise_shape
class SpatialDropout2D(Dropout):
"""Spatial 2D version of Dropout.
This version performs the same function as Dropout, however it drops
entire 2D feature maps instead of individual elements. If adjacent pixels
within feature maps are strongly correlated (as is normally the case in
early convolution layers) then regular dropout will not regularize the
activations and will otherwise just result in an effective learning rate
decrease. In this case, SpatialDropout2D will help promote independence
between feature maps and should be used instead.
Arguments:
rate: float between 0 and 1. Fraction of the input units to drop.
data_format: 'channels_first' or 'channels_last'.
In 'channels_first' mode, the channels dimension
(the depth) is at index 1,
in 'channels_last' mode it is at index 3.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
Input shape:
4D tensor with shape:
`(samples, channels, rows, cols)` if data_format='channels_first'
or 4D tensor with shape:
`(samples, rows, cols, channels)` if data_format='channels_last'.
Output shape:
Same as input
References:
- [Efficient Object Localization Using Convolutional
Networks](https://arxiv.org/abs/1411.4280)
"""
def __init__(self, rate, data_format=None, **kwargs):
super(SpatialDropout2D, self).__init__(rate, **kwargs)
if data_format is None:
data_format = K.image_data_format()
if data_format not in {'channels_last', 'channels_first'}:
raise ValueError('data_format must be in '
'{"channels_last", "channels_first"}')
self.data_format = data_format
self.input_spec = InputSpec(ndim=4)
def _get_noise_shape(self, inputs):
input_shape = K.shape(inputs)
if self.data_format == 'channels_first':
noise_shape = (input_shape[0], input_shape[1], 1, 1)
elif self.data_format == 'channels_last':
noise_shape = (input_shape[0], 1, 1, input_shape[3])
else:
raise ValueError('Invalid data_format:', self.data_format)
return noise_shape
class SpatialDropout3D(Dropout):
"""Spatial 3D version of Dropout.
This version performs the same function as Dropout, however it drops
entire 3D feature maps instead of individual elements. If adjacent voxels
within feature maps are strongly correlated (as is normally the case in
early convolution layers) then regular dropout will not regularize the
activations and will otherwise just result in an effective learning rate
decrease. In this case, SpatialDropout3D will help promote independence
between feature maps and should be used instead.
Arguments:
rate: float between 0 and 1. Fraction of the input units to drop.
data_format: 'channels_first' or 'channels_last'.
In 'channels_first' mode, the channels dimension (the depth)
is at index 1, in 'channels_last' mode it is at index 4.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
Input shape:
5D tensor with shape:
`(samples, channels, dim1, dim2, dim3)` if data_format='channels_first'
or 5D tensor with shape:
`(samples, dim1, dim2, dim3, channels)` if data_format='channels_last'.
Output shape:
Same as input
References:
- [Efficient Object Localization Using Convolutional
Networks](https://arxiv.org/abs/1411.4280)
"""
def __init__(self, rate, data_format=None, **kwargs):
super(SpatialDropout3D, self).__init__(rate, **kwargs)
if data_format is None:
data_format = K.image_data_format()
if data_format not in {'channels_last', 'channels_first'}:
raise ValueError('data_format must be in '
'{"channels_last", "channels_first"}')
self.data_format = data_format
self.input_spec = InputSpec(ndim=5)
def _get_noise_shape(self, inputs):
input_shape = K.shape(inputs)
if self.data_format == 'channels_first':
noise_shape = (input_shape[0], input_shape[1], 1, 1, 1)
elif self.data_format == 'channels_last':
noise_shape = (input_shape[0], 1, 1, 1, input_shape[4])
else:
raise ValueError('Invalid data_format:', self.data_format)
return noise_shape
class Activation(Layer):
"""Applies an activation function to an output.
Arguments:
activation: name of activation function to use
or alternatively, a Theano or TensorFlow operation.
Input shape:
Arbitrary. Use the keyword argument `input_shape`
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a model.
Output shape:
Same shape as input.
"""
def __init__(self, activation, **kwargs):
super(Activation, self).__init__(**kwargs)
self.supports_masking = True
self.activation = activations.get(activation)
def call(self, inputs):
return self.activation(inputs)
def get_config(self):
config = {'activation': activations.serialize(self.activation)}
base_config = super(Activation, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class Reshape(Layer):
"""Reshapes an output to a certain shape.
Arguments:
target_shape: target shape. Tuple of integers,
does not include the samples dimension (batch size).
Input shape:
Arbitrary, although all dimensions in the input shape must be fixed.
Use the keyword argument `input_shape`
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a model.
Output shape:
`(batch_size,) + target_shape`
Example:
```python
# as first layer in a Sequential model
model = Sequential()
model.add(Reshape((3, 4), input_shape=(12,)))
# now: model.output_shape == (None, 3, 4)
# note: `None` is the batch dimension
# as intermediate layer in a Sequential model
model.add(Reshape((6, 2)))
# now: model.output_shape == (None, 6, 2)
# also supports shape inference using `-1` as dimension
model.add(Reshape((-1, 2, 2)))
# now: model.output_shape == (None, 3, 2, 2)
```
"""
def __init__(self, target_shape, **kwargs):
super(Reshape, self).__init__(**kwargs)
self.target_shape = tuple(target_shape)
def _fix_unknown_dimension(self, input_shape, output_shape):
"""Find and replace a missing dimension in an output shape.
This is a near direct port of the internal Numpy function
`_fix_unknown_dimension` in `numpy/core/src/multiarray/shape.c`
Arguments:
input_shape: shape of array being reshaped
output_shape: desired shape of the array with at most
a single -1 which indicates a dimension that should be
derived from the input shape.
Returns:
The new output shape with a -1 replaced with its computed value.
Raises a ValueError if the total array size of the output_shape is
different than the input_shape, or if more than one unknown dimension
is specified.
Raises:
ValueError: in case of invalid values
for `input_shape` or `output_shape`.
"""
output_shape = list(output_shape)
msg = 'total size of new array must be unchanged'
known, unknown = 1, None
for index, dim in enumerate(output_shape):
if dim < 0:
if unknown is None:
unknown = index
else:
raise ValueError('Can only specify one unknown dimension.')
else:
known *= dim
original = np.prod(input_shape, dtype=int)
if unknown is not None:
if known == 0 or original % known != 0:
raise ValueError(msg)
output_shape[unknown] = original // known
elif original != known:
raise ValueError(msg)
return output_shape
def _compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
output_shape = [input_shape[0]]
output_shape += self._fix_unknown_dimension(input_shape[1:],
self.target_shape)
return tensor_shape.TensorShape(output_shape)
def call(self, inputs):
# In case the target shape is not fully defined,
# we need access to the shape of x.
target_shape = self.target_shape
if -1 in target_shape:
# target shape not fully defined
target_shape = self._compute_output_shape(inputs.get_shape())
target_shape = target_shape.as_list()[1:]
return K.reshape(inputs, (-1,) + tuple(target_shape))
def get_config(self):
config = {'target_shape': self.target_shape}
base_config = super(Reshape, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class Permute(Layer):
"""Permutes the dimensions of the input according to a given pattern.
Useful for e.g. connecting RNNs and convnets together.
Example:
```python
model = Sequential()
model.add(Permute((2, 1), input_shape=(10, 64)))
# now: model.output_shape == (None, 64, 10)
# note: `None` is the batch dimension
```
Arguments:
dims: Tuple of integers. Permutation pattern, does not include the
samples dimension. Indexing starts at 1.
For instance, `(2, 1)` permutes the first and second dimension
of the input.
Input shape:
Arbitrary. Use the keyword argument `input_shape`
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a model.
Output shape:
Same as the input shape, but with the dimensions re-ordered according
to the specified pattern.
"""
def __init__(self, dims, **kwargs):
super(Permute, self).__init__(**kwargs)
self.dims = tuple(dims)
self.input_spec = InputSpec(ndim=len(self.dims) + 1)
def _compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
output_shape = copy.copy(input_shape)
for i, dim in enumerate(self.dims):
target_dim = input_shape[dim]
output_shape[i + 1] = target_dim
return tensor_shape.TensorShape(output_shape)
def call(self, inputs):
return K.permute_dimensions(inputs, (0,) + self.dims)
def get_config(self):
config = {'dims': self.dims}
base_config = super(Permute, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class Flatten(Layer):
"""Flattens the input. Does not affect the batch size.
Example:
```python
model = Sequential()
model.add(Convolution2D(64, 3, 3,
border_mode='same',
input_shape=(3, 32, 32)))
# now: model.output_shape == (None, 64, 32, 32)
model.add(Flatten())
# now: model.output_shape == (None, 65536)
```
"""
def __init__(self, **kwargs):
super(Flatten, self).__init__(**kwargs)
self.input_spec = InputSpec(min_ndim=3)
def _compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
if not all(input_shape[1:]):
raise ValueError('The shape of the input to "Flatten" '
'is not fully defined '
'(got ' + str(input_shape[1:]) + '). '
'Make sure to pass a complete "input_shape" '
'or "batch_input_shape" argument to the first '
'layer in your model.')
return tensor_shape.TensorShape([input_shape[0], np.prod(input_shape[1:])])
def call(self, inputs):
outputs = K.batch_flatten(inputs)
outputs.set_shape(self._compute_output_shape(inputs.get_shape()))
return outputs
class RepeatVector(Layer):
"""Repeats the input n times.
Example:
```python
model = Sequential()
model.add(Dense(32, input_dim=32))
# now: model.output_shape == (None, 32)
# note: `None` is the batch dimension
model.add(RepeatVector(3))
# now: model.output_shape == (None, 3, 32)
```
Arguments:
n: integer, repetition factor.
Input shape:
2D tensor of shape `(num_samples, features)`.
Output shape:
3D tensor of shape `(num_samples, n, features)`.
"""
def __init__(self, n, **kwargs):
super(RepeatVector, self).__init__(**kwargs)
self.n = n
self.input_spec = InputSpec(ndim=2)
def _compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
return tensor_shape.TensorShape([input_shape[0], self.n, input_shape[1]])
def call(self, inputs):
return K.repeat(inputs, self.n)
def get_config(self):
config = {'n': self.n}
base_config = super(RepeatVector, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class Lambda(Layer):
"""Wraps arbitrary expression as a `Layer` object.
Examples:
```python
# add a x -> x^2 layer
model.add(Lambda(lambda x: x ** 2))
```
```python
# add a layer that returns the concatenation
# of the positive part of the input and
# the opposite of the negative part
def antirectifier(x):
x -= K.mean(x, axis=1, keepdims=True)
x = K.l2_normalize(x, axis=1)
pos = K.relu(x)
neg = K.relu(-x)
return K.concatenate([pos, neg], axis=1)
model.add(Lambda(antirectifier))
```
Arguments:
function: The function to be evaluated.
Takes input tensor as first argument.
arguments: optional dictionary of keyword arguments to be passed
to the function.
Input shape:
Arbitrary. Use the keyword argument input_shape
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a model.
Output shape:
Specified by `output_shape` argument
(or auto-inferred when using TensorFlow).
"""
def __init__(self, function, mask=None, arguments=None, **kwargs):
super(Lambda, self).__init__(**kwargs)
self.function = function
self.arguments = arguments if arguments else {}
if mask is not None:
self.supports_masking = True
self.mask = mask
def call(self, inputs, mask=None):
arguments = self.arguments
arg_spec = tf_inspect.getargspec(self.function)
if 'mask' in arg_spec.args:
arguments['mask'] = mask
return self.function(inputs, **arguments)
def compute_mask(self, inputs, mask=None):
if callable(self.mask):
return self.mask(inputs, mask)
return self.mask
def get_config(self):
if isinstance(self.function, python_types.LambdaType):
function = func_dump(self.function)
function_type = 'lambda'
else:
function = self.function.__name__
function_type = 'function'
config = {
'function': function,
'function_type': function_type,
'arguments': self.arguments
}
base_config = super(Lambda, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config, custom_objects=None):
globs = globals()
if custom_objects:
globs = dict(list(globs.items()) + list(custom_objects.items()))
function_type = config.pop('function_type')
if function_type == 'function':
# Simple lookup in custom objects
function = deserialize_keras_object(
config['function'],
custom_objects=custom_objects,
printable_module_name='function in Lambda layer')
elif function_type == 'lambda':
# Unsafe deserialization from bytecode
function = func_load(config['function'], globs=globs)
else:
raise TypeError('Unknown function type:', function_type)
config['function'] = function
return cls(**config)
class Dense(tf_core_layers.Dense, Layer):
"""Just your regular densely-connected NN layer.
`Dense` implements the operation:
`output = activation(dot(input, kernel) + bias)`
where `activation` is the element-wise activation function
passed as the `activation` argument, `kernel` is a weights matrix
created by the layer, and `bias` is a bias vector created by the layer
(only applicable if `use_bias` is `True`).
Note: if the input to the layer has a rank greater than 2, then
it is flattened prior to the initial dot product with `kernel`.
Example:
```python
# as first layer in a sequential model:
model = Sequential()
model.add(Dense(32, input_shape=(16,)))
# now the model will take as input arrays of shape (*, 16)
# and output arrays of shape (*, 32)
# after the first layer, you don't need to specify
# the size of the input anymore:
model.add(Dense(32))
```
Arguments:
units: Positive integer, dimensionality of the output space.
activation: Activation function to use.
If you don't specify anything, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix.
bias_initializer: Initializer for the bias vector.
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
activity_regularizer: Regularizer function applied to
the output of the layer (its "activation").
kernel_constraint: Constraint function applied to
the `kernel` weights matrix.
bias_constraint: Constraint function applied to the bias vector.
Input shape:
nD tensor with shape: `(batch_size, ..., input_dim)`.
The most common situation would be
a 2D input with shape `(batch_size, input_dim)`.
Output shape:
nD tensor with shape: `(batch_size, ..., units)`.
For instance, for a 2D input with shape `(batch_size, input_dim)`,
the output would have shape `(batch_size, units)`.
"""
def __init__(self,
units,
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
if 'input_shape' not in kwargs and 'input_dim' in kwargs:
kwargs['input_shape'] = (kwargs.pop('input_dim'),)
# Inheritance call order:
# 1) tf.layers.Dense, 2) keras.layers.Layer, 3) tf.layers.Layer
super(Dense, self).__init__(
units,
activation=activations.get(activation),
use_bias=use_bias,
kernel_initializer=initializers.get(kernel_initializer),
bias_initializer=initializers.get(bias_initializer),
kernel_regularizer=regularizers.get(kernel_regularizer),
bias_regularizer=regularizers.get(bias_regularizer),
activity_regularizer=regularizers.get(activity_regularizer),
**kwargs)
# TODO(fchollet): move weight constraint support to core layers.
self.kernel_constraint = constraints.get(kernel_constraint)
self.bias_constraint = constraints.get(bias_constraint)
self.supports_masking = True
def build(self, input_shape):
super(Dense, self).build(input_shape)
# TODO(fchollet): move weight constraint support to core layers.
if self.kernel_constraint:
self.constraints[self.kernel] = self.kernel_constraint
if self.use_bias and self.bias_constraint:
self.constraints[self.bias] = self.bias_constraint
def get_config(self):
config = {
'units': self.units,
'activation': activations.serialize(self.activation),
'use_bias': self.use_bias,
'kernel_initializer': initializers.serialize(self.kernel_initializer),
'bias_initializer': initializers.serialize(self.bias_initializer),
'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
'bias_regularizer': regularizers.serialize(self.bias_regularizer),
'activity_regularizer':
regularizers.serialize(self.activity_regularizer),
'kernel_constraint': constraints.serialize(self.kernel_constraint),
'bias_constraint': constraints.serialize(self.bias_constraint)
}
base_config = super(Dense, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class ActivityRegularization(Layer):
"""Layer that applies an update to the cost function based input activity.
Arguments:
l1: L1 regularization factor (positive float).
l2: L2 regularization factor (positive float).
Input shape:
Arbitrary. Use the keyword argument `input_shape`
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a model.
Output shape:
Same shape as input.
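Example:
```python
model = Sequential()
model.add(Dense(64, input_dim=16))
# penalize large activations of the previous layer with an L1 term
model.add(ActivityRegularization(l1=0.001))
```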
"""
def __init__(self, l1=0., l2=0., **kwargs):
super(ActivityRegularization, self).__init__(**kwargs)
self.supports_masking = True
self.l1 = l1
self.l2 = l2
self.activity_regularizer = regularizers.L1L2(l1=l1, l2=l2)
def get_config(self):
config = {'l1': self.l1, 'l2': self.l2}
base_config = super(ActivityRegularization, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
| apache-2.0 |
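# A minimal sketch combining several of the core layers defined above, written
# in the Sequential style used throughout their docstrings. The import path
# assumes the tf.contrib Keras build this file belongs to; adjust it for
# standalone Keras.
from tensorflow.contrib.keras.python.keras.models import Sequential
from tensorflow.contrib.keras.python.keras.layers import Dense, Dropout, Flatten, Reshape

model = Sequential()
model.add(Reshape((4, 4), input_shape=(16,)))   # -> (None, 4, 4)
model.add(Flatten())                            # -> (None, 16)
model.add(Dense(32, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(10, activation='softmax'))
print(model.output_shape)                       # (None, 10)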
lidavidm/mathics-heroku | venv/lib/python2.7/site-packages/django/contrib/gis/gdal/tests/test_ds.py | 109 | 11098 | import os
from django.contrib.gis.gdal import HAS_GDAL
from django.contrib.gis.geometry.test_data import get_ds_file, TestDS, TEST_DATA
from django.utils import unittest
from django.utils.unittest import skipUnless
if HAS_GDAL:
from django.contrib.gis.gdal import DataSource, Envelope, OGRGeometry, OGRException, OGRIndexError, GDAL_VERSION
from django.contrib.gis.gdal.field import OFTReal, OFTInteger, OFTString
# List of acceptable data sources.
ds_list = (
TestDS('test_point', nfeat=5, nfld=3, geom='POINT', gtype=1, driver='ESRI Shapefile',
fields={'dbl' : OFTReal, 'int' : OFTInteger, 'str' : OFTString,},
extent=(-1.35011,0.166623,-0.524093,0.824508), # Got extent from QGIS
srs_wkt='GEOGCS["GCS_WGS_1984",DATUM["WGS_1984",SPHEROID["WGS_1984",6378137,298.257223563]],PRIMEM["Greenwich",0],UNIT["Degree",0.017453292519943295]]',
field_values={'dbl' : [float(i) for i in range(1, 6)], 'int' : list(range(1, 6)), 'str' : [str(i) for i in range(1, 6)]},
fids=range(5)),
TestDS('test_vrt', ext='vrt', nfeat=3, nfld=3, geom='POINT', gtype='Point25D', driver='VRT',
fields={'POINT_X' : OFTString, 'POINT_Y' : OFTString, 'NUM' : OFTString}, # VRT uses CSV, which all types are OFTString.
extent=(1.0, 2.0, 100.0, 523.5), # Min/Max from CSV
field_values={'POINT_X' : ['1.0', '5.0', '100.0'], 'POINT_Y' : ['2.0', '23.0', '523.5'], 'NUM' : ['5', '17', '23']},
fids=range(1,4)),
TestDS('test_poly', nfeat=3, nfld=3, geom='POLYGON', gtype=3,
driver='ESRI Shapefile',
fields={'float' : OFTReal, 'int' : OFTInteger, 'str' : OFTString,},
extent=(-1.01513,-0.558245,0.161876,0.839637), # Got extent from QGIS
srs_wkt='GEOGCS["GCS_WGS_1984",DATUM["WGS_1984",SPHEROID["WGS_1984",6378137,298.257223563]],PRIMEM["Greenwich",0],UNIT["Degree",0.017453292519943295]]'),
)
bad_ds = (TestDS('foo'),)
@skipUnless(HAS_GDAL, "GDAL is required")
class DataSourceTest(unittest.TestCase):
def test01_valid_shp(self):
"Testing valid SHP Data Source files."
for source in ds_list:
# Loading up the data source
ds = DataSource(source.ds)
# Making sure the layer count is what's expected (only 1 layer in a SHP file)
self.assertEqual(1, len(ds))
# Making sure GetName works
self.assertEqual(source.ds, ds.name)
# Making sure the driver name matches up
self.assertEqual(source.driver, str(ds.driver))
# Making sure indexing works
try:
ds[len(ds)]
except OGRIndexError:
pass
else:
self.fail('Expected an IndexError!')
def test02_invalid_shp(self):
"Testing invalid SHP files for the Data Source."
for source in bad_ds:
self.assertRaises(OGRException, DataSource, source.ds)
def test03a_layers(self):
"Testing Data Source Layers."
for source in ds_list:
ds = DataSource(source.ds)
# Incrementing through each layer, this tests DataSource.__iter__
for layer in ds:
# Making sure we get the number of features we expect
self.assertEqual(len(layer), source.nfeat)
# Making sure we get the number of fields we expect
self.assertEqual(source.nfld, layer.num_fields)
self.assertEqual(source.nfld, len(layer.fields))
# Testing the layer's extent (an Envelope), and it's properties
if source.driver == 'VRT' and (GDAL_VERSION >= (1, 7, 0) and GDAL_VERSION < (1, 7, 3)):
# There's a known GDAL regression with retrieving the extent
# of a VRT layer in versions 1.7.0-1.7.2:
# http://trac.osgeo.org/gdal/ticket/3783
pass
else:
self.assertEqual(True, isinstance(layer.extent, Envelope))
self.assertAlmostEqual(source.extent[0], layer.extent.min_x, 5)
self.assertAlmostEqual(source.extent[1], layer.extent.min_y, 5)
self.assertAlmostEqual(source.extent[2], layer.extent.max_x, 5)
self.assertAlmostEqual(source.extent[3], layer.extent.max_y, 5)
# Now checking the field names.
flds = layer.fields
for f in flds: self.assertEqual(True, f in source.fields)
# Negative FIDs are not allowed.
self.assertRaises(OGRIndexError, layer.__getitem__, -1)
self.assertRaises(OGRIndexError, layer.__getitem__, 50000)
if hasattr(source, 'field_values'):
fld_names = source.field_values.keys()
# Testing `Layer.get_fields` (which uses Layer.__iter__)
for fld_name in fld_names:
self.assertEqual(source.field_values[fld_name], layer.get_fields(fld_name))
# Testing `Layer.__getitem__`.
for i, fid in enumerate(source.fids):
feat = layer[fid]
self.assertEqual(fid, feat.fid)
# Maybe this should be in the test below, but we might as well test
# the feature values here while in this loop.
for fld_name in fld_names:
self.assertEqual(source.field_values[fld_name][i], feat.get(fld_name))
def test03b_layer_slice(self):
"Test indexing and slicing on Layers."
# Using the first data-source because the same slice
# can be used for both the layer and the control values.
source = ds_list[0]
ds = DataSource(source.ds)
sl = slice(1, 3)
feats = ds[0][sl]
for fld_name in ds[0].fields:
test_vals = [feat.get(fld_name) for feat in feats]
control_vals = source.field_values[fld_name][sl]
self.assertEqual(control_vals, test_vals)
def test03c_layer_references(self):
"""
Ensure OGR objects keep references to the objects they belong to.
"""
source = ds_list[0]
# See ticket #9448.
def get_layer():
# This DataSource object is not accessible outside this
# scope. However, a reference should still be kept alive
# on the `Layer` returned.
ds = DataSource(source.ds)
return ds[0]
# Making sure we can call OGR routines on the Layer returned.
lyr = get_layer()
self.assertEqual(source.nfeat, len(lyr))
self.assertEqual(source.gtype, lyr.geom_type.num)
# Same issue for Feature/Field objects, see #18640
self.assertEqual(str(lyr[0]['str']), "1")
def test04_features(self):
"Testing Data Source Features."
for source in ds_list:
ds = DataSource(source.ds)
# Incrementing through each layer
for layer in ds:
# Incrementing through each feature in the layer
for feat in layer:
# Making sure the number of fields, and the geometry type
# are what's expected.
self.assertEqual(source.nfld, len(list(feat)))
self.assertEqual(source.gtype, feat.geom_type)
# Making sure the fields match to an appropriate OFT type.
for k, v in source.fields.items():
# Making sure we get the proper OGR Field instance, using
# a string value index for the feature.
self.assertEqual(True, isinstance(feat[k], v))
# Testing Feature.__iter__
for fld in feat:
self.assertEqual(True, fld.name in source.fields.keys())
def test05_geometries(self):
"Testing Geometries from Data Source Features."
for source in ds_list:
ds = DataSource(source.ds)
# Incrementing through each layer and feature.
for layer in ds:
for feat in layer:
g = feat.geom
# Making sure we get the right Geometry name & type
self.assertEqual(source.geom, g.geom_name)
self.assertEqual(source.gtype, g.geom_type)
# Making sure the SpatialReference is as expected.
if hasattr(source, 'srs_wkt'):
self.assertEqual(
source.srs_wkt,
# Depending on lib versions, WGS_84 might be WGS_1984
g.srs.wkt.replace('SPHEROID["WGS_84"', 'SPHEROID["WGS_1984"')
)
def test06_spatial_filter(self):
"Testing the Layer.spatial_filter property."
ds = DataSource(get_ds_file('cities', 'shp'))
lyr = ds[0]
# When not set, it should be None.
self.assertEqual(None, lyr.spatial_filter)
# Must be set to an OGRGeometry or a 4-tuple.
self.assertRaises(TypeError, lyr._set_spatial_filter, 'foo')
# Setting the spatial filter with a tuple/list with the extent of
# a buffer centering around Pueblo.
self.assertRaises(ValueError, lyr._set_spatial_filter, list(range(5)))
filter_extent = (-105.609252, 37.255001, -103.609252, 39.255001)
lyr.spatial_filter = (-105.609252, 37.255001, -103.609252, 39.255001)
self.assertEqual(OGRGeometry.from_bbox(filter_extent), lyr.spatial_filter)
feats = [feat for feat in lyr]
self.assertEqual(1, len(feats))
self.assertEqual('Pueblo', feats[0].get('Name'))
# Setting the spatial filter with an OGRGeometry for buffer centering
# around Houston.
filter_geom = OGRGeometry('POLYGON((-96.363151 28.763374,-94.363151 28.763374,-94.363151 30.763374,-96.363151 30.763374,-96.363151 28.763374))')
lyr.spatial_filter = filter_geom
self.assertEqual(filter_geom, lyr.spatial_filter)
feats = [feat for feat in lyr]
self.assertEqual(1, len(feats))
self.assertEqual('Houston', feats[0].get('Name'))
# Clearing the spatial filter by setting it to None. Now
# should indicate that there are 3 features in the Layer.
lyr.spatial_filter = None
self.assertEqual(3, len(lyr))
def test07_integer_overflow(self):
"Testing that OFTReal fields, treated as OFTInteger, do not overflow."
# Using *.dbf from Census 2010 TIGER Shapefile for Texas,
# which has land area ('ALAND10') stored in a Real field
# with no precision.
ds = DataSource(os.path.join(TEST_DATA, 'texas.dbf'))
feat = ds[0][0]
# Reference value obtained using `ogrinfo`.
self.assertEqual(676586997978, feat.get('ALAND10'))
| gpl-3.0 |
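# A minimal sketch of the DataSource traversal exercised by the tests above.
# The shapefile path is a placeholder; any OGR-readable vector file works.
from django.contrib.gis.gdal import DataSource

ds = DataSource('/path/to/cities.shp')          # hypothetical path
print(ds.name, len(ds))                         # source name and layer count
for layer in ds:
    print(layer.geom_type, layer.num_fields, layer.fields)
    for feat in layer:
        print(feat.fid, feat.get(layer.fields[0]), feat.geom.wkt)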