diff --git "a/codeparrot-valid_1001.txt" "b/codeparrot-valid_1001.txt"
new file mode 100644
--- /dev/null
+++ "b/codeparrot-valid_1001.txt"
@@ -0,0 +1,10000 @@
+
+def libdoc(library_or_resource, outfile, name='', version='', format=None):
+ """Executes libdoc.
+
+    Arguments have the same semantics as the Libdoc command line options
+    with the same names.
+
+ Example::
+
+ from robot.libdoc import libdoc
+
+ libdoc('MyLibrary.py', 'MyLibraryDoc.html', version='1.0')
+ """
+ LibDoc().execute(library_or_resource, outfile, name=name, version=version,
+ format=format)
+
+
+if __name__ == '__main__':
+ libdoc_cli(sys.argv[1:])
+
+import unittest
+from Ann import Ann, timeit
+import numpy as np
+import random
+import copy
+import os
+import pickle
+import logging
+
+logger = logging.getLogger(__name__)
+
+class Test(unittest.TestCase):
+
+ def init_logger(self, level='info'):
+ if (level == 'debug'):
+ logging.basicConfig(level=logging.DEBUG)
+ else:
+ logging.basicConfig(level=logging.INFO)
+
+ def setUp(self):
+ self.init_logger('debug')
+
+ @timeit
+ def test_1(self):
+ # Test for Ann Architecture#
+
+ # First architecture test#
+ n_i1 = 4 # Number of input neurons
+ n_h1 = 2 # Number of hidden layers
+ n_o1 = 1 # Number of output neurons
+
+ ann1 = Ann(n_i=4, n_h=2 , n_o=1) # Create this architecture
+ self.assertEqual(n_i1, ann1.n_i)
+ self.assertEqual(n_h1, ann1.n_h)
+ self.assertEqual(n_o1, ann1.n_o)
+
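+        # The assertions below suggest that ann.s stores per-layer unit counts
+        # including the bias unit: 4+1 inputs, two hidden layers of 4+1 units,
+        # and 1+1 outputs (an assumption inferred from the expected values).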
+ self.assertEqual(ann1.s, [5, 5, 5, 2])
+ self.assertEqual(len(ann1.Thetas), 3)
+ self.assertEqual(ann1.Thetas[0].shape, (4, 5))
+ self.assertEqual(ann1.Thetas[1].shape, (4, 5))
+ self.assertEqual(ann1.Thetas[2].shape, (1, 5))
+
+ # Second architecture test#
+ n_i2 = 10 # Number of input neurons
+ n_h2 = 1 # Number of hidden layers
+ n_o2 = 2 # Number of output neurons
+
+ ann2 = Ann(n_i=n_i2, n_h=n_h2, n_o=n_o2) # Create this architecture
+ self.assertEqual(n_i2, ann2.n_i)
+ self.assertEqual(n_h2, ann2.n_h)
+ self.assertEqual(n_o2, ann2.n_o)
+
+ self.assertEqual(ann2.s, [11, 11, 3])
+ self.assertEqual(len(ann2.Thetas), 2)
+ self.assertEqual(ann2.Thetas[0].shape, (10, 11))
+ self.assertEqual(ann2.Thetas[1].shape, (2, 11))
+
+ # Third architecture test#
+ n_i3 = 100 # Number of input neurons
+ n_h3 = 0 # Number of hidden layers
+ n_o3 = 10 # Number of output neurons
+
+ ann3 = Ann(n_i=n_i3, n_h=n_h3, n_o=n_o3) # Create this architecture
+ self.assertEqual(n_i3, ann3.n_i)
+ self.assertEqual(n_h3, ann3.n_h)
+ self.assertEqual(n_o3, ann3.n_o)
+
+ self.assertEqual(ann3.s, [101, 11])
+ self.assertEqual(len(ann3.Thetas), 1)
+ self.assertEqual(ann3.Thetas[0].shape, (10, 101))
+
+ n_i4 = 1500 # Number of input neurons
+ n_h4 = 3 # Number of hidden layers
+ n_o4 = 6 # Number of output neurons
+
+ # Fourth architecture test#
+ ann4 = Ann(n_i=n_i4, n_h=n_h4, n_o=n_o4) # Create this architecture
+ self.assertEqual(n_i4, ann4.n_i)
+ self.assertEqual(n_h4, ann4.n_h)
+ self.assertEqual(n_o4, ann4.n_o)
+
+ self.assertEqual(ann4.s, [1501, 31 + 1, 31 + 1, 31 + 1, 6 + 1])
+ self.assertEqual(len(ann4.Thetas), 4)
+ self.assertEqual(ann4.Thetas[0].shape, (31, 1501))
+ self.assertEqual(ann4.Thetas[1].shape, (31, 32))
+ self.assertEqual(ann4.Thetas[2].shape, (31, 32))
+ self.assertEqual(ann4.Thetas[3].shape, (6, 32))
+
+        # Fifth (arbitrary) architecture test#
+ s = [3, 2]
+ n_i = 4
+ n_h = len(s)
+ n_o = 2
+ ann1 = Ann(s=s, n_i=n_i, n_h=n_h, n_o=n_o) # Create this architecture
+ self.assertEqual(n_i, ann1.n_i)
+ self.assertEqual(n_h, ann1.n_h)
+ self.assertEqual(n_o, ann1.n_o)
+
+ self.assertEqual(ann1.s, [5, 3, 2, 3])
+ self.assertEqual(len(ann1.Thetas), 3)
+ self.assertEqual(ann1.Thetas[0].shape, (2, 5))
+ self.assertEqual(ann1.Thetas[1].shape, (1, 3))
+ self.assertEqual(ann1.Thetas[2].shape, (2, 2))
+
+ @timeit
+ def test_2(self):
+ # Test for forward-propagation#
+
+ # First architecture test#
+ # Logistic regression (0 hidden layers) forward propagation test#
+ n_i1 = 4 # Number of input neurons
+ n_h1 = 0 # Number of hidden layers
+ n_o1 = 1 # Number of output neurons
+
+ ann1 = Ann(n_i=n_i1, n_h=n_h1, n_o=n_o1) # Create this architecture
+ x1 = [1, 2, 3, 4] # Array as first example
+ x2 = [-1, -1, -1, -1] # Array as second example
+
+ # Set all weights to zero#
+ for i in range(0, len(ann1.Thetas)):
+ shape = ann1.Thetas[i].shape
+ self.assertEqual(shape, (1, 5))
+ ann1.Thetas[i] = np.zeros(shape)
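+        # With every weight set to zero each unit outputs sigmoid(0) = 0.5, so
+        # the hypothesis is 0.5 regardless of the input.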
+ self.assertEqual(ann1.h(x1), 0.5)
+ self.assertEqual(ann1.h(x2), 0.5)
+
+ # Set all weights to one#
+ for i in range(0, len(ann1.Thetas)):
+ shape = ann1.Thetas[i].shape
+ self.assertEqual(shape, (1, 5))
+ ann1.Thetas[i] = np.ones(shape)
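+        # With all weights one (and the bias unit fixed at 1) the output is
+        # sigmoid(1 + 1 + 2 + 3 + 4) = sigmoid(11) ~ 0.99998 for x1 and
+        # sigmoid(1 - 1 - 1 - 1 - 1) = sigmoid(-3) ~ 0.0474 for x2.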
+ self.assertAlmostEqual(ann1.h(x1), 0.999, delta=0.001)
+ self.assertAlmostEqual(ann1.h(x2), 0.0474, delta=0.0001)
+
+ # Set all weights randomly between -1 and 1 (and test the range of output)#
+ ann1 = Ann(n_i=n_i1, n_h=n_h1, n_o=n_o1) # Create this architecture
+ self.assertAlmostEqual(ann1.h(x1), 0.5, delta=0.5) # Sigmoid always gives values between 0 and 1
+ self.assertAlmostEqual(ann1.h(x2), 0.5, delta=0.5)
+
+ # Custom Thetas weights#
+ M = np.matrix([[1, -1, 0.5, -0.3, 2]])
+ ann1.Thetas[0] = M
+ self.assertAlmostEqual(ann1.h(x1), 0.786, delta=0.001)
+ self.assertAlmostEqual(ann1.h(x2), 0.858, delta=0.001)
+
+ # Second architecture test#
+ # 1 hidden layer forward propagation test#
+ n_i1 = 4 # Number of input neurons
+ n_h1 = 1 # Number of hidden layers
+ n_o1 = 1 # Number of output neurons
+
+ ann1 = Ann(n_i=n_i1, n_h=n_h1, n_o=n_o1) # Create this architecture
+ x1 = [1, 2, 3, 4] # Array as first example
+ x2 = [-1, -1, -1, -1] # Array as second example
+
+ # Set all weights to zero#
+ for i in range(0, len(ann1.Thetas)):
+ shape = ann1.Thetas[i].shape
+ ann1.Thetas[i] = np.zeros(shape)
+ self.assertEqual(ann1.h(x1), 0.5)
+ self.assertEqual(ann1.h(x2), 0.5)
+
+ # Set all weights to one#
+ for i in range(0, len(ann1.Thetas)):
+ shape = ann1.Thetas[i].shape
+ ann1.Thetas[i] = np.ones(shape)
+ self.assertAlmostEqual(ann1.h(x1), 0.993, delta=0.001)
+ self.assertAlmostEqual(ann1.h(x2), 0.767, delta=0.001)
+
+ # Set all weights randomly between -1 and 1 (and test the range of output)#
+ ann1 = Ann(n_i=n_i1, n_h=n_h1, n_o=n_o1) # Create this architecture
+ self.assertAlmostEqual(ann1.h(x1), 0.5, delta=0.5) # Sigmoid always gives values between 0 and 1
+ self.assertAlmostEqual(ann1.h(x2), 0.5, delta=0.5)
+
+ # Custom Thetas weights#
+ M1 = np.matrix([[1, -1, 0.5, -0.3, 2],
+ [1, -1, 0.5, -0.3, 2],
+ [1, -1, 0.5, -0.3, 2],
+ [1, -1, 0.5, -0.3, 2]])
+ M2 = np.matrix([[1, 1, -1, 0.5, -1]])
+ ann1.Thetas[0] = M1
+ ann1.Thetas[1] = M2
+ # a^(1) Should be [0.786 0.786 0.786 0.786 1]^T#
+ self.assertAlmostEqual(ann1.h(x1), 0.545, delta=0.001)
+ # a^(1) Should be [0.858 0.858 0.858 0.858 1]^T#
+ self.assertAlmostEqual(ann1.h(x2), 0.571, delta=0.001)
+
+ @timeit
+ def test_3(self):
+
+ # Test the dimensions of the Jacobian matrices against Theta matrices for first architecture#
+ n_i1 = 4 # Number of input neurons
+ n_h1 = 2 # Number of hidden layers
+ n_o1 = 2 # Number of output neurons
+
+ ann1 = Ann(n_i=n_i1, n_h=n_h1, n_o=n_o1) # Create this architecture
+ x1 = [1, 2, 3, 4] # Array as first example
+ y1 = [1, 0]
+ J = ann1.backward(x1, y1)
+ for l in range(0, ann1.L - 1):
+ self.assertEqual(ann1.Thetas[l].shape, J[l].shape)
+
+ # Test the dimensions of the Jacobian matrices against Theta matrices for second architecture#
+ n_i1 = 40 # Number of input neurons
+ n_h1 = 3 # Number of hidden layers
+ n_o1 = 10 # Number of output neurons
+
+ ann1 = Ann(n_i=n_i1, n_h=n_h1, n_o=n_o1) # Create this architecture
+ x1 = 10 * [1, 2, 3, 4] # Array as first example
+ y1 = [1, 0, 1, 1, 0, 0, 1, 0, 1, 0]
+ J = ann1.backward(x1, y1)
+ for l in range(0, ann1.L - 1):
+ self.assertEqual(ann1.Thetas[l].shape, J[l].shape)
+
+ # Test the dimensions of the Jacobian matrices against Theta matrices for third architecture#
+ n_i1 = 40 # Number of input neurons
+ n_h1 = 0 # Number of hidden layers
+ n_o1 = 10 # Number of output neurons
+
+ ann1 = Ann(n_i=n_i1, n_h=n_h1, n_o=n_o1) # Create this architecture
+ x1 = 10 * [1, 2, 3, 4] # Array as first example
+ y1 = [1, 0, 1, 1, 0, 0, 1, 0, 1, 0]
+ J = ann1.backward(x1, y1)
+ for l in range(0, ann1.L - 1):
+ self.assertEqual(ann1.Thetas[l].shape, J[l].shape)
+
+ @timeit
+ def test_4(self):
+ # Gradient checking (check that a numerical approximation of the gradient is (almost) equal to our backpropagation derivation)#
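+        # For each weight Theta[l][i][j], the numerical estimate computed below
+        # is the central difference
+        #   (cost(Theta_ij + eps) - cost(Theta_ij - eps)) / (2 * eps),
+        # which should agree with the backpropagation gradient J[l][i][j] to
+        # within O(eps^2).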
+
+ # First data-set with one example
+ arrs = []
+ labels = []
+ arrs.append([1, 2, 4, 5, 5, 5])
+ labels.append('cat')
+ ann = Ann(arrs, labels, n_h=10) # Create Ann with these train_examples and labels
+ J = ann.backward(ann.train_examples[0].arr, ann.train_examples[0].y)
+ T_original = copy.deepcopy(ann.Thetas)
+
+ for l in range(0, ann.L - 1):
+ shape_J = J[l].shape
+ eps = 0.0001 # epsilon for a numerical approximation of the gradient
+ for i in range(0, shape_J[0]):
+ for j in range(0, shape_J[1]):
+ T_e = np.zeros(shape_J) # Matrix of zeros
+ T_e[i][j] = eps
+ ann.Thetas[l] = T_original[l] + T_e
+ cost_e = ann.cost() # Cost at Theta + eps
+ ann.Thetas[l] = T_original[l] - T_e
+ cost_minus_e = ann.cost() # Cost at Theta - eps
+ P = (cost_e - cost_minus_e) / (2 * eps) # Numerical approximation
+ J_ij = J[l].item(i, j) # Backpropagation derivation
+
+ # print(P, '\t', J_ij, '\t', abs(P - J_ij), (l, i, j))
+
+ # if (P < 0 and J_ij > 0 or P > 0 and J_ij < 0):
+ # self.fail()
+
+ self.assertAlmostEqual(P, J_ij, delta=0.001)
+ ann.Thetas = copy.deepcopy(T_original)
+
+ # Second data-set with several train_examples
+ arrs = []
+ labels = []
+ classes = ('cat', 'dog')
+ for m in range(0, 100):
+ arr = [random.random() for x in range(0, 20)]
+ label = classes[random.random() > 0.5]
+ arrs.append(arr)
+ labels.append(label)
+ ann = Ann(arrs, labels, n_h=2) # Create Ann with these train_examples and labels
+ # L-1 matrices of partial derivatives for first example
+ J = ann.backward_batch()
+ T_original = copy.deepcopy(ann.Thetas)
+
+ for l in range(0, ann.L - 1):
+ shape_J = J[l].shape
+ eps = 0.0001 # epsilon for a numerical approximation of the gradient
+ a = random.sample(range(0, shape_J[0]), 2)
+ b = random.sample(range(0, shape_J[1]), 2)
+ for i in a:
+ for j in b:
+ T_e = np.zeros(shape_J) # Matrix of zeros
+ T_e[i][j] = eps
+ ann.Thetas[l] = T_original[l] + T_e
+ cost_e = ann.cost() # Cost at Theta + eps
+ ann.Thetas[l] = T_original[l] - T_e
+ cost_minus_e = ann.cost() # Cost at Theta - eps
+ P = (cost_e - cost_minus_e) / (2 * eps) # Numerical approximation
+ J_ij = J[l].item(i, j) # Backpropagation derivation
+
+ self.assertAlmostEqual(P, J_ij, delta=0.001)
+ ann.Thetas = copy.deepcopy(T_original)
+
+ @timeit
+ def test_5(self):
+ # Comprehensive gradient checking #
+
+ # Medium size data-set with more than two classes
+ arrs = []
+ labels = []
+ classes = ('cat', 'dog', 'bird', 'turtle', 'dinosaur', 'human')
+ for m in range(0, 100):
+ arr = [random.random() for x in range(0, 200)]
+ z = random.random()
+ if (z < 1 / 6):
+ label = classes[0]
+ elif (z >= 1 / 6 and z < 2 / 6):
+ label = classes[1]
+ elif (z >= 2 / 6 and z < 3 / 6):
+ label = classes[2]
+ elif (z >= 3 / 6 and z < 4 / 6):
+ label = classes[3]
+ elif (z >= 4 / 6 and z < 5 / 6):
+ label = classes[4]
+ else:
+ label = classes[5]
+ arrs.append(arr)
+ labels.append(label)
+ ann = Ann(arrs, labels, n_h=2) # Create Ann with these train_examples and labels
+ # L-1 matrices of partial derivatives for first example
+ J = ann.backward_batch()
+ T_original = copy.deepcopy(ann.Thetas)
+
+ # Just check the neuron connections between first, second, and third layer
+ for l in range(0, 2):
+ shape_J = J[l].shape
+ eps = 0.0001 # epsilon for a numerical approximation of the gradient
+ # Randomly select 100 neuron connections to check
+ a = random.sample(range(0, shape_J[0]), 10)
+ b = random.sample(range(0, shape_J[1]), 10)
+ for i in a:
+ for j in b:
+ T_e = np.zeros(shape_J) # Matrix of zeros
+ T_e[i][j] = eps
+ ann.Thetas[l] = T_original[l] + T_e
+ cost_e = ann.cost() # Cost at Theta + eps
+ ann.Thetas[l] = T_original[l] - T_e
+ cost_minus_e = ann.cost() # Cost at Theta - eps
+ P = (cost_e - cost_minus_e) / (2 * eps) # Numerical approximation
+ J_ij = J[l].item(i, j) # Backpropagation derivation
+
+ self.assertAlmostEqual(P, J_ij, delta=0.001)
+ ann.Thetas = copy.deepcopy(T_original)
+
+ @timeit
+ def non_test_6(self):
+ # Test if training works by checking that training lowers the cost for random small and medium size data-sets#
+
+ # Small size random data-set with two labels
+ arrs = []
+ labels = []
+ classes = ('cat', 'dog')
+ for i in range(0, 1):
+ print('\nTesting data-set ' + str(i))
+ for m in range(0, 10):
+ arr = [random.random() for x in range(0, 3)]
+ label = classes[random.random() > 0.5]
+ arrs.append(arr)
+ labels.append(label)
+ ann = Ann(arrs, labels) # Create Ann with these train_examples and labels
+ cost_before = ann.cost()
+ ann.train()
+ cost_after = ann.cost()
+ self.assertTrue(cost_after <= cost_before)
+
+ # Medium size random data-set with three labels
+ arrs = []
+ labels = []
+ classes = ('cat', 'dog', 'bird')
+ for i in range(0, 1):
+ print('\nTesting data-set ' + str(i))
+ for m in range(0, 10):
+ arr = [random.random() for x in range(0, 5)]
+ z = random.random()
+ if (z < 0.33):
+ label = classes[0]
+ elif (z >= 0.33 and z < 0.66):
+ label = classes[1]
+ else:
+ label = classes[2]
+ arrs.append(arr)
+ labels.append(label)
+ ann = Ann(arrs, labels) # Create Ann with these train_examples and labels
+ cost_before = ann.cost()
+ ann.train()
+ cost_after = ann.cost()
+ self.assertTrue(cost_after <= cost_before)
+
+ @timeit
+ def test_7(self):
+ # Learn some basic functions#
+ # Linearly-separable data-sets#
+
+ # function 1 (AND function) on 0 hidden layers
+ arrs = []
+ arrs.append([0, 0])
+ arrs.append([0, 1])
+ arrs.append([1, 0])
+ arrs.append([1, 1])
+ labels = []
+ labels.append('false')
+ labels.append('true')
+ labels.append('true')
+ labels.append('true')
+ ann = Ann(arrs, labels, n_h=0)
+ ann.train()
+ ann.validate_train()
+ # Check to see if train_accuracy is over 90%
+ self.assertTrue(ann.train_accuracy() > 0.9)
+ # function 2 on 2 hidden layers
+ arrs = []
+ arrs.append([1, 1])
+ arrs.append([2, 2])
+ arrs.append([1, 3])
+ arrs.append([2, 10])
+ arrs.append([1, -1])
+ arrs.append([-2, -2])
+ arrs.append([1, -3])
+ arrs.append([-2, -10])
+ labels = []
+ labels.append('false')
+ labels.append('false')
+ labels.append('false')
+ labels.append('false')
+ labels.append('true')
+ labels.append('true')
+ labels.append('true')
+ labels.append('true')
+ ann = Ann(arrs, labels, n_h=2)
+ ann.train()
+ ann.validate_train()
+ # Check to see if train_accuracy is over 90%
+ self.assertTrue(ann.train_accuracy() > 0.9)
+
+
+ # Non-linearly-separable data-sets#
+
+
+        # function 1 (XOR function) on 1 hidden layer
+ arrs = []
+ arrs.append([0, 0])
+ arrs.append([0, 1])
+ arrs.append([1, 0])
+ arrs.append([1, 1])
+ labels = []
+ labels.append('false')
+ labels.append('true')
+ labels.append('true')
+ labels.append('false')
+ ann = Ann(arrs, labels, n_h=1)
+ ann.train(it=3000)
+ ann.validate_train()
+ # Check to see if train_accuracy is over 90%
+ self.assertTrue(ann.train_accuracy() > 0.9)
+
+        # function 1b (XOR function) on 1 hidden layer (with custom architecture)
+ arrs = []
+ arrs.append([0, 0])
+ arrs.append([0, 1])
+ arrs.append([1, 0])
+ arrs.append([1, 1])
+ labels = []
+ labels.append('false')
+ labels.append('true')
+ labels.append('true')
+ labels.append('false')
+ s = [4, 5] # Custom hidden layer architecture
+ ann = Ann(arrs, labels, n_h=len(s), s=s)
+ ann.train()
+ ann.validate_train()
+ # Check to see if train_accuracy is over 90%
+ self.assertTrue(ann.train_accuracy() > 0.9)
+
+
+ # function 1 (two nested sets) on 2 hidden layers
+ arrs = []
+ arrs.append([0, 0])
+ arrs.append([0, 1])
+ arrs.append([1, 1])
+ arrs.append([1, 1])
+ arrs.append([10, 0])
+ arrs.append([0, 10])
+ arrs.append([110, 10])
+ arrs.append([-10, 10])
+ labels = []
+ labels.append('false')
+ labels.append('false')
+ labels.append('false')
+ labels.append('false')
+ labels.append('true')
+ labels.append('true')
+ labels.append('true')
+ labels.append('true')
+ ann = Ann(arrs, labels, n_h=0)
+ ann.train()
+ ann.validate_train()
+ # Check to see if train_accuracy is over 90%
+ self.assertTrue(ann.train_accuracy() > 0.9)
+
+ @timeit
+ def test_8(self):
+ # First test#
+ # 1 hidden layer cost test with regularization#
+ x1 = [1, 2, 3, 4] # Array as first example
+ y1 = 'yes'
+ arrs = []
+ labels = []
+ arrs.append(x1)
+ labels.append(y1)
+ ann1 = Ann(arrs, labels, n_h=1) # Create this architecture
+
+ # Custom Thetas weights#
+ M1 = np.matrix([[1, -1, 0.5, -0.3, 2],
+ [1, -1, 0.5, -0.3, 2],
+ [1, -1, 0.5, -0.3, 2],
+ [1, -1, 0.5, -0.3, 2]])
+ M2 = np.matrix([[1, 1, -1, 0.5, -1]])
+ ann1.Thetas[0] = M1
+ ann1.Thetas[1] = M2
+ cost_0 = ann1.cost() # lam equals 0
+ cost_1 = ann1.cost(lam=1) # lam equals 1
+ self.assertTrue(cost_1 > cost_0) # Cost with regularization penalty is always higher than without regularization
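+        # (Assumption) With the usual L2 penalty, the regularized cost is
+        # J(Theta) + (lam / (2*m)) * sum of squared non-bias weights, so a
+        # positive lam adds a non-negative term and cannot lower the cost.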
+
+ # Gradient checking (now with regularization)#
+ # Medium size data-set with several train_examples
+ lam_test = 1 # Regularization parameter
+ arrs = []
+ labels = []
+ classes = ('cat', 'dog')
+ for m in range(0, 100):
+ arr = [random.random() for x in range(0, 40)]
+ label = classes[random.random() > 0.5]
+ arrs.append(arr)
+ labels.append(label)
+ ann = Ann(arrs, labels, n_h=2) # Create Ann with these train_examples and labels
+ # L-1 matrices of partial derivatives for first example
+ J = ann.backward_batch(lam=lam_test, batch_size=1) # Use full-batch for gradient descent
+ T_original = copy.deepcopy(ann.Thetas)
+
+ for l in range(0, ann.L - 1):
+ shape_J = J[l].shape
+ eps = 0.0001 # epsilon for a numerical approximation of the gradient
+ a = random.sample(range(0, shape_J[0]), 2)
+ b = random.sample(range(0, shape_J[1]), 2)
+ for i in a:
+ for j in b:
+ T_e = np.zeros(shape_J) # Matrix of zeros
+ T_e[i][j] = eps
+ ann.Thetas[l] = T_original[l] + T_e
+ cost_e = ann.cost(lam=lam_test) # Cost at Theta + eps
+ ann.Thetas[l] = T_original[l] - T_e
+ cost_minus_e = ann.cost(lam=lam_test) # Cost at Theta - eps
+ P = (cost_e - cost_minus_e) / (2 * eps) # Numerical approximation
+ J_ij = J[l].item(i, j) # Backpropagation derivation
+
+ # print(P, '\t', J_ij, '\t', abs(P - J_ij), (l, i, j))
+
+ # if (P < 0 and J_ij > 0 or P > 0 and J_ij < 0):
+ # self.fail()
+
+ self.assertAlmostEqual(P, J_ij, delta=0.001)
+ ann.Thetas = copy.deepcopy(T_original)
+
+ @timeit
+ def test_9(self):
+        # function 1 (XOR function) on 1 hidden layer
+ arrs = []
+ arrs.append([0, 0])
+ arrs.append([0, 1])
+ arrs.append([1, 0])
+ arrs.append([1, 1])
+ labels = []
+ labels.append('false')
+ labels.append('true')
+ labels.append('true')
+ labels.append('false')
+ ann = Ann(arrs, labels, n_h=1)
+ # Train and save model
+ model = ann.train()[0][0] # Take the first model from the list of models in the tuple
+ ann.validate_train()
+ # Check to see if train_accuracy is over 90%
+ self.assertTrue(ann.train_accuracy() > 0.9)
+
+ # Load the trained model into a new neural network
+ ann_from_model = Ann(model)
+ # Evaluate some vectors using this neural network initialized only with a model
+ self.assertEqual(ann_from_model.h_by_class(arrs[0]), 'false')
+ self.assertEqual(ann_from_model.h_by_class(arrs[1]), 'true')
+ x = [1.1, 0.9]
+ self.assertEqual(ann_from_model.h_by_class(x), 'false')
+
+ # function 2 on 2 hidden layers
+ arrs2 = []
+ arrs2.append([1, 1])
+ arrs2.append([2, 2])
+ arrs2.append([1, 3])
+ arrs2.append([2, 10])
+ arrs2.append([1, -1])
+ arrs2.append([-2, -2])
+ arrs2.append([1, -3])
+ arrs2.append([-2, -10])
+ labels2 = []
+ labels2.append('false')
+ labels2.append('false')
+ labels2.append('false')
+ labels2.append('false')
+ labels2.append('true')
+ labels2.append('true')
+ labels2.append('true')
+ labels2.append('true')
+ ann = Ann(arrs2, labels2, n_h=2)
+ model2 = ann.train()[0][0]
+ ann.validate_train()
+
+ # Load the second model
+ ann_from_model = Ann(model2)
+ # Evaluate some vectors using this neural network initialized only with a model
+ self.assertEqual(ann_from_model.h_by_class(arrs2[0]), 'false')
+ self.assertEqual(ann_from_model.h_by_class(arrs2[len(arrs2) - 1]), 'true')
+ x = [1, -5]
+ self.assertEqual(ann_from_model.h_by_class(x), 'true')
+
+ # Load the first model again
+ ann_from_model = Ann(model)
+ # Evaluate some vectors using this neural network initialized only with a model
+ self.assertEqual(ann_from_model.h_by_class(arrs[0]), 'false')
+ self.assertEqual(ann_from_model.h_by_class(arrs[1]), 'true')
+ x = [1.1, 0.9]
+ self.assertEqual(ann_from_model.h_by_class(x), 'false')
+
+ # Try pickling our model into a sister folder
+ model_name = model.name
+ directory = '../Ann-models'
+ path_to_file = directory + '/' + model_name
+ if not os.path.exists(directory):
+ os.makedirs(directory)
+ pickle.dump(model, open(path_to_file, 'wb'))
+
+ # Try unpickling our model
+ unpickled_model = pickle.load(open(path_to_file, 'rb'))
+ # Load unpickled model and test
+ ann_from_pickle = Ann(unpickled_model)
+ # Evaluate some vectors using this neural network initialized only with a model
+ self.assertEqual(ann_from_pickle.h_by_class(arrs[0]), 'false')
+ self.assertEqual(ann_from_pickle.h_by_class(arrs[1]), 'true')
+ x = [1.1, 0.9]
+ self.assertEqual(ann_from_pickle.h_by_class(x), 'false')
+
+ @timeit
+ def test_10(self):
+ '''Creates a fake data-set with points labeled 'yes' around origin and points labeled 'no' outside'''
+ arrs = []
+ labels = []
+ '''Points about the origin (located in a box of length 16 centered at origin)'''
+ for i in range(0, 10):
+ arr = [random.randint(0, 8) * np.sign(random.random() - 0.5) for x in range(0, 2)]
+ label = 'yes'
+ arrs.append(arr)
+ labels.append(label)
+ '''Points outside the box'''
+ for i in range(0, 10):
+ arr = [random.randint(10, 20) * np.sign(random.random() - 0.5) for x in range(0, 2)]
+ label = 'no'
+ arrs.append(arr)
+ labels.append(label)
+ '''Add some noise'''
+ for i in range(0, 2):
+ arr = [random.randint(0, 8) * np.sign(random.random() - 0.5) for x in range(0, 2)]
+ label = 'no' # Note: this is artificially misclassified
+ arrs.append(arr)
+ labels.append(label)
+ for i in range(0, 10):
+ arr = [random.randint(10, 20) * np.sign(random.random() - 0.5) for x in range(0, 2)]
+ label = 'yes' # Note: this is artificially misclassified
+ arrs.append(arr)
+ labels.append(label)
+
+ ann = Ann(arrs, labels, n_h=2)
+ (models, test_accuracies, test_costs) = ann.train()
+
+ best_test_accuracy = 0
+ best_i = -1
+ for i in range(0, len(test_accuracies)):
+ if (test_accuracies[i] > best_test_accuracy):
+ best_test_accuracy = test_accuracies[i]
+ best_i = i
+
+ if (best_i > -1):
+            model_name = models[best_i].name  # use the best-scoring model, not the last loop index
+ directory = '../Ann-models'
+ path_to_file = directory + '/' + model_name
+ if not os.path.exists(directory):
+ os.makedirs(directory)
+            pickle.dump(models[best_i], open(path_to_file, 'wb'))
+ else:
+ logger.error('Error!')
+
+if __name__ == "__main__":
+ Ann.init_logger('debug')
+ unittest.main()
+
+# -*- coding: utf-8 -*-
+#
+# API configuration
+#####################
+
+
+DEBUG = False
+
+# Top-level URL for deployment. Numerous other URLs depend on this.
+CYCLADES_BASE_URL = "https://compute.example.synnefo.org/compute/"
+
+# The API will return HTTP Bad Request if the ?changes-since
+# parameter refers to a point in time more than POLL_LIMIT seconds ago.
+POLL_LIMIT = 3600
+
+# Astakos groups that have access to '/admin' views.
+ADMIN_STATS_PERMITTED_GROUPS = ["admin-stats"]
+
+# Enable/Disable the snapshots feature altogether at the API level.
+# If set to False, Cyclades will not expose the '/snapshots' API URL
+# of the 'volume' app.
+CYCLADES_SNAPSHOTS_ENABLED = True
+
+#
+# Network Configuration
+#
+
+# CYCLADES_DEFAULT_SERVER_NETWORKS setting contains a list of networks to
+# connect a newly created server to, *if the user has not* specified them
+# explicitly in the POST /servers API call.
+# Each member of the list may be a network UUID, a tuple of network UUIDs,
+# "SNF:ANY_PUBLIC_IPV4" [any public network with an IPv4 subnet defined],
+# "SNF:ANY_PUBLIC_IPV6 [any public network with only an IPV6 subnet defined],
+# or "SNF:ANY_PUBLIC" [any public network].
+#
+# Access control and quota policy are enforced, just as if the user had
+# specified the value of CYCLADES_DEFAULT_SERVER_NETWORKS in the content
+# of the POST /servers call, after processing of "SNF:*" directives.
+CYCLADES_DEFAULT_SERVER_NETWORKS = []
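+#
+# Illustrative example of the format accepted by the setting above (the UUID
+# is a hypothetical placeholder):
+# CYCLADES_DEFAULT_SERVER_NETWORKS = ["SNF:ANY_PUBLIC_IPV4",
+#                                     "00000000-0000-0000-0000-000000000000"]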
+
+# This setting contains a list of networks which every new server
+# will be forced to connect to, regardless of the contents of the POST
+# /servers call, or the value of CYCLADES_DEFAULT_SERVER_NETWORKS.
+# Its format is identical to that of CYCLADES_DEFAULT_SERVER_NETWORKS.
+
+# WARNING: No access control or quota policy is enforced.
+# The server will get all IPv4/IPv6 addresses needed to connect to the
+# networks specified in CYCLADES_FORCED_SERVER_NETWORKS, regardless
+# of the state of the user's floating IP pool, and without
+# allocating any floating IPs.
+CYCLADES_FORCED_SERVER_NETWORKS = []
+
+# Maximum allowed network size for private networks.
+MAX_CIDR_BLOCK = 22
+
+# Default settings used by network flavors
+DEFAULT_MAC_PREFIX = 'aa:00:0'
+DEFAULT_BRIDGE = 'br0'
+
+# Network flavors that users are allowed to create through API requests
+# Available flavors are IP_LESS_ROUTED, MAC_FILTERED, PHYSICAL_VLAN
+API_ENABLED_NETWORK_FLAVORS = ['MAC_FILTERED']
+
+# Settings for MAC_FILTERED network:
+# ------------------------------------------
+# All networks of this type are bridged to the same bridge. Isolation between
+# networks is achieved by assigning a unique MAC-prefix to each network and
+# filtering packets via ebtables.
+DEFAULT_MAC_FILTERED_BRIDGE = 'prv0'
+
+
+# Firewalling. Firewall tags should contain '%s', which is filled in with the
+# NIC ID.
+GANETI_FIREWALL_ENABLED_TAG = 'synnefo:network:%s:protected'
+GANETI_FIREWALL_DISABLED_TAG = 'synnefo:network:%s:unprotected'
+GANETI_FIREWALL_PROTECTED_TAG = 'synnefo:network:%s:limited'
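+# For example, GANETI_FIREWALL_ENABLED_TAG % 2 yields
+# 'synnefo:network:2:protected' for the NIC with ID 2.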
+
+# The default firewall profile that will be in effect if no tags are defined
+DEFAULT_FIREWALL_PROFILE = 'DISABLED'
+
+# Fixed mapping of user VMs to a specific backend.
+# e.g. BACKEND_PER_USER = {'example@synnefo.org': 2}
+BACKEND_PER_USER = {}
+
+
+# Encryption key for the instance hostname in the stat graphs URLs. Set it to
+# a random string and update the STATS_SECRET_KEY setting in the snf-stats-app
+# host (20-snf-stats-app-settings.conf) accordingly.
+CYCLADES_STATS_SECRET_KEY = "secret_key"
+
+# URL templates for the stat graphs.
+# The API implementation replaces '%s' with the encrypted backend id.
+CPU_BAR_GRAPH_URL = 'http://stats.example.synnefo.org/stats/v1.0/cpu-bar/%s'
+CPU_TIMESERIES_GRAPH_URL = \
+ 'http://stats.example.synnefo.org/stats/v1.0/cpu-ts/%s'
+NET_BAR_GRAPH_URL = 'http://stats.example.synnefo.org/stats/v1.0/net-bar/%s'
+NET_TIMESERIES_GRAPH_URL = \
+ 'http://stats.example.synnefo.org/stats/v1.0/net-ts/%s'
+
+# Recommended refresh period for server stats
+STATS_REFRESH_PERIOD = 60
+
+# The maximum number of file path/content pairs that can be supplied on server
+# build
+MAX_PERSONALITY = 5
+
+# The maximum size, in bytes, for each personality file
+MAX_PERSONALITY_SIZE = 10240
+
+
+# Authentication URL of the astakos instance to be used for user management
+ASTAKOS_AUTH_URL = 'https://accounts.example.synnefo.org/identity/v2.0'
+
+# Tune the size of the Astakos http client connection pool
+# This limits the number of concurrent requests to Astakos.
+CYCLADES_ASTAKOSCLIENT_POOLSIZE = 50
+
+# Key for password encryption-decryption. After changing this setting, synnefo
+# will be unable to decrypt all existing Backend passwords. You will need to
+# store the new password again by using 'snf-manage backend-modify'.
+# SECRET_ENCRYPTION_KEY may be up to 32 bytes. Keys bigger than 32 bytes are not
+# supported.
+SECRET_ENCRYPTION_KEY = "Password Encryption Key"
+
+# Astakos service token
+# The token used for Astakos service API calls (e.g. the API call to retrieve
+# a user's email from a user UUID).
+CYCLADES_SERVICE_TOKEN = ''
+
+# Template used to build the FQDN of VMs. The setting will be formatted with
+# the id of the VM.
+CYCLADES_SERVERS_FQDN = 'snf-%(id)s.vm.example.synnefo.org'
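+# For example, with the template above the VM with id 42 gets the FQDN
+# 'snf-42.vm.example.synnefo.org' ('%(id)s' is replaced by the VM id).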
+
+# Description of applied port forwarding rules (DNAT) for Cyclades VMs. This
+# setting contains a mapping from the port of each VM to a tuple containing the
+# destination IP/hostname and the new port: (host, port). Instead of a tuple a
+# python callable object may be used which must return such a tuple. The caller
+# will pass to the callable the following positional arguments, in the
+# following order:
+# * server_id: The ID of the VM in the DB
+# * ip_address: The IPv4 address of the public VM NIC
+# * fqdn: The FQDN of the VM
+# * user: The UUID of the owner of the VM
+#
+# Here is an example describing the mapping of the SSH port of all VMs to
+# the external address 'gate.example.synnefo.org' and port 61000+server_id.
+# e.g. iptables -t nat -A prerouting -d gate.example.synnefo.org \
+# --dport (61000 + $(VM_ID)) -j DNAT --to-destination $(VM_IP):22
+#CYCLADES_PORT_FORWARDING = {
+# 22: lambda ip_address, server_id, fqdn, user:
+# ("gate.example.synnefo.org", 61000 + server_id),
+#}
+CYCLADES_PORT_FORWARDING = {}
+
+# Extra configuration options required for snf-vncauthproxy (>=1.5). Each dict
+# of the list, describes one vncauthproxy instance.
+CYCLADES_VNCAUTHPROXY_OPTS = [
+ {
+ # These values are required for VNC console support. They should match
+ # a user / password configured in the snf-vncauthproxy authentication /
+ # users file (/var/lib/vncauthproxy/users).
+ 'auth_user': 'synnefo',
+ 'auth_password': 'secret_password',
+ # server_address and server_port should reflect the --listen-address and
+ # --listen-port options passed to the vncauthproxy daemon
+ 'server_address': '127.0.0.1',
+ 'server_port': 24999,
+ # Set to True to enable SSL support on the control socket.
+ 'enable_ssl': False,
+ # If you enabled SSL support for snf-vncauthproxy you can optionally
+        # provide a path to a CA file and enable strict checking of the server
+        # certificate.
+ 'ca_cert': None,
+ 'strict': False,
+ },
+]
+
+# The maximum allowed size (GB) for a Cyclades Volume
+CYCLADES_VOLUME_MAX_SIZE = 200
+
+# The maximum allowed metadata items for a Cyclades Volume
+CYCLADES_VOLUME_MAX_METADATA = 10
+
+# The maximum allowed metadata items for a Cyclades Virtual Machine
+CYCLADES_VM_MAX_METADATA = 10
+
+# -*- coding: utf-8 -*-
+
+'''
+ Specto Add-on
+ Copyright (C) 2015 lambda
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+    along with this program. If not, see <http://www.gnu.org/licenses/>.
+'''
+
+
+import re
+from resources.lib.libraries import client
+
+
+def resolve(url):
+    try:
+        # Normalize embed URLs to the bare video id, then rebuild the
+        # canonical embed URL before requesting the page.
+        url = url.replace('/embed-', '/')
+        url = re.compile(r'//.+?/([\w]+)').findall(url)[0]
+        url = 'http://putstream.com/embed-%s.html' % url
+
+        result = client.request(url)
+
+        # The direct stream URL is the last 'file : "..."' entry in the page.
+        url = re.compile(r'file *: *"(http.+?)"').findall(result)[-1]
+        return url
+    except:
+        return
+
+
+#!/usr/bin/env python
+#
+# Copyright 2007 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+
+
+"""Copyright 2008 Python Software Foundation, Ian Bicking, and Google."""
+
+import inspect
+import mimetools
+import StringIO
+import sys
+
+
+CONTINUE = 100
+SWITCHING_PROTOCOLS = 101
+PROCESSING = 102
+OK = 200
+CREATED = 201
+ACCEPTED = 202
+NON_AUTHORITATIVE_INFORMATION = 203
+NO_CONTENT = 204
+RESET_CONTENT = 205
+PARTIAL_CONTENT = 206
+MULTI_STATUS = 207
+IM_USED = 226
+MULTIPLE_CHOICES = 300
+MOVED_PERMANENTLY = 301
+FOUND = 302
+SEE_OTHER = 303
+NOT_MODIFIED = 304
+USE_PROXY = 305
+TEMPORARY_REDIRECT = 307
+BAD_REQUEST = 400
+UNAUTHORIZED = 401
+PAYMENT_REQUIRED = 402
+FORBIDDEN = 403
+NOT_FOUND = 404
+METHOD_NOT_ALLOWED = 405
+NOT_ACCEPTABLE = 406
+PROXY_AUTHENTICATION_REQUIRED = 407
+REQUEST_TIMEOUT = 408
+CONFLICT = 409
+GONE = 410
+LENGTH_REQUIRED = 411
+PRECONDITION_FAILED = 412
+REQUEST_ENTITY_TOO_LARGE = 413
+REQUEST_URI_TOO_LONG = 414
+UNSUPPORTED_MEDIA_TYPE = 415
+REQUESTED_RANGE_NOT_SATISFIABLE = 416
+EXPECTATION_FAILED = 417
+UNPROCESSABLE_ENTITY = 422
+LOCKED = 423
+FAILED_DEPENDENCY = 424
+UPGRADE_REQUIRED = 426
+INTERNAL_SERVER_ERROR = 500
+NOT_IMPLEMENTED = 501
+BAD_GATEWAY = 502
+SERVICE_UNAVAILABLE = 503
+GATEWAY_TIMEOUT = 504
+HTTP_VERSION_NOT_SUPPORTED = 505
+INSUFFICIENT_STORAGE = 507
+NOT_EXTENDED = 510
+
+responses = {
+ 100: 'Continue',
+ 101: 'Switching Protocols',
+
+ 200: 'OK',
+ 201: 'Created',
+ 202: 'Accepted',
+ 203: 'Non-Authoritative Information',
+ 204: 'No Content',
+ 205: 'Reset Content',
+ 206: 'Partial Content',
+
+ 300: 'Multiple Choices',
+ 301: 'Moved Permanently',
+ 302: 'Found',
+ 303: 'See Other',
+ 304: 'Not Modified',
+ 305: 'Use Proxy',
+ 306: '(Unused)',
+ 307: 'Temporary Redirect',
+
+ 400: 'Bad Request',
+ 401: 'Unauthorized',
+ 402: 'Payment Required',
+ 403: 'Forbidden',
+ 404: 'Not Found',
+ 405: 'Method Not Allowed',
+ 406: 'Not Acceptable',
+ 407: 'Proxy Authentication Required',
+ 408: 'Request Timeout',
+ 409: 'Conflict',
+ 410: 'Gone',
+ 411: 'Length Required',
+ 412: 'Precondition Failed',
+ 413: 'Request Entity Too Large',
+ 414: 'Request-URI Too Long',
+ 415: 'Unsupported Media Type',
+ 416: 'Requested Range Not Satisfiable',
+ 417: 'Expectation Failed',
+
+ 500: 'Internal Server Error',
+ 501: 'Not Implemented',
+ 502: 'Bad Gateway',
+ 503: 'Service Unavailable',
+ 504: 'Gateway Timeout',
+ 505: 'HTTP Version Not Supported',
+}
+
+HTTP_PORT = 80
+HTTPS_PORT = 443
+
+
+
+
+
+class HTTPConnection:
+
+
+ protocol = 'http'
+ default_port = HTTP_PORT
+ _allow_truncated = True
+ _follow_redirects = False
+
+ def __init__(self, host, port=None, strict=False, timeout=None):
+
+
+
+ from google.appengine.api import urlfetch
+ self._fetch = urlfetch.fetch
+ self._method_map = {
+ 'GET': urlfetch.GET,
+ 'POST': urlfetch.POST,
+ 'HEAD': urlfetch.HEAD,
+ 'PUT': urlfetch.PUT,
+ 'DELETE': urlfetch.DELETE,
+ 'PATCH': urlfetch.PATCH,
+ }
+
+ self.host = host
+ self.port = port
+
+ self._method = self._url = None
+ self._body = ''
+ self.headers = []
+
+
+
+ if not isinstance(timeout, (float, int, long)):
+ timeout = None
+ self.timeout = timeout
+
+ def connect(self):
+ pass
+
+ def request(self, method, url, body=None, headers=None):
+ self._method = method
+ self._url = url
+ try:
+ self._body = body.read()
+ except AttributeError:
+ self._body = body
+ if headers is None:
+ headers = []
+ elif hasattr(headers, 'items'):
+ headers = headers.items()
+ self.headers = headers
+
+ def putrequest(self, request, selector, skip_host=False, skip_accept_encoding=False):
+
+ self._method = request
+ self._url = selector
+
+ def putheader(self, header, *lines):
+ line = '\r\n\t'.join([str(line) for line in lines])
+ self.headers.append((header, line))
+
+ def endheaders(self, message_body=None):
+
+ if message_body is not None:
+ self.send(message_body)
+
+ def set_debuglevel(self, level=None):
+ pass
+
+ def send(self, data):
+ self._body += data
+
+ @staticmethod
+ def _getargspec(callable_object):
+ assert callable(callable_object)
+ try:
+
+ return inspect.getargspec(callable_object)
+ except TypeError:
+
+ return inspect.getargspec(callable_object.__call__)
+
+ def getresponse(self):
+ if self.port and self.port != self.default_port:
+ host = '%s:%s' % (self.host, self.port)
+ else:
+ host = self.host
+ if not self._url.startswith(self.protocol):
+ url = '%s://%s%s' % (self.protocol, host, self._url)
+ else:
+ url = self._url
+ headers = dict(self.headers)
+
+ try:
+ method = self._method_map[self._method.upper()]
+ except KeyError:
+ raise ValueError("%r is an unrecognized HTTP method" % self._method)
+
+
+
+
+
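+        # Pass validate_certificate=False only when the installed
+        # urlfetch.fetch() signature accepts it (the argument is missing in
+        # older App Engine SDKs), as detected from its argspec below.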
+ args, _, keywords, _ = self._getargspec(self._fetch)
+ extra_kwargs = (
+ {'validate_certificate': False}
+ if keywords or 'validate_certificate' in args
+ else {})
+ response = self._fetch(url, self._body, method, headers,
+ self._allow_truncated, self._follow_redirects,
+ deadline=self.timeout, **extra_kwargs)
+ return HTTPResponse(response)
+
+ def close(self):
+ pass
+
+
+class HTTPSConnection(HTTPConnection):
+
+ protocol = 'https'
+ default_port = HTTPS_PORT
+
+ def __init__(self, host, port=None, key_file=None, cert_file=None,
+ strict=False, timeout=None):
+
+ if key_file is not None or cert_file is not None:
+ raise NotImplementedError(
+ "key_file and cert_file arguments are not implemented")
+ HTTPConnection.__init__(self, host, port=port, strict=strict,
+ timeout=timeout)
+
+
+class HTTPMessage(mimetools.Message):
+
+ def addheader(self, key, value):
+ """Add header for field key handling repeats."""
+ prev = self.dict.get(key)
+ if prev is None:
+ self.dict[key] = value
+ else:
+ combined = ", ".join((prev, value))
+ self.dict[key] = combined
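+        # For example, addheader(key, 'a') followed by addheader(key, 'b')
+        # leaves self.dict[key] == 'a, b', per the RFC 2616 rule for repeated
+        # header fields.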
+
+ def addcontinue(self, key, more):
+ """Add more field data from a continuation line."""
+ prev = self.dict[key]
+ self.dict[key] = prev + "\n " + more
+
+ def readheaders(self):
+ """Read header lines.
+
+ Read header lines up to the entirely blank line that terminates them.
+ The (normally blank) line that ends the headers is skipped, but not
+ included in the returned list. If a non-header line ends the headers,
+ (which is an error), an attempt is made to backspace over it; it is
+ never included in the returned list.
+
+ The variable self.status is set to the empty string if all went well,
+ otherwise it is an error message. The variable self.headers is a
+ completely uninterpreted list of lines contained in the header (so
+ printing them will reproduce the header exactly as it appears in the
+ file).
+
+ If multiple header fields with the same name occur, they are combined
+ according to the rules in RFC 2616 sec 4.2:
+
+ Appending each subsequent field-value to the first, each separated
+ by a comma. The order in which header fields with the same field-name
+ are received is significant to the interpretation of the combined
+ field value.
+ """
+
+
+
+
+
+ self.dict = {}
+ self.unixfrom = ''
+ self.headers = hlist = []
+ self.status = ''
+ headerseen = ""
+ firstline = 1
+ startofline = unread = tell = None
+ if hasattr(self.fp, 'unread'):
+ unread = self.fp.unread
+ elif self.seekable:
+ tell = self.fp.tell
+ while True:
+ if tell:
+ try:
+ startofline = tell()
+ except IOError:
+ startofline = tell = None
+ self.seekable = 0
+ line = self.fp.readline()
+ if not line:
+ self.status = 'EOF in headers'
+ break
+
+ if firstline and line.startswith('From '):
+ self.unixfrom = self.unixfrom + line
+ continue
+ firstline = 0
+ if headerseen and line[0] in ' \t':
+
+
+
+ hlist.append(line)
+ self.addcontinue(headerseen, line.strip())
+ continue
+ elif self.iscomment(line):
+
+ continue
+ elif self.islast(line):
+
+ break
+ headerseen = self.isheader(line)
+ if headerseen:
+
+ hlist.append(line)
+ self.addheader(headerseen, line[len(headerseen)+1:].strip())
+ continue
+ else:
+
+ if not self.dict:
+ self.status = 'No headers'
+ else:
+ self.status = 'Non-header line where header expected'
+
+ if unread:
+ unread(line)
+ elif tell:
+ self.fp.seek(startofline)
+ else:
+ self.status = self.status + '; bad seek'
+ break
+
+class HTTPResponse(object):
+
+ def __init__(self, fetch_response):
+ self._fetch_response = fetch_response
+ self.fp = StringIO.StringIO(fetch_response.content)
+
+ def __getattr__(self, attr):
+ return getattr(self.fp, attr)
+
+ def getheader(self, name, default=None):
+ return self._fetch_response.headers.get(name, default)
+
+ def getheaders(self):
+ return self._fetch_response.headers.items()
+
+ @property
+ def msg(self):
+ return self._fetch_response.header_msg
+
+ version = 11
+
+ @property
+ def status(self):
+ return self._fetch_response.status_code
+
+ @property
+ def reason(self):
+ return responses.get(self._fetch_response.status_code, 'Unknown')
+
+
+
+class HTTP:
+ "Compatibility class with httplib.py from 1.5."
+
+ _http_vsn = 11
+ _http_vsn_str = 'HTTP/1.1'
+
+ debuglevel = 0
+
+ _connection_class = HTTPConnection
+
+ def __init__(self, host='', port=None, strict=None):
+ "Provide a default host, since the superclass requires one."
+
+
+ if port == 0:
+ port = None
+
+
+
+
+ self._setup(self._connection_class(host, port, strict))
+
+ def _setup(self, conn):
+ self._conn = conn
+
+
+ self.send = conn.send
+ self.putrequest = conn.putrequest
+ self.endheaders = conn.endheaders
+ self.set_debuglevel = conn.set_debuglevel
+
+ conn._http_vsn = self._http_vsn
+ conn._http_vsn_str = self._http_vsn_str
+
+ self.file = None
+
+ def connect(self, host=None, port=None):
+ "Accept arguments to set the host/port, since the superclass doesn't."
+ self.__init__(host, port)
+
+ def getfile(self):
+ "Provide a getfile, since the superclass' does not use this concept."
+ return self.file
+
+ def putheader(self, header, *values):
+ "The superclass allows only one value argument."
+ self._conn.putheader(header, '\r\n\t'.join([str(v) for v in values]))
+
+ def getreply(self):
+ """Compat definition since superclass does not define it.
+
+ Returns a tuple consisting of:
+ - server status code (e.g. '200' if all goes well)
+ - server "reason" corresponding to status code
+ - any RFC822 headers in the response from the server
+ """
+ response = self._conn.getresponse()
+ self.headers = response.msg
+ self.file = response.fp
+ return response.status, response.reason, response.msg
+
+ def close(self):
+ self._conn.close()
+
+
+
+
+
+
+ self.file = None
+
+
+
+class HTTPS(HTTP):
+ """Compatibility with 1.5 httplib interface
+
+ Python 1.5.2 did not have an HTTPS class, but it defined an
+ interface for sending http requests that is also useful for
+ https.
+ """
+
+ _connection_class = HTTPSConnection
+
+ def __init__(self, host='', port=None, key_file=None, cert_file=None,
+ strict=None):
+ if key_file is not None or cert_file is not None:
+ raise NotImplementedError(
+ "key_file and cert_file arguments are not implemented")
+
+
+
+
+ if port == 0:
+ port = None
+ self._setup(self._connection_class(host, port, key_file,
+ cert_file, strict))
+
+
+
+ self.key_file = key_file
+ self.cert_file = cert_file
+
+
+class HTTPException(Exception):
+ pass
+
+class NotConnected(HTTPException):
+ pass
+
+class InvalidURL(HTTPException):
+ pass
+
+class UnknownProtocol(HTTPException):
+ def __init__(self, version):
+ self.version = version
+ HTTPException.__init__(self, version)
+
+class UnknownTransferEncoding(HTTPException):
+ pass
+
+class UnimplementedFileMode(HTTPException):
+ pass
+
+class IncompleteRead(HTTPException):
+ def __init__(self, partial):
+ self.partial = partial
+ HTTPException.__init__(self, partial)
+
+class ImproperConnectionState(HTTPException):
+ pass
+
+class CannotSendRequest(ImproperConnectionState):
+ pass
+
+class CannotSendHeader(ImproperConnectionState):
+ pass
+
+class ResponseNotReady(ImproperConnectionState):
+ pass
+
+class BadStatusLine(HTTPException):
+ def __init__(self, line):
+ self.line = line
+ HTTPException.__init__(self, line)
+
+error = HTTPException
+
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# thumbor imaging service
+# https://github.com/thumbor/thumbor/wiki
+
+# Licensed under the MIT license:
+# http://www.opensource.org/licenses/mit-license
+# Copyright (c) 2011 globo.com thumbor@googlegroups.com
+
+from os.path import join, abspath, dirname
+from preggy import expect
+
+from thumbor.context import Context
+from thumbor.config import Config
+from thumbor.storages.no_storage import Storage as NoStorage
+from tests.base import TestCase
+
+
+class NoStorageTestCase(TestCase):
+ def get_context(self):
+ cfg = Config()
+ return Context(None, cfg, None)
+
+ def get_image_url(self, image):
+ return 's.glbimg.com/some/{0}'.format(image)
+
+ def get_image_path(self, image):
+ return join(abspath(dirname(__file__)), image)
+
+ def get_image_bytes(self, image):
+ ipath = self.get_image_path(image)
+        with open(ipath, 'rb') as img:
+ return img.read()
+
+ def test_store_image_should_be_null(self):
+ iurl = self.get_image_url('source.jpg')
+ storage = NoStorage(None)
+ stored = storage.get(iurl)
+ expect(stored.result()).to_be_null()
+
+ def test_store_knows_no_image(self):
+ iurl = self.get_image_url('source.jpg')
+ storage = NoStorage(None)
+ exists = storage.exists(iurl)
+ expect(exists.result()).to_be_false()
+
+ def test_removes_image_should_be_null(self):
+ iurl = self.get_image_url('source.jpg')
+ storage = NoStorage(None)
+ removed = storage.remove(iurl)
+ expect(removed).to_be_null()
+
+ def test_stores_crypto_should_be_null(self):
+ iurl = self.get_image_url('source.jpg')
+ storage = NoStorage(None)
+ storage.put_crypto(iurl)
+ got_crypto = storage.get_crypto(iurl)
+ expect(got_crypto.result()).to_be_null()
+
+ def test_detector_data_should_be_null(self):
+ iurl = self.get_image_url('source.jpg')
+ storage = NoStorage(None)
+ storage.put_detector_data(iurl, "some data")
+ data = storage.get_detector_data(iurl)
+ expect(data.result()).to_be_null()
+
+# This file is part of MyPaint.
+# Copyright (C) 2014 by Andrew Chadwick
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+
+
+"""Global AccelMap editor, for backwards compatibility"""
+
+## Imports
+
+import logging
+logger = logging.getLogger(__name__)
+
+from gi.repository import Gtk
+from gi.repository import Gdk
+from gi.repository import Pango
+from gettext import gettext as _
+
+from lib.helpers import escape
+
+
+## Class defs
+
+class AccelMapEditor (Gtk.Grid):
+ """Ugly properties list for editing the global accel map
+
+ MyPaint normally doesn't use properties lists for reasons of
+ simplicity. However since Gtk 3.12 these are no longer editable via
+ the menus themselves, so we must create an alternative for 3.12
+ users who want to rebind keys.
+ """
+ # This interface is likely to evolve into an accelerator editor for
+ # GtkApplication's GAction-based way of doing things when we drop
+ # support for 3.10.
+
+ ## Consts
+
+ __gtype_name__ = 'AccelMapEditor'
+
+ _COLUMN_TYPES = (str, str, str)
+ _PATH_COLUMN = 0
+ _ACCEL_LABEL_COLUMN = 1
+ _ACTION_LABEL_COLUMN = 2
+
+ _USE_NORMAL_DIALOG_KEYS = True
+ _SHOW_ACCEL_PATH = True
+
+ ## Setup
+
+ def __init__(self):
+ super(AccelMapEditor, self).__init__()
+ self.ui_manager = None
+ self.connect("show", self._show_cb)
+
+ store = Gtk.ListStore(*self._COLUMN_TYPES)
+ self._store = store
+ self._action_labels = {}
+
+ scrolls = Gtk.ScrolledWindow()
+ scrolls.set_shadow_type(Gtk.ShadowType.IN)
+ view = Gtk.TreeView()
+ view.set_model(store)
+ view.set_size_request(480, 320)
+ view.set_hexpand(True)
+ view.set_vexpand(True)
+ scrolls.add(view)
+ self.attach(scrolls, 0, 0, 1, 1)
+ view.set_headers_clickable(True)
+ view.set_enable_search(True)
+ view.set_search_column(self._ACTION_LABEL_COLUMN)
+ self._view = view
+
+ cell = Gtk.CellRendererText()
+ cell.set_property("ellipsize", Pango.EllipsizeMode.END)
+ cell.set_property("editable", False)
+ col = Gtk.TreeViewColumn(_("Action"), cell)
+ col.add_attribute(cell, "text", self._ACTION_LABEL_COLUMN)
+ col.set_expand(True)
+ col.set_resizable(True)
+ col.set_min_width(200)
+ col.set_sort_column_id(self._ACTION_LABEL_COLUMN)
+ view.append_column(col)
+
+ cell = Gtk.CellRendererText()
+ cell.set_property("ellipsize", Pango.EllipsizeMode.END)
+ cell.set_property("editable", True)
+ cell.connect("edited", self._accel_edited_cb)
+ cell.connect("editing-started", self._accel_editing_started_cb)
+ col = Gtk.TreeViewColumn(_("Key combination"), cell)
+ col.add_attribute(cell, "text", self._ACCEL_LABEL_COLUMN)
+ col.set_expand(True)
+ col.set_resizable(True)
+ col.set_min_width(150)
+ col.set_sort_column_id(self._ACCEL_LABEL_COLUMN)
+ view.append_column(col)
+
+ def _show_cb(self, widget):
+ self._init_from_accel_map()
+
+ def _init_from_accel_map(self):
+ """Initializes from the app UIManager and the global AccelMap"""
+ if self.ui_manager is None:
+ import application
+ app = application.get_app()
+ self.ui_manager = app.ui_manager
+ assert self.ui_manager is not None
+ self._action_labels.clear()
+ self._store.clear()
+ accel_labels = {}
+ for path, key, mods, changed in self._get_accel_map_entries():
+ accel_labels[path] = Gtk.accelerator_get_label(key, mods)
+ for group in self.ui_manager.get_action_groups():
+ group_name = group.get_name()
+ for action in group.list_actions():
+ action_name = action.get_name()
+ path = "/%s/%s" % (group_name, action_name)
+ action_label = action.get_label()
+ if not action_label:
+ continue
+ self._action_labels[path] = action_label
+ accel_label = accel_labels.get(path)
+ row = [None for t in self._COLUMN_TYPES]
+ row[self._PATH_COLUMN] = path
+ row[self._ACTION_LABEL_COLUMN] = action_label
+ row[self._ACCEL_LABEL_COLUMN] = accel_label
+ self._store.append(row)
+
+ def _update_from_accel_map(self):
+ """Updates the list from the global AccelMap, logging changes"""
+ accel_labels = {}
+ for path, key, mods, changed in self._get_accel_map_entries():
+ accel_labels[path] = Gtk.accelerator_get_label(key, mods)
+ for row in self._store:
+ path = row[self._PATH_COLUMN]
+ new_label = accel_labels.get(path)
+ old_label = row[self._ACCEL_LABEL_COLUMN]
+ if new_label != old_label:
+ logger.debug("update: %r now uses %r", path, new_label)
+ row[self._ACCEL_LABEL_COLUMN] = new_label
+
+ @classmethod
+ def _get_accel_map_entries(cls):
+ """Gets all entries in the global GtkAccelMap as a list"""
+ accel_map = Gtk.AccelMap.get()
+ entries = []
+ entries_populator = lambda *e: entries.append(e)
+ accel_map.foreach_unfiltered(0, entries_populator)
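+        # foreach_unfiltered() invokes the callback with
+        # (data, accel_path, key, mods, changed); the opaque data argument is
+        # dropped when repacking the entries below.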
+ entries = [(accel_path, key, mods, changed)
+ for data, accel_path, key, mods, changed in entries]
+ return entries
+
+ ## Editing
+
+ def _accel_edited_cb(self, cell, path, newname):
+ """Arrange for list updates to happen after editing is done"""
+ self._update_from_accel_map()
+
+ def _accel_editing_started_cb(self, cell, editable, treepath):
+ """Begin editing by showing a key capture dialog"""
+ store = self._store
+ it = store.get_iter(treepath)
+ action_label = store.get_value(it, self._ACTION_LABEL_COLUMN)
+ accel_label = store.get_value(it, self._ACCEL_LABEL_COLUMN)
+ accel_path = store.get_value(it, self._PATH_COLUMN)
+
+ editable.set_sensitive(False)
+ dialog = Gtk.Dialog()
+ dialog.set_modal(True)
+ dialog.set_title(_("Edit Key for '%s'") % action_label)
+ dialog.set_transient_for(self.get_toplevel())
+ dialog.set_position(Gtk.WindowPosition.CENTER_ON_PARENT)
+ dialog.add_buttons(
+ Gtk.STOCK_DELETE, Gtk.ResponseType.REJECT,
+ Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,
+ Gtk.STOCK_OK, Gtk.ResponseType.OK,
+ )
+ dialog.set_default_response(Gtk.ResponseType.OK)
+ dialog.connect(
+ "response",
+ self._edit_dialog_response_cb,
+ editable,
+ accel_path
+ )
+
+ evbox = Gtk.EventBox()
+ evbox.set_border_width(12)
+ dialog.connect(
+ "key-press-event",
+ self._edit_dialog_key_press_cb,
+ editable
+ )
+
+ grid = Gtk.Grid()
+ grid.set_row_spacing(12)
+ grid.set_column_spacing(12)
+
+ row = 0
+ label = Gtk.Label()
+ label.set_alignment(0, 0.5)
+ label.set_text(_("Action:"))
+ grid.attach(label, 0, row, 1, 1)
+ label = Gtk.Label()
+ label.set_alignment(0, 0.5)
+ label.set_text(str(action_label))
+ label.set_tooltip_text(str(accel_path))
+ label.set_hexpand(True)
+ grid.attach(label, 1, row, 1, 1)
+
+ if self._SHOW_ACCEL_PATH:
+ row += 1
+ label = Gtk.Label()
+ label.set_alignment(0, 0.5)
+ label.set_text(_("Path:"))
+ grid.attach(label, 0, row, 1, 1)
+ label = Gtk.Label()
+ label.set_alignment(0, 0.5)
+ label.set_text(str(accel_path))
+ label.set_hexpand(True)
+ grid.attach(label, 1, row, 1, 1)
+
+ row += 1
+ label = Gtk.Label()
+ label.set_alignment(0, 0.5)
+ label.set_text(_("Key:"))
+ grid.attach(label, 0, row, 1, 1)
+ label = Gtk.Label()
+ label.set_alignment(0, 0.5)
+ label.set_text(str(accel_label))
+ dialog.accel_label_widget = label
+ label.set_hexpand(True)
+ grid.attach(label, 1, row, 1, 1)
+
+ row += 1
+ label = Gtk.Label()
+ label.set_hexpand(True)
+ label.set_vexpand(True)
+ label.set_margin_top(12)
+ label.set_margin_bottom(12)
+ label.set_alignment(0, 0)
+ label.set_line_wrap(True)
+ label.set_size_request(200, 75)
+ dialog.hint_widget = label
+ self._edit_dialog_set_standard_hint(dialog)
+ grid.attach(label, 0, row, 2, 1)
+
+ evbox.add(grid)
+ dialog.get_content_area().pack_start(evbox, True, True, 0)
+ evbox.show_all()
+
+ dialog.initial_accel_label = accel_label
+ dialog.accel_path = accel_path
+ dialog.result_keyval = None
+ dialog.result_mods = None
+ dialog.show()
+
+ def _edit_dialog_set_hint(self, dialog, markup):
+ """Sets the hint message label in the capture dialog"""
+ dialog.hint_widget.set_markup(markup)
+
+ def _edit_dialog_set_standard_hint(self, dialog):
+ """Set the boring how-to message in capture dialog"""
+ markup = _("Press keys to update this assignment")
+ self._edit_dialog_set_hint(dialog, markup)
+
+ def _edit_dialog_key_press_cb(self, dialog, event, editable):
+ if event.type != Gdk.EventType.KEY_PRESS:
+ return False
+ if event.is_modifier:
+ return False
+ if self._USE_NORMAL_DIALOG_KEYS:
+ if event.keyval == Gdk.KEY_Return:
+ dialog.response(Gtk.ResponseType.OK)
+ return True
+ elif event.keyval == Gdk.KEY_Escape:
+ dialog.response(Gtk.ResponseType.CANCEL)
+ return True
+ elif event.keyval == Gdk.KEY_BackSpace:
+ dialog.response(Gtk.ResponseType.REJECT)
+ return True
+
+ # Stolen from GTK 2.24's gtk/gtkmenu.c (gtk_menu_key_press())
+ # Figure out what modifiers went into determining the key symbol
+ keymap = Gdk.Keymap.get_default()
+ bound, keyval, effective_group, level, consumed_modifiers = (
+ keymap.translate_keyboard_state(
+ event.hardware_keycode,
+ event.state,
+ event.group,
+ ))
+ keyval = Gdk.keyval_to_lower(keyval)
+ mods = Gdk.ModifierType(
+ event.state
+ & Gtk.accelerator_get_default_mod_mask()
+ & ~consumed_modifiers)
+
+ # If lowercasing affects the keysym, then we need to include
+ # SHIFT in the modifiers. We re-upper case when we match against
+ # the keyval, but display and save in caseless form.
+ if keyval != event.keyval:
+ mods |= Gdk.ModifierType.SHIFT_MASK
+ accel_label = Gtk.accelerator_get_label(keyval, mods)
+ # So we get (j, Shift+J) but just (plus, +). As I
+ # understand it.
+
+ if not Gtk.accelerator_valid(keyval, mods):
+ return True
+
+ clash_accel_path = None
+ clash_action_label = None
+ for path, kv, m, changed in self._get_accel_map_entries():
+ if (kv, m) == (keyval, mods):
+ clash_accel_path = path
+ clash_action_label = self._action_labels.get(
+ clash_accel_path,
+ _("Unknown Action"),
+ )
+ break
+ if clash_accel_path == dialog.accel_path: # no change
+ self._edit_dialog_set_standard_hint(dialog)
+ label = str(accel_label)
+ dialog.accel_label_widget.set_text(label)
+ elif clash_accel_path:
+ markup_tmpl = _(
+ "{accel} is already in use for '{action}'. "
+ "The existing assignment will be replaced."
+ )
+ markup = markup_tmpl.format(
+ accel=escape(accel_label),
+ action=escape(clash_action_label),
+ )
+ self._edit_dialog_set_hint(dialog, markup)
+ label = "%s (replace)" % (accel_label,)
+ dialog.accel_label_widget.set_text(str(label))
+ else:
+ self._edit_dialog_set_standard_hint(dialog)
+ label = "%s (changed)" % (accel_label,)
+ dialog.accel_label_widget.set_text(label)
+ dialog.result_mods = mods
+ dialog.result_keyval = keyval
+ return True
+
+ def _edit_dialog_response_cb(self, dialog, response_id, editable, path):
+ mods = dialog.result_mods
+ keyval = dialog.result_keyval
+ if response_id == Gtk.ResponseType.REJECT:
+ entry_exists, junk = Gtk.AccelMap.lookup_entry(path)
+ if entry_exists:
+ logger.info("Delete entry %r", path)
+ if not Gtk.AccelMap.change_entry(path, 0, 0, True):
+ logger.warning("Failed to delete entry for %r", path)
+ editable.editing_done()
+ elif response_id == Gtk.ResponseType.OK:
+ if keyval is not None:
+ self._set_accelmap_entry(path, keyval, mods)
+ editable.editing_done()
+ editable.remove_widget()
+ dialog.destroy()
+
+ @classmethod
+ def _delete_clashing_accelmap_entries(cls, keyval, mods, path_to_keep):
+ accel_name = Gtk.accelerator_name(keyval, mods)
+ for path, k, m, changed in cls._get_accel_map_entries():
+ if path == path_to_keep:
+ continue
+ if (k, m) != (keyval, mods):
+ continue
+ if not Gtk.AccelMap.change_entry(path, 0, 0, True):
+ logger.warning("Failed to delete clashing use of %r (%r)",
+ accel_name, path)
+ else:
+ logger.debug("Deleted clashing use of %r (was %r)",
+ accel_name, path)
+
+ @classmethod
+ def _set_accelmap_entry(cls, path, keyval, mods):
+ cls._delete_clashing_accelmap_entries(keyval, mods, path)
+ accel_name = Gtk.accelerator_name(keyval, mods)
+ entry_exists, junk = Gtk.AccelMap.lookup_entry(path)
+ if entry_exists:
+ logger.info("Changing entry %r: %r", accel_name, path)
+ if Gtk.AccelMap.change_entry(path, keyval, mods, True):
+ logger.debug("Updated %r successfully", path)
+ else:
+ logger.error("Failed to update %r", path)
+ else:
+ logger.info("Adding new entry %r: %r", accel_name, path)
+ Gtk.AccelMap.add_entry(path, keyval, mods)
+ entry_exists, junk = Gtk.AccelMap.lookup_entry(path)
+ assert entry_exists
+
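+# A small illustration (not part of the editor itself): accelerator names
+# round-trip through Gtk.accelerator_parse() / Gtk.accelerator_name(), and it
+# is that string form which Gtk.AccelMap stores for the paths edited above.
+# The values shown are indicative only:
+#
+#     keyval, mods = Gtk.accelerator_parse("<Control>q")
+#     Gtk.accelerator_name(keyval, mods)       # e.g. "<Control>q"
+#     Gtk.accelerator_get_label(keyval, mods)  # e.g. "Ctrl+Q"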
+
+## Testing
+
+def _test():
+ win = Gtk.Window()
+ win.set_title("accelmap.py")
+ win.connect("destroy", Gtk.main_quit)
+ builder = Gtk.Builder()
+ import gui.factoryaction
+ builder.add_from_file("gui/resources.xml")
+ uimgr = builder.get_object("app_ui_manager")
+ editor = AccelMapEditor()
+ editor.ui_manager = uimgr
+ win.add(editor)
+ win.set_default_size(400, 300)
+ win.show_all()
+ Gtk.main()
+
+
+if __name__ == '__main__':
+ logging.basicConfig(level=logging.DEBUG)
+ import signal
+ signal.signal(signal.SIGINT, signal.SIG_DFL)
+ import sys
+ orig_excepthook = sys.excepthook
+
+ def _excepthook(*args):
+ orig_excepthook(*args)
+ while Gtk.main_level():
+ Gtk.main_quit()
+ sys.exit()
+
+ sys.excepthook = _excepthook
+ _test()
+
+# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Implements the graph generation for computation of gradients."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+# pylint: disable=unused-import
+from tensorflow.python.ops.gradients_impl import AggregationMethod
+from tensorflow.python.ops.gradients_impl import gradients
+from tensorflow.python.ops.gradients_impl import hessians
+# pylint: enable=unused-import
+from tensorflow.python.util.all_util import remove_undocumented
+
+_allowed_symbols = [
+ # TODO(drpng): find a good place to reference this.
+ "AggregationMethod",
+ "gradients", # tf.gradients.gradients.
+ "hessians", # tf.gradients.hessians
+]
+remove_undocumented(__name__, _allowed_symbols)
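+
+# A minimal usage sketch (illustration only, not part of the public module):
+# tf.gradients returns d(ys)/d(xs) as a list of tensors. Here d(x*x)/dx is
+# 2*x, which evaluates to 6.0 under a TF1 Session.
+if __name__ == "__main__":
+  import tensorflow as tf
+
+  _x = tf.constant(3.0)
+  _y = _x * _x
+  _grad, = tf.gradients(_y, [_x])
+  with tf.Session() as sess:
+    print(sess.run(_grad))  # -> 6.0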
+
+# -*- coding: utf-8 -*-
+##############################################################################
+#
+# OpenERP, Open Source Management Solution
+# Copyright (C) 2004-2010 Tiny SPRL ().
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see .
+#
+##############################################################################
+
+
+{
+ 'name': 'Just In Time Scheduling',
+ 'version': '1.0',
+ 'category': 'Base',
+ 'description': """
+This module allows Just In Time computation of procurement orders.
+==================================================================
+
+If you install this module, you will not have to run the regular procurement
+scheduler anymore (but you still need to run the minimum order point rule
+scheduler, for example by letting it run daily).
+All procurement orders will be processed immediately, which could in some
+cases entail a small performance impact.
+
+It may also increase your stock size because products are reserved as soon
+as possible and the scheduler time range is not taken into account anymore.
+In that case, you can no longer use priorities on the different pickings.
+ """,
+ 'author': 'OpenERP SA',
+ 'website': 'https://www.odoo.com/page/manufacturing',
+ 'depends': ['procurement'],
+ 'data': [],
+ 'demo': [],
+ 'test': ['test/procurement_jit.yml'],
+ 'installable': True,
+ 'auto_install': False,
+}
+# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
+
+""" Python Character Mapping Codec cp874 generated from 'MAPPINGS/VENDORS/MICSFT/WINDOWS/CP874.TXT' with gencodec.py.
+
+"""#"
+
+import codecs
+
+### Codec APIs
+
+class Codec(codecs.Codec):
+
+ def encode(self,input,errors='strict'):
+ return codecs.charmap_encode(input,errors,encoding_table)
+
+ def decode(self,input,errors='strict'):
+ return codecs.charmap_decode(input,errors,decoding_table)
+
+class IncrementalEncoder(codecs.IncrementalEncoder):
+ def encode(self, input, final=False):
+ return codecs.charmap_encode(input,self.errors,encoding_table)[0]
+
+class IncrementalDecoder(codecs.IncrementalDecoder):
+ def decode(self, input, final=False):
+ return codecs.charmap_decode(input,self.errors,decoding_table)[0]
+
+class StreamWriter(Codec,codecs.StreamWriter):
+ pass
+
+class StreamReader(Codec,codecs.StreamReader):
+ pass
+
+### encodings module API
+
+def getregentry():
+ return codecs.CodecInfo(
+ name='cp874',
+ encode=Codec().encode,
+ decode=Codec().decode,
+ incrementalencoder=IncrementalEncoder,
+ incrementaldecoder=IncrementalDecoder,
+ streamreader=StreamReader,
+ streamwriter=StreamWriter,
+ )
+
+
+### Decoding Table
+
+decoding_table = (
+ u'\x00' # 0x00 -> NULL
+ u'\x01' # 0x01 -> START OF HEADING
+ u'\x02' # 0x02 -> START OF TEXT
+ u'\x03' # 0x03 -> END OF TEXT
+ u'\x04' # 0x04 -> END OF TRANSMISSION
+ u'\x05' # 0x05 -> ENQUIRY
+ u'\x06' # 0x06 -> ACKNOWLEDGE
+ u'\x07' # 0x07 -> BELL
+ u'\x08' # 0x08 -> BACKSPACE
+ u'\t' # 0x09 -> HORIZONTAL TABULATION
+ u'\n' # 0x0A -> LINE FEED
+ u'\x0b' # 0x0B -> VERTICAL TABULATION
+ u'\x0c' # 0x0C -> FORM FEED
+ u'\r' # 0x0D -> CARRIAGE RETURN
+ u'\x0e' # 0x0E -> SHIFT OUT
+ u'\x0f' # 0x0F -> SHIFT IN
+ u'\x10' # 0x10 -> DATA LINK ESCAPE
+ u'\x11' # 0x11 -> DEVICE CONTROL ONE
+ u'\x12' # 0x12 -> DEVICE CONTROL TWO
+ u'\x13' # 0x13 -> DEVICE CONTROL THREE
+ u'\x14' # 0x14 -> DEVICE CONTROL FOUR
+ u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
+ u'\x16' # 0x16 -> SYNCHRONOUS IDLE
+ u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
+ u'\x18' # 0x18 -> CANCEL
+ u'\x19' # 0x19 -> END OF MEDIUM
+ u'\x1a' # 0x1A -> SUBSTITUTE
+ u'\x1b' # 0x1B -> ESCAPE
+ u'\x1c' # 0x1C -> FILE SEPARATOR
+ u'\x1d' # 0x1D -> GROUP SEPARATOR
+ u'\x1e' # 0x1E -> RECORD SEPARATOR
+ u'\x1f' # 0x1F -> UNIT SEPARATOR
+ u' ' # 0x20 -> SPACE
+ u'!' # 0x21 -> EXCLAMATION MARK
+ u'"' # 0x22 -> QUOTATION MARK
+ u'#' # 0x23 -> NUMBER SIGN
+ u'$' # 0x24 -> DOLLAR SIGN
+ u'%' # 0x25 -> PERCENT SIGN
+ u'&' # 0x26 -> AMPERSAND
+ u"'" # 0x27 -> APOSTROPHE
+ u'(' # 0x28 -> LEFT PARENTHESIS
+ u')' # 0x29 -> RIGHT PARENTHESIS
+ u'*' # 0x2A -> ASTERISK
+ u'+' # 0x2B -> PLUS SIGN
+ u',' # 0x2C -> COMMA
+ u'-' # 0x2D -> HYPHEN-MINUS
+ u'.' # 0x2E -> FULL STOP
+ u'/' # 0x2F -> SOLIDUS
+ u'0' # 0x30 -> DIGIT ZERO
+ u'1' # 0x31 -> DIGIT ONE
+ u'2' # 0x32 -> DIGIT TWO
+ u'3' # 0x33 -> DIGIT THREE
+ u'4' # 0x34 -> DIGIT FOUR
+ u'5' # 0x35 -> DIGIT FIVE
+ u'6' # 0x36 -> DIGIT SIX
+ u'7' # 0x37 -> DIGIT SEVEN
+ u'8' # 0x38 -> DIGIT EIGHT
+ u'9' # 0x39 -> DIGIT NINE
+ u':' # 0x3A -> COLON
+ u';' # 0x3B -> SEMICOLON
+ u'<' # 0x3C -> LESS-THAN SIGN
+ u'=' # 0x3D -> EQUALS SIGN
+ u'>' # 0x3E -> GREATER-THAN SIGN
+ u'?' # 0x3F -> QUESTION MARK
+ u'@' # 0x40 -> COMMERCIAL AT
+ u'A' # 0x41 -> LATIN CAPITAL LETTER A
+ u'B' # 0x42 -> LATIN CAPITAL LETTER B
+ u'C' # 0x43 -> LATIN CAPITAL LETTER C
+ u'D' # 0x44 -> LATIN CAPITAL LETTER D
+ u'E' # 0x45 -> LATIN CAPITAL LETTER E
+ u'F' # 0x46 -> LATIN CAPITAL LETTER F
+ u'G' # 0x47 -> LATIN CAPITAL LETTER G
+ u'H' # 0x48 -> LATIN CAPITAL LETTER H
+ u'I' # 0x49 -> LATIN CAPITAL LETTER I
+ u'J' # 0x4A -> LATIN CAPITAL LETTER J
+ u'K' # 0x4B -> LATIN CAPITAL LETTER K
+ u'L' # 0x4C -> LATIN CAPITAL LETTER L
+ u'M' # 0x4D -> LATIN CAPITAL LETTER M
+ u'N' # 0x4E -> LATIN CAPITAL LETTER N
+ u'O' # 0x4F -> LATIN CAPITAL LETTER O
+ u'P' # 0x50 -> LATIN CAPITAL LETTER P
+ u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
+ u'R' # 0x52 -> LATIN CAPITAL LETTER R
+ u'S' # 0x53 -> LATIN CAPITAL LETTER S
+ u'T' # 0x54 -> LATIN CAPITAL LETTER T
+ u'U' # 0x55 -> LATIN CAPITAL LETTER U
+ u'V' # 0x56 -> LATIN CAPITAL LETTER V
+ u'W' # 0x57 -> LATIN CAPITAL LETTER W
+ u'X' # 0x58 -> LATIN CAPITAL LETTER X
+ u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
+ u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
+ u'[' # 0x5B -> LEFT SQUARE BRACKET
+ u'\\' # 0x5C -> REVERSE SOLIDUS
+ u']' # 0x5D -> RIGHT SQUARE BRACKET
+ u'^' # 0x5E -> CIRCUMFLEX ACCENT
+ u'_' # 0x5F -> LOW LINE
+ u'`' # 0x60 -> GRAVE ACCENT
+ u'a' # 0x61 -> LATIN SMALL LETTER A
+ u'b' # 0x62 -> LATIN SMALL LETTER B
+ u'c' # 0x63 -> LATIN SMALL LETTER C
+ u'd' # 0x64 -> LATIN SMALL LETTER D
+ u'e' # 0x65 -> LATIN SMALL LETTER E
+ u'f' # 0x66 -> LATIN SMALL LETTER F
+ u'g' # 0x67 -> LATIN SMALL LETTER G
+ u'h' # 0x68 -> LATIN SMALL LETTER H
+ u'i' # 0x69 -> LATIN SMALL LETTER I
+ u'j' # 0x6A -> LATIN SMALL LETTER J
+ u'k' # 0x6B -> LATIN SMALL LETTER K
+ u'l' # 0x6C -> LATIN SMALL LETTER L
+ u'm' # 0x6D -> LATIN SMALL LETTER M
+ u'n' # 0x6E -> LATIN SMALL LETTER N
+ u'o' # 0x6F -> LATIN SMALL LETTER O
+ u'p' # 0x70 -> LATIN SMALL LETTER P
+ u'q' # 0x71 -> LATIN SMALL LETTER Q
+ u'r' # 0x72 -> LATIN SMALL LETTER R
+ u's' # 0x73 -> LATIN SMALL LETTER S
+ u't' # 0x74 -> LATIN SMALL LETTER T
+ u'u' # 0x75 -> LATIN SMALL LETTER U
+ u'v' # 0x76 -> LATIN SMALL LETTER V
+ u'w' # 0x77 -> LATIN SMALL LETTER W
+ u'x' # 0x78 -> LATIN SMALL LETTER X
+ u'y' # 0x79 -> LATIN SMALL LETTER Y
+ u'z' # 0x7A -> LATIN SMALL LETTER Z
+ u'{' # 0x7B -> LEFT CURLY BRACKET
+ u'|' # 0x7C -> VERTICAL LINE
+ u'}' # 0x7D -> RIGHT CURLY BRACKET
+ u'~' # 0x7E -> TILDE
+ u'\x7f' # 0x7F -> DELETE
+ u'\u20ac' # 0x80 -> EURO SIGN
+ u'\ufffe' # 0x81 -> UNDEFINED
+ u'\ufffe' # 0x82 -> UNDEFINED
+ u'\ufffe' # 0x83 -> UNDEFINED
+ u'\ufffe' # 0x84 -> UNDEFINED
+ u'\u2026' # 0x85 -> HORIZONTAL ELLIPSIS
+ u'\ufffe' # 0x86 -> UNDEFINED
+ u'\ufffe' # 0x87 -> UNDEFINED
+ u'\ufffe' # 0x88 -> UNDEFINED
+ u'\ufffe' # 0x89 -> UNDEFINED
+ u'\ufffe' # 0x8A -> UNDEFINED
+ u'\ufffe' # 0x8B -> UNDEFINED
+ u'\ufffe' # 0x8C -> UNDEFINED
+ u'\ufffe' # 0x8D -> UNDEFINED
+ u'\ufffe' # 0x8E -> UNDEFINED
+ u'\ufffe' # 0x8F -> UNDEFINED
+ u'\ufffe' # 0x90 -> UNDEFINED
+ u'\u2018' # 0x91 -> LEFT SINGLE QUOTATION MARK
+ u'\u2019' # 0x92 -> RIGHT SINGLE QUOTATION MARK
+ u'\u201c' # 0x93 -> LEFT DOUBLE QUOTATION MARK
+ u'\u201d' # 0x94 -> RIGHT DOUBLE QUOTATION MARK
+ u'\u2022' # 0x95 -> BULLET
+ u'\u2013' # 0x96 -> EN DASH
+ u'\u2014' # 0x97 -> EM DASH
+ u'\ufffe' # 0x98 -> UNDEFINED
+ u'\ufffe' # 0x99 -> UNDEFINED
+ u'\ufffe' # 0x9A -> UNDEFINED
+ u'\ufffe' # 0x9B -> UNDEFINED
+ u'\ufffe' # 0x9C -> UNDEFINED
+ u'\ufffe' # 0x9D -> UNDEFINED
+ u'\ufffe' # 0x9E -> UNDEFINED
+ u'\ufffe' # 0x9F -> UNDEFINED
+ u'\xa0' # 0xA0 -> NO-BREAK SPACE
+ u'\u0e01' # 0xA1 -> THAI CHARACTER KO KAI
+ u'\u0e02' # 0xA2 -> THAI CHARACTER KHO KHAI
+ u'\u0e03' # 0xA3 -> THAI CHARACTER KHO KHUAT
+ u'\u0e04' # 0xA4 -> THAI CHARACTER KHO KHWAI
+ u'\u0e05' # 0xA5 -> THAI CHARACTER KHO KHON
+ u'\u0e06' # 0xA6 -> THAI CHARACTER KHO RAKHANG
+ u'\u0e07' # 0xA7 -> THAI CHARACTER NGO NGU
+ u'\u0e08' # 0xA8 -> THAI CHARACTER CHO CHAN
+ u'\u0e09' # 0xA9 -> THAI CHARACTER CHO CHING
+ u'\u0e0a' # 0xAA -> THAI CHARACTER CHO CHANG
+ u'\u0e0b' # 0xAB -> THAI CHARACTER SO SO
+ u'\u0e0c' # 0xAC -> THAI CHARACTER CHO CHOE
+ u'\u0e0d' # 0xAD -> THAI CHARACTER YO YING
+ u'\u0e0e' # 0xAE -> THAI CHARACTER DO CHADA
+ u'\u0e0f' # 0xAF -> THAI CHARACTER TO PATAK
+ u'\u0e10' # 0xB0 -> THAI CHARACTER THO THAN
+ u'\u0e11' # 0xB1 -> THAI CHARACTER THO NANGMONTHO
+ u'\u0e12' # 0xB2 -> THAI CHARACTER THO PHUTHAO
+ u'\u0e13' # 0xB3 -> THAI CHARACTER NO NEN
+ u'\u0e14' # 0xB4 -> THAI CHARACTER DO DEK
+ u'\u0e15' # 0xB5 -> THAI CHARACTER TO TAO
+ u'\u0e16' # 0xB6 -> THAI CHARACTER THO THUNG
+ u'\u0e17' # 0xB7 -> THAI CHARACTER THO THAHAN
+ u'\u0e18' # 0xB8 -> THAI CHARACTER THO THONG
+ u'\u0e19' # 0xB9 -> THAI CHARACTER NO NU
+ u'\u0e1a' # 0xBA -> THAI CHARACTER BO BAIMAI
+ u'\u0e1b' # 0xBB -> THAI CHARACTER PO PLA
+ u'\u0e1c' # 0xBC -> THAI CHARACTER PHO PHUNG
+ u'\u0e1d' # 0xBD -> THAI CHARACTER FO FA
+ u'\u0e1e' # 0xBE -> THAI CHARACTER PHO PHAN
+ u'\u0e1f' # 0xBF -> THAI CHARACTER FO FAN
+ u'\u0e20' # 0xC0 -> THAI CHARACTER PHO SAMPHAO
+ u'\u0e21' # 0xC1 -> THAI CHARACTER MO MA
+ u'\u0e22' # 0xC2 -> THAI CHARACTER YO YAK
+ u'\u0e23' # 0xC3 -> THAI CHARACTER RO RUA
+ u'\u0e24' # 0xC4 -> THAI CHARACTER RU
+ u'\u0e25' # 0xC5 -> THAI CHARACTER LO LING
+ u'\u0e26' # 0xC6 -> THAI CHARACTER LU
+ u'\u0e27' # 0xC7 -> THAI CHARACTER WO WAEN
+ u'\u0e28' # 0xC8 -> THAI CHARACTER SO SALA
+ u'\u0e29' # 0xC9 -> THAI CHARACTER SO RUSI
+ u'\u0e2a' # 0xCA -> THAI CHARACTER SO SUA
+ u'\u0e2b' # 0xCB -> THAI CHARACTER HO HIP
+ u'\u0e2c' # 0xCC -> THAI CHARACTER LO CHULA
+ u'\u0e2d' # 0xCD -> THAI CHARACTER O ANG
+ u'\u0e2e' # 0xCE -> THAI CHARACTER HO NOKHUK
+ u'\u0e2f' # 0xCF -> THAI CHARACTER PAIYANNOI
+ u'\u0e30' # 0xD0 -> THAI CHARACTER SARA A
+ u'\u0e31' # 0xD1 -> THAI CHARACTER MAI HAN-AKAT
+ u'\u0e32' # 0xD2 -> THAI CHARACTER SARA AA
+ u'\u0e33' # 0xD3 -> THAI CHARACTER SARA AM
+ u'\u0e34' # 0xD4 -> THAI CHARACTER SARA I
+ u'\u0e35' # 0xD5 -> THAI CHARACTER SARA II
+ u'\u0e36' # 0xD6 -> THAI CHARACTER SARA UE
+ u'\u0e37' # 0xD7 -> THAI CHARACTER SARA UEE
+ u'\u0e38' # 0xD8 -> THAI CHARACTER SARA U
+ u'\u0e39' # 0xD9 -> THAI CHARACTER SARA UU
+ u'\u0e3a' # 0xDA -> THAI CHARACTER PHINTHU
+ u'\ufffe' # 0xDB -> UNDEFINED
+ u'\ufffe' # 0xDC -> UNDEFINED
+ u'\ufffe' # 0xDD -> UNDEFINED
+ u'\ufffe' # 0xDE -> UNDEFINED
+ u'\u0e3f' # 0xDF -> THAI CURRENCY SYMBOL BAHT
+ u'\u0e40' # 0xE0 -> THAI CHARACTER SARA E
+ u'\u0e41' # 0xE1 -> THAI CHARACTER SARA AE
+ u'\u0e42' # 0xE2 -> THAI CHARACTER SARA O
+ u'\u0e43' # 0xE3 -> THAI CHARACTER SARA AI MAIMUAN
+ u'\u0e44' # 0xE4 -> THAI CHARACTER SARA AI MAIMALAI
+ u'\u0e45' # 0xE5 -> THAI CHARACTER LAKKHANGYAO
+ u'\u0e46' # 0xE6 -> THAI CHARACTER MAIYAMOK
+ u'\u0e47' # 0xE7 -> THAI CHARACTER MAITAIKHU
+ u'\u0e48' # 0xE8 -> THAI CHARACTER MAI EK
+ u'\u0e49' # 0xE9 -> THAI CHARACTER MAI THO
+ u'\u0e4a' # 0xEA -> THAI CHARACTER MAI TRI
+ u'\u0e4b' # 0xEB -> THAI CHARACTER MAI CHATTAWA
+ u'\u0e4c' # 0xEC -> THAI CHARACTER THANTHAKHAT
+ u'\u0e4d' # 0xED -> THAI CHARACTER NIKHAHIT
+ u'\u0e4e' # 0xEE -> THAI CHARACTER YAMAKKAN
+ u'\u0e4f' # 0xEF -> THAI CHARACTER FONGMAN
+ u'\u0e50' # 0xF0 -> THAI DIGIT ZERO
+ u'\u0e51' # 0xF1 -> THAI DIGIT ONE
+ u'\u0e52' # 0xF2 -> THAI DIGIT TWO
+ u'\u0e53' # 0xF3 -> THAI DIGIT THREE
+ u'\u0e54' # 0xF4 -> THAI DIGIT FOUR
+ u'\u0e55' # 0xF5 -> THAI DIGIT FIVE
+ u'\u0e56' # 0xF6 -> THAI DIGIT SIX
+ u'\u0e57' # 0xF7 -> THAI DIGIT SEVEN
+ u'\u0e58' # 0xF8 -> THAI DIGIT EIGHT
+ u'\u0e59' # 0xF9 -> THAI DIGIT NINE
+ u'\u0e5a' # 0xFA -> THAI CHARACTER ANGKHANKHU
+ u'\u0e5b' # 0xFB -> THAI CHARACTER KHOMUT
+ u'\ufffe' # 0xFC -> UNDEFINED
+ u'\ufffe' # 0xFD -> UNDEFINED
+ u'\ufffe' # 0xFE -> UNDEFINED
+ u'\ufffe' # 0xFF -> UNDEFINED
+)
+
+### Encoding table
+encoding_table=codecs.charmap_build(decoding_table)
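+
+# A minimal round-trip sketch (illustration only, not part of the generated
+# codec): charmap_encode/charmap_decode work directly against the tables
+# built above.
+if __name__ == '__main__':
+    _sample = u'\u0e01\u0e02\u0e03'  # KO KAI, KHO KHAI, KHO KHUAT
+    _encoded, _ = codecs.charmap_encode(_sample, 'strict', encoding_table)
+    _decoded, _ = codecs.charmap_decode(_encoded, 'strict', decoding_table)
+    assert _decoded == _sample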
+
+# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Tests for tensorflow.ops.tf.Assign*."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import numpy as np
+
+from tensorflow.python.framework import dtypes
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import state_ops
+from tensorflow.python.ops import variables
+from tensorflow.python.platform import test
+
+
+class AssignOpTest(test.TestCase):
+
+ def _initAssignFetch(self, x, y, use_gpu=False):
+ """Initialize a param to init and update it with y."""
+ super(AssignOpTest, self).setUp()
+ with self.test_session(use_gpu=use_gpu):
+ p = variables.Variable(x)
+ assign = state_ops.assign(p, y)
+ p.initializer.run()
+ new_value = assign.eval()
+ return p.eval(), new_value
+
+ def _initAssignAddFetch(self, x, y, use_gpu=False):
+ """Initialize a param to init, and compute param += y."""
+ with self.test_session(use_gpu=use_gpu):
+ p = variables.Variable(x)
+ add = state_ops.assign_add(p, y)
+ p.initializer.run()
+ new_value = add.eval()
+ return p.eval(), new_value
+
+ def _initAssignSubFetch(self, x, y, use_gpu=False):
+ """Initialize a param to init, and compute param -= y."""
+ with self.test_session(use_gpu=use_gpu):
+ p = variables.Variable(x)
+ sub = state_ops.assign_sub(p, y)
+ p.initializer.run()
+ new_value = sub.eval()
+ return p.eval(), new_value
+
+ def _testTypes(self, vals):
+ for dtype in [np.float32, np.float64, np.int32, np.int64]:
+ x = np.zeros(vals.shape).astype(dtype)
+ y = vals.astype(dtype)
+ var_value, op_value = self._initAssignFetch(x, y, use_gpu=False)
+ self.assertAllEqual(y, var_value)
+ self.assertAllEqual(y, op_value)
+ var_value, op_value = self._initAssignAddFetch(x, y, use_gpu=False)
+ self.assertAllEqual(x + y, var_value)
+ self.assertAllEqual(x + y, op_value)
+ var_value, op_value = self._initAssignSubFetch(x, y, use_gpu=False)
+ self.assertAllEqual(x - y, var_value)
+ self.assertAllEqual(x - y, op_value)
+ if test.is_built_with_cuda() and dtype in [np.float32, np.float64]:
+ var_value, op_value = self._initAssignFetch(x, y, use_gpu=True)
+ self.assertAllEqual(y, var_value)
+ self.assertAllEqual(y, op_value)
+ var_value, op_value = self._initAssignAddFetch(x, y, use_gpu=True)
+ self.assertAllEqual(x + y, var_value)
+ self.assertAllEqual(x + y, op_value)
+ var_value, op_value = self._initAssignSubFetch(x, y, use_gpu=False)
+ self.assertAllEqual(x - y, var_value)
+ self.assertAllEqual(x - y, op_value)
+
+ def testBasic(self):
+ self._testTypes(np.arange(0, 20).reshape([4, 5]))
+
+ def testAssignNonStrictShapeChecking(self):
+ with self.test_session():
+ data = array_ops.fill([1024, 1024], 0)
+ p = variables.Variable([1])
+ a = state_ops.assign(p, data, validate_shape=False)
+ a.op.run()
+ self.assertAllEqual(p.eval(), data.eval())
+
+ # Assign to yet another shape
+ data2 = array_ops.fill([10, 10], 1)
+ a2 = state_ops.assign(p, data2, validate_shape=False)
+ a2.op.run()
+ self.assertAllEqual(p.eval(), data2.eval())
+
+ def testInitRequiredAssignAdd(self):
+ with self.test_session():
+ p = variables.Variable(array_ops.fill([1024, 1024], 1), dtypes.int32)
+ a = state_ops.assign_add(p, array_ops.fill([1024, 1024], 0))
+ with self.assertRaisesOpError("use uninitialized"):
+ a.op.run()
+
+ def testInitRequiredAssignSub(self):
+ with self.test_session():
+ p = variables.Variable(array_ops.fill([1024, 1024], 1), dtypes.int32)
+ a = state_ops.assign_sub(p, array_ops.fill([1024, 1024], 0))
+ with self.assertRaisesOpError("use uninitialized"):
+ a.op.run()
+
+
+if __name__ == "__main__":
+ test.main()
+
+# Copyright 2014 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Handles converting of pragmas."""
+
+
+class PragmaHandler(object):
+ """Class that handles the conversion of pragmas."""
+
+ def __init__(self, warning_method):
+ """Create a pragma handler.
+
+ Args:
+ warning_method: A function to call to display a warning message.
+ """
+ self._warning_method = warning_method
+
+ def HandlePragma(self,
+ input_line,
+ unused_output_stream,
+ pragma_type,
+ pragma_value):
+ """Handle a parsed pragma directive.
+
+ Args:
+ input_line: The line number this match occurred on.
+ unused_output_stream: Output Markdown file.
+ pragma_type: The pragma's type.
+ pragma_value: The pragma's value, trimmed.
+ """
+ # There is no meaningful equivalent to any of the pragmas
+ # Google Code supports, so simply notify the user a pragma
+ # was matched and that they might want to do something about it.
+ if pragma_type == "summary":
+ self._warning_method(
+ input_line,
+ u"A summary pragma was used for this wiki:\n"
+ "\t{0}\n"
+ "Consider moving it to an introductory paragraph."
+ .format(pragma_value))
+ elif pragma_type == "sidebar":
+ self._warning_method(
+ input_line,
+ u"A sidebar pragma was used for this wiki:\n"
+ "\t{0}\n"
+ "The Gollum wiki system supports sidebars, and by converting "
+ "{0}.wiki to _Sidebar.md it can be used as a sidebar.\n"
+ "See https://github.com/gollum/gollum/wiki for more information."
+ .format(pragma_value))
+ else:
+ self._warning_method(
+ input_line,
+ u"The following pragma has been ignored:\n"
+ "\t#{0} {1}\n"
+ "Consider expressing the same information in a different manner."
+ .format(pragma_type, pragma_value))
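+
+
+# A minimal usage sketch (illustration only; the converter normally constructs
+# and drives this class itself). The warning callback and pragma values below
+# are hypothetical.
+if __name__ == "__main__":
+  def _print_warning(line, message):
+    print("line %d: %s" % (line, message))
+
+  handler = PragmaHandler(_print_warning)
+  handler.HandlePragma(1, None, "summary", "A one-line page summary")
+  handler.HandlePragma(2, None, "labels", "Featured,Deprecated")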
+
+#! /usr/bin/python
+# -*- python -*-
+# -*- coding: utf-8 -*-
+# twatch - Experimental use of the perf python interface
+# Copyright (C) 2011 Arnaldo Carvalho de Melo
+#
+# This application is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; version 2.
+#
+# This application is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+
+import perf
+
+def main():
+ cpus = perf.cpu_map()
+ threads = perf.thread_map()
+ evsel = perf.evsel(task = 1, comm = 1, mmap = 0,
+ wakeup_events = 1, watermark = 1,
+ sample_id_all = 1,
+ sample_type = perf.SAMPLE_PERIOD | perf.SAMPLE_TID | perf.SAMPLE_CPU)
+ evsel.open(cpus = cpus, threads = threads)
+ evlist = perf.evlist(cpus, threads)
+ evlist.add(evsel)
+ evlist.mmap()
+ while True:
+ evlist.poll(timeout = -1)
+ for cpu in cpus:
+ event = evlist.read_on_cpu(cpu)
+ if not event:
+ continue
+ print "cpu: %2d, pid: %4d, tid: %4d" % (event.sample_cpu,
+ event.sample_pid,
+ event.sample_tid),
+ print event
+
+if __name__ == '__main__':
+ main()
+
+"""Implementation of the DOM Level 3 'LS-Load' feature."""
+
+import copy
+import xml.dom
+
+from xml.dom.NodeFilter import NodeFilter
+
+
+__all__ = ["DOMBuilder", "DOMEntityResolver", "DOMInputSource"]
+
+
+class Options:
+ """Features object that has variables set for each DOMBuilder feature.
+
+ The DOMBuilder class uses an instance of this class to pass settings to
+ the ExpatBuilder class.
+ """
+
+ # Note that the DOMBuilder class in LoadSave constrains which of these
+ # values can be set using the DOM Level 3 LoadSave feature.
+
+ namespaces = 1
+ namespace_declarations = True
+ validation = False
+ external_parameter_entities = True
+ external_general_entities = True
+ external_dtd_subset = True
+ validate_if_schema = False
+ validate = False
+ datatype_normalization = False
+ create_entity_ref_nodes = True
+ entities = True
+ whitespace_in_element_content = True
+ cdata_sections = True
+ comments = True
+ charset_overrides_xml_encoding = True
+ infoset = False
+ supported_mediatypes_only = False
+
+ errorHandler = None
+ filter = None
+
+
+class DOMBuilder:
+ entityResolver = None
+ errorHandler = None
+ filter = None
+
+ ACTION_REPLACE = 1
+ ACTION_APPEND_AS_CHILDREN = 2
+ ACTION_INSERT_AFTER = 3
+ ACTION_INSERT_BEFORE = 4
+
+ _legal_actions = (ACTION_REPLACE, ACTION_APPEND_AS_CHILDREN,
+ ACTION_INSERT_AFTER, ACTION_INSERT_BEFORE)
+
+ def __init__(self):
+ self._options = Options()
+
+ def _get_entityResolver(self):
+ return self.entityResolver
+ def _set_entityResolver(self, entityResolver):
+ self.entityResolver = entityResolver
+
+ def _get_errorHandler(self):
+ return self.errorHandler
+ def _set_errorHandler(self, errorHandler):
+ self.errorHandler = errorHandler
+
+ def _get_filter(self):
+ return self.filter
+ def _set_filter(self, filter):
+ self.filter = filter
+
+ def setFeature(self, name, state):
+ if self.supportsFeature(name):
+ state = state and 1 or 0
+ try:
+ settings = self._settings[(_name_xform(name), state)]
+ except KeyError:
+ raise xml.dom.NotSupportedErr(
+ "unsupported feature: %r" % (name,))
+ else:
+ for name, value in settings:
+ setattr(self._options, name, value)
+ else:
+ raise xml.dom.NotFoundErr("unknown feature: " + repr(name))
+
+ def supportsFeature(self, name):
+ return hasattr(self._options, _name_xform(name))
+
+ def canSetFeature(self, name, state):
+ key = (_name_xform(name), state and 1 or 0)
+ return key in self._settings
+
+ # This dictionary maps from (feature,value) to a list of
+ # (option,value) pairs that should be set on the Options object.
+ # If a (feature,value) setting is not in this dictionary, it is
+ # not supported by the DOMBuilder.
+ #
+ _settings = {
+ ("namespace_declarations", 0): [
+ ("namespace_declarations", 0)],
+ ("namespace_declarations", 1): [
+ ("namespace_declarations", 1)],
+ ("validation", 0): [
+ ("validation", 0)],
+ ("external_general_entities", 0): [
+ ("external_general_entities", 0)],
+ ("external_general_entities", 1): [
+ ("external_general_entities", 1)],
+ ("external_parameter_entities", 0): [
+ ("external_parameter_entities", 0)],
+ ("external_parameter_entities", 1): [
+ ("external_parameter_entities", 1)],
+ ("validate_if_schema", 0): [
+ ("validate_if_schema", 0)],
+ ("create_entity_ref_nodes", 0): [
+ ("create_entity_ref_nodes", 0)],
+ ("create_entity_ref_nodes", 1): [
+ ("create_entity_ref_nodes", 1)],
+ ("entities", 0): [
+ ("create_entity_ref_nodes", 0),
+ ("entities", 0)],
+ ("entities", 1): [
+ ("entities", 1)],
+ ("whitespace_in_element_content", 0): [
+ ("whitespace_in_element_content", 0)],
+ ("whitespace_in_element_content", 1): [
+ ("whitespace_in_element_content", 1)],
+ ("cdata_sections", 0): [
+ ("cdata_sections", 0)],
+ ("cdata_sections", 1): [
+ ("cdata_sections", 1)],
+ ("comments", 0): [
+ ("comments", 0)],
+ ("comments", 1): [
+ ("comments", 1)],
+ ("charset_overrides_xml_encoding", 0): [
+ ("charset_overrides_xml_encoding", 0)],
+ ("charset_overrides_xml_encoding", 1): [
+ ("charset_overrides_xml_encoding", 1)],
+ ("infoset", 0): [],
+ ("infoset", 1): [
+ ("namespace_declarations", 0),
+ ("validate_if_schema", 0),
+ ("create_entity_ref_nodes", 0),
+ ("entities", 0),
+ ("cdata_sections", 0),
+ ("datatype_normalization", 1),
+ ("whitespace_in_element_content", 1),
+ ("comments", 1),
+ ("charset_overrides_xml_encoding", 1)],
+ ("supported_mediatypes_only", 0): [
+ ("supported_mediatypes_only", 0)],
+ ("namespaces", 0): [
+ ("namespaces", 0)],
+ ("namespaces", 1): [
+ ("namespaces", 1)],
+ }
+
+ def getFeature(self, name):
+ xname = _name_xform(name)
+ try:
+ return getattr(self._options, xname)
+ except AttributeError:
+ if name == "infoset":
+ options = self._options
+ return (options.datatype_normalization
+ and options.whitespace_in_element_content
+ and options.comments
+ and options.charset_overrides_xml_encoding
+ and not (options.namespace_declarations
+ or options.validate_if_schema
+ or options.create_entity_ref_nodes
+ or options.entities
+ or options.cdata_sections))
+ raise xml.dom.NotFoundErr("feature %s not known" % repr(name))
+
+ def parseURI(self, uri):
+ if self.entityResolver:
+ input = self.entityResolver.resolveEntity(None, uri)
+ else:
+ input = DOMEntityResolver().resolveEntity(None, uri)
+ return self.parse(input)
+
+ def parse(self, input):
+ options = copy.copy(self._options)
+ options.filter = self.filter
+ options.errorHandler = self.errorHandler
+ fp = input.byteStream
+ if fp is None and options.systemId:
+ import urllib.request
+ fp = urllib.request.urlopen(input.systemId)
+ return self._parse_bytestream(fp, options)
+
+ def parseWithContext(self, input, cnode, action):
+ if action not in self._legal_actions:
+ raise ValueError("not a legal action")
+ raise NotImplementedError("Haven't written this yet...")
+
+ def _parse_bytestream(self, stream, options):
+ import xml.dom.expatbuilder
+ builder = xml.dom.expatbuilder.makeBuilder(options)
+ return builder.parseFile(stream)
+
+
+def _name_xform(name):
+ return name.lower().replace('-', '_')
+
+
+class DOMEntityResolver(object):
+ __slots__ = '_opener',
+
+ def resolveEntity(self, publicId, systemId):
+ assert systemId is not None
+ source = DOMInputSource()
+ source.publicId = publicId
+ source.systemId = systemId
+ source.byteStream = self._get_opener().open(systemId)
+
+ # determine the encoding if the transport provided it
+ source.encoding = self._guess_media_encoding(source)
+
+ # determine the base URI if we can
+ import posixpath, urllib.parse
+ parts = urllib.parse.urlparse(systemId)
+ scheme, netloc, path, params, query, fragment = parts
+ # XXX should we check the scheme here as well?
+ if path and not path.endswith("/"):
+ path = posixpath.dirname(path) + "/"
+ parts = scheme, netloc, path, params, query, fragment
+ source.baseURI = urllib.parse.urlunparse(parts)
+
+ return source
+
+ def _get_opener(self):
+ try:
+ return self._opener
+ except AttributeError:
+ self._opener = self._create_opener()
+ return self._opener
+
+ def _create_opener(self):
+ import urllib.request
+ return urllib.request.build_opener()
+
+ def _guess_media_encoding(self, source):
+ info = source.byteStream.info()
+ if "Content-Type" in info:
+ for param in info.getplist():
+ if param.startswith("charset="):
+ return param.split("=", 1)[1].lower()
+
+
+class DOMInputSource(object):
+ __slots__ = ('byteStream', 'characterStream', 'stringData',
+ 'encoding', 'publicId', 'systemId', 'baseURI')
+
+ def __init__(self):
+ self.byteStream = None
+ self.characterStream = None
+ self.stringData = None
+ self.encoding = None
+ self.publicId = None
+ self.systemId = None
+ self.baseURI = None
+
+ def _get_byteStream(self):
+ return self.byteStream
+ def _set_byteStream(self, byteStream):
+ self.byteStream = byteStream
+
+ def _get_characterStream(self):
+ return self.characterStream
+ def _set_characterStream(self, characterStream):
+ self.characterStream = characterStream
+
+ def _get_stringData(self):
+ return self.stringData
+ def _set_stringData(self, data):
+ self.stringData = data
+
+ def _get_encoding(self):
+ return self.encoding
+ def _set_encoding(self, encoding):
+ self.encoding = encoding
+
+ def _get_publicId(self):
+ return self.publicId
+ def _set_publicId(self, publicId):
+ self.publicId = publicId
+
+ def _get_systemId(self):
+ return self.systemId
+ def _set_systemId(self, systemId):
+ self.systemId = systemId
+
+ def _get_baseURI(self):
+ return self.baseURI
+ def _set_baseURI(self, uri):
+ self.baseURI = uri
+
+
+class DOMBuilderFilter:
+ """Element filter which can be used to tailor construction of
+ a DOM instance.
+ """
+
+ # There's really no need for this class; concrete implementations
+ # should just implement the endElement() and startElement()
+ # methods as appropriate. Using this makes it easy to only
+ # implement one of them.
+
+ FILTER_ACCEPT = 1
+ FILTER_REJECT = 2
+ FILTER_SKIP = 3
+ FILTER_INTERRUPT = 4
+
+ whatToShow = NodeFilter.SHOW_ALL
+
+ def _get_whatToShow(self):
+ return self.whatToShow
+
+ def acceptNode(self, element):
+ return self.FILTER_ACCEPT
+
+ def startContainer(self, element):
+ return self.FILTER_ACCEPT
+
+del NodeFilter
+
+
+class DocumentLS:
+ """Mixin to create documents that conform to the load/save spec."""
+
+ async = False
+
+ def _get_async(self):
+ return False
+ def _set_async(self, async):
+ if async:
+ raise xml.dom.NotSupportedErr(
+ "asynchronous document loading is not supported")
+
+ def abort(self):
+ # What does it mean to "clear" a document? Does the
+ # documentElement disappear?
+ raise NotImplementedError(
+ "haven't figured out what this means yet")
+
+ def load(self, uri):
+ raise NotImplementedError("haven't written this yet")
+
+ def loadXML(self, source):
+ raise NotImplementedError("haven't written this yet")
+
+ def saveXML(self, snode):
+ if snode is None:
+ snode = self
+ elif snode.ownerDocument is not self:
+ raise xml.dom.WrongDocumentErr()
+ return snode.toxml()
+
+
+class DOMImplementationLS:
+ MODE_SYNCHRONOUS = 1
+ MODE_ASYNCHRONOUS = 2
+
+ def createDOMBuilder(self, mode, schemaType):
+ if schemaType is not None:
+ raise xml.dom.NotSupportedErr(
+ "schemaType not yet supported")
+ if mode == self.MODE_SYNCHRONOUS:
+ return DOMBuilder()
+ if mode == self.MODE_ASYNCHRONOUS:
+ raise xml.dom.NotSupportedErr(
+ "asynchronous builders are not supported")
+ raise ValueError("unknown value for mode")
+
+ def createDOMWriter(self):
+ raise NotImplementedError(
+ "the writer interface hasn't been written yet!")
+
+ def createDOMInputSource(self):
+ return DOMInputSource()
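+
+
+# A minimal usage sketch (illustration only, not part of the module): build a
+# synchronous DOMBuilder, disable comment nodes, and parse from an in-memory
+# byte stream via a DOMInputSource. The XML snippet is a made-up example.
+if __name__ == "__main__":
+    import io
+
+    impl = DOMImplementationLS()
+    builder = impl.createDOMBuilder(DOMImplementationLS.MODE_SYNCHRONOUS, None)
+    builder.setFeature("comments", 0)  # skip comment nodes while building
+    source = impl.createDOMInputSource()
+    source.byteStream = io.BytesIO(b"<root><!-- dropped --><child/></root>")
+    doc = builder.parse(source)
+    print(doc.documentElement.tagName)  # -> root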
+
+# -*- coding: utf-8 -*-
+
+"""
+***************************************************************************
+ doPctRgb.py
+ ---------------------
+ Date : June 2010
+ Copyright : (C) 2010 by Giuseppe Sucameli
+ Email : brush dot tyler at gmail dot com
+***************************************************************************
+* *
+* This program is free software; you can redistribute it and/or modify *
+* it under the terms of the GNU General Public License as published by *
+* the Free Software Foundation; either version 2 of the License, or *
+* (at your option) any later version. *
+* *
+***************************************************************************
+"""
+
+__author__ = 'Giuseppe Sucameli'
+__date__ = 'June 2010'
+__copyright__ = '(C) 2010, Giuseppe Sucameli'
+# This will get replaced with a git SHA1 when you do a git archive
+__revision__ = '$Format:%H$'
+
+from PyQt4.QtCore import QObject, SIGNAL, QCoreApplication
+from PyQt4.QtGui import QWidget
+
+from ui_widgetConvert import Ui_GdalToolsWidget as Ui_Widget
+from widgetBatchBase import GdalToolsBaseBatchWidget as BaseBatchWidget
+import GdalTools_utils as Utils
+
+class GdalToolsDialog(QWidget, Ui_Widget, BaseBatchWidget):
+
+ def __init__(self, iface):
+ QWidget.__init__(self)
+ self.iface = iface
+
+ self.setupUi(self)
+ BaseBatchWidget.__init__(self, self.iface, "pct2rgb.py")
+
+ # we use one widget for two tools
+ self.base.setWindowTitle( self.tr( "Convert paletted image to RGB" ) )
+
+ self.outSelector.setType( self.outSelector.FILE )
+
+ # set the default QSpinBoxes and QProgressBar value
+ self.bandSpin.setValue(1)
+ self.progressBar.setValue(0)
+
+ self.progressBar.hide()
+ self.outputFormat = Utils.fillRasterOutputFormat()
+
+ self.setParamsStatus([
+ (self.inSelector, SIGNAL("filenameChanged()")),
+ (self.outSelector, SIGNAL("filenameChanged()")),
+ (self.colorsSpin, SIGNAL("valueChanged(int)"), self.colorsCheck, "-1"), # hide this option
+ (self.bandSpin, SIGNAL("valueChanged(int)"), self.bandCheck)
+ ])
+
+ self.connect(self.inSelector, SIGNAL("selectClicked()"), self.fillInputFile)
+ self.connect(self.outSelector, SIGNAL("selectClicked()"), self.fillOutputFileEdit)
+ self.connect( self.batchCheck, SIGNAL( "stateChanged( int )" ), self.switchToolMode )
+
+
+ # switch to batch or normal mode
+ def switchToolMode( self ):
+ self.setCommandViewerEnabled( not self.batchCheck.isChecked() )
+ self.progressBar.setVisible( self.batchCheck.isChecked() )
+
+ self.inSelector.setType( self.inSelector.FILE if self.batchCheck.isChecked() else self.inSelector.FILE_LAYER )
+ self.outSelector.clear()
+
+ if self.batchCheck.isChecked():
+ self.inFileLabel = self.label.text()
+ self.outFileLabel = self.label_2.text()
+ self.label.setText( QCoreApplication.translate( "GdalTools", "&Input directory" ) )
+ self.label_2.setText( QCoreApplication.translate( "GdalTools", "&Output directory" ) )
+
+ QObject.disconnect( self.inSelector, SIGNAL( "selectClicked()" ), self.fillInputFile )
+ QObject.disconnect( self.outSelector, SIGNAL( "selectClicked()" ), self.fillOutputFileEdit )
+
+ QObject.connect( self.inSelector, SIGNAL( "selectClicked()" ), self.fillInputDir )
+ QObject.connect( self.outSelector, SIGNAL( "selectClicked()" ), self.fillOutputDir )
+ else:
+ self.label.setText( self.inFileLabel )
+ self.label_2.setText( self.outFileLabel )
+
+ QObject.disconnect( self.inSelector, SIGNAL( "selectClicked()" ), self.fillInputDir )
+ QObject.disconnect( self.outSelector, SIGNAL( "selectClicked()" ), self.fillOutputDir )
+
+ QObject.connect( self.inSelector, SIGNAL( "selectClicked()" ), self.fillInputFile )
+ QObject.connect( self.outSelector, SIGNAL( "selectClicked()" ), self.fillOutputFileEdit )
+
+ def onLayersChanged(self):
+ self.inSelector.setLayers( Utils.LayerRegistry.instance().getRasterLayers() )
+
+ def fillInputFile(self):
+ lastUsedFilter = Utils.FileFilter.lastUsedRasterFilter()
+ inputFile = Utils.FileDialog.getOpenFileName(self, self.tr( "Select the input file to convert" ), Utils.FileFilter.allRastersFilter(), lastUsedFilter )
+ if not inputFile:
+ return
+ Utils.FileFilter.setLastUsedRasterFilter(lastUsedFilter)
+ self.inSelector.setFilename(inputFile)
+
+ def fillOutputFileEdit(self):
+ lastUsedFilter = Utils.FileFilter.lastUsedRasterFilter()
+ outputFile = Utils.FileDialog.getSaveFileName(self, self.tr( "Select the raster file to save the results to" ), Utils.FileFilter.saveRastersFilter(), lastUsedFilter )
+ if not outputFile:
+ return
+ Utils.FileFilter.setLastUsedRasterFilter(lastUsedFilter)
+
+ self.outputFormat = Utils.fillRasterOutputFormat( lastUsedFilter, outputFile )
+ self.outSelector.setFilename(outputFile)
+
+ def fillInputDir( self ):
+ inputDir = Utils.FileDialog.getExistingDirectory( self, self.tr( "Select the input directory with files to convert" ))
+ if not inputDir:
+ return
+ self.inSelector.setFilename( inputDir )
+
+ def fillOutputDir( self ):
+ outputDir = Utils.FileDialog.getExistingDirectory( self, self.tr( "Select the output directory to save the results to" ))
+ if not outputDir:
+ return
+ self.outSelector.setFilename( outputDir )
+
+ def getArguments(self):
+ arguments = []
+ if self.bandCheck.isChecked():
+ arguments.append( "-b")
+ arguments.append( unicode( self.bandSpin.value() ))
+ if self.isBatchEnabled():
+ return arguments
+
+ outputFn = self.getOutputFileName()
+ if outputFn:
+ arguments.append( "-of")
+ arguments.append( self.outputFormat)
+ arguments.append( self.getInputFileName())
+ arguments.append( outputFn)
+ return arguments
+
+ def getInputFileName(self):
+ return self.inSelector.filename()
+
+ def getOutputFileName(self):
+ return self.outSelector.filename()
+
+ def addLayerIntoCanvas(self, fileInfo):
+ self.iface.addRasterLayer(fileInfo.filePath())
+
+ def isBatchEnabled(self):
+ return self.batchCheck.isChecked()
+
+ def setProgressRange(self, maximum):
+ self.progressBar.setRange(0, maximum)
+
+ def updateProgress(self, index, total):
+ if index < total:
+ self.progressBar.setValue( index + 1 )
+ else:
+ self.progressBar.setValue( 0 )
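+
+# Illustration only (not executed): with the band spin set to 2, an input of
+# "paletted.tif" and an output of "rgb.tif" in GeoTIFF format, getArguments()
+# above assembles the pct2rgb.py call roughly as
+#     ["-b", "2", "-of", "GTiff", "paletted.tif", "rgb.tif"]
+# The filenames and output format are hypothetical examples.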
+
+#! /usr/bin/env python
+# -*- coding:Utf8 -*-
+
+## This variant uses a list of lists ##
+## (which could easily be replaced by two separate lists)
+
+# The list below contains two elements that are themselves lists.
+# Element 0 holds the number of days in each month, while
+# element 1 holds the names of the twelve months:
+mois = [[31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31],
+ ['Janvier', 'Février', 'Mars', 'Avril', 'Mai', 'Juin', 'Juillet',
+ 'Août', 'Septembre', 'Octobre', 'Novembre', 'Décembre']]
+
+jour = ['Dimanche','Lundi','Mardi','Mercredi','Jeudi','Vendredi','Samedi']
+
+ja, jm, js, m = 0, 0, 0, 0
+
+while ja <365:
+ ja, jm = ja +1, jm +1 # ja = day of the year, jm = day of the month
+ js = (ja +3) % 7 # js = day of the week. The added offset
+ # lets us choose the starting day of the week
+
+ if jm > mois[0][m]: # element m of element 0 of the list
+ jm, m = 1, m+1
+
+ print(jour[js], jm, mois[1][m]) # element m of element 1 of the list
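+
+# Worked example (illustration only): on the first pass ja == 1, so
+# js == (1 + 3) % 7 == 4 and the script prints "Jeudi 1 Janvier"; the offset
+# of 3 makes this calendar start the year on a Thursday.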
+
+#!/usr/bin/env python
+
+"""
+@package mi.dataset.parser.test.test_flobn_cm_subcon
+@fid marine-integrations/mi/dataset/parser/test/test_flobn_cm_subcon.py
+@author Rachel Manoni
+@brief Test code for FLOBN-CM data parser
+"""
+from mi.dataset.parser.flobn_cm_subcon import FlobnMSubconTemperatureParser, FlobnCSubconParser, FlobnMSubconParser
+
+__author__ = 'Rachel Manoni'
+
+import os
+from mi.core.log import get_logger
+log = get_logger()
+from nose.plugins.attrib import attr
+from mi.dataset.dataset_parser import DataSetDriverConfigKeys
+from mi.dataset.driver.flobn.resource import RESOURCE_PATH
+from mi.dataset.test.test_parser import ParserUnitTestCase
+
+
+TEMPERATURE_LOG_FILE = 'FLOBN-M_Temp_Record_ver_0-05.csv'
+TEMPERATURE_YAML_FILE = 'FLOBN-M_Temp_Record_ver_0-05.yml'
+INVALID_TEMPERATURE_DATA_FILE = 'FLOBN-M_Temp_Record_bad.csv'
+TEMPERATURE_RECORDS = 242
+
+C_LOG_FILE = 'FLOBN-C_Sample_Record_ver_0-05.csv'
+C_YAML_FILE = 'FLOBN-C_Sample_Record_ver_0-05.yml'
+INVALID_C_DATA_FILE = 'FLOBN-C_Sample_Record_bad.csv'
+C_RECORDS = 168
+
+M_LOG_FILE = 'FLOBN-M_Sample_Record_ver_0-05.csv'
+M_YAML_FILE = 'FLOBN-M_Sample_Record_ver_0-05.yml'
+INVALID_M_DATA_FILE = 'FLOBN-M_Sample_Record_bad.csv'
+M_RECORDS = 1008
+
+
+@attr('UNIT', group='mi')
+class FlobnCmSubconParserUnitTestCase(ParserUnitTestCase):
+ """
+ flobn_cm_subcon Parser unit test suite
+ """
+
+ def setUp(self):
+ ParserUnitTestCase.setUp(self)
+
+ self.rec_config = {
+ DataSetDriverConfigKeys.PARTICLE_MODULE: 'mi.dataset.parser.flobn_cm_subcon',
+ DataSetDriverConfigKeys.PARTICLE_CLASS: None
+ }
+
+ def open_file(self, filename):
+ return open(os.path.join(RESOURCE_PATH, filename), mode='r')
+
+ def open_file_write(self, filename):
+ return open(os.path.join(RESOURCE_PATH, filename), mode='w')
+
+ def create_temp_rec_parser(self, file_handle):
+ return FlobnMSubconTemperatureParser(self.rec_config, file_handle, self.exception_callback)
+
+ def create_c_parser(self, file_handle):
+ return FlobnCSubconParser(self.rec_config, file_handle, self.exception_callback)
+
+ def create_m_parser(self, file_handle):
+ return FlobnMSubconParser(self.rec_config, file_handle, self.exception_callback)
+
+ def create_yml_file(self, input_file, output_file, number_samples):
+ """
+ Create a yml file corresponding to an actual recovered dataset. This is not an actual test - it allows
+ us to create what we need for integration testing, i.e. a yml file.
+ """
+ in_file = self.open_file(input_file)
+ parser = self.create_c_parser(in_file)
+ log.debug("Getting records...")
+ result = parser.get_records(number_samples)
+ log.debug("Done.")
+ self.particle_to_yml(result, output_file)
+ log.debug("File written")
+
+ def particle_to_yml(self, particles, filename):
+ """
+ This is added as a testing helper, not actually as part of the parser tests. Since the same particles
+ will be used for the driver test it is helpful to write them to .yml in the same form they need in the
+ results.yml here.
+ """
+ fid = self.open_file_write(filename)
+ fid.write('header:\n')
+ fid.write(" particle_object: 'MULTIPLE'\n")
+ fid.write(" particle_type: 'MULTIPLE'\n")
+ fid.write('data:\n')
+ for i in range(0, len(particles)):
+ particle_dict = particles[i].generate_dict()
+ fid.write(' - _index: %d\n' % (i + 1))
+ fid.write(' particle_object: %s\n' % particles[i].__class__.__name__)
+ fid.write(' particle_type: %s\n' % particle_dict.get('stream_name'))
+ fid.write(' internal_timestamp: %f\n' % particle_dict.get('internal_timestamp'))
+ for val in particle_dict.get('values'):
+ if isinstance(val.get('value'), float):
+ fid.write(' %s: %f\n' % (val.get('value_id'), val.get('value')))
+ elif isinstance(val.get('value'), str):
+ fid.write(" %s: '%s'\n" % (val.get('value_id'), val.get('value')))
+ else:
+ fid.write(' %s: %s\n' % (val.get('value_id'), val.get('value')))
+ fid.close()
+
+ def test_subcon_m_record_invalid_data(self):
+ """
+ Read data from a file containing invalid data.
+ Verify that no particles are created and the correct number of exceptions are detected.
+ """
+ log.debug('===== START TEST INVALID SENSOR DATA =====')
+ in_file = self.open_file(INVALID_M_DATA_FILE)
+ parser = self.create_m_parser(in_file)
+
+ # Try to get records and verify that none are returned.
+ # Input file's records contain all invalid samples
+ result = parser.get_records(1)
+ self.assertEqual(result, [])
+
+ in_file.close()
+ log.debug('===== END TEST INVALID SENSOR DATA =====')
+
+ def test_verify_subcon_m_record_against_yaml(self):
+ """
+ Read data from a file and pull out data particles
+ one at a time. Verify that the results are those we expected.
+ """
+ log.debug('===== START YAML TEST =====')
+ in_file = self.open_file(M_LOG_FILE)
+ parser = self.create_m_parser(in_file)
+
+ #uncomment to create yml results file
+ #self.create_yml_file(M_LOG_FILE, M_YAML_FILE, M_RECORDS)
+
+ result = parser.get_records(M_RECORDS)
+ self.assert_particles(result, M_YAML_FILE, RESOURCE_PATH)
+
+ in_file.close()
+ self.assertListEqual(self.exception_callback_value, [])
+ log.debug('===== END YAML TEST =====')
+
+ def test_subcon_c_record_invalid_data(self):
+ """
+ Read data from a file containing invalid data.
+ Verify that no particles are created and the correct number of exceptions are detected.
+ """
+ log.debug('===== START TEST INVALID SENSOR DATA =====')
+ in_file = self.open_file(INVALID_C_DATA_FILE)
+ parser = self.create_c_parser(in_file)
+
+ # Try to get records and verify that none are returned.
+ # Input file's records contain all invalid samples
+ result = parser.get_records(1)
+ self.assertEqual(result, [])
+
+ in_file.close()
+ log.debug('===== END TEST INVALID SENSOR DATA =====')
+
+ def test_verify_subcon_c_record_against_yaml(self):
+ """
+ Read data from a file and pull out data particles
+ one at a time. Verify that the results are those we expected.
+ """
+ log.debug('===== START YAML TEST =====')
+ in_file = self.open_file(C_LOG_FILE)
+ parser = self.create_c_parser(in_file)
+
+ #uncomment to create yml results file
+ #self.create_yml_file(C_LOG_FILE, C_YAML_FILE, C_RECORDS)
+
+ result = parser.get_records(C_RECORDS)
+ self.assert_particles(result, C_YAML_FILE, RESOURCE_PATH)
+
+ in_file.close()
+ self.assertListEqual(self.exception_callback_value, [])
+ log.debug('===== END YAML TEST =====')
+
+ def test_temp_record_invalid_data(self):
+ """
+ Read data from a file containing invalid data.
+ Verify that no particles are created and the correct number of exceptions are detected.
+ """
+ log.debug('===== START TEST INVALID SENSOR DATA =====')
+ in_file = self.open_file(INVALID_TEMPERATURE_DATA_FILE)
+ parser = self.create_temp_rec_parser(in_file)
+
+ # Try to get records and verify that none are returned.
+ # Input file's records contain all invalid samples
+ result = parser.get_records(1)
+ self.assertEqual(result, [])
+
+ in_file.close()
+ log.debug('===== END TEST INVALID SENSOR DATA =====')
+
+ def test_verify_temp_record_against_yaml(self):
+ """
+ Read data from a file and pull out data particles
+ one at a time. Verify that the results are those we expected.
+ """
+ log.debug('===== START YAML TEST =====')
+ in_file = self.open_file(TEMPERATURE_LOG_FILE)
+ parser = self.create_temp_rec_parser(in_file)
+
+ #uncomment to create yml results file
+ #self.create_yml_file(TEMPERATURE_LOG_FILE, TEMPERATURE_YAML_FILE, TEMPERATURE_RECORDS)
+
+ result = parser.get_records(TEMPERATURE_RECORDS)
+ self.assert_particles(result, TEMPERATURE_YAML_FILE, RESOURCE_PATH)
+
+ in_file.close()
+ self.assertListEqual(self.exception_callback_value, [])
+ log.debug('===== END YAML TEST =====')
+# -*- test-case-name: twisted.test.test_tcp -*-
+# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
+# See LICENSE for details.
+
+
+
+"""Various asynchronous TCP/IP classes.
+
+End users shouldn't use this module directly - use the reactor APIs instead.
+
+Maintainer: U{Itamar Shtull-Trauring}
+"""
+
+
+# System Imports
+import os
+import stat
+import types
+import exceptions
+import socket
+import sys
+import select
+import operator
+import warnings
+
+try:
+ import fcntl
+except ImportError:
+ fcntl = None
+from zope.interface import implements, classImplements
+
+try:
+ from OpenSSL import SSL
+except ImportError:
+ SSL = None
+
+from twisted.python.runtime import platform, platformType
+
+
+if platformType == 'win32':
+ # no such thing as WSAEPERM or error code 10001 according to winsock.h or MSDN
+ EPERM = object()
+ from errno import WSAEINVAL as EINVAL
+ from errno import WSAEWOULDBLOCK as EWOULDBLOCK
+ from errno import WSAEINPROGRESS as EINPROGRESS
+ from errno import WSAEALREADY as EALREADY
+ from errno import WSAECONNRESET as ECONNRESET
+ from errno import WSAEISCONN as EISCONN
+ from errno import WSAENOTCONN as ENOTCONN
+ from errno import WSAEINTR as EINTR
+ from errno import WSAENOBUFS as ENOBUFS
+ from errno import WSAEMFILE as EMFILE
+ # No such thing as WSAENFILE, either.
+ ENFILE = object()
+ # Nor ENOMEM
+ ENOMEM = object()
+ EAGAIN = EWOULDBLOCK
+ from errno import WSAECONNRESET as ECONNABORTED
+else:
+ from errno import EPERM
+ from errno import EINVAL
+ from errno import EWOULDBLOCK
+ from errno import EINPROGRESS
+ from errno import EALREADY
+ from errno import ECONNRESET
+ from errno import EISCONN
+ from errno import ENOTCONN
+ from errno import EINTR
+ from errno import ENOBUFS
+ from errno import EMFILE
+ from errno import ENFILE
+ from errno import ENOMEM
+ from errno import EAGAIN
+ from errno import ECONNABORTED
+
+from errno import errorcode
+
+# Twisted Imports
+from twisted.internet import protocol, defer, base, address
+from twisted.persisted import styles
+from twisted.python import log, failure, reflect
+from twisted.python.util import unsignedID
+from twisted.internet.error import CannotListenError
+
+# Sibling Imports
+import abstract
+import main
+import interfaces
+import error
+
+class _SocketCloser:
+ _socketShutdownMethod = 'shutdown'
+
+ def _closeSocket(self):
+ # socket.close() doesn't *really* close if there's another reference
+ # to it in the TCP/IP stack, e.g. if it was inherited by a
+ # subprocess. And we really do want to close the connection. So we
+ # use shutdown() instead, and then close() in order to release the
+ # filedescriptor.
+ skt = self.socket
+ try:
+ getattr(skt, self._socketShutdownMethod)(2)
+ except socket.error:
+ pass
+ try:
+ skt.close()
+ except socket.error:
+ pass
+
+class _TLSMixin:
+ _socketShutdownMethod = 'sock_shutdown'
+
+ writeBlockedOnRead = 0
+ readBlockedOnWrite = 0
+ _userWantRead = _userWantWrite = True
+
+ def getPeerCertificate(self):
+ return self.socket.get_peer_certificate()
+
+ def doRead(self):
+ if self.writeBlockedOnRead:
+ self.writeBlockedOnRead = 0
+ self._resetReadWrite()
+ try:
+ return Connection.doRead(self)
+ except SSL.ZeroReturnError:
+ return main.CONNECTION_DONE
+ except SSL.WantReadError:
+ return
+ except SSL.WantWriteError:
+ self.readBlockedOnWrite = 1
+ Connection.startWriting(self)
+ Connection.stopReading(self)
+ return
+ except SSL.SysCallError, (retval, desc):
+ if ((retval == -1 and desc == 'Unexpected EOF')
+ or retval > 0):
+ return main.CONNECTION_LOST
+ log.err()
+ return main.CONNECTION_LOST
+ except SSL.Error, e:
+ return e
+
+ def doWrite(self):
+ # Retry disconnecting
+ if self.disconnected:
+ return self._postLoseConnection()
+ if self._writeDisconnected:
+ return self._closeWriteConnection()
+
+ if self.readBlockedOnWrite:
+ self.readBlockedOnWrite = 0
+ self._resetReadWrite()
+ return Connection.doWrite(self)
+
+ def writeSomeData(self, data):
+ try:
+ return Connection.writeSomeData(self, data)
+ except SSL.WantWriteError:
+ return 0
+ except SSL.WantReadError:
+ self.writeBlockedOnRead = 1
+ Connection.stopWriting(self)
+ Connection.startReading(self)
+ return 0
+ except SSL.ZeroReturnError:
+ return main.CONNECTION_LOST
+ except SSL.SysCallError, e:
+ if e[0] == -1 and data == "":
+ # errors when writing empty strings are expected
+ # and can be ignored
+ return 0
+ else:
+ return main.CONNECTION_LOST
+ except SSL.Error, e:
+ return e
+
+ def _postLoseConnection(self):
+ """Gets called after loseConnection(), after buffered data is sent.
+
+ We try to send an SSL shutdown alert, but if it doesn't work, retry
+ when the socket is writable.
+ """
+ self.disconnected=1
+ if hasattr(self.socket, 'set_shutdown'):
+ self.socket.set_shutdown(SSL.RECEIVED_SHUTDOWN)
+ return self._sendCloseAlert()
+
+ _first=False
+ def _sendCloseAlert(self):
+ # Okay, *THIS* is a bit complicated.
+
+ # Basically, the issue is, OpenSSL seems to not actually return
+ # errors from SSL_shutdown. Therefore, the only way to
+ # determine if the close notification has been sent is by
+ # SSL_shutdown returning "done". However, it will not claim it's
+ # done until it's both sent *and* received a shutdown notification.
+
+ # I don't actually want to wait for a received shutdown
+ # notification, though, so, I have to set RECEIVED_SHUTDOWN
+ # before calling shutdown. Then, it'll return True once it's
+ # *SENT* the shutdown.
+
+ # However, RECEIVED_SHUTDOWN can't be left set, because then
+ # reads will fail, breaking half close.
+
+ # Also, since shutdown doesn't report errors, an empty write call is
+ # done first, to try to detect if the connection has gone away.
+ # (*NOT* an SSL_write call, because that fails once you've called
+ # shutdown)
+ try:
+ os.write(self.socket.fileno(), '')
+ except OSError, se:
+ if se.args[0] in (EINTR, EWOULDBLOCK, ENOBUFS):
+ return 0
+ # Write error, socket gone
+ return main.CONNECTION_LOST
+
+ try:
+ if hasattr(self.socket, 'set_shutdown'):
+ laststate = self.socket.get_shutdown()
+ self.socket.set_shutdown(laststate | SSL.RECEIVED_SHUTDOWN)
+ done = self.socket.shutdown()
+ if not (laststate & SSL.RECEIVED_SHUTDOWN):
+ self.socket.set_shutdown(SSL.SENT_SHUTDOWN)
+ else:
+ #warnings.warn("SSL connection shutdown possibly unreliable, "
+ # "please upgrade to ver 0.XX", category=UserWarning)
+ self.socket.shutdown()
+ done = True
+ except SSL.Error, e:
+ return e
+
+ if done:
+ self.stopWriting()
+ # Note that this is tested for by identity below.
+ return main.CONNECTION_DONE
+ else:
+ self.startWriting()
+ return None
+
+ def _closeWriteConnection(self):
+ result = self._sendCloseAlert()
+
+ if result is main.CONNECTION_DONE:
+ return Connection._closeWriteConnection(self)
+
+ return result
+
+ def startReading(self):
+ self._userWantRead = True
+ if not self.readBlockedOnWrite:
+ return Connection.startReading(self)
+
+ def stopReading(self):
+ self._userWantRead = False
+ if not self.writeBlockedOnRead:
+ return Connection.stopReading(self)
+
+ def startWriting(self):
+ self._userWantWrite = True
+ if not self.writeBlockedOnRead:
+ return Connection.startWriting(self)
+
+ def stopWriting(self):
+ self._userWantWrite = False
+ if not self.readBlockedOnWrite:
+ return Connection.stopWriting(self)
+
+ def _resetReadWrite(self):
+ # After changing readBlockedOnWrite or writeBlockedOnRead,
+ # call this to reset the state to what the user requested.
+ if self._userWantWrite:
+ self.startWriting()
+ else:
+ self.stopWriting()
+
+ if self._userWantRead:
+ self.startReading()
+ else:
+ self.stopReading()
+
+def _getTLSClass(klass, _existing={}):
+ if klass not in _existing:
+ class TLSConnection(_TLSMixin, klass):
+ implements(interfaces.ISSLTransport)
+ _existing[klass] = TLSConnection
+ return _existing[klass]
+
+class Connection(abstract.FileDescriptor, _SocketCloser):
+ """I am the superclass of all socket-based FileDescriptors.
+
+ This is an abstract superclass of all objects which represent a TCP/IP
+ connection based socket.
+ """
+
+ implements(interfaces.ITCPTransport, interfaces.ISystemHandle)
+
+ TLS = 0
+
+ def __init__(self, skt, protocol, reactor=None):
+ abstract.FileDescriptor.__init__(self, reactor=reactor)
+ self.socket = skt
+ self.socket.setblocking(0)
+ self.fileno = skt.fileno
+ self.protocol = protocol
+
+ if SSL:
+
+ def startTLS(self, ctx):
+ assert not self.TLS
+ error=False
+ if self.dataBuffer or self._tempDataBuffer:
+ self.dataBuffer += "".join(self._tempDataBuffer)
+ self._tempDataBuffer = []
+ self._tempDataLen = 0
+ written = self.writeSomeData(buffer(self.dataBuffer, self.offset))
+ offset = self.offset
+ dataLen = len(self.dataBuffer)
+ self.offset = 0
+ self.dataBuffer = ""
+ if isinstance(written, Exception) or (offset + written != dataLen):
+ error=True
+
+
+ self.stopReading()
+ self.stopWriting()
+ self._startTLS()
+ self.socket = SSL.Connection(ctx.getContext(), self.socket)
+ self.fileno = self.socket.fileno
+ self.startReading()
+ if error:
+ warnings.warn("startTLS with unwritten buffered data currently doesn't work right. See issue #686. Closing connection.", category=RuntimeWarning, stacklevel=2)
+ self.loseConnection()
+ return
+
+ def _startTLS(self):
+ self.TLS = 1
+ self.__class__ = _getTLSClass(self.__class__)
+
+ def getHandle(self):
+ """Return the socket for this connection."""
+ return self.socket
+
+ def doRead(self):
+ """Calls self.protocol.dataReceived with all available data.
+
+ This reads up to self.bufferSize bytes of data from its socket, then
+ calls self.protocol.dataReceived(data) to process it. If the connection is not
+ lost through an error in the physical recv(), this function will return
+ the result of the dataReceived call.
+ """
+ try:
+ data = self.socket.recv(self.bufferSize)
+ except socket.error, se:
+ if se.args[0] == EWOULDBLOCK:
+ return
+ else:
+ return main.CONNECTION_LOST
+ if not data:
+ return main.CONNECTION_DONE
+ return self.protocol.dataReceived(data)
+
+ def writeSomeData(self, data):
+ """Connection.writeSomeData(data) -> #of bytes written | CONNECTION_LOST
+ This writes as much data as possible to the socket and returns either
+ the number of bytes written (which is positive) or a connection error code
+ (which is negative)
+ """
+ try:
+ # Limit length of buffer to try to send, because some OSes are too
+ # stupid to do so themselves (ahem windows)
+ return self.socket.send(buffer(data, 0, self.SEND_LIMIT))
+ except socket.error, se:
+ if se.args[0] == EINTR:
+ return self.writeSomeData(data)
+ elif se.args[0] in (EWOULDBLOCK, ENOBUFS):
+ return 0
+ else:
+ return main.CONNECTION_LOST
+
+ def _closeWriteConnection(self):
+ try:
+ getattr(self.socket, self._socketShutdownMethod)(1)
+ except socket.error:
+ pass
+ p = interfaces.IHalfCloseableProtocol(self.protocol, None)
+ if p:
+ try:
+ p.writeConnectionLost()
+ except:
+ f = failure.Failure()
+ log.err()
+ self.connectionLost(f)
+
+ def readConnectionLost(self, reason):
+ p = interfaces.IHalfCloseableProtocol(self.protocol, None)
+ if p:
+ try:
+ p.readConnectionLost()
+ except:
+ log.err()
+ self.connectionLost(failure.Failure())
+ else:
+ self.connectionLost(reason)
+
+ def connectionLost(self, reason):
+ """See abstract.FileDescriptor.connectionLost().
+ """
+ abstract.FileDescriptor.connectionLost(self, reason)
+ self._closeSocket()
+ protocol = self.protocol
+ del self.protocol
+ del self.socket
+ del self.fileno
+ protocol.connectionLost(reason)
+
+ logstr = "Uninitialized"
+
+ def logPrefix(self):
+ """Return the prefix to log with when I own the logging thread.
+ """
+ return self.logstr
+
+ def getTcpNoDelay(self):
+ return operator.truth(self.socket.getsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY))
+
+ def setTcpNoDelay(self, enabled):
+ self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, enabled)
+
+ def getTcpKeepAlive(self):
+ return operator.truth(self.socket.getsockopt(socket.SOL_SOCKET,
+ socket.SO_KEEPALIVE))
+
+ def setTcpKeepAlive(self, enabled):
+ self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, enabled)
+
+if SSL:
+ classImplements(Connection, interfaces.ITLSTransport)
+
+class BaseClient(Connection):
+ """A base class for client TCP (and similiar) sockets.
+ """
+ addressFamily = socket.AF_INET
+ socketType = socket.SOCK_STREAM
+
+ def _finishInit(self, whenDone, skt, error, reactor):
+ """Called by base classes to continue to next stage of initialization."""
+ if whenDone:
+ Connection.__init__(self, skt, None, reactor)
+ self.doWrite = self.doConnect
+ self.doRead = self.doConnect
+ reactor.callLater(0, whenDone)
+ else:
+ reactor.callLater(0, self.failIfNotConnected, error)
+
+ def startTLS(self, ctx, client=1):
+ holder = Connection.startTLS(self, ctx)
+ if client:
+ self.socket.set_connect_state()
+ else:
+ self.socket.set_accept_state()
+ return holder
+
+ def stopConnecting(self):
+ """Stop attempt to connect."""
+ self.failIfNotConnected(error.UserError())
+
+ def failIfNotConnected(self, err):
+ """
+ Generic method called when the attempt to connect failed. It basically
+ cleans up everything it can: calls connectionFailed, stops reading and
+ writing, and deletes socket-related members.
+ """
+ if (self.connected or self.disconnected or
+ not hasattr(self, "connector")):
+ return
+
+ self.connector.connectionFailed(failure.Failure(err))
+ if hasattr(self, "reactor"):
+ # this doesn't happen if we failed in __init__
+ self.stopReading()
+ self.stopWriting()
+ del self.connector
+
+ try:
+ self._closeSocket()
+ except AttributeError:
+ pass
+ else:
+ del self.socket, self.fileno
+
+ def createInternetSocket(self):
+ """(internal) Create a non-blocking socket using
+ self.addressFamily, self.socketType.
+ """
+ s = socket.socket(self.addressFamily, self.socketType)
+ s.setblocking(0)
+ if fcntl and hasattr(fcntl, 'FD_CLOEXEC'):
+ old = fcntl.fcntl(s.fileno(), fcntl.F_GETFD)
+ fcntl.fcntl(s.fileno(), fcntl.F_SETFD, old | fcntl.FD_CLOEXEC)
+ return s
+
+ def resolveAddress(self):
+ if abstract.isIPAddress(self.addr[0]):
+ self._setRealAddress(self.addr[0])
+ else:
+ d = self.reactor.resolve(self.addr[0])
+ d.addCallbacks(self._setRealAddress, self.failIfNotConnected)
+
+ def _setRealAddress(self, address):
+ self.realAddress = (address, self.addr[1])
+ self.doConnect()
+
+ def doConnect(self):
+ """I connect the socket.
+
+ Then, call the protocol's makeConnection, and start waiting for data.
+ """
+ if not hasattr(self, "connector"):
+ # this happens when connection failed but doConnect
+ # was scheduled via a callLater in self._finishInit
+ return
+
+ err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
+ if err:
+ self.failIfNotConnected(error.getConnectError((err, os.strerror(err))))
+ return
+
+
+ # doConnect gets called twice. The first time we actually need to
+ # start the connection attempt. The second time we don't really
+ # want to (SO_ERROR above will have taken care of any errors, and if
+ # it reported none, the mere fact that doConnect was called again is
+ # sufficient to indicate that the connection has succeeded), but it
+ # is not /particularly/ detrimental to do so. This should get
+ # cleaned up some day, though.
+ try:
+ connectResult = self.socket.connect_ex(self.realAddress)
+ except socket.error, se:
+ connectResult = se.args[0]
+ if connectResult:
+ if connectResult == EISCONN:
+ pass
+ # on Windows EINVAL means sometimes that we should keep trying:
+ # http://msdn.microsoft.com/library/default.asp?url=/library/en-us/winsock/winsock/connect_2.asp
+ elif ((connectResult in (EWOULDBLOCK, EINPROGRESS, EALREADY)) or
+ (connectResult == EINVAL and platformType == "win32")):
+ self.startReading()
+ self.startWriting()
+ return
+ else:
+ self.failIfNotConnected(error.getConnectError((connectResult, os.strerror(connectResult))))
+ return
+
+ # If I have reached this point without raising or returning, that means
+ # that the socket is connected.
+ del self.doWrite
+ del self.doRead
+ # we first stop and then start, to reset any references to the old doRead
+ self.stopReading()
+ self.stopWriting()
+ self._connectDone()
+
+ def _connectDone(self):
+ self.protocol = self.connector.buildProtocol(self.getPeer())
+ self.connected = 1
+ self.protocol.makeConnection(self)
+ self.logstr = self.protocol.__class__.__name__+",client"
+ self.startReading()
+
+ def connectionLost(self, reason):
+ if not self.connected:
+ self.failIfNotConnected(error.ConnectError(string=reason))
+ else:
+ Connection.connectionLost(self, reason)
+ self.connector.connectionLost(reason)
+
+
+class Client(BaseClient):
+ """A TCP client."""
+
+ def __init__(self, host, port, bindAddress, connector, reactor=None):
+ # BaseClient.__init__ is invoked later
+ self.connector = connector
+ self.addr = (host, port)
+
+ whenDone = self.resolveAddress
+ err = None
+ skt = None
+
+ try:
+ skt = self.createInternetSocket()
+ except socket.error, se:
+ err = error.ConnectBindError(se[0], se[1])
+ whenDone = None
+ if whenDone and bindAddress is not None:
+ try:
+ skt.bind(bindAddress)
+ except socket.error, se:
+ err = error.ConnectBindError(se[0], se[1])
+ whenDone = None
+ self._finishInit(whenDone, skt, err, reactor)
+
+ def getHost(self):
+ """Returns an IPv4Address.
+
+ This indicates the address from which I am connecting.
+ """
+ return address.IPv4Address('TCP', *(self.socket.getsockname() + ('INET',)))
+
+ def getPeer(self):
+ """Returns an IPv4Address.
+
+ This indicates the address that I am connected to.
+ """
+ return address.IPv4Address('TCP', *(self.addr + ('INET',)))
+
+ def __repr__(self):
+ s = '<%s to %s at %x>' % (self.__class__, self.addr, unsignedID(self))
+ return s
+
+
+class Server(Connection):
+ """Serverside socket-stream connection class.
+
+ I am a serverside network connection transport; a socket which came from an
+ accept() on a server.
+ """
+
+ def __init__(self, sock, protocol, client, server, sessionno):
+ """Server(sock, protocol, client, server, sessionno)
+
+ Initialize me with a socket, a protocol, a descriptor for my peer (a
+ tuple of host, port describing the other end of the connection), an
+ instance of Port, and a session number.
+ """
+ Connection.__init__(self, sock, protocol)
+ self.server = server
+ self.client = client
+ self.sessionno = sessionno
+ self.hostname = client[0]
+ self.logstr = "%s,%s,%s" % (self.protocol.__class__.__name__, sessionno, self.hostname)
+ self.repstr = "<%s #%s on %s>" % (self.protocol.__class__.__name__, self.sessionno, self.server.port)
+ self.startReading()
+ self.connected = 1
+
+ def __repr__(self):
+ """A string representation of this connection.
+ """
+ return self.repstr
+
+ def startTLS(self, ctx, server=1):
+ holder = Connection.startTLS(self, ctx)
+ if server:
+ self.socket.set_accept_state()
+ else:
+ self.socket.set_connect_state()
+ return holder
+
+ def getHost(self):
+ """Returns an IPv4Address.
+
+ This indicates the server's address.
+ """
+ return address.IPv4Address('TCP', *(self.socket.getsockname() + ('INET',)))
+
+ def getPeer(self):
+ """Returns an IPv4Address.
+
+ This indicates the client's address.
+ """
+ return address.IPv4Address('TCP', *(self.client + ('INET',)))
+
+class Port(base.BasePort, _SocketCloser):
+ """I am a TCP server port, listening for connections.
+
+ When a connection is accepted, I will call my factory's buildProtocol with
+ the incoming connection as an argument, according to the specification
+ described in twisted.internet.interfaces.IProtocolFactory.
+
+ If you wish to change the sort of transport that will be used, my
+ `transport' attribute will be called with the signature expected for
+ Server.__init__, so it can be replaced.
+ """
+
+ implements(interfaces.IListeningPort)
+
+ addressFamily = socket.AF_INET
+ socketType = socket.SOCK_STREAM
+
+ transport = Server
+ sessionno = 0
+ interface = ''
+ backlog = 50
+
+ # Actual port number being listened on, only set to a non-None
+ # value when we are actually listening.
+ _realPortNumber = None
+
+ def __init__(self, port, factory, backlog=50, interface='', reactor=None):
+ """Initialize with a numeric port to listen on.
+ """
+ base.BasePort.__init__(self, reactor=reactor)
+ self.port = port
+ self.factory = factory
+ self.backlog = backlog
+ self.interface = interface
+
+ def __repr__(self):
+ if self._realPortNumber is not None:
+ return "<%s of %s on %s>" % (self.__class__, self.factory.__class__,
+ self._realPortNumber)
+ else:
+ return "<%s of %s (not listening)>" % (self.__class__, self.factory.__class__)
+
+ def createInternetSocket(self):
+ s = base.BasePort.createInternetSocket(self)
+ if platformType == "posix" and sys.platform != "cygwin":
+ s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+ return s
+
+ def startListening(self):
+ """Create and bind my socket, and begin listening on it.
+
+ This is called on unserialization, and must be called after creating a
+ server to begin listening on the specified port.
+ """
+ try:
+ skt = self.createInternetSocket()
+ skt.bind((self.interface, self.port))
+ except socket.error, le:
+ raise CannotListenError, (self.interface, self.port, le)
+
+ # Make sure that if we listened on port 0, we update that to
+ # reflect what the OS actually assigned us.
+ self._realPortNumber = skt.getsockname()[1]
+
+ log.msg("%s starting on %s" % (self.factory.__class__, self._realPortNumber))
+
+ # The order of the next 6 lines is kind of bizarre. If no one
+ # can explain it, perhaps we should re-arrange them.
+ self.factory.doStart()
+ skt.listen(self.backlog)
+ self.connected = 1
+ self.socket = skt
+ self.fileno = self.socket.fileno
+ self.numberAccepts = 100
+
+ self.startReading()
+
+ def _buildAddr(self, (host, port)):
+ return address._ServerFactoryIPv4Address('TCP', host, port)
+
+ def doRead(self):
+ """Called when my socket is ready for reading.
+
+ This accepts a connection and asks the factory to build a protocol to
+ handle the wire-level protocol.
+ """
+ try:
+ if platformType == "posix":
+ numAccepts = self.numberAccepts
+ else:
+ # win32 event loop breaks if we do more than one accept()
+ # in an iteration of the event loop.
+ numAccepts = 1
+ for i in range(numAccepts):
+ # we need this so we can deal with a factory's buildProtocol
+ # calling our loseConnection
+ if self.disconnecting:
+ return
+ try:
+ skt, addr = self.socket.accept()
+ except socket.error, e:
+ if e.args[0] in (EWOULDBLOCK, EAGAIN):
+ self.numberAccepts = i
+ break
+ elif e.args[0] == EPERM:
+ # Netfilter on Linux may have rejected the
+ # connection, but we get told to try to accept()
+ # anyway.
+ continue
+ elif e.args[0] in (EMFILE, ENOBUFS, ENFILE, ENOMEM, ECONNABORTED):
+
+ # Linux gives EMFILE when a process is not allowed
+ # to allocate any more file descriptors. *BSD and
+ # Win32 give (WSA)ENOBUFS. Linux can also give
+ # ENFILE if the system is out of inodes, or ENOMEM
+ # if there is insufficient memory to allocate a new
+ # dentry. ECONNABORTED is documented as possible on
+ # both Linux and Windows, but it is not clear
+ # whether there are actually any circumstances under
+ # which it can happen (one might expect it to be
+ # possible if a client sends a FIN or RST after the
+ # server sends a SYN|ACK but before application code
+ # calls accept(2), however at least on Linux this
+ # _seems_ to be short-circuited by syncookies.)
+
+ log.msg("Could not accept new connection (%s)" % (
+ errorcode[e.args[0]],))
+ break
+ raise
+
+ protocol = self.factory.buildProtocol(self._buildAddr(addr))
+ if protocol is None:
+ skt.close()
+ continue
+ s = self.sessionno
+ self.sessionno = s+1
+ transport = self.transport(skt, protocol, addr, self, s)
+ transport = self._preMakeConnection(transport)
+ protocol.makeConnection(transport)
+ else:
+ self.numberAccepts = self.numberAccepts+20
+ except:
+ # Note that in TLS mode, this will possibly catch SSL.Errors
+ # raised by self.socket.accept()
+ #
+ # There is no "except SSL.Error:" above because SSL may be
+ # None if there is no SSL support. In any case, all the
+ # "except SSL.Error:" suite would probably do is log.deferr()
+ # and return, so handling it here works just as well.
+ log.deferr()
+
+ def _preMakeConnection(self, transport):
+ return transport
+
+ def loseConnection(self, connDone=failure.Failure(main.CONNECTION_DONE)):
+ """Stop accepting connections on this port.
+
+ This will shut down my socket and call self.connectionLost().
+ It returns a deferred which will fire successfully when the
+ port is actually closed.
+ """
+ self.disconnecting = 1
+ self.stopReading()
+ if self.connected:
+ self.deferred = defer.Deferred()
+ self.reactor.callLater(0, self.connectionLost, connDone)
+ return self.deferred
+
+ stopListening = loseConnection
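+
+ # A hedged usage sketch: when the port is connected, stopListening returns a
+ # Deferred that fires once the socket is actually closed, so callers usually
+ # chain on it before rebinding the same port (port number is illustrative):
+ #
+ #   d = port.stopListening()
+ #   d.addCallback(lambda _: reactor.listenTCP(8080, factory))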
+
+ def connectionLost(self, reason):
+ """Cleans up my socket.
+ """
+ log.msg('(Port %s Closed)' % self._realPortNumber)
+ self._realPortNumber = None
+ base.BasePort.connectionLost(self, reason)
+ self.connected = 0
+ self._closeSocket()
+ del self.socket
+ del self.fileno
+ self.factory.doStop()
+ if hasattr(self, "deferred"):
+ self.deferred.callback(None)
+ del self.deferred
+
+ def logPrefix(self):
+ """Returns the name of my class, to prefix log entries with.
+ """
+ return reflect.qual(self.factory.__class__)
+
+ def getHost(self):
+ """Returns an IPv4Address.
+
+ This indicates the server's address.
+ """
+ return address.IPv4Address('TCP', *(self.socket.getsockname() + ('INET',)))
+
+class Connector(base.BaseConnector):
+ def __init__(self, host, port, factory, timeout, bindAddress, reactor=None):
+ self.host = host
+ if isinstance(port, types.StringTypes):
+ try:
+ port = socket.getservbyname(port, 'tcp')
+ except socket.error, e:
+ raise error.ServiceNameUnknownError(string="%s (%r)" % (e, port))
+ self.port = port
+ self.bindAddress = bindAddress
+ base.BaseConnector.__init__(self, factory, timeout, reactor)
+
+ def _makeTransport(self):
+ return Client(self.host, self.port, self.bindAddress, self, self.reactor)
+
+ def getDestination(self):
+ return address.IPv4Address('TCP', self.host, self.port, 'INET')
+
+# Software License Agreement (BSD License)
+#
+# Copyright (c) 2011, Willow Garage, Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials provided
+# with the distribution.
+# * Neither the name of Willow Garage, Inc. nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+## ROS Message generation
+##
+##
+
+import sys
+import os
+import em
+import genmsg.command_line
+import genmsg.msgs
+import genmsg.msg_loader
+import genmsg.gentools
+
+# generate msg or srv files from a template file
+# template_map is of the form {'template_file': 'output_file'}; output_file can contain @NAME@, which will be replaced by the message/service name
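+# For illustration (template and output names are hypothetical): a template_map
+# such as {'msg.h.template': '@NAME@.h', 'msg.c.template': '@NAME@.c'} would, for
+# a message named Foo, render the two templates into Foo.h and Foo.c in output_dir.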
+def _generate_from_spec(input_file, output_dir, template_dir, msg_context, spec, template_map, search_path):
+
+ md5sum = genmsg.gentools.compute_md5(msg_context, spec)
+
+ # precompute msg definition once
+ if isinstance(spec, genmsg.msgs.MsgSpec):
+ msg_definition = genmsg.gentools.compute_full_text(msg_context, spec)
+
+ # Loop over all files to generate
+ for template_file_name, output_file_name in template_map.items():
+ template_file = os.path.join(template_dir, template_file_name)
+ output_file = os.path.join(output_dir, output_file_name.replace("@NAME@", spec.short_name))
+
+ #print "generate_from_template %s %s %s" % (input_file, template_file, output_file)
+
+ ofile = open(output_file, 'w') #todo try
+
+ # Set dictionary for the generator interpreter
+ g = {
+ "file_name_in": input_file,
+ "spec": spec,
+ "md5sum": md5sum,
+ "search_path": search_path,
+ "msg_context": msg_context
+ }
+ if isinstance(spec, genmsg.msgs.MsgSpec):
+ g['msg_definition'] = msg_definition
+
+ # todo, reuse interpreter
+ interpreter = em.Interpreter(output=ofile, globals=g, options={em.RAW_OPT:True,em.BUFFERED_OPT:True})
+ if not os.path.isfile(template_file):
+ ofile.close()
+ os.remove(output_file)
+ raise RuntimeError("Template file %s not found in template dir %s" % (template_file_name, template_dir))
+ interpreter.file(open(template_file)) #todo try
+ interpreter.shutdown()
+
+def _generate_msg_from_file(input_file, output_dir, template_dir, search_path, package_name, msg_template_dict):
+ # Read MsgSpec from .msg file
+ msg_context = genmsg.msg_loader.MsgContext.create_default()
+ full_type_name = genmsg.gentools.compute_full_type_name(package_name, os.path.basename(input_file))
+ spec = genmsg.msg_loader.load_msg_from_file(msg_context, input_file, full_type_name)
+ # Load the dependencies
+ genmsg.msg_loader.load_depends(msg_context, spec, search_path)
+ # Generate the language dependent msg file
+ _generate_from_spec(input_file,
+ output_dir,
+ template_dir,
+ msg_context,
+ spec,
+ msg_template_dict,
+ search_path)
+
+def _generate_srv_from_file(input_file, output_dir, template_dir, search_path, package_name, srv_template_dict, msg_template_dict):
+ # Read MsgSpec from .srv.file
+ msg_context = genmsg.msg_loader.MsgContext.create_default()
+ full_type_name = genmsg.gentools.compute_full_type_name(package_name, os.path.basename(input_file))
+ spec = genmsg.msg_loader.load_srv_from_file(msg_context, input_file, full_type_name)
+ # Load the dependencies
+ genmsg.msg_loader.load_depends(msg_context, spec, search_path)
+ # Generate the language dependent srv file
+ _generate_from_spec(input_file,
+ output_dir,
+ template_dir,
+ msg_context,
+ spec,
+ srv_template_dict,
+ search_path)
+ # Generate the language dependent msg file for the srv request
+ _generate_from_spec(input_file,
+ output_dir,
+ template_dir,
+ msg_context,
+ spec.request,
+ msg_template_dict,
+ search_path)
+ # Generate the language dependent msg file for the srv response
+ _generate_from_spec(input_file,
+ output_dir,
+ template_dir,
+ msg_context,
+ spec.response,
+ msg_template_dict,
+ search_path)
+
+# uniform interface for generating either srv or msg files
+def generate_from_file(input_file, package_name, output_dir, template_dir, include_path, msg_template_dict, srv_template_dict):
+ # Normalize paths
+ input_file = os.path.abspath(input_file)
+ output_dir = os.path.abspath(output_dir)
+
+ # Create output dir
+ try:
+ os.makedirs(output_dir)
+ except OSError as e:
+ if e.errno != 17: # ignore file exists error
+ raise
+
+ # Parse include path dictionary
+ if( include_path ):
+ search_path = genmsg.command_line.includepath_to_dict(include_path)
+ else:
+ search_path = {}
+
+ # Generate the file(s)
+ if input_file.endswith(".msg"):
+ _generate_msg_from_file(input_file, output_dir, template_dir, search_path, package_name, msg_template_dict)
+ elif input_file.endswith(".srv"):
+ _generate_srv_from_file(input_file, output_dir, template_dir, search_path, package_name, srv_template_dict, msg_template_dict)
+ else:
+ assert False, "Uknown file extension for %s"%input_file
+
+def generate_module(package_name, output_dir, template_dir, template_dict):
+ # Locate generated msg files
+ files = os.listdir(output_dir)
+
+ # Loop over all files to generate
+ for template_file_name, output_file_name in template_dict.items():
+ template_file = os.path.join(template_dir, template_file_name)
+ output_file = os.path.join(output_dir, output_file_name)
+
+ ofile = open(output_file, 'w') #todo try
+
+ # Set dictionary for the generator interpreter
+ g = dict(files=files,
+ package=package_name)
+
+ # todo, reuse interpreter
+ interpreter = em.Interpreter(output=ofile, options={em.RAW_OPT:True,em.BUFFERED_OPT:True})
+ interpreter.updateGlobals(g)
+ if not os.path.isfile(template_file):
+ ofile.close()
+ os.remove(output_file)
+ raise RuntimeError("Template file %s not found in template dir %s" % (template_file_name, template_dir))
+ interpreter.file(open(template_file)) #todo try
+ interpreter.shutdown()
+
+# Uniform interface to support the standard command line options
+def generate_from_command_line_options(argv, msg_template_dict, srv_template_dict, module_template_dict = {}):
+ from optparse import OptionParser
+ parser = OptionParser("[options] ")
+ parser.add_option("-p", dest='package',
+ help="ros package the generated msg/srv files belongs to")
+ parser.add_option("-o", dest='outdir',
+ help="directory in which to place output files")
+ parser.add_option("-I", dest='includepath',
+ help="include path to search for messages",
+ action="append")
+ parser.add_option("-m", dest='module',
+ help="write the module file",
+ action='store_true', default=False)
+ parser.add_option("-e", dest='emdir',
+ help="directory containing template files",
+ default=sys.path[0])
+
+ (options, argv) = parser.parse_args(argv)
+
+ if( not options.package or not options.outdir or not options.emdir):
+ parser.print_help()
+ exit(-1)
+
+ if( options.module ):
+ generate_module(options.package, options.outdir, options.emdir, module_template_dict)
+ else:
+ if len(argv) > 1:
+ generate_from_file(argv[1], options.package, options.outdir, options.emdir, options.includepath, msg_template_dict, srv_template_dict)
+ else:
+ parser.print_help()
+ exit(-1)
+
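+# A hedged usage sketch (template file names are hypothetical): a language
+# generator's entry script would typically call this with its own template
+# dictionaries:
+#
+#   if __name__ == "__main__":
+#       generate_from_command_line_options(
+#           sys.argv,
+#           msg_template_dict={'msg.template': '@NAME@.py'},
+#           srv_template_dict={'srv.template': '_@NAME@_srv.py'})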
+
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2017, Ansible by Red Hat, inc
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = r'''
+---
+module: net_ping
+version_added: "2.4"
+author: "Jacob McGill (@jmcgill298)"
+short_description: Tests reachability using ping from a network device
+description:
+ - Tests reachability using ping from network device to a remote destination.
+ - For Windows targets, use the M(win_ping) module instead.
+ - For targets running Python, use the M(ping) module instead.
+extends_documentation_fragment: network_agnostic
+options:
+ count:
+ description:
+ - Number of packets to send.
+ default: 5
+ dest:
+ description:
+ - The IP Address or hostname (resolvable by switch) of the remote node.
+ required: true
+ source:
+ description:
+ - The source IP Address.
+ state:
+ description:
+ - Determines if the expected result is success or fail.
+ choices: [ absent, present ]
+ default: present
+ vrf:
+ description:
+ - The VRF to use for forwarding.
+ default: default
+notes:
+ - For Windows targets, use the M(win_ping) module instead.
+ - For targets running Python, use the M(ping) module instead.
+'''
+
+
+EXAMPLES = r'''
+- name: Test reachability to 10.10.10.10 using default vrf
+ net_ping:
+ dest: 10.10.10.10
+
+- name: Test reachability to 10.20.20.20 using prod vrf
+ net_ping:
+ dest: 10.20.20.20
+ vrf: prod
+
+- name: Test unreachability to 10.30.30.30 using default vrf
+ net_ping:
+ dest: 10.30.30.30
+ state: absent
+
+- name: Test reachability to 10.40.40.40 using prod vrf and setting count and source
+ net_ping:
+ dest: 10.40.40.40
+ source: loopback0
+ vrf: prod
+ count: 20
+'''
+
+RETURN = r'''
+commands:
+ description: Show the command sent.
+ returned: always
+ type: list
+ sample: ["ping vrf prod 10.40.40.40 count 20 source loopback0"]
+packet_loss:
+ description: Percentage of packets lost.
+ returned: always
+ type: str
+ sample: "0%"
+packets_rx:
+ description: Packets successfully received.
+ returned: always
+ type: int
+ sample: 20
+packets_tx:
+ description: Packets successfully transmitted.
+ returned: always
+ type: int
+ sample: 20
+rtt:
+ description: Show RTT stats.
+ returned: always
+ type: dict
+ sample: {"avg": 2, "max": 8, "min": 1}
+'''
+
+import os
+import os.path
+import glob
+import shutil
+import datetime
+from fnmatch import fnmatch
+
+PROJECT_NAME = "draggin"
+if 'PROJECT_NAME' in ARGUMENTS:
+ PROJECT_NAME = ARGUMENTS["PROJECT_NAME"]
+
+env = Environment(tools = [])
+
+#the full path to this SConscript file
+this_sconscript_file = (lambda x:x).func_code.co_filename
+
+# matches anything in the list
+def fnmatchList(_name, _filters):
+
+ for f in _filters:
+ if fnmatch(_name, f):
+ # found one match so get out of here
+ return True
+
+ # no matches
+ return False
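+
+# e.g. fnmatchList("readme.txt", ["*.md", "*.txt"]) returns True, while
+# fnmatchList("readme.txt", ["*.py"]) returns False.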
+
+###################################################################################################
+# zip files
+zipfiles = []
+def zipAction( target, source, env ):
+ print "zipAction"
+ fpair = os.path.splitext(str(target[0]))
+ print "Zipping "+fpair[0]
+ try:
+ shutil.make_archive(fpair[0], "zip", "package/windows")
+
+ except (IOError, os.error), why:
+ #easygui.exceptionbox(str(source[0])+", "+str(target[0])+" FAILED")
+ raw_input(str(source[0])+", "+str(target[0])+" FAILED: "+str(why))
+
+copyBuilder = Builder(action = zipAction)
+env.Append(BUILDERS = {'zipComplier' : copyBuilder})
+
+inputfiles = []
+
+for root, dirs, files in os.walk("package/windows"):
+ for f in files:
+ filename = os.path.join(root, f)
+ inputfiles.append(str(filename))
+
+# the output zip
+outputfiles = []
+outputfiles.append(str("package/" + PROJECT_NAME + "-windows-" + str(datetime.date.today()) + ".zip"))
+zipfiles.append(env.zipComplier(outputfiles, inputfiles))
+
+
+if len(zipfiles) > 0:
+ Default(zipfiles)
+
+from ..interpreterWorker import *
+from common.tools import tools
+from lang.gbs_board import Board
+import common.utils
+import common.i18n as i18n
+import lang
+import logging
+
+class GUIGobstonesApi(lang.GobstonesApi):
+ def __init__(self, communicator):
+ self.comm = communicator
+ def read(self):
+ self.comm.send('READ_REQUEST')
+ message = self.comm.receive()
+ if message.header != 'READ_DONE': assert False
+ return message.body
+ def show(self, board):
+ self.comm.send('PARTIAL', tools.board_format.to_string(board))
+ def log(self, msg):
+ self.comm.send('LOG', msg)
+
+class Interpreter(InterpreterWorker):
+ def prepare(self):
+ api = GUIGobstonesApi(self.communicator)
+ options = lang.GobstonesOptions()
+ self.gobstones = lang.Gobstones(options, api)
+ def start(self, filename, program_text, initial_board_string, run_mode):
+ board = tools.board_format.from_string(initial_board_string)
+ try:
+ if run_mode == Interpreter.RunMode.FULL:
+ self.success(self.gobstones.run(filename, program_text, board))
+ else:
+ # Parse gobstones script
+ self.gobstones.api.log(i18n.i18n('Parsing.'))
+ tree = self.gobstones.parse(program_text, filename)
+ assert tree
+ # Explode macros
+ self.gobstones.api.log(i18n.i18n('Exploding program macros.'))
+ self.gobstones.explode_macros(tree)
+ # Check semantics, liveness and types
+ self.gobstones.check(tree)
+ self.success()
+ except Exception as exception:
+ self.failure(exception)
+ def success(self, gbs_run=None):
+ if gbs_run is None:
+ self.communicator.send('OK', (None, None))
+ else:
+ self.communicator.send('OK', (tools.board_format.to_string(gbs_run.final_board), gbs_run.result))
+ def failure(self, exception):
+ self.communicator.send('FAIL', (exception.__class__, (exception.msg, exception.area)))
+from __future__ import division
+import sys
+from table import *
+from where2 import *
+from dtree import *
+
+
+def csv2py(f):
+ sym2num = {} # holds the numbers assigned to characters (symbols) not seen before
+ def str2num(t, p=0):
+ for r,row in enumerate(t._rows):
+ for c, cell in enumerate(row.cells):
+ if isinstance(cell, str) and c max(leavesdic) or l.lvl-abs(i) <0):
+ branch = findbetter(leavesdic, -l.lvl, l) # find the better leaves on the level 0
+ if branch:
+ contrastset+=[branch]
+ elif bestscore == l.score:
+ contrastset+=[{"This is the best one!":"No Contrast", "targetscore":l.score}]
+ else:
+ contrastset+=[gl] # not found, give the global best contrast set
+ l = addtoleaf(l, contrastset)
+ break
+ branch = findbetter(leavesdic, -i, l) # go up level
+ if branch:
+ contrastset+=[branch]
+ l=addtoleaf(l, contrastset)
+ break
+ i = -i #up
+ branch = findbetter(leavesdic, -i, l) # go down i level
+ if branch:
+ contrastset+=[branch]
+ l=addtoleaf(l, contrastset)
+ break
+ i = abs(i)+1
+ return contrastset
+ def br(node, score):
+ if not node:
+ return
+ contrastdic = {}
+ for i, b in enumerate(node.branch):
+ contrastdic[b[0].name]= contrastdic.get(b[0].name,"")+str(b[1])
+ contrastdic.update({"targetscore":score})
+ return contrastdic
+ def findbetter1(kids, testscore, betternode = None):
+ target =testscore
+ for bro in kids:
+ if bro.kids:
+ continue
+ if bro.score < target:
+ target=bro.score # find the better brother
+ betternode=bro
+ return br(betternode, target)
+ def findbetter(leavesdic, i,l):
+ if not int(i+l.lvl) in leavesdic:
+ return
+ if len(l.up.kids)>1: # priority1: find in brothers/Sisters
+ branch = findbetter1(l.up.kids, l.score)
+ if branch:
+ return branch
+ if l.up.up and len(l.up.up.kids)>1:# priority2: find in aunts and uncles
+ branch = findbetter1(l.up.up.kids,l.score)
+ if branch:
+ return branch
+ for node in leavesdic[i+l.lvl]: # priority3: find in cousins
+ # tempscore = leafscore(node)
+ if node.score < l.score:
+ branch = br(node,node.score)
+ return branch
+ contrastset = []
+ for sub in tree.kids:
+ subleaves= [i for i in dtleaves(sub)]
+ leavesdic = {}
+ for l in subleaves: # make the subleaves dic
+ leavesdic[l.lvl] = leavesdic.get(l.lvl, []) +[l] # add all leaves under one subroot in to dic, according to lvl
+ # {1:[leaf1, leaf2,leaf4] 2:[]}
+ for l in subleaves: # build contrast set
+ contrastset = findset(leavesdic, l)
+ showTdiv(tree)
+ printcontrastset(contrastset, allleaves)
+ return tree
+
+def globalleaf(allleaves, node= None):
+ mins = 10**10
+ contrastset= {}
+ for leaf in allleaves:
+ if leaf.score < mins:
+ node = leaf
+ mins = leaf.score
+ for i in node.branch:
+ contrastset[i[0].name]= i[1]
+ contrastset["targetscore"]=mins
+ return contrastset, mins
+
+def leafscore(leaf):
+ score =[]
+ # rows = map(lambda x:x.cells, leaf.rows)
+ for row in leaf.rows:
+ score += [row.cells[-1]]
+ n = len(score)
+ p= q = max(0, int(n*0.5) - 1)
+ if len(score)%2==0:p = q+1
+ median = (score[p]+score[q])*0.5
+ return median
+
+def printcontrastset(contrastset,allleaves):
+ print "\n"+ "+"*20+"\nCONSTRAST SET:"+ "\n"+ "+"*20
+ for k, adit in enumerate(contrastset):
+ out = "leaf #"+str(k)+" score:" + str(allleaves[k].score)
+ # sortdic = dict(sorted(adit.iteritems(), key= lambda x:x[1]))
+ # sortdic = dict(sorted(adit.iteritems(), key = adit.get))
+ for key, val in adit.iteritems(): # sort dict by key
+ out += " ==>"+str(key) +"="+str(val)
+ print out
+ out = ""
+def printtogo(nodelst):
+ if not nodelst:
+ return
+ print "\n"+ "+"*20+"\nTEST DATA:"+ "\n"+ "+"*20
+ for i, node in enumerate(nodelst):
+ out ="testdata "+str(i)+ " will go to"
+ try:
+ out +=" leaf #"+str(node.leafid) +": "
+ except Exception, e:
+ out+= " node # "+str(node.mode)+": "
+ for i, b in enumerate(node.branch):
+ out +=b[0].name+"="+str(b[1])+" "
+ print out
+
+def contrast(tree, testleaf, testscore):
+ def myup(node, testleaf, testscore, conset=[]):
+ if node.lvl==0:
+ return []
+ if len(node.up.kids)>1: # has local neighbors here; can't go down
+ for neigh in node.up.kids:
+ if leafscore(neigh)< testscore:
+ return [neigh]
+ if node.up.up and node.up.lvl!=0:
+ conset +=myup(node.up, testleaf, testscore, conset)
+ return [node]
+ else:
+ return ["noset"]
+ contrastdic = {}
+ # testscore = leafscore(testleaf)
+ temp = myup(testleaf,testleaf, testscore)
+ if "noset" in temp:
+ return []
+ if temp ==[]:
+ return []
+ for s in reversed(temp):
+ # contrastdic+=[s.f.name +":"+s.val]
+ contrastdic[s.f.name]= contrastdic.get(s.f.name,"")+str(s.val)
+ contrastdic["clusterID"]= contrastdic.get("clusterID", 0)+ int(temp[0].mode)
+ return contrastdic
+def showTdiv(n,lvl=-1, ):
+ if n.f:
+ say( ('|..' * lvl) + str(n.f.name)+ "="+str(n.val) + \
+ "\t:" + str(n.mode) + " #" + str(nmodes(n)))
+ if n.kids:
+ nl();
+ for k in n.kids:
+ showTdiv(k, lvl+1)
+ else:
+ s=classStats(n)
+ print ' '+str(int(100*s.counts[s.mode()]/len(n.rows)))+'% * '+str(len(n.rows))+' leaf #'+str(n.leafid) +' score:'+str(n.score)
+
+def clustertbl(f,tree, num2sym, row=[]):
+ tbl1 = tbl = table(f)
+ newheader = Num()
+ newheader.col = len(tbl.headers)
+ newheader.name = "=klass"
+ tbl1.headers +=[newheader] # tbl1 : the new table with cluster ID
+ for k,_ in leaves(tree):
+ for j in k.val:
+ for i, cell in enumerate(j.cells):
+ if isinstance(tbl.headers[i], Sym):
+ j.cells[i] = num2sym.get(cell, cell)
+ tmp=j.cells
+ tmp.append(id(k) % 1000)
+ tmp.append(j.cells[tbl1.depen[0].col]) # add the FIRST objective into the last cell of the row
+ # j.__dict__.update({'cells': tmp})
+ j.update(cells=tmp)
+ row.append(j.cells)
+ tbl1 = clone(tbl1, row)
+ return tbl1, row
+
+def summerize(leaves, Dtree, befscore = 0, aftscore=0):
+ for leaf in leaves:
+ try:
+ leaf.testdata
+ befscore +=leaf.score * len(leaf.testdata)
+ try:
+ leaf.contrastset["This is the best one!"]
+ aftscore += leaf.score * len(leaf.testdata)
+ except Exception, e:
+ aftscore += len(leaf.testdata)*(leaf.contrastset["targetscore"])
+ except Exception, e:
+ continue
+ # try:
+ # befscore +=leaf.score * len(leaf.testdata)
+ # except Exception, e:
+ # # befscore +=0
+ # try:
+ # leaf.contrastset["This is the best one!"]
+ # aftscore += leaf.score * len(leaf.testdata)
+ # except Exception, e:
+ # try:
+ # aftscore +=len(leaf.testdata)*int(leaf.contrastset["targetscore"])
+ # except Exception, e:
+ # aftscore+=0
+ print "\n"+ "+"*20+"\nSummerize:"+ "\n"+ "+"*20
+ print "before appying contrastset: %s"%str(befscore)
+ print "after appying contrastset: %s"%str(aftscore)
+
+
+
+
+def main():
+ random.seed(1)
+ data = o(src = "data/nasa93train.csv")
+ # data = o(src = "data/ant-1.3.csv")
+ m, sym2num= csv2py(data.src)
+ num2sym = dict(zip(sym2num.values(), sym2num.keys()))
+ Init(m) # init The class
+ tree= where2(m, m._rows) # tree generated by clustering
+ tbl1, row = clustertbl(data.src, tree, num2sym) # new table with cluster ID
+ fname = data.src[:-4]+'_copy'+data.src[-4:]
+ savetbl(tbl1,fname) # write new table to a file
+ # clusterscore = calScore(tree)
+ testdata = buildtestdata(tbl1, 30) # select the testdata
+ Dtree = buildtdiv(tbl1)
+ leaves=findleaves(Dtree)
+ testleaf = gotoleaf(testdata, Dtree) # all the leaves the testdata should go
+ buildcontrast1(Dtree, leaves)
+ printtogo(testleaf)
+ summerize(leaves, Dtree)
+if __name__ =="__main__": eval(cmd())
+
+
+
+
+
+# -*- coding: utf-8 -*-
+
+from openerp.osv import fields, osv
+
+
+class ResCompany(osv.Model):
+ _inherit = "res.company"
+
+ def _get_paypal_account(self, cr, uid, ids, name, arg, context=None):
+ Acquirer = self.pool['payment.acquirer']
+ company_id = self.pool['res.users'].browse(cr, uid, uid, context=context).company_id.id
+ paypal_ids = Acquirer.search(cr, uid, [
+ ('website_published', '=', True),
+ ('name', 'ilike', 'paypal'),
+ ('company_id', '=', company_id),
+ ], limit=1, context=context)
+ if paypal_ids:
+ paypal = Acquirer.browse(cr, uid, paypal_ids[0], context=context)
+ return dict.fromkeys(ids, paypal.paypal_email_account)
+ return dict.fromkeys(ids, False)
+
+ def _set_paypal_account(self, cr, uid, id, name, value, arg, context=None):
+ Acquirer = self.pool['payment.acquirer']
+ company_id = self.pool['res.users'].browse(cr, uid, uid, context=context).company_id.id
+ paypal_account = self.browse(cr, uid, id, context=context).paypal_account
+ paypal_ids = Acquirer.search(cr, uid, [
+ ('website_published', '=', True),
+ ('paypal_email_account', '=', paypal_account),
+ ('company_id', '=', company_id),
+ ], context=context)
+ if paypal_ids:
+ Acquirer.write(cr, uid, paypal_ids, {'paypal_email_account': value}, context=context)
+ return True
+
+ _columns = {
+ 'paypal_account': fields.function(
+ _get_paypal_account,
+ fnct_inv=_set_paypal_account,
+ nodrop=True,
+ type='char', string='Paypal Account',
+ help="Paypal username (usually email) for receiving online payments."
+ ),
+ }
+
+""".. Ignore pydocstyle D400.
+
+===============
+Slurm Connector
+===============
+
+"""
+import logging
+import os
+import shlex
+import subprocess
+
+from django.conf import settings
+
+from resolwe.utils import BraceMessage as __
+
+from .base import BaseConnector
+
+logger = logging.getLogger(__name__) # pylint: disable=invalid-name
+
+# We add this much to the memory limit to account for executor overhead,
+# since the executor is running in the same environment as the process.
+EXECUTOR_MEMORY_OVERHEAD = 200
+
+
+class Connector(BaseConnector):
+ """Slurm-based connector for job execution."""
+
+ def submit(self, data, runtime_dir, argv):
+ """Run process with SLURM.
+
+ For details, see
+ :meth:`~resolwe.flow.managers.workload_connectors.base.BaseConnector.submit`.
+ """
+ limits = data.process.get_resource_limits()
+ logger.debug(__(
+ "Connector '{}' running for Data with id {} ({}).",
+ self.__class__.__module__,
+ data.id,
+ repr(argv)
+ ))
+
+ # Compute target partition.
+ partition = getattr(settings, 'FLOW_SLURM_PARTITION_DEFAULT', None)
+ if data.process.slug in getattr(settings, 'FLOW_SLURM_PARTITION_OVERRIDES', {}):
+ partition = settings.FLOW_SLURM_PARTITION_OVERRIDES[data.process.slug]
+
+ try:
+ # Make sure the resulting file is executable on creation.
+ script_path = os.path.join(runtime_dir, 'slurm.sh')
+ file_descriptor = os.open(script_path, os.O_WRONLY | os.O_CREAT, mode=0o555)
+ with os.fdopen(file_descriptor, 'wt') as script:
+ script.write('#!/bin/bash\n')
+ script.write('#SBATCH --mem={}M\n'.format(limits['memory'] + EXECUTOR_MEMORY_OVERHEAD))
+ script.write('#SBATCH --cpus-per-task={}\n'.format(limits['cores']))
+ if partition:
+ script.write('#SBATCH --partition={}\n'.format(partition))
+
+ # Render the argument vector into a command line.
+ line = ' '.join(map(shlex.quote, argv))
+ script.write(line + '\n')
+
+ command = ['/usr/bin/env', 'sbatch', script_path]
+ subprocess.Popen(
+ command,
+ cwd=runtime_dir,
+ stdin=subprocess.DEVNULL
+ ).wait()
+ except OSError as err:
+ logger.error(__(
+ "OSError occurred while preparing SLURM script for Data {}: {}",
+ data.id, err
+ ))
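+
+ # For orientation, the sbatch script written above might look roughly like this
+ # (memory, core and partition values are illustrative, and the final line is the
+ # shell-quoted argv passed in by the manager):
+ #
+ #   #!/bin/bash
+ #   #SBATCH --mem=4296M
+ #   #SBATCH --cpus-per-task=4
+ #   #SBATCH --partition=compute
+ #   <argv rendered as a single shell-quoted command line>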
+
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2005-2011, TUBITAK/UEKAE
+#
+# This program is free software; you can redistribute it and/or modify it under
+# the terms of the GNU General Public License as published by the Free
+# Software Foundation; either version 2 of the License, or (at your option)
+# any later version.
+#
+# Please read the COPYING file.
+#
+#
+# installation database
+#
+
+import os
+import re
+import gettext
+__trans = gettext.translation('pisi', fallback=True)
+_ = __trans.ugettext
+
+import piksemel
+
+# PiSi
+import pisi
+import pisi.context as ctx
+import pisi.dependency
+import pisi.files
+import pisi.util
+import pisi.db.lazydb as lazydb
+
+class InstallDBError(pisi.Error):
+ pass
+
+class InstallInfo:
+
+ state_map = { 'i': _('installed'), 'ip':_('installed-pending') }
+
+ def __init__(self, state, version, release, distribution, time):
+ self.state = state
+ self.version = version
+ self.release = release
+ self.distribution = distribution
+ self.time = time
+
+ def one_liner(self):
+ import time
+ time_str = time.strftime("%d %b %Y %H:%M", self.time)
+ s = '%2s|%15s|%6s|%8s|%12s' % (self.state, self.version, self.release,
+ self.distribution, time_str)
+ return s
+
+ def __str__(self):
+ s = _("State: %s\nVersion: %s, Release: %s\n") % \
+ (InstallInfo.state_map[self.state], self.version, self.release)
+ import time
+ time_str = time.strftime("%d %b %Y %H:%M", self.time)
+ s += _('Distribution: %s, Install Time: %s\n') % (self.distribution,
+ time_str)
+ return s
+
+class InstallDB(lazydb.LazyDB):
+
+ def __init__(self):
+ lazydb.LazyDB.__init__(self, cacheable=True, cachedir=ctx.config.packages_dir())
+
+ def init(self):
+ self.installed_db = self.__generate_installed_pkgs()
+ self.rev_deps_db = self.__generate_revdeps()
+
+ def __generate_installed_pkgs(self):
+ def split_name(dirname):
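+ # e.g. (hypothetical) "gettext-0.19-3" is split into ("gettext", "0.19-3")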
+ name, version, release = dirname.rsplit("-", 2)
+ return name, version + "-" + release
+
+ return dict(map(split_name, os.listdir(ctx.config.packages_dir())))
+
+ def __get_marked_packages(self, _type):
+ info_path = os.path.join(ctx.config.info_dir(), _type)
+ if os.path.exists(info_path):
+ return open(info_path, "r").read().split()
+ return []
+
+ def __add_to_revdeps(self, package, revdeps):
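+ # revdeps maps a dependency's package name to a dict of
+ # {dependent package name: dependency XML string}.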
+ metadata_xml = os.path.join(self.package_path(package), ctx.const.metadata_xml)
+ try:
+ meta_doc = piksemel.parse(metadata_xml)
+ pkg = meta_doc.getTag("Package")
+ except:
+ pkg = None
+
+ if pkg is None:
+ # If package info is broken or not available, skip it.
+ ctx.ui.warning(_("Installation info for package '%s' is broken. "
+ "Reinstall it to fix this problem.") % package)
+ del self.installed_db[package]
+ return
+
+ deps = pkg.getTag('RuntimeDependencies')
+ if deps:
+ for dep in deps.tags("Dependency"):
+ revdep = revdeps.setdefault(dep.firstChild().data(), {})
+ revdep[package] = dep.toString()
+ for anydep in deps.tags("AnyDependency"):
+ for dep in anydep.tags("Dependency"):
+ revdep = revdeps.setdefault(dep.firstChild().data(), {})
+ revdep[package] = anydep.toString()
+
+ def __generate_revdeps(self):
+ revdeps = {}
+ for package in self.list_installed():
+ self.__add_to_revdeps(package, revdeps)
+ return revdeps
+
+ def list_installed(self):
+ return self.installed_db.keys()
+
+ def has_package(self, package):
+ return self.installed_db.has_key(package)
+
+ def list_installed_with_build_host(self, build_host):
+ build_host_re = re.compile("(.*?)")
+ found = []
+ for name in self.list_installed():
+ xml = open(os.path.join(self.package_path(name), ctx.const.metadata_xml)).read()
+ matched = build_host_re.search(xml)
+ if matched:
+ if build_host != matched.groups()[0]:
+ continue
+ elif build_host:
+ continue
+
+ found.append(name)
+
+ return found
+
+ def __get_version(self, meta_doc):
+ history = meta_doc.getTag("Package").getTag("History")
+ version = history.getTag("Update").getTagData("Version")
+ release = history.getTag("Update").getAttribute("release")
+
+ # TODO Remove None
+ return version, release, None
+
+ def __get_distro_release(self, meta_doc):
+ distro = meta_doc.getTag("Package").getTagData("Distribution")
+ release = meta_doc.getTag("Package").getTagData("DistributionRelease")
+
+ return distro, release
+
+ def get_version_and_distro_release(self, package):
+ metadata_xml = os.path.join(self.package_path(package), ctx.const.metadata_xml)
+ meta_doc = piksemel.parse(metadata_xml)
+ return self.__get_version(meta_doc) + self.__get_distro_release(meta_doc)
+
+ def get_version(self, package):
+ metadata_xml = os.path.join(self.package_path(package), ctx.const.metadata_xml)
+ meta_doc = piksemel.parse(metadata_xml)
+ return self.__get_version(meta_doc)
+
+ def get_files(self, package):
+ files = pisi.files.Files()
+ files_xml = os.path.join(self.package_path(package), ctx.const.files_xml)
+ files.read(files_xml)
+ return files
+
+ def get_config_files(self, package):
+ files = self.get_files(package)
+ return filter(lambda x: x.type == 'config', files.list)
+
+ def search_package(self, terms, lang=None, fields=None):
+ """
+ fields (dict): looks for terms in the fields which are marked as True.
+ If fields is None, this method will search in all fields.
+
+ example:
+ if fields is equal to: {'name': True, 'summary': True, 'desc': False}
+ this method will return only packages that contain the terms in the
+ package name or summary
+ """
+ resum = '<Summary xml:lang=.(%s|en).>.*?%s.*?</Summary>'
+ redesc = '<Description xml:lang=.(%s|en).>.*?%s.*?</Description>'
+ if not fields:
+ fields = {'name': True, 'summary': True, 'desc': True}
+ if not lang:
+ lang = pisi.pxml.autoxml.LocalText.get_lang()
+ found = []
+ for name in self.list_installed():
+ xml = open(os.path.join(self.package_path(name), ctx.const.metadata_xml)).read()
+ if terms == filter(lambda term: (fields['name'] and \
+ re.compile(term, re.I).search(name)) or \
+ (fields['summary'] and \
+ re.compile(resum % (lang, term), re.I).search(xml)) or \
+ (fields['desc'] and \
+ re.compile(redesc % (lang, term), re.I).search(xml)), terms):
+ found.append(name)
+ return found
+
+ def get_isa_packages(self, isa):
+ risa = '<IsA>%s</IsA>' % isa
+ packages = []
+ for name in self.list_installed():
+ xml = open(os.path.join(self.package_path(name), ctx.const.metadata_xml)).read()
+ if re.compile(risa).search(xml):
+ packages.append(name)
+ return packages
+
+ def get_info(self, package):
+ files_xml = os.path.join(self.package_path(package), ctx.const.files_xml)
+ ctime = pisi.util.creation_time(files_xml)
+ pkg = self.get_package(package)
+ state = "i"
+ if pkg.name in self.list_pending():
+ state = "ip"
+
+ info = InstallInfo(state,
+ pkg.version,
+ pkg.release,
+ pkg.distribution,
+ ctime)
+ return info
+
+ def __make_dependency(self, depStr):
+ node = piksemel.parseString(depStr)
+ dependency = pisi.dependency.Dependency()
+ dependency.package = node.firstChild().data()
+ if node.attributes():
+ attr = node.attributes()[0]
+ dependency.__dict__[attr] = node.getAttribute(attr)
+ return dependency
+
+ def __create_dependency(self, depStr):
+ if "" in depStr:
+ anydependency = pisi.specfile.AnyDependency()
+ for dep in re.compile('(<Dependency .*?>.*?</Dependency>)').findall(depStr):
+ anydependency.dependencies.append(self.__make_dependency(dep))
+ return anydependency
+ else:
+ return self.__make_dependency(depStr)
+
+ def get_rev_deps(self, name):
+ rev_deps = []
+
+ package_revdeps = self.rev_deps_db.get(name)
+ if package_revdeps:
+ for pkg, dep in package_revdeps.items():
+ dependency = self.__create_dependency(dep)
+ rev_deps.append((pkg, dependency))
+
+ return rev_deps
+
+ def pkg_dir(self, pkg, version, release):
+ return pisi.util.join_path(ctx.config.packages_dir(), pkg + '-' + version + '-' + release)
+
+ def get_package(self, package):
+ metadata = pisi.metadata.MetaData()
+ metadata_xml = os.path.join(self.package_path(package), ctx.const.metadata_xml)
+ metadata.read(metadata_xml)
+ return metadata.package
+
+ def __mark_package(self, _type, package):
+ packages = self.__get_marked_packages(_type)
+ if package not in packages:
+ packages.append(package)
+ self.__write_marked_packages(_type, packages)
+
+ def mark_pending(self, package):
+ self.__mark_package(ctx.const.config_pending, package)
+
+ def mark_needs_restart(self, package):
+ self.__mark_package(ctx.const.needs_restart, package)
+
+ def mark_needs_reboot(self, package):
+ self.__mark_package(ctx.const.needs_reboot, package)
+
+ def add_package(self, pkginfo):
+ # Cleanup old revdep info
+ for revdep_info in self.rev_deps_db.values():
+ if pkginfo.name in revdep_info:
+ del revdep_info[pkginfo.name]
+
+ self.installed_db[pkginfo.name] = "%s-%s" % (pkginfo.version, pkginfo.release)
+ self.__add_to_revdeps(pkginfo.name, self.rev_deps_db)
+
+ def remove_package(self, package_name):
+ if self.installed_db.has_key(package_name):
+ del self.installed_db[package_name]
+
+ # Cleanup revdep info
+ for revdep_info in self.rev_deps_db.values():
+ if package_name in revdep_info:
+ del revdep_info[package_name]
+
+ self.clear_pending(package_name)
+
+ def list_pending(self):
+ return self.__get_marked_packages(ctx.const.config_pending)
+
+ def list_needs_restart(self):
+ return self.__get_marked_packages(ctx.const.needs_restart)
+
+ def list_needs_reboot(self):
+ return self.__get_marked_packages(ctx.const.needs_reboot)
+
+ def __write_marked_packages(self, _type, packages):
+ info_file = os.path.join(ctx.config.info_dir(), _type)
+ config = open(info_file, "w")
+ for pkg in set(packages):
+ config.write("%s\n" % pkg)
+ config.close()
+
+ def __clear_marked_packages(self, _type, package):
+ if package == "*":
+ self.__write_marked_packages(_type, [])
+ return
+ packages = self.__get_marked_packages(_type)
+ if package in packages:
+ packages.remove(package)
+ self.__write_marked_packages(_type, packages)
+
+ def clear_pending(self, package):
+ self.__clear_marked_packages(ctx.const.config_pending, package)
+
+ def clear_needs_restart(self, package):
+ self.__clear_marked_packages(ctx.const.needs_restart, package)
+
+ def clear_needs_reboot(self, package):
+ self.__clear_marked_packages(ctx.const.needs_reboot, package)
+
+ def package_path(self, package):
+
+ if self.installed_db.has_key(package):
+ return os.path.join(ctx.config.packages_dir(), "%s-%s" % (package, self.installed_db[package]))
+
+ raise Exception(_('Package %s is not installed') % package)
+
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+""" HoribaJobinYvon
+"""
+from datetime import datetime
+from lims.exportimport.instruments.resultsimport import \
+ AnalysisResultsImporter, InstrumentCSVResultsFileParser
+
+
+class HoribaJobinYvonCSVParser(InstrumentCSVResultsFileParser):
+ def __init__(self, csv):
+ InstrumentCSVResultsFileParser.__init__(self, csv)
+ self._columns = [] # The different columns names
+ self._linedata = {} # The line with the data
+ self._resid = ''
+ self._method = ''
+ self._date = ''
+
+ def _parseline(self, line):
+ # Net intensity contains commas, but it is a single value
+ if '"' in line:
+ line_before, net_intensity, line_after = line.split('"')
+ net_intensity = net_intensity.replace(',', '/')
+ line = line_before + net_intensity + line_after
+ # The spreadsheet creates a column for the measurement units, but it is no longer needed
+ line = line.replace(',mg/l,', '')
+
+ sline = line.split(',')
+ if len(sline) > 0 and sline[0] == 'Sample:':
+ # This line contains the resid (sample) and the date.
+ for idx, e in enumerate(sline):
+ if e == 'Sample:':
+ self._resid = sline[idx+1]
+ elif e == 'Method:':
+ self._method = sline[idx+1]
+ elif e == 'Measured:':
+ self._date = self.csvDate2BikaDate(sline[idx+1])
+ return 1
+ elif len(sline) > 0 and sline[0] == 'LineName':
+ self._columns = sline
+ return 0
+ elif len(sline) > 0 and sline[0] != '':
+ self.parse_data_line(sline)
+
+ def parse_data_line(self, sline):
+ """
+ Parses the data line and builds the dictionary.
+ :param sline: a split data line to parse
+ :return: the number of rows to skip before parsing the next data line, or the error code -1
+ """
+ values = {'Remarks': ''}
+ name = ''
+ test_line = ''
+ for idx, result in enumerate(sline):
+ if self._columns[idx] == 'LineName':
+ # It's the analysis name
+ name = result.split(' ')[0]
+ test_line = result.split(' ')[1]
+ elif self._columns[idx] == 'Cc':
+ values['Concentration'] = sline[idx+2]
+ elif self._columns[idx] == 'SD':
+ values['StandardDeviation'] = sline[idx+2]
+ elif self._columns[idx] == 'RSD':
+ values['ResidualError'] = sline[idx+2]
+ elif self._columns[idx] == 'Net_Intensity':
+ values['NetIntensity'] = result.split('/')
+
+ values['DefaultResult'] = 'Concentration'
+ values['DateTime'] = self._date
+ values['Method'] = self._method
+ values['TestLine'] = test_line
+ self._addRawResult(self._resid, {name: values}, False)
+ return 0
+
+ def csvDate2BikaDate(self, DateTime):
+ # example: 11.03.2014 14:46 (%d.%m.%Y %H:%M) --> 20140311 14:46 (%Y%m%d %H:%M)
+ dtobj = datetime.strptime(DateTime, "%d.%m.%Y %H:%M")
+ return dtobj.strftime("%Y%m%d %H:%M")
+
+
+class HoribaJobinYvonImporter(AnalysisResultsImporter):
+ def __init__(self, parser, context, idsearchcriteria, override,
+ allowed_ar_states=None, allowed_analysis_states=None,
+ instrument_uid=None):
+ AnalysisResultsImporter.__init__(self, parser, context,
+ idsearchcriteria, override,
+ allowed_ar_states,
+ allowed_analysis_states,
+ instrument_uid)
+
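+# Illustrative only (not part of the original module): a hedged, standalone
+# sketch of the date conversion performed by csvDate2BikaDate() above,
+# i.e. "%d.%m.%Y %H:%M" in, "%Y%m%d %H:%M" out. The sample value is made up.
+from datetime import datetime
+
+sample_measured = '11.03.2014 14:46'
+converted = datetime.strptime(sample_measured, '%d.%m.%Y %H:%M').strftime('%Y%m%d %H:%M')
+assert converted == '20140311 14:46'
+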
+# -*- coding: utf-8 -*-
+##############################################################################
+#
+# OpenERP, Open Source Management Solution
+# Copyright (C) 2004-2010 Tiny SPRL ().
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see .
+#
+##############################################################################
+
+from openerp.osv import fields, osv
+from openerp.tools.translate import _
+
+
+class showdiff(osv.osv_memory):
+ """ Disp[ay Difference for History """
+
+ _name = 'blog.post.history.show_diff'
+
+ def get_diff(self, cr, uid, context=None):
+ if context is None:
+ context = {}
+ history = self.pool.get('blog.post.history')
+ ids = context.get('active_ids', [])
+
+ diff = ""
+ if len(ids) == 2:
+ if ids[0] > ids[1]:
+ diff = history.getDiff(cr, uid, ids[1], ids[0])
+ else:
+ diff = history.getDiff(cr, uid, ids[0], ids[1])
+
+ elif len(ids) == 1:
+ old = history.browse(cr, uid, ids[0])
+ nids = history.search(cr, uid, [('post_id', '=', old.post_id.id)])
+ nids.sort()
+ diff = history.getDiff(cr, uid, ids[0], nids[-1])
+ else:
+ raise osv.except_osv(_('Warning!'), _('You need to select at least one and at most two history revisions!'))
+ return diff
+
+ _columns = {
+ 'diff': fields.text('Diff', readonly=True),
+ }
+
+ _defaults = {
+ 'diff': get_diff
+ }
+
+# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
+
+# -*- coding: utf-8 -*-
+##############################################################################
+#
+# OpenERP, Open Source Management Solution
+# Copyright (C) 2004-2010 Tiny SPRL ().
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see .
+#
+##############################################################################
+import datetime
+
+from openerp.osv import fields, osv
+from openerp.tools.translate import _
+
+
+class account_analytic_profit(osv.osv_memory):
+ _name = 'hr.timesheet.analytic.profit'
+ _description = 'Print Timesheet Profit'
+ _columns = {
+ 'date_from': fields.date('From', required=True),
+ 'date_to': fields.date('To', required=True),
+ 'journal_ids': fields.many2many('account.analytic.journal', 'analytic_profit_journal_rel', 'analytic_id', 'journal_id', 'Journal', required=True),
+ 'employee_ids': fields.many2many('res.users', 'analytic_profit_emp_rel', 'analytic_id', 'emp_id', 'User', required=True),
+ }
+
+ def _date_from(*a):
+ return datetime.date.today().replace(day=1).strftime('%Y-%m-%d')
+
+ def _date_to(*a):
+ return datetime.date.today().strftime('%Y-%m-%d')
+
+ _defaults = {
+ 'date_from': _date_from,
+ 'date_to': _date_to
+ }
+
+ def print_report(self, cr, uid, ids, context=None):
+ line_obj = self.pool.get('account.analytic.line')
+ data = {}
+ data['form'] = self.read(cr, uid, ids, context=context)[0]
+ ids_chk = line_obj.search(cr, uid, [
+ ('date', '>=', data['form']['date_from']),
+ ('date', '<=', data['form']['date_to']),
+ ('journal_id', 'in', data['form']['journal_ids']),
+ ('user_id', 'in', data['form']['employee_ids']),
+ ], context=context)
+ if not ids_chk:
+ raise osv.except_osv(_('Insufficient Data!'), _('No record(s) found for this report.'))
+
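+ # Note (added for clarity): the (6, 0, ids) triple below is the ORM
+ # many2many write command meaning "replace the set of linked records
+ # with `ids`".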
+ data['form']['journal_ids'] = [(6, 0, data['form']['journal_ids'])] # Improve me => change the rml/sxw so that it can work without [0][2]
+ data['form']['employee_ids'] = [(6, 0, data['form']['employee_ids'])]
+ datas = {
+ 'ids': [],
+ 'model': 'account.analytic.line',
+ 'form': data['form']
+ }
+ return self.pool['report'].get_action(
+ cr, uid, [], 'hr_timesheet_invoice.report_analyticprofit', data=datas, context=context
+ )
+
+# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
+
+#!/usr/bin/python
+#
+# Copyright (c) 2017 Ansible Project
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+ANSIBLE_METADATA = {'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'}
+
+
+DOCUMENTATION = """
+---
+module: elasticache
+short_description: Manage cache clusters in Amazon Elasticache.
+description:
+ - Manage cache clusters in Amazon Elasticache.
+ - Returns information about the specified cache cluster.
+version_added: "1.4"
+requirements: [ boto3 ]
+author: "Jim Dalton (@jsdalton)"
+options:
+ state:
+ description:
+ - C(absent) or C(present) are idempotent actions that will create or destroy a cache cluster as needed. C(rebooted) will reboot the cluster,
+ resulting in a momentary outage.
+ choices: ['present', 'absent', 'rebooted']
+ required: true
+ name:
+ description:
+ - The cache cluster identifier
+ required: true
+ engine:
+ description:
+ - Name of the cache engine to be used.
+ required: false
+ default: memcached
+ choices: ['redis', 'memcached']
+ cache_engine_version:
+ description:
+ - The version number of the cache engine
+ required: false
+ default: None
+ node_type:
+ description:
+ - The compute and memory capacity of the nodes in the cache cluster
+ required: false
+ default: cache.m1.small
+ num_nodes:
+ description:
+ - The initial number of cache nodes that the cache cluster will have. Required when state=present.
+ required: false
+ cache_port:
+ description:
+ - The port number on which each of the cache nodes will accept connections
+ required: false
+ default: None
+ cache_parameter_group:
+ description:
+ - The name of the cache parameter group to associate with this cache cluster. If this argument is omitted, the default cache parameter group
+ for the specified engine will be used.
+ required: false
+ default: None
+ version_added: "2.0"
+ aliases: [ 'parameter_group' ]
+ cache_subnet_group:
+ description:
+ - The subnet group name to associate with. Only use if inside a vpc. Required if inside a vpc
+ required: false
+ default: None
+ version_added: "2.0"
+ security_group_ids:
+ description:
+ - A list of vpc security group names to associate with this cache cluster. Only use if inside a vpc
+ required: false
+ default: None
+ version_added: "1.6"
+ cache_security_groups:
+ description:
+ - A list of cache security group names to associate with this cache cluster. Must be an empty list if inside a vpc
+ required: false
+ default: None
+ zone:
+ description:
+ - The EC2 Availability Zone in which the cache cluster will be created
+ required: false
+ default: None
+ wait:
+ description:
+ - Wait for cache cluster result before returning
+ required: false
+ default: yes
+ choices: [ "yes", "no" ]
+ hard_modify:
+ description:
+ - Whether to destroy and recreate an existing cache cluster if necessary in order to modify its state
+ required: false
+ default: no
+ choices: [ "yes", "no" ]
+extends_documentation_fragment:
+ - aws
+ - ec2
+"""
+
+EXAMPLES = """
+# Note: None of these examples set aws_access_key, aws_secret_key, or region.
+# It is assumed that their matching environment variables are set.
+
+# Basic example
+- elasticache:
+ name: "test-please-delete"
+ state: present
+ engine: memcached
+ cache_engine_version: 1.4.14
+ node_type: cache.m1.small
+ num_nodes: 1
+ cache_port: 11211
+ cache_security_groups:
+ - default
+ zone: us-east-1d
+
+
+# Ensure cache cluster is gone
+- elasticache:
+ name: "test-please-delete"
+ state: absent
+
+# Reboot cache cluster
+- elasticache:
+ name: "test-please-delete"
+ state: rebooted
+
+"""
+from time import sleep
+from traceback import format_exc
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.ec2 import ec2_argument_spec, get_aws_connection_info, boto3_conn, HAS_BOTO3, camel_dict_to_snake_dict
+
+try:
+ import boto3
+ import botocore
+except ImportError:
+ pass # will be detected by imported HAS_BOTO3
+
+
+class ElastiCacheManager(object):
+
+ """Handles elasticache creation and destruction"""
+
+ EXIST_STATUSES = ['available', 'creating', 'rebooting', 'modifying']
+
+ def __init__(self, module, name, engine, cache_engine_version, node_type,
+ num_nodes, cache_port, cache_parameter_group, cache_subnet_group,
+ cache_security_groups, security_group_ids, zone, wait,
+ hard_modify, region, **aws_connect_kwargs):
+ self.module = module
+ self.name = name
+ self.engine = engine.lower()
+ self.cache_engine_version = cache_engine_version
+ self.node_type = node_type
+ self.num_nodes = num_nodes
+ self.cache_port = cache_port
+ self.cache_parameter_group = cache_parameter_group
+ self.cache_subnet_group = cache_subnet_group
+ self.cache_security_groups = cache_security_groups
+ self.security_group_ids = security_group_ids
+ self.zone = zone
+ self.wait = wait
+ self.hard_modify = hard_modify
+
+ self.region = region
+ self.aws_connect_kwargs = aws_connect_kwargs
+
+ self.changed = False
+ self.data = None
+ self.status = 'gone'
+ self.conn = self._get_elasticache_connection()
+ self._refresh_data()
+
+ def ensure_present(self):
+ """Ensure cache cluster exists or create it if not"""
+ if self.exists():
+ self.sync()
+ else:
+ self.create()
+
+ def ensure_absent(self):
+ """Ensure cache cluster is gone or delete it if not"""
+ self.delete()
+
+ def ensure_rebooted(self):
+ """Ensure cache cluster is gone or delete it if not"""
+ self.reboot()
+
+ def exists(self):
+ """Check if cache cluster exists"""
+ return self.status in self.EXIST_STATUSES
+
+ def create(self):
+ """Create an ElastiCache cluster"""
+ if self.status == 'available':
+ return
+ if self.status in ['creating', 'rebooting', 'modifying']:
+ if self.wait:
+ self._wait_for_status('available')
+ return
+ if self.status == 'deleting':
+ if self.wait:
+ self._wait_for_status('gone')
+ else:
+ msg = "'%s' is currently deleting. Cannot create."
+ self.module.fail_json(msg=msg % self.name)
+
+ kwargs = dict(CacheClusterId=self.name,
+ NumCacheNodes=self.num_nodes,
+ CacheNodeType=self.node_type,
+ Engine=self.engine,
+ EngineVersion=self.cache_engine_version,
+ CacheSecurityGroupNames=self.cache_security_groups,
+ SecurityGroupIds=self.security_group_ids,
+ CacheParameterGroupName=self.cache_parameter_group,
+ CacheSubnetGroupName=self.cache_subnet_group,
+ PreferredAvailabilityZone=self.zone)
+ if self.cache_port is not None:
+ kwargs['Port'] = self.cache_port
+
+ try:
+ self.conn.create_cache_cluster(**kwargs)
+
+ except botocore.exceptions.ClientError as e:
+ self.module.fail_json(msg=e.message, exception=format_exc(),
+ **camel_dict_to_snake_dict(e.response))
+
+ self._refresh_data()
+
+ self.changed = True
+ if self.wait:
+ self._wait_for_status('available')
+ return True
+
+ def delete(self):
+ """Destroy an ElastiCache cluster"""
+ if self.status == 'gone':
+ return
+ if self.status == 'deleting':
+ if self.wait:
+ self._wait_for_status('gone')
+ return
+ if self.status in ['creating', 'rebooting', 'modifying']:
+ if self.wait:
+ self._wait_for_status('available')
+ else:
+ msg = "'%s' is currently %s. Cannot delete."
+ self.module.fail_json(msg=msg % (self.name, self.status))
+
+ try:
+ response = self.conn.delete_cache_cluster(CacheClusterId=self.name)
+ except botocore.exceptions.ClientError as e:
+ self.module.fail_json(msg=e.message, exception=format_exc(),
+ **camel_dict_to_snake_dict(e.response))
+
+ cache_cluster_data = response['CacheCluster']
+ self._refresh_data(cache_cluster_data)
+
+ self.changed = True
+ if self.wait:
+ self._wait_for_status('gone')
+
+ def sync(self):
+ """Sync settings to cluster if required"""
+ if not self.exists():
+ msg = "'%s' is %s. Cannot sync."
+ self.module.fail_json(msg=msg % (self.name, self.status))
+
+ if self.status in ['creating', 'rebooting', 'modifying']:
+ if self.wait:
+ self._wait_for_status('available')
+ else:
+ # Cluster can only be synced if available. If we can't wait
+ # for this, then just be done.
+ return
+
+ if self._requires_destroy_and_create():
+ if not self.hard_modify:
+ msg = "'%s' requires destructive modification. 'hard_modify' must be set to true to proceed."
+ self.module.fail_json(msg=msg % self.name)
+ if not self.wait:
+ msg = "'%s' requires destructive modification. 'wait' must be set to true."
+ self.module.fail_json(msg=msg % self.name)
+ self.delete()
+ self.create()
+ return
+
+ if self._requires_modification():
+ self.modify()
+
+ def modify(self):
+ """Modify the cache cluster. Note it's only possible to modify a few select options."""
+ nodes_to_remove = self._get_nodes_to_remove()
+ try:
+ self.conn.modify_cache_cluster(CacheClusterId=self.name,
+ NumCacheNodes=self.num_nodes,
+ CacheNodeIdsToRemove=nodes_to_remove,
+ CacheSecurityGroupNames=self.cache_security_groups,
+ CacheParameterGroupName=self.cache_parameter_group,
+ SecurityGroupIds=self.security_group_ids,
+ ApplyImmediately=True,
+ EngineVersion=self.cache_engine_version)
+ except botocore.exceptions.ClientError as e:
+ self.module.fail_json(msg=e.message, exception=format_exc(),
+ **camel_dict_to_snake_dict(e.response))
+
+ self._refresh_data()
+
+ self.changed = True
+ if self.wait:
+ self._wait_for_status('available')
+
+ def reboot(self):
+ """Reboot the cache cluster"""
+ if not self.exists():
+ msg = "'%s' is %s. Cannot reboot."
+ self.module.fail_json(msg=msg % (self.name, self.status))
+ if self.status == 'rebooting':
+ return
+ if self.status in ['creating', 'modifying']:
+ if self.wait:
+ self._wait_for_status('available')
+ else:
+ msg = "'%s' is currently %s. Cannot reboot."
+ self.module.fail_json(msg=msg % (self.name, self.status))
+
+ # Collect ALL nodes for reboot
+ cache_node_ids = [cn['CacheNodeId'] for cn in self.data['CacheNodes']]
+ try:
+ self.conn.reboot_cache_cluster(CacheClusterId=self.name,
+ CacheNodeIdsToReboot=cache_node_ids)
+ except botocore.exceptions.ClientError as e:
+ self.module.fail_json(msg=e.message, exception=format_exc(),
+ **camel_dict_to_snake_dict(e.response))
+
+ self._refresh_data()
+
+ self.changed = True
+ if self.wait:
+ self._wait_for_status('available')
+
+ def get_info(self):
+ """Return basic info about the cache cluster"""
+ info = {
+ 'name': self.name,
+ 'status': self.status
+ }
+ if self.data:
+ info['data'] = self.data
+ return info
+
+ def _wait_for_status(self, awaited_status):
+ """Wait for status to change from present status to awaited_status"""
+ status_map = {
+ 'creating': 'available',
+ 'rebooting': 'available',
+ 'modifying': 'available',
+ 'deleting': 'gone'
+ }
+ if self.status == awaited_status:
+ # No need to wait, we're already done
+ return
+ if status_map[self.status] != awaited_status:
+ msg = "Invalid awaited status. '%s' cannot transition to '%s'"
+ self.module.fail_json(msg=msg % (self.status, awaited_status))
+
+ if awaited_status not in set(status_map.values()):
+ msg = "'%s' is not a valid awaited status."
+ self.module.fail_json(msg=msg % awaited_status)
+
+ while True:
+ sleep(1)
+ self._refresh_data()
+ if self.status == awaited_status:
+ break
+
+ def _requires_modification(self):
+ """Check if cluster requires (nondestructive) modification"""
+ # Check modifiable data attributes
+ modifiable_data = {
+ 'NumCacheNodes': self.num_nodes,
+ 'EngineVersion': self.cache_engine_version
+ }
+ for key, value in modifiable_data.items():
+ if value is not None and self.data[key] != value:
+ return True
+
+ # Check cache security groups
+ cache_security_groups = []
+ for sg in self.data['CacheSecurityGroups']:
+ cache_security_groups.append(sg['CacheSecurityGroupName'])
+ if set(cache_security_groups) != set(self.cache_security_groups):
+ return True
+
+ # check vpc security groups
+ if self.security_group_ids:
+ vpc_security_groups = []
+ security_groups = self.data['SecurityGroups'] or []
+ for sg in security_groups:
+ vpc_security_groups.append(sg['SecurityGroupId'])
+ if set(vpc_security_groups) != set(self.security_group_ids):
+ return True
+
+ return False
+
+ def _requires_destroy_and_create(self):
+ """
+ Check whether a destroy and create is required to synchronize the cluster.
+ """
+ unmodifiable_data = {
+ 'node_type': self.data['CacheNodeType'],
+ 'engine': self.data['Engine'],
+ 'cache_port': self._get_port()
+ }
+ # Only check for modifications if zone is specified
+ if self.zone is not None:
+ unmodifiable_data['zone'] = self.data['PreferredAvailabilityZone']
+ for key, value in unmodifiable_data.items():
+ if getattr(self, key) is not None and getattr(self, key) != value:
+ return True
+ return False
+
+ def _get_elasticache_connection(self):
+ """Get an elasticache connection"""
+ region, ec2_url, aws_connect_params = get_aws_connection_info(self.module, boto3=True)
+ if region:
+ return boto3_conn(self.module, conn_type='client', resource='elasticache',
+ region=region, endpoint=ec2_url, **aws_connect_params)
+ else:
+ self.module.fail_json(msg="region must be specified")
+
+ def _get_port(self):
+ """Get the port. Where this information is retrieved from is engine dependent."""
+ if self.data['Engine'] == 'memcached':
+ return self.data['ConfigurationEndpoint']['Port']
+ elif self.data['Engine'] == 'redis':
+ # Redis only supports a single node (presently) so just use
+ # the first and only
+ return self.data['CacheNodes'][0]['Endpoint']['Port']
+
+ def _refresh_data(self, cache_cluster_data=None):
+ """Refresh data about this cache cluster"""
+
+ if cache_cluster_data is None:
+ try:
+ response = self.conn.describe_cache_clusters(CacheClusterId=self.name, ShowCacheNodeInfo=True)
+ except botocore.exceptions.ClientError as e:
+ if e.response['Error']['Code'] == 'CacheClusterNotFound':
+ self.data = None
+ self.status = 'gone'
+ return
+ else:
+ self.module.fail_json(msg=e.message, exception=format_exc(),
+ **camel_dict_to_snake_dict(e.response))
+ cache_cluster_data = response['CacheClusters'][0]
+ self.data = cache_cluster_data
+ self.status = self.data['CacheClusterStatus']
+
+ # The documentation for elasticache lies -- status on rebooting is set
+ # to 'rebooting cache cluster nodes' instead of 'rebooting'. Fix it
+ # here to make status checks etc. more sane.
+ if self.status == 'rebooting cache cluster nodes':
+ self.status = 'rebooting'
+
+ def _get_nodes_to_remove(self):
+ """If there are nodes to remove, it figures out which need to be removed"""
+ num_nodes_to_remove = self.data['NumCacheNodes'] - self.num_nodes
+ if num_nodes_to_remove <= 0:
+ return []
+
+ if not self.hard_modify:
+ msg = "'%s' requires removal of cache nodes. 'hard_modify' must be set to true to proceed."
+ self.module.fail_json(msg=msg % self.name)
+
+ cache_node_ids = [cn['CacheNodeId'] for cn in self.data['CacheNodes']]
+ return cache_node_ids[-num_nodes_to_remove:]
+
+
+def main():
+ """ elasticache ansible module """
+ argument_spec = ec2_argument_spec()
+ argument_spec.update(dict(
+ state=dict(required=True, choices=['present', 'absent', 'rebooted']),
+ name=dict(required=True),
+ engine=dict(default='memcached'),
+ cache_engine_version=dict(default=""),
+ node_type=dict(default='cache.t2.small'),
+ num_nodes=dict(default=1, type='int'),
+ # alias for compat with the original PR 1950
+ cache_parameter_group=dict(default="", aliases=['parameter_group']),
+ cache_port=dict(type='int'),
+ cache_subnet_group=dict(default=""),
+ cache_security_groups=dict(default=[], type='list'),
+ security_group_ids=dict(default=[], type='list'),
+ zone=dict(default=""),
+ wait=dict(default=True, type='bool'),
+ hard_modify=dict(type='bool')
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ )
+
+ if not HAS_BOTO3:
+ module.fail_json(msg='boto3 required for this module')
+
+ region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)
+
+ name = module.params['name']
+ state = module.params['state']
+ engine = module.params['engine']
+ cache_engine_version = module.params['cache_engine_version']
+ node_type = module.params['node_type']
+ num_nodes = module.params['num_nodes']
+ cache_port = module.params['cache_port']
+ cache_subnet_group = module.params['cache_subnet_group']
+ cache_security_groups = module.params['cache_security_groups']
+ security_group_ids = module.params['security_group_ids']
+ zone = module.params['zone']
+ wait = module.params['wait']
+ hard_modify = module.params['hard_modify']
+ cache_parameter_group = module.params['cache_parameter_group']
+
+ if cache_subnet_group and cache_security_groups:
+ module.fail_json(msg="Can't specify both cache_subnet_group and cache_security_groups")
+
+ if state == 'present' and not num_nodes:
+ module.fail_json(msg="'num_nodes' is a required parameter. Please specify num_nodes > 0")
+
+ elasticache_manager = ElastiCacheManager(module, name, engine,
+ cache_engine_version, node_type,
+ num_nodes, cache_port,
+ cache_parameter_group,
+ cache_subnet_group,
+ cache_security_groups,
+ security_group_ids, zone, wait,
+ hard_modify, region, **aws_connect_kwargs)
+
+ if state == 'present':
+ elasticache_manager.ensure_present()
+ elif state == 'absent':
+ elasticache_manager.ensure_absent()
+ elif state == 'rebooted':
+ elasticache_manager.ensure_rebooted()
+
+ facts_result = dict(changed=elasticache_manager.changed,
+ elasticache=elasticache_manager.get_info())
+
+ module.exit_json(**facts_result)
+
+if __name__ == '__main__':
+ main()
+
+# Copyright (c) 2007 The Hewlett-Packard Development Company
+# All rights reserved.
+#
+# The license below extends only to copyright in the software and shall
+# not be construed as granting a license to any other intellectual
+# property including but not limited to intellectual property relating
+# to a hardware implementation of the functionality of the software
+# licensed hereunder. You may use the software subject to the license
+# terms below provided that you ensure that this notice is replicated
+# unmodified and in its entirety in all distributions of the software,
+# modified or unmodified, in source code or in binary form.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met: redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer;
+# redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution;
+# neither the name of the copyright holders nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# Authors: Gabe Black
+
+categories = ["arithmetic",
+ "cache_and_memory_management",
+ "compare_and_test",
+ "control_transfer",
+ "data_conversion",
+ "data_transfer",
+ "flags",
+ "input_output",
+ "load_effective_address",
+ "load_segment_registers",
+ "logical",
+ "no_operation",
+ "rotate_and_shift",
+ "semaphores",
+ "string",
+ "system_calls"]
+
+microcode = '''
+# Microcode for general purpose instructions
+'''
+for category in categories:
+ exec "import %s as cat" % category
+ microcode += cat.microcode
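+
+# Illustrative alternative (not part of the gem5 source): the same dynamic
+# module aggregation written with importlib instead of the Python 2 exec
+# statement above. The category module names are assumed to be importable.
+import importlib
+
+def collect_microcode(category_names):
+    """Concatenate the `microcode` attribute of each category module."""
+    text = '# Microcode for general purpose instructions\n'
+    for name in category_names:
+        text += importlib.import_module(name).microcode
+    return text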
+
+from rest_framework import serializers as ser
+from api.base.serializers import JSONAPISerializer, LinksField
+from api.base.utils import absolute_reverse
+
+class NodeAddonFolderSerializer(JSONAPISerializer):
+ class Meta:
+ type_ = 'node_addon_folders'
+
+ id = ser.CharField(read_only=True)
+ kind = ser.CharField(default='folder', read_only=True)
+ name = ser.CharField(read_only=True)
+ folder_id = ser.CharField(source='id', read_only=True)
+ path = ser.CharField(read_only=True)
+ provider = ser.CharField(source='addon', read_only=True)
+
+ links = LinksField({
+ 'children': 'get_absolute_url',
+ 'root': 'get_root_folder',
+ })
+
+ def get_absolute_url(self, obj):
+ if obj['addon'] in ('s3', 'figshare', ):
+ # These addons don't currently support linking anything other
+ # than top-level objects.
+ return
+ return absolute_reverse(
+ 'nodes:node-addon-folders',
+ kwargs=self.context['request'].parser_context['kwargs'],
+ query_kwargs={
+ 'path': obj['path'],
+ 'id': obj['id']
+ }
+ )
+
+ def get_root_folder(self, obj):
+ return absolute_reverse(
+ 'nodes:node-addon-folders',
+ kwargs=self.context['request'].parser_context['kwargs'],
+ )
+
+class AddonSerializer(JSONAPISerializer):
+ filterable_fields = frozenset([
+ 'categories',
+ ])
+
+ class Meta:
+ type_ = 'addon'
+
+ id = ser.CharField(source='short_name', read_only=True)
+ name = ser.CharField(source='full_name', read_only=True)
+ description = ser.CharField(read_only=True)
+ url = ser.CharField(read_only=True)
+ categories = ser.ListField(read_only=True)
+
+ def get_absolute_url(self, obj):
+ return absolute_reverse(
+ 'addons:addon-list',
+ kwargs=self.context['request'].parser_context['kwargs'],
+ )
+
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2017 F5 Networks Inc.
+# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import json
+import pytest
+import sys
+
+from nose.plugins.skip import SkipTest
+if sys.version_info < (2, 7):
+ raise SkipTest("F5 Ansible modules require Python >= 2.7")
+
+from ansible.compat.tests import unittest
+from ansible.compat.tests.mock import Mock
+from ansible.compat.tests.mock import patch
+from ansible.module_utils.basic import AnsibleModule
+
+try:
+ from library.bigip_selfip import ApiParameters
+ from library.bigip_selfip import ModuleParameters
+ from library.bigip_selfip import ModuleManager
+ from library.bigip_selfip import ArgumentSpec
+ from library.module_utils.network.f5.common import F5ModuleError
+ from library.module_utils.network.f5.common import iControlUnexpectedHTTPError
+ from test.unit.modules.utils import set_module_args
+except ImportError:
+ try:
+ from ansible.modules.network.f5.bigip_selfip import ApiParameters
+ from ansible.modules.network.f5.bigip_selfip import ModuleParameters
+ from ansible.modules.network.f5.bigip_selfip import ModuleManager
+ from ansible.modules.network.f5.bigip_selfip import ArgumentSpec
+ from ansible.module_utils.network.f5.common import F5ModuleError
+ from ansible.module_utils.network.f5.common import iControlUnexpectedHTTPError
+ from units.modules.utils import set_module_args
+ except ImportError:
+ raise SkipTest("F5 Ansible modules require the f5-sdk Python library")
+
+fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
+fixture_data = {}
+
+
+def load_fixture(name):
+ path = os.path.join(fixture_path, name)
+
+ if path in fixture_data:
+ return fixture_data[path]
+
+ with open(path) as f:
+ data = f.read()
+
+ try:
+ data = json.loads(data)
+ except Exception:
+ pass
+
+ fixture_data[path] = data
+ return data
+
+
+class TestParameters(unittest.TestCase):
+ def test_module_parameters(self):
+ args = dict(
+ address='10.10.10.10',
+ allow_service=[
+ 'tcp:80',
+ 'udp:53',
+ 'gre'
+ ],
+ name='net1',
+ netmask='255.255.255.0',
+ partition='Common',
+ route_domain='1',
+ state='present',
+ traffic_group='traffic-group-local-only',
+ vlan='net1'
+ )
+ p = ModuleParameters(params=args)
+ assert p.address == '10.10.10.10%1/24'
+ assert p.allow_service == ['gre:0', 'tcp:80', 'udp:53']
+ assert p.name == 'net1'
+ assert p.netmask == 24
+ assert p.route_domain == 1
+ assert p.traffic_group == '/Common/traffic-group-local-only'
+ assert p.vlan == '/Common/net1'
+
+ def test_module_invalid_service(self):
+ args = dict(
+ allow_service=[
+ 'tcp:80',
+ 'udp:53',
+ 'grp'
+ ]
+ )
+ p = ModuleParameters(params=args)
+ with pytest.raises(F5ModuleError) as ex:
+ assert p.allow_service == ['grp', 'tcp:80', 'udp:53']
+ assert 'The provided protocol' in str(ex)
+
+ def test_api_parameters(self):
+ args = dict(
+ address='10.10.10.10%1/24',
+ allowService=[
+ 'tcp:80',
+ 'udp:53',
+ 'gre'
+ ],
+ name='net1',
+ state='present',
+ trafficGroup='/Common/traffic-group-local-only',
+ vlan='net1'
+ )
+ p = ApiParameters(params=args)
+ assert p.address == '10.10.10.10%1/24'
+ assert p.allow_service == ['gre', 'tcp:80', 'udp:53']
+ assert p.name == 'net1'
+ assert p.netmask == 24
+ assert p.traffic_group == '/Common/traffic-group-local-only'
+ assert p.vlan == '/Common/net1'
+
+
+class TestManager(unittest.TestCase):
+
+ def setUp(self):
+ self.spec = ArgumentSpec()
+
+ def test_create_selfip(self, *args):
+ set_module_args(dict(
+ address='10.10.10.10',
+ allow_service=[
+ 'tcp:80',
+ 'udp:53',
+ 'gre'
+ ],
+ name='net1',
+ netmask='255.255.255.0',
+ partition='Common',
+ route_domain='1',
+ state='present',
+ traffic_group='traffic-group-local-only',
+ vlan='net1',
+ password='passsword',
+ server='localhost',
+ user='admin'
+ ))
+
+ module = AnsibleModule(
+ argument_spec=self.spec.argument_spec,
+ supports_check_mode=self.spec.supports_check_mode
+ )
+ mm = ModuleManager(module=module)
+
+ # Override methods to force specific logic in the module to happen
+ mm.exists = Mock(side_effect=[False, True])
+ mm.create_on_device = Mock(return_value=True)
+
+ results = mm.exec_module()
+
+ assert results['changed'] is True
+
+ def test_create_selfip_idempotent(self, *args):
+ set_module_args(dict(
+ address='10.10.10.10',
+ allow_service=[
+ 'tcp:80',
+ 'udp:53',
+ 'gre'
+ ],
+ name='net1',
+ netmask='255.255.255.0',
+ partition='Common',
+ route_domain='1',
+ state='present',
+ traffic_group='traffic-group-local-only',
+ vlan='net1',
+ password='passsword',
+ server='localhost',
+ user='admin'
+ ))
+
+ current = ApiParameters(params=load_fixture('load_tm_net_self.json'))
+
+ module = AnsibleModule(
+ argument_spec=self.spec.argument_spec,
+ supports_check_mode=self.spec.supports_check_mode
+ )
+ mm = ModuleManager(module=module)
+
+ # Override methods to force specific logic in the module to happen
+ mm.exists = Mock(side_effect=[True, True])
+ mm.read_current_from_device = Mock(return_value=current)
+
+ results = mm.exec_module()
+
+ assert results['changed'] is False
+
+from cms.toolbar.items import Menu, ModalItem, SubMenu
+from cms.utils.i18n import get_language_object
+from django.contrib.auth.models import Permission, User
+from django.test.utils import override_settings
+from django.urls import reverse
+from django.utils.encoding import force_text
+
+from djangocms_page_meta.cms_toolbars import PAGE_META_ITEM_TITLE, PAGE_META_MENU_TITLE
+from djangocms_page_meta.models import PageMeta, TitleMeta
+
+from . import BaseTest
+
+
+class ToolbarTest(BaseTest):
+ def test_no_page(self):
+ """
+ Test that no page menu is present if the request is not on a page
+ """
+ from cms.toolbar.toolbar import CMSToolbar
+
+ request = self.get_page_request(None, self.user, "/", edit=True)
+ toolbar = CMSToolbar(request)
+ toolbar.get_left_items()
+ page_menu = toolbar.find_items(Menu, name="Page")
+ self.assertEqual(page_menu, [])
+
+ def test_no_perm(self):
+ """
+ Test that no page menu is present if user has no perm
+ """
+ from cms.toolbar.toolbar import CMSToolbar
+
+ page1, __ = self.get_pages()
+ request = self.get_page_request(page1, self.user_staff, "/", edit=True)
+ toolbar = CMSToolbar(request)
+ toolbar.get_left_items()
+ page_menu = toolbar.find_items(Menu, name="Page")
+ try:
+ self.assertEqual(page_menu, [])
+ except AssertionError:
+ meta_menu = page_menu[0].item.find_items(SubMenu, name=force_text(PAGE_META_MENU_TITLE))
+ self.assertEqual(meta_menu, [])
+
+ def test_perm(self):
+ """
+ Test that page meta menu is present if user has Page.change_perm
+ """
+ from cms.toolbar.toolbar import CMSToolbar
+
+ page1, __ = self.get_pages()
+ self.user_staff.user_permissions.add(Permission.objects.get(codename="change_page"))
+ self.user_staff = User.objects.get(pk=self.user_staff.pk)
+ request = self.get_page_request(page1, self.user_staff, "/", edit=True)
+ toolbar = CMSToolbar(request)
+ toolbar.get_left_items()
+ page_menu = toolbar.menus["page"]
+ meta_menu = page_menu.find_items(SubMenu, name=force_text(PAGE_META_MENU_TITLE))[0].item
+ self.assertEqual(
+ len(meta_menu.find_items(ModalItem, name="{}...".format(force_text(PAGE_META_ITEM_TITLE)))), 1
+ )
+
+ @override_settings(CMS_PERMISSION=True)
+ def test_perm_permissions(self):
+ """
+ Test that no page menu is present if user has general page Page.change_perm but not permission on current page
+ """
+ from cms.toolbar.toolbar import CMSToolbar
+
+ page1, __ = self.get_pages()
+ self.user_staff.user_permissions.add(Permission.objects.get(codename="change_page"))
+ self.user_staff = User.objects.get(pk=self.user_staff.pk)
+ request = self.get_page_request(page1, self.user_staff, "/", edit=True)
+ toolbar = CMSToolbar(request)
+ toolbar.get_left_items()
+ page_menu = toolbar.find_items(Menu, name="Page")
+ try:
+ self.assertEqual(page_menu, [])
+ except AssertionError:
+ meta_menu = page_menu[0].item.find_items(SubMenu, name=force_text(PAGE_META_MENU_TITLE))
+ self.assertEqual(meta_menu, [])
+
+ def test_toolbar(self):
+ """
+ Test that PageMeta/TitleMeta items are present for superuser
+ """
+ from cms.toolbar.toolbar import CMSToolbar
+
+ NEW_CMS_LANGS = { # noqa: N806
+ 1: [
+ {
+ "code": "en",
+ "name": "English",
+ "public": True,
+ },
+ {
+ "code": "it",
+ "name": "Italiano",
+ "public": True,
+ },
+ ],
+ "default": {
+ "hide_untranslated": False,
+ },
+ }
+
+ page1, __ = self.get_pages()
+ with self.settings(CMS_LANGUAGES=NEW_CMS_LANGS):
+ request = self.get_page_request(page1, self.user, "/", edit=True)
+ toolbar = CMSToolbar(request)
+ toolbar.get_left_items()
+ page_menu = toolbar.menus["page"]
+ meta_menu = page_menu.find_items(SubMenu, name=force_text(PAGE_META_MENU_TITLE))[0].item
+ self.assertEqual(
+ len(meta_menu.find_items(ModalItem, name="{}...".format(force_text(PAGE_META_ITEM_TITLE)))), 1
+ )
+ self.assertEqual(len(meta_menu.find_items(ModalItem)), len(NEW_CMS_LANGS[1]) + 1)
+
+ def test_toolbar_with_items(self):
+ """
+ Test that PageMeta/TitleMeta items are present for superuser if PageMeta/TitleMeta exists for current page
+ """
+ from cms.toolbar.toolbar import CMSToolbar
+
+ page1, __ = self.get_pages()
+ page_ext = PageMeta.objects.create(extended_object=page1)
+ title_meta = TitleMeta.objects.create(extended_object=page1.get_title_obj("en"))
+ request = self.get_page_request(page1, self.user, "/", edit=True)
+ toolbar = CMSToolbar(request)
+ toolbar.get_left_items()
+ page_menu = toolbar.menus["page"]
+ meta_menu = page_menu.find_items(SubMenu, name=force_text(PAGE_META_MENU_TITLE))[0].item
+ pagemeta_menu = meta_menu.find_items(ModalItem, name="{}...".format(force_text(PAGE_META_ITEM_TITLE)))
+ self.assertEqual(len(pagemeta_menu), 1)
+ self.assertTrue(
+ pagemeta_menu[0].item.url.startswith(
+ reverse("admin:djangocms_page_meta_pagemeta_change", args=(page_ext.pk,))
+ )
+ )
+ url_change = False
+ url_add = False
+ for title in page1.title_set.all():
+ language = get_language_object(title.language)
+ titlemeta_menu = meta_menu.find_items(ModalItem, name="{}...".format(language["name"]))
+ self.assertEqual(len(titlemeta_menu), 1)
+ try:
+ title_ext = TitleMeta.objects.get(extended_object_id=title.pk)
+ self.assertEqual(title_ext, title_meta)
+ self.assertTrue(
+ titlemeta_menu[0].item.url.startswith(
+ reverse("admin:djangocms_page_meta_titlemeta_change", args=(title_ext.pk,))
+ )
+ )
+ url_change = True
+ except TitleMeta.DoesNotExist:
+ self.assertTrue(
+ titlemeta_menu[0].item.url.startswith(reverse("admin:djangocms_page_meta_titlemeta_add"))
+ )
+ url_add = True
+ self.assertTrue(url_change and url_add)
+
+"""
+Tests for Course Blocks forms
+"""
+import ddt
+from django.http import Http404, QueryDict
+from urllib import urlencode
+from rest_framework.exceptions import PermissionDenied
+
+from opaque_keys.edx.locator import CourseLocator
+from openedx.core.djangoapps.util.test_forms import FormTestMixin
+from student.models import CourseEnrollment
+from student.tests.factories import UserFactory, CourseEnrollmentFactory
+from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase
+from xmodule.modulestore.tests.factories import CourseFactory
+
+from ..forms import BlockListGetForm
+
+
+@ddt.ddt
+class TestBlockListGetForm(FormTestMixin, SharedModuleStoreTestCase):
+ """
+ Tests for BlockListGetForm
+ """
+ FORM_CLASS = BlockListGetForm
+
+ @classmethod
+ def setUpClass(cls):
+ super(TestBlockListGetForm, cls).setUpClass()
+
+ cls.course = CourseFactory.create()
+
+ def setUp(self):
+ super(TestBlockListGetForm, self).setUp()
+
+ self.student = UserFactory.create()
+ self.student2 = UserFactory.create()
+ self.staff = UserFactory.create(is_staff=True)
+
+ CourseEnrollmentFactory.create(user=self.student, course_id=self.course.id)
+ CourseEnrollmentFactory.create(user=self.student2, course_id=self.course.id)
+
+ usage_key = self.course.location
+ self.initial = {'requesting_user': self.student}
+ self.form_data = QueryDict(
+ urlencode({
+ 'username': self.student.username,
+ 'usage_key': unicode(usage_key),
+ }),
+ mutable=True,
+ )
+ self.cleaned_data = {
+ 'all_blocks': None,
+ 'block_counts': set(),
+ 'depth': 0,
+ 'nav_depth': None,
+ 'return_type': 'dict',
+ 'requested_fields': {'display_name', 'type'},
+ 'student_view_data': set(),
+ 'usage_key': usage_key,
+ 'username': self.student.username,
+ 'user': self.student,
+ }
+
+ def assert_raises_permission_denied(self):
+ """
+ Fail unless permission is denied to the form
+ """
+ with self.assertRaises(PermissionDenied):
+ self.get_form(expected_valid=False)
+
+ def assert_raises_not_found(self):
+ """
+ Fail unless a 404 occurs
+ """
+ with self.assertRaises(Http404):
+ self.get_form(expected_valid=False)
+
+ def assert_equals_cleaned_data(self):
+ """
+ Check that the form returns the expected data
+ """
+ form = self.get_form(expected_valid=True)
+ self.assertDictEqual(form.cleaned_data, self.cleaned_data)
+
+ def test_basic(self):
+ self.assert_equals_cleaned_data()
+
+ #-- usage key
+
+ def test_no_usage_key_param(self):
+ self.form_data.pop('usage_key')
+ self.assert_error('usage_key', "This field is required.")
+
+ def test_invalid_usage_key(self):
+ self.form_data['usage_key'] = 'invalid_usage_key'
+ self.assert_error('usage_key', "'invalid_usage_key' is not a valid usage key.")
+
+ def test_non_existent_usage_key(self):
+ self.form_data['usage_key'] = self.store.make_course_usage_key(CourseLocator('non', 'existent', 'course'))
+ self.assert_raises_permission_denied()
+
+ #-- user
+
+ @ddt.data("True", "true", True)
+ def test_no_user_all_blocks_true(self, all_blocks_value):
+ self.initial = {'requesting_user': self.staff}
+
+ self.form_data.pop('username')
+ self.form_data['all_blocks'] = all_blocks_value
+ self.get_form(expected_valid=True)
+
+ @ddt.data("False", "false", False)
+ def test_no_user_all_blocks_false(self, all_blocks_value):
+ self.initial = {'requesting_user': self.staff}
+
+ self.form_data.pop('username')
+ self.form_data['all_blocks'] = all_blocks_value
+ self.assert_error('username', "This field is required unless all_blocks is requested.")
+
+ def test_no_user_all_blocks_none(self):
+ self.initial = {'requesting_user': self.staff}
+
+ self.form_data.pop('username')
+ self.assert_error('username', "This field is required unless all_blocks is requested.")
+
+ def test_no_user_non_staff(self):
+ self.form_data.pop('username')
+ self.form_data['all_blocks'] = True
+ self.assert_raises_permission_denied()
+
+ def test_nonexistent_user_by_student(self):
+ self.form_data['username'] = 'non_existent_user'
+ self.assert_raises_permission_denied()
+
+ def test_nonexistent_user_by_staff(self):
+ self.initial = {'requesting_user': self.staff}
+ self.form_data['username'] = 'non_existent_user'
+ self.assert_raises_not_found()
+
+ def test_other_user_by_student(self):
+ self.form_data['username'] = self.student2.username
+ self.assert_raises_permission_denied()
+
+ def test_other_user_by_staff(self):
+ self.initial = {'requesting_user': self.staff}
+ self.get_form(expected_valid=True)
+
+ def test_unenrolled_student(self):
+ CourseEnrollment.unenroll(self.student, self.course.id)
+ self.assert_raises_permission_denied()
+
+ def test_unenrolled_staff(self):
+ CourseEnrollment.unenroll(self.staff, self.course.id)
+ self.initial = {'requesting_user': self.staff}
+ self.form_data['username'] = self.staff.username
+ self.get_form(expected_valid=True)
+
+ def test_unenrolled_student_by_staff(self):
+ CourseEnrollment.unenroll(self.student, self.course.id)
+ self.initial = {'requesting_user': self.staff}
+ self.get_form(expected_valid=True)
+
+ #-- depth
+
+ def test_depth_integer(self):
+ self.form_data['depth'] = 3
+ self.cleaned_data['depth'] = 3
+ self.assert_equals_cleaned_data()
+
+ def test_depth_all(self):
+ self.form_data['depth'] = 'all'
+ self.cleaned_data['depth'] = None
+ self.assert_equals_cleaned_data()
+
+ def test_depth_invalid(self):
+ self.form_data['depth'] = 'not_an_integer'
+ self.assert_error('depth', "'not_an_integer' is not a valid depth value.")
+
+ #-- nav depth
+
+ def test_nav_depth(self):
+ self.form_data['nav_depth'] = 3
+ self.cleaned_data['nav_depth'] = 3
+ self.cleaned_data['requested_fields'] |= {'nav_depth'}
+ self.assert_equals_cleaned_data()
+
+ def test_nav_depth_invalid(self):
+ self.form_data['nav_depth'] = 'not_an_integer'
+ self.assert_error('nav_depth', "Enter a whole number.")
+
+ def test_nav_depth_negative(self):
+ self.form_data['nav_depth'] = -1
+ self.assert_error('nav_depth', "Ensure this value is greater than or equal to 0.")
+
+ #-- return_type
+
+ def test_return_type(self):
+ self.form_data['return_type'] = 'list'
+ self.cleaned_data['return_type'] = 'list'
+ self.assert_equals_cleaned_data()
+
+ def test_return_type_invalid(self):
+ self.form_data['return_type'] = 'invalid_return_type'
+ self.assert_error(
+ 'return_type',
+ "Select a valid choice. invalid_return_type is not one of the available choices."
+ )
+
+ #-- requested fields
+
+ def test_requested_fields(self):
+ self.form_data.setlist('requested_fields', ['graded', 'nav_depth', 'some_other_field'])
+ self.cleaned_data['requested_fields'] |= {'graded', 'nav_depth', 'some_other_field'}
+ self.assert_equals_cleaned_data()
+
+ @ddt.data('block_counts', 'student_view_data')
+ def test_higher_order_field(self, field_name):
+ field_value = {'block_type1', 'block_type2'}
+ self.form_data.setlist(field_name, field_value)
+ self.cleaned_data[field_name] = field_value
+ self.cleaned_data['requested_fields'].add(field_name)
+ self.assert_equals_cleaned_data()
+
+ def test_combined_fields(self):
+ # add requested fields
+ self.form_data.setlist('requested_fields', ['field1', 'field2'])
+
+ # add higher order fields
+ block_types_list = {'block_type1', 'block_type2'}
+ for field_name in ['block_counts', 'student_view_data']:
+ self.form_data.setlist(field_name, block_types_list)
+ self.cleaned_data[field_name] = block_types_list
+
+ # verify the requested_fields in cleaned_data includes all fields
+ self.cleaned_data['requested_fields'] |= {'field1', 'field2', 'student_view_data', 'block_counts'}
+ self.assert_equals_cleaned_data()
+
+# ----------------------------------------------------------------------------
+# Copyright 2014 Nervana Systems Inc.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ----------------------------------------------------------------------------
+import logging
+import os
+import numpy as np
+
+logger = logging.getLogger(__name__)
+
+
+class NoPar(object):
+
+ def __init__(self):
+ self.backend = None
+ self.device_id = None
+
+ def init_model(self, model, backend):
+ backend.actual_batch_size = model.batch_size
+
+ def associate(self, backend):
+ backend.par = self
+ self.backend = backend
+
+ def distribute(self, batchdata, dtype):
+ return self.backend.array(batchdata, dtype)
+
+ def reduce_tensor(self, tensor):
+ return tensor.asnumpyarray()
+
+ def rank(self):
+ return 0
+
+ def is_distributed(self):
+ return False
+
+
+class BasePar(object):
+
+ def __init__(self):
+ self.backend = None
+ self.device_id = None
+ try:
+ from mpi4py import MPI
+ self.mpi = MPI
+ self.comm = self.mpi.COMM_WORLD
+ self.mpi_size = self.comm.size
+ self.mpi_rank = self.comm.rank
+ except ImportError:
+ raise RuntimeError(
+ "mpi4py not found, can't run in datapar or modelpar")
+
+ try:
+ # Determine local rank (assumes OpenMPI).
+ self.mpi_local_rank = int(os.environ['OMPI_COMM_WORLD_LOCAL_RANK'])
+ self.mpi_local_size = int(os.environ['OMPI_COMM_WORLD_LOCAL_SIZE'])
+ except:
+ raise RuntimeError(
+ "OpenMPI variable OMPI_COMM_WORLD_LOCAL_RANK or "
+ "OMPI_COMM_WORLD_LOCAL_SIZE not found.\n"
+ "Are you using: mpirun -n <#procs> neon ?")
+ self.device_id = self.mpi_local_rank
+
+ def init_model(self, model, backend):
+ # save the original batch_size value that is specified in
+ # the configuration file
+ backend.actual_batch_size = model.batch_size
+
+ def associate(self, backend):
+ backend.par = self
+ self.backend = backend
+
+ def distribute(self, batchdata):
+ raise NotImplementedError()
+
+ def reduce_tensor(self, tensor):
+ raise NotImplementedError()
+
+ def distributable(self, layer):
+ if hasattr(layer, 'distributable'):
+ return layer.distributable
+ return False
+
+ def rank(self):
+ return self.mpi_rank
+
+ def is_distributed(self):
+ return True
+
+
+class ModelPar(BasePar):
+
+ class Config(object):
+ pass
+
+ def __init__(self):
+ super(ModelPar, self).__init__()
+ if self.mpi_rank == 0:
+ logger.info('Model-parallel mode. Number of nodes = %d.',
+ self.mpi_size)
+
+ def init_model(self, model, backend):
+ super(ModelPar, self).init_model(model, backend)
+ for layer in model.layers:
+ if not self.distributable(layer):
+ continue
+ assert hasattr(layer, 'nin')
+ assert not hasattr(layer, 'parconf')
+ conf = ModelPar.Config()
+ nout = layer.nout
+ realnin = layer.nin
+ nin = realnin // self.mpi_size
+ conf.start = self.mpi_rank * nin
+ if self.mpi_rank == (self.mpi_size - 1):
+ # If the weights cannot be evenly partitioned, let the last
+ # MPI node handle the extra weights.
+ conf.end = realnin
+ else:
+ conf.end = conf.start + nin
+ bs = model.batch_size
+ bufshape = (layer.nout, bs)
+ conf.fpropbuf = np.empty(bufshape, dtype=np.float32)
+ bufshape = (layer.nin, bs)
+ conf.bpropbuf = np.empty(bufshape, dtype=np.float32)
+ conf.rcount = np.empty(self.mpi_size, dtype=np.int32)
+ conf.rcount.fill(nin)
+ conf.scount = conf.end - conf.start
+ conf.rcount[-1] = realnin - nin * (self.mpi_size - 1)
+ conf.displ = np.arange(0, realnin - nin + 1, nin)
+ conf.scount *= bs
+ conf.rcount *= bs
+ conf.displ *= bs
+ layer.weight_shape = (nout, conf.end - conf.start)
+ layer.parconf = conf
+
+ def associate(self, backend):
+ super(ModelPar, self).associate(backend)
+ self.orig_fprop_fc = backend.fprop_fc
+ self.orig_bprop_fc = backend.bprop_fc
+ self.orig_update_fc = backend.update_fc
+ backend.fprop_fc = self.fprop_fc
+ backend.bprop_fc = self.bprop_fc
+ backend.update_fc = self.update_fc
+
+ def distribute(self, batchdata, dtype):
+ return self.backend.array(batchdata, dtype)
+
+ def reduce_tensor(self, tensor):
+ return tensor.asnumpyarray()
+
+ def fprop_fc(self, out, inputs, weights, layer):
+ conf = layer.parconf
+ self.orig_fprop_fc(out, inputs[conf.start:conf.end], weights)
+ sendbuf = [out.asnumpyarray(), self.mpi.FLOAT]
+ recvbuf = [conf.fpropbuf, self.mpi.FLOAT]
+ self.comm.Reduce(sendbuf, recvbuf, op=self.mpi.SUM)
+ self.comm.Bcast(buf=[conf.fpropbuf, self.mpi.FLOAT])
+ out.copy_from(conf.fpropbuf)
+
+ def bprop_fc(self, out, weights, deltas, layer):
+ conf = layer.parconf
+ self.orig_bprop_fc(out[conf.start:conf.end], weights, deltas)
+ outbuf = out.asnumpyarray()[conf.start:conf.end]
+ sendbuf = [outbuf, conf.scount, self.mpi.FLOAT]
+ recvbuf = [conf.bpropbuf, conf.rcount,
+ conf.displ, self.mpi.FLOAT]
+ self.comm.Allgatherv(sendbuf, recvbuf)
+ out.copy_from(conf.bpropbuf)
+
+ def update_fc(self, out, inputs, deltas, layer):
+ conf = layer.parconf
+ self.orig_update_fc(out, inputs[conf.start:conf.end], deltas)
+
+
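+# A hedged, standalone illustration (not part of the neon source) of the
+# input partitioning done in ModelPar.init_model() above: `realnin` inputs
+# are split evenly across MPI ranks and the last rank absorbs the remainder.
+# The numbers below are made up for illustration.
+def _illustrate_modelpar_split(realnin=10, mpi_size=3):
+    nin = realnin // mpi_size                      # 3 inputs per rank
+    rcount = np.empty(mpi_size, dtype=np.int32)
+    rcount.fill(nin)
+    rcount[-1] = realnin - nin * (mpi_size - 1)    # last rank receives 4
+    displ = np.arange(0, realnin - nin + 1, nin)   # receive offsets [0, 3, 6]
+    return rcount.tolist(), displ.tolist()
+
+assert _illustrate_modelpar_split() == ([3, 3, 4], [0, 3, 6])
+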
+class DataPar(BasePar):
+
+ class Config(object):
+ pass
+
+ def __init__(self):
+ super(DataPar, self).__init__()
+ if self.mpi_rank == 0:
+ logger.info('Data-parallel mode. Number of nodes = %d.',
+ self.mpi_size)
+ self.reducebuf = np.empty((1, 1), dtype=np.float32)
+
+ def init_model(self, model, backend):
+ super(DataPar, self).init_model(model, backend)
+ self.batch_size = backend.actual_batch_size // self.mpi_size
+ self.start = self.mpi_rank * self.batch_size
+ if self.mpi_rank == (self.mpi_size - 1):
+ self.batch_size = backend.actual_batch_size - self.start
+ self.end = self.start + self.batch_size
+ model.batch_size = self.batch_size
+
+ for layer in model.layers:
+ if not self.distributable(layer):
+ continue
+ assert hasattr(layer, 'nin')
+ assert not hasattr(layer, 'parconf')
+ conf = DataPar.Config()
+ conf.updatebuf = np.empty(layer.weight_shape, dtype=np.float32)
+ layer.parconf = conf
+
+ def associate(self, backend):
+ super(DataPar, self).associate(backend)
+ self.orig_update_fc = backend.update_fc
+ self.orig_update_conv = backend.update_conv
+ backend.update_fc = self.update_fc
+ backend.update_conv = self.update_conv
+
+ def distribute(self, batchdata, dtype):
+ return self.backend.array(batchdata[:, self.start:self.end], dtype)
+
+ def reduce_tensor(self, tensor):
+ self.comm.Reduce([tensor.asnumpyarray(), self.mpi.FLOAT],
+ [self.reducebuf, self.mpi.FLOAT], op=self.mpi.SUM)
+ if self.mpi_rank == 0:
+ return self.reducebuf / self.mpi_size
+ return 0
+
+ def update(self, out, conf):
+ # NOTE: To make this faster, compute the weight updates
+ # asynchronously. There is no need to wait for completion
+ # until the updates are to be applied to the weights (the
+ # weights are updated after the gradients are propagated
+ # all the way back).
+ sendbuf = [out.asnumpyarray(), self.mpi.FLOAT]
+ recvbuf = [conf.updatebuf, self.mpi.FLOAT]
+ self.comm.Reduce(sendbuf, recvbuf, op=self.mpi.SUM)
+ self.comm.Bcast(buf=[conf.updatebuf, self.mpi.FLOAT])
+ out.copy_from(conf.updatebuf)
+
+ def update_fc(self, out, inputs, deltas, layer):
+ self.orig_update_fc(out, inputs, deltas)
+ self.update(out, layer.parconf)
+
+ def update_conv(self, out, inputs, weights, deltas, ofmshape, ofmsize,
+ ofmlocs, ifmshape, links, nifm, padding, stride,
+ ngroups, fwidth, updatebuf, local=False, layer=None):
+ self.orig_update_conv(out, inputs, weights, deltas, ofmshape, ofmsize,
+ ofmlocs, ifmshape, links, nifm, padding, stride,
+ ngroups, fwidth, updatebuf, local)
+ self.update(out, layer.parconf)
+
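+# A hedged, standalone illustration (not part of the neon source) of the
+# per-rank mini-batch slicing used by DataPar above: every rank takes
+# actual_batch_size // mpi_size columns and the last rank takes the rest.
+# The numbers below are made up for illustration.
+def _illustrate_datapar_slice(actual_batch_size, mpi_size, mpi_rank):
+    batch_size = actual_batch_size // mpi_size
+    start = mpi_rank * batch_size
+    if mpi_rank == mpi_size - 1:
+        batch_size = actual_batch_size - start
+    return start, start + batch_size
+
+# e.g. a batch of 100 across 3 ranks -> (0, 33), (33, 66), (66, 100)
+assert [_illustrate_datapar_slice(100, 3, r)
+        for r in range(3)] == [(0, 33), (33, 66), (66, 100)]
+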
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from pyspark.sql import SparkSession
+
+# $example on$
+from pyspark.ml.evaluation import RegressionEvaluator
+from pyspark.ml.recommendation import ALS
+from pyspark.sql import Row
+# $example off$
+
+if __name__ == "__main__":
+ spark = SparkSession\
+ .builder\
+ .appName("ALSExample")\
+ .getOrCreate()
+
+ # $example on$
+ lines = spark.read.text("data/mllib/als/sample_movielens_ratings.txt").rdd
+ parts = lines.map(lambda row: row.value.split("::"))
+ ratingsRDD = parts.map(lambda p: Row(userId=int(p[0]), movieId=int(p[1]),
+ rating=float(p[2]), timestamp=int(p[3])))
+ ratings = spark.createDataFrame(ratingsRDD)
+ (training, test) = ratings.randomSplit([0.8, 0.2])
+
+ # Build the recommendation model using ALS on the training data
+ # Note we set cold start strategy to 'drop' to ensure we don't get NaN evaluation metrics
+ als = ALS(maxIter=5, regParam=0.01, userCol="userId", itemCol="movieId", ratingCol="rating",
+ coldStartStrategy="drop")
+ model = als.fit(training)
+
+ # Evaluate the model by computing the RMSE on the test data
+ predictions = model.transform(test)
+ evaluator = RegressionEvaluator(metricName="rmse", labelCol="rating",
+ predictionCol="prediction")
+ rmse = evaluator.evaluate(predictions)
+ print("Root-mean-square error = " + str(rmse))
+
+ # Generate top 10 movie recommendations for each user
+ userRecs = model.recommendForAllUsers(10)
+ # Generate top 10 user recommendations for each movie
+ movieRecs = model.recommendForAllItems(10)
+
+ # Generate top 10 movie recommendations for a specified set of users
+ users = ratings.select(als.getUserCol()).distinct().limit(3)
+ userSubsetRecs = model.recommendForUserSubset(users, 10)
+ # Generate top 10 user recommendations for a specified set of movies
+ movies = ratings.select(als.getItemCol()).distinct().limit(3)
+ movieSubSetRecs = model.recommendForItemSubset(movies, 10)
+ # $example off$
+ userRecs.show()
+ movieRecs.show()
+ userSubsetRecs.show()
+ movieSubSetRecs.show()
+
+ spark.stop()
+
+# This file is part of Booktype.
+# Copyright (c) 2012 Douglas Bagnall
+#
+# Booktype is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Booktype is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with Booktype. If not, see .
+
+"""In Python 2.5, the json module we use is an external module called
+'simplejson'. From Python 2.6, it is a standard module called 'json'.
+Just to complicate things, in Debian's Python 2.5, there is an
+entirely different module called 'json', so 'import json' might appear
+to work there but do the wrong thing.
+
+This module includes the logic for ensuring that the right module gets
+imported. For simplicity of backwards compatibility, the module it
+finds is called both 'simplejson' and 'json'.
+
+>>> from booki.utils.json_wrapper import json
+>>> from booki.utils.json_wrapper import simplejson
+>>> json is simplejson
+True
+"""
+
+try:
+ import json
+ if not hasattr(json, 'loads'):
+ raise ImportError('accidentally imported the wrong json module.')
+except ImportError, e:
+ from warnings import warn
+ warn('json not found: "%s", trying simplejson' % e)
+ del warn
+ import simplejson as json
+
+simplejson = json
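+
+# A minimal self-check sketch for when the module is run directly; the sample payload below
+# is an illustrative assumption, not part of the original wrapper.
+if __name__ == '__main__':
+    data = json.loads('{"title": "example"}')
+    assert json.dumps(data, sort_keys=True) == '{"title": "example"}'
+    print 'json is simplejson:', json is simplejson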
+
+# Fill in holes, flood fill
+
+import numpy as np
+import os
+from plantcv.plantcv import print_image
+from plantcv.plantcv import plot_image
+from plantcv.plantcv import fatal_error
+from plantcv.plantcv import params
+from scipy.ndimage.morphology import binary_fill_holes
+
+
+def fill_holes(bin_img):
+ """Flood fills holes in a binary mask
+
+ Inputs:
+ bin_img = Binary image data
+
+ Returns:
+ filtered_img = image with objects filled
+
+ :param bin_img: numpy.ndarray
+ :return filtered_img: numpy.ndarray
+ """
+ params.device += 1
+
+ # Make sure the image is binary
+ if len(np.shape(bin_img)) != 2 or len(np.unique(bin_img)) != 2:
+ fatal_error("Image is not binary")
+
+ # Cast binary image to boolean
+ bool_img = bin_img.astype(bool)
+
+ # Flood fill holes
+ bool_img = binary_fill_holes(bool_img)
+
+ # Cast boolean image to binary and make a copy of the binary image for returning
+ filtered_img = np.copy(bool_img.astype(np.uint8) * 255)
+
+ if params.debug == 'print':
+ print_image(filtered_img, os.path.join(params.debug_outdir, str(params.device) + '_fill_holes' + '.png'))
+ elif params.debug == 'plot':
+ plot_image(filtered_img, cmap='gray')
+
+ return filtered_img
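+
+
+# A minimal usage sketch, assuming plantcv and scipy are installed; the synthetic mask below
+# is an illustrative assumption, not part of the library.
+if __name__ == '__main__':
+    mask = np.zeros((10, 10), dtype=np.uint8)
+    mask[2:8, 2:8] = 255   # a white square object
+    mask[4:6, 4:6] = 0     # a hole inside the object
+    filled = fill_holes(mask)
+    assert filled[5, 5] == 255  # the hole has been flood-filled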
+
+from miasm.expression.expression import ExprId, ExprInt, ExprMem
+from miasm.expression.expression_reduce import ExprReducer
+
+
+class StructLookup(ExprReducer):
+ """
+    ExprReducer example.
+    This example retrieves the nature of a given expression.
+    Input:
+        ECX is a pointer to a structure STRUCT_A
+
+ Reduction rules:
+ ECX -> FIELD_A_PTR
+ ECX + CST -> FIELD_A_PTR
+ ECX + CST*CST... -> FIELD_A_PTR
+ @ECX -> FIELD_A
+ @(ECX + CST) -> FIELD_A
+ """
+ CST = "CST"
+ FIELD_A_PTR = "FIELD_A_PTR"
+ FIELD_A = "FIELD_A"
+
+ def reduce_int(self, node, **kwargs):
+ """
+ Reduction: int -> CST
+ """
+ if node.expr.is_int():
+ return self.CST
+ return None
+
+ def reduce_ptr_struct(self, node, **kwargs):
+ """
+ Reduction: ECX -> FIELD_A_PTR
+ """
+ if node.expr.is_id("ECX"):
+ return self.FIELD_A_PTR
+ return None
+
+ def reduce_ptr_plus_int(self, node, **kwargs):
+ """
+ Reduction: ECX + CST -> FIELD_A_PTR
+ """
+ if not node.expr.is_op('+'):
+ return None
+ if [arg.info for arg in node.args] == [self.FIELD_A_PTR, self.CST]:
+ return self.FIELD_A_PTR
+ return None
+
+ def reduce_cst_op(self, node, **kwargs):
+ """
+ Reduction: CST + CST -> CST
+ """
+ if not node.expr.is_op():
+ return None
+ if set(arg.info for arg in node.args) == set([self.CST]):
+ return self.CST
+ return None
+
+ def reduce_at_struct_ptr(self, node, **kwargs):
+ """
+ Reduction: @FIELD_A_PTR -> FIELD_A
+ """
+ if not node.expr.is_mem():
+ return None
+ return self.FIELD_A
+
+ reduction_rules = [reduce_int,
+ reduce_ptr_struct,
+ reduce_ptr_plus_int,
+ reduce_cst_op,
+ reduce_at_struct_ptr
+ ]
+
+
+def test():
+ struct_lookup = StructLookup()
+
+ ptr = ExprId('ECX', 32)
+ int4 = ExprInt(4, 32)
+ tests = [
+ (ptr, StructLookup.FIELD_A_PTR),
+ (ptr + int4, StructLookup.FIELD_A_PTR),
+ (ptr + int4 * int4, StructLookup.FIELD_A_PTR),
+ (ExprMem(ptr, 32), StructLookup.FIELD_A),
+ (ExprMem(ptr + int4 * int4, 32), StructLookup.FIELD_A),
+ ]
+
+ for expr_in, result in tests:
+ assert struct_lookup.reduce(expr_in).info == result
+
+
+if __name__ == "__main__":
+ test()
+
+"""
+PHP date() style date formatting
+See http://www.php.net/date for format strings
+
+Usage:
+>>> import datetime
+>>> d = datetime.datetime.now()
+>>> df = DateFormat(d)
+>>> print(df.format('jS F Y H:i'))
+7th October 2003 11:39
+>>>
+"""
+from __future__ import unicode_literals
+
+import re
+import time
+import calendar
+import datetime
+
+from django.utils.dates import MONTHS, MONTHS_3, MONTHS_ALT, MONTHS_AP, WEEKDAYS, WEEKDAYS_ABBR
+from django.utils.tzinfo import LocalTimezone
+from django.utils.translation import ugettext as _
+from django.utils.encoding import force_unicode
+from django.utils.timezone import is_aware, is_naive
+
+re_formatchars = re.compile(r'(?<!\\)([aAbBcdDeEfFgGhHiIjlLmMnNoOPrsStTuUwWyYzZ])')
+re_escaped = re.compile(r'\\(.)')
+
+
+class Formatter(object):
+    def format(self, formatstr):
+        pieces = []
+        for i, piece in enumerate(re_formatchars.split(force_unicode(formatstr))):
+            if i % 2:
+                pieces.append(force_unicode(getattr(self, piece)()))
+            elif piece:
+                pieces.append(re_escaped.sub(r'\1', piece))
+        return ''.join(pieces)
+
+
+class TimeFormat(Formatter):
+    def __init__(self, t):
+        self.data = t
+
+    def a(self):
+        "'a.m.' or 'p.m.'"
+        if self.data.hour > 11:
+            return _('p.m.')
+        return _('a.m.')
+
+ def A(self):
+ "'AM' or 'PM'"
+ if self.data.hour > 11:
+ return _('PM')
+ return _('AM')
+
+ def B(self):
+ "Swatch Internet time"
+ raise NotImplementedError
+
+ def f(self):
+ """
+ Time, in 12-hour hours and minutes, with minutes left off if they're
+ zero.
+ Examples: '1', '1:30', '2:05', '2'
+ Proprietary extension.
+ """
+ if self.data.minute == 0:
+ return self.g()
+ return '%s:%s' % (self.g(), self.i())
+
+ def g(self):
+ "Hour, 12-hour format without leading zeros; i.e. '1' to '12'"
+ if self.data.hour == 0:
+ return 12
+ if self.data.hour > 12:
+ return self.data.hour - 12
+ return self.data.hour
+
+ def G(self):
+ "Hour, 24-hour format without leading zeros; i.e. '0' to '23'"
+ return self.data.hour
+
+ def h(self):
+ "Hour, 12-hour format; i.e. '01' to '12'"
+ return '%02d' % self.g()
+
+ def H(self):
+ "Hour, 24-hour format; i.e. '00' to '23'"
+ return '%02d' % self.G()
+
+ def i(self):
+ "Minutes; i.e. '00' to '59'"
+ return '%02d' % self.data.minute
+
+ def P(self):
+ """
+ Time, in 12-hour hours, minutes and 'a.m.'/'p.m.', with minutes left off
+ if they're zero and the strings 'midnight' and 'noon' if appropriate.
+ Examples: '1 a.m.', '1:30 p.m.', 'midnight', 'noon', '12:30 p.m.'
+ Proprietary extension.
+ """
+ if self.data.minute == 0 and self.data.hour == 0:
+ return _('midnight')
+ if self.data.minute == 0 and self.data.hour == 12:
+ return _('noon')
+ return '%s %s' % (self.f(), self.a())
+
+ def s(self):
+ "Seconds; i.e. '00' to '59'"
+ return '%02d' % self.data.second
+
+ def u(self):
+ "Microseconds"
+ return self.data.microsecond
+
+
+class DateFormat(TimeFormat):
+ year_days = [None, 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334]
+
+ def __init__(self, dt):
+ # Accepts either a datetime or date object.
+ self.data = dt
+ self.timezone = None
+ if isinstance(dt, datetime.datetime):
+ if is_naive(dt):
+ self.timezone = LocalTimezone(dt)
+ else:
+ self.timezone = dt.tzinfo
+
+ def b(self):
+ "Month, textual, 3 letters, lowercase; e.g. 'jan'"
+ return MONTHS_3[self.data.month]
+
+ def c(self):
+ """
+ ISO 8601 Format
+ Example : '2008-01-02T10:30:00.000123'
+ """
+ return self.data.isoformat()
+
+ def d(self):
+ "Day of the month, 2 digits with leading zeros; i.e. '01' to '31'"
+ return '%02d' % self.data.day
+
+ def D(self):
+ "Day of the week, textual, 3 letters; e.g. 'Fri'"
+ return WEEKDAYS_ABBR[self.data.weekday()]
+
+ def e(self):
+ "Timezone name if available"
+ try:
+ if hasattr(self.data, 'tzinfo') and self.data.tzinfo:
+ # Have to use tzinfo.tzname and not datetime.tzname
+            # because datetime.tzname does not expect Unicode
+ return self.data.tzinfo.tzname(self.data) or ""
+ except NotImplementedError:
+ pass
+ return ""
+
+ def E(self):
+ "Alternative month names as required by some locales. Proprietary extension."
+ return MONTHS_ALT[self.data.month]
+
+ def F(self):
+ "Month, textual, long; e.g. 'January'"
+ return MONTHS[self.data.month]
+
+ def I(self):
+ "'1' if Daylight Savings Time, '0' otherwise."
+ if self.timezone and self.timezone.dst(self.data):
+ return '1'
+ else:
+ return '0'
+
+ def j(self):
+ "Day of the month without leading zeros; i.e. '1' to '31'"
+ return self.data.day
+
+ def l(self):
+ "Day of the week, textual, long; e.g. 'Friday'"
+ return WEEKDAYS[self.data.weekday()]
+
+ def L(self):
+ "Boolean for whether it is a leap year; i.e. True or False"
+ return calendar.isleap(self.data.year)
+
+ def m(self):
+ "Month; i.e. '01' to '12'"
+ return '%02d' % self.data.month
+
+ def M(self):
+ "Month, textual, 3 letters; e.g. 'Jan'"
+ return MONTHS_3[self.data.month].title()
+
+ def n(self):
+ "Month without leading zeros; i.e. '1' to '12'"
+ return self.data.month
+
+ def N(self):
+ "Month abbreviation in Associated Press style. Proprietary extension."
+ return MONTHS_AP[self.data.month]
+
+ def o(self):
+ "ISO 8601 year number matching the ISO week number (W)"
+ return self.data.isocalendar()[0]
+
+ def O(self):
+ "Difference to Greenwich time in hours; e.g. '+0200', '-0430'"
+ seconds = self.Z()
+ sign = '-' if seconds < 0 else '+'
+ seconds = abs(seconds)
+ return "%s%02d%02d" % (sign, seconds // 3600, (seconds // 60) % 60)
+
+ def r(self):
+ "RFC 2822 formatted date; e.g. 'Thu, 21 Dec 2000 16:01:07 +0200'"
+ return self.format('D, j M Y H:i:s O')
+
+ def S(self):
+ "English ordinal suffix for the day of the month, 2 characters; i.e. 'st', 'nd', 'rd' or 'th'"
+ if self.data.day in (11, 12, 13): # Special case
+ return 'th'
+ last = self.data.day % 10
+ if last == 1:
+ return 'st'
+ if last == 2:
+ return 'nd'
+ if last == 3:
+ return 'rd'
+ return 'th'
+
+ def t(self):
+ "Number of days in the given month; i.e. '28' to '31'"
+ return '%02d' % calendar.monthrange(self.data.year, self.data.month)[1]
+
+ def T(self):
+ "Time zone of this machine; e.g. 'EST' or 'MDT'"
+ name = self.timezone and self.timezone.tzname(self.data) or None
+ if name is None:
+ name = self.format('O')
+ return unicode(name)
+
+ def U(self):
+ "Seconds since the Unix epoch (January 1 1970 00:00:00 GMT)"
+ if isinstance(self.data, datetime.datetime) and is_aware(self.data):
+ return int(calendar.timegm(self.data.utctimetuple()))
+ else:
+ return int(time.mktime(self.data.timetuple()))
+
+ def w(self):
+ "Day of the week, numeric, i.e. '0' (Sunday) to '6' (Saturday)"
+ return (self.data.weekday() + 1) % 7
+
+ def W(self):
+ "ISO-8601 week number of year, weeks starting on Monday"
+ # Algorithm from http://www.personal.ecu.edu/mccartyr/ISOwdALG.txt
+ week_number = None
+ jan1_weekday = self.data.replace(month=1, day=1).weekday() + 1
+ weekday = self.data.weekday() + 1
+ day_of_year = self.z()
+ if day_of_year <= (8 - jan1_weekday) and jan1_weekday > 4:
+ if jan1_weekday == 5 or (jan1_weekday == 6 and calendar.isleap(self.data.year-1)):
+ week_number = 53
+ else:
+ week_number = 52
+ else:
+ if calendar.isleap(self.data.year):
+ i = 366
+ else:
+ i = 365
+ if (i - day_of_year) < (4 - weekday):
+ week_number = 1
+ else:
+ j = day_of_year + (7 - weekday) + (jan1_weekday - 1)
+ week_number = j // 7
+ if jan1_weekday > 4:
+ week_number -= 1
+ return week_number
+
+ def y(self):
+ "Year, 2 digits; e.g. '99'"
+ return unicode(self.data.year)[2:]
+
+ def Y(self):
+ "Year, 4 digits; e.g. '1999'"
+ return self.data.year
+
+ def z(self):
+ "Day of the year; i.e. '0' to '365'"
+ doy = self.year_days[self.data.month] + self.data.day
+ if self.L() and self.data.month > 2:
+ doy += 1
+ return doy
+
+ def Z(self):
+ """
+ Time zone offset in seconds (i.e. '-43200' to '43200'). The offset for
+ timezones west of UTC is always negative, and for those east of UTC is
+ always positive.
+ """
+ if not self.timezone:
+ return 0
+ offset = self.timezone.utcoffset(self.data)
+ # `offset` is a datetime.timedelta. For negative values (to the west of
+ # UTC) only days can be negative (days=-1) and seconds are always
+ # positive. e.g. UTC-1 -> timedelta(days=-1, seconds=82800, microseconds=0)
+ # Positive offsets have days=0
+ return offset.days * 86400 + offset.seconds
+
+def format(value, format_string):
+ "Convenience function"
+ df = DateFormat(value)
+ return df.format(format_string)
+
+def time_format(value, format_string):
+ "Convenience function"
+ tf = TimeFormat(value)
+ return tf.format(format_string)
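+
+# A minimal usage sketch, assuming a configured Django environment (some format characters,
+# such as textual month names, go through the translation machinery); the datetime below is
+# an illustrative value only.
+if __name__ == '__main__':
+    d = datetime.datetime(2003, 10, 7, 11, 39)
+    print format(d, 'Y-m-d H:i')        # 2003-10-07 11:39
+    print time_format(d.time(), 'H:i')  # 11:39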
+
+#####################################################################################
+#
+# Copyright (c) Microsoft Corporation. All rights reserved.
+#
+# This source code is subject to terms and conditions of the Apache License, Version 2.0. A
+# copy of the license can be found in the License.html file at the root of this distribution. If
+# you cannot locate the Apache License, Version 2.0, please send an email to
+# ironpy@microsoft.com. By using this source code in any fashion, you are agreeing to be bound
+# by the terms of the Apache License, Version 2.0.
+#
+# You must not remove this notice, or any other, from this software.
+#
+#
+#####################################################################################
+
+from generate import generate
+
+def get_type(mutable):
+ if mutable:
+ return 'SetCollection'
+ else:
+ return 'FrozenSetCollection'
+
+def get_arg_ts(mutable):
+ return [get_type(mutable), get_type(not mutable), 'object']
+
+def get_clrname(name):
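+    # e.g. 'symmetric_difference' -> 'SymmetricDifference'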
+ return ''.join(map(str.capitalize, name.split('_')))
+
+def get_items(arg_t):
+ if arg_t == 'object':
+ return 'SetStorage.GetItems(set)'
+ else:
+ return 'set._items'
+
+def copy(cw, mutable):
+ if mutable:
+ cw.writeline('return copy();')
+ else:
+ cw.writeline('return Make(_items);')
+
+def copy_op(cw, mutable, name):
+ t = get_type(mutable)
+
+ cw.enter_block('public %s %s()' % (t, name))
+ copy(cw, mutable)
+ cw.exit_block()
+ cw.writeline()
+
+def simple_op(cw, t, arg_t, name):
+ clrname = get_clrname(name)
+
+ cw.enter_block('public %s %s(%s set)' % (t, name, arg_t))
+ simple_op_worker(cw, t, arg_t, name)
+ cw.exit_block()
+ cw.writeline()
+
+def simple_op_worker(cw, t, arg_t, name):
+ clrname = get_clrname(name)
+
+ if arg_t == 'object':
+ cw.writeline('SetStorage items;')
+ cw.enter_block('if (SetStorage.GetItems(set, out items))')
+ cw.writeline('items = SetStorage.%s(_items, items);' % clrname)
+ cw.else_block()
+ cw.writeline('items.%sUpdate(_items);' % clrname)
+ cw.exit_block()
+ cw.writeline('return Make(items);')
+ else:
+ cw.writeline(
+ 'return Make(SetStorage.%s(_items, set._items));' % clrname
+ )
+
+def enter_multiarg_op(cw, t, name):
+ cw.enter_block('public %s %s([NotNull]params object[]/*!*/ sets)' % (t, name))
+ cw.writeline('Debug.Assert(sets != null);')
+ cw.writeline()
+
+def union_multiarg(cw, mutable):
+ t = get_type(mutable)
+ enter_multiarg_op(cw, t, 'union')
+
+ cw.writeline('SetStorage res = _items.Clone();')
+ cw.enter_block('foreach (object set in sets)')
+ cw.writeline('res.UnionUpdate(SetStorage.GetItems(set));')
+ cw.exit_block()
+ cw.writeline()
+ cw.writeline('return Make(res);')
+
+ cw.exit_block()
+ cw.writeline()
+
+def intersection_multiarg(cw, mutable):
+ t = get_type(mutable)
+ enter_multiarg_op(cw, t, 'intersection')
+
+ cw.enter_block('if (sets.Length == 0)')
+ copy(cw, mutable)
+ cw.exit_block()
+ cw.writeline()
+
+ cw.writeline('SetStorage res = _items;')
+ cw.enter_block('foreach (object set in sets)')
+ cw.writeline('SetStorage items, x = res, y;')
+ cw.enter_block('if (SetStorage.GetItems(set, out items))')
+ cw.writeline('y = items;')
+ cw.writeline('SetStorage.SortBySize(ref x, ref y);')
+ cw.writeline()
+ cw.enter_block('if (%s(x, items) || %s(x, _items))' %
+ (('object.ReferenceEquals',) * 2))
+ cw.writeline('x = x.Clone();')
+ cw.exit_block()
+ cw.else_block()
+ cw.writeline('y = items;')
+ cw.writeline('SetStorage.SortBySize(ref x, ref y);')
+ cw.writeline()
+ cw.enter_block('if (object.ReferenceEquals(x, _items))')
+ cw.writeline('x = x.Clone();')
+ cw.exit_block()
+ cw.exit_block()
+ cw.writeline('x.IntersectionUpdate(y);')
+ cw.writeline('res = x;')
+ cw.exit_block()
+ cw.writeline()
+
+ cw.writeline('Debug.Assert(!object.ReferenceEquals(res, _items));')
+ cw.writeline('return Make(res);')
+
+ cw.exit_block()
+ cw.writeline()
+
+def difference(cw, t, arg_t):
+ items = get_items(arg_t)
+
+ cw.enter_block('public %s difference(%s set)' % (t, arg_t))
+
+ if (t == arg_t):
+ cw.enter_block('if (object.ReferenceEquals(set, this))')
+ cw.writeline('return Empty;')
+ cw.exit_block()
+ cw.writeline()
+
+ cw.writeline('return Make(')
+ cw.indent()
+ cw.writeline('SetStorage.Difference(_items, %s)' % items)
+ cw.dedent()
+    cw.writeline(');')
+
+ cw.exit_block()
+ cw.writeline()
+
+def difference_multiarg(cw, mutable):
+ t = get_type(mutable)
+ enter_multiarg_op(cw, t, 'difference')
+
+ cw.enter_block('if (sets.Length == 0)')
+ copy(cw, mutable)
+ cw.exit_block()
+ cw.writeline()
+
+ cw.writeline('SetStorage res = _items;')
+ cw.enter_block('foreach (object set in sets)')
+ cw.enter_block('if (object.ReferenceEquals(set, this))')
+ cw.writeline('return Empty;')
+ cw.exit_block()
+ cw.writeline()
+ cw.writeline('SetStorage items = SetStorage.GetItems(set);')
+ cw.enter_block('if (object.ReferenceEquals(res, _items))')
+ cw.writeline('res = SetStorage.Difference(_items, items);')
+ cw.else_block()
+ cw.writeline('res.DifferenceUpdate(items);')
+ cw.exit_block()
+ cw.exit_block()
+ cw.writeline()
+
+ cw.writeline('Debug.Assert(!object.ReferenceEquals(res, _items));')
+ cw.writeline('return Make(res);')
+
+ cw.exit_block()
+ cw.writeline()
+
+def symmetric_difference(cw, t, arg_t):
+ cw.enter_block('public %s symmetric_difference(%s set)' % (t, arg_t))
+
+ if (t == arg_t):
+ cw.enter_block('if (object.ReferenceEquals(set, this))')
+ cw.writeline('return Empty;')
+ cw.exit_block()
+ cw.writeline()
+
+ simple_op_worker(cw, t, arg_t, 'symmetric_difference')
+
+ cw.exit_block()
+ cw.writeline()
+
+def gen_setops(mutable):
+ def _gen_setops(cw):
+ t = get_type(mutable)
+ arg_ts = get_arg_ts(mutable)
+
+ for arg_t in arg_ts:
+ items = get_items(arg_t)
+ cw.enter_block('public bool isdisjoint(%s set)' % arg_t)
+ cw.writeline('return _items.IsDisjoint(%s);' % items)
+ cw.exit_block()
+ cw.writeline()
+
+ for arg_t in arg_ts:
+ items = get_items(arg_t)
+ cw.enter_block('public bool issubset(%s set)' % arg_t)
+ cw.writeline('return _items.IsSubset(%s);' % items)
+ cw.exit_block()
+ cw.writeline()
+
+ for arg_t in arg_ts:
+ items = get_items(arg_t)
+ cw.enter_block('public bool issuperset(%s set)' % arg_t)
+ cw.writeline('return %s.IsSubset(_items);' % items)
+ cw.exit_block()
+ cw.writeline()
+
+ copy_op(cw, mutable, 'union')
+ for arg_t in arg_ts:
+ simple_op(cw, t, arg_t, 'union')
+ union_multiarg(cw, mutable)
+
+ copy_op(cw, mutable, 'intersection')
+ for arg_t in arg_ts:
+ simple_op(cw, t, arg_t, 'intersection')
+ intersection_multiarg(cw, mutable)
+
+ copy_op(cw, mutable, 'difference')
+ for arg_t in arg_ts:
+ difference(cw, t, arg_t)
+ difference_multiarg(cw, mutable)
+
+ for arg_t in arg_ts:
+ symmetric_difference(cw, t, arg_t)
+
+ return _gen_setops
+
+op_symbols = [ '|', '&', '^', '-' ]
+op_names = [ 'union', 'intersection', 'symmetric_difference', 'difference' ]
+op_upnames = [ 'update' ] + map(lambda x: x + '_update', op_names[1:])
+op_clrnames = [ 'BitwiseOr', 'BitwiseAnd', 'ExclusiveOr', 'Subtract' ]
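+# The four lists above are parallel: zipped together they pair each operator symbol with its
+# Python method name, its in-place ("update") variant, and the CLR operator name, e.g.
+# ('|', 'union', 'update', 'BitwiseOr') and ('-', 'difference', 'difference_update', 'Subtract').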
+
+def gen_op(cw, t_left, t_right, symbol, name):
+ cw.enter_block(
+ 'public static %s operator %s(%s x, %s y)' %
+ (t_left, symbol, t_left, t_right)
+ )
+ cw.writeline('return x.%s(y);' % name)
+ cw.exit_block()
+ cw.writeline()
+
+def gen_ops(mutable):
+ def _gen_ops(cw):
+ t = get_type(mutable)
+ u = get_type(not mutable)
+ ops = zip(op_symbols, op_names)
+
+ for symbol, name in ops:
+ gen_op(cw, t, t, symbol, name)
+ for symbol, name in ops:
+ gen_op(cw, t, u, symbol, name)
+
+ return _gen_ops
+
+def gen_mutating_op(cw, t, arg_t, symbol, upname, clrname):
+ cw.writeline('[SpecialName]')
+ cw.enter_block('public %s InPlace%s(%s set)' % (t, clrname, arg_t))
+
+ if arg_t == 'object':
+ cw.enter_block(
+ 'if (set is %s || set is %s)' %
+ tuple(map(get_type, [False, True]))
+ )
+
+ cw.writeline('%s(set);' % upname)
+ cw.writeline('return this;')
+
+ if arg_t == 'object':
+ cw.exit_block()
+ cw.writeline()
+
+ cw.writeline('throw PythonOps.TypeError(')
+ cw.indent()
+ cw.writeline(
+ '''"unsupported operand type(s) for %s=: '{0}' and '{1}'",''' %
+ symbol
+ )
+ cw.writeline('%s(this), %s(set)' % (('PythonTypeOps.GetName',) * 2))
+ cw.dedent()
+ cw.writeline(');')
+
+ cw.exit_block()
+ cw.writeline()
+
+def gen_mutating_ops(cw):
+ t = get_type(True)
+ arg_ts = get_arg_ts(True)
+
+ for op in zip(op_symbols, op_upnames, op_clrnames):
+ for arg_t in arg_ts:
+ gen_mutating_op(cw, t, arg_t, *op)
+
+compares = [ '>', '<', '>=', '<=' ]
+
+def is_subset(compare):
+ return compare == '<' or compare == '<='
+
+def is_strict(compare):
+ return not compare.endswith('=')
+
+def gen_comparison(cw, t, compare):
+ cw.enter_block(
+ 'public static bool operator %s(%s self, object other)' %
+ (compare, t)
+ )
+
+ cw.writeline('SetStorage items;')
+ cw.enter_block('if (SetStorage.GetItemsIfSet(other, out items))')
+ if is_subset(compare):
+ left = 'self._items'
+ right = 'items'
+ else:
+ left = 'items'
+ right = 'self._items'
+ if is_strict(compare):
+ func = 'IsStrictSubset'
+ else:
+ func = 'IsSubset'
+ cw.writeline('return %s.%s(%s);' % (left, func, right))
+ cw.exit_block()
+ cw.writeline()
+
+ cw.writeline('throw PythonOps.TypeError("can only compare to a set");')
+
+ cw.exit_block()
+ cw.writeline()
+
+def suppress(cw, *msgs):
+ if len(msgs) == 0:
+ return
+
+ comma = ''
+ res = '['
+ for msg in msgs:
+ res += comma + 'System.Diagnostics.CodeAnalysis.SuppressMessage('
+ res += msg + ')'
+ comma = ' ,'
+ res += ']'
+
+ cw.writeline(res)
+
+def gen_comparisons(cw, t):
+ cw.writeline('#region IRichComparable')
+ cw.writeline()
+
+ for compare in compares:
+ gen_comparison(cw, t, compare)
+
+ ca1822 = '"Microsoft.Performance", "CA1822:MarkMembersAsStatic"'
+ ca1801 = '"Microsoft.Usage", "CA1801:ReviewUnusedParameters", MessageId = "o"'
+ throw_msg = 'throw PythonOps.TypeError("cannot compare sets using cmp()");'
+
+ suppress(cw, ca1822, ca1801)
+ cw.writeline('[SpecialName]')
+ cw.enter_block('public int Compare(object o)')
+ cw.writeline(throw_msg)
+ cw.exit_block()
+ cw.writeline()
+
+ suppress(cw, ca1822, ca1801)
+ cw.enter_block('public int __cmp__(object o)')
+ cw.writeline(throw_msg)
+ cw.exit_block()
+ cw.writeline()
+
+ cw.writeline('#endregion')
+ cw.writeline()
+
+def gen_ienumerable(cw, mutable):
+ cw.writeline('#region IEnumerable Members')
+ cw.writeline()
+
+ cw.enter_block('IEnumerator IEnumerable.GetEnumerator()')
+ cw.writeline('return new SetIterator(_items, %s);' % str(mutable).lower())
+ cw.exit_block()
+ cw.writeline()
+
+ cw.writeline('#endregion')
+ cw.writeline()
+ cw.writeline('#region IEnumerable