from __future__ import unicode_literals, absolute_import
import six
from collections import namedtuple
from decimal import Decimal as D, InvalidOperation
from dateutil.parser import parse
from . import base
from . import stores
from . import devices
Price = namedtuple('Price', ['value', 'currency'])
class Product(base.AppFigureObject):
def _load_from_json(self, json):
self.id = json.get('id')
self.name = json.get('name')
self.developer = json.get('developer')
self.icon = json.get('icon')
self.vendor_identifier = json.get('vendor_identifier')
self.package_name = json.get('package_name')
self.store = json.get('store')
self.store_id = json.get('store_id')
self.sku = json.get('sku')
self.ref_no = json.get('ref_no')
self.release_date = parse(json.get('release_date'))
self.added_date = parse(json.get('added_date'))
self.updated_date = parse(json.get('updated_date'))
self.version = json.get('version')
if self.version:
self.version = self.version.strip()
self.source = json.get('source')
self.type = json.get('type')
self.devices = json.get('devices', [])
price = json.get('price')
if not price:
    self.price = None
else:
    try:
        self.price = Price(D(price.get('price')), price.get('currency'))
    except (InvalidOperation, ValueError, TypeError):
        self.price = None
meta_json = json.get('meta', {})
self.metadata = ProductMetadataCollection.from_json(meta_json)
@property
def is_handheld(self):
return devices.HANDHELD in self.devices
@property
def is_tablet(self):
return devices.TABLET in self.devices
@property
def is_desktop(self):
return devices.DESKTOP in self.devices
@property
def has_metadata(self):
return len(self.metadata) > 0
@classmethod
def from_json(cls, json):
return cls(json)
def json(self):
return self._json_data
class ProductMetadataCollection(dict):
DEFAULT_LANGUAGE = 'en'
def __init__(self, json):
for language, metadata in six.iteritems(json):
self[language] = ProductMetadata.from_json(language, metadata)
def __getattr__(self, key):
"""
Expose the language metadata as attributes and allow direct access
to attributes of the english language metadata if it is present.
"""
if key in self:
return self[key]
elif self.DEFAULT_LANGUAGE in self and hasattr(self[self.DEFAULT_LANGUAGE], key):
    return getattr(self[self.DEFAULT_LANGUAGE], key)
raise AttributeError(key)
@classmethod
def from_json(cls, json):
return cls(json)
class ProductMetadata(base.AppFigureObject):
def __init__(self, language, json):
self.language = language
super(ProductMetadata, self).__init__(json)
def _load_from_json(self, json):
self.all_rating = D(json.get('all_rating'))
self.all_rating_count = int(json.get('all_rating_count'))
self.description = json.get('description')
self.developer_email = json.get('developer_email')
self.developer_site = json.get('developer_site')
self.downloads = json.get('downloads')
try:
self.download_size = int(json.get('download_size'))
except (ValueError, TypeError):
self.download_size = None
self.has_in_app_purchases = (json.get('has_inapps') == 'true')
self.name = json.get('name')
self.rating = json.get('rating')
self.release_notes = json.get('release_notes')
self.top_developer = (json.get('top_developer') == 'true')
self.view_url = json.get('view_url')
@classmethod
def from_json(cls, language, json):
flattened_json = {}
for data in json:
if data.get('language') != language:
continue
flattened_json[data.get('key')] = data.get('value')
return cls(language, flattened_json)
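# A minimal usage sketch (not part of the module above): the payload is a
# hypothetical example of the product JSON this module expects, and it assumes
# base.AppFigureObject(json) hands the payload to _load_from_json. Shown only
# to illustrate Product.from_json and the Price/metadata handling.
if __name__ == '__main__':
    sample = {
        'id': 123,
        'name': 'Example App',
        'store': 'apple',
        'release_date': '2015-01-01',
        'added_date': '2015-01-05',
        'updated_date': '2015-02-01',
        'devices': [],
        'price': {'price': '0.99', 'currency': 'USD'},
        'meta': {},
    }
    product = Product.from_json(sample)
    print(product.name, product.price, product.has_metadata)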
| {
"content_hash": "d465c5a8d9878c4c5c47a768bc2de671",
"timestamp": "",
"source": "github",
"line_count": 138,
"max_line_length": 76,
"avg_line_length": 29.985507246376812,
"alnum_prop": 0.6075398743354278,
"repo_name": "mobify/python-appfigures",
"id": "e29df54310eb684821c2d1ad4a989720754eb04d",
"size": "4162",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "appfigures/products.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "33940"
}
],
"symlink_target": ""
} |
import os
import json
import requests
from requests.auth import HTTPDigestAuth
from collections import OrderedDict
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
def get_solr_json(solr_url,
query,
api_key=None,
digest_user=None,
digest_pswd=None):
'''Get the Solr JSON response for the given URL, using the requests library.
The production API uses headers for auth, while the ingest environment
uses HTTP digest auth.
`query` is a dictionary of request parameters.
Returns a Python object created from the JSON response.
'''
solr_auth = { 'X-Authentication-Token': api_key } if api_key else None
digest_auth = HTTPDigestAuth(
digest_user, digest_pswd) if digest_user else None
return json.loads(requests.get(solr_url,
headers=solr_auth,
auth=digest_auth,
params=query,
verify=False).text)
def create_facet_dict(json_results, facet_field):
'''Create an ordered dictionary mapping facet field values to their counts.
Takes a Solr JSON result that must contain the given facet field in its
facet_counts section.
'''
results = json_results.get('facet_counts').get('facet_fields').get(facet_field)
#zip into array with elements of ('collection_data_value', count)
facet_list = zip(*[iter(results)]*2)
d = OrderedDict()
for val, count in facet_list:
if count > 0: #reject ones that have 0 records?
d[val] = count
return d
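# Example usage (a sketch only; the URL, API key and facet field below are
# placeholders, not a real endpoint or real credentials):
if __name__ == '__main__':
    solr_url = 'https://example.org/solr/main/query'
    query = {'q': '*:*', 'rows': 0, 'facet': 'true', 'facet.field': 'collection_data'}
    results = get_solr_json(solr_url, query, api_key='PLACEHOLDER-KEY')
    for value, count in create_facet_dict(results, 'collection_data').items():
        print(value, count)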
# Copyright © 2016, Regents of the University of California
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# - Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# - Neither the name of the University of California nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
| {
"content_hash": "15ecdf83f1e61e059fc571c701785e53",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 83,
"avg_line_length": 48.343283582089555,
"alnum_prop": 0.715961716579191,
"repo_name": "mredar/ucldc_api_data_quality",
"id": "37975406d6be9d06e98f62b42559b45eb0fe8120",
"size": "3264",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "reporting/get_solr_json.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Gherkin",
"bytes": "1161"
},
{
"name": "HTML",
"bytes": "115349"
},
{
"name": "Jupyter Notebook",
"bytes": "28608"
},
{
"name": "Python",
"bytes": "70303"
},
{
"name": "Ruby",
"bytes": "794"
},
{
"name": "Shell",
"bytes": "273"
}
],
"symlink_target": ""
} |
class GaloisFieldElement(object):
"""
Element of a finite field
"""
def __init__(self):
pass
def __eq__(self, other):
return self.__dict__ == other.__dict__
class GaloisFieldArithmetic(object):
"""
A collection of arithmetic operators for finite field elements
"""
def __init__(self, add_identity, mul_identity):
self.add_identity = add_identity # additive identity
self.mul_identity = mul_identity # multiplicative identity
def add(self, a, b):
"""
a + b
"""
pass
def neg(self, a):
"""
-a
"""
pass
def sub(self, a, b):
"""
a - b
"""
pass
def mul(self, a, b):
"""
a * b
"""
pass
def invert(self, a):
"""
a^(-1)
"""
pass
def div(self, a, b):
"""
a / b
"""
pass
def pow(self, a, e):
"""
a^e
"""
pass
def get_add_identity(self):
return self.add_identity
def get_mul_identity(self):
return self.mul_identity
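# A minimal concrete sketch (not part of the FATE code above): arithmetic over
# the prime field GF(p) for a prime modulus p, filling in the abstract
# operators defined by GaloisFieldArithmetic.
class PrimeFieldElement(GaloisFieldElement):
    def __init__(self, value, p):
        super(PrimeFieldElement, self).__init__()
        self.value = value % p
        self.p = p

class PrimeFieldArithmetic(GaloisFieldArithmetic):
    def __init__(self, p):
        super(PrimeFieldArithmetic, self).__init__(
            PrimeFieldElement(0, p), PrimeFieldElement(1, p))
        self.p = p

    def add(self, a, b):
        return PrimeFieldElement(a.value + b.value, self.p)

    def neg(self, a):
        return PrimeFieldElement(-a.value, self.p)

    def sub(self, a, b):
        return self.add(a, self.neg(b))

    def mul(self, a, b):
        return PrimeFieldElement(a.value * b.value, self.p)

    def invert(self, a):
        # Fermat's little theorem: a^(p-2) mod p inverts a nonzero a for prime p
        return PrimeFieldElement(pow(a.value, self.p - 2, self.p), self.p)

    def div(self, a, b):
        return self.mul(a, self.invert(b))

    def pow(self, a, e):
        return PrimeFieldElement(pow(a.value, e, self.p), self.p)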
| {
"content_hash": "e4dbc091ff04d0de4125af4cd4f22ebf",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 69,
"avg_line_length": 17.176470588235293,
"alnum_prop": 0.4460616438356164,
"repo_name": "FederatedAI/FATE",
"id": "2c452790ef60fa6130810f619f1779261009a4a8",
"size": "1833",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/federatedml/secureprotol/number_theory/field/base_galois_field.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Lua",
"bytes": "19716"
},
{
"name": "Python",
"bytes": "5121767"
},
{
"name": "Rust",
"bytes": "3971"
},
{
"name": "Shell",
"bytes": "19676"
}
],
"symlink_target": ""
} |
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('structure', '0016_customerpermissionreview'),
]
operations = [
migrations.RemoveField(model_name='customer', name='is_company',),
]
| {
"content_hash": "0b169104f49ea8ae8ede6a42e5720baf",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 74,
"avg_line_length": 21.583333333333332,
"alnum_prop": 0.6602316602316602,
"repo_name": "opennode/nodeconductor-assembly-waldur",
"id": "6ef428be3cfbd0720dad1f0fda08efa155103ee7",
"size": "309",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "src/waldur_core/structure/migrations/0017_remove_customer_is_company.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1624"
},
{
"name": "Python",
"bytes": "412263"
},
{
"name": "Shell",
"bytes": "2031"
}
],
"symlink_target": ""
} |
import unittest
from unittest import mock
import wikipedia
from orangecontrib.text.wikipedia import WikipediaAPI
from orangecontrib.text.corpus import Corpus
class StopingMock(mock.Mock):
def __init__(self, allow_calls=0):
super().__init__()
self.allow_calls = allow_calls
self.call_count = 0
def __call__(self, *args, **kwargs):
self.call_count += 1
if self.call_count > self.allow_calls:
return True
else:
return False
class WikipediaTests(unittest.TestCase):
def test_search(self):
on_progress = mock.MagicMock()
api = WikipediaAPI()
result = api.search('en', ['Clinton'], articles_per_query=2, on_progress=on_progress)
self.assertIsInstance(result, Corpus)
self.assertEqual(len(result.domain.attributes), 0)
self.assertEqual(len(result.domain.metas), 7)
self.assertEqual(len(result), 2)
self.assertEqual(on_progress.call_count, 2)
progress = 0
for arg in on_progress.call_args_list:
self.assertGreater(arg[0][0], progress)
progress = arg[0][0]
def test_search_disambiguation(self):
api = WikipediaAPI()
result = api.search('en', ['Scarf'], articles_per_query=3)
self.assertIsInstance(result, Corpus)
self.assertGreaterEqual(len(result), 3)
def test_search_break(self):
api = WikipediaAPI()
# stop immediately
result = api.search('en', ['Clinton'], articles_per_query=2,
should_break=mock.Mock(return_value=True))
self.assertEqual(len(result), 0)
# stop inside recursion
result = api.search('en', ['Scarf'], articles_per_query=3,
should_break=StopingMock(allow_calls=4))
self.assertEqual(len(result), 2)
def page(*args, **kwargs):
raise wikipedia.exceptions.PageError('1')
@mock.patch('wikipedia.page', page)
def test_page_error(self):
on_error = mock.MagicMock()
api = WikipediaAPI(on_error=on_error)
api.search('en', ['Barack Obama'])
self.assertEqual(on_error.call_count, 0)
def search(*args, **kwargs):
raise IOError('Network error')
@mock.patch('wikipedia.search', search)
def test_network_errors(self):
on_error = mock.MagicMock()
api = WikipediaAPI(on_error=on_error)
api.search('en', ['Barack Obama'])
self.assertEqual(on_error.call_count, 1)
| {
"content_hash": "0974b2d83a00f8f83374361f21a975a4",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 93,
"avg_line_length": 31.5125,
"alnum_prop": 0.6120587068623562,
"repo_name": "cheral/orange3-text",
"id": "b8c78e939aba6abd79648de743505698d501d01b",
"size": "2521",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "orangecontrib/text/tests/test_wikipedia.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "6578"
},
{
"name": "HTML",
"bytes": "613"
},
{
"name": "JavaScript",
"bytes": "39421"
},
{
"name": "Python",
"bytes": "378938"
}
],
"symlink_target": ""
} |
import json
import nltk.sentiment.util
import os
import random
from _operator import itemgetter
from datetime import timedelta
from nltk.sentiment import SentimentIntensityAnalyzer
from Clustering.clustering import find_tweets_with_keywords_idf
from config.config import PROJECT_DIR, PCLOUD_DIR
from inputoutput.cli import query_yes_no
from inputoutput.getters import get_articles, update_tweets_cache, get_articles_by_date
# output
article_clusters = {} # article_id -> tweet_id
article_clusters_filepath = os.path.join(PROJECT_DIR, 'article_clusters_PD_after_2016_11_07.json')
# the idf baseline to use
idf_file = os.path.join(PCLOUD_DIR, 'idf', 'idf_tweet_PD_ALL.json')
# threshold to make sure only words that are unique are searched for
TRESHOLD = 15
def exe(article_clusters, article_clusters_filepath, TRESHOLD):
tweets_cache = {}
# if os.path.exists(article_clusters_filepath):
# if not query_yes_no("Are you sure you want to overwrite %s" % article_clusters_filepath, default='no'):
# exit()
# get idf values
with open(idf_file) as fp:
idf = json.load(fp)
# For all articles after 2016_11_06
articles = []
for m in [11, 12]:
if m == 11:
for d in range(7, 32):
articles += get_articles_by_date(filename_prefix='articles_2016_%d_%02d' % (m, d))
else:
for d in range(1, 32):
articles += get_articles_by_date(filename_prefix='articles_2016_%d_%02d' % (m, d))
articles += get_articles_by_date(filename_prefix='articles_2017')
i = 1
last_start_date = None
for a in articles:
try:
if a.id[0] != 'r':
raise Exception("Non article is get_articles! %s" % a)
kwds = a.get_preproc_title()
if a.get_date() != last_start_date:
last_start_date = a.get_date()
update_tweets_cache(last_start_date - timedelta(days=0), last_start_date + timedelta(days=10), tweets_cache)
all_tweets = []
for tweets in tweets_cache.values():
all_tweets += tweets
ts = find_tweets_with_keywords_idf(all_tweets, kwds, idf, TRESHOLD)
if len(ts) > 0:
ts.sort(reverse=True, key=itemgetter(0))
article_clusters[a.id] = process_cluster(a, ts)
i += 1
else:
# Do not add to output
pass
except Exception as err:
try:
print("Writing to %s" % article_clusters_filepath)
json.dump(article_clusters, open(article_clusters_filepath, 'w+', encoding='utf-8'), indent=1)
except Exception as e:
print(article_clusters)
raise e
print("Error: Could not cluster with article %s\n%s" % (a, err))
if i % 200 == 0:
print("Writing to %s" % article_clusters_filepath)
json.dump(article_clusters, open(article_clusters_filepath, 'w+', encoding='utf-8'), indent=1)
print("Writing to %s" % article_clusters_filepath)
json.dump(article_clusters, open(article_clusters_filepath, 'w+', encoding='utf-8'), indent=1)
vader_analyzer = SentimentIntensityAnalyzer()
####
#### DO THIS
####
def process_cluster(article, idf_and_tweets):
article_max_hit = idf_and_tweets[0][0]
rumor_value = 0
tweets_output = []
for idf, tweet in idf_and_tweets:
quotation_marks = int(tweet['n_quationmarks']) / len(tweet['keywords']) * 0.5
abbreviations = - int( tweet['n_abbriviations']) / len(tweet['keywords']) * 1
question_marks = - (0.5 if tweet['questionmark'] else 0)
media = -len(tweet['media']) * 0.3
source = 0.2 if tweet['source_type'] == 'web_client' else -0.1
polarity_scores = vader_analyzer.polarity_scores(tweet['full_text'])
sentiment = polarity_scores['neu'] - 0.5
tweet_rumor_value = quotation_marks + abbreviations + question_marks + media + source + sentiment
tweet_output = {
'id': tweet.id_str(),
'idf_sum': idf,
'quotation_marks': quotation_marks,
'abbreviations': abbreviations,
'question_marks': question_marks,
'media': media,
'source': source,
'sentiment': sentiment,
'tweet_rumor_value': tweet_rumor_value,
}
# print("tweets_output:\n%s\n%s\n" % (tweet, tweet_output))
rumor_value += tweet_rumor_value
tweets_output.append(tweet_output)
rumor_value /= len(idf_and_tweets)
return {
'article_max_hit': article_max_hit,
'rumor_value': rumor_value,
'tweets': tweets_output,
}
if __name__ == '__main__':
exe(article_clusters, article_clusters_filepath, TRESHOLD)
| {
"content_hash": "bf9bb759ff55fea2ff466c0735a13089",
"timestamp": "",
"source": "github",
"line_count": 132,
"max_line_length": 124,
"avg_line_length": 36.621212121212125,
"alnum_prop": 0.5982623086470832,
"repo_name": "den1den/web-inf-ret-ml",
"id": "8cba69d28c7bf632b287a1970dce0c27dfdc4480",
"size": "4834",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Clustering/exe_articles_to_tweets.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "72580"
},
{
"name": "HTML",
"bytes": "19702"
},
{
"name": "JavaScript",
"bytes": "33368"
},
{
"name": "Python",
"bytes": "171576"
},
{
"name": "Shell",
"bytes": "804"
}
],
"symlink_target": ""
} |
from __future__ import division, with_statement
import functools
# http://wiki.python.org/moin/PythonDecoratorLibrary#Memoize
def memoized(obj):
cache = obj.cache = {}
@functools.wraps(obj)
def memoizer(*args, **kwargs):
key = str(args) + str(kwargs)
if key not in cache:
cache[key] = obj(*args, **kwargs)
return cache[key]
return memoizer
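# Tiny illustration of the decorator (not used by the module itself): because
# results are cached by argument string, the naive recursion below runs in
# linear rather than exponential time.
@memoized
def fib(n):
    return n if n < 2 else fib(n - 1) + fib(n - 2)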
class Name:
def __init__(self, name, years):
self.name = name
self.years = years
self.score = 0 # until scored
self.scores = {}
self.yearly_popularity = {"M": [0] * len(self.years),
"F": [0] * len(self.years)}
self.normed_popularity = {"M": [0] * len(self.years),
"F": [0] * len(self.years)}
self.nicknames = {}
self.full_names = {}
def add_popularity(self, year, gender, count):
self.yearly_popularity[gender][year - self.years[0]] = count
def normalize_popularities(self, yearly_totals):
for g in ["M", "F"]:
for i, total in enumerate(yearly_totals):
self.normed_popularity[g][i] = (
self.yearly_popularity[g][i] / total)
@memoized
def get_popularity(self, gender=None, year=None, emphasize_recent=False,
normalized=False):
popularity = 0
pops = self.normed_popularity if normalized else self.yearly_popularity
for g in ["M", "F"]:
if gender and gender != g: continue
if year:
popularity += pops[g][year - self.years[0]]
else:
if emphasize_recent:
for i, pop in enumerate(pops[g]):
popularity += pop * 2 * i / len(self.years)
else:
popularity += sum(pops[g])
return popularity
def add_metaphones(self, primary, secondary):
if secondary:
self.metaphones = [primary, secondary]
else:
self.metaphones = [primary]
def __str__(self):
return "<%s, F: %d, M: %d | %s>"%(
self.name, self.get_popularity('F'), self.get_popularity('M'),
', '.join(self.metaphones))
def to_dict(self):
o = {"name": self.name, "scores": self.scores, "genders": self.get_genders()}
if hasattr(self, "meaning"):
o['meaning'] = self.meaning
return o
def get_genders(self):
male_pop = self.get_popularity("M", normalized=True, emphasize_recent=True)
female_pop = self.get_popularity("F", normalized=True, emphasize_recent=True)
genders = []
if male_pop > 10 * female_pop:
genders = ["M"]
elif female_pop > 10 * male_pop:
genders = ["F"]
else:
genders = ["F", "M"]
return genders
def add_nickname(self, nick):
if nick.name in self.nicknames: return 0
if nick.name is self.name: return 0
#print "Found nickname", nick.name, "for", self.name, "from", nick.meaning
self.nicknames[nick.name] = nick
return 1 + nick.add_full_name(self)
def add_full_name(self, full):
if full.name in self.full_names: return 0
if full.name is self.name: return 0
self.full_names[full.name] = full
return 1 + full.add_nickname(self)
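# A small usage sketch with made-up counts (not real name data), assuming the
# yearly totals passed to normalize_popularities line up with the years list.
if __name__ == '__main__':
    years = [1990, 1991, 1992]
    name = Name("Alex", years)
    name.add_popularity(1990, "M", 120)
    name.add_popularity(1991, "F", 80)
    name.normalize_popularities([1000, 1200, 1100])
    name.add_metaphones("ALKS", None)
    print(name)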
| {
"content_hash": "f0a567df335485d21b198c7fa9b90543",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 85,
"avg_line_length": 35.395833333333336,
"alnum_prop": 0.5397292525014714,
"repo_name": "nwinter/bantling",
"id": "c597b24bf562067bce9475c53fa9c9441c507502",
"size": "3440",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/name.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "27785"
},
{
"name": "CoffeeScript",
"bytes": "7673"
},
{
"name": "HTML",
"bytes": "39109"
},
{
"name": "Java",
"bytes": "189898"
},
{
"name": "JavaScript",
"bytes": "262063"
},
{
"name": "Python",
"bytes": "1733571"
}
],
"symlink_target": ""
} |
from .base import BaseImporter
from pupa.utils import get_pseudo_id, _make_pseudo_id
from opencivicdata.legislative.models import (Event, EventLocation, EventSource, EventDocument,
EventDocumentLink, EventLink, EventParticipant,
EventMedia, EventMediaLink, EventAgendaItem,
EventRelatedEntity, EventAgendaMedia,
EventAgendaMediaLink)
class EventImporter(BaseImporter):
_type = 'event'
model_class = Event
related_models = {
'sources': (EventSource, 'event_id', {}),
'documents': (EventDocument, 'event_id', {
'links': (EventDocumentLink, 'document_id', {})
}),
'links': (EventLink, 'event_id', {}),
'participants': (EventParticipant, 'event_id', {}),
'media': (EventMedia, 'event_id', {
'links': (EventMediaLink, 'media_id', {}),
}),
'agenda': (EventAgendaItem, 'event_id', {
'related_entities': (EventRelatedEntity, 'agenda_item_id', {}),
'media': (EventAgendaMedia, 'agenda_item_id', {
'links': (EventAgendaMediaLink, 'media_id', {}),
}),
})
}
preserve_order = ('agenda',)
def __init__(self, jurisdiction_id, org_importer, person_importer, bill_importer,
vote_event_importer):
super(EventImporter, self).__init__(jurisdiction_id)
self.org_importer = org_importer
self.person_importer = person_importer
self.bill_importer = bill_importer
self.vote_event_importer = vote_event_importer
def get_object(self, event):
if event.get('pupa_id'):
e_id = self.lookup_obj_id(event['pupa_id'], Event)
if e_id:
spec = {'id': e_id}
else:
return None
else:
spec = {
'name': event['name'],
'description': event['description'],
'start_date': event['start_date'],
'end_date': event['end_date'],
'jurisdiction_id': self.jurisdiction_id
}
return self.model_class.objects.get(**spec)
def get_location(self, location_data):
obj, created = EventLocation.objects.get_or_create(name=location_data['name'],
url=location_data.get('url', ''),
jurisdiction_id=self.jurisdiction_id)
# TODO: geocode here?
return obj
def prepare_for_db(self, data):
data['jurisdiction_id'] = self.jurisdiction_id
if data['location']:
data['location'] = self.get_location(data['location'])
data['start_date'] = data['start_date']
data['end_date'] = data.get('end_date', "")
for participant in data['participants']:
if 'person_id' in participant:
participant['person_id'] = self.person_importer.resolve_json_id(
participant['person_id'],
allow_no_match=True)
elif 'organization_id' in participant:
participant['organization_id'] = self.org_importer.resolve_json_id(
participant['organization_id'],
allow_no_match=True)
for item in data['agenda']:
for entity in item['related_entities']:
if 'person_id' in entity:
entity['person_id'] = self.person_importer.resolve_json_id(
entity['person_id'],
allow_no_match=True)
elif 'organization_id' in entity:
entity['organization_id'] = self.org_importer.resolve_json_id(
entity['organization_id'],
allow_no_match=True)
elif 'bill_id' in entity:
# unpack and repack bill pseudo id in case filters alter it
bill = get_pseudo_id(entity['bill_id'])
self.bill_importer.apply_transformers(bill)
bill = _make_pseudo_id(**bill)
entity['bill_id'] = self.bill_importer.resolve_json_id(
bill,
allow_no_match=True)
elif 'vote_event_id' in entity:
entity['vote_event_id'] = self.vote_event_importer.resolve_json_id(
entity['vote_event_id'],
allow_no_match=True)
return data
| {
"content_hash": "93d576f2d063ce06ea0f7987e775e3ab",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 96,
"avg_line_length": 44.371428571428574,
"alnum_prop": 0.5091221292122773,
"repo_name": "opencivicdata/pupa",
"id": "8d4e0dac52f85bf225d45d836f42ae53a562bd1b",
"size": "4659",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pupa/importers/events.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "297332"
},
{
"name": "Shell",
"bytes": "109"
}
],
"symlink_target": ""
} |
import pyaudio
import wave
import sys
CHUNK = 1024
if len(sys.argv) < 2:
print("Plays a wave file.\n\nUsage: %s filename.wav" % sys.argv[0])
sys.exit(-1)
wf = wave.open(sys.argv[1], 'rb')
p = pyaudio.PyAudio()
stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),
channels=wf.getnchannels(),
rate=wf.getframerate(),
output=True)
data = wf.readframes(CHUNK)
while data:
stream.write(data)
data = wf.readframes(CHUNK)
stream.stop_stream()
stream.close()
p.terminate()
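# An alternative sketch (not part of the original script): non-blocking playback
# via PyAudio's callback API. It is only defined here for illustration; it opens
# the wave file again itself, so it does not depend on the stream closed above.
import time

def play_with_callback(path):
    wf_cb = wave.open(path, 'rb')
    pa = pyaudio.PyAudio()

    def callback(in_data, frame_count, time_info, status):
        frames = wf_cb.readframes(frame_count)
        return (frames, pyaudio.paContinue)

    cb_stream = pa.open(format=pa.get_format_from_width(wf_cb.getsampwidth()),
                        channels=wf_cb.getnchannels(),
                        rate=wf_cb.getframerate(),
                        output=True,
                        stream_callback=callback)
    cb_stream.start_stream()
    while cb_stream.is_active():
        time.sleep(0.1)
    cb_stream.stop_stream()
    cb_stream.close()
    pa.terminate()
    wf_cb.close()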
| {
"content_hash": "44377b4bfdcc753f031591703a7daf00",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 71,
"avg_line_length": 18.633333333333333,
"alnum_prop": 0.6189624329159212,
"repo_name": "tdb-alcorn/pymusic",
"id": "f828549b7ec984c74f87eae157b7482fa396233a",
"size": "559",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "play.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7139"
}
],
"symlink_target": ""
} |
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.7.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1HostAlias(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, hostnames=None, ip=None):
"""
V1HostAlias - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'hostnames': 'list[str]',
'ip': 'str'
}
self.attribute_map = {
'hostnames': 'hostnames',
'ip': 'ip'
}
self._hostnames = hostnames
self._ip = ip
@property
def hostnames(self):
"""
Gets the hostnames of this V1HostAlias.
Hostnames for the above IP address.
:return: The hostnames of this V1HostAlias.
:rtype: list[str]
"""
return self._hostnames
@hostnames.setter
def hostnames(self, hostnames):
"""
Sets the hostnames of this V1HostAlias.
Hostnames for the above IP address.
:param hostnames: The hostnames of this V1HostAlias.
:type: list[str]
"""
self._hostnames = hostnames
@property
def ip(self):
"""
Gets the ip of this V1HostAlias.
IP address of the host file entry.
:return: The ip of this V1HostAlias.
:rtype: str
"""
return self._ip
@ip.setter
def ip(self, ip):
"""
Sets the ip of this V1HostAlias.
IP address of the host file entry.
:param ip: The ip of this V1HostAlias.
:type: str
"""
self._ip = ip
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1HostAlias):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
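# Example usage (illustrative only):
if __name__ == '__main__':
    alias = V1HostAlias(hostnames=['foo.internal', 'bar.internal'], ip='10.1.2.3')
    print(alias.to_dict())  # {'hostnames': ['foo.internal', 'bar.internal'], 'ip': '10.1.2.3'}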
| {
"content_hash": "fc327dd05927dc0c7c8098d1facb1fdb",
"timestamp": "",
"source": "github",
"line_count": 141,
"max_line_length": 105,
"avg_line_length": 25.49645390070922,
"alnum_prop": 0.5171070931849792,
"repo_name": "sebgoa/client-python",
"id": "1af24f87300316f48f0e888977fbf9cd9550c884",
"size": "3612",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "kubernetes/client/models/v1_host_alias.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "5855378"
},
{
"name": "Shell",
"bytes": "16387"
}
],
"symlink_target": ""
} |
from django.conf.urls.defaults import *
from django.contrib import admin
from django.conf import settings
admin.autodiscover()
urlpatterns = patterns('',
url(r'^niwi-admin/', include(admin.site.urls)),
)
from django.views.generic import RedirectView
from niwi.web.views.main import Sitemap, Robots
urlpatterns += patterns('',
url(r'^', include('niwi.web.urls', namespace="web")),
url(r'^photo/', include('niwi.photo.urls', namespace='photo')),
#url(r'^filepaste/', include('niwi_apps.filepaste.urls', namespace='filepaste')),
url(r'^robots.txt$', Robots.as_view(), name='robots'),
url(r'^sitemap.xml$', Sitemap.as_view(), name='sitemap'),
)
# Static files
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
def mediafiles_urlpatterns():
"""
Serve media files when running under the development server (runserver).
"""
_media_url = settings.MEDIA_URL
if _media_url.startswith('/'):
_media_url = _media_url[1:]
from django.views.static import serve
return patterns('',
(r'^%s(?P<path>.*)$' % _media_url, serve,
{'document_root': settings.MEDIA_ROOT})
)
urlpatterns += staticfiles_urlpatterns()
urlpatterns += mediafiles_urlpatterns()
| {
"content_hash": "c2e277a32781728995035d40320055cd",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 85,
"avg_line_length": 29.609756097560975,
"alnum_prop": 0.6729818780889621,
"repo_name": "niwinz/niwi-web",
"id": "10ef6efcf544a56e68005ab2e6356eb9b62403eb",
"size": "1239",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/niwi/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "29305"
},
{
"name": "Python",
"bytes": "177942"
},
{
"name": "Shell",
"bytes": "343"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import unicode_literals
from pre_commit_hooks.check_case_conflict import find_conflicting_filenames
from pre_commit_hooks.check_case_conflict import main
from pre_commit_hooks.util import cmd_output
from testing.util import cwd
from testing.util import write_file
def test_nothing_added(temp_git_dir):
with cwd(temp_git_dir):
assert find_conflicting_filenames(['f.py']) == 0
def test_adding_something(temp_git_dir):
with cwd(temp_git_dir):
write_file('f.py', "print('hello world')")
cmd_output('git', 'add', 'f.py')
assert find_conflicting_filenames(['f.py']) == 0
def test_adding_something_with_conflict(temp_git_dir):
with cwd(temp_git_dir):
write_file('f.py', "print('hello world')")
cmd_output('git', 'add', 'f.py')
write_file('F.py', "print('hello world')")
cmd_output('git', 'add', 'F.py')
assert find_conflicting_filenames(['f.py', 'F.py']) == 1
def test_added_file_not_in_pre_commits_list(temp_git_dir):
with cwd(temp_git_dir):
write_file('f.py', "print('hello world')")
cmd_output('git', 'add', 'f.py')
assert find_conflicting_filenames(['g.py']) == 0
def test_file_conflicts_with_committed_file(temp_git_dir):
with cwd(temp_git_dir):
write_file('f.py', "print('hello world')")
cmd_output('git', 'add', 'f.py')
cmd_output('git', 'commit', '--no-verify', '-m', 'Add f.py')
write_file('F.py', "print('hello world')")
cmd_output('git', 'add', 'F.py')
assert find_conflicting_filenames(['F.py']) == 1
def test_integration(temp_git_dir):
with cwd(temp_git_dir):
assert main(argv=[]) == 0
write_file('f.py', "print('hello world')")
cmd_output('git', 'add', 'f.py')
assert main(argv=['f.py']) == 0
write_file('F.py', "print('hello world')")
cmd_output('git', 'add', 'F.py')
assert main(argv=['F.py']) == 1
| {
"content_hash": "7e695e98d0a7f7dbddbc339a2f7de7c1",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 75,
"avg_line_length": 30.439393939393938,
"alnum_prop": 0.600298656047785,
"repo_name": "bgschiller/pre-commit-hooks",
"id": "2a32918a7374840047427b951cc3be8a5c6bdf77",
"size": "2009",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tests/check_case_conflict_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "402"
},
{
"name": "Python",
"bytes": "95546"
}
],
"symlink_target": ""
} |
'''
Copyright (c) OS-Networks, http://os-networks.net
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the HWIOS Project nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE DEVELOPERS ``AS IS'' AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.'''
import sys
import random
from twisted.spread.pb import PBServerFactory
from twisted.cred import portal
from twisted.application import internet
from zope.interface import implements, Interface
from twisted.spread.flavors import Referenceable, NoSuchMethod
from twisted.spread.pb import _JellyableAvatarMixin, IPerspective, IUsernameMD5Password
from twisted.python.components import registerAdapter
from twisted.cred import credentials, error
from twisted.cred.credentials import IAnonymous, ICredentials, IUsernamePassword
from twisted.cred.credentials import IUsernameHashedPassword, Anonymous
from twisted.internet import defer
from twisted.python import failure, log
from hwios.core.application import HWIOS
from dsm_server import DSMServer,DSMRealm,DSMCredChecker,DSMPortalRoot
class DsmService(object):
"""
Twisted PB Service for remote service management
"""
def __init__(self,service_config, hwios_config):
self.hwios_config = hwios_config
self.config = service_config
self.realm = DSMRealm()
self.realm.server = DSMServer(self)
django_checker = DSMCredChecker()
p = portal.Portal(self.realm)
p.registerChecker(django_checker)
pr = DSMPortalRoot(p)
if self.hwios_config.has_option('general','ssl'):
from twisted.internet import ssl
from hwios.core.connection import ServerContextFactory
self.__service = internet.SSLServer(self.config.getint('service','port'),PBServerFactory(pr),ServerContextFactory())
else:
if self.config.getboolean('service','ssl') == True:
from twisted.internet import ssl
from hwios.core.connection import ServerContextFactory
self.__service = internet.SSLServer(self.config.getint('service','port'),PBServerFactory(pr),ServerContextFactory())
else:
self.__service = internet.TCPServer(self.config.getint('service','port'),PBServerFactory(pr))
def get_service(self):
return self.__service
def register_server(self,pb_server):
HWIOS.pb_server = pb_server
def update_pb_pool(self,pb_clients):
HWIOS.pb_pool = pb_clients
def notify(self,data):
print data
# dispatch a pb call to the matching dispatcher method
def dispatch(self, url, params):
try:
method = HWIOS.ws_realm.dispatcher.route(url)
if method is None: raise MethodNotFound()
t = type(params)
if t is list:
#mix client and list params in
method[2]['params'] = params
res = getattr(method[0],method[1])(**method[2])
elif t is dict:
params.update(method[2])
res = getattr(method[0],method[1])(**params)
else: raise InvalidParams()
if isinstance(res, defer.Deferred):
res.addBoth(self.respAny)
return
except Exception, e:
res = e
def respResult(self, result):
print result
def respErr(self, err):
log.err(err)
#self.transport.write(HWIOS.tools.json_encode({'error':'error'}))
class JsonRPCErr(Exception):
def __call__(self):
ret = {'code': self.code, 'message': self.message}
if self.args:
ret['data'] = self.args[0]
return ret
class ParseErr(JsonRPCErr):
code = -32700
message = "Parse error."
class InvalidReq(JsonRPCErr):
code = -32600
message = "Invalid Request."
class MethodNotFound(JsonRPCErr):
code = -32601
message = "Method not found."
class InvalidParams(JsonRPCErr):
code = -32602
message = "Invalid params."
class InternalError(JsonRPCErr):
code = -32603
message = "Internal error."
| {
"content_hash": "eb532926f8687bd6a89dbca053fc4bfd",
"timestamp": "",
"source": "github",
"line_count": 136,
"max_line_length": 132,
"avg_line_length": 39.74264705882353,
"alnum_prop": 0.6732654949121184,
"repo_name": "Knygar/hwios",
"id": "17162d28f70b9c1b9aa851f1f78ebbc0c5b8c3c1",
"size": "5405",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "services/dsm/service.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
import numpy as np
import pytest
from scipy.sparse import issparse
from scipy.sparse import coo_matrix
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.utils.multiclass import type_of_target
from sklearn.utils._testing import assert_array_equal
from sklearn.utils._testing import ignore_warnings
from sklearn.utils import _to_object_array
from sklearn.preprocessing._label import LabelBinarizer
from sklearn.preprocessing._label import MultiLabelBinarizer
from sklearn.preprocessing._label import LabelEncoder
from sklearn.preprocessing._label import label_binarize
from sklearn.preprocessing._label import _inverse_binarize_thresholding
from sklearn.preprocessing._label import _inverse_binarize_multiclass
from sklearn import datasets
iris = datasets.load_iris()
def toarray(a):
if hasattr(a, "toarray"):
a = a.toarray()
return a
def test_label_binarizer():
# one-class case defaults to negative label
# For dense case:
inp = ["pos", "pos", "pos", "pos"]
lb = LabelBinarizer(sparse_output=False)
expected = np.array([[0, 0, 0, 0]]).T
got = lb.fit_transform(inp)
assert_array_equal(lb.classes_, ["pos"])
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
# For sparse case:
lb = LabelBinarizer(sparse_output=True)
got = lb.fit_transform(inp)
assert issparse(got)
assert_array_equal(lb.classes_, ["pos"])
assert_array_equal(expected, got.toarray())
assert_array_equal(lb.inverse_transform(got.toarray()), inp)
lb = LabelBinarizer(sparse_output=False)
# two-class case
inp = ["neg", "pos", "pos", "neg"]
expected = np.array([[0, 1, 1, 0]]).T
got = lb.fit_transform(inp)
assert_array_equal(lb.classes_, ["neg", "pos"])
assert_array_equal(expected, got)
to_invert = np.array([[1, 0],
[0, 1],
[0, 1],
[1, 0]])
assert_array_equal(lb.inverse_transform(to_invert), inp)
# multi-class case
inp = ["spam", "ham", "eggs", "ham", "0"]
expected = np.array([[0, 0, 0, 1],
[0, 0, 1, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[1, 0, 0, 0]])
got = lb.fit_transform(inp)
assert_array_equal(lb.classes_, ['0', 'eggs', 'ham', 'spam'])
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
def test_label_binarizer_unseen_labels():
lb = LabelBinarizer()
expected = np.array([[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
got = lb.fit_transform(['b', 'd', 'e'])
assert_array_equal(expected, got)
expected = np.array([[0, 0, 0],
[1, 0, 0],
[0, 0, 0],
[0, 1, 0],
[0, 0, 1],
[0, 0, 0]])
got = lb.transform(['a', 'b', 'c', 'd', 'e', 'f'])
assert_array_equal(expected, got)
def test_label_binarizer_set_label_encoding():
lb = LabelBinarizer(neg_label=-2, pos_label=0)
# two-class case with pos_label=0
inp = np.array([0, 1, 1, 0])
expected = np.array([[-2, 0, 0, -2]]).T
got = lb.fit_transform(inp)
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
lb = LabelBinarizer(neg_label=-2, pos_label=2)
# multi-class case
inp = np.array([3, 2, 1, 2, 0])
expected = np.array([[-2, -2, -2, +2],
[-2, -2, +2, -2],
[-2, +2, -2, -2],
[-2, -2, +2, -2],
[+2, -2, -2, -2]])
got = lb.fit_transform(inp)
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
@ignore_warnings
def test_label_binarizer_errors():
# Check that invalid arguments yield ValueError
one_class = np.array([0, 0, 0, 0])
lb = LabelBinarizer().fit(one_class)
multi_label = [(2, 3), (0,), (0, 2)]
with pytest.raises(ValueError):
lb.transform(multi_label)
lb = LabelBinarizer()
with pytest.raises(ValueError):
lb.transform([])
with pytest.raises(ValueError):
lb.inverse_transform([])
with pytest.raises(ValueError):
LabelBinarizer(neg_label=2, pos_label=1)
with pytest.raises(ValueError):
LabelBinarizer(neg_label=2, pos_label=2)
with pytest.raises(ValueError):
LabelBinarizer(neg_label=1, pos_label=2, sparse_output=True)
# Fail on y_type
with pytest.raises(ValueError):
_inverse_binarize_thresholding(y=csr_matrix([[1, 2], [2, 1]]),
output_type="foo", classes=[1, 2],
threshold=0)
# Sequence of seq type should raise ValueError
y_seq_of_seqs = [[], [1, 2], [3], [0, 1, 3], [2]]
with pytest.raises(ValueError):
LabelBinarizer().fit_transform(y_seq_of_seqs)
# Fail on the number of classes
with pytest.raises(ValueError):
_inverse_binarize_thresholding(y=csr_matrix([[1, 2], [2, 1]]),
output_type="foo",
classes=[1, 2, 3],
threshold=0)
# Fail on the dimension of 'binary'
with pytest.raises(ValueError):
_inverse_binarize_thresholding(y=np.array([[1, 2, 3], [2, 1, 3]]),
output_type="binary",
classes=[1, 2, 3],
threshold=0)
# Fail on multioutput data
with pytest.raises(ValueError):
LabelBinarizer().fit(np.array([[1, 3], [2, 1]]))
with pytest.raises(ValueError):
label_binarize(np.array([[1, 3], [2, 1]]), classes=[1, 2, 3])
@pytest.mark.parametrize(
"values, classes, unknown",
[(np.array([2, 1, 3, 1, 3], dtype='int64'),
np.array([1, 2, 3], dtype='int64'), np.array([4], dtype='int64')),
(np.array(['b', 'a', 'c', 'a', 'c'], dtype=object),
np.array(['a', 'b', 'c'], dtype=object),
np.array(['d'], dtype=object)),
(np.array(['b', 'a', 'c', 'a', 'c']),
np.array(['a', 'b', 'c']), np.array(['d']))],
ids=['int64', 'object', 'str'])
def test_label_encoder(values, classes, unknown):
# Test LabelEncoder's transform, fit_transform and
# inverse_transform methods
le = LabelEncoder()
le.fit(values)
assert_array_equal(le.classes_, classes)
assert_array_equal(le.transform(values), [1, 0, 2, 0, 2])
assert_array_equal(le.inverse_transform([1, 0, 2, 0, 2]), values)
le = LabelEncoder()
ret = le.fit_transform(values)
assert_array_equal(ret, [1, 0, 2, 0, 2])
with pytest.raises(ValueError, match="unseen labels"):
le.transform(unknown)
def test_label_encoder_negative_ints():
le = LabelEncoder()
le.fit([1, 1, 4, 5, -1, 0])
assert_array_equal(le.classes_, [-1, 0, 1, 4, 5])
assert_array_equal(le.transform([0, 1, 4, 4, 5, -1, -1]),
[1, 2, 3, 3, 4, 0, 0])
assert_array_equal(le.inverse_transform([1, 2, 3, 3, 4, 0, 0]),
[0, 1, 4, 4, 5, -1, -1])
with pytest.raises(ValueError):
le.transform([0, 6])
@pytest.mark.parametrize("dtype", ['str', 'object'])
def test_label_encoder_str_bad_shape(dtype):
le = LabelEncoder()
le.fit(np.array(["apple", "orange"], dtype=dtype))
msg = "should be a 1d array"
with pytest.raises(ValueError, match=msg):
le.transform("apple")
def test_label_encoder_errors():
# Check that invalid arguments yield ValueError
le = LabelEncoder()
with pytest.raises(ValueError):
le.transform([])
with pytest.raises(ValueError):
le.inverse_transform([])
# Fail on unseen labels
le = LabelEncoder()
le.fit([1, 2, 3, -1, 1])
msg = "contains previously unseen labels"
with pytest.raises(ValueError, match=msg):
le.inverse_transform([-2])
with pytest.raises(ValueError, match=msg):
le.inverse_transform([-2, -3, -4])
# Fail on inverse_transform("")
msg = r"should be a 1d array.+shape \(\)"
with pytest.raises(ValueError, match=msg):
le.inverse_transform("")
@pytest.mark.parametrize(
"values",
[np.array([2, 1, 3, 1, 3], dtype='int64'),
np.array(['b', 'a', 'c', 'a', 'c'], dtype=object),
np.array(['b', 'a', 'c', 'a', 'c'])],
ids=['int64', 'object', 'str'])
def test_label_encoder_empty_array(values):
le = LabelEncoder()
le.fit(values)
# test empty transform
transformed = le.transform([])
assert_array_equal(np.array([]), transformed)
# test empty inverse transform
inverse_transformed = le.inverse_transform([])
assert_array_equal(np.array([]), inverse_transformed)
def test_sparse_output_multilabel_binarizer():
# test input as iterable of iterables
inputs = [
lambda: [(2, 3), (1,), (1, 2)],
lambda: ({2, 3}, {1}, {1, 2}),
lambda: iter([iter((2, 3)), iter((1,)), {1, 2}]),
]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 1, 0]])
inverse = inputs[0]()
for sparse_output in [True, False]:
for inp in inputs:
# With fit_transform
mlb = MultiLabelBinarizer(sparse_output=sparse_output)
got = mlb.fit_transform(inp())
assert issparse(got) == sparse_output
if sparse_output:
# verify CSR assumption that indices and indptr have same dtype
assert got.indices.dtype == got.indptr.dtype
got = got.toarray()
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert mlb.inverse_transform(got) == inverse
# With fit
mlb = MultiLabelBinarizer(sparse_output=sparse_output)
got = mlb.fit(inp()).transform(inp())
assert issparse(got) == sparse_output
if sparse_output:
# verify CSR assumption that indices and indptr have same dtype
assert got.indices.dtype == got.indptr.dtype
got = got.toarray()
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert mlb.inverse_transform(got) == inverse
with pytest.raises(ValueError):
mlb.inverse_transform(csr_matrix(np.array([[0, 1, 1],
[2, 0, 0],
[1, 1, 0]])))
def test_multilabel_binarizer():
# test input as iterable of iterables
inputs = [
lambda: [(2, 3), (1,), (1, 2)],
lambda: ({2, 3}, {1}, {1, 2}),
lambda: iter([iter((2, 3)), iter((1,)), {1, 2}]),
]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 1, 0]])
inverse = inputs[0]()
for inp in inputs:
# With fit_transform
mlb = MultiLabelBinarizer()
got = mlb.fit_transform(inp())
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert mlb.inverse_transform(got) == inverse
# With fit
mlb = MultiLabelBinarizer()
got = mlb.fit(inp()).transform(inp())
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert mlb.inverse_transform(got) == inverse
def test_multilabel_binarizer_empty_sample():
mlb = MultiLabelBinarizer()
y = [[1, 2], [1], []]
Y = np.array([[1, 1],
[1, 0],
[0, 0]])
assert_array_equal(mlb.fit_transform(y), Y)
def test_multilabel_binarizer_unknown_class():
mlb = MultiLabelBinarizer()
y = [[1, 2]]
Y = np.array([[1, 0], [0, 1]])
warning_message = 'unknown class.* will be ignored'
with pytest.warns(UserWarning, match=warning_message):
matrix = mlb.fit(y).transform([[4, 1], [2, 0]])
Y = np.array([[1, 0, 0], [0, 1, 0]])
mlb = MultiLabelBinarizer(classes=[1, 2, 3])
with pytest.warns(UserWarning, match=warning_message):
matrix = mlb.fit(y).transform([[4, 1], [2, 0]])
assert_array_equal(matrix, Y)
def test_multilabel_binarizer_given_classes():
inp = [(2, 3), (1,), (1, 2)]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 0, 1]])
# fit_transform()
mlb = MultiLabelBinarizer(classes=[1, 3, 2])
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, [1, 3, 2])
# fit().transform()
mlb = MultiLabelBinarizer(classes=[1, 3, 2])
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, [1, 3, 2])
# ensure works with extra class
mlb = MultiLabelBinarizer(classes=[4, 1, 3, 2])
assert_array_equal(mlb.fit_transform(inp),
np.hstack(([[0], [0], [0]], indicator_mat)))
assert_array_equal(mlb.classes_, [4, 1, 3, 2])
# ensure fit is no-op as iterable is not consumed
inp = iter(inp)
mlb = MultiLabelBinarizer(classes=[1, 3, 2])
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
# ensure a ValueError is thrown if given duplicate classes
err_msg = "The classes argument contains duplicate classes. Remove " \
"these duplicates before passing them to MultiLabelBinarizer."
mlb = MultiLabelBinarizer(classes=[1, 3, 2, 3])
with pytest.raises(ValueError, match=err_msg):
mlb.fit(inp)
def test_multilabel_binarizer_multiple_calls():
inp = [(2, 3), (1,), (1, 2)]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 0, 1]])
indicator_mat2 = np.array([[0, 1, 1],
[1, 0, 0],
[1, 1, 0]])
# first call
mlb = MultiLabelBinarizer(classes=[1, 3, 2])
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
# second call change class
mlb.classes = [1, 2, 3]
assert_array_equal(mlb.fit_transform(inp), indicator_mat2)
def test_multilabel_binarizer_same_length_sequence():
# Ensure sequences of the same length are not interpreted as a 2-d array
inp = [[1], [0], [2]]
indicator_mat = np.array([[0, 1, 0],
[1, 0, 0],
[0, 0, 1]])
# fit_transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
# fit().transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
def test_multilabel_binarizer_non_integer_labels():
tuple_classes = _to_object_array([(1,), (2,), (3,)])
inputs = [
([('2', '3'), ('1',), ('1', '2')], ['1', '2', '3']),
([('b', 'c'), ('a',), ('a', 'b')], ['a', 'b', 'c']),
([((2,), (3,)), ((1,),), ((1,), (2,))], tuple_classes),
]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 1, 0]])
for inp, classes in inputs:
# fit_transform()
mlb = MultiLabelBinarizer()
inp = np.array(inp, dtype=object)
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, classes)
indicator_mat_inv = np.array(mlb.inverse_transform(indicator_mat),
dtype=object)
assert_array_equal(indicator_mat_inv, inp)
# fit().transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, classes)
indicator_mat_inv = np.array(mlb.inverse_transform(indicator_mat),
dtype=object)
assert_array_equal(indicator_mat_inv, inp)
mlb = MultiLabelBinarizer()
with pytest.raises(TypeError):
mlb.fit_transform([({}), ({}, {'a': 'b'})])
def test_multilabel_binarizer_non_unique():
inp = [(1, 1, 1, 0)]
indicator_mat = np.array([[1, 1]])
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
def test_multilabel_binarizer_inverse_validation():
inp = [(1, 1, 1, 0)]
mlb = MultiLabelBinarizer()
mlb.fit_transform(inp)
# Not binary
with pytest.raises(ValueError):
mlb.inverse_transform(np.array([[1, 3]]))
# The following binary cases are fine, however
mlb.inverse_transform(np.array([[0, 0]]))
mlb.inverse_transform(np.array([[1, 1]]))
mlb.inverse_transform(np.array([[1, 0]]))
# Wrong shape
with pytest.raises(ValueError):
mlb.inverse_transform(np.array([[1]]))
with pytest.raises(ValueError):
mlb.inverse_transform(np.array([[1, 1, 1]]))
def test_label_binarize_with_class_order():
out = label_binarize([1, 6], classes=[1, 2, 4, 6])
expected = np.array([[1, 0, 0, 0], [0, 0, 0, 1]])
assert_array_equal(out, expected)
# Modified class order
out = label_binarize([1, 6], classes=[1, 6, 4, 2])
expected = np.array([[1, 0, 0, 0], [0, 1, 0, 0]])
assert_array_equal(out, expected)
out = label_binarize([0, 1, 2, 3], classes=[3, 2, 0, 1])
expected = np.array([[0, 0, 1, 0],
[0, 0, 0, 1],
[0, 1, 0, 0],
[1, 0, 0, 0]])
assert_array_equal(out, expected)
def check_binarized_results(y, classes, pos_label, neg_label, expected):
for sparse_output in [True, False]:
if ((pos_label == 0 or neg_label != 0) and sparse_output):
with pytest.raises(ValueError):
label_binarize(y, classes=classes, neg_label=neg_label,
pos_label=pos_label,
sparse_output=sparse_output)
continue
# check label_binarize
binarized = label_binarize(y, classes=classes, neg_label=neg_label,
pos_label=pos_label,
sparse_output=sparse_output)
assert_array_equal(toarray(binarized), expected)
assert issparse(binarized) == sparse_output
# check inverse
y_type = type_of_target(y)
if y_type == "multiclass":
inversed = _inverse_binarize_multiclass(binarized, classes=classes)
else:
inversed = _inverse_binarize_thresholding(binarized,
output_type=y_type,
classes=classes,
threshold=((neg_label +
pos_label) /
2.))
assert_array_equal(toarray(inversed), toarray(y))
# Check label binarizer
lb = LabelBinarizer(neg_label=neg_label, pos_label=pos_label,
sparse_output=sparse_output)
binarized = lb.fit_transform(y)
assert_array_equal(toarray(binarized), expected)
assert issparse(binarized) == sparse_output
inverse_output = lb.inverse_transform(binarized)
assert_array_equal(toarray(inverse_output), toarray(y))
assert issparse(inverse_output) == issparse(y)
def test_label_binarize_binary():
y = [0, 1, 0]
classes = [0, 1]
pos_label = 2
neg_label = -1
expected = np.array([[2, -1], [-1, 2], [2, -1]])[:, 1].reshape((-1, 1))
check_binarized_results(y, classes, pos_label, neg_label, expected)
# Binary case where sparse_output = True will not result in a ValueError
y = [0, 1, 0]
classes = [0, 1]
pos_label = 3
neg_label = 0
expected = np.array([[3, 0], [0, 3], [3, 0]])[:, 1].reshape((-1, 1))
check_binarized_results(y, classes, pos_label, neg_label, expected)
def test_label_binarize_multiclass():
y = [0, 1, 2]
classes = [0, 1, 2]
pos_label = 2
neg_label = 0
expected = 2 * np.eye(3)
check_binarized_results(y, classes, pos_label, neg_label, expected)
with pytest.raises(ValueError):
label_binarize(y, classes=classes, neg_label=-1, pos_label=pos_label,
sparse_output=True)
def test_label_binarize_multilabel():
y_ind = np.array([[0, 1, 0], [1, 1, 1], [0, 0, 0]])
classes = [0, 1, 2]
pos_label = 2
neg_label = 0
expected = pos_label * y_ind
y_sparse = [sparse_matrix(y_ind)
for sparse_matrix in [coo_matrix, csc_matrix, csr_matrix,
dok_matrix, lil_matrix]]
for y in [y_ind] + y_sparse:
check_binarized_results(y, classes, pos_label, neg_label,
expected)
with pytest.raises(ValueError):
label_binarize(y, classes=classes, neg_label=-1, pos_label=pos_label,
sparse_output=True)
def test_invalid_input_label_binarize():
with pytest.raises(ValueError):
label_binarize([0, 2], classes=[0, 2], pos_label=0, neg_label=1)
with pytest.raises(ValueError, match="continuous target data is not "):
label_binarize([1.2, 2.7], classes=[0, 1])
with pytest.raises(ValueError, match="mismatch with the labels"):
label_binarize([[1, 3]], classes=[1, 2, 3])
def test_inverse_binarize_multiclass():
got = _inverse_binarize_multiclass(csr_matrix([[0, 1, 0],
[-1, 0, -1],
[0, 0, 0]]),
np.arange(3))
assert_array_equal(got, np.array([1, 1, 0]))
| {
"content_hash": "e44878918a9a12389cfcee987f9927c0",
"timestamp": "",
"source": "github",
"line_count": 618,
"max_line_length": 79,
"avg_line_length": 35.99514563106796,
"alnum_prop": 0.54488649134637,
"repo_name": "glemaitre/scikit-learn",
"id": "fd396ceb90712c3602cc12ca329035248d922810",
"size": "22245",
"binary": false,
"copies": "3",
"ref": "refs/heads/main",
"path": "sklearn/preprocessing/tests/test_label.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "2232"
},
{
"name": "C",
"bytes": "41025"
},
{
"name": "C++",
"bytes": "146835"
},
{
"name": "Makefile",
"bytes": "1711"
},
{
"name": "Python",
"bytes": "10011694"
},
{
"name": "Shell",
"bytes": "44168"
}
],
"symlink_target": ""
} |
import os
import numpy as np
import sys
from collections import OrderedDict
import math
import torch
import torch.nn.functional as F
from torch import nn
from pytorch_lightning import LightningModule
from pytorch_lightning import seed_everything
from .doppelganger import DoppelGANger
from .network import RNNInitialStateType
from .loss import doppelganger_loss
from .util import gen_attribute_input_noise, gen_feature_input_noise,\
gen_feature_input_data_free, renormalize_per_sample
class DoppelGANger_pl(LightningModule):
def __init__(self,
data_feature_outputs,
data_attribute_outputs,
L_max,
num_real_attribute,
sample_len=10,
discriminator_num_layers=5,
discriminator_num_units=200,
attr_discriminator_num_layers=5,
attr_discriminator_num_units=200,
attribute_num_units=100,
attribute_num_layers=3,
feature_num_units=100,
feature_num_layers=1,
attribute_input_noise_dim=5,
addi_attribute_input_noise_dim=5,
d_gp_coe=10,
attr_d_gp_coe=10,
g_attr_d_coe=1,
d_lr=0.001,
attr_d_lr=0.001,
g_lr=0.001,
g_rounds=1,
d_rounds=1,
**kwargs):
super().__init__()
self.automatic_optimization = False
self.save_hyperparameters("discriminator_num_layers",
"discriminator_num_units",
"attr_discriminator_num_layers",
"attr_discriminator_num_units",
"attribute_num_units",
"attribute_num_layers",
"feature_num_units",
"feature_num_layers",
"attribute_input_noise_dim",
"addi_attribute_input_noise_dim",
"d_gp_coe",
"attr_d_gp_coe",
"g_attr_d_coe",
"d_lr",
"attr_d_lr",
"g_lr",
"g_rounds",
"d_rounds",
"L_max",
"sample_len",
"num_real_attribute")
self.g_rounds = g_rounds
self.d_rounds = d_rounds
self.sample_len = sample_len
self.L_max = L_max
self.data_feature_outputs = data_feature_outputs
self.data_attribute_outputs = data_attribute_outputs
self.length = self.L_max // self.sample_len
self.real_attribute_mask = ([True] * num_real_attribute +
[False] * (len(data_attribute_outputs)-num_real_attribute))
self.gen_flag_dims = []
dim = 0
from bigdl.nano.utils.log4Error import invalidInputError
for output in self.data_feature_outputs:
if output.is_gen_flag:
if output.dim != 2:
invalidInputError(False,
"gen flag output's dim should be 2")
self.gen_flag_dims = [dim, dim + 1]
break
dim += output.dim
if len(self.gen_flag_dims) == 0:
invalidInputError(False, "gen flag not found")
# model init
self.model =\
DoppelGANger(
data_feature_outputs=self.data_feature_outputs,
data_attribute_outputs=self.data_attribute_outputs,
real_attribute_mask=self.real_attribute_mask,
sample_len=self.sample_len,
L_max=self.L_max,
num_packing=1, # any num other than 1 will be supported later
discriminator_num_layers=self.hparams.discriminator_num_layers,
discriminator_num_units=self.hparams.discriminator_num_units,
attr_discriminator_num_layers=self.hparams.attr_discriminator_num_layers,
attr_discriminator_num_units=self.hparams.attr_discriminator_num_units,
attribute_num_units=self.hparams.attribute_num_units,
attribute_num_layers=self.hparams.attribute_num_layers,
feature_num_units=self.hparams.feature_num_units,
feature_num_layers=self.hparams.feature_num_layers,
attribute_input_noise_dim=self.hparams.attribute_input_noise_dim,
addi_attribute_input_noise_dim=self.hparams.addi_attribute_input_noise_dim,
initial_state=RNNInitialStateType.RANDOM) # currently we fix this value
def forward(self,
data_feature,
real_attribute_input_noise,
addi_attribute_input_noise,
feature_input_noise,
data_attribute):
return self.model([data_feature],
[real_attribute_input_noise],
[addi_attribute_input_noise],
[feature_input_noise],
[data_attribute])
def training_step(self, batch, batch_idx):
# data preparation
data_feature, data_attribute = batch
optimizer_d, optimizer_attr_d, optimizer_g = self.optimizers()
# generate noise input
real_attribute_input_noise = gen_attribute_input_noise(data_feature.shape[0])
addi_attribute_input_noise = gen_attribute_input_noise(data_feature.shape[0])
feature_input_noise = gen_feature_input_noise(data_feature.shape[0], self.length)
real_attribute_input_noise = torch.from_numpy(real_attribute_input_noise).float()
addi_attribute_input_noise = torch.from_numpy(addi_attribute_input_noise).float()
feature_input_noise = torch.from_numpy(feature_input_noise).float()
# g backward
# open the generator grad since we need to update the weights in g
for p in self.model.generator.parameters():
p.requires_grad = True
for i in range(self.g_rounds):
d_fake, attr_d_fake,\
d_real, attr_d_real = self(data_feature,
real_attribute_input_noise,
addi_attribute_input_noise,
feature_input_noise,
data_attribute)
g_loss, _, _ =\
doppelganger_loss(d_fake, attr_d_fake, d_real, attr_d_real)
optimizer_g.zero_grad()
self.manual_backward(g_loss)
optimizer_g.step()
# d backward
# close the generator grad since we only need to update the weights in d
for p in self.model.generator.parameters():
p.requires_grad = False
for i in range(self.d_rounds):
d_fake, attr_d_fake,\
d_real, attr_d_real = self(data_feature,
real_attribute_input_noise,
addi_attribute_input_noise,
feature_input_noise,
data_attribute)
_, d_loss, attr_d_loss =\
doppelganger_loss(d_fake, attr_d_fake, d_real, attr_d_real,
g_attr_d_coe=self.hparams.g_attr_d_coe,
gradient_penalty=True,
discriminator=self.model.discriminator,
attr_discriminator=self.model.attr_discriminator,
g_output_feature_train_tf=self.model.g_feature_train,
g_output_attribute_train_tf=self.model.g_attribute_train,
real_feature_pl=self.model.real_feature_pl,
real_attribute_pl=self.model.real_attribute_pl,
d_gp_coe=self.hparams.d_gp_coe,
attr_d_gp_coe=self.hparams.attr_d_gp_coe)
optimizer_d.zero_grad()
optimizer_attr_d.zero_grad()
self.manual_backward(d_loss)
self.manual_backward(attr_d_loss)
optimizer_d.step()
optimizer_attr_d.step()
# log tqdm
self.log("g_loss", g_loss.item(),
on_step=True, on_epoch=True, prog_bar=True, logger=True)
self.log("d_loss", d_loss.item(),
on_step=True, on_epoch=True, prog_bar=True, logger=True)
self.log("attr_d_loss", attr_d_loss.item(),
on_step=True, on_epoch=True, prog_bar=True, logger=True)
def configure_optimizers(self):
optimizer_d = torch.optim.Adam(self.model.discriminator.parameters(),
lr=self.hparams.d_lr, betas=(0.5, 0.999))
optimizer_attr_d = torch.optim.Adam(self.model.attr_discriminator.parameters(),
lr=self.hparams.attr_d_lr, betas=(0.5, 0.999))
optimizer_g = torch.optim.Adam(self.model.generator.parameters(),
lr=self.hparams.g_lr, betas=(0.5, 0.999))
return optimizer_d, optimizer_attr_d, optimizer_g
def sample_from(self,
real_attribute_input_noise,
addi_attribute_input_noise,
feature_input_noise,
feature_input_data,
batch_size=32):
features, attributes, gen_flags, lengths\
= self.model.sample_from(real_attribute_input_noise,
addi_attribute_input_noise,
feature_input_noise,
feature_input_data,
self.gen_flag_dims,
batch_size=batch_size)
return features, attributes, gen_flags, lengths
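# Usage sketch (illustrative only -- `feature_outputs`, `attribute_outputs` and
# `train_loader` are placeholders, not objects defined in this module):
#
#   model = DoppelGANger_pl(data_feature_outputs=feature_outputs,
#                           data_attribute_outputs=attribute_outputs,
#                           L_max=400, num_real_attribute=3)
#   trainer = Trainer(max_epochs=10)    # pytorch_lightning.Trainer
#   trainer.fit(model, train_loader)    # each batch is (data_feature, data_attribute)
#
# training_step above drives the manual optimization itself: g_rounds generator
# updates followed by d_rounds discriminator/attr-discriminator updates per batch.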
| {
"content_hash": "4610b8159ae00a41550f0e521445a8b9",
"timestamp": "",
"source": "github",
"line_count": 217,
"max_line_length": 95,
"avg_line_length": 47.77880184331797,
"alnum_prop": 0.5048225308641975,
"repo_name": "yangw1234/BigDL",
"id": "fbf96e5379a2b46ae7e90b2fd06452b73f5e53ca",
"size": "12708",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "python/chronos/src/bigdl/chronos/simulator/doppelganger/doppelganger_pl.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "5342"
},
{
"name": "Dockerfile",
"bytes": "138760"
},
{
"name": "Java",
"bytes": "1321348"
},
{
"name": "Jupyter Notebook",
"bytes": "54063856"
},
{
"name": "Lua",
"bytes": "1904"
},
{
"name": "Makefile",
"bytes": "19253"
},
{
"name": "PowerShell",
"bytes": "1137"
},
{
"name": "PureBasic",
"bytes": "593"
},
{
"name": "Python",
"bytes": "8762180"
},
{
"name": "RobotFramework",
"bytes": "16117"
},
{
"name": "Scala",
"bytes": "13216038"
},
{
"name": "Shell",
"bytes": "844916"
}
],
"symlink_target": ""
} |
import mechanize
class Navigator():
br = mechanize.Browser()
logged_in = False
def clearBrowser(self):
self.br = mechanize.Browser()
def login(self, username, password):
login_url = "https://auth.vt.edu/login?service=https://webapps.banner.vt.edu/banner-cas-prod/authorized/banner/SelfService"
br = self.br
br.open(login_url)
br.select_form(nr=0) # Select the 'Login' Form
br.set_handle_robots(False) # Ignore robots.txt file
br.form["username"] = username
br.form["password"] = password
resp = br.submit()
if ("Invalid username or password" in resp.read()):
return False
else:
self.logged_in = True
return True
def find(self, subj="", crse="", crn="", term="", year=""):
url = "https://banweb.banner.vt.edu/ssb/prod/HZSKVTSC.P_ProcRequest?"
# CAMPUS=0 is Blacksburg
# AR%25 is All Areas of Classes
url += "CAMPUS=" + str(0) + "&TERMYEAR=" + year + term
url += "&CORE_CODE=" + "AR%25" + "&SUBJ_CODE=" + subj
url += "&CRSE_NUMBER=" + crse + "&crn=" + crn + "&open_only=" + ""
url += "&PRINT_FRIEND=" + "Y" # + "&BTN_PRESSED=" + "FIND+class+sections"
browser = self.br.open(url)
contents = browser.read()
if ("NO SECTIONS FOUND FOR THIS INQUIRY." in contents):
return None
return contents
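# Usage sketch (illustrative only -- the credentials and course values below are
# placeholders):
#
#   nav = Navigator()
#   if nav.login("pid", "password"):
#       html = nav.find(subj="CS", crse="2114", term="09", year="2016")
#       if html is None:
#           print("No sections found")
#
# find() returns the raw printer-friendly HTML of the timetable results, or None
# when the inquiry matches no sections.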
| {
"content_hash": "e61a923b3ec687a01aaa02cb9e832975",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 131,
"avg_line_length": 32.15555555555556,
"alnum_prop": 0.5618521078092605,
"repo_name": "ajn123/VT-Python-Class-Add",
"id": "09c36ec771923c4802c162a440f733b69ebeccd4",
"size": "1447",
"binary": false,
"copies": "1",
"ref": "refs/heads/testing",
"path": "Navigator.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
import logging
from rest_framework.authentication import SessionAuthentication
from rest_framework.permissions import IsAdminUser
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework import status
from django.urls import reverse
from seaserv import ccnet_api
from seahub.api2.authentication import TokenAuthentication
from seahub.api2.throttling import UserRateThrottle
from seahub.api2.permissions import IsProVersion
from seahub.api2.utils import api_error
from seahub.api2.endpoints.utils import check_time_period_valid, \
generate_links_header_for_paginator, get_user_name_dict, \
get_user_contact_email_dict
from seahub.base.templatetags.seahub_tags import email2nickname
from seahub.utils.timeutils import datetime_to_isoformat_timestr
logger = logging.getLogger(__name__)
class LoginLogs(APIView):
authentication_classes = (TokenAuthentication, SessionAuthentication)
permission_classes = (IsAdminUser, IsProVersion)
throttle_classes = (UserRateThrottle,)
def get(self, request):
if not request.user.admin_permissions.can_view_admin_log():
return api_error(status.HTTP_403_FORBIDDEN, 'Permission denied.')
# check the date format, should be like '2015-10-10'
start = request.GET.get('start', None)
end = request.GET.get('end', None)
if not check_time_period_valid(start, end):
error_msg = 'start or end date invalid.'
return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
# Filtering a DateTimeField with dates won't include items on the last day,
# because the bounds are interpreted as '0am on the given date'.
end = end + ' 23:59:59'
result = []
from seahub_extra.sysadmin_extra.models import UserLoginLog
logs = UserLoginLog.objects.filter(login_date__range=(start, end))
for log in logs:
result.append({
'login_time': datetime_to_isoformat_timestr(log.login_date),
'login_ip': log.login_ip,
'name': email2nickname(log.username),
'email':log.username
})
return Response(result)
class AdminLoginLogs(APIView):
authentication_classes = (TokenAuthentication, SessionAuthentication)
permission_classes = (IsAdminUser, IsProVersion)
throttle_classes = (UserRateThrottle,)
def _get_admin_user_emails(self):
admin_users = ccnet_api.get_superusers()
admin_user_emails = []
for user in admin_users:
admin_user_emails.append(user.email)
return admin_user_emails
def _get_response_data(self, logs):
user_list = []
for log in logs:
user_list.append(log.username)
name_dict = get_user_name_dict(user_list)
contact_email_dict = get_user_contact_email_dict(user_list)
data = []
for log in logs:
email = log.username
data.append({
'login_time': datetime_to_isoformat_timestr(log.login_date),
'login_ip': log.login_ip,
'login_success': log.login_success,
'email': email,
'name': name_dict[email],
'contact_email': contact_email_dict[email],
})
return data
def get(self, request):
if not request.user.admin_permissions.can_view_admin_log():
return api_error(status.HTTP_403_FORBIDDEN, 'Permission denied.')
try:
page = int(request.GET.get('page', '1'))
per_page = int(request.GET.get('per_page', '100'))
except ValueError:
page = 1
per_page = 100
if page <= 0:
error_msg = 'page invalid.'
return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
if per_page <= 0:
error_msg = 'per_page invalid.'
return api_error(status.HTTP_400_BAD_REQUEST, error_msg)
offset = per_page * (page -1)
from seahub_extra.sysadmin_extra.models import UserLoginLog
admin_user_emails = self._get_admin_user_emails()
all_logs = UserLoginLog.objects.filter(username__in=admin_user_emails)
total_count = all_logs.count()
logs = all_logs[offset:offset+per_page]
data = self._get_response_data(logs)
result = {'data': data, 'total_count': total_count}
resp = Response(result)
## generate `Links` header for paginator
base_url = reverse('api-v2.1-admin-admin-login-logs')
links_header = generate_links_header_for_paginator(base_url,
page, per_page, total_count)
resp['Links'] = links_header
return resp
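# Usage sketch (illustrative only; the client setup below is an assumption, not part
# of this module):
#
#   from rest_framework.test import APIClient
#   client = APIClient()
#   client.force_authenticate(admin_user)
#   resp = client.get(reverse('api-v2.1-admin-admin-login-logs'),
#                     {'page': 2, 'per_page': 50})
#   resp.data        # {'data': [...], 'total_count': N}
#   resp['Links']    # pagination links built by generate_links_header_for_paginator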
| {
"content_hash": "3308db42aefa2e0a170da9463a8e52aa",
"timestamp": "",
"source": "github",
"line_count": 140,
"max_line_length": 83,
"avg_line_length": 34.02857142857143,
"alnum_prop": 0.6351805205709488,
"repo_name": "miurahr/seahub",
"id": "d48a13618e176729747e9cd9d70e62a4ab632fa8",
"size": "4803",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "seahub/api2/endpoints/admin/login_logs.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "231001"
},
{
"name": "HTML",
"bytes": "750509"
},
{
"name": "JavaScript",
"bytes": "2430915"
},
{
"name": "Python",
"bytes": "1500021"
},
{
"name": "Shell",
"bytes": "8856"
}
],
"symlink_target": ""
} |
'''
Simple test script which prints all HTTP responses' URLs and their total
count from a warc file whose path is passed as command line argument.
Does not support gzipped files.
'''
from warcreader import WarcFile
from sys import argv
from io import open
def main():
filename = argv[1]
with open(filename, 'rb') as raw_warc_file:
warc_file = WarcFile(raw_warc_file)
count = 0
for webpage in warc_file:
count += 1
print(webpage.payload)
print('Total count of HTTP responses: %d' % count)
if __name__ == '__main__':
main()
| {
"content_hash": "add6cd499f1af9475c2086057b4a42c6",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 72,
"avg_line_length": 22.8,
"alnum_prop": 0.6701754385964912,
"repo_name": "msvana/warcreader",
"id": "f6bfae608e41cc19d3e5a6da75d1fc83db69ac2b",
"size": "1158",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "example.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "6942"
}
],
"symlink_target": ""
} |
from django.dispatch.robustapply import *
import unittest
def noArgument():
pass
def oneArgument(blah):
pass
def twoArgument(blah, other):
pass
class TestCases(unittest.TestCase):
def test01(self):
robustApply(noArgument)
def test02(self):
self.assertRaises(TypeError, robustApply, noArgument, "this")
def test03(self):
self.assertRaises(TypeError, robustApply, oneArgument)
def test04(self):
"""Raise error on duplication of a particular argument"""
self.assertRaises(TypeError, robustApply, oneArgument, "this", blah = "that")
def getSuite():
return unittest.makeSuite(TestCases,'test')
if __name__ == "__main__":
unittest.main()
| {
"content_hash": "b44539acec87b0fee058ba7a74119c03",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 85,
"avg_line_length": 21.647058823529413,
"alnum_prop": 0.6616847826086957,
"repo_name": "diofeher/django-nfa",
"id": "499450eec4873ccf218c58fb63e8a4664424f47a",
"size": "736",
"binary": false,
"copies": "16",
"ref": "refs/heads/master",
"path": "tests/regressiontests/dispatch/tests/test_robustapply.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "66105"
},
{
"name": "Python",
"bytes": "5174003"
},
{
"name": "Shell",
"bytes": "804"
}
],
"symlink_target": ""
} |
'''
Contributors of this code: Alexander Belchenko, Harco Kuppens, Justin Riley.
http://code.activestate.com/recipes/440694-determine-size-of-console-window-on-windows/
https://stackoverflow.com/questions/566746/how-to-get-linux-console-window-width-in-python
https://gist.github.com/jtriley/1108174
I changed python2 to python3, and added crediting printing.
'''
import os
import shlex
import struct
import platform
import subprocess
import random
def get_terminal_size():
""" getTerminalSize()
- get width and height of console
- works on linux,os x,windows,cygwin(windows)
originally retrieved from:
http://stackoverflow.com/questions/566746/how-to-get-console-window-width-in-python
"""
current_os = platform.system()
tuple_xy = None
if current_os == 'Windows':
tuple_xy = _get_terminal_size_windows()
if tuple_xy is None:
tuple_xy = _get_terminal_size_tput()
# needed for window's python in cygwin's xterm!
if current_os in ['Linux', 'Darwin'] or current_os.startswith('CYGWIN'):
tuple_xy = _get_terminal_size_linux()
if tuple_xy is None:
print("default")
tuple_xy = (80, 25) # default value
return tuple_xy
def _get_terminal_size_windows():
try:
from ctypes import windll, create_string_buffer
# stdin handle is -10
# stdout handle is -11
# stderr handle is -12
h = windll.kernel32.GetStdHandle(-12)
csbi = create_string_buffer(22)
res = windll.kernel32.GetConsoleScreenBufferInfo(h, csbi)
if res:
(bufx, bufy, curx, cury, wattr,
left, top, right, bottom,
maxx, maxy) = struct.unpack("hhhhHhhhhhh", csbi.raw)
sizex = right - left + 1
sizey = bottom - top + 1
return sizex, sizey
except:
pass
def _get_terminal_size_tput():
# get terminal width
# src: http://stackoverflow.com/questions/263890/how-do-i-find-the-width-height-of-a-terminal-window
try:
        cols = int(subprocess.check_output(shlex.split('tput cols')).decode().strip())
        rows = int(subprocess.check_output(shlex.split('tput lines')).decode().strip())
return (cols, rows)
except:
pass
def _get_terminal_size_linux():
def ioctl_GWINSZ(fd):
try:
import fcntl
import termios
cr = struct.unpack('hh',
                               fcntl.ioctl(fd, termios.TIOCGWINSZ, b'1234'))
return cr
except:
pass
cr = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2)
if not cr:
try:
fd = os.open(os.ctermid(), os.O_RDONLY)
cr = ioctl_GWINSZ(fd)
os.close(fd)
except:
pass
if not cr:
try:
cr = (os.environ['LINES'], os.environ['COLUMNS'])
except:
return None
return int(cr[1]), int(cr[0])
if __name__ == "__main__":
sizex, sizey = get_terminal_size()
print('width =', sizex, 'height =', sizey)
else:
if random.randint(0, 5) == 0:
        print('import terminalsize: Special thanks to Alexander Belchenko, Harco Kuppens, and Justin Riley for writing a wonderful script that detects terminal size across OSes. ')
else:
print('Thanks to Belchenko, Kuppens, and Riley! ')
print()
| {
"content_hash": "465e9f9cc81729e912fbd4a13fbc5dc6",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 179,
"avg_line_length": 33.21782178217822,
"alnum_prop": 0.6038748137108793,
"repo_name": "willettk/common_language",
"id": "de162afa6c237c8e4898acd945debbb501b4d3bd",
"size": "3377",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "terminalsize.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "118636"
}
],
"symlink_target": ""
} |
import json
import queue
import time
from collections import namedtuple
from random import shuffle
from threading import Thread
import numpy as np
import tensorflow as tf
from dataset import to_sentences, tokens_to_ids, tf_Examples, SENTENCE_END, PAD_TOKEN
ModelInput = namedtuple('ModelInput',
['input_context', 'input_question', 'input_answer',
'origin_context', 'origin_question', 'origin_answer'])
BUCKET_CACHE_BATCH = 3
QUEUE_NUM_BATCH = 3
class Generator:
"""Data class for batch generator."""
def __init__(self, file_path, vocab, params,
context_key, question_key, answer_key,
max_context, max_question, bucketing=True, truncate_input=False):
#Generator constructor.
self._file_path = file_path #file_path: Path to data file.
self._vocab = vocab #vocab: Vocabulary.
self._params = params #params: model hyperparameters.
self._context_key = context_key #context_key: context key for tf.Example.
self._question_key = question_key #question_key: question key for tf.Example.
self._answer_key = answer_key #answer_key: answer key for tf.Example.
self._max_context = max_context #max_context: Max number of sentences used from context.
self._max_question = max_question #max_question: Max number of sentences used from question.
self._bucketing = bucketing #bucketing: Whether bucket articles of similar length into the same batch.
self._truncate_input = truncate_input #truncate_input: Whether to truncate input that is too long. Alternative is to discard such examples.
self._input_queue = queue.Queue(QUEUE_NUM_BATCH * self._params.batch_size)
self._bucket_input_queue = queue.Queue(QUEUE_NUM_BATCH)
self._input_threads = []
for _ in range(2):
self._input_threads.append(Thread(target=self._enqueue))
self._input_threads[-1].daemon = True
self._input_threads[-1].start()
self._bucketing_threads = []
for _ in range(1):
self._bucketing_threads.append(Thread(target=self._fill_bucket))
self._bucketing_threads[-1].daemon = True
self._bucketing_threads[-1].start()
self._watch_thread = Thread(target=self._monitor)
self._watch_thread.daemon = True
self._watch_thread.start()
def next(self):
"""Returns next batch of inputs for model.
Returns:
batch_context: A batch of encoder inputs [c_timesteps, batch_size].
batch_question: A batch of encoder inputs [q_timesteps, batch_size].
batch_answer: A batch of one-hot encoded answers [2, batch_size].
origin_context: original context words.
origin_question: original question words.
origin_answer: original answer words.
"""
batch_context = np.zeros(
(self._params.c_timesteps, self._params.batch_size), dtype=np.int32)
batch_question = np.zeros(
(self._params.q_timesteps, self._params.batch_size), dtype=np.int32)
batch_answer = np.zeros(
(2, self._params.batch_size), dtype=np.int32)
origin_context = ['None'] * self._params.batch_size
origin_question = ['None'] * self._params.batch_size
origin_answer = ['None'] * self._params.batch_size
buckets = self._bucket_input_queue.get()
for i in range(self._params.batch_size):
(input_context, input_question, input_answer,
context, question, answer) = buckets[i]
origin_context[i] = context
origin_question[i] = question
origin_answer[i] = answer
batch_context[:, i] = input_context[:]
batch_question[:, i] = input_question[:]
batch_answer[:, i] = input_answer[:]
return (batch_context, batch_question, batch_answer,
origin_context, origin_question, origin_answer)
def _enqueue(self):
"""Fill input queue with ModelInput."""
end_id = self._vocab.tokenToId(SENTENCE_END)
pad_id = self._vocab.tokenToId(PAD_TOKEN)
input_gen = self._textGenerator(tf_Examples(self._file_path))
while True:
(context, question, answer) = next(input_gen)
context_sentences = [sent.strip() for sent in to_sentences(context)]
question_sentences = [sent.strip() for sent in to_sentences(question)]
answer_sentences = [sent.strip() for sent in to_sentences(answer)]
input_context = []
input_question = []
# Convert first N sentences to word IDs, stripping existing <s> and </s>.
for i in range(min(self._max_context,
len(context_sentences))):
input_context += tokens_to_ids(context_sentences[i], self._vocab)
for i in range(min(self._max_question,
len(question_sentences))):
input_question += tokens_to_ids(question_sentences[i], self._vocab)
# assume single sentence answer
ans_ids = tokens_to_ids(answer_sentences[0], self._vocab)
# Filter out too-short input
if (len(input_context) < self._params.min_input_len or
len(input_question) < self._params.min_input_len):
tf.logging.warning('Drop an example - too short.\nc_enc: %d\nq_enc: %d',
len(input_context), len(input_question))
continue
# If we're not truncating input, throw out too-long input
if not self._truncate_input:
if (len(input_context) > self._params.c_timesteps or
len(input_question) > self._params.q_timesteps):
tf.logging.warning('Drop an example - too long.\nc_enc: %d\nq_enc: %d',
len(input_context), len(input_question))
continue
# If we are truncating input, do so if necessary
else:
if len(input_context) > self._params.c_timesteps:
input_context = input_context[:self._params.c_timesteps]
if len(input_question) > self._params.q_timesteps:
input_question = input_question[:self._params.q_timesteps]
# Pad if necessary
while len(input_context) < self._params.c_timesteps:
input_context.append(pad_id)
while len(input_question) < self._params.q_timesteps:
input_question.append(pad_id)
# start and end indices of answer
s = input_context.index(ans_ids[0])
e = input_context.index(ans_ids[-1])
input_answer = [s, e]
element = ModelInput(input_context, input_question, input_answer,
' '.join(context_sentences),
' '.join(question_sentences),
' '.join(answer_sentences))
self._input_queue.put(element)
def _fill_bucket(self):
"""Fill bucketed batches into the bucket_input_queue."""
while True:
inputs = []
for _ in range(self._params.batch_size * BUCKET_CACHE_BATCH):
inputs.append(self._input_queue.get())
if self._bucketing:
        # ModelInput has no enc_len field; approximate bucketing by the length of
        # the original (unpadded) context text
        inputs = sorted(inputs, key=lambda inp: len(inp.origin_context))
batches = []
for i in range(0, len(inputs), self._params.batch_size):
batches.append(inputs[i:i+self._params.batch_size])
shuffle(batches)
for b in batches:
self._bucket_input_queue.put(b)
def _monitor(self):
"""Watch the daemon input threads and restart if dead."""
while True:
time.sleep(60)
input_threads = []
for t in self._input_threads:
if t.is_alive():
input_threads.append(t)
else:
tf.logging.error('Found input thread dead.')
new_t = Thread(target=self._enqueue)
input_threads.append(new_t)
input_threads[-1].daemon = True
input_threads[-1].start()
self._input_threads = input_threads
bucketing_threads = []
for t in self._bucketing_threads:
if t.is_alive():
bucketing_threads.append(t)
else:
tf.logging.error('Found bucketing thread dead.')
new_t = Thread(target=self._fill_bucket)
bucketing_threads.append(new_t)
bucketing_threads[-1].daemon = True
bucketing_threads[-1].start()
self._bucketing_threads = bucketing_threads
def _getExFeatureText(self, ex, key):
"""Extract text for a feature from td.Example.
Args:
ex: tf.Example.
key: key of the feature to be extracted.
Returns:
feature: a feature text extracted.
"""
return ex.features.feature[key].bytes_list.value[0]
def _textGenerator(self, example_gen):
"""Yields original (context, question, answer) tuple."""
while True:
e = next(example_gen)
try:
context_text = self._getExFeatureText(e, self._context_key)
question_text = self._getExFeatureText(e, self._question_key)
answer_text = self._getExFeatureText(e, self._answer_key)
except ValueError:
tf.logging.error('Failed to get data from example')
continue
yield (context_text, question_text, answer_text)
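# Usage sketch (illustrative only -- `vocab`, `hps` and the file path are placeholders,
# not objects created in this module):
#
#   gen = Generator('train.tfrecord', vocab, hps,
#                   context_key='context', question_key='question', answer_key='answer',
#                   max_context=10, max_question=2)
#   (batch_context, batch_question, batch_answer,
#    origin_context, origin_question, origin_answer) = gen.next()
#
# next() blocks until the background enqueue/bucketing threads have filled a batch.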
| {
"content_hash": "171e9a39839411eae90cdedd91f8008b",
"timestamp": "",
"source": "github",
"line_count": 228,
"max_line_length": 143,
"avg_line_length": 39.19736842105263,
"alnum_prop": 0.6295177352579165,
"repo_name": "drakessn/Question-Answering",
"id": "9a39f400d184898616ff0ceb25365d0170d60626",
"size": "8937",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "batch_reader.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "49488"
}
],
"symlink_target": ""
} |
import logging
import numpy as np
import pandas as pd
from . import snpmatch
from . import parsers
from . import snp_genotype
log = logging.getLogger(__name__)
def simulateSNPs(g, AccID, numSNPs, outFile=None, err_rate=0.001):
assert type(AccID) is str, "provide Accession ID as a string"
assert AccID in g.g.accessions, "accession is not present in the matrix!"
AccToCheck = np.where(g.g.accessions == AccID)[0][0]
log.info("loading input files")
acc_snp = g.g_acc.snps[:,AccToCheck]
informative_snps = np.where(acc_snp >= 0)[0] ## Removing NAs for accession
input_df = pd.DataFrame(np.column_stack((np.array(g.g.chromosomes)[informative_snps], g.g.positions[informative_snps], acc_snp[informative_snps] )), columns = ["chr", 'pos', 'snp'])
## Input -- pandas dataframe with chr, position and genotype
#assert type(input_df) == pd.core.frame.DataFrame, "please provide a pandas dataframe"
#assert input_df.shape[1] >= 3, "first three columns are needed in dataframe: chr, pos, snp"
## default error rates = 0.001
log.info("sampling %s positions" % numSNPs)
sampleSNPs = np.sort(np.random.choice(np.arange(input_df.shape[0]), numSNPs, replace=False))
input_df = input_df.iloc[sampleSNPs,:]
log.info("adding in error rate: %s" % err_rate)
num_to_change = int(err_rate * input_df.shape[0])
input_df.iloc[np.sort(np.random.choice(np.arange(input_df.shape[0]), num_to_change, replace=False)), 2] = np.random.choice(3, num_to_change)
input_df.iloc[:, 2] = parsers.snp_binary_to_gt( np.array(input_df.iloc[:,2]) )
if outFile is not None:
input_df.to_csv( outFile, sep = "\t", index = None, header = False )
return(input_df)
def simulateSNPs_F1(g, parents, numSNPs, outFile, err_rate, rm_hets = 1):
indP1 = np.where(g.g_acc.accessions == parents.split("x")[0])[0][0]
indP2 = np.where(g.g_acc.accessions == parents.split("x")[1])[0][0]
log.info("loading files!")
snpsP1 = g.g_acc.snps[:,indP1]
snpsP2 = g.g_acc.snps[:,indP2]
common_ix = np.where((snpsP1 >= 0) & (snpsP2 >= 0) & (snpsP1 < 2) & (snpsP2 < 2))[0]
segregating_ix = np.where(snpsP1[common_ix] != snpsP2[common_ix] )[0]
diff_ix = np.setdiff1d( np.arange(len(common_ix)), segregating_ix )
common_snps = np.zeros(len(common_ix), dtype="int8")
common_snps[segregating_ix] = 2
common_snps[diff_ix] = snpsP1[common_ix[diff_ix]]
input_df = pd.DataFrame( np.column_stack((np.array(g.g_acc.chromosomes)[common_ix], np.array(g.g_acc.positions)[common_ix], common_snps )), columns = ["chr", 'pos', 'snp'] )
log.info("sampling %s positions" % numSNPs)
sampleSNPs = np.sort(np.random.choice(np.arange(input_df.shape[0]), numSNPs, replace=False))
input_df = input_df.iloc[sampleSNPs,:]
input_df['snp'] = input_df['snp'].astype(int)
log.info("adding in error rate: %s" % err_rate)
num_to_change = int(err_rate * input_df.shape[0])
input_df.iloc[np.sort(np.random.choice(np.where(input_df['snp'] != 2)[0], num_to_change, replace=False)), 2] = np.random.choice(2, num_to_change)
## Also change hets randomly to homozygous
het_ix = np.where(input_df['snp'] == 2)[0]
input_df.iloc[het_ix, 2] = np.random.choice(3, het_ix.shape[0], p=[(1-rm_hets)/2,(1-rm_hets)/2,rm_hets])
## Save the file to a bed file
input_df.iloc[:, 2] = parsers.snp_binary_to_gt( np.array(input_df.iloc[:,2]) )
if outFile is not None:
input_df.to_csv( outFile, sep = "\t", index = None, header = False )
return(input_df)
def potatoSimulate(args):
g = snp_genotype.Genotype(args['hdf5File'], args['hdf5accFile'] )
if args['simF1']:
simulateSNPs_F1(g, args['AccID'], args['numSNPs'], args['outFile'], args['err_rate'], args['rm_het'])
else:
simulateSNPs(g, args['AccID'], args['numSNPs'], args['outFile'], args['err_rate'])
log.info("finished!")
| {
"content_hash": "58eb2b7dbe7d7da37a2258b1462a2bf8",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 185,
"avg_line_length": 57.19117647058823,
"alnum_prop": 0.651581383389046,
"repo_name": "Gregor-Mendel-Institute/SNPmatch",
"id": "bc10a3950ca1027577d27908a6d9c6baaafca64a",
"size": "3889",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "snpmatch/core/simulate.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "338"
},
{
"name": "Jupyter Notebook",
"bytes": "67490"
},
{
"name": "Python",
"bytes": "150075"
}
],
"symlink_target": ""
} |
import re
import sys
from subprocess import PIPE, STDOUT
import subprocess
def err(msg="Undetermined error"):
print "ERROR"
print msg
sys.exit(0)
if __name__ == '__main__':
if len(sys.argv) < 2:
genders_query = '~NONE' # Trick to list all hosts
else:
genders_query = sys.argv[1]
if len(sys.argv) > 2:
header = sys.argv[2]
else:
header = "host"
if re.search('[^\w\d&|~\-()=:\.]', genders_query):
err("Inappropriate character in Genders query")
hosts = subprocess.Popen( ['/usr/bin/nodeattr', '-n', genders_query], \
stdin=PIPE, stdout=PIPE, stderr=STDOUT, close_fds=True).communicate()[0]
print header
print hosts,
| {
"content_hash": "fb0de1ff5069a26fe00d75060666c00f",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 84,
"avg_line_length": 23.9,
"alnum_prop": 0.5913528591352859,
"repo_name": "wcooley/splunk-puppet",
"id": "56b73ee069c1e2b2d540e72bc0dec739ab8eb107",
"size": "1516",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bin/gendershosts.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2927"
},
{
"name": "Python",
"bytes": "3827"
},
{
"name": "Shell",
"bytes": "1285"
}
],
"symlink_target": ""
} |
import logging
import unittest
import bus
class FakeHandler:
def __init__(self):
self.handle_called = False
self.command = None
def handle(self, command):
self.handle_called = True
self.command = command
class RaiseExceptionHandler:
def __init__(self):
pass
@staticmethod
def handle(command):
raise Exception
class BusTestCase(unittest.TestCase):
def setUp(self):
root = logging.getLogger()
root.setLevel(logging.CRITICAL)
def test_bus_can_register_handler(self):
b = bus.Bus()
command = object
handler = object()
b.register(command, handler)
self.assertTrue(command in b.handlers)
self.assertEqual(handler, b.handlers[command])
def test_execute_handle_method_from_handler(self):
b = bus.Bus()
handler = FakeHandler()
b.register(object, handler)
command = object()
b.execute(command)
self.assertTrue(handler.handle_called)
self.assertEqual(command, handler.command)
def test_handler_raise_exception_in_execute_method(self):
b = bus.Bus()
b.register(object, RaiseExceptionHandler())
command = object()
result = b.execute(command)
self.assertFalse(result.ok)
def test_raise_error_if_no_handlers_availables(self):
b = bus.Bus()
with self.assertRaises(Exception):
b.execute(object())
| {
"content_hash": "eb2925e048863bf549bf8f56f6c17318",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 61,
"avg_line_length": 23.046875,
"alnum_prop": 0.6196610169491525,
"repo_name": "guillaumevincent/rangevoting",
"id": "6fe729973136f9779c45b1f2dda3ceb2cb369709",
"size": "1475",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/test_bus.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "12535"
},
{
"name": "HTML",
"bytes": "18111"
},
{
"name": "JavaScript",
"bytes": "19557"
},
{
"name": "Python",
"bytes": "38836"
}
],
"symlink_target": ""
} |
import win32com.client
import array
import PPCOM
from PPCOM import enumInterfaces
from PPCOM import enumFrequencies
from PPCOM import enumSonosArrays
import sys
import os
#Define global variables
m_sLastError = ""
PY3 = sys.version_info.major == 3
#Distinguishing identifier of PSoC3/5 families
LEOPARD_ID = 0xE0;
PANTHER_ID = 0xE1;
def SUCCEEDED(hr):
    return hr >= 0
#Check JTAG ID of device - identify family PSoC3 or PSoC5
def GetGenerationByJtagID(JtagID):
if PY3:
JtagID = [chr(c) for c in JtagID]
distinguisher = (((ord(JtagID[0]) & 0x0F) << 4) | (ord(JtagID[1]) >> 4))
return distinguisher
def IsPSoC3ES3(jtagID):
global LEOPARD_ID
if PY3:
jtagID = [chr(c) for c in jtagID]
if (GetGenerationByJtagID(jtagID) == LEOPARD_ID):
if ((ord(jtagID[0]) >> 4) >= 0x01): return 1 #silicon version==0x01 saved in bits [4..7]
#For ES0-2==0x00, ES3==0x01
return 0
def Is_PSoC5_TM_ID(jtagID):
if PY3:
jtagID = [chr(c) for c in jtagID]
return ((ord(jtagID[0]) & 0xFF) == 0x0E) and ((ord(jtagID[1]) & 0xF0) == 0x10) and (ord(jtagID[3]) == 0x69)
def Is_PSoC_5_LP_ID(jtagID):
#Check whether SWD ID belongs to PSoC5LP
#PSoC5LP: 0x1BA01477 (SWD/JTAG read ID read request retuns CM3 ID always)
# 0x2E1xx069 (LP) - this Device ID must be read from PANTHER_DEVICE_ID reg (0x4008001C)
#2E0xx069
if PY3:
jtagID = [chr(c) for c in jtagID]
return (((ord(jtagID[0]) & 0xFF) >= 0x2E) and ((ord(jtagID[1]) & 0xF0) == 0x10) and (ord(jtagID[3]) == 0x69))
def OpenPort():
global m_sLastError
# Open Port - get last (connected) port in the ports list
hResult = pp.GetPorts()
hr = hResult[0]
portArray = hResult[1]
m_sLastError = hResult[2]
if (not SUCCEEDED(hr)): return hr
if (len(portArray) <= 0):
m_sLastError = "Connect any Programmer to PC"
return -1
bFound = 0
for i in range(0, len(portArray)):
if (portArray[i].startswith("MiniProg3") or portArray[i].startswith("DVKProg") or portArray[i].startswith("FirstTouch") or portArray[i].startswith("Gen-FX2LP") or portArray[i].startswith("KitProg")):
portName = portArray[i]
print('FOUND DEVICE:', portName)
bFound = 1;
break
if(bFound == 0):
m_sLastError = "Connect any MiniProg3/DVKProg/FirstTouch/Gen-FX2LP/KitProg device to the PC"
return -1
#Port should be opened just once to connect Programmer device (MiniProg1/3,etc).
#After that you can use Chip-/Programmer- specific APIs as long as you need.
#No need to repoen port when you need to acquire chip 2nd time, just call Acquire() again.
#This is true for all other APIs which get available once port is opened.
#You have to call OpenPort() again if port was closed by ClosePort() method, or
#when there is a need to connect to other programmer, or
#if programmer was physically reconnected to USB-port.
hr = pp.OpenPort(portName)
m_sLastError = hr[1]
return hr[0]
def ClosePort():
hResult = pp.ClosePort()
hr = hResult[0]
strError = hResult[1]
return hr
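#Typical call sequence (a sketch of what ProgramAll() below does end to end):
#  OpenPort() -> pp.SetPowerVoltage()/pp.SetProtocol*() -> pp.DAP_Acquire()
#  -> erase / program / verify APIs -> pp.DAP_ReleaseChip() -> ClosePort()
#The port stays open across repeated Acquire() calls; reopen it only after
#ClosePort(), when switching programmers, or after a physical USB reconnect.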
def Allow_Hard_Reset():
    global m_sLastError
    listResult = []
hResult = pp.DAP_GetJtagID();
hr = hResult[0]
chipJtagID = hResult[1]
m_sLastError = hResult[2]
if not SUCCEEDED(hr):
listResult.append(hr)
        listResult.append(0)
return listResult
#Apply to PSoC5 LP only
if Is_PSoC_5_LP_ID(chipJtagID):
#Set 'allow_rst_hrd' bit in MLOGIC_DEBUG register (see PSoC5 IROS: 001-43078, Book 2)
hr = pp.DAP_WriteIO(0x400046E8, 0x00000040)
return hr;
def CheckHexAndDeviceCompatibility():
global m_sLastError
listResult = []
result = 0
hResult = pp.DAP_GetJtagID();
hr = hResult[0]
chipJtagID = hResult[1]
m_sLastError = hResult[2]
if (not SUCCEEDED(hr)):
listResult.append(hr)
listResult.append(result)
return listResult
hResult = pp.HEX_ReadJtagID();
hr = hResult[0]
hexJtagID = hResult[1]
m_sLastError = hResult[2]
if (not SUCCEEDED(hr)):
listResult.append(hr)
listResult.append(result)
return listResult
result = 1
if PY3:
hexJtagID = [chr(c) for c in hexJtagID]
chipJtagID = [chr(c) for c in chipJtagID]
for i in range(0, 4):
if(ord(hexJtagID[i]) != ord(chipJtagID[i])):
result = 0
break
listResult.append(0)
listResult.append(result)
return listResult
def IsNvlUpdatePossible():
global m_sLastError
listResult = []
hResult = pp.GetPower1()
hr = hResult[0]
power = hResult[1]
voltage = hResult[2]
state = 0
if (not SUCCEEDED(hr)):
listResult.append(hr)
listResult.append(0)
return listResult
if (voltage > 3700):
state = 0
else:
state = 1
listResult.append(hr)
listResult.append(state)
return listResult
def ProgramNvlArrays(nvlArrayType):
global m_sLastError
hResult = pp.PSoC3_GetSonosArrays(nvlArrayType)
hr = hResult[0]
arrayInfo = hResult[1]
m_sLastError = hResult[2]
if (not SUCCEEDED(hr)): return hr
addrHex = 0
for i in range(0, len(arrayInfo[0])):
arrayID = arrayInfo[0][i]
arraySize = arrayInfo[1][i]
#Read data from Hex file
if (nvlArrayType == enumSonosArrays.ARRAY_NVL_USER):
hResult = pp.PSoC3_ReadHexNvlCustom(addrHex, arraySize)
else: #enumSonosArrays.ARRAY_NVL_WO_LATCHES
hResult = pp.PSoC3_ReadHexNvlWo(addrHex, arraySize)
hr = hResult[0]
hexData = hResult[1]
m_sLastError = hResult[2]
if (not SUCCEEDED(hr)): return hr
addrHex += arraySize
#Read data from device
hResult = pp.PSoC3_ReadNvlArray(arrayID)
hr = hResult[0]
chipData = hResult[1]
m_sLastError = hResult[2]
if (not SUCCEEDED(hr)): return hr
#Compare data from Chip against corresponding Hex-block
if (len(chipData) != len(hexData)):
m_sLastError = "Hex file's NVL array differs from corresponding device's one!"
return -1
fIdentical = 1
if PY3:
chipData = [chr(c) for c in chipData]
hexData = [chr(c) for c in hexData]
for i in range(0, arraySize):
if (ord(chipData[i]) != ord(hexData[i])):
fIdentical = 0
break
if (fIdentical == 1): continue #Arrays are equal, skip programming, goto following array
#Verify voltage range for TM silicon. Do not allow TM programming if voltage is greater than 3.3 V
hResult = pp.DAP_GetJtagID()
hr = hResult[0]
chipJtagID = hResult[1]
m_sLastError = hResult[2]
if (not SUCCEEDED(hr)): return hr
if (Is_PSoC5_TM_ID(chipJtagID)):
hResult = IsNvlUpdatePossible()
hr = hResult[0]
state = hResult[1]
if (state == 0):
if(nvlArrayType == enumSonosArrays.ARRAY_NVL_USER):
m_sLastError = "User NVLs"
else:
m_sLastError = "WO NVLs"
m_sLastError = m_sLastError + " update failed. This operation can be completed in voltage range 1.8 - 3.3 V only."
return -1
#Program NVL array
hResult = pp.PSoC3_WriteNvlArray(arrayID, hexData);
hr = hResult[0]
m_sLastError = hResult[1]
if (not SUCCEEDED(hr)): return hr
#PSoC3 ES3 support - check whether ECCEnable bit is changed and reacquire device
if (nvlArrayType == enumSonosArrays.ARRAY_NVL_USER):
hResult = pp.DAP_GetJtagID()
hr = hResult[0]
chipJtagID = hResult[1]
m_sLastError = hResult[2]
if (not SUCCEEDED(hr)): return hr
#ES3 and probably newer revisions
if (IsPSoC3ES3(chipJtagID) or Is_PSoC_5_LP_ID(chipJtagID)):
eccEnableChanged = 0
if (len(hexData) >= 4):
eccEnableChanged = ((ord(hexData[3]) ^ ord(chipData[3])) & 0x08) == 0x08
#need to reacquire chip if EccEnable bit was changed to apply it for flash rows.
if (eccEnableChanged):
hResult = pp.DAP_Acquire()
hr = hResult[0]
m_sLastError = hResult[1]
return hr
return 0
def GetEccOption(arrayID):
global m_sLastError
hResult = pp.PSoC3_GetFlashArrayInfo(arrayID)
hr = hResult[0]
rowSize = hResult[1]
rowsPerArray = hResult[2]
eccPresence = hResult[3]
m_sLastError = hResult[4]
if (not SUCCEEDED(hr)): return hr
hResult = pp.PSoC3_GetEccStatus() #get ecc status of the acquired device
hr = hResult[0]
eccHwStatus = hResult[1]
m_sLastError = hResult[2]
if (not SUCCEEDED(hr)): return hr
#take into account data from the Config area
if ((eccPresence != 0) and (eccHwStatus == 0)):
eccOption = 1
else:
eccOption = 0
return eccOption
def ProgramFlashArrays(flashSize):
global m_sLastError
hResult = pp.PSoC3_GetSonosArrays(enumSonosArrays.ARRAY_FLASH)
hr = hResult[0]
arrayInfo = hResult[1]
m_sLastError = hResult[2]
if (not SUCCEEDED(hr)): return hr
flashProgrammed = 0
#Program Flash arrays
for i in range(0, len(arrayInfo[0])):
arrayID = arrayInfo[0][i]
arraySize = arrayInfo[1][i]
#Program flash array from hex file taking into account ECC (Config Data)
hResult = pp.PSoC3_GetFlashArrayInfo(arrayID)
hr = hResult[0]
rowSize = hResult[1]
rowsPerArray = hResult[2]
eccPresence = hResult[3]
m_sLastError = hResult[4]
if (not SUCCEEDED(hr)): return hr
eccOption = GetEccOption(arrayID); #take into account data from the Config area
for rowID in range(0, rowsPerArray):
hResult = pp.PSoC3_ProgramRowFromHex(arrayID, rowID, eccOption)
hr = hResult[0]
m_sLastError = hResult[1]
if (not SUCCEEDED(hr)): return hr
#Limit programming to the device flash size
flashProgrammed += rowSize
if (flashProgrammed >= flashSize): return hr
return hr
def GetTotalFlashRowsCount(flashSize):
global m_sLastError
listResult = []
totalRows = 0
rowSize = 256
    totalRows = flashSize // rowSize
listResult.append(0)
listResult.append(totalRows)
return listResult
def CheckSum_All(flashSize):
global m_sLastError
listResult = []
cs = 0
hResult = pp.PSoC3_GetSonosArrays(enumSonosArrays.ARRAY_FLASH)
hr = hResult[0]
arrayInfo = hResult[1]
m_sLastError = hResult[2]
if (not SUCCEEDED(hr)):
listResult.append(hr)
listResult.append(cs)
return listResult
hResult = GetTotalFlashRowsCount(flashSize)
hr = hResult[0]
RowsToChecksum = hResult[1]
for i in range(0, len(arrayInfo[0])):
arrayID = arrayInfo[0][i]
arraySize = arrayInfo[1][i]
#Get info about flash array
hResult = pp.PSoC3_GetFlashArrayInfo(arrayID)
hr = hResult[0]
rowSize = hResult[1]
rowsPerArray = hResult[2]
eccPresence = hResult[3]
m_sLastError = hResult[4]
if (not SUCCEEDED(hr)):
listResult.append(hr)
listResult.append(cs)
return listResult
#Find number of rows in array to be checksumed
if ((RowsToChecksum - rowsPerArray) < 0):
rowsPerArray = RowsToChecksum
#Calculate checksum of the array
arrayChecksum = 0;
hResult = pp.PSoC3_CheckSum(arrayID, 0, rowsPerArray)
hr = hResult[0]
arrayChecksum = hResult[1]
m_sLastError = hResult[2]
if (not SUCCEEDED(hr)):
listResult.append(hr)
listResult.append(cs)
return listResult
#Sum checksum
cs += arrayChecksum
#Update number of rows to be checksumed
RowsToChecksum -= rowsPerArray
if (RowsToChecksum <= 0):
break;
listResult.append(hr)
listResult.append(cs)
return listResult
def ProgramAll(hex_file):
global m_sLastError
#Setup Power - "5.0V" and internal
print('Setting Power Voltage to 5.0V')
hResult = pp.SetPowerVoltage("5.0")
hr = hResult[0]
m_sLastError = hResult[1]
if (not SUCCEEDED(hr)): return hr
'''
print('Turning on device')
hResult = pp.PowerOn()
hr = hResult[0]
m_sLastError = hResult[1]
if (not SUCCEEDED(hr)): return hr
'''
#Set protocol, connector and frequency
print('Setting communication protocols')
hResult = pp.SetProtocol(enumInterfaces.SWD)
hr = hResult[0]
m_sLastError = hResult[1]
if (not SUCCEEDED(hr)): return hr
#hResult = pp.SetProtocolConnector(1); #10-pin connector
hResult = pp.SetProtocolConnector(0); #5-pin connector
hr = hResult[0]
m_sLastError = hResult[1]
if (not SUCCEEDED(hr)): return hr
hResult = pp.SetProtocolClock(enumFrequencies.FREQ_03_0); #3.0 MHz clock on SWD bus
hr = hResult[0]
m_sLastError = hResult[1]
# Set Hex File
print('Reading Hex file')
hResult = pp.ReadHexFile(hex_file)
hr = hResult[0]
hexImageSize = int(hResult[1])
m_sLastError = hResult[2]
if (not SUCCEEDED(hr)): return hr
# Set Acquire Mode
pp.SetAcquireMode("Reset")
#The "Programming Flow" proper
#Acquire Device
print('Acquiring device')
hResult = pp.DAP_Acquire()
hr = hResult[0]
m_sLastError = hResult[1]
if (not SUCCEEDED(hr)): return hr
#Check Hex File and Device compatibility
fCompatibility = 0
print('Checking Hex file and device compatability')
hResult = CheckHexAndDeviceCompatibility()
hr = hResult[0]
fCompatibility = hResult[1]
if (not SUCCEEDED(hr)): return hr
if (fCompatibility == 0):
m_sLastError = "The Hex file does not match the acquired device, please connect the appropriate device";
return -1
#Erase All
print('Erasing all flash')
hResult = pp.PSoC3_EraseAll();
hr = hResult[0]
m_sLastError = hResult[1]
if (not SUCCEEDED(hr)): return hr
#Program User NVL arrays
print('Programming User NVL arrays')
hr = ProgramNvlArrays(enumSonosArrays.ARRAY_NVL_USER)
if (not SUCCEEDED(hr)): return hr
#Program Flash arrays
print('Programming Flash arrays')
hr = ProgramFlashArrays(hexImageSize)
#Protect All arrays
print('Protecting all arrays')
hResult = pp.PSoC3_ProtectAll()
hr = hResult[0]
m_sLastError = hResult[1]
if (not SUCCEEDED(hr)): return hr
#CheckSum
print('Generating checksum')
hResult = CheckSum_All(hexImageSize)
hr = hResult[0]
flashChecksum = hResult[1]
if (not SUCCEEDED(hr)): return hr
hResult = pp.ReadHexChecksum()
hr = hResult[0]
hexChecksum = hResult[1]
m_sLastError = hResult[2]
if (not SUCCEEDED(hr)): return hr
flashChecksum &= 0xFFFF
if (flashChecksum != hexChecksum):
print("Mismatch of Checksum: Expected 0x%x, Got 0x%x" %(flashChecksum, hexChecksum))
return -1
else:
print("Checksum 0x%x" %(flashChecksum))
#Release PSoC3 device
#For PSoC5 LP call "Allow_Hard_Reset()" as in C#/C++ example
Allow_Hard_Reset()
hResult = pp.DAP_ReleaseChip()
hr = hResult[0]
m_sLastError = hResult[1]
return hr
def UpgradeBlock():
global m_sLastError
#Setup Power - "5.0V" and internal
hResult = pp.SetPowerVoltage("5.0")
hr = hResult[0]
m_sLastError = hResult[1]
if (not SUCCEEDED(hr)): return hr
hResult = pp.PowerOn()
hr = hResult[0]
m_sLastError = hResult[1]
if (not SUCCEEDED(hr)): return hr
#Set protocol, connector and frequency
hResult = pp.SetProtocol(enumInterfaces.SWD)
hr = hResult[0]
m_sLastError = hResult[1]
if (not SUCCEEDED(hr)): return hr
#hResult = pp.SetProtocolConnector(1); #10-pin connector
hResult = pp.SetProtocolConnector(0); #5-pin connector
hr = hResult[0]
m_sLastError = hResult[1]
if (not SUCCEEDED(hr)): return hr
hResult = pp.SetProtocolClock(enumFrequencies.FREQ_03_0); #<=3.0 MHz for Read operations
hr = hResult[0]
m_sLastError = hResult[1]
#Set Acquire Mode
pp.SetAcquireMode("Reset")
#Acquire Device
hResult = pp.DAP_Acquire()
hr = hResult[0]
m_sLastError = hResult[1]
if (not SUCCEEDED(hr)): return hr
#Write Row, use PSoC3_WriteRow() instead PSoC3_ProgramRow()
writeData = [] #User and Config area of the Row (256+32)
for i in range(0, 288):
writeData.append(i & 0xFF)
data = array.array('B',writeData)
arrayID = 0
rowID = 255
hResult = pp.PSoC3_WriteRow(arrayID, rowID, buffer(data))
hr = hResult[0]
m_sLastError = hResult[1]
if (not SUCCEEDED(hr)): return hr
#Verify Row - only user area (without Config one)
hResult = pp.PSoC3_ReadRow(arrayID, rowID, 0)
hr = hResult[0]
readRow = hResult[1]
if PY3:
readRow = [chr(c) for c in readRow]
m_sLastError = hResult[2]
if (not SUCCEEDED(hr)): return hr
for i in range(0, len(readRow)): #check 256 bytes
if (ord(readRow[i]) != writeData[i]):
hr = -1
break
if (not SUCCEEDED(hr)):
m_sLastError = "Verification of User area failed!"
return hr
#Verify Row - Config are only
hResult = pp.PSoC3_ReadRow(arrayID, rowID, 1)
hr = hResult[0]
readRow = hResult[1]
m_sLastError = hResult[2]
if (not SUCCEEDED(hr)): return hr
for i in range(0, len(readRow)): #check 256 bytes
if (ord(readRow[i]) != writeData[256+i]):
hr = -1
break
if (not SUCCEEDED(hr)):
m_sLastError = "Verification of Config area failed!"
return hr
#Release PSoC3 chip
#For PSoC5 LP call "Allow_Hard_Reset()" as in C#/C++ example
Allow_Hard_Reset()
hResult = pp.DAP_ReleaseChip()
hr = hResult[0]
m_sLastError = hResult[1]
return hr
def Execute(hex_file):
print("Opening Port")
hr = OpenPort()
if (not SUCCEEDED(hr)): return hr
hr = ProgramAll(hex_file)
#hr = UpgradeBlock()
print('Closing Port')
ClosePort()
return hr
def PowerOffDevice():
global m_sLastError
print("Opening Port")
hr = OpenPort()
if (not SUCCEEDED(hr)): return hr
#Setup Power - "5.0V" and internal
print('Setting Power Voltage to 5.0V')
hResult = pp.SetPowerVoltage("5.0")
hr = hResult[0]
m_sLastError = hResult[1]
if (not SUCCEEDED(hr)): return hr
print('Turning off device')
hResult = pp.PowerOff()
hr = hResult[0]
m_sLastError = hResult[1]
if (not SUCCEEDED(hr)): return hr
print('Closing Port')
ClosePort()
return hr
#Begin main program
if __name__ == '__main__':
#Use Version Independent Prog ID to instantiate COM-object
pp = win32com.client.Dispatch("PSoCProgrammerCOM.PSoCProgrammerCOM_Object")
#For version dependent Prog ID use below commented line, but update there COM-object version (e.g. 14)
#pp = win32com.client.Dispatch("PSoCProgrammerCOM.PSoCProgrammerCOM_Object.14")
if len(sys.argv) == 2 and sys.argv[1] == '--power-off-device':
hr = PowerOffDevice()
else:
print("Program All using COM-object interface only")
hex_file = os.path.abspath(sys.argv[1])
print("Using Hex File:", hex_file)
hr = Execute(hex_file)
if (SUCCEEDED(hr)):
str = "Succeeded!"
exit_code = 0
else:
str = "Failed! " + m_sLastError
exit_code = 1
print(str)
sys.exit(exit_code)
#End main function
| {
"content_hash": "4d4054120a341bab903243e342d484b6",
"timestamp": "",
"source": "github",
"line_count": 613,
"max_line_length": 207,
"avg_line_length": 32.54649265905383,
"alnum_prop": 0.61841511703674,
"repo_name": "open-storm/perfect-cell",
"id": "1972c2e50004c20b0be261b879d281de43980f1c",
"size": "21074",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "build_tools/psoc_program.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "188252"
},
{
"name": "C++",
"bytes": "148"
},
{
"name": "Python",
"bytes": "42431"
}
],
"symlink_target": ""
} |
import factory
from oscar.core.loading import get_model
__all__ = [
'RangeFactory', 'ConditionFactory', 'BenefitFactory',
'ConditionalOfferFactory',
]
class RangeFactory(factory.DjangoModelFactory):
name = factory.Sequence(lambda n: 'Range %d' % n)
slug = factory.Sequence(lambda n: 'range-%d' % n)
class Meta:
model = get_model('offer', 'Range')
@factory.post_generation
def products(self, create, extracted, **kwargs):
if not create or not extracted:
return
RangeProduct = get_model('offer', 'RangeProduct')
for product in extracted:
RangeProduct.objects.create(product=product, range=self)
class BenefitFactory(factory.DjangoModelFactory):
type = get_model('offer', 'Benefit').PERCENTAGE
value = 10
max_affected_items = None
range = factory.SubFactory(RangeFactory)
class Meta:
model = get_model('offer', 'Benefit')
class ConditionFactory(factory.DjangoModelFactory):
type = get_model('offer', 'Condition').COUNT
value = 10
range = factory.SubFactory(RangeFactory)
class Meta:
model = get_model('offer', 'Condition')
class ConditionalOfferFactory(factory.DjangoModelFactory):
name = 'Test offer'
benefit = factory.SubFactory(BenefitFactory)
condition = factory.SubFactory(ConditionFactory)
class Meta:
model = get_model('offer', 'ConditionalOffer')
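# Usage sketch (illustrative; relies only on the factories defined above, `product_a`
# and `product_b` are placeholders):
#
#   offer = ConditionalOfferFactory()
#   offer.benefit.type       # Benefit.PERCENTAGE with value 10
#   offer.condition.type     # Condition.COUNT with value 10
#   offer.benefit.range      # Range auto-created via SubFactory
#
#   # attach products to a range at creation time (handled by the post_generation hook)
#   product_range = RangeFactory(products=[product_a, product_b])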
| {
"content_hash": "ff909eadfc205b173cc9cc79ca9a7d9d",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 68,
"avg_line_length": 26.462962962962962,
"alnum_prop": 0.675297410776767,
"repo_name": "john-parton/django-oscar",
"id": "b5ddaf402321506f72c1f2c6558e09ee6fb692ab",
"size": "1429",
"binary": false,
"copies": "15",
"ref": "refs/heads/master",
"path": "src/oscar/test/factories/offer.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "542048"
},
{
"name": "HTML",
"bytes": "495616"
},
{
"name": "JavaScript",
"bytes": "413706"
},
{
"name": "Makefile",
"bytes": "2653"
},
{
"name": "Python",
"bytes": "1712293"
},
{
"name": "Shell",
"bytes": "2751"
}
],
"symlink_target": ""
} |
import os
from pants.base.build_environment import get_buildroot, pants_version
from pants.build_graph.aliased_target import AliasTargetFactory
from pants.build_graph.build_file_aliases import BuildFileAliases
from pants.build_graph.files import Files
from pants.build_graph.intransitive_dependency import (
IntransitiveDependencyFactory,
ProvidedDependencyFactory,
)
from pants.build_graph.prep_command import PrepCommand
from pants.build_graph.remote_sources import RemoteSources
from pants.build_graph.resources import Resources
from pants.build_graph.target import Target
from pants.build_graph.target_scopes import ScopedDependencyFactory
from pants.util.netrc import Netrc
"""Register the elementary BUILD file constructs."""
class BuildFilePath:
def __init__(self, parse_context):
self._parse_context = parse_context
def __call__(self):
"""
:returns: The absolute path of this BUILD file.
"""
return os.path.join(get_buildroot(), self._parse_context.rel_path)
def build_file_aliases():
return BuildFileAliases(
targets={
"alias": AliasTargetFactory(),
"files": Files,
"prep_command": PrepCommand,
"resources": Resources,
"remote_sources": RemoteSources,
"target": Target,
},
objects={"get_buildroot": get_buildroot, "netrc": Netrc, "pants_version": pants_version,},
context_aware_object_factories={
"buildfile_path": BuildFilePath,
"intransitive": IntransitiveDependencyFactory,
"provided": ProvidedDependencyFactory,
"scoped": ScopedDependencyFactory,
},
)
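# Illustrative BUILD-file usage of the aliases registered above (target names and
# globs are placeholders, not part of this module):
#
#   resources(
#     name='config',
#     sources=['*.yaml'],
#   )
#
#   target(
#     name='all',
#     dependencies=[':config'],
#   )
#
# Context-aware helpers are available inside BUILD files as well, e.g.
# buildfile_path() returns the absolute path of the BUILD file being parsed.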
| {
"content_hash": "132742d3ac39ad74ccc054bd49c40765",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 98,
"avg_line_length": 34.61224489795919,
"alnum_prop": 0.6904481132075472,
"repo_name": "wisechengyi/pants",
"id": "cd57295efc5ef36df0c82425fbac654f8e084edb",
"size": "1828",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/python/pants/build_graph/register.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "655"
},
{
"name": "C++",
"bytes": "2010"
},
{
"name": "CSS",
"bytes": "9444"
},
{
"name": "Dockerfile",
"bytes": "6634"
},
{
"name": "GAP",
"bytes": "1283"
},
{
"name": "Gherkin",
"bytes": "919"
},
{
"name": "Go",
"bytes": "2765"
},
{
"name": "HTML",
"bytes": "44381"
},
{
"name": "Java",
"bytes": "507948"
},
{
"name": "JavaScript",
"bytes": "22906"
},
{
"name": "Python",
"bytes": "7608990"
},
{
"name": "Rust",
"bytes": "1005243"
},
{
"name": "Scala",
"bytes": "106520"
},
{
"name": "Shell",
"bytes": "105217"
},
{
"name": "Starlark",
"bytes": "489739"
},
{
"name": "Thrift",
"bytes": "2953"
}
],
"symlink_target": ""
} |
import json
import os
import shutil
from datetime import date, timedelta
import mock
from django.conf import settings
from django.core import management
from olympia import amo
from olympia.amo.tests import TestCase
from olympia.addons.models import Addon, Persona
from olympia.stats.management.commands import (
save_stats_to_file, serialize_stats)
from olympia.stats.management.commands.download_counts_from_file import is_valid_source # noqa
from olympia.stats.management.commands.update_counts_from_file import Command
from olympia.stats.models import (
DownloadCount, ThemeUpdateCount, UpdateCount, ThemeUserCount)
from olympia.zadmin.models import DownloadSource
hive_folder = os.path.join(settings.ROOT, 'src/olympia/stats/fixtures/files')
class FixturesFolderMixin(object):
# You have to define these two values in your subclasses.
date = 'YYYY-MM-DD'
source_folder = 'dummy'
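    # clean_up_files() removes the per-date copy of the fixtures that setUp()
    # creates from source_folder.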
def clean_up_files(self):
dirpath = os.path.join(hive_folder, self.date)
if os.path.isdir(dirpath):
for name in os.listdir(dirpath):
os.unlink(os.path.join(dirpath, name))
os.rmdir(dirpath)
def setUp(self):
super(FixturesFolderMixin, self).setUp()
self.clean_up_files()
shutil.copytree(os.path.join(hive_folder, self.source_folder),
os.path.join(hive_folder, self.date))
def tearDown(self):
self.clean_up_files()
super(FixturesFolderMixin, self).tearDown()
class TestADICommand(FixturesFolderMixin, TestCase):
fixtures = ('base/addon_3615', 'base/featured', 'addons/persona',
'base/appversion.json')
date = '2014-07-10'
source_folder = 'src'
def setUp(self):
super(TestADICommand, self).setUp()
self.command = Command()
@mock.patch(
'olympia.stats.management.commands.update_counts_from_file.'
'save_stats_to_file')
def test_update_counts_from_file(self, mock_save_stats_to_file):
management.call_command('update_counts_from_file', hive_folder,
date=self.date)
assert UpdateCount.objects.all().count() == 1
update_count = UpdateCount.objects.last()
assert update_count.count == 5
assert update_count.date == date(2014, 7, 10)
assert update_count.versions == {u'3.8': 2, u'3.7': 3}
assert update_count.statuses == {u'userEnabled': 5}
application = u'{ec8030f7-c20a-464f-9b0e-13a3a9e97384}'
assert update_count.applications[application] == {u'3.6': 18}
assert update_count.oses == {u'WINNT': 5}
assert update_count.locales == {u'en-us': 1, u'en-US': 4}
# save_stats_to_file is called with a non-saved model.
update_count.id = None
mock_save_stats_to_file.assert_called_once_with(update_count)
def test_update_version(self):
# Initialize the known addons and their versions.
self.command.addons_versions = {3615: ['3.5', '3.6']}
uc = UpdateCount(addon_id=3615)
self.command.update_version(uc, '3.6', 123)
assert uc.versions == {'3.6': 123}
# Test very long version:
self.command.update_version(uc, '1' * 33, 1)
assert uc.versions == {'3.6': 123, '1' * 32: 1} # Trimmed.
def test_update_status(self):
uc = UpdateCount(addon_id=3615)
self.command.update_status(uc, 'foobar', 123) # Non-existent status.
assert not uc.statuses
self.command.update_status(uc, 'userEnabled', 123)
assert uc.statuses == {'userEnabled': 123}
def test_update_app(self):
firefox_guid = '{ec8030f7-c20a-464f-9b0e-13a3a9e97384}'
uc = UpdateCount(addon_id=3615)
self.command.update_app(uc, 'foobar', '1.0', 123) # Non-existent app.
assert not uc.applications
# Malformed versions.
self.command.update_app(uc, firefox_guid, '3.0.1.2', 123)
self.command.update_app(uc, firefox_guid, '3.0123', 123)
self.command.update_app(uc, firefox_guid, '3.0c2', 123)
self.command.update_app(uc, firefox_guid, 'a.b.c', 123)
assert not uc.applications
# Well formed versions.
self.command.update_app(uc, firefox_guid, '1.0', 123)
self.command.update_app(uc, firefox_guid, '1.0.1', 124)
self.command.update_app(uc, firefox_guid, '1.0a1', 125)
self.command.update_app(uc, firefox_guid, '1.0b2', 126)
assert uc.applications == {firefox_guid: {
'1.0': 123,
'1.0.1': 124,
'1.0a1': 125,
'1.0b2': 126}}
def test_update_os(self):
uc = UpdateCount(addon_id=3615)
self.command.update_os(uc, 'foobar', 123) # Non-existent OS.
assert not uc.oses
self.command.update_os(uc, 'WINNT', 123)
assert uc.oses == {'WINNT': 123}
def test_update_locale(self):
current_locales = [ # Taken from the language pack index.
'ach', 'af', 'ak', 'an', 'ar', 'as', 'ast', 'ast-ES', 'az',
'bb-BK', 'be', 'bg', 'bn-BD', 'bn-IN', 'br', 'bs', 'ca',
'ca-valencia', 'cs', 'csb', 'cy', 'cy-GB', 'da', 'de', 'dsb', 'el',
'en-GB', 'en-ZA', 'eo', 'es-AR', 'es-CL', 'es-ES', 'es-MX', 'et',
'eu', 'fa', 'ff', 'fi', 'fj-FJ', 'fr', 'fur-IT', 'fy-NL', 'ga-IE',
'gd', 'gl', 'gu-IN', 'he', 'hi', 'hi-IN', 'hr', 'hsb', 'hu',
'hy-AM', 'id', 'is', 'it', 'ja', 'kk', 'km', 'kn', 'ko', 'ku',
'lg', 'lij', 'lt', 'lv', 'mai', 'mg', 'mk', 'ml', 'mr', 'ms',
'nb-NO', 'nl', 'nn-NO', 'nr', 'nso', 'or', 'pa-IN', 'pl', 'pt-BR',
'pt-PT', 'rm', 'ro', 'ru', 'si', 'sk', 'sl', 'son', 'sq', 'sr',
'ss', 'st', 'sv-SE', 'sw', 'sw-TZ', 'ta', 'ta-IN', 'ta-LK', 'te',
'th', 'tn', 'tr', 'ts', 'uk', 've', 'vi', 'wa', 'wo-SN', 'xh',
'zap-MX-diiste', 'zh-CN', 'zh-TW', 'zu']
uc = UpdateCount(addon_id=3615)
self.command.update_locale(uc, 'foobar', 123) # Non-existent locale.
assert not uc.locales
for locale in current_locales:
self.command.update_locale(uc, locale, 1)
assert len(uc.locales) == len(current_locales)
def test_trim_field(self):
uc = UpdateCount(addon_id=3615, count=1, date='2015-01-11')
self.command.trim_field(uc.versions) # Empty field.
assert not uc.versions
uc.versions = {'3.6': 123, '3.7': 321}
self.command.trim_field(uc.versions) # Small enough to fit in the db.
assert uc.versions == {'3.6': 123, '3.7': 321} # Unchanged.
very_long_key = 'x' * (2 ** 16)
uc.versions[very_long_key] = 1
self.command.trim_field(uc.versions) # Too big, must be trimmed.
assert uc.versions == {'3.6': 123, '3.7': 321} # Keep the most used.
uc.versions[very_long_key] = 1000 # Most used.
self.command.trim_field(uc.versions) # Too big, must be trimmed.
# Nothing left: least used removed, but still too big, so all the keys
# were removed.
assert uc.versions == {}
# Make sure we can store a very large field in the database.
long_key = 'x' * 65528 # This makes the dict barely fit in the db.
uc.versions[long_key] = 1
assert len(json.dumps(uc.versions)) == (2 ** 16) - 1
uc.save()
uc = UpdateCount.objects.get(pk=uc.pk) # Reload
# Fits in the database, so no truncation.
assert len(json.dumps(uc.versions)) == (2 ** 16) - 1
@mock.patch(
'olympia.stats.management.commands.download_counts_from_file.'
'save_stats_to_file')
def test_download_counts_from_file(self, mock_save_stats_to_file):
# Create the necessary "valid download sources" entries.
DownloadSource.objects.create(name='search', type='full')
DownloadSource.objects.create(name='coll', type='prefix')
management.call_command('download_counts_from_file', hive_folder,
date=self.date)
assert DownloadCount.objects.all().count() == 1
download_count = DownloadCount.objects.last()
assert download_count.count == 2
assert download_count.date == date(2014, 7, 10)
assert download_count.sources == {u'search': 1, u'collection': 1}
# save_stats_to_file is called with a non-saved model.
download_count.id = None
mock_save_stats_to_file.assert_called_once_with(download_count)
@mock.patch('olympia.stats.management.commands.save_stats_to_file')
def test_theme_update_counts_from_file(self, mock_save_stats_to_file):
management.call_command('theme_update_counts_from_file', hive_folder,
date=self.date)
assert ThemeUpdateCount.objects.all().count() == 2
tuc1 = ThemeUpdateCount.objects.get(addon_id=3615)
assert tuc1.count == 2
# Persona 813 has addon id 15663: we need the count to be the sum of
# the "old" request on the persona_id 813 (only the one with the source
# "gp") and the "new" request on the addon_id 15663.
tuc2 = ThemeUpdateCount.objects.get(addon_id=15663)
assert tuc2.count == 15
assert mock_save_stats_to_file.call_count == 2
# save_stats_to_file is called with a non-saved model.
tuc1.id = None
tuc2.id = None
mock_save_stats_to_file.assert_has_calls(
[mock.call(tuc1), mock.call(tuc2)])
def test_update_theme_popularity_movers(self):
# Create ThemeUpdateCount entries for the persona 559 with addon_id
# 15663 and the persona 575 with addon_id 15679 for the last 28 days.
# We start from the previous day, as the theme_update_counts_from_*
# scripts are gathering data for the day before.
today = date.today()
yesterday = today - timedelta(days=1)
for i in range(28):
d = yesterday - timedelta(days=i)
ThemeUpdateCount.objects.create(addon_id=15663, count=i, date=d)
ThemeUpdateCount.objects.create(addon_id=15679,
count=i * 100, date=d)
# Compute the popularity and movers.
management.call_command('update_theme_popularity_movers')
p1 = Persona.objects.get(pk=559)
p2 = Persona.objects.get(pk=575)
# The popularity is the average over the last 7 days, and as we created
# entries with one more user per day in the past (or 100 more), the
# calculation is "sum(range(7)) / 7" (or "sum(range(7)) * 100 / 7").
assert p1.popularity == 3 # sum(range(7)) / 7
assert p2.popularity == 300 # sum(range(7)) * 100 / 7
# A ThemeUserCount row should have been created for each Persona with
# today's date and the Persona popularity.
t1 = ThemeUserCount.objects.get(addon_id=15663)
t2 = ThemeUserCount.objects.get(addon_id=15679)
assert t1.date == today
assert t1.count == p1.popularity
assert t2.date == today
assert t2.count == p2.popularity
        # The movers value is computed with the following formula:
        # previous_3_weeks: the average over the 21 days before the last 7 days
        # movers: (popularity - previous_3_weeks) / previous_3_weeks
        # With these fixtures, previous_3_weeks is
        # (sum(range(28)) - sum(range(7))) / 21 == 17 for p1, so 1700 for p2.
assert p1.movers == 0.0 # Because the popularity is <= 100.
# We round the results to cope with floating point imprecision.
assert round(p2.movers, 5) == round((300.0 - 1700) / 1700, 5)
def test_is_valid_source(self):
assert is_valid_source('foo',
fulls=['foo', 'bar'],
prefixes=['baz', 'cruux'])
assert not is_valid_source('foob',
fulls=['foo', 'bar'],
prefixes=['baz', 'cruux'])
assert is_valid_source('foobaz',
fulls=['foo', 'bar'],
prefixes=['baz', 'cruux'])
assert not is_valid_source('ba',
fulls=['foo', 'bar'],
prefixes=['baz', 'cruux'])
class TestThemeADICommand(FixturesFolderMixin, TestCase):
date = '2014-11-06'
fixtures = ['base/appversion.json']
source_folder = '1093699'
@mock.patch(
'olympia.stats.management.commands.update_counts_from_file.'
'save_stats_to_file')
def test_update_counts_from_file_bug_1093699(self,
mock_save_stats_to_file):
Addon.objects.create(guid='{fe9e9f88-42f0-40dc-970b-4b0e6b7a3d0b}',
type=amo.ADDON_THEME)
management.call_command('update_counts_from_file', hive_folder,
date=self.date)
assert UpdateCount.objects.all().count() == 1
uc = UpdateCount.objects.last()
assert uc.count == 1320
        assert uc.date == date(2014, 11, 6)
assert (uc.versions ==
{u'1.7.16': 1, u'userEnabled': 3, u'1.7.13': 2, u'1.7.11': 3,
u'1.6.0': 1, u'1.7.14': 1304, u'1.7.6': 6})
assert (uc.statuses ==
{u'Unknown': 3, u'userEnabled': 1259, u'userDisabled': 58})
assert uc.oses == {u'WINNT': 1122, u'Darwin': 114, u'Linux': 84}
assert uc.locales[u'es-ES'] == 20
assert (uc.applications[u'{92650c4d-4b8e-4d2a-b7eb-24ecf4f6b63a}'] ==
{u'2.0': 3})
uc.id = None # save_stats_to_file is called with a non-saved model.
mock_save_stats_to_file.assert_called_once_with(uc)
def test_stats_from_model_theme_update_count():
result = serialize_stats(
ThemeUpdateCount(addon_id=321, date='2016-01-18', count=123))
assert json.loads(result) == {
'date': '2016-01-18',
'addon': 321,
'count': 123}
def test_stats_from_model_update_count():
result = serialize_stats(
UpdateCount(
addon_id=321, date='2016-01-18',
count=123,
versions={u'3.8': 2, u'3.7': 3},
statuses={u'userEnabled': 5},
applications={u'{ec8030f7-c20a-464f-9b0e-13a3a9e97384}':
{u'3.6': 18}},
oses={u'WINNT': 5},
locales={u'en-us': 1, u'en-US': 4}))
assert json.loads(result) == {
'date': '2016-01-18',
'addon': 321,
'count': 123,
'versions': {'3.7': 3, '3.8': 2},
'oses': {'WINNT': 5},
'applications': {
'{ec8030f7-c20a-464f-9b0e-13a3a9e97384}': {'3.6': 18}},
'locales': {'en-US': 4, 'en-us': 1},
'statuses': {'userEnabled': 5}}
def test_stats_from_model_download_count():
result = serialize_stats(
DownloadCount(
addon_id=321, date='2016-01-18', count=123,
sources={u'search': 1, u'collection': 1}))
assert json.loads(result) == {
'date': '2016-01-18',
'addon': 321,
'count': 123,
'sources': {'search': 1, 'collection': 1}}
@mock.patch('olympia.stats.management.commands.storage.save')
@mock.patch('olympia.stats.management.commands.ContentFile')
def test_save_stats_to_file(mock_ContentFile, mock_storage):
mock_ContentFile.return_value = mock.sentinel.content
theme_update_count = ThemeUpdateCount(
addon_id=321, date='2016-01-18', count=123)
save_stats_to_file(theme_update_count)
mock_storage.assert_called_once_with(
'321/2016/01/2016_01_18_themeupdatecount.json', mock.sentinel.content)
| {
"content_hash": "467adb1116ae6bb7d08c6c9d2d2b27ca",
"timestamp": "",
"source": "github",
"line_count": 358,
"max_line_length": 95,
"avg_line_length": 44.13966480446928,
"alnum_prop": 0.5801797240855587,
"repo_name": "jpetto/olympia",
"id": "cddc3e435062bc2c9ebad09ba2b998fde8b69f57",
"size": "15802",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/olympia/stats/tests/test_commands.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "249"
},
{
"name": "CSS",
"bytes": "665496"
},
{
"name": "HTML",
"bytes": "1606994"
},
{
"name": "JavaScript",
"bytes": "1315514"
},
{
"name": "Makefile",
"bytes": "4235"
},
{
"name": "PLSQL",
"bytes": "74"
},
{
"name": "Python",
"bytes": "4026490"
},
{
"name": "Shell",
"bytes": "9145"
},
{
"name": "Smarty",
"bytes": "1930"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import json
import unittest
import bleach
import doctest
import mock
import multiprocessing
import os
import re
import signal
import sqlalchemy
import subprocess
import tempfile
import warnings
from datetime import timedelta
from dateutil.relativedelta import relativedelta
from email.mime.application import MIMEApplication
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from freezegun import freeze_time
from numpy.testing import assert_array_almost_equal
from six.moves.urllib.parse import urlencode
from time import sleep
from airflow import configuration
from airflow.executors import SequentialExecutor
from airflow.models import Variable
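# Load the unit-test configuration before importing the rest of Airflow so
# that the remaining imports pick up the test settings.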
configuration.conf.load_test_config()
from airflow import jobs, models, DAG, utils, macros, settings, exceptions
from airflow.models import BaseOperator
from airflow.operators.bash_operator import BashOperator
from airflow.operators.check_operator import CheckOperator, ValueCheckOperator
from airflow.operators.dagrun_operator import TriggerDagRunOperator
from airflow.operators.python_operator import PythonOperator
from airflow.operators.dummy_operator import DummyOperator
from airflow.hooks.base_hook import BaseHook
from airflow.hooks.sqlite_hook import SqliteHook
from airflow.bin import cli
from airflow.www import app as application
from airflow.settings import Session
from airflow.utils import timezone
from airflow.utils.timezone import datetime
from airflow.utils.state import State
from airflow.utils.dates import infer_time_unit, round_time, scale_time_units
from lxml import html
from airflow.exceptions import AirflowException
from airflow.configuration import AirflowConfigException, run_command
from jinja2.sandbox import SecurityError
from jinja2 import UndefinedError
import six
NUM_EXAMPLE_DAGS = 20
DEV_NULL = '/dev/null'
TEST_DAG_FOLDER = os.path.join(
os.path.dirname(os.path.realpath(__file__)), 'dags')
DEFAULT_DATE = datetime(2015, 1, 1)
DEFAULT_DATE_ISO = DEFAULT_DATE.isoformat()
DEFAULT_DATE_DS = DEFAULT_DATE_ISO[:10]
TEST_DAG_ID = 'unit_tests'
try:
import cPickle as pickle
except ImportError:
# Python 3
import pickle
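# Wipe any task instances left over from previous runs of the test DAG.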
def reset(dag_id=TEST_DAG_ID):
session = Session()
tis = session.query(models.TaskInstance).filter_by(dag_id=dag_id)
tis.delete()
session.commit()
session.close()
reset()
class OperatorSubclass(BaseOperator):
"""
An operator to test template substitution
"""
template_fields = ['some_templated_field']
def __init__(self, some_templated_field, *args, **kwargs):
super(OperatorSubclass, self).__init__(*args, **kwargs)
self.some_templated_field = some_templated_field
def execute(*args, **kwargs):
pass
class CoreTest(unittest.TestCase):
default_scheduler_args = {"num_runs": 1}
def setUp(self):
configuration.conf.load_test_config()
self.dagbag = models.DagBag(
dag_folder=DEV_NULL, include_examples=True)
self.args = {'owner': 'airflow', 'start_date': DEFAULT_DATE}
self.dag = DAG(TEST_DAG_ID, default_args=self.args)
self.dag_bash = self.dagbag.dags['example_bash_operator']
self.runme_0 = self.dag_bash.get_task('runme_0')
self.run_after_loop = self.dag_bash.get_task('run_after_loop')
self.run_this_last = self.dag_bash.get_task('run_this_last')
def test_schedule_dag_no_previous_runs(self):
"""
Tests scheduling a dag with no previous runs
"""
dag = DAG(TEST_DAG_ID + 'test_schedule_dag_no_previous_runs')
dag.add_task(models.BaseOperator(
task_id="faketastic",
owner='Also fake',
start_date=datetime(2015, 1, 2, 0, 0)))
dag_run = jobs.SchedulerJob(**self.default_scheduler_args).create_dag_run(dag)
self.assertIsNotNone(dag_run)
self.assertEqual(dag.dag_id, dag_run.dag_id)
self.assertIsNotNone(dag_run.run_id)
self.assertNotEqual('', dag_run.run_id)
self.assertEqual(
datetime(2015, 1, 2, 0, 0),
dag_run.execution_date,
msg='dag_run.execution_date did not match expectation: {0}'
.format(dag_run.execution_date)
)
self.assertEqual(State.RUNNING, dag_run.state)
self.assertFalse(dag_run.external_trigger)
dag.clear()
def test_schedule_dag_fake_scheduled_previous(self):
"""
Test scheduling a dag where there is a prior DagRun
which has the same run_id as the next run should have
"""
delta = timedelta(hours=1)
dag = DAG(TEST_DAG_ID + 'test_schedule_dag_fake_scheduled_previous',
schedule_interval=delta,
start_date=DEFAULT_DATE)
dag.add_task(models.BaseOperator(
task_id="faketastic",
owner='Also fake',
start_date=DEFAULT_DATE))
scheduler = jobs.SchedulerJob(**self.default_scheduler_args)
dag.create_dagrun(run_id=models.DagRun.id_for_date(DEFAULT_DATE),
execution_date=DEFAULT_DATE,
state=State.SUCCESS,
external_trigger=True)
dag_run = scheduler.create_dag_run(dag)
self.assertIsNotNone(dag_run)
self.assertEqual(dag.dag_id, dag_run.dag_id)
self.assertIsNotNone(dag_run.run_id)
self.assertNotEqual('', dag_run.run_id)
self.assertEqual(
DEFAULT_DATE + delta,
dag_run.execution_date,
msg='dag_run.execution_date did not match expectation: {0}'
.format(dag_run.execution_date)
)
self.assertEqual(State.RUNNING, dag_run.state)
self.assertFalse(dag_run.external_trigger)
def test_schedule_dag_once(self):
"""
Tests scheduling a dag scheduled for @once - should be scheduled the first time
it is called, and not scheduled the second.
"""
dag = DAG(TEST_DAG_ID + 'test_schedule_dag_once')
dag.schedule_interval = '@once'
dag.add_task(models.BaseOperator(
task_id="faketastic",
owner='Also fake',
start_date=datetime(2015, 1, 2, 0, 0)))
dag_run = jobs.SchedulerJob(**self.default_scheduler_args).create_dag_run(dag)
dag_run2 = jobs.SchedulerJob(**self.default_scheduler_args).create_dag_run(dag)
self.assertIsNotNone(dag_run)
self.assertIsNone(dag_run2)
dag.clear()
def test_fractional_seconds(self):
"""
Tests if fractional seconds are stored in the database
"""
dag = DAG(TEST_DAG_ID + 'test_fractional_seconds')
dag.schedule_interval = '@once'
dag.add_task(models.BaseOperator(
task_id="faketastic",
owner='Also fake',
start_date=datetime(2015, 1, 2, 0, 0)))
start_date = timezone.utcnow()
run = dag.create_dagrun(
run_id='test_' + start_date.isoformat(),
execution_date=start_date,
start_date=start_date,
state=State.RUNNING,
external_trigger=False
)
run.refresh_from_db()
self.assertEqual(start_date, run.execution_date,
"dag run execution_date loses precision")
self.assertEqual(start_date, run.start_date,
"dag run start_date loses precision ")
def test_schedule_dag_start_end_dates(self):
"""
Tests that an attempt to schedule a task after the Dag's end_date
does not succeed.
"""
delta = timedelta(hours=1)
runs = 3
start_date = DEFAULT_DATE
end_date = start_date + (runs - 1) * delta
dag = DAG(TEST_DAG_ID + 'test_schedule_dag_start_end_dates',
start_date=start_date,
end_date=end_date,
schedule_interval=delta)
dag.add_task(models.BaseOperator(task_id='faketastic',
owner='Also fake'))
# Create and schedule the dag runs
dag_runs = []
scheduler = jobs.SchedulerJob(**self.default_scheduler_args)
for i in range(runs):
dag_runs.append(scheduler.create_dag_run(dag))
additional_dag_run = scheduler.create_dag_run(dag)
for dag_run in dag_runs:
self.assertIsNotNone(dag_run)
self.assertIsNone(additional_dag_run)
@freeze_time('2016-01-01')
def test_schedule_dag_no_end_date_up_to_today_only(self):
"""
Tests that a Dag created without an end_date can only be scheduled up
to and including the current datetime.
For example, if today is 2016-01-01 and we are scheduling from a
start_date of 2015-01-01, only jobs up to, but not including
2016-01-01 should be scheduled.
"""
session = settings.Session()
delta = timedelta(days=1)
start_date = DEFAULT_DATE
runs = 365
dag = DAG(TEST_DAG_ID + 'test_schedule_dag_no_end_date_up_to_today_only',
start_date=start_date,
schedule_interval=delta)
dag.add_task(models.BaseOperator(task_id='faketastic',
owner='Also fake'))
dag_runs = []
scheduler = jobs.SchedulerJob(**self.default_scheduler_args)
for i in range(runs):
dag_run = scheduler.create_dag_run(dag)
dag_runs.append(dag_run)
# Mark the DagRun as complete
dag_run.state = State.SUCCESS
session.merge(dag_run)
session.commit()
# Attempt to schedule an additional dag run (for 2016-01-01)
additional_dag_run = scheduler.create_dag_run(dag)
for dag_run in dag_runs:
self.assertIsNotNone(dag_run)
self.assertIsNone(additional_dag_run)
def test_confirm_unittest_mod(self):
self.assertTrue(configuration.conf.get('core', 'unit_test_mode'))
def test_pickling(self):
dp = self.dag.pickle()
self.assertEqual(dp.pickle.dag_id, self.dag.dag_id)
def test_rich_comparison_ops(self):
class DAGsubclass(DAG):
pass
dag_eq = DAG(TEST_DAG_ID, default_args=self.args)
dag_diff_load_time = DAG(TEST_DAG_ID, default_args=self.args)
dag_diff_name = DAG(TEST_DAG_ID + '_neq', default_args=self.args)
dag_subclass = DAGsubclass(TEST_DAG_ID, default_args=self.args)
dag_subclass_diff_name = DAGsubclass(
TEST_DAG_ID + '2', default_args=self.args)
for d in [dag_eq, dag_diff_name, dag_subclass, dag_subclass_diff_name]:
d.last_loaded = self.dag.last_loaded
# test identity equality
self.assertEqual(self.dag, self.dag)
# test dag (in)equality based on _comps
self.assertEqual(dag_eq, self.dag)
self.assertNotEqual(dag_diff_name, self.dag)
self.assertNotEqual(dag_diff_load_time, self.dag)
# test dag inequality based on type even if _comps happen to match
self.assertNotEqual(dag_subclass, self.dag)
# a dag should equal an unpickled version of itself
d = pickle.dumps(self.dag)
self.assertEqual(pickle.loads(d), self.dag)
# dags are ordered based on dag_id no matter what the type is
self.assertLess(self.dag, dag_diff_name)
self.assertGreater(self.dag, dag_diff_load_time)
self.assertLess(self.dag, dag_subclass_diff_name)
# greater than should have been created automatically by functools
self.assertGreater(dag_diff_name, self.dag)
# hashes are non-random and match equality
self.assertEqual(hash(self.dag), hash(self.dag))
self.assertEqual(hash(dag_eq), hash(self.dag))
self.assertNotEqual(hash(dag_diff_name), hash(self.dag))
self.assertNotEqual(hash(dag_subclass), hash(self.dag))
def test_check_operators(self):
conn_id = "sqlite_default"
captainHook = BaseHook.get_hook(conn_id=conn_id)
captainHook.run("CREATE TABLE operator_test_table (a, b)")
captainHook.run("insert into operator_test_table values (1,2)")
t = CheckOperator(
task_id='check',
sql="select count(*) from operator_test_table",
conn_id=conn_id,
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
t = ValueCheckOperator(
task_id='value_check',
pass_value=95,
tolerance=0.1,
conn_id=conn_id,
sql="SELECT 100",
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
captainHook.run("drop table operator_test_table")
def test_clear_api(self):
task = self.dag_bash.tasks[0]
task.clear(
start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
upstream=True, downstream=True)
ti = models.TaskInstance(task=task, execution_date=DEFAULT_DATE)
ti.are_dependents_done()
def test_illegal_args(self):
"""
Tests that Operators reject illegal arguments
"""
with warnings.catch_warnings(record=True) as w:
t = BashOperator(
task_id='test_illegal_args',
bash_command='echo success',
dag=self.dag,
illegal_argument_1234='hello?')
self.assertTrue(
issubclass(w[0].category, PendingDeprecationWarning))
self.assertIn(
'Invalid arguments were passed to BashOperator.',
w[0].message.args[0])
def test_bash_operator(self):
t = BashOperator(
task_id='test_bash_operator',
bash_command="echo success",
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_bash_operator_multi_byte_output(self):
t = BashOperator(
task_id='test_multi_byte_bash_operator',
bash_command=u"echo \u2600",
dag=self.dag,
output_encoding='utf-8')
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_bash_operator_kill(self):
import psutil
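        # Embed this test process's pid in the sleep duration so the orphaned
        # `sleep` child can be identified unambiguously with psutil below.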
sleep_time = "100%d" % os.getpid()
t = BashOperator(
task_id='test_bash_operator_kill',
execution_timeout=timedelta(seconds=1),
bash_command="/bin/bash -c 'sleep %s'" % sleep_time,
dag=self.dag)
self.assertRaises(
exceptions.AirflowTaskTimeout,
t.run,
start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
sleep(2)
pid = -1
for proc in psutil.process_iter():
if proc.cmdline() == ['sleep', sleep_time]:
pid = proc.pid
if pid != -1:
os.kill(pid, signal.SIGTERM)
self.fail("BashOperator's subprocess still running after stopping on timeout!")
def test_trigger_dagrun(self):
def trigga(context, obj):
if True:
return obj
t = TriggerDagRunOperator(
task_id='test_trigger_dagrun',
trigger_dag_id='example_bash_operator',
python_callable=trigga,
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_dryrun(self):
t = BashOperator(
task_id='test_dryrun',
bash_command="echo success",
dag=self.dag)
t.dry_run()
def test_sqlite(self):
import airflow.operators.sqlite_operator
t = airflow.operators.sqlite_operator.SqliteOperator(
task_id='time_sqlite',
sql="CREATE TABLE IF NOT EXISTS unitest (dummy VARCHAR(20))",
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_timeout(self):
t = PythonOperator(
task_id='test_timeout',
execution_timeout=timedelta(seconds=1),
python_callable=lambda: sleep(5),
dag=self.dag)
self.assertRaises(
exceptions.AirflowTaskTimeout,
t.run,
start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_python_op(self):
def test_py_op(templates_dict, ds, **kwargs):
if not templates_dict['ds'] == ds:
raise Exception("failure")
t = PythonOperator(
task_id='test_py_op',
provide_context=True,
python_callable=test_py_op,
templates_dict={'ds': "{{ ds }}"},
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_complex_template(self):
def verify_templated_field(context):
self.assertEqual(context['ti'].task.some_templated_field['bar'][1],
context['ds'])
t = OperatorSubclass(
task_id='test_complex_template',
some_templated_field={
'foo': '123',
'bar': ['baz', '{{ ds }}']
},
dag=self.dag)
t.execute = verify_templated_field
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_template_with_variable(self):
"""
Test the availability of variables in templates
"""
val = {
'test_value': 'a test value'
}
Variable.set("a_variable", val['test_value'])
def verify_templated_field(context):
self.assertEqual(context['ti'].task.some_templated_field,
val['test_value'])
t = OperatorSubclass(
task_id='test_complex_template',
some_templated_field='{{ var.value.a_variable }}',
dag=self.dag)
t.execute = verify_templated_field
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_template_with_json_variable(self):
"""
Test the availability of variables (serialized as JSON) in templates
"""
val = {
'test_value': {'foo': 'bar', 'obj': {'v1': 'yes', 'v2': 'no'}}
}
Variable.set("a_variable", val['test_value'], serialize_json=True)
def verify_templated_field(context):
self.assertEqual(context['ti'].task.some_templated_field,
val['test_value']['obj']['v2'])
t = OperatorSubclass(
task_id='test_complex_template',
some_templated_field='{{ var.json.a_variable.obj.v2 }}',
dag=self.dag)
t.execute = verify_templated_field
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_template_with_json_variable_as_value(self):
"""
Test the availability of variables (serialized as JSON) in templates, but
accessed as a value
"""
val = {
'test_value': {'foo': 'bar'}
}
Variable.set("a_variable", val['test_value'], serialize_json=True)
def verify_templated_field(context):
self.assertEqual(context['ti'].task.some_templated_field,
u'{"foo": "bar"}')
t = OperatorSubclass(
task_id='test_complex_template',
some_templated_field='{{ var.value.a_variable }}',
dag=self.dag)
t.execute = verify_templated_field
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_template_non_bool(self):
"""
Test templates can handle objects with no sense of truthiness
"""
class NonBoolObject(object):
def __len__(self):
return NotImplemented
def __bool__(self):
return NotImplemented
t = OperatorSubclass(
task_id='test_bad_template_obj',
some_templated_field=NonBoolObject(),
dag=self.dag)
t.resolve_template_files()
def test_import_examples(self):
self.assertEqual(len(self.dagbag.dags), NUM_EXAMPLE_DAGS)
def test_local_task_job(self):
TI = models.TaskInstance
ti = TI(
task=self.runme_0, execution_date=DEFAULT_DATE)
job = jobs.LocalTaskJob(task_instance=ti, ignore_ti_state=True)
job.run()
def test_raw_job(self):
TI = models.TaskInstance
ti = TI(
task=self.runme_0, execution_date=DEFAULT_DATE)
ti.dag = self.dag_bash
ti.run(ignore_ti_state=True)
def test_doctests(self):
modules = [utils, macros]
for mod in modules:
failed, tests = doctest.testmod(mod)
if failed:
raise Exception("Failed a doctest")
def test_variable_set_get_round_trip(self):
Variable.set("tested_var_set_id", "Monday morning breakfast")
self.assertEqual("Monday morning breakfast", Variable.get("tested_var_set_id"))
def test_variable_set_get_round_trip_json(self):
value = {"a": 17, "b": 47}
Variable.set("tested_var_set_id", value, serialize_json=True)
self.assertEqual(value, Variable.get("tested_var_set_id", deserialize_json=True))
def test_get_non_existing_var_should_return_default(self):
default_value = "some default val"
self.assertEqual(default_value, Variable.get("thisIdDoesNotExist",
default_var=default_value))
def test_get_non_existing_var_should_not_deserialize_json_default(self):
default_value = "}{ this is a non JSON default }{"
self.assertEqual(default_value, Variable.get("thisIdDoesNotExist",
default_var=default_value,
deserialize_json=True))
def test_variable_setdefault_round_trip(self):
key = "tested_var_setdefault_1_id"
value = "Monday morning breakfast in Paris"
Variable.setdefault(key, value)
self.assertEqual(value, Variable.get(key))
def test_variable_setdefault_round_trip_json(self):
key = "tested_var_setdefault_2_id"
value = {"city": 'Paris', "Hapiness": True}
Variable.setdefault(key, value, deserialize_json=True)
self.assertEqual(value, Variable.get(key, deserialize_json=True))
def test_variable_setdefault_existing_json(self):
key = "tested_var_setdefault_2_id"
value = {"city": 'Paris', "Hapiness": True}
Variable.set(key, value, serialize_json=True)
val = Variable.setdefault(key, value, deserialize_json=True)
# Check the returned value, and the stored value are handled correctly.
self.assertEqual(value, val)
self.assertEqual(value, Variable.get(key, deserialize_json=True))
def test_parameterized_config_gen(self):
cfg = configuration.parameterized_config(configuration.DEFAULT_CONFIG)
# making sure some basic building blocks are present:
self.assertIn("[core]", cfg)
self.assertIn("dags_folder", cfg)
self.assertIn("sql_alchemy_conn", cfg)
self.assertIn("fernet_key", cfg)
# making sure replacement actually happened
self.assertNotIn("{AIRFLOW_HOME}", cfg)
self.assertNotIn("{FERNET_KEY}", cfg)
def test_config_use_original_when_original_and_fallback_are_present(self):
self.assertTrue(configuration.conf.has_option("core", "FERNET_KEY"))
self.assertFalse(configuration.conf.has_option("core", "FERNET_KEY_CMD"))
FERNET_KEY = configuration.conf.get('core', 'FERNET_KEY')
configuration.conf.set("core", "FERNET_KEY_CMD", "printf HELLO")
FALLBACK_FERNET_KEY = configuration.conf.get(
"core",
"FERNET_KEY"
)
self.assertEqual(FERNET_KEY, FALLBACK_FERNET_KEY)
# restore the conf back to the original state
configuration.conf.remove_option("core", "FERNET_KEY_CMD")
def test_config_throw_error_when_original_and_fallback_is_absent(self):
self.assertTrue(configuration.conf.has_option("core", "FERNET_KEY"))
self.assertFalse(configuration.conf.has_option("core", "FERNET_KEY_CMD"))
FERNET_KEY = configuration.conf.get("core", "FERNET_KEY")
configuration.conf.remove_option("core", "FERNET_KEY")
with self.assertRaises(AirflowConfigException) as cm:
configuration.conf.get("core", "FERNET_KEY")
exception = str(cm.exception)
message = "section/key [core/fernet_key] not found in config"
self.assertEqual(message, exception)
# restore the conf back to the original state
configuration.conf.set("core", "FERNET_KEY", FERNET_KEY)
self.assertTrue(configuration.conf.has_option("core", "FERNET_KEY"))
def test_config_override_original_when_non_empty_envvar_is_provided(self):
key = "AIRFLOW__CORE__FERNET_KEY"
value = "some value"
self.assertNotIn(key, os.environ)
os.environ[key] = value
FERNET_KEY = configuration.conf.get('core', 'FERNET_KEY')
self.assertEqual(value, FERNET_KEY)
# restore the envvar back to the original state
del os.environ[key]
def test_config_override_original_when_empty_envvar_is_provided(self):
key = "AIRFLOW__CORE__FERNET_KEY"
value = ""
self.assertNotIn(key, os.environ)
os.environ[key] = value
FERNET_KEY = configuration.conf.get('core', 'FERNET_KEY')
self.assertEqual(value, FERNET_KEY)
# restore the envvar back to the original state
del os.environ[key]
def test_round_time(self):
rt1 = round_time(datetime(2015, 1, 1, 6), timedelta(days=1))
self.assertEqual(datetime(2015, 1, 1, 0, 0), rt1)
rt2 = round_time(datetime(2015, 1, 2), relativedelta(months=1))
self.assertEqual(datetime(2015, 1, 1, 0, 0), rt2)
rt3 = round_time(datetime(2015, 9, 16, 0, 0), timedelta(1), datetime(
2015, 9, 14, 0, 0))
self.assertEqual(datetime(2015, 9, 16, 0, 0), rt3)
rt4 = round_time(datetime(2015, 9, 15, 0, 0), timedelta(1), datetime(
2015, 9, 14, 0, 0))
self.assertEqual(datetime(2015, 9, 15, 0, 0), rt4)
rt5 = round_time(datetime(2015, 9, 14, 0, 0), timedelta(1), datetime(
2015, 9, 14, 0, 0))
self.assertEqual(datetime(2015, 9, 14, 0, 0), rt5)
rt6 = round_time(datetime(2015, 9, 13, 0, 0), timedelta(1), datetime(
2015, 9, 14, 0, 0))
self.assertEqual(datetime(2015, 9, 14, 0, 0), rt6)
def test_infer_time_unit(self):
self.assertEqual('minutes', infer_time_unit([130, 5400, 10]))
self.assertEqual('seconds', infer_time_unit([110, 50, 10, 100]))
self.assertEqual('hours', infer_time_unit([100000, 50000, 10000, 20000]))
self.assertEqual('days', infer_time_unit([200000, 100000]))
def test_scale_time_units(self):
# use assert_almost_equal from numpy.testing since we are comparing
# floating point arrays
arr1 = scale_time_units([130, 5400, 10], 'minutes')
assert_array_almost_equal(arr1, [2.167, 90.0, 0.167], decimal=3)
arr2 = scale_time_units([110, 50, 10, 100], 'seconds')
assert_array_almost_equal(arr2, [110.0, 50.0, 10.0, 100.0], decimal=3)
arr3 = scale_time_units([100000, 50000, 10000, 20000], 'hours')
assert_array_almost_equal(arr3, [27.778, 13.889, 2.778, 5.556],
decimal=3)
arr4 = scale_time_units([200000, 100000], 'days')
assert_array_almost_equal(arr4, [2.315, 1.157], decimal=3)
def test_duplicate_dependencies(self):
regexp = "Dependency (.*)runme_0(.*)run_after_loop(.*) " \
"already registered"
with self.assertRaisesRegexp(AirflowException, regexp):
self.runme_0.set_downstream(self.run_after_loop)
with self.assertRaisesRegexp(AirflowException, regexp):
self.run_after_loop.set_upstream(self.runme_0)
def test_bad_trigger_rule(self):
with self.assertRaises(AirflowException):
DummyOperator(
task_id='test_bad_trigger',
trigger_rule="non_existant",
dag=self.dag)
def test_terminate_task(self):
"""If a task instance's db state get deleted, it should fail"""
TI = models.TaskInstance
dag = self.dagbag.dags.get('test_utils')
task = dag.task_dict.get('sleeps_forever')
ti = TI(task=task, execution_date=DEFAULT_DATE)
job = jobs.LocalTaskJob(
task_instance=ti, ignore_ti_state=True, executor=SequentialExecutor())
# Running task instance asynchronously
p = multiprocessing.Process(target=job.run)
p.start()
sleep(5)
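        # Discard pooled DB connections that may be shared with the forked
        # child before opening a fresh session in the parent process.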
settings.engine.dispose()
session = settings.Session()
ti.refresh_from_db(session=session)
# making sure it's actually running
self.assertEqual(State.RUNNING, ti.state)
ti = session.query(TI).filter_by(
dag_id=task.dag_id,
task_id=task.task_id,
execution_date=DEFAULT_DATE
).one()
# deleting the instance should result in a failure
session.delete(ti)
session.commit()
# waiting for the async task to finish
p.join()
# making sure that the task ended up as failed
ti.refresh_from_db(session=session)
self.assertEqual(State.FAILED, ti.state)
session.close()
def test_task_fail_duration(self):
"""If a task fails, the duration should be recorded in TaskFail"""
p = BashOperator(
task_id='pass_sleepy',
bash_command='sleep 3',
dag=self.dag)
f = BashOperator(
task_id='fail_sleepy',
bash_command='sleep 5',
execution_timeout=timedelta(seconds=3),
retry_delay=timedelta(seconds=0),
dag=self.dag)
session = settings.Session()
try:
p.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
except:
pass
try:
f.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
except:
pass
p_fails = session.query(models.TaskFail).filter_by(
task_id='pass_sleepy',
dag_id=self.dag.dag_id,
execution_date=DEFAULT_DATE).all()
f_fails = session.query(models.TaskFail).filter_by(
task_id='fail_sleepy',
dag_id=self.dag.dag_id,
execution_date=DEFAULT_DATE).all()
print(f_fails)
self.assertEqual(0, len(p_fails))
self.assertEqual(1, len(f_fails))
        # The failed task ran for at least its 3 second execution timeout
        # before being killed, so its recorded duration should be >= 3.
self.assertGreaterEqual(sum([f.duration for f in f_fails]), 3)
def test_dag_stats(self):
"""Correctly sets/dirties/cleans rows of DagStat table"""
session = settings.Session()
session.query(models.DagRun).delete()
session.query(models.DagStat).delete()
session.commit()
models.DagStat.update([], session=session)
run1 = self.dag_bash.create_dagrun(
run_id="run1",
execution_date=DEFAULT_DATE,
state=State.RUNNING)
models.DagStat.update([self.dag_bash.dag_id], session=session)
qry = session.query(models.DagStat).all()
self.assertEqual(3, len(qry))
self.assertEqual(self.dag_bash.dag_id, qry[0].dag_id)
for stats in qry:
if stats.state == State.RUNNING:
self.assertEqual(stats.count, 1)
else:
self.assertEqual(stats.count, 0)
self.assertFalse(stats.dirty)
run2 = self.dag_bash.create_dagrun(
run_id="run2",
execution_date=DEFAULT_DATE + timedelta(days=1),
state=State.RUNNING)
models.DagStat.update([self.dag_bash.dag_id], session=session)
qry = session.query(models.DagStat).all()
self.assertEqual(3, len(qry))
self.assertEqual(self.dag_bash.dag_id, qry[0].dag_id)
for stats in qry:
if stats.state == State.RUNNING:
self.assertEqual(stats.count, 2)
else:
self.assertEqual(stats.count, 0)
self.assertFalse(stats.dirty)
session.query(models.DagRun).first().state = State.SUCCESS
session.commit()
models.DagStat.update([self.dag_bash.dag_id], session=session)
qry = session.query(models.DagStat).filter(models.DagStat.state == State.SUCCESS).all()
self.assertEqual(1, len(qry))
self.assertEqual(self.dag_bash.dag_id, qry[0].dag_id)
self.assertEqual(State.SUCCESS, qry[0].state)
self.assertEqual(1, qry[0].count)
self.assertFalse(qry[0].dirty)
qry = session.query(models.DagStat).filter(models.DagStat.state == State.RUNNING).all()
self.assertEqual(1, len(qry))
self.assertEqual(self.dag_bash.dag_id, qry[0].dag_id)
self.assertEqual(State.RUNNING, qry[0].state)
self.assertEqual(1, qry[0].count)
self.assertFalse(qry[0].dirty)
session.query(models.DagRun).delete()
session.query(models.DagStat).delete()
session.commit()
session.close()
def test_run_command(self):
if six.PY3:
write = r'sys.stdout.buffer.write("\u1000foo".encode("utf8"))'
else:
write = r'sys.stdout.write(u"\u1000foo".encode("utf8"))'
cmd = 'import sys; {0}; sys.stdout.flush()'.format(write)
self.assertEqual(run_command("python -c '{0}'".format(cmd)),
u'\u1000foo' if six.PY3 else 'foo')
self.assertEqual(run_command('echo "foo bar"'), u'foo bar\n')
self.assertRaises(AirflowConfigException, run_command, 'bash -c "exit 1"')
class CliTests(unittest.TestCase):
@classmethod
def setUpClass(cls):
super(CliTests, cls).setUpClass()
cls._cleanup()
def setUp(self):
super(CliTests, self).setUp()
configuration.load_test_config()
app = application.create_app()
app.config['TESTING'] = True
self.parser = cli.CLIFactory.get_parser()
self.dagbag = models.DagBag(dag_folder=DEV_NULL, include_examples=True)
self.session = Session()
def tearDown(self):
self._cleanup(session=self.session)
super(CliTests, self).tearDown()
@staticmethod
def _cleanup(session=None):
if session is None:
session = Session()
session.query(models.Pool).delete()
session.query(models.Variable).delete()
session.commit()
session.close()
def test_cli_list_dags(self):
args = self.parser.parse_args(['list_dags', '--report'])
cli.list_dags(args)
def test_cli_create_user_random_password(self):
args = self.parser.parse_args([
'create_user', '-u', 'test1', '-l', 'doe', '-f', 'jon',
'-e', 'jdoe@foo.com', '-r', 'Viewer', '--use_random_password'
])
cli.create_user(args)
def test_cli_create_user_supplied_password(self):
args = self.parser.parse_args([
'create_user', '-u', 'test2', '-l', 'doe', '-f', 'jon',
'-e', 'jdoe@apache.org', '-r', 'Viewer', '-p', 'test'
])
cli.create_user(args)
def test_cli_list_tasks(self):
for dag_id in self.dagbag.dags.keys():
args = self.parser.parse_args(['list_tasks', dag_id])
cli.list_tasks(args)
args = self.parser.parse_args([
'list_tasks', 'example_bash_operator', '--tree'])
cli.list_tasks(args)
@mock.patch("airflow.bin.cli.db_utils.initdb")
def test_cli_initdb(self, initdb_mock):
cli.initdb(self.parser.parse_args(['initdb']))
initdb_mock.assert_called_once_with(False)
@mock.patch("airflow.bin.cli.db_utils.resetdb")
def test_cli_resetdb(self, resetdb_mock):
cli.resetdb(self.parser.parse_args(['resetdb', '--yes']))
resetdb_mock.assert_called_once_with(False)
def test_cli_connections_list(self):
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(['connections', '--list']))
stdout = mock_stdout.getvalue()
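        # Every other line of the tabular output is a data row; take the first
        # two quoted fields (conn_id and conn_type) from each of those rows.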
        conns = [[x.strip("'") for x in re.findall(r"'\w+'", line)[:2]]
for ii, line in enumerate(stdout.split('\n'))
if ii % 2 == 1]
conns = [conn for conn in conns if len(conn) > 0]
# Assert that some of the connections are present in the output as
# expected:
self.assertIn(['aws_default', 'aws'], conns)
self.assertIn(['beeline_default', 'beeline'], conns)
self.assertIn(['emr_default', 'emr'], conns)
self.assertIn(['mssql_default', 'mssql'], conns)
self.assertIn(['mysql_default', 'mysql'], conns)
self.assertIn(['postgres_default', 'postgres'], conns)
self.assertIn(['wasb_default', 'wasb'], conns)
self.assertIn(['segment_default', 'segment'], conns)
# Attempt to list connections with invalid cli args
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--list', '--conn_id=fake', '--conn_uri=fake-uri',
'--conn_type=fake-type', '--conn_host=fake_host',
'--conn_login=fake_login', '--conn_password=fake_password',
'--conn_schema=fake_schema', '--conn_port=fake_port', '--conn_extra=fake_extra']))
stdout = mock_stdout.getvalue()
# Check list attempt stdout
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
("\tThe following args are not compatible with the " +
"--list flag: ['conn_id', 'conn_uri', 'conn_extra', " +
"'conn_type', 'conn_host', 'conn_login', " +
"'conn_password', 'conn_schema', 'conn_port']"),
])
def test_cli_connections_list_redirect(self):
cmd = ['airflow', 'connections', '--list']
with tempfile.TemporaryFile() as fp:
p = subprocess.Popen(cmd, stdout=fp)
p.wait()
self.assertEqual(0, p.returncode)
def test_cli_connections_add_delete(self):
# Add connections:
uri = 'postgresql://airflow:airflow@host:5432/airflow'
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--add', '--conn_id=new1',
'--conn_uri=%s' % uri]))
cli.connections(self.parser.parse_args(
['connections', '-a', '--conn_id=new2',
'--conn_uri=%s' % uri]))
cli.connections(self.parser.parse_args(
['connections', '--add', '--conn_id=new3',
'--conn_uri=%s' % uri, '--conn_extra', "{'extra': 'yes'}"]))
cli.connections(self.parser.parse_args(
['connections', '-a', '--conn_id=new4',
'--conn_uri=%s' % uri, '--conn_extra', "{'extra': 'yes'}"]))
cli.connections(self.parser.parse_args(
['connections', '--add', '--conn_id=new5',
'--conn_type=hive_metastore', '--conn_login=airflow',
'--conn_password=airflow', '--conn_host=host',
'--conn_port=9083', '--conn_schema=airflow']))
cli.connections(self.parser.parse_args(
['connections', '-a', '--conn_id=new6',
'--conn_uri', "", '--conn_type=google_cloud_platform', '--conn_extra', "{'extra': 'yes'}"]))
stdout = mock_stdout.getvalue()
# Check addition stdout
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
("\tSuccessfully added `conn_id`=new1 : " +
"postgresql://airflow:airflow@host:5432/airflow"),
("\tSuccessfully added `conn_id`=new2 : " +
"postgresql://airflow:airflow@host:5432/airflow"),
("\tSuccessfully added `conn_id`=new3 : " +
"postgresql://airflow:airflow@host:5432/airflow"),
("\tSuccessfully added `conn_id`=new4 : " +
"postgresql://airflow:airflow@host:5432/airflow"),
("\tSuccessfully added `conn_id`=new5 : " +
"hive_metastore://airflow:airflow@host:9083/airflow"),
("\tSuccessfully added `conn_id`=new6 : " +
"google_cloud_platform://:@:")
])
# Attempt to add duplicate
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--add', '--conn_id=new1',
'--conn_uri=%s' % uri]))
stdout = mock_stdout.getvalue()
# Check stdout for addition attempt
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
"\tA connection with `conn_id`=new1 already exists",
])
# Attempt to add without providing conn_id
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--add', '--conn_uri=%s' % uri]))
stdout = mock_stdout.getvalue()
# Check stdout for addition attempt
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
("\tThe following args are required to add a connection:" +
" ['conn_id']"),
])
# Attempt to add without providing conn_uri
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--add', '--conn_id=new']))
stdout = mock_stdout.getvalue()
# Check stdout for addition attempt
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
("\tThe following args are required to add a connection:" +
" ['conn_uri or conn_type']"),
])
        # Check that the connections added above were stored correctly
session = settings.Session()
extra = {'new1': None,
'new2': None,
'new3': "{'extra': 'yes'}",
'new4': "{'extra': 'yes'}"}
        # Verify each stored connection's attributes
for index in range(1, 6):
conn_id = 'new%s' % index
result = (session
.query(models.Connection)
.filter(models.Connection.conn_id == conn_id)
.first())
result = (result.conn_id, result.conn_type, result.host,
result.port, result.get_extra())
if conn_id in ['new1', 'new2', 'new3', 'new4']:
self.assertEqual(result, (conn_id, 'postgres', 'host', 5432,
extra[conn_id]))
elif conn_id == 'new5':
self.assertEqual(result, (conn_id, 'hive_metastore', 'host',
9083, None))
elif conn_id == 'new6':
self.assertEqual(result, (conn_id, 'google_cloud_platform',
None, None, "{'extra': 'yes'}"))
# Delete connections
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=new1']))
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=new2']))
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=new3']))
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=new4']))
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=new5']))
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=new6']))
stdout = mock_stdout.getvalue()
# Check deletion stdout
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
"\tSuccessfully deleted `conn_id`=new1",
"\tSuccessfully deleted `conn_id`=new2",
"\tSuccessfully deleted `conn_id`=new3",
"\tSuccessfully deleted `conn_id`=new4",
"\tSuccessfully deleted `conn_id`=new5",
"\tSuccessfully deleted `conn_id`=new6"
])
# Check deletions
for index in range(1, 7):
conn_id = 'new%s' % index
result = (session.query(models.Connection)
.filter(models.Connection.conn_id == conn_id)
.first())
self.assertTrue(result is None)
        # Attempt to delete a non-existing connection
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=fake']))
stdout = mock_stdout.getvalue()
# Check deletion attempt stdout
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
"\tDid not find a connection with `conn_id`=fake",
])
# Attempt to delete with invalid cli args
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=fake',
'--conn_uri=%s' % uri, '--conn_type=fake-type']))
stdout = mock_stdout.getvalue()
# Check deletion attempt stdout
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
("\tThe following args are not compatible with the " +
"--delete flag: ['conn_uri', 'conn_type']"),
])
session.close()
def test_cli_test(self):
cli.test(self.parser.parse_args([
'test', 'example_bash_operator', 'runme_0',
DEFAULT_DATE.isoformat()]))
cli.test(self.parser.parse_args([
'test', 'example_bash_operator', 'runme_0', '--dry_run',
DEFAULT_DATE.isoformat()]))
def test_cli_test_with_params(self):
cli.test(self.parser.parse_args([
'test', 'example_passing_params_via_test_command', 'run_this',
'-tp', '{"foo":"bar"}', DEFAULT_DATE.isoformat()]))
cli.test(self.parser.parse_args([
'test', 'example_passing_params_via_test_command', 'also_run_this',
'-tp', '{"foo":"bar"}', DEFAULT_DATE.isoformat()]))
def test_cli_run(self):
cli.run(self.parser.parse_args([
'run', 'example_bash_operator', 'runme_0', '-l',
DEFAULT_DATE.isoformat()]))
def test_task_state(self):
cli.task_state(self.parser.parse_args([
'task_state', 'example_bash_operator', 'runme_0',
DEFAULT_DATE.isoformat()]))
def test_dag_state(self):
self.assertEqual(None, cli.dag_state(self.parser.parse_args([
'dag_state', 'example_bash_operator', DEFAULT_DATE.isoformat()])))
def test_pause(self):
args = self.parser.parse_args([
'pause', 'example_bash_operator'])
cli.pause(args)
self.assertIn(self.dagbag.dags['example_bash_operator'].is_paused, [True, 1])
args = self.parser.parse_args([
'unpause', 'example_bash_operator'])
cli.unpause(args)
self.assertIn(self.dagbag.dags['example_bash_operator'].is_paused, [False, 0])
def test_subdag_clear(self):
args = self.parser.parse_args([
'clear', 'example_subdag_operator', '--no_confirm'])
cli.clear(args)
args = self.parser.parse_args([
'clear', 'example_subdag_operator', '--no_confirm', '--exclude_subdags'])
cli.clear(args)
def test_get_dags(self):
dags = cli.get_dags(self.parser.parse_args(['clear', 'example_subdag_operator', '-c']))
self.assertEqual(len(dags), 1)
dags = cli.get_dags(self.parser.parse_args(['clear', 'subdag', '-dx', '-c']))
self.assertGreater(len(dags), 1)
with self.assertRaises(AirflowException):
cli.get_dags(self.parser.parse_args(['clear', 'foobar', '-dx', '-c']))
def test_backfill(self):
cli.backfill(self.parser.parse_args([
'backfill', 'example_bash_operator',
'-s', DEFAULT_DATE.isoformat()]))
cli.backfill(self.parser.parse_args([
'backfill', 'example_bash_operator', '-t', 'runme_0', '--dry_run',
'-s', DEFAULT_DATE.isoformat()]))
cli.backfill(self.parser.parse_args([
'backfill', 'example_bash_operator', '--dry_run',
'-s', DEFAULT_DATE.isoformat()]))
cli.backfill(self.parser.parse_args([
'backfill', 'example_bash_operator', '-l',
'-s', DEFAULT_DATE.isoformat()]))
def test_process_subdir_path_with_placeholder(self):
self.assertEqual(os.path.join(settings.DAGS_FOLDER, 'abc'), cli.process_subdir('DAGS_FOLDER/abc'))
def test_trigger_dag(self):
cli.trigger_dag(self.parser.parse_args([
'trigger_dag', 'example_bash_operator',
'-c', '{"foo": "bar"}']))
self.assertRaises(
ValueError,
cli.trigger_dag,
self.parser.parse_args([
'trigger_dag', 'example_bash_operator',
'--run_id', 'trigger_dag_xxx',
'-c', 'NOT JSON'])
)
def test_delete_dag(self):
DM = models.DagModel
key = "my_dag_id"
session = settings.Session()
session.add(DM(dag_id=key))
session.commit()
cli.delete_dag(self.parser.parse_args([
'delete_dag', key, '--yes']))
self.assertEqual(session.query(DM).filter_by(dag_id=key).count(), 0)
self.assertRaises(
AirflowException,
cli.delete_dag,
self.parser.parse_args([
'delete_dag',
'does_not_exist_dag',
'--yes'])
)
def test_pool_create(self):
cli.pool(self.parser.parse_args(['pool', '-s', 'foo', '1', 'test']))
self.assertEqual(self.session.query(models.Pool).count(), 1)
def test_pool_get(self):
cli.pool(self.parser.parse_args(['pool', '-s', 'foo', '1', 'test']))
try:
cli.pool(self.parser.parse_args(['pool', '-g', 'foo']))
except Exception as e:
self.fail("The 'pool -g foo' command raised unexpectedly: %s" % e)
def test_pool_delete(self):
cli.pool(self.parser.parse_args(['pool', '-s', 'foo', '1', 'test']))
cli.pool(self.parser.parse_args(['pool', '-x', 'foo']))
self.assertEqual(self.session.query(models.Pool).count(), 0)
def test_pool_no_args(self):
try:
cli.pool(self.parser.parse_args(['pool']))
except Exception as e:
self.fail("The 'pool' command raised unexpectedly: %s" % e)
def test_variables(self):
# Checks if all subcommands are properly received
cli.variables(self.parser.parse_args([
'variables', '-s', 'foo', '{"foo":"bar"}']))
cli.variables(self.parser.parse_args([
'variables', '-g', 'foo']))
cli.variables(self.parser.parse_args([
'variables', '-g', 'baz', '-d', 'bar']))
cli.variables(self.parser.parse_args([
'variables']))
cli.variables(self.parser.parse_args([
'variables', '-x', 'bar']))
cli.variables(self.parser.parse_args([
'variables', '-i', DEV_NULL]))
cli.variables(self.parser.parse_args([
'variables', '-e', DEV_NULL]))
cli.variables(self.parser.parse_args([
'variables', '-s', 'bar', 'original']))
# First export
cli.variables(self.parser.parse_args([
'variables', '-e', 'variables1.json']))
first_exp = open('variables1.json', 'r')
cli.variables(self.parser.parse_args([
'variables', '-s', 'bar', 'updated']))
cli.variables(self.parser.parse_args([
'variables', '-s', 'foo', '{"foo":"oops"}']))
cli.variables(self.parser.parse_args([
'variables', '-x', 'foo']))
# First import
cli.variables(self.parser.parse_args([
'variables', '-i', 'variables1.json']))
self.assertEqual('original', models.Variable.get('bar'))
self.assertEqual('{"foo": "bar"}', models.Variable.get('foo'))
# Second export
cli.variables(self.parser.parse_args([
'variables', '-e', 'variables2.json']))
second_exp = open('variables2.json', 'r')
self.assertEqual(first_exp.read(), second_exp.read())
second_exp.close()
first_exp.close()
# Second import
cli.variables(self.parser.parse_args([
'variables', '-i', 'variables2.json']))
self.assertEqual('original', models.Variable.get('bar'))
self.assertEqual('{"foo": "bar"}', models.Variable.get('foo'))
os.remove('variables1.json')
os.remove('variables2.json')
def _wait_pidfile(self, pidfile):
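        # Poll until the daemon has written its pid file, then return the pid.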
while True:
try:
with open(pidfile) as f:
return int(f.read())
except:
sleep(1)
def test_cli_webserver_foreground(self):
# Confirm that webserver hasn't been launched.
# pgrep returns exit status 1 if no process matched.
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "airflow"]).wait())
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "gunicorn"]).wait())
# Run webserver in foreground and terminate it.
p = subprocess.Popen(["airflow", "webserver"])
p.terminate()
p.wait()
# Assert that no process remains.
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "airflow"]).wait())
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "gunicorn"]).wait())
@unittest.skipIf("TRAVIS" in os.environ and bool(os.environ["TRAVIS"]),
"Skipping test due to lack of required file permission")
def test_cli_webserver_foreground_with_pid(self):
# Run webserver in foreground with --pid option
pidfile = tempfile.mkstemp()[1]
p = subprocess.Popen(["airflow", "webserver", "--pid", pidfile])
# Check the file specified by --pid option exists
self._wait_pidfile(pidfile)
# Terminate webserver
p.terminate()
p.wait()
@unittest.skipIf("TRAVIS" in os.environ and bool(os.environ["TRAVIS"]),
"Skipping test due to lack of required file permission")
def test_cli_webserver_background(self):
import psutil
# Confirm that webserver hasn't been launched.
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "airflow"]).wait())
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "gunicorn"]).wait())
# Run webserver in background.
subprocess.Popen(["airflow", "webserver", "-D"])
pidfile = cli.setup_locations("webserver")[0]
self._wait_pidfile(pidfile)
# Assert that gunicorn and its monitor are launched.
self.assertEqual(0, subprocess.Popen(["pgrep", "-c", "airflow"]).wait())
self.assertEqual(0, subprocess.Popen(["pgrep", "-c", "gunicorn"]).wait())
# Terminate monitor process.
pidfile = cli.setup_locations("webserver-monitor")[0]
pid = self._wait_pidfile(pidfile)
p = psutil.Process(pid)
p.terminate()
p.wait()
# Assert that no process remains.
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "airflow"]).wait())
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "gunicorn"]).wait())
# Patch for causing webserver timeout
@mock.patch("airflow.bin.cli.get_num_workers_running", return_value=0)
def test_cli_webserver_shutdown_when_gunicorn_master_is_killed(self, _):
        # Shorten the timeout so that this test doesn't take too long
configuration.conf.set("webserver", "web_server_master_timeout", "10")
args = self.parser.parse_args(['webserver'])
with self.assertRaises(SystemExit) as e:
cli.webserver(args)
self.assertEqual(e.exception.code, 1)
class SecurityTests(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
configuration.conf.set("webserver", "authenticate", "False")
configuration.conf.set("webserver", "expose_config", "True")
app = application.create_app()
app.config['TESTING'] = True
self.app = app.test_client()
self.dagbag = models.DagBag(
dag_folder=DEV_NULL, include_examples=True)
self.dag_bash = self.dagbag.dags['example_bash_operator']
self.runme_0 = self.dag_bash.get_task('runme_0')
def get_csrf(self, response):
tree = html.fromstring(response.data)
form = tree.find('.//form')
return form.find('.//input[@name="_csrf_token"]').value
def test_csrf_rejection(self):
endpoints = ([
"/admin/queryview/",
"/admin/airflow/paused?dag_id=example_python_operator&is_paused=false",
])
for endpoint in endpoints:
response = self.app.post(endpoint)
self.assertIn('CSRF token is missing', response.data.decode('utf-8'))
def test_csrf_acceptance(self):
response = self.app.get("/admin/queryview/")
csrf = self.get_csrf(response)
response = self.app.post("/admin/queryview/", data=dict(csrf_token=csrf))
self.assertEqual(200, response.status_code)
def test_xss(self):
try:
self.app.get("/admin/airflow/tree?dag_id=<script>alert(123456)</script>")
except:
            # exception is expected here since the dag doesn't exist
pass
response = self.app.get("/admin/log", follow_redirects=True)
self.assertIn(bleach.clean("<script>alert(123456)</script>"), response.data.decode('UTF-8'))
def test_chart_data_template(self):
"""Protect chart_data from being able to do RCE."""
session = settings.Session()
Chart = models.Chart
chart1 = Chart(
label='insecure_chart',
conn_id='airflow_db',
chart_type='bar',
sql="SELECT {{ ''.__class__.__mro__[1].__subclasses__() }}"
)
chart2 = Chart(
label="{{ ''.__class__.__mro__[1].__subclasses__() }}",
conn_id='airflow_db',
chart_type='bar',
sql="SELECT 1"
)
chart3 = Chart(
label="{{ subprocess.check_output('ls') }}",
conn_id='airflow_db',
chart_type='bar',
sql="SELECT 1"
)
session.add(chart1)
session.add(chart2)
session.add(chart3)
session.commit()
chart1 = session.query(Chart).filter(Chart.label == 'insecure_chart').first()
with self.assertRaises(SecurityError):
self.app.get("/admin/airflow/chart_data?chart_id={}".format(chart1.id))
chart2 = session.query(Chart).filter(
Chart.label == "{{ ''.__class__.__mro__[1].__subclasses__() }}"
).first()
with self.assertRaises(SecurityError):
self.app.get("/admin/airflow/chart_data?chart_id={}".format(chart2.id))
chart3 = session.query(Chart).filter(
Chart.label == "{{ subprocess.check_output('ls') }}"
).first()
with self.assertRaises(UndefinedError):
self.app.get("/admin/airflow/chart_data?chart_id={}".format(chart3.id))
def tearDown(self):
configuration.conf.set("webserver", "expose_config", "False")
self.dag_bash.clear(start_date=DEFAULT_DATE, end_date=timezone.utcnow())
class WebUiTests(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
configuration.conf.set("webserver", "authenticate", "False")
configuration.conf.set("webserver", "expose_config", "True")
app = application.create_app()
app.config['TESTING'] = True
app.config['WTF_CSRF_METHODS'] = []
self.app = app.test_client()
self.dagbag = models.DagBag(include_examples=True)
self.dag_bash = self.dagbag.dags['example_bash_operator']
self.dag_python = self.dagbag.dags['example_python_operator']
self.sub_dag = self.dagbag.dags['example_subdag_operator']
self.runme_0 = self.dag_bash.get_task('runme_0')
self.example_xcom = self.dagbag.dags['example_xcom']
self.dagrun_python = self.dag_python.create_dagrun(
run_id="test_{}".format(models.DagRun.id_for_date(timezone.utcnow())),
execution_date=DEFAULT_DATE,
start_date=timezone.utcnow(),
state=State.RUNNING
)
self.sub_dag.create_dagrun(
run_id="test_{}".format(models.DagRun.id_for_date(timezone.utcnow())),
execution_date=DEFAULT_DATE,
start_date=timezone.utcnow(),
state=State.RUNNING
)
self.example_xcom.create_dagrun(
run_id="test_{}".format(models.DagRun.id_for_date(timezone.utcnow())),
execution_date=DEFAULT_DATE,
start_date=timezone.utcnow(),
state=State.RUNNING
)
def test_index(self):
response = self.app.get('/', follow_redirects=True)
resp_html = response.data.decode('utf-8')
self.assertIn("DAGs", resp_html)
self.assertIn("example_bash_operator", resp_html)
        # The HTML should contain data for the last run: a link to the
        # specific run, and the text of the date.
url = "/admin/airflow/graph?" + urlencode({
"dag_id": self.dag_python.dag_id,
"execution_date": self.dagrun_python.execution_date,
}).replace("&", "&")
self.assertIn(url, resp_html)
self.assertIn(
self.dagrun_python.execution_date.strftime("%Y-%m-%d %H:%M"),
resp_html)
def test_query(self):
response = self.app.get('/admin/queryview/')
self.assertIn("Ad Hoc Query", response.data.decode('utf-8'))
response = self.app.post(
"/admin/queryview/", data=dict(
conn_id="airflow_db",
sql="SELECT+COUNT%281%29+as+TEST+FROM+task_instance"))
self.assertIn("TEST", response.data.decode('utf-8'))
def test_health(self):
response = self.app.get('/health')
self.assertIn('The server is healthy!', response.data.decode('utf-8'))
def test_noaccess(self):
response = self.app.get('/admin/airflow/noaccess')
self.assertIn("You don't seem to have access.", response.data.decode('utf-8'))
def test_pickle_info(self):
response = self.app.get('/admin/airflow/pickle_info')
self.assertIn('{', response.data.decode('utf-8'))
def test_dag_views(self):
response = self.app.get(
'/admin/airflow/graph?dag_id=example_bash_operator')
self.assertIn("runme_0", response.data.decode('utf-8'))
# confirm that the graph page loads when execution_date is blank
response = self.app.get(
'/admin/airflow/graph?dag_id=example_bash_operator&execution_date=')
self.assertIn("runme_0", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/tree?num_runs=25&dag_id=example_bash_operator')
self.assertIn("runme_0", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/duration?days=30&dag_id=example_bash_operator')
self.assertIn("example_bash_operator", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/tries?days=30&dag_id=example_bash_operator')
self.assertIn("example_bash_operator", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/landing_times?'
'days=30&dag_id=example_python_operator')
self.assertIn("example_python_operator", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/landing_times?'
'days=30&dag_id=example_xcom')
self.assertIn("example_xcom", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/gantt?dag_id=example_bash_operator')
self.assertIn("example_bash_operator", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/code?dag_id=example_bash_operator')
self.assertIn("example_bash_operator", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/blocked')
response = self.app.get(
'/admin/configurationview/')
self.assertIn("Airflow Configuration", response.data.decode('utf-8'))
self.assertIn("Running Configuration", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/rendered?'
'task_id=runme_1&dag_id=example_bash_operator&'
'execution_date={}'.format(DEFAULT_DATE_ISO))
self.assertIn("example_bash_operator", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/log?task_id=run_this_last&'
'dag_id=example_bash_operator&execution_date={}'
''.format(DEFAULT_DATE_ISO))
self.assertIn("run_this_last", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/task?'
'task_id=runme_0&dag_id=example_bash_operator&'
'execution_date={}'.format(DEFAULT_DATE_DS))
self.assertIn("Attributes", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/dag_stats')
self.assertIn("example_bash_operator", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/task_stats')
self.assertIn("example_bash_operator", response.data.decode('utf-8'))
url = (
"/admin/airflow/success?task_id=print_the_context&"
"dag_id=example_python_operator&upstream=false&downstream=false&"
"future=false&past=false&execution_date={}&"
"origin=/admin".format(DEFAULT_DATE_DS))
response = self.app.get(url)
self.assertIn("Wait a minute", response.data.decode('utf-8'))
response = self.app.get(url + "&confirmed=true")
response = self.app.get(
'/admin/airflow/clear?task_id=print_the_context&'
'dag_id=example_python_operator&future=true&past=false&'
'upstream=true&downstream=false&'
'execution_date={}&'
'origin=/admin'.format(DEFAULT_DATE_DS))
self.assertIn("Wait a minute", response.data.decode('utf-8'))
url = (
"/admin/airflow/success?task_id=section-1&"
"dag_id=example_subdag_operator&upstream=true&downstream=true&"
"future=false&past=false&execution_date={}&"
"origin=/admin".format(DEFAULT_DATE_DS))
response = self.app.get(url)
self.assertIn("Wait a minute", response.data.decode('utf-8'))
self.assertIn("section-1-task-1", response.data.decode('utf-8'))
self.assertIn("section-1-task-2", response.data.decode('utf-8'))
self.assertIn("section-1-task-3", response.data.decode('utf-8'))
self.assertIn("section-1-task-4", response.data.decode('utf-8'))
self.assertIn("section-1-task-5", response.data.decode('utf-8'))
response = self.app.get(url + "&confirmed=true")
url = (
"/admin/airflow/clear?task_id=print_the_context&"
"dag_id=example_python_operator&future=false&past=false&"
"upstream=false&downstream=true&"
"execution_date={}&"
"origin=/admin".format(DEFAULT_DATE_DS))
response = self.app.get(url)
self.assertIn("Wait a minute", response.data.decode('utf-8'))
response = self.app.get(url + "&confirmed=true")
url = (
"/admin/airflow/run?task_id=runme_0&"
"dag_id=example_bash_operator&ignore_all_deps=false&ignore_ti_state=true&"
"ignore_task_deps=true&execution_date={}&"
"origin=/admin".format(DEFAULT_DATE_DS))
response = self.app.get(url)
response = self.app.get(
"/admin/airflow/refresh?dag_id=example_bash_operator")
response = self.app.get("/admin/airflow/refresh_all")
response = self.app.post(
"/admin/airflow/paused?"
"dag_id=example_python_operator&is_paused=false")
self.assertIn("OK", response.data.decode('utf-8'))
response = self.app.get("/admin/xcom", follow_redirects=True)
self.assertIn("Xcoms", response.data.decode('utf-8'))
def test_charts(self):
session = Session()
chart_label = "Airflow task instance by type"
chart = session.query(
models.Chart).filter(models.Chart.label == chart_label).first()
chart_id = chart.id
session.close()
response = self.app.get(
'/admin/airflow/chart'
'?chart_id={}&iteration_no=1'.format(chart_id))
self.assertIn("Airflow task instance by type", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/chart_data'
'?chart_id={}&iteration_no=1'.format(chart_id))
self.assertIn("example", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/dag_details?dag_id=example_branch_operator')
self.assertIn("run_this_first", response.data.decode('utf-8'))
def test_fetch_task_instance(self):
url = (
"/admin/airflow/object/task_instances?"
"dag_id=example_python_operator&"
"execution_date={}".format(DEFAULT_DATE_DS))
response = self.app.get(url)
self.assertIn("print_the_context", response.data.decode('utf-8'))
def tearDown(self):
configuration.conf.set("webserver", "expose_config", "False")
self.dag_bash.clear(start_date=DEFAULT_DATE, end_date=timezone.utcnow())
session = Session()
session.query(models.DagRun).delete()
session.query(models.TaskInstance).delete()
session.commit()
session.close()
class SecureModeWebUiTests(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
configuration.conf.set("webserver", "authenticate", "False")
configuration.conf.set("core", "secure_mode", "True")
app = application.create_app()
app.config['TESTING'] = True
self.app = app.test_client()
def test_query(self):
response = self.app.get('/admin/queryview/')
self.assertEqual(response.status_code, 404)
def test_charts(self):
response = self.app.get('/admin/chart/')
self.assertEqual(response.status_code, 404)
def tearDown(self):
configuration.conf.remove_option("core", "SECURE_MODE")
class WebPasswordAuthTest(unittest.TestCase):
def setUp(self):
configuration.conf.set("webserver", "authenticate", "True")
configuration.conf.set("webserver", "auth_backend", "airflow.contrib.auth.backends.password_auth")
app = application.create_app()
app.config['TESTING'] = True
self.app = app.test_client()
from airflow.contrib.auth.backends.password_auth import PasswordUser
session = Session()
user = models.User()
password_user = PasswordUser(user)
password_user.username = 'airflow_passwordauth'
password_user.password = 'password'
print(password_user._password)
session.add(password_user)
session.commit()
session.close()
def get_csrf(self, response):
tree = html.fromstring(response.data)
form = tree.find('.//form')
return form.find('.//input[@name="_csrf_token"]').value
def login(self, username, password):
response = self.app.get('/admin/airflow/login')
csrf_token = self.get_csrf(response)
return self.app.post('/admin/airflow/login', data=dict(
username=username,
password=password,
csrf_token=csrf_token
), follow_redirects=True)
def logout(self):
return self.app.get('/admin/airflow/logout', follow_redirects=True)
def test_login_logout_password_auth(self):
self.assertTrue(configuration.conf.getboolean('webserver', 'authenticate'))
response = self.login('user1', 'whatever')
self.assertIn('Incorrect login details', response.data.decode('utf-8'))
response = self.login('airflow_passwordauth', 'wrongpassword')
self.assertIn('Incorrect login details', response.data.decode('utf-8'))
response = self.login('airflow_passwordauth', 'password')
self.assertIn('Data Profiling', response.data.decode('utf-8'))
response = self.logout()
self.assertIn('form-signin', response.data.decode('utf-8'))
def test_unauthorized_password_auth(self):
response = self.app.get("/admin/airflow/landing_times")
self.assertEqual(response.status_code, 302)
def tearDown(self):
configuration.load_test_config()
session = Session()
session.query(models.User).delete()
session.commit()
session.close()
configuration.conf.set("webserver", "authenticate", "False")
class WebLdapAuthTest(unittest.TestCase):
def setUp(self):
configuration.conf.set("webserver", "authenticate", "True")
configuration.conf.set("webserver", "auth_backend", "airflow.contrib.auth.backends.ldap_auth")
try:
configuration.conf.add_section("ldap")
except:
pass
configuration.conf.set("ldap", "uri", "ldap://localhost:3890")
configuration.conf.set("ldap", "user_filter", "objectClass=*")
configuration.conf.set("ldap", "user_name_attr", "uid")
configuration.conf.set("ldap", "bind_user", "cn=Manager,dc=example,dc=com")
configuration.conf.set("ldap", "bind_password", "insecure")
configuration.conf.set("ldap", "basedn", "dc=example,dc=com")
configuration.conf.set("ldap", "cacert", "")
app = application.create_app()
app.config['TESTING'] = True
self.app = app.test_client()
def get_csrf(self, response):
tree = html.fromstring(response.data)
form = tree.find('.//form')
return form.find('.//input[@name="_csrf_token"]').value
def login(self, username, password):
response = self.app.get('/admin/airflow/login')
csrf_token = self.get_csrf(response)
return self.app.post('/admin/airflow/login', data=dict(
username=username,
password=password,
csrf_token=csrf_token
), follow_redirects=True)
def logout(self):
return self.app.get('/admin/airflow/logout', follow_redirects=True)
def test_login_logout_ldap(self):
self.assertTrue(configuration.conf.getboolean('webserver', 'authenticate'))
response = self.login('user1', 'userx')
self.assertIn('Incorrect login details', response.data.decode('utf-8'))
response = self.login('userz', 'user1')
self.assertIn('Incorrect login details', response.data.decode('utf-8'))
response = self.login('user1', 'user1')
self.assertIn('Data Profiling', response.data.decode('utf-8'))
response = self.logout()
self.assertIn('form-signin', response.data.decode('utf-8'))
def test_unauthorized(self):
response = self.app.get("/admin/airflow/landing_times")
self.assertEqual(response.status_code, 302)
def test_no_filter(self):
response = self.login('user1', 'user1')
self.assertIn('Data Profiling', response.data.decode('utf-8'))
self.assertIn('Connections', response.data.decode('utf-8'))
def test_with_filters(self):
configuration.conf.set('ldap', 'superuser_filter',
'description=superuser')
configuration.conf.set('ldap', 'data_profiler_filter',
'description=dataprofiler')
response = self.login('dataprofiler', 'dataprofiler')
self.assertIn('Data Profiling', response.data.decode('utf-8'))
response = self.login('superuser', 'superuser')
self.assertIn('Connections', response.data.decode('utf-8'))
def tearDown(self):
configuration.load_test_config()
session = Session()
session.query(models.User).delete()
session.commit()
session.close()
configuration.conf.set("webserver", "authenticate", "False")
class LdapGroupTest(unittest.TestCase):
def setUp(self):
configuration.conf.set("webserver", "authenticate", "True")
configuration.conf.set("webserver", "auth_backend", "airflow.contrib.auth.backends.ldap_auth")
try:
configuration.conf.add_section("ldap")
except:
pass
configuration.conf.set("ldap", "uri", "ldap://localhost:3890")
configuration.conf.set("ldap", "user_filter", "objectClass=*")
configuration.conf.set("ldap", "user_name_attr", "uid")
configuration.conf.set("ldap", "bind_user", "cn=Manager,dc=example,dc=com")
configuration.conf.set("ldap", "bind_password", "insecure")
configuration.conf.set("ldap", "basedn", "dc=example,dc=com")
configuration.conf.set("ldap", "cacert", "")
def test_group_belonging(self):
from airflow.contrib.auth.backends.ldap_auth import LdapUser
users = {"user1": ["group1", "group3"],
"user2": ["group2"]
}
for user in users:
mu = models.User(username=user,
is_superuser=False)
auth = LdapUser(mu)
self.assertEqual(set(users[user]), set(auth.ldap_groups))
def tearDown(self):
configuration.load_test_config()
configuration.conf.set("webserver", "authenticate", "False")
class FakeWebHDFSHook(object):
def __init__(self, conn_id):
self.conn_id = conn_id
def get_conn(self):
return self.conn_id
def check_for_path(self, hdfs_path):
return hdfs_path
class FakeSnakeBiteClientException(Exception):
pass
class FakeSnakeBiteClient(object):
def __init__(self):
self.started = True
def ls(self, path, include_toplevel=False):
"""
        Fake snakebite client used to simulate HDFS listings.
        :param path: list of paths to test
        :param include_toplevel: whether to include the top-level directory info
        :return: a list of file-status dicts for the matching queries
"""
if path[0] == '/datadirectory/empty_directory' and not include_toplevel:
return []
elif path[0] == '/datadirectory/datafile':
return [{
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 0,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/datafile'
}]
elif path[0] == '/datadirectory/empty_directory' and include_toplevel:
return [{
'group': u'supergroup',
'permission': 493,
'file_type': 'd',
'access_time': 0,
'block_replication': 0,
'modification_time': 1481132141540,
'length': 0,
'blocksize': 0,
'owner': u'hdfs',
'path': '/datadirectory/empty_directory'
}]
elif path[0] == '/datadirectory/not_empty_directory' and include_toplevel:
return [{
'group': u'supergroup',
'permission': 493,
'file_type': 'd',
'access_time': 0,
'block_replication': 0,
'modification_time': 1481132141540,
'length': 0,
'blocksize': 0,
'owner': u'hdfs',
                'path': '/datadirectory/not_empty_directory'
}, {
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 0,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/not_empty_directory/test_file'
}]
elif path[0] == '/datadirectory/not_empty_directory':
return [{
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 0,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/not_empty_directory/test_file'
}]
elif path[0] == '/datadirectory/not_existing_file_or_directory':
raise FakeSnakeBiteClientException
elif path[0] == '/datadirectory/regex_dir':
return [{
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
                'modification_time': 1481122343862,
                'length': 12582912,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/regex_dir/test1file'
}, {
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 12582912,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/regex_dir/test2file'
}, {
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 12582912,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/regex_dir/test3file'
}, {
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 12582912,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/regex_dir/copying_file_1.txt._COPYING_'
}, {
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 12582912,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/regex_dir/copying_file_3.txt.sftp'
}]
else:
raise FakeSnakeBiteClientException
class FakeHDFSHook(object):
def __init__(self, conn_id=None):
self.conn_id = conn_id
def get_conn(self):
client = FakeSnakeBiteClient()
return client
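def _fake_hdfs_hook_example():  # pragma: no cover
    # Illustrative sketch only, not part of the original test suite: shows how
    # the fakes above would typically be consumed by an HDFS sensor test.
    hook = FakeHDFSHook()
    files = hook.get_conn().ls(['/datadirectory/datafile'])
    assert files[0]['file_type'] == 'f'
    assert files[0]['path'] == '/datadirectory/datafile'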
class ConnectionTest(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
utils.db.initdb()
os.environ['AIRFLOW_CONN_TEST_URI'] = (
'postgres://username:password@ec2.compute.com:5432/the_database')
os.environ['AIRFLOW_CONN_TEST_URI_NO_CREDS'] = (
'postgres://ec2.compute.com/the_database')
def tearDown(self):
env_vars = ['AIRFLOW_CONN_TEST_URI', 'AIRFLOW_CONN_AIRFLOW_DB']
for ev in env_vars:
if ev in os.environ:
del os.environ[ev]
def test_using_env_var(self):
c = SqliteHook.get_connection(conn_id='test_uri')
self.assertEqual('ec2.compute.com', c.host)
self.assertEqual('the_database', c.schema)
self.assertEqual('username', c.login)
self.assertEqual('password', c.password)
self.assertEqual(5432, c.port)
def test_using_unix_socket_env_var(self):
c = SqliteHook.get_connection(conn_id='test_uri_no_creds')
self.assertEqual('ec2.compute.com', c.host)
self.assertEqual('the_database', c.schema)
self.assertIsNone(c.login)
self.assertIsNone(c.password)
self.assertIsNone(c.port)
def test_param_setup(self):
c = models.Connection(conn_id='local_mysql', conn_type='mysql',
host='localhost', login='airflow',
password='airflow', schema='airflow')
self.assertEqual('localhost', c.host)
self.assertEqual('airflow', c.schema)
self.assertEqual('airflow', c.login)
self.assertEqual('airflow', c.password)
self.assertIsNone(c.port)
def test_env_var_priority(self):
c = SqliteHook.get_connection(conn_id='airflow_db')
self.assertNotEqual('ec2.compute.com', c.host)
os.environ['AIRFLOW_CONN_AIRFLOW_DB'] = \
'postgres://username:password@ec2.compute.com:5432/the_database'
c = SqliteHook.get_connection(conn_id='airflow_db')
self.assertEqual('ec2.compute.com', c.host)
self.assertEqual('the_database', c.schema)
self.assertEqual('username', c.login)
self.assertEqual('password', c.password)
self.assertEqual(5432, c.port)
del os.environ['AIRFLOW_CONN_AIRFLOW_DB']
def test_dbapi_get_uri(self):
conn = BaseHook.get_connection(conn_id='test_uri')
hook = conn.get_hook()
self.assertEqual('postgres://username:password@ec2.compute.com:5432/the_database', hook.get_uri())
conn2 = BaseHook.get_connection(conn_id='test_uri_no_creds')
hook2 = conn2.get_hook()
self.assertEqual('postgres://ec2.compute.com/the_database', hook2.get_uri())
def test_dbapi_get_sqlalchemy_engine(self):
conn = BaseHook.get_connection(conn_id='test_uri')
hook = conn.get_hook()
engine = hook.get_sqlalchemy_engine()
self.assertIsInstance(engine, sqlalchemy.engine.Engine)
self.assertEqual('postgres://username:password@ec2.compute.com:5432/the_database', str(engine.url))
def test_get_connections_env_var(self):
conns = SqliteHook.get_connections(conn_id='test_uri')
assert len(conns) == 1
assert conns[0].host == 'ec2.compute.com'
assert conns[0].schema == 'the_database'
assert conns[0].login == 'username'
assert conns[0].password == 'password'
assert conns[0].port == 5432
def test_get_connections_db(self):
conns = BaseHook.get_connections(conn_id='airflow_db')
assert len(conns) == 1
assert conns[0].host == 'localhost'
assert conns[0].schema == 'airflow'
assert conns[0].login == 'root'
class WebHDFSHookTest(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
def test_simple_init(self):
from airflow.hooks.webhdfs_hook import WebHDFSHook
c = WebHDFSHook()
self.assertIsNone(c.proxy_user)
def test_init_proxy_user(self):
from airflow.hooks.webhdfs_hook import WebHDFSHook
c = WebHDFSHook(proxy_user='someone')
self.assertEqual('someone', c.proxy_user)
try:
from airflow.hooks.hdfs_hook import HDFSHook
import snakebite
except ImportError:
HDFSHook = None
@unittest.skipIf(HDFSHook is None,
"Skipping test because HDFSHook is not installed")
class HDFSHookTest(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
os.environ['AIRFLOW_CONN_HDFS_DEFAULT'] = ('hdfs://localhost:8020')
def test_get_client(self):
client = HDFSHook(proxy_user='foo').get_conn()
self.assertIsInstance(client, snakebite.client.Client)
self.assertEqual('localhost', client.host)
self.assertEqual(8020, client.port)
self.assertEqual('foo', client.service.channel.effective_user)
@mock.patch('airflow.hooks.hdfs_hook.AutoConfigClient')
@mock.patch('airflow.hooks.hdfs_hook.HDFSHook.get_connections')
def test_get_autoconfig_client(self, mock_get_connections,
MockAutoConfigClient):
c = models.Connection(conn_id='hdfs', conn_type='hdfs',
host='localhost', port=8020, login='foo',
extra=json.dumps({'autoconfig': True}))
mock_get_connections.return_value = [c]
HDFSHook(hdfs_conn_id='hdfs').get_conn()
MockAutoConfigClient.assert_called_once_with(effective_user='foo',
use_sasl=False)
@mock.patch('airflow.hooks.hdfs_hook.AutoConfigClient')
def test_get_autoconfig_client_no_conn(self, MockAutoConfigClient):
HDFSHook(hdfs_conn_id='hdfs_missing', autoconfig=True).get_conn()
MockAutoConfigClient.assert_called_once_with(effective_user=None,
use_sasl=False)
@mock.patch('airflow.hooks.hdfs_hook.HDFSHook.get_connections')
def test_get_ha_client(self, mock_get_connections):
c1 = models.Connection(conn_id='hdfs_default', conn_type='hdfs',
host='localhost', port=8020)
c2 = models.Connection(conn_id='hdfs_default', conn_type='hdfs',
host='localhost2', port=8020)
mock_get_connections.return_value = [c1, c2]
client = HDFSHook().get_conn()
self.assertIsInstance(client, snakebite.client.HAClient)
try:
from airflow.hooks.http_hook import HttpHook
except ImportError:
HttpHook = None
@unittest.skipIf(HttpHook is None,
"Skipping test because HttpHook is not installed")
class HttpHookTest(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
@mock.patch('airflow.hooks.http_hook.HttpHook.get_connection')
def test_http_connection(self, mock_get_connection):
c = models.Connection(conn_id='http_default', conn_type='http',
host='localhost', schema='http')
mock_get_connection.return_value = c
hook = HttpHook()
hook.get_conn({})
self.assertEqual(hook.base_url, 'http://localhost')
@mock.patch('airflow.hooks.http_hook.HttpHook.get_connection')
def test_https_connection(self, mock_get_connection):
c = models.Connection(conn_id='http_default', conn_type='http',
host='localhost', schema='https')
mock_get_connection.return_value = c
hook = HttpHook()
hook.get_conn({})
self.assertEqual(hook.base_url, 'https://localhost')
@mock.patch('airflow.hooks.http_hook.HttpHook.get_connection')
def test_host_encoded_http_connection(self, mock_get_connection):
c = models.Connection(conn_id='http_default', conn_type='http',
host='http://localhost')
mock_get_connection.return_value = c
hook = HttpHook()
hook.get_conn({})
self.assertEqual(hook.base_url, 'http://localhost')
@mock.patch('airflow.hooks.http_hook.HttpHook.get_connection')
def test_host_encoded_https_connection(self, mock_get_connection):
c = models.Connection(conn_id='http_default', conn_type='http',
host='https://localhost')
mock_get_connection.return_value = c
hook = HttpHook()
hook.get_conn({})
self.assertEqual(hook.base_url, 'https://localhost')
send_email_test = mock.Mock()
class EmailTest(unittest.TestCase):
def setUp(self):
configuration.conf.remove_option('email', 'EMAIL_BACKEND')
@mock.patch('airflow.utils.email.send_email')
def test_default_backend(self, mock_send_email):
res = utils.email.send_email('to', 'subject', 'content')
mock_send_email.assert_called_with('to', 'subject', 'content')
self.assertEqual(mock_send_email.return_value, res)
@mock.patch('airflow.utils.email.send_email_smtp')
def test_custom_backend(self, mock_send_email):
configuration.conf.set('email', 'EMAIL_BACKEND', 'tests.core.send_email_test')
utils.email.send_email('to', 'subject', 'content')
send_email_test.assert_called_with(
'to', 'subject', 'content', files=None, dryrun=False,
cc=None, bcc=None, mime_charset='us-ascii', mime_subtype='mixed')
self.assertFalse(mock_send_email.called)
class EmailSmtpTest(unittest.TestCase):
def setUp(self):
configuration.conf.set('smtp', 'SMTP_SSL', 'False')
@mock.patch('airflow.utils.email.send_MIME_email')
def test_send_smtp(self, mock_send_mime):
attachment = tempfile.NamedTemporaryFile()
attachment.write(b'attachment')
attachment.seek(0)
utils.email.send_email_smtp('to', 'subject', 'content', files=[attachment.name])
self.assertTrue(mock_send_mime.called)
call_args = mock_send_mime.call_args[0]
self.assertEqual(configuration.conf.get('smtp', 'SMTP_MAIL_FROM'), call_args[0])
self.assertEqual(['to'], call_args[1])
msg = call_args[2]
self.assertEqual('subject', msg['Subject'])
self.assertEqual(configuration.conf.get('smtp', 'SMTP_MAIL_FROM'), msg['From'])
self.assertEqual(2, len(msg.get_payload()))
filename = u'attachment; filename="' + os.path.basename(attachment.name) + '"'
self.assertEqual(filename, msg.get_payload()[-1].get(u'Content-Disposition'))
mimeapp = MIMEApplication('attachment')
self.assertEqual(mimeapp.get_payload(), msg.get_payload()[-1].get_payload())
@mock.patch('airflow.utils.email.send_MIME_email')
def test_send_smtp_with_multibyte_content(self, mock_send_mime):
utils.email.send_email_smtp('to', 'subject', '🔥', mime_charset='utf-8')
self.assertTrue(mock_send_mime.called)
call_args = mock_send_mime.call_args[0]
msg = call_args[2]
mimetext = MIMEText('🔥', 'mixed', 'utf-8')
self.assertEqual(mimetext.get_payload(), msg.get_payload()[0].get_payload())
@mock.patch('airflow.utils.email.send_MIME_email')
def test_send_bcc_smtp(self, mock_send_mime):
attachment = tempfile.NamedTemporaryFile()
attachment.write(b'attachment')
attachment.seek(0)
utils.email.send_email_smtp('to', 'subject', 'content', files=[attachment.name], cc='cc', bcc='bcc')
self.assertTrue(mock_send_mime.called)
call_args = mock_send_mime.call_args[0]
self.assertEqual(configuration.conf.get('smtp', 'SMTP_MAIL_FROM'), call_args[0])
self.assertEqual(['to', 'cc', 'bcc'], call_args[1])
msg = call_args[2]
self.assertEqual('subject', msg['Subject'])
self.assertEqual(configuration.conf.get('smtp', 'SMTP_MAIL_FROM'), msg['From'])
self.assertEqual(2, len(msg.get_payload()))
self.assertEqual(u'attachment; filename="' + os.path.basename(attachment.name) + '"',
msg.get_payload()[-1].get(u'Content-Disposition'))
mimeapp = MIMEApplication('attachment')
self.assertEqual(mimeapp.get_payload(), msg.get_payload()[-1].get_payload())
@mock.patch('smtplib.SMTP_SSL')
@mock.patch('smtplib.SMTP')
def test_send_mime(self, mock_smtp, mock_smtp_ssl):
mock_smtp.return_value = mock.Mock()
mock_smtp_ssl.return_value = mock.Mock()
msg = MIMEMultipart()
utils.email.send_MIME_email('from', 'to', msg, dryrun=False)
mock_smtp.assert_called_with(
configuration.conf.get('smtp', 'SMTP_HOST'),
configuration.conf.getint('smtp', 'SMTP_PORT'),
)
self.assertTrue(mock_smtp.return_value.starttls.called)
mock_smtp.return_value.login.assert_called_with(
configuration.conf.get('smtp', 'SMTP_USER'),
configuration.conf.get('smtp', 'SMTP_PASSWORD'),
)
mock_smtp.return_value.sendmail.assert_called_with('from', 'to', msg.as_string())
self.assertTrue(mock_smtp.return_value.quit.called)
@mock.patch('smtplib.SMTP_SSL')
@mock.patch('smtplib.SMTP')
def test_send_mime_ssl(self, mock_smtp, mock_smtp_ssl):
configuration.conf.set('smtp', 'SMTP_SSL', 'True')
mock_smtp.return_value = mock.Mock()
mock_smtp_ssl.return_value = mock.Mock()
utils.email.send_MIME_email('from', 'to', MIMEMultipart(), dryrun=False)
self.assertFalse(mock_smtp.called)
mock_smtp_ssl.assert_called_with(
configuration.conf.get('smtp', 'SMTP_HOST'),
configuration.conf.getint('smtp', 'SMTP_PORT'),
)
@mock.patch('smtplib.SMTP_SSL')
@mock.patch('smtplib.SMTP')
def test_send_mime_noauth(self, mock_smtp, mock_smtp_ssl):
configuration.conf.remove_option('smtp', 'SMTP_USER')
configuration.conf.remove_option('smtp', 'SMTP_PASSWORD')
mock_smtp.return_value = mock.Mock()
mock_smtp_ssl.return_value = mock.Mock()
utils.email.send_MIME_email('from', 'to', MIMEMultipart(), dryrun=False)
self.assertFalse(mock_smtp_ssl.called)
mock_smtp.assert_called_with(
configuration.conf.get('smtp', 'SMTP_HOST'),
configuration.conf.getint('smtp', 'SMTP_PORT'),
)
        self.assertFalse(mock_smtp.return_value.login.called)
@mock.patch('smtplib.SMTP_SSL')
@mock.patch('smtplib.SMTP')
def test_send_mime_dryrun(self, mock_smtp, mock_smtp_ssl):
utils.email.send_MIME_email('from', 'to', MIMEMultipart(), dryrun=True)
self.assertFalse(mock_smtp.called)
self.assertFalse(mock_smtp_ssl.called)
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "aea6cde9003fa4a1cdb298bb432f0c9a",
"timestamp": "",
"source": "github",
"line_count": 2506,
"max_line_length": 109,
"avg_line_length": 39.89385474860335,
"alnum_prop": 0.5872526856982816,
"repo_name": "RealImpactAnalytics/airflow",
"id": "ce32482d04f2a45c9d8f5a4364310cf508aca4f2",
"size": "100792",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/core.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "109698"
},
{
"name": "HTML",
"bytes": "270710"
},
{
"name": "JavaScript",
"bytes": "1988427"
},
{
"name": "Mako",
"bytes": "1284"
},
{
"name": "Python",
"bytes": "3765458"
},
{
"name": "Shell",
"bytes": "46923"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, print_function, unicode_literals
from builtins import dict, str
import logging
from indra.sources.cwms.processor import CWMSProcessor
from indra.sources.cwms.rdf_processor import CWMSRDFProcessor
from indra.sources.trips import client
logger = logging.getLogger(__name__)
def process_text(text, save_xml='cwms_output.xml'):
"""Processes text using the CWMS web service.
Parameters
----------
    text : str
        Text to process
    save_xml : Optional[str]
        File name in which to save the EKB XML extracted from the CWMS
        response. Default: cwms_output.xml
Returns
-------
cp : indra.sources.cwms.CWMSProcessor
A CWMSProcessor, which contains a list of INDRA statements in its
statements attribute.
"""
xml = client.send_query(text, 'cwmsreader')
# There are actually two EKBs in the xml document. Extract the second.
first_end = xml.find('</ekb>') # End of first EKB
second_start = xml.find('<ekb', first_end) # Start of second EKB
second_end = xml.find('</ekb>', second_start) # End of second EKB
second_ekb = xml[second_start:second_end+len('</ekb>')] # second EKB
if save_xml:
with open(save_xml, 'wb') as fh:
fh.write(second_ekb.encode('utf-8'))
return process_ekb(second_ekb)
def process_ekb_file(fname):
"""Processes an EKB file produced by CWMS.
Parameters
----------
fname : str
Path to the EKB file to process.
Returns
-------
cp : indra.sources.cwms.CWMSProcessor
A CWMSProcessor, which contains a list of INDRA statements in its
statements attribute.
"""
# Process EKB XML file into statements
with open(fname, 'rb') as fh:
ekb_str = fh.read().decode('utf-8')
return process_ekb(ekb_str)
def process_ekb(ekb_str):
"""Processes an EKB string produced by CWMS.
Parameters
----------
ekb_str : str
EKB string to process
Returns
-------
cp : indra.sources.cwms.CWMSProcessor
A CWMSProcessor, which contains a list of INDRA statements in its
statements attribute.
"""
# Process EKB XML into statements
cp = CWMSProcessor(ekb_str)
cp.extract_causal_relations()
cp.extract_correlations()
cp.extract_events()
return cp
def process_rdf_file(text, rdf_filename):
"""Process CWMS's RDF output for the given statement and returns a
processor populated with INDRA statements.
Parameters
----------
text : str
Sentence to process
rdf_filename : str
The RDF filename to process
Returns
-------
cp : indra.sources.cwms.CWMSRDFProcessor
A CWMSProcessor instance, which contains a list of INDRA Statements
as its statements attribute.
"""
cp = CWMSRDFProcessor(text, rdf_filename)
return cp
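def _example_usage():  # pragma: no cover
    # Illustrative sketch only, not part of the original module: the typical
    # chain is process_text -> process_ekb (or process_ekb_file for a saved
    # EKB). The sentence and file name below are hypothetical and assume a
    # reachable CWMS/TRIPS web service.
    cp = process_text('Food insecurity causes displacement.',
                      save_xml='cwms_output.xml')
    for stmt in cp.statements:
        logger.info('Extracted: %s', stmt)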
| {
"content_hash": "5711357b391cde73cbfd17cab65ded7d",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 75,
"avg_line_length": 28.091836734693878,
"alnum_prop": 0.6465673810388667,
"repo_name": "pvtodorov/indra",
"id": "0fb3e0a9f9dfc6f14a98f2ac13c717572cc63868",
"size": "2753",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "indra/sources/cwms/api.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "169"
},
{
"name": "HTML",
"bytes": "17236"
},
{
"name": "JavaScript",
"bytes": "72960"
},
{
"name": "Python",
"bytes": "2660313"
},
{
"name": "Shell",
"bytes": "381"
}
],
"symlink_target": ""
} |
import os
import sys
import knowledgebase
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
version = knowledgebase.__version__
if sys.argv[-1] == 'publish':
try:
import wheel
except ImportError:
print('Wheel library missing. Please run "pip install wheel"')
sys.exit()
os.system('python setup.py sdist upload')
os.system('python setup.py bdist_wheel upload')
sys.exit()
if sys.argv[-1] == 'tag':
print("Tagging the version on github:")
os.system("git tag -a %s -m 'version %s'" % (version, version))
os.system("git push --tags")
sys.exit()
readme = open('README.rst').read()
history = open('HISTORY.rst').read().replace('.. :changelog:', '')
install_requires = []
with open('requirements.txt') as f:
install_requires = f.read().splitlines()
setup(
name='django-knowledgebase',
version=version,
description="""A knowledgebase made with Django""",
long_description=readme + '\n\n' + history,
author='Julio Marquez',
author_email='j@bazzite.com',
url='https://github.com/bazzite/django-knowledgebase',
packages=[
'knowledgebase',
],
include_package_data=True,
install_requires=install_requires,
license="Apache License 2.0",
zip_safe=False,
keywords='django-knowledgebase',
classifiers=[
'Development Status :: 4 - Beta',
'Framework :: Django',
'Framework :: Django :: 1.8',
'Framework :: Django :: 1.9',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Natural Language :: English',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
)
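# Illustrative usage only, not part of the original setup script: the helper
# branches above are driven from the command line, e.g.
#   python setup.py publish   # build the sdist/wheel and upload them
#   python setup.py tag       # tag the current version and push the tag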
| {
"content_hash": "edbd85d5c2542468469d0fe233fa2852",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 70,
"avg_line_length": 28.88235294117647,
"alnum_prop": 0.6186354378818737,
"repo_name": "bazzite/django-knowledgebase",
"id": "376ae219d7658a8d874ecd840a6c7d2b2b93c805",
"size": "2011",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "setup.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "19519"
},
{
"name": "JavaScript",
"bytes": "1197"
},
{
"name": "Makefile",
"bytes": "1259"
},
{
"name": "Python",
"bytes": "47357"
}
],
"symlink_target": ""
} |
import sys
import os.path
import unittest
sys.path.append(
os.path.abspath(os.path.join(os.path.dirname(__file__), "../../")))
import configgen.utils.videoMode as videoMode
class VideoModeUtilTest(unittest.TestCase):
pass
#def test_createSimpleFillValues(self):
# self.assertEquals(videoMode.createVideoModeLine("10"), "tvservice -e 10 CEA HDMI")
#def test_createAddHDMI(self):
# self.assertEquals(videoMode.createVideoModeLine("10 CEA"), "tvservice -e 10 CEA HDMI")
#def test_createDontAddWhenLineCompelete(self):
# self.assertEquals(videoMode.createVideoModeLine("10 CEA HDMI"), "tvservice -e 10 CEA HDMI")
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "ed37058ccfc23a8bb1223e687919ce35",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 100,
"avg_line_length": 33.38095238095238,
"alnum_prop": 0.7061340941512125,
"repo_name": "digitalLumberjack/recalbox-configgen",
"id": "779b36513025df2566bebe4b6f3c13977678ce19",
"size": "723",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "configgen/tests/utils/videoModeUtil_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "131403"
}
],
"symlink_target": ""
} |
"""
Representation Tutorial for RLPy
================================
Assumes you have created the IncrTabularTut.py representation according to the
tutorial and placed it in the Representations/ directory.
Tests the representation on the GridWorld domain using SARSA.
"""
__author__ = "Robert H. Klein"
from rlpy.Domains import GridWorld
from rlpy.Agents import SARSA
from rlpy.Representations import IncrTabularTut
from rlpy.Policies import eGreedy
from rlpy.Experiments import Experiment
import os
def make_experiment(exp_id=1, path="./Results/Tutorial/gridworld-IncrTabularTut"):
"""
Each file specifying an experimental setup should contain a
make_experiment function which returns an instance of the Experiment
class with everything set up.
    @param exp_id: number used to seed the random number generators
@param path: output directory where logs and results are stored
"""
opt = {}
opt["exp_id"] = exp_id
opt["path"] = path
## Domain:
maze = os.path.join(GridWorld.default_map_dir, '4x5.txt')
domain = GridWorld(maze, noise=0.3)
opt["domain"] = domain
## Representation
# discretization only needed for continuous state spaces, discarded otherwise
representation = IncrTabularTut(domain)
## Policy
policy = eGreedy(representation, epsilon=0.2)
## Agent
opt["agent"] = SARSA(representation=representation, policy=policy,
discount_factor=domain.discount_factor,
learn_rate=0.1)
opt["checks_per_policy"] = 100
opt["max_steps"] = 2000
opt["num_policy_checks"] = 10
experiment = Experiment(**opt)
return experiment
if __name__ == '__main__':
experiment = make_experiment(1)
experiment.run(visualize_steps=False, # should each learning step be shown?
visualize_learning=True, # show policy / value function?
visualize_performance=1) # show performance runs?
experiment.plot()
experiment.save()
| {
"content_hash": "d59b27f5cf6809d0eb7f0897737f3e76",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 82,
"avg_line_length": 33.74576271186441,
"alnum_prop": 0.6790557508789553,
"repo_name": "imanolarrieta/RL",
"id": "8ecaca2f7278ccbe40fad3e89ea4dcf9ee6f9de9",
"size": "2013",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/tutorial/IncrTabularTut_example.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "117712"
},
{
"name": "C++",
"bytes": "1575"
},
{
"name": "Python",
"bytes": "1350176"
}
],
"symlink_target": ""
} |
import os, re
import pytest
from pytest import raises
from jenkinsflow.flow import parallel, FailedChildJobsException
from .framework import api_select
from .framework.utils import assert_lines_in
from .framework.abort_job import abort
from .cfg import ApiType
here = os.path.abspath(os.path.dirname(__file__))
@pytest.mark.not_apis(ApiType.SCRIPT)
def test_abort(api_type, capsys):
with api_select.api(__file__, api_type, login=True) as api:
api.flow_job()
api.job('quick', exec_time=0.01, max_fails=0, expect_invocations=1, expect_order=1)
api.job('wait10_abort', exec_time=10, max_fails=0, expect_invocations=1, expect_order=1, final_result='ABORTED')
api.job('wait1_fail', exec_time=1, max_fails=1, expect_invocations=1, expect_order=1)
abort(api, 'wait10_abort', 2)
with raises(FailedChildJobsException) as exinfo:
with parallel(api, timeout=40, job_name_prefix=api.job_name_prefix, report_interval=3) as ctrl:
ctrl.invoke('quick')
ctrl.invoke('wait10_abort')
ctrl.invoke('wait1_fail')
assert "wait10_abort" in str(exinfo.value)
assert "wait1_fail" in str(exinfo.value)
sout, _ = capsys.readouterr()
assert_lines_in(
api_type, sout,
re.compile("^ABORTED: 'jenkinsflow_test__abort__wait10_abort' - build: .*/jenkinsflow_test__abort__wait10_abort.* after:"),
)
| {
"content_hash": "bb3fdf7b017f8fcb111d90466bef5375",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 135,
"avg_line_length": 36.275,
"alnum_prop": 0.656788421778084,
"repo_name": "lechat/jenkinsflow",
"id": "da5160310159cedee040c071769b0c8276b66e99",
"size": "1589",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/abort_test.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "399"
},
{
"name": "HTML",
"bytes": "992"
},
{
"name": "JavaScript",
"bytes": "1410"
},
{
"name": "Python",
"bytes": "353496"
},
{
"name": "Shell",
"bytes": "801"
}
],
"symlink_target": ""
} |
"""The tests for the Sun component."""
from datetime import datetime, timedelta
from unittest.mock import patch
from pytest import mark
import homeassistant.components.sun as sun
from homeassistant.const import EVENT_STATE_CHANGED
import homeassistant.core as ha
from homeassistant.setup import async_setup_component
import homeassistant.util.dt as dt_util
async def test_setting_rising(hass):
"""Test retrieving sun setting and rising."""
utc_now = datetime(2016, 11, 1, 8, 0, 0, tzinfo=dt_util.UTC)
with patch("homeassistant.helpers.condition.dt_util.utcnow", return_value=utc_now):
await async_setup_component(
hass, sun.DOMAIN, {sun.DOMAIN: {sun.CONF_ELEVATION: 0}}
)
await hass.async_block_till_done()
state = hass.states.get(sun.ENTITY_ID)
from astral import Astral
astral = Astral()
utc_today = utc_now.date()
latitude = hass.config.latitude
longitude = hass.config.longitude
mod = -1
while True:
next_dawn = astral.dawn_utc(
utc_today + timedelta(days=mod), latitude, longitude
)
if next_dawn > utc_now:
break
mod += 1
mod = -1
while True:
next_dusk = astral.dusk_utc(
utc_today + timedelta(days=mod), latitude, longitude
)
if next_dusk > utc_now:
break
mod += 1
mod = -1
while True:
next_midnight = astral.solar_midnight_utc(
utc_today + timedelta(days=mod), longitude
)
if next_midnight > utc_now:
break
mod += 1
mod = -1
while True:
next_noon = astral.solar_noon_utc(utc_today + timedelta(days=mod), longitude)
if next_noon > utc_now:
break
mod += 1
mod = -1
while True:
next_rising = astral.sunrise_utc(
utc_today + timedelta(days=mod), latitude, longitude
)
if next_rising > utc_now:
break
mod += 1
mod = -1
while True:
next_setting = astral.sunset_utc(
utc_today + timedelta(days=mod), latitude, longitude
)
if next_setting > utc_now:
break
mod += 1
assert next_dawn == dt_util.parse_datetime(
state.attributes[sun.STATE_ATTR_NEXT_DAWN]
)
assert next_dusk == dt_util.parse_datetime(
state.attributes[sun.STATE_ATTR_NEXT_DUSK]
)
assert next_midnight == dt_util.parse_datetime(
state.attributes[sun.STATE_ATTR_NEXT_MIDNIGHT]
)
assert next_noon == dt_util.parse_datetime(
state.attributes[sun.STATE_ATTR_NEXT_NOON]
)
assert next_rising == dt_util.parse_datetime(
state.attributes[sun.STATE_ATTR_NEXT_RISING]
)
assert next_setting == dt_util.parse_datetime(
state.attributes[sun.STATE_ATTR_NEXT_SETTING]
)
async def test_state_change(hass):
"""Test if the state changes at next setting/rising."""
now = datetime(2016, 6, 1, 8, 0, 0, tzinfo=dt_util.UTC)
with patch("homeassistant.helpers.condition.dt_util.utcnow", return_value=now):
await async_setup_component(
hass, sun.DOMAIN, {sun.DOMAIN: {sun.CONF_ELEVATION: 0}}
)
await hass.async_block_till_done()
test_time = dt_util.parse_datetime(
hass.states.get(sun.ENTITY_ID).attributes[sun.STATE_ATTR_NEXT_RISING]
)
assert test_time is not None
assert sun.STATE_BELOW_HORIZON == hass.states.get(sun.ENTITY_ID).state
hass.bus.async_fire(
ha.EVENT_TIME_CHANGED, {ha.ATTR_NOW: test_time + timedelta(seconds=5)}
)
await hass.async_block_till_done()
assert sun.STATE_ABOVE_HORIZON == hass.states.get(sun.ENTITY_ID).state
with patch("homeassistant.helpers.condition.dt_util.utcnow", return_value=now):
await hass.config.async_update(longitude=hass.config.longitude + 90)
await hass.async_block_till_done()
assert sun.STATE_ABOVE_HORIZON == hass.states.get(sun.ENTITY_ID).state
async def test_norway_in_june(hass):
"""Test location in Norway where the sun doesn't set in summer."""
hass.config.latitude = 69.6
hass.config.longitude = 18.8
june = datetime(2016, 6, 1, tzinfo=dt_util.UTC)
with patch("homeassistant.helpers.condition.dt_util.utcnow", return_value=june):
assert await async_setup_component(
hass, sun.DOMAIN, {sun.DOMAIN: {sun.CONF_ELEVATION: 0}}
)
state = hass.states.get(sun.ENTITY_ID)
assert state is not None
assert dt_util.parse_datetime(
state.attributes[sun.STATE_ATTR_NEXT_RISING]
) == datetime(2016, 7, 25, 23, 23, 39, tzinfo=dt_util.UTC)
assert dt_util.parse_datetime(
state.attributes[sun.STATE_ATTR_NEXT_SETTING]
) == datetime(2016, 7, 26, 22, 19, 1, tzinfo=dt_util.UTC)
assert state.state == sun.STATE_ABOVE_HORIZON
@mark.skip
async def test_state_change_count(hass):
"""Count the number of state change events in a location."""
# Skipped because it's a bit slow. Has been validated with
    # multiple latitudes and dates
hass.config.latitude = 10
hass.config.longitude = 0
now = datetime(2016, 6, 1, tzinfo=dt_util.UTC)
with patch("homeassistant.helpers.condition.dt_util.utcnow", return_value=now):
assert await async_setup_component(
hass, sun.DOMAIN, {sun.DOMAIN: {sun.CONF_ELEVATION: 0}}
)
events = []
@ha.callback
def state_change_listener(event):
if event.data.get("entity_id") == "sun.sun":
events.append(event)
hass.bus.async_listen(EVENT_STATE_CHANGED, state_change_listener)
await hass.async_block_till_done()
for _ in range(24 * 60 * 60):
now += timedelta(seconds=1)
hass.bus.async_fire(ha.EVENT_TIME_CHANGED, {ha.ATTR_NOW: now})
await hass.async_block_till_done()
assert len(events) < 721
| {
"content_hash": "f48f239a88d617fa6a63ebbac28db5bb",
"timestamp": "",
"source": "github",
"line_count": 192,
"max_line_length": 87,
"avg_line_length": 30.630208333333332,
"alnum_prop": 0.6340758374426118,
"repo_name": "leppa/home-assistant",
"id": "e04de7e2578ff514811d6dcbf0b466a912bfaf79",
"size": "5881",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "tests/components/sun/test_init.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "18957740"
},
{
"name": "Shell",
"bytes": "6846"
}
],
"symlink_target": ""
} |
import os
# Setup config name.
config.name = 'GWP-ASan' + config.name_suffix
# Setup source root.
config.test_source_root = os.path.dirname(__file__)
# Test suffixes.
config.suffixes = ['.c', '.cpp', '.test']
# C & CXX flags.
c_flags = ([config.target_cflags])
# Android doesn't want -lrt.
if not config.android:
c_flags += ["-lrt"]
cxx_flags = (c_flags + config.cxx_mode_flags + ["-std=c++11"])
gwp_asan_flags = ["-fsanitize=scudo", "-g", "-fno-omit-frame-pointer",
"-mno-omit-leaf-frame-pointer"]
def build_invocation(compile_flags):
return " " + " ".join([config.clang] + compile_flags) + " "
# Add substitutions.
config.substitutions.append(("%clang ", build_invocation(c_flags)))
config.substitutions.append(("%clang_gwp_asan ", build_invocation(c_flags + gwp_asan_flags)))
config.substitutions.append(("%clangxx_gwp_asan ", build_invocation(cxx_flags + gwp_asan_flags)))
# Platform-specific default GWP_ASAN for lit tests. Ensure that GWP-ASan is
# enabled and that it samples every allocation.
default_gwp_asan_options = 'Enabled=1:SampleRate=1'
config.environment['GWP_ASAN_OPTIONS'] = default_gwp_asan_options
default_gwp_asan_options += ':'
config.substitutions.append(('%env_gwp_asan_options=',
'env GWP_ASAN_OPTIONS=' + default_gwp_asan_options))
# GWP-ASan tests are currently supported on Linux only.
if config.host_os not in ['Linux']:
config.unsupported = True
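# Illustrative only (hypothetical test lines, not part of this config): with
# the substitutions defined above, a lit test source would typically contain
#   // RUN: %clangxx_gwp_asan %s -o %t
#   // RUN: %env_gwp_asan_options=SampleRate=1 %t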
| {
"content_hash": "423b5bfed721787d9e49d887b94f2847",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 97,
"avg_line_length": 33.55813953488372,
"alnum_prop": 0.6812196812196812,
"repo_name": "endlessm/chromium-browser",
"id": "a1b2551c2f9daf9372c3b7a9f197e1dfa41e5c92",
"size": "1461",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "third_party/llvm/compiler-rt/test/gwp_asan/lit.cfg.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
import sys
import os
import subprocess
import time
import getpass
DEBUG=False
VERBOSE=False
cookies = "--insecure --cookie-jar .cookies.txt --cookie .cookies.txt"
PREFIX=os.path.dirname(os.path.realpath(__file__))
def cmd_exists(cmd):
""" Returns: bool, True if 'cmd' is in PATH, False otherwise."""
return subprocess.call("type " + cmd, shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE) == 0
def confirm_cmd_exists(cmd):
if not cmd_exists(cmd):
print "Error: This script depends on '%s'." % cmd
print "We could not find '%s' in PATH. Please update PATH or" % cmd
print "install the package for '%s' on your system." % cmd
sys.exit(1)
def system(cmd):
## NOTE: use this rather than os.system() to catch
## KeyboardInterrupt correctly.
if DEBUG:
print cmd
# simulate success without running the command.
return 0
if VERBOSE:
print cmd
return subprocess.call(cmd, stdout=sys.stdout,
stderr=sys.stderr, shell=True)
REBOOT_MESSAGE="""
NOTE: please send this message to ops@measurementlab.net:
Around %(ts)s we rebooted this server due to the system not responding:
%(hostname)s
Once the reboot completes, all services on this system should return to normal.
"""
def usage():
return """
usage:
All commands take a host specification. A host spec is a FQHN, or a
shorter pattern. For example, "mlab1.nuq01", or "mlab1d.nuq01"
    without quotes are valid host specs and may be used interchangeably.
drac.py <host spec>
Take hostname argument and print out associated PCU information.
<hostname> may be a pattern, such as '*.site.measurement-lab.org'.
Acts like the original 'drac-password.py' script.
drac.py reboot <drac host spec>
Use DRAC to reboot <hostname>
drac.py shell <drac host spec>
Take the drac-hostname argument and log into the DRAC interface via
SSH. Then, control is returned to the user to enter DRAC commands
in the shell. i.e. reboot, or get system info, etc.
drac.py console5 <drac host spec>
drac.py console6 <drac host spec>
Take the drac-hostname argument and open the JavaWebStart Virtual
Console. This depends upon correct configuration of JavaWebStart,
which is platform dependent. Check that 'javaws' is in your path.
console5 is for DRAC5
ams01, ams02, atl01, dfw01, ham01, iad01, lax01, lga01, lga02,
lhr01, mia01, nuq01, ord01, par01, sea01,
console6 is for iDRAC6
arn01, ath01, ath02, dub01, hnd01, mad01, mil01, syd01, syd02,
tpe01, vie01, wlg01,
unknown
svg01,
unsupported (hpilo)
trn01
Not all systems have been tested. There may not be 100% coverage
for MLab DRAC's.
drac.py getsysinfo <drac host spec>
Take the hostname argument and log into the DRAC interface via
SSH. Then run 'racadm getsysinfo'.
<hostname> may be a pattern, such as '*.site.measurement-lab.org'.
drac.py resetpassword <drac host spec> <newpassword>
Take the drac-hostname and set a new password.
The current password is taken from the PCU entry in the PLC
database. Then, this command will log into the DRAC interface
and reset the password there. Finally, it will update PLC's PCU
entry.
"""
def parse_options():
from optparse import OptionParser
parser = OptionParser(usage=usage())
parser.set_defaults(promptpassword=False,
user="admin",
verbose=False,
debug=False)
parser.add_option("-v", "--verbose", dest="verbose", action="store_true",
help="Verbose mode: print extra details.")
parser.add_option("-n", "--dryrun", dest="debug", action="store_true",
help="Debug mode: perform no updates.")
parser.add_option("-u", "--user", dest="user",
metavar="admin",
help=("The DRAC username. Should be used with '-p'"))
parser.add_option("-p", "--promptpassword", dest="promptpassword",
action="store_true",
help=("Prompt for DRAC password rather than querying "+
"PLC. This is useful if you do not have a PLC "+
"account"))
(options, args) = parser.parse_args()
if len(args) == 0:
parser.print_help()
sys.exit(1)
command = "list"
host_spec = None
newpasswd = None
if len(args) == 1:
host_spec = args[0]
elif len(args) == 2:
command = args[0]
host_spec = args[1]
elif len(args) == 3:
command = args[0]
host_spec = args[1]
newpasswd = args[2]
return (command, host_spec, newpasswd, options, args)
def hspec_to_pcu(host_spec):
f = host_spec.split(".")
suffix = "measurement-lab.org"
if len(f) == 2: ## short form.
if f[0][-1] == 'd': ## already a pcu name.
return host_spec + "." + suffix
else:
return "%sd.%s." % (f[0],f[1]) + suffix
elif len(f) == 4: ## long form
if f[0][-1] == 'd': ## already a pcu name.
return host_spec
else:
f[0] = f[0]+"d"
return ".".join(f)
else:
return host_spec
return None
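# Illustrative sketch (not part of the original script): how hspec_to_pcu()
# maps host specs onto PCU (DRAC) hostnames, per the rules above:
#   hspec_to_pcu("mlab1.nuq01") -> "mlab1d.nuq01.measurement-lab.org"
#   hspec_to_pcu("mlab1d.nuq01") -> "mlab1d.nuq01.measurement-lab.org"
#   hspec_to_pcu("mlab1.nuq01.measurement-lab.org")
#       -> "mlab1d.nuq01.measurement-lab.org"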
def drac_formatLoginRequest(username, passwd):
def escapeStr(val):
escstr=""
val = val.replace("\\", "\\\\")
tmp = [ i for i in val ]
for i in range(0,len(val)):
if tmp[i] in ['@','(',')',',',':','?','=','&','#','+','%']:
dec = ord(tmp[i])
escstr+= "@0"+ "%02x" % dec
else:
escstr+=tmp[i]
return escstr
postData = ('user=' + escapeStr(username) +
'&password=' + escapeStr(passwd))
return postData
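# Illustrative sketch (not part of the original script): escapeStr() encodes
# DRAC-special characters as "@0" followed by their two-digit hex code, so for
# a hypothetical password "p@ss=1":
#   drac_formatLoginRequest("admin", "p@ss=1")
#   -> "user=admin&password=p@040ss@03d1"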
def drac_getLoginURL(console):
if console == "console5":
login_url = "cgi-bin/webcgi/login"
elif console == "console6":
login_url = "data/login"
else:
print "unknown console type: %s" % console
sys.exit(1)
return login_url
def drac_login(login_url, postData, hostname, output):
ret = run_curl(hostname, login_url,
output, "-d '%s'" % postData)
return ret
def run_curl(hostname, url, output, extra_args=""):
cmd_fmt = "curl -D /tmp/out.headers %s -s %s -o %s 'https://%s/%s'"
ret = system(cmd_fmt % (extra_args, cookies, output, hostname, url))
if ret != 0:
return False
if DEBUG:
# if DEBUG is true, out.headers will not exist, and it doesn't matter
return True
headers = open("/tmp/out.headers", 'r').read().strip()
if VERBOSE:
print headers
if "200 OK" in headers or "302 Found" in headers:
return True
return False
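# Illustrative sketch (not part of the original script): with the module-level
# `cookies` options, a call like
#   run_curl("mlab1d.nuq01.measurement-lab.org", "data/login",
#            "/tmp/out.login", "-d 'user=admin&password=...'")
# runs roughly
#   curl -D /tmp/out.headers -d 'user=admin&password=...' -s \
#       --insecure --cookie-jar .cookies.txt --cookie .cookies.txt \
#       -o /tmp/out.login 'https://mlab1d.nuq01.measurement-lab.org/data/login'
# and treats "200 OK" or "302 Found" in the response headers as success.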
def drac_downloadConsoleJNLP(console, user, passwd, hostname, jnlp_output):
date_s=int((time.time())*1000)
postData = drac_formatLoginRequest(user, passwd)
login_url = drac_getLoginURL(console)
login_output = "/tmp/out.login"
print "Logging in.."
login_ok = drac_login(login_url, postData, hostname, login_output)
if not login_ok:
print "Failed to login to %s" % hostname
return False
if VERBOSE: system("cat "+login_output); time.sleep(10)
print "Getting *.jnlp for Java Web Start."
if console == "console5":
return drac5_downloadConsoleJNLP(hostname, date_s, jnlp_output)
elif console == "console6":
return drac6_downloadConsoleJNLP(hostname, date_s,
login_output, jnlp_output)
else:
raise Exception("Unrecognized console type: %s" % console)
def drac6_downloadConsoleJNLP(hostname, date_s, login_output, jnlp_output):
cmd = (r"sed -e "+
r"'s/.*forwardUrl>index.html\(.*\)<\/forwardUrl.*/\1/g'"+
r" " + login_output + r" | tr '?' ' '")
if DEBUG:
print cmd
token = "faketoken"
else:
token = os.popen(cmd, 'r').read().strip()
## NOTE: handle the many variations on a theme.
if "ath01" in hostname or "syd01" in hostname:
url = "viewer.jnlp(%s@0@%s)" % (hostname, date_s)
elif len(token) > 10:
url = "viewer.jnlp(%s@0@title@%s@%s)" % (hostname, date_s, token)
else:
url = "viewer.jnlp(%s@0@title@%s)" % (hostname, date_s)
ret = run_curl(hostname, url, jnlp_output)
if VERBOSE: system("cat "+ jnlp_output)
return ret
def drac5_downloadConsoleJNLP(hostname, date_s, jnlp_output):
print "Getting Virtual Console SessionID.."
session_url="cgi-bin/webcgi/vkvm?state=1"
session_ok = run_curl(hostname, session_url, "/tmp/tmp.out")
if not session_ok: return session_ok
cmd = ("cat /tmp/tmp.out | grep vKvmSessionId |"+
" tr '<>' ' ' | awk '{print $5}' ")
if DEBUG:
print cmd
kvmSessionId = "fakeSessionID"
else:
kvmSessionId = os.popen(cmd).read().strip()
jnlp_url="vkvm/%s.jnlp" % kvmSessionId
jnlp_ok = run_curl(hostname, jnlp_url, jnlp_output)
# NOTE: <sessionid>.jnlp is not always valid, so try the second variation
cmd = "grep 'was not found on this server' "+jnlp_output+" >/dev/null"
not_found = system(cmd)
if not_found == 0:
print jnlp_ok, "Second attempt..."
jnlp_url="cgi-bin/webcgi/vkvmjnlp?id=%s" % date_s
jnlp_ok = run_curl(hostname, jnlp_url, jnlp_output)
if VERBOSE: system("cat "+jnlp_output)
return jnlp_ok
def get_pcu_fields(host_spec, options, return_ip=False):
pcuname = hspec_to_pcu(host_spec)
ret = []
if options.promptpassword:
passwd = getpass.getpass("DRAC passwd: ")
ret = [(pcuname, options.user, passwd, "DRAC")]
else:
cmd=(PREFIX+"/plcquery.py --action=get --type pcu --filter hostname=%s "+
"--fields hostname,username,password,model,ip") % pcuname
if DEBUG: print cmd
lines= os.popen(cmd, 'r').readlines()
for line in lines:
h_u_pw_model= line.strip().split()
hostname = h_u_pw_model[0]
user = h_u_pw_model[1]
passwd = h_u_pw_model[2]
model = h_u_pw_model[3]
ip = h_u_pw_model[4]
if return_ip:
ret.append((hostname, user, passwd, model, ip))
else:
ret.append((hostname, user, passwd, model))
return ret
def main():
global DEBUG
global VERBOSE
(command, host_spec, newpasswd, options, args) = parse_options()
DEBUG=options.debug
VERBOSE=options.verbose
confirm_cmd_exists("expect")
## NOTE: Make sure the session is setup correctly.
## Use os.system() b/c the custom system() function
## doesn't flush stdout correctly. :-/
if not options.promptpassword:
print "Verifying PLC Session...\n"
cmd=PREFIX+"/plcquery.py --action=checksession"
if DEBUG:
print cmd
else:
os.system(cmd)
if command == "shell":
pcu_fields = get_pcu_fields(host_spec, options)
print "Login can be slow. When you receive a prompt, try typing"
print " 'help' or 'racadm help' for a list of available commands."
print " 'exit' will exit the shell and 'drac.py' script.\n"
for hostname,user,passwd,model in pcu_fields:
system("expect %s/exp/SHELL.exp %s %s '%s'" %
(PREFIX, hostname, user, passwd))
elif command in ["console6", "console5"]:
pcu_fields = get_pcu_fields(host_spec, options)
if len(pcu_fields) != 1:
print "host spec '%s' did not return a solitary record" % host_spec
sys.exit(1)
(hostname,user,passwd,model) = pcu_fields[0]
if model != "DRAC":
msg = "Automatic console loading is not supported "
msg+= "for this model PCU: %s." % model
print msg
sys.exit(1)
print "Virtual Console depends on correct setup of JavaWebStart..."
jnlp_output = "/tmp/out.jnlp"
download_ok = drac_downloadConsoleJNLP(command, user, passwd,
hostname, jnlp_output)
if not download_ok:
print "Failed to download JNLP file from %s" % hostname
sys.exit(1)
print "Loading JavaWebStart."
system("javaws "+jnlp_output)
elif command == "getsysinfo":
pcu_fields = get_pcu_fields(host_spec, options)
if len(pcu_fields) == 0:
print "host spec '%s' did not return any records" % host_spec
sys.exit(1)
for hostname,user,passwd,model in pcu_fields:
if model not in ["DRAC", "IMM", "HPiLO"]:
print "%s is an unsupported PCU model" % model
continue
system("expect %s/exp/GETSYSINFO.exp %s %s '%s'" %
(PREFIX, hostname, user, passwd))
elif command == "reboot":
pcu_fields = get_pcu_fields(host_spec, options)
if len(pcu_fields) == 0:
print "host spec '%s' did not return any records" % host_spec
sys.exit(1)
for hostname,user,passwd,model in pcu_fields:
if model in ["DRAC", "IMM", "HPiLO"]:
system("expect %s/exp/REBOOT.exp %s %s '%s' %s %s" %
(PREFIX, hostname, user, passwd, model, options.debug))
elif model == "OpenIPMI":
cmd = "ipmitool -I lanplus -H %s -U %s -P '%s' power cycle"
cmd = cmd % (hostname, user, passwd)
system(cmd)
else:
print "%s is an unsupported PCU model" % model
continue
ts = time.strftime("%b %d %H:%M UTC", time.gmtime())
msg = REBOOT_MESSAGE % {'ts' : ts, 'hostname' : host_spec }
# TODO: add option to --send this message to ops@ list
print msg
elif command == "rebootdrac":
# After a shell login, some pcus can be "reset". i.e.
# TODO: IMM can be soft reset using 'resetsp'
# TODO: DRAC can be soft reset using 'racreset soft'
# TODO: HPiLO can be soft reset using 'reset /map1'
pass
elif command == "resetpassword":
## NOTE: be extra verbose for password resets, in case something goes
## wrong, to see where.
if options.promptpassword:
print "Password resets are not supported without updating PLC db."
print "Do not specify password prompt, and try again."
sys.exit(1)
pcu_fields = get_pcu_fields(host_spec, options)
if len(pcu_fields) != 1:
print "host spec '%s' did not return a single record" % host_spec
sys.exit(1)
(hostname,user,passwd,model) = pcu_fields[0]
if model != "DRAC":
print "Unsupported PCU model '%s' for password reset." % model
sys.exit(1)
cmd = ("expect %s/exp/RESET_PASSWORD.exp %s %s '%s' '%s'" %
(PREFIX, hostname, user, passwd, newpasswd))
# Always print, even if DEBUG is not on
if not DEBUG: print cmd
ret = system(cmd)
if ret != 0:
print "An error occurred resetting the password. Stopping"
sys.exit(1)
print "Updating password in PLC database."
cmd = (PREFIX+"/plcquery.py --action=update --type pcu "+
"--filter 'hostname=%s' "+
"--fields 'password=%s'") % (hostname, newpasswd)
# Always print, even if DEBUG is not on
if not DEBUG: print cmd
ret = system(cmd)
if ret != 0:
print "Password update may have failed."
print ("Before proceeding double check that the password "+
"update was successful.")
print "e.g. drac.py %s" % host_spec
sys.exit(1)
elif command == "list":
if options.promptpassword:
print "Password prompt is not supported for 'list'"
sys.exit(1)
pcu_fields = get_pcu_fields(host_spec, options, True)
if len(pcu_fields) == 0:
print "host spec '%s' did not return any records" % host_spec
sys.exit(1)
for hostname,user,passwd,model,ip in pcu_fields:
print "host: %s" % hostname[0:5]+hostname[6:]
print "pcu hostname: https://%s" % hostname
print "pcu IP: %s" % ip
print "pcu username: %s" % user
print "pcu password: %s" % passwd
print "pcu model: %s" % model
if __name__ == "__main__":
main()
| {
"content_hash": "9e00399999aadf89ee69fad1c06a4da7",
"timestamp": "",
"source": "github",
"line_count": 485,
"max_line_length": 81,
"avg_line_length": 35.30103092783505,
"alnum_prop": 0.5614158051515683,
"repo_name": "nkinkade/operator",
"id": "eca9d2ac147160b57a2dfe20ac1256614a3b7968",
"size": "17144",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "tools/drac.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "149528"
},
{
"name": "Shell",
"bytes": "673"
}
],
"symlink_target": ""
} |
import theano.tensor as T
class Operation():
def __init__(self, input, op_name):
self.input = input
self.operate = self.get_operation(op_name)
self.output = self.operate(input, axis=1)
def get_operation(self, op_name):
if op_name == 'sum':
return T.sum
elif op_name == 'mean':
return T.mean
elif op_name == 'max':
return T.max
		else:
			# The original called a project logger ('L') that is not imported in
			# this module; raise instead so an invalid name fails loudly.
			raise ValueError('Invalid operation name given: ' + op_name)
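# Illustrative sketch (not part of the original module): Operation wraps a
# Theano reduction over axis 1, e.g. for a symbolic minibatch matrix:
#   x = T.matrix('x')            # shape: (batch, features)
#   op = Operation(x, 'mean')
#   op.output                    # symbolic per-row mean, shape: (batch,)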
| {
"content_hash": "b5994888496fcd4eca7f25680292fd44",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 54,
"avg_line_length": 23.166666666666668,
"alnum_prop": 0.6474820143884892,
"repo_name": "nusnlp/corelm",
"id": "9612f394a9628642182d1ce81d0a0f9d6bb3ca73",
"size": "417",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dlm/models/components/operation.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "119460"
}
],
"symlink_target": ""
} |
"""ttsa.py: Traveling Tournament Problem Using Simulated Annealing"""
__author__ = "Colin Burgin"
__copyright__ = "Copyright 2017, Virginia Tech"
__credits__ = [""]
__license__ = "MIT"
__version__ = "1.0"
__maintainer__ = "Colin Burgin"
__email__ = "cburgin@vt.edu"
__status__ = "in progress"
# Standard Python Libraries
import random
import sys, copy
import math
class TTSA():
"""Traveling Tournament Simulated Annealing"""
def __init__(self, number_teams, seed, tau, beta, omega, delta, theta, maxc, maxp, maxr, gamma):
# Seed PRNG
        if seed == 0:
random.seed()
else:
random.seed(seed)
# Calculate schedule vars
self.number_teams = number_teams
self.weeks = (2 * self.number_teams) - 2
self.best_feasible_S = []
self.best_infeasible_S = []
# SA Parameters
self.tau_not = tau
self.beta = beta
self.omega_not = omega
self.omega = omega
self.delta = delta
self.theta = theta
self.maxC = maxc
self.maxP = maxp
self.maxR = maxr
self.gamma = gamma
# Set all the default vars for SA
self.S = self.build_schedule(self.number_teams)
# Read in the cost matrix
self.cost_matrix = []
self.cost_matrix = self.get_cost_matrix(self.number_teams)
# Perform the simulated annealing to solve the schedule
self.simulated_annealing()
# Print out the stats / result
print("\nThe best feasible schedule:")
self.print_schedule(self.best_feasible_S)
print("\nCost: " + str(self.cost_ttsa(self.best_feasible_S)))
print("Seed:", seed, "\tTau_0:", self.tau_not, "\tBeta:", self.beta, "\tOmega_0:", self.omega_not, "\tDelta:", self.delta, "\tTheta:", self.theta, "\tMaxC:", self.maxC, "\tMaxP:", self.maxP, "\tMaxR:", self.maxR, "\tGamma:", self.gamma, "\n")
    # The Simulated Annealing Algorithm TTSA from the TTP paper figure 2
def simulated_annealing(self):
# Set default vars
best_feasible = sys.maxsize
nbf = sys.maxsize
best_infeasible = sys.maxsize
nbi = sys.maxsize
best_tau = self.tau_not
tau = self.tau_not
reheat = 0
counter = 0
# Loop until no more reheats
while reheat <= self.maxR:
phase = 0
while phase <= self.maxP:
counter = 0
while counter <= self.maxC:
# Make a deepcopy of the schedule
S_prime = copy.deepcopy(self.S)
S_prime = self.random_move(S_prime)
cost_s = self.cost_ttsa(self.S)
cost_s_p = self.cost_ttsa(S_prime)
nbv_s_p = self.nbv(S_prime)
if( (cost_s_p < cost_s) or
(nbv_s_p == 0) and (cost_s_p < best_feasible) or
(nbv_s_p > 0) and (cost_s_p < best_infeasible) ):
accept = True
else:
if math.exp(-abs(cost_s - cost_s_p) / tau) > random.random():
accept = True
else:
accept = False
# Update best found feasible and infeasible schedules if necessary
if cost_s_p < best_feasible and nbv_s_p == 0:
self.best_feasible_S = copy.deepcopy(S_prime)
if cost_s_p < best_infeasible and nbv_s_p > 0:
self.best_infeasible_S = copy.deepcopy(S_prime)
# Set new values if it is accepted
if accept is True:
self.S = copy.deepcopy(S_prime)
# Calculate new values for nbf or nbi
if self.nbv(self.S) == 0:
nbf = min(self.cost_ttsa(self.S), best_feasible)
else:
nbi = min(self.cost_ttsa(self.S), best_infeasible)
self.best_infeasible_S = copy.deepcopy(S_prime)
# Restart the process if a better feasible or infeasible solution is found
if (nbf < best_feasible) or (nbi < best_infeasible):
reheat = 0
counter = 0
phase = 0
best_tau = tau
best_feasible = nbf
best_infeasible = nbi
# Calculate new omega
if self.nbv(self.S) == 0:
self.omega = self.omega / self.theta
else:
self.omega = self.omega * self.delta
else:
counter += 1
# End counter Loop
phase += 1
tau = tau * self.beta
# End phase Loop
reheat += 1
tau = 2 * best_tau
# End reheat Loop
def random_move(self, S):
# Select a random function to call on the schedule
choice = random.randint(0,4)
# Select and perform the operation
        if choice == 0:
            return self.swap_homes(S)
        elif choice == 1:
            return self.swap_rounds(S)
        elif choice == 2:
            return self.swap_teams(S)
        elif choice == 3:
            return self.partial_swap_rounds(S)
        else:
            return self.partial_swap_teams(S)
# Determine the number of violations in a given schedule
def nbv(self, S):
violations = 0
# Loop through the schedule looking for non-repeat violations
for team in range(len(S)):
for game in range(1, len(S[team])):
                if S[team][game-1][0] == S[team][game][0]:
violations += 1
# Loop through the schedule looking for atmost violations
for team in range(len(S)):
for game in range(3, len(S[team])):
if S[team][game-3][1] == "home" and S[team][game-2][1] == "home" and S[team][game-1][1] == "home" and S[team][game][1] == "home":
violations += 1
if S[team][game-3][1] == "away" and S[team][game-2][1] == "away" and S[team][game-1][1] == "away" and S[team][game][1] == "away":
violations += 1
return violations
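    # Illustrative sketch (not part of the original solver): nbv() counts one
    # violation per "no-repeat" breach (same opponent in consecutive rounds)
    # and one per "at-most-three" breach (a fourth consecutive home or away
    # game), e.g. a row containing ..., (2, "home"), (2, "away"), ... adds one
    # repeat violation, and four straight away games add one atmost violation.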
    # Builds the cost matrix for the corresponding number of teams
def get_cost_matrix(self, number_teams):
file_name = "data/data" + str(number_teams) + ".txt"
l = []
with open(file_name, 'r') as f:
for line in f:
line = line.strip()
if len(line) > 0:
l.append(line.split())
return l
# Calculate the TTSA cost
def cost_ttsa(self, S):
if self.nbv(S) == 0:
return self.cost(S)
else:
return math.sqrt(self.cost(S)**2 + (self.omega * self.fun(self.nbv(S))**2))
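    # Illustrative sketch (not part of the original solver): for an infeasible
    # schedule the penalised objective above is
    #   sqrt(cost(S)**2 + (omega * f(nbv(S)))**2)
    # e.g. with hypothetical values cost(S) = 1000, omega = 4000, nbv(S) = 3
    # (so f(3) ~ 1.70), the result is sqrt(1000**2 + 6809**2) ~ 6882.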
# define fun (f function)
def fun(self, v):
return 1 + math.sqrt(v) * math.log(v / 2)
# Calculate the cost of the input schedule
def cost(self, S):
total_cost = 0
cost_m = self.cost_matrix
# Loop through the schedule calculating the cost along the way
for team in S:
i = S.index(team)
team.append((None, "home"))
for game in team:
j = team.index(game)
start_loc = None
dest_loc = None
# Handle the first game case, get start location
                if j == 0:
start_loc = i
else:
                    if team[j-1][1] == "home":
start_loc = i
else:
start_loc = team[j-1][0] - 1
# Handle the last game case, get the travel location
                if j == len(team) - 1:
dest_loc = i
else:
                    if team[j][1] == "home":
dest_loc = i
else:
dest_loc = team[j][0] - 1
# Cost
total_cost += int(cost_m[start_loc][dest_loc])
# Pop off the placeholder location
team.pop()
return total_cost
# Builds a random starting schedule to build and improve on
def build_schedule(self, number_teams):
# Create an empty schedule
S = [[None for i in range(self.weeks)] for j in range(number_teams)]
# Call the recursive build function
return self.r_build_schedule(S, 0, 0)
# Recursive part of build schedule
def r_build_schedule(self, S, team, week):
        # If the schedule is full then return because it is complete
if self.schedule_full(S):
return S
# Calculate the next location
next_week = week + 1
next_team = team
if next_week == self.weeks:
next_week = 0
next_team += 1
# If there is already a game scheduled then move forward
if S[team][week] is not None:
return self.r_build_schedule(S, next_team, next_week)
# Find all of the possible games that can be scheduled, return if it isn't schedulable
possibilities = self.get_game(S, team, week)
random.shuffle(possibilities)
if possibilities is None:
return None
# Try all the possible games until one works
for p in possibilities:
try_S = [[c for c in r] for r in S]
# Set the game as well as the opponent
try_S[team][week] = p
self.set_opponent(try_S, team, week)
# Move forward with this attempt
result_S = self.r_build_schedule(try_S, next_team, next_week)
if result_S is not None:
return result_S
# Catch all
return None
    # Check to see if the schedule is full, inefficient
def schedule_full(self, S):
for week in S:
for game in week:
if game is None:
return False
return True
    # Given the schedule and a specific match, schedule the opponent for that match
def set_opponent(self, S, i, j):
match = S[i][j]
        if match[1] == "home":
S[match[0]-1][j] = (i+1, "away")
else:
S[match[0]-1][j] = (i+1, "home")
return S
# Given the schedule and an empty slot, determine the possible games that can be scheduled here
def get_game(self, S, i, j):
# Create a list of available teams
home = lambda x: (x, "home")
away = lambda x: (x, "away")
available = [f(x) for x in range(1, self.number_teams+1) for f in (home, away)]
# Remove self from list
        available = [k for k in available if k[0] != i+1]
# Remove games that this team already has on its schedule
available = [l for l in available if l not in S[i]]
# Remove opponents that are in concurrent games
col = [o[0] for o in [row[j] for row in S] if o is not None]
available = [m for m in available if m[0] not in col]
return available
# The move swaps the home and away roles of team T in pos i and j
    # Because this is going to be a random choice every time the function is called,
# the choice is just made inside of the function instead of being passed in.
def swap_homes(self, S):
# Choose a team to swap on
team = len(S) - 1
swap_loc = S[team].index(random.choice(S[team]))
swap_loc_mirror = S[team].index(self.home_away(S[team][swap_loc]))
# Swap the first game and its opponent
S[team][swap_loc] = self.home_away(S[team][swap_loc])
S = self.set_opponent(S, team, swap_loc)
# Swap the matching game and its opponent
S[team][swap_loc_mirror] = self.home_away(S[team][swap_loc_mirror])
S = self.set_opponent(S, team, swap_loc_mirror)
return S
# Given a game, swap the home/awayness of that game
def home_away(self, game):
        if game[1] == 'home':
return (game[0], 'away')
else:
return (game[0], 'home')
# The move simply swaps rounds k and l
    # Because this is going to be a random choice every time the function is called,
# the choice is just made inside of the function instead of being passed in.
def swap_rounds(self, S):
# Choose two different rounds to swap
choices = random.sample(list(range(len(S[0]))), 2)
    # Iterate through the teams swapping each round
for team in range(len(S)):
game_one = S[team][choices[0]]
game_two = S[team][choices[1]]
S[team][choices[0]] = game_two
S[team][choices[1]] = game_one
return S
# This move swaps the schedule for teams i and j except of course, when they play against each other
    # Because this is going to be a random choice every time the function is called,
# the choice is just made inside of the function instead of being passed in.
def swap_teams(self, S):
# Choose two different teams to swap
choices = random.sample(list(range(len(S))), 2)
# Swap the teams completely
team_one = S[choices[0]]
team_two = S[choices[1]]
S[choices[0]] = team_two
S[choices[1]] = team_one
# Resolve the same team conflicts
for game in range(len(S[choices[0]])):
# If the team is playing itself fix it and resolve opponent
            if S[choices[0]][game][0] - 1 == choices[0]:
S[choices[0]][game] = self.home_away(S[choices[1]][game])
S = self.set_opponent(S, choices[0], game)
# Resolve the opponents
for team in choices:
for game in range(len(S[team])):
S = self.set_opponent(S, team, game)
return S
    # This move considers team T and swaps its games at round k and l
    # Because this is going to be a random choice every time the function is called,
# the choice is just made inside of the function instead of being passed in.
def partial_swap_rounds(self, S):
# Choose a random team and two random rounds to swap
s_team = random.sample(list(range(len(S))), 1)[0]
s_rounds = random.sample(list(range(len(S[0]))), 2)
# Create a starting list
p_swap = [s_team]
# Chain ejection until everything is in the list
while 1:
# loop through the list adding new teams if necessary
for item in p_swap:
if S[item][s_rounds[0]][0]-1 not in p_swap:
p_swap.append(S[item][s_rounds[0]][0]-1)
if S[item][s_rounds[1]][0]-1 not in p_swap:
p_swap.append(S[item][s_rounds[1]][0]-1)
# Check to see if the list is fully inclusive
if (S[p_swap[-1]][s_rounds[0]][0]-1 in p_swap) and (S[p_swap[-1]][s_rounds[1]][0]-1 in p_swap) and (S[p_swap[-2]][s_rounds[0]][0]-1 in p_swap) and (S[p_swap[-2]][s_rounds[1]][0]-1 in p_swap):
break
# Loop through the list for one of the rounds and swap all the games in the list
for item in p_swap:
S = self.swap_game_round(S, item, s_rounds[0], s_rounds[1])
return S
# Swap games by same team different rounds
def swap_game_round(self, S, t, rl, rk):
game_one = S[t][rl]
game_two = S[t][rk]
S[t][rl] = game_two
S[t][rk] = game_one
return S
# This move considers round rk and swaps the games of teams Ti and Tj
    # Because this is going to be a random choice every time the function is called,
# the choice is just made inside of the function instead of being passed in.
def partial_swap_teams(self, S):
# Choose a random round and two random teams to swap
s_round = random.sample(list(range(len(S[0]))), 1)[0]
s_teams = random.sample(list(range(len(S))), 2)
        # Handle the case where the games cannot be swapped because it is invalid (can't play yourself)
if not (set(s_teams) - set([S[s_teams[0]][s_round][0]-1, S[s_teams[1]][s_round][0]-1])):
return S
# Create a starting list
p_swap = [S[s_teams[0]][s_round], S[s_teams[1]][s_round]]
# Chain ejection until everything is in the list
while 1:
# Loop through the list adding new teams if necessary
for item in p_swap:
if self.get_concurrent(S, s_teams[0], s_teams[1], item) not in p_swap:
p_swap.append(self.get_concurrent(S, s_teams[0], s_teams[1], item))
if self.get_concurrent(S, s_teams[1], s_teams[0], item) not in p_swap:
p_swap.append(self.get_concurrent(S, s_teams[1], s_teams[0], item))
if( (self.get_concurrent(S, s_teams[0], s_teams[1], p_swap[-1]) in p_swap) and (self.get_concurrent(S, s_teams[1], s_teams[0], p_swap[-1]) in p_swap) and
(self.get_concurrent(S, s_teams[0], s_teams[1], p_swap[-2]) in p_swap) and (self.get_concurrent(S, s_teams[1], s_teams[0], p_swap[-2]) in p_swap) ):
break
# Get the indices of the games found
p_indices = []
for item in p_swap:
p_indices.append(S[s_teams[0]].index(item))
# Loop through the list for one of the teams and swap all of the games and resolve opponents
for idx in p_indices:
S = self.swap_game_team(S, idx, s_teams[0], s_teams[1])
return S
# Swap games by same round different teams and resolve opponents
def swap_game_team(self, S, r, T1, T2):
game_one = S[T1][r]
game_two = S[T2][r]
S[T1][r] = game_two
S[T2][r] = game_one
S = self.set_opponent(S, T1, r)
S = self.set_opponent(S, T2, r)
return S
# Given a two teams and a game, find the concurrent game for the other teams
def get_concurrent(self, S, T1, T2, game):
for i, j in enumerate(S[T1]):
if j == game:
return S[T2][i]
# Prints the schedule in a way that is readable
def print_schedule(self, S):
for row in S:
print(*row, sep="\t")
| {
"content_hash": "6e2a25277ff2f8b124f96e3d47fc0860",
"timestamp": "",
"source": "github",
"line_count": 478,
"max_line_length": 250,
"avg_line_length": 38.77405857740586,
"alnum_prop": 0.5336678536743282,
"repo_name": "cburgin/TPP-SA",
"id": "065f6bf3cf0bd224638c34864f6b296e9c693d4f",
"size": "18558",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ttsa.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "21432"
}
],
"symlink_target": ""
} |
from functools import partial
import time
import os
import tensorflow.compat.v2 as tf
tf.enable_v2_behavior()
import bit_common
import bit_hyperrule
import bit_tf2.models as models
import input_pipeline_tf2_or_jax as input_pipeline
def reshape_for_keras(features, batch_size, crop_size):
features["image"] = tf.reshape(features["image"], (batch_size, crop_size, crop_size, 3))
features["label"] = tf.reshape(features["label"], (batch_size, -1))
return (features["image"], features["label"])
class BiTLRSched(tf.keras.callbacks.Callback):
def __init__(self, base_lr, num_samples):
self.step = 0
self.base_lr = base_lr
self.num_samples = num_samples
def on_train_batch_begin(self, batch, logs=None):
lr = bit_hyperrule.get_lr(self.step, self.num_samples, self.base_lr)
tf.keras.backend.set_value(self.model.optimizer.lr, lr)
self.step += 1
def main(args):
tf.io.gfile.makedirs(args.logdir)
logger = bit_common.setup_logger(args)
logger.info(f'Available devices: {tf.config.list_physical_devices()}')
tf.io.gfile.makedirs(args.bit_pretrained_dir)
bit_model_file = os.path.join(args.bit_pretrained_dir, f'{args.model}.h5')
if not tf.io.gfile.exists(bit_model_file):
model_url = models.KNOWN_MODELS[args.model]
logger.info(f'Downloading the model from {model_url}...')
tf.io.gfile.copy(model_url, bit_model_file)
# Set up input pipeline
dataset_info = input_pipeline.get_dataset_info(
args.dataset, 'train', args.examples_per_class)
# Distribute training
strategy = tf.distribute.MirroredStrategy()
num_devices = strategy.num_replicas_in_sync
print('Number of devices: {}'.format(num_devices))
resize_size, crop_size = bit_hyperrule.get_resolution_from_dataset(args.dataset)
data_train = input_pipeline.get_data(
dataset=args.dataset, mode='train',
repeats=None, batch_size=args.batch,
resize_size=resize_size, crop_size=crop_size,
examples_per_class=args.examples_per_class,
examples_per_class_seed=args.examples_per_class_seed,
mixup_alpha=bit_hyperrule.get_mixup(dataset_info['num_examples']),
num_devices=num_devices,
tfds_manual_dir=args.tfds_manual_dir)
data_test = input_pipeline.get_data(
dataset=args.dataset, mode='test',
repeats=1, batch_size=args.batch,
resize_size=resize_size, crop_size=crop_size,
examples_per_class=1, examples_per_class_seed=0,
mixup_alpha=None,
num_devices=num_devices,
tfds_manual_dir=args.tfds_manual_dir)
data_train = data_train.map(lambda x: reshape_for_keras(
x, batch_size=args.batch, crop_size=crop_size))
data_test = data_test.map(lambda x: reshape_for_keras(
x, batch_size=args.batch, crop_size=crop_size))
with strategy.scope():
filters_factor = int(args.model[-1])*4
model = models.ResnetV2(
num_units=models.NUM_UNITS[args.model],
num_outputs=21843,
filters_factor=filters_factor,
name="resnet",
trainable=True,
dtype=tf.float32)
model.build((None, None, None, 3))
logger.info(f'Loading weights...')
model.load_weights(bit_model_file)
logger.info(f'Weights loaded into model!')
model._head = tf.keras.layers.Dense(
units=dataset_info['num_classes'],
use_bias=True,
kernel_initializer="zeros",
trainable=True,
name="head/dense")
lr_supports = bit_hyperrule.get_schedule(dataset_info['num_examples'])
schedule_length = lr_supports[-1]
# NOTE: Let's not do that unless verified necessary and we do the same
# across all three codebases.
# schedule_length = schedule_length * 512 / args.batch
optimizer = tf.keras.optimizers.SGD(momentum=0.9)
loss_fn = tf.keras.losses.CategoricalCrossentropy(from_logits=True)
model.compile(optimizer=optimizer, loss=loss_fn, metrics=['accuracy'])
logger.info(f'Fine-tuning the model...')
steps_per_epoch = args.eval_every or schedule_length
history = model.fit(
data_train,
steps_per_epoch=steps_per_epoch,
epochs=schedule_length // steps_per_epoch,
validation_data=data_test, # here we are only using
# this data to evaluate our performance
callbacks=[BiTLRSched(args.base_lr, dataset_info['num_examples'])],
)
for epoch, accu in enumerate(history.history['val_accuracy']):
logger.info(
f'Step: {epoch * args.eval_every}, '
f'Test accuracy: {accu:0.3f}')
if __name__ == "__main__":
parser = bit_common.argparser(models.KNOWN_MODELS.keys())
parser.add_argument("--tfds_manual_dir", default=None,
help="Path to maually downloaded dataset.")
parser.add_argument("--batch_eval", default=32, type=int,
help="Eval batch size.")
main(parser.parse_args())
| {
"content_hash": "15d26e15e05c54b9ad8948c9bb5f1c3c",
"timestamp": "",
"source": "github",
"line_count": 136,
"max_line_length": 90,
"avg_line_length": 35.330882352941174,
"alnum_prop": 0.6795005202913632,
"repo_name": "google-research/big_transfer",
"id": "5292887839d96f884e66a35882199fa15baff319",
"size": "5417",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bit_tf2/train.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "70283"
}
],
"symlink_target": ""
} |
"""
pyexcel.plugin.parsers.django
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    Export data into database datatables
:copyright: (c) 2015-2017 by Onni Software Ltd.
:license: New BSD License
"""
import pyexcel_io.database.common as django
from pyexcel_io import get_data, iget_data
from pyexcel.parser import DbParser
class DjangoExporter(DbParser):
"""Export data from django model"""
def parse_db(self, argument,
export_columns_list=None, on_demand=True,
**keywords):
models = argument
exporter = django.DjangoModelExporter()
if export_columns_list is None:
export_columns_list = [None] * len(models)
for model, export_columns in zip(models, export_columns_list):
adapter = django.DjangoModelExportAdapter(model, export_columns)
exporter.append(adapter)
if on_demand:
sheets, _ = iget_data(
exporter, file_type=self._file_type, **keywords)
else:
sheets = get_data(exporter, file_type=self._file_type, **keywords)
return sheets
| {
"content_hash": "953dc617215e625c8b0d67f4924b2785",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 78,
"avg_line_length": 33.878787878787875,
"alnum_prop": 0.6082289803220036,
"repo_name": "caspartse/QQ-Groups-Spider",
"id": "a2bf1b155a32c1d3375b9134de4f54a683270cfa",
"size": "1118",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vendor/pyexcel/plugins/parsers/django.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "157970"
},
{
"name": "Python",
"bytes": "10416"
},
{
"name": "Smarty",
"bytes": "9490"
}
],
"symlink_target": ""
} |
import gc
import sys
import time
import unittest
from django.dispatch import Signal, receiver
if sys.platform.startswith('java'):
def garbage_collect():
# Some JVM GCs will execute finalizers in a different thread, meaning
# we need to wait for that to complete before we go on looking for the
# effects of that.
gc.collect()
time.sleep(0.1)
elif hasattr(sys, "pypy_version_info"):
def garbage_collect():
# Collecting weakreferences can take two collections on PyPy.
gc.collect()
gc.collect()
else:
def garbage_collect():
gc.collect()
def receiver_1_arg(val, **kwargs):
return val
class Callable(object):
def __call__(self, val, **kwargs):
return val
def a(self, val, **kwargs):
return val
a_signal = Signal(providing_args=["val"])
b_signal = Signal(providing_args=["val"])
c_signal = Signal(providing_args=["val"])
class DispatcherTests(unittest.TestCase):
"""Test suite for dispatcher (barely started)"""
def _testIsClean(self, signal):
"""Assert that everything has been cleaned up automatically"""
self.assertEqual(signal.receivers, [])
# force cleanup just in case
signal.receivers = []
def testExact(self):
a_signal.connect(receiver_1_arg, sender=self)
expected = [(receiver_1_arg,"test")]
result = a_signal.send(sender=self, val="test")
self.assertEqual(result, expected)
a_signal.disconnect(receiver_1_arg, sender=self)
self._testIsClean(a_signal)
def testIgnoredSender(self):
a_signal.connect(receiver_1_arg)
expected = [(receiver_1_arg,"test")]
result = a_signal.send(sender=self, val="test")
self.assertEqual(result, expected)
a_signal.disconnect(receiver_1_arg)
self._testIsClean(a_signal)
def testGarbageCollected(self):
a = Callable()
a_signal.connect(a.a, sender=self)
expected = []
del a
garbage_collect()
result = a_signal.send(sender=self, val="test")
self.assertEqual(result, expected)
self._testIsClean(a_signal)
def testMultipleRegistration(self):
a = Callable()
a_signal.connect(a)
a_signal.connect(a)
a_signal.connect(a)
a_signal.connect(a)
a_signal.connect(a)
a_signal.connect(a)
result = a_signal.send(sender=self, val="test")
self.assertEqual(len(result), 1)
self.assertEqual(len(a_signal.receivers), 1)
del a
del result
garbage_collect()
self._testIsClean(a_signal)
def testUidRegistration(self):
def uid_based_receiver_1(**kwargs):
pass
def uid_based_receiver_2(**kwargs):
pass
a_signal.connect(uid_based_receiver_1, dispatch_uid = "uid")
a_signal.connect(uid_based_receiver_2, dispatch_uid = "uid")
self.assertEqual(len(a_signal.receivers), 1)
a_signal.disconnect(dispatch_uid = "uid")
self._testIsClean(a_signal)
def testRobust(self):
"""Test the sendRobust function"""
def fails(val, **kwargs):
raise ValueError('this')
a_signal.connect(fails)
result = a_signal.send_robust(sender=self, val="test")
err = result[0][1]
self.assertIsInstance(err, ValueError)
self.assertEqual(err.args, ('this',))
a_signal.disconnect(fails)
self._testIsClean(a_signal)
def testDisconnection(self):
receiver_1 = Callable()
receiver_2 = Callable()
receiver_3 = Callable()
a_signal.connect(receiver_1)
a_signal.connect(receiver_2)
a_signal.connect(receiver_3)
a_signal.disconnect(receiver_1)
del receiver_2
garbage_collect()
a_signal.disconnect(receiver_3)
self._testIsClean(a_signal)
def test_has_listeners(self):
self.assertFalse(a_signal.has_listeners())
self.assertFalse(a_signal.has_listeners(sender=object()))
receiver_1 = Callable()
a_signal.connect(receiver_1)
self.assertTrue(a_signal.has_listeners())
self.assertTrue(a_signal.has_listeners(sender=object()))
a_signal.disconnect(receiver_1)
self.assertFalse(a_signal.has_listeners())
self.assertFalse(a_signal.has_listeners(sender=object()))
class ReceiverTestCase(unittest.TestCase):
"""
Test suite for receiver.
"""
def testReceiverSingleSignal(self):
@receiver(a_signal)
def f(val, **kwargs):
self.state = val
self.state = False
a_signal.send(sender=self, val=True)
self.assertTrue(self.state)
def testReceiverSignalList(self):
@receiver([a_signal, b_signal, c_signal])
def f(val, **kwargs):
self.state.append(val)
self.state = []
a_signal.send(sender=self, val='a')
c_signal.send(sender=self, val='c')
b_signal.send(sender=self, val='b')
self.assertIn('a', self.state)
self.assertIn('b', self.state)
self.assertIn('c', self.state)
| {
"content_hash": "78c1d264f3c13edafd5d77100f774748",
"timestamp": "",
"source": "github",
"line_count": 164,
"max_line_length": 78,
"avg_line_length": 31.48170731707317,
"alnum_prop": 0.6135967460778617,
"repo_name": "makinacorpus/django",
"id": "5f7dca87cc5f44530123fb94bd25c2b1eb94f139",
"size": "5163",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/dispatch/tests/test_dispatcher.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "98175"
},
{
"name": "Python",
"bytes": "8391980"
},
{
"name": "Shell",
"bytes": "12135"
}
],
"symlink_target": ""
} |
"""Disallow seconds in durations
Revision ID: 178d297eae7e
Revises: cf9e1b4e2f5f
Create Date: 2021-05-27 13:14:59.253773
"""
from alembic import op
# revision identifiers, used by Alembic.
revision = '178d297eae7e'
down_revision = 'cf9e1b4e2f5f'
branch_labels = None
depends_on = None
def upgrade():
op.execute('''
UPDATE events.contributions SET duration = date_trunc('minute', duration) WHERE date_trunc('minute', duration) != duration;
UPDATE events.subcontributions SET duration = date_trunc('minute', duration) WHERE date_trunc('minute', duration) != duration;
UPDATE events.breaks SET duration = date_trunc('minute', duration) WHERE date_trunc('minute', duration) != duration;
UPDATE events.session_blocks SET duration = date_trunc('minute', duration) WHERE date_trunc('minute', duration) != duration;
UPDATE events.sessions SET default_contribution_duration = date_trunc('minute', default_contribution_duration) WHERE date_trunc('minute', default_contribution_duration) != default_contribution_duration;
''')
# force execution of trigger events
op.execute('SET CONSTRAINTS ALL IMMEDIATE')
op.create_check_constraint(
'duration_no_seconds',
'breaks',
"date_trunc('minute', duration) = duration",
schema='events'
)
op.create_check_constraint(
'duration_no_seconds',
'contributions',
"date_trunc('minute', duration) = duration",
schema='events'
)
op.create_check_constraint(
'duration_no_seconds',
'session_blocks',
"date_trunc('minute', duration) = duration",
schema='events'
)
op.create_check_constraint(
'duration_no_seconds',
'subcontributions',
"date_trunc('minute', duration) = duration",
schema='events'
)
op.create_check_constraint(
'default_contribution_duration_no_seconds',
'sessions',
"date_trunc('minute', default_contribution_duration) = default_contribution_duration",
schema='events'
)
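# Illustrative note (not part of the original migration): the check constraints
# above accept only minute-aligned values, e.g. in PostgreSQL
#   date_trunc('minute', interval '00:20:00')  -- equals the value, passes
#   date_trunc('minute', interval '00:20:30')  -- truncates to 00:20:00, fails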
def downgrade():
op.drop_constraint('ck_breaks_duration_no_seconds', 'breaks', schema='events')
op.drop_constraint('ck_contributions_duration_no_seconds', 'contributions', schema='events')
op.drop_constraint('ck_session_blocks_duration_no_seconds', 'session_blocks', schema='events')
op.drop_constraint('ck_subcontributions_duration_no_seconds', 'subcontributions', schema='events')
op.drop_constraint('ck_sessions_default_contribution_duration_no_seconds', 'sessions', schema='events')
| {
"content_hash": "e22d4e08cb1e6695edf94d89527eb8bb",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 210,
"avg_line_length": 38.492537313432834,
"alnum_prop": 0.6793330748352074,
"repo_name": "ThiefMaster/indico",
"id": "71e9328684c4b474ab09b2edfe27bda9521e0577",
"size": "2579",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "indico/migrations/versions/20210527_1314_178d297eae7e_disallow_seconds_in_durations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "34704"
},
{
"name": "HTML",
"bytes": "1411006"
},
{
"name": "JavaScript",
"bytes": "2083786"
},
{
"name": "Mako",
"bytes": "1527"
},
{
"name": "Python",
"bytes": "5133951"
},
{
"name": "SCSS",
"bytes": "476568"
},
{
"name": "Shell",
"bytes": "3877"
},
{
"name": "TeX",
"bytes": "23327"
},
{
"name": "XSLT",
"bytes": "1504"
}
],
"symlink_target": ""
} |
"""Checker functions for filtering."""
from warnings import warn
import numpy as np
###################################################################################################
###################################################################################################
def check_filter_definition(pass_type, f_range):
"""Check a filter definition for validity, and get f_lo and f_hi.
Parameters
----------
pass_type : {'bandpass', 'bandstop', 'lowpass', 'highpass'}
Which kind of filter to apply:
* 'bandpass': apply a bandpass filter
* 'bandstop': apply a bandstop (notch) filter
* 'lowpass': apply a lowpass filter
* 'highpass' : apply a highpass filter
f_range : tuple of (float, float) or float
Cutoff frequency(ies) used for filter, specified as f_lo & f_hi.
For 'bandpass' & 'bandstop', must be a tuple.
For 'lowpass' or 'highpass', can be a float that specifies pass frequency, or can be
a tuple and is assumed to be (None, f_hi) for 'lowpass', and (f_lo, None) for 'highpass'.
Returns
-------
f_lo : float or None
The lower frequency range of the filter, specifying the highpass frequency, if specified.
f_hi : float or None
The higher frequency range of the filter, specifying the lowpass frequency, if specified.
"""
if pass_type not in ['bandpass', 'bandstop', 'lowpass', 'highpass']:
raise ValueError('Filter passtype not understood.')
## Check that frequency cutoff inputs are appropriate
# For band filters, 2 inputs required & second entry must be > first
if pass_type in ('bandpass', 'bandstop'):
if isinstance(f_range, tuple) and f_range[0] >= f_range[1]:
raise ValueError('Second cutoff frequency must be greater than first.')
elif isinstance(f_range, (int, float)) or len(f_range) != 2:
raise ValueError('Two cutoff frequencies required for bandpass and bandstop filters')
# Map f_range to f_lo and f_hi
f_lo, f_hi = f_range
# For lowpass and highpass can be tuple or int/float
if pass_type == 'lowpass':
if isinstance(f_range, (int, float)):
f_hi = f_range
elif isinstance(f_range, tuple):
f_hi = f_range[1]
f_lo = None
if pass_type == 'highpass':
if isinstance(f_range, (int, float)):
f_lo = f_range
elif isinstance(f_range, tuple):
f_lo = f_range[0]
f_hi = None
# Make sure pass freqs are floats
f_lo = float(f_lo) if f_lo else f_lo
f_hi = float(f_hi) if f_hi else f_hi
return f_lo, f_hi
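# Illustrative sketch (not part of the original module): how f_range maps onto
# the returned (f_lo, f_hi) pair for each pass type:
#   check_filter_definition('bandpass', (8, 12))   # -> (8.0, 12.0)
#   check_filter_definition('lowpass', 20)         # -> (None, 20.0)
#   check_filter_definition('highpass', 3)         # -> (3.0, None)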
def check_filter_properties(b_vals, a_vals, fs, pass_type, f_range, transitions=(-20, -3), verbose=True):
"""Check a filters properties, including pass band and transition band.
Parameters
----------
b_vals : 1d array
B value filter coefficients for a filter.
a_vals : 1d array
A value filter coefficients for a filter.
fs : float
Sampling rate, in Hz.
pass_type : {'bandpass', 'bandstop', 'lowpass', 'highpass'}
Which kind of filter to apply:
* 'bandpass': apply a bandpass filter
* 'bandstop': apply a bandstop (notch) filter
* 'lowpass': apply a lowpass filter
* 'highpass' : apply a highpass filter
f_range : tuple of (float, float) or float
Cutoff frequency(ies) used for filter, specified as f_lo & f_hi.
For 'bandpass' & 'bandstop', must be a tuple.
For 'lowpass' or 'highpass', can be a float that specifies pass frequency, or can be
a tuple and is assumed to be (None, f_hi) for 'lowpass', and (f_lo, None) for 'highpass'.
transitions : tuple of (float, float), optional, default: (-20, -3)
Cutoffs, in dB, that define the transition band.
verbose : bool, optional, default: True
Whether to print out transition and pass bands.
Returns
-------
passes : bool
Whether all the checks pass. False if one or more checks fail.
"""
# Import utility functions inside function to avoid circular imports
from neurodsp.filt.utils import (compute_frequency_response,
compute_pass_band, compute_transition_band)
# Initialize variable to keep track if all checks pass
passes = True
# Compute the frequency response
f_db, db = compute_frequency_response(b_vals, a_vals, fs)
# Check that frequency response goes below transition level (has significant attenuation)
if np.min(db) >= transitions[0]:
passes = False
        warn('The filter attenuation never goes below {} dB. '\
             'Increase filter length.'.format(transitions[0]))
# If there is no attenuation, cannot calculate bands, so return here
return passes
# Check that both sides of a bandpass have significant attenuation
if pass_type == 'bandpass':
if db[0] >= transitions[0] or db[-1] >= transitions[0]:
passes = False
            warn('The low or high frequency stopband never gets attenuated by '\
                 'more than {} dB. Increase filter length.'.format(abs(transitions[0])))
# Compute pass & transition bandwidth
pass_bw = compute_pass_band(fs, pass_type, f_range)
transition_bw = compute_transition_band(f_db, db, transitions[0], transitions[1])
# Raise warning if transition bandwidth is too high
if transition_bw > pass_bw:
passes = False
        warn('Transition bandwidth is {:.1f} Hz. This is greater than the desired '\
             'pass/stop bandwidth of {:.1f} Hz'.format(transition_bw, pass_bw))
# Print out transition bandwidth and pass bandwidth to the user
if verbose:
print('Transition bandwidth is {:.1f} Hz.'.format(transition_bw))
print('Pass/stop bandwidth is {:.1f} Hz.'.format(pass_bw))
return passes
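# Illustrative sketch (not part of the original module): checking a hypothetical
# FIR bandpass filter designed with scipy (a_vals = 1 for an FIR filter):
#   from scipy.signal import firwin
#   fs = 500
#   b_vals = firwin(1001, (8, 12), pass_zero=False, fs=fs)
#   passes = check_filter_properties(b_vals, 1, fs, 'bandpass', (8, 12))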
| {
"content_hash": "e63b33b7fe5c6162bdcc32208355898d",
"timestamp": "",
"source": "github",
"line_count": 146,
"max_line_length": 105,
"avg_line_length": 40.66438356164384,
"alnum_prop": 0.6090618157318511,
"repo_name": "srcole/neurodsp",
"id": "4140e88ce28973f969cc9a7d8610c00a4114ce74",
"size": "5937",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "neurodsp/filt/checks.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "878"
},
{
"name": "Makefile",
"bytes": "1550"
},
{
"name": "Python",
"bytes": "192864"
},
{
"name": "Shell",
"bytes": "771"
},
{
"name": "TeX",
"bytes": "6424"
}
],
"symlink_target": ""
} |
""" Framework for filtered REST requests
@copyright: 2013-14 (c) Sahana Software Foundation
@license: MIT
@requires: U{B{I{gluon}} <http://web2py.com>}
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ("S3DateFilter",
"S3Filter",
"S3FilterForm",
"S3FilterString",
"S3FilterWidget",
"S3HierarchyFilter",
"S3LocationFilter",
"S3OptionsFilter",
"S3RangeFilter",
"S3TextFilter",
"get_s3_filter_opts",
)
import datetime
import re
try:
import json # try stdlib (Python 2.6)
except ImportError:
try:
import simplejson as json # try external module
except:
import gluon.contrib.simplejson as json # fallback to pure-Python module
try:
# Python 2.7
from collections import OrderedDict
except:
# Python 2.6
from gluon.contrib.simplejson.ordered_dict import OrderedDict
from gluon import *
from gluon.storage import Storage
from gluon.tools import callback
from s3rest import S3Method
from s3query import S3ResourceField, S3ResourceQuery, S3URLQuery
from s3utils import s3_get_foreign_key, s3_unicode, S3TypeConverter
from s3validators import *
from s3widgets import S3DateWidget, S3DateTimeWidget, S3GroupedOptionsWidget, S3MultiSelectWidget, S3HierarchyWidget
# Compact JSON encoding
SEPARATORS = (",", ":")
# =============================================================================
def get_s3_filter_opts(tablename,
fieldname = "name",
location_filter = False,
org_filter = False,
none = False,
translate = False,
):
"""
Lazy options getter
- this is useful when the expected number of options is significantly smaller than the number of records to iterate through
NB This reason is no longer required with S3Filter, but is a legacy from S3Search: S3Filter already does an efficient Reverse-Query
@ToDo: Deprecate
- note this doesn't check if options are actually in-use
@param tablename: the name of the lookup table
@param fieldname: the name of the field to represent options with
@param location_filter: whether to filter the values by location
@param org_filter: whether to filter the values by root_org
@param none: whether to include an option for None
@param translate: whether to translate the values
"""
auth = current.auth
table = current.s3db.table(tablename)
if auth.s3_has_permission("read", table):
query = auth.s3_accessible_query("read", table)
if location_filter:
location = current.session.s3.location_filter
if location:
query &= (table.location_id == location)
if org_filter:
root_org = auth.root_org()
if root_org:
query &= ((table.organisation_id == root_org) | \
(table.organisation_id == None))
#else:
# query &= (table.organisation_id == None)
rows = current.db(query).select(table.id,
table[fieldname],
# Options are sorted later
#orderby = table[fieldname]
)
if translate:
T = current.T
opts = dict((row.id, T(row[fieldname])) for row in rows)
else:
opts = dict((row.id, row[fieldname]) for row in rows)
if none:
opts[None] = current.messages["NONE"]
else:
opts = {}
return opts
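# Illustrative sketch (not part of the original module): typical use is as a
# lazy options callable for an options filter widget, e.g.
#   S3OptionsFilter("organisation_id",
#                   options = lambda: get_s3_filter_opts("org_organisation",
#                                                        translate = True),
#                   )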
# =============================================================================
class S3FilterWidget(object):
""" Filter widget for interactive search forms (base class) """
#: the HTML class for the widget type
_class = "generic-filter"
#: the default query operator(s) for the widget type
operator = None
#: alternatives for client-side changeable operators
alternatives = None
# -------------------------------------------------------------------------
def widget(self, resource, values):
"""
Prototype method to render this widget as an instance of
a web2py HTML helper class, to be implemented by subclasses.
@param resource: the S3Resource to render with widget for
@param values: the values for this widget from the URL query
"""
raise NotImplementedError
# -------------------------------------------------------------------------
def variable(self, resource, get_vars=None):
"""
Prototype method to generate the name for the URL query variable
for this widget, can be overwritten in subclasses.
@param resource: the resource
@return: the URL query variable name (or list of
variable names if there are multiple operators)
"""
label, self.selector = self._selector(resource, self.field)
if not self.selector:
return None
if self.alternatives and get_vars is not None:
# Get the actual operator from get_vars
operator = self._operator(get_vars, self.selector)
if operator:
self.operator = operator
if "label" not in self.opts:
self.opts["label"] = label
return self._variable(self.selector, self.operator)
# -------------------------------------------------------------------------
def data_element(self, variable):
"""
Prototype method to construct the hidden element that holds the
URL query term corresponding to an input element in the widget.
@param variable: the URL query variable
"""
if type(variable) is list:
variable = "&".join(variable)
return INPUT(_type="hidden",
_id="%s-data" % self.attr["_id"],
_class="filter-widget-data %s-data" % self._class,
_value=variable)
# -------------------------------------------------------------------------
# Helper methods
#
def __init__(self, field=None, **attr):
"""
Constructor to configure the widget
@param field: the selector(s) for the field(s) to filter by
@param attr: configuration options for this widget
Configuration options:
@keyword label: label for the widget
@keyword comment: comment for the widget
@keyword hidden: render widget initially hidden (="advanced"
option)
@keyword levels: list of location hierarchy levels
(L{S3LocationFilter})
@keyword widget: widget to use (L{S3OptionsFilter}),
"select", "multiselect" (default),
or "groupedopts"
@keyword cols: number of columns of checkboxes (L{S3OptionsFilter}
and L{S3LocationFilter} with "groupedopts" widget)
@keyword filter: show filter for options (L{S3OptionsFilter},
L{S3LocationFilter} with "multiselect" widget)
@keyword header: show header in widget (L{S3OptionsFilter},
L{S3LocationFilter} with "multiselect" widget)
@keyword selectedList: number of selected items to show before
collapsing into number of items
(L{S3OptionsFilter}, L{S3LocationFilter}
with "multiselect" widget)
@keyword no_opts: text to show if no options available
(L{S3OptionsFilter}, L{S3LocationFilter})
@keyword resource: alternative resource to look up options
(L{S3LocationFilter}, L{S3OptionsFilter})
@keyword lookup: field in the alternative resource to look up
options (L{S3LocationFilter})
@keyword options: fixed set of options (L{S3OptionsFilter}: dict
of {value: label} or a callable that returns one,
L{S3LocationFilter}: list of gis_location IDs)
@keyword size: maximum size of multi-letter options groups
(L{S3OptionsFilter} with "groupedopts" widget)
@keyword help_field: field in the referenced table to display on
hovering over a foreign key option
(L{S3OptionsFilter} with "groupedopts" widget)
@keyword none: label for explicit None-option in many-to-many
fields (L{S3OptionsFilter})
@keyword fieldtype: explicit field type "date" or "datetime" to
use for context or virtual fields
(L{S3DateFilter})
@keyword hide_time: don't show time selector (L{S3DateFilter})
"""
self.field = field
self.alias = None
attributes = Storage()
options = Storage()
for k, v in attr.iteritems():
if k[0] == "_":
attributes[k] = v
else:
options[k] = v
self.attr = attributes
self.opts = options
self.selector = None
self.values = Storage()
# -------------------------------------------------------------------------
def __call__(self, resource, get_vars=None, alias=None):
"""
Entry point for the form builder
@param resource: the S3Resource to render the widget for
@param get_vars: the GET vars (URL query vars) to prepopulate
the widget
@param alias: the resource alias to use
"""
self.alias = alias
# Initialize the widget attributes
self._attr(resource)
# Extract the URL values to populate the widget
variable = self.variable(resource, get_vars)
if type(variable) is list:
values = Storage()
for k in variable:
if k in self.values:
values[k] = self.values[k]
else:
values[k] = self._values(get_vars, k)
else:
if variable in self.values:
values = self.values[variable]
else:
values = self._values(get_vars, variable)
# Construct and populate the widget
widget = self.widget(resource, values)
# Recompute variable in case operator got changed in widget()
if self.alternatives:
variable = self._variable(self.selector, self.operator)
# Construct the hidden data element
data = self.data_element(variable)
if type(data) is list:
data.append(widget)
else:
data = [data, widget]
return TAG[""](*data)
# -------------------------------------------------------------------------
def _attr(self, resource):
""" Initialize and return the HTML attributes for this widget """
_class = self._class
# Construct name and id for the widget
attr = self.attr
if "_name" not in attr:
if not resource:
raise SyntaxError("%s: _name parameter required " \
"when rendered without resource." % \
self.__class__.__name__)
flist = self.field
if type(flist) is not list:
flist = [flist]
colnames = []
for f in flist:
rfield = S3ResourceField(resource, f)
colname = rfield.colname
if colname:
colnames.append(colname)
else:
colnames.append(rfield.fname)
name = "%s-%s-%s" % (resource.alias, "-".join(colnames), _class)
attr["_name"] = name.replace(".", "_")
if "_id" not in attr:
attr["_id"] = attr["_name"]
return attr
# -------------------------------------------------------------------------
@classmethod
def _operator(cls, get_vars, selector):
"""
Helper method to get the operators from the URL query
@param get_vars: the GET vars (a dict)
@param selector: field selector
@return: query operator - None, str or list
"""
variables = ["%s__%s" % (selector, op) for op in cls.alternatives]
slen = len(selector) + 2
operators = [k[slen:] for k, v in get_vars.iteritems()
if k in variables]
if not operators:
return None
elif len(operators) == 1:
return operators[0]
else:
return operators
# -------------------------------------------------------------------------
def _prefix(self, selector):
"""
Helper method to prefix an unprefixed field selector
            @param selector: the field selector
            @return: the selector prefixed with self.alias (or with "~" if
                     no alias is set)
"""
alias = self.alias
if alias is None:
alias = "~"
if "." not in selector.split("$", 1)[0]:
return "%s.%s" % (alias, selector)
else:
return selector
# -------------------------------------------------------------------------
def _selector(self, resource, fields):
"""
Helper method to generate a filter query selector for the
given field(s) in the given resource.
@param resource: the S3Resource
@param fields: the field selectors (as strings)
@return: the field label and the filter query selector, or None
if none of the field selectors could be resolved
"""
prefix = self._prefix
label = None
if not fields:
return label, None
if not isinstance(fields, (list, tuple)):
fields = [fields]
selectors = []
for field in fields:
if resource:
try:
rfield = S3ResourceField(resource, field)
except (AttributeError, TypeError):
continue
if not rfield.field and not rfield.virtual:
# Unresolvable selector
continue
if not label:
label = rfield.label
selectors.append(prefix(rfield.selector))
else:
selectors.append(field)
if selectors:
return label, "|".join(selectors)
else:
return label, None
# -------------------------------------------------------------------------
@staticmethod
def _values(get_vars, variable):
"""
Helper method to get all values of a URL query variable
@param get_vars: the GET vars (a dict)
@param variable: the name of the query variable
@return: a list of values
"""
if not variable:
return []
elif variable in get_vars:
values = S3URLQuery.parse_value(get_vars[variable])
if not isinstance(values, (list, tuple)):
values = [values]
return values
else:
return []
# -------------------------------------------------------------------------
@classmethod
def _variable(cls, selector, operator):
"""
Construct URL query variable(s) name from a filter query
selector and the given operator(s)
@param selector: the selector
@param operator: the operator (or tuple/list of operators)
@return: the URL query variable name (or list of variable names)
"""
if isinstance(operator, (tuple, list)):
return [cls._variable(selector, o) for o in operator]
elif operator:
return "%s__%s" % (selector, operator)
else:
return selector
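# -----------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): how _variable() maps
# a filter query selector and operator(s) to URL query variable names. The
# selector "~.date" is an assumption chosen for illustration only.
def _example_variable_names():
    """ Show the URL variable naming convention (illustrative only) """
    # A single operator yields a single variable name: "~.date__ge"
    single = S3FilterWidget._variable("~.date", "ge")
    # A list of operators yields one variable per operator:
    # ["~.date__ge", "~.date__le"]
    multiple = S3FilterWidget._variable("~.date", ["ge", "le"])
    # No operator yields the bare selector: "~.date"
    bare = S3FilterWidget._variable("~.date", None)
    return single, multiple, bare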
# =============================================================================
class S3TextFilter(S3FilterWidget):
""" Text filter widget """
_class = "text-filter"
operator = "like"
# -------------------------------------------------------------------------
def widget(self, resource, values):
"""
Render this widget as HTML helper object(s)
@param resource: the resource
@param values: the search values from the URL query
"""
attr = self.attr
if "_size" not in attr:
attr.update(_size="40")
if "_class" in attr and attr["_class"]:
_class = "%s %s" % (attr["_class"], self._class)
else:
_class = self._class
attr["_class"] = _class
attr["_type"] = "text"
values = [v.strip("*") for v in values if v is not None]
if values:
attr["_value"] = " ".join(values)
return INPUT(**attr)
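# -----------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original module): a text filter
# as it might be declared in a resource's filter_widgets configuration. The
# field selectors "name" and "comments" are assumptions for illustration.
def _example_text_filter():
    """ Return an example S3TextFilter declaration (illustrative only) """
    return S3TextFilter(["name", "comments"],
                        label = current.T("Search"),
                        # With multiple selectors, the URL query variable
                        # takes the form ?~.name|~.comments__like=<value>
                        )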
# =============================================================================
class S3RangeFilter(S3FilterWidget):
""" Numerical Range Filter Widget """
# Overall class
_class = "range-filter"
# Class for visible input boxes.
_input_class = "%s-%s" % (_class, "input")
operator = ["ge", "le"]
# Untranslated labels for individual input boxes.
input_labels = {"ge": "Minimum", "le": "Maximum"}
# -------------------------------------------------------------------------
def data_element(self, variables):
"""
Overrides S3FilterWidget.data_element(), constructs multiple
hidden INPUTs (one per variable) with element IDs of the form
<id>-<operator>-data (where no operator is translated as "eq").
@param variables: the variables
"""
if variables is None:
operators = self.operator
if type(operators) is not list:
operators = [operators]
variables = self._variable(self.selector, operators)
else:
# Split the operators off the ends of the variables.
if type(variables) is not list:
variables = [variables]
operators = [v.split("__")[1]
if "__" in v else "eq"
for v in variables]
elements = []
id = self.attr["_id"]
for o, v in zip(operators, variables):
elements.append(
INPUT(_type="hidden",
_id="%s-%s-data" % (id, o),
_class="filter-widget-data %s-data" % self._class,
_value=v))
return elements
# -------------------------------------------------------------------------
def widget(self, resource, values):
"""
Render this widget as HTML helper object(s)
@param resource: the resource
@param values: the search values from the URL query
"""
attr = self.attr
_class = self._class
if "_class" in attr and attr["_class"]:
_class = "%s %s" % (attr["_class"], _class)
else:
_class = _class
attr["_class"] = _class
input_class = self._input_class
input_labels = self.input_labels
input_elements = DIV()
ie_append = input_elements.append
selector = self.selector
_variable = self._variable
id = attr["_id"]
for operator in self.operator:
input_id = "%s-%s" % (id, operator)
input_box = INPUT(_name=input_id,
_id=input_id,
_type="text",
_class=input_class)
variable = _variable(selector, operator)
# Populate with the value, if given
# if user has not set any of the limits, we get [] in values.
value = values.get(variable, None)
if value not in [None, []]:
if type(value) is list:
value = value[0]
input_box["_value"] = value
input_box["value"] = value
ie_append(DIV(
DIV(LABEL(current.T(input_labels[operator] + ":"),
_for=input_id),
_class="range-filter-label"),
DIV(input_box,
_class="range-filter-widget"),
_class="range-filter-field"))
return input_elements
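# -----------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original module): a numerical
# range filter. The field selector "age" is an assumption for illustration;
# the widget renders one input box per operator ("ge"/"le") and populates
# URL query variables like ?~.age__ge=18&~.age__le=65.
def _example_range_filter():
    """ Return an example S3RangeFilter declaration (illustrative only) """
    return S3RangeFilter("age",
                         label = current.T("Age"),
                         )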
# =============================================================================
class S3DateFilter(S3RangeFilter):
"""
Date Range Filter Widget
@see: L{Configuration Options<S3FilterWidget.__init__>}
"""
_class = "date-filter"
# Class for visible input boxes.
_input_class = "%s-%s" % (_class, "input")
operator = ["ge", "le"]
# Untranslated labels for individual input boxes.
input_labels = {"ge": "From", "le": "To"}
# -------------------------------------------------------------------------
def widget(self, resource, values):
"""
Render this widget as HTML helper object(s)
@param resource: the resource
@param values: the search values from the URL query
"""
attr = self.attr
# CSS class and element ID
_class = self._class
if "_class" in attr and attr["_class"]:
_class = "%s %s" % (attr["_class"], _class)
else:
_class = _class
_id = attr["_id"]
# Determine the field type
if resource:
rfield = S3ResourceField(resource, self.field)
field = rfield.field
else:
rfield = field = None
if not field:
if not rfield or rfield.virtual:
ftype = self.opts.get("fieldtype", "datetime")
else:
# Unresolvable selector
return ""
else:
ftype = rfield.ftype
if not field:
# S3DateTimeWidget requires a Field
if rfield:
tname, fname = rfield.tname, rfield.fname
else:
tname, fname = "notable", "datetime"
if not _id:
raise SyntaxError("%s: _id parameter required " \
"when rendered without resource." % \
self.__class__.__name__)
dtformat = current.deployment_settings.get_L10n_date_format()
field = Field(fname, ftype,
requires = IS_DATE_IN_RANGE(format = dtformat))
field.tablename = field._tablename = tname
# Options
hide_time = self.opts.get("hide_time", False)
# Generate the input elements
T = current.T
selector = self.selector
_variable = self._variable
input_class = self._input_class
input_labels = self.input_labels
input_elements = DIV(_id=_id, _class=_class)
append = input_elements.append
for operator in self.operator:
input_id = "%s-%s" % (_id, operator)
# Determine the widget class
if ftype == "date":
widget = S3DateWidget()
else:
opts = {}
if operator == "ge":
opts["set_min"] = "%s-%s" % (_id, "le")
elif operator == "le":
opts["set_max"] = "%s-%s" % (_id, "ge")
widget = S3DateTimeWidget(hide_time=hide_time, **opts)
# Populate with the value, if given
# if user has not set any of the limits, we get [] in values.
variable = _variable(selector, operator)
value = values.get(variable, None)
if value not in [None, []]:
if type(value) is list:
value = value[0]
else:
value = None
# Render the widget
picker = widget(field, value,
_name=input_id,
_id=input_id,
_class=input_class)
# Append label and widget
append(DIV(
DIV(LABEL("%s:" % T(input_labels[operator]),
_for=input_id),
_class="range-filter-label"),
DIV(picker,
_class="range-filter-widget"),
_class="range-filter-field"))
return input_elements
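# -----------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original module): a date range
# filter using the "fieldtype" and "hide_time" options documented in
# S3FilterWidget.__init__. The selector "date" is an assumption for
# illustration.
def _example_date_filter():
    """ Return an example S3DateFilter declaration (illustrative only) """
    return S3DateFilter("date",
                        label = current.T("Date"),
                        # Explicit field type for context/virtual fields
                        fieldtype = "datetime",
                        # Show date pickers only, no time selector
                        hide_time = True,
                        )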
# =============================================================================
class S3LocationFilter(S3FilterWidget):
"""
Hierarchical Location Filter Widget
@see: L{Configuration Options<S3FilterWidget.__init__>}
NB This will show records linked to all child locations of the Lx
"""
_class = "location-filter"
operator = "belongs"
# -------------------------------------------------------------------------
def __init__(self, field=None, **attr):
"""
Constructor to configure the widget
@param field: the selector(s) for the field(s) to filter by
@param attr: configuration options for this widget
"""
if not field:
field = "location_id"
# Translate options using gis_location_name?
settings = current.deployment_settings
translate = settings.get_L10n_translate_gis_location()
if translate:
language = current.session.s3.language
if language == settings.get_L10n_default_language():
translate = False
self.translate = translate
super(S3LocationFilter, self).__init__(field=field, **attr)
# -------------------------------------------------------------------------
def widget(self, resource, values):
"""
Render this widget as HTML helper object(s)
@param resource: the resource
@param values: the search values from the URL query
"""
attr = self._attr(resource)
opts = self.opts
name = attr["_name"]
ftype, levels, noopt = self._options(resource, values=values)
if noopt:
return SPAN(noopt, _class="no-options-available")
# Filter class (default+custom)
_class = self._class
if "_class" in attr and attr["_class"]:
_class = "%s %s" % (_class, attr["_class"])
attr["_class"] = _class
# Store id and name for the data element
base_id = attr["_id"]
base_name = attr["_name"]
widgets = []
w_append = widgets.append
operator = self.operator
field_name = self.field
fname = self._prefix(field_name) if resource else field_name
#widget_type = opts["widget"]
# Use groupedopts widget if we specify cols, otherwise assume multiselect
cols = opts.get("cols", None)
if cols:
# Grouped Checkboxes
# @ToDo: somehow working, but ugly, not usable (deprecated?)
if "groupedopts-filter-widget" not in _class:
attr["_class"] = "%s groupedopts-filter-widget" % _class
attr["cols"] = cols
# Add one widget per level
for level in levels:
options = levels[level]["options"]
groupedopts = S3GroupedOptionsWidget(cols = cols,
size = opts["size"] or 12,
)
# Dummy field
name = "%s-%s" % (base_name, level)
dummy_field = Storage(name=name,
type=ftype,
requires=IS_IN_SET(options,
multiple=True))
# Unique ID/name
attr["_id"] = "%s-%s" % (base_id, level)
attr["_name"] = name
# Find relevant values to pre-populate
_values = values.get("%s$%s__%s" % (fname, level, operator))
w_append(groupedopts(dummy_field, _values, **attr))
else:
# Multiselect is default
T = current.T
# Multiselect Dropdown with Checkboxes
if "multiselect-filter-widget" not in _class:
_class = "%s multiselect-filter-widget" % _class
# Add one widget per level
first = True
hide = True
for level in levels:
# Dummy field
name = "%s-%s" % (base_name, level)
options = levels[level]["options"]
dummy_field = Storage(name=name,
type=ftype,
requires=IS_IN_SET(options,
multiple=True))
# Unique ID/name
attr["_id"] = "%s-%s" % (base_id, level)
attr["_name"] = name
# Find relevant values to pre-populate the widget
_values = values.get("%s$%s__%s" % (fname, level, operator))
w = S3MultiSelectWidget(filter = opts.get("filter", "auto"),
header = opts.get("header", False),
selectedList = opts.get("selectedList", 3),
noneSelectedText = T("Select %(location)s") % \
dict(location=levels[level]["label"]))
if first:
attr["_class"] = _class
elif hide:
# Hide dropdowns other than first
_class = "%s hide" % _class
attr["_class"] = _class
hide = False
widget = w(dummy_field, _values, **attr)
w_append(widget)
first = False
# Restore id and name for the data_element
attr["_id"] = base_id
attr["_name"] = base_name
# Render the filter widget
return TAG[""](*widgets)
# -------------------------------------------------------------------------
def data_element(self, variable):
"""
Construct the hidden element that holds the
URL query term corresponding to an input element in the widget.
@param variable: the URL query variable
"""
output = []
oappend = output.append
i = 0
for level in self.levels:
widget = INPUT(_type="hidden",
_id="%s-%s-data" % (self.attr["_id"], level),
_class="filter-widget-data %s-data" % self._class,
_value=variable[i])
oappend(widget)
i += 1
return output
# -------------------------------------------------------------------------
def ajax_options(self, resource):
attr = self._attr(resource)
ftype, levels, noopt = self._options(resource, inject_hierarchy=False)
opts = {}
base_id = attr["_id"]
for level in levels:
if noopt:
opts["%s-%s" % (base_id, level)] = str(noopt)
else:
options = levels[level]["options"]
opts["%s-%s" % (base_id, level)] = options
return opts
# -------------------------------------------------------------------------
@staticmethod
def __options(row, levels, inject_hierarchy, hierarchy, _level, translate, name_l10n):
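        """
            Helper to add a single gis_location row to the level options
            and (optionally) to the nested location hierarchy dict
            @param row: the gis_location Row (with the Lx level names)
            @param levels: the per-level options storage (see _options)
            @param inject_hierarchy: whether to build the hierarchy dict
            @param hierarchy: the location hierarchy dict to populate
            @param _level: the top-most hierarchy level (e.g. "L0")
            @param translate: whether location names are translated
            @param name_l10n: lookup dict of localized location names
        """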
if inject_hierarchy:
parent = None
grandparent = None
greatgrandparent = None
greatgreatgrandparent = None
greatgreatgreatgrandparent = None
i = 0
for level in levels:
v = row[level]
if v:
o = levels[level]["options"]
if v not in o:
if translate:
o[v] = name_l10n.get(v, v)
else:
o.append(v)
if inject_hierarchy:
if i == 0:
h = hierarchy[_level]
if v not in h:
h[v] = {}
parent = v
elif i == 1:
h = hierarchy[_level][parent]
if v not in h:
h[v] = {}
grandparent = parent
parent = v
elif i == 2:
h = hierarchy[_level][grandparent][parent]
if v not in h:
h[v] = {}
greatgrandparent = grandparent
grandparent = parent
parent = v
elif i == 3:
h = hierarchy[_level][greatgrandparent][grandparent][parent]
if v not in h:
h[v] = {}
greatgreatgrandparent = greatgrandparent
greatgrandparent = grandparent
grandparent = parent
parent = v
elif i == 4:
h = hierarchy[_level][greatgreatgrandparent][greatgrandparent][grandparent][parent]
if v not in h:
h[v] = {}
greatgreatgreatgrandparent = greatgreatgrandparent
greatgreatgrandparent = greatgrandparent
greatgrandparent = grandparent
grandparent = parent
parent = v
elif i == 5:
h = hierarchy[_level][greatgreatgreatgrandparent][greatgreatgrandparent][greatgrandparent][grandparent][parent]
if v not in h:
h[v] = {}
i += 1
# -------------------------------------------------------------------------
def _options(self, resource, inject_hierarchy=True, values=None):
T = current.T
s3db = current.s3db
gtable = s3db.gis_location
NOOPT = T("No options available")
#attr = self.attr
opts = self.opts
translate = self.translate
# Which levels should we display?
# Lookup the appropriate labels from the GIS configuration
if "levels" in opts:
hierarchy = current.gis.get_location_hierarchy()
levels = OrderedDict()
for level in opts["levels"]:
levels[level] = hierarchy.get(level, level)
else:
levels = current.gis.get_relevant_hierarchy_levels(as_dict=True)
# Pass to data_element
self.levels = levels
if "label" not in opts:
opts["label"] = T("Filter by Location")
ftype = "reference gis_location"
default = (ftype, levels.keys(), opts.get("no_opts", NOOPT))
# Resolve the field selector
selector = None
if resource is None:
rname = opts.get("resource")
if rname:
resource = s3db.resource(rname)
selector = opts.get("lookup", "location_id")
else:
selector = self.field
options = opts.get("options")
if options:
# Fixed options (=list of location IDs)
resource = s3db.resource("gis_location", id=options)
fields = ["id"] + [l for l in levels]
if translate:
fields.append("path")
joined = False
elif selector:
# Lookup options from resource
rfield = S3ResourceField(resource, selector)
if not rfield.field or rfield.ftype != ftype:
# Must be a real reference to gis_location
return default
fields = [selector] + ["%s$%s" % (selector, l) for l in levels]
if translate:
fields.append("%s$path" % selector)
joined = True
# Filter out old Locations
# @ToDo: Allow override
resource.add_filter(gtable.end_date == None)
else:
# Neither fixed options nor resource to look them up
return default
# Find the options
rows = resource.select(fields=fields,
limit=None,
virtual=False,
as_rows=True)
rows2 = []
if not rows:
if values:
# Make sure the selected options are in the available options
resource = s3db.resource("gis_location")
fields = ["id"] + [l for l in levels]
if translate:
fields.append("path")
joined = False
rows = []
for f in values:
v = values[f]
if not v:
continue
level = "L%s" % f.split("L", 1)[1][0]
resource.clear_query()
query = (gtable.level == level) & \
(gtable.name.belongs(v))
resource.add_filter(query)
# Filter out old Locations
# @ToDo: Allow override
resource.add_filter(gtable.end_date == None)
_rows = resource.select(fields=fields,
limit=None,
virtual=False,
as_rows=True)
if rows:
rows &= _rows
else:
rows = _rows
if not rows:
# No options
return default
elif values:
# Make sure the selected options are in the available options
resource2 = s3db.resource("gis_location")
fields = ["id"] + [l for l in levels]
if translate:
fields.append("path")
for f in values:
v = values[f]
if not v:
continue
level = "L%s" % f.split("L", 1)[1][0]
resource2.clear_query()
query = (gtable.level == level) & \
(gtable.name.belongs(v))
resource2.add_filter(query)
# Filter out old Locations
# @ToDo: Allow override
resource2.add_filter(gtable.end_date == None)
_rows = resource2.select(fields=fields,
limit=None,
virtual=False,
as_rows=True)
if rows2:
rows2 &= _rows
else:
rows2 = _rows
# Initialise Options Storage & Hierarchy
hierarchy = {}
first = True
for level in levels:
if first:
hierarchy[level] = {}
_level = level
first = False
levels[level] = {"label": levels[level],
"options": {} if translate else [],
}
# Generate a name localization lookup dict
name_l10n = {}
if translate:
# Get IDs via Path to lookup name_l10n
ids = set()
if joined:
if "$" in selector:
selector = "%s.%s" % (rfield.field.tablename, selector.split("$", 1)[1])
elif "." in selector:
selector = "%s.%s" % (rfield.field.tablename, selector.split(".", 1)[1])
else:
selector = "%s.%s" % (resource.tablename, selector)
for row in rows:
_row = getattr(row, "gis_location") if joined else row
path = _row.path
if path:
path = path.split("/")
else:
# Build it
if joined:
location_id = row[selector]
if location_id:
_row.id = location_id
if "id" in _row:
path = current.gis.update_location_tree(_row)
path = path.split("/")
if path:
ids |= set(path)
for row in rows2:
path = row.path
if path:
path = path.split("/")
else:
# Build it
if "id" in row:
path = current.gis.update_location_tree(row)
path = path.split("/")
if path:
ids |= set(path)
# Build lookup table for name_l10n
ntable = s3db.gis_location_name
query = (gtable.id.belongs(ids)) & \
(ntable.deleted == False) & \
(ntable.location_id == gtable.id) & \
(ntable.language == current.session.s3.language)
nrows = current.db(query).select(gtable.name,
ntable.name_l10n,
limitby=(0, len(ids)),
)
for row in nrows:
name_l10n[row["gis_location.name"]] = row["gis_location_name.name_l10n"]
# Populate the Options and the Hierarchy
for row in rows:
_row = getattr(row, "gis_location") if joined else row
self.__options(_row, levels, inject_hierarchy, hierarchy, _level, translate, name_l10n)
for row in rows2:
self.__options(row, levels, inject_hierarchy, hierarchy, _level, translate, name_l10n)
if translate:
# Sort the options dicts
for level in levels:
                options = levels[level]["options"]
                levels[level]["options"] = OrderedDict(sorted(options.iteritems()))
else:
# Sort the options lists
for level in levels:
levels[level]["options"].sort()
if inject_hierarchy:
# Inject the Location Hierarchy
hierarchy = "S3.location_filter_hierarchy=%s" % \
json.dumps(hierarchy, separators=SEPARATORS)
js_global = current.response.s3.js_global
js_global.append(hierarchy)
if translate:
# Inject lookup list
name_l10n = "S3.location_name_l10n=%s" % \
json.dumps(name_l10n, separators=SEPARATORS)
js_global.append(name_l10n)
return (ftype, levels, None)
# -------------------------------------------------------------------------
def _selector(self, resource, fields):
"""
Helper method to generate a filter query selector for the
given field(s) in the given resource.
@param resource: the S3Resource
@param fields: the field selectors (as strings)
@return: the field label and the filter query selector, or None if none of the
field selectors could be resolved
"""
prefix = self._prefix
if resource:
rfield = S3ResourceField(resource, fields)
label = rfield.label
else:
label = None
if "levels" in self.opts:
levels = self.opts.levels
else:
levels = current.gis.get_relevant_hierarchy_levels()
fields = ["%s$%s" % (fields, level) for level in levels]
if resource:
selectors = []
for field in fields:
try:
rfield = S3ResourceField(resource, field)
except (AttributeError, TypeError):
continue
selectors.append(prefix(rfield.selector))
else:
selectors = fields
if selectors:
return label, "|".join(selectors)
else:
return label, None
# -------------------------------------------------------------------------
@classmethod
def _variable(cls, selector, operator):
"""
Construct URL query variable(s) name from a filter query
selector and the given operator(s)
@param selector: the selector
@param operator: the operator (or tuple/list of operators)
@return: the URL query variable name (or list of variable names)
"""
selectors = selector.split("|")
return ["%s__%s" % (selector, operator) for selector in selectors]
# =============================================================================
class S3OptionsFilter(S3FilterWidget):
"""
Options filter widget
@see: L{Configuration Options<S3FilterWidget.__init__>}
"""
_class = "options-filter"
operator = "belongs"
alternatives = ["anyof", "contains"]
# -------------------------------------------------------------------------
def widget(self, resource, values):
"""
Render this widget as HTML helper object(s)
@param resource: the resource
@param values: the search values from the URL query
"""
attr = self._attr(resource)
opts = self.opts
name = attr["_name"]
# Get the options
ftype, options, noopt = self._options(resource, values=values)
if noopt:
return SPAN(noopt, _class="no-options-available")
else:
options = OrderedDict(options)
# Any-All-Option : for many-to-many fields the user can
# search for records containing all the options or any
# of the options:
if len(options) > 1 and ftype[:4] == "list":
operator = opts.get("operator", None)
if operator:
self.operator = operator
any_all = ""
else:
operator = self.operator
any_all = True
if operator == "anyof":
filter_type = "any"
else:
filter_type = "all"
if operator == "belongs":
operator = "contains"
if any_all:
# Provide a form to prompt the user to choose
T = current.T
any_all = DIV(T("Filter type"),
INPUT(_name="%s_filter" % name,
_id="%s_filter_any" % name,
_type="radio",
_value="any",
value=filter_type),
LABEL(T("Any"),
_for="%s_filter_any" % name),
INPUT(_name="%s_filter" % name,
_id="%s_filter_all" % name,
_type="radio",
_value="all",
value=filter_type),
LABEL(T("All"),
_for="%s_filter_all" % name),
_class="s3-options-filter-anyall",
)
else:
any_all = ""
# Initialize widget
#widget_type = opts["widget"]
# Use groupedopts widget if we specify cols, otherwise assume multiselect
cols = opts.get("cols", None)
if cols:
widget_class = "groupedopts-filter-widget"
w = S3GroupedOptionsWidget(options = options,
multiple = opts.get("multiple", True),
cols = cols,
size = opts["size"] or 12,
help_field = opts["help_field"],
)
else:
# Default widget_type = "multiselect"
widget_class = "multiselect-filter-widget"
w = S3MultiSelectWidget(filter = opts.get("filter", "auto"),
header = opts.get("header", False),
selectedList = opts.get("selectedList", 3),
multiple = opts.get("multiple", True),
)
# Add widget class and default class
classes = set(attr.get("_class", "").split()) | \
set((widget_class, self._class))
attr["_class"] = " ".join(classes) if classes else None
# Render the widget
dummy_field = Storage(name=name,
type=ftype,
requires=IS_IN_SET(options, multiple=True))
widget = w(dummy_field, values, **attr)
return TAG[""](any_all, widget)
# -------------------------------------------------------------------------
def ajax_options(self, resource):
"""
Method to Ajax-retrieve the current options of this widget
@param resource: the S3Resource
"""
opts = self.opts
attr = self._attr(resource)
ftype, options, noopt = self._options(resource)
if noopt:
options = {attr["_id"]: str(noopt)}
else:
#widget_type = opts["widget"]
# Use groupedopts widget if we specify cols, otherwise assume multiselect
cols = opts.get("cols", None)
if cols:
# Use the widget method to group and sort the options
widget = S3GroupedOptionsWidget(
options = options,
multiple = True,
cols = cols,
size = opts["size"] or 12,
help_field = opts["help_field"]
)
options = {attr["_id"]:
widget._options({"type": ftype}, [])}
else:
# Multiselect
# Produce a simple list of tuples
options = {attr["_id"]: [(k, s3_unicode(v))
for k, v in options]}
return options
# -------------------------------------------------------------------------
def _options(self, resource, values=None):
"""
Helper function to retrieve the current options for this
filter widget
            @param resource: the S3Resource
            @param values: the currently selected values, to make sure they
                           are included in the available options
"""
T = current.T
NOOPT = T("No options available")
EMPTY = T("None")
#attr = self.attr
opts = self.opts
# Resolve the field selector
selector = self.field
if isinstance(selector, (tuple, list)):
selector = selector[0]
if resource is None:
rname = opts.get("resource")
if rname:
resource = current.s3db.resource(rname)
if resource:
rfield = S3ResourceField(resource, selector)
field = rfield.field
colname = rfield.colname
ftype = rfield.ftype
else:
rfield = field = colname = None
ftype = "string"
# Find the options
opt_keys = []
multiple = ftype[:5] == "list:"
if opts.options is not None:
# Custom dict of options {value: label} or a callable
# returning such a dict:
options = opts.options
if callable(options):
options = options()
opt_keys = options.keys()
elif resource:
# Determine the options from the field type
options = None
if ftype == "boolean":
opt_keys = (True, False)
elif field or rfield.virtual:
groupby = field if field and not multiple else None
virtual = field is None
# If the search field is a foreign key, then try to perform
# a reverse lookup of primary IDs in the lookup table which
# are linked to at least one record in the resource => better
# scalability.
rows = None
if field:
ktablename, key, m = s3_get_foreign_key(field, m2m=False)
if ktablename:
multiple = m
ktable = current.s3db.table(ktablename)
key_field = ktable[key]
colname = str(key_field)
left = None
accessible_query = current.auth.s3_accessible_query
# Respect the validator of the foreign key field.
# Commented because questionable: We want a filter
# option for every current field value, even if it
# doesn't match the validator (don't we?)
#requires = field.requires
#if requires:
#if not isinstance(requires, list):
#requires = [requires]
#requires = requires[0]
#if isinstance(requires, IS_EMPTY_OR):
#requires = requires.other
#if isinstance(requires, IS_ONE_OF_EMPTY):
#query, left = requires.query(ktable)
#else:
#query = accessible_query("read", ktable)
#query &= (key_field == field)
query = (key_field == field)
joins = rfield.join
for tname in joins:
query &= joins[tname]
# We do not allow the user to see values only used
# in records he's not permitted to see:
query &= accessible_query("read", resource.table)
# Filter options by location?
location_filter = opts.get("location_filter")
if location_filter and "location_id" in ktable:
location = current.session.s3.location_filter
if location:
query &= (ktable.location_id == location)
# Filter options by organisation?
org_filter = opts.get("org_filter")
if org_filter and "organisation_id" in ktable:
root_org = current.auth.root_org()
if root_org:
query &= ((ktable.organisation_id == root_org) | \
(ktable.organisation_id == None))
#else:
# query &= (ktable.organisation_id == None)
rows = current.db(query).select(key_field,
resource._id.min(),
groupby=key_field,
left=left)
# If we can not perform a reverse lookup, then we need
# to do a forward lookup of all unique values of the
# search field from all records in the table :/ still ok,
# but not endlessly scalable:
if rows is None:
rows = resource.select([selector],
limit=None,
orderby=field,
groupby=groupby,
virtual=virtual,
as_rows=True)
opt_keys = [] # Can't use set => would make orderby pointless
if rows:
kappend = opt_keys.append
kextend = opt_keys.extend
for row in rows:
val = row[colname]
if virtual and callable(val):
val = val()
if multiple or \
virtual and isinstance(val, (list, tuple, set)):
kextend([v for v in val
if v not in opt_keys])
elif val not in opt_keys:
kappend(val)
# Make sure the selected options are in the available options
# (not possible if we have a fixed options dict)
if options is None and values:
numeric = rfield.ftype in ("integer", "id") or \
rfield.ftype[:9] == "reference"
for _val in values:
if numeric:
try:
val = int(_val)
except ValueError:
# not valid for this field type => skip
continue
else:
val = _val
if val not in opt_keys and \
(not isinstance(val, (int, long)) or not str(val) in opt_keys):
opt_keys.append(val)
# No options?
if len(opt_keys) < 1 or len(opt_keys) == 1 and not opt_keys[0]:
return (ftype, None, opts.get("no_opts", NOOPT))
# Represent the options
opt_list = [] # list of tuples (key, value)
# Custom represent? (otherwise fall back to field.represent)
represent = opts.represent
if not represent: # or ftype[:9] != "reference":
represent = field.represent if field else None
if options is not None:
# Custom dict of {value:label} => use this label
opt_list = options.items()
elif callable(represent):
# Callable representation function:
if hasattr(represent, "bulk"):
# S3Represent => use bulk option
opt_dict = represent.bulk(opt_keys,
list_type=False,
show_link=False)
if None in opt_keys:
opt_dict[None] = EMPTY
elif None in opt_dict:
del opt_dict[None]
if "" in opt_keys:
opt_dict[""] = EMPTY
opt_list = opt_dict.items()
else:
# Simple represent function
args = {"show_link": False} \
if "show_link" in represent.func_code.co_varnames else {}
if multiple:
repr_opt = lambda opt: opt in (None, "") and (opt, EMPTY) or \
(opt, represent([opt], **args))
else:
repr_opt = lambda opt: opt in (None, "") and (opt, EMPTY) or \
(opt, represent(opt, **args))
opt_list = map(repr_opt, opt_keys)
elif isinstance(represent, str) and ftype[:9] == "reference":
# Represent is a string template to be fed from the
# referenced record
# Get the referenced table
db = current.db
ktable = db[ftype[10:]]
k_id = ktable._id.name
# Get the fields referenced by the string template
fieldnames = [k_id]
fieldnames += re.findall("%\(([a-zA-Z0-9_]*)\)s", represent)
represent_fields = [ktable[fieldname] for fieldname in fieldnames]
# Get the referenced records
query = (ktable.id.belongs([k for k in opt_keys
if str(k).isdigit()])) & \
(ktable.deleted == False)
rows = db(query).select(*represent_fields).as_dict(key=k_id)
# Run all referenced records against the format string
opt_list = []
ol_append = opt_list.append
for opt_value in opt_keys:
if opt_value in rows:
opt_represent = represent % rows[opt_value]
if opt_represent:
ol_append((opt_value, opt_represent))
else:
# Straight string representations of the values (fallback)
opt_list = [(opt_value, s3_unicode(opt_value))
for opt_value in opt_keys if opt_value]
none = opts["none"]
try:
opt_list.sort(key=lambda item: item[1])
except:
opt_list.sort(key=lambda item: s3_unicode(item[1]))
options = []
empty = False
for k, v in opt_list:
if k is None:
if none:
empty = True
if none is True:
# Use the represent
options.append((k, v))
else:
# Must be a string to use as the represent:
options.append((k, none))
else:
options.append((k, v))
if none and not empty:
# Add the value anyway (e.g. not found via the reverse lookup)
if none is True:
none = current.messages["NONE"]
options.append((None, none))
return (ftype, options, None)
# -------------------------------------------------------------------------
@staticmethod
def _values(get_vars, variable):
"""
Helper method to get all values of a URL query variable
@param get_vars: the GET vars (a dict)
@param variable: the name of the query variable
@return: a list of values
"""
if not variable:
return []
# Match __eq before checking any other operator
selector = variable.split("__", 1)[0]
for key in ("%s__eq" % selector, selector, variable):
if key in get_vars:
values = S3URLQuery.parse_value(get_vars[key])
if not isinstance(values, (list, tuple)):
values = [values]
return values
return []
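# -----------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original module): an options
# filter with a fixed options dict and grouped checkboxes (the "cols" option
# switches from the default multiselect to the groupedopts widget). Selector
# and option values are assumptions for illustration.
def _example_options_filter():
    """ Return an example S3OptionsFilter declaration (illustrative only) """
    return S3OptionsFilter("status",
                           label = current.T("Status"),
                           options = {1: current.T("Open"),
                                      2: current.T("Closed"),
                                      },
                           cols = 2,
                           )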
# =============================================================================
class S3HierarchyFilter(S3FilterWidget):
"""
Filter widget for hierarchical types
Specific options:
lookup name of the lookup table
represent representation method for the key
"""
_class = "hierarchy-filter"
operator = "belongs"
# -------------------------------------------------------------------------
def widget(self, resource, values):
"""
Render this widget as HTML helper object(s)
@param resource: the resource
@param values: the search values from the URL query
"""
# Currently selected values
selected = []
append = selected.append
if not isinstance(values, (list, tuple, set)):
values = [values]
for v in values:
if isinstance(v, (int, long)) or str(v).isdigit():
append(v)
# Resolve the field selector
rfield = S3ResourceField(resource, self.field)
# Instantiate the widget
opts = self.opts
w = S3HierarchyWidget(lookup = opts.get("lookup"),
represent = opts.get("represent"),
multiple = opts.get("multiple", True),
leafonly = opts.get("leafonly", True),
)
# Render the widget
widget = w(rfield.field, selected, **self._attr(resource))
widget.add_class(self._class)
return widget
# -------------------------------------------------------------------------
def variable(self, resource, get_vars=None):
"""
Generate the name for the URL query variable for this
widget, detect alternative __typeof queries.
@param resource: the resource
@return: the URL query variable name (or list of
variable names if there are multiple operators)
"""
label, self.selector = self._selector(resource, self.field)
if not self.selector:
return None
if "label" not in self.opts:
self.opts["label"] = label
selector = self.selector
if self.alternatives and get_vars is not None:
# Get the actual operator from get_vars
operator = self._operator(get_vars, self.selector)
if operator:
self.operator = operator
variable = self._variable(selector, self.operator)
if not get_vars or not resource or variable in get_vars:
return variable
# Detect and resolve __typeof queries
#BELONGS = current.db._adapter.BELONGS
resolve = S3ResourceQuery._resolve_hierarchy
selector = resource.prefix_selector(selector)
for key, value in get_vars.items():
if key.startswith(selector):
selectors, op, invert = S3URLQuery.parse_expression(key)
else:
continue
if op != "typeof" or len(selectors) != 1:
continue
rfield = resource.resolve_selector(selectors[0])
if rfield.field:
values = S3URLQuery.parse_value(value)
hierarchy, field, nodeset, none = resolve(rfield.field, values)
if field and (nodeset or none):
if nodeset is None:
nodeset = set()
if none:
nodeset.add(None)
get_vars.pop(key, None)
get_vars[variable] = [str(v) for v in nodeset]
break
return variable
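# -----------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original module): a hierarchy
# filter for a foreign key into a hierarchical lookup table. The selector and
# lookup table name are assumptions for illustration.
def _example_hierarchy_filter():
    """ Return an example S3HierarchyFilter declaration (illustrative only) """
    return S3HierarchyFilter("organisation_id",
                             lookup = "org_organisation",
                             # Allow selection of non-leaf nodes too
                             leafonly = False,
                             )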
# =============================================================================
class S3FilterForm(object):
""" Helper class to construct and render a filter form for a resource """
def __init__(self, widgets, **attr):
"""
Constructor
@param widgets: the widgets (as list)
@param attr: HTML attributes for this form
"""
self.widgets = widgets
attributes = Storage()
options = Storage()
for k, v in attr.iteritems():
if k[0] == "_":
attributes[k] = v
else:
options[k] = v
self.attr = attributes
self.opts = options
# -------------------------------------------------------------------------
def html(self, resource, get_vars=None, target=None, alias=None):
"""
Render this filter form as HTML form.
@param resource: the S3Resource
@param get_vars: the request GET vars (URL query dict)
@param target: the HTML element ID of the target object for
this filter form (e.g. a datatable)
@param alias: the resource alias to use in widgets
"""
attr = self.attr
form_id = attr.get("_id")
if not form_id:
form_id = "filter-form"
attr["_id"] = form_id
# Prevent issues with Webkit-based browsers & Back buttons
attr["_autocomplete"] = "off"
opts = self.opts
settings = current.deployment_settings
# Form style
formstyle = opts.get("formstyle", None)
if not formstyle:
formstyle = settings.get_ui_filter_formstyle()
# Filter widgets
rows = self._render_widgets(resource,
get_vars=get_vars or {},
alias=alias,
formstyle=formstyle)
# Other filter form controls
controls = self._render_controls(resource)
if controls:
rows.append(formstyle(None, "", controls, ""))
# Submit elements
ajax = opts.get("ajax", False)
submit = opts.get("submit", False)
if submit:
# Auto-submit?
auto_submit = settings.get_ui_filter_auto_submit()
if auto_submit and opts.get("auto_submit", True):
script = '''S3.search.filterFormAutoSubmit('%s',%s)''' % \
(form_id, auto_submit)
current.response.s3.jquery_ready.append(script)
# Custom label and class
_class = None
if submit is True:
label = current.T("Search")
elif isinstance(submit, (list, tuple)):
label, _class = submit
else:
label = submit
# Submit button
submit_button = INPUT(_type="button",
_value=label,
_class="filter-submit")
#if auto_submit:
#submit_button.add_class("hide")
if _class:
submit_button.add_class(_class)
# Where to request filtered data from:
submit_url = opts.get("url", URL(vars={}))
# Where to request updated options from:
ajax_url = opts.get("ajaxurl", URL(args=["filter.options"], vars={}))
# Submit row elements
submit = TAG[""](submit_button,
INPUT(_type="hidden",
_class="filter-ajax-url",
_value=ajax_url),
INPUT(_type="hidden",
_class="filter-submit-url",
_value=submit_url))
if ajax and target:
submit.append(INPUT(_type="hidden",
_class="filter-submit-target",
_value=target))
# Append submit row
submit_row = formstyle(None, "", submit, "")
if auto_submit and hasattr(submit_row, "add_class"):
submit_row.add_class("hide")
rows.append(submit_row)
# Filter Manager (load/apply/save filters)
fm = settings.get_search_filter_manager()
if fm and opts.get("filter_manager", resource is not None):
filter_manager = self._render_filters(resource, form_id)
if filter_manager:
fmrow = formstyle(None, "", filter_manager, "")
if hasattr(fmrow, "add_class"):
fmrow.add_class("hide filter-manager-row")
rows.append(fmrow)
# Adapt to formstyle: render a TABLE only if formstyle returns TRs
if rows:
elements = rows[0]
if not isinstance(elements, (list, tuple)):
elements = elements.elements()
n = len(elements)
if n > 0 and elements[0].tag == "tr" or \
n > 1 and elements[0].tag == "" and elements[1].tag == "tr":
form = FORM(TABLE(TBODY(rows)), **attr)
else:
form = FORM(DIV(rows), **attr)
if settings.ui.formstyle == "bootstrap":
# We need to amend the HTML markup to support this CSS framework
form.add_class("form-horizontal")
form.add_class("filter-form")
if ajax:
form.add_class("filter-ajax")
else:
return ""
# Put a copy of formstyle into the form for access by the view
form.formstyle = formstyle
return form
# -------------------------------------------------------------------------
def fields(self, resource, get_vars=None, alias=None):
"""
Render the filter widgets without FORM wrapper, e.g. to
embed them as fieldset in another form.
@param resource: the S3Resource
@param get_vars: the request GET vars (URL query dict)
@param alias: the resource alias to use in widgets
"""
formstyle = self.opts.get("formstyle", None)
if not formstyle:
formstyle = current.deployment_settings.get_ui_filter_formstyle()
rows = self._render_widgets(resource,
get_vars=get_vars,
alias=alias,
formstyle=formstyle)
controls = self._render_controls(resource)
if controls:
rows.append(formstyle(None, "", controls, ""))
# Adapt to formstyle: only render a TABLE if formstyle returns TRs
if rows:
elements = rows[0]
if not isinstance(elements, (list, tuple)):
elements = elements.elements()
n = len(elements)
if n > 0 and elements[0].tag == "tr" or \
n > 1 and elements[0].tag == "" and elements[1].tag == "tr":
fields = TABLE(TBODY(rows))
            else:
                fields = DIV(rows)
        else:
            # No widgets to render
            fields = DIV()
return fields
# -------------------------------------------------------------------------
def _render_controls(self, resource):
"""
Render optional additional filter form controls: advanced
options toggle, clear filters.
"""
T = current.T
controls = []
opts = self.opts
        advanced = opts.get("advanced", False)
        if advanced:
            _class = "filter-advanced"
            label_off = T("Less Options")
            if advanced is True:
                label = T("More Options")
            elif isinstance(advanced, (list, tuple)):
                # Custom on/off labels, optionally with an extra CSS class
                label = advanced[0]
                label_off = advanced[1]
                if len(advanced) > 2:
                    _class = "%s %s" % (advanced[2], _class)
            else:
                label = advanced
advanced = A(SPAN(label,
data = {"on": label,
"off": label_off,
},
_class="filter-advanced-label",
),
I(" ", _class="icon-down"),
I(" ", _class="icon-up", _style="display:none"),
_class=_class
)
controls.append(advanced)
clear = opts.get("clear", True)
if clear:
_class = "filter-clear"
if clear is True:
label = T("Clear filter")
elif isinstance(clear, (list, tuple)):
label = clear[0]
_class = "%s %s" % (clear[1], _class)
else:
label = clear
clear = A(label, _class=_class)
clear.add_class("action-lnk")
controls.append(clear)
fm = current.deployment_settings.get_search_filter_manager()
if fm and opts.get("filter_manager", resource is not None):
show_fm = A(T("Saved filters"),
_class="show-filter-manager action-lnk")
controls.append(show_fm)
if controls:
return DIV(controls, _class="filter-controls")
else:
return None
# -------------------------------------------------------------------------
def _render_widgets(self,
resource,
get_vars=None,
alias=None,
formstyle=None):
"""
Render the filter widgets
@param resource: the S3Resource
@param get_vars: the request GET vars (URL query dict)
@param alias: the resource alias to use in widgets
@param formstyle: the formstyle to use
@return: a list of form rows
"""
rows = []
rappend = rows.append
advanced = False
for f in self.widgets:
widget = f(resource, get_vars, alias=alias)
label = f.opts["label"]
comment = f.opts["comment"]
hidden = f.opts["hidden"]
if hidden:
advanced = True
widget_id = f.attr["_id"]
if widget_id:
row_id = "%s__row" % widget_id
label_id = "%s__label" % widget_id
else:
row_id = None
label_id = None
if label:
label = LABEL("%s:" % label, _id=label_id, _for=widget_id)
else:
label = ""
if not comment:
comment = ""
formrow = formstyle(row_id, label, widget, comment, hidden=hidden)
if hidden:
if isinstance(formrow, DIV):
formrow.add_class("advanced")
elif isinstance(formrow, tuple):
for item in formrow:
if hasattr(item, "add_class"):
item.add_class("advanced")
rappend(formrow)
if advanced:
if resource:
self.opts["advanced"] = resource.get_config(
"filter_advanced", True)
else:
self.opts["advanced"] = True
return rows
# -------------------------------------------------------------------------
def _render_filters(self, resource, form_id):
"""
Render a filter manager widget
@param resource: the resource
@return: the widget
"""
SELECT_FILTER = current.T("Saved Filters...")
ajaxurl = self.opts.get("saveurl", URL(args=["filter.json"], vars={}))
# Current user
auth = current.auth
pe_id = auth.user.pe_id if auth.s3_logged_in() else None
if not pe_id:
return None
table = current.s3db.pr_filter
query = (table.deleted != True) & \
(table.pe_id == pe_id)
if resource:
query &= (table.resource == resource.tablename)
else:
query &= (table.resource == None)
rows = current.db(query).select(table._id,
table.title,
table.query,
orderby=table.title)
options = [OPTION(SELECT_FILTER,
_value="",
_class="filter-manager-prompt",
_disabled="disabled")]
add_option = options.append
filters = {}
for row in rows:
filter_id = row[table._id]
add_option(OPTION(row.title, _value=filter_id))
query = row.query
if query:
query = json.loads(query)
filters[filter_id] = query
widget_id = "%s-fm" % form_id
widget = DIV(SELECT(options,
_id=widget_id,
_class="filter-manager-widget"),
_class="filter-manager-container")
# JSON-serializable translator
T = current.T
_t = lambda s: str(T(s))
# Configure the widget
settings = current.deployment_settings
config = dict(
# Filters and Ajax URL
filters = filters,
ajaxURL = ajaxurl,
# Workflow Options
allowDelete = settings.get_search_filter_manager_allow_delete(),
# Tooltips for action icons/buttons
createTooltip = _t("Save current options as new filter"),
loadTooltip = _t("Load filter"),
saveTooltip = _t("Update saved filter"),
deleteTooltip = _t("Delete saved filter"),
# Hints
titleHint = _t("Enter a title..."),
selectHint = str(SELECT_FILTER),
emptyHint = _t("No saved filters"),
# Confirm update + confirmation text
confirmUpdate = _t("Update this filter?"),
confirmDelete = _t("Delete this filter?"),
)
# Render actions as buttons with text if configured, otherwise
# they will appear as empty DIVs with classes for CSS icons
create_text = settings.get_search_filter_manager_save()
if create_text:
config["createText"] = _t(create_text)
update_text = settings.get_search_filter_manager_update()
if update_text:
config["saveText"] = _t(update_text)
delete_text = settings.get_search_filter_manager_delete()
if delete_text:
config["deleteText"] = _t(delete_text)
load_text = settings.get_search_filter_manager_load()
if load_text:
config["loadText"] = _t(load_text)
script = '''$("#%s").filtermanager(%s)''' % \
(widget_id,
json.dumps(config, separators=SEPARATORS))
current.response.s3.jquery_ready.append(script)
return widget
# -------------------------------------------------------------------------
def json(self, resource, get_vars=None):
"""
Render this filter form as JSON (for Ajax requests)
@param resource: the S3Resource
@param get_vars: the request GET vars (URL query dict)
"""
raise NotImplementedError
# -------------------------------------------------------------------------
@staticmethod
def apply_filter_defaults(request, resource):
"""
            Add default filters to resource, to be called when a multi-record
            view with a filter form is rendered for the first time and before
            the view elements get processed
@param request: the request
@param resource: the resource
"""
s3 = current.response.s3
get_vars = request.get_vars
tablename = resource.tablename
# Do we have filter defaults for this resource?
filter_defaults = s3
for level in ("filter_defaults", tablename):
if level not in filter_defaults:
return None
filter_defaults = filter_defaults[level]
# Which filter widgets do we need to apply defaults for?
filter_widgets = resource.get_config("filter_widgets")
for filter_widget in filter_widgets:
# Do not apply defaults of hidden widgets because they are
# not visible to the user:
if filter_widget.opts.hidden:
continue
defaults = {}
variable = filter_widget.variable(resource, get_vars)
# Do we have a corresponding value in get_vars?
if type(variable) is list:
for k in variable:
values = filter_widget._values(get_vars, k)
if values:
filter_widget.values[k] = values
else:
defaults[k] = None
else:
values = filter_widget._values(get_vars, variable)
if values:
filter_widget.values[variable] = values
else:
defaults[variable] = None
default_filters = {}
for variable in defaults:
if "__" in variable:
selector, operator = variable.split("__", 1)
else:
selector, operator = variable, None
if selector not in filter_defaults:
continue
applicable_defaults = filter_defaults[selector]
if callable(applicable_defaults):
applicable_defaults = applicable_defaults(selector,
tablename=tablename)
if isinstance(applicable_defaults, dict):
if operator in applicable_defaults:
default = applicable_defaults[operator]
else:
continue
elif operator in (None, "belongs", "eq"):
default = applicable_defaults
else:
continue
                if default is None:
                    # No default value configured for this operator
                    continue
                if not isinstance(default, list):
                    default = [default]
filter_widget.values[variable] = [str(v) if v is None else v
for v in default]
default_filters[variable] = ",".join(s3_unicode(v)
for v in default)
# @todo: make sure the applied default options are available in
# the filter widget - otherwise the user can not deselect
# them! (critical) Maybe enforce this by adding the default
# values to the available options in S3OptionsFilter and
# S3LocationFilter?
# Apply to resource
queries = S3URLQuery.parse(resource, default_filters)
add_filter = resource.add_filter
for alias in queries:
for q in queries[alias]:
add_filter(q)
return
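# -----------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original module): building and
# rendering a filter form from a list of widgets in a controller. The widget
# selectors, target datatable ID and element IDs are assumptions for
# illustration.
def _example_filter_form(resource, get_vars):
    """ Render an example filter form for a resource (illustrative only) """
    widgets = [S3TextFilter(["name"], label = current.T("Search")),
               S3OptionsFilter("status"),
               ]
    filter_form = S3FilterForm(widgets,
                               submit = True,
                               ajax = True,
                               url = URL(vars = {}),
                               _class = "filter-form",
                               _id = "datatable-filter-form",
                               )
    # Returns a FORM that submits to the datatable with ID "datatable"
    return filter_form.html(resource, get_vars, target = "datatable")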
# =============================================================================
class S3Filter(S3Method):
""" Back-end for filter forms """
def apply_method(self, r, **attr):
"""
Entry point for REST interface
@param r: the S3Request
@param attr: additional controller parameters
"""
representation = r.representation
if representation == "options":
# Return the filter options as JSON
return self._options(r, **attr)
elif representation == "json":
if r.http == "GET":
# Load list of saved filters
return self._load(r, **attr)
elif r.http == "POST":
if "delete" in r.get_vars:
# Delete a filter
return self._delete(r, **attr)
else:
# Save a filter
return self._save(r, **attr)
else:
r.error(405, current.ERROR.BAD_METHOD)
elif representation == "html":
return self._form(r, **attr)
else:
r.error(501, current.ERROR.BAD_FORMAT)
# -------------------------------------------------------------------------
def _form(self, r, **attr):
"""
Get the filter form for the target resource as HTML snippet
GET filter.html
@param r: the S3Request
@param attr: additional controller parameters
"""
r.error(501, current.ERROR.NOT_IMPLEMENTED)
# -------------------------------------------------------------------------
def _options(self, r, **attr):
"""
Get the updated options for the filter form for the target
resource as JSON
GET filter.options
@param r: the S3Request
@param attr: additional controller parameters
"""
resource = self.resource
get_config = resource.get_config
options = {}
filter_widgets = get_config("filter_widgets", None)
if filter_widgets:
fresource = current.s3db.resource(resource.tablename)
for widget in filter_widgets:
if hasattr(widget, "ajax_options"):
opts = widget.ajax_options(fresource)
if opts and isinstance(opts, dict):
options.update(opts)
options = json.dumps(options, separators=SEPARATORS)
current.response.headers["Content-Type"] = "application/json"
return options
# -------------------------------------------------------------------------
def _delete(self, r, **attr):
"""
Delete a filter, responds to POST filter.json?delete=
@param r: the S3Request
@param attr: additional controller parameters
"""
# Authorization, get pe_id
auth = current.auth
if auth.s3_logged_in():
pe_id = current.auth.user.pe_id
else:
pe_id = None
if not pe_id:
r.unauthorised()
# Read the source
source = r.body
source.seek(0)
try:
data = json.load(source)
except ValueError:
# Syntax error: no JSON data
r.error(501, current.ERROR.BAD_SOURCE)
# Try to find the record
db = current.db
s3db = current.s3db
table = s3db.pr_filter
record = None
record_id = data.get("id")
if record_id:
query = (table.id == record_id) & (table.pe_id == pe_id)
record = db(query).select(table.id, limitby=(0, 1)).first()
if not record:
r.error(501, current.ERROR.BAD_RECORD)
resource = s3db.resource("pr_filter", id=record_id)
success = resource.delete(format=r.representation)
if not success:
            r.error(400, resource.error)
else:
current.response.headers["Content-Type"] = "application/json"
return current.xml.json_message(deleted=record_id)
# -------------------------------------------------------------------------
def _save(self, r, **attr):
"""
Save a filter, responds to POST filter.json
@param r: the S3Request
@param attr: additional controller parameters
"""
# Authorization, get pe_id
auth = current.auth
if auth.s3_logged_in():
pe_id = current.auth.user.pe_id
else:
pe_id = None
if not pe_id:
r.unauthorised()
# Read the source
source = r.body
source.seek(0)
try:
data = json.load(source)
except ValueError:
r.error(501, current.ERROR.BAD_SOURCE)
# Try to find the record
db = current.db
s3db = current.s3db
table = s3db.pr_filter
record_id = data.get("id")
record = None
if record_id:
query = (table.id == record_id) & (table.pe_id == pe_id)
record = db(query).select(table.id, limitby=(0, 1)).first()
if not record:
r.error(404, current.ERROR.BAD_RECORD)
# Build new record
filter_data = {
"pe_id": pe_id,
"controller": r.controller,
"function": r.function,
"resource": self.resource.tablename,
"deleted": False,
}
title = data.get("title")
if title is not None:
filter_data["title"] = title
description = data.get("description")
if description is not None:
filter_data["description"] = description
query = data.get("query")
if query is not None:
filter_data["query"] = json.dumps(query)
url = data.get("url")
if url is not None:
filter_data["url"] = url
# Store record
onaccept = None
form = Storage(vars=filter_data)
if record:
success = db(table.id == record_id).update(**filter_data)
if success:
current.audit("update", "pr", "filter", form, record_id, "json")
info = {"updated": record_id}
onaccept = s3db.get_config(table, "update_onaccept",
s3db.get_config(table, "onaccept"))
else:
success = table.insert(**filter_data)
if success:
record_id = success
current.audit("create", "pr", "filter", form, record_id, "json")
info = {"created": record_id}
onaccept = s3db.get_config(table, "update_onaccept",
s3db.get_config(table, "onaccept"))
if onaccept is not None:
form.vars["id"] = record_id
callback(onaccept, form)
# Success/Error response
xml = current.xml
if success:
msg = xml.json_message(**info)
else:
msg = xml.json_message(False, 400)
current.response.headers["Content-Type"] = "application/json"
return msg
# -------------------------------------------------------------------------
def _load(self, r, **attr):
"""
Load filters
GET filter.json or GET filter.json?load=<id>
@param r: the S3Request
@param attr: additional controller parameters
"""
db = current.db
table = current.s3db.pr_filter
# Authorization, get pe_id
auth = current.auth
if auth.s3_logged_in():
pe_id = current.auth.user.pe_id
else:
pe_id = None
if not pe_id:
            r.unauthorised()
# Build query
query = (table.deleted != True) & \
(table.resource == self.resource.tablename) & \
(table.pe_id == pe_id)
# Any particular filters?
load = r.get_vars.get("load")
if load:
record_ids = [i for i in load.split(",") if i.isdigit()]
if record_ids:
if len(record_ids) > 1:
query &= table.id.belongs(record_ids)
else:
query &= table.id == record_ids[0]
else:
record_ids = None
# Retrieve filters
rows = db(query).select(table.id,
table.title,
table.description,
table.query)
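        # Illustrative response (not from the original code) -- the rows are
        # packed below into a JSON list such as:
        #   [{"id": 42,
        #     "title": "My open items",
        #     "description": "Saved filter for this resource",
        #     "query": [["~.status__belongs", "1,2"]]}]
        # An optional ?load=<id>[,<id>...] var restricts the result to those IDs.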
# Pack filters
filters = []
for row in rows:
filters.append({
"id": row.id,
"title": row.title,
"description": row.description,
"query": json.loads(row.query) if row.query else [],
})
# JSON response
current.response.headers["Content-Type"] = "application/json"
return json.dumps(filters, separators=SEPARATORS)
# =============================================================================
class S3FilterString(object):
"""
        Helper class to render a human-readable representation of a
        filter query, used as the representation method for JSON-serialized
        queries in saved filters.
"""
def __init__(self, resource, query):
"""
Constructor
            @param resource: the S3Resource
            @param query: the URL query (list of key-value pairs or a
string with such a list in JSON)
"""
if type(query) is not list:
try:
self.query = json.loads(query)
except ValueError:
self.query = []
else:
self.query = query
get_vars = {}
for k, v in self.query:
if v is not None:
key = resource.prefix_selector(k)
if key in get_vars:
value = get_vars[key]
if type(value) is list:
value.append(v)
else:
get_vars[key] = [value, v]
else:
get_vars[key] = v
self.resource = resource
self.get_vars = get_vars
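        # Illustrative example (not from the original code) of the loop above:
        # a query list such as
        #   [["~.name__like", "Foo"], ["~.name__like", "Bar"], ["~.status", "1"]]
        # yields get_vars
        #   {"~.name__like": ["Foo", "Bar"], "~.status": "1"}
        # i.e. repeated selectors are collapsed into lists of values.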
# -------------------------------------------------------------------------
def represent(self):
""" Render the query representation for the given resource """
default = ""
get_vars = self.get_vars
resource = self.resource
if not get_vars:
return default
else:
queries = S3URLQuery.parse(resource, get_vars)
# Get alternative field labels
labels = {}
get_config = resource.get_config
prefix = resource.prefix_selector
for config in ("list_fields", "notify_fields"):
fields = get_config(config, set())
for f in fields:
if type(f) is tuple:
labels[prefix(f[1])] = f[0]
# Iterate over the sub-queries
render = self._render
substrings = []
append = substrings.append
for alias, subqueries in queries.iteritems():
for subquery in subqueries:
s = render(resource, alias, subquery, labels=labels)
if s:
append(s)
if substrings:
result = substrings[0]
T = current.T
for s in substrings[1:]:
result = T("%s AND %s") % (result, s)
return result
else:
return default
# -------------------------------------------------------------------------
@classmethod
def _render(cls, resource, alias, query, invert=False, labels=None):
"""
Recursively render a human-readable representation of a
S3ResourceQuery.
@param resource: the S3Resource
            @param alias: the alias of the component the query belongs to
            @param query: the S3ResourceQuery
            @param invert: invert the query
            @param labels: dict of alternative field labels, keyed by field selector
"""
T = current.T
if not query:
return None
op = query.op
l = query.left
r = query.right
render = lambda q, r=resource, a=alias, invert=False, labels=labels: \
cls._render(r, a, q, invert=invert, labels=labels)
if op == query.AND:
# Recurse AND
l = render(l)
r = render(r)
if l is not None and r is not None:
if invert:
result = T("NOT %s OR NOT %s") % (l, r)
else:
result = T("%s AND %s") % (l, r)
else:
result = l if l is not None else r
elif op == query.OR:
# Recurse OR
l = render(l)
r = render(r)
if l is not None and r is not None:
if invert:
result = T("NOT %s AND NOT %s") % (l, r)
else:
result = T("%s OR %s") % (l, r)
else:
result = l if l is not None else r
elif op == query.NOT:
# Recurse NOT
result = render(l, invert=not invert)
else:
# Resolve the field selector against the resource
try:
rfield = l.resolve(resource)
except (AttributeError, SyntaxError):
return None
# Convert the filter values into the field type
try:
values = cls._convert(rfield, r)
except (TypeError, ValueError):
values = r
# Alias
selector = l.name
if labels and selector in labels:
rfield.label = labels[selector]
# @todo: for duplicate labels, show the table name
#else:
#tlabel = " ".join(s.capitalize() for s in rfield.tname.split("_")[1:])
#rfield.label = "(%s) %s" % (tlabel, rfield.label)
# Represent the values
if values is None:
values = T("None")
else:
list_type = rfield.ftype[:5] == "list:"
renderer = rfield.represent
if not callable(renderer):
renderer = lambda v: s3_unicode(v)
if hasattr(renderer, "linkto"):
#linkto = renderer.linkto
renderer.linkto = None
#else:
# #linkto = None
is_list = type(values) is list
try:
if is_list and hasattr(renderer, "bulk") and not list_type:
fvalues = renderer.bulk(values, list_type=False)
values = [fvalues[v] for v in values if v in fvalues]
elif list_type:
if is_list:
values = renderer(values)
else:
values = renderer([values])
else:
if is_list:
values = [renderer(v) for v in values]
else:
values = renderer(values)
except:
values = s3_unicode(values)
# Translate the query
result = cls._translate_query(query, rfield, values, invert=invert)
return result
# -------------------------------------------------------------------------
@classmethod
def _convert(cls, rfield, value):
"""
Convert a filter value according to the field type
before representation
@param rfield: the S3ResourceField
@param value: the value
"""
if value is None:
return value
ftype = rfield.ftype
if ftype[:5] == "list:":
if ftype[5:8] in ("int", "ref"):
ftype = long
else:
ftype = unicode
        elif ftype == "id" or ftype[:9] == "reference":
ftype = long
elif ftype == "integer":
ftype = int
elif ftype == "date":
ftype = datetime.date
elif ftype == "time":
ftype = datetime.time
elif ftype == "datetime":
ftype = datetime.datetime
elif ftype == "double":
ftype = float
elif ftype == "boolean":
ftype = bool
else:
ftype = unicode
convert = S3TypeConverter.convert
if type(value) is list:
output = []
append = output.append
for v in value:
try:
append(convert(ftype, v))
                except (TypeError, ValueError):
continue
else:
try:
output = convert(ftype, value)
            except (TypeError, ValueError):
output = None
return output
# -------------------------------------------------------------------------
@classmethod
def _translate_query(cls, query, rfield, values, invert=False):
"""
Translate the filter query into human-readable language
@param query: the S3ResourceQuery
@param rfield: the S3ResourceField the query refers to
@param values: the filter values
@param invert: invert the operation
"""
T = current.T
# Value list templates
vor = T("%s or %s")
vand = T("%s and %s")
# Operator templates
otemplates = {
query.LT: (query.GE, vand, "%(label)s < %(values)s"),
query.LE: (query.GT, vand, "%(label)s <= %(values)s"),
query.EQ: (query.NE, vor, T("%(label)s is %(values)s")),
query.GE: (query.LT, vand, "%(label)s >= %(values)s"),
query.GT: (query.LE, vand, "%(label)s > %(values)s"),
query.NE: (query.EQ, vor, T("%(label)s != %(values)s")),
query.LIKE: ("notlike", vor, T("%(label)s like %(values)s")),
query.BELONGS: (query.NE, vor, T("%(label)s = %(values)s")),
query.CONTAINS: ("notall", vand, T("%(label)s contains %(values)s")),
query.ANYOF: ("notany", vor, T("%(label)s contains any of %(values)s")),
"notall": (query.CONTAINS, vand, T("%(label)s does not contain %(values)s")),
"notany": (query.ANYOF, vor, T("%(label)s does not contain %(values)s")),
"notlike": (query.LIKE, vor, T("%(label)s not like %(values)s"))
}
# Quote values as necessary
ftype = rfield.ftype
if ftype in ("string", "text") or \
ftype[:9] == "reference" or \
ftype[:5] == "list:" and ftype[5:8] in ("str", "ref"):
if type(values) is list:
values = ['"%s"' % v for v in values]
elif values is not None:
values = '"%s"' % values
else:
values = current.messages["NONE"]
# Render value list template
def render_values(template=None, values=None):
if not template or type(values) is not list:
return str(values)
elif not values:
return "()"
elif len(values) == 1:
return values[0]
else:
return template % (", ".join(values[:-1]), values[-1])
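        # Illustrative examples (not from the original code), assuming the
        # default English templates:
        #   render_values(vor, ["A", "B", "C"])  -> "A, B or C"
        #   render_values(vand, ["1", "2"])      -> "1 and 2"
        #   render_values(vor, "5")              -> "5" (non-lists pass through)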
# Render the operator template
op = query.op
if op in otemplates:
inversion, vtemplate, otemplate = otemplates[op]
if invert:
inversion, vtemplate, otemplate = otemplates[inversion]
return otemplate % dict(label=rfield.label,
values=render_values(vtemplate, values))
else:
# Fallback to simple representation
# FIXME: resource not defined here!
return query.represent(resource)
# END =========================================================================
| {
"content_hash": "ea634f174cfbf3f4eb1e7fcf3f41118a",
"timestamp": "",
"source": "github",
"line_count": 2947,
"max_line_length": 143,
"avg_line_length": 36.701051917203934,
"alnum_prop": 0.4659294735479576,
"repo_name": "gnarula/eden_deployment",
"id": "d8e219d819d7ab1943832fd4528667a350d80eca",
"size": "108183",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "modules/s3/s3filter.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1305178"
},
{
"name": "JavaScript",
"bytes": "16338028"
},
{
"name": "PHP",
"bytes": "15220"
},
{
"name": "Perl",
"bytes": "500"
},
{
"name": "Python",
"bytes": "28218113"
},
{
"name": "Shell",
"bytes": "893"
},
{
"name": "XSLT",
"bytes": "2491556"
}
],
"symlink_target": ""
} |
from pgcli.packages.expanded import expanded_table
import pytest
def test_expanded_table_renders():
input = [("hello", 123),("world", 456)]
expected = """-[ RECORD 0 ]
name | hello
age | 123
-[ RECORD 1 ]
name | world
age | 456
"""
assert expected == expanded_table(input, ["name", "age"])
| {
"content_hash": "a8d12854997e1936fc1f72a9e2c5af25",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 61,
"avg_line_length": 21.857142857142858,
"alnum_prop": 0.6372549019607843,
"repo_name": "czchen/debian-pgcli",
"id": "6f2c6591c3216889f96c130869a352bfdbb4634b",
"size": "306",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/test_expanded.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "185742"
}
],
"symlink_target": ""
} |
"""
This subpackage provides classes to communicate with other applications via the
`Simple Application Messaging Protocol (SAMP)
<http://www.ivoa.net/documents/SAMP/>`_.
Before integration into Astropy it was known as
`SAMPy <https://pypi.python.org/pypi/sampy/>`_, and was developed by Luigi Paioro
(INAF - Istituto Nazionale di Astrofisica).
"""
from .constants import *
from .errors import *
from .utils import *
from .hub import *
from .client import *
from .integrated_client import *
from .hub_proxy import *
from astropy import config as _config
class Conf(_config.ConfigNamespace):
"""
Configuration parameters for `astropy.samp`.
"""
use_internet = _config.ConfigItem(
True,
"Whether to allow `astropy.samp` to use "
"the internet, if available.",
aliases=['astropy.samp.utils.use_internet'])
n_retries = _config.ConfigItem(10,
"How many times to retry communications when they fail")
conf = Conf()
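# Illustrative usage (not from the original module): the options above can be
# adjusted at runtime through the `conf` instance, e.g.
#   from astropy.samp import conf
#   conf.use_internet = False   # do not rely on internet connectivity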
| {
"content_hash": "203bf095080ed8df73951752d8c9edba",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 81,
"avg_line_length": 25.763157894736842,
"alnum_prop": 0.6996935648621042,
"repo_name": "bsipocz/astropy",
"id": "dcd6e9240860aac5908d5a0af75fb7171987eb84",
"size": "1043",
"binary": false,
"copies": "1",
"ref": "refs/heads/hacking",
"path": "astropy/samp/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "442627"
},
{
"name": "C++",
"bytes": "1057"
},
{
"name": "HTML",
"bytes": "1172"
},
{
"name": "Objective-C",
"bytes": "615"
},
{
"name": "Python",
"bytes": "9395160"
},
{
"name": "TeX",
"bytes": "853"
}
],
"symlink_target": ""
} |
from django.db.models import Q, Sum
from django.db.models.deletion import ProtectedError
from django.db.utils import IntegrityError
from django.forms.models import modelform_factory
from django.test import TestCase, skipIfDBFeature
from .models import (
A, Address, B, Board, C, CharLink, Company, Contact, Content, D, Developer,
Guild, HasLinkThing, Link, Node, Note, OddRelation1, OddRelation2,
Organization, Person, Place, Related, Restaurant, Tag, Team, TextLink,
)
class GenericRelationTests(TestCase):
def test_inherited_models_content_type(self):
"""
GenericRelations on inherited classes use the correct content type.
"""
p = Place.objects.create(name="South Park")
r = Restaurant.objects.create(name="Chubby's")
l1 = Link.objects.create(content_object=p)
l2 = Link.objects.create(content_object=r)
self.assertEqual(list(p.links.all()), [l1])
self.assertEqual(list(r.links.all()), [l2])
def test_reverse_relation_pk(self):
"""
The correct column name is used for the primary key on the
originating model of a query. See #12664.
"""
p = Person.objects.create(account=23, name='Chef')
Address.objects.create(street='123 Anywhere Place',
city='Conifer', state='CO',
zipcode='80433', content_object=p)
qs = Person.objects.filter(addresses__zipcode='80433')
self.assertEqual(1, qs.count())
self.assertEqual('Chef', qs[0].name)
def test_charlink_delete(self):
oddrel = OddRelation1.objects.create(name='clink')
CharLink.objects.create(content_object=oddrel)
oddrel.delete()
def test_textlink_delete(self):
oddrel = OddRelation2.objects.create(name='tlink')
TextLink.objects.create(content_object=oddrel)
oddrel.delete()
def test_q_object_or(self):
"""
SQL query parameters for generic relations are properly
grouped when OR is used (#11535).
In this bug the first query (below) works while the second, with the
query parameters the same but in reverse order, does not.
The issue is that the generic relation conditions do not get properly
grouped in parentheses.
"""
note_contact = Contact.objects.create()
org_contact = Contact.objects.create()
Note.objects.create(note='note', content_object=note_contact)
org = Organization.objects.create(name='org name')
org.contacts.add(org_contact)
# search with a non-matching note and a matching org name
qs = Contact.objects.filter(Q(notes__note__icontains=r'other note') |
Q(organizations__name__icontains=r'org name'))
self.assertIn(org_contact, qs)
# search again, with the same query parameters, in reverse order
qs = Contact.objects.filter(
Q(organizations__name__icontains=r'org name') |
Q(notes__note__icontains=r'other note'))
self.assertIn(org_contact, qs)
def test_join_reuse(self):
qs = Person.objects.filter(
addresses__street='foo'
).filter(
addresses__street='bar'
)
self.assertEqual(str(qs.query).count('JOIN'), 2)
def test_generic_relation_ordering(self):
"""
Ordering over a generic relation does not include extraneous
duplicate results, nor excludes rows not participating in the relation.
"""
p1 = Place.objects.create(name="South Park")
p2 = Place.objects.create(name="The City")
c = Company.objects.create(name="Chubby's Intl.")
Link.objects.create(content_object=p1)
Link.objects.create(content_object=c)
places = list(Place.objects.order_by('links__id'))
def count_places(place):
return len([p for p in places if p.id == place.id])
self.assertEqual(len(places), 2)
self.assertEqual(count_places(p1), 1)
self.assertEqual(count_places(p2), 1)
def test_target_model_is_unsaved(self):
"""Test related to #13085"""
# Fails with another, ORM-level error
dev1 = Developer(name='Joe')
note = Note(note='Deserves promotion', content_object=dev1)
with self.assertRaises(IntegrityError):
note.save()
def test_target_model_len_zero(self):
"""
Saving a model with a GenericForeignKey to a model instance whose
__len__ method returns 0 (Team.__len__() here) shouldn't fail (#13085).
"""
team1 = Team.objects.create(name='Backend devs')
note = Note(note='Deserve a bonus', content_object=team1)
note.save()
def test_target_model_bool_false(self):
"""
Saving a model with a GenericForeignKey to a model instance whose
__bool__ method returns False (Guild.__bool__() here) shouldn't fail
(#13085).
"""
g1 = Guild.objects.create(name='First guild')
note = Note(note='Note for guild', content_object=g1)
note.save()
@skipIfDBFeature('interprets_empty_strings_as_nulls')
def test_gfk_to_model_with_empty_pk(self):
"""Test related to #13085"""
# Saving model with GenericForeignKey to model instance with an
# empty CharField PK
b1 = Board.objects.create(name='')
tag = Tag(label='VP', content_object=b1)
tag.save()
def test_ticket_20378(self):
# Create a couple of extra HasLinkThing so that the autopk value
# isn't the same for Link and HasLinkThing.
hs1 = HasLinkThing.objects.create()
hs2 = HasLinkThing.objects.create()
hs3 = HasLinkThing.objects.create()
hs4 = HasLinkThing.objects.create()
l1 = Link.objects.create(content_object=hs3)
l2 = Link.objects.create(content_object=hs4)
self.assertSequenceEqual(HasLinkThing.objects.filter(links=l1), [hs3])
self.assertSequenceEqual(HasLinkThing.objects.filter(links=l2), [hs4])
self.assertSequenceEqual(HasLinkThing.objects.exclude(links=l2), [hs1, hs2, hs3])
self.assertSequenceEqual(HasLinkThing.objects.exclude(links=l1), [hs1, hs2, hs4])
def test_ticket_20564(self):
b1 = B.objects.create()
b2 = B.objects.create()
b3 = B.objects.create()
c1 = C.objects.create(b=b1)
c2 = C.objects.create(b=b2)
c3 = C.objects.create(b=b3)
A.objects.create(flag=None, content_object=b1)
A.objects.create(flag=True, content_object=b2)
self.assertSequenceEqual(C.objects.filter(b__a__flag=None), [c1, c3])
self.assertSequenceEqual(C.objects.exclude(b__a__flag=None), [c2])
def test_ticket_20564_nullable_fk(self):
b1 = B.objects.create()
b2 = B.objects.create()
b3 = B.objects.create()
d1 = D.objects.create(b=b1)
d2 = D.objects.create(b=b2)
d3 = D.objects.create(b=b3)
d4 = D.objects.create()
A.objects.create(flag=None, content_object=b1)
A.objects.create(flag=True, content_object=b1)
A.objects.create(flag=True, content_object=b2)
self.assertSequenceEqual(D.objects.exclude(b__a__flag=None), [d2])
self.assertSequenceEqual(D.objects.filter(b__a__flag=None), [d1, d3, d4])
self.assertSequenceEqual(B.objects.filter(a__flag=None), [b1, b3])
self.assertSequenceEqual(B.objects.exclude(a__flag=None), [b2])
def test_extra_join_condition(self):
# A crude check that content_type_id is taken in account in the
# join/subquery condition.
self.assertIn("content_type_id", str(B.objects.exclude(a__flag=None).query).lower())
# No need for any joins - the join from inner query can be trimmed in
# this case (but not in the above case as no a objects at all for given
# B would then fail).
self.assertNotIn(" join ", str(B.objects.exclude(a__flag=True).query).lower())
self.assertIn("content_type_id", str(B.objects.exclude(a__flag=True).query).lower())
def test_annotate(self):
hs1 = HasLinkThing.objects.create()
hs2 = HasLinkThing.objects.create()
HasLinkThing.objects.create()
b = Board.objects.create(name=str(hs1.pk))
Link.objects.create(content_object=hs2)
link = Link.objects.create(content_object=hs1)
Link.objects.create(content_object=b)
qs = HasLinkThing.objects.annotate(Sum('links')).filter(pk=hs1.pk)
# If content_type restriction isn't in the query's join condition,
# then wrong results are produced here as the link to b will also match
# (b and hs1 have equal pks).
self.assertEqual(qs.count(), 1)
self.assertEqual(qs[0].links__sum, link.id)
link.delete()
# Now if we don't have proper left join, we will not produce any
# results at all here.
# clear cached results
qs = qs.all()
self.assertEqual(qs.count(), 1)
# Note - 0 here would be a nicer result...
self.assertIs(qs[0].links__sum, None)
# Finally test that filtering works.
self.assertEqual(qs.filter(links__sum__isnull=True).count(), 1)
self.assertEqual(qs.filter(links__sum__isnull=False).count(), 0)
def test_filter_targets_related_pk(self):
HasLinkThing.objects.create()
hs2 = HasLinkThing.objects.create()
link = Link.objects.create(content_object=hs2)
self.assertNotEqual(link.object_id, link.pk)
self.assertSequenceEqual(HasLinkThing.objects.filter(links=link.pk), [hs2])
def test_editable_generic_rel(self):
GenericRelationForm = modelform_factory(HasLinkThing, fields='__all__')
form = GenericRelationForm()
self.assertIn('links', form.fields)
form = GenericRelationForm({'links': None})
self.assertTrue(form.is_valid())
form.save()
links = HasLinkThing._meta.get_field('links')
self.assertEqual(links.save_form_data_calls, 1)
def test_ticket_22998(self):
related = Related.objects.create()
content = Content.objects.create(related_obj=related)
Node.objects.create(content=content)
# deleting the Related cascades to the Content cascades to the Node,
# where the pre_delete signal should fire and prevent deletion.
with self.assertRaises(ProtectedError):
related.delete()
def test_ticket_22982(self):
place = Place.objects.create(name='My Place')
self.assertIn('GenericRelatedObjectManager', str(place.links))
def test_filter_on_related_proxy_model(self):
place = Place.objects.create()
Link.objects.create(content_object=place)
self.assertEqual(Place.objects.get(link_proxy__object_id=place.id), place)
| {
"content_hash": "f6fec9859e88ee88252f9309a0aed015",
"timestamp": "",
"source": "github",
"line_count": 253,
"max_line_length": 92,
"avg_line_length": 43.13833992094862,
"alnum_prop": 0.6367051493494594,
"repo_name": "tomchristie/django",
"id": "9add025a46a2a926b1039394fedac62d64f40021",
"size": "10914",
"binary": false,
"copies": "14",
"ref": "refs/heads/master",
"path": "tests/generic_relations_regress/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "55975"
},
{
"name": "HTML",
"bytes": "219349"
},
{
"name": "JavaScript",
"bytes": "252940"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "12092827"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
} |
import Gaffer
import GafferScene
GafferScene.TweakPlug = Gaffer.TweakPlug
GafferScene.TweaksPlug = Gaffer.TweaksPlug
| {
"content_hash": "2ba4c619af4d14731ed6e6c1d0664f23",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 42,
"avg_line_length": 23.6,
"alnum_prop": 0.8559322033898306,
"repo_name": "johnhaddon/gaffer",
"id": "34b66ad5e213727152d8a4f6160ef3aa129ca433",
"size": "1914",
"binary": false,
"copies": "3",
"ref": "refs/heads/main",
"path": "startup/GafferScene/tweakPlugCompatibility.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "5790"
},
{
"name": "C",
"bytes": "61993"
},
{
"name": "C++",
"bytes": "9571062"
},
{
"name": "CMake",
"bytes": "85201"
},
{
"name": "GLSL",
"bytes": "6208"
},
{
"name": "Python",
"bytes": "10271481"
},
{
"name": "Ruby",
"bytes": "419"
},
{
"name": "Shell",
"bytes": "14389"
}
],
"symlink_target": ""
} |
from .sub_resource import SubResource
class BackendAddressPool(SubResource):
"""Pool of backend IP addresses.
Variables are only populated by the server, and will be ignored when
sending a request.
:param id: Resource ID.
:type id: str
:ivar backend_ip_configurations: Gets collection of references to IP
addresses defined in network interfaces.
:vartype backend_ip_configurations:
list[~azure.mgmt.network.v2017_03_01.models.NetworkInterfaceIPConfiguration]
:ivar load_balancing_rules: Gets load balancing rules that use this
backend address pool.
:vartype load_balancing_rules:
list[~azure.mgmt.network.v2017_03_01.models.SubResource]
:ivar outbound_nat_rule: Gets outbound rules that use this backend address
pool.
:vartype outbound_nat_rule:
~azure.mgmt.network.v2017_03_01.models.SubResource
    :param provisioning_state: The provisioning state of the backend address
     pool resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
:type provisioning_state: str
:param name: Gets name of the resource that is unique within a resource
group. This name can be used to access the resource.
:type name: str
:param etag: A unique read-only string that changes whenever the resource
is updated.
:type etag: str
"""
_validation = {
'backend_ip_configurations': {'readonly': True},
'load_balancing_rules': {'readonly': True},
'outbound_nat_rule': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'backend_ip_configurations': {'key': 'properties.backendIPConfigurations', 'type': '[NetworkInterfaceIPConfiguration]'},
'load_balancing_rules': {'key': 'properties.loadBalancingRules', 'type': '[SubResource]'},
'outbound_nat_rule': {'key': 'properties.outboundNatRule', 'type': 'SubResource'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
}
def __init__(self, id=None, provisioning_state=None, name=None, etag=None):
super(BackendAddressPool, self).__init__(id=id)
self.backend_ip_configurations = None
self.load_balancing_rules = None
self.outbound_nat_rule = None
self.provisioning_state = provisioning_state
self.name = name
self.etag = etag
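# Minimal usage sketch (not part of the original module): instances are usually
# deserialized from service responses, but the model can also be built
# directly, e.g.
#   pool = BackendAddressPool(name="web-pool", provisioning_state="Updating")
# Read-only attributes such as backend_ip_configurations remain None until
# populated by the server.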
| {
"content_hash": "4c1313e4772773f2545d3e38aea2f71a",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 128,
"avg_line_length": 42.3448275862069,
"alnum_prop": 0.6640879478827362,
"repo_name": "AutorestCI/azure-sdk-for-python",
"id": "67f2ddd8888c2d687d2959e8b07fc5100e0c62eb",
"size": "2930",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "azure-mgmt-network/azure/mgmt/network/v2017_03_01/models/backend_address_pool.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34619070"
}
],
"symlink_target": ""
} |
import girder_client
GIRDER_URL = 'http://127.0.0.1/api/v1'
GIRDER_LOGIN = 'user'
GIRDER_API_KEY = 'API_KEY'
gc = girder_client.GirderClient(apiUrl=GIRDER_URL)
gc.authenticate(username=GIRDER_LOGIN, apiKey=GIRDER_API_KEY)
def handle_item(item, bc):
bc = bc + (item['name'],)
print '/'.join(bc)
def handle_folder(folder, bc):
bc = bc + (folder['name'],)
folders = gc.listFolder(folder['_id'], 'folder', limit=0)
items = gc.listItem(folder['_id'], limit=0)
for child in folders:
handle_folder(child, bc)
for item in items:
handle_item(item, bc)
def handle_collection(collection):
bc = ('collection', collection['name'])
folders = gc.listFolder(collection['_id'], 'collection', limit=0)
for folder in folders:
handle_folder(folder, bc)
def handle_user(user):
bc = ('user', user['email'])
folders = gc.listFolder(user['_id'], 'user', limit=0)
for folder in folders:
handle_folder(folder, bc)
def main():
users = gc.listUser(limit=0)
for user in users:
handle_user(user)
collections = gc.listCollection(limit=0)
for collection in collections:
handle_collection(collection)
if __name__ == '__main__':
main()
| {
"content_hash": "463a638190538a19a47e3c8978e25968",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 69,
"avg_line_length": 27.954545454545453,
"alnum_prop": 0.6341463414634146,
"repo_name": "data-exp-lab/girder",
"id": "56df5dcc218c25241120bd74b369bf09f2a97809",
"size": "1230",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "scripts/midas/walk_girder.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CMake",
"bytes": "42365"
},
{
"name": "CSS",
"bytes": "61237"
},
{
"name": "Dockerfile",
"bytes": "2416"
},
{
"name": "HCL",
"bytes": "1424"
},
{
"name": "HTML",
"bytes": "170299"
},
{
"name": "JavaScript",
"bytes": "1399182"
},
{
"name": "Mako",
"bytes": "8756"
},
{
"name": "Python",
"bytes": "2388013"
},
{
"name": "Roff",
"bytes": "17"
},
{
"name": "Ruby",
"bytes": "10593"
},
{
"name": "Shell",
"bytes": "7661"
}
],
"symlink_target": ""
} |
"""
vg_config.py: Default configuration values all here (and only here), as well as logic
for reading and generating config files.
"""
import argparse, sys, os, os.path, errno, random, subprocess, shutil, itertools, glob, tarfile
import doctest, re, json, collections, time, timeit
import logging, logging.handlers, struct, socket, threading
import string
import getpass
import pdb
import textwrap
import yaml
from toil_vg.vg_common import require, test_docker, test_singularity
# Determine what containerization to default to in the config. We use Docker
# with higher priority than Singularity because Singularity is our work-around.
default_container = "Docker" if test_docker() else ("Singularity" if test_singularity() else "None")
default_config = textwrap.dedent("""
# Toil VG Pipeline configuration file (created by toil-vg generate-config)
# This configuration file is formatted in YAML. Simply write the value (at least one space) after the colon.
# Edit the values in the configuration file and then rerun the pipeline: "toil-vg run"
#
# URLs can take the form: "/", "s3://"
# Local inputs follow the URL convention: "/full/path/to/input.txt"
# S3 URLs follow the convention: "s3://bucket/directory/file.txt"
#
# Comments (beginning with #) do not need to be removed.
# Command-line options take priority over parameters in this file.
######################################################################################################################
###########################################
### Toil resource tuning ###
# These parameters must be adjusted based on data and cluster size
# when running on anything other than single-machine mode
# TODO: Reduce number of parameters here. Seems fine grained, especially for disk/mem
# option to spin off config files for small/medium/large datasets?
# The following parameters assign resources to small helper jobs that typically don't
# do any computing outside of toil overhead. Generally do not need to be changed.
misc-cores: 1
misc-mem: '1G'
misc-disk: '1G'
# Resources allotted for vcf preprocessing.
preprocess-cores: 1
preprocess-mem: '2G'
preprocess-disk: '2G'
# Resources allotted for vg construction.
construct-cores: 1
construct-mem: '4G'
construct-disk: '2G'
# Resources allotted for xg indexing.
xg-index-cores: 1
xg-index-mem: '4G'
xg-index-disk: '2G'
# Resources allotted for xg indexing by chromosome (used for GBWT).
gbwt-index-cores: 1
gbwt-index-mem: '4G'
gbwt-index-disk: '2G'
gbwt-index-preemptable: True
# Resources allotted for gcsa pruning. Note that the vg mod commands used in
# this stage generally cannot take advantage of more than one thread
prune-cores: 1
prune-mem: '4G'
prune-disk: '2G'
# Resources allotted for gcsa indexing
gcsa-index-cores: 1
gcsa-index-mem: '4G'
gcsa-index-disk: '8G'
gcsa-index-preemptable: True
# Resources allotted for snarl indexing.
snarl-index-cores: 1
snarl-index-mem: '4G'
snarl-index-disk: '2G'
# Resources allotted for distance indexing.
distance-index-cores: 8
distance-index-mem: '4G'
distance-index-disk: '2G'
# Resources allotted for minimizer indexing.
minimizer-index-cores: 8
minimizer-index-mem: '4G'
minimizer-index-disk: '2G'
# Resources for BWA indexing.
bwa-index-cores: 1
bwa-index-mem: '4G'
bwa-index-disk: '2G'
# Resources for minimap2 indexing.
minimap2-index-cores: 1
minimap2-index-mem: '4G'
minimap2-index-disk: '2G'
# Resources for fastq splitting and gam merging
# Important to assign as many cores as possible here for large fastq inputs
fq-split-cores: 1
fq-split-mem: '4G'
fq-split-disk: '2G'
# Resources for *each* vg map job
# the number of vg map jobs is controlled by reads-per-chunk (below)
alignment-cores: 1
alignment-mem: '4G'
alignment-disk: '2G'
# Resources for chunking up a graph/gam for calling (and merging)
# typically takes the xg for the whole graph, and the gam for a chromosome
chunk-cores: 1
chunk-mem: '4G'
chunk-disk: '2G'
# Resources for augmenting a graph
augment-cores: 1
augment-mem: '4G'
augment-disk: '2G'
# Resources for calling each chunk (currently includes augment/call/genotype)
calling-cores: 1
calling-mem: '4G'
calling-disk: '2G'
# Resources for vcfeval
vcfeval-cores: 1
vcfeval-mem: '4G'
vcfeval-disk: '2G'
# Resources for vg sim
sim-cores: 2
sim-mem: '4G'
sim-disk: '2G'
###########################################
### Arguments Shared Between Components ###
# Use output store instead of toil for all intermediate files (use only for debugging)
force-outstore: False
# Toggle container support. Valid values are Docker / Singularity / None
# (commenting out or Null values equivalent to None)
container: """ + (default_container) + """
#############################
### Docker Tool Arguments ###
## Docker Tool List ##
## Locations of docker images.
## If empty or commented, then the tool will be run directly from the command line instead
## of through docker.
# Docker image to use for vg
vg-docker: 'quay.io/vgteam/vg:v1.34.0'
# Docker image to use for bcftools
bcftools-docker: 'quay.io/biocontainers/bcftools:1.9--h4da6232_0'
# Docker image to use for tabix
tabix-docker: 'lethalfang/tabix:1.7'
# Docker image to use for samtools
samtools-docker: 'quay.io/ucsc_cgl/samtools:latest'
# Docker image to use for bwa
bwa-docker: 'quay.io/ucsc_cgl/bwa:latest'
# Docker image to use for minimap2
minimap2-docker: 'evolbioinfo/minimap2:v2.14'
# Docker image to use for jq
jq-docker: 'celfring/jq'
# Docker image to use for rtg
rtg-docker: 'realtimegenomics/rtg-tools:3.8.4'
# Docker image to use for pigz
pigz-docker: 'quay.io/glennhickey/pigz:latest'
# Docker image to use to run R scripts
r-docker: 'rocker/tidyverse:3.5.1'
# Docker image to use for vcflib
vcflib-docker: 'quay.io/biocontainers/vcflib:1.0.0_rc1--0'
# Docker image to use for Freebayes
freebayes-docker: 'maxulysse/freebayes:1.2.5'
# Docker image to use for Platypus
platypus-docker: 'quay.io/biocontainers/platypus-variant:0.8.1.1--htslib1.7_1'
# Docker image to use for hap.py
happy-docker: 'donfreed12/hap.py:v0.3.9'
# Docker image to use for bedtools
bedtools-docker: 'quay.io/biocontainers/bedtools:2.27.0--1'
# Docker image to use for bedops
bedops-docker: 'quay.io/biocontainers/bedops:2.4.35--0'
# Docker image to use for sveval R package
sveval-docker: 'jmonlong/sveval:version-2.0.0'
# Docker image to use for gatk
gatk-docker: 'broadinstitute/gatk:4.1.1.0'
# Docker image to use for gatk3
gatk3-docker: 'broadinstitute/gatk3:3.8-1'
# Docker image to use for snpEff
snpEff-docker: 'quay.io/biocontainers/snpeff:5.0--hdfd78af_1'
# Docker image to use for picard
picard-docker: 'broadinstitute/picard:2.21.9'
# Docker image to use for whatshap
whatshap-docker: 'quay.io/biocontainers/whatshap:0.18--py37h6bb024c_0'
# Docker image to use for eagle
eagle-docker: 'quay.io/cmarkello/eagle'
# Docker image to use for vcf2shebang
vcf2shebang-docker: 'quay.io/cmarkello/vcf2shebang_grch38:latest'
# Docker image to use for cadd
cadd-docker: 'quay.io/cmarkello/cadd_1.6:latest'
# Docker image to use for cadd editor
caddeditor-docker: 'quay.io/cmarkello/cadd_editor:latest'
# Docker image to use for bmtb
bmtb-docker: 'quay.io/cmarkello/bmtb_grch38:latest'
# Docker image to use for vcftools
vcftools-docker: 'biocontainers/vcftools:v0.1.16-1-deb_cv1'
# Docker image to use for vt
vt-docker: 'quay.io/biocontainers/vt:0.57721--heae7c10_3'
# Docker image to use for deepvariant
deepvariant-docker: 'google/deepvariant:1.1.0'
# Docker image to use for glnexus
glnexus-docker: 'quay.io/mlin/glnexus:v1.2.7'
# Docker image to use for abra2
abra2-docker: 'dceoy/abra2:latest'
# Docker image to use for deeptrio
deeptrio-docker: 'google/deepvariant:deeptrio-1.1.0'
# Docker image to use for mosaicism detection
mosaicism-docker: 'quay.io/cmarkello/mosaicism_detector:latest'
##############################
### vg_construct Arguments ###
# Number of times to iterate normalization when --normalized used in construction
normalize-iterations: 10
##########################
### vg_index Arguments ###
# Options to pass to vg prune.
# (limit to general parameters, currently -k, -e, -s.
# Rest decided by toil-vg via other options like prune-mode)
prune-opts: []
# Options to pass to vg gcsa indexing
gcsa-opts: []
# Options to pass to vg minimizer indexing
minimizer-opts: []
# Randomly phase unphased variants when constructing GBWT
force-phasing: True
########################
### vg_map Arguments ###
# Toggle whether reads are split into chunks
single-reads-chunk: False
# Number of reads per chunk to use when splitting up fastq.
# (only applies if single-reads-chunk is False)
# Each chunk will correspond to a vg map job
reads-per-chunk: 10000000
# Core arguments for vg mapping (do not include file names or -t/--threads)
# Note -i/--interleaved will be ignored. Use the --interleaved option
# on the toil-vg command line instead
map-opts: []
# Core arguments for vg multipath mapping (do not include file names or -t/--threads)
mpmap-opts: ['--output-fmt', 'GAM']
# Core arguments for vg giraffe mapping (do not include file names or -t/--threads)
giraffe-opts: []
########################
### vg_msga Arguments ###
# Core arguments for vg msgaing (do not include file names or -t/--threads)
msga-opts: []
# Number of steps to context-expand target regions before aligning with msga
msga-context: 50
#########################
### vg_call Arguments ###
# Options to pass to vg filter when running vg call. (do not include file names or -t/--threads)
filter-opts: []
# Options to pass to vg augment. (do not include any file names or -t/--threads or -a/--augmentation-mode)
augment-opts: []
# Options to pass to vg pack. (do not include any file names or -t/--threads)
pack-opts: []
# Options to pass to vg call. (do not include file/contig/sample names or -t/--threads)
call-opts: []
#########################
### vcfeval Arguments ###
# Options to pass to rtg vcfeval. (do not include filenames or threads or BED)
vcfeval-opts: ['--ref-overlap']
#########################
### sim and mapeval Arguments ###
# Options to pass to vg sim (should not include -x, -n, -s or -a)
sim-opts: ['--read-length', '150', '--frag-len', '570', '--frag-std-dev', '165', '--sub-rate', '0.01', '--indel-rate', '0.002']
# Options to pass to bwa
bwa-opts: []
# Options to pass to minimap2
minimap2-opts: ['-ax', 'sr']
""")
whole_genome_config = textwrap.dedent("""
# Toil VG Pipeline configuration file (created by toil-vg generate-config)
# This configuration file is formatted in YAML. Simply write the value (at least one space) after the colon.
# Edit the values in the configuration file and then rerun the pipeline: "toil-vg run"
#
# URLs can take the form: "/", "s3://"
# Local inputs follow the URL convention: "/full/path/to/input.txt"
# S3 URLs follow the convention: "s3://bucket/directory/file.txt"
#
# Comments (beginning with #) do not need to be removed.
# Command-line options take priority over parameters in this file.
######################################################################################################################
###########################################
### Toil resource tuning ###
# These parameters must be adjusted based on data and cluster size
# when running on anything other than single-machine mode
# TODO: Reduce number of parameters here. Seems fine grained, especially for disk/mem
# option to spin off config files for small/medium/large datasets?
# The following parameters assign resources to small helper jobs that typically don't
# do any computing outside of toil overhead. Generally do not need to be changed.
misc-cores: 1
misc-mem: '1G'
misc-disk: '1G'
# Resources allotted for vcf preprocessing.
preprocess-cores: 1
preprocess-mem: '8G'
preprocess-disk: '64G'
# Resources allotted for vg construction.
construct-cores: 1
construct-mem: '64G'
construct-disk: '64G'
# Resources allotted for xg indexing.
xg-index-cores: 16
xg-index-mem: '200G'
xg-index-disk: '100G'
# Resources allotted for xg indexing by chromosome (used for GBWT).
gbwt-index-cores: 4
gbwt-index-mem: '50G'
gbwt-index-disk: '100G'
gbwt-index-preemptable: True
# Resources allotted for gcsa pruning. Note that the vg mod commands used in
# this stage generally cannot take advantage of more than one thread
prune-cores: 2
prune-mem: '60G'
prune-disk: '60G'
# Resources allotted for gcsa indexing
gcsa-index-cores: 32
gcsa-index-mem: '220G'
gcsa-index-disk: '2200G'
gcsa-index-preemptable: True
# Resources allotted for snarl indexing
snarl-index-cores: 1
snarl-index-mem: '200G'
snarl-index-disk: '100G'
# Resources allotted for distance indexing.
distance-index-cores: 16
distance-index-mem: '220G'
distance-index-disk: '100G'
# Resources allotted for minimizer indexing.
minimizer-index-cores: 16
minimizer-index-mem: '110G'
minimizer-index-disk: '200G'
# Resources for BWA indexing.
bwa-index-cores: 1
bwa-index-mem: '40G'
bwa-index-disk: '40G'
# Resources for minimap2 indexing.
minimap2-index-cores: 1
minimap2-index-mem: '40G'
minimap2-index-disk: '40G'
# Resources for fastq splitting and gam merging
# Important to assign as many cores as possible here for large fastq inputs
fq-split-cores: 32
fq-split-mem: '4G'
fq-split-disk: '200G'
# Resources for *each* vg map job
# the number of vg map jobs is controlled by reads-per-chunk (below)
alignment-cores: 32
alignment-mem: '100G'
alignment-disk: '100G'
# Resources for chunking up a graph/gam for calling (and merging)
# typically takes the xg for the whole graph, and the gam for a chromosome
chunk-cores: 16
chunk-mem: '100G'
chunk-disk: '100G'
# Resources for augmenting a graph
augment-cores: 8
augment-mem: '64G'
augment-disk: '64G'
# Resources for calling each chunk (currently includes augment/call/genotype)
calling-cores: 8
calling-mem: '60G'
calling-disk: '16G'
# Resources for vcfeval
vcfeval-cores: 32
vcfeval-mem: '64G'
vcfeval-disk: '64G'
# Resources for vg sim
sim-cores: 2
sim-mem: '20G'
sim-disk: '100G'
###########################################
### Arguments Shared Between Components ###
# Use output store instead of toil for all intermediate files (use only for debugging)
force-outstore: False
# Toggle container support. Valid values are Docker / Singularity / None
# (commenting out or Null values equivalent to None)
container: """ + (default_container) + """
#############################
### Docker Tool Arguments ###
## Docker Tool List ##
## Locations of docker images.
## If empty or commented, then the tool will be run directly from the command line instead
## of through docker.
# Docker image to use for vg
vg-docker: 'quay.io/vgteam/vg:v1.34.0'
# Docker image to use for bcftools
bcftools-docker: 'quay.io/biocontainers/bcftools:1.9--h4da6232_0'
# Docker image to use for tabix
tabix-docker: 'lethalfang/tabix:1.7'
# Docker image to use for samtools
samtools-docker: 'quay.io/ucsc_cgl/samtools:latest'
# Docker image to use for bwa
bwa-docker: 'quay.io/ucsc_cgl/bwa:latest'
# Docker image to use for minimap2
minimap2-docker: 'evolbioinfo/minimap2:v2.14'
# Docker image to use for jq
jq-docker: 'celfring/jq'
# Docker image to use for rtg
rtg-docker: 'realtimegenomics/rtg-tools:3.8.4'
# Docker image to use for pigz
pigz-docker: 'quay.io/glennhickey/pigz:latest'
# Docker image to use to run R scripts
r-docker: 'rocker/tidyverse:3.5.1'
# Docker image to use for vcflib
vcflib-docker: 'quay.io/biocontainers/vcflib:1.0.0_rc1--0'
# Docker image to use for Freebayes
freebayes-docker: 'maxulysse/freebayes:1.2.5'
# Docker image to use for Platypus
platypus-docker: 'quay.io/biocontainers/platypus-variant:0.8.1.1--htslib1.7_1'
# Docker image to use for hap.py
happy-docker: 'donfreed12/hap.py:v0.3.9'
# Docker image to use for bedtools
bedtools-docker: 'quay.io/biocontainers/bedtools:2.27.0--1'
# Docker image to use for bedops
bedops-docker: 'quay.io/biocontainers/bedops:2.4.35--0'
# Docker image to use for sveval R package
sveval-docker: 'jmonlong/sveval:version-2.0.0'
# Docker image to use for gatk
gatk-docker: 'broadinstitute/gatk:4.1.1.0'
# Docker image to use for gatk3
gatk3-docker: 'broadinstitute/gatk3:3.8-1'
# Docker image to use for snpEff
snpEff-docker: 'quay.io/biocontainers/snpeff:5.0--hdfd78af_1'
# Docker image to use for picard
picard-docker: 'broadinstitute/picard:2.21.9'
# Docker image to use for whatshap
whatshap-docker: 'quay.io/biocontainers/whatshap:0.18--py37h6bb024c_0'
# Docker image to use for eagle
eagle-docker: 'quay.io/cmarkello/eagle'
# Docker image to use for vcf2shebang
vcf2shebang-docker: 'quay.io/cmarkello/vcf2shebang_grch38:latest'
# Docker image to use for cadd
cadd-docker: 'quay.io/cmarkello/cadd_1.6:latest'
# Docker image to use for cadd editor
caddeditor-docker: 'quay.io/cmarkello/cadd_editor:latest'
# Docker image to use for bmtb
bmtb-docker: 'quay.io/cmarkello/bmtb_grch38:latest'
# Docker image to use for vcftools
vcftools-docker: 'biocontainers/vcftools:v0.1.16-1-deb_cv1'
# Docker image to use for vt
vt-docker: 'quay.io/biocontainers/vt:0.57721--heae7c10_3'
# Docker image to use for deepvariant
deepvariant-docker: 'google/deepvariant:1.1.0'
# Docker image to use for glnexus
glnexus-docker: 'quay.io/mlin/glnexus:v1.2.7'
# Docker image to use for abra2
abra2-docker: 'dceoy/abra2:latest'
# Docker image to use for deeptrio
deeptrio-docker: 'google/deepvariant:deeptrio-1.1.0'
# Docker image to use for mosaicism detection
mosaicism-docker: 'quay.io/cmarkello/mosaicism_detector:latest'
##############################
### vg_construct Arguments ###
# Number of times to iterate normalization when --normalized used in construction
normalize-iterations: 10
##########################
### vg_index Arguments ###
# Options to pass to vg prune.
# (limit to general parameters, currently -k, -e, -s.
# Rest decided by toil-vg via other options like prune-mode)
prune-opts: []
# Options to pass to vg gcsa indexing
gcsa-opts: []
# Options to pass to vg minimizer indexing
minimizer-opts: []
# Randomly phase unphased variants when constructing GBWT
force-phasing: True
########################
### vg_map Arguments ###
# Toggle whether reads are split into chunks
single-reads-chunk: False
# Number of reads per chunk to use when splitting up fastq.
# (only applies if single-reads-chunk is False)
# Each chunk will correspond to a vg map job
reads-per-chunk: 50000000
# Core arguments for vg mapping (do not include file names or -t/--threads)
# Note -i/--interleaved will be ignored. Use the --interleaved option
# on the toil-vg command line instead
map-opts: []
# Core arguments for vg multipath mapping (do not include file names or -t/--threads)
mpmap-opts: ['--output-fmt', 'GAM']
# Core arguments for vg giraffe mapping (do not include file names or -t/--threads)
giraffe-opts: []
########################
### vg_msga Arguments ###
# Core arguments for vg msgaing (do not include file names or -t/--threads)
msga-opts: []
# Number of steps to context-expand target regions before aligning with msga
msga-context: 2000
#########################
### vg_call Arguments ###
# Options to pass to vg filter when running vg call. (do not include file names or -t/--threads)
filter-opts: []
# Options to pass to vg augment. (do not include any file names or -t/--threads or -a/--augmentation-mode)
augment-opts: []
# Options to pass to vg pack. (do not include any file names or -t/--threads)
pack-opts: []
# Options to pass to vg call. (do not include file/contig/sample names or -t/--threads)
call-opts: []
#########################
### vcfeval Arguments ###
# Options to pass to rtg vcfeval. (do not include filenames or threads or BED)
vcfeval-opts: ['--ref-overlap']
#########################
### sim and mapeval Arguments ###
# Options to pass to vg sim (should not include -x, -n, -s or -a)
sim-opts: ['--read-length', '150', '--frag-len', '570', '--frag-std-dev', '165', '--sub-rate', '0.01', '--indel-rate', '0.002']
# Options to pass to bwa
bwa-opts: []
# Options to pass to minimap2
minimap2-opts: ['-ax', 'sr']
""")
def generate_config(whole_genome = False):
return whole_genome_config if whole_genome is True else default_config
def make_opts_list(x_opts):
opts_list = list([a for a in x_opts.split(' ') if len(a)])
# get rid of any -t or --threads while we're at it
for t in ['-t', '--threads']:
if t in opts_list:
pos = opts_list.index(t)
del opts_list[pos:pos+2]
return opts_list
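# Illustrative example (not from the original code):
#   make_opts_list('--min-mem 1 -t 8 --band-width 256')
#   -> ['--min-mem', '1', '--band-width', '256']
# The string is split on spaces and any -t/--threads pair is dropped, since
# thread counts come from the per-job core settings in the config instead.
# (The option names above are only placeholders.)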
def apply_config_file_args(args):
"""
Merge args from the config file and the parser, giving priority to the parser.
"""
# turn --*_opts from strings to lists to be consistent with config file
for x_opts in ['map_opts', 'call_opts', 'recall_opts', 'filter_opts', 'recall_filter_opts', 'genotype_opts',
'vcfeval_opts', 'sim_opts', 'bwa_opts', 'minimap2_opts', 'gcsa_opts', 'minimizer_opts', 'mpmap_opts',
'giraffe_opts', 'augment_opts', 'pack_opts', 'prune_opts']:
if x_opts in list(args.__dict__.keys()) and type(args.__dict__[x_opts]) is str:
args.__dict__[x_opts] = make_opts_list(args.__dict__[x_opts])
# do the same thing for more_mpmap_opts which is a list of strings
if 'more_mpmap_opts' in list(args.__dict__.keys()) and args.__dict__['more_mpmap_opts']:
for i, m_opts in enumerate(args.__dict__['more_mpmap_opts']):
if isinstance(m_opts, str):
args.__dict__['more_mpmap_opts'][i] = make_opts_list(m_opts)
# If no config file given, we generate a default one
wg_config = 'whole_genome_config' in list(args.__dict__.keys()) and args.whole_genome_config
if 'config' not in list(args.__dict__.keys()) or args.config is None:
config = generate_config(whole_genome = wg_config)
else:
if wg_config:
raise RuntimeError('--config and --whole_genome_config cannot be used together')
require(os.path.exists(args.config), 'Config, {}, not found. Please run '
'"toil-vg generate-config > {}" to create.'.format(args.config, args.config))
with open(args.config) as conf:
config = conf.read()
# Parse config
parsed_config = {x.replace('-', '_'): y for x, y in list(yaml.safe_load(config).items())}
if 'prune_opts_2' in parsed_config:
raise RuntimeError('prune-opts-2 from config no longer supported')
options = argparse.Namespace(**parsed_config)
# Add in options from the program arguments to the arguments in the config file
# program arguments that are also present in the config file will overwrite the
# arguments in the config file
for args_key in args.__dict__:
# Add in missing program arguments to config option list and
# overwrite config options with corresponding options that are not None in program arguments
if (args.__dict__[args_key] is not None) or (args_key not in list(options.__dict__.keys())):
options.__dict__[args_key] = args.__dict__[args_key]
return options
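# Illustrative example (not from the original code): if the YAML config sets
# reads-per-chunk: 10000000 but the command line passes a non-None value for
# the corresponding option (e.g. --reads_per_chunk 5000000, name illustrative),
# the loop above leaves options.reads_per_chunk == 5000000 -- program arguments
# win whenever they are not None, otherwise the config value is kept.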
def config_subparser(parser):
"""
Create a subparser for config. Should pass in results of subparsers.add_parser()
"""
parser.add_argument("--whole_genome", action="store_true",
help="Make config tuned to process a whole genome on 32-core instances")
parser.add_argument("--config", type=argparse.FileType('w'), default=sys.stdout,
help="config file to write to")
def config_main(options):
""" config just prints out a file """
options.config.write(generate_config(options.whole_genome))
| {
"content_hash": "d5f304f18d3d1e4a28267e20520975df",
"timestamp": "",
"source": "github",
"line_count": 757,
"max_line_length": 127,
"avg_line_length": 31.27212681638045,
"alnum_prop": 0.6946310142356271,
"repo_name": "vgteam/toil-vg",
"id": "df0090d061a5a285d0e6a305a758bbe5c5f22c35",
"size": "23698",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/toil_vg/vg_config.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "685"
},
{
"name": "Makefile",
"bytes": "5736"
},
{
"name": "Python",
"bytes": "997222"
},
{
"name": "Shell",
"bytes": "56495"
}
],
"symlink_target": ""
} |
import numpy as np
from sklearn.metrics import cohen_kappa_score as kappa # noqa
from sklearn.metrics import mean_squared_error as mse
from sklearn.metrics import mean_absolute_error as mae # noqa
from sklearn.metrics import r2_score as r2 # noqa
from ..const import EPS
def mape(y, p):
"""Mean Absolute Percentage Error (MAPE).
Args:
y (numpy.array): target
p (numpy.array): prediction
Returns:
e (numpy.float64): MAPE
"""
filt = np.abs(y) > EPS
return np.mean(np.abs(1 - p[filt] / y[filt]))
def rmse(y, p):
"""Root Mean Squared Error (RMSE).
Args:
y (numpy.array): target
p (numpy.array): prediction
Returns:
e (numpy.float64): RMSE
"""
    # check that target and prediction shapes match
assert y.shape == p.shape
return np.sqrt(mse(y, p))
def gini(y, p):
"""Normalized Gini Coefficient.
Args:
y (numpy.array): target
p (numpy.array): prediction
Returns:
e (numpy.float64): normalized Gini coefficient
"""
# check and get number of samples
assert y.shape == p.shape
n_samples = y.shape[0]
# sort rows on prediction column
# (from largest to smallest)
arr = np.array([y, p]).transpose()
true_order = arr[arr[:, 0].argsort()][::-1, 0]
pred_order = arr[arr[:, 1].argsort()][::-1, 0]
# get Lorenz curves
l_true = np.cumsum(true_order) / np.sum(true_order)
l_pred = np.cumsum(pred_order) / np.sum(pred_order)
l_ones = np.linspace(1 / n_samples, 1, n_samples)
# get Gini coefficients (area between curves)
g_true = np.sum(l_ones - l_true)
g_pred = np.sum(l_ones - l_pred)
# normalize to true Gini coefficient
return g_pred / g_true
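# Quick illustrative check (not part of the original module): a prediction that
# ranks samples exactly like the target gives a normalized Gini of 1, while a
# perfect prediction gives zero RMSE and MAPE. Runnable via
# `python -m kaggler.metrics.regression` (module path assumed).
if __name__ == '__main__':
    _y = np.array([1.0, 4.0, 2.0, 8.0])
    _p = np.array([0.1, 0.4, 0.2, 0.8])  # same ranking as _y, scaled by 0.1
    print(gini(_y, _p))   # -> 1.0
    print(mape(_y, _p))   # -> 0.9 (every prediction is 10% of its target)
    print(rmse(_y, _y))   # -> 0.0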
| {
"content_hash": "1e2cb45ebd890b7c505c0f6701d587b3",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 62,
"avg_line_length": 23.56756756756757,
"alnum_prop": 0.6100917431192661,
"repo_name": "jeongyoonlee/Kaggler",
"id": "c049c64cdedc895ed0045cbb35a493481112cbee",
"size": "1744",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kaggler/metrics/regression.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1155"
},
{
"name": "C++",
"bytes": "7971"
},
{
"name": "Cython",
"bytes": "46033"
},
{
"name": "Python",
"bytes": "125376"
}
],
"symlink_target": ""
} |
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('scheduling_partitioned', '0006_unique_indexes'),
]
operations = [
# (case_id, alert_schedule_id) covered by unique(case_id, alert_schedule_id, recipient_type, recipient_id)
migrations.AlterIndexTogether(
name='casealertscheduleinstance',
index_together=set([('domain', 'active', 'next_event_due'), ('active', 'next_event_due')]),
),
# (case_id, timed_schedule_id) covered by unique(case_id, timed_schedule_id, recipient_type, recipient_id)
migrations.AlterIndexTogether(
name='casetimedscheduleinstance',
index_together=set([('domain', 'active', 'next_event_due'), ('active', 'next_event_due')]),
),
]
| {
"content_hash": "88619b01153a598df652e60fe57a237e",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 114,
"avg_line_length": 38.904761904761905,
"alnum_prop": 0.627906976744186,
"repo_name": "dimagi/commcare-hq",
"id": "5ef4182b59f5e30aae503e8fa1e51136b94a4b01",
"size": "817",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "corehq/messaging/scheduling/scheduling_partitioned/migrations/0007_index_cleanup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "82928"
},
{
"name": "Dockerfile",
"bytes": "2341"
},
{
"name": "HTML",
"bytes": "2589268"
},
{
"name": "JavaScript",
"bytes": "5889543"
},
{
"name": "Jinja",
"bytes": "3693"
},
{
"name": "Less",
"bytes": "176180"
},
{
"name": "Makefile",
"bytes": "1622"
},
{
"name": "PHP",
"bytes": "2232"
},
{
"name": "PLpgSQL",
"bytes": "66704"
},
{
"name": "Python",
"bytes": "21779773"
},
{
"name": "Roff",
"bytes": "150"
},
{
"name": "Shell",
"bytes": "67473"
}
],
"symlink_target": ""
} |
import re
from reportlab import platypus
from facturapdf import flowables, helper
def element(item):
elements = {
'framebreak': {'class': platypus.FrameBreak},
'simpleline': {'class': flowables.SimpleLine, 'cast': {0: float, 1: float}},
'paragraph': {'class': flowables.Paragraph},
'image': {'class': helper.get_image, 'cast': {1: float}},
'spacer': {'class': platypus.Spacer, 'cast': {0: float, 1: float}}
}
if isinstance(item, str):
        match = re.search(r'(?P<name>\w+)(\[(?P<args>.+)\])?', item)
if match and match.group('name') in elements:
flowable = elements[match.group('name')]
args = [] if not match.group('args') else match.group('args').split('|')
if 'cast' in flowable:
for index, cls in iter(flowable['cast'].items()):
args[index] = cls(args[index])
return flowable['class'](*args)
return item
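# Illustrative usage (not from the original code): element() parses strings of
# the form name[arg1|arg2|...], so for example
#   element('spacer[0|10]')        -> platypus.Spacer(0.0, 10.0)
#   element('image[logo.png|120]') -> helper.get_image('logo.png', 120.0)
#   element('framebreak')          -> platypus.FrameBreak()
# Non-string items are returned unchanged, and chapter() below simply maps
# element() over its arguments. ('logo.png' is only a placeholder.)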
def chapter(*args):
return [element(item) for item in args] | {
"content_hash": "5d5ad535ca159687f98c9e05c5c66302",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 84,
"avg_line_length": 32.53125,
"alnum_prop": 0.56388088376561,
"repo_name": "initios/factura-pdf",
"id": "fe7889e7ca8545bc28c5f41c3a73643d2fc70a2c",
"size": "1041",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "facturapdf/generators.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "23357"
}
],
"symlink_target": ""
} |
"""Support for IP Cameras."""
from __future__ import annotations
import asyncio
from contextlib import closing
import logging
import aiohttp
import async_timeout
import requests
from requests.auth import HTTPBasicAuth, HTTPDigestAuth
import voluptuous as vol
from homeassistant.components.camera import PLATFORM_SCHEMA, Camera
from homeassistant.const import (
CONF_AUTHENTICATION,
CONF_NAME,
CONF_PASSWORD,
CONF_USERNAME,
CONF_VERIFY_SSL,
HTTP_BASIC_AUTHENTICATION,
HTTP_DIGEST_AUTHENTICATION,
)
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.aiohttp_client import (
async_aiohttp_proxy_web,
async_get_clientsession,
)
_LOGGER = logging.getLogger(__name__)
CONF_MJPEG_URL = "mjpeg_url"
CONF_STILL_IMAGE_URL = "still_image_url"
CONTENT_TYPE_HEADER = "Content-Type"
DEFAULT_NAME = "Mjpeg Camera"
DEFAULT_VERIFY_SSL = True
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_MJPEG_URL): cv.url,
vol.Optional(CONF_STILL_IMAGE_URL): cv.url,
vol.Optional(CONF_AUTHENTICATION, default=HTTP_BASIC_AUTHENTICATION): vol.In(
[HTTP_BASIC_AUTHENTICATION, HTTP_DIGEST_AUTHENTICATION]
),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PASSWORD): cv.string,
vol.Optional(CONF_USERNAME): cv.string,
vol.Optional(CONF_VERIFY_SSL, default=DEFAULT_VERIFY_SSL): cv.boolean,
}
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up a MJPEG IP Camera."""
filter_urllib3_logging()
if discovery_info:
config = PLATFORM_SCHEMA(discovery_info)
async_add_entities([MjpegCamera(config)])
def filter_urllib3_logging():
"""Filter header errors from urllib3 due to a urllib3 bug."""
urllib3_logger = logging.getLogger("urllib3.connectionpool")
if not any(isinstance(x, NoHeaderErrorFilter) for x in urllib3_logger.filters):
urllib3_logger.addFilter(NoHeaderErrorFilter())
def extract_image_from_mjpeg(stream):
"""Take in a MJPEG stream object, return the jpg from it."""
data = b""
for chunk in stream:
data += chunk
jpg_end = data.find(b"\xff\xd9")
if jpg_end == -1:
continue
jpg_start = data.find(b"\xff\xd8")
if jpg_start == -1:
continue
return data[jpg_start : jpg_end + 2]
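# Hedged usage sketch (not part of the original file): the helper above scans the
# byte stream for the JPEG start-of-image (0xFFD8) and end-of-image (0xFFD9) markers
# and returns the first complete frame. Assuming a streaming `requests` response for
# an MJPEG URL (the URL below is a placeholder), one still could be extracted with:
#
#   with closing(requests.get("http://camera.local/mjpeg", stream=True, timeout=10)) as resp:
#       frame = extract_image_from_mjpeg(resp.iter_content(102400))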
class MjpegCamera(Camera):
"""An implementation of an IP camera that is reachable over a URL."""
def __init__(self, device_info):
"""Initialize a MJPEG camera."""
super().__init__()
self._name = device_info.get(CONF_NAME)
self._authentication = device_info.get(CONF_AUTHENTICATION)
self._username = device_info.get(CONF_USERNAME)
self._password = device_info.get(CONF_PASSWORD)
self._mjpeg_url = device_info[CONF_MJPEG_URL]
self._still_image_url = device_info.get(CONF_STILL_IMAGE_URL)
self._auth = None
if (
self._username
and self._password
and self._authentication == HTTP_BASIC_AUTHENTICATION
):
self._auth = aiohttp.BasicAuth(self._username, password=self._password)
self._verify_ssl = device_info.get(CONF_VERIFY_SSL)
async def async_camera_image(
self, width: int | None = None, height: int | None = None
) -> bytes | None:
"""Return a still image response from the camera."""
# DigestAuth is not supported
if (
self._authentication == HTTP_DIGEST_AUTHENTICATION
or self._still_image_url is None
):
image = await self.hass.async_add_executor_job(self.camera_image)
return image
websession = async_get_clientsession(self.hass, verify_ssl=self._verify_ssl)
try:
with async_timeout.timeout(10):
response = await websession.get(self._still_image_url, auth=self._auth)
image = await response.read()
return image
except asyncio.TimeoutError:
_LOGGER.error("Timeout getting camera image from %s", self._name)
except aiohttp.ClientError as err:
_LOGGER.error("Error getting new camera image from %s: %s", self._name, err)
return None
def camera_image(
self, width: int | None = None, height: int | None = None
) -> bytes | None:
"""Return a still image response from the camera."""
if self._username and self._password:
if self._authentication == HTTP_DIGEST_AUTHENTICATION:
auth: HTTPDigestAuth | HTTPBasicAuth = HTTPDigestAuth(
self._username, self._password
)
else:
auth = HTTPBasicAuth(self._username, self._password)
req = requests.get(
self._mjpeg_url,
auth=auth,
stream=True,
timeout=10,
verify=self._verify_ssl,
)
else:
req = requests.get(self._mjpeg_url, stream=True, timeout=10)
with closing(req) as response:
return extract_image_from_mjpeg(response.iter_content(102400))
async def handle_async_mjpeg_stream(self, request):
"""Generate an HTTP MJPEG stream from the camera."""
        # aiohttp doesn't support DigestAuth -> fall back to the default handler
if self._authentication == HTTP_DIGEST_AUTHENTICATION:
return await super().handle_async_mjpeg_stream(request)
# connect to stream
websession = async_get_clientsession(self.hass, verify_ssl=self._verify_ssl)
stream_coro = websession.get(self._mjpeg_url, auth=self._auth)
return await async_aiohttp_proxy_web(self.hass, request, stream_coro)
@property
def name(self):
"""Return the name of this camera."""
return self._name
class NoHeaderErrorFilter(logging.Filter):
"""Filter out urllib3 Header Parsing Errors due to a urllib3 bug."""
def filter(self, record):
"""Filter out Header Parsing Errors."""
return "Failed to parse headers" not in record.getMessage()
| {
"content_hash": "d6b0b959090c428044f55c88e20694fa",
"timestamp": "",
"source": "github",
"line_count": 186,
"max_line_length": 88,
"avg_line_length": 33.38172043010753,
"alnum_prop": 0.6329521662103398,
"repo_name": "aronsky/home-assistant",
"id": "d486f78d334f55255b06093de6cb2263b3b6ebd6",
"size": "6209",
"binary": false,
"copies": "5",
"ref": "refs/heads/dev",
"path": "homeassistant/components/mjpeg/camera.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2443"
},
{
"name": "Python",
"bytes": "38448521"
},
{
"name": "Shell",
"bytes": "4910"
}
],
"symlink_target": ""
} |
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.db import models
from jsonfield import JSONField
from openslides.utils.models import RESTModelMixin
from .access_permissions import (
ObjectProtocolAccessPermissions,
ProtocolAccessPermissions,
)
class ObjectProtocolManager(models.Manager):
"""
Customized model manager for prefetching content objects.
"""
def get_full_queryset(self):
"""
Returns the normal queryset with all objectProtocols. In the background all
related items are prefetched from the database.
"""
return self.get_queryset().prefetch_related('content_object')
class ObjectProtocol(RESTModelMixin, models.Model):
"""
Model for a protocol entry for an agenda item.
"""
access_permissions = ObjectProtocolAccessPermissions()
objects = ObjectProtocolManager()
content_type = models.ForeignKey(
ContentType,
on_delete=models.SET_NULL,
null=True,
blank=True)
object_id = models.PositiveIntegerField(null=True, blank=True)
content_object = GenericForeignKey()
protocol = models.TextField(blank=True)
class Meta:
default_permissions = ()
permissions = (('can_write_protocol', 'Can write protocol'),)
unique_together = ('content_type', 'object_id')
class Protocol(RESTModelMixin, models.Model):
"""
Model for a protocol entry for an agenda item.
"""
access_permissions = ProtocolAccessPermissions()
    protocol = JSONField(default=list)
class Meta:
default_permissions = ()
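# Hedged usage sketch (not part of the original file): ObjectProtocol attaches one
# free-text protocol to any model instance through the generic foreign key; the
# agenda item below is purely illustrative:
#
#   item = some_agenda_item                      # any model instance
#   ObjectProtocol.objects.create(content_object=item, protocol="Adopted unanimously.")
#   ObjectProtocol.objects.get_full_queryset()   # prefetches the content objects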
| {
"content_hash": "7ecc7fb9771d5910c8c8e3b4064b36b3",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 83,
"avg_line_length": 27.883333333333333,
"alnum_prop": 0.6999402271368799,
"repo_name": "OpenSlides/openslides-protocol",
"id": "3c35357bd518007a47eff7eb74af47245a4d9749",
"size": "1673",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "openslides_protocol/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "16994"
},
{
"name": "JavaScript",
"bytes": "24114"
},
{
"name": "Python",
"bytes": "14681"
}
],
"symlink_target": ""
} |
from __future__ import print_function
from __future__ import division
import ftplib
from fnmatch import fnmatch
from StringIO import StringIO
import pkgutil
import importlib
from path import path
from fuzzywuzzy import process
import resources
from magicmethods import load_class
from yml import ConfigReader, HistoryReader
from messenger import Messenger
class FTP(object):
"""
Model of a basic FTP server. Inherits a few methods from class:`ftplib.FTP`
as well as extends with a few new methods making it more like `ftputil`.
  .. code-block:: python
>>> from cosmid.core import FTP
>>> ftp = FTP("ftp.ensembl.org", "anonymous", "")
>>> ftp.ls("")
['ls-lR.gz',
'.message',
'.ok_to_rsync',
'pub',
...
]
:param str url: URL for the server to connect to
:param str username: Username for an account on the server
  :param str password: Password to the account on the server
"""
def __init__(self, url, username, password):
super(FTP, self).__init__()
self.url = url
self.username = username
self.password = password
# Connect to the FTP server
self.ftp = ftplib.FTP(url, username, password)
# Shortcuts
self.nlst = self.ftp.nlst
self.retrbinary = self.ftp.retrbinary
self.sendcmd = self.ftp.sendcmd
self.size = self.ftp.size
def ls(self, dir_path="."):
"""
<public> Functions like `ls` in Unix where it lists the folders and files
in a specific directory. Compared to `nlst` it doesn't return the full
path for each file/folder.
:param str dir_path: (optional) Path to directory
:returns: List of files/folders in the directory
:rtype: list
"""
return [path_.split("/")[-1] for path_ in self.nlst(dir_path)]
def file(self, path):
"""
<public> Open a file-like object for reading txt-files on the server
without downloading it locally first.
:param str path: Path to file
:returns: File-like object
:rtype: StringIO object
"""
r = StringIO()
self.retrbinary("RETR " + path, r.write)
# Rewind the "file"
r.seek(0)
return r
def fileSize(self, path):
"""
<public> Returns the file size of a certain file on the server in MB.
:param str path: Path to file
:returns: Size of file in megabytes
:rtype: int
"""
# Switch to Binary mode (to be able to get size)
self.sendcmd("TYPE i")
return round(self.size(path)/1000000, 2)
def listFiles(self, dirPath, pattern):
"""
<public> Like `ls` but has the option to match file/folder names to a
pattern.
:param str dirPath: Path to directory
:param str pattern: Glob-like pattern to match against files
:returns: List of files/folders in the directory matching the pattern
:rtype: list
"""
return [item for item in self.ls(dirPath) if fnmatch(item, pattern)]
def commit(self, fullPath, dest, mode=None):
"""
<public>: Saves a file from the server, locally in the `dest`.
:param str fullPath: Path from the cwd to the file to download
:param str dest: Local path+filename where you want to save the file
:param str mode: (optional) "b" for binary files
:returns: 0: OK, >0: NOT OK
"""
# Is the remote file gzipped? (Binary format)
if mode is None:
if fullPath.endswith(".gz") or fullPath.endswith(".bam"):
mode = "b"
else:
# Default mode is to download non-binary files
mode = ""
    # Open connection to the destination file and retrieve the file
with open(dest, "w" + mode) as handle:
self.retrbinary("RETR " + fullPath, handle.write)
class Registry(object):
"""
  Hub of sorts to talk with different `Cosmid`-related files and resources. Can
be seen as the API endpoint for `Cosmid`.
"""
def __init__(self):
super(Registry, self).__init__()
# Set up YAML parser for optional config file
self.config_path = path("cosmid.yaml")
self.config = ConfigReader(self.config_path)
# Extract stuff from config
self.email = self.config.find("email")
# Path to resource storage directory
self.directory = path(self.config.find("directory", default="resources"))
# Load history file consisting of already downloaded resources
self.history_path = path(self.directory + "/.cosmid.yaml")
self.history = HistoryReader(self.history_path)
# Set up a :class:`cosmid.messenger.Messenger`
self.messenger = Messenger("cosmid")
def get(self, resource_id, type_="class"):
"""
<public> Returns an instance of the specified resource class. Dodges an
``ImportError`` when failing to import a resource and returns ``None``
instead.
.. code-block:: python
>>> resource = registry.get("ccds")
>>> resource.latest()
'Hs104'
:param str resource_id: The resource key (name of module)
:returns: A class instance of the resource
"""
try:
if type_ == "class":
return load_class("cosmid.resources.{}.Resource".format(resource_id))()
elif type_ == "module":
return importlib.import_module("cosmid.resources." + resource_id)
else:
raise ValueError("Argument must be either 'class' or 'module'.")
except ImportError:
return None
def grab(self, resource_id, target, collapse=False):
"""
    <public> Returns all that's necessary to download a specific resource.
The method will try to correct both ``resource_id`` and the ``target``
release tag.
:param str resource_id: What resource to download
:param str target: What release of the resource to download
"""
# Either import resource class or print warning and move on.
# Test matching the resource ID
    options = [item[0] for item in self.ls()]
    matched_id = self.matchOne(resource_id, options)
    if matched_id is None:
      # Report the original query string rather than the failed (None) match
      message = "Couldn't match resource ID: '{}'".format(resource_id)
      self.messenger.send("warning", message)
      return None, None, None, None
    resource_id = matched_id
# Get the resource
resource = self.get(resource_id)
# Now let's figure out the version
# No specified version will match to the latest resource release
if target == "latest":
version = resource.latest()
else:
options = resource.versions()
version = self.matchOne(target, options)
if version is None:
message = ("Couldn't match version '{id}#{v}'; {vers}"
.format(v=target, id=resource.id, vers=", ".join(options)))
self.messenger.send("warning", message)
return None, None, None, None
# Get the goahead! (that we haven't already downloaded it)
if self.goahead(resource, version):
# Finally we can determine the paths to download and save the files
dl_paths = resource.paths(version)
if collapse:
# The user can select to store all downloaded files in the same
# directory
resource_dir = ""
else:
# Or by default separate different resources into subdirectories
resource_dir = "/" + resource.id
save_paths = [("{dir}{mid}/{file}"
.format(dir=self.directory, mid=resource_dir, file=name))
for name in resource.names]
# Add the resource to the history file as downloaded
self.history.add(resource_id, {
"version": version,
"target": target,
"names": resource.names,
"sources": dl_paths
})
return resource, dl_paths, save_paths, version
else:
# The resource was already downloaded
return None, None, None, None
def ls(self):
"""
<public> Returns a list of resource IDs and docstrings for all the
included resource modules.
*Reference*: http://stackoverflow.com/questions/1707709/list-all-the-modules-that-are-part-of-a-python-package
.. code-block:: python
>>> registry.ls()
[('ccds', 'A curated database of generic element'), ...]
:returns: A list of tuples: ``(resource_id, docstring)``
:rtype: list
"""
# Store everything here
items = []
prefix = resources.__name__ + "."
# Fetch all the resource modules
modules = pkgutil.iter_modules(resources.__path__, prefix)
# Loop over all resource modules
for importer, modpath, ispkg in modules:
# Strip path
modname = modpath.split(".")[-1]
# Load the `Resource` class for the module
module = self.get(modname, type_="module")
# Save name and docstring
items.append((modname, module.__doc__))
return items
def search(self, query, limit=5):
"""
<public> Fuzzy matches a query string against each of the resource IDs and
returns a limited number of results in order of match score.
.. code-block:: python
>>> registry.search("asmebly", limit=2)
[('ensembl_assembly', 68),
('ncbi_assembly', 68)]
:param str query: A string to match against the resource IDs
:param int limit: (optional) A maximum number of results to return
    :returns: A list of tuples: ``(resource_id, score)``
:rtype: list
"""
# List all the available resources
resources = self.ls()
# Focus on resource IDs
resource_ids = [resource[0] for resource in resources]
# Fuzzy match against the resource IDs and return in order of best match
return process.extract(query, resource_ids, limit=limit)
def matchOne(self, target, options, threshold=60):
"""
<public> Fuzzy matches e.g. a target version tag against a list of options.
Returns the most likely match if the match score is sufficient.
.. code-block:: python
>>> resource = registry.get("ccds")
>>> registry.matchOne(104, resource.versions())
'Hs104'
>>> registry.matchOne("ensembl", registry.ls())
'ensembl_assembly'
:param object target: Any Python object to match with
:param list options: A list of possible options to match against
:param int threshold: A lower threshold for accepting a best match
:returns: The object with the best match (unless score is below threshold)
:rtype: Python object
"""
# Match against the options and extract the top match only
result, score = process.extractOne(target, map(str, options))
    # Arbitrary lower limit for returning a *matching* result
if score >= threshold:
return result
else:
return None
def goahead(self, resource, version):
"""
Determines whether it's any idea in going ahead with a download.
"""
# Get any currently downloaded resources
current = self.history.find(resource.id, default={})
# Make sure we haven't already downloaded the resource
if current.get("version") == version:
message = "'{}' already downloaded and up-to-date.".format(resource.id)
self.messenger.send("update", message)
return False
return True
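# Hedged usage sketch (not part of the original module): `grab` fuzzy-matches the
# resource ID and release tag, records the download in the history file and returns
# the remote paths together with the local save paths, so a caller could do roughly:
#
#   registry = Registry()
#   resource, dl_paths, save_paths, version = registry.grab("ccds", "latest")
#   if resource is not None:
#     for remote, local in zip(dl_paths, save_paths):
#       print("would fetch {} -> {}".format(remote, local))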
| {
"content_hash": "f6c73d88a98507d1cff52517897042ca",
"timestamp": "",
"source": "github",
"line_count": 361,
"max_line_length": 114,
"avg_line_length": 30.20498614958449,
"alnum_prop": 0.6507703595011005,
"repo_name": "robinandeer/cosmid",
"id": "79419658bd13cb07a9349c61ca36427fd98c2b89",
"size": "10927",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cosmid/core.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "64984"
}
],
"symlink_target": ""
} |
from login.permissions import cache_clear
def mqtt_cache_clear():
# call cache_clear locally
cache_clear()
# and signal through mqtt
from mqtt.publish import SingletonPublishClient
SingletonPublishClient().publish_message('cache_clear')
| {
"content_hash": "ac9de1aa2cd7747e5a0543f72cf3bc9f",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 59,
"avg_line_length": 23.727272727272727,
"alnum_prop": 0.7394636015325671,
"repo_name": "EMSTrack/WebServerAndClient",
"id": "5b0134d3dfc2be04506d7c623d016a14796ddf41",
"size": "261",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mqtt/cache_clear.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "10055"
},
{
"name": "HTML",
"bytes": "105332"
},
{
"name": "JavaScript",
"bytes": "169499"
},
{
"name": "Python",
"bytes": "609216"
}
],
"symlink_target": ""
} |
from lib import *
#from keras.layers.merge import Concatenate
from keras.layers import Merge
import copy
from collections import Counter
from sklearn.metrics import confusion_matrix
from sklearn.metrics import f1_score
import math
word2topic = pickle.load(open("word2topic", "r"))
embedding = pickle.load(open("word2topic", "r"))
#embedding = pickle.load(open("embedding", "r"))
vocab = word2topic.keys()
max_words = 25#30
depth_embed = 100#370
depth_distance = 100#368#70#100
flag = 1
##def getEmbedding_word2vec(sentence, model):
#
# global max_words, depth, no_features, train_length
## model = model[0]
# list = np.array([])
# for word in sentence:
# if word in model.wv.vocab:
# list = np.append(list, model.wv[word])
#
# #print list.size
# if(list.size > depth*max_words):
# list = list[0:depth*max_words]
# #print sentence
# pad = np.zeros(depth*max_words - list.size)
# list = np.append(list, pad)
# #print list.shape
# return list
def get_topic_rep(topic, word2topic, word2vec):
global vocab
topics = str(topic).split(' ')
v = np.zeros(np.vstack(([word2topic['donald']], [word2vec.wv['donald']])).shape)
counter = 0
# if topics[0] in vocab:
# v = np.append(v, word#2topic[topics[0]])
## counter = 0
## if
for counter in range(len(topics)):
if topics[counter] in vocab:
# print topics[counter]
try:
v += np.vstack(([word2topic[topics[counter]]], [word2vec.wv[topics[counter]]]))
except:
v += np.vstack(([word2topic[topics[counter]]], [np.zeros(word2vec.wv['donald'].shape)]))
# print counter + 1
v /= (counter + 1) * 1.0
# print type(v)
return v
def custom_loss(y_true, y_pred):
y = K.argmax(y_true, axis=1)
print y[0:5]
## y_true = np.array(y_true).astype('int64')
##
print y_true[0:5]
## length = y_true.get_shape()
## l = tuple([length[i].value for i in range(0, len(length))])[0]
# for i in range(y_pred.get_shape()[0].value):
# y_pred[i] = y_pred[i][y[i]]
#
# y_pred = K.log(y_pred[:, K.constant(y, dtype='int64')])
return K.mean(K.categorical_crossentropy(y_pred[np.where(K.eval(K.equal(y, 0)))[0], :], y_true[np.where(K.eval(K.equal(y, 0)))[0], :]), K.categorical_crossentropy(y_pred[np.where(K.eval(K.equal(y, 1)))[0], :], y_true[np.where(K.eval(K.equal(y, 1)))[0], :]), K.categorical_crossentropy(y_pred[np.where(K.eval(K.equal(y, 2)))[0], :], y_true[np.where(K.eval(K.equal(y, 2)))[0], :]))
# return K.sum(K.mean(K.dot(K.equal(y, 0), y_pred)), K.mean(K.dot(K.equal(y, 1), y_pred)), K.mean(K.dot(K.equal(y, 2), y_pred)))
def evaluate(y_test, thresholded_pred):
print "accuracy", (sum(abs(y_test == thresholded_pred))) / float(len(thresholded_pred))
print Counter(y_test)
print Counter(thresholded_pred)
print confusion_matrix(y_test, thresholded_pred)
print "f1 is", f1_score(y_test, thresholded_pred, average='macro')
def distance_embed(sentence):
global max_words, depth_distance, word2topic
list = np.array([])
for word in sentence:
if word in vocab:
list = np.append(list, word2topic[word])
#print list.size
if(list.size > max_words * depth_distance):
list = list[0:max_words * depth_distance]
#print sentence
pad = np.zeros(max_words * depth_distance - list.size)
list = np.append(list, pad)
#print list.shape
return list
def getEmbedding(sentence, word2topic, word2vec):
global max_words, depth_embed, embedding, flag#, depth_distance
list = np.array([])
for word in sentence:
if word in vocab:
try:
if flag:
list = np.vstack(([word2topic[word]], [word2vec.wv[word]]))
flag = 0
else:
list = np.append(list, np.vstack(([word2topic[word]], [word2vec.wv[word]])))
except:
list = np.append(list, np.vstack(([word2topic[word]], [np.zeros(word2vec.wv['donald'].shape)])))
# try:
# list = np.append(list, model[word])
## print "found", word
# except:
# list = np.append(list, np.zeros(model['donald'].shape))
# print word
#print list.size
if(list.shape[1] > max_words * depth_embed):
list = list[:, 0:max_words * depth_embed]
#print sentence
pad = np.zeros((2, max_words * depth_embed - list.shape[1]))
list = np.concatenate((list, pad), axis=1)
#print list.shape
return list
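# Hedged note (not part of the original script): both embedding helpers above pad or
# truncate each tweet to a fixed budget. With max_words = 25 and depth_embed = 100,
# getEmbedding yields a 2 x (25 * 100) = 2 x 2500 array per tweet, which is later
# reshaped to (num_tweets, max_words, 2, depth_embed) before training.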
#def getPOS(sentence):
#
# global max_words#, depth
# all_tags = CMUTweetTagger.runtagger_parse(sentence)
# list = np.array([])
# for i in range(len(sentence)):
# if sentence[i] in vocab:
# list = np.append(list, all_tags[i])
#
# #print list.size
# if(list.size > max_words):
# list = list[0:max_words]
# #print sentence
# pad = np.zeros(max_words - list.size)
# list = np.append(list, pad)
# #print list.shape
# return list
#def getEmbedding(sentence):
# global word2topic, vocab
# max_words = 30
# list = []#np.array([])
# for word in sentence:
# if word in vocab:
## list = np.append(list, word2topic[word])
# list.append(word2topic[word])
## list = np.array(list)
# #print list.size
# if(len(list) > max_words):
# list = list[0:max_words]
# #print sentence
# pad = [0] * 100# * (max_words - len(list))#np.zeros(max_words - list.size)
# for i in range((max_words - len(list))):
# list.append(pad)
## list.append(pad)
# #print list.shape
# return list
#getEmbedding(df['tokenized_sents'][0])
def run_model():
global tech, politics, sports, music, genre, max_words, depth_embed, depth_distance, word2topic, vocab, K
# with K.tf.device('/gpu:1'):
gpu_options = K.tf.GPUOptions(per_process_gpu_memory_fraction=1.0)#0.8)#0.2)
sess = K.tf.Session(config=K.tf.ConfigProto(gpu_options=gpu_options))
# all_topics = np.concatenate((tech, politics, music, sports))
# print "AAAAAAAAAAAAAAAAAAAAA"
# print len(all_topics)
# print all_topics
try:
[X, y, df, d] = pickle.load(open("data_rnn", "r"))
print d
# df = df[df["topic"].isin(all_topics)]
except:
#filename = "Homework2_data.csv"
# word2topic = pickle.load(open("word2topic", "r"))
[df, df0, df3] = readData(filename1, filename2)
#df = df[df["topic"].isin(all_topics)]
df['sentiment'] = pd.to_numeric(df['sentiment'])#, errors='coerce')
# df = df.dropna(subset=['sentiment'])
# topics_array = np.array(([tech, politics, music, sports]))
# print genre
# for index, row in df.iterrows():
# tweet_topic = row['topic']
# # print "tweet_topic", tweet_topic
# for i in range(len(topics_array)):
# if tweet_topic in topics_array[i]:
# # print "ta", topics_array[i]
# # df["topic"][index] = genre[i]
# df.ix[index, 'topic'] = genre[i]
# # print "df", df["topic"][index]
# break
# Remove topics of no interest
print "length of df is", len(df)
# print "from joined data\n", Counter(list(df["user_id"])).most_common(50)
indices = []
# df['tweet'] = df['tweet'].apply(cleanhtml).apply(cleanUrl).apply(removeMention).apply(removeTrailingHash);
# df['tweet'] = df['tweet'].apply(cleanhtml).apply(cleanUrl).apply(removeTrailingHash);
df['tweet'] = df['tweet'].apply(cleanhtml).apply(cleanUrl)#.apply(removeTrailingHash);
df['tweet'] = tokenize_and_stopwords(df['tweet'])
# df = df.sample(frac=1).reset_index(drop=True)
# df = shuffle(df)
print df.size
df['tokenized_sents'] = df.apply(lambda row: nltk.word_tokenize(row['tweet']), axis=1)
try:
word2vec = wv.Word2Vec.load("word2vec")
#model.similarity("this", "is")
# model.init_sims(replace=True)
print "loaded"
except:
word2vec = wv.Word2Vec(df["tokenized_sents"], size=depth_embed, window=5, min_count=5, workers=4)
word2vec.save("word2vec")
#X.shape[0]#7349
df['embedding'] = df['tokenized_sents'].apply(getEmbedding, args=(word2topic, word2vec,))
# df['word2vec'] = df['tokenized_sents'].apply(getEmbedding, args=(word2vec.wv,))
X = list(df['embedding'])
# X_w = list(df['word2vec'])
X = np.reshape(np.ravel(X), (len(X), max_words, 2, depth_embed))
# X_w = np.reshape(np.ravel(X_w), (len(X_w), max_words, depth_embed))
# a = copy.deepcopy(X)#np.array(df['embedding'])
df['tweet_rep'] = df['tokenized_sents'].apply(distance_embed)
#### a = list(df['tweet_rep'])
#### a = np.reshape(np.ravel(a), (len(a), max_words, depth_distance))
df['topic_rep'] = df['topic'].apply(get_topic_rep, args=(word2topic, word2vec,))
d = []
# a = np.reshape(a, ())
#### b = list(df['topic_rep'])
#### print b[0]
# print b
# print b.shape
#### b = np.reshape(np.ravel(np.ravel(b)), (X.shape[0], 1, depth_distance))
##### c = (a - b)**2
###### d = c
##### for i1 in range(len(c)):
##### for j1 in range(len(c[0])):
##### d.append(abs(sum(c[i1][j1])))
##### d = np.array(d)
##### d = np.reshape(d, (len(a), max_words))
##### d[d==0] = 0.1
##### d = 1.0 / d
##### print "d[0] is !!!", d[0]
# df['distance'] = d#1.0 / d#sum(sum(sum(abs(np.array(df['embedding']) - np.array(df['topic_rep'])))))
# one_hot =
# df['pos'] = df['tweet'].apply(getPOS)
# X = np.column_stack((np.array(df['embedding']), np.array(df['pos'])))
# for i in range(len(X)):
# X[i] = X[i][0:]
# B = np.array([])
# np.dstack((X, B)).shape
# y = np.array(df['sentiment'])
y = np.array(pd.get_dummies(df['sentiment']))
### No dumping
# try:
# pickle.dump([X, y, df, d], open("data_rnn", "wb"))
# except:
# "dumping data failed"
print len(X[0])
print len(X)
X_train = X[0:12200]
X_test = X[12200:]
# X_train_w = X_w[0:12200]
# X_test_w = X_w[12200:]
    # X_train = np.concatenate((X_train, X_train_w), axis=1)  # disabled: X_train_w is commented out above
y_train = y[0:12200]
y_test = y[12200:]
print " Y train!!\n", y_train[0:5]
print list(df['sentiment'])[0:5]
print y_test[0:5]
one_hot = list(df['topic_rep'])#(pd.get_dummies(df['topic']))
one_hot = np.reshape(np.ravel(np.ravel(one_hot)), (len(one_hot), 1, 2, depth_distance))
## LOAD MODEL
try:
model = load_model('modelc_rnn_new')
except:
# Word model
model_word = Sequential()
model_word.add(Bidirectional(LSTM(3 * max_words, activation='relu', return_sequences=True), input_shape=(max_words, 2, depth_embed)))
model_word.add(Dropout(0.2))
model_word.add(Bidirectional(LSTM(2 * max_words, activation='relu', return_sequences=True)))
model_word.add(Dropout(0.2))
model_word.add(Bidirectional(LSTM(max_words, activation='tanh', return_sequences=True)))
model_word.add(Dropout(0.2))
# model_word_w = Sequential()
# model_word_w.add(Bidirectional(LSTM(3 * max_words, activation='relu', return_sequences=True), input_shape=(max_words, depth_embed)))
# model_word_w.add(Dropout(0.2))
# model_word_w.add(Bidirectional(LSTM(2 * max_words, activation='relu', return_sequences=True), input_shape=(max_words, depth_embed)))
# model_word_w.add(Dropout(0.2))
# model_word_w.add(Bidirectional(LSTM(max_words, activation='tanh', return_sequences=True)))
# model_word_w.add(Dropout(0.2))
# model_word.add(Bidirectional(LSTM(max_words, return_sequences=True)))
# model_word.add(Dropout(0.2))
# model_word.add(Flatten())
# model_word.add(MaxPooling2D(pool_size=(2, 1)))
# model_word.add(Dropout(0.2))
# model_word.add(Dense((max_words), activation="tanh"))
## Reverse
# model_word_r = Sequential()
# model_word_r.add(LSTM(max_words, input_shape=(max_words, depth), consume_less='gpu', go_backwards=True))
# model_word_r.add(Dropout(0.2))
## model_word_r.add(LSTM(max_words, input_shape=(max_words, depth), consume_less='gpu', go_backwards=True))
# Topic model
print len(set(df['topic']))
print "set is", set(df['topic'])
# print "topic rep!! \n", df['topic_rep']
# one_hot = list(df['topic_rep'])#(pd.get_dummies(df['topic']))
## print df['topic'][0:5]
print "init one hot", one_hot[0:2]
# # one_hot = one_hot.as_matrix()
#
## one_hot = d#df['distance']
# print len(one_hot)
# # print len(one_hot[0])
# # print one_hot[0]
### one_hot = np.reshape(one_hot, (one_hot.shape[0], max_words, 1))
## one_hot = np.reshape(np.ravel(np.ravel(one_hot)), (len(one_hot), depth_distance, 1))
# one_hot = np.reshape(np.ravel(np.ravel(one_hot)), (len(one_hot), 2, depth_distance))
        one_hot_train = one_hot[0:12200]
        one_hot_test = one_hot[12200:]
print "one hot shape", one_hot.shape
model_topic = Sequential()
# , return_sequences=True
model_topic.add(Bidirectional(LSTM(max_words, activation='tanh', return_sequences=True), input_shape=(1, 2, depth_distance)))
model_topic.add(Dropout(0.2))
# model_topic.add(Bidirectional(LSTM(max_words, return_sequences=True)))
# model_topic.add(Flatten())
# model_topic.add(MaxPooling2D(pool_size=(2, 1)))
# model_topic.add(Dropout(0.2))
# model_topic.add(Dense(4, activation="tanh"))
# model_topic.add(Dropout(0.2))
# Merge forward and backward
# merged = Merge([model_word_f, model_word_r], mode='concat')#, concat_axis=1)
# model_word = Sequential()
# model_word.add(merged)
# model_word.add(Dropout(0.2))
## model_word.add(MaxPooling2D(pool_size=(2, 1)))
## model_word.add(Dropout(0.2))
# model_word.add(LSTM(max_words, input_shape=(2*max_words, 1)))
# model_word.add(Dropout(0.2))
# Merge merged and topic info
# merged2 = Merge([model_word, model_word_w, model_topic], mode='concat', concat_axis=1)
merged2 = Merge([model_word, model_topic], mode='concat', concat_axis=1)
# merged = Concatenate([model_word, model_topic], axis=-1)
model = Sequential()
model.add(merged2)
# model.add(Dropout(0.2))
# model.add(Bidirectional(LSTM(2*max_words, activation='relu', return_sequences=True)))#)))
# model.add(Dropout(0.2))
model.add(Bidirectional(LSTM(max_words, activation='tanh', return_sequences=True)))
## # model.add(Flatten())
model.add(Dropout(0.2))
# model.add(Bidirectional(LSTM(max_words), input_shape=(4 + max_words, 1)))
print "added additional Dense, no flatten"
## model.add(Dense(max_words, activation='tanh'))
# model.add(Dropout(0.2))
#model.add(Dense(1, activation='linear', W_constraint=maxnorm(3)))
# model.add(Bidirectional(LSTM(2*max_words, activation='tanh', return_sequences=True)))#)))
# model.add(Dropout(0.2))
# model.add(Bidirectional(LSTM(max_words, activation='tanh', return_sequences=True)))#)))
# model.add(Dropout(0.2))
model.add(LSTM(3, activation="softmax"))
# model.add(LSTM(1, activation="linear"))
# optimizer = RMSprop(lr=0.01)
# model.compile(loss='categorical_crossentropy', optimizer=optimizer)
adam = Adam(lr=0.0005, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
model.compile(loss='categorical_crossentropy', optimizer=adam)
print "Custom!!!"
# model.compile(loss=custom_loss, optimizer=adam)
print "came here saaaaar!!!!!!\n\n"
# print X[0:5]
# print Y_train[0:5]
print "model changedd !!!"
model.fit([X_train, one_hot_train], y_train, batch_size=64, epochs=50, validation_split=0.05, callbacks=[history])
model_json = model.to_json()
with open("modelc_rnn_new.json", "w") as json_file:
json_file.write(model_json)
model.save_weights("modelc_rnn_new.h5")
print("Saved model to disk")
# print(history.History)
return [model, X, X_w, y, df, d, one_hot]
# print X.shape
# print X[0]
# print X[0]
# for i in X[0]:
# print i
def load_model(filename):
json_file = open(filename+ '.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
model = model_from_json(loaded_model_json)
# load weights into new model
model.load_weights(filename + ".h5")
# [X, y, df, d] = pickle.load(open("data_rnn", "r"))
return model#, X, y, df, d]
def duplicate_model(filename):
global tech, politics, sports, music, genre, max_words, depth, word2topic, vocab, K
print "Duplicating!!"
# Word model
model_word = Sequential()
model_word.add(Bidirectional(LSTM(max_words, return_sequences=True), input_shape=(max_words, depth)))
model_word.add(Dropout(0.2))
# model_word.add(Flatten())
# model_word.add(MaxPooling2D(pool_size=(2, 1)))
# model_word.add(Dropout(0.2))
model_topic = Sequential()
model_topic.add(Bidirectional(LSTM(max_words, return_sequences=True), input_shape=(max_words, 1)))
model_topic.add(Dropout(0.2))
# model_topic.add(Flatten())
# model_topic.add(MaxPooling2D(pool_size=(2, 1)))
# model_topic.add(Dropout(0.2))
merged2 = Merge([model_word, model_topic], mode='concat')
model = Sequential()
model.add(merged2)
model.add(Dropout(0.2))
model.add(Bidirectional(LSTM(max_words, return_sequences=True)))
model.add(Dropout(0.2))
model.add(LSTM(max_words))
# model.add(Flatten())
model.add(Dropout(0.2))
# merged = Concatenate([model_word, model_topic], axis=-1)
model = Sequential()
model.add(merged2)
model.add(Dropout(0.2))
model.add(LSTM(max_words))
# model.add(Dropout(0.2))
# print "added additional Dense, no flatten"
# model.add(Dense(1, activation='linear', W_constraint=maxnorm(5)))
json_file = open(filename+ '.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
model_true = model_from_json(loaded_model_json)
# load weights into new model
model_true.load_weights(filename + ".h5")
model.layers[0].set_weights(model_true.layers[0].get_weights())
model.layers[1].set_weights(model_true.layers[1].get_weights())
model.layers[2].set_weights(model_true.layers[2].get_weights())
model.layers[3].set_weights(model_true.layers[3].get_weights())
try:
model.layers[3].set_weights(model_true.layers[3].get_weights())
# model.layers[3].set_weights(model_true.layers[3].get_weights())
print "tried"
except:
print "excepted"
# model.add(Dropout(0.2))
# model.layers[3].set_weights(model_true.layers[3].get_weights())
return model
#equal weighted categorical cross entropy
def sentiment_classifier():
global max_words, depth_distance, depth_embed
print "in senti class, changes, class\n\n"
try:
assert False
print "in try\n\n"
[model, X, y, df, d] = load_model('modelc_rnn_new')
print "Data found"
print "done"
except Exception, e:
print "Caught an exception\n\n"
print "Error is", str(e), "\n\n"
[model, X, X_w, y, df, d, one_hot] = run_model()
print "length of X is", len(X)
X_test = X[12200:]
y_test = y[12200:]
X_test_w = X_w[12200:]
X_train_w = X_w[0:12200]
X_train = X[0:12200]
y_train = y[0:12200]
topics = list(df['topic'])
# ____________________________________________________________________________________________________________HERE_________________
# one_hot = d#df['distance']
# one_hot = pd.get_dummies(df['topic'])
# one_hot = one_hot.as_matrix()
# print len(set(df['topic']))
# print "set is", set(df['topic'])
# print len(one_hot)
# print len(one_hot[0])
# print one_hot[0]
## print len(all_topics)
## print all_topics
# print set(df["topic"])
# one_hot = np.array(df['topic_rep'])#np.array(pd.get_dummies(df['topic']))
# one_hot = np.reshape(one_hot, (X.shape[0], 1, depth_distance))
one_hot_train = one_hot[0:12200]
one_hot_test = one_hot[12200:]
pred = model.predict([X_test, X_test_w, one_hot_test], batch_size = 64)#, Y_train, batch_size=32, verbose=1, sample_weight=None)
print pred[0:5]
print y_test[0:5]
# pred[:, 0] *= 1.5
# margin = 0.06
# indexes = pred[:, 0] + margin >= pred[:, 1]
# print indexes
# pred[indexes, 0] = pred[indexes, 1] + 0.01
# print pred[0:5]
##### print "This is the prediction"
###### y[y >= 0.1] = 1
###### y[y < 0.1] = 0
##### pred.shape = (pred.shape[0],)
##### print pred[0:20]
##### print "true labels"
##### print y_test[0:20]
###### print sum(sum(y == Y_train))
###### print (len(X_train) * len(X_train[0]))
##### print (sum(abs(y_test - pred))) / float(len(pred))
##### thresh1 = 1.5#49#1.8#1.5
##### thresh2 = 3.9
##### thresholded_pred = copy.deepcopy(pred)
##### thresholded_pred[(pred > (-thresh1 + 0.0)) & (pred < thresh2)] = 0
##### thresholded_pred[(pred >= thresh1) & (pred < thresh2)] = 3#1
##### thresholded_pred[pred >= thresh2] = 5#2
##### thresholded_pred[(pred > -thresh2) & (pred <= (-thresh1 + 0.0))] = -3#1
##### thresholded_pred[pred <= -thresh2] = -5#2
##### thresholded_pred = thresholded_pred.astype('int8')
##### print "Testing"
##### evaluate(y_test, thresholded_pred)
#####
##### y_test[y_test > 0] = 1
##### y_test[y_test < 0] = -1
#####
##### thresholded_pred[thresholded_pred > 0] = 1
##### thresholded_pred[thresholded_pred < 0] = -1
thresholded_pred = pred.argmax(axis=1)
y_test = y_test.argmax(axis=1)
evaluate(y_test, thresholded_pred)
thresholded_pred[thresholded_pred<=1] = -1
thresholded_pred[thresholded_pred==2] = 0
thresholded_pred[thresholded_pred>2] = 1
y_test[y_test<=1] = -1
y_test[y_test==2] = 0
y_test[y_test>2] = 1
evaluate(y_test, thresholded_pred)
pred = model.predict([X_train, X_train_w, one_hot_train], batch_size = 64)#, Y_train, batch_size=32, verbose=1, sample_weight=None)
print pred[0:5]
print y_train[0:5]
#pred[:,0] *= 1.5
print "This is the prediction"
#### y[y >= 0.1] = 1
#### y[y < 0.1] = 0
#### pred.shape = (pred.shape[0],)
#### print pred[0:20]
#### print "true labels"
#### print y_train[0:20]
##### print sum(sum(y == Y_train))
##### print (len(X_train) * len(X_train[0]))
#### print (sum(abs(y_train - pred))) / float(len(pred))
#### thresh1 = 1.5
#### thresh2 = 3.9
#### thresholded_pred = copy.deepcopy(pred)
#### thresholded_pred[(pred > (-thresh1 + 0.0)) & (pred < thresh2)] = 0
#### thresholded_pred[(pred >= thresh1) & (pred < thresh2)] = 3#1
#### thresholded_pred[pred >= thresh2] = 5#2
#### thresholded_pred[(pred > -thresh2) & (pred <= (-thresh1 + 0))] = -3#1
#### thresholded_pred[pred <= -thresh2] = -5#2
#### thresholded_pred = thresholded_pred.astype('int8')
#### print "Training"
#### evaluate(y_train, thresholded_pred)
#### y_train[y_train > 0] = 1
#### y_train[y_train < 0] = -1
####
#### thresholded_pred[thresholded_pred > 0] = 1
#### thresholded_pred[thresholded_pred < 0] = -1
thresholded_pred = pred.argmax(axis=1)
y_train = y_train.argmax(axis=1)
evaluate(y_train, thresholded_pred)
thresholded_pred[thresholded_pred<=1] = -1
thresholded_pred[thresholded_pred==2] = 0
thresholded_pred[thresholded_pred>2] = 1
y_train[y_train<=1] = -1
y_train[y_train==2] = 0
y_train[y_train>2] = 1
evaluate(y_train, thresholded_pred)
# model_dup = duplicate_model('modelc_rnn_new')
# layer_output = model_dup.predict([X_test, one_hot_test], batch_size = 64)
#
### get_last_layer_output = K.function([model.layers[0].input],
### [model.layers[2].output])
## get_last_layer_output = K.function([model.layers[0].input, K.learning_phase()],
## [model.layers[2].output])
### output in train mode = 0
### layer_output = np.array(get_last_layer_output([X_train[0:1200], 0])[0])
##
### output in train mode = 0
##
### X = [X_test, one_hot_test]
## print X_test.shape
## print one_hot_test.shape
## print len(X_test)
## print len(one_hot_test)
##
##
## X_2 = np.concatenate((X_test, one_hot_test), axis=2)
## start = 0
## increment = 100
## flag = 1
## print len(X_test)
## print "now!!"
## while start+increment <= len(X_test):
### X = [[X_test[start:start+increment], 1], [one_hot_test[start:start+increment], 1]]
## if flag:
## layer_output = get_last_layer_output([X_2[start:start+increment], 0])[0]#get_last_layer_output([[X_test[start:start+increment], 0], [one_hot_test[:, start:start+increment], 0]])[0]
## flag = 0
## else:
## layer_output = np.concatenate((layer_output, get_last_layer_output([X_2[start:start+increment], 0])[0]))
## start += increment
## if start != len(X_test):
### X = [X_test[start:start+increment], one_hot_test[start:start+increment]]
## layer_output = np.concatenate((layer_output, get_last_layer_output([X_2[start:start+increment], 0])[0]))
# print "length of hidden", len(layer_output[0])
# for iter in range(10):
# print df["tweet"][iter], layer_output[iter]
sentiment_classifier()
| {
"content_hash": "2b21ee35dc79e4d7a7d3b64e18264df7",
"timestamp": "",
"source": "github",
"line_count": 700,
"max_line_length": 383,
"avg_line_length": 38.92285714285714,
"alnum_prop": 0.5525214710416207,
"repo_name": "ProjectsUCSC/NLP",
"id": "cee880c71cad4665991f33a755343d2f613833bc",
"size": "27246",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "User Modelling/rnn_class.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "225665"
},
{
"name": "Python",
"bytes": "258827"
}
],
"symlink_target": ""
} |
from gailtf.baselines.common.mpi_running_mean_std import RunningMeanStd
import gailtf.baselines.common.tf_util as U
import tensorflow as tf
import gym
from gailtf.baselines.common.distributions import make_pdtype
class CnnPolicy(object):
recurrent = False
def __init__(self, name, ob_space, ac_space, kind='large'):
with tf.variable_scope(name):
self._init(ob_space, ac_space, kind)
self.scope = tf.get_variable_scope().name
def _init(self, ob_space, ac_space, kind):
assert isinstance(ob_space, gym.spaces.Box)
self.pdtype = pdtype = make_pdtype(ac_space)
sequence_length = None
ob = U.get_placeholder(name="ob", dtype=tf.float32, shape=[sequence_length] + list(ob_space.shape))
x = ob / 255.0
if kind == 'small': # from A3C paper
x = tf.nn.relu(U.conv2d(x, 16, "l1", [8, 8], [4, 4], pad="VALID"))
x = tf.nn.relu(U.conv2d(x, 32, "l2", [4, 4], [2, 2], pad="VALID"))
x = U.flattenallbut0(x)
x = tf.nn.relu(U.dense(x, 256, 'lin', U.normc_initializer(1.0)))
elif kind == 'large': # Nature DQN
x = tf.nn.relu(U.conv2d(x, 32, "l1", [8, 8], [4, 4], pad="VALID"))
x = tf.nn.relu(U.conv2d(x, 64, "l2", [4, 4], [2, 2], pad="VALID"))
x = tf.nn.relu(U.conv2d(x, 64, "l3", [3, 3], [1, 1], pad="VALID"))
x = U.flattenallbut0(x)
x = tf.nn.relu(U.dense(x, 512, 'lin', U.normc_initializer(1.0)))
else:
raise NotImplementedError
logits = U.dense(x, pdtype.param_shape()[0], "logits", U.normc_initializer(0.01))
self.pd = pdtype.pdfromflat(logits)
self.vpred = U.dense(x, 1, "value", U.normc_initializer(1.0))[:,0]
self.state_in = []
self.state_out = []
stochastic = tf.placeholder(dtype=tf.bool, shape=())
ac = self.pd.sample() # XXX
self._act = U.function([stochastic, ob], [ac, self.vpred])
def act(self, stochastic, ob):
ac1, vpred1 = self._act(stochastic, ob[None])
return ac1[0], vpred1[0]
def get_variables(self):
return tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, self.scope)
def get_trainable_variables(self):
return tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, self.scope)
def get_initial_state(self):
return []
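# Hedged usage sketch (not part of the original file): with an Atari-style Box
# observation space and a Discrete action space the policy could be built and
# queried roughly as follows (the spaces and `some_observation` array are assumptions):
#
#   ob_space = gym.spaces.Box(low=0, high=255, shape=(84, 84, 4))
#   ac_space = gym.spaces.Discrete(6)
#   pi = CnnPolicy("pi", ob_space, ac_space, kind='large')
#   ac, vpred = pi.act(stochastic=True, ob=some_observation)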
| {
"content_hash": "2b01d08a260185eca70d10ebf32f3c2e",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 107,
"avg_line_length": 41.666666666666664,
"alnum_prop": 0.5835789473684211,
"repo_name": "ryanjulian/gail-tf",
"id": "36a01894c8c6239968511b3507ea799acb51fbac",
"size": "2375",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gailtf/baselines/ppo1/cnn_policy.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "195509"
},
{
"name": "Shell",
"bytes": "123"
}
],
"symlink_target": ""
} |
from direct.distributed import DistributedObject
from direct.directnotify import DirectNotifyGlobal
from toontown.toonbase import ToontownGlobals
from pandac.PandaModules import *
from math import *
import math
from direct.fsm.FSM import FSM
from toontown.minigame import ArrowKeys
from direct.showbase import PythonUtil
from direct.task import Task
from direct.distributed.ClockDelta import *
import BuildGeometry
from toontown.golf import GolfGlobals
import random, time
def scalp(vec, scal):
    vec0 = vec[0] * scal
    vec1 = vec[1] * scal
    vec2 = vec[2] * scal
    # Rebinding the local name does not modify the caller's vector, so return the result
    vec = Vec3(vec0, vec1, vec2)
    return vec
def length(vec):
return sqrt(vec[0] ** 2 + vec[1] ** 2 + vec[2] ** 2)
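# Hedged note (not part of the original file): `length` is the Euclidean norm of a
# 3-component vector, e.g. length(Vec3(3, 4, 0)) == 5.0, equivalent to Vec3.length().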
class PhysicsWorldBase:
notify = DirectNotifyGlobal.directNotify.newCategory('DistributedPhysicsWorld')
def __init__(self, canRender = 0):
self.canRender = canRender
self.world = OdeWorld()
self.space = OdeSimpleSpace()
self.contactgroup = OdeJointGroup()
self.bodyList = []
self.geomList = []
self.massList = []
self.rayList = []
self.showContacts = 0
self.jointMarkers = []
self.jointMarkerCount = 64
self.meshDataList = []
self.geomDataList = []
self.commonObjectInfoDict = {}
self.maxColCount = 0
if self.canRender:
self.odePandaRelationList = self.bodyList
self.root = render.attachNewNode('physics root node')
else:
self.root = NodePath('physics root node')
self.placerNode = self.root.attachNewNode('Placer')
self.subPlacerNode = self.placerNode.attachNewNode('Placer Sub Node')
self.commonObjectDict = {}
self.commonId = 0
self.worldAttach = self.root.attachNewNode('physics geom attach point')
self.timingCycleLength = 10.0
self.timingCycleOffset = 0.0
self.timingSimTime = 0.0
self.FPS = 90.0
self.refFPS = 60.0
self.DTAStep = 1.0 / self.FPS
self.refCon = 1.2
self.collisionEventName = 'ode-collision-%d' % id(self)
self.space.setCollisionEvent(self.collisionEventName)
self.accept(self.collisionEventName, self.__collisionHandler)
def delete(self):
self.notify.debug('Max Collision Count was %s' % self.maxColCount)
self.stopSim()
self.commonObjectDict = None
if self.canRender:
for pair in self.odePandaRelationList:
pair[0].removeNode()
pair[1].destroy()
self.odePandaRelationList = None
else:
for body in self.bodyList:
body[1].destroy()
self.bodyList = None
for mass in self.massList:
mass = None
for geom in self.geomList:
geom.destroy()
geom = None
for ray in self.rayList:
ray.destroy()
ray = None
self.placerNode.removeNode()
self.root.removeNode()
for marker in self.jointMarkers:
marker.removeNode()
self.jointMarkers = None
for data in self.geomDataList:
data.destroy()
for data in self.meshDataList:
data.destroy()
self.floor.destroy()
self.floor = None
self.contactgroup.empty()
self.world.destroy()
self.space.destroy()
self.world = None
self.space = None
self.ignore(self.collisionEventName)
def setupSimulation(self):
self.world.setAutoDisableFlag(0)
self.world.setAutoDisableLinearThreshold(0.15)
self.world.setAutoDisableAngularThreshold(0.15)
self.world.setAutoDisableSteps(2)
self.world.setGravity(0, 0, -25)
self.world.setErp(0.8)
self.world.setCfm(1e-05)
self.world.initSurfaceTable(5)
self.world.setSurfaceEntry(0, 0, 150, 0.05, 0.1, 0.9, 1e-05, 0.0, 0.4 / self.refCon)
self.world.setSurfaceEntry(1, 1, 1500, 0.05, 0.1, 0.9, 1e-05, 0.0, 0.001 / self.refCon)
self.world.setSurfaceEntry(2, 2, 150, 0.05, 0.1, 0.9, 1e-05, 0.0, 0.4 / self.refCon)
self.world.setSurfaceEntry(0, 2, 150, 0.05, 0.1, 0.9, 1e-05, 0.0, 0.4 / self.refCon)
self.world.setSurfaceEntry(0, 3, 150, 0.0, 0.1, 0.9, 1e-05, 0.0, 0.4 / self.refCon)
self.world.setSurfaceEntry(1, 3, 150, 0.0, 99.1, 0.9, 1e-05, 0.0, 1.0 / self.refCon)
self.world.setSurfaceEntry(2, 3, 150, 0.0, 9.1, 0.9, 1e-05, 0.0, 0.4 / self.refCon)
self.world.setSurfaceEntry(3, 3, 150, 0.0, 9.1, 0.9, 1e-05, 0.0, 0.4 / self.refCon)
self.world.setSurfaceEntry(4, 4, 150, 0.0, 9.1, 0.9, 1e-05, 0.0, 0.4 / self.refCon)
self.world.setSurfaceEntry(1, 4, 150, 0.0, 99.1, 0.9, 1e-05, 0.0, 0.001 / self.refCon)
self.world.setSurfaceEntry(pos1=0, pos2=1, mu=80, bounce=0.15, bounce_vel=0.1, soft_erp=0.9, soft_cfm=1e-05, slip=0.0, dampen=0.35 / self.refCon)
self.world.setSurfaceEntry(pos1=2, pos2=1, mu=1500, bounce=0.9, bounce_vel=0.01, soft_erp=0.9, soft_cfm=1e-05, slip=0.0, dampen=0.001 / self.refCon)
self.floor = OdePlaneGeom(self.space, Vec4(0.0, 0.0, 1.0, -20.0))
self.floor.setCollideBits(BitMask32(0))
self.floor.setCategoryBits(BitMask32(3840))
self.space.setAutoCollideWorld(self.world)
self.space.setAutoCollideJointGroup(self.contactgroup)
self.world.setQuickStepNumIterations(8)
self.DTA = 0.0
self.frameCounter = 0
if self.canRender:
for count in xrange(self.jointMarkerCount):
testMarker = render.attachNewNode('Joint Marker')
ballmodel = loader.loadModel('phase_3/models/misc/sphere')
ballmodel.reparentTo(testMarker)
ballmodel.setScale(0.1)
testMarker.setPos(0.0, 0.0, -100.0)
self.jointMarkers.append(testMarker)
def setTimingCycleLength(self, time):
self.timingCycleLength = time
def getTimingCycleLength(self):
return self.timingCycleLength
def getCycleTime(self, doprint = 0):
cycleTime = (globalClock.getRealTime() + self.timingCycleOffset) % self.timingCycleLength
if doprint:
print 'Get Cycle Time %s' % cycleTime
return cycleTime
def setTimeIntoCycle(self, time, doprint = 0):
trueCycleTime = globalClock.getRealTime() % self.timingCycleLength
self.timingCycleOffset = time - trueCycleTime
if doprint:
self.notify.debug('Set Cycle Time %s' % self.timingCycleOffset)
self.notify.debug('SET cycle time %s' % ((globalClock.getRealTime() + self.timingCycleOffset) % self.timingCycleLength))
def getSimCycleTime(self):
return
return self.timingSimTime % self.timingCycleLength
def startSim(self):
taskMgr.add(self.__simulationTask, 'simulation task')
def stopSim(self):
taskMgr.remove('simulation task')
def __simulationTask(self, task):
self.DTA += globalClock.getDt()
self.frameCounter += 1
if self.frameCounter >= 10:
self.frameCounter = 0
startTime = globalClock.getRealTime()
colCount = 0
while self.DTA >= self.DTAStep:
self.DTA -= self.DTAStep
self.preStep()
self.simulate()
self.postStep()
if self.canRender:
self.placeBodies()
if self.frameCounter == 0:
endTime = globalClock.getRealTime() - startTime
return task.cont
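    # Hedged note (not part of the original file): the accumulator above advances the
    # ODE world in fixed steps of 1 / FPS seconds regardless of render frame rate;
    # e.g. at FPS = 90 a 33 ms render frame drains roughly three 11.1 ms physics
    # steps, and any remainder stays in self.DTA for the next frame.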
def __collisionHandler(self, entry):
self.colEntries.append(entry)
def simulate(self):
self.colEntries = []
self.space.autoCollide()
# We need the callbacks processed now, before we try to look at colEntries, so:
eventMgr.doEvents()
self.colCount = len(self.colEntries)
if self.maxColCount < self.colCount:
self.maxColCount = self.colCount
self.notify.debug('New Max Collision Count %s' % self.maxColCount)
self.world.quickStep(self.DTAStep)
for bodyPair in self.bodyList:
self.world.applyDampening(self.DTAStep, bodyPair[1])
self.contactgroup.empty()
self.commonObjectControl()
self.timingSimTime = self.timingSimTime + self.DTAStep
def placeBodies(self):
for pair in self.odePandaRelationList:
pandaNodePathGeom = pair[0]
odeBody = pair[1]
if pandaNodePathGeom:
pandaNodePathGeom.setPos(odeBody.getPosition())
rotation = odeBody.getRotation() * (180.0 / math.pi)
pandaNodePathGeom.setQuat(Quat(odeBody.getQuaternion()[0], odeBody.getQuaternion()[1], odeBody.getQuaternion()[2], odeBody.getQuaternion()[3]))
def preStep(self):
pass
def postStep(self):
if self.showContacts and self.canRender:
for count in xrange(self.jointMarkerCount):
pandaNodePathGeom = self.jointMarkers[count]
if count < self.colCount:
pandaNodePathGeom.setPos(self.space.getContactData(count * 3 + 0), self.space.getContactData(count * 3 + 1), self.space.getContactData(count * 3 + 2))
else:
pandaNodePathGeom.setPos(0.0, 0.0, -100.0)
def commonObjectControl(self):
time = self.getCycleTime()
for key in self.commonObjectDict:
if key not in self.commonObjectInfoDict:
self.commonObjectInfoDict[key] = None
entry = self.commonObjectDict[key]
if entry[1] in [2, 4]:
type = entry[1]
body = entry[2]
motor = entry[3]
timeData = entry[4]
forceData = entry[5]
eventData = entry[6]
model = entry[7]
force = 0.0
for index in xrange(len(timeData)):
if index == len(timeData) - 1 and timeData[index] < time or timeData[index] < time and timeData[index + 1] > time:
force = forceData[index]
event = eventData[index]
if event != self.commonObjectInfoDict[key]:
self.commonObjectEvent(key, model, type, force, event)
self.commonObjectInfoDict[key] = event
motor.setParamVel(force)
return
def commonObjectEvent(self, key, model, type, force, event):
self.notify.debug('commonObjectForceEvent %s %s %s %s %s' % (key,
model,
type,
force,
event))
def getCommonObjectData(self):
objectStream = [(0,
0,
self.getCycleTime(),
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0)]
for key in self.commonObjectDict:
objectPair = self.commonObjectDict[key]
object = objectPair[2]
pos3 = object.getPosition()
quat4 = object.getQuaternion()
anV3 = object.getAngularVel()
lnV3 = object.getLinearVel()
data = (objectPair[0],
objectPair[1],
pos3[0],
pos3[1],
pos3[2],
quat4[0],
quat4[1],
quat4[2],
quat4[3],
anV3[0],
anV3[1],
anV3[2],
lnV3[0],
lnV3[1],
lnV3[2])
objectStream.append(data)
if len(objectStream) <= 1:
data = (0, 99, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
objectStream.append(data)
return objectStream
def useCommonObjectData(self, objectData, enable = 1):
if not objectData:
return
if objectData[1][1] == 99:
return
time = objectData[0]
self.setTimeIntoCycle(time[2])
if time[2] > self.timingCycleLength:
pass
for dataIndex in xrange(1, len(objectData)):
data = objectData[dataIndex]
commonObject = self.commonObjectDict[data[0]]
commonObject[2].setPosition(data[2], data[3], data[4])
commonObject[2].setQuaternion(Quat(data[5], data[6], data[7], data[8]))
commonObject[2].setAngularVel(data[9], data[10], data[11])
commonObject[2].setLinearVel(data[12], data[13], data[14])
if enable:
commonObject[2].enable()
else:
commonObject[2].disable()
def createCommonObject(self, type, commonId, pos, hpr, sizeX = 0, sizeY = 0, moveDistance = 0):
if commonId == None:
commonId = self.commonId
self.commonId += 1
vPos = Point3(float(pos[0]), float(pos[1]), float(pos[2]))
vHpr = Vec3(float(hpr[0]), float(hpr[1]), float(hpr[2]))
rHpr = Vec3(float(hpr[0]), float(hpr[1]), float(hpr[2]))
self.placerNode.setHpr(vHpr)
self.placerNode.setPos(vPos)
if type == 0:
model, box = self.createBox(self.world, self.space, 10.0, 5.0, 5.0, 5.0)
box.setPosition(vPos)
self.placerNode.setHpr(vHpr)
box.setQuaternion(self.placerNode.getQuat())
self.commonObjectDict[commonId] = (commonId, type, box)
elif type == 1:
model, cross = self.createCross(self.world, self.space, 1.0, 3.0, 12.0, 2.0, 2)
motor = OdeHingeJoint(self.world)
cross.setPosition(vPos)
cross.setQuaternion(self.placerNode.getQuat())
ourAxis = render.getRelativeVector(self.placerNode, Vec3(0, 0, 1))
motor.setParamVel(1.5)
motor.setParamFMax(500000000.0)
boxsize = Vec3(1.0, 1.0, 1.0)
motor.attach(0, cross)
motor.setAnchor(vPos)
motor.setAxis(ourAxis)
self.cross = cross
cross.enable()
self.commonObjectDict[commonId] = (commonId, type, cross)
elif type == 2:
ourAxis = render.getRelativeVector(self.placerNode, Vec3(0, 0, 1))
model, box = self.createBox(self.world, self.space, 10.0, 5.0, 5.0, 5.0, 2)
box.setPosition(vPos)
box.setQuaternion(self.placerNode.getQuat())
motor = OdeSliderJoint(self.world)
motor.attach(box, 0)
motor.setAxis(ourAxis)
motor.setParamVel(3.0)
motor.setParamFMax(5000000.0)
motor.setParamHiStop(10.0)
motor.setParamLoStop(-10.0)
timeData = (0.0, 5.0)
forceData = (3.0, -3.0)
eventData = (1, 2)
self.commonObjectDict[commonId] = (commonId,
type,
box,
motor,
timeData,
forceData,
eventData,
model)
elif type == 3:
vPos = Point3(float(pos[0]), float(pos[1]), float(pos[2]))
vHpr = Vec3(float(hpr[0]), float(hpr[1]), float(hpr[2]))
self.placerNode.setHpr(vHpr)
self.placerNode.setPos(vPos)
self.subPlacerNode.setPos(0, 0, 0)
if self.canRender:
myModel = loader.loadModel('phase_6/models/golf/golf_windmill_b')
else:
myModel = loader.loadModel('phase_6/models/golf/golf_windmill_b.bam')
myModel.reparentTo(self.root)
myModel.setPos(vPos)
myModel.setHpr(vHpr)
millFan = myModel.find('**/windmillFan0')
millBase = myModel.find('**/arm')
rod = myModel.find('**/rod')
rod.wrtReparentTo(millBase)
self.windmillFanNodePath = millFan
self.windmillBaseNodePath = millBase
millData = OdeTriMeshData(millBase)
millGeom = OdeTriMeshGeom(self.space, millData)
self.meshDataList.append(millData)
millGeom.setPosition(self.subPlacerNode.getPos(self.root))
millGeom.setQuaternion(self.subPlacerNode.getQuat())
millGeom.setCollideBits(BitMask32(251658240))
millGeom.setCategoryBits(BitMask32(8388608))
self.space.setCollideId(millGeom, 8)
vPos = Point3(float(pos[0]), float(pos[1]), float(pos[2]) + 5)
vHpr = Vec3(float(hpr[0]), float(hpr[1] + 90), float(hpr[2]) - 90)
self.placerNode.setHpr(vHpr)
self.placerNode.setPos(vPos)
self.subPlacerNode.setPos(-1, 0, 0.0)
model, cross = self.createPinWheel(self.world, self.space, 10.0, 1.6, 4.0, 0.6, 5, 3.7, 1.2, 1, millFan, (0, 0, 90), (-4.6, -0.5, -0.25), 20)
self.placerNode.setHpr(vHpr)
self.placerNode.setPos(vPos)
self.subPlacerNode.setPos(-1, 0, 0.0)
motor = OdeHingeJoint(self.world)
cross.setPosition(self.subPlacerNode.getPos(self.root))
cross.setQuaternion(self.placerNode.getQuat())
ourAxis = self.root.getRelativeVector(self.subPlacerNode, Vec3(0, 0, 1))
motor.setParamVel(1.0)
motor.setParamFMax(50000.0)
boxsize = Vec3(1.0, 1.0, 1.0)
motor.attach(0, cross)
motor.setAnchor(self.subPlacerNode.getPos(self.root))
motor.setAxis(ourAxis)
self.cross = cross
cross.enable()
self.commonObjectDict[commonId] = (commonId, type, cross)
elif type == 4:
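# Type 4: a moving platform; a flat box travels along a slider joint over
# moveDistance, with a time/force/event schedule describing its trips.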
ourAxis = self.root.getRelativeVector(self.placerNode, Vec3(0, 1, 0))
model, box = self.createBox(self.world, self.space, 50.0, sizeX, sizeY, 1.0, 2)
box.setPosition(vPos)
box.setQuaternion(self.placerNode.getQuat())
motor = OdeSliderJoint(self.world)
motor.attach(box, 0)
motor.setAxis(ourAxis)
motor.setParamVel(moveDistance / 4.0)
motor.setParamFMax(25000.0)
motor.setParamHiStop(moveDistance)
motor.setParamLoStop(0)
timeData = (0.0, 1.0, 5.0, 6.0)
forceData = (-moveDistance / 4.0,
moveDistance / 4.0,
moveDistance / 4.0,
-moveDistance / 4.0)
eventData = (-1, 1, -2, 2)
radius = moveDistance + sizeY * 0.5
self.commonObjectDict[commonId] = (commonId,
type,
box,
motor,
timeData,
forceData,
eventData,
model,
radius)
return [type,
commonId,
(pos[0], pos[1], pos[2]),
(hpr[0], hpr[1], hpr[2]),
sizeX,
sizeY,
moveDistance]
def createSphere(self, world, space, density, radius, ballIndex = None):
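# Creates a golf-ball body and sphere geom; ballIndex selects the per-player
# ball (and its colour when rendering).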
self.notify.debug('create sphere index %s' % ballIndex)
body = OdeBody(world)
M = OdeMass()
M.setSphere(density, radius)
body.setMass(M)
body.setPosition(0, 0, -100)
geom = OdeSphereGeom(space, radius)
self.space.setSurfaceType(geom, 1)
self.notify.debug('collide ID is %s' % self.space.setCollideId(geom, 42))
self.massList.append(M)
self.geomList.append(geom)
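# Every player ball currently uses the same collide/category masks; only the
# debug output differs per ballIndex.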
if ballIndex == 1:
self.notify.debug('1')
geom.setCollideBits(BitMask32(16777215))
geom.setCategoryBits(BitMask32(4278190080L))
elif ballIndex == 2:
self.notify.debug('2')
geom.setCollideBits(BitMask32(16777215))
geom.setCategoryBits(BitMask32(4278190080L))
elif ballIndex == 3:
self.notify.debug('3')
geom.setCollideBits(BitMask32(16777215))
geom.setCategoryBits(BitMask32(4278190080L))
elif ballIndex == 4:
self.notify.debug('4')
geom.setCollideBits(BitMask32(16777215))
geom.setCategoryBits(BitMask32(4278190080L))
else:
geom.setCollideBits(BitMask32(4294967295L))
geom.setCategoryBits(BitMask32(4294967295L))
geom.setBody(body)
if self.notify.getDebug():
self.notify.debug('golf ball geom id')
geom.write()
self.notify.debug(' -')
self.notify.debug('Collide Bits %s' % geom.getCollideBits())
if self.canRender:
testball = render.attachNewNode('Ball Holder')
ballmodel = loader.loadModel('phase_6/models/golf/golf_ball')
ballmodel.reparentTo(testball)
ballmodel.setColor(*GolfGlobals.PlayerColors[ballIndex - 1])
testball.setPos(0, 0, -100)
self.odePandaRelationList.append((testball, body))
else:
testball = None
self.bodyList.append((None, body))
return (testball, body, geom)
def createBox(self, world, space, density, lx, ly, lz, colOnlyBall = 0):
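# Generic box helper.  colOnlyBall == 1 limits the box to colliding with the
# golf balls only; colOnlyBall == 2 turns its collision bits off entirely.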
body = OdeBody(self.world)
M = OdeMass()
M.setSphere(density, 0.3 * (lx + ly + lz))
body.setMass(M)
boxsize = Vec3(lx, ly, lz)
geom = OdeBoxGeom(space, boxsize)
geom.setBody(body)
self.space.setSurfaceType(geom, 0)
self.space.setCollideId(geom, 7)
self.massList.append(M)
self.geomList.append(geom)
if colOnlyBall == 1:
geom.setCollideBits(BitMask32(251658240))
geom.setCategoryBits(BitMask32(0))
elif colOnlyBall == 2:
geom.setCollideBits(BitMask32(0))
geom.setCategoryBits(BitMask32(0))
if self.canRender:
color = random.choice([Vec4(1.0, 0.0, 0.5, 1.0), Vec4(0.5, 0.5, 1.0, 1.0), Vec4(0.5, 1.0, 0.5, 1.0)])
boxsize = Vec3(lx, ly, lz)
boxNodePathGeom, t1, t2 = BuildGeometry.addBoxGeom(self.worldAttach, lx, ly, lz, color, 1)
boxNodePathGeom.setPos(0, 0, -100)
self.odePandaRelationList.append((boxNodePathGeom, body))
else:
boxNodePathGeom = None
self.bodyList.append((None, body))
return (boxNodePathGeom, body)
def createCross(self, world, space, density, lx, ly, lz, colOnlyBall = 0, attachedGeo = None, aHPR = None, aPos = None):
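# Two perpendicular box geoms share one body to form a spinning cross; the
# optional attachedGeo model is parented to the rendered geometry.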
body = OdeBody(self.world)
M = OdeMass()
M.setBox(density, lx, ly, lz)
body.setMass(M)
body.setFiniteRotationMode(1)
boxsize = Vec3(lx, ly, lz)
boxsize2 = Vec3(ly, lx, lz)
geom = OdeBoxGeom(space, boxsize)
geom.setBody(body)
self.space.setSurfaceType(geom, 0)
self.space.setCollideId(geom, 13)
geom2 = OdeBoxGeom(space, boxsize2)
geom2.setBody(body)
self.space.setSurfaceType(geom2, 0)
self.space.setCollideId(geom2, 26)
self.massList.append(M)
self.geomList.append(geom)
self.geomList.append(geom2)
if colOnlyBall == 1:
geom.setCollideBits(BitMask32(251658240))
geom.setCategoryBits(BitMask32(0))
geom2.setCollideBits(BitMask32(251658240))
geom2.setCategoryBits(BitMask32(0))
elif colOnlyBall == 2:
geom.setCollideBits(BitMask32(0))
geom.setCategoryBits(BitMask32(0))
geom2.setCollideBits(BitMask32(0))
geom2.setCategoryBits(BitMask32(0))
if self.canRender:
boxNodePathGeom, t1, t2 = BuildGeometry.addBoxGeom(self.worldAttach, lx, ly, lz, Vec4(1.0, 1.0, 1.0, 1.0), 1)
boxNodePathGeom.setPos(0, 0, -100)
boxNodePathGeom2, t1, t2 = BuildGeometry.addBoxGeom(boxNodePathGeom, ly, lx, lz, Vec4(1.0, 1.0, 1.0, 1.0), 1)
boxNodePathGeom2.setPos(0, 0, 0)
if attachedGeo:
attachedGeo.reparentTo(boxNodePathGeom)
attachedGeo.setHpr(0, 0, 90)
attachedGeo.setPos(-4.8, 0, -2.0)
self.odePandaRelationList.append((boxNodePathGeom, body))
else:
boxNodePathGeom = None
self.bodyList.append((None, body))
return (boxNodePathGeom, body)
def createCross2(self, world, space, density, lx, ly, lz, latSlide, colOnlyBall = 0, attachedGeo = None, aHPR = None, aPos = None):
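# Pinwheel-style cross: four box geoms offset laterally by latSlide around a
# common hub, all attached to a single body.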
body = OdeBody(self.world)
M = OdeMass()
M.setBox(density, lx, ly, lz)
body.setMass(M)
body.setFiniteRotationMode(1)
boxsize = Vec3(lx, ly * 0.5, lz)
boxsize2 = Vec3(ly * 0.5, lx, lz)
geom = OdeBoxGeom(space, boxsize)
geom.setBody(body)
geom.setOffsetPosition(-latSlide, ly * 0.25, 0)
self.space.setSurfaceType(geom, 0)
self.space.setCollideId(geom, 13)
geom2 = OdeBoxGeom(space, boxsize2)
geom2.setBody(body)
geom2.setOffsetPosition(ly * 0.25, latSlide, 0)
self.space.setSurfaceType(geom2, 0)
self.space.setCollideId(geom2, 13)
geom3 = OdeBoxGeom(space, boxsize)
geom3.setBody(body)
geom3.setOffsetPosition(latSlide, -ly * 0.25, 0)
self.space.setSurfaceType(geom3, 0)
self.space.setCollideId(geom3, 13)
geom4 = OdeBoxGeom(space, boxsize2)
geom4.setBody(body)
geom4.setOffsetPosition(-ly * 0.25, -latSlide, 0)
self.space.setSurfaceType(geom4, 0)
self.space.setCollideId(geom4, 13)
self.massList.append(M)
self.geomList.append(geom)
self.geomList.append(geom2)
self.geomList.append(geom3)
self.geomList.append(geom4)
if colOnlyBall == 1:
geom.setCollideBits(BitMask32(251658240))
geom.setCategoryBits(BitMask32(0))
geom2.setCollideBits(BitMask32(251658240))
geom2.setCategoryBits(BitMask32(0))
geom3.setCollideBits(BitMask32(251658240))
geom3.setCategoryBits(BitMask32(0))
geom4.setCollideBits(BitMask32(251658240))
geom4.setCategoryBits(BitMask32(0))
elif colOnlyBall == 2:
geom.setCollideBits(BitMask32(0))
geom.setCategoryBits(BitMask32(0))
geom2.setCollideBits(BitMask32(0))
geom2.setCategoryBits(BitMask32(0))
geom3.setCollideBits(BitMask32(0))
geom3.setCategoryBits(BitMask32(0))
geom4.setCollideBits(BitMask32(0))
geom4.setCategoryBits(BitMask32(0))
if self.canRender:
someNodePathGeom = render.attachNewNode('pinwheel')
if attachedGeo:
attachedGeo.reparentTo(someNodePathGeom)
attachedGeo.setHpr(aHPR[0], aHPR[1], aHPR[2])
attachedGeo.setPos(aPos[0], aPos[1], aPos[2])
boxNodePathGeom, t1, t2 = BuildGeometry.addBoxGeom(someNodePathGeom, lx, ly * 0.5, lz, Vec4(1.0, 1.0, 1.0, 1.0), 1)
boxNodePathGeom.setPos(-latSlide, ly * 0.25, 0)
boxNodePathGeom2, t1, t2 = BuildGeometry.addBoxGeom(someNodePathGeom, ly * 0.5, lx, lz, Vec4(1.0, 1.0, 1.0, 1.0), 1)
boxNodePathGeom2.setPos(ly * 0.25, latSlide, 0)
boxNodePathGeom3, t1, t2 = BuildGeometry.addBoxGeom(someNodePathGeom, lx, ly * 0.5, lz, Vec4(1.0, 1.0, 1.0, 1.0), 1)
boxNodePathGeom3.setPos(latSlide, -ly * 0.25, 0)
boxNodePathGeom4, t1, t2 = BuildGeometry.addBoxGeom(someNodePathGeom, ly * 0.5, lx, lz, Vec4(1.0, 1.0, 1.0, 1.0), 1)
boxNodePathGeom4.setPos(-ly * 0.25, -latSlide, 0)
self.odePandaRelationList.append((someNodePathGeom, body))
else:
someNodePathGeom = None
self.bodyList.append((None, body))
return (someNodePathGeom, body)
def createPinWheel(self, world, space, density, lx, ly, lz, numBoxes, disV, disH, colOnlyBall = 0, attachedGeo = None, aHPR = None, aPos = None, offRot = 0):
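# Fans numBoxes blade geoms evenly around one body, each offset by disH/disV
# from the hub; used for the windmill fan (type 3 above).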
body = OdeBody(self.world)
M = OdeMass()
M.setBox(density, lx, ly, lz)
body.setMass(M)
body.setFiniteRotationMode(1)
boxsize = Vec3(lx, ly * 0.5, lz)
boxsize2 = Vec3(ly * 0.5, lx, lz)
self.massList.append(M)
self.placerNode.setPos(0, 0, 0)
self.placerNode.setHpr(0, 0, 0)
self.subPlacerNode.setHpr(0, 0, 0)
self.subPlacerNode.setPos(disH, disV, 0)
if self.canRender:
someNodePathGeom = render.attachNewNode('pinwheel')
else:
someNodePathGeom = self.root.attachNewNode('pinwheel')
for num in xrange(numBoxes):
spin = 360.0 * float(num) / float(numBoxes) + float(offRot)
self.placerNode.setH(spin)
geom = OdeBoxGeom(space, boxsize)
geom.setBody(body)
geom.setOffsetPosition(self.subPlacerNode.getPos(self.root))
geom.setOffsetQuaternion(self.subPlacerNode.getQuat(self.root))
self.geomList.append(geom)
self.space.setSurfaceType(geom, 0)
self.space.setCollideId(geom, 13)
if colOnlyBall == 1:
geom.setCollideBits(BitMask32(251658240))
geom.setCategoryBits(BitMask32(0))
elif colOnlyBall == 2:
geom.setCollideBits(BitMask32(0))
geom.setCategoryBits(BitMask32(0))
if not attachedGeo:
boxNodePathGeom, t1, t2 = BuildGeometry.addBoxGeom(someNodePathGeom, lx, ly * 0.5, lz, Vec4(1.0, 1.0, 1.0, 1.0), 1)
boxNodePathGeom.setPos(self.subPlacerNode.getPos(self.root))
boxNodePathGeom.setHpr(self.subPlacerNode.getHpr(self.root))
if attachedGeo and self.canRender:
attachedGeo.reparentTo(someNodePathGeom)
attachedGeo.setHpr(aHPR[0], aHPR[1], aHPR[2])
attachedGeo.setPos(aPos[0], aPos[1], aPos[2])
if self.canRender:
self.odePandaRelationList.append((someNodePathGeom, body))
else:
someNodePathGeom = None
self.bodyList.append((None, body))
return (someNodePathGeom, body)
def attachMarker(self, body):
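# Debug helper: creates a small sphere that tracks the given body so joint
# anchors can be seen while tuning.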
if self.canRender:
testMarker = render.attachNewNode('Joint Marker')
ballmodel = loader.loadModel('models/misc/sphere')
ballmodel.reparentTo(testMarker)
ballmodel.setScale(0.25)
testMarker.setPos(0.0, 0.0, -100.0)
self.odePandaRelationList.append((testMarker, body))
| {
"content_hash": "c93b494e620dd39b1b6d77bc102b4a44",
"timestamp": "",
"source": "github",
"line_count": 731,
"max_line_length": 170,
"avg_line_length": 41.56497948016416,
"alnum_prop": 0.5822472353870458,
"repo_name": "ToonTownInfiniteRepo/ToontownInfinite",
"id": "edc657c96d0a45d6a0761763153f3e97d4bab19e",
"size": "30384",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "toontown/golf/PhysicsWorldBase.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1703277"
},
{
"name": "C#",
"bytes": "9892"
},
{
"name": "C++",
"bytes": "5468044"
},
{
"name": "Emacs Lisp",
"bytes": "210083"
},
{
"name": "F#",
"bytes": "4611"
},
{
"name": "JavaScript",
"bytes": "7003"
},
{
"name": "Objective-C",
"bytes": "23212"
},
{
"name": "Puppet",
"bytes": "5245"
},
{
"name": "Python",
"bytes": "34010215"
},
{
"name": "Shell",
"bytes": "11192"
},
{
"name": "Tcl",
"bytes": "1981257"
}
],
"symlink_target": ""
} |
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'django-reclass'
copyright = u'2015, Michael Kuty'
author = u'Michael Kuty'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0'
# The full version, including alpha/beta/rc tags.
release = '0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'django_reclassdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'django_reclass.tex', u'django-reclass',
u'Michael Kuty', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'django_reclass', u'django-reclass Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'django_reclass', u'django-reclass',
author, 'django_reclass', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
| {
"content_hash": "18922a8843eeb606417a2f36f01e12ea",
"timestamp": "",
"source": "github",
"line_count": 281,
"max_line_length": 79,
"avg_line_length": 32.20640569395018,
"alnum_prop": 0.7048618784530387,
"repo_name": "michaelkuty/django-reclass",
"id": "e170c2dcf062501f36e91a5920a3f71f5114ea42",
"size": "9468",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/source/conf.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "24758"
},
{
"name": "Shell",
"bytes": "102"
}
],
"symlink_target": ""
} |
# encoding: utf-8
"""
This script tests ``GitWildMatchPattern``.
"""
from __future__ import unicode_literals
import re
import sys
import unittest
import pathspec.patterns.gitwildmatch
import pathspec.util
from pathspec.patterns.gitwildmatch import GitWildMatchPattern
if sys.version_info[0] >= 3:
unichr = chr
class GitWildMatchTest(unittest.TestCase):
"""
The ``GitWildMatchTest`` class tests the ``GitWildMatchPattern``
implementation.
"""
def test_00_empty(self):
"""
Tests an empty pattern.
"""
regex, include = GitWildMatchPattern.pattern_to_regex('')
self.assertIsNone(include)
self.assertIsNone(regex)
def test_01_absolute(self):
"""
Tests an absolute path pattern.
This should match:
an/absolute/file/path
an/absolute/file/path/foo
This should NOT match:
foo/an/absolute/file/path
"""
regex, include = GitWildMatchPattern.pattern_to_regex('/an/absolute/file/path')
self.assertTrue(include)
self.assertEqual(regex, '^an/absolute/file/path(?:/.*)?$')
pattern = GitWildMatchPattern(re.compile(regex), include)
results = set(pattern.match([
'an/absolute/file/path',
'an/absolute/file/path/foo',
'foo/an/absolute/file/path',
]))
self.assertEqual(results, {
'an/absolute/file/path',
'an/absolute/file/path/foo',
})
def test_01_absolute_root(self):
"""
Tests a single root absolute path pattern.
This should NOT match any file (according to git check-ignore
(v2.4.1)).
"""
regex, include = GitWildMatchPattern.pattern_to_regex('/')
self.assertIsNone(include)
self.assertIsNone(regex)
def test_01_relative(self):
"""
Tests a relative path pattern.
This should match:
spam
spam/
foo/spam
spam/foo
foo/spam/bar
"""
regex, include = GitWildMatchPattern.pattern_to_regex('spam')
self.assertTrue(include)
self.assertEqual(regex, '^(?:.+/)?spam(?:/.*)?$')
pattern = GitWildMatchPattern(re.compile(regex), include)
results = set(pattern.match([
'spam',
'spam/',
'foo/spam',
'spam/foo',
'foo/spam/bar',
]))
self.assertEqual(results, {
'spam',
'spam/',
'foo/spam',
'spam/foo',
'foo/spam/bar',
})
def test_01_relative_nested(self):
"""
Tests a relative nested path pattern.
This should match:
foo/spam
foo/spam/bar
This should **not** match (according to git check-ignore (v2.4.1)):
bar/foo/spam
"""
regex, include = GitWildMatchPattern.pattern_to_regex('foo/spam')
self.assertTrue(include)
self.assertEqual(regex, '^foo/spam(?:/.*)?$')
pattern = GitWildMatchPattern(re.compile(regex), include)
results = set(pattern.match([
'foo/spam',
'foo/spam/bar',
'bar/foo/spam',
]))
self.assertEqual(results, {
'foo/spam',
'foo/spam/bar',
})
def test_02_comment(self):
"""
Tests a comment pattern.
"""
regex, include = GitWildMatchPattern.pattern_to_regex('# Cork soakers.')
self.assertIsNone(include)
self.assertIsNone(regex)
def test_02_ignore(self):
"""
Tests an exclude pattern.
This should NOT match (according to git check-ignore (v2.4.1)):
temp/foo
"""
regex, include = GitWildMatchPattern.pattern_to_regex('!temp')
self.assertIsNotNone(include)
self.assertFalse(include)
self.assertEqual(regex, '^(?:.+/)?temp$')
pattern = GitWildMatchPattern(re.compile(regex), include)
results = set(pattern.match(['temp/foo']))
self.assertEqual(results, set())
def test_03_child_double_asterisk(self):
"""
Tests a directory name with a double-asterisk child
directory.
This should match:
spam/bar
This should **not** match (according to git check-ignore (v2.4.1)):
foo/spam/bar
"""
regex, include = GitWildMatchPattern.pattern_to_regex('spam/**')
self.assertTrue(include)
self.assertEqual(regex, '^spam/.*$')
pattern = GitWildMatchPattern(re.compile(regex), include)
results = set(pattern.match([
'spam/bar',
'foo/spam/bar',
]))
self.assertEqual(results, {'spam/bar'})
def test_03_inner_double_asterisk(self):
"""
Tests a path with an inner double-asterisk directory.
This should match:
left/bar/right
left/foo/bar/right
left/bar/right/foo
This should **not** match (according to git check-ignore (v2.4.1)):
foo/left/bar/right
"""
regex, include = GitWildMatchPattern.pattern_to_regex('left/**/right')
self.assertTrue(include)
self.assertEqual(regex, '^left(?:/.+)?/right(?:/.*)?$')
pattern = GitWildMatchPattern(re.compile(regex), include)
results = set(pattern.match([
'left/bar/right',
'left/foo/bar/right',
'left/bar/right/foo',
'foo/left/bar/right',
]))
self.assertEqual(results, {
'left/bar/right',
'left/foo/bar/right',
'left/bar/right/foo',
})
def test_03_only_double_asterisk(self):
"""
Tests a double-asterisk pattern which matches everything.
"""
regex, include = GitWildMatchPattern.pattern_to_regex('**')
self.assertTrue(include)
self.assertEqual(regex, '^.+$')
def test_03_parent_double_asterisk(self):
"""
Tests a file name with a double-asterisk parent directory.
This should match:
foo/spam
foo/spam/bar
"""
regex, include = GitWildMatchPattern.pattern_to_regex('**/spam')
self.assertTrue(include)
self.assertEqual(regex, '^(?:.+/)?spam(?:/.*)?$')
pattern = GitWildMatchPattern(re.compile(regex), include)
results = set(pattern.match([
'foo/spam',
'foo/spam/bar',
]))
self.assertEqual(results, {
'foo/spam',
'foo/spam/bar',
})
def test_04_infix_wildcard(self):
"""
Tests a pattern with an infix wildcard.
This should match:
foo--bar
foo-hello-bar
a/foo-hello-bar
foo-hello-bar/b
a/foo-hello-bar/b
"""
regex, include = GitWildMatchPattern.pattern_to_regex('foo-*-bar')
self.assertTrue(include)
self.assertEqual(regex, '^(?:.+/)?foo\\-[^/]*\\-bar(?:/.*)?$')
pattern = GitWildMatchPattern(re.compile(regex), include)
results = set(pattern.match([
'foo--bar',
'foo-hello-bar',
'a/foo-hello-bar',
'foo-hello-bar/b',
'a/foo-hello-bar/b',
]))
self.assertEqual(results, {
'foo--bar',
'foo-hello-bar',
'a/foo-hello-bar',
'foo-hello-bar/b',
'a/foo-hello-bar/b',
})
def test_04_postfix_wildcard(self):
"""
Tests a pattern with a postfix wildcard.
This should match:
~temp-
~temp-foo
~temp-foo/bar
foo/~temp-bar
foo/~temp-bar/baz
"""
regex, include = GitWildMatchPattern.pattern_to_regex('~temp-*')
self.assertTrue(include)
self.assertEqual(regex, '^(?:.+/)?\\~temp\\-[^/]*(?:/.*)?$')
pattern = GitWildMatchPattern(re.compile(regex), include)
results = set(pattern.match([
'~temp-',
'~temp-foo',
'~temp-foo/bar',
'foo/~temp-bar',
'foo/~temp-bar/baz',
]))
self.assertEqual(results, {
'~temp-',
'~temp-foo',
'~temp-foo/bar',
'foo/~temp-bar',
'foo/~temp-bar/baz',
})
def test_04_prefix_wildcard(self):
"""
Tests a pattern with a prefix wildcard.
This should match:
bar.py
bar.py/
foo/bar.py
foo/bar.py/baz
"""
regex, include = GitWildMatchPattern.pattern_to_regex('*.py')
self.assertTrue(include)
self.assertEqual(regex, '^(?:.+/)?[^/]*\\.py(?:/.*)?$')
pattern = GitWildMatchPattern(re.compile(regex), include)
results = set(pattern.match([
'bar.py',
'bar.py/',
'foo/bar.py',
'foo/bar.py/baz',
]))
self.assertEqual(results, {
'bar.py',
'bar.py/',
'foo/bar.py',
'foo/bar.py/baz',
})
def test_05_directory(self):
"""
Tests a directory pattern.
This should match:
dir/
foo/dir/
foo/dir/bar
This should **not** match:
dir
"""
regex, include = GitWildMatchPattern.pattern_to_regex('dir/')
self.assertTrue(include)
self.assertEqual(regex, '^(?:.+/)?dir/.*$')
pattern = GitWildMatchPattern(re.compile(regex), include)
results = set(pattern.match([
'dir/',
'foo/dir/',
'foo/dir/bar',
'dir',
]))
self.assertEqual(results, {
'dir/',
'foo/dir/',
'foo/dir/bar',
})
def test_06_registered(self):
"""
Tests that the pattern is registered.
"""
self.assertIs(pathspec.util.lookup_pattern('gitwildmatch'), GitWildMatchPattern)
def test_06_access_deprecated(self):
"""
Tests that the pattern is accessible from the root module using the
deprecated alias.
"""
self.assertTrue(hasattr(pathspec, 'GitIgnorePattern'))
self.assertTrue(issubclass(pathspec.GitIgnorePattern, GitWildMatchPattern))
def test_06_registered_deprecated(self):
"""
Tests that the pattern is registered under the deprecated alias.
"""
self.assertIs(pathspec.util.lookup_pattern('gitignore'), pathspec.GitIgnorePattern)
def test_07_encode_bytes(self):
"""
Test encoding bytes.
"""
encoded = "".join(map(unichr, range(0,256))).encode(pathspec.patterns.gitwildmatch._BYTES_ENCODING)
expected = b"\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f !\"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~\x7f\x80\x81\x82\x83\x84\x85\x86\x87\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f\x90\x91\x92\x93\x94\x95\x96\x97\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7\xa8\xa9\xaa\xab\xac\xad\xae\xaf\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7\xe8\xe9\xea\xeb\xec\xed\xee\xef\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff"
self.assertEqual(encoded, expected)
def test_07_decode_bytes(self):
"""
Test decoding bytes.
"""
decoded = bytes(bytearray(range(0,256))).decode(pathspec.patterns.gitwildmatch._BYTES_ENCODING)
expected = "\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f !\"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~\x7f\x80\x81\x82\x83\x84\x85\x86\x87\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f\x90\x91\x92\x93\x94\x95\x96\x97\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7\xa8\xa9\xaa\xab\xac\xad\xae\xaf\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7\xe8\xe9\xea\xeb\xec\xed\xee\xef\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff"
self.assertEqual(decoded, expected)
def test_07_match_bytes_and_bytes(self):
"""
Test byte string patterns matching byte string paths.
"""
pattern = GitWildMatchPattern(b'*.py')
results = set(pattern.match([b'a.py']))
self.assertEqual(results, {b'a.py'})
def test_07_match_bytes_and_bytes_complete(self):
"""
Test byte string patterns matching byte string paths across the full range of byte values.
"""
encoded = bytes(bytearray(range(0,256)))
escaped = b"".join(b"\\" + encoded[i:i+1] for i in range(len(encoded)))
pattern = GitWildMatchPattern(escaped)
results = set(pattern.match([encoded]))
self.assertEqual(results, {encoded})
@unittest.skipIf(sys.version_info[0] >= 3, "Python 3 is strict")
def test_07_match_bytes_and_unicode(self):
"""
Test byte string patterns matching unicode paths.
"""
pattern = GitWildMatchPattern(b'*.py')
results = set(pattern.match(['a.py']))
self.assertEqual(results, {'a.py'})
@unittest.skipIf(sys.version_info[0] == 2, "Python 2 is lenient")
def test_07_match_bytes_and_unicode_fail(self):
"""
Test that byte string patterns raise TypeError against unicode paths.
"""
pattern = GitWildMatchPattern(b'*.py')
with self.assertRaises(TypeError):
for _ in pattern.match(['a.py']):
pass
@unittest.skipIf(sys.version_info[0] >= 3, "Python 3 is strict")
def test_07_match_unicode_and_bytes(self):
"""
Test unicode patterns with byte paths.
"""
pattern = GitWildMatchPattern('*.py')
results = set(pattern.match([b'a.py']))
self.assertEqual(results, {b'a.py'})
@unittest.skipIf(sys.version_info[0] == 2, "Python 2 is lenient")
def test_07_match_unicode_and_bytes_fail(self):
"""
Test that unicode patterns raise TypeError against byte paths.
"""
pattern = GitWildMatchPattern('*.py')
with self.assertRaises(TypeError):
for _ in pattern.match([b'a.py']):
pass
def test_07_match_unicode_and_unicode(self):
"""
Test unicode patterns with unicode paths.
"""
pattern = GitWildMatchPattern('*.py')
results = set(pattern.match(['a.py']))
self.assertEqual(results, {'a.py'})
def test_08_escape(self):
"""
Test escaping a string with meta-characters
"""
fname = "file!with*weird#naming_[1].t?t"
escaped = r"file\!with\*weird\#naming_\[1\].t\?t"
result = GitWildMatchPattern.escape(fname)
self.assertEqual(result, escaped)
| {
"content_hash": "e79fcf34292d53bbc2b2df8602ac5f6d",
"timestamp": "",
"source": "github",
"line_count": 474,
"max_line_length": 751,
"avg_line_length": 26.974683544303797,
"alnum_prop": 0.669169404035664,
"repo_name": "TeamSPoon/logicmoo_workspace",
"id": "e552d5ef5398f47f6f90f27c8360be4f63a2fe82",
"size": "12786",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "packs_web/butterfly/lib/python3.7/site-packages/pathspec/tests/test_gitwildmatch.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "342"
},
{
"name": "C",
"bytes": "1"
},
{
"name": "C++",
"bytes": "1"
},
{
"name": "CSS",
"bytes": "126627"
},
{
"name": "HTML",
"bytes": "839172"
},
{
"name": "Java",
"bytes": "11116"
},
{
"name": "JavaScript",
"bytes": "238700"
},
{
"name": "PHP",
"bytes": "42253"
},
{
"name": "Perl 6",
"bytes": "23"
},
{
"name": "Prolog",
"bytes": "440882"
},
{
"name": "PureBasic",
"bytes": "1334"
},
{
"name": "Rich Text Format",
"bytes": "3436542"
},
{
"name": "Roff",
"bytes": "42"
},
{
"name": "Shell",
"bytes": "61603"
},
{
"name": "TeX",
"bytes": "99504"
}
],
"symlink_target": ""
} |
import datetime
import os
import unittest
from copy import copy
from decimal import Decimal
from django.conf import settings
from django.contrib.gis.gdal import DataSource
from django.contrib.gis.utils.layermapping import (
InvalidDecimal, InvalidString, LayerMapError, LayerMapping,
MissingForeignKey,
)
from django.db import connection
from django.test import TestCase, override_settings
from .models import (
City, County, CountyFeat, DoesNotAllowNulls, HasNulls, ICity1, ICity2,
Interstate, Invalid, State, city_mapping, co_mapping, cofeat_mapping,
has_nulls_mapping, inter_mapping,
)
shp_path = os.path.realpath(os.path.join(os.path.dirname(__file__), os.pardir, 'data'))
city_shp = os.path.join(shp_path, 'cities', 'cities.shp')
co_shp = os.path.join(shp_path, 'counties', 'counties.shp')
inter_shp = os.path.join(shp_path, 'interstates', 'interstates.shp')
invalid_shp = os.path.join(shp_path, 'invalid', 'emptypoints.shp')
has_nulls_geojson = os.path.join(shp_path, 'has_nulls', 'has_nulls.geojson')
# Dictionaries to hold what's expected in the county shapefile.
NAMES = ['Bexar', 'Galveston', 'Harris', 'Honolulu', 'Pueblo']
NUMS = [1, 2, 1, 19, 1] # Number of polygons for each.
STATES = ['Texas', 'Texas', 'Texas', 'Hawaii', 'Colorado']
class LayerMapTest(TestCase):
def test_init(self):
"Testing LayerMapping initialization."
# Model field that does not exist.
bad1 = copy(city_mapping)
bad1['foobar'] = 'FooField'
# Shapefile field that does not exist.
bad2 = copy(city_mapping)
bad2['name'] = 'Nombre'
# Nonexistent geographic field type.
bad3 = copy(city_mapping)
bad3['point'] = 'CURVE'
# Incrementing through the bad mapping dictionaries and
# ensuring that a LayerMapError is raised.
for bad_map in (bad1, bad2, bad3):
with self.assertRaises(LayerMapError):
LayerMapping(City, city_shp, bad_map)
# A LookupError should be thrown for bogus encodings.
with self.assertRaises(LookupError):
LayerMapping(City, city_shp, city_mapping, encoding='foobar')
def test_simple_layermap(self):
"Test LayerMapping import of a simple point shapefile."
# Setting up for the LayerMapping.
lm = LayerMapping(City, city_shp, city_mapping)
lm.save()
# There should be three cities in the shape file.
self.assertEqual(3, City.objects.count())
# Opening up the shapefile, and verifying the values in each
# of the features made it to the model.
ds = DataSource(city_shp)
layer = ds[0]
for feat in layer:
city = City.objects.get(name=feat['Name'].value)
self.assertEqual(feat['Population'].value, city.population)
self.assertEqual(Decimal(str(feat['Density'])), city.density)
self.assertEqual(feat['Created'].value, city.dt)
# Comparing the geometries.
pnt1, pnt2 = feat.geom, city.point
self.assertAlmostEqual(pnt1.x, pnt2.x, 5)
self.assertAlmostEqual(pnt1.y, pnt2.y, 5)
def test_layermap_strict(self):
"Testing the `strict` keyword, and import of a LineString shapefile."
# When the `strict` keyword is set an error encountered will force
# the importation to stop.
with self.assertRaises(InvalidDecimal):
lm = LayerMapping(Interstate, inter_shp, inter_mapping)
lm.save(silent=True, strict=True)
Interstate.objects.all().delete()
# This LayerMapping should work b/c `strict` is not set.
lm = LayerMapping(Interstate, inter_shp, inter_mapping)
lm.save(silent=True)
# Two interstate should have imported correctly.
self.assertEqual(2, Interstate.objects.count())
# Verifying the values in the layer w/the model.
ds = DataSource(inter_shp)
# Only the first two features of this shapefile are valid.
valid_feats = ds[0][:2]
for feat in valid_feats:
istate = Interstate.objects.get(name=feat['Name'].value)
if feat.fid == 0:
self.assertEqual(Decimal(str(feat['Length'])), istate.length)
elif feat.fid == 1:
# Everything but the first two decimal digits were truncated,
# because the Interstate model's `length` field has decimal_places=2.
self.assertAlmostEqual(feat.get('Length'), float(istate.length), 2)
for p1, p2 in zip(feat.geom, istate.path):
self.assertAlmostEqual(p1[0], p2[0], 6)
self.assertAlmostEqual(p1[1], p2[1], 6)
def county_helper(self, county_feat=True):
"Helper function for ensuring the integrity of the mapped County models."
for name, n, st in zip(NAMES, NUMS, STATES):
# Should only be one record b/c of `unique` keyword.
c = County.objects.get(name=name)
self.assertEqual(n, len(c.mpoly))
self.assertEqual(st, c.state.name) # Checking ForeignKey mapping.
# Multiple records because `unique` was not set.
if county_feat:
qs = CountyFeat.objects.filter(name=name)
self.assertEqual(n, qs.count())
def test_layermap_unique_multigeometry_fk(self):
"Testing the `unique` and `transform` keywords, geometry collection conversion, and ForeignKey mappings."
# All the following should work.
# Telling LayerMapping that we want no transformations performed on the data.
lm = LayerMapping(County, co_shp, co_mapping, transform=False)
# Specifying the source spatial reference system via the `source_srs` keyword.
lm = LayerMapping(County, co_shp, co_mapping, source_srs=4269)
lm = LayerMapping(County, co_shp, co_mapping, source_srs='NAD83')
# Unique may take tuple or string parameters.
for arg in ('name', ('name', 'mpoly')):
lm = LayerMapping(County, co_shp, co_mapping, transform=False, unique=arg)
# Now test for failures
# Testing invalid params for the `unique` keyword.
for e, arg in ((TypeError, 5.0), (ValueError, 'foobar'), (ValueError, ('name', 'mpolygon'))):
with self.assertRaises(e):
LayerMapping(County, co_shp, co_mapping, transform=False, unique=arg)
# No source reference system defined in the shapefile, should raise an error.
if connection.features.supports_transform:
with self.assertRaises(LayerMapError):
LayerMapping(County, co_shp, co_mapping)
# Passing in invalid ForeignKey mapping parameters -- must be a dictionary
# mapping for the model the ForeignKey points to.
bad_fk_map1 = copy(co_mapping)
bad_fk_map1['state'] = 'name'
bad_fk_map2 = copy(co_mapping)
bad_fk_map2['state'] = {'nombre': 'State'}
with self.assertRaises(TypeError):
LayerMapping(County, co_shp, bad_fk_map1, transform=False)
with self.assertRaises(LayerMapError):
LayerMapping(County, co_shp, bad_fk_map2, transform=False)
# There exist no State models for the ForeignKey mapping to work -- should raise
# a MissingForeignKey exception (this error would be ignored if the `strict`
# keyword is not set).
lm = LayerMapping(County, co_shp, co_mapping, transform=False, unique='name')
with self.assertRaises(MissingForeignKey):
lm.save(silent=True, strict=True)
# Now creating the state models so the ForeignKey mapping may work.
State.objects.bulk_create([
State(name='Colorado'), State(name='Hawaii'), State(name='Texas')
])
# If a mapping is specified as a collection, all OGR fields that
# are not collections will be converted into them. For example,
# a Point column would be converted to MultiPoint. Other things being done
# w/the keyword args:
# `transform=False`: Specifies that no transform is to be done; this
# has the effect of ignoring the spatial reference check (because the
# county shapefile does not have implicit spatial reference info).
#
# `unique='name'`: Creates models on the condition that they have
# unique county names; geometries from each feature however will be
# appended to the geometry collection of the unique model. Thus,
# all of the various islands in Honolulu county will be in one
# database record with a MULTIPOLYGON type.
lm = LayerMapping(County, co_shp, co_mapping, transform=False, unique='name')
lm.save(silent=True, strict=True)
# A reference that doesn't use the unique keyword; a new database record will
# be created for each polygon.
lm = LayerMapping(CountyFeat, co_shp, cofeat_mapping, transform=False)
lm.save(silent=True, strict=True)
# The county helper is called to ensure integrity of County models.
self.county_helper()
def test_fid_range_step(self):
"Tests the `fid_range` keyword and the `step` keyword of .save()."
# Function for clearing out all the counties before testing.
def clear_counties():
County.objects.all().delete()
State.objects.bulk_create([
State(name='Colorado'), State(name='Hawaii'), State(name='Texas')
])
# Initializing the LayerMapping object to use in these tests.
lm = LayerMapping(County, co_shp, co_mapping, transform=False, unique='name')
# Bad feature id ranges should raise a type error.
bad_ranges = (5.0, 'foo', co_shp)
for bad in bad_ranges:
with self.assertRaises(TypeError):
lm.save(fid_range=bad)
# Step keyword should not be allowed w/`fid_range`.
fr = (3, 5) # layer[3:5]
with self.assertRaises(LayerMapError):
lm.save(fid_range=fr, step=10)
lm.save(fid_range=fr)
# Features IDs 3 & 4 are for Galveston County, Texas -- only
# one model is returned because the `unique` keyword was set.
qs = County.objects.all()
self.assertEqual(1, qs.count())
self.assertEqual('Galveston', qs[0].name)
# Features IDs 5 and beyond for Honolulu County, Hawaii, and
# FID 0 is for Pueblo County, Colorado.
clear_counties()
lm.save(fid_range=slice(5, None), silent=True, strict=True) # layer[5:]
lm.save(fid_range=slice(None, 1), silent=True, strict=True) # layer[:1]
# Only Pueblo & Honolulu counties should be present because of
# the `unique` keyword. Have to set `order_by` on this QuerySet
# or else MySQL will return a different ordering than the other dbs.
qs = County.objects.order_by('name')
self.assertEqual(2, qs.count())
hi, co = tuple(qs)
hi_idx, co_idx = tuple(map(NAMES.index, ('Honolulu', 'Pueblo')))
self.assertEqual('Pueblo', co.name)
self.assertEqual(NUMS[co_idx], len(co.mpoly))
self.assertEqual('Honolulu', hi.name)
self.assertEqual(NUMS[hi_idx], len(hi.mpoly))
# Testing the `step` keyword -- should get the same counties
# regardless of whether we use a step that divides equally, one that is odd,
# or one that is larger than the dataset.
for st in (4, 7, 1000):
clear_counties()
lm.save(step=st, strict=True)
self.county_helper(county_feat=False)
def test_model_inheritance(self):
"Tests LayerMapping on inherited models. See #12093."
icity_mapping = {
'name': 'Name',
'population': 'Population',
'density': 'Density',
'point': 'POINT',
'dt': 'Created',
}
# Parent model has geometry field.
lm1 = LayerMapping(ICity1, city_shp, icity_mapping)
lm1.save()
# Grandparent has geometry field.
lm2 = LayerMapping(ICity2, city_shp, icity_mapping)
lm2.save()
self.assertEqual(6, ICity1.objects.count())
self.assertEqual(3, ICity2.objects.count())
def test_invalid_layer(self):
"Tests LayerMapping on invalid geometries. See #15378."
invalid_mapping = {'point': 'POINT'}
lm = LayerMapping(Invalid, invalid_shp, invalid_mapping,
source_srs=4326)
lm.save(silent=True)
def test_charfield_too_short(self):
mapping = copy(city_mapping)
mapping['name_short'] = 'Name'
lm = LayerMapping(City, city_shp, mapping)
with self.assertRaises(InvalidString):
lm.save(silent=True, strict=True)
def test_textfield(self):
"String content fits also in a TextField"
mapping = copy(city_mapping)
mapping['name_txt'] = 'Name'
lm = LayerMapping(City, city_shp, mapping)
lm.save(silent=True, strict=True)
self.assertEqual(City.objects.count(), 3)
self.assertEqual(City.objects.get(name='Houston').name_txt, "Houston")
def test_encoded_name(self):
""" Test a layer containing utf-8-encoded name """
city_shp = os.path.join(shp_path, 'ch-city', 'ch-city.shp')
lm = LayerMapping(City, city_shp, city_mapping)
lm.save(silent=True, strict=True)
self.assertEqual(City.objects.count(), 1)
self.assertEqual(City.objects.all()[0].name, "Zürich")
def test_null_geom_with_unique(self):
"""LayerMapping may be created with a unique and a null geometry."""
State.objects.bulk_create([State(name='Colorado'), State(name='Hawaii'), State(name='Texas')])
hw = State.objects.get(name='Hawaii')
hu = County.objects.create(name='Honolulu', state=hw, mpoly=None)
lm = LayerMapping(County, co_shp, co_mapping, transform=False, unique='name')
lm.save(silent=True, strict=True)
hu.refresh_from_db()
self.assertIsNotNone(hu.mpoly)
self.assertEqual(hu.mpoly.ogr.num_coords, 449)
def test_null_number_imported(self):
"""LayerMapping import of GeoJSON with a null numeric value."""
lm = LayerMapping(HasNulls, has_nulls_geojson, has_nulls_mapping)
lm.save()
self.assertEqual(HasNulls.objects.count(), 3)
self.assertEqual(HasNulls.objects.filter(num=0).count(), 1)
self.assertEqual(HasNulls.objects.filter(num__isnull=True).count(), 1)
def test_null_string_imported(self):
"Test LayerMapping import of GeoJSON with a null string value."
lm = LayerMapping(HasNulls, has_nulls_geojson, has_nulls_mapping)
lm.save()
self.assertEqual(HasNulls.objects.filter(name='None').count(), 0)
num_empty = 1 if connection.features.interprets_empty_strings_as_nulls else 0
self.assertEqual(HasNulls.objects.filter(name='').count(), num_empty)
self.assertEqual(HasNulls.objects.filter(name__isnull=True).count(), 1)
def test_nullable_boolean_imported(self):
"""LayerMapping import of GeoJSON with a nullable boolean value."""
lm = LayerMapping(HasNulls, has_nulls_geojson, has_nulls_mapping)
lm.save()
self.assertEqual(HasNulls.objects.filter(boolean=True).count(), 1)
self.assertEqual(HasNulls.objects.filter(boolean=False).count(), 1)
self.assertEqual(HasNulls.objects.filter(boolean__isnull=True).count(), 1)
def test_nullable_datetime_imported(self):
"""LayerMapping import of GeoJSON with a nullable date/time value."""
lm = LayerMapping(HasNulls, has_nulls_geojson, has_nulls_mapping)
lm.save()
self.assertEqual(HasNulls.objects.filter(datetime__lt=datetime.date(1994, 8, 15)).count(), 1)
self.assertEqual(HasNulls.objects.filter(datetime='2018-11-29T03:02:52').count(), 1)
self.assertEqual(HasNulls.objects.filter(datetime__isnull=True).count(), 1)
def test_uuids_imported(self):
"""LayerMapping import of GeoJSON with UUIDs."""
lm = LayerMapping(HasNulls, has_nulls_geojson, has_nulls_mapping)
lm.save()
self.assertEqual(HasNulls.objects.filter(uuid='1378c26f-cbe6-44b0-929f-eb330d4991f5').count(), 1)
def test_null_number_imported_not_allowed(self):
"""
LayerMapping import of GeoJSON with nulls to fields that don't permit
them.
"""
lm = LayerMapping(DoesNotAllowNulls, has_nulls_geojson, has_nulls_mapping)
lm.save(silent=True)
# When a model fails to save due to IntegrityError (null in non-null
# column), subsequent saves fail with "An error occurred in the current
# transaction. You can't execute queries until the end of the 'atomic'
# block." On Oracle and MySQL, the one object that did load appears in
# this count. On other databases, no records appear.
self.assertLessEqual(DoesNotAllowNulls.objects.count(), 1)
class OtherRouter:
def db_for_read(self, model, **hints):
return 'other'
def db_for_write(self, model, **hints):
return self.db_for_read(model, **hints)
def allow_relation(self, obj1, obj2, **hints):
# ContentType objects are created during a post-migrate signal while
# performing fixture teardown using the default database alias and
# don't abide by the database specified by this router.
return True
def allow_migrate(self, db, app_label, **hints):
return True
@override_settings(DATABASE_ROUTERS=[OtherRouter()])
class LayerMapRouterTest(TestCase):
databases = {'default', 'other'}
@unittest.skipUnless(len(settings.DATABASES) > 1, 'multiple databases required')
def test_layermapping_default_db(self):
lm = LayerMapping(City, city_shp, city_mapping)
self.assertEqual(lm.using, 'other')
| {
"content_hash": "13689ae613f8b6b35489b6eb067dd6f6",
"timestamp": "",
"source": "github",
"line_count": 405,
"max_line_length": 105,
"avg_line_length": 44.37283950617284,
"alnum_prop": 0.640977129820266,
"repo_name": "schinckel/django",
"id": "50fdb4815a7211675efea033aac29cf064a0a404",
"size": "17972",
"binary": false,
"copies": "37",
"ref": "refs/heads/master",
"path": "tests/gis_tests/layermap/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "85024"
},
{
"name": "HTML",
"bytes": "224566"
},
{
"name": "JavaScript",
"bytes": "251536"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "13234142"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
} |
"""Tests for the gcp module - serviceusage.py"""
import typing
import unittest
import mock
from tests.providers.gcp import gcp_mocks
class GoogleServiceUsageTest(unittest.TestCase):
"""Test Google Service Usage class."""
# pylint: disable=line-too-long
@typing.no_type_check
@mock.patch('libcloudforensics.providers.gcp.internal.common.ExecuteRequest')
@mock.patch('libcloudforensics.providers.gcp.internal.serviceusage.GoogleServiceUsage.GsuApi')
def testGetEnabled(self, mock_gsu_api, mock_execute_request):
"""Validates the GetEnabled function"""
mock_execute_request.return_value = gcp_mocks.MOCK_ENABLED_SERVICES
mock_service_usage = mock_gsu_api.return_value.services.return_value
response = gcp_mocks.FAKE_SERVICE_USAGE.GetEnabled()
mock_execute_request.assert_called_with(mock_service_usage,
'list', {'parent': 'projects/fake-project', 'filter': 'state:ENABLED'})
self.assertListEqual(response, [
'bigquery.googleapis.com',
'cloudapis.googleapis.com',
'compute.googleapis.com'
])
@typing.no_type_check
@mock.patch('libcloudforensics.providers.gcp.internal.common.ExecuteRequest')
@mock.patch('libcloudforensics.providers.gcp.internal.serviceusage.GoogleServiceUsage.GsuApi')
def testEnableService(self, mock_gsu_api, mock_execute_request):
"""Validates that EnableService calls ExecuteRequest with the correct
arguments."""
mock_service_usage = mock_gsu_api.return_value.services.return_value
mock_execute_request.return_value = [{'name': 'operations/noop.DONE_OPERATION'}]
gcp_mocks.FAKE_SERVICE_USAGE.EnableService('container.googleapis.com')
mock_execute_request.assert_called_with(mock_service_usage, 'enable',
{'name': 'projects/fake-project/services/container.googleapis.com'})
@typing.no_type_check
@mock.patch('libcloudforensics.providers.gcp.internal.common.ExecuteRequest')
@mock.patch('libcloudforensics.providers.gcp.internal.serviceusage.GoogleServiceUsage.GsuApi')
def testDisableService(self, mock_gsu_api, mock_execute_request):
"""Validates that DisableService calls ExecuteRequest with the correct
arguments."""
mock_service_usage = mock_gsu_api.return_value.services.return_value
mock_execute_request.return_value = [{'name': 'operations/noop.DONE_OPERATION'}]
gcp_mocks.FAKE_SERVICE_USAGE.DisableService('container.googleapis.com')
mock_execute_request.assert_called_with(mock_service_usage, 'disable',
{'name': 'projects/fake-project/services/container.googleapis.com'})
| {
"content_hash": "6f7019f80b4432e80881acf2a4c0fac2",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 96,
"avg_line_length": 45.80357142857143,
"alnum_prop": 0.7508771929824561,
"repo_name": "google/cloud-forensics-utils",
"id": "8f90b22ad491efb7d1c5f5fea26d7b0194d74d6e",
"size": "3165",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/providers/gcp/internal/test_serviceusage.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "843359"
},
{
"name": "Shell",
"bytes": "3622"
}
],
"symlink_target": ""
} |
import asyncore
import email.mime.text
from email.message import EmailMessage
import email.utils
import socket
import smtpd
import smtplib
import io
import re
import sys
import time
import select
import errno
import textwrap
import unittest
from test import support, mock_socket
try:
import threading
except ImportError:
threading = None
HOST = support.HOST
if sys.platform == 'darwin':
# select.poll returns a select.POLLHUP at the end of the tests
# on darwin, so just ignore it
def handle_expt(self):
pass
smtpd.SMTPChannel.handle_expt = handle_expt
def server(evt, buf, serv):
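# Accept a single connection and dribble buf out to it in select()-paced
# chunks; evt is set once the socket is listening and again when the server
# exits.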
serv.listen()
evt.set()
try:
conn, addr = serv.accept()
except socket.timeout:
pass
else:
n = 500
while buf and n > 0:
r, w, e = select.select([], [conn], [])
if w:
sent = conn.send(buf)
buf = buf[sent:]
n -= 1
conn.close()
finally:
serv.close()
evt.set()
class GeneralTests(unittest.TestCase):
def setUp(self):
smtplib.socket = mock_socket
self.port = 25
def tearDown(self):
smtplib.socket = socket
# This method is no longer used but is retained for backward compatibility,
# so test to make sure it still works.
def testQuoteData(self):
teststr = "abc\n.jkl\rfoo\r\n..blue"
expected = "abc\r\n..jkl\r\nfoo\r\n...blue"
self.assertEqual(expected, smtplib.quotedata(teststr))
def testBasic1(self):
mock_socket.reply_with(b"220 Hola mundo")
# connects
smtp = smtplib.SMTP(HOST, self.port)
smtp.close()
def testSourceAddress(self):
mock_socket.reply_with(b"220 Hola mundo")
# connects
smtp = smtplib.SMTP(HOST, self.port,
source_address=('127.0.0.1',19876))
self.assertEqual(smtp.source_address, ('127.0.0.1', 19876))
smtp.close()
def testBasic2(self):
mock_socket.reply_with(b"220 Hola mundo")
# connects, include port in host name
smtp = smtplib.SMTP("%s:%s" % (HOST, self.port))
smtp.close()
def testLocalHostName(self):
mock_socket.reply_with(b"220 Hola mundo")
# check that supplied local_hostname is used
smtp = smtplib.SMTP(HOST, self.port, local_hostname="testhost")
self.assertEqual(smtp.local_hostname, "testhost")
smtp.close()
def testTimeoutDefault(self):
mock_socket.reply_with(b"220 Hola mundo")
self.assertIsNone(mock_socket.getdefaulttimeout())
mock_socket.setdefaulttimeout(30)
self.assertEqual(mock_socket.getdefaulttimeout(), 30)
try:
smtp = smtplib.SMTP(HOST, self.port)
finally:
mock_socket.setdefaulttimeout(None)
self.assertEqual(smtp.sock.gettimeout(), 30)
smtp.close()
def testTimeoutNone(self):
mock_socket.reply_with(b"220 Hola mundo")
self.assertIsNone(socket.getdefaulttimeout())
socket.setdefaulttimeout(30)
try:
smtp = smtplib.SMTP(HOST, self.port, timeout=None)
finally:
socket.setdefaulttimeout(None)
self.assertIsNone(smtp.sock.gettimeout())
smtp.close()
def testTimeoutValue(self):
mock_socket.reply_with(b"220 Hola mundo")
smtp = smtplib.SMTP(HOST, self.port, timeout=30)
self.assertEqual(smtp.sock.gettimeout(), 30)
smtp.close()
def test_debuglevel(self):
mock_socket.reply_with(b"220 Hello world")
smtp = smtplib.SMTP()
smtp.set_debuglevel(1)
with support.captured_stderr() as stderr:
smtp.connect(HOST, self.port)
smtp.close()
expected = re.compile(r"^connect:", re.MULTILINE)
self.assertRegex(stderr.getvalue(), expected)
def test_debuglevel_2(self):
mock_socket.reply_with(b"220 Hello world")
smtp = smtplib.SMTP()
smtp.set_debuglevel(2)
with support.captured_stderr() as stderr:
smtp.connect(HOST, self.port)
smtp.close()
expected = re.compile(r"^\d{2}:\d{2}:\d{2}\.\d{6} connect: ",
re.MULTILINE)
self.assertRegex(stderr.getvalue(), expected)
# Test server thread using the specified SMTP server class
def debugging_server(serv, serv_evt, client_evt):
serv_evt.set()
try:
if hasattr(select, 'poll'):
poll_fun = asyncore.poll2
else:
poll_fun = asyncore.poll
n = 1000
while asyncore.socket_map and n > 0:
poll_fun(0.01, asyncore.socket_map)
# when the client conversation is finished, it will
# set client_evt, and it's then ok to kill the server
if client_evt.is_set():
serv.close()
break
n -= 1
except socket.timeout:
pass
finally:
if not client_evt.is_set():
# allow some time for the client to read the result
time.sleep(0.5)
serv.close()
asyncore.close_all()
serv_evt.set()
MSG_BEGIN = '---------- MESSAGE FOLLOWS ----------\n'
MSG_END = '------------ END MESSAGE ------------\n'
# NOTE: Some SMTP objects in the tests below are created with a non-default
# local_hostname argument to the constructor, since (on some systems) the FQDN
# lookup caused by the default local_hostname sometimes takes so long that the
# test server times out, causing the test to fail.
# Test behavior of smtpd.DebuggingServer
@unittest.skipUnless(threading, 'Threading required for this test.')
class DebuggingServerTests(unittest.TestCase):
maxDiff = None
def setUp(self):
self.real_getfqdn = socket.getfqdn
socket.getfqdn = mock_socket.getfqdn
# temporarily replace sys.stdout to capture DebuggingServer output
self.old_stdout = sys.stdout
self.output = io.StringIO()
sys.stdout = self.output
self.serv_evt = threading.Event()
self.client_evt = threading.Event()
# Capture SMTPChannel debug output
self.old_DEBUGSTREAM = smtpd.DEBUGSTREAM
smtpd.DEBUGSTREAM = io.StringIO()
# Pick a random unused port by passing 0 for the port number
self.serv = smtpd.DebuggingServer((HOST, 0), ('nowhere', -1),
decode_data=True)
# Keep a note of what port was assigned
self.port = self.serv.socket.getsockname()[1]
serv_args = (self.serv, self.serv_evt, self.client_evt)
self.thread = threading.Thread(target=debugging_server, args=serv_args)
self.thread.start()
# wait until server thread has assigned a port number
self.serv_evt.wait()
self.serv_evt.clear()
def tearDown(self):
socket.getfqdn = self.real_getfqdn
# indicate that the client is finished
self.client_evt.set()
# wait for the server thread to terminate
self.serv_evt.wait()
self.thread.join()
# restore sys.stdout
sys.stdout = self.old_stdout
# restore DEBUGSTREAM
smtpd.DEBUGSTREAM.close()
smtpd.DEBUGSTREAM = self.old_DEBUGSTREAM
def testBasic(self):
# connect
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.quit()
def testSourceAddress(self):
# connect
port = support.find_unused_port()
try:
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=3, source_address=('127.0.0.1', port))
self.assertEqual(smtp.source_address, ('127.0.0.1', port))
self.assertEqual(smtp.local_hostname, 'localhost')
smtp.quit()
except OSError as e:
if e.errno == errno.EADDRINUSE:
self.skipTest("couldn't bind to port %d" % port)
raise
def testNOOP(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
expected = (250, b'OK')
self.assertEqual(smtp.noop(), expected)
smtp.quit()
def testRSET(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
expected = (250, b'OK')
self.assertEqual(smtp.rset(), expected)
smtp.quit()
def testEHLO(self):
# EHLO isn't implemented in DebuggingServer
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
expected = (250, b'\nSIZE 33554432\nHELP')
self.assertEqual(smtp.ehlo(), expected)
smtp.quit()
def testEXPNNotImplemented(self):
# EXPN isn't implemented in DebuggingServer
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
expected = (502, b'EXPN not implemented')
smtp.putcmd('EXPN')
self.assertEqual(smtp.getreply(), expected)
smtp.quit()
def testVRFY(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
expected = (252, b'Cannot VRFY user, but will accept message ' + \
b'and attempt delivery')
self.assertEqual(smtp.vrfy('nobody@nowhere.com'), expected)
self.assertEqual(smtp.verify('nobody@nowhere.com'), expected)
smtp.quit()
def testSecondHELO(self):
# check that a second HELO returns a message that it's a duplicate
# (this behavior is specific to smtpd.SMTPChannel)
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.helo()
expected = (503, b'Duplicate HELO/EHLO')
self.assertEqual(smtp.helo(), expected)
smtp.quit()
def testHELP(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
self.assertEqual(smtp.help(), b'Supported commands: EHLO HELO MAIL ' + \
b'RCPT DATA RSET NOOP QUIT VRFY')
smtp.quit()
def testSend(self):
# connect and send mail
m = 'A test message'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.sendmail('John', 'Sally', m)
# XXX(nnorwitz): this test is flaky and dies with a bad file descriptor
# in asyncore. This sleep might help, but should really be fixed
# properly by using an Event variable.
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
mexpect = '%s%s\n%s' % (MSG_BEGIN, m, MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
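# The pattern above (set client_evt, wait on serv_evt, then flush and read
# self.output) is reused by the remaining send tests: it gives the asyncore
# server loop time to finish writing the captured message before the
# assertion runs.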
def testSendBinary(self):
m = b'A test message'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.sendmail('John', 'Sally', m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.decode('ascii'), MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
def testSendNeedingDotQuote(self):
# Issue 12283
m = '.A test\n.mes.sage.'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.sendmail('John', 'Sally', m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
mexpect = '%s%s\n%s' % (MSG_BEGIN, m, MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
def testSendNullSender(self):
m = 'A test message'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.sendmail('<>', 'Sally', m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
mexpect = '%s%s\n%s' % (MSG_BEGIN, m, MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
debugout = smtpd.DEBUGSTREAM.getvalue()
sender = re.compile("^sender: <>$", re.MULTILINE)
self.assertRegex(debugout, sender)
def testSendMessage(self):
m = email.mime.text.MIMEText('A test message')
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.send_message(m, from_addr='John', to_addrs='Sally')
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
# Add the X-Peer header that DebuggingServer adds
m['X-Peer'] = socket.gethostbyname('localhost')
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
def testSendMessageWithAddresses(self):
m = email.mime.text.MIMEText('A test message')
m['From'] = 'foo@bar.com'
m['To'] = 'John'
m['CC'] = 'Sally, Fred'
m['Bcc'] = 'John Root <root@localhost>, "Dinsdale" <warped@silly.walks.com>'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.send_message(m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
# make sure the Bcc header is still in the message.
self.assertEqual(m['Bcc'], 'John Root <root@localhost>, "Dinsdale" '
'<warped@silly.walks.com>')
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
# Add the X-Peer header that DebuggingServer adds
m['X-Peer'] = socket.gethostbyname('localhost')
# The Bcc header should not be transmitted.
del m['Bcc']
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
debugout = smtpd.DEBUGSTREAM.getvalue()
sender = re.compile("^sender: foo@bar.com$", re.MULTILINE)
self.assertRegex(debugout, sender)
for addr in ('John', 'Sally', 'Fred', 'root@localhost',
'warped@silly.walks.com'):
to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
re.MULTILINE)
self.assertRegex(debugout, to_addr)
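# send_message() gathers recipients from the To, CC and Bcc headers when
# to_addrs is not supplied, but deletes Bcc from the copy it transmits --
# which is why the test checks the header is still on the local message
# while the captured output omits it.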
def testSendMessageWithSomeAddresses(self):
# Make sure nothing breaks if not all of the three 'to' headers exist
m = email.mime.text.MIMEText('A test message')
m['From'] = 'foo@bar.com'
m['To'] = 'John, Dinsdale'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.send_message(m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
# Add the X-Peer header that DebuggingServer adds
m['X-Peer'] = socket.gethostbyname('localhost')
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
debugout = smtpd.DEBUGSTREAM.getvalue()
sender = re.compile("^sender: foo@bar.com$", re.MULTILINE)
self.assertRegex(debugout, sender)
for addr in ('John', 'Dinsdale'):
to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
re.MULTILINE)
self.assertRegex(debugout, to_addr)
def testSendMessageWithSpecifiedAddresses(self):
# Make sure addresses specified in call override those in message.
m = email.mime.text.MIMEText('A test message')
m['From'] = 'foo@bar.com'
m['To'] = 'John, Dinsdale'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.send_message(m, from_addr='joe@example.com', to_addrs='foo@example.net')
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
# Add the X-Peer header that DebuggingServer adds
m['X-Peer'] = socket.gethostbyname('localhost')
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
debugout = smtpd.DEBUGSTREAM.getvalue()
sender = re.compile("^sender: joe@example.com$", re.MULTILINE)
self.assertRegex(debugout, sender)
for addr in ('John', 'Dinsdale'):
to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
re.MULTILINE)
self.assertNotRegex(debugout, to_addr)
recip = re.compile(r"^recips: .*'foo@example.net'.*$", re.MULTILINE)
self.assertRegex(debugout, recip)
def testSendMessageWithMultipleFrom(self):
# Sender overrides From
m = email.mime.text.MIMEText('A test message')
m['From'] = 'Bernard, Bianca'
m['Sender'] = 'the_rescuers@Rescue-Aid-Society.com'
m['To'] = 'John, Dinsdale'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.send_message(m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
# Add the X-Peer header that DebuggingServer adds
m['X-Peer'] = socket.gethostbyname('localhost')
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
debugout = smtpd.DEBUGSTREAM.getvalue()
sender = re.compile("^sender: the_rescuers@Rescue-Aid-Society.com$", re.MULTILINE)
self.assertRegex(debugout, sender)
for addr in ('John', 'Dinsdale'):
to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
re.MULTILINE)
self.assertRegex(debugout, to_addr)
def testSendMessageResent(self):
m = email.mime.text.MIMEText('A test message')
m['From'] = 'foo@bar.com'
m['To'] = 'John'
m['CC'] = 'Sally, Fred'
m['Bcc'] = 'John Root <root@localhost>, "Dinsdale" <warped@silly.walks.com>'
m['Resent-Date'] = 'Thu, 1 Jan 1970 17:42:00 +0000'
m['Resent-From'] = 'holy@grail.net'
m['Resent-To'] = 'Martha <my_mom@great.cooker.com>, Jeff'
m['Resent-Bcc'] = 'doe@losthope.net'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.send_message(m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
# The Bcc and Resent-Bcc headers are deleted before serialization.
del m['Bcc']
del m['Resent-Bcc']
# Add the X-Peer header that DebuggingServer adds
m['X-Peer'] = socket.gethostbyname('localhost')
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
debugout = smtpd.DEBUGSTREAM.getvalue()
sender = re.compile("^sender: holy@grail.net$", re.MULTILINE)
self.assertRegex(debugout, sender)
for addr in ('my_mom@great.cooker.com', 'Jeff', 'doe@losthope.net'):
to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
re.MULTILINE)
self.assertRegex(debugout, to_addr)
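# When Resent-* headers are present, send_message() takes the envelope
# sender from Resent-From/Resent-Sender and the recipients from
# Resent-To/Cc/Bcc instead of the ordinary headers; a message carrying more
# than one set of Resent- headers is rejected, as the next test verifies.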
def testSendMessageMultipleResentRaises(self):
m = email.mime.text.MIMEText('A test message')
m['From'] = 'foo@bar.com'
m['To'] = 'John'
m['CC'] = 'Sally, Fred'
m['Bcc'] = 'John Root <root@localhost>, "Dinsdale" <warped@silly.walks.com>'
m['Resent-Date'] = 'Thu, 1 Jan 1970 17:42:00 +0000'
m['Resent-From'] = 'holy@grail.net'
m['Resent-To'] = 'Martha <my_mom@great.cooker.com>, Jeff'
m['Resent-Bcc'] = 'doe@losthope.net'
m['Resent-Date'] = 'Thu, 2 Jan 1970 17:42:00 +0000'
m['Resent-To'] = 'holy@grail.net'
m['Resent-From'] = 'Martha <my_mom@great.cooker.com>, Jeff'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
with self.assertRaises(ValueError):
smtp.send_message(m)
smtp.close()
class NonConnectingTests(unittest.TestCase):
def testNotConnected(self):
# Test various operations on an unconnected SMTP object; each should
# raise SMTPServerDisconnected, which smtplib raises when the
# connection's 'sock' attribute has not been set up by connect().
smtp = smtplib.SMTP()
self.assertRaises(smtplib.SMTPServerDisconnected, smtp.ehlo)
self.assertRaises(smtplib.SMTPServerDisconnected,
smtp.send, 'test msg')
def testNonnumericPort(self):
# check that non-numeric port raises OSError
self.assertRaises(OSError, smtplib.SMTP,
"localhost", "bogus")
self.assertRaises(OSError, smtplib.SMTP,
"localhost:bogus")
# test response of client to a non-successful HELO message
@unittest.skipUnless(threading, 'Threading required for this test.')
class BadHELOServerTests(unittest.TestCase):
def setUp(self):
smtplib.socket = mock_socket
mock_socket.reply_with(b"199 no hello for you!")
self.old_stdout = sys.stdout
self.output = io.StringIO()
sys.stdout = self.output
self.port = 25
def tearDown(self):
smtplib.socket = socket
sys.stdout = self.old_stdout
def testFailingHELO(self):
self.assertRaises(smtplib.SMTPConnectError, smtplib.SMTP,
HOST, self.port, 'localhost', 3)
@unittest.skipUnless(threading, 'Threading required for this test.')
class TooLongLineTests(unittest.TestCase):
respdata = b'250 OK' + (b'.' * smtplib._MAXLINE * 2) + b'\n'
def setUp(self):
self.old_stdout = sys.stdout
self.output = io.StringIO()
sys.stdout = self.output
self.evt = threading.Event()
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.settimeout(15)
self.port = support.bind_port(self.sock)
servargs = (self.evt, self.respdata, self.sock)
threading.Thread(target=server, args=servargs).start()
self.evt.wait()
self.evt.clear()
def tearDown(self):
self.evt.wait()
sys.stdout = self.old_stdout
def testLineTooLong(self):
self.assertRaises(smtplib.SMTPResponseException, smtplib.SMTP,
HOST, self.port, 'localhost', 3)
sim_users = {'Mr.A@somewhere.com':'John A',
'Ms.B@xn--fo-fka.com':'Sally B',
'Mrs.C@somewhereesle.com':'Ruth C',
}
sim_auth = ('Mr.A@somewhere.com', 'somepassword')
sim_cram_md5_challenge = ('PENCeUxFREJoU0NnbmhNWitOMjNGNn'
'dAZWx3b29kLmlubm9zb2Z0LmNvbT4=')
sim_auth_credentials = {
'login': 'TXIuQUBzb21ld2hlcmUuY29t',
'plain': 'AE1yLkFAc29tZXdoZXJlLmNvbQBzb21lcGFzc3dvcmQ=',
'cram-md5': ('TXIUQUBZB21LD2HLCMUUY29TIDG4OWQ0MJ'
'KWZGQ4ODNMNDA4NTGXMDRLZWMYZJDMODG1'),
}
sim_auth_login_user = 'TXIUQUBZB21LD2HLCMUUY29T'
sim_auth_plain = 'AE1YLKFAC29TZXDOZXJLLMNVBQBZB21LCGFZC3DVCMQ='
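# For reference: the 'login' value above is base64 of 'Mr.A@somewhere.com'
# and the 'plain' value is base64 of '\0Mr.A@somewhere.com\0somepassword';
# sim_auth_login_user and sim_auth_plain are the upper-cased forms of those
# strings as they come back in the simulated server's error reply, and the
# cram-md5 value is likewise an upper-cased base64 "user digest" response.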
sim_lists = {'list-1':['Mr.A@somewhere.com','Mrs.C@somewhereesle.com'],
'list-2':['Ms.B@xn--fo-fka.com',],
}
# Simulated SMTP channel & server
class SimSMTPChannel(smtpd.SMTPChannel):
quit_response = None
mail_response = None
rcpt_response = None
data_response = None
rcpt_count = 0
rset_count = 0
disconnect = 0
def __init__(self, extra_features, *args, **kw):
self._extrafeatures = ''.join(
[ "250-{0}\r\n".format(x) for x in extra_features ])
super(SimSMTPChannel, self).__init__(*args, **kw)
def smtp_EHLO(self, arg):
resp = ('250-testhost\r\n'
'250-EXPN\r\n'
'250-SIZE 20000000\r\n'
'250-STARTTLS\r\n'
'250-DELIVERBY\r\n')
resp = resp + self._extrafeatures + '250 HELP'
self.push(resp)
self.seen_greeting = arg
self.extended_smtp = True
def smtp_VRFY(self, arg):
# For max compatibility smtplib should be sending the raw address.
if arg in sim_users:
self.push('250 %s %s' % (sim_users[arg], smtplib.quoteaddr(arg)))
else:
self.push('550 No such user: %s' % arg)
def smtp_EXPN(self, arg):
list_name = arg.lower()
if list_name in sim_lists:
user_list = sim_lists[list_name]
for n, user_email in enumerate(user_list):
quoted_addr = smtplib.quoteaddr(user_email)
if n < len(user_list) - 1:
self.push('250-%s %s' % (sim_users[user_email], quoted_addr))
else:
self.push('250 %s %s' % (sim_users[user_email], quoted_addr))
else:
self.push('550 No access for you!')
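# Both smtp_EHLO and smtp_EXPN above build multi-line ESMTP replies by hand:
# intermediate lines use '250-<text>' and only the final line uses
# '250 <text>', which is the continuation convention smtplib's reply parser
# expects.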
def smtp_AUTH(self, arg):
mech = arg.strip().lower()
if mech=='cram-md5':
self.push('334 {}'.format(sim_cram_md5_challenge))
elif mech not in sim_auth_credentials:
self.push('504 auth type unimplemented')
return
elif mech=='plain':
self.push('334 ')
elif mech=='login':
self.push('334 ')
else:
self.push('550 No access for you!')
def smtp_QUIT(self, arg):
if self.quit_response is None:
super(SimSMTPChannel, self).smtp_QUIT(arg)
else:
self.push(self.quit_response)
self.close_when_done()
def smtp_MAIL(self, arg):
if self.mail_response is None:
super().smtp_MAIL(arg)
else:
self.push(self.mail_response)
if self.disconnect:
self.close_when_done()
def smtp_RCPT(self, arg):
if self.rcpt_response is None:
super().smtp_RCPT(arg)
return
self.rcpt_count += 1
self.push(self.rcpt_response[self.rcpt_count-1])
def smtp_RSET(self, arg):
self.rset_count += 1
super().smtp_RSET(arg)
def smtp_DATA(self, arg):
if self.data_response is None:
super().smtp_DATA(arg)
else:
self.push(self.data_response)
def handle_error(self):
raise
class SimSMTPServer(smtpd.SMTPServer):
channel_class = SimSMTPChannel
def __init__(self, *args, **kw):
self._extra_features = []
smtpd.SMTPServer.__init__(self, *args, **kw)
def handle_accepted(self, conn, addr):
self._SMTPchannel = self.channel_class(
self._extra_features, self, conn, addr,
decode_data=self._decode_data)
def process_message(self, peer, mailfrom, rcpttos, data):
pass
def add_feature(self, feature):
self._extra_features.append(feature)
def handle_error(self):
raise
# Test various SMTP & ESMTP commands/behaviors that require a simulated server
# (i.e., something with more features than DebuggingServer)
@unittest.skipUnless(threading, 'Threading required for this test.')
class SMTPSimTests(unittest.TestCase):
def setUp(self):
self.real_getfqdn = socket.getfqdn
socket.getfqdn = mock_socket.getfqdn
self.serv_evt = threading.Event()
self.client_evt = threading.Event()
# Pick a random unused port by passing 0 for the port number
self.serv = SimSMTPServer((HOST, 0), ('nowhere', -1), decode_data=True)
# Keep a note of what port was assigned
self.port = self.serv.socket.getsockname()[1]
serv_args = (self.serv, self.serv_evt, self.client_evt)
self.thread = threading.Thread(target=debugging_server, args=serv_args)
self.thread.start()
# wait until server thread has assigned a port number
self.serv_evt.wait()
self.serv_evt.clear()
def tearDown(self):
socket.getfqdn = self.real_getfqdn
# indicate that the client is finished
self.client_evt.set()
# wait for the server thread to terminate
self.serv_evt.wait()
self.thread.join()
def testBasic(self):
# smoke test
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
smtp.quit()
def testEHLO(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
# no features should be present before the EHLO
self.assertEqual(smtp.esmtp_features, {})
# features expected from the test server
expected_features = {'expn':'',
'size': '20000000',
'starttls': '',
'deliverby': '',
'help': '',
}
smtp.ehlo()
self.assertEqual(smtp.esmtp_features, expected_features)
for k in expected_features:
self.assertTrue(smtp.has_extn(k))
self.assertFalse(smtp.has_extn('unsupported-feature'))
smtp.quit()
def testVRFY(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
for email, name in sim_users.items():
expected_known = (250, bytes('%s %s' %
(name, smtplib.quoteaddr(email)),
"ascii"))
self.assertEqual(smtp.vrfy(email), expected_known)
u = 'nobody@nowhere.com'
expected_unknown = (550, ('No such user: %s' % u).encode('ascii'))
self.assertEqual(smtp.vrfy(u), expected_unknown)
smtp.quit()
def testEXPN(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
for listname, members in sim_lists.items():
users = []
for m in members:
users.append('%s %s' % (sim_users[m], smtplib.quoteaddr(m)))
expected_known = (250, bytes('\n'.join(users), "ascii"))
self.assertEqual(smtp.expn(listname), expected_known)
u = 'PSU-Members-List'
expected_unknown = (550, b'No access for you!')
self.assertEqual(smtp.expn(u), expected_unknown)
smtp.quit()
# SimSMTPChannel doesn't fully support AUTH because it requires a
# synchronous read to obtain the credentials...so instead smtpd
# sees the credential sent by smtplib's login method as an unknown command,
# which results in smtplib raising an auth error. Fortunately the error
# message contains the encoded credential, so we can partially check that it
# was generated correctly (partially, because the 'word' is uppercased in
# the error message).
def testAUTH_PLAIN(self):
self.serv.add_feature("AUTH PLAIN")
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
try: smtp.login(sim_auth[0], sim_auth[1])
except smtplib.SMTPAuthenticationError as err:
self.assertIn(sim_auth_plain, str(err))
smtp.close()
def testAUTH_LOGIN(self):
self.serv.add_feature("AUTH LOGIN")
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
try: smtp.login(sim_auth[0], sim_auth[1])
except smtplib.SMTPAuthenticationError as err:
self.assertIn(sim_auth_login_user, str(err))
smtp.close()
def testAUTH_CRAM_MD5(self):
self.serv.add_feature("AUTH CRAM-MD5")
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
try: smtp.login(sim_auth[0], sim_auth[1])
except smtplib.SMTPAuthenticationError as err:
self.assertIn(sim_auth_credentials['cram-md5'], str(err))
smtp.close()
def testAUTH_multiple(self):
# Test that multiple authentication methods are tried.
self.serv.add_feature("AUTH BOGUS PLAIN LOGIN CRAM-MD5")
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
try: smtp.login(sim_auth[0], sim_auth[1])
except smtplib.SMTPAuthenticationError as err:
self.assertIn(sim_auth_login_user, str(err))
smtp.close()
def test_auth_function(self):
smtp = smtplib.SMTP(HOST, self.port,
local_hostname='localhost', timeout=15)
self.serv.add_feature("AUTH CRAM-MD5")
smtp.user, smtp.password = sim_auth[0], sim_auth[1]
supported = {'CRAM-MD5': smtp.auth_cram_md5,
'PLAIN': smtp.auth_plain,
'LOGIN': smtp.auth_login,
}
for mechanism, method in supported.items():
try: smtp.auth(mechanism, method)
except smtplib.SMTPAuthenticationError as err:
self.assertIn(sim_auth_credentials[mechanism.lower()].upper(),
str(err))
smtp.close()
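# SMTP.auth() pairs a mechanism name with an "authobject" callable; smtplib
# supplies auth_plain, auth_login and auth_cram_md5, each of which builds
# its credential string from smtp.user/smtp.password. The simulated server
# then echoes that string back inside an "unknown command" error, which is
# what the assertIn above checks.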
def test_quit_resets_greeting(self):
smtp = smtplib.SMTP(HOST, self.port,
local_hostname='localhost',
timeout=15)
code, message = smtp.ehlo()
self.assertEqual(code, 250)
self.assertIn('size', smtp.esmtp_features)
smtp.quit()
self.assertNotIn('size', smtp.esmtp_features)
smtp.connect(HOST, self.port)
self.assertNotIn('size', smtp.esmtp_features)
smtp.ehlo_or_helo_if_needed()
self.assertIn('size', smtp.esmtp_features)
smtp.quit()
def test_with_statement(self):
with smtplib.SMTP(HOST, self.port) as smtp:
code, message = smtp.noop()
self.assertEqual(code, 250)
self.assertRaises(smtplib.SMTPServerDisconnected, smtp.send, b'foo')
with smtplib.SMTP(HOST, self.port) as smtp:
smtp.close()
self.assertRaises(smtplib.SMTPServerDisconnected, smtp.send, b'foo')
def test_with_statement_QUIT_failure(self):
with self.assertRaises(smtplib.SMTPResponseException) as error:
with smtplib.SMTP(HOST, self.port) as smtp:
smtp.noop()
self.serv._SMTPchannel.quit_response = '421 QUIT FAILED'
self.assertEqual(error.exception.smtp_code, 421)
self.assertEqual(error.exception.smtp_error, b'QUIT FAILED')
#TODO: add tests for correct AUTH method fallback now that the
#test infrastructure can support it.
# Issue 17498: make sure _rset does not raise SMTPServerDisconnected exception
def test__rset_from_mail_cmd(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
smtp.noop()
self.serv._SMTPchannel.mail_response = '451 Requested action aborted'
self.serv._SMTPchannel.disconnect = True
with self.assertRaises(smtplib.SMTPSenderRefused):
smtp.sendmail('John', 'Sally', 'test message')
self.assertIsNone(smtp.sock)
# Issue 5713: make sure close, not rset, is called if we get a 421 error
def test_421_from_mail_cmd(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
smtp.noop()
self.serv._SMTPchannel.mail_response = '421 closing connection'
with self.assertRaises(smtplib.SMTPSenderRefused):
smtp.sendmail('John', 'Sally', 'test message')
self.assertIsNone(smtp.sock)
self.assertEqual(self.serv._SMTPchannel.rset_count, 0)
def test_421_from_rcpt_cmd(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
smtp.noop()
self.serv._SMTPchannel.rcpt_response = ['250 accepted', '421 closing']
with self.assertRaises(smtplib.SMTPRecipientsRefused) as r:
smtp.sendmail('John', ['Sally', 'Frank', 'George'], 'test message')
self.assertIsNone(smtp.sock)
self.assertEqual(self.serv._SMTPchannel.rset_count, 0)
self.assertDictEqual(r.exception.args[0], {'Frank': (421, b'closing')})
def test_421_from_data_cmd(self):
class MySimSMTPChannel(SimSMTPChannel):
def found_terminator(self):
if self.smtp_state == self.DATA:
self.push('421 closing')
else:
super().found_terminator()
self.serv.channel_class = MySimSMTPChannel
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
smtp.noop()
with self.assertRaises(smtplib.SMTPDataError):
smtp.sendmail('John@foo.org', ['Sally@foo.org'], 'test message')
self.assertIsNone(smtp.sock)
self.assertEqual(self.serv._SMTPchannel.rcpt_count, 0)
def test_smtputf8_NotSupportedError_if_no_server_support(self):
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost', timeout=3)
self.addCleanup(smtp.close)
smtp.ehlo()
self.assertTrue(smtp.does_esmtp)
self.assertFalse(smtp.has_extn('smtputf8'))
self.assertRaises(
smtplib.SMTPNotSupportedError,
smtp.sendmail,
'John', 'Sally', '', mail_options=['BODY=8BITMIME', 'SMTPUTF8'])
self.assertRaises(
smtplib.SMTPNotSupportedError,
smtp.mail, 'John', options=['BODY=8BITMIME', 'SMTPUTF8'])
def test_send_unicode_without_SMTPUTF8(self):
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost', timeout=3)
self.addCleanup(smtp.close)
self.assertRaises(UnicodeEncodeError, smtp.sendmail, 'Alice', 'Böb', '')
self.assertRaises(UnicodeEncodeError, smtp.mail, 'Älice')
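# Without the SMTPUTF8 extension negotiated, envelope addresses must be
# ASCII, so encoding a non-ASCII address fails with UnicodeEncodeError
# before anything reaches the wire -- the two tests above exercise both the
# sendmail() and the lower-level mail() paths.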
class SimSMTPUTF8Server(SimSMTPServer):
def __init__(self, *args, **kw):
# The base SMTP server turns these on automatically, but our test
# server is set up to munge the EHLO response, so we need to provide
# them as well. And yes, the call is to SMTPServer not SimSMTPServer.
self._extra_features = ['SMTPUTF8', '8BITMIME']
smtpd.SMTPServer.__init__(self, *args, **kw)
def handle_accepted(self, conn, addr):
self._SMTPchannel = self.channel_class(
self._extra_features, self, conn, addr,
decode_data=self._decode_data,
enable_SMTPUTF8=self.enable_SMTPUTF8,
)
def process_message(self, peer, mailfrom, rcpttos, data, mail_options=None,
rcpt_options=None):
self.last_peer = peer
self.last_mailfrom = mailfrom
self.last_rcpttos = rcpttos
self.last_message = data
self.last_mail_options = mail_options
self.last_rcpt_options = rcpt_options
@unittest.skipUnless(threading, 'Threading required for this test.')
class SMTPUTF8SimTests(unittest.TestCase):
maxDiff = None
def setUp(self):
self.real_getfqdn = socket.getfqdn
socket.getfqdn = mock_socket.getfqdn
self.serv_evt = threading.Event()
self.client_evt = threading.Event()
# Pick a random unused port by passing 0 for the port number
self.serv = SimSMTPUTF8Server((HOST, 0), ('nowhere', -1),
decode_data=False,
enable_SMTPUTF8=True)
# Keep a note of what port was assigned
self.port = self.serv.socket.getsockname()[1]
serv_args = (self.serv, self.serv_evt, self.client_evt)
self.thread = threading.Thread(target=debugging_server, args=serv_args)
self.thread.start()
# wait until server thread has assigned a port number
self.serv_evt.wait()
self.serv_evt.clear()
def tearDown(self):
socket.getfqdn = self.real_getfqdn
# indicate that the client is finished
self.client_evt.set()
# wait for the server thread to terminate
self.serv_evt.wait()
self.thread.join()
def test_test_server_supports_extensions(self):
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost', timeout=3)
self.addCleanup(smtp.close)
smtp.ehlo()
self.assertTrue(smtp.does_esmtp)
self.assertTrue(smtp.has_extn('smtputf8'))
def test_send_unicode_with_SMTPUTF8_via_sendmail(self):
m = '¡a test message containing unicode!'.encode('utf-8')
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost', timeout=3)
self.addCleanup(smtp.close)
smtp.sendmail('Jőhn', 'Sálly', m,
mail_options=['BODY=8BITMIME', 'SMTPUTF8'])
self.assertEqual(self.serv.last_mailfrom, 'Jőhn')
self.assertEqual(self.serv.last_rcpttos, ['Sálly'])
self.assertEqual(self.serv.last_message, m)
self.assertIn('BODY=8BITMIME', self.serv.last_mail_options)
self.assertIn('SMTPUTF8', self.serv.last_mail_options)
self.assertEqual(self.serv.last_rcpt_options, [])
def test_send_unicode_with_SMTPUTF8_via_low_level_API(self):
m = '¡a test message containing unicode!'.encode('utf-8')
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost', timeout=3)
self.addCleanup(smtp.close)
smtp.ehlo()
self.assertEqual(
smtp.mail('Jő', options=['BODY=8BITMIME', 'SMTPUTF8']),
(250, b'OK'))
self.assertEqual(smtp.rcpt('János'), (250, b'OK'))
self.assertEqual(smtp.data(m), (250, b'OK'))
self.assertEqual(self.serv.last_mailfrom, 'Jő')
self.assertEqual(self.serv.last_rcpttos, ['János'])
self.assertEqual(self.serv.last_message, m)
self.assertIn('BODY=8BITMIME', self.serv.last_mail_options)
self.assertIn('SMTPUTF8', self.serv.last_mail_options)
self.assertEqual(self.serv.last_rcpt_options, [])
def test_send_message_uses_smtputf8_if_addrs_non_ascii(self):
msg = EmailMessage()
msg['From'] = "Páolo <főo@bar.com>"
msg['To'] = 'Dinsdale'
msg['Subject'] = 'Nudge nudge, wink, wink \u1F609'
# XXX I don't know why I need two \n's here, but this is an existing
# bug (if it is one) and not a problem with the new functionality.
msg.set_content("oh là là, know what I mean, know what I mean?\n\n")
# XXX smtpd converts received /r/n to /n, so we can't easily test that
# we are successfully sending /r/n :(.
expected = textwrap.dedent("""\
From: Páolo <főo@bar.com>
To: Dinsdale
Subject: Nudge nudge, wink, wink \u1F609
Content-Type: text/plain; charset="utf-8"
Content-Transfer-Encoding: 8bit
MIME-Version: 1.0
oh là là, know what I mean, know what I mean?
""")
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost', timeout=3)
self.addCleanup(smtp.close)
self.assertEqual(smtp.send_message(msg), {})
self.assertEqual(self.serv.last_mailfrom, 'főo@bar.com')
self.assertEqual(self.serv.last_rcpttos, ['Dinsdale'])
self.assertEqual(self.serv.last_message.decode(), expected)
self.assertIn('BODY=8BITMIME', self.serv.last_mail_options)
self.assertIn('SMTPUTF8', self.serv.last_mail_options)
self.assertEqual(self.serv.last_rcpt_options, [])
def test_send_message_error_on_non_ascii_addrs_if_no_smtputf8(self):
msg = EmailMessage()
msg['From'] = "Páolo <főo@bar.com>"
msg['To'] = 'Dinsdale'
msg['Subject'] = 'Nudge nudge, wink, wink \u1F609'
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost', timeout=3)
self.addCleanup(smtp.close)
self.assertRaises(smtplib.SMTPNotSupportedError,
smtp.send_message, msg)
@support.reap_threads
def test_main(verbose=None):
support.run_unittest(GeneralTests, DebuggingServerTests,
NonConnectingTests,
BadHELOServerTests, SMTPSimTests,
TooLongLineTests, SMTPUTF8SimTests)
if __name__ == '__main__':
test_main()
| {
"content_hash": "3bdfaa884a94399abbfbe3353024adc4",
"timestamp": "",
"source": "github",
"line_count": 1153,
"max_line_length": 90,
"avg_line_length": 38.67823070251518,
"alnum_prop": 0.5977666158399857,
"repo_name": "munyirik/python",
"id": "e66ae9be51c044e8124c49628627f057fb83bac3",
"size": "44619",
"binary": false,
"copies": "3",
"ref": "refs/heads/develop",
"path": "cpython/Lib/test/test_smtplib.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "470920"
},
{
"name": "Batchfile",
"bytes": "35551"
},
{
"name": "C",
"bytes": "17872871"
},
{
"name": "C#",
"bytes": "1231"
},
{
"name": "C++",
"bytes": "356072"
},
{
"name": "CSS",
"bytes": "2839"
},
{
"name": "Common Lisp",
"bytes": "24481"
},
{
"name": "DIGITAL Command Language",
"bytes": "26402"
},
{
"name": "Groff",
"bytes": "254942"
},
{
"name": "HTML",
"bytes": "130698"
},
{
"name": "JavaScript",
"bytes": "10616"
},
{
"name": "Makefile",
"bytes": "25026"
},
{
"name": "Objective-C",
"bytes": "33182"
},
{
"name": "PLSQL",
"bytes": "22886"
},
{
"name": "PostScript",
"bytes": "13803"
},
{
"name": "PowerShell",
"bytes": "1420"
},
{
"name": "Prolog",
"bytes": "557"
},
{
"name": "Python",
"bytes": "24911704"
},
{
"name": "R",
"bytes": "5378"
},
{
"name": "Shell",
"bytes": "437386"
},
{
"name": "TeX",
"bytes": "323102"
},
{
"name": "Visual Basic",
"bytes": "481"
}
],
"symlink_target": ""
} |
"""Unit tests for the astropy.coordinates.angle_utilities module"""
import numpy as np
import pytest
import astropy.units as u
from astropy.coordinates.angle_utilities import (
golden_spiral_grid, uniform_spherical_random_surface, uniform_spherical_random_volume)
from astropy.utils import NumpyRNGContext
def test_golden_spiral_grid_input():
usph = golden_spiral_grid(size=100)
assert len(usph) == 100
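# golden_spiral_grid lays points along a golden-angle (Fibonacci) spiral,
# which gives a roughly uniform covering of the sphere for the requested
# size; here we only check the number of generated representations.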
@pytest.mark.parametrize("func", [uniform_spherical_random_surface,
uniform_spherical_random_volume])
def test_uniform_spherical_random_input(func):
with NumpyRNGContext(42):
sph = func(size=100)
assert len(sph) == 100
def test_uniform_spherical_random_volume_input():
with NumpyRNGContext(42):
sph = uniform_spherical_random_volume(size=100, max_radius=1)
assert len(sph) == 100
assert sph.distance.unit == u.dimensionless_unscaled
assert sph.distance.max() <= 1.
sph = uniform_spherical_random_volume(size=100, max_radius=4*u.pc)
assert len(sph) == 100
assert sph.distance.max() <= 4*u.pc
| {
"content_hash": "b7b7486b513f03faeadd983b226cbb70",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 90,
"avg_line_length": 33.205882352941174,
"alnum_prop": 0.6837909654561559,
"repo_name": "lpsinger/astropy",
"id": "885938f5578aa255f29881cb5771a9a4e498a186",
"size": "1129",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "astropy/coordinates/tests/test_angle_generators.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "11040074"
},
{
"name": "C++",
"bytes": "47001"
},
{
"name": "Cython",
"bytes": "78755"
},
{
"name": "HTML",
"bytes": "1172"
},
{
"name": "Lex",
"bytes": "183333"
},
{
"name": "M4",
"bytes": "18757"
},
{
"name": "Makefile",
"bytes": "52508"
},
{
"name": "Python",
"bytes": "12323563"
},
{
"name": "Shell",
"bytes": "17024"
},
{
"name": "TeX",
"bytes": "853"
}
],
"symlink_target": ""
} |
import logging
import os
import config
import destalinator
import slackbot
import slacker
import utils
import json
class Executor(object):
def __init__(self, debug=False, verbose=False):
self.debug = debug
self.verbose = verbose
self.config = config.Config()
slackbot_token = os.getenv(self.config.slackbot_api_token_env_varname)
api_token = os.getenv(self.config.api_token_env_varname)
self.logger = logging.getLogger(__name__)
utils.set_up_logger(self.logger, log_level_env_var='DESTALINATOR_LOG_LEVEL')
self.destalinator_activated = False
if os.getenv(self.config.destalinator_activated_env_varname):
self.destalinator_activated = True
self.logger.debug("destalinator_activated is %s", self.destalinator_activated)
self.sb = slackbot.Slackbot(config.SLACK_NAME, token=slackbot_token)
self.slacker = slacker.Slacker(config.SLACK_NAME, token=api_token, logger=self.logger)
self.ds = destalinator.Destalinator(slacker=self.slacker,
slackbot=self.sb,
activated=self.destalinator_activated,
logger=self.logger)
| {
"content_hash": "0add3eae07274c4f886d0c6752f45233",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 94,
"avg_line_length": 34.351351351351354,
"alnum_prop": 0.6341463414634146,
"repo_name": "rossrader/destalinator",
"id": "af0ba61f2e635c5c71f41ff6c24c0afba92857ee",
"size": "1295",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "executor.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "65646"
}
],
"symlink_target": ""
} |
from django import forms
from main.models import Episode, Location
# from django.contrib.admin import widgets
class Who(forms.Form):
locked_by = forms.CharField(max_length=32, required=True,
label="Please enter your name")
# class Episode_Form(forms.ModelForm):
# exclude=[]
# class Meta:
# model = Episode
class Episode_Form_Preshow(forms.ModelForm):
authors = forms.CharField(max_length=255, required=False)
emails = forms.CharField(max_length=255, required=False)
def __init__(self, *args, **kwargs):
locations = kwargs.get('locations', Location.objects.all())
if 'locations' in kwargs:
del kwargs['locations']
super(Episode_Form_Preshow, self).__init__(*args, **kwargs)
self.fields['location']._set_choices([(l.id, l.name) for l in locations])
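# Restricting a ModelForm field's choices at runtime: the view may pass
# locations=..., and the 'location' choices are rebuilt after the parent
# __init__ has created the default field.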
class Meta:
model = Episode
fields = ('sequence',
'name', 'slug',
'show','location',
'start', 'duration',
'authors',
'emails',
'released',
'description', 'tags')
class Episode_Form_small(forms.ModelForm):
class Meta:
model = Episode
fields = ('state', 'locked', 'locked_by', 'start', 'duration',
'name',
'emails',
'released',
'normalise', 'channelcopy',
'thumbnail', 'description', 'comment')
class clrfForm(forms.Form):
clid = forms.IntegerField(widget=forms.HiddenInput())
trash = forms.BooleanField(label="Trash",required=False)
apply = forms.BooleanField(label="Apply",required=False)
split = forms.BooleanField(label="Split",required=False)
sequence = forms.IntegerField(label="Sequence",required=False,
widget=forms.TextInput(attrs={'size':'3','class':'suSpinButton'}))
start = forms.CharField(max_length=12,label="Start",required=False,
help_text = "offset from start in h:m:s or frames, blank for start",
widget=forms.TextInput(attrs={'size':'9'}))
end = forms.CharField(max_length=12,label="End",required=False,
help_text = "offset from start in h:m:s or frames, blank for end",
widget=forms.TextInput(attrs={'size':'9'}))
rf_comment = forms.CharField(label="Raw_File comment",required=False,
widget=forms.Textarea(attrs={'rows':'2','cols':'20'}))
cl_comment = forms.CharField(label="Cut_List comment",required=False,
widget=forms.Textarea(attrs={'rows':'2','cols':'20'}))
class Add_CutList_to_Ep(forms.Form):
rf_filename = forms.CharField(max_length=132,required=False,
help_text = "root is .../show/dv/location/, example: 2013-03-13/13:13:30.dv" )
sequence = forms.IntegerField(label="Sequence",required=False,
widget=forms.TextInput(attrs={'size':'3','class':'suSpinButton'}))
getit = forms.BooleanField(label="get this", required=False,
help_text="check and save to add this")
class AddImageToEp(forms.Form):
image_id = forms.IntegerField(widget=forms.HiddenInput())
episode_id = forms.IntegerField(required=False,)
class AddEpisodeToRaw(forms.ModelForm):
class Meta:
model = Episode
fields = ('name',
'duration',
# 'comment',
)
raw_id = forms.IntegerField(widget=forms.HiddenInput())
| {
"content_hash": "48fdadd78325bfa3426f1074bfe5518c",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 84,
"avg_line_length": 39.48837209302326,
"alnum_prop": 0.6166077738515902,
"repo_name": "yoe/veyepar",
"id": "c51f63caa7a5ea81744cf0487835a4c91fd0550a",
"size": "3409",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dj/main/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "6107"
},
{
"name": "HTML",
"bytes": "76370"
},
{
"name": "JavaScript",
"bytes": "76640"
},
{
"name": "Python",
"bytes": "713606"
},
{
"name": "Ruby",
"bytes": "3503"
},
{
"name": "Shell",
"bytes": "80571"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import sys
import esky
if getattr(sys,"frozen",False):
#app = esky.Esky(sys.executable,"https://example-app.com/downloads/")
app = esky.Esky(sys.executable,"http://localhost:8000")
try:
app.auto_update()
except Exception as e:
print ("ERROR UPDATING APP:", e)
print("HELLO AGAAIN WORLD - Stage 2")
| {
"content_hash": "d524af507a85f809a28437d20b6e5369",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 73,
"avg_line_length": 24.8,
"alnum_prop": 0.6612903225806451,
"repo_name": "datalytica/esky",
"id": "92fdfad5a5c8f457ec592b00910f096707ba371b",
"size": "372",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tutorial/stage2/example.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "393772"
}
],
"symlink_target": ""
} |
import saga
c = saga.Context ('ssh')
c.user_id = 'dinesh'
s = saga.Session ()
s.add_context (c)
js = saga.job.Service("lsf+ssh://yellowstone.ucar.edu", session=s)
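# Typical SAGA pattern: attach an ssh Context (carrying the remote user id)
# to a Session, then hand that session to job.Service so the lsf+ssh
# adaptor can reach the remote LSF scheduler over ssh with those
# credentials.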
| {
"content_hash": "3063f0561573a6cc7ef2363db0f5e18c",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 66,
"avg_line_length": 16.7,
"alnum_prop": 0.6586826347305389,
"repo_name": "telamonian/saga-python",
"id": "e98f840060550cbd1298c8c34ce001405e2006d3",
"size": "168",
"binary": false,
"copies": "5",
"ref": "refs/heads/devel",
"path": "tests/issues/yubikey.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Gnuplot",
"bytes": "2790"
},
{
"name": "Makefile",
"bytes": "142"
},
{
"name": "Python",
"bytes": "1551101"
},
{
"name": "Shell",
"bytes": "55277"
}
],
"symlink_target": ""
} |
from flask import Blueprint, jsonify, current_app, request, g
from web.autorizacao import requer_acesso
from web.autenticacao import requer_usuario
from domain.recurso import DTOAgendamento, DTOIntervalo
from domain.usuario.nivel_acesso import *
from domain.iso8601 import to_iso
from domain.excecoes import *
api = Blueprint('api', __name__)
def recurso_to_dict(recurso):
print(recurso.__dict__)
return {
'nome': recurso.nome,
'categoria': recurso.tipo.nome,
'utilizavel': recurso.utilizavel,
'agendamentos': [agendamento_to_dict(a) for a in recurso.agendamentos]
}
def agendamento_to_dict(agendamento):
return {
'idResponsavel': agendamento.idResponsavel,
'intervalo': intervalo_to_dict(agendamento.intervalo)
}
def intervalo_to_dict(intervalo):
return {
'inicio': to_iso(intervalo.inicio),
'fim': to_iso(intervalo.fim)
}
def return_recurso(id):
recurso = current_app.crud_recurso.obter(id)
return jsonify({'recurso': recurso_to_dict(recurso)})
def erro(txt, code=400):
response = jsonify({'erro': txt})
response.status_code = code
return response
@api.route("/recursos")
@requer_usuario
def listar_recurso():
recursos = current_app.crud_recurso.listar()
json = {
'recursos': [recurso_to_dict(r) for r in recursos]
}
return jsonify(json)
@api.route("/recursos/<id>")
@requer_acesso(SistemaManutencao(), Administrador())
def obter_recurso(id):
return return_recurso(int(id))
@api.route("/recursos/<id_recurso>/estado", methods=['POST'])
@requer_acesso(SistemaManutencao(), Administrador())
def alterar_estado(id_recurso):
entrada = request.get_json()
current_app.estado_recurso.alterar_estado(int(id_recurso), entrada['utilizavel'])
return return_recurso(id_recurso)
@api.route("/recursos/<id_recurso>/agendamentos", methods=['POST'])
@requer_usuario
def agendar(id_recurso):
id = int(id_recurso)
entrada = request.get_json()
try:
dto = DTOAgendamento(
idResponsavel = g.usuario.id,
intervalo = DTOIntervalo(
entrada['agendamento']['intervalo']['inicio'],
entrada['agendamento']['intervalo']['fim']
)
)
current_app.agendamento.agendar(id, dto)
return return_recurso(id)
except ExcecaoAgendamentoRecursoOcupado:
return erro('Recurso não disponível para o intervalo desejado')
except ExcecaoAgendamentoRecursoInutilizavel:
return erro('Recurso está marcado como inutilizável')
except KeyError:
return erro('Formato de entrada inválido')
@api.route("/recursos/<id_recurso>/cancelar_agendamento", methods=['POST'])
@requer_usuario
def cancelar_agendamento(id_recurso):
id = int(id_recurso)
entrada = request.get_json()
try:
dto = DTOAgendamento(
idResponsavel = int(entrada['agendamento']['idResponsavel']),
intervalo = DTOIntervalo(
entrada['agendamento']['intervalo']['inicio'],
entrada['agendamento']['intervalo']['fim']
)
)
if dto.idResponsavel != g.usuario.id and g.nivelAcesso != Administrador():
raise ExcecaoNaoAutorizado
current_app.agendamento.remover(id, dto)
return return_recurso(id)
except ExcecaoAgendamentoInexistente:
return erro('Não existe agendamento igual ao especificado')
except KeyError:
return erro('Formato de entrada inválido')
| {
"content_hash": "e2c8a49106cb6c9df04091997d981085",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 85,
"avg_line_length": 31.491071428571427,
"alnum_prop": 0.6640204139495322,
"repo_name": "ESEGroup/Paraguai",
"id": "64036144c866f9eba20043ea2b27e01467a42ad7",
"size": "3534",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "web/views/api.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "7243"
},
{
"name": "HTML",
"bytes": "14750"
},
{
"name": "JavaScript",
"bytes": "502"
},
{
"name": "Python",
"bytes": "67669"
}
],
"symlink_target": ""
} |
from django.db import models
from django.contrib.auth.models import User
from django.utils import timezone
class Class(models.Model):
cls_id = models.AutoField("ClassManagement id", primary_key=True)
teacher = models.ManyToManyField(User, related_name='teacher', blank=True)
students = models.ManyToManyField(User, blank=True)
name = models.CharField('ClassManagement Name', max_length=100)
description = models.CharField('ClassManagement Description', max_length=100)
def __str__(self):
return self.name
def __repr__(self):
return self.__str__()
def __unicode__(self):
return self.__str__() | {
"content_hash": "da8652dff9019b058956576173ff1321",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 81,
"avg_line_length": 32.6,
"alnum_prop": 0.6932515337423313,
"repo_name": "Mihai925/EduCoding",
"id": "2895684f9dac73fed03073e3225fcc0f91eea2b0",
"size": "652",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Class/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "103299"
},
{
"name": "HTML",
"bytes": "79484"
},
{
"name": "JavaScript",
"bytes": "47731"
},
{
"name": "Python",
"bytes": "72639"
},
{
"name": "Shell",
"bytes": "130"
}
],
"symlink_target": ""
} |
import os
import sys
sys.path.insert(0, os.path.join('..', 'src'))
| {
"content_hash": "26bf0b094e7b6a922a1ecddb06914bd4",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 45,
"avg_line_length": 17,
"alnum_prop": 0.6470588235294118,
"repo_name": "davetcoleman/catkin_pkg",
"id": "4a31225b8fbd8224d76b6950ce3b9de2f4e3d3fa",
"size": "68",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "test/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CMake",
"bytes": "937"
},
{
"name": "Makefile",
"bytes": "524"
},
{
"name": "Python",
"bytes": "196083"
}
],
"symlink_target": ""
} |
"""A simple example to test the a DistributionStrategy with Estimators.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.python.keras import metrics as metrics_module
def build_model_fn_optimizer():
"""Simple model_fn with optimizer."""
# TODO(anjalisridhar): Move this inside the model_fn once OptimizerV2 is
# done?
optimizer = tf.train.GradientDescentOptimizer(0.2)
def model_fn(features, labels, mode): # pylint: disable=unused-argument
"""model_fn which uses a single unit Dense layer."""
# You can also use the Flatten layer if you want to test a model without any
# weights.
layer = tf.layers.Dense(1, use_bias=True)
logits = layer(features)
if mode == tf.estimator.ModeKeys.PREDICT:
predictions = {"logits": logits}
return tf.estimator.EstimatorSpec(mode, predictions=predictions)
def loss_fn():
y = tf.reshape(logits, []) - tf.constant(1.)
return y * y
if mode == tf.estimator.ModeKeys.EVAL:
acc_obj = metrics_module.BinaryAccuracy()
acc_obj.update_state(labels, labels)
return tf.estimator.EstimatorSpec(
mode, loss=loss_fn(), eval_metric_ops={"Accuracy": acc_obj})
assert mode == tf.estimator.ModeKeys.TRAIN
global_step = tf.train.get_global_step()
train_op = optimizer.minimize(loss_fn(), global_step=global_step)
return tf.estimator.EstimatorSpec(mode, loss=loss_fn(), train_op=train_op)
return model_fn
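# build_model_fn_optimizer() returns a model_fn that yields a different
# EstimatorSpec per mode: predictions for PREDICT, a loss plus an accuracy
# metric for EVAL, and a loss plus the SGD train_op for TRAIN.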
def main(_):
distribution = tf.contrib.distribute.MirroredStrategy(
["/device:GPU:0", "/device:GPU:1"])
config = tf.estimator.RunConfig(train_distribute=distribution,
eval_distribute=distribution)
# Since there are 2 devices and 10 samples, we set steps=5.
steps = 5
def train_input_fn():
features = tf.data.Dataset.from_tensors([[1.]]).repeat(10)
labels = tf.data.Dataset.from_tensors([1.]).repeat(10)
return tf.data.Dataset.zip((features, labels))
estimator = tf.estimator.Estimator(
model_fn=build_model_fn_optimizer(), config=config)
estimator.train(input_fn=train_input_fn, steps=steps)
def eval_input_fn():
features = tf.data.Dataset.from_tensors([[1.]]).repeat(10)
labels = tf.data.Dataset.from_tensors([1.]).repeat(10)
return tf.data.Dataset.zip((features, labels))
eval_result = estimator.evaluate(input_fn=eval_input_fn, steps=steps)
print("Eval result: {}".format(eval_result))
assert eval_result["Accuracy"] == 1.0
def predict_input_fn():
predict_features = tf.data.Dataset.from_tensors([[1.]]).repeat(10)
return predict_features
prediction_iterable = estimator.predict(input_fn=predict_input_fn)
# Create a list containing each of the prediction dictionaries that map
# the key 'logits' to an array of model outputs.
predictions = [prediction_iterable.next() for _ in range(10)]
print("Prediction results: {}".format(predictions))
if __name__ == "__main__":
tf.app.run()
| {
"content_hash": "0efe028daa178212e1059a86c0f2e6db",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 80,
"avg_line_length": 34.71590909090909,
"alnum_prop": 0.6880523731587561,
"repo_name": "ageron/tensorflow",
"id": "cfaee03a2003089366a506168be2942c279f45bf",
"size": "3744",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "tensorflow/contrib/distribute/python/examples/simple_estimator_example.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3560"
},
{
"name": "Batchfile",
"bytes": "14734"
},
{
"name": "C",
"bytes": "644380"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "59281238"
},
{
"name": "CMake",
"bytes": "207169"
},
{
"name": "Dockerfile",
"bytes": "75509"
},
{
"name": "Go",
"bytes": "1501606"
},
{
"name": "HTML",
"bytes": "4680118"
},
{
"name": "Java",
"bytes": "908340"
},
{
"name": "Jupyter Notebook",
"bytes": "2510253"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "94466"
},
{
"name": "Objective-C",
"bytes": "60069"
},
{
"name": "Objective-C++",
"bytes": "118322"
},
{
"name": "PHP",
"bytes": "15024"
},
{
"name": "Pascal",
"bytes": "617"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "46230508"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Ruby",
"bytes": "838"
},
{
"name": "Shell",
"bytes": "481859"
},
{
"name": "Smarty",
"bytes": "27249"
},
{
"name": "Swift",
"bytes": "53109"
}
],
"symlink_target": ""
} |
import unittest
import provider.lax_provider as lax_provider
import tests.settings_mock as settings_mock
import base64
import json
import tests.test_data as test_data
from provider.lax_provider import ErrorCallingLaxException
from mock import mock, patch, MagicMock
from ddt import ddt, data, unpack
@ddt
class TestLaxProvider(unittest.TestCase):
@patch('provider.lax_provider.article_versions')
def test_article_highest_version_200(self, mock_lax_provider_article_versions):
mock_lax_provider_article_versions.return_value = 200, test_data.lax_article_versions_response_data
version = lax_provider.article_highest_version('08411', settings_mock)
self.assertEqual(3, version)
@patch('provider.lax_provider.article_versions')
def test_article_highest_version_no_versions(self, mock_lax_provider_article_versions):
mock_lax_provider_article_versions.return_value = 200, []
version = lax_provider.article_highest_version('08411', settings_mock)
self.assertEqual(0, version)
@patch('provider.lax_provider.article_versions')
def test_article_highest_version_404(self, mock_lax_provider_article_versions):
mock_lax_provider_article_versions.return_value = 404, None
version = lax_provider.article_highest_version('08411', settings_mock)
self.assertEqual("1", version)
@patch('provider.lax_provider.article_versions')
def test_article_next_version_no_versions(self, mock_lax_provider_article_versions):
mock_lax_provider_article_versions.return_value = 200, []
version = lax_provider.article_next_version('08411', settings_mock)
self.assertEqual("1", version)
@patch('provider.lax_provider.article_versions')
def test_article_publication_date_200(self, mock_lax_provider_article_versions):
mock_lax_provider_article_versions.return_value = 200, test_data.lax_article_versions_response_data
date_str = lax_provider.article_publication_date('08411', settings_mock)
self.assertEqual('20151126000000', date_str)
@patch('provider.lax_provider.article_versions')
def test_article_publication_date_200_no_versions(self, mock_lax_provider_article_versions):
mock_lax_provider_article_versions.return_value = 200, []
date_str = lax_provider.article_publication_date('08411', settings_mock)
self.assertEqual(None, date_str)
@patch('provider.lax_provider.article_versions')
def test_article_publication_date_404(self, mock_lax_provider_article_versions):
mock_lax_provider_article_versions.return_value = 404, None
date_str = lax_provider.article_publication_date('08411', settings_mock)
self.assertEqual(None, date_str)
@patch('provider.lax_provider.article_versions')
def test_article_publication_date_500(self, mock_lax_provider_article_versions):
mock_lax_provider_article_versions.return_value = 500, None
date_str = lax_provider.article_publication_date('08411', settings_mock)
self.assertEqual(None, date_str)
@patch('provider.lax_provider.article_versions')
def test_article_version_date_by_version(self, mock_lax_provider_article_versions):
mock_lax_provider_article_versions.return_value = 200, test_data.lax_article_versions_response_data
result = lax_provider.article_version_date_by_version('08411', "2", settings_mock)
self.assertEqual("2015-11-30T00:00:00Z", result)
@patch('requests.get')
def test_article_version_200(self, mock_requests_get):
response = MagicMock()
response.status_code = 200
response.json.return_value = {'versions': [{'version': 1}]}
mock_requests_get.return_value = response
status_code, versions = lax_provider.article_versions('08411', settings_mock)
self.assertEqual(status_code, 200)
self.assertEqual(versions, [{'version': 1}])
@patch('requests.get')
def test_article_version_404(self, mock_requests_get):
response = MagicMock()
response.status_code = 404
mock_requests_get.return_value = response
status_code, versions = lax_provider.article_versions('08411', settings_mock)
self.assertEqual(status_code, 404)
self.assertIsNone(versions)
@patch('requests.get')
def test_article_version_500(self, mock_requests_get):
response = MagicMock()
response.status_code = 500
mock_requests_get.return_value = response
self.assertRaises(ErrorCallingLaxException, lax_provider.article_highest_version, '08411', settings_mock)
# endpoint currently not available
# @patch('provider.lax_provider.article_version')
# def test_article_publication_date_by_version_id_version(self, mock_lax_provider_article_version):
# mock_lax_provider_article_version.return_value = 200, test_data.lax_article_by_version_response_data_incomplete
# result = lax_provider.article_version_date('08411', "2", settings_mock)
# self.assertEqual("2016-11-11T17:48:41Z", result)
def test_poa_vor_status_both_true(self):
exp_poa_status, exp_vor_status = lax_provider.poa_vor_status(test_data.lax_article_versions_response_data)
self.assertEqual(True, exp_poa_status)
self.assertEqual(True, exp_vor_status)
def test_poa_vor_status_both_none(self):
exp_poa_status, exp_vor_status = lax_provider.poa_vor_status([])
self.assertEqual(None, exp_poa_status)
self.assertEqual(None, exp_vor_status)
def test_poa_vor_status_not_found(self):
data = None
exp_poa_status, exp_vor_status = lax_provider.poa_vor_status(data)
self.assertEqual(None, exp_poa_status)
self.assertEqual(None, exp_vor_status)
def test_poa_vor_status_blank_version(self):
data = [{},{"status":"poa","version":1}]
exp_poa_status, exp_vor_status = lax_provider.poa_vor_status(data)
self.assertEqual(True, exp_poa_status)
self.assertEqual(None, exp_vor_status)
@patch('provider.lax_provider.get_xml_file_name')
def test_prepare_action_message(self, fake_xml_file_name):
fake_xml_file_name.return_value = "elife-00353-v1.xml"
message = lax_provider.prepare_action_message(settings_mock,
"00353", "bb2d37b8-e73c-43b3-a092-d555753316af",
"00353.1/bb2d37b8-e73c-43b3-a092-d555753316af",
"1", "vor", "", "ingest")
self.assertIn('token', message)
del message['token']
self.assertDictEqual(message, {'action': 'ingest',
'id': '00353',
'location': 'https://s3-external-1.amazonaws.com/origin_bucket/00353.1/bb2d37b8-e73c-43b3-a092-d555753316af/elife-00353-v1.xml',
'version': 1,
'force': False})
def test_lax_token(self):
token = lax_provider.lax_token("bb2d37b8-e73c-43b3-a092-d555753316af",
"1",
"00353.1/bb2d37b8-e73c-43b3-a092-d555753316af",
"vor",
"")
self.assertEqual(json.loads(base64.decodestring(token)), {"run": "bb2d37b8-e73c-43b3-a092-d555753316af",
"version": "1",
"expanded_folder": "00353.1/bb2d37b8-e73c-43b3-a092-d555753316af",
"eif_location": "",
"status": "vor",
"force": False})
@patch('provider.lax_provider.article_versions')
def test_was_ever_poa_was_poa(self, mock_lax_provider_article_versions):
article_id = '04132'
mock_lax_provider_article_versions.return_value = 200, test_data.lax_article_versions_response_data
result = lax_provider.was_ever_poa(article_id, settings_mock)
self.assertEqual(result, True)
@patch('provider.lax_provider.article_versions')
def test_was_ever_poa_was_not_poa(self, mock_lax_provider_article_versions):
article_id = '04132'
mock_lax_provider_article_versions.return_value = 200, [test_data.lax_article_by_version_response_data_incomplete]
result = lax_provider.was_ever_poa(article_id, settings_mock)
self.assertEqual(result, False)
@patch('provider.lax_provider.article_versions')
def test_was_ever_poa_was_not_poa_blank(self, mock_lax_provider_article_versions):
article_id = '04132'
mock_lax_provider_article_versions.return_value = 200, []
result = lax_provider.was_ever_poa(article_id, settings_mock)
self.assertEqual(result, False)
@patch('provider.lax_provider.article_versions')
def test_was_ever_poa_was_not_poa_500(self, mock_lax_provider_article_versions):
article_id = '04132'
mock_lax_provider_article_versions.return_value = 500, []
result = lax_provider.was_ever_poa(article_id, settings_mock)
self.assertEqual(result, None)
@patch('provider.lax_provider.article_versions')
@data(
(True, True, True),
(True, False, False),
(True, None, False),
(False, True, True),
(False, False, True),
(False, None, False),
)
@unpack
def test_published_considering_poa_status(self, is_poa, was_ever_poa, expected_return_value,
mock_lax_provider_article_versions):
article_id = '04132'
mock_lax_provider_article_versions.return_value = 200, test_data.lax_article_versions_response_data
published = lax_provider.published_considering_poa_status(article_id, settings_mock,
is_poa, was_ever_poa)
self.assertEqual(published, expected_return_value)
@patch('provider.lax_provider.article_versions')
@data(
(True, True, False),
(True, False, False),
(True, None, False),
(False, True, False),
(False, False, False),
(False, None, False),
)
@unpack
def test_published_considering_poa_status_500(self, is_poa, was_ever_poa, expected_return_value,
mock_lax_provider_article_versions):
article_id = '04132'
mock_lax_provider_article_versions.return_value = 500, []
published = lax_provider.published_considering_poa_status(article_id, settings_mock,
is_poa, was_ever_poa)
self.assertEqual(published, expected_return_value)
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "f5f88bfbf238098c671bcbb718f2b18d",
"timestamp": "",
"source": "github",
"line_count": 220,
"max_line_length": 167,
"avg_line_length": 50.25454545454546,
"alnum_prop": 0.6260853835021708,
"repo_name": "gnott/elife-bot",
"id": "00d71756027ef9c46f6de3f9931508a24ca06a0a",
"size": "11056",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "tests/provider/test_lax_provider.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Gherkin",
"bytes": "53428"
},
{
"name": "HTML",
"bytes": "3975"
},
{
"name": "Python",
"bytes": "1295112"
},
{
"name": "Shell",
"bytes": "2363"
}
],
"symlink_target": ""
} |
import random
import numpy as np
import sys
from domain.make_env import make_env
from neat_src import *
class GymTask():
"""Problem domain to be solved by neural network. Uses OpenAI Gym patterns.
"""
def __init__(self, game, paramOnly=False, nReps=1):
"""Initializes task environment
Args:
game - (string) - dict key of task to be solved (see domain/config.py)
Optional:
paramOnly - (bool) - only load parameters instead of launching task?
nReps - (nReps) - number of trials to get average fitness
"""
# Network properties
self.nInput = game.input_size
self.nOutput = game.output_size
self.actRange = game.h_act
self.absWCap = game.weightCap
self.layers = game.layers
self.activations = np.r_[np.full(1,1),game.i_act,game.o_act]
# Environment
self.nReps = nReps
self.maxEpisodeLength = game.max_episode_length
self.actSelect = game.actionSelect
if not paramOnly:
self.env = make_env(game.env_name)
# Special needs...
self.needsClosed = (game.env_name.startswith("CartPoleSwingUp"))
def getFitness(self, wVec, aVec, hyp=None, view=False, nRep=False, seed=-1):
"""Get fitness of a single individual.
Args:
wVec - (np_array) - weight matrix as a flattened vector
[N**2 X 1]
aVec - (np_array) - activation function of each node
[N X 1] - stored as ints (see applyAct in ann.py)
Optional:
view - (bool) - view trial?
      nRep    - (int)      - number of trials to get average fitness
seed - (int) - starting random seed for trials
Returns:
fitness - (float) - mean reward over all trials
"""
if nRep is False:
nRep = self.nReps
wVec[np.isnan(wVec)] = 0
reward = np.empty(nRep)
for iRep in range(nRep):
reward[iRep] = self.testInd(wVec, aVec, view=view, seed=seed+iRep)
fitness = np.mean(reward)
return fitness
def testInd(self, wVec, aVec, view=False,seed=-1):
"""Evaluate individual on task
Args:
wVec - (np_array) - weight matrix as a flattened vector
[N**2 X 1]
aVec - (np_array) - activation function of each node
[N X 1] - stored as ints (see applyAct in ann.py)
Optional:
view - (bool) - view trial?
seed - (int) - starting random seed for trials
Returns:
fitness - (float) - reward earned in trial
"""
if seed >= 0:
random.seed(seed)
np.random.seed(seed)
self.env.seed(seed)
state = self.env.reset()
self.env.t = 0
annOut = act(wVec, aVec, self.nInput, self.nOutput, state)
action = selectAct(annOut,self.actSelect)
    predName = str(np.mean(wVec[wVec!=0]))
state, reward, done, info = self.env.step(action)
if self.maxEpisodeLength == 0:
if view:
if self.needsClosed:
self.env.render(close=done)
else:
self.env.render()
return reward
else:
totalReward = reward
for tStep in range(self.maxEpisodeLength):
annOut = act(wVec, aVec, self.nInput, self.nOutput, state)
action = selectAct(annOut,self.actSelect)
state, reward, done, info = self.env.step(action)
totalReward += reward
if view:
if self.needsClosed:
self.env.render(close=done)
else:
self.env.render()
if done:
break
return totalReward
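# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# Assuming a game configuration object like the entries described in
# domain/config.py; the dict name `games`, the key 'swingup', and the arrays
# wVec/aVec below are placeholders, not values taken from this repository.
#
#   task = GymTask(games['swingup'], nReps=3)        # games: dict of Game configs
#   fitness = task.getFitness(wVec, aVec, seed=42)   # mean reward over 3 rollouts
#   single  = task.testInd(wVec, aVec, view=False)   # one rollout, one reward value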
| {
"content_hash": "cd3719f32947c507a5ca507cbba98d11",
"timestamp": "",
"source": "github",
"line_count": 115,
"max_line_length": 78,
"avg_line_length": 31,
"alnum_prop": 0.5957924263674614,
"repo_name": "google/brain-tokyo-workshop",
"id": "3ebd386999749995c5a353a1c390f5d3c33cacee",
"size": "3565",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "WANNRelease/prettyNEAT/domain/task_gym.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "671"
},
{
"name": "HTML",
"bytes": "1031"
},
{
"name": "Jupyter Notebook",
"bytes": "47079538"
},
{
"name": "Python",
"bytes": "1037153"
},
{
"name": "Shell",
"bytes": "6053"
}
],
"symlink_target": ""
} |
'''
Test the upgrade master from 3.1.0.233
@author: YeTian 2018-11-25
'''
import os
import tempfile
import uuid
import time
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.test_state as test_state
import zstacklib.utils.ssh as ssh
import zstackwoodpecker.operations.scenario_operations as scen_ops
test_stub = test_lib.lib_get_test_stub()
test_obj_dict = test_state.TestStateDict()
tmp_file = '/tmp/%s' % uuid.uuid1().get_hex()
vm_inv = None
def test():
global vm_inv
test_util.test_dsc('Create test vm to test zstack upgrade by -u.')
image_name = os.environ.get('imageNameBase_310_mn_c74')
c74_iso_path = os.environ.get('c74_iso_path')
#iso_21_path = os.environ.get('iso_21_path')
zstack_latest_version = os.environ.get('zstackLatestVersion')
zstack_latest_path = os.environ.get('zstackLatestInstaller')
vm_name = os.environ.get('vmName') + image_name
upgrade_script_path = os.environ.get('upgradeScript')
vm_inv = test_stub.create_vm_scenario(image_name, vm_name)
vm_ip = vm_inv.vmNics[0].ip
test_lib.lib_wait_target_up(vm_ip, 22)
test_stub.make_ssh_no_password(vm_ip, tmp_file)
test_util.test_logger('Update MN IP')
test_stub.update_mn_hostname(vm_ip, tmp_file)
test_stub.update_mn_ip(vm_ip, tmp_file)
test_stub.stop_mn(vm_ip, tmp_file)
test_stub.start_node(vm_ip, tmp_file)
test_stub.start_mn(vm_ip, tmp_file)
test_stub.check_installation(vm_ip, tmp_file)
test_util.test_logger('Upgrade zstack to latest')
test_stub.update_c74_iso(vm_ip, tmp_file, c74_iso_path, upgrade_script_path)
#test_stub.updatei_21_iso(vm_ip, tmp_file, iso_21_path, upgrade_script_path)
test_stub.upgrade_zstack(vm_ip, zstack_latest_path, tmp_file)
test_stub.check_zstack_version(vm_ip, tmp_file, zstack_latest_version)
test_stub.start_mn(vm_ip, tmp_file)
test_stub.check_mn_running(vm_ip, tmp_file)
test_stub.check_installation(vm_ip, tmp_file)
os.system('rm -f %s' % tmp_file)
test_stub.destroy_vm_scenario(vm_inv.uuid)
test_util.test_pass('ZStack upgrade Test Success')
#Will be called only if exception happens in test().
def error_cleanup():
global vm_inv
os.system('rm -f %s' % tmp_file)
if vm_inv:
test_stub.destroy_vm_scenario(vm_inv.uuid)
test_lib.lib_error_cleanup(test_obj_dict)
| {
"content_hash": "750e074ce00408243804f33b546fdee5",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 80,
"avg_line_length": 36.776119402985074,
"alnum_prop": 0.6814123376623377,
"repo_name": "zstackio/zstack-woodpecker",
"id": "73e160f93b41142c6e49c0a71702bc88166cd438",
"size": "2464",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "integrationtest/vm/installation/upgrade/test_zs_upgd_3.1.0_latest_on_cos74.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2356"
},
{
"name": "Go",
"bytes": "49822"
},
{
"name": "Makefile",
"bytes": "687"
},
{
"name": "Puppet",
"bytes": "875"
},
{
"name": "Python",
"bytes": "13070596"
},
{
"name": "Shell",
"bytes": "177861"
}
],
"symlink_target": ""
} |
from PyQt5.QtWidgets import QComboBox
from sdbcore.logger import Logger
from sdbcore.stencildatalistener import StencilDataStencilListListener
class StencilListWidget(QComboBox, StencilDataStencilListListener):
def __init__(self, parent, stencil_data):
super().__init__(parent)
self.__stencil_data = stencil_data
self.__stencil_data.register_as_stencil_list_listener(self)
def remove_all_stencils(self):
Logger.info(
"Removing all stencils of StencilListWidget of '%s'" % self.__stencil_data.name)
self.clear()
def add_stencil(self, stencil):
Logger.info(
"Adding stencil '%s' to StencilFieldListWidget of '%s'" % (
stencil, self.__stencil_data.name))
self.addItem(stencil)
def remove_stencil(self, stencil):
Logger.info(
"Removing stencil '%s' of StencilFieldListWidget of '%s'" % (
stencil, self.__stencil_data.name))
self.removeItem(self.findText(stencil))
@property
def stencil(self):
return self.itemText(self.currentIndex())
| {
"content_hash": "0dd013e44482d450de33d89cd6ea26fc",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 92,
"avg_line_length": 34.5625,
"alnum_prop": 0.6537070524412296,
"repo_name": "thfabian/serialbox2",
"id": "3389c473e1e3221b2394245a54534b437dd22115",
"size": "1513",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/serialbox-python/sdb/sdbgui/stencillistwidget.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C++",
"bytes": "935152"
},
{
"name": "CMake",
"bytes": "100191"
},
{
"name": "Fortran",
"bytes": "137809"
},
{
"name": "Python",
"bytes": "389551"
},
{
"name": "Shell",
"bytes": "5310"
}
],
"symlink_target": ""
} |
"""
Tests for the Root Controller
"""
import pytest
import bottle
from controller import root
def test_static_routing():
# Ensure that you can retrieve the JS and resources. (Wrap this in a
    # pytest.warns so that it allows for ResourceWarnings because of opening,
# but not closing the resource files.)
with pytest.warns(ResourceWarning):
root.static('js', 'require.js')
root.static('resources', 'css/app.css')
# Ensure that Bottle raises an exception on files not in the js or
# resources directories.
with pytest.raises(bottle.HTTPError) as e_info:
root.static('helpers', 'util.py')
assert e_info.value.status == '404 Not Found'
# Assert that when it can't find the file, the status code is 404.
assert root.static('js', 'not-here.js').status == '404 Not Found'
| {
"content_hash": "cf951e44c1a8e73409d135c5562dca7a",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 77,
"avg_line_length": 33.32,
"alnum_prop": 0.6866746698679472,
"repo_name": "sumnerevans/wireless-debugging",
"id": "ea9146a14522658bd566034a01662efb4dedac67",
"size": "833",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/tests/root_controller_tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "40192"
},
{
"name": "HTML",
"bytes": "9412"
},
{
"name": "Java",
"bytes": "40850"
},
{
"name": "JavaScript",
"bytes": "23092"
},
{
"name": "Python",
"bytes": "112724"
},
{
"name": "Ruby",
"bytes": "1241"
},
{
"name": "Swift",
"bytes": "120570"
}
],
"symlink_target": ""
} |
import logging
import operator
import pytest
from cassandra import ConsistencyLevel
from pytest import mark
from dtest import Tester, create_ks, create_cf
from tools.data import insert_c1c2
from tools.misc import generate_ssl_stores
from itertools import product
since = pytest.mark.since
logger = logging.getLogger(__name__)
opmap = {
operator.eq: "==",
operator.gt: ">",
operator.lt: "<",
operator.ne: "!=",
operator.ge: ">=",
operator.le: "<="
}
class TestStreaming(Tester):
@pytest.fixture(autouse=True)
def fixture_add_additional_log_patterns(self, fixture_dtest_setup):
fixture_dtest_setup.ignore_log_patterns = (
# This one occurs when trying to send the migration to a
# node that hasn't started yet, and when it does, it gets
# replayed and everything is fine.
r'Can\'t send migration request: node.*is down',
# ignore streaming error during bootstrap
r'Exception encountered during startup',
r'Streaming error occurred'
)
def setup_internode_ssl(self, cluster):
logger.debug("***using internode ssl***")
generate_ssl_stores(self.fixture_dtest_setup.test_path)
cluster.enable_internode_ssl(self.fixture_dtest_setup.test_path)
def _test_streaming(self, op_zerocopy, op_partial, num_partial, num_zerocopy,
compaction_strategy='LeveledCompactionStrategy', num_keys=1000, rf=3, num_nodes=3, ssl=False):
keys = num_keys
cluster = self.cluster
if ssl:
self.setup_internode_ssl(cluster)
tokens = cluster.balanced_tokens(num_nodes)
cluster.set_configuration_options(values={'endpoint_snitch': 'org.apache.cassandra.locator.PropertyFileSnitch'})
cluster.set_configuration_options(values={'num_tokens': 1})
cluster.populate(num_nodes)
nodes = cluster.nodelist()
for i in range(0, len(nodes)):
nodes[i].set_configuration_options(values={'initial_token': tokens[i]})
cluster.start(wait_for_binary_proto=True)
session = self.patient_cql_connection(nodes[0])
create_ks(session, name='ks2', rf=rf)
create_cf(session, 'cf', columns={'c1': 'text', 'c2': 'text'},
compaction_strategy=compaction_strategy)
insert_c1c2(session, n=keys, consistency=ConsistencyLevel.ALL)
session_n2 = self.patient_exclusive_cql_connection(nodes[1])
session_n2.execute("TRUNCATE system.available_ranges;")
mark = nodes[1].mark_log()
nodes[1].nodetool('rebuild -ks ks2')
nodes[1].watch_log_for('Completed submission of build tasks', filename='debug.log', timeout=120)
zerocopy_streamed_sstable = len(
nodes[1].grep_log('.*CassandraEntireSSTableStreamReader.*?Finished receiving Data.*', filename='debug.log',
from_mark=mark))
partial_streamed_sstable = len(
nodes[1].grep_log('.*CassandraStreamReader.*?Finished receiving file.*', filename='debug.log',
from_mark=mark))
assert op_zerocopy(zerocopy_streamed_sstable, num_zerocopy), "%s %s %s" % (num_zerocopy, opmap.get(op_zerocopy),
zerocopy_streamed_sstable)
assert op_partial(partial_streamed_sstable, num_partial), "%s %s %s" % (num_partial, op_partial,
partial_streamed_sstable)
@since('4.0')
@pytest.mark.parametrize('ssl,compaction_strategy', product(['SSL', 'NoSSL'], ['LeveledCompactionStrategy', 'SizeTieredCompactionStrategy']))
def test_zerocopy_streaming(self, ssl, compaction_strategy):
self._test_streaming(op_zerocopy=operator.gt, op_partial=operator.gt, num_zerocopy=1, num_partial=1, rf=2,
num_nodes=3, ssl=(ssl == 'SSL'), compaction_strategy=compaction_strategy)
@since('4.0')
def test_zerocopy_streaming(self):
self._test_streaming(op_zerocopy=operator.gt, op_partial=operator.eq, num_zerocopy=1, num_partial=0,
num_nodes=2, rf=2)
@since('4.0')
def test_zerocopy_streaming_no_replication(self):
self._test_streaming(op_zerocopy=operator.eq, op_partial=operator.eq, num_zerocopy=0, num_partial=0, rf=1,
num_nodes=3)
| {
"content_hash": "0a005acf48ad6cb095d4a2ab93acd72b",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 145,
"avg_line_length": 42.235849056603776,
"alnum_prop": 0.6209515300424391,
"repo_name": "pcmanus/cassandra-dtest",
"id": "34109202eb67ba481daff50c3010f4dad61fa72e",
"size": "4477",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "streaming_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1176235"
}
],
"symlink_target": ""
} |
from datetime import datetime
from django.db import models
class WithScrobbleCountsManager(models.Manager):
"""
Adds a with_scrobble_counts() method.
"""
# Can we filter these (things) by Album?
is_filterable_by_album = True
# Can we filter these (things) by Artist?
is_filterable_by_artist = True
# Can we filter these (things) by Track?
is_filterable_by_track = True
def with_scrobble_counts(self, **kwargs):
"""
Adds a `scrobble_count` field to the Queryset's objects, and
orders the results by that, descending.
eg:
# All Tracks, each with a scrobble_count:
Track.objects.with_scrobble_counts()
# All Albums, each with a total scrobble_count:
Album.objects.with_scrobble_counts()
# All Artists, each with a total scrobble_count:
Artist.objects.with_scrobble_counts()
# Tracks by artist_obj:
Track.objects.with_scrobble_counts(artist=artist_obj)
# Tracks appearing on album_obj:
Track.objects.with_scrobble_counts(album=album_obj)
# Albums on which track_obj appears:
Album.objects.with_scrobble_counts(track=track_obj)
or combine filters:
# Tracks by artist_obj, scrobbled by account_obj between
# datetime_obj_1 and datetime_obj2:
Track.objects.with_scrobble_counts(
account = account_obj,
artist = artist_obj,
min_post_time = datetime_obj_1,
max_post_time = datetime_obj_2,
)
Include an `account` to only include Scrobbles by that Account.
Include an `album` to only include Scrobbles on that Album.
Include an `artist` to only include Scrobbles by that Artist.
Include a `track` to only include Scrobbles including that Track.
Include a `min_post_time` to only include Scrobbles after then.
Include a `max_post_time` to only include Scrobbles before then.
"""
account = kwargs.get("account", None)
min_post_time = kwargs.get("min_post_time", None)
max_post_time = kwargs.get("max_post_time", None)
album = kwargs.get("album", None)
artist = kwargs.get("artist", None)
track = kwargs.get("track", None)
if album and not self.is_filterable_by_album:
raise ValueError("This is not filterable by album")
if artist and not self.is_filterable_by_artist:
raise ValueError("This is not filterable by artist")
if track and not self.is_filterable_by_track:
raise ValueError("This is not filterable by track")
if account is not None and account.__class__.__name__ != "Account":
raise TypeError(
"account must be an Account instance, " "not a %s" % type(account)
)
if album is not None and album.__class__.__name__ != "Album":
raise TypeError(
"album must be an Album instance, " "not a %s" % type(album)
)
if artist is not None and artist.__class__.__name__ != "Artist":
raise TypeError(
"artist must be an Artist instance, " "not a %s" % type(account)
)
if min_post_time is not None and type(min_post_time) is not datetime:
raise TypeError(
"min_post_time must be a datetime.datetime, "
"not a %s" % type(min_post_time)
)
if max_post_time is not None and type(max_post_time) is not datetime:
raise TypeError(
"max_post_time must be a datetime.datetime, "
"not a %s" % type(max_post_time)
)
filter_kwargs = {}
if account:
filter_kwargs["scrobbles__account"] = account
if album:
filter_kwargs["scrobbles__album"] = album
if artist:
filter_kwargs["scrobbles__artist"] = artist
if track:
filter_kwargs["scrobbles__track"] = track
if min_post_time and max_post_time:
filter_kwargs["scrobbles__post_time__gte"] = min_post_time
filter_kwargs["scrobbles__post_time__lte"] = max_post_time
elif min_post_time:
filter_kwargs["scrobbles__post_time__gte"] = min_post_time
elif max_post_time:
filter_kwargs["scrobbles__post_time__lte"] = max_post_time
qs = self.filter(**filter_kwargs)
return qs.annotate(
scrobble_count=models.Count("scrobbles", distinct=True)
).order_by("-scrobble_count")
class TracksManager(WithScrobbleCountsManager):
"""
Adds a `scrobble_count` field to the Track objects.
See WithScrobbleCountsManager for docs.
"""
# We can't filter a list of Tracks by Tracks.
is_filterable_by_track = False
def with_scrobble_counts(self, **kwargs):
"Pre-fetch all the Tracks' Artists."
qs = (
super(TracksManager, self)
.with_scrobble_counts(**kwargs)
.prefetch_related("artist")
)
return qs
class AlbumsManager(WithScrobbleCountsManager):
"""
Adds a `scrobble_count` field to the Album objects.
See WithScrobbleCountsManager for docs.
"""
# We can't filter a list of Albums by Album.
is_filterable_by_album = False
def with_scrobble_counts(self, **kwargs):
"Pre-fetch all the Albums' Artists."
qs = (
super(AlbumsManager, self)
.with_scrobble_counts(**kwargs)
.prefetch_related("artist")
)
return qs
class ArtistsManager(WithScrobbleCountsManager):
"""
Adds a `scrobble_count` field to the Artist objects.
See WithScrobbleCountsManager for docs.
"""
# We can't filter a list of Artists by Artist.
is_filterable_by_artist = False
| {
"content_hash": "ca066549fb23769aa960535151679462",
"timestamp": "",
"source": "github",
"line_count": 181,
"max_line_length": 82,
"avg_line_length": 33,
"alnum_prop": 0.5908253808806295,
"repo_name": "philgyford/django-ditto",
"id": "587bb9f918d1af60247fb15b1fff4e1d279bf485",
"size": "5973",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "ditto/lastfm/managers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "131947"
},
{
"name": "JavaScript",
"bytes": "15927"
},
{
"name": "Python",
"bytes": "1121623"
}
],
"symlink_target": ""
} |
import re
def list_parser(names):
"""Parse a list of elements, some of which might be one-level sublists
within parentheses, into a a list of lists of those elements. For
example: list_parser('(a,b),c') -> [['a', 'b'], 'c']"""
elems = re.split(',', names)
ret = []
accum = []
for elem in elems:
if re.search('^\((.*)\)$', elem):
accum.append(re.sub('^\((.*)\)', '\\1', elem))
ret.append(accum)
accum = []
elif re.search('^\(', elem):
accum.append(re.sub('^\(', '', elem))
elif re.search('\)$', elem):
accum.append(re.sub('\)$', '', elem))
ret.append(accum)
accum = []
elif len(accum) != 0:
accum.append(elem)
else:
ret.append([elem])
if len(accum) > 0:
print('Non matching brackets in', names)
return ret
def map2(f, ls):
"""map to a depth of 2. That is, given a list of lists, apply
f to those innermost elements """
return [list(map(f, l)) for l in ls]
def remove_trailing_ws(line):
return re.sub('\s*$', '', line)
def remove_leading_and_trailing_ws(line):
return re.sub('\s*$', '', re.sub('^\s*', '', line))
def parse_pairs_list(pairString):
"""parse a string like 'name=value name2=value2' into a
list of pairs of ('name', 'value') ..."""
ret = []
pairs = re.finditer('(\w+)(=("[^"]*"|[^\s]*))?', pairString)
for pair in pairs:
name, rest, value = pair.groups()
if value is not None:
value = re.sub('^"(.*)"$', '\\1', value)
ret.append((name, value))
else:
ret.append((name, ''))
return ret
def parse_indexed_list(string):
"""parse a string of the form "(index,value),(index,value)..."
into a list of index, value pairs"""
ret = []
pairs = list_parser(string)
for pair in pairs:
if len(pair) == 2:
index, value = pair
ret.append((int(index), value))
return ret
def parse_pairs(pairString):
"""parse a string like 'name=value name2=value2' into a
dictionary of {'name': 'value', 'name2': 'value2'} """
return dict(parse_pairs_list(pairString))
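# Illustrative examples (added for clarity; not part of the original module).
# Expected results are derived from the functions above:
#
#   list_parser('(a,b),c')                   -> [['a', 'b'], ['c']]
#   map2(str.upper, [['a', 'b'], ['c']])     -> [['A', 'B'], ['C']]
#   parse_pairs_list('flags=0x100 name="Fetch queue"')
#                                            -> [('flags', '0x100'), ('name', 'Fetch queue')]
#   parse_pairs('a=1 b')                     -> {'a': '1', 'b': ''}
#   parse_indexed_list('(0,first),(1,second)') -> [(0, 'first'), (1, 'second')]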
| {
"content_hash": "bdc6a5c3e958ae34fc423058f82d3a8d",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 74,
"avg_line_length": 30.88888888888889,
"alnum_prop": 0.529226618705036,
"repo_name": "gem5/gem5",
"id": "d888f13460e5e8d8f390a547f314d0bdeeb4b1f4",
"size": "4293",
"binary": false,
"copies": "1",
"ref": "refs/heads/stable",
"path": "util/minorview/parse.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "145626"
},
{
"name": "Awk",
"bytes": "3386"
},
{
"name": "BASIC",
"bytes": "2884"
},
{
"name": "C",
"bytes": "3927153"
},
{
"name": "C++",
"bytes": "42960484"
},
{
"name": "CMake",
"bytes": "133888"
},
{
"name": "Dockerfile",
"bytes": "34102"
},
{
"name": "Emacs Lisp",
"bytes": "1914"
},
{
"name": "Forth",
"bytes": "354"
},
{
"name": "Fortran",
"bytes": "15436"
},
{
"name": "HTML",
"bytes": "146414"
},
{
"name": "Hack",
"bytes": "139769"
},
{
"name": "Java",
"bytes": "6966"
},
{
"name": "M4",
"bytes": "42624"
},
{
"name": "Makefile",
"bytes": "39573"
},
{
"name": "Perl",
"bytes": "23784"
},
{
"name": "Python",
"bytes": "8079781"
},
{
"name": "Roff",
"bytes": "8754"
},
{
"name": "SCSS",
"bytes": "2971"
},
{
"name": "SWIG",
"bytes": "173"
},
{
"name": "Scala",
"bytes": "5328"
},
{
"name": "Shell",
"bytes": "95638"
},
{
"name": "Starlark",
"bytes": "25668"
},
{
"name": "SuperCollider",
"bytes": "8869"
},
{
"name": "Vim Script",
"bytes": "4343"
},
{
"name": "sed",
"bytes": "3897"
}
],
"symlink_target": ""
} |
"""
Contains common test fixtures used to run AWS Identity and Access Management (IAM)
tests.
"""
import sys
# This is needed so Python can find test_tools on the path.
sys.path.append('../..')
from test_tools.fixtures.common import *
| {
"content_hash": "6ee6d51e903259e0c15450196fd611fb",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 82,
"avg_line_length": 26.22222222222222,
"alnum_prop": 0.7330508474576272,
"repo_name": "awsdocs/aws-doc-sdk-examples",
"id": "1a8cb23fd6b598dab767f678a49c37496963cb6f",
"size": "344",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "python/example_code/glue/test/conftest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ABAP",
"bytes": "476653"
},
{
"name": "Batchfile",
"bytes": "900"
},
{
"name": "C",
"bytes": "3852"
},
{
"name": "C#",
"bytes": "2051923"
},
{
"name": "C++",
"bytes": "943634"
},
{
"name": "CMake",
"bytes": "82068"
},
{
"name": "CSS",
"bytes": "33378"
},
{
"name": "Dockerfile",
"bytes": "2243"
},
{
"name": "Go",
"bytes": "1764292"
},
{
"name": "HTML",
"bytes": "319090"
},
{
"name": "Java",
"bytes": "4966853"
},
{
"name": "JavaScript",
"bytes": "1655476"
},
{
"name": "Jupyter Notebook",
"bytes": "9749"
},
{
"name": "Kotlin",
"bytes": "1099902"
},
{
"name": "Makefile",
"bytes": "4922"
},
{
"name": "PHP",
"bytes": "1220594"
},
{
"name": "Python",
"bytes": "2507509"
},
{
"name": "Ruby",
"bytes": "500331"
},
{
"name": "Rust",
"bytes": "558811"
},
{
"name": "Shell",
"bytes": "63776"
},
{
"name": "Swift",
"bytes": "267325"
},
{
"name": "TypeScript",
"bytes": "119632"
}
],
"symlink_target": ""
} |
from sqlagg.columns import SimpleColumn
from corehq.apps.reports.datatables import DataTablesHeader, DataTablesColumnGroup
from corehq.apps.reports.generic import GenericTabularReport
from corehq.apps.reports.sqlreport import DatabaseColumn
from corehq.apps.reports.standard import DatespanMixin, CustomProjectReport
from corehq.apps.reports.util import format_datatables_data
from custom.up_nrhm.reports import LangMixin
from custom.up_nrhm.filters import HierarchySqlData
from custom.up_nrhm.reports.block_level_af_report import BlockLevelAFReport
from custom.up_nrhm.sql_data import ASHAFacilitatorsData
from django.utils.translation import gettext as _, gettext_noop
class DistrictFunctionalityReport(GenericTabularReport, DatespanMixin, CustomProjectReport, LangMixin):
name = gettext_noop("Format-5 Functionality of ASHAs in blocks")
slug = "district_functionality_report"
no_value = '--'
def get_blocks_for_district(self):
blocks = []
for location in HierarchySqlData(config={'domain': self.domain}).get_data():
if location['district'] == self.report_config['district']:
blocks.append(location['block'])
return set(blocks)
@property
def headers(self):
blocks = self.get_blocks_for_district()
headers = [DataTablesColumnGroup('')]
headers.extend([DataTablesColumnGroup(block) for block in self.get_blocks_for_district()])
columns = [DatabaseColumn(_("Percentage of ASHAs functional on "
"(Number of functional ASHAs/total number of ASHAs) x 100"), SimpleColumn(''),
header_group=headers[0])]
for i, block in enumerate(blocks):
columns.append(DatabaseColumn(_('%s of ASHAs') % '%',
SimpleColumn(block), header_group=headers[i + 1]))
columns.append(DatabaseColumn(_('Grade of Block'), SimpleColumn(block), header_group=headers[i + 1]))
return DataTablesHeader(*headers)
@property
def report_config(self):
return {
'domain': self.domain,
'year': self.request.GET.get('year'),
'month': self.request.GET.get('month'),
'district': self.request.GET.get('hierarchy_district'),
'is_checklist': 1
}
@property
def model(self):
return ASHAFacilitatorsData(config=self.report_config)
@property
def rows(self):
def percent(v1, v2):
try:
return float(v1) * 100.0 / float(v2)
except ZeroDivisionError:
return 0
def get_grade(v):
return 'D' if v < 25 else 'C' if v < 50 else 'B' if v < 75 else 'A'
rows = [[column.header] for column in self.model.columns[2:]]
for block in self.get_blocks_for_district():
self.request_params['hierarchy_block'] = block
q = self.request.GET.copy()
q['hierarchy_block'] = block
self.request.GET = q
rs, block_total = BlockLevelAFReport(self.request, domain=self.domain).rows
for index, row in enumerate(rs[0:-2]):
value = percent(row[-1]['sort_key'], block_total)
grade = get_grade(value)
if index < 10:
rows[index].append(format_datatables_data('%.1f%%' % value, '%.1f%%' % value))
rows[index].append(format_datatables_data(grade, grade))
else:
rows[index].append(row[-1])
val = row[-1]['sort_key']
grade = get_grade(val)
rows[index].append(format_datatables_data(grade, grade))
return rows, 0
| {
"content_hash": "3536f2a9103f716cd788ebeb831aefce",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 113,
"avg_line_length": 44.188235294117646,
"alnum_prop": 0.6112886048988285,
"repo_name": "dimagi/commcare-hq",
"id": "0a17d3321d32e2540f76bcd4ed0cd4ab8172bed8",
"size": "3756",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "custom/up_nrhm/reports/district_functionality_report.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "82928"
},
{
"name": "Dockerfile",
"bytes": "2341"
},
{
"name": "HTML",
"bytes": "2589268"
},
{
"name": "JavaScript",
"bytes": "5889543"
},
{
"name": "Jinja",
"bytes": "3693"
},
{
"name": "Less",
"bytes": "176180"
},
{
"name": "Makefile",
"bytes": "1622"
},
{
"name": "PHP",
"bytes": "2232"
},
{
"name": "PLpgSQL",
"bytes": "66704"
},
{
"name": "Python",
"bytes": "21779773"
},
{
"name": "Roff",
"bytes": "150"
},
{
"name": "Shell",
"bytes": "67473"
}
],
"symlink_target": ""
} |
from orgmode import ORGMODE, apply_count, repeat, realign_tags
from orgmode import settings
from orgmode.exceptions import HeadingDomError
from orgmode.keybinding import Keybinding, Plug, MODE_INSERT, MODE_NORMAL
from orgmode.menu import Submenu, Separator, ActionEntry
from orgmode.liborgmode.base import Direction
from orgmode.liborgmode.headings import Heading
import vim
class EditStructure(object):
u""" EditStructure plugin """
def __init__(self):
u""" Initialize plugin """
object.__init__(self)
# menu entries this plugin should create
self.menu = ORGMODE.orgmenu + Submenu(u'&Edit Structure')
# key bindings for this plugin
# key bindings are also registered through the menu so only additional
# bindings should be put in this variable
self.keybindings = []
@classmethod
def new_heading(cls, below=None, insert_mode=False, end_of_last_child=False):
u"""
:below: True, insert heading below current heading, False,
insert heading above current heading, None, special
behavior for insert mode, use the current text as
heading
:insert_mode: True, if action is performed in insert mode
:end_of_last_child: True, insert heading at the end of last child,
otherwise the newly created heading will "take
over" the current heading's children
"""
d = ORGMODE.get_document()
current_heading = d.current_heading()
cursor = vim.current.window.cursor[:]
if not current_heading:
# the user is in meta data region
pos = cursor[0] - 1
heading = Heading(title=d.meta_information[pos], body=d.meta_information[pos + 1:])
d.headings.insert(0, heading)
del d.meta_information[pos:]
d.write()
vim.command((u'exe "normal %dgg"|startinsert!' % (heading.start_vim, )).encode(u'utf-8'))
return heading
heading = Heading(level=current_heading.level)
# it's weird but this is the behavior of original orgmode
if below is None:
below = cursor[1] != 0 or end_of_last_child
# insert newly created heading
l = current_heading.get_parent_list()
idx = current_heading.get_index_in_parent_list()
if l is not None and idx is not None:
l.insert(idx + (1 if below else 0), heading)
else:
raise HeadingDomError(u'Current heading is not properly linked in DOM')
if below and not end_of_last_child:
# append heading at the end of current heading and also take
# over the children of current heading
for child in current_heading.children:
heading.children.append(child, taint=False)
current_heading.children.remove_slice(0, len(current_heading.children), \
taint=False)
# if cursor is currently on a heading, insert parts of it into the
# newly created heading
if insert_mode and cursor[1] != 0 and cursor[0] == current_heading.start_vim:
offset = cursor[1] - current_heading.level - 1 - (len(current_heading.todo) \
+ 1 if current_heading.todo else 0)
if offset < 0:
offset = 0
if int(settings.get(u'org_improve_split_heading', u'1')) and \
offset > 0 and len(current_heading.title) == offset + 1 \
and current_heading.title[offset - 1] not in (u' ', u'\t'):
offset += 1
heading.title = current_heading.title[offset:]
current_heading.title = current_heading.title[:offset]
heading.body = current_heading.body[:]
current_heading.body = []
d.write()
vim.command((u'exe "normal %dgg"|startinsert!' % (heading.start_vim, )).encode(u'utf-8'))
# return newly created heading
return heading
@classmethod
def _append_heading(cls, heading, parent):
if heading.level <= parent.level:
			raise ValueError('Heading level is not lower than parent level: %d ! > %d' % (heading.level, parent.level))
if parent.children and parent.children[-1].level < heading.level:
cls._append_heading(heading, parent.children[-1])
else:
parent.children.append(heading, taint=False)
@classmethod
def _change_heading_level(cls, level, including_children=True, on_heading=False, insert_mode=False):
u"""
		Change level of heading relatively with or without including children.
		:level: the number of levels to promote/demote heading
		:including_children: True if children should be included in promoting/demoting
:on_heading: True if promoting/demoting should only happen when the cursor is on the heading
:insert_mode: True if vim is in insert mode
"""
d = ORGMODE.get_document()
current_heading = d.current_heading()
if not current_heading or on_heading and current_heading.start_vim != vim.current.window.cursor[0]:
# TODO figure out the actually pressed keybinding and feed these
# keys instead of making keys up like this
if level > 0:
if insert_mode:
vim.eval(u'feedkeys("\<C-t>", "n")'.encode(u'utf-8'))
elif including_children:
vim.eval(u'feedkeys(">]]", "n")'.encode(u'utf-8'))
elif on_heading:
vim.eval(u'feedkeys(">>", "n")'.encode(u'utf-8'))
else:
vim.eval(u'feedkeys(">}", "n")'.encode(u'utf-8'))
else:
if insert_mode:
vim.eval(u'feedkeys("\<C-d>", "n")'.encode(u'utf-8'))
elif including_children:
vim.eval(u'feedkeys("<]]", "n")'.encode(u'utf-8'))
elif on_heading:
vim.eval(u'feedkeys("<<", "n")'.encode(u'utf-8'))
else:
vim.eval(u'feedkeys("<}", "n")'.encode(u'utf-8'))
# return True because otherwise apply_count will not work
return True
# don't allow demotion below level 1
if current_heading.level == 1 and level < 1:
return False
# reduce level of demotion to a minimum heading level of 1
if (current_heading.level + level) < 1:
level = 1
def indent(heading, ic):
if not heading:
return
heading.level += level
if ic:
for child in heading.children:
indent(child, ic)
# save cursor position
c = vim.current.window.cursor[:]
# indent the promoted/demoted heading
indent_end_vim = current_heading.end_of_last_child_vim if including_children else current_heading.end_vim
indent(current_heading, including_children)
# when changing the level of a heading, its position in the DOM
# needs to be updated. It's likely that the heading gets a new
# parent and new children when demoted or promoted
# find new parent
p = current_heading.parent
pl = current_heading.get_parent_list()
ps = current_heading.previous_sibling
nhl = current_heading.level
if level > 0:
# demotion
# subheading or top level heading
if ps and nhl > ps.level:
pl.remove(current_heading, taint=False)
# find heading that is the new parent heading
oh = ps
h = ps
while nhl > h.level:
oh = h
if h.children:
h = h.children[-1]
else:
break
np = h if nhl > h.level else oh
# append current heading to new heading
np.children.append(current_heading, taint=False)
# if children are not included, distribute them among the
# parent heading and it's siblings
if not including_children:
for h in current_heading.children[:]:
if h and h.level <= nhl:
cls._append_heading(h, np)
current_heading.children.remove(h, taint=False)
else:
# promotion
if p and nhl <= p.level:
idx = current_heading.get_index_in_parent_list() + 1
# find the new parent heading
oh = p
h = p
while nhl <= h.level:
# append new children to current heading
for child in h.children[idx:]:
cls._append_heading(child, current_heading)
h.children.remove_slice(idx, len(h.children), taint=False)
idx = h.get_index_in_parent_list() + 1
if h.parent:
h = h.parent
else:
break
ns = oh.next_sibling
while ns and ns.level > current_heading.level:
nns = ns.next_sibling
cls._append_heading(ns, current_heading)
ns = nns
# append current heading to new parent heading / document
pl.remove(current_heading, taint=False)
if nhl > h.level:
h.children.insert(idx, current_heading, taint=False)
else:
d.headings.insert(idx, current_heading, taint=False)
d.write()
if indent_end_vim != current_heading.start_vim:
vim.command((u'normal %dggV%dgg=' % (current_heading.start_vim, indent_end_vim)).encode(u'utf-8'))
# restore cursor position
vim.current.window.cursor = (c[0], c[1] + level)
return True
@classmethod
@realign_tags
@repeat
@apply_count
def demote_heading(cls, including_children=True, on_heading=False, insert_mode=False):
if cls._change_heading_level(1, including_children=including_children, on_heading=on_heading, insert_mode=insert_mode):
if including_children:
return u'OrgDemoteSubtree'
return u'OrgDemoteHeading'
@classmethod
@realign_tags
@repeat
@apply_count
def promote_heading(cls, including_children=True, on_heading=False, insert_mode=False):
if cls._change_heading_level(-1, including_children=including_children, on_heading=on_heading, insert_mode=insert_mode):
if including_children:
return u'OrgPromoteSubtreeNormal'
return u'OrgPromoteHeadingNormal'
@classmethod
def _move_heading(cls, direction=Direction.FORWARD, including_children=True):
u""" Move heading up or down
:returns: heading or None
"""
d = ORGMODE.get_document()
current_heading = d.current_heading()
if not current_heading or \
(direction == Direction.FORWARD and not current_heading.next_sibling) or \
(direction == Direction.BACKWARD and not current_heading.previous_sibling):
return None
cursor_offset = vim.current.window.cursor[0] - (current_heading._orig_start + 1)
l = current_heading.get_parent_list()
if l is None:
raise HeadingDomError(u'Current heading is not properly linked in DOM')
if not including_children:
if current_heading.previous_sibling:
npl = current_heading.previous_sibling.children
for child in current_heading.children:
npl.append(child, taint=False)
elif current_heading.parent:
# if the current heading doesn't have a previous sibling it
# must be the first heading
np = current_heading.parent
for child in current_heading.children:
cls._append_heading(child, np)
else:
# if the current heading doesn't have a parent, its children
# must be added as top level headings to the document
npl = l
for child in current_heading.children[::-1]:
npl.insert(0, child, taint=False)
current_heading.children.remove_slice(0, len(current_heading.children), taint=False)
idx = current_heading.get_index_in_parent_list()
if idx is None:
raise HeadingDomError(u'Current heading is not properly linked in DOM')
offset = 1 if direction == Direction.FORWARD else -1
del l[idx]
l.insert(idx + offset, current_heading)
d.write()
vim.current.window.cursor = (current_heading.start_vim + cursor_offset, \
vim.current.window.cursor[1])
return True
@classmethod
@repeat
@apply_count
def move_heading_upward(cls, including_children=True):
if cls._move_heading(direction=Direction.BACKWARD, including_children=including_children):
if including_children:
return u'OrgMoveSubtreeUpward'
return u'OrgMoveHeadingUpward'
@classmethod
@repeat
@apply_count
def move_heading_downward(cls, including_children=True):
if cls._move_heading(direction=Direction.FORWARD, including_children=including_children):
if including_children:
return u'OrgMoveSubtreeDownward'
return u'OrgMoveHeadingDownward'
def register(self):
u"""
Registration of plugin. Key bindings and other initialization should be done.
"""
settings.set(u'org_improve_split_heading', u'1')
self.keybindings.append(Keybinding(u'<C-S-CR>', Plug(u'OrgNewHeadingAboveNormal', u':silent! py ORGMODE.plugins[u"EditStructure"].new_heading(below=False)<CR>')))
self.menu + ActionEntry(u'New Heading &above', self.keybindings[-1])
self.keybindings.append(Keybinding(u'<S-CR>', Plug(u'OrgNewHeadingBelowNormal', u':silent! py ORGMODE.plugins[u"EditStructure"].new_heading(below=True)<CR>')))
self.menu + ActionEntry(u'New Heading &below', self.keybindings[-1])
self.keybindings.append(Keybinding(u'<C-CR>', Plug(u'OrgNewHeadingBelowAfterChildrenNormal', u':silent! py ORGMODE.plugins[u"EditStructure"].new_heading(below=True, end_of_last_child=True)<CR>')))
self.menu + ActionEntry(u'New Heading below, after &children', self.keybindings[-1])
self.keybindings.append(Keybinding(u'<C-S-CR>', Plug(u'OrgNewHeadingAboveInsert', u'<C-o>:<C-u>silent! py ORGMODE.plugins[u"EditStructure"].new_heading(below=False, insert_mode=True)<CR>', mode=MODE_INSERT)))
self.keybindings.append(Keybinding(u'<S-CR>', Plug(u'OrgNewHeadingBelowInsert', u'<C-o>:<C-u>silent! py ORGMODE.plugins[u"EditStructure"].new_heading(insert_mode=True)<CR>', mode=MODE_INSERT)))
self.keybindings.append(Keybinding(u'<C-CR>', Plug(u'OrgNewHeadingBelowAfterChildrenInsert', u'<C-o>:<C-u>silent! py ORGMODE.plugins[u"EditStructure"].new_heading(insert_mode=True, end_of_last_child=True)<CR>', mode=MODE_INSERT)))
self.menu + Separator()
self.keybindings.append(Keybinding(u'm{', Plug(u'OrgMoveHeadingUpward', u':py ORGMODE.plugins[u"EditStructure"].move_heading_upward(including_children=False)<CR>')))
self.keybindings.append(Keybinding(u'm[[', Plug(u'OrgMoveSubtreeUpward', u':py ORGMODE.plugins[u"EditStructure"].move_heading_upward()<CR>')))
self.menu + ActionEntry(u'Move Subtree &Up', self.keybindings[-1])
self.keybindings.append(Keybinding(u'm}', Plug(u'OrgMoveHeadingDownward', u':py ORGMODE.plugins[u"EditStructure"].move_heading_downward(including_children=False)<CR>')))
self.keybindings.append(Keybinding(u'm]]', Plug(u'OrgMoveSubtreeDownward', u':py ORGMODE.plugins[u"EditStructure"].move_heading_downward()<CR>')))
self.menu + ActionEntry(u'Move Subtree &Down', self.keybindings[-1])
self.menu + Separator()
self.menu + ActionEntry(u'&Copy Heading', u'yah', u'yah')
self.menu + ActionEntry(u'C&ut Heading', u'dah', u'dah')
self.menu + Separator()
self.menu + ActionEntry(u'&Copy Subtree', u'yar', u'yar')
self.menu + ActionEntry(u'C&ut Subtree', u'dar', u'dar')
self.menu + ActionEntry(u'&Paste Subtree', u'p', u'p')
self.menu + Separator()
self.keybindings.append(Keybinding(u'<ah', Plug(u'OrgPromoteHeadingNormal', u':silent! py ORGMODE.plugins[u"EditStructure"].promote_heading(including_children=False)<CR>')))
self.menu + ActionEntry(u'&Promote Heading', self.keybindings[-1])
self.keybindings.append(Keybinding(u'<<', Plug(u'OrgPromoteOnHeadingNormal', u':silent! py ORGMODE.plugins[u"EditStructure"].promote_heading(including_children=False, on_heading=True)<CR>')))
self.keybindings.append(Keybinding(u'<{', u'<Plug>OrgPromoteHeadingNormal', mode=MODE_NORMAL))
self.keybindings.append(Keybinding(u'<ih', u'<Plug>OrgPromoteHeadingNormal', mode=MODE_NORMAL))
self.keybindings.append(Keybinding(u'<ar', Plug(u'OrgPromoteSubtreeNormal', u':silent! py ORGMODE.plugins[u"EditStructure"].promote_heading()<CR>')))
self.menu + ActionEntry(u'&Promote Subtree', self.keybindings[-1])
self.keybindings.append(Keybinding(u'<[[', u'<Plug>OrgPromoteSubtreeNormal', mode=MODE_NORMAL))
self.keybindings.append(Keybinding(u'<ir', u'<Plug>OrgPromoteSubtreeNormal', mode=MODE_NORMAL))
self.keybindings.append(Keybinding(u'>ah', Plug(u'OrgDemoteHeadingNormal', u':silent! py ORGMODE.plugins[u"EditStructure"].demote_heading(including_children=False)<CR>')))
self.menu + ActionEntry(u'&Demote Heading', self.keybindings[-1])
self.keybindings.append(Keybinding(u'>>', Plug(u'OrgDemoteOnHeadingNormal', u':silent! py ORGMODE.plugins[u"EditStructure"].demote_heading(including_children=False, on_heading=True)<CR>')))
		self.keybindings.append(Keybinding(u'>}', u'<Plug>OrgDemoteHeadingNormal', mode=MODE_NORMAL))
		self.keybindings.append(Keybinding(u'>ih', u'<Plug>OrgDemoteHeadingNormal', mode=MODE_NORMAL))
self.keybindings.append(Keybinding(u'>ar', Plug(u'OrgDemoteSubtreeNormal', u':silent! py ORGMODE.plugins[u"EditStructure"].demote_heading()<CR>')))
self.menu + ActionEntry(u'&Demote Subtree', self.keybindings[-1])
self.keybindings.append(Keybinding(u'>]]', u'<Plug>OrgDemoteSubtreeNormal', mode=MODE_NORMAL))
self.keybindings.append(Keybinding(u'>ir', u'<Plug>OrgDemoteSubtreeNormal', mode=MODE_NORMAL))
# other keybindings
self.keybindings.append(Keybinding(u'<C-d>', Plug(u'OrgPromoteOnHeadingInsert', u'<C-o>:silent! py ORGMODE.plugins[u"EditStructure"].promote_heading(including_children=False, on_heading=True, insert_mode=True)<CR>', mode=MODE_INSERT)))
self.keybindings.append(Keybinding(u'<C-t>', Plug(u'OrgDemoteOnHeadingInsert', u'<C-o>:silent! py ORGMODE.plugins[u"EditStructure"].demote_heading(including_children=False, on_heading=True, insert_mode=True)<CR>', mode=MODE_INSERT)))
| {
"content_hash": "ed6c056293d1c18bf7c0930233b581ab",
"timestamp": "",
"source": "github",
"line_count": 391,
"max_line_length": 237,
"avg_line_length": 42.59079283887468,
"alnum_prop": 0.7129045817570407,
"repo_name": "mmcclimon/dotfiles",
"id": "0d9b0263e20dc7d1db9cb125b490bec38eb38d7b",
"size": "16678",
"binary": false,
"copies": "9",
"ref": "refs/heads/main",
"path": "vim/ftplugin/orgmode/plugins/EditStructure.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Emacs Lisp",
"bytes": "33629"
},
{
"name": "Lua",
"bytes": "4629"
},
{
"name": "Perl",
"bytes": "10978"
},
{
"name": "Python",
"bytes": "167374"
},
{
"name": "Shell",
"bytes": "15415"
},
{
"name": "Vim Script",
"bytes": "749664"
},
{
"name": "Vim Snippet",
"bytes": "1856"
},
{
"name": "YASnippet",
"bytes": "3439"
}
],
"symlink_target": ""
} |
try:
from django.conf.urls import patterns, include, url
except ImportError: # django < 1.4
from django.conf.urls.defaults import patterns, include, url
urlpatterns = patterns('reportengine.views',
# Listing of reports
url('^$', 'report_list', name='reports-list'),
# view report redirected to current date format (requires date_field argument)
url('^current/(?P<daterange>(day|week|month|year))/(?P<namespace>[-\w]+)/(?P<slug>[-\w]+)/$',
'current_redirect', name='reports-current'),
# view report redirected to current date format with formatting specified
url('^current/(?P<daterange>(day|week|month|year))/(?P<namespace>[-\w]+)/(?P<slug>[-\w]+)/(?P<output>[-\w]+)/$',
'current_redirect', name='reports-current-format'),
# specify range of report per time (requires date_field)
url('^date/(?P<year>\d+)/(?P<month>\d+)/(?P<day>\d+)/(?P<namespace>[-\w]+)/(?P<slug>[-\w]+)/$',
'day_redirect', name='reports-date-range'),
# specify range of report per time with formatting
url('^date/(?P<year>\d+)/(?P<month>\d+)/(?P<day>\d+)/(?P<namespace>[-\w]+)/(?P<slug>[-\w]+)/(?P<output>[-\w]+)/$',
'day_redirect', name='reports-date-range-format'),
# Show latest calendar of all date accessible reports
url('^calendar/$', 'calendar_current_redirect', name='reports-calendar-current'),
# Show specific month's calendar of reports
url('^calendar/(?P<year>\d+)/(?P<month>\d+)/$', 'calendar_month_view', name='reports-calendar-month'),
    # Show specific day's calendar of reports
url('^calendar/(?P<year>\d+)/(?P<month>\d+)/(?P<day>\d+)/$', 'calendar_day_view', name='reports-calendar-day'),
)
urlpatterns += patterns('reportengine.views',
# View report in first output style
url('^request/(?P<namespace>[-\w]+)/(?P<slug>[-\w]+)/$', 'request_report', name='reports-view'),
# view report in specified output format
#url('^request/(?P<namespace>[-\w]+)/(?P<slug>[-\w]+)/(?P<output>[-\w]+)/$', 'request_report', name='reports-view-format'),
url('^view/(?P<token>[\w\d]+)/$', 'view_report', name='reports-request-view'),
# view report in specified output format
url('^view/(?P<token>[\w\d]+)/(?P<output>[-\w]+)/$', 'view_report_export', name='reports-request-view-format'),
)
| {
"content_hash": "3d0703fc5720f8081737f093bd967673",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 127,
"avg_line_length": 55.90243902439025,
"alnum_prop": 0.6226003490401396,
"repo_name": "dmpayton/django-reportengine",
"id": "d20829254a111c69e404e80a703ef1c74bd1a252",
"size": "2292",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "reportengine/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "10143"
},
{
"name": "Python",
"bytes": "89815"
}
],
"symlink_target": ""
} |
import re
from .mesonlib import MesonException
class ParseException(MesonException):
def __init__(self, text, lineno, colno):
super().__init__(text)
self.lineno = lineno
self.colno = colno
class Token:
def __init__(self, tid, lineno, colno, value):
self.tid = tid
self.lineno = lineno
self.colno = colno
self.value = value
def __eq__(self, other):
if isinstance(other, str):
return self.tid == other
return self.tid == other.tid
class Lexer:
def __init__(self):
self.keywords = {'true', 'false', 'if', 'else', 'elif',
'endif', 'and', 'or', 'not', 'foreach', 'endforeach'}
self.token_specification = [
# Need to be sorted longest to shortest.
('ignore', re.compile(r'[ \t]')),
('id', re.compile('[_a-zA-Z][_0-9a-zA-Z]*')),
('number', re.compile(r'\d+')),
('eol_cont', re.compile(r'\\\n')),
('eol', re.compile(r'\n')),
('multiline_string', re.compile(r"'''(.|\n)*?'''", re.M)),
('comment', re.compile(r'\#.*')),
('lparen', re.compile(r'\(')),
('rparen', re.compile(r'\)')),
('lbracket', re.compile(r'\[')),
('rbracket', re.compile(r'\]')),
('dblquote', re.compile(r'"')),
('string', re.compile(r"'([^'\\]|(\\.))*'")),
('comma', re.compile(r',')),
('plusassign', re.compile(r'\+=')),
('dot', re.compile(r'\.')),
('plus', re.compile(r'\+')),
('dash', re.compile(r'-')),
('star', re.compile(r'\*')),
('percent', re.compile(r'\%')),
('fslash', re.compile(r'/')),
('colon', re.compile(r':')),
('equal', re.compile(r'==')),
('nequal', re.compile(r'\!=')),
('assign', re.compile(r'=')),
('le', re.compile(r'<=')),
('lt', re.compile(r'<')),
('ge', re.compile(r'>=')),
('gt', re.compile(r'>')),
('questionmark', re.compile(r'\?')),
]
def lex(self, code):
lineno = 1
line_start = 0
        loc = 0
par_count = 0
bracket_count = 0
col = 0
while(loc < len(code)):
matched = False
value = None
for (tid, reg) in self.token_specification:
mo = reg.match(code, loc)
if mo:
curline = lineno
col = mo.start()-line_start
matched = True
loc = mo.end()
match_text = mo.group()
if tid == 'ignore' or tid == 'comment':
break
elif tid == 'lparen':
par_count += 1
elif tid == 'rparen':
par_count -= 1
elif tid == 'lbracket':
bracket_count += 1
elif tid == 'rbracket':
bracket_count -= 1
elif tid == 'dblquote':
raise ParseException('Double quotes are not supported. Use single quotes.', lineno, col)
elif tid == 'string':
value = match_text[1:-1].replace(r"\'", "'").replace(r" \\ ".strip(), r" \ ".strip())\
.replace("\\n", "\n")
elif tid == 'multiline_string':
tid = 'string'
value = match_text[3:-3]
lines = match_text.split('\n')
if len(lines) > 1:
lineno += len(lines) - 1
line_start = mo.end() - len(lines[-1])
elif tid == 'number':
value = int(match_text)
elif tid == 'eol' or tid == 'eol_cont':
lineno += 1
line_start = loc
if par_count > 0 or bracket_count > 0:
break
elif tid == 'id':
if match_text in self.keywords:
tid = match_text
else:
value = match_text
yield Token(tid, curline, col, value)
break
if not matched:
raise ParseException('lexer', lineno, col)
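# Illustrative token stream (added for clarity; not part of the original module).
# For the input "x = 3 + 4" the lexer above yields, in order:
#
#   id (value 'x'), assign, number (value 3), plus, number (value 4)
#
# Note that lex() emits no 'eof' token itself; the Parser below synthesises one
# when the generator is exhausted (see Parser.getsym).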
class BooleanNode:
def __init__(self, token, value):
self.lineno = token.lineno
self.colno = token.colno
self.value = value
assert(isinstance(self.value, bool))
class IdNode:
def __init__(self, token):
self.lineno = token.lineno
self.colno = token.colno
self.value = token.value
assert(isinstance(self.value, str))
def __str__(self):
return "Id node: '%s' (%d, %d)." % (self.value, self.lineno, self.colno)
class NumberNode:
def __init__(self, token):
self.lineno = token.lineno
self.colno = token.colno
self.value = token.value
assert(isinstance(self.value, int))
class StringNode:
def __init__(self, token):
self.lineno = token.lineno
self.colno = token.colno
self.value = token.value
assert(isinstance(self.value, str))
def __str__(self):
return "String node: '%s' (%d, %d)." % (self.value, self.lineno, self.colno)
class ArrayNode:
def __init__(self, args):
self.lineno = args.lineno
self.colno = args.colno
self.args = args
class EmptyNode:
def __init__(self):
self.lineno = 0
self.colno = 0
self.value = None
class OrNode:
def __init__(self, lineno, colno, left, right):
self.lineno = lineno
self.colno = colno
self.left = left
self.right = right
class AndNode:
def __init__(self, lineno, colno, left, right):
self.lineno = lineno
self.colno = colno
self.left = left
self.right = right
class ComparisonNode:
def __init__(self, lineno, colno, ctype, left, right):
self.lineno = lineno
self.colno = colno
self.left = left
self.right = right
self.ctype = ctype
class ArithmeticNode:
def __init__(self, lineno, colno, operation, left, right):
self.lineno = lineno
self.colno = colno
self.left = left
self.right = right
self.operation = operation
class NotNode:
def __init__(self, lineno, colno, value):
self.lineno = lineno
self.colno = colno
self.value = value
class CodeBlockNode:
def __init__(self, lineno, colno):
self.lineno = lineno
self.colno = colno
self.lines = []
class IndexNode:
def __init__(self, iobject, index):
self.iobject = iobject
self.index = index
self.lineno = iobject.lineno
self.colno = iobject.colno
class MethodNode:
def __init__(self, lineno, colno, source_object, name, args):
self.lineno = lineno
self.colno = colno
self.source_object = source_object
self.name = name
assert(isinstance(self.name, str))
self.args = args
class FunctionNode:
def __init__(self, lineno, colno, func_name, args):
self.lineno = lineno
self.colno = colno
self.func_name = func_name
assert(isinstance(func_name, str))
self.args = args
class AssignmentNode:
def __init__(self, lineno, colno, var_name, value):
self.lineno = lineno
self.colno = colno
self.var_name = var_name
assert(isinstance(var_name, str))
self.value = value
class PlusAssignmentNode:
def __init__(self, lineno, colno, var_name, value):
self.lineno = lineno
self.colno = colno
self.var_name = var_name
assert(isinstance(var_name, str))
self.value = value
class ForeachClauseNode():
def __init__(self, lineno, colno, varname, items, block):
self.lineno = lineno
self.colno = colno
self.varname = varname
self.items = items
self.block = block
class IfClauseNode():
def __init__(self, lineno, colno):
self.lineno = lineno
self.colno = colno
self.ifs = []
self.elseblock = EmptyNode()
class UMinusNode():
def __init__(self, lineno, colno, value):
self.lineno = lineno
self.colno = colno
self.value = value
class IfNode():
def __init__(self, lineno, colno, condition, block):
self.lineno = lineno
self.colno = colno
self.condition = condition
self.block = block
class TernaryNode():
def __init__(self, lineno, colno, condition, trueblock, falseblock):
self.lineno = lineno
self.colno = colno
self.condition = condition
self.trueblock = trueblock
self.falseblock = falseblock
class ArgumentNode():
def __init__(self, token):
self.lineno = token.lineno
self.colno = token.colno
self.arguments = []
self.kwargs = {}
self.order_error = False
def prepend(self, statement):
if self.num_kwargs() > 0:
self.order_error = True
if not isinstance(statement, EmptyNode):
self.arguments = [statement] + self.arguments
def append(self, statement):
if self.num_kwargs() > 0:
self.order_error = True
if not isinstance(statement, EmptyNode):
self.arguments = self.arguments + [statement]
def set_kwarg(self, name, value):
self.kwargs[name] = value
def num_args(self):
return len(self.arguments)
def num_kwargs(self):
return len(self.kwargs)
def incorrect_order(self):
return self.order_error
def __len__(self):
return self.num_args() # Fixme
comparison_map = {'equal': '==',
'nequal': '!=',
'lt': '<',
'le': '<=',
'gt': '>',
'ge': '>='
}
# Recursive descent parser for Meson's definition language.
# Very basic apart from the fact that we have many precedence
# levels so there are not enough words to describe them all.
# Enter numbering:
#
# 1 assignment
# 2 or
# 3 and
# 4 comparison
# 5 arithmetic
# 6 negation
# 7 funcall, method call
# 8 parentheses
# 9 plain token
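# Illustrative walk-through (not from the original source): for the statement
# "x = 1 + 2 * 3", e1 builds the AssignmentNode, its right-hand side descends
# to level 5 where e5add combines 1 with the result of e5mul, giving
# add(1, mul(2, 3)); the identifier and number tokens themselves are produced
# at level 9 (e9).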
class Parser:
def __init__(self, code):
self.stream = Lexer().lex(code)
self.getsym()
self.in_ternary = False
def getsym(self):
try:
self.current = next(self.stream)
except StopIteration:
self.current = Token('eof', 0, 0, None)
def accept(self, s):
if self.current.tid == s:
self.getsym()
return True
return False
def expect(self, s):
if self.accept(s):
return True
raise ParseException('Expecting %s got %s.' % (s, self.current.tid), self.current.lineno, self.current.colno)
def parse(self):
block = self.codeblock()
self.expect('eof')
return block
def statement(self):
return self.e1()
def e1(self):
left = self.e2()
if self.accept('plusassign'):
value = self.e1()
if not isinstance(left, IdNode):
raise ParseException('Plusassignment target must be an id.', left.lineno, left.colno)
return PlusAssignmentNode(left.lineno, left.colno, left.value, value)
elif self.accept('assign'):
value = self.e1()
if not isinstance(left, IdNode):
raise ParseException('Assignment target must be an id.',
left.lineno, left.colno)
return AssignmentNode(left.lineno, left.colno, left.value, value)
elif self.accept('questionmark'):
if self.in_ternary:
raise ParseException('Nested ternary operators are not allowed.',
left.lineno, left.colno)
self.in_ternary = True
trueblock = self.e1()
self.expect('colon')
falseblock = self.e1()
self.in_ternary = False
return TernaryNode(left.lineno, left.colno, left, trueblock, falseblock)
return left
def e2(self):
left = self.e3()
while self.accept('or'):
left = OrNode(left.lineno, left.colno, left, self.e3())
return left
def e3(self):
left = self.e4()
while self.accept('and'):
left = AndNode(left.lineno, left.colno, left, self.e4())
return left
def e4(self):
left = self.e5()
for nodename, operator_type in comparison_map.items():
if self.accept(nodename):
return ComparisonNode(left.lineno, left.colno, operator_type, left, self.e5())
return left
def e5(self):
return self.e5add()
def e5add(self):
left = self.e5sub()
if self.accept('plus'):
return ArithmeticNode(left.lineno, left.colno, 'add', left, self.e5add())
return left
def e5sub(self):
left = self.e5mod()
if self.accept('dash'):
return ArithmeticNode(left.lineno, left.colno, 'sub', left, self.e5sub())
return left
def e5mod(self):
left = self.e5mul()
if self.accept('percent'):
return ArithmeticNode(left.lineno, left.colno, 'mod', left, self.e5mod())
return left
def e5mul(self):
left = self.e5div()
if self.accept('star'):
return ArithmeticNode(left.lineno, left.colno, 'mul', left, self.e5mul())
return left
def e5div(self):
left = self.e6()
if self.accept('fslash'):
return ArithmeticNode(left.lineno, left.colno, 'div', left, self.e5div())
return left
def e6(self):
if self.accept('not'):
return NotNode(self.current.lineno, self.current.colno, self.e7())
if self.accept('dash'):
return UMinusNode(self.current.lineno, self.current.colno, self.e7())
return self.e7()
def e7(self):
left = self.e8()
if self.accept('lparen'):
args = self.args()
self.expect('rparen')
if not isinstance(left, IdNode):
raise ParseException('Function call must be applied to plain id',
left.lineno, left.colno)
left = FunctionNode(left.lineno, left.colno, left.value, args)
go_again = True
while go_again:
go_again = False
if self.accept('dot'):
go_again = True
left = self.method_call(left)
if self.accept('lbracket'):
go_again = True
left = self.index_call(left)
return left
def e8(self):
if self.accept('lparen'):
e = self.statement()
self.expect('rparen')
return e
elif self.accept('lbracket'):
args = self.args()
self.expect('rbracket')
return ArrayNode(args)
else:
return self.e9()
def e9(self):
t = self.current
if self.accept('true'):
return BooleanNode(t, True)
if self.accept('false'):
return BooleanNode(t, False)
if self.accept('id'):
return IdNode(t)
if self.accept('number'):
return NumberNode(t)
if self.accept('string'):
return StringNode(t)
return EmptyNode()
def args(self):
s = self.statement()
a = ArgumentNode(s)
while not isinstance(s, EmptyNode):
if self.accept('comma'):
a.append(s)
elif self.accept('colon'):
if not isinstance(s, IdNode):
raise ParseException('Keyword argument must be a plain identifier.',
s.lineno, s.colno)
a.set_kwarg(s.value, self.statement())
if not self.accept('comma'):
return a
else:
a.append(s)
return a
s = self.statement()
return a
def method_call(self, source_object):
methodname = self.e9()
if not(isinstance(methodname, IdNode)):
raise ParseException('Method name must be plain id',
self.current.lineno, self.current.colno)
self.expect('lparen')
args = self.args()
self.expect('rparen')
method = MethodNode(methodname.lineno, methodname.colno, source_object, methodname.value, args)
if self.accept('dot'):
return self.method_call(method)
return method
def index_call(self, source_object):
index_statement = self.statement()
self.expect('rbracket')
return IndexNode(source_object, index_statement)
def foreachblock(self):
t = self.current
self.expect('id')
varname = t
self.expect('colon')
items = self.statement()
block = self.codeblock()
return ForeachClauseNode(varname.lineno, varname.colno, varname, items, block)
def ifblock(self):
condition = self.statement()
clause = IfClauseNode(condition.lineno, condition.colno)
block = self.codeblock()
clause.ifs.append(IfNode(clause.lineno, clause.colno, condition, block))
self.elseifblock(clause)
clause.elseblock = self.elseblock()
return clause
def elseifblock(self, clause):
while self.accept('elif'):
s = self.statement()
self.expect('eol')
b = self.codeblock()
clause.ifs.append(IfNode(s.lineno, s.colno, s, b))
def elseblock(self):
if self.accept('else'):
self.expect('eol')
return self.codeblock()
def line(self):
if self.current == 'eol':
return EmptyNode()
if self.accept('if'):
block = self.ifblock()
self.expect('endif')
return block
if self.accept('foreach'):
block = self.foreachblock()
self.expect('endforeach')
return block
return self.statement()
def codeblock(self):
block = CodeBlockNode(self.current.lineno, self.current.colno)
cond = True
while cond:
curline = self.line()
if not isinstance(curline, EmptyNode):
block.lines.append(curline)
cond = self.accept('eol')
return block
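# A minimal end-to-end sketch (not part of the original file), assuming the
# Lexer and Parser defined above:
#
#     block = Parser("x = 1 + 2\n").parse()   # CodeBlockNode
#     assign = block.lines[0]                 # AssignmentNode for 'x'
#     assert isinstance(assign.value, ArithmeticNode)
#     assert assign.value.operation == 'add'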
| {
"content_hash": "85840020ea520e00a9a628602e83ba3c",
"timestamp": "",
"source": "github",
"line_count": 589,
"max_line_length": 117,
"avg_line_length": 32.083191850594226,
"alnum_prop": 0.5164840980049743,
"repo_name": "centricular/meson",
"id": "f593c8e3111101011c9de85e256e614162bd9609",
"size": "19490",
"binary": false,
"copies": "1",
"ref": "refs/heads/gst-msvc",
"path": "mesonbuild/mparser.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "90"
},
{
"name": "Batchfile",
"bytes": "131"
},
{
"name": "C",
"bytes": "69252"
},
{
"name": "C#",
"bytes": "631"
},
{
"name": "C++",
"bytes": "11881"
},
{
"name": "D",
"bytes": "1605"
},
{
"name": "Emacs Lisp",
"bytes": "1226"
},
{
"name": "Fortran",
"bytes": "1359"
},
{
"name": "Groff",
"bytes": "232"
},
{
"name": "Inno Setup",
"bytes": "372"
},
{
"name": "Java",
"bytes": "519"
},
{
"name": "Lex",
"bytes": "110"
},
{
"name": "Objective-C",
"bytes": "462"
},
{
"name": "Objective-C++",
"bytes": "87"
},
{
"name": "Protocol Buffer",
"bytes": "46"
},
{
"name": "Python",
"bytes": "906381"
},
{
"name": "Rust",
"bytes": "372"
},
{
"name": "Shell",
"bytes": "1978"
},
{
"name": "Swift",
"bytes": "972"
},
{
"name": "Vala",
"bytes": "4083"
},
{
"name": "Yacc",
"bytes": "50"
}
],
"symlink_target": ""
} |
from django.core.exceptions import PermissionDenied
from django.http import HttpResponseRedirect
from django.utils.translation import gettext_lazy as _
from django.views.generic import ListView
from django_cradmin import javascriptregistry
class RoleSelectView(javascriptregistry.viewmixin.WithinRoleViewMixin, ListView):
"""
The default view for listing and selecting roles within a cradmin instance.
- If the user has a single role, we redirect to the role-frontpage for that role.
This behavior can be overridden with :meth:`.get_autoredirect_if_single_role`.
- If the user has multiple roles, we list the roles.
- If the user has no roles, we call :meth:`.get_no_roles_response`.
"""
paginate_by = 30
#: Makes the roles queryset available as ``roles`` in the template.
context_object_name = 'roles'
#: The template used to render this view.
template_name = 'django_cradmin/roleselect.django.html'
#: The title of the page. See :meth:`.get_pagetitle`.
pagetitle = _('What would you like to edit?')
#: Redirect if we have a single role? See :meth:`.get_autoredirect_if_single_role`.
autoredirect_if_single_role = True
def get_queryset(self):
return self.request.cradmin_instance.get_rolequeryset()
def get(self, *args, **kwargs):
rolecount = self.get_queryset().count()
if rolecount == 0:
return self.get_no_roles_response(*args, **kwargs)
elif rolecount == 1:
return self.get_single_role_response(*args, **kwargs)
else:
return self.get_multiple_roles_response(*args, **kwargs)
def get_autoredirect_if_single_role(self):
"""
Enables/disables automatic redirect if single role.
Returns the value of :obj:`.autoredirect_if_single_role` by default.
Used by :meth:`.get_single_role_response`.
"""
return self.autoredirect_if_single_role
def get_no_roles_response(self, *args, **kwargs):
"""
Get the response to return if the requesting user has no roles.
Raises :exc:`django.core.exceptions.PermissionDenied` by default.
If you want to do something more elaborate, you can do one of the following:
- Use an HttpResponseRedirect to redirect to some other view/url.
- Call ``return self.get_multiple_roles_response(*args, **kwargs)``.
The template for this view (``django_cradmin/roleselect.django.html``) has a
``no_roles_section`` block. You can extend this template and
override this block to display a custom message. You must, of course,
set this new template as the :obj:`~.RoleSelectView.template_name`.
"""
raise PermissionDenied()
def get_single_role_response(self, *args, **kwargs):
"""
Get the response to return if the requesting user has only one role.
If :meth:`.get_autoredirect_if_single_role` returns ``True``,
we redirect to the rolefrontpage for the role. Otherwise,
we return :meth:`.get_multiple_roles_response`.
"""
if self.get_autoredirect_if_single_role():
only_role = self.get_queryset().first()
return HttpResponseRedirect(self.request.cradmin_instance.rolefrontpage_url(
self.request.cradmin_instance.get_roleid(only_role)))
else:
return super(RoleSelectView, self).get(*args, **kwargs)
def get_multiple_roles_response(self, *args, **kwargs):
"""
Get the response to return if the requesting user has multiple roles.
Just calls the ``get()``-method of the superclass by default.
"""
return super(RoleSelectView, self).get(*args, **kwargs)
def get_pagetitle(self):
"""
Get the page title.
Returns the value of :obj:`.pagetitle` by default.
"""
return self.pagetitle
def get_context_data(self, **kwargs):
context = super(RoleSelectView, self).get_context_data(**kwargs)
context['pagetitle'] = self.get_pagetitle()
context['rolecount'] = self.get_queryset().count()
self.add_javascriptregistry_component_ids_to_context(context=context)
return context
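# A hedged customization sketch (not part of the original module); the
# "course"/"request-access" names below are hypothetical:
#
#     class MyRoleSelectView(RoleSelectView):
#         autoredirect_if_single_role = False
#         pagetitle = _('Choose a course')
#
#         def get_no_roles_response(self, *args, **kwargs):
#             return HttpResponseRedirect('/request-access/')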
| {
"content_hash": "344f3eb6b770e5729958e63be4c7cd06",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 88,
"avg_line_length": 39.87850467289719,
"alnum_prop": 0.657839231310054,
"repo_name": "appressoas/django_cradmin",
"id": "d544eacf3b60dcf35aa4fb1ee09d64de36b72728",
"size": "4267",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_cradmin/views/roleselect.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "192105"
},
{
"name": "JavaScript",
"bytes": "1951677"
},
{
"name": "Python",
"bytes": "771868"
},
{
"name": "SCSS",
"bytes": "679114"
}
],
"symlink_target": ""
} |
from behave import given, when, then
from bass.hubkey import generate_hub_key, parse_hub_key, PARTS, is_hub_key
import re
@given(u'parameter "{param}" is "{value}"')
def set_param(context, param, value):
context.params[param] = value
@given(u'parameter "ids" is array "{ids}"')
def id_is_array(context, ids):
context.params['ids'] = ids.split(',')
@given(u'parameter "{param}" is overwritten to an empty string')
def all_params_valid_except_one(context, param):
context.params[param] = ""
@when(u'I generate a hub key')
def generate_a_hub_key(context):
try:
context.hub_key = generate_hub_key(**context.params)
except AttributeError as context.exception:
pass
except TypeError as context.exception:
pass
except ValueError as context.exception:
pass
@then(u'no exception should be thrown')
def no_exception_thrown(context):
assert not context.exception, (
'Exception not expected. Exception message = {}'.format(context.exception.message)
)
@then(u'a "{exception_type}" for the "{param}" should be thrown')
def value_error_exception(context, exception_type, param):
exc_mapper = {
'value error': ValueError,
'attribute error': AttributeError,
'type error': TypeError
}
msg = '{} should match {}'.format(param, PARTS[param])
assert isinstance(context.exception, exc_mapper[exception_type])
assert context.exception.message == msg
@then(u'the hub key should start with "{start}"')
def entry_in_array_startswith(context, start):
assert context.hub_key.startswith(start), (
'Expected "{}" to start with "{}"'.format(context.hub_key, start)
)
@then(u'the hub key should have a uuid as entity id')
def check_entity_id(context):
uuid_regex = re.compile(r'[0-9a-f]{32}\Z', re.I)
parsed = parse_hub_key(context.hub_key)
assert re.match(uuid_regex, parsed['entity_id'])
@then(u'the hub key should have "{entity_type}" as entity type')
def check_entity_type(context, entity_type):
parsed = parse_hub_key(context.hub_key)
assert parsed['entity_type'] == entity_type
@then(u'a valid hub key should be returned')
def check_valid_hub_key(context):
assert is_hub_key(context.hub_key)
| {
"content_hash": "4701bfce09c6663064004f69086f4813",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 90,
"avg_line_length": 30.324324324324323,
"alnum_prop": 0.678698752228164,
"repo_name": "openpermissions/bass",
"id": "c338ac8350e36df03796ff4cdeacf9e80732a1c5",
"size": "2853",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/behave/features/steps/hubkey.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Gherkin",
"bytes": "1905"
},
{
"name": "Makefile",
"bytes": "4401"
},
{
"name": "Python",
"bytes": "24674"
}
],
"symlink_target": ""
} |
'''
JSON related utilities.
This module provides a few things:
1) A handy function for getting an object down to something that can be
JSON serialized. See to_primitive().
2) Wrappers around loads() and dumps(). The dumps() wrapper will
automatically use to_primitive() for you if needed.
3) This sets up anyjson to use the loads() and dumps() wrappers if anyjson
is available.
'''
import datetime
import functools
import inspect
import itertools
import json
try:
import xmlrpclib
except ImportError:
# NOTE(jd): xmlrpclib is not shipped with Python 3
xmlrpclib = None
import six
from ironic.openstack.common import gettextutils
from ironic.openstack.common import importutils
from ironic.openstack.common import timeutils
netaddr = importutils.try_import("netaddr")
_nasty_type_tests = [inspect.ismodule, inspect.isclass, inspect.ismethod,
inspect.isfunction, inspect.isgeneratorfunction,
inspect.isgenerator, inspect.istraceback, inspect.isframe,
inspect.iscode, inspect.isbuiltin, inspect.isroutine,
inspect.isabstract]
_simple_types = (six.string_types + six.integer_types
+ (type(None), bool, float))
def to_primitive(value, convert_instances=False, convert_datetime=True,
level=0, max_depth=3):
"""Convert a complex object into primitives.
Handy for JSON serialization. We can optionally handle instances,
but since this is a recursive function, we could have cyclical
data structures.
To handle cyclical data structures we could track the actual objects
visited in a set, but not all objects are hashable. Instead we just
track the depth of the object inspections and don't go too deep.
Therefore, convert_instances=True is lossy ... be aware.
"""
# handle obvious types first - order of basic types determined by running
# full tests on nova project, resulting in the following counts:
# 572754 <type 'NoneType'>
# 460353 <type 'int'>
# 379632 <type 'unicode'>
# 274610 <type 'str'>
# 199918 <type 'dict'>
# 114200 <type 'datetime.datetime'>
# 51817 <type 'bool'>
# 26164 <type 'list'>
# 6491 <type 'float'>
# 283 <type 'tuple'>
# 19 <type 'long'>
if isinstance(value, _simple_types):
return value
if isinstance(value, datetime.datetime):
if convert_datetime:
return timeutils.strtime(value)
else:
return value
# value of itertools.count doesn't get caught by nasty_type_tests
# and results in infinite loop when list(value) is called.
if type(value) == itertools.count:
return six.text_type(value)
# FIXME(vish): Workaround for LP bug 852095. Without this workaround,
# tests that raise an exception in a mocked method that
# has a @wrap_exception with a notifier will fail. If
# we up the dependency to 0.5.4 (when it is released) we
# can remove this workaround.
if getattr(value, '__module__', None) == 'mox':
return 'mock'
if level > max_depth:
return '?'
# The try block may not be necessary after the class check above,
# but just in case ...
try:
recursive = functools.partial(to_primitive,
convert_instances=convert_instances,
convert_datetime=convert_datetime,
level=level,
max_depth=max_depth)
if isinstance(value, dict):
return dict((k, recursive(v)) for k, v in value.iteritems())
elif isinstance(value, (list, tuple)):
return [recursive(lv) for lv in value]
# It's not clear why xmlrpclib created their own DateTime type, but
# for our purposes, make it a datetime type which is explicitly
# handled
if xmlrpclib and isinstance(value, xmlrpclib.DateTime):
value = datetime.datetime(*tuple(value.timetuple())[:6])
if convert_datetime and isinstance(value, datetime.datetime):
return timeutils.strtime(value)
elif isinstance(value, gettextutils.Message):
return value.data
elif hasattr(value, 'iteritems'):
return recursive(dict(value.iteritems()), level=level + 1)
elif hasattr(value, '__iter__'):
return recursive(list(value))
elif convert_instances and hasattr(value, '__dict__'):
# Likely an instance of something. Watch for cycles.
# Ignore class member vars.
return recursive(value.__dict__, level=level + 1)
elif netaddr and isinstance(value, netaddr.IPAddress):
return six.text_type(value)
else:
if any(test(value) for test in _nasty_type_tests):
return six.text_type(value)
return value
except TypeError:
# Class objects are tricky since they may define something like
# __iter__ defined but it isn't callable as list().
return six.text_type(value)
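# Illustrative sketch (not part of the original module): because dumps() below
# passes to_primitive as the JSON "default" hook, values such as datetimes or
# sets can be serialized directly:
#
#     dumps({'when': datetime.datetime(2014, 1, 1), 'tags': set(['a'])})
#     # -> roughly '{"when": "2014-01-01T00:00:00.000000", "tags": ["a"]}'
#
# (the exact datetime format comes from timeutils.strtime).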
def dumps(value, default=to_primitive, **kwargs):
return json.dumps(value, default=default, **kwargs)
def loads(s):
return json.loads(s)
def load(s):
return json.load(s)
try:
import anyjson
except ImportError:
pass
else:
anyjson._modules.append((__name__, 'dumps', TypeError,
'loads', ValueError, 'load'))
anyjson.force_implementation(__name__)
| {
"content_hash": "1c74b95e5d0e12c482eabebf56976c3a",
"timestamp": "",
"source": "github",
"line_count": 161,
"max_line_length": 79,
"avg_line_length": 34.93788819875776,
"alnum_prop": 0.6264888888888889,
"repo_name": "JioCloud/ironic",
"id": "74db2e90b7e10651358e7f28d95e875971f53c23",
"size": "6440",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ironic/openstack/common/jsonutils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "1640165"
}
],
"symlink_target": ""
} |
import warnings
from collections.abc import Callable
from django.conf import settings
from django.utils.module_loading import import_string
def su_login_callback(user):
if hasattr(settings, "SU_LOGIN"):
warnings.warn(
"SU_LOGIN is deprecated, use SU_LOGIN_CALLBACK",
DeprecationWarning,
)
func = getattr(settings, "SU_LOGIN_CALLBACK", None)
if func is not None:
if not isinstance(func, Callable):
func = import_string(func)
return func(user)
return user.has_perm("auth.change_user")
def custom_login_action(request, user):
func = getattr(settings, "SU_CUSTOM_LOGIN_ACTION", None)
if func is None:
return False
if not isinstance(func, Callable):
func = import_string(func)
func(request, user)
return True
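# A hedged configuration sketch (not part of this module); the "myapp" paths
# are hypothetical:
#
#     # settings.py
#     SU_LOGIN_CALLBACK = "myapp.su_callbacks.can_su"
#
#     # myapp/su_callbacks.py
#     def can_su(user):
#         return user.is_active and user.is_staff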
| {
"content_hash": "b61d3630f1c38f69dc0be21cacb85272",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 60,
"avg_line_length": 26,
"alnum_prop": 0.65625,
"repo_name": "PetrDlouhy/django-su",
"id": "160993be44b5a7c68b7e0de6539b73ec5bec65ff",
"size": "857",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "django_su/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "2997"
},
{
"name": "Python",
"bytes": "29575"
}
],
"symlink_target": ""
} |
from networkx import DiGraph, dijkstra_path
import os.path, sys
from pydispatch import dispatcher
try:
import xml.etree.cElementTree as ElementTree
except ImportError:
import xml.etree.ElementTree as ElementTree
import inspect, marshal, types
from object import Object
from region import Region
from pathway import Pathway
from neuron import Neuron
from neurite import Neurite
from arborization import Arborization
from synapse import Synapse
from gap_junction import GapJunction
from stimulus import Stimulus
from muscle import Muscle
from innervation import Innervation
from attribute import Attribute
try:
import pydot # pylint: disable=F0401,W0611
except ImportError:
pydot = None
class Network:
def __init__(self):
"""
Networks are containers for all :class:`objects <Network.Object.Object>` that exist in a neural circuit.
"""
self.graph = DiGraph()
self.objects = []
self.idDict = {} # TODO: weak ref dict?
self.displays = []
self._nextUniqueId = -1
self._savePath = None
self._attributes = []
self._displaysAreSynchronized = True
self._weightingFunction = None
self._bulkLoading = False
self._bulkAddObjects = []
self._modified = False
@classmethod
def _fromXMLElement(cls, xmlElement):
network = cls()
network.setBulkLoading(True)
# Load the classes in such an order that any referenced objects are guaranteed to have already been created.
for moduleName, className in [('region', 'Region'), ('pathway', 'Pathway'), ('neuron', 'Neuron'), ('muscle', 'Muscle'), ('arborization', 'Arborization'), ('innervation', 'Innervation'), ('gap_junction', 'GapJunction'), ('synapse', 'Synapse'), ('stimulus', 'Stimulus')]:
elementModule = getattr(sys.modules['network'], moduleName)
elementClass = getattr(elementModule, className)
for element in xmlElement.findall(className):
networkObject = elementClass._fromXMLElement(network, element)
if networkObject is not None:
network.addObject(networkObject)
weightingFunctionElement = xmlElement.find('WeightingFunction')
if weightingFunctionElement is not None:
funcType = weightingFunctionElement.get('type')
funcName = weightingFunctionElement.get('name')
if funcType == 'source':
exec(weightingFunctionElement.text)
network._weightingFunction = eval(funcName)
elif funcType == 'marshal':
code = marshal.loads(eval(weightingFunctionElement.text))
network._weightingFunction = types.FunctionType(code, globals(), funcName or 'weightingFunction')
else:
raise ValueError, gettext('Unknown weighting function type: %s') % (funcType)
for element in xmlElement.findall('Attribute'):
attribute = Attribute._fromXMLElement(network, element)
if attribute is not None:
network._attributes.append(attribute)
network.setBulkLoading(False)
return network
def _toXMLElement(self, parentElement):
networkElement = ElementTree.SubElement(parentElement, 'Network')
for networkObject in self.objects:
# Nested regions are handled by their parents and neurites are handled by their neurons.
if not (isinstance(networkObject, Region) and networkObject.parentRegion is not None) and not isinstance(networkObject, Neurite):
objectElement = networkObject._toXMLElement(networkElement)
if objectElement is None:
pass # TODO: are there any cases where this is NOT an error?
if self._weightingFunction:
weightingFunctionElement = ElementTree.SubElement(networkElement, 'WeightingFunction')
weightingFunctionElement.set('name', self._weightingFunction.func_name)
# First try to get the function source and if that fails then marshal the byte code.
try:
source = inspect.getsource(self._weightingFunction)
weightingFunctionElement.text = source
weightingFunctionElement.set('type', 'source')
except IOError:
weightingFunctionElement.text = repr(marshal.dumps(self._weightingFunction.func_code))
weightingFunctionElement.set('type', 'marshal')
for attribute in self._attributes:
attribute._toXMLElement(networkElement)
return networkElement
def _toScriptFile(self, scriptFile, scriptRefs):
if len(self._attributes) > 0:
scriptFile.write(gettext('# Create the network') + '\n\n')
for attribute in self._attributes:
attribute._toScriptFile(scriptFile, scriptRefs)
if self._weightingFunction:
# First try to get the function source and if that fails then marshal the byte code.
scriptFile.write(gettext('# Add the weighting function') + '\n\n')
funcName = self._weightingFunction.func_name
try:
source = inspect.getsource(self._weightingFunction)
scriptFile.write(source + '\n\nnetwork.setWeightingFunction(' + funcName + ')\n\n')
except IOError:
scriptFile.write('import marshal, types\n')
scriptFile.write('code = marshal.loads(' + repr(marshal.dumps(self._weightingFunction.func_code)) + ')\n')
scriptFile.write('network.setWeightingFunction(types.FunctionType(code, globals(), \'' + funcName + '\'))\n\n')
# Add each network object to the script in an order that guarantees dependent objects will already have been added.
# Neurites will be added by their neurons, sub-regions by their root region.
for objectClass in (Region, Pathway, Muscle, Neuron, Arborization, GapJunction, Innervation, Synapse, Stimulus):
objects = self.objectsOfClass(objectClass)
if len(objects) > 0:
scriptFile.write('\n# ' + gettext('Create each %s') % (objectClass.displayName().lower()) + '\n\n')
for networkObject in objects:
if networkObject._includeInScript(atTopLevel = True):
networkObject._toScriptFile(scriptFile, scriptRefs)
def setBulkLoading(self, bulkLoading):
"""
Indicate whether or not a large quantity of objects are being added to the network.
>>> network.setBulkLoading(True)
>>> # ... add lots of objects ...
>>> network.setBulkLoading(False)
If bulk loading is enabled then various actions are delayed to make loading faster.
"""
if bulkLoading != self._bulkLoading:
self._bulkLoading = bulkLoading
if self._bulkLoading == False:
self._updateGraph()
if any(self._bulkAddObjects):
dispatcher.send('addition', self, affectedObjects = self._bulkAddObjects)
self._bulkAddObjects = []
dispatcher.send(('set', 'bulkLoading'), self)
def setSavePath(self, path):
if path != self._savePath:
self._savePath = path
dispatcher.send(('set', 'savePath'), self)
def savePath(self):
return self._savePath
def name(self):
if self._savePath is None:
return gettext('Untitled Network')
else:
return os.path.splitext(os.path.basename(self._savePath))[0]
def _generateUniqueId(self):
self._nextUniqueId += 1
return self._nextUniqueId
def findObject(self, objectClass, name = None, default = False):
"""
Return the first object of the given class with the given name.
>>> neuron = network.findObject(Neuron, 'AVAL')
Returns an :class:`Object <Network.Object.Object>` or None if there are no matching objects.
If default is True then each object's :meth:`defaultName() <Network.Object.Object.defaultName>` will be queried instead of its name.
"""
if name is not None:
for networkObject in self.objects:
if isinstance(networkObject, objectClass) and ((not default and networkObject.name == name) or (default and networkObject.defaultName() == name)):
return networkObject
return None
def findObjects(self, objectClass, name = None, default = False):
"""
Return all objects of the given class with the given name.
>>> neurons = network.findObjects(Neuron, 'AVAL')
Returns a list of :class:`Object <Network.Object.Object>` or None if there are no matching objects.
If default is True then each object's :meth:`defaultName() <Network.Object.Object.defaultName>` will be queried instead of its name.
"""
objects = []
if name is not None:
for networkObject in self.objects:
if isinstance(networkObject, objectClass) and ((not default and networkObject.name == name) or (default and networkObject.defaultName() == name)):
objects.append(networkObject)
return objects if objects else None
def findObjectsRegex(self, objectClass, nameRegex = None, default = False):
"""
Return all objects of the given class whose name matches the regular expression.
>>> neurons = network.findObjectsRegex(Neuron, 'AVA.*')
Returns a list of :class:`Object <Network.Object.Object>` or None if there are no matching objects.
"""
from re import search
matchingObjects = []
if nameRegex is not None:
for networkObject in self.objects:
if isinstance(networkObject, objectClass) and ((not default and search(nameRegex, networkObject.name)) or (default and search(nameRegex, networkObject.defaultName()))):
matchingObjects.append(networkObject)
return matchingObjects if matchingObjects else None
def createRegion(self, addSubTerms = False, *args, **keywordArgs):
"""
Create a new region optionally associated with an ontology term.
>>> region = network.createRegion(name = 'Glomerulus 2')
To associate the region with an ontology term pass in a term from an ontology in the library:
>>> flyBrainOnt = library.ontology('flybrain')
>>> ellipsoidBody = network.createRegion(ontologyTerm = flyBrainOnt.findTerm(name = 'Ellipsoid body'))
If addSubTerms is true then sub-regions will be created for all sub-terms in the ontology.
Returns the :class:`region <Network.Region.Region>` that is created.
"""
region = Region(self, *args, **keywordArgs)
self.addObject(region)
if region.ontologyTerm is not None and addSubTerms:
for term in region.ontologyTerm.parts:
self.createRegion(ontologyTerm = term, parentRegion = region, addSubTerms = True)
return region
def findRegion(self, name = None):
"""
Find the first region with the given name.
>>> region = network.findRegion('Ellipsoid body')
Returns a :class:`region <Network.Region.Region>` or None if there are no regions with the name.
"""
return self.findObject(Region, name)
def regions(self):
"""
Return a list of all :class:`regions <Network.Region.Region>` in the network.
>>> for region in network.regions():
... display.setVisibleColor(region, (1.0, 0.0, 0.0))
An empty list will be returned if there are no regions in the network.
"""
return self.objectsOfClass(Region)
def pathways(self):
"""
Return a list of all :class:`pathways <Network.Pathway.Pathway>` in the network.
>>> for pathway in network.pathways():
... display.setVisibleColor(pathway, (1.0, 0.0, 0.0))
An empty list will be returned if there are no pathways in the network.
"""
return self.objectsOfClass(Pathway)
def createNeuron(self, *args, **keywordArgs):
"""
Create a new neuron.
>>> neuron = network.createNeuron(name = 'AVAL')
Returns the :class:`neuron <Network.Neuron.Neuron>` that is created.
"""
neuron = Neuron(self, *args, **keywordArgs)
self.addObject(neuron)
return neuron
def findNeuron(self, name = None):
"""
Find the first neuron with the given name.
>>> neuron = network.findNeuron('AVAL')
Returns a :class:`neuron <Network.Neuron.Neuron>` or None if there are no neurons with the name.
"""
return self.findObject(Neuron, name)
def neurons(self):
"""
Return a list of all :class:`neurons <Network.Neuron.Neuron>` in the network.
>>> for neuron in network.neurons():
... neuron.setHasFunction(Neuron.Function.SENSORY, False)
An empty list will be returned if there are no neurons in the network.
"""
return self.objectsOfClass(Neuron)
def neurites(self):
"""
Return a list of all :class:`neurites <Network.Neurite.Neurite>` in the network.
>>> for neurite in network.neurites():
... neurite.setPathway(None)
An empty list will be returned if there are no neurites in the network.
"""
return self.objectsOfClass(Neurite)
def arborizations(self):
"""
Return a list of all :class:`arborizations <Network.Arborization.Arborization>` in the network.
>>> for arborization in network.arborizations():
... display.setVisibleShape(arborization, shapes['Cone'])
An empty list will be returned if there are no arborizations in the network.
"""
return self.objectsOfClass(Arborization)
def gapJunctions(self):
"""
Return a list of all :class:`gap junctions <Network.GapJunction.GapJunction>` in the network.
>>> for gapJunction in network.gapJunctions():
... display.setVisibleColor(gapJunction, (0, 0, 0))
An empty list will be returned if there are no gap junctions in the network.
"""
return self.objectsOfClass(GapJunction)
def innervations(self):
"""
Return a list of all :class:`innervations <Network.Innervation.Innervation>` in the network.
>>> for innervation in network.innervations():
... display.setVisibleWeight(innervation, 2.0)
An empty list will be returned if there are no innervations in the network.
"""
return self.objectsOfClass(Innervation)
def synapses(self):
"""
Return a list of all :class:`chemical synapses <Network.Synapse.Synapse>` in the network.
>>> for synapse in network.synapses():
... synapse.activation = None
An empty list will be returned if there are no chemical synapses in the network.
"""
return self.objectsOfClass(Synapse)
def createStimulus(self, *args, **keywordArgs):
"""
Create a new stimulus. DEPRECATED: Call :meth:`stimulate() <Network.Object.Object.stimulate>` on the desired target object instead.
>>> stimulus = network.createStimulus(target = neuron1, modality = library.modality('light'))
Returns the :class:`stimulus <Network.Stimulus.Stimulus>` that is created.
"""
target = keywordArgs['target']
del keywordArgs['target']
return target.stimulate(*args, **keywordArgs)
def findStimulus(self, name = None):
"""
Find the first stimulus with the given name.
>>> stimulus = network.findStimulus('Light')
Returns a :class:`stimulus <Network.Stimulus.Stimulus>` or None if there are no stimuli with the name.
"""
return self.findObject(Stimulus, name)
def stimuli(self):
"""
Return a list of all :class:`stimuli <Network.Stimulus.Stimulus>` in the network.
>>> for stimulus in network.stimuli():
... if stimulus.modality == library.modality('light'):
... display.setVisibleColor(stimulus, (1, 1, 1))
An empty list will be returned if there are no stimuli in the network.
"""
return self.objectsOfClass(Stimulus)
def createMuscle(self, *args, **keywordArgs):
"""
Create a new muscle.
>>> muscle = network.createMuscle(name = 'M1')
Returns the :class:`muscle <Network.Muscle.Muscle>` that is created.
"""
muscle = Muscle(self, *args, **keywordArgs)
self.addObject(muscle)
return muscle
def findMuscle(self, name = None):
"""
Find the first muscle with the given name.
>>> muscle = network.findMuscle('M1')
Returns a :class:`muscle <Network.Muscle.Muscle>` or None if there are no muscles with the name.
"""
return self.findObject(Muscle, name)
def muscles(self):
"""
Return a list of all :class:`muscles <Network.Muscle.Muscle>` in the network.
>>> for muscle in network.muscles():
... display.setVisibleOpacity(muscle, 0.5)
An empty list will be returned if there are no muscles in the network.
"""
return self.objectsOfClass(Muscle)
def _updateGraph(self, objectToUpdate = None):
if objectToUpdate is None:
# Rebuild the entire graph.
objectsToUpdate = self.objects
self.graph.clear()
else:
# Just rebuild the connections to the one object.
objectsToUpdate = [objectToUpdate]
# Remove the object if it was there before. This will also delete any edges from the node.
objectId = objectToUpdate.networkId
if objectId in self.graph:
self.graph.remove_node(objectId)
# Maintain a temporary cache of weights so that rebuilding the whole graph doesn't take so long.
objectWeights = {}
def weightOfObject(weightedObject):
if weightedObject.networkId in objectWeights:
objectWeight = objectWeights[weightedObject.networkId]
else:
objectWeight = self.weightOfObject(weightedObject)
objectWeights[weightedObject.networkId] = objectWeight
return objectWeight
for objectToUpdate in objectsToUpdate:
objectId = objectToUpdate.networkId
# (Re-)Add the object to the graph.
self.graph.add_node(objectId)
# Get the weight of this object.
objectWeight = weightOfObject(objectToUpdate)
# Add the connections to other objects already in the graph.
# (Each connection to an object not in the graph will be added when that object is added.)
# The weight of each edge is the average of the weights of the two objects it connects.
inputIds = set([objectInput.networkId for objectInput in objectToUpdate.inputs(recurse = False)])
outputIds = set([objectOutput.networkId for objectOutput in objectToUpdate.outputs(recurse = False)])
unknownIds = set([objectInput.networkId for objectInput in objectToUpdate.connections(recurse = False)]).difference(inputIds).difference(outputIds)
for inputId in inputIds.union(unknownIds):
if inputId in self.graph:
otherWeight = weightOfObject(self.objectWithId(inputId))
self.graph.add_edge(inputId, objectId, weight = (objectWeight + otherWeight) / 2.0)
for outputId in outputIds.union(unknownIds):
if outputId in self.graph:
otherWeight = weightOfObject(self.objectWithId(outputId))
self.graph.add_edge(objectId, outputId, weight = (objectWeight + otherWeight) / 2.0)
def _objectChanged(self, sender):
if not self._bulkLoading:
self._updateGraph(sender)
if not self._modified:
self._modified = True
dispatcher.send(('set', 'modified'), self)
def simplifiedGraph(self):
"""
Return a simplified version of the NetworkX representation of the network.
This version of the network will have far fewer nodes but will not accurately model edges with more than two end points (hyperedges). This speeds processing when using NetworkX's algorithms.
"""
def addEdge(graph, object1, object2, weight):
node1 = object1.networkId
node2 = object2.networkId
if node1 in graph and node2 in graph[node1]:
if weight < graph[node1][node2]['weight']:
# Use a smaller weight for an existing edge.
graph[node1][node2]['weight'] = weight
else:
# Create a new edge.
graph.add_edge(node1, node2, weight = weight)
simplifiedGraph = DiGraph()
# In self.graph edges are actually nodes to support hyperedges. Convert these to standard edges in the simplified graph.
# TODO: make this object type independent
for arborization in self.arborizations():
if arborization.sendsOutput:
addEdge(simplifiedGraph, arborization.neurite.neuron(), arborization.region, self.weightOfObject(arborization))
if arborization.receivesInput:
addEdge(simplifiedGraph, arborization.region, arborization.neurite.neuron(), self.weightOfObject(arborization))
for synapse in self.synapses():
for postPartner in synapse.postSynapticPartners:
if isinstance(postPartner, Neurite):
postPartner = postPartner.neuron()
addEdge(simplifiedGraph, synapse.preSynapticNeurite.neuron(), postPartner, self.weightOfObject(synapse))
for gapJunction in self.gapJunctions():
neurites = gapJunction.neurites()
addEdge(simplifiedGraph, neurites[0].neuron(), neurites[1].neuron(), self.weightOfObject(gapJunction))
addEdge(simplifiedGraph, neurites[1].neuron(), neurites[0].neuron(), self.weightOfObject(gapJunction))
for innervation in self.innervations():
addEdge(simplifiedGraph, innervation.neurite.neuron(), innervation.muscle, self.weightOfObject(innervation))
for pathway in self.pathways():
region1, region2 = pathway.regions()
weight = self.weightOfObject(pathway)
if pathway.region1Projects:
addEdge(simplifiedGraph, region1, region2, weight)
if pathway.region2Projects:
addEdge(simplifiedGraph, region2, region1, weight)
for stimulus in self.stimuli():
addEdge(simplifiedGraph, stimulus, stimulus.target.rootObject(), self.weightOfObject(stimulus))
return simplifiedGraph
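# Illustrative usage (not part of the original file): the simplified graph is a
# plain networkx DiGraph keyed by networkId, so standard algorithms apply:
#
#     >>> import networkx
#     >>> networkx.shortest_path(network.simplifiedGraph(), neuron.networkId, muscle.networkId)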
def setModified(self, modified):
"""
Set whether or not this network is dirty and needs to be saved.
"""
if self._modified != modified:
self._modified = modified
dispatcher.send(('set', 'modified'), self)
def isModified(self):
"""
Return whether the network has been modified and needs to be saved.
"""
return self._modified
def addObject(self, objectToAdd):
if objectToAdd.networkId in self.idDict:
raise ValueError, gettext('All objects in a network must have unique identifiers.')
self.objects.append(objectToAdd)
self.idDict[objectToAdd.networkId] = objectToAdd
if objectToAdd.networkId > self._nextUniqueId:
self._nextUniqueId = objectToAdd.networkId
# Update the NetworkX graph representation of the object and its connections.
if not self._bulkLoading:
self._updateGraph(objectToAdd)
# Watch for any changes to the object so we can update our dirty state and the graph.
dispatcher.connect(self._objectChanged, dispatcher.Any, objectToAdd)
# Let anyone who cares know that the network was changed.
if self._bulkLoading:
self._bulkAddObjects += [objectToAdd]
else:
dispatcher.send('addition', self, affectedObjects = [objectToAdd])
def objectWithId(self, objectId):
if (isinstance(objectId, str) or isinstance(objectId, unicode)) and objectId.isdigit():
objectId = int(objectId)
return self.idDict[objectId] if objectId in self.idDict else None
def objectsOfClass(self, objectClass):
objects = []
for networkObject in self.objects:
if isinstance(networkObject, objectClass):
objects.append(networkObject)
return objects
def setWeightingFunction(self, weightingFunction = None):
"""
Set a function to be used to calculate the weight of objects in the network.
The function should accept a single argument (an :class:`object <network.object.Object>` in the network) and return a floating point value indicating the weight of the object. An object with a higher weight is considered more expensive to traverse.
"""
if weightingFunction is not None and not callable(weightingFunction):
raise ValueError, gettext('The function passed to setWeightingFunction must be callable.')
if weightingFunction != self._weightingFunction:
self._weightingFunction = weightingFunction
self._updateGraph()
dispatcher.send(('set', 'weightingFunction'), self)
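# Illustrative example (not part of the original file): a weighting function
# just maps a network object to a float, e.g. making gap junctions cheaper to
# traverse than everything else.
#
#     >>> network.setWeightingFunction(lambda obj: 0.5 if isinstance(obj, GapJunction) else 1.0)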
def weightingFunction(self):
"""
Return the function being used to calculate the weights of objects in the network.
If no function has been set then None will be returned.
"""
return self._weightingFunction
def weightOfObject(self, weightedObject):
"""
Return the weight of the indicated object or 1.0 if no weighting function has been set.
"""
return 1.0 if not self._weightingFunction else self._weightingFunction(weightedObject)
def shortestPath(self, startObject, endObject):
"""
Return one of the shortest paths through the :class:`network <Network.Network.Network>` from the first object to the second.
Returns a list of objects in the path from the first object to the second. If the second object cannot be reached from the first then an empty list will be returned.
"""
if not isinstance(startObject, Object) or startObject.network != self or not isinstance(endObject, Object) or endObject.network != self:
raise ValueError, 'The objects passed to shortestPath() must be from the same network.'
path = []
try:
nodeList = dijkstra_path(self.graph, startObject.networkId, endObject.networkId)
except:
nodeList = []
for nodeID in nodeList:
pathObject = self.objectWithId(nodeID)
if pathObject is not startObject:
path.append(pathObject)
return path
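# Example (not from the original source), following the docstring above:
#
#     >>> path = network.shortestPath(network.findNeuron('AVAL'), network.findMuscle('M1'))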
def removeObject(self, networkObject):
"""
Remove the indicated object and any dependent objects from the network and any displays.
>>> network.removeObject(network.findNeuron('AVAL'))
"""
if networkObject in self.objects:
# Determine all of the objects that will need to be removed
objectsToRemove = set([networkObject])
objectsToInspect = [networkObject]
while any(objectsToInspect):
objectToInspect = objectsToInspect.pop(0)
dependentObjects = set(objectToInspect.dependentObjects())
objectsToInspect += list(dependentObjects.difference(objectsToRemove))
objectsToRemove = objectsToRemove.union(dependentObjects)
# Remove all of the objects.
for objectToRemove in objectsToRemove:
objectToRemove.disconnectFromNetwork()
self.objects.remove(objectToRemove)
del self.idDict[objectToRemove.networkId]
# Keep the NetworkX graph in sync.
if objectToRemove.networkId in self.graph:
self.graph.remove_node(objectToRemove.networkId)
# Let anyone who cares know that the network was changed.
dispatcher.send('deletion', self, affectedObjects = objectsToRemove)
def removeAllObjects(self):
"""
Remove all objects from the network and any displays.
"""
removedObjects = list(self.objects)
for networkObject in self.objects:
networkObject.network = None
self.objects = []
self.idDict = {}
self.graph.clear()
# Let anyone who cares know that the network was changed.
dispatcher.send('deletion', self, affectedObjects = removedObjects)
def addDisplay(self, display):
self.displays.append(display)
dispatcher.connect(self._synchronizeDisplays, ('set', 'selection'), display)
def removeDisplay(self, display):
self.displays.remove(display)
dispatcher.disconnect(self._synchronizeDisplays, ('set', 'selection'), display)
def setSynchronizeDisplays(self, synchronize):
if synchronize != self._displaysAreSynchronized:
self._displaysAreSynchronized = synchronize
if synchronize and any(self.displays):
self._synchronizeDisplays(None, self.displays[0])
def _synchronizeDisplays(self, sender):
if self._displaysAreSynchronized:
selection = sender.selectedObjects()
for display in self.displays:
if display != sender:
display.selectObjects(selection)
def addAttribute(self, name = None, type = None, value = None): # pylint: disable=W0622
"""
Add a user-defined attribute to this network.
>>> network.addAttribute('Preliminary', Attribute.BOOLEAN_TYPE, True)
The type parameter should be one of the :class:`Attribute.*_TYPE <Network.Attribute.Attribute>` values.
Returns the attribute object that is created.
"""
if name is None or type is None or value is None:
raise ValueError, gettext('The name, type and value parameters must be specified when adding an attribute.')
if not isinstance(name, str):
raise TypeError, 'The name parameter passed to addAttribute() must be a string.'
if type not in Attribute.TYPES:
raise TypeError, 'The type parameter passed to addAttribute() must be one of the Attribute.*_TYPE values.'
# TODO: validate value based on the type?
attribute = Attribute(self, name, type, value)
self._attributes.append(attribute)
dispatcher.send(('set', 'attributes'), self)
return attribute
def getAttribute(self, name):
"""
Return the first user-defined :class:`attribute <Network.Attribute.Attribute>` of this network with the given name or None if there is no matching attribute.
>>> creationDate = network.getAttribute('Creation Date').value()
"""
for attribute in self._attributes:
if attribute.name() == name:
return attribute
return None
def getAttributes(self, name = None):
"""
Return a list of all user-defined :class:`attributes <Network.Attribute.Attribute>` of this network or only those with the given name.
>>> reviewers = [reviewer.value() for reviewer in network.getAttributes('Reviewed By')]
If there are no attributes then an empty list will be returned.
"""
attributes = []
for attribute in self._attributes:
if name == None or attribute.name() == name:
attributes += [attribute]
return attributes
def removeAttribute(self, attribute):
"""
Remove the given attribute from the network.
"""
if not isinstance(attribute, Attribute) or not attribute in self._attributes:
raise ValueError, 'The attribute passed to removeAttribute() must be an existing attribute of the network.'
self._attributes.remove(attribute)
dispatcher.send(('set', 'attributes'), self)
| {
"content_hash": "d5309f19709174d67aef448b7c6b0057",
"timestamp": "",
"source": "github",
"line_count": 849,
"max_line_length": 277,
"avg_line_length": 40.266195524146056,
"alnum_prop": 0.6073538875563096,
"repo_name": "JaneliaSciComp/Neuroptikon",
"id": "10775cd86e4868857ba068762b04488d45cbcaf4",
"size": "34416",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Source/network/network.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "GLSL",
"bytes": "3048"
},
{
"name": "HTML",
"bytes": "97998"
},
{
"name": "Inno Setup",
"bytes": "2349"
},
{
"name": "Python",
"bytes": "8142986"
}
],
"symlink_target": ""
} |
from tests.base_unittest import BaseUnitTest
from pypokerengine.engine.card import Card
from pypokerengine.engine.player import Player
from pypokerengine.engine.poker_constants import PokerConstants as Const
from nose.tools import *
class PlayerTest(BaseUnitTest):
def setUp(self):
self.player = Player("uuid", 100)
def test_add_holecard(self):
cards = [Card.from_id(cid) for cid in range(1,3)]
self.player.add_holecard(cards)
self.true(cards[0] in self.player.hole_card)
self.true(cards[1] in self.player.hole_card)
@raises(ValueError)
def test_add_single_hole_card(self):
self.player.add_holecard([Card.from_id(1)])
@raises(ValueError)
def test_add_too_many_hole_card(self):
self.player.add_holecard([Card.from_id(cid) for cid in range(1,4)])
@raises(ValueError)
def test_add_hole_card_twice(self):
self.player.add_holecard([Card.from_id(cid) for cid in range(1,3)])
self.player.add_holecard([Card.from_id(cid) for cid in range(1,3)])
def test_clear_holecard(self):
self.player.add_holecard([Card.from_id(cid) for cid in range(1,3)])
self.player.clear_holecard()
self.eq(0, len(self.player.hole_card))
def test_append_chip(self):
self.player.append_chip(10)
self.eq(110, self.player.stack)
def test_collect_bet(self):
self.player.collect_bet(10)
self.eq(90, self.player.stack)
@raises(ValueError)
def test_collect_too_much_bet(self):
self.player.collect_bet(200)
def test_is_active(self):
self.player.pay_info.update_by_pay(10)
self.true(self.player.is_active())
def test_if_allin_player_is_active(self):
self.player.pay_info.update_to_allin()
self.true(self.player.is_active())
def test_if_folded_player_is_not_active(self):
self.player.pay_info.update_to_fold()
self.false(self.player.is_active())
def test_if_no_money_player_is_active(self):
self.player.collect_bet(100)
self.true(self.player.is_active())
def test_is_waiting_ask(self):
self.player.pay_info.update_by_pay(10)
self.true(self.player.is_waiting_ask())
def test_if_allin_player_is_not_waiting_ask(self):
self.player.pay_info.update_to_allin()
self.false(self.player.is_waiting_ask())
def test_if_folded_player_is_not_waiting_ask(self):
self.player.pay_info.update_to_fold()
self.false(self.player.is_waiting_ask())
def test_add_fold_action_history(self):
self.player.add_action_history(Const.Action.FOLD)
self.eq("FOLD", self.player.action_histories[-1]["action"])
def test_add_call_action_history(self):
self.player.add_action_history(Const.Action.CALL, 10)
action = self.player.action_histories[-1]
self.eq("CALL", action["action"])
self.eq(10, action["amount"])
self.eq(10, action["paid"])
def test_add_call_action_history_after_paid(self):
self.player.add_action_history(Const.Action.CALL, 10)
self.player.add_action_history(Const.Action.CALL, 20)
action = self.player.action_histories[-1]
self.eq(20, action["amount"])
self.eq(10, action["paid"])
def test_add_raise_action_history(self):
self.player.add_action_history(Const.Action.RAISE, 10, 5)
action = self.player.action_histories[-1]
self.eq("RAISE", action["action"])
self.eq(10, action["amount"])
self.eq(10, action["paid"])
self.eq(5, action["add_amount"])
def test_add_raise_action_history_after_paid(self):
self.player.add_action_history(Const.Action.CALL, 10)
self.player.add_action_history(Const.Action.RAISE, 20, 10)
action = self.player.action_histories[-1]
self.eq(20, action["amount"])
self.eq(10, action["paid"])
def test_add_small_blind_history(self):
self.player.add_action_history(Const.Action.SMALL_BLIND, sb_amount=5)
action = self.player.action_histories[-1]
self.eq("SMALLBLIND", action["action"])
self.eq(5, action["amount"])
self.eq(5, action["add_amount"])
def test_add_big_blind_history(self):
self.player.add_action_history(Const.Action.BIG_BLIND, sb_amount=5)
action = self.player.action_histories[-1]
self.eq("BIGBLIND", action["action"])
self.eq(10, action["amount"])
self.eq(5, action["add_amount"])
def test_add_ante_history(self):
self.player.add_action_history(Const.Action.ANTE, 10)
action = self.player.action_histories[-1]
self.eq("ANTE", action["action"])
self.eq(10, action["amount"])
@raises(AssertionError)
def test_add_empty_ante_history(self):
self.player.add_action_history(Const.Action.ANTE, 0)
def test_save_street_action_histories(self):
self.assertIsNone(self.player.round_action_histories[Const.Street.PREFLOP])
self.player.add_action_history(Const.Action.BIG_BLIND, sb_amount=5)
self.player.save_street_action_histories(Const.Street.PREFLOP)
self.eq(1, len(self.player.round_action_histories[Const.Street.PREFLOP]))
self.eq("BIGBLIND", self.player.round_action_histories[Const.Street.PREFLOP][0]["action"])
self.eq(0, len(self.player.action_histories))
def test_clear_action_histories(self):
self.player.add_action_history(Const.Action.BIG_BLIND, sb_amount=5)
self.player.save_street_action_histories(Const.Street.PREFLOP)
self.player.add_action_history(Const.Action.CALL, 10)
self.assertIsNotNone(0, len(self.player.round_action_histories[Const.Street.PREFLOP]))
self.neq(0, len(self.player.action_histories))
self.player.clear_action_histories()
self.assertIsNone(self.player.round_action_histories[Const.Street.PREFLOP])
self.eq(0, len(self.player.action_histories))
def test_paid_sum(self):
self.eq(0, self.player.paid_sum())
self.player.add_action_history(Const.Action.BIG_BLIND, sb_amount=5)
self.eq(10, self.player.paid_sum())
self.player.clear_action_histories()
self.eq(0, self.player.paid_sum())
self.player.add_action_history(Const.Action.ANTE, 3)
self.eq(0, self.player.paid_sum())
self.player.add_action_history(Const.Action.BIG_BLIND, sb_amount=5)
self.eq(10, self.player.paid_sum())
def test_serialization(self):
player = self.__setup_player_for_serialization()
serial = player.serialize()
restored = Player.deserialize(serial)
self.eq(player.name, restored.name)
self.eq(player.uuid, restored.uuid)
self.eq(player.stack, restored.stack)
self.eq(player.hole_card, restored.hole_card)
self.eq(player.action_histories, restored.action_histories)
self.eq(player.round_action_histories, restored.round_action_histories)
self.eq(player.pay_info.amount, restored.pay_info.amount)
self.eq(player.pay_info.status, restored.pay_info.status)
def __setup_player_for_serialization(self):
player = Player("uuid", 50, "hoge")
player.add_holecard([Card.from_id(cid) for cid in range(1,3)])
player.add_action_history(Const.Action.SMALL_BLIND, sb_amount=5)
player.save_street_action_histories(Const.Street.PREFLOP)
player.add_action_history(Const.Action.CALL, 10)
player.add_action_history(Const.Action.RAISE, 10, 5)
player.add_action_history(Const.Action.FOLD)
player.pay_info.update_by_pay(15)
player.pay_info.update_to_fold()
return player
| {
"content_hash": "c5fbce926069c5f81cfaaf4158cf824a",
"timestamp": "",
"source": "github",
"line_count": 189,
"max_line_length": 94,
"avg_line_length": 37.94708994708995,
"alnum_prop": 0.7062186279977691,
"repo_name": "ishikota/PyPokerEngine",
"id": "d765de6c85829a01126665a9c76c196fbb222ccf",
"size": "7172",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/pypokerengine/engine/player_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "248078"
}
],
"symlink_target": ""
} |
from functools import reduce  # reduce is not a builtin on Python 3
class Vector2D(object):
def __init__(self, vec2d):
"""
Initialize your data structure here.
:type vec2d: List[List[int]]
"""
self.cursor = -1
if len(vec2d) > 0:
self.data = reduce(lambda a,b:a+b, vec2d)
else:
self.data = []
self.data_len = len(self.data)
def next(self):
self.cursor += 1
return self.data[self.cursor]
def hasNext(self):
return self.cursor < self.data_len - 1
# Your Vector2D object will be instantiated and called as such:
# i, v = Vector2D(vec2d), []
# while i.hasNext(): v.append(i.next())
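# A minimal sketch of that call pattern with an assumed sample input (the nested list
# below is illustrative, not part of the original problem statement):
# i, v = Vector2D([[1, 2], [3], [4, 5, 6]]), []
# while i.hasNext(): v.append(i.next())
# assert v == [1, 2, 3, 4, 5, 6]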
| {
"content_hash": "68f9bebeec559cecc7dc61c3d20938fe",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 63,
"avg_line_length": 24.035714285714285,
"alnum_prop": 0.5200594353640416,
"repo_name": "luosch/leetcode",
"id": "b2b6f5f0018b7b41629b2de4ceb29abc93fb941b",
"size": "673",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/Flatten 2D Vector.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "37027"
},
{
"name": "Python",
"bytes": "175260"
},
{
"name": "Shell",
"bytes": "801"
},
{
"name": "Swift",
"bytes": "121"
}
],
"symlink_target": ""
} |
import asyncio
from functools import partial
from unittest import mock
import pytest
from yarl import URL
import aiohttp
import aiohttp.helpers
import aiohttp.web
@pytest.fixture
def proxy_test_server(raw_test_server, loop, monkeypatch):
"""Handle all proxy requests and imitate remote server response."""
_patch_ssl_transport(monkeypatch)
default_response = dict(
status=200,
headers=None,
body=None)
@asyncio.coroutine
def proxy_handler(request, proxy_mock):
proxy_mock.request = request
proxy_mock.requests_list.append(request)
response = default_response.copy()
if isinstance(proxy_mock.return_value, dict):
response.update(proxy_mock.return_value)
headers = response['headers']
if not headers:
headers = {}
if request.method == 'CONNECT':
response['body'] = None
response['headers'] = headers
resp = aiohttp.web.Response(**response)
yield from resp.prepare(request)
yield from resp.drain()
return resp
@asyncio.coroutine
def proxy_server():
proxy_mock = mock.Mock()
proxy_mock.request = None
proxy_mock.requests_list = []
handler = partial(proxy_handler, proxy_mock=proxy_mock)
server = yield from raw_test_server(handler)
proxy_mock.server = server
proxy_mock.url = server.make_url('/')
return proxy_mock
return proxy_server
@asyncio.coroutine
def _request(method, url, loop=None, **kwargs):
client = aiohttp.ClientSession(loop=loop)
try:
resp = yield from client.request(method, url, **kwargs)
yield from resp.release()
return resp
finally:
yield from client.close()
@pytest.fixture()
def get_request(loop):
return partial(_request, method='GET', loop=loop)
@asyncio.coroutine
def test_proxy_http_absolute_path(proxy_test_server, get_request):
url = 'http://aiohttp.io/path?query=yes'
proxy = yield from proxy_test_server()
yield from get_request(url=url, proxy=proxy.url)
assert len(proxy.requests_list) == 1
assert proxy.request.method == 'GET'
assert proxy.request.host == 'aiohttp.io'
assert proxy.request.path_qs == 'http://aiohttp.io/path?query=yes'
@asyncio.coroutine
def test_proxy_http_raw_path(proxy_test_server, get_request):
url = 'http://aiohttp.io:2561/space sheep?q=can:fly'
raw_url = 'http://aiohttp.io:2561/space%20sheep?q=can:fly'
proxy = yield from proxy_test_server()
yield from get_request(url=url, proxy=proxy.url)
assert proxy.request.host == 'aiohttp.io:2561'
assert proxy.request.path_qs == raw_url
@asyncio.coroutine
def test_proxy_http_idna_support(proxy_test_server, get_request):
url = 'http://éé.com/'
raw_url = 'http://xn--9caa.com/'
proxy = yield from proxy_test_server()
yield from get_request(url=url, proxy=proxy.url)
assert proxy.request.host == 'xn--9caa.com'
assert proxy.request.path_qs == raw_url
@asyncio.coroutine
def test_proxy_http_connection_error(get_request):
url = 'http://aiohttp.io/path'
proxy_url = 'http://localhost:2242/'
with pytest.raises(aiohttp.ClientConnectorError):
yield from get_request(url=url, proxy=proxy_url)
@asyncio.coroutine
def test_proxy_http_bad_response(proxy_test_server, get_request):
url = 'http://aiohttp.io/path'
proxy = yield from proxy_test_server()
proxy.return_value = dict(
status=502,
headers={'Proxy-Agent': 'TestProxy'})
resp = yield from get_request(url=url, proxy=proxy.url)
assert resp.status == 502
assert resp.headers['Proxy-Agent'] == 'TestProxy'
@asyncio.coroutine
def test_proxy_http_auth(proxy_test_server, get_request):
url = 'http://aiohttp.io/path'
proxy = yield from proxy_test_server()
yield from get_request(url=url, proxy=proxy.url)
assert 'Authorization' not in proxy.request.headers
assert 'Proxy-Authorization' not in proxy.request.headers
auth = aiohttp.helpers.BasicAuth('user', 'pass')
yield from get_request(url=url, auth=auth, proxy=proxy.url)
assert 'Authorization' in proxy.request.headers
assert 'Proxy-Authorization' not in proxy.request.headers
yield from get_request(url=url, proxy_auth=auth, proxy=proxy.url)
assert 'Authorization' not in proxy.request.headers
assert 'Proxy-Authorization' in proxy.request.headers
yield from get_request(url=url, auth=auth,
proxy_auth=auth, proxy=proxy.url)
assert 'Authorization' in proxy.request.headers
assert 'Proxy-Authorization' in proxy.request.headers
@asyncio.coroutine
def test_proxy_http_auth_utf8(proxy_test_server, get_request):
url = 'http://aiohttp.io/path'
auth = aiohttp.helpers.BasicAuth('юзер', 'пасс', 'utf-8')
proxy = yield from proxy_test_server()
yield from get_request(url=url, auth=auth, proxy=proxy.url)
assert 'Authorization' in proxy.request.headers
assert 'Proxy-Authorization' not in proxy.request.headers
@asyncio.coroutine
def test_proxy_http_auth_from_url(proxy_test_server, get_request):
url = 'http://aiohttp.io/path'
proxy = yield from proxy_test_server()
auth_url = URL(url).with_user('user').with_password('pass')
yield from get_request(url=auth_url, proxy=proxy.url)
assert 'Authorization' in proxy.request.headers
assert 'Proxy-Authorization' not in proxy.request.headers
proxy_url = URL(proxy.url).with_user('user').with_password('pass')
yield from get_request(url=url, proxy=proxy_url)
assert 'Authorization' not in proxy.request.headers
assert 'Proxy-Authorization' in proxy.request.headers
@asyncio.coroutine
def test_proxy_http_acquired_cleanup(proxy_test_server, loop):
url = 'http://aiohttp.io/path'
conn = aiohttp.TCPConnector(loop=loop)
sess = aiohttp.ClientSession(connector=conn, loop=loop)
proxy = yield from proxy_test_server()
assert 0 == len(conn._acquired)
resp = yield from sess.get(url, proxy=proxy.url)
assert resp.closed
assert 0 == len(conn._acquired)
    yield from sess.close()
@pytest.mark.skip('we need to reconsider how we test this')
@asyncio.coroutine
def test_proxy_http_acquired_cleanup_force(proxy_test_server, loop):
url = 'http://aiohttp.io/path'
conn = aiohttp.TCPConnector(force_close=True, loop=loop)
sess = aiohttp.ClientSession(connector=conn, loop=loop)
proxy = yield from proxy_test_server()
assert 0 == len(conn._acquired)
@asyncio.coroutine
def request():
resp = yield from sess.get(url, proxy=proxy.url)
assert 1 == len(conn._acquired)
yield from resp.release()
yield from request()
assert 0 == len(conn._acquired)
yield from sess.close()
@pytest.mark.skip('we need to reconsider how we test this')
@asyncio.coroutine
def test_proxy_http_multi_conn_limit(proxy_test_server, loop):
url = 'http://aiohttp.io/path'
limit, multi_conn_num = 1, 5
conn = aiohttp.TCPConnector(limit=limit, loop=loop)
sess = aiohttp.ClientSession(connector=conn, loop=loop)
proxy = yield from proxy_test_server()
current_pid = None
@asyncio.coroutine
def request(pid):
# process requests only one by one
nonlocal current_pid
resp = yield from sess.get(url, proxy=proxy.url)
current_pid = pid
yield from asyncio.sleep(0.2, loop=loop)
assert current_pid == pid
yield from resp.release()
return resp
requests = [request(pid) for pid in range(multi_conn_num)]
responses = yield from asyncio.gather(*requests, loop=loop)
assert len(responses) == multi_conn_num
assert set(resp.status for resp in responses) == {200}
yield from sess.close()
# @pytest.mark.xfail
@asyncio.coroutine
def _test_proxy_https_connect(proxy_test_server, get_request):
proxy = yield from proxy_test_server()
url = 'https://www.google.com.ua/search?q=aiohttp proxy'
yield from get_request(url=url, proxy=proxy.url)
connect = proxy.requests_list[0]
assert connect.method == 'CONNECT'
assert connect.path == 'www.google.com.ua:443'
assert connect.host == 'www.google.com.ua'
assert proxy.request.host == 'www.google.com.ua'
assert proxy.request.path_qs == '/search?q=aiohttp+proxy'
# @pytest.mark.xfail
@asyncio.coroutine
def _test_proxy_https_connect_with_port(proxy_test_server, get_request):
proxy = yield from proxy_test_server()
url = 'https://secure.aiohttp.io:2242/path'
yield from get_request(url=url, proxy=proxy.url)
connect = proxy.requests_list[0]
assert connect.method == 'CONNECT'
assert connect.path == 'secure.aiohttp.io:2242'
assert connect.host == 'secure.aiohttp.io:2242'
assert proxy.request.host == 'secure.aiohttp.io:2242'
assert proxy.request.path_qs == '/path'
# @pytest.mark.xfail
@asyncio.coroutine
def _test_proxy_https_send_body(proxy_test_server, loop):
sess = aiohttp.ClientSession(loop=loop)
proxy = yield from proxy_test_server()
proxy.return_value = {'status': 200, 'body': b'1'*(2**20)}
url = 'https://www.google.com.ua/search?q=aiohttp proxy'
resp = yield from sess.get(url, proxy=proxy.url)
body = yield from resp.read()
yield from resp.release()
yield from sess.close()
assert body == b'1'*(2**20)
# @pytest.mark.xfail
@asyncio.coroutine
def _test_proxy_https_idna_support(proxy_test_server, get_request):
url = 'https://éé.com/'
proxy = yield from proxy_test_server()
yield from get_request(url=url, proxy=proxy.url)
connect = proxy.requests_list[0]
assert connect.method == 'CONNECT'
assert connect.path == 'xn--9caa.com:443'
assert connect.host == 'xn--9caa.com'
@asyncio.coroutine
def test_proxy_https_connection_error(get_request):
url = 'https://secure.aiohttp.io/path'
proxy_url = 'http://localhost:2242/'
with pytest.raises(aiohttp.ClientConnectorError):
yield from get_request(url=url, proxy=proxy_url)
@asyncio.coroutine
def test_proxy_https_bad_response(proxy_test_server, get_request):
url = 'https://secure.aiohttp.io/path'
proxy = yield from proxy_test_server()
proxy.return_value = dict(
status=502,
headers={'Proxy-Agent': 'TestProxy'})
with pytest.raises(aiohttp.ClientHttpProxyError):
yield from get_request(url=url, proxy=proxy.url)
assert len(proxy.requests_list) == 1
assert proxy.request.method == 'CONNECT'
assert proxy.request.path == 'secure.aiohttp.io:443'
# @pytest.mark.xfail
@asyncio.coroutine
def _test_proxy_https_auth(proxy_test_server, get_request):
url = 'https://secure.aiohttp.io/path'
auth = aiohttp.helpers.BasicAuth('user', 'pass')
proxy = yield from proxy_test_server()
yield from get_request(url=url, proxy=proxy.url)
connect = proxy.requests_list[0]
assert 'Authorization' not in connect.headers
assert 'Proxy-Authorization' not in connect.headers
assert 'Authorization' not in proxy.request.headers
assert 'Proxy-Authorization' not in proxy.request.headers
proxy = yield from proxy_test_server()
yield from get_request(url=url, auth=auth, proxy=proxy.url)
connect = proxy.requests_list[0]
assert 'Authorization' not in connect.headers
assert 'Proxy-Authorization' not in connect.headers
assert 'Authorization' in proxy.request.headers
assert 'Proxy-Authorization' not in proxy.request.headers
proxy = yield from proxy_test_server()
yield from get_request(url=url, proxy_auth=auth, proxy=proxy.url)
connect = proxy.requests_list[0]
assert 'Authorization' not in connect.headers
assert 'Proxy-Authorization' in connect.headers
assert 'Authorization' not in proxy.request.headers
assert 'Proxy-Authorization' not in proxy.request.headers
proxy = yield from proxy_test_server()
yield from get_request(url=url, auth=auth,
proxy_auth=auth, proxy=proxy.url)
connect = proxy.requests_list[0]
assert 'Authorization' not in connect.headers
assert 'Proxy-Authorization' in connect.headers
assert 'Authorization' in proxy.request.headers
assert 'Proxy-Authorization' not in proxy.request.headers
# @pytest.mark.xfail
@asyncio.coroutine
def _test_proxy_https_acquired_cleanup(proxy_test_server, loop):
url = 'https://secure.aiohttp.io/path'
conn = aiohttp.TCPConnector(loop=loop)
sess = aiohttp.ClientSession(connector=conn, loop=loop)
proxy = yield from proxy_test_server()
assert 0 == len(conn._acquired)
@asyncio.coroutine
def request():
resp = yield from sess.get(url, proxy=proxy.url)
assert 1 == len(conn._acquired)
yield from resp.release()
yield from request()
assert 0 == len(conn._acquired)
yield from sess.close()
# @pytest.mark.xfail
@asyncio.coroutine
def _test_proxy_https_acquired_cleanup_force(proxy_test_server, loop):
url = 'https://secure.aiohttp.io/path'
conn = aiohttp.TCPConnector(force_close=True, loop=loop)
sess = aiohttp.ClientSession(connector=conn, loop=loop)
proxy = yield from proxy_test_server()
assert 0 == len(conn._acquired)
@asyncio.coroutine
def request():
resp = yield from sess.get(url, proxy=proxy.url)
assert 1 == len(conn._acquired)
yield from resp.release()
yield from request()
assert 0 == len(conn._acquired)
yield from sess.close()
# @pytest.mark.xfail
@asyncio.coroutine
def _test_proxy_https_multi_conn_limit(proxy_test_server, loop):
url = 'https://secure.aiohttp.io/path'
limit, multi_conn_num = 1, 5
conn = aiohttp.TCPConnector(limit=limit, loop=loop)
sess = aiohttp.ClientSession(connector=conn, loop=loop)
proxy = yield from proxy_test_server()
current_pid = None
@asyncio.coroutine
def request(pid):
# process requests only one by one
nonlocal current_pid
resp = yield from sess.get(url, proxy=proxy.url)
current_pid = pid
yield from asyncio.sleep(0.2, loop=loop)
assert current_pid == pid
yield from resp.release()
return resp
requests = [request(pid) for pid in range(multi_conn_num)]
responses = yield from asyncio.gather(*requests, loop=loop)
assert len(responses) == multi_conn_num
assert set(resp.status for resp in responses) == {200}
yield from sess.close()
def _patch_ssl_transport(monkeypatch):
"""Make ssl transport substitution to prevent ssl handshake."""
def _make_ssl_transport_dummy(self, rawsock, protocol, sslcontext,
waiter=None, **kwargs):
return self._make_socket_transport(rawsock, protocol, waiter,
extra=kwargs.get('extra'),
server=kwargs.get('server'))
monkeypatch.setattr(
"asyncio.selector_events.BaseSelectorEventLoop._make_ssl_transport",
_make_ssl_transport_dummy)
| {
"content_hash": "8ad9e74e43243bb24032e396e98db3e4",
"timestamp": "",
"source": "github",
"line_count": 507,
"max_line_length": 76,
"avg_line_length": 29.775147928994084,
"alnum_prop": 0.6758081611022787,
"repo_name": "Eyepea/aiohttp",
"id": "5cc57164f810e3fced39bbd79f4643a336ac612e",
"size": "15108",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_proxy_functional.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "1738"
},
{
"name": "PowerShell",
"bytes": "3361"
},
{
"name": "Python",
"bytes": "935198"
}
],
"symlink_target": ""
} |
from typing import Any, TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMHttpLoggingPolicy, AsyncARMChallengeAuthenticationPolicy
from .._version import VERSION
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
class DataProtectionClientConfiguration(Configuration): # pylint: disable=too-many-instance-attributes
"""Configuration for DataProtectionClient.
Note that all parameters used to create this instance are saved as instance
attributes.
:param credential: Credential needed for the client to connect to Azure. Required.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:param subscription_id: The subscription Id. Required.
:type subscription_id: str
:keyword api_version: Api Version. Default value is "2022-09-01-preview". Note that overriding
this default value may result in unsupported behavior.
:paramtype api_version: str
"""
def __init__(self, credential: "AsyncTokenCredential", subscription_id: str, **kwargs: Any) -> None:
super(DataProtectionClientConfiguration, self).__init__(**kwargs)
api_version = kwargs.pop("api_version", "2022-09-01-preview") # type: str
if credential is None:
raise ValueError("Parameter 'credential' must not be None.")
if subscription_id is None:
raise ValueError("Parameter 'subscription_id' must not be None.")
self.credential = credential
self.subscription_id = subscription_id
self.api_version = api_version
self.credential_scopes = kwargs.pop("credential_scopes", ["https://management.azure.com/.default"])
kwargs.setdefault("sdk_moniker", "mgmt-dataprotection/{}".format(VERSION))
self._configure(**kwargs)
def _configure(self, **kwargs: Any) -> None:
self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs)
self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs)
self.proxy_policy = kwargs.get("proxy_policy") or policies.ProxyPolicy(**kwargs)
self.logging_policy = kwargs.get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs)
self.http_logging_policy = kwargs.get("http_logging_policy") or ARMHttpLoggingPolicy(**kwargs)
self.retry_policy = kwargs.get("retry_policy") or policies.AsyncRetryPolicy(**kwargs)
self.custom_hook_policy = kwargs.get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs)
self.redirect_policy = kwargs.get("redirect_policy") or policies.AsyncRedirectPolicy(**kwargs)
self.authentication_policy = kwargs.get("authentication_policy")
if self.credential and not self.authentication_policy:
self.authentication_policy = AsyncARMChallengeAuthenticationPolicy(
self.credential, *self.credential_scopes, **kwargs
)
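# Illustrative only, not part of the original module: a minimal sketch of building the
# configuration, assuming DefaultAzureCredential from azure-identity as the async token
# credential and a placeholder subscription id.
def _example_configuration():
    from azure.identity.aio import DefaultAzureCredential
    return DataProtectionClientConfiguration(
        credential=DefaultAzureCredential(),
        subscription_id="00000000-0000-0000-0000-000000000000",  # assumed placeholder
    )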
| {
"content_hash": "9f929f2bb9d827bb763d5c55ee919995",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 107,
"avg_line_length": 53.05172413793103,
"alnum_prop": 0.7198570035749107,
"repo_name": "Azure/azure-sdk-for-python",
"id": "d04a341320d8020cc4a134f776fd463d362771f4",
"size": "3545",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/dataprotection/azure-mgmt-dataprotection/azure/mgmt/dataprotection/aio/_configuration.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
import os
import datetime
import time
import platform
import mimetypes
from tempfile import NamedTemporaryFile
import warnings
# DJANGO IMPORTS
from django.core.files import File
from django.utils.six import string_types
# FILEBROWSER IMPORTS
from filebrowser.settings import EXTENSIONS, VERSIONS, ADMIN_VERSIONS, VERSIONS_BASEDIR, VERSION_QUALITY, PLACEHOLDER, FORCE_PLACEHOLDER, SHOW_PLACEHOLDER, STRICT_PIL, IMAGE_MAXBLOCK, DEFAULT_PERMISSIONS
from filebrowser.utils import path_strip, scale_and_crop
from django.utils.encoding import python_2_unicode_compatible, smart_str
# PIL import
if STRICT_PIL:
from PIL import Image
from PIL import ImageFile
else:
try:
from PIL import Image
from PIL import ImageFile
except ImportError:
import Image
import ImageFile
ImageFile.MAXBLOCK = IMAGE_MAXBLOCK # default is 64k
class FileListing():
"""
The FileListing represents a group of FileObjects/FileDirObjects.
An example::
from filebrowser.base import FileListing
filelisting = FileListing(path, sorting_by='date', sorting_order='desc')
print filelisting.files_listing_total()
print filelisting.results_listing_total()
for fileobject in filelisting.files_listing_total():
print fileobject.filetype
where path is a relative path to a storage location
"""
# Four variables to store the length of a listing obtained by various listing methods
# (updated whenever a particular listing method is called).
_results_listing_total = None
_results_walk_total = None
_results_listing_filtered = None
    _results_walk_filtered = None
def __init__(self, path, filter_func=None, sorting_by=None, sorting_order=None, site=None):
self.path = path
self.filter_func = filter_func
self.sorting_by = sorting_by
self.sorting_order = sorting_order
if not site:
from filebrowser.sites import site as default_site
site = default_site
self.site = site
# HELPER METHODS
# sort_by_attr
def sort_by_attr(self, seq, attr):
"""
Sort the sequence of objects by object's attribute
Arguments:
seq - the list or any sequence (including immutable one) of objects to sort.
attr - the name of attribute to sort by
Returns:
the sorted list of objects.
"""
from operator import attrgetter
if isinstance(attr, string_types): # Backward compatibility hack
attr = (attr, )
return sorted(seq, key=attrgetter(*attr))
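    # For example (attribute names are illustrative): sort_by_attr(fileobjects, ("filetype", "filename_lower"))
    # sorts by filetype first and then by lowercased filename.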
_is_folder_stored = None
@property
def is_folder(self):
if self._is_folder_stored is None:
self._is_folder_stored = self.site.storage.isdir(self.path)
return self._is_folder_stored
def listing(self):
"List all files for path"
if self.is_folder:
dirs, files = self.site.storage.listdir(self.path)
return (f for f in dirs + files)
return []
def _walk(self, path, filelisting):
"""
Recursively walks the path and collects all files and
directories.
        Danger: symbolic links can create cycles, in which case this
        function ends up in infinite recursion.
"""
dirs, files = self.site.storage.listdir(path)
if dirs:
for d in dirs:
self._walk(os.path.join(path, d), filelisting)
filelisting.extend([path_strip(os.path.join(path, d), self.site.directory)])
if files:
for f in files:
filelisting.extend([path_strip(os.path.join(path, f), self.site.directory)])
def walk(self):
"Walk all files for path"
filelisting = []
if self.is_folder:
self._walk(self.path, filelisting)
return filelisting
# Cached results of files_listing_total (without any filters and sorting applied)
_fileobjects_total = None
def files_listing_total(self):
"Returns FileObjects for all files in listing"
if self._fileobjects_total is None:
self._fileobjects_total = []
for item in self.listing():
fileobject = FileObject(os.path.join(self.path, item), site=self.site)
self._fileobjects_total.append(fileobject)
files = self._fileobjects_total
if self.sorting_by:
files = self.sort_by_attr(files, self.sorting_by)
if self.sorting_order == "desc":
files.reverse()
self._results_listing_total = len(files)
return files
def files_walk_total(self):
"Returns FileObjects for all files in walk"
files = []
for item in self.walk():
fileobject = FileObject(os.path.join(self.site.directory, item), site=self.site)
files.append(fileobject)
if self.sorting_by:
files = self.sort_by_attr(files, self.sorting_by)
if self.sorting_order == "desc":
files.reverse()
self._results_walk_total = len(files)
return files
def files_listing_filtered(self):
"Returns FileObjects for filtered files in listing"
if self.filter_func:
listing = list(filter(self.filter_func, self.files_listing_total()))
else:
listing = self.files_listing_total()
self._results_listing_filtered = len(listing)
return listing
def files_walk_filtered(self):
"Returns FileObjects for filtered files in walk"
if self.filter_func:
listing = list(filter(self.filter_func, self.files_walk_total()))
else:
listing = self.files_walk_total()
self._results_walk_filtered = len(listing)
return listing
def results_listing_total(self):
"Counter: all files"
if self._results_listing_total is not None:
return self._results_listing_total
return len(self.files_listing_total())
def results_walk_total(self):
"Counter: all files"
if self._results_walk_total is not None:
return self._results_walk_total
return len(self.files_walk_total())
def results_listing_filtered(self):
"Counter: filtered files"
if self._results_listing_filtered is not None:
return self._results_listing_filtered
return len(self.files_listing_filtered())
def results_walk_filtered(self):
"Counter: filtered files"
if self._results_walk_filtered is not None:
return self._results_walk_filtered
return len(self.files_walk_filtered())
@python_2_unicode_compatible
class FileObject():
"""
The FileObject represents a file (or directory) on the server.
An example::
from filebrowser.base import FileObject
fileobject = FileObject(path)
where path is a relative path to a storage location
"""
def __init__(self, path, site=None):
if not site:
from filebrowser.sites import site as default_site
site = default_site
self.site = site
if platform.system() == 'Windows':
self.path = path.replace('\\', '/')
else:
self.path = path
self.head = os.path.dirname(path)
self.filename = os.path.basename(path)
self.filename_lower = self.filename.lower()
self.filename_root, self.extension = os.path.splitext(self.filename)
self.mimetype = mimetypes.guess_type(self.filename)
def __str__(self):
return smart_str(self.path)
@property
def name(self):
return self.path
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self or "None")
def __len__(self):
return len(self.path)
# HELPER METHODS
# _get_file_type
def _get_file_type(self):
"Get file type as defined in EXTENSIONS."
file_type = ''
for k, v in EXTENSIONS.items():
for extension in v:
if self.extension.lower() == extension.lower():
file_type = k
return file_type
# GENERAL ATTRIBUTES/PROPERTIES
# filetype
# filesize
# date
# datetime
# exists
_filetype_stored = None
@property
def filetype(self):
"Filetype as defined with EXTENSIONS"
if self._filetype_stored is not None:
return self._filetype_stored
if self.is_folder:
self._filetype_stored = 'Folder'
else:
self._filetype_stored = self._get_file_type()
return self._filetype_stored
_filesize_stored = None
@property
def filesize(self):
"Filesize in bytes"
if self._filesize_stored is not None:
return self._filesize_stored
if self.exists:
self._filesize_stored = self.site.storage.size(self.path)
return self._filesize_stored
return None
_date_stored = None
@property
def date(self):
"Modified time (from site.storage) as float (mktime)"
if self._date_stored is not None:
return self._date_stored
if self.exists:
self._date_stored = time.mktime(self.site.storage.modified_time(self.path).timetuple())
return self._date_stored
return None
@property
def datetime(self):
"Modified time (from site.storage) as datetime"
if self.date:
return datetime.datetime.fromtimestamp(self.date)
return None
_exists_stored = None
@property
def exists(self):
"True, if the path exists, False otherwise"
if self._exists_stored is None:
self._exists_stored = self.site.storage.exists(self.path)
return self._exists_stored
# PATH/URL ATTRIBUTES/PROPERTIES
# path (see init)
# path_relative_directory
# path_full
# dirname
# url
@property
def path_relative_directory(self):
"Path relative to site.directory"
return path_strip(self.path, self.site.directory)
@property
def path_full(self):
"Absolute path as defined with site.storage"
return self.site.storage.path(self.path)
@property
def dirname(self):
"The directory (not including site.directory)"
return os.path.dirname(self.path_relative_directory)
@property
def url(self):
"URL for the file/folder as defined with site.storage"
return self.site.storage.url(self.path)
# IMAGE ATTRIBUTES/PROPERTIES
# dimensions
# width
# height
# aspectratio
# orientation
_dimensions_stored = None
@property
def dimensions(self):
"Image dimensions as a tuple"
if self.filetype != 'Image':
return None
if self._dimensions_stored is not None:
return self._dimensions_stored
try:
im = Image.open(self.site.storage.open(self.path))
self._dimensions_stored = im.size
except:
pass
return self._dimensions_stored
@property
def width(self):
"Image width in px"
if self.dimensions:
return self.dimensions[0]
return None
@property
def height(self):
"Image height in px"
if self.dimensions:
return self.dimensions[1]
return None
@property
def aspectratio(self):
"Aspect ratio (float format)"
if self.dimensions:
return float(self.width) / float(self.height)
return None
@property
def orientation(self):
"Image orientation, either 'Landscape' or 'Portrait'"
if self.dimensions:
if self.dimensions[0] >= self.dimensions[1]:
return "Landscape"
else:
return "Portrait"
return None
# FOLDER ATTRIBUTES/PROPERTIES
# directory (deprecated)
# folder (deprecated)
# is_folder
# is_empty
@property
def directory(self):
"Folder(s) relative from site.directory"
warnings.warn("directory will be removed with 3.6, use path_relative_directory instead.", DeprecationWarning)
return path_strip(self.path, self.site.directory)
@property
def folder(self):
"Parent folder(s)"
warnings.warn("directory will be removed with 3.6, use dirname instead.", DeprecationWarning)
return os.path.dirname(path_strip(os.path.join(self.head, ''), self.site.directory))
_is_folder_stored = None
@property
def is_folder(self):
"True, if path is a folder"
if self._is_folder_stored is None:
self._is_folder_stored = self.site.storage.isdir(self.path)
return self._is_folder_stored
@property
def is_empty(self):
"True, if folder is empty. False otherwise, or if the object is not a folder."
if self.is_folder:
dirs, files = self.site.storage.listdir(self.path)
if not dirs and not files:
return True
return False
# VERSION ATTRIBUTES/PROPERTIES
# is_version
# versions_basedir
# original
# original_filename
@property
def is_version(self):
"True if file is a version, false otherwise"
tmp = self.filename_root.split("_")
if tmp[len(tmp) - 1] in VERSIONS:
return True
return False
@property
def versions_basedir(self):
"Main directory for storing versions (either VERSIONS_BASEDIR or site.directory)"
if VERSIONS_BASEDIR:
return VERSIONS_BASEDIR
elif self.site.directory:
return self.site.directory
else:
return ""
@property
def original(self):
"Returns the original FileObject"
if self.is_version:
relative_path = self.head.replace(self.versions_basedir, "").lstrip("/")
return FileObject(os.path.join(self.site.directory, relative_path, self.original_filename), site=self.site)
return self
@property
def original_filename(self):
"Get the filename of an original image from a version"
tmp = self.filename_root.split("_")
if tmp[len(tmp) - 1] in VERSIONS:
return u"%s%s" % (self.filename_root.replace("_%s" % tmp[len(tmp) - 1], ""), self.extension)
return self.filename
# VERSION METHODS
# versions()
# admin_versions()
# version_name(suffix)
# version_path(suffix)
# version_generate(suffix)
def versions(self):
"List of versions (not checking if they actually exist)"
version_list = []
if self.filetype == "Image" and not self.is_version:
for version in sorted(VERSIONS):
version_list.append(os.path.join(self.versions_basedir, self.dirname, self.version_name(version)))
return version_list
def admin_versions(self):
"List of admin versions (not checking if they actually exist)"
version_list = []
if self.filetype == "Image" and not self.is_version:
for version in ADMIN_VERSIONS:
version_list.append(os.path.join(self.versions_basedir, self.dirname, self.version_name(version)))
return version_list
def version_name(self, version_suffix):
"Name of a version" # FIXME: version_name for version?
return self.filename_root + "_" + version_suffix + self.extension
def version_path(self, version_suffix):
"Path to a version (relative to storage location)" # FIXME: version_path for version?
return os.path.join(self.versions_basedir, self.dirname, self.version_name(version_suffix))
def version_generate(self, version_suffix):
"Generate a version" # FIXME: version_generate for version?
path = self.path
version_path = self.version_path(version_suffix)
if not self.site.storage.isfile(version_path):
version_path = self._generate_version(version_suffix)
elif self.site.storage.modified_time(path) > self.site.storage.modified_time(version_path):
version_path = self._generate_version(version_suffix)
return FileObject(version_path, site=self.site)
def _generate_version(self, version_suffix):
"""
Generate Version for an Image.
value has to be a path relative to the storage location.
"""
tmpfile = File(NamedTemporaryFile())
try:
f = self.site.storage.open(self.path)
except IOError:
return ""
im = Image.open(f)
version_path = self.version_path(version_suffix)
version_dir, version_basename = os.path.split(version_path)
root, ext = os.path.splitext(version_basename)
version = scale_and_crop(im, VERSIONS[version_suffix]['width'], VERSIONS[version_suffix]['height'], VERSIONS[version_suffix]['opts'])
if not version:
version = im
# version methods as defined with VERSIONS
if 'methods' in VERSIONS[version_suffix].keys():
for m in VERSIONS[version_suffix]['methods']:
if callable(m):
version = m(version)
# save version
try:
version.save(tmpfile, format=Image.EXTENSION[ext.lower()], quality=VERSION_QUALITY, optimize=(os.path.splitext(version_path)[1] != '.gif'))
except IOError:
version.save(tmpfile, format=Image.EXTENSION[ext.lower()], quality=VERSION_QUALITY)
# remove old version, if any
if version_path != self.site.storage.get_available_name(version_path):
self.site.storage.delete(version_path)
self.site.storage.save(version_path, tmpfile)
# set permissions
if DEFAULT_PERMISSIONS is not None:
os.chmod(self.site.storage.path(version_path), DEFAULT_PERMISSIONS)
return version_path
# DELETE METHODS
# delete()
# delete_versions()
# delete_admin_versions()
def delete(self):
"Delete FileObject (deletes a folder recursively)"
if self.is_folder:
self.site.storage.rmtree(self.path)
else:
self.site.storage.delete(self.path)
def delete_versions(self):
"Delete versions"
for version in self.versions():
try:
self.site.storage.delete(version)
except:
pass
def delete_admin_versions(self):
"Delete admin versions"
for version in self.admin_versions():
try:
self.site.storage.delete(version)
except:
pass
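# A minimal sketch of the version workflow above, illustrative only: the relative path and
# the "thumbnail" version key are assumed placeholders, not defined by this module.
def _example_version_generate():
    fo = FileObject("uploads/photo.jpg")  # path relative to the storage location
    if fo.filetype == "Image" and not fo.is_version:
        version = fo.version_generate("thumbnail")  # requires a "thumbnail" entry in VERSIONS
        return version.url, version.dimensions
    return None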
| {
"content_hash": "29d115976615c86f769cc3a61ffc1e3e",
"timestamp": "",
"source": "github",
"line_count": 570,
"max_line_length": 203,
"avg_line_length": 32.80701754385965,
"alnum_prop": 0.6154545454545455,
"repo_name": "nemesisdesign/django-filebrowser",
"id": "6881a43faf382d89406cb5f94a7b1600a218edbd",
"size": "18734",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "filebrowser/base.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "11293"
},
{
"name": "HTML",
"bytes": "49025"
},
{
"name": "JavaScript",
"bytes": "50111"
},
{
"name": "Python",
"bytes": "166625"
}
],
"symlink_target": ""
} |
from typing import ( # pylint: disable=unused-import
Union, Optional, Any, TYPE_CHECKING
)
from azure.storage.blob import generate_account_sas as generate_blob_account_sas
from azure.storage.blob import generate_container_sas, generate_blob_sas
if TYPE_CHECKING:
from datetime import datetime
from ._models import AccountSasPermissions, FileSystemSasPermissions, FileSasPermissions, ResourceTypes, \
UserDelegationKey
def generate_account_sas(
account_name, # type: str
account_key, # type: str
resource_types, # type: Union[ResourceTypes, str]
permission, # type: Union[AccountSasPermissions, str]
expiry, # type: Optional[Union[datetime, str]]
**kwargs # type: Any
): # type: (...) -> str
"""Generates a shared access signature for the DataLake service.
Use the returned signature as the credential parameter of any DataLakeServiceClient,
FileSystemClient, DataLakeDirectoryClient or DataLakeFileClient.
:param str account_name:
The storage account name used to generate the shared access signature.
:param str account_key:
The access key to generate the shared access signature.
:param resource_types:
Specifies the resource types that are accessible with the account SAS.
:type resource_types: str or ~azure.storage.filedatalake.ResourceTypes
:param permission:
The permissions associated with the shared access signature. The
user is restricted to operations allowed by the permissions.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has been
specified in an associated stored access policy.
:type permission: str or ~azure.storage.filedatalake.AccountSasPermissions
:param expiry:
The time at which the shared access signature becomes invalid.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has
been specified in an associated stored access policy. Azure will always
convert values to UTC. If a date is passed in without timezone info, it
is assumed to be UTC.
:type expiry: ~datetime.datetime or str
:keyword start:
The time at which the shared access signature becomes valid. If
omitted, start time for this call is assumed to be the time when the
storage service receives the request. Azure will always convert values
to UTC. If a date is passed in without timezone info, it is assumed to
be UTC.
:paramtype start: ~datetime.datetime or str
:keyword str ip:
Specifies an IP address or a range of IP addresses from which to accept requests.
If the IP address from which the request originates does not match the IP address
or address range specified on the SAS token, the request is not authenticated.
For example, specifying ip=168.1.5.65 or ip=168.1.5.60-168.1.5.70 on the SAS
restricts the request to those IP addresses.
:keyword str protocol:
Specifies the protocol permitted for a request made. The default value is https.
:keyword str encryption_scope:
Specifies the encryption scope for a request made so that all write operations will be service encrypted.
:return: A Shared Access Signature (sas) token.
:rtype: str
"""
return generate_blob_account_sas(
account_name=account_name,
account_key=account_key,
resource_types=resource_types,
permission=permission,
expiry=expiry,
**kwargs
)
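# Illustrative helper, not part of the original module: a minimal sketch of an account-level
# SAS with assumed placeholder account name/key. The returned token can be passed as the
# credential of a DataLakeServiceClient.
def _example_account_sas():
    from datetime import datetime, timedelta
    from azure.storage.filedatalake import ResourceTypes, AccountSasPermissions
    return generate_account_sas(
        account_name="myaccount",     # assumed placeholder
        account_key="<account-key>",  # assumed placeholder
        resource_types=ResourceTypes(service=True, file_system=True, object=True),
        permission=AccountSasPermissions(read=True, list=True),
        expiry=datetime.utcnow() + timedelta(hours=1),
    )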
def generate_file_system_sas(
account_name, # type: str
file_system_name, # type: str
credential, # type: Union[str, UserDelegationKey]
permission=None, # type: Optional[Union[FileSystemSasPermissions, str]]
expiry=None, # type: Optional[Union[datetime, str]]
**kwargs # type: Any
):
# type: (...) -> str
"""Generates a shared access signature for a file system.
Use the returned signature with the credential parameter of any DataLakeServiceClient,
FileSystemClient, DataLakeDirectoryClient or DataLakeFileClient.
:param str account_name:
The storage account name used to generate the shared access signature.
:param str file_system_name:
The name of the file system.
:param str credential:
        The credential can be either an account key or a user delegation key.
        If an account key is used as the credential, the credential type should be a str.
        Instead of an account key, the user can also pass in a user delegation key.
A user delegation key can be obtained from the service by authenticating with an AAD identity;
this can be accomplished
by calling :func:`~azure.storage.filedatalake.DataLakeServiceClient.get_user_delegation_key`.
When present, the SAS is signed with the user delegation key instead.
:type credential: str or ~azure.storage.filedatalake.UserDelegationKey
:param permission:
The permissions associated with the shared access signature. The
user is restricted to operations allowed by the permissions.
Permissions must be ordered racwdlmeop.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has been
specified in an associated stored access policy.
:type permission: str or ~azure.storage.filedatalake.FileSystemSasPermissions
:param expiry:
The time at which the shared access signature becomes invalid.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has
been specified in an associated stored access policy. Azure will always
convert values to UTC. If a date is passed in without timezone info, it
is assumed to be UTC.
:type expiry: datetime or str
:keyword start:
The time at which the shared access signature becomes valid. If
omitted, start time for this call is assumed to be the time when the
storage service receives the request. Azure will always convert values
to UTC. If a date is passed in without timezone info, it is assumed to
be UTC.
:paramtype start: datetime or str
:keyword str ip:
Specifies an IP address or a range of IP addresses from which to accept requests.
If the IP address from which the request originates does not match the IP address
or address range specified on the SAS token, the request is not authenticated.
For example, specifying ip=168.1.5.65 or ip=168.1.5.60-168.1.5.70 on the SAS
restricts the request to those IP addresses.
:keyword str protocol:
Specifies the protocol permitted for a request made. The default value is https.
:keyword str cache_control:
Response header value for Cache-Control when resource is accessed
using this shared access signature.
:keyword str content_disposition:
Response header value for Content-Disposition when resource is accessed
using this shared access signature.
:keyword str content_encoding:
Response header value for Content-Encoding when resource is accessed
using this shared access signature.
:keyword str content_language:
Response header value for Content-Language when resource is accessed
using this shared access signature.
:keyword str content_type:
Response header value for Content-Type when resource is accessed
using this shared access signature.
:keyword str preauthorized_agent_object_id:
The AAD object ID of a user assumed to be authorized by the owner of the user delegation key to perform
the action granted by the SAS token. The service will validate the SAS token and ensure that the owner of the
user delegation key has the required permissions before granting access but no additional permission check for
the agent object id will be performed.
:keyword str agent_object_id:
The AAD object ID of a user assumed to be unauthorized by the owner of the user delegation key to
perform the action granted by the SAS token. The service will validate the SAS token and ensure that the owner
of the user delegation key has the required permissions before granting access and the service will perform an
additional POSIX ACL check to determine if this user is authorized to perform the requested operation.
:keyword str correlation_id:
The correlation id to correlate the storage audit logs with the audit logs used by the principal
generating and distributing the SAS.
:keyword str encryption_scope:
Specifies the encryption scope for a request made so that all write operations will be service encrypted.
:return: A Shared Access Signature (sas) token.
:rtype: str
"""
return generate_container_sas(
account_name=account_name,
container_name=file_system_name,
account_key=credential if isinstance(credential, str) else None,
user_delegation_key=credential if not isinstance(credential, str) else None,
permission=permission,
expiry=expiry,
**kwargs)
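# Illustrative helper, not part of the original module: a minimal sketch of a
# file-system-level SAS signed with an account key (a UserDelegationKey obtained via
# DataLakeServiceClient.get_user_delegation_key could be passed as the credential instead).
def _example_file_system_sas():
    from datetime import datetime, timedelta
    from azure.storage.filedatalake import FileSystemSasPermissions
    return generate_file_system_sas(
        account_name="myaccount",           # assumed placeholder
        file_system_name="my-file-system",  # assumed placeholder
        credential="<account-key>",         # assumed placeholder
        permission=FileSystemSasPermissions(read=True, list=True),
        expiry=datetime.utcnow() + timedelta(hours=1),
    )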
def generate_directory_sas(
account_name, # type: str
file_system_name, # type: str
directory_name, # type: str
credential, # type: Union[str, UserDelegationKey]
permission=None, # type: Optional[Union[FileSasPermissions, str]]
expiry=None, # type: Optional[Union[datetime, str]]
**kwargs # type: Any
):
# type: (...) -> str
"""Generates a shared access signature for a directory.
Use the returned signature with the credential parameter of any DataLakeServiceClient,
FileSystemClient, DataLakeDirectoryClient or DataLakeFileClient.
:param str account_name:
The storage account name used to generate the shared access signature.
:param str file_system_name:
The name of the file system.
:param str directory_name:
The name of the directory.
:param str credential:
        The credential can be either an account key or a user delegation key.
        If an account key is used as the credential, the credential type should be a str.
        Instead of an account key, the user can also pass in a user delegation key.
A user delegation key can be obtained from the service by authenticating with an AAD identity;
this can be accomplished
by calling :func:`~azure.storage.filedatalake.DataLakeServiceClient.get_user_delegation_key`.
When present, the SAS is signed with the user delegation key instead.
:type credential: str or ~azure.storage.filedatalake.UserDelegationKey
:param permission:
The permissions associated with the shared access signature. The
user is restricted to operations allowed by the permissions.
Permissions must be ordered racwdlmeop.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has been
specified in an associated stored access policy.
:type permission: str or ~azure.storage.filedatalake.FileSasPermissions
:param expiry:
The time at which the shared access signature becomes invalid.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has
been specified in an associated stored access policy. Azure will always
convert values to UTC. If a date is passed in without timezone info, it
is assumed to be UTC.
:type expiry: ~datetime.datetime or str
:keyword start:
The time at which the shared access signature becomes valid. If
omitted, start time for this call is assumed to be the time when the
storage service receives the request. Azure will always convert values
to UTC. If a date is passed in without timezone info, it is assumed to
be UTC.
:paramtype start: ~datetime.datetime or str
:keyword str ip:
Specifies an IP address or a range of IP addresses from which to accept requests.
If the IP address from which the request originates does not match the IP address
or address range specified on the SAS token, the request is not authenticated.
For example, specifying ip=168.1.5.65 or ip=168.1.5.60-168.1.5.70 on the SAS
restricts the request to those IP addresses.
:keyword str protocol:
Specifies the protocol permitted for a request made. The default value is https.
:keyword str cache_control:
Response header value for Cache-Control when resource is accessed
using this shared access signature.
:keyword str content_disposition:
Response header value for Content-Disposition when resource is accessed
using this shared access signature.
:keyword str content_encoding:
Response header value for Content-Encoding when resource is accessed
using this shared access signature.
:keyword str content_language:
Response header value for Content-Language when resource is accessed
using this shared access signature.
:keyword str content_type:
Response header value for Content-Type when resource is accessed
using this shared access signature.
:keyword str preauthorized_agent_object_id:
The AAD object ID of a user assumed to be authorized by the owner of the user delegation key to perform
the action granted by the SAS token. The service will validate the SAS token and ensure that the owner of the
user delegation key has the required permissions before granting access but no additional permission check for
the agent object id will be performed.
:keyword str agent_object_id:
The AAD object ID of a user assumed to be unauthorized by the owner of the user delegation key to
perform the action granted by the SAS token. The service will validate the SAS token and ensure that the owner
of the user delegation key has the required permissions before granting access and the service will perform an
additional POSIX ACL check to determine if this user is authorized to perform the requested operation.
:keyword str correlation_id:
The correlation id to correlate the storage audit logs with the audit logs used by the principal
generating and distributing the SAS.
:keyword str encryption_scope:
Specifies the encryption scope for a request made so that all write operations will be service encrypted.
:return: A Shared Access Signature (sas) token.
:rtype: str
"""
depth = len(directory_name.strip("/").split("/"))
return generate_blob_sas(
account_name=account_name,
container_name=file_system_name,
blob_name=directory_name,
account_key=credential if isinstance(credential, str) else None,
user_delegation_key=credential if not isinstance(credential, str) else None,
permission=permission,
expiry=expiry,
sdd=depth,
is_directory=True,
**kwargs)
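# Illustrative helper, not part of the original module: a minimal sketch of a directory SAS.
# Note that the directory depth (sdd) and the is_directory flag are derived above, so the
# caller does not supply them; names below are assumed placeholders.
def _example_directory_sas():
    from datetime import datetime, timedelta
    from azure.storage.filedatalake import FileSasPermissions
    return generate_directory_sas(
        account_name="myaccount",           # assumed placeholder
        file_system_name="my-file-system",  # assumed placeholder
        directory_name="folder/subfolder",  # assumed placeholder
        credential="<account-key>",         # assumed placeholder
        permission=FileSasPermissions(read=True, write=True),
        expiry=datetime.utcnow() + timedelta(hours=1),
    )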
def generate_file_sas(
account_name, # type: str
file_system_name, # type: str
directory_name, # type: str
file_name, # type: str
credential, # type: Union[str, UserDelegationKey]
permission=None, # type: Optional[Union[FileSasPermissions, str]]
expiry=None, # type: Optional[Union[datetime, str]]
**kwargs # type: Any
):
# type: (...) -> str
"""Generates a shared access signature for a file.
    Use the returned signature with the credential parameter of any DataLakeServiceClient,
FileSystemClient, DataLakeDirectoryClient or DataLakeFileClient.
:param str account_name:
The storage account name used to generate the shared access signature.
:param str file_system_name:
The name of the file system.
:param str directory_name:
The name of the directory.
:param str file_name:
The name of the file.
:param str credential:
        The credential can be either an account key or a user delegation key.
        If an account key is used as the credential, the credential type should be a str.
        Instead of an account key, the user can also pass in a user delegation key.
A user delegation key can be obtained from the service by authenticating with an AAD identity;
this can be accomplished
by calling :func:`~azure.storage.filedatalake.DataLakeServiceClient.get_user_delegation_key`.
When present, the SAS is signed with the user delegation key instead.
:type credential: str or ~azure.storage.filedatalake.UserDelegationKey
:param permission:
The permissions associated with the shared access signature. The
user is restricted to operations allowed by the permissions.
Permissions must be ordered racwdlmeop.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has been
specified in an associated stored access policy.
:type permission: str or ~azure.storage.filedatalake.FileSasPermissions
:param expiry:
The time at which the shared access signature becomes invalid.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has
been specified in an associated stored access policy. Azure will always
convert values to UTC. If a date is passed in without timezone info, it
is assumed to be UTC.
:type expiry: ~datetime.datetime or str
:keyword start:
The time at which the shared access signature becomes valid. If
omitted, start time for this call is assumed to be the time when the
storage service receives the request. Azure will always convert values
to UTC. If a date is passed in without timezone info, it is assumed to
be UTC.
:paramtype start: ~datetime.datetime or str
:keyword str ip:
Specifies an IP address or a range of IP addresses from which to accept requests.
If the IP address from which the request originates does not match the IP address
or address range specified on the SAS token, the request is not authenticated.
For example, specifying ip=168.1.5.65 or ip=168.1.5.60-168.1.5.70 on the SAS
restricts the request to those IP addresses.
:keyword str protocol:
Specifies the protocol permitted for a request made. The default value is https.
:keyword str cache_control:
Response header value for Cache-Control when resource is accessed
using this shared access signature.
:keyword str content_disposition:
Response header value for Content-Disposition when resource is accessed
using this shared access signature.
:keyword str content_encoding:
Response header value for Content-Encoding when resource is accessed
using this shared access signature.
:keyword str content_language:
Response header value for Content-Language when resource is accessed
using this shared access signature.
:keyword str content_type:
Response header value for Content-Type when resource is accessed
using this shared access signature.
:keyword str preauthorized_agent_object_id:
The AAD object ID of a user assumed to be authorized by the owner of the user delegation key to perform
the action granted by the SAS token. The service will validate the SAS token and ensure that the owner of the
user delegation key has the required permissions before granting access but no additional permission check for
the agent object id will be performed.
:keyword str agent_object_id:
The AAD object ID of a user assumed to be unauthorized by the owner of the user delegation key to
perform the action granted by the SAS token. The service will validate the SAS token and ensure that the owner
of the user delegation key has the required permissions before granting access and the service will perform an
additional POSIX ACL check to determine if this user is authorized to perform the requested operation.
:keyword str correlation_id:
The correlation id to correlate the storage audit logs with the audit logs used by the principal
        generating and distributing the SAS. This can only be used when generating a SAS with a user delegation key.
:keyword str encryption_scope:
Specifies the encryption scope for a request made so that all write operations will be service encrypted.
:return: A Shared Access Signature (sas) token.
:rtype: str
"""
if directory_name:
path = directory_name.rstrip('/') + "/" + file_name
else:
path = file_name
return generate_blob_sas(
account_name=account_name,
container_name=file_system_name,
blob_name=path,
account_key=credential if isinstance(credential, str) else None,
user_delegation_key=credential if not isinstance(credential, str) else None,
permission=permission,
expiry=expiry,
**kwargs)
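# Illustrative helper, not part of the original module: a minimal sketch of a single-file
# SAS; the resulting token can be passed as the credential of a DataLakeFileClient. All
# names below are assumed placeholders.
def _example_file_sas():
    from datetime import datetime, timedelta
    from azure.storage.filedatalake import FileSasPermissions
    return generate_file_sas(
        account_name="myaccount",           # assumed placeholder
        file_system_name="my-file-system",  # assumed placeholder
        directory_name="folder",            # assumed placeholder
        file_name="data.csv",               # assumed placeholder
        credential="<account-key>",         # assumed placeholder
        permission=FileSasPermissions(read=True),
        expiry=datetime.utcnow() + timedelta(hours=1),
    )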
| {
"content_hash": "d9949409657237071d1db7491bdcb64c",
"timestamp": "",
"source": "github",
"line_count": 396,
"max_line_length": 118,
"avg_line_length": 54.64646464646464,
"alnum_prop": 0.711275415896488,
"repo_name": "Azure/azure-sdk-for-python",
"id": "6555dce5d2ec590f0db8ef5068e2c1f616bd56dd",
"size": "21950",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/_shared_access_signature.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |